| column | dtype | range / classes |
|---|---|---|
| repo_name | string | lengths 5 to 92 |
| path | string | lengths 4 to 221 |
| copies | string | 19 classes |
| size | string | lengths 4 to 6 |
| content | string | lengths 766 to 896k |
| license | string | 15 classes |
| hash | int64 | -9,223,277,421,539,062,000 to 9,223,102,107B |
| line_mean | float64 | 6.51 to 99.9 |
| line_max | int64 | 32 to 997 |
| alpha_frac | float64 | 0.25 to 0.96 |
| autogenerated | bool | 1 class |
| ratio | float64 | 1.5 to 13.6 |
| config_test | bool | 2 classes |
| has_no_keywords | bool | 2 classes |
| few_assignments | bool | 1 class |
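The boolean and numeric columns are per-file heuristics (alphabetic-character fraction, a compression-like ratio, longest line length, autogeneration and config/test flags). As a minimal sketch of how such rows could be filtered, assuming each row is available as a plain Python dict keyed by the column names above: the helper name and the thresholds below are illustrative, not part of the dataset itself.

```python
# Hypothetical filtering sketch: `keep_row` and its thresholds are illustrative
# assumptions, not the dataset's own tooling. Each row is assumed to be a plain
# dict using the column names from the schema table above.

def keep_row(row):
    """Keep rows that look like ordinary, hand-written source files."""
    return (
        not row["autogenerated"]                 # drop generated code
        and not row["config_test"]               # drop config/test-like files
        and 0.25 <= row["alpha_frac"] <= 0.96    # sane share of alphabetic characters
        and row["line_max"] <= 997               # no extremely long single line
        and 1.5 <= row["ratio"] <= 13.6          # within the range reported above
    )

# Example using the metadata of the first row below (pyepvp/session.py).
example_row = {
    "autogenerated": False,
    "config_test": False,
    "alpha_frac": 0.570671,
    "line_max": 130,
    "ratio": 3.89127,
}
print(keep_row(example_row))  # -> True
```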
repo_name: Der-Eddy/pyepvp | path: pyepvp/session.py | copies: 1 | size: 4903 | content:
import requests
import cfscrape
import xmlrpc.client
import hashlib
import time
import platform
import json
import os
import sys
import logging
from .regexp import *
from .exceptions import *
from .parser import *
from .tapatalk import *
from .user import *
from . import __version__, __title__, __author__
class session:
'''
    Needed for several methods; logs into an elitepvpers user account and provides useful information about that account.
'''
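    # Illustrative usage (not part of the original module; the credentials below are
    # placeholders):
    #
    #     with session('SomeUser', 'SomePassword') as s:
    #         print(s.elite_gold)
    #
    #     guest = session()   # no credentials -> read-only guest session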
system = platform.system()
userAgent = "{0}:{1}:{2} (by {3})".format(system.lower(), __title__, __version__, __author__)
sess = cfscrape.create_scraper()
sess.headers = {
"User-Agent" : userAgent,
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
"Accept-Encoding": "gzip,deflate,br",
"Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.3",
"Content-Type": "application/x-www-form-urlencoded"
}
username = ""
guestSession = False
securityToken = ""
logoutHash = ""
secretWord = None
userID = ""
ranks = ["guest"]
paramsGet = "&langid=1"
refreshTime = 30
_notifications = {'last_update': 0,
'unread_private_messages': 0,
'unread_vistor_messages': 0,
'unapproved_visitor_messages': 0,
'incoming_friend_requests': 0,
'groups_request': 0,
'groups_invitations': 0,
'unread_picture_comments': 0,
'unapproved_picture_comments': 0,
'unapproved_group_messages': 0,
'new_mentions': 0,
'new_post_quotes': 0,
'staff_changes': 0,
'subscribed_threads': 0}
_elite_gold = 0
@property
def elite_gold(self):
if self._notifications['last_update'] + self.refreshTime < time.time():
self.updateNotifications()
return self._elite_gold
@elite_gold.setter
def elite_gold(self, value):
        self._elite_gold = value
def __enter__(self):
return self
def __init__(self, uname='guest', passwd=None, md5bool=False, secretWord=None):
logging.info("Running on" + systemInfo())
if passwd is not None: #Checks if User Session
if md5bool == True:
md5 = passwd
else:
                md5 = hashlib.md5(passwd.encode("utf-8")).hexdigest()
self.username = uname
self._login(uname, md5)
if secretWord is not None:
self.secretWord = secretWord
elif uname == "guest": #Checks if Guest Session
self.username = "guest"
self.guestSession = True
self.securityToken = "guest"
else:
raise noAuthenticationException()
def __exit__(self, *kwargs):
self.__del__()
def __del__(self):
try:
self._logout()
except Exception:
pass
def _login(self, uname, md5):
loginnurl = "https://www.elitepvpers.com/forum/login.php?do=login" + self.paramsGet
params = {
"do": "login",
"vb_login_md5password": md5,
"vb_login_md5password_utf": md5,
"s": "",
"cookieuser": "1",
"vb_login_username": uname,
"security_token": "guest"
}
params = dicttostr(params)
r = self.sess.post(loginnurl, data=params, verify=True)
content = parser(self, "https://www.elitepvpers.com/forum/usercp.php")
self.securityToken = securityTokenParser(content)
self.logoutHash = logoutHashParser(content)
if self.securityToken == "guest":
raise invalidAuthenticationException()
self.userID = userIDParser(content)
usercontent = parser(self, "https://www.elitepvpers.com/forum/member.php?userid=" + self.userID)
self.ranks = rankParser(usercontent)
logging.info("User-Session created: {0}:{1}:{2}".format(self.username, self.userID, self.ranks))
self.updateNotifications()
self.tapatalk = tapatalk(uname, md5)
def logout(self):
'''
        Logs out the user session and destroys it.
'''
self.__del__()
def _logout(self):
self.sess.get("https://www.elitepvpers.com/forum/login.php?do=logout&logouthash=" + self.logoutHash)
self.tapatalk.logout()
def updateNotifications(self):
'''
Updates the notifications of the session user and returns them.
'''
url = 'https://www.elitepvpers.com/forum/usercp.php'
        getUpdates(self, url)
self._notifications['last_update'] = time.time()
logging.info('Updated notifications - {0}'.format(time.time()))
return self._notifications
license: mit | hash: -5,756,216,365,573,822,000 | line_mean: 33.286713 | line_max: 130 | alpha_frac: 0.570671 | autogenerated: false | ratio: 3.89127 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: rschnapka/odoo | path: addons/purchase/purchase.py | copies: 1 | size: 70094 | content:
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import pytz
from openerp import SUPERUSER_ID
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp.osv import fields, osv
from openerp import netsvc
from openerp import pooler
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp.osv.orm import browse_record, browse_null
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP
from openerp.tools.float_utils import float_compare
class purchase_order(osv.osv):
def _amount_all(self, cr, uid, ids, field_name, arg, context=None):
res = {}
cur_obj=self.pool.get('res.currency')
for order in self.browse(cr, uid, ids, context=context):
res[order.id] = {
'amount_untaxed': 0.0,
'amount_tax': 0.0,
'amount_total': 0.0,
}
val = val1 = 0.0
cur = order.pricelist_id.currency_id
for line in order.order_line:
val1 += line.price_subtotal
for c in self.pool.get('account.tax').compute_all(cr, uid, line.taxes_id, line.price_unit, line.product_qty, line.product_id, order.partner_id)['taxes']:
val += c.get('amount', 0.0)
res[order.id]['amount_tax']=cur_obj.round(cr, uid, cur, val)
res[order.id]['amount_untaxed']=cur_obj.round(cr, uid, cur, val1)
res[order.id]['amount_total']=res[order.id]['amount_untaxed'] + res[order.id]['amount_tax']
return res
def _set_minimum_planned_date(self, cr, uid, ids, name, value, arg, context=None):
if not value: return False
if type(ids)!=type([]):
ids=[ids]
for po in self.browse(cr, uid, ids, context=context):
if po.order_line:
cr.execute("""update purchase_order_line set
date_planned=%s
where
order_id=%s and
(date_planned=%s or date_planned<%s)""", (value,po.id,po.minimum_planned_date,value))
cr.execute("""update purchase_order set
minimum_planned_date=%s where id=%s""", (value, po.id))
return True
def _minimum_planned_date(self, cr, uid, ids, field_name, arg, context=None):
res={}
purchase_obj=self.browse(cr, uid, ids, context=context)
for purchase in purchase_obj:
res[purchase.id] = False
if purchase.order_line:
min_date=purchase.order_line[0].date_planned
for line in purchase.order_line:
if line.date_planned < min_date:
min_date=line.date_planned
res[purchase.id]=min_date
return res
def _invoiced_rate(self, cursor, user, ids, name, arg, context=None):
res = {}
for purchase in self.browse(cursor, user, ids, context=context):
tot = 0.0
for invoice in purchase.invoice_ids:
if invoice.state not in ('draft','cancel'):
tot += invoice.amount_untaxed
if purchase.amount_untaxed:
res[purchase.id] = tot * 100.0 / purchase.amount_untaxed
else:
res[purchase.id] = 0.0
return res
def _shipped_rate(self, cr, uid, ids, name, arg, context=None):
if not ids: return {}
res = {}
for id in ids:
res[id] = [0.0,0.0]
cr.execute('''SELECT
p.purchase_id,sum(m.product_qty), m.state
FROM
stock_move m
LEFT JOIN
stock_picking p on (p.id=m.picking_id)
WHERE
p.purchase_id IN %s GROUP BY m.state, p.purchase_id''',(tuple(ids),))
for oid,nbr,state in cr.fetchall():
if state=='cancel':
continue
if state=='done':
res[oid][0] += nbr or 0.0
res[oid][1] += nbr or 0.0
else:
res[oid][1] += nbr or 0.0
for r in res:
if not res[r][1]:
res[r] = 0.0
else:
res[r] = 100.0 * res[r][0] / res[r][1]
return res
def _get_order(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('purchase.order.line').browse(cr, uid, ids, context=context):
result[line.order_id.id] = True
return result.keys()
def _invoiced(self, cursor, user, ids, name, arg, context=None):
res = {}
for purchase in self.browse(cursor, user, ids, context=context):
res[purchase.id] = all(line.invoiced for line in purchase.order_line)
return res
def _get_journal(self, cr, uid, context=None):
if context is None:
context = {}
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
company_id = context.get('company_id', user.company_id.id)
journal_obj = self.pool.get('account.journal')
res = journal_obj.search(cr, uid, [('type', '=', 'purchase'),
('company_id', '=', company_id)],
limit=1)
return res and res[0] or False
STATE_SELECTION = [
('draft', 'Draft PO'),
('sent', 'RFQ Sent'),
('confirmed', 'Waiting Approval'),
('approved', 'Purchase Order'),
('except_picking', 'Shipping Exception'),
('except_invoice', 'Invoice Exception'),
('done', 'Done'),
('cancel', 'Cancelled')
]
_track = {
'state': {
'purchase.mt_rfq_confirmed': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'confirmed',
'purchase.mt_rfq_approved': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'approved',
},
}
_columns = {
'name': fields.char('Order Reference', size=64, required=True, select=True, help="Unique number of the purchase order, computed automatically when the purchase order is created."),
'origin': fields.char('Source Document', size=64,
help="Reference of the document that generated this purchase order request; a sales order or an internal procurement request."
),
'partner_ref': fields.char('Supplier Reference', states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]}, size=64,
help="Reference of the sales order or quotation sent by your supplier. It's mainly used to do the matching when you receive the products as this reference is usually written on the delivery order sent by your supplier."),
'date_order':fields.date('Order Date', required=True, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)]}, select=True, help="Date on which this document has been created."),
'date_approve':fields.date('Date Approved', readonly=1, select=True, help="Date on which purchase order has been approved"),
'partner_id':fields.many2one('res.partner', 'Supplier', required=True, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]},
change_default=True, track_visibility='always'),
'dest_address_id':fields.many2one('res.partner', 'Customer Address (Direct Delivery)',
states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]},
help="Put an address if you want to deliver directly from the supplier to the customer. " \
"Otherwise, keep empty to deliver to your own company."
),
'warehouse_id': fields.many2one('stock.warehouse', 'Destination Warehouse'),
'location_id': fields.many2one('stock.location', 'Destination', required=True, domain=[('usage','<>','view')], states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]} ),
'pricelist_id':fields.many2one('product.pricelist', 'Pricelist', required=True, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]}, help="The pricelist sets the currency used for this purchase order. It also computes the supplier price for the selected products/quantities."),
'currency_id': fields.related('pricelist_id', 'currency_id', type="many2one", relation="res.currency", string="Currency",readonly=True, required=True),
'state': fields.selection(STATE_SELECTION, 'Status', readonly=True, help="The status of the purchase order or the quotation request. A quotation is a purchase order in a 'Draft' status. Then the order has to be confirmed by the user, the status switch to 'Confirmed'. Then the supplier must confirm the order to change the status to 'Approved'. When the purchase order is paid and received, the status becomes 'Done'. If a cancel action occurs in the invoice or in the reception of goods, the status becomes in exception.", select=True),
'order_line': fields.one2many('purchase.order.line', 'order_id', 'Order Lines', states={'approved':[('readonly',True)],'done':[('readonly',True)]}),
'validator' : fields.many2one('res.users', 'Validated by', readonly=True),
'notes': fields.text('Terms and Conditions'),
'invoice_ids': fields.many2many('account.invoice', 'purchase_invoice_rel', 'purchase_id', 'invoice_id', 'Invoices', help="Invoices generated for a purchase order"),
'picking_ids': fields.one2many('stock.picking.in', 'purchase_id', 'Picking List', readonly=True, help="This is the list of incoming shipments that have been generated for this purchase order."),
'shipped':fields.boolean('Received', readonly=True, select=True, help="It indicates that a picking has been done"),
'shipped_rate': fields.function(_shipped_rate, string='Received Ratio', type='float'),
'invoiced': fields.function(_invoiced, string='Invoice Received', type='boolean', help="It indicates that an invoice has been validated"),
'invoiced_rate': fields.function(_invoiced_rate, string='Invoiced', type='float'),
'invoice_method': fields.selection([('manual','Based on Purchase Order lines'),('order','Based on generated draft invoice'),('picking','Based on incoming shipments')], 'Invoicing Control', required=True,
readonly=True, states={'draft':[('readonly',False)], 'sent':[('readonly',False)]},
help="Based on Purchase Order lines: place individual lines in 'Invoice Control > Based on P.O. lines' from where you can selectively create an invoice.\n" \
"Based on generated invoice: create a draft invoice you can validate later.\n" \
"Bases on incoming shipments: let you create an invoice when receptions are validated."
),
'minimum_planned_date':fields.function(_minimum_planned_date, fnct_inv=_set_minimum_planned_date, string='Expected Date', type='date', select=True, help="This is computed as the minimum scheduled date of all purchase order lines' products.",
store = {
'purchase.order.line': (_get_order, ['date_planned'], 10),
}
),
'amount_untaxed': fields.function(_amount_all, digits_compute= dp.get_precision('Account'), string='Untaxed Amount',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The amount without tax", track_visibility='always'),
'amount_tax': fields.function(_amount_all, digits_compute= dp.get_precision('Account'), string='Taxes',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The tax amount"),
'amount_total': fields.function(_amount_all, digits_compute= dp.get_precision('Account'), string='Total',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums",help="The total amount"),
'fiscal_position': fields.many2one('account.fiscal.position', 'Fiscal Position'),
'payment_term_id': fields.many2one('account.payment.term', 'Payment Term'),
'product_id': fields.related('order_line','product_id', type='many2one', relation='product.product', string='Product'),
'create_uid': fields.many2one('res.users', 'Responsible'),
'company_id': fields.many2one('res.company','Company',required=True,select=1, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)]}),
'journal_id': fields.many2one('account.journal', 'Journal'),
}
_defaults = {
'date_order': fields.date.context_today,
'state': 'draft',
'name': lambda obj, cr, uid, context: '/',
'shipped': 0,
'invoice_method': 'order',
'invoiced': 0,
'pricelist_id': lambda self, cr, uid, context: context.get('partner_id', False) and self.pool.get('res.partner').browse(cr, uid, context['partner_id']).property_product_pricelist_purchase.id,
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'purchase.order', context=c),
'journal_id': _get_journal,
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Order Reference must be unique per Company!'),
]
_name = "purchase.order"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_description = "Purchase Order"
_order = 'date_order desc, id desc'
def create(self, cr, uid, vals, context=None):
if vals.get('name','/')=='/':
vals['name'] = self.pool.get('ir.sequence').get(cr, uid, 'purchase.order') or '/'
order = super(purchase_order, self).create(cr, uid, vals, context=context)
return order
def unlink(self, cr, uid, ids, context=None):
purchase_orders = self.read(cr, uid, ids, ['state'], context=context)
unlink_ids = []
for s in purchase_orders:
if s['state'] in ['draft','cancel']:
unlink_ids.append(s['id'])
else:
raise osv.except_osv(_('Invalid Action!'), _('In order to delete a purchase order, you must cancel it first.'))
# automatically sending subflow.delete upon deletion
wf_service = netsvc.LocalService("workflow")
for id in unlink_ids:
wf_service.trg_validate(uid, 'purchase.order', id, 'purchase_cancel', cr)
return super(purchase_order, self).unlink(cr, uid, unlink_ids, context=context)
def set_order_line_status(self, cr, uid, ids, status, context=None):
line = self.pool.get('purchase.order.line')
order_line_ids = []
move_ids = []
proc_obj = self.pool.get('procurement.order')
for order in self.browse(cr, uid, ids, context=context):
order_line_ids += [po_line.id for po_line in order.order_line]
move_ids += [po_line.move_dest_id.id for po_line in order.order_line if po_line.move_dest_id]
if order_line_ids:
line.write(cr, uid, order_line_ids, {'state': status}, context=context)
if order_line_ids and status == 'cancel':
procs = proc_obj.search(cr, uid, [('move_id', 'in', move_ids)], context=context)
if procs:
proc_obj.write(cr, uid, procs, {'state': 'exception'}, context=context)
return True
def button_dummy(self, cr, uid, ids, context=None):
return True
def onchange_pricelist(self, cr, uid, ids, pricelist_id, context=None):
if not pricelist_id:
return {}
return {'value': {'currency_id': self.pool.get('product.pricelist').browse(cr, uid, pricelist_id, context=context).currency_id.id}}
def onchange_dest_address_id(self, cr, uid, ids, address_id):
if not address_id:
return {}
address = self.pool.get('res.partner')
values = {'warehouse_id': False}
supplier = address.browse(cr, uid, address_id)
if supplier:
location_id = supplier.property_stock_customer.id
values.update({'location_id': location_id})
return {'value':values}
def onchange_warehouse_id(self, cr, uid, ids, warehouse_id):
if not warehouse_id:
return {}
warehouse = self.pool.get('stock.warehouse').browse(cr, uid, warehouse_id)
return {'value':{'location_id': warehouse.lot_input_id.id, 'dest_address_id': False}}
def onchange_partner_id(self, cr, uid, ids, partner_id):
partner = self.pool.get('res.partner')
if not partner_id:
return {'value': {
'fiscal_position': False,
'payment_term_id': False,
}}
supplier_address = partner.address_get(cr, uid, [partner_id], ['default'])
supplier = partner.browse(cr, uid, partner_id)
return {'value': {
'pricelist_id': supplier.property_product_pricelist_purchase.id,
'fiscal_position': supplier.property_account_position and supplier.property_account_position.id or False,
'payment_term_id': supplier.property_supplier_payment_term.id or False,
}}
def invoice_open(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.get_object_reference(cr, uid, 'account', 'action_invoice_tree2')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
inv_ids = []
for po in self.browse(cr, uid, ids, context=context):
inv_ids+= [invoice.id for invoice in po.invoice_ids]
if not inv_ids:
raise osv.except_osv(_('Error!'), _('Please create Invoices.'))
#choose the view_mode accordingly
if len(inv_ids)>1:
result['domain'] = "[('id','in',["+','.join(map(str, inv_ids))+"])]"
else:
res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_supplier_form')
result['views'] = [(res and res[1] or False, 'form')]
result['res_id'] = inv_ids and inv_ids[0] or False
return result
def view_invoice(self, cr, uid, ids, context=None):
'''
        This function returns an action that displays the existing invoices of the given purchase order ids. It can show them either in a list or in a form view, if there is only one invoice to show.
'''
mod_obj = self.pool.get('ir.model.data')
wizard_obj = self.pool.get('purchase.order.line_invoice')
#compute the number of invoices to display
inv_ids = []
for po in self.browse(cr, uid, ids, context=context):
if po.invoice_method == 'manual':
if not po.invoice_ids:
context.update({'active_ids' : [line.id for line in po.order_line]})
wizard_obj.makeInvoices(cr, uid, [], context=context)
for po in self.browse(cr, uid, ids, context=context):
inv_ids+= [invoice.id for invoice in po.invoice_ids]
res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_supplier_form')
res_id = res and res[1] or False
return {
'name': _('Supplier Invoices'),
'view_type': 'form',
'view_mode': 'form',
'view_id': [res_id],
'res_model': 'account.invoice',
'context': "{'type':'in_invoice', 'journal_type': 'purchase'}",
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'current',
'res_id': inv_ids and inv_ids[0] or False,
}
def view_picking(self, cr, uid, ids, context=None):
'''
        This function returns an action that displays the existing picking orders of the given purchase order ids.
'''
mod_obj = self.pool.get('ir.model.data')
pick_ids = []
for po in self.browse(cr, uid, ids, context=context):
pick_ids += [picking.id for picking in po.picking_ids]
action_model, action_id = tuple(mod_obj.get_object_reference(cr, uid, 'stock', 'action_picking_tree4'))
action = self.pool.get(action_model).read(cr, uid, action_id, context=context)
ctx = eval(action['context'])
ctx.update({
'search_default_purchase_id': ids[0]
})
if pick_ids and len(pick_ids) == 1:
form_view_ids = [view_id for view_id, view in action['views'] if view == 'form']
view_id = form_view_ids and form_view_ids[0] or False
action.update({
'views': [],
'view_mode': 'form',
'view_id': view_id,
'res_id': pick_ids[0]
})
action.update({
'context': ctx,
})
return action
def wkf_approve_order(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'approved', 'date_approve': fields.date.context_today(self,cr,uid,context=context)})
return True
def print_confirm(self,cr,uid,ids,context=None):
print "Confirmed"
def print_double(self,cr,uid,ids,context=None):
print "double Approval"
def print_router(self,cr,uid,ids,context=None):
print "Routed"
def wkf_send_rfq(self, cr, uid, ids, context=None):
'''
This function opens a window to compose an email, with the edi purchase template message loaded by default
'''
ir_model_data = self.pool.get('ir.model.data')
try:
template_id = ir_model_data.get_object_reference(cr, uid, 'purchase', 'email_template_edi_purchase')[1]
except ValueError:
template_id = False
try:
compose_form_id = ir_model_data.get_object_reference(cr, uid, 'mail', 'email_compose_message_wizard_form')[1]
except ValueError:
compose_form_id = False
ctx = dict(context)
ctx.update({
'default_model': 'purchase.order',
'default_res_id': ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment',
})
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form_id, 'form')],
'view_id': compose_form_id,
'target': 'new',
'context': ctx,
}
def print_quotation(self, cr, uid, ids, context=None):
'''
        This function prints the request for quotation and marks it as sent, so that the next step of the workflow can be seen more easily
'''
assert len(ids) == 1, 'This option should only be used for a single id at a time'
wf_service = netsvc.LocalService("workflow")
wf_service.trg_validate(uid, 'purchase.order', ids[0], 'send_rfq', cr)
datas = {
'model': 'purchase.order',
'ids': ids,
'form': self.read(cr, uid, ids[0], context=context),
}
return {'type': 'ir.actions.report.xml', 'report_name': 'purchase.quotation', 'datas': datas, 'nodestroy': True}
#TODO: implement messages system
def wkf_confirm_order(self, cr, uid, ids, context=None):
todo = []
for po in self.browse(cr, uid, ids, context=context):
if not po.order_line:
raise osv.except_osv(_('Error!'),_('You cannot confirm a purchase order without any purchase order line.'))
for line in po.order_line:
if line.state=='draft':
todo.append(line.id)
self.pool.get('purchase.order.line').action_confirm(cr, uid, todo, context)
for id in ids:
self.write(cr, uid, [id], {'state' : 'confirmed', 'validator' : uid})
return True
def _choose_account_from_po_line(self, cr, uid, po_line, context=None):
fiscal_obj = self.pool.get('account.fiscal.position')
property_obj = self.pool.get('ir.property')
if po_line.product_id:
acc_id = po_line.product_id.property_account_expense.id
if not acc_id:
acc_id = po_line.product_id.categ_id.property_account_expense_categ.id
if not acc_id:
raise osv.except_osv(_('Error!'), _('Define expense account for this product: "%s" (id:%d).') % (po_line.product_id.name, po_line.product_id.id,))
else:
acc_id = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category', context=context).id
fpos = po_line.order_id.fiscal_position or False
return fiscal_obj.map_account(cr, uid, fpos, acc_id)
def _prepare_inv_line(self, cr, uid, account_id, order_line, context=None):
"""Collects require data from purchase order line that is used to create invoice line
for that purchase order line
:param account_id: Expense account of the product of PO line if any.
:param browse_record order_line: Purchase order line browse record
:return: Value for fields of invoice lines.
:rtype: dict
"""
return {
'name': order_line.name,
'account_id': account_id,
'price_unit': order_line.price_unit or 0.0,
'quantity': order_line.product_qty,
'product_id': order_line.product_id.id or False,
'uos_id': order_line.product_uom.id or False,
'invoice_line_tax_id': [(6, 0, [x.id for x in order_line.taxes_id])],
'account_analytic_id': order_line.account_analytic_id.id or False,
}
def action_cancel_draft(self, cr, uid, ids, context=None):
if not len(ids):
return False
self.write(cr, uid, ids, {'state':'draft','shipped':0})
for purchase in self.browse(cr, uid, ids, context=context):
self.pool['purchase.order.line'].write(cr, uid, [l.id for l in purchase.order_line], {'state': 'draft'})
wf_service = netsvc.LocalService("workflow")
for p_id in ids:
# Deleting the existing instance of workflow for PO
wf_service.trg_delete(uid, 'purchase.order', p_id, cr)
wf_service.trg_create(uid, 'purchase.order', p_id, cr)
return True
def action_invoice_create(self, cr, uid, ids, context=None):
"""Generates invoice for given ids of purchase orders and links that invoice ID to purchase order.
:param ids: list of ids of purchase orders.
:return: ID of created invoice.
:rtype: int
"""
if context is None:
context = {}
journal_obj = self.pool.get('account.journal')
inv_obj = self.pool.get('account.invoice')
inv_line_obj = self.pool.get('account.invoice.line')
res = False
uid_company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
for order in self.browse(cr, uid, ids, context=context):
context.pop('force_company', None)
if order.company_id.id != uid_company_id:
#if the company of the document is different than the current user company, force the company in the context
#then re-do a browse to read the property fields for the good company.
context['force_company'] = order.company_id.id
order = self.browse(cr, uid, order.id, context=context)
pay_acc_id = order.partner_id.property_account_payable.id
journal_ids = journal_obj.search(cr, uid, [('type', '=', 'purchase'), ('company_id', '=', order.company_id.id)], limit=1)
if not journal_ids:
raise osv.except_osv(_('Error!'),
_('Define purchase journal for this company: "%s" (id:%d).') % (order.company_id.name, order.company_id.id))
            # generate invoice lines corresponding to the PO lines and link them to the created invoice (inv_id) and the PO lines
inv_lines = []
for po_line in order.order_line:
acc_id = self._choose_account_from_po_line(cr, uid, po_line, context=context)
inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)
inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)
inv_lines.append(inv_line_id)
po_line.write({'invoice_lines': [(4, inv_line_id)]}, context=context)
# get invoice data and create invoice
inv_data = {
'name': order.partner_ref or order.name,
'reference': order.partner_ref or order.name,
'account_id': pay_acc_id,
'type': 'in_invoice',
'partner_id': order.partner_id.id,
'currency_id': order.pricelist_id.currency_id.id,
'journal_id': len(journal_ids) and journal_ids[0] or False,
'invoice_line': [(6, 0, inv_lines)],
'origin': order.name,
'fiscal_position': order.fiscal_position.id or False,
'payment_term': order.payment_term_id.id or False,
'company_id': order.company_id.id,
}
inv_id = inv_obj.create(cr, uid, inv_data, context=context)
# compute the invoice
inv_obj.button_compute(cr, uid, [inv_id], context=context, set_total=True)
# Link this new invoice to related purchase order
order.write({'invoice_ids': [(4, inv_id)]}, context=context)
res = inv_id
return res
def invoice_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state':'approved'}, context=context)
return True
def has_stockable_product(self, cr, uid, ids, *args):
for order in self.browse(cr, uid, ids):
for order_line in order.order_line:
if order_line.product_id and order_line.product_id.type in ('product', 'consu'):
return True
return False
def wkf_action_cancel(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
self.set_order_line_status(cr, uid, ids, 'cancel', context=context)
def action_cancel(self, cr, uid, ids, context=None):
wf_service = netsvc.LocalService("workflow")
for purchase in self.browse(cr, uid, ids, context=context):
for pick in purchase.picking_ids:
if pick.state not in ('draft','cancel'):
raise osv.except_osv(
_('Unable to cancel this purchase order.'),
_('First cancel all receptions related to this purchase order.'))
for pick in purchase.picking_ids:
wf_service.trg_validate(uid, 'stock.picking', pick.id, 'button_cancel', cr)
for inv in purchase.invoice_ids:
if inv and inv.state not in ('cancel','draft'):
raise osv.except_osv(
_('Unable to cancel this purchase order.'),
_('You must first cancel all receptions related to this purchase order.'))
if inv:
wf_service.trg_validate(uid, 'account.invoice', inv.id, 'invoice_cancel', cr)
self.pool['purchase.order.line'].write(cr, uid, [l.id for l in purchase.order_line],
{'state': 'cancel'})
for id in ids:
wf_service.trg_validate(uid, 'purchase.order', id, 'purchase_cancel', cr)
return True
def date_to_datetime(self, cr, uid, userdate, context=None):
""" Convert date values expressed in user's timezone to
server-side UTC timestamp, assuming a default arbitrary
time of 12:00 AM - because a time is needed.
        :param str userdate: date string in the user's time zone
:return: UTC datetime string for server-side use
"""
# TODO: move to fields.datetime in server after 7.0
user_date = datetime.strptime(userdate, DEFAULT_SERVER_DATE_FORMAT)
if context and context.get('tz'):
tz_name = context['tz']
else:
tz_name = self.pool.get('res.users').read(cr, SUPERUSER_ID, uid, ['tz'])['tz']
if tz_name:
utc = pytz.timezone('UTC')
context_tz = pytz.timezone(tz_name)
user_datetime = user_date + relativedelta(hours=12.0)
local_timestamp = context_tz.localize(user_datetime, is_dst=False)
user_datetime = local_timestamp.astimezone(utc)
return user_datetime.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
return user_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
def _prepare_order_picking(self, cr, uid, order, context=None):
return {
'name': self.pool.get('ir.sequence').get(cr, uid, 'stock.picking.in'),
'origin': order.name + ((order.origin and (':' + order.origin)) or ''),
'date': self.date_to_datetime(cr, uid, order.date_order, context),
'partner_id': order.partner_id.id,
'invoice_state': '2binvoiced' if order.invoice_method == 'picking' else 'none',
'type': 'in',
'purchase_id': order.id,
'company_id': order.company_id.id,
'move_lines' : [],
}
def _prepare_order_line_move(self, cr, uid, order, order_line, picking_id, context=None):
price_unit = order_line.price_unit
if order.currency_id.id != order.company_id.currency_id.id:
#we don't round the price_unit, as we may want to store the standard price with more digits than allowed by the currency
price_unit = self.pool.get('res.currency').compute(cr, uid, order.currency_id.id, order.company_id.currency_id.id, price_unit, round=False, context=context)
return {
'name': order_line.name or '',
'product_id': order_line.product_id.id,
'product_qty': order_line.product_qty,
'product_uos_qty': order_line.product_qty,
'product_uom': order_line.product_uom.id,
'product_uos': order_line.product_uom.id,
'date': self.date_to_datetime(cr, uid, order.date_order, context),
'date_expected': self.date_to_datetime(cr, uid, order_line.date_planned, context),
'location_id': order.partner_id.property_stock_supplier.id,
'location_dest_id': order.location_id.id,
'picking_id': picking_id,
'partner_id': order.dest_address_id.id or order.partner_id.id,
'move_dest_id': order_line.move_dest_id.id,
'state': 'draft',
'type':'in',
'purchase_line_id': order_line.id,
'company_id': order.company_id.id,
'price_unit': price_unit
}
def _create_pickings(self, cr, uid, order, order_lines, picking_id=False, context=None):
"""Creates pickings and appropriate stock moves for given order lines, then
confirms the moves, makes them available, and confirms the picking.
If ``picking_id`` is provided, the stock moves will be added to it, otherwise
        a standard incoming picking will be created to wrap the stock moves, as returned
by :meth:`~._prepare_order_picking`.
Modules that wish to customize the procurements or partition the stock moves over
multiple stock pickings may override this method and call ``super()`` with
different subsets of ``order_lines`` and/or preset ``picking_id`` values.
:param browse_record order: purchase order to which the order lines belong
:param list(browse_record) order_lines: purchase order line records for which picking
and moves should be created.
:param int picking_id: optional ID of a stock picking to which the created stock moves
will be added. A new picking will be created if omitted.
:return: list of IDs of pickings used/created for the given order lines (usually just one)
"""
if not picking_id:
picking_id = self.pool.get('stock.picking').create(cr, uid, self._prepare_order_picking(cr, uid, order, context=context))
todo_moves = []
stock_move = self.pool.get('stock.move')
wf_service = netsvc.LocalService("workflow")
for order_line in order_lines:
if not order_line.product_id:
continue
if order_line.product_id.type in ('product', 'consu'):
move = stock_move.create(cr, uid, self._prepare_order_line_move(cr, uid, order, order_line, picking_id, context=context))
if order_line.move_dest_id and order_line.move_dest_id.state != 'done':
order_line.move_dest_id.write({'location_id': order.location_id.id})
todo_moves.append(move)
stock_move.action_confirm(cr, uid, todo_moves)
stock_move.force_assign(cr, uid, todo_moves)
wf_service.trg_validate(uid, 'stock.picking', picking_id, 'button_confirm', cr)
return [picking_id]
def action_picking_create(self, cr, uid, ids, context=None):
picking_ids = []
for order in self.browse(cr, uid, ids):
picking_ids.extend(self._create_pickings(cr, uid, order, order.order_line, None, context=context))
# Must return one unique picking ID: the one to connect in the subflow of the purchase order.
# In case of multiple (split) pickings, we should return the ID of the critical one, i.e. the
# one that should trigger the advancement of the purchase workflow.
# By default we will consider the first one as most important, but this behavior can be overridden.
return picking_ids[0] if picking_ids else False
def picking_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'shipped':1,'state':'approved'}, context=context)
return True
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
default.update({
'state':'draft',
'shipped':False,
'invoiced':False,
'invoice_ids': [],
'picking_ids': [],
'partner_ref': '',
'name': self.pool.get('ir.sequence').get(cr, uid, 'purchase.order'),
})
return super(purchase_order, self).copy(cr, uid, id, default, context)
def do_merge(self, cr, uid, ids, context=None):
"""
To merge similar type of purchase orders.
Orders will only be merged if:
* Purchase Orders are in draft
* Purchase Orders belong to the same partner
        * Purchase Orders have the same stock location and the same pricelist
Lines will only be merged if:
* Order lines are exactly the same except for the quantity and unit
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: the ID or list of IDs
@param context: A standard dictionary
@return: new purchase order id
"""
        #TOFIX: merged order lines should be unlinked
wf_service = netsvc.LocalService("workflow")
def make_key(br, fields):
list_key = []
for field in fields:
field_val = getattr(br, field)
if field in ('product_id', 'move_dest_id', 'account_analytic_id'):
if not field_val:
field_val = False
if isinstance(field_val, browse_record):
field_val = field_val.id
elif isinstance(field_val, browse_null):
field_val = False
elif isinstance(field_val, list):
field_val = ((6, 0, tuple([v.id for v in field_val])),)
list_key.append((field, field_val))
list_key.sort()
return tuple(list_key)
# Compute what the new orders should contain
new_orders = {}
for porder in [order for order in self.browse(cr, uid, ids, context=context) if order.state == 'draft']:
order_key = make_key(porder, ('partner_id', 'location_id', 'pricelist_id'))
new_order = new_orders.setdefault(order_key, ({}, []))
new_order[1].append(porder.id)
order_infos = new_order[0]
if not order_infos:
order_infos.update({
'origin': porder.origin,
'date_order': porder.date_order,
'partner_id': porder.partner_id.id,
'dest_address_id': porder.dest_address_id.id,
'warehouse_id': porder.warehouse_id.id,
'location_id': porder.location_id.id,
'pricelist_id': porder.pricelist_id.id,
'state': 'draft',
'order_line': {},
'notes': '%s' % (porder.notes or '',),
'fiscal_position': porder.fiscal_position and porder.fiscal_position.id or False,
})
else:
if porder.date_order < order_infos['date_order']:
order_infos['date_order'] = porder.date_order
if porder.notes:
order_infos['notes'] = (order_infos['notes'] or '') + ('\n%s' % (porder.notes,))
if porder.origin:
order_infos['origin'] = (order_infos['origin'] or '') + ' ' + porder.origin
for order_line in porder.order_line:
line_key = make_key(order_line, ('name', 'date_planned', 'taxes_id', 'price_unit', 'product_id', 'move_dest_id', 'account_analytic_id'))
o_line = order_infos['order_line'].setdefault(line_key, {})
if o_line:
# merge the line with an existing line
o_line['product_qty'] += order_line.product_qty * order_line.product_uom.factor / o_line['uom_factor']
else:
# append a new "standalone" line
for field in ('product_qty', 'product_uom'):
field_val = getattr(order_line, field)
if isinstance(field_val, browse_record):
field_val = field_val.id
o_line[field] = field_val
o_line['uom_factor'] = order_line.product_uom and order_line.product_uom.factor or 1.0
allorders = []
orders_info = {}
for order_key, (order_data, old_ids) in new_orders.iteritems():
# skip merges with only one order
if len(old_ids) < 2:
allorders += (old_ids or [])
continue
# cleanup order line data
for key, value in order_data['order_line'].iteritems():
del value['uom_factor']
value.update(dict(key))
order_data['order_line'] = [(0, 0, value) for value in order_data['order_line'].itervalues()]
# create the new order
neworder_id = self.create(cr, uid, order_data)
orders_info.update({neworder_id: old_ids})
allorders.append(neworder_id)
# make triggers pointing to the old orders point to the new order
for old_id in old_ids:
wf_service.trg_redirect(uid, 'purchase.order', old_id, neworder_id, cr)
wf_service.trg_validate(uid, 'purchase.order', old_id, 'purchase_cancel', cr)
return orders_info
class purchase_order_line(osv.osv):
def _amount_line(self, cr, uid, ids, prop, arg, context=None):
res = {}
cur_obj=self.pool.get('res.currency')
tax_obj = self.pool.get('account.tax')
for line in self.browse(cr, uid, ids, context=context):
taxes = tax_obj.compute_all(cr, uid, line.taxes_id, line.price_unit, line.product_qty, line.product_id, line.order_id.partner_id)
cur = line.order_id.pricelist_id.currency_id
res[line.id] = cur_obj.round(cr, uid, cur, taxes['total'])
return res
def _get_uom_id(self, cr, uid, context=None):
try:
proxy = self.pool.get('ir.model.data')
result = proxy.get_object_reference(cr, uid, 'product', 'product_uom_unit')
return result[1]
except Exception, ex:
return False
_columns = {
'name': fields.text('Description', required=True),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'date_planned': fields.date('Scheduled Date', required=True, select=True),
'taxes_id': fields.many2many('account.tax', 'purchase_order_taxe', 'ord_id', 'tax_id', 'Taxes'),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'product_id': fields.many2one('product.product', 'Product', domain=[('purchase_ok','=',True)], change_default=True),
'move_ids': fields.one2many('stock.move', 'purchase_line_id', 'Reservation', readonly=True, ondelete='set null'),
'move_dest_id': fields.many2one('stock.move', 'Reservation Destination', ondelete='set null'),
'price_unit': fields.float('Unit Price', required=True, digits_compute= dp.get_precision('Product Price')),
'price_subtotal': fields.function(_amount_line, string='Subtotal', digits_compute= dp.get_precision('Account')),
'order_id': fields.many2one('purchase.order', 'Order Reference', select=True, required=True, ondelete='cascade'),
'account_analytic_id':fields.many2one('account.analytic.account', 'Analytic Account',),
'company_id': fields.related('order_id','company_id',type='many2one',relation='res.company',string='Company', store=True, readonly=True),
'state': fields.selection([('draft', 'Draft'), ('confirmed', 'Confirmed'), ('done', 'Done'), ('cancel', 'Cancelled')], 'Status', required=True, readonly=True,
help=' * The \'Draft\' status is set automatically when purchase order in draft status. \
\n* The \'Confirmed\' status is set automatically as confirm when purchase order in confirm status. \
\n* The \'Done\' status is set automatically when purchase order is set as done. \
\n* The \'Cancelled\' status is set automatically when user cancel purchase order.'),
'invoice_lines': fields.many2many('account.invoice.line', 'purchase_order_line_invoice_rel', 'order_line_id', 'invoice_id', 'Invoice Lines', readonly=True),
'invoiced': fields.boolean('Invoiced', readonly=True),
'partner_id': fields.related('order_id','partner_id',string='Partner',readonly=True,type="many2one", relation="res.partner", store=True),
'date_order': fields.related('order_id','date_order',string='Order Date',readonly=True,type="date")
}
_defaults = {
'product_uom' : _get_uom_id,
'product_qty': lambda *a: 1.0,
'state': lambda *args: 'draft',
'invoiced': lambda *a: 0,
}
_table = 'purchase_order_line'
_name = 'purchase.order.line'
_description = 'Purchase Order Line'
def copy_data(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
default.update({'state':'draft', 'move_ids':[],'invoiced':0,'invoice_lines':[]})
return super(purchase_order_line, self).copy_data(cr, uid, id, default, context)
def unlink(self, cr, uid, ids, context=None):
procurement_ids_to_cancel = []
for line in self.browse(cr, uid, ids, context=context):
if line.state not in ['draft', 'cancel']:
raise osv.except_osv(_('Invalid Action!'), _('Cannot delete a purchase order line which is in state \'%s\'.') %(line.state,))
if line.move_dest_id:
procurement_ids_to_cancel.extend(procurement.id for procurement in line.move_dest_id.procurements)
if procurement_ids_to_cancel:
self.pool['procurement.order'].action_cancel(cr, uid, procurement_ids_to_cancel)
return super(purchase_order_line, self).unlink(cr, uid, ids, context=context)
def onchange_product_uom(self, cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
name=False, price_unit=False, context=None):
"""
onchange handler of product_uom.
"""
if context is None:
context = {}
if not uom_id:
return {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom_id or False}}
context = dict(context, purchase_uom_check=True)
return self.onchange_product_id(cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=date_order, fiscal_position_id=fiscal_position_id, date_planned=date_planned,
name=name, price_unit=price_unit, context=context)
def _get_date_planned(self, cr, uid, supplier_info, date_order_str, context=None):
"""Return the datetime value to use as Schedule Date (``date_planned``) for
PO Lines that correspond to the given product.supplierinfo,
when ordered at `date_order_str`.
:param browse_record | False supplier_info: product.supplierinfo, used to
determine delivery delay (if False, default delay = 0)
:param str date_order_str: date of order, as a string in
DEFAULT_SERVER_DATE_FORMAT
:rtype: datetime
:return: desired Schedule Date for the PO line
"""
supplier_delay = int(supplier_info.delay) if supplier_info else 0
return datetime.strptime(date_order_str, DEFAULT_SERVER_DATE_FORMAT) + relativedelta(days=supplier_delay)
def _check_product_uom_group(self, cr, uid, context=None):
group_uom = self.pool.get('ir.model.data').get_object(cr, uid, 'product', 'group_uom')
res = [user for user in group_uom.users if user.id == uid]
return len(res) and True or False
def onchange_product_id(self, cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
name=False, price_unit=False, context=None):
"""
onchange handler of product_id.
"""
if context is None:
context = {}
res = {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom_id or False}}
if not product_id:
return res
product_product = self.pool.get('product.product')
product_uom = self.pool.get('product.uom')
res_partner = self.pool.get('res.partner')
product_supplierinfo = self.pool.get('product.supplierinfo')
product_pricelist = self.pool.get('product.pricelist')
account_fiscal_position = self.pool.get('account.fiscal.position')
account_tax = self.pool.get('account.tax')
# - check for the presence of partner_id and pricelist_id
#if not partner_id:
# raise osv.except_osv(_('No Partner!'), _('Select a partner in purchase order to choose a product.'))
#if not pricelist_id:
# raise osv.except_osv(_('No Pricelist !'), _('Select a price list in the purchase order form before choosing a product.'))
# - determine name and notes based on product in partner lang.
context_partner = context.copy()
if partner_id:
lang = res_partner.browse(cr, uid, partner_id).lang
context_partner.update( {'lang': lang, 'partner_id': partner_id} )
product = product_product.browse(cr, uid, product_id, context=context_partner)
#call name_get() with partner in the context to eventually match name and description in the seller_ids field
dummy, name = product_product.name_get(cr, uid, product_id, context=context_partner)[0]
if product.description_purchase:
name += '\n' + product.description_purchase
res['value'].update({'name': name})
# - set a domain on product_uom
res['domain'] = {'product_uom': [('category_id','=',product.uom_id.category_id.id)]}
# - check that uom and product uom belong to the same category
product_uom_po_id = product.uom_po_id.id
if not uom_id:
uom_id = product_uom_po_id
if product.uom_id.category_id.id != product_uom.browse(cr, uid, uom_id, context=context).category_id.id:
if context.get('purchase_uom_check') and self._check_product_uom_group(cr, uid, context=context):
res['warning'] = {'title': _('Warning!'), 'message': _('Selected Unit of Measure does not belong to the same category as the product Unit of Measure.')}
uom_id = product_uom_po_id
res['value'].update({'product_uom': uom_id})
# - determine product_qty and date_planned based on seller info
if not date_order:
date_order = fields.date.context_today(self,cr,uid,context=context)
supplierinfo = False
precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Product Unit of Measure')
for supplier in product.seller_ids:
if partner_id and (supplier.name.id == partner_id):
supplierinfo = supplier
if supplierinfo.product_uom.id != uom_id:
res['warning'] = {'title': _('Warning!'), 'message': _('The selected supplier only sells this product by %s') % supplierinfo.product_uom.name }
min_qty = product_uom._compute_qty(cr, uid, supplierinfo.product_uom.id, supplierinfo.min_qty, to_uom_id=uom_id)
if float_compare(min_qty , qty, precision_digits=precision) == 1: # If the supplier quantity is greater than entered from user, set minimal.
if qty:
res['warning'] = {'title': _('Warning!'), 'message': _('The selected supplier has a minimal quantity set to %s %s, you should not purchase less.') % (supplierinfo.min_qty, supplierinfo.product_uom.name)}
qty = min_qty
dt = self._get_date_planned(cr, uid, supplierinfo, date_order, context=context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
qty = qty or 1.0
res['value'].update({'date_planned': date_planned or dt})
if qty:
res['value'].update({'product_qty': qty})
# - determine price_unit and taxes_id
if pricelist_id:
price = product_pricelist.price_get(cr, uid, [pricelist_id],
product.id, qty or 1.0, partner_id or False, {'uom': uom_id, 'date': date_order})[pricelist_id]
else:
price = product.standard_price
taxes = account_tax.browse(cr, uid, map(lambda x: x.id, product.supplier_taxes_id))
fpos = fiscal_position_id and account_fiscal_position.browse(cr, uid, fiscal_position_id, context=context) or False
taxes_ids = account_fiscal_position.map_tax(cr, uid, fpos, taxes)
res['value'].update({'price_unit': price, 'taxes_id': taxes_ids})
return res
product_id_change = onchange_product_id
product_uom_change = onchange_product_uom
def action_confirm(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'confirmed'}, context=context)
return True
purchase_order_line()
class procurement_order(osv.osv):
_inherit = 'procurement.order'
_columns = {
'purchase_id': fields.many2one('purchase.order', 'Purchase Order'),
}
def check_buy(self, cr, uid, ids, context=None):
''' return True if the supply method of the mto product is 'buy'
'''
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
for procurement in self.browse(cr, uid, ids, context=context):
if procurement.product_id.supply_method <> 'buy':
return False
return True
def check_supplier_info(self, cr, uid, ids, context=None):
partner_obj = self.pool.get('res.partner')
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
for procurement in self.browse(cr, uid, ids, context=context):
message = ''
            partner = procurement.product_id.seller_id  # main supplier of the procurement's product
if not procurement.product_id.seller_ids:
message = _('No supplier defined for this product !')
elif not partner:
message = _('No default supplier defined for this product')
elif not partner_obj.address_get(cr, uid, [partner.id], ['delivery'])['delivery']:
message = _('No address defined for the supplier')
if message:
if procurement.message != message:
cr.execute('update procurement_order set message=%s where id=%s', (message, procurement.id))
return False
if user.company_id and user.company_id.partner_id:
if partner.id == user.company_id.partner_id.id:
raise osv.except_osv(_('Configuration Error!'), _('The product "%s" has been defined with your company as reseller which seems to be a configuration error!' % procurement.product_id.name))
return True
def action_po_assign(self, cr, uid, ids, context=None):
""" This is action which call from workflow to assign purchase order to procurements
@return: True
"""
res = self.make_po(cr, uid, ids, context=context)
res = res.values()
        return len(res) and res[0] or 0  # TO CHECK: why the workflow raises an error if a non-integer value is returned
def create_procurement_purchase_order(self, cr, uid, procurement, po_vals, line_vals, context=None):
"""Create the purchase order from the procurement, using
the provided field values, after adding the given purchase
order line in the purchase order.
:params procurement: the procurement object generating the purchase order
:params dict po_vals: field values for the new purchase order (the
``order_line`` field will be overwritten with one
single line, as passed in ``line_vals``).
:params dict line_vals: field values of the single purchase order line that
the purchase order will contain.
:return: id of the newly created purchase order
:rtype: int
"""
po_vals.update({'order_line': [(0,0,line_vals)]})
return self.pool.get('purchase.order').create(cr, uid, po_vals, context=context)
def _get_purchase_schedule_date(self, cr, uid, procurement, company, context=None):
"""Return the datetime value to use as Schedule Date (``date_planned``) for the
Purchase Order Lines created to satisfy the given procurement.
:param browse_record procurement: the procurement for which a PO will be created.
:param browse_report company: the company to which the new PO will belong to.
:rtype: datetime
:return: the desired Schedule Date for the PO lines
"""
procurement_date_planned = datetime.strptime(procurement.date_planned, DEFAULT_SERVER_DATETIME_FORMAT)
schedule_date = (procurement_date_planned - relativedelta(days=company.po_lead))
return schedule_date
def _get_purchase_order_date(self, cr, uid, procurement, company, schedule_date, context=None):
"""Return the datetime value to use as Order Date (``date_order``) for the
Purchase Order created to satisfy the given procurement.
:param browse_record procurement: the procurement for which a PO will be created.
:param browse_report company: the company to which the new PO will belong to.
:param datetime schedule_date: desired Scheduled Date for the Purchase Order lines.
:rtype: datetime
:return: the desired Order Date for the PO
"""
seller_delay = int(procurement.product_id.seller_delay)
return schedule_date - relativedelta(days=seller_delay)
def _get_warehouse(self, procurement, user_company):
"""
        Return the warehouse containing the procurement stock location (or one of its ancestors).
        If none matches, return the first warehouse of the company.
"""
# TODO refactor the domain once we implement the "parent_of" domain operator
# NOTE This method has been copied in the `purchase_requisition` module to ensure
# retro-compatibility. This code duplication will be deleted in next stable version.
# Do not forget to update both version in case of modification.
company_id = (procurement.company_id or user_company).id
domains = [
[
'&', ('company_id', '=', company_id),
'|', '&', ('lot_stock_id.parent_left', '<', procurement.location_id.parent_left),
('lot_stock_id.parent_right', '>', procurement.location_id.parent_right),
('lot_stock_id', '=', procurement.location_id.id)
],
[('company_id', '=', company_id)]
]
cr, uid = procurement._cr, procurement._uid
context = procurement._context
Warehouse = self.pool['stock.warehouse']
for domain in domains:
ids = Warehouse.search(cr, uid, domain, context=context)
if ids:
return ids[0]
return False
def make_po(self, cr, uid, ids, context=None):
""" Make purchase order from procurement
        @return: newly created purchase orders, keyed by procurement id
"""
res = {}
if context is None:
context = {}
company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id
partner_obj = self.pool.get('res.partner')
uom_obj = self.pool.get('product.uom')
pricelist_obj = self.pool.get('product.pricelist')
prod_obj = self.pool.get('product.product')
acc_pos_obj = self.pool.get('account.fiscal.position')
seq_obj = self.pool.get('ir.sequence')
for procurement in self.browse(cr, uid, ids, context=context):
res_id = procurement.move_id.id
            partner = procurement.product_id.seller_id # take the main supplier of the procurement's product
seller_qty = procurement.product_id.seller_qty
partner_id = partner.id
address_id = partner_obj.address_get(cr, uid, [partner_id], ['delivery'])['delivery']
pricelist_id = partner.property_product_pricelist_purchase.id
uom_id = procurement.product_id.uom_po_id.id
qty = uom_obj._compute_qty(cr, uid, procurement.product_uom.id, procurement.product_qty, uom_id)
if seller_qty:
qty = max(qty,seller_qty)
price = pricelist_obj.price_get(cr, uid, [pricelist_id], procurement.product_id.id, qty, partner_id, {'uom': uom_id})[pricelist_id]
schedule_date = self._get_purchase_schedule_date(cr, uid, procurement, company, context=context)
purchase_date = self._get_purchase_order_date(cr, uid, procurement, company, schedule_date, context=context)
            # Pass partner_id (and the supplier's lang) in the context so the purchase order line name is generated for this supplier
new_context = context.copy()
new_context.update({'lang': partner.lang, 'partner_id': partner_id})
product = prod_obj.browse(cr, uid, procurement.product_id.id, context=new_context)
taxes_ids = procurement.product_id.supplier_taxes_id
taxes = acc_pos_obj.map_tax(cr, uid, partner.property_account_position, taxes_ids)
name = product.partner_ref
if product.description_purchase:
name += '\n'+ product.description_purchase
line_vals = {
'name': name,
'product_qty': qty,
'product_id': procurement.product_id.id,
'product_uom': uom_id,
'price_unit': price or 0.0,
'date_planned': schedule_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'move_dest_id': res_id,
'taxes_id': [(6,0,taxes)],
}
name = seq_obj.get(cr, uid, 'purchase.order') or _('PO: %s') % procurement.name
po_vals = {
'name': name,
'origin': procurement.origin,
'partner_id': partner_id,
'location_id': procurement.location_id.id,
'warehouse_id': self._get_warehouse(procurement, company),
'pricelist_id': pricelist_id,
'date_order': purchase_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'company_id': procurement.company_id.id,
'fiscal_position': partner.property_account_position and partner.property_account_position.id or False,
'payment_term_id': partner.property_supplier_payment_term.id or False,
}
res[procurement.id] = self.create_procurement_purchase_order(cr, uid, procurement, po_vals, line_vals, context=new_context)
self.write(cr, uid, [procurement.id], {'state': 'running', 'purchase_id': res[procurement.id]})
self.message_post(cr, uid, ids, body=_("Draft Purchase Order created"), context=context)
return res
def _product_virtual_get(self, cr, uid, order_point):
procurement = order_point.procurement_id
if procurement and procurement.state != 'exception' and procurement.purchase_id and procurement.purchase_id.state in ('draft', 'confirmed'):
return None
return super(procurement_order, self)._product_virtual_get(cr, uid, order_point)
class mail_mail(osv.Model):
_name = 'mail.mail'
_inherit = 'mail.mail'
def _postprocess_sent_message(self, cr, uid, mail, context=None):
if mail.model == 'purchase.order':
wf_service = netsvc.LocalService("workflow")
wf_service.trg_validate(uid, 'purchase.order', mail.res_id, 'send_rfq', cr)
return super(mail_mail, self)._postprocess_sent_message(cr, uid, mail=mail, context=context)
class product_template(osv.Model):
_name = 'product.template'
_inherit = 'product.template'
_columns = {
'purchase_ok': fields.boolean('Can be Purchased', help="Specify if the product can be selected in a purchase order line."),
}
_defaults = {
'purchase_ok': 1,
}
class mail_compose_message(osv.Model):
_inherit = 'mail.compose.message'
def send_mail(self, cr, uid, ids, context=None):
context = context or {}
if context.get('default_model') == 'purchase.order' and context.get('default_res_id'):
context = dict(context, mail_post_autofollow=True)
wf_service = netsvc.LocalService("workflow")
wf_service.trg_validate(uid, 'purchase.order', context['default_res_id'], 'send_rfq', cr)
return super(mail_compose_message, self).send_mail(cr, uid, ids, context=context)
class account_invoice(osv.Model):
_inherit = 'account.invoice'
def invoice_validate(self, cr, uid, ids, context=None):
res = super(account_invoice, self).invoice_validate(cr, uid, ids, context=context)
purchase_order_obj = self.pool.get('purchase.order')
# read access on purchase.order object is not required
if not purchase_order_obj.check_access_rights(cr, uid, 'read', raise_exception=False):
user_id = SUPERUSER_ID
else:
user_id = uid
po_ids = purchase_order_obj.search(cr, user_id, [('invoice_ids', 'in', ids)], context=context)
wf_service = netsvc.LocalService("workflow")
for order in purchase_order_obj.browse(cr, uid, po_ids, context=context):
# Signal purchase order workflow that an invoice has been validated.
invoiced = []
for po_line in order.order_line:
if any(line.invoice_id.state not in ['draft', 'cancel'] for line in po_line.invoice_lines):
invoiced.append(po_line.id)
if invoiced:
self.pool['purchase.order.line'].write(cr, uid, invoiced, {'invoiced': True})
wf_service.trg_write(uid, 'purchase.order', order.id, cr)
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| 903,126,199,072,234,500
| 51.622372
| 545
| 0.596122
| false
| 3.923262
| false
| false
| false
|
Thermondo/viewflow-extensions
|
docs/conf.py
|
1
|
2916
|
# -*- coding: utf-8 -*-
import datetime
import importlib
import inspect
import sys
import os
import django
year = datetime.datetime.now().strftime("%Y")
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tests.testapp.settings")
django.setup()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../tests.testapp'))
sys.path.insert(0, os.path.abspath('..'))
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.intersphinx',
'sphinx.ext.linkcode',
'sphinxcontrib.spelling',
]
def linkcode_resolve(domain, info):
"""Link source code to GitHub."""
project = 'viewflow-extensions'
github_user = 'Thermondo'
head = 'master'
if domain != 'py' or not info['module']:
return None
filename = info['module'].replace('.', '/')
mod = importlib.import_module(info['module'])
basename = os.path.splitext(mod.__file__)[0]
if basename.endswith('__init__'):
filename += '/__init__'
item = mod
lineno = ''
for piece in info['fullname'].split('.'):
item = getattr(item, piece)
try:
lineno = '#L%d' % inspect.getsourcelines(item)[1]
except (TypeError, IOError):
pass
return ("https://github.com/%s/%s/blob/%s/%s.py%s" %
(github_user, project, head, filename, lineno))
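# Example of a URL produced by linkcode_resolve (module path and line number are
# illustrative only):
#   https://github.com/Thermondo/viewflow-extensions/blob/master/viewflow_extensions/flows.py#L12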
intersphinx_mapping = {
'python': ('http://docs.python.org/3', None),
'django': ('https://docs.djangoproject.com/en/stable/',
'https://docs.djangoproject.com/en/stable/_objects/'),
'viewflow': ('https://viewflow.readthedocs.io/en/latest/', None),
}
# spell check
spelling_word_list_filename = 'spelling_wordlist.txt'
spelling_show_suggestions = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
master_doc = 'index'
project = 'Viewflow Extensions'
copyright = '%s, Thermondo GmbH' % year
exclude_patterns = ['_build']
pygments_style = 'sphinx'
def skip(app, what, name, obj, skip, options):
if name == "__init__" and obj.__doc__:
return False
return skip
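# skip() is hooked into autodoc in setup() below so that __init__ methods are documented
# whenever they carry a docstring, instead of being hidden by autodoc's default behaviour.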
def setup(app):
app.connect("autodoc-skip-member", skip)
autodoc_default_flags = ['members', 'show-inheritance']
autodoc_member_order = 'bysource'
inheritance_graph_attrs = dict(rankdir='TB')
inheritance_node_attrs = dict(shape='rect', fontsize=14, fillcolor='gray90',
color='gray30', style='filled')
inheritance_edge_attrs = dict(penwidth=0.75)
html_theme = 'sphinx_rtd_theme'
|
apache-2.0
| -8,658,033,130,985,556,000
| 26.252336
| 84
| 0.653292
| false
| 3.496403
| false
| false
| false
|
chfw/pyexcel-ods3
|
tests/test_multiple_sheets.py
|
2
|
7687
|
import os
from collections import OrderedDict
import pyexcel
from base import PyexcelMultipleSheetBase
from nose.tools import raises
class TestOdsNxlsMultipleSheets(PyexcelMultipleSheetBase):
def setUp(self):
self.testfile = "multiple1.ods"
self.testfile2 = "multiple1.xls"
self.content = _produce_ordered_dict()
self._write_test_file(self.testfile)
def tearDown(self):
self._clean_up()
class TestXlsNOdsMultipleSheets(PyexcelMultipleSheetBase):
def setUp(self):
self.testfile = "multiple1.xls"
self.testfile2 = "multiple1.ods"
self.content = _produce_ordered_dict()
self._write_test_file(self.testfile)
def tearDown(self):
self._clean_up()
class TestAddBooks:
def _write_test_file(self, file):
"""
Make a test file as:
1,1,1,1
2,2,2,2
3,3,3,3
"""
self.rows = 3
pyexcel.save_book_as(bookdict=self.content, dest_file_name=file)
def setUp(self):
self.testfile = "multiple1.ods"
self.testfile2 = "multiple1.xls"
self.content = _produce_ordered_dict()
self._write_test_file(self.testfile)
self._write_test_file(self.testfile2)
def test_load_a_single_sheet(self):
b1 = pyexcel.get_book(file_name=self.testfile, sheet_name="Sheet1")
assert len(b1.sheet_names()) == 1
assert b1["Sheet1"].to_array() == self.content["Sheet1"]
def test_load_a_single_sheet2(self):
b1 = pyexcel.load_book(self.testfile, sheet_index=0)
assert len(b1.sheet_names()) == 1
assert b1["Sheet1"].to_array() == self.content["Sheet1"]
@raises(IndexError)
def test_load_a_single_sheet3(self):
pyexcel.get_book(file_name=self.testfile, sheet_index=10000)
@raises(ValueError)
def test_load_a_single_sheet4(self):
pyexcel.get_book(file_name=self.testfile, sheet_name="Not exist")
def test_delete_sheets(self):
b1 = pyexcel.load_book(self.testfile)
assert len(b1.sheet_names()) == 3
del b1["Sheet1"]
assert len(b1.sheet_names()) == 2
try:
del b1["Sheet1"]
assert 1 == 2
except KeyError:
assert 1 == 1
del b1[1]
assert len(b1.sheet_names()) == 1
try:
del b1[1]
assert 1 == 2
except IndexError:
assert 1 == 1
def test_delete_sheets2(self):
"""repetitively delete first sheet"""
b1 = pyexcel.load_book(self.testfile)
del b1[0]
assert len(b1.sheet_names()) == 2
del b1[0]
assert len(b1.sheet_names()) == 1
del b1[0]
assert len(b1.sheet_names()) == 0
def test_add_book1(self):
"""
test this scenario: book3 = book1 + book2
"""
b1 = pyexcel.get_book(file_name=self.testfile)
b2 = pyexcel.get_book(file_name=self.testfile2)
b3 = b1 + b2
content = b3.dict
sheet_names = content.keys()
assert len(sheet_names) == 6
for name in sheet_names:
if "Sheet3" in name:
assert content[name] == self.content["Sheet3"]
elif "Sheet2" in name:
assert content[name] == self.content["Sheet2"]
elif "Sheet1" in name:
assert content[name] == self.content["Sheet1"]
def test_add_book1_in_place(self):
"""
test this scenario: book1 += book2
"""
b1 = pyexcel.BookReader(self.testfile)
b2 = pyexcel.BookReader(self.testfile2)
b1 += b2
content = b1.dict
sheet_names = content.keys()
assert len(sheet_names) == 6
for name in sheet_names:
if "Sheet3" in name:
assert content[name] == self.content["Sheet3"]
elif "Sheet2" in name:
assert content[name] == self.content["Sheet2"]
elif "Sheet1" in name:
assert content[name] == self.content["Sheet1"]
def test_add_book2(self):
"""
test this scenario: book3 = book1 + sheet3
"""
b1 = pyexcel.BookReader(self.testfile)
b2 = pyexcel.BookReader(self.testfile2)
b3 = b1 + b2["Sheet3"]
content = b3.dict
sheet_names = content.keys()
assert len(sheet_names) == 4
for name in sheet_names:
if "Sheet3" in name:
assert content[name] == self.content["Sheet3"]
elif "Sheet2" in name:
assert content[name] == self.content["Sheet2"]
elif "Sheet1" in name:
assert content[name] == self.content["Sheet1"]
def test_add_book2_in_place(self):
"""
        test this scenario: book1 += sheet3
"""
b1 = pyexcel.BookReader(self.testfile)
b2 = pyexcel.BookReader(self.testfile2)
b1 += b2["Sheet3"]
content = b1.dict
sheet_names = content.keys()
assert len(sheet_names) == 4
for name in sheet_names:
if "Sheet3" in name:
assert content[name] == self.content["Sheet3"]
elif "Sheet2" in name:
assert content[name] == self.content["Sheet2"]
elif "Sheet1" in name:
assert content[name] == self.content["Sheet1"]
def test_add_book3(self):
"""
test this scenario: book3 = sheet1 + sheet2
"""
b1 = pyexcel.BookReader(self.testfile)
b2 = pyexcel.BookReader(self.testfile2)
b3 = b1["Sheet1"] + b2["Sheet3"]
content = b3.dict
sheet_names = content.keys()
assert len(sheet_names) == 2
assert content["Sheet3"] == self.content["Sheet3"]
assert content["Sheet1"] == self.content["Sheet1"]
def test_add_book4(self):
"""
test this scenario: book3 = sheet1 + book
"""
b1 = pyexcel.BookReader(self.testfile)
b2 = pyexcel.BookReader(self.testfile2)
b3 = b1["Sheet1"] + b2
content = b3.dict
sheet_names = content.keys()
assert len(sheet_names) == 4
for name in sheet_names:
if "Sheet3" in name:
assert content[name] == self.content["Sheet3"]
elif "Sheet2" in name:
assert content[name] == self.content["Sheet2"]
elif "Sheet1" in name:
assert content[name] == self.content["Sheet1"]
def test_add_book_error(self):
"""
        test this scenario: adding a non-book, non-sheet value raises TypeError
"""
b1 = pyexcel.BookReader(self.testfile)
try:
b1 + 12
assert 1 == 2
except TypeError:
assert 1 == 1
try:
b1 += 12
assert 1 == 2
except TypeError:
assert 1 == 1
def tearDown(self):
if os.path.exists(self.testfile):
os.unlink(self.testfile)
if os.path.exists(self.testfile2):
os.unlink(self.testfile2)
class TestMultiSheetReader:
def setUp(self):
self.testfile = "file_with_an_empty_sheet.ods"
def test_reader_with_correct_sheets(self):
r = pyexcel.BookReader(
os.path.join("tests", "fixtures", self.testfile)
)
assert r.number_of_sheets() == 3
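# The fixture book produced by the helper below has three sheets: two purely numeric
# 3-row sheets (Sheet1, Sheet2) and Sheet3 with an "X"/"Y"/"Z" header row, which is what
# the per-sheet assertions in the tests above compare against.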
def _produce_ordered_dict():
data_dict = OrderedDict()
data_dict.update({"Sheet1": [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]]})
data_dict.update({"Sheet2": [[4, 4, 4, 4], [5, 5, 5, 5], [6, 6, 6, 6]]})
data_dict.update(
{"Sheet3": [[u"X", u"Y", u"Z"], [1, 4, 7], [2, 5, 8], [3, 6, 9]]}
)
return data_dict
|
bsd-3-clause
| 368,446,140,860,946,300
| 30.896266
| 76
| 0.55015
| false
| 3.534253
| true
| false
| false
|
ckrooss/shortpath
|
shortpath/astar.py
|
1
|
1996
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from random import randint
if __name__ == "__main__":
from node import Node
from nodelist import Nodelist
else:
from .node import Node
from .nodelist import Nodelist
Node.xmax = 30
Node.ymax = 15
def gen_obstacles(xmax: int, ymax: int) -> Nodelist:
obstacles = Nodelist()
def ao(x, y):
o = Node(x, y)
obstacles.append(o)
for _ in range(2):
ao(randint(-xmax, xmax), randint(-ymax, ymax))
for y in range(-ymax, ymax):
ao(0, y + 1)
for y in range(-ymax, ymax + 1):
ao(-xmax, y)
for k, j in zip(range(1, xmax), range(ymax)):
ao(xmax - k, j - (ymax // 3))
return obstacles
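# gen_obstacles builds, in order: two random blockers, a near-complete vertical wall on the
# x == 0 column (only the bottom cell stays open), the whole left border at x == -xmax, and a
# diagonal running down from the right edge -- enough structure to make the search non-trivial.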
def find_path(ziel: Node, openlist: Nodelist, closedlist: Nodelist, obstacles: Nodelist) -> bool:
while openlist:
current_node = openlist.pop_min()
if current_node == ziel:
ziel.pre = current_node
return True
closedlist.append(current_node)
current_node.expand_node(ziel, openlist, closedlist, obstacles)
return False
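# find_path is the best-first search loop: openlist is the frontier ordered by cost
# (pop_min), closedlist collects fully expanded nodes, and Node.expand_node (defined in
# node.py) is expected to push reachable, non-obstacle neighbours back onto the openlist.
# Typical call, as done in main() below:
#   openlist = Nodelist(start); closedlist = Nodelist()
#   found = find_path(ziel, openlist, closedlist, gen_obstacles(Node.xmax, Node.ymax))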
def main():
start = Node(-Node.xmax, Node.ymax)
ziel = Node(Node.xmax, Node.ymax)
while True:
# List of elements that form the optimal way
optimal_path = Nodelist()
# Openlist: Path elements that have not been fully evaluated and might be good
openlist = Nodelist(start)
# Closedlist: Path elements that have been fully evaluated
closedlist = Nodelist()
# Blocking the path
obstacles = gen_obstacles(Node.xmax, Node.ymax)
if find_path(ziel, openlist, closedlist, obstacles):
Node.select_optimal_path(ziel, optimal_path)
Node.printf(start, ziel, optimal_path, openlist, closedlist, obstacles)
break
else:
Node.printf(start, ziel, optimal_path, openlist, closedlist, obstacles)
raise Exception()
if __name__ == '__main__':
main()
|
mit
| 4,708,236,576,825,571,000
| 24.589744
| 97
| 0.602204
| false
| 3.447323
| false
| false
| false
|
SyllogismRXS/misc
|
gui/widget-test/graveyard/mycustomwidgetplugin.py
|
1
|
3624
|
#!/usr/bin/env python
"""
helloglwidgetplugin.py
A simple OpenGL custom widget plugin for Qt Designer.
Copyright (C) 2006 David Boddie <david@boddie.org.uk>
Copyright (C) 2005-2006 Trolltech ASA. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from PyQt5.QtGui import QIcon
from PyQt5.QtDesigner import QPyDesignerCustomWidgetPlugin
from my_custom_widget import MyCustomWidget
class MyCustomWidgetPlugin(QPyDesignerCustomWidgetPlugin):
"""MyCustomWidgetPlugin(QPyDesignerCustomWidgetPlugin)
Provides a Python custom plugin for Qt Designer by implementing the
QDesignerCustomWidgetPlugin via a PyQt-specific custom plugin class.
"""
# The __init__() method is only used to set up the plugin and define its
# initialized variable.
def __init__(self, parent=None):
super(MyCustomWidgetPlugin, self).__init__(parent)
self.initialized = False
# The initialize() and isInitialized() methods allow the plugin to set up
# any required resources, ensuring that this can only happen once for each
# plugin.
def initialize(self, core):
if self.initialized:
return
self.initialized = True
def isInitialized(self):
return self.initialized
# This factory method creates new instances of our custom widget with the
# appropriate parent.
def createWidget(self, parent):
return MyCustomWidget(parent)
# This method returns the name of the custom widget class that is provided
# by this plugin.
def name(self):
return "MyCustomWidget"
# Returns the name of the group in Qt Designer's widget box that this
# widget belongs to.
def group(self):
return "Display Widgets"
# Returns the icon used to represent the custom widget in Qt Designer's
# widget box.
def icon(self):
return QIcon()
# Returns a short description of the custom widget for use in a tool tip.
def toolTip(self):
return ""
# Returns a short description of the custom widget for use in a "What's
# This?" help message for the widget.
def whatsThis(self):
return ""
# Returns True if the custom widget acts as a container for other widgets;
# otherwise returns False. Note that plugins for custom containers also
# need to provide an implementation of the QDesignerContainerExtension
# interface if they need to add custom editing support to Qt Designer.
def isContainer(self):
return False
# Returns an XML description of a custom widget instance that describes
# default values for its properties. Each custom widget created by this
# plugin will be configured using this description.
def domXml(self):
return '<widget class="MyCustomWidget" name="mycustomwidget" />\n'
# Returns the module containing the custom widget class. It may include
# a module path.
def includeFile(self):
return "my_custom_widget"
|
mit
| 8,384,229,615,302,424,000
| 35.606061
| 78
| 0.721578
| false
| 4.441176
| false
| false
| false
|
Cqfuj/disco-cake
|
disco_cake/link/mp3_track_number_album_linker.py
|
1
|
1939
|
# Disco-cake
# Copyright (C) 2017 Maugere Lucas
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import mutagen.id3
from .abstract_album_linker import AbstractAlbumLinker
class Mp3TrackNumberAlbumLinker(AbstractAlbumLinker):
allowed_extensions = ['.mp3']
def __init__(self, cfg):
super().__init__(cfg)
def can_apply_to(self, link):
return link.file.extension in self.allowed_extensions
def find_disc(self, trck_values, album):
if len(album.discs) == 1:
return album.discs[0]
if len(trck_values) <= 1:
return None
nb_tracks = int(trck_values[1])
discs = album.get_discs_with_nb_tracks(nb_tracks)
if len(discs) != 1:
return None
return discs[0]
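    # Note: trck_values comes from splitting an ID3 TRCK frame such as "3/12" on "/", so
    # trck_values[0] is the track number and trck_values[1], when present, is the total
    # number of tracks on the disc, which is what find_disc matches against the album's discs.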
def find_linked_track(self, link, album):
if not self.can_apply_to(link):
return
try:
metadata = mutagen.id3.ID3(link.file.filepath)
trck_values = metadata['TRCK'].text[0].split('/')
track_number = int(trck_values[0])
disc = self.find_disc(trck_values, album)
if disc and disc.has_track_number(track_number):
link.track = disc.tracks[track_number-1]
return
except (mutagen.id3.ID3NoHeaderError, ValueError) as e:
print(str(e))
return
|
gpl-3.0
| 2,301,692,807,720,406,000
| 35.584906
| 71
| 0.645694
| false
| 3.728846
| false
| false
| false
|
bpsmith/tia
|
tia/analysis/model/interface.py
|
1
|
3276
|
__all__ = ['CostCalculator', 'EodMarketData', 'MarketDataColumns', 'TxnColumns', 'PositionColumns', 'PlColumns',
'TxnPlColumns']
class CostCalculator(object):
"""Define the methods necessary to be able to calculator the premium for a trade."""
def get_premium(self, qty, px, ts=None):
raise NotImplementedError()
def get_mkt_val(self, qty, px, ts=None):
raise NotImplementedError()
class EodMarketData(object):
def get_eod_frame(self):
"""Return an end of day DataFrame with columns ('close', 'mktval', 'dvd')"""
raise NotImplementedError()
class MarketDataColumns(object):
CLOSE = 'close'
MKT_VAL = 'mkt_val'
DVDS = 'dvds'
class TxnColumns(object):
DT = 'date'
TS = 'txn_ts'
PID = 'pid'
TID = 'tid'
QTY = 'txn_qty'
PX = 'txn_px'
FEES = 'txn_fees'
PREMIUM = 'txn_premium'
OPEN_VAL = 'open_val'
POS = 'pos'
INTENT = 'txn_intent'
ACTION = 'txn_action'
DESCRIPTIONS = {
DT: 'Date-only portion of transaction',
TS: 'Timestamp of transaction',
PID: 'position id',
TID: 'trade id',
QTY: 'quantity',
PX: 'price',
FEES: 'fees',
PREMIUM: 'premium',
OPEN_VAL: 'open value of position',
POS: 'position quantity',
INTENT: 'trade intent',
ACTION: 'trade action',
}
class PlColumns(object):
DT = 'date'
DVDS = 'dvds'
FEES = 'fees'
RPL_GROSS = 'rpl_gross'
RPL = 'rpl'
UPL = 'upl'
PL = 'pl'
DESCRIPTIONS = {
DT: 'p/l date',
DVDS: 'dividends',
FEES: 'fees',
RPL_GROSS: 'realized gross p/l (TOT_VAL - OPEN_VAL)',
RPL: 'realized pl (RPL_GROSS + FEES + DVDS)',
UPL: 'unrealized pl (MKT_VAL + OPEN_VAL)',
PL: 'Total p/l (UPL + RPL)'
}
ALL = [DT, DVDS, FEES, RPL_GROSS, RPL, UPL, PL]
LTDS = [DVDS, FEES, RPL_GROSS, RPL, UPL, PL]
class TxnPlColumns(object):
DT = 'date'
PID = TxnColumns.PID
TID = TxnColumns.TID
POS = 'pos'
TXN_QTY = 'txn_qty'
TXN_PX = 'txn_px'
TXN_FEES = 'txn_fees'
TXN_PREMIUM = 'txn_premium'
TXN_INTENT = 'txn_intent'
TXN_ACTION = 'txn_action'
CLOSE_PX = 'close'
OPEN_VAL = 'open_val'
MKT_VAL = 'mkt_val'
TOT_VAL = 'total_val'
DVDS = 'dvds'
FEES = 'fees'
RPL_GROSS = 'rpl_gross'
RPL = 'rpl'
UPL = 'upl'
PL = 'pl'
DESCRIPTIONS = {
DT: 'p/l date',
POS: 'end of day position quantity',
CLOSE_PX: 'end of day closing price',
OPEN_VAL: 'open value of the position',
MKT_VAL: 'market value',
TOT_VAL: 'total of trade premiums',
DVDS: 'dividends',
FEES: 'fees',
RPL_GROSS: 'realized gross p/l (TOT_VAL - OPEN_VAL)',
RPL: 'realized pl (RPL_GROSS + FEES + DVDS)',
UPL: 'unrealized pl (MKT_VAL + OPEN_VAL)',
PL: 'Total p/l (UPL + RPL)'
}
class PositionColumns(object):
PID = 'pid'
SIDE = 'side'
OPEN_DT = 'open_dt'
CLOSE_DT = 'close_dt'
OPEN_QTY = 'open_qty'
OPEN_PX = 'open_px'
CLOSE_PX = 'close_px'
OPEN_PREMIUM = 'open_premium'
PL = 'pl'
DURATION = 'duration'
NUM_TXNS = 'ntxns'
RET = 'ret'
STATE = 'state'
|
bsd-3-clause
| -7,107,958,425,368,233,000
| 23.825758
| 112
| 0.548535
| false
| 2.816853
| false
| false
| false
|
vgripon/PyRat
|
imports/display.py
|
1
|
22177
|
# Copyright © 2017 Vincent Gripon (vincent.gripon@imt-atlatique.fr) and IMT Atlantique
#
# This file is part of PyRat.
#
# PyRat is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyRat is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyRat. If not, see <http://www.gnu.org/licenses/>.
from imports.parameters import *
import pygame
import random
import datetime
from pygame import locals
def image_of_maze(maze, tiles, image_tile, image_wall, image_corner, image_mud, offset_x, offset_y, scale, width, height, screen, window_height):
global mud_range
for i in range(width):
for j in range(height):
screen.blit(image_tile[tiles[i][j]], (offset_x + scale * i, window_height - offset_y - scale * (j+1)))
if not args.mud_no_display:
for i in range(width):
for j in range(height):
if not((i-1,j) in maze[(i,j)]):
pass
elif maze[(i,j)][(i-1,j)] > 1:
screen.blit(image_mud, (offset_x + scale * i - scale/2, window_height - offset_y - scale * (j+1)))
if not((i,j+1) in maze[(i,j)]):
pass
elif maze[(i,j)][(i,j+1)] > 1:
screen.blit(pygame.transform.rotate(image_mud, 270), (offset_x + scale * i, window_height - offset_y - scale * (j+1) - scale/2))
for i in range(width):
for j in range(height):
if not((i-1,j) in maze[(i,j)]):
screen.blit(image_wall, (offset_x + scale * i - scale / 2, window_height - offset_y - scale * (j+1)))
if not((i,j+1) in maze[(i,j)]):
screen.blit(pygame.transform.rotate(image_wall, 270), (offset_x + scale * i, window_height - offset_y - scale * (j+1) - scale/2))
for i in range(width):
screen.blit(pygame.transform.rotate(image_wall, 270), (offset_x + scale * i, window_height - offset_y - scale/2))
for j in range(height):
screen.blit(image_wall, (offset_x + scale * width -scale/2, window_height - offset_y - scale * (j+1)))
for i in range(width+1):
for j in range(height+1):
horiz = False
vert = False
count = 0
if i == 0 or i == width:
vert = True
if j == 0 or j == height:
horiz = True
# is there a wall left?
if i > 0 and j < height and j > 0:
if (i-1,j) not in maze[(i-1,j-1)]:
horiz = True
count = count + 1
# is there a wall right?
if i < width and j < height and j > 0:
if (i,j-1) not in maze[(i,j)]:
horiz = True
count = count + 1
# is there a wall up?
if i > 0 and i < width and j < height:
if (i,j) not in maze[(i-1,j)]:
vert = True
count = count + 1
# is there a wall down?
if i > 0 and i < width and j > 0:
if (i,j-1) not in maze[(i-1,j-1)]:
vert = True
count = count + 1
if vert and horiz or count == 1:
screen.blit(image_corner, (offset_x + scale * i - scale/2, window_height - offset_y - scale * j - scale/2))
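# Coordinate note: maze cell (i, j) is blitted at (offset_x + scale * i,
# window_height - offset_y - scale * (j + 1)), i.e. the y axis is flipped so that j grows
# upwards on screen; walls and mud are shifted by half a cell so they sit on the shared edge
# between two neighbouring cells.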
def draw_pieces_of_cheese(pieces_of_cheese, image_cheese, offset_x, offset_y, scale, width, height, screen, window_height):
for (i,j) in pieces_of_cheese:
screen.blit(image_cheese, (offset_x + scale * i, window_height - offset_y - scale * (j+1)))
def draw_players(player1_location, player2_location, image_python, image_rat, offset_x, offset_y, scale, width, height, screen, window_height):
i, j = player1_location
screen.blit(image_python, (offset_x + scale * i, window_height - offset_y - scale * (j+1)))
i, j = player2_location
screen.blit(image_rat, (offset_x + scale * i, window_height - offset_y - scale * (j+1)))
def draw_players_animate(player1_location, player2_location, image_python, image_rat, offset_x, offset_y, scale, width, height, screen, window_height):
i, j = player1_location
screen.blit(image_python, (offset_x + scale * i, window_height - offset_y - scale * (j+1)))
i, j = player2_location
screen.blit(image_rat, (offset_x + scale * i, window_height - offset_y - scale * (j+1)))
font_sizes = [50, 25, 50, 25, 50, 50, 50]
def draw_text(text, font, color, max_size, index_size, x, y, screen):
global font_sizes
font = pygame.font.Font("resources/fonts/" + font + ".ttf", font_sizes[index_size])
label = font.render(text, 1, color)
while(label.get_rect().width > max_size):
font_sizes[index_size] = font_sizes[index_size] - 1
font = pygame.font.SysFont("monospace", font_sizes[index_size])
label = font.render(text, 1, color)
# pygame.draw.rect(screen, (57,57,64), (x - label.get_rect().width // 2, y, label.get_rect().width,label.get_rect().height))
screen.blit(label, (x - label.get_rect().width // 2,y))
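# draw_text shrinks the cached font size (font_sizes[index_size]) until the rendered label
# fits within max_size pixels, then blits it horizontally centred on x; the shrunken size is
# remembered in the module-level font_sizes list so later frames do not have to shrink again.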
def draw_scores(p1name, score1, image1, p2name, score2, image2, window_width, window_height, screen, player1_is_alive, player2_is_alive, moves1, miss1, moves2, miss2, stuck1, stuck2):
if player1_is_alive:
draw_text("Score: "+str(score1), "Kalam-Bold", (50,50,50), window_width / 6, 0, int(window_width / 12), window_width / 3 + 50, screen)
draw_text(p1name, "Kalam-Bold", (50,50,50), window_width / 6, 5, int(window_width / 12), window_width / 3, screen)
draw_text("Moves: " + str(moves1), "Kalam-Regular", (2,118,137), window_width / 6, 1, int(window_width / 12), window_width / 3 + 150, screen)
draw_text("Miss: " + str(miss1), "Kalam-Regular", (229,35,64), window_width / 6, 1, int(window_width / 12), window_width / 3 + 180, screen)
draw_text("Mud: " + str(stuck1), "Kalam-Regular", (229,35,64), window_width / 6, 1, int(window_width / 12), window_width / 3 + 210, screen)
if player2_is_alive:
draw_text("Score: "+str(score2), "Kalam-Bold", (50,50,50), window_width / 6, 2, int(11 * window_width / 12), window_width / 3 + 50, screen)
draw_text(p2name, "Kalam-Bold", (50,50,50), window_width / 6, 6, int(11 * window_width / 12), window_width / 3, screen)
draw_text("Moves: " + str(moves2), "Kalam-Regular", (2,118,137), window_width / 6, 3, int(11 * window_width / 12), window_width / 3 + 150, screen)
draw_text("Miss: " + str(miss2), "Kalam-Regular", (229,35,64), window_width / 6, 3, int(11 * window_width / 12), window_width / 3 + 180, screen)
draw_text("Mud: " + str(stuck2), "Kalam-Regular", (229,35,64), window_width / 6, 3, int(11 * window_width / 12), window_width / 3 + 210, screen)
def display_exit():
pygame.quit()
def play(q_out, move):
while not q_out.empty():
q_out.get()
q_out.put(move)
def init_coords_and_images(width, height, player1_is_alive, player2_is_alive, window_width, window_height):
scale = int(min((window_height - 50) / height, window_width * 2/3 / width))
offset_x = window_width // 2 - int(width / 2 * scale)
offset_y = max(25, window_height // 2 - int(scale * height / 2))
scale_portrait_w = int(window_width / 6)
scale_portrait_h = int(window_width / 6)
image_background = pygame.transform.smoothscale(pygame.image.load("resources/illustrations/background.jpg"),(window_width, window_height))
image_cheese = pygame.transform.smoothscale(pygame.image.load("resources/gameElements/cheese.png"),(scale, scale))
image_corner = pygame.transform.smoothscale(pygame.image.load("resources/gameElements/corner.png"),(scale, scale))
image_moving_python = pygame.transform.smoothscale(pygame.image.load("resources/gameElements/movingPython.png"),(scale, scale))
image_moving_rat = pygame.transform.smoothscale(pygame.image.load("resources/gameElements/movingRat.png"),(scale, scale))
image_python = pygame.transform.smoothscale(pygame.image.load("resources/gameElements/python.png"),(scale, scale))
image_rat = pygame.transform.smoothscale(pygame.image.load("resources/gameElements/rat.png"),(scale, scale))
image_wall = pygame.transform.smoothscale(pygame.image.load("resources/gameElements/wall.png"),(scale, scale))
image_mud = pygame.transform.smoothscale(pygame.image.load("resources/gameElements/mud.png"),(scale, scale))
image_portrait_python = pygame.transform.smoothscale(pygame.image.load("resources/illustrations/python_left.png"),(scale_portrait_w, scale_portrait_h))
image_portrait_rat = pygame.transform.smoothscale(pygame.image.load("resources/illustrations/rat.png"),(scale_portrait_w, scale_portrait_h))
image_tile = []
for i in range(10):
image_tile.append(pygame.transform.smoothscale(pygame.image.load("resources/gameElements/tile"+str(i+1)+".png"),(scale, scale)))
tiles = []
for i in range(width):
tiles.append([])
for j in range(height):
tiles[i].append(random.randrange(10))
if not(args.save_images):
if not(player1_is_alive):
image_rat = image_rat.convert()
image_rat.set_alpha(0)
image_moving_rat = image_moving_rat.convert()
image_moving_rat.set_alpha(0)
if not(player2_is_alive):
image_python = image_python.convert()
image_python.set_alpha(0)
image_moving_python = image_moving_python.convert()
image_moving_python.set_alpha(0)
return scale, offset_x, offset_y, image_background, image_cheese, image_corner, image_moving_python, image_moving_rat, image_python, image_rat, image_wall, image_mud, image_portrait_python, image_portrait_rat, tiles, image_tile
def build_background(screen, maze, tiles, image_background, image_tile, image_wall, image_corner, image_mud, offset_x, offset_y, width, height, window_width, window_height, image_portrait_rat, image_portrait_python, scale, player1_is_alive, player2_is_alive):
global font_sizes
# screen.fill((57,57,64))
font_sizes = [50, 25, 50, 25, 50, 50, 50]
maze_image = screen.copy()
maze_image.blit(image_background, (0,0))
image_of_maze(maze, tiles, image_tile, image_wall, image_corner, image_mud, offset_x, offset_y, scale, width, height, maze_image, window_height)
if player1_is_alive:
maze_image.blit(image_portrait_rat, (int(window_width /12 - image_portrait_python.get_rect().width / 2), 100))
if player2_is_alive:
maze_image.blit(image_portrait_python, (int(window_width * 11 / 12 - image_portrait_python.get_rect().width / 2), 100))
return maze_image
def run(maze, width, height, q, q_render_in, q_quit, p1name, p2name, q1_out, q2_out, is_human_rat, is_human_python, q_info, pieces_of_cheese, player1_location, player2_location, player1_is_alive, player2_is_alive, screen, infoObject):
global args
debug("Starting rendering",2)
if args.save_images:
window_width, window_height = args.window_width, args.window_height
else:
window_width, window_height = pygame.display.get_surface().get_size()
turn_time = args.turn_time
scale, offset_x, offset_y, image_background, image_cheese, image_corner, image_moving_python, image_moving_rat, image_python, image_rat, image_wall, image_mud, image_portrait_python, image_portrait_rat, tiles, image_tile = init_coords_and_images(width, height, player1_is_alive, player2_is_alive, window_width, window_height)
debug("Defining constants",2)
d = 10000000
clock = pygame.time.Clock()
new_player1_location = player1_location
new_player2_location = player2_location
time_to_go1 = pygame.time.get_ticks()
time_to_go2 = pygame.time.get_ticks()
score1 = 0
score2 = 0
image1 = image_rat
image2 = image_python
moves1 = 0
moves2 = 0
miss1 = 0
miss2 = 0
stuck1 = 0
stuck2 = 0
debug("Trying to initialize Joystick",2)
pygame.joystick.init()
try:
j0 = pygame.joystick.Joystick(0)
j0.init()
print('Enabled joystick: ' + j0.get_name() + ' with ' + str(j0.get_numaxes()) + ' axes', file=sys.stderr)
j1 = pygame.joystick.Joystick(1)
j1.init()
print('Enabled joystick: ' + j1.get_name() + ' with ' + str(j1.get_numaxes()) + ' axes', file=sys.stderr)
except pygame.error:
()
debug("Building background image",2)
maze_image = build_background(screen, maze, tiles, image_background, image_tile, image_wall, image_corner, image_mud, offset_x, offset_y, width, height, window_width, window_height, image_portrait_rat, image_portrait_python, scale, player1_is_alive, player2_is_alive)
starting_time = pygame.time.get_ticks()
text_info = ""
debug("Starting main loop",2)
while q_quit.empty() or (args.desactivate_animations and not(q.empty())):
debug("Checking events",2)
if not(args.save_images):
for event in pygame.event.get():
if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and (event.key == pygame.K_q or event.key == pygame.K_ESCAPE)):
q_quit.put("")
break
if event.type == pygame.VIDEORESIZE or (event.type == pygame.KEYDOWN and event.key == pygame.K_f):
if event.type == pygame.KEYDOWN and not(screen.get_flags() & 0x80000000):
screen = pygame.display.set_mode((infoObject.current_w, infoObject.current_h), pygame.FULLSCREEN)
window_width, window_height = infoObject.current_w, infoObject.current_h
else:
if event.type == pygame.VIDEORESIZE:
window_width, window_height = event.w, event.h
screen = pygame.display.set_mode((window_width, window_height),pygame.RESIZABLE)
scale, offset_x, offset_y, image_background, image_cheese, image_corner, image_moving_python, image_moving_rat, image_python, image_rat, image_wall, image_mud, image_portrait_python, image_portrait_rat, tiles, image_tile = init_coords_and_images(width, height, player1_is_alive, player2_is_alive, window_width, window_height)
maze_image = build_background(screen, maze, tiles, image_background, image_tile, image_wall, image_corner, image_mud, offset_x, offset_y, width, height, window_width, window_height, image_portrait_rat, image_portrait_python, scale, player1_is_alive, player2_is_alive)
if event.type == pygame.KEYDOWN and (is_human_rat or is_human_python):
if event.key == pygame.K_LEFT:
play(q1_out, "L")
if event.key == pygame.K_RIGHT:
play(q1_out, "R")
if event.key == pygame.K_UP:
play(q1_out, "U")
if event.key == pygame.K_DOWN:
play(q1_out, "D")
if event.key == pygame.K_KP4:
play(q2_out, "L")
if event.key == pygame.K_KP6:
play(q2_out, "R")
if event.key == pygame.K_KP8:
play(q2_out, "U")
if event.key == pygame.K_KP5:
play(q2_out, "D")
debug("Processing joysticks",2)
try:
x , y = j0.get_axis(3), j0.get_axis(4)
if x < -0.7:
play(q1_out, "L")
if x > 0.7:
play(q1_out, "R")
if y < -0.7:
play(q1_out, "U")
if y > 0.7:
play(q1_out, "D")
except:
()
try:
x , y = j1.get_axis(3), j1.get_axis(4)
if x < -0.7:
play(q2_out, "L")
if x > 0.7:
play(q2_out, "R")
if y < -0.7:
play(q2_out, "U")
if y > 0.7:
play(q2_out, "D")
except:
()
debug("Looking for updates from core program",2)
if (args.desactivate_animations and not(q.empty())) or not(args.desactivate_animations):
if args.desactivate_animations:
pieces_of_cheese, nnew_player1_location, nnew_player2_location, score1, score2, moves1, moves2, miss1, miss2, stuck1, stuck2 = q.get()
player1_location = nnew_player1_location
player2_location = nnew_player2_location
else:
while not(q.empty()):
pieces_of_cheese, nnew_player1_location, nnew_player2_location, score1, score2, moves1, moves2, miss1, miss2, stuck1, stuck2 = q.get()
if not(args.desactivate_animations):
if nnew_player1_location != new_player1_location:
time_to_go1 = pygame.time.get_ticks() + turn_time * maze[new_player1_location][nnew_player1_location]
player1_location = new_player1_location
if nnew_player2_location != new_player2_location:
player2_location = new_player2_location
time_to_go2 = pygame.time.get_ticks() + turn_time * maze[new_player2_location][nnew_player2_location]
new_player1_location = nnew_player1_location
new_player2_location = nnew_player2_location
debug("Starting draw",2)
screen.fill((57, 57, 64))
screen.blit(maze_image, (0, 0))
draw_pieces_of_cheese(pieces_of_cheese, image_cheese, offset_x, offset_y, scale, width, height, screen, window_height)
if not(args.desactivate_animations):
if time_to_go1 <= pygame.time.get_ticks() or player1_location == new_player1_location:
player1_location = new_player1_location
player1_draw_location = player1_location
else:
prop = (time_to_go1 - pygame.time.get_ticks()) / (maze[player1_location][new_player1_location] * turn_time)
i, j = player1_location
ii, jj = new_player1_location
player1_draw_location = i * prop + ii * (1 - prop), j * prop + jj * (1 - prop)
if ii > i:
image1 = pygame.transform.rotate(image_moving_rat, 270)
elif ii < i:
image1 = pygame.transform.rotate(image_moving_rat, 90)
elif j < jj:
image1 = pygame.transform.rotate(image_moving_rat, 0)
else:
image1 = pygame.transform.rotate(image_moving_rat, 180)
if time_to_go2 <= pygame.time.get_ticks() or player2_location == new_player2_location:
player2_location = new_player2_location
player2_draw_location = player2_location
else:
prop = (time_to_go2 - pygame.time.get_ticks()) / (maze[player2_location][new_player2_location] * turn_time)
i, j = player2_location
ii, jj = new_player2_location
player2_draw_location = i * prop + ii * (1 - prop), j * prop + jj * (1 - prop)
if ii > i:
image2 = pygame.transform.rotate(image_moving_python, 270)
elif ii < i:
image2 = pygame.transform.rotate(image_moving_python, 90)
elif j < jj:
image2 = pygame.transform.rotate(image_moving_python, 0)
else:
image2 = pygame.transform.rotate(image_moving_python, 180)
draw_players_animate(player1_draw_location, player2_draw_location, image1, image2, offset_x, offset_y, scale, width, height, screen, window_height)
else:#if desactivate_animations
draw_players(player1_location, player2_location, image_rat, image_python, offset_x, offset_y, scale, width, height, screen, window_height)
draw_scores(p1name, score1, image_portrait_rat, p2name, score2, image_portrait_python, window_width, window_height, screen, player1_is_alive, player2_is_alive, moves1, miss1, moves2, miss2, stuck1, stuck2)
if not(q_info.empty()):
text_info = q_info.get()
if text_info != "":
draw_text(text_info, "Kalam-Bold", (50,50,50), window_width, 4, window_width // 2, 25, screen)
if (pygame.time.get_ticks() - starting_time < args.preparation_time) and not(args.desactivate_animations):
remaining = args.preparation_time - pygame.time.get_ticks() + starting_time
if remaining > 0:
draw_text("Starting in " + str(remaining // 1000) + "." + (str(remaining % 1000)).zfill(3), "Kalam-Bold", (50,50,50), window_width, 4, window_width // 2, 25, screen)
debug("Drawing on screen",2)
if not(args.save_images):
pygame.display.flip()
if not(args.desactivate_animations):
clock.tick(60)
else:
if not(args.synchronous):
clock.tick(1000/turn_time)
if args.save_images:
pygame.image.save(screen, "output_images/image" + str(d)[1:] + ".png")
d = d + 1
else:
clock.tick(60)
debug("Exiting rendering", 2)
q_render_in.put("quit")
if is_human_python:
q2_out.put("")
if is_human_rat:
q1_out.put("")
|
gpl-3.0
| -2,253,685,481,212,059,100
| 55.861538
| 353
| 0.584055
| false
| 3.359491
| false
| false
| false
|
Acehaidrey/incubator-airflow
|
airflow/www/views.py
|
1
|
135065
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import copy
import itertools
import json
import logging
import math
import socket
import sys
import traceback
from collections import defaultdict
from datetime import datetime, timedelta
from json import JSONDecodeError
from typing import Dict, List, Optional, Tuple
from urllib.parse import unquote, urlparse
import lazy_object_proxy
import nvd3
import sqlalchemy as sqla
import yaml
from flask import (
Markup,
Response,
abort,
current_app,
escape,
flash,
g,
jsonify,
make_response,
redirect,
render_template,
request,
session as flask_session,
url_for,
)
from flask_appbuilder import BaseView, ModelView, expose
from flask_appbuilder.actions import action
from flask_appbuilder.models.sqla.filters import BaseFilter # noqa
from flask_babel import lazy_gettext
from jinja2.utils import htmlsafe_json_dumps, pformat # type: ignore
from pygments import highlight, lexers
from pygments.formatters import HtmlFormatter # noqa pylint: disable=no-name-in-module
from sqlalchemy import and_, desc, func, or_, union_all
from sqlalchemy.orm import joinedload
from wtforms import SelectField, validators
import airflow
from airflow import models, plugins_manager, settings
from airflow.api.common.experimental.mark_tasks import (
set_dag_run_state_to_failed,
set_dag_run_state_to_success,
)
from airflow.configuration import AIRFLOW_CONFIG, conf
from airflow.exceptions import AirflowException
from airflow.executors.executor_loader import ExecutorLoader
from airflow.jobs.base_job import BaseJob
from airflow.jobs.scheduler_job import SchedulerJob
from airflow.models import Connection, DagModel, DagTag, Log, SlaMiss, TaskFail, XCom, errors
from airflow.models.baseoperator import BaseOperator
from airflow.models.dagcode import DagCode
from airflow.models.dagrun import DagRun, DagRunType
from airflow.models.taskinstance import TaskInstance
from airflow.security import permissions
from airflow.ti_deps.dep_context import DepContext
from airflow.ti_deps.dependencies_deps import RUNNING_DEPS, SCHEDULER_QUEUED_DEPS
from airflow.utils import json as utils_json, timezone
from airflow.utils.dates import infer_time_unit, scale_time_units
from airflow.utils.helpers import alchemy_to_dict
from airflow.utils.log.log_reader import TaskLogReader
from airflow.utils.session import create_session, provide_session
from airflow.utils.state import State
from airflow.version import version
from airflow.www import auth, utils as wwwutils
from airflow.www.decorators import action_logging, gzipped
from airflow.www.forms import (
ConnectionForm,
DagRunForm,
DateTimeForm,
DateTimeWithNumRunsForm,
DateTimeWithNumRunsWithDagRunsForm,
)
from airflow.www.widgets import AirflowModelListWidget
PAGE_SIZE = conf.getint('webserver', 'page_size')
FILTER_TAGS_COOKIE = 'tags_filter'
FILTER_STATUS_COOKIE = 'dag_status_filter'
def get_safe_url(url):
"""Given a user-supplied URL, ensure it points to our web server"""
valid_schemes = ['http', 'https', '']
valid_netlocs = [request.host, '']
parsed = urlparse(url)
if parsed.scheme in valid_schemes and parsed.netloc in valid_netlocs:
return url
return url_for('Airflow.index')
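# Illustration (hypothetical host and URLs): with request.host == "airflow.example.com",
#   get_safe_url("https://airflow.example.com/graph?dag_id=x")  -> returned unchanged
#   get_safe_url("https://attacker.example.org/")               -> replaced by url_for('Airflow.index')
# Relative URLs (empty scheme and netloc) are also accepted as safe.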
def get_date_time_num_runs_dag_runs_form_data(www_request, session, dag):
"""Get Execution Data, Base Date & Number of runs from a Request"""
date_time = www_request.args.get('execution_date')
if date_time:
date_time = timezone.parse(date_time)
else:
date_time = dag.get_latest_execution_date(session=session) or timezone.utcnow()
base_date = www_request.args.get('base_date')
if base_date:
base_date = timezone.parse(base_date)
else:
        # The DateTimeField widget truncates milliseconds and would lose
# the first dag run. Round to next second.
base_date = (date_time + timedelta(seconds=1)).replace(microsecond=0)
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
num_runs = www_request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
drs = (
session.query(DagRun)
.filter(DagRun.dag_id == dag.dag_id, DagRun.execution_date <= base_date)
.order_by(desc(DagRun.execution_date))
.limit(num_runs)
.all()
)
dr_choices = []
dr_state = None
for dr in drs:
dr_choices.append((dr.execution_date.isoformat(), dr.run_id))
if date_time == dr.execution_date:
dr_state = dr.state
# Happens if base_date was changed and the selected dag run is not in result
if not dr_state and drs:
dr = drs[0]
date_time = dr.execution_date
dr_state = dr.state
return {
'dttm': date_time,
'base_date': base_date,
'num_runs': num_runs,
'execution_date': date_time.isoformat(),
'dr_choices': dr_choices,
'dr_state': dr_state,
}
def task_group_to_dict(task_group):
"""
Create a nested dict representation of this TaskGroup and its children used to construct
the Graph View.
"""
if isinstance(task_group, BaseOperator):
return {
'id': task_group.task_id,
'value': {
'label': task_group.label,
'labelStyle': f"fill:{task_group.ui_fgcolor};",
'style': f"fill:{task_group.ui_color};",
'rx': 5,
'ry': 5,
},
}
children = [
task_group_to_dict(child) for child in sorted(task_group.children.values(), key=lambda t: t.label)
]
if task_group.upstream_group_ids or task_group.upstream_task_ids:
children.append(
{
'id': task_group.upstream_join_id,
'value': {
'label': '',
'labelStyle': f"fill:{task_group.ui_fgcolor};",
'style': f"fill:{task_group.ui_color};",
'shape': 'circle',
},
}
)
if task_group.downstream_group_ids or task_group.downstream_task_ids:
        # This is the join node used to reduce the number of edges between two TaskGroups.
children.append(
{
'id': task_group.downstream_join_id,
'value': {
'label': '',
'labelStyle': f"fill:{task_group.ui_fgcolor};",
'style': f"fill:{task_group.ui_color};",
'shape': 'circle',
},
}
)
return {
"id": task_group.group_id,
'value': {
'label': task_group.label,
'labelStyle': f"fill:{task_group.ui_fgcolor};",
'style': f"fill:{task_group.ui_color}",
'rx': 5,
'ry': 5,
'clusterLabelPos': 'top',
},
'tooltip': task_group.tooltip,
'children': children,
}
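# Shape of the dict produced above for a plain operator (values illustrative only):
#   {'id': 'extract', 'value': {'label': 'extract', 'labelStyle': 'fill:#000;',
#                               'style': 'fill:#fff;', 'rx': 5, 'ry': 5}}
# A TaskGroup node additionally carries 'tooltip' and a recursive 'children' list, to which
# the circular upstream/downstream join nodes are appended when the group has dependencies.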
def dag_edges(dag):
"""
Create the list of edges needed to construct the Graph View.
A special case is made if a TaskGroup is immediately upstream/downstream of another
TaskGroup or task. Two dummy nodes named upstream_join_id and downstream_join_id are
created for the TaskGroup. Instead of drawing an edge onto every task in the TaskGroup,
all edges are directed onto the dummy nodes. This is to cut down the number of edges on
the graph.
For example: A DAG with TaskGroups group1 and group2:
group1: task1, task2, task3
group2: task4, task5, task6
group2 is downstream of group1:
group1 >> group2
Edges to add (This avoids having to create edges between every task in group1 and group2):
task1 >> downstream_join_id
task2 >> downstream_join_id
task3 >> downstream_join_id
downstream_join_id >> upstream_join_id
upstream_join_id >> task4
upstream_join_id >> task5
upstream_join_id >> task6
"""
# Edges to add between TaskGroup
edges_to_add = set()
# Edges to remove between individual tasks that are replaced by edges_to_add.
edges_to_skip = set()
task_group_map = dag.task_group.get_task_group_dict()
def collect_edges(task_group):
"""Update edges_to_add and edges_to_skip according to TaskGroups."""
if isinstance(task_group, BaseOperator):
return
for target_id in task_group.downstream_group_ids:
# For every TaskGroup immediately downstream, add edges between downstream_join_id
# and upstream_join_id. Skip edges between individual tasks of the TaskGroups.
target_group = task_group_map[target_id]
edges_to_add.add((task_group.downstream_join_id, target_group.upstream_join_id))
for child in task_group.get_leaves():
edges_to_add.add((child.task_id, task_group.downstream_join_id))
for target in target_group.get_roots():
edges_to_skip.add((child.task_id, target.task_id))
edges_to_skip.add((child.task_id, target_group.upstream_join_id))
for child in target_group.get_roots():
edges_to_add.add((target_group.upstream_join_id, child.task_id))
edges_to_skip.add((task_group.downstream_join_id, child.task_id))
# For every individual task immediately downstream, add edges between downstream_join_id and
# the downstream task. Skip edges between individual tasks of the TaskGroup and the
# downstream task.
for target_id in task_group.downstream_task_ids:
edges_to_add.add((task_group.downstream_join_id, target_id))
for child in task_group.get_leaves():
edges_to_add.add((child.task_id, task_group.downstream_join_id))
edges_to_skip.add((child.task_id, target_id))
# For every individual task immediately upstream, add edges between the upstream task
# and upstream_join_id. Skip edges between the upstream task and individual tasks
# of the TaskGroup.
for source_id in task_group.upstream_task_ids:
edges_to_add.add((source_id, task_group.upstream_join_id))
for child in task_group.get_roots():
edges_to_add.add((task_group.upstream_join_id, child.task_id))
edges_to_skip.add((source_id, child.task_id))
for child in task_group.children.values():
collect_edges(child)
collect_edges(dag.task_group)
# Collect all the edges between individual tasks
edges = set()
def get_downstream(task):
for child in task.downstream_list:
edge = (task.task_id, child.task_id)
if edge not in edges:
edges.add(edge)
get_downstream(child)
for root in dag.roots:
get_downstream(root)
return [
{'source_id': source_id, 'target_id': target_id}
for source_id, target_id in sorted(edges.union(edges_to_add) - edges_to_skip)
]
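# The result is a flat list of {'source_id': ..., 'target_id': ...} dicts. For the docstring
# example above, one of the returned edges would be the single link between the two groups'
# join nodes (exact ids depend on how TaskGroup names them; shown here only as an illustration):
#   {'source_id': 'group1.downstream_join_id', 'target_id': 'group2.upstream_join_id'}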
######################################################################################
# Error handlers
######################################################################################
def circles(error): # pylint: disable=unused-argument
"""Show Circles on screen for any error in the Webserver"""
return (
render_template(
'airflow/circles.html',
hostname=socket.getfqdn()
if conf.getboolean('webserver', 'EXPOSE_HOSTNAME', fallback=True) # noqa
else 'redact',
),
404,
)
def show_traceback(error): # pylint: disable=unused-argument
"""Show Traceback for a given error"""
return (
render_template(
'airflow/traceback.html', # noqa
python_version=sys.version.split(" ")[0],
airflow_version=version,
hostname=socket.getfqdn()
if conf.getboolean('webserver', 'EXPOSE_HOSTNAME', fallback=True)
else 'redact',
info=traceback.format_exc()
if conf.getboolean('webserver', 'EXPOSE_STACKTRACE', fallback=True)
else 'Error! Please contact server admin.',
),
500,
)
######################################################################################
# BaseViews
######################################################################################
class AirflowBaseView(BaseView): # noqa: D101
"""Base View to set Airflow related properties"""
from airflow import macros
route_base = ''
# Make our macros available to our UI templates too.
extra_args = {
'macros': macros,
}
def render_template(self, *args, **kwargs):
return super().render_template(
*args,
# Cache this at most once per request, not for the lifetime of the view instance
scheduler_job=lazy_object_proxy.Proxy(SchedulerJob.most_recent_job),
**kwargs,
)
class Airflow(AirflowBaseView): # noqa: D101 pylint: disable=too-many-public-methods
"""Main Airflow application."""
@expose('/health')
def health(self):
"""
An endpoint helping check the health status of the Airflow instance,
including metadatabase and scheduler.
"""
payload = {'metadatabase': {'status': 'unhealthy'}}
latest_scheduler_heartbeat = None
scheduler_status = 'unhealthy'
payload['metadatabase'] = {'status': 'healthy'}
try:
scheduler_job = SchedulerJob.most_recent_job()
if scheduler_job:
latest_scheduler_heartbeat = scheduler_job.latest_heartbeat.isoformat()
if scheduler_job.is_alive():
scheduler_status = 'healthy'
except Exception: # noqa pylint: disable=broad-except
payload['metadatabase']['status'] = 'unhealthy'
payload['scheduler'] = {
'status': scheduler_status,
'latest_scheduler_heartbeat': latest_scheduler_heartbeat,
}
return wwwutils.json_response(payload)
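    # Typical /health payload (values illustrative):
    #   {"metadatabase": {"status": "healthy"},
    #    "scheduler": {"status": "healthy",
    #                  "latest_scheduler_heartbeat": "2021-01-01T00:00:00+00:00"}}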
@expose('/home')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),
]
) # pylint: disable=too-many-locals,too-many-statements
def index(self):
"""Home view."""
hide_paused_dags_by_default = conf.getboolean('webserver', 'hide_paused_dags_by_default')
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
def get_int_arg(value, default=0):
try:
return int(value)
except ValueError:
return default
arg_current_page = request.args.get('page', '0')
arg_search_query = request.args.get('search')
arg_tags_filter = request.args.getlist('tags')
arg_status_filter = request.args.get('status')
if request.args.get('reset_tags') is not None:
flask_session[FILTER_TAGS_COOKIE] = None
# Remove the reset_tags=reset from the URL
return redirect(url_for('Airflow.index'))
cookie_val = flask_session.get(FILTER_TAGS_COOKIE)
if arg_tags_filter:
flask_session[FILTER_TAGS_COOKIE] = ','.join(arg_tags_filter)
elif cookie_val:
# If tags exist in cookie, but not URL, add them to the URL
return redirect(url_for('Airflow.index', tags=cookie_val.split(',')))
if arg_status_filter is None:
cookie_val = flask_session.get(FILTER_STATUS_COOKIE)
if cookie_val:
arg_status_filter = cookie_val
else:
arg_status_filter = 'active' if hide_paused_dags_by_default else 'all'
flask_session[FILTER_STATUS_COOKIE] = arg_status_filter
else:
status = arg_status_filter.strip().lower()
flask_session[FILTER_STATUS_COOKIE] = status
arg_status_filter = status
dags_per_page = PAGE_SIZE
current_page = get_int_arg(arg_current_page, default=0)
start = current_page * dags_per_page
end = start + dags_per_page
# Get all the dag id the user could access
filter_dag_ids = current_app.appbuilder.sm.get_accessible_dag_ids(g.user)
with create_session() as session:
# read orm_dags from the db
dags_query = session.query(DagModel).filter(~DagModel.is_subdag, DagModel.is_active)
# pylint: disable=no-member
if arg_search_query:
dags_query = dags_query.filter(
DagModel.dag_id.ilike('%' + arg_search_query + '%')
                    | DagModel.owners.ilike('%' + arg_search_query + '%')  # noqa
)
if arg_tags_filter:
dags_query = dags_query.filter(DagModel.tags.any(DagTag.name.in_(arg_tags_filter)))
if permissions.RESOURCE_DAG not in filter_dag_ids:
dags_query = dags_query.filter(DagModel.dag_id.in_(filter_dag_ids))
# pylint: enable=no-member
all_dags = dags_query
active_dags = dags_query.filter(~DagModel.is_paused)
paused_dags = dags_query.filter(DagModel.is_paused)
is_paused_count = dict(
all_dags.with_entities(DagModel.is_paused, func.count(DagModel.dag_id))
.group_by(DagModel.is_paused)
.all()
)
status_count_active = is_paused_count.get(False, 0)
status_count_paused = is_paused_count.get(True, 0)
all_dags_count = status_count_active + status_count_paused
if arg_status_filter == 'active':
current_dags = active_dags
num_of_all_dags = status_count_active
elif arg_status_filter == 'paused':
current_dags = paused_dags
num_of_all_dags = status_count_paused
else:
current_dags = all_dags
num_of_all_dags = all_dags_count
dags = (
current_dags.order_by(DagModel.dag_id)
.options(joinedload(DagModel.tags))
.offset(start)
.limit(dags_per_page)
.all()
)
dagtags = session.query(DagTag.name).distinct(DagTag.name).all()
tags = [
{"name": name, "selected": bool(arg_tags_filter and name in arg_tags_filter)}
for name, in dagtags
]
import_errors = session.query(errors.ImportError).all()
for import_error in import_errors:
flash("Broken DAG: [{ie.filename}] {ie.stacktrace}".format(ie=import_error), "dag_import_error")
from airflow.plugins_manager import import_errors as plugin_import_errors
for filename, stacktrace in plugin_import_errors.items():
flash(
f"Broken plugin: [{filename}] {stacktrace}",
"error",
)
num_of_pages = int(math.ceil(num_of_all_dags / float(dags_per_page)))
state_color_mapping = State.state_color.copy()
state_color_mapping["null"] = state_color_mapping.pop(None)
return self.render_template(
'airflow/dags.html',
dags=dags,
current_page=current_page,
search_query=arg_search_query if arg_search_query else '',
page_size=dags_per_page,
num_of_pages=num_of_pages,
num_dag_from=min(start + 1, num_of_all_dags),
num_dag_to=min(end, num_of_all_dags),
num_of_all_dags=num_of_all_dags,
paging=wwwutils.generate_pages(
current_page,
num_of_pages,
search=escape(arg_search_query) if arg_search_query else None,
status=arg_status_filter if arg_status_filter else None,
),
num_runs=num_runs,
tags=tags,
state_color=state_color_mapping,
status_filter=arg_status_filter,
status_count_all=all_dags_count,
status_count_active=status_count_active,
status_count_paused=status_count_paused,
tags_filter=arg_tags_filter,
)
@expose('/dag_stats', methods=['POST'])
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
]
)
@provide_session
def dag_stats(self, session=None):
"""Dag statistics."""
dr = models.DagRun
allowed_dag_ids = current_app.appbuilder.sm.get_accessible_dag_ids(g.user)
if permissions.RESOURCE_DAG in allowed_dag_ids:
allowed_dag_ids = [dag_id for dag_id, in session.query(models.DagModel.dag_id)]
dag_state_stats = session.query(dr.dag_id, dr.state, sqla.func.count(dr.state)).group_by(
dr.dag_id, dr.state
)
# Filter by post parameters
selected_dag_ids = {unquote(dag_id) for dag_id in request.form.getlist('dag_ids') if dag_id}
if selected_dag_ids:
filter_dag_ids = selected_dag_ids.intersection(allowed_dag_ids)
else:
filter_dag_ids = allowed_dag_ids
if not filter_dag_ids:
return wwwutils.json_response({})
payload = {}
dag_state_stats = dag_state_stats.filter(dr.dag_id.in_(filter_dag_ids)) # pylint: disable=no-member
data = {}
for dag_id, state, count in dag_state_stats:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
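        # Shape the payload as one entry per allowed DAG covering every dag-run state,
        # e.g. (illustrative): {"example_dag": [{"state": "running", "count": 1}, ...]}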
for dag_id in filter_dag_ids:
payload[dag_id] = []
for state in State.dag_states:
count = data.get(dag_id, {}).get(state, 0)
payload[dag_id].append({'state': state, 'count': count})
return wwwutils.json_response(payload)
@expose('/task_stats', methods=['POST'])
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@provide_session
def task_stats(self, session=None):
"""Task Statistics"""
allowed_dag_ids = current_app.appbuilder.sm.get_accessible_dag_ids(g.user)
if not allowed_dag_ids:
return wwwutils.json_response({})
if permissions.RESOURCE_DAG in allowed_dag_ids:
allowed_dag_ids = {dag_id for dag_id, in session.query(models.DagModel.dag_id)}
# Filter by post parameters
selected_dag_ids = {unquote(dag_id) for dag_id in request.form.getlist('dag_ids') if dag_id}
if selected_dag_ids:
filter_dag_ids = selected_dag_ids.intersection(allowed_dag_ids)
else:
filter_dag_ids = allowed_dag_ids
# pylint: disable=comparison-with-callable
running_dag_run_query_result = (
session.query(DagRun.dag_id, DagRun.execution_date)
.join(DagModel, DagModel.dag_id == DagRun.dag_id)
.filter(DagRun.state == State.RUNNING, DagModel.is_active)
)
# pylint: enable=comparison-with-callable
# pylint: disable=no-member
if selected_dag_ids:
running_dag_run_query_result = running_dag_run_query_result.filter(
DagRun.dag_id.in_(filter_dag_ids)
)
# pylint: enable=no-member
running_dag_run_query_result = running_dag_run_query_result.subquery('running_dag_run')
# pylint: disable=no-member
# Select all task_instances from active dag_runs.
running_task_instance_query_result = session.query(
TaskInstance.dag_id.label('dag_id'), TaskInstance.state.label('state')
).join(
running_dag_run_query_result,
and_(
running_dag_run_query_result.c.dag_id == TaskInstance.dag_id,
running_dag_run_query_result.c.execution_date == TaskInstance.execution_date,
),
)
if selected_dag_ids:
running_task_instance_query_result = running_task_instance_query_result.filter(
TaskInstance.dag_id.in_(filter_dag_ids)
)
# pylint: enable=no-member
if conf.getboolean('webserver', 'SHOW_RECENT_STATS_FOR_COMPLETED_RUNS', fallback=True):
# pylint: disable=comparison-with-callable
last_dag_run = (
session.query(DagRun.dag_id, sqla.func.max(DagRun.execution_date).label('execution_date'))
.join(DagModel, DagModel.dag_id == DagRun.dag_id)
.filter(DagRun.state != State.RUNNING, DagModel.is_active)
.group_by(DagRun.dag_id)
)
# pylint: enable=comparison-with-callable
# pylint: disable=no-member
if selected_dag_ids:
last_dag_run = last_dag_run.filter(DagRun.dag_id.in_(filter_dag_ids))
last_dag_run = last_dag_run.subquery('last_dag_run')
# pylint: enable=no-member
# Select all task_instances from active dag_runs.
# If no dag_run is active, return task instances from most recent dag_run.
last_task_instance_query_result = session.query(
TaskInstance.dag_id.label('dag_id'), TaskInstance.state.label('state')
).join(
last_dag_run,
and_(
last_dag_run.c.dag_id == TaskInstance.dag_id,
last_dag_run.c.execution_date == TaskInstance.execution_date,
),
)
# pylint: disable=no-member
if selected_dag_ids:
last_task_instance_query_result = last_task_instance_query_result.filter(
TaskInstance.dag_id.in_(filter_dag_ids)
)
# pylint: enable=no-member
final_task_instance_query_result = union_all(
last_task_instance_query_result, running_task_instance_query_result
).alias('final_ti')
else:
final_task_instance_query_result = running_task_instance_query_result.subquery('final_ti')
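        # 'final_ti' covers task instances from currently running dag runs and, when
        # enabled above, each DAG's most recent completed run.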
qry = session.query(
final_task_instance_query_result.c.dag_id,
final_task_instance_query_result.c.state,
sqla.func.count(),
).group_by(final_task_instance_query_result.c.dag_id, final_task_instance_query_result.c.state)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
payload = {}
for dag_id in filter_dag_ids:
payload[dag_id] = []
for state in State.task_states:
count = data.get(dag_id, {}).get(state, 0)
payload[dag_id].append({'state': state, 'count': count})
return wwwutils.json_response(payload)
@expose('/last_dagruns', methods=['POST'])
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
]
)
@provide_session
def last_dagruns(self, session=None):
"""Last DAG runs"""
allowed_dag_ids = current_app.appbuilder.sm.get_accessible_dag_ids(g.user)
if permissions.RESOURCE_DAG in allowed_dag_ids:
allowed_dag_ids = [dag_id for dag_id, in session.query(models.DagModel.dag_id)]
# Filter by post parameters
selected_dag_ids = {unquote(dag_id) for dag_id in request.form.getlist('dag_ids') if dag_id}
if selected_dag_ids:
filter_dag_ids = selected_dag_ids.intersection(allowed_dag_ids)
else:
filter_dag_ids = allowed_dag_ids
if not filter_dag_ids:
return wwwutils.json_response({})
query = session.query(
DagRun.dag_id,
sqla.func.max(DagRun.execution_date).label('execution_date'),
sqla.func.max(DagRun.start_date).label('start_date'),
).group_by(DagRun.dag_id)
# Filter to only ask for accessible and selected dags
        query = query.filter(DagRun.dag_id.in_(filter_dag_ids))  # pylint: disable=no-member
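        # Dots in dag_ids are escaped ("." -> "__dot__"), presumably so the ids are safe
        # to use as client-side object keys / element ids.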
resp = {
r.dag_id.replace('.', '__dot__'): {
'dag_id': r.dag_id,
'execution_date': r.execution_date.isoformat(),
'start_date': r.start_date.isoformat(),
}
for r in query
}
return wwwutils.json_response(resp)
@expose('/code')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_CODE),
]
)
@provide_session
def code(self, session=None):
"""Dag Code."""
all_errors = ""
dag_orm = None
dag_id = None
try:
dag_id = request.args.get('dag_id')
dag_orm = DagModel.get_dagmodel(dag_id, session=session)
code = DagCode.get_code_by_fileloc(dag_orm.fileloc)
html_code = Markup(
highlight(
code, lexers.PythonLexer(), HtmlFormatter(linenos=True) # pylint: disable=no-member
)
)
except Exception as e: # pylint: disable=broad-except
all_errors += (
"Exception encountered during "
+ f"dag_id retrieval/dag retrieval fallback/code highlighting:\n\n{e}\n"
)
html_code = Markup('<p>Failed to load file.</p><p>Details: {}</p>').format( # noqa
escape(all_errors)
)
return self.render_template(
'airflow/dag_code.html',
html_code=html_code,
dag=dag_orm,
title=dag_id,
root=request.args.get('root'),
demo_mode=conf.getboolean('webserver', 'demo_mode'),
wrapped=conf.getboolean('webserver', 'default_wrap'),
)
@expose('/dag_details')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
]
)
@provide_session
def dag_details(self, session=None):
"""Get Dag details."""
dag_id = request.args.get('dag_id')
dag = current_app.dag_bag.get_dag(dag_id)
title = "DAG Details"
root = request.args.get('root', '')
states = (
session.query(TaskInstance.state, sqla.func.count(TaskInstance.dag_id))
.filter(TaskInstance.dag_id == dag_id)
.group_by(TaskInstance.state)
.all()
)
active_runs = models.DagRun.find(dag_id=dag_id, state=State.RUNNING, external_trigger=False)
tags = session.query(models.DagTag).filter(models.DagTag.dag_id == dag_id).all()
return self.render_template(
'airflow/dag_details.html',
dag=dag,
title=title,
root=root,
states=states,
State=State,
active_runs=active_runs,
tags=tags,
)
@expose('/rendered-templates')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
def rendered_templates(self):
"""Get rendered Dag."""
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = timezone.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
root = request.args.get('root', '')
logging.info("Retrieving rendered templates.")
dag = current_app.dag_bag.get_dag(dag_id)
task = copy.copy(dag.get_task(task_id))
ti = models.TaskInstance(task=task, execution_date=dttm)
try:
ti.get_rendered_template_fields()
except AirflowException as e: # pylint: disable=broad-except
msg = "Error rendering template: " + escape(e)
if e.__cause__: # pylint: disable=using-constant-test
msg += Markup("<br><br>OriginalError: ") + escape(e.__cause__)
flash(msg, "error")
except Exception as e: # pylint: disable=broad-except
flash("Error rendering template: " + str(e), "error")
title = "Rendered Template"
html_dict = {}
renderers = wwwutils.get_attr_renderer()
for template_field in task.template_fields:
content = getattr(task, template_field)
renderer = task.template_fields_renderers.get(template_field, template_field)
if renderer in renderers:
if isinstance(content, (dict, list)):
content = json.dumps(content, sort_keys=True, indent=4)
html_dict[template_field] = renderers[renderer](content)
else:
                html_dict[template_field] = Markup("<pre><code>{}</code></pre>").format(
pformat(content)
) # noqa
return self.render_template(
'airflow/ti_code.html',
html_dict=html_dict,
dag=dag,
task_id=task_id,
execution_date=execution_date,
form=form,
root=root,
title=title,
)
@expose('/rendered-k8s')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
def rendered_k8s(self):
"""Get rendered k8s yaml."""
if not settings.IS_K8S_OR_K8SCELERY_EXECUTOR:
abort(404)
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = timezone.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
root = request.args.get('root', '')
logging.info("Retrieving rendered templates.")
dag = current_app.dag_bag.get_dag(dag_id)
task = dag.get_task(task_id)
ti = models.TaskInstance(task=task, execution_date=dttm)
pod_spec = None
try:
pod_spec = ti.get_rendered_k8s_spec()
except AirflowException as e:
msg = "Error rendering Kubernetes POD Spec: " + escape(e)
if e.__cause__: # pylint: disable=using-constant-test
msg += Markup("<br><br>OriginalError: ") + escape(e.__cause__)
flash(msg, "error")
except Exception as e: # pylint: disable=broad-except
flash("Error rendering Kubernetes Pod Spec: " + str(e), "error")
title = "Rendered K8s Pod Spec"
html_dict = {}
renderers = wwwutils.get_attr_renderer()
if pod_spec:
content = yaml.dump(pod_spec)
content = renderers["yaml"](content)
else:
            content = Markup("<pre><code>Error rendering Kubernetes POD Spec</code></pre>")
html_dict['k8s'] = content
return self.render_template(
'airflow/ti_code.html',
html_dict=html_dict,
dag=dag,
task_id=task_id,
execution_date=execution_date,
form=form,
root=root,
title=title,
)
@expose('/get_logs_with_metadata')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),
]
)
@action_logging
@provide_session
def get_logs_with_metadata(self, session=None):
"""Retrieve logs including metadata."""
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
if request.args.get('try_number') is not None:
try_number = int(request.args.get('try_number'))
else:
try_number = None
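        # "metadata" is the opaque continuation token returned by the log reader on a
        # previous call; its exact shape (e.g. {"end_of_log": false, "offset": ...})
        # depends on the configured task log handler.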
metadata = request.args.get('metadata')
metadata = json.loads(metadata)
response_format = request.args.get('format', 'json')
# metadata may be null
if not metadata:
metadata = {}
# Convert string datetime into actual datetime
try:
execution_date = timezone.parse(execution_date)
except ValueError:
error_message = (
'Given execution date, {}, could not be identified '
'as a date. Example date format: 2015-11-16T14:34:15+00:00'.format(execution_date)
)
response = jsonify({'error': error_message})
response.status_code = 400
return response
task_log_reader = TaskLogReader()
if not task_log_reader.supports_read:
return jsonify(
message="Task log handler does not support read logs.",
error=True,
metadata={"end_of_log": True},
)
ti = (
session.query(models.TaskInstance)
.filter(
models.TaskInstance.dag_id == dag_id,
models.TaskInstance.task_id == task_id,
models.TaskInstance.execution_date == execution_date,
)
.first()
)
if ti is None:
return jsonify(
message="*** Task instance did not exist in the DB\n",
error=True,
metadata={"end_of_log": True},
)
try:
dag = current_app.dag_bag.get_dag(dag_id)
if dag:
ti.task = dag.get_task(ti.task_id)
if response_format == 'json':
logs, metadata = task_log_reader.read_log_chunks(ti, try_number, metadata)
message = logs[0] if try_number is not None else logs
return jsonify(message=message, metadata=metadata)
metadata['download_logs'] = True
attachment_filename = task_log_reader.render_log_filename(ti, try_number)
log_stream = task_log_reader.read_log_stream(ti, try_number, metadata)
return Response(
response=log_stream,
mimetype="text/plain",
headers={"Content-Disposition": f"attachment; filename={attachment_filename}"},
)
except AttributeError as e:
error_message = [f"Task log handler does not support read logs.\n{str(e)}\n"]
metadata['end_of_log'] = True
return jsonify(message=error_message, error=True, metadata=metadata)
@expose('/log')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),
]
)
@action_logging
@provide_session
def log(self, session=None):
"""Retrieve log."""
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = timezone.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag_model = DagModel.get_dagmodel(dag_id)
ti = (
session.query(models.TaskInstance)
.filter(
models.TaskInstance.dag_id == dag_id,
models.TaskInstance.task_id == task_id,
models.TaskInstance.execution_date == dttm,
)
.first()
)
num_logs = 0
if ti is not None:
num_logs = ti.next_try_number - 1
if ti.state == State.UP_FOR_RESCHEDULE:
# Tasks in reschedule state decremented the try number
num_logs += 1
logs = [''] * num_logs
root = request.args.get('root', '')
return self.render_template(
'airflow/ti_log.html',
logs=logs,
dag=dag_model,
title="Log by attempts",
dag_id=dag_id,
task_id=task_id,
execution_date=execution_date,
form=form,
root=root,
wrapped=conf.getboolean('webserver', 'default_wrap'),
)
@expose('/redirect_to_external_log')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),
]
)
@action_logging
@provide_session
def redirect_to_external_log(self, session=None):
"""Redirects to external log."""
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = timezone.parse(execution_date)
try_number = request.args.get('try_number', 1)
ti = (
session.query(models.TaskInstance)
.filter(
models.TaskInstance.dag_id == dag_id,
models.TaskInstance.task_id == task_id,
models.TaskInstance.execution_date == dttm,
)
.first()
)
if not ti:
flash(f"Task [{dag_id}.{task_id}] does not exist", "error")
return redirect(url_for('Airflow.index'))
task_log_reader = TaskLogReader()
if not task_log_reader.supports_external_link:
flash("Task log handler does not support external links", "error")
return redirect(url_for('Airflow.index'))
handler = task_log_reader.log_handler
url = handler.get_external_log_url(ti, try_number)
return redirect(url)
@expose('/task')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
def task(self):
"""Retrieve task."""
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = timezone.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
root = request.args.get('root', '')
dag = current_app.dag_bag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(f"Task [{dag_id}.{task_id}] doesn't seem to exist at the moment", "error")
return redirect(url_for('Airflow.index'))
task = copy.copy(dag.get_task(task_id))
task.resolve_template_files()
ti = TaskInstance(task=task, execution_date=dttm)
ti.refresh_from_db()
ti_attrs = []
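        # type(self.task) is the bound-method type, so this comparison simply filters
        # out the instance's methods and keeps plain attributes.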
for attr_name in dir(ti):
if not attr_name.startswith('_'):
attr = getattr(ti, attr_name)
if type(attr) != type(self.task): # noqa pylint: disable=unidiomatic-typecheck
ti_attrs.append((attr_name, str(attr)))
task_attrs = []
for attr_name in dir(task):
if not attr_name.startswith('_'):
attr = getattr(task, attr_name)
# pylint: disable=unidiomatic-typecheck
if type(attr) != type(self.task) and attr_name not in wwwutils.get_attr_renderer(): # noqa
task_attrs.append((attr_name, str(attr)))
# pylint: enable=unidiomatic-typecheck
# Color coding the special attributes that are code
special_attrs_rendered = {}
for attr_name in wwwutils.get_attr_renderer():
if hasattr(task, attr_name):
source = getattr(task, attr_name)
special_attrs_rendered[attr_name] = wwwutils.get_attr_renderer()[attr_name](source)
no_failed_deps_result = [
(
"Unknown",
"All dependencies are met but the task instance is not running. In most "
"cases this just means that the task will probably be scheduled soon "
"unless:<br>\n- The scheduler is down or under heavy load<br>\n{}\n"
"<br>\nIf this task instance does not start soon please contact your "
"Airflow administrator for assistance.".format(
"- This task instance already ran and had it's state changed manually "
"(e.g. cleared in the UI)<br>"
if ti.state == State.NONE
else ""
),
)
]
# Use the scheduler's context to figure out which dependencies are not met
dep_context = DepContext(SCHEDULER_QUEUED_DEPS)
failed_dep_reasons = [
(dep.dep_name, dep.reason) for dep in ti.get_failed_dep_statuses(dep_context=dep_context)
]
title = "Task Instance Details"
return self.render_template(
'airflow/task.html',
task_attrs=task_attrs,
ti_attrs=ti_attrs,
failed_dep_reasons=failed_dep_reasons or no_failed_deps_result,
task_id=task_id,
execution_date=execution_date,
special_attrs_rendered=special_attrs_rendered,
form=form,
root=root,
dag=dag,
title=title,
)
@expose('/xcom')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_XCOM),
]
)
@action_logging
@provide_session
def xcom(self, session=None):
"""Retrieve XCOM."""
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = timezone.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
root = request.args.get('root', '')
dm_db = models.DagModel
ti_db = models.TaskInstance
dag = session.query(dm_db).filter(dm_db.dag_id == dag_id).first()
        ti = session.query(ti_db).filter(ti_db.dag_id == dag_id, ti_db.task_id == task_id).first()
if not ti:
flash(f"Task [{dag_id}.{task_id}] doesn't seem to exist at the moment", "error")
return redirect(url_for('Airflow.index'))
xcomlist = (
session.query(XCom)
.filter(XCom.dag_id == dag_id, XCom.task_id == task_id, XCom.execution_date == dttm)
.all()
)
attributes = []
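        # XCom keys starting with "_" are treated as internal and hidden from the listing.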
for xcom in xcomlist:
if not xcom.key.startswith('_'):
attributes.append((xcom.key, xcom.value))
title = "XCom"
return self.render_template(
'airflow/xcom.html',
attributes=attributes,
task_id=task_id,
execution_date=execution_date,
form=form,
root=root,
dag=dag,
title=title,
)
@expose('/run', methods=['POST'])
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
def run(self):
"""Runs Task Instance."""
dag_id = request.form.get('dag_id')
task_id = request.form.get('task_id')
origin = get_safe_url(request.form.get('origin'))
dag = current_app.dag_bag.get_dag(dag_id)
task = dag.get_task(task_id)
execution_date = request.form.get('execution_date')
execution_date = timezone.parse(execution_date)
ignore_all_deps = request.form.get('ignore_all_deps') == "true"
ignore_task_deps = request.form.get('ignore_task_deps') == "true"
ignore_ti_state = request.form.get('ignore_ti_state') == "true"
executor = ExecutorLoader.get_default_executor()
valid_celery_config = False
valid_kubernetes_config = False
try:
from airflow.executors.celery_executor import CeleryExecutor # noqa
valid_celery_config = isinstance(executor, CeleryExecutor)
except ImportError:
pass
try:
from airflow.executors.kubernetes_executor import KubernetesExecutor # noqa
valid_kubernetes_config = isinstance(executor, KubernetesExecutor)
except ImportError:
pass
if not valid_celery_config and not valid_kubernetes_config:
flash("Only works with the Celery or Kubernetes executors, sorry", "error")
return redirect(origin)
ti = models.TaskInstance(task=task, execution_date=execution_date)
ti.refresh_from_db()
# Make sure the task instance can be run
dep_context = DepContext(
deps=RUNNING_DEPS,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state,
)
failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))
if failed_deps:
failed_deps_str = ", ".join([f"{dep.dep_name}: {dep.reason}" for dep in failed_deps])
flash(
"Could not queue task instance for execution, dependencies not met: "
"{}".format(failed_deps_str),
"error",
)
return redirect(origin)
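        # Queue the task instance directly on the executor (bypassing the scheduler), as
        # triggered from the "Run" button in the UI.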
executor.start()
executor.queue_task_instance(
ti,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state,
)
executor.heartbeat()
flash(f"Sent {ti} to the message queue, it should start any moment now.")
return redirect(origin)
@expose('/delete', methods=['POST'])
@auth.has_access(
[
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG),
]
)
@action_logging
def delete(self):
"""Deletes DAG."""
from airflow.api.common.experimental import delete_dag
from airflow.exceptions import DagFileExists, DagNotFound
dag_id = request.values.get('dag_id')
origin = get_safe_url(request.values.get('origin'))
try:
delete_dag.delete_dag(dag_id)
except DagNotFound:
flash(f"DAG with id {dag_id} not found. Cannot delete", 'error')
return redirect(request.referrer)
except DagFileExists:
flash(f"Dag id {dag_id} is still in DagBag. Remove the DAG file first.", 'error')
return redirect(request.referrer)
flash(f"Deleting DAG with id {dag_id}. May take a couple minutes to fully disappear.")
# Upon success return to origin.
return redirect(origin)
@expose('/trigger', methods=['POST', 'GET'])
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_DAG_RUN),
]
)
@action_logging
@provide_session
def trigger(self, session=None):
"""Triggers DAG Run."""
dag_id = request.values.get('dag_id')
origin = get_safe_url(request.values.get('origin'))
request_conf = request.values.get('conf')
if request.method == 'GET':
# Populate conf textarea with conf requests parameter, or dag.params
default_conf = ''
if request_conf:
default_conf = request_conf
else:
try:
dag = current_app.dag_bag.get_dag(dag_id)
default_conf = json.dumps(dag.params, indent=4)
except TypeError:
flash("Could not pre-populate conf field due to non-JSON-serializable data-types")
return self.render_template(
'airflow/trigger.html', dag_id=dag_id, origin=origin, conf=default_conf
)
dag_orm = session.query(models.DagModel).filter(models.DagModel.dag_id == dag_id).first()
if not dag_orm:
flash(f"Cannot find dag {dag_id}")
return redirect(origin)
execution_date = timezone.utcnow()
dr = DagRun.find(dag_id=dag_id, execution_date=execution_date, run_type=DagRunType.MANUAL)
if dr:
flash(f"This run_id {dr.run_id} already exists") # noqa
return redirect(origin)
run_conf = {}
if request_conf:
try:
run_conf = json.loads(request_conf)
except json.decoder.JSONDecodeError:
flash("Invalid JSON configuration", "error")
return self.render_template(
'airflow/trigger.html', dag_id=dag_id, origin=origin, conf=request_conf
)
dag = current_app.dag_bag.get_dag(dag_id)
dag.create_dagrun(
run_type=DagRunType.MANUAL,
execution_date=execution_date,
state=State.RUNNING,
conf=run_conf,
external_trigger=True,
dag_hash=current_app.dag_bag.dags_hash.get(dag_id),
)
flash(f"Triggered {dag_id}, it should start any moment now.")
return redirect(origin)
def _clear_dag_tis(
self, dag, start_date, end_date, origin, recursive=False, confirmed=False, only_failed=False
):
if confirmed:
count = dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive,
include_parentdag=recursive,
only_failed=only_failed,
)
flash(f"{count} task instances have been cleared")
return redirect(origin)
try:
tis = dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive,
include_parentdag=recursive,
only_failed=only_failed,
dry_run=True,
)
except AirflowException as ex:
flash(str(ex), 'error')
return redirect(origin)
if not tis:
flash("No task instances to clear", 'error')
response = redirect(origin)
else:
details = "\n".join([str(t) for t in tis])
response = self.render_template(
'airflow/confirm.html',
message="Here's the list of task instances you are about to clear:",
details=details,
)
return response
@expose('/clear', methods=['POST'])
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
def clear(self):
"""Clears the Dag."""
dag_id = request.form.get('dag_id')
task_id = request.form.get('task_id')
origin = get_safe_url(request.form.get('origin'))
dag = current_app.dag_bag.get_dag(dag_id)
execution_date = request.form.get('execution_date')
execution_date = timezone.parse(execution_date)
confirmed = request.form.get('confirmed') == "true"
upstream = request.form.get('upstream') == "true"
downstream = request.form.get('downstream') == "true"
future = request.form.get('future') == "true"
past = request.form.get('past') == "true"
recursive = request.form.get('recursive') == "true"
only_failed = request.form.get('only_failed') == "true"
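        # Narrow the DAG to the selected task (exact-match regex on task_id), optionally
        # pulling in its upstream and/or downstream tasks before clearing.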
dag = dag.sub_dag(
task_ids_or_regex=fr"^{task_id}$",
include_downstream=downstream,
include_upstream=upstream,
)
end_date = execution_date if not future else None
start_date = execution_date if not past else None
return self._clear_dag_tis(
dag,
start_date,
end_date,
origin,
recursive=recursive,
confirmed=confirmed,
only_failed=only_failed,
)
@expose('/dagrun_clear', methods=['POST'])
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
def dagrun_clear(self):
"""Clears the DagRun"""
dag_id = request.form.get('dag_id')
origin = get_safe_url(request.form.get('origin'))
execution_date = request.form.get('execution_date')
confirmed = request.form.get('confirmed') == "true"
dag = current_app.dag_bag.get_dag(dag_id)
execution_date = timezone.parse(execution_date)
start_date = execution_date
end_date = execution_date
return self._clear_dag_tis(dag, start_date, end_date, origin, recursive=True, confirmed=confirmed)
@expose('/blocked', methods=['POST'])
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
]
)
@provide_session
def blocked(self, session=None):
"""Mark Dag Blocked."""
allowed_dag_ids = current_app.appbuilder.sm.get_accessible_dag_ids(g.user)
if permissions.RESOURCE_DAG in allowed_dag_ids:
allowed_dag_ids = [dag_id for dag_id, in session.query(models.DagModel.dag_id)]
# Filter by post parameters
selected_dag_ids = {unquote(dag_id) for dag_id in request.form.getlist('dag_ids') if dag_id}
if selected_dag_ids:
filter_dag_ids = selected_dag_ids.intersection(allowed_dag_ids)
else:
filter_dag_ids = allowed_dag_ids
if not filter_dag_ids:
return wwwutils.json_response([])
# pylint: disable=comparison-with-callable
dags = (
session.query(DagRun.dag_id, sqla.func.count(DagRun.id))
.filter(DagRun.state == State.RUNNING)
.filter(DagRun.dag_id.in_(filter_dag_ids))
.group_by(DagRun.dag_id)
)
# pylint: enable=comparison-with-callable
payload = []
for dag_id, active_dag_runs in dags:
max_active_runs = 0
dag = current_app.dag_bag.get_dag(dag_id)
if dag:
# TODO: Make max_active_runs a column so we can query for it directly
max_active_runs = dag.max_active_runs
payload.append(
{
'dag_id': dag_id,
'active_dag_run': active_dag_runs,
'max_active_runs': max_active_runs,
}
)
return wwwutils.json_response(payload)
def _mark_dagrun_state_as_failed(self, dag_id, execution_date, confirmed, origin):
if not execution_date:
flash('Invalid execution date', 'error')
return redirect(origin)
execution_date = timezone.parse(execution_date)
dag = current_app.dag_bag.get_dag(dag_id)
if not dag:
flash(f'Cannot find DAG: {dag_id}', 'error')
return redirect(origin)
new_dag_state = set_dag_run_state_to_failed(dag, execution_date, commit=confirmed)
if confirmed:
flash('Marked failed on {} task instances'.format(len(new_dag_state)))
return redirect(origin)
else:
details = '\n'.join([str(t) for t in new_dag_state])
response = self.render_template(
'airflow/confirm.html',
message="Here's the list of task instances you are about to mark as failed",
details=details,
)
return response
def _mark_dagrun_state_as_success(self, dag_id, execution_date, confirmed, origin):
if not execution_date:
flash('Invalid execution date', 'error')
return redirect(origin)
execution_date = timezone.parse(execution_date)
dag = current_app.dag_bag.get_dag(dag_id)
if not dag:
flash(f'Cannot find DAG: {dag_id}', 'error')
return redirect(origin)
new_dag_state = set_dag_run_state_to_success(dag, execution_date, commit=confirmed)
if confirmed:
flash('Marked success on {} task instances'.format(len(new_dag_state)))
return redirect(origin)
else:
details = '\n'.join([str(t) for t in new_dag_state])
response = self.render_template(
'airflow/confirm.html',
message="Here's the list of task instances you are about to mark as success",
details=details,
)
return response
@expose('/dagrun_failed', methods=['POST'])
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG_RUN),
]
)
@action_logging
def dagrun_failed(self):
"""Mark DagRun failed."""
dag_id = request.form.get('dag_id')
execution_date = request.form.get('execution_date')
confirmed = request.form.get('confirmed') == 'true'
origin = get_safe_url(request.form.get('origin'))
return self._mark_dagrun_state_as_failed(dag_id, execution_date, confirmed, origin)
@expose('/dagrun_success', methods=['POST'])
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG_RUN),
]
)
@action_logging
def dagrun_success(self):
"""Mark DagRun success"""
dag_id = request.form.get('dag_id')
execution_date = request.form.get('execution_date')
confirmed = request.form.get('confirmed') == 'true'
origin = get_safe_url(request.form.get('origin'))
return self._mark_dagrun_state_as_success(dag_id, execution_date, confirmed, origin)
def _mark_task_instance_state( # pylint: disable=too-many-arguments
self,
dag_id,
task_id,
origin,
execution_date,
confirmed,
upstream,
downstream,
future,
past,
state,
):
dag = current_app.dag_bag.get_dag(dag_id)
task = dag.get_task(task_id)
task.dag = dag
latest_execution_date = dag.get_latest_execution_date()
if not latest_execution_date:
flash(f"Cannot make {state}, seem that dag {dag_id} has never run", "error")
return redirect(origin)
execution_date = timezone.parse(execution_date)
from airflow.api.common.experimental.mark_tasks import set_state
if confirmed:
altered = set_state(
tasks=[task],
execution_date=execution_date,
upstream=upstream,
downstream=downstream,
future=future,
past=past,
state=state,
commit=True,
)
flash("Marked {} on {} task instances".format(state, len(altered)))
return redirect(origin)
to_be_altered = set_state(
tasks=[task],
execution_date=execution_date,
upstream=upstream,
downstream=downstream,
future=future,
past=past,
state=state,
commit=False,
)
details = "\n".join([str(t) for t in to_be_altered])
response = self.render_template(
"airflow/confirm.html",
message=f"Here's the list of task instances you are about to mark as {state}:",
details=details,
)
return response
@expose('/failed', methods=['POST'])
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
def failed(self):
"""Mark task as failed."""
dag_id = request.form.get('dag_id')
task_id = request.form.get('task_id')
origin = get_safe_url(request.form.get('origin'))
execution_date = request.form.get('execution_date')
confirmed = request.form.get('confirmed') == "true"
upstream = request.form.get('failed_upstream') == "true"
downstream = request.form.get('failed_downstream') == "true"
future = request.form.get('failed_future') == "true"
past = request.form.get('failed_past') == "true"
return self._mark_task_instance_state(
dag_id,
task_id,
origin,
execution_date,
confirmed,
upstream,
downstream,
future,
past,
State.FAILED,
)
@expose('/success', methods=['POST'])
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
def success(self):
"""Mark task as success."""
dag_id = request.form.get('dag_id')
task_id = request.form.get('task_id')
origin = get_safe_url(request.form.get('origin'))
execution_date = request.form.get('execution_date')
confirmed = request.form.get('confirmed') == "true"
upstream = request.form.get('success_upstream') == "true"
downstream = request.form.get('success_downstream') == "true"
future = request.form.get('success_future') == "true"
past = request.form.get('success_past') == "true"
return self._mark_task_instance_state(
dag_id,
task_id,
origin,
execution_date,
confirmed,
upstream,
downstream,
future,
past,
State.SUCCESS,
)
@expose('/tree')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),
]
)
@gzipped # pylint: disable=too-many-locals
@action_logging # pylint: disable=too-many-locals
def tree(self):
"""Get Dag as tree."""
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = current_app.dag_bag.get_dag(dag_id)
if not dag:
flash(f'DAG "{dag_id}" seems to be missing from DagBag.', "error")
return redirect(url_for('Airflow.index'))
root = request.args.get('root')
if root:
dag = dag.sub_dag(task_ids_or_regex=root, include_downstream=False, include_upstream=True)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
if num_runs:
num_runs = int(num_runs)
else:
num_runs = conf.getint('webserver', 'default_dag_run_display_number')
if base_date:
base_date = timezone.parse(base_date)
else:
base_date = dag.get_latest_execution_date() or timezone.utcnow()
with create_session() as session:
dag_runs = (
session.query(DagRun)
.filter(DagRun.dag_id == dag.dag_id, DagRun.execution_date <= base_date)
.order_by(DagRun.execution_date.desc())
.limit(num_runs)
.all()
)
dag_runs = {dr.execution_date: alchemy_to_dict(dr) for dr in dag_runs}
dates = sorted(dag_runs.keys())
max_date = max(dates) if dates else None
min_date = min(dates) if dates else None
tis = dag.get_task_instances(start_date=min_date, end_date=base_date)
task_instances: Dict[Tuple[str, datetime], models.TaskInstance] = {}
for ti in tis:
task_instances[(ti.task_id, ti.execution_date)] = ti
expanded = set()
# The default recursion traces every path so that tree view has full
# expand/collapse functionality. After 5,000 nodes we stop and fall
# back on a quick DFS search for performance. See PR #320.
node_count = 0
node_limit = 5000 / max(1, len(dag.leaves))
def encode_ti(task_instance: Optional[models.TaskInstance]) -> Optional[List]:
if not task_instance:
return None
# NOTE: order of entry is important here because client JS relies on it for
# tree node reconstruction. Remember to change JS code in tree.html
# whenever order is altered.
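            # An encoded instance looks like, e.g., ["success", 1, 1609459200, 42]
            # (state, try_number, start timestamp in seconds, duration in seconds).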
task_instance_data = [
task_instance.state,
task_instance.try_number,
None, # start_ts
None, # duration
]
if task_instance.start_date:
# round to seconds to reduce payload size
task_instance_data[2] = int(task_instance.start_date.timestamp())
if task_instance.duration is not None:
task_instance_data[3] = int(task_instance.duration)
return task_instance_data
def recurse_nodes(task, visited):
nonlocal node_count
node_count += 1
visited.add(task)
task_id = task.task_id
node = {
'name': task.task_id,
'instances': [encode_ti(task_instances.get((task_id, d))) for d in dates],
'num_dep': len(task.downstream_list),
'operator': task.task_type,
'retries': task.retries,
'owner': task.owner,
'ui_color': task.ui_color,
}
if task.downstream_list:
children = [
recurse_nodes(t, visited)
for t in task.downstream_list
if node_count < node_limit or t not in visited
]
# D3 tree uses children vs _children to define what is
# expanded or not. The following block makes it such that
# repeated nodes are collapsed by default.
if task.task_id not in expanded:
children_key = 'children'
expanded.add(task.task_id)
else:
children_key = "_children"
node[children_key] = children
if task.depends_on_past:
node['depends_on_past'] = task.depends_on_past
if task.start_date:
# round to seconds to reduce payload size
node['start_ts'] = int(task.start_date.timestamp())
if task.end_date:
# round to seconds to reduce payload size
node['end_ts'] = int(task.end_date.timestamp())
if task.extra_links:
node['extra_links'] = task.extra_links
return node
data = {
'name': '[DAG]',
'children': [recurse_nodes(t, set()) for t in dag.roots],
'instances': [dag_runs.get(d) or {'execution_date': d.isoformat()} for d in dates],
}
form = DateTimeWithNumRunsForm(data={'base_date': max_date, 'num_runs': num_runs})
doc_md = wwwutils.wrapped_markdown(getattr(dag, 'doc_md', None), css_class='dag-doc')
task_log_reader = TaskLogReader()
if task_log_reader.supports_external_link:
external_log_name = task_log_reader.log_handler.log_name
else:
external_log_name = None
# avoid spaces to reduce payload size
data = htmlsafe_json_dumps(data, separators=(',', ':'))
return self.render_template(
'airflow/tree.html',
operators=sorted({op.task_type: op for op in dag.tasks}.values(), key=lambda x: x.task_type),
root=root,
form=form,
dag=dag,
doc_md=doc_md,
data=data,
blur=blur,
num_runs=num_runs,
show_external_log_redirect=task_log_reader.supports_external_link,
external_log_name=external_log_name,
)
@expose('/graph')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),
]
)
@gzipped
@action_logging
@provide_session
def graph(self, session=None):
"""Get DAG as Graph."""
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = current_app.dag_bag.get_dag(dag_id)
if not dag:
flash(f'DAG "{dag_id}" seems to be missing.', "error")
return redirect(url_for('Airflow.index'))
root = request.args.get('root')
if root:
dag = dag.sub_dag(task_ids_or_regex=root, include_upstream=True, include_downstream=False)
arrange = request.args.get('arrange', dag.orientation)
nodes = task_group_to_dict(dag.task_group)
edges = dag_edges(dag)
dt_nr_dr_data = get_date_time_num_runs_dag_runs_form_data(request, session, dag)
dt_nr_dr_data['arrange'] = arrange
dttm = dt_nr_dr_data['dttm']
class GraphForm(DateTimeWithNumRunsWithDagRunsForm):
"""Graph Form class."""
arrange = SelectField(
"Layout",
choices=(
('LR', "Left > Right"),
('RL', "Right > Left"),
('TB', "Top > Bottom"),
('BT', "Bottom > Top"),
),
)
form = GraphForm(data=dt_nr_dr_data)
form.execution_date.choices = dt_nr_dr_data['dr_choices']
task_instances = {ti.task_id: alchemy_to_dict(ti) for ti in dag.get_task_instances(dttm, dttm)}
tasks = {
t.task_id: {
'dag_id': t.dag_id,
'task_type': t.task_type,
'extra_links': t.extra_links,
}
for t in dag.tasks
}
if not tasks:
flash("No tasks found", "error")
session.commit()
doc_md = wwwutils.wrapped_markdown(getattr(dag, 'doc_md', None), css_class='dag-doc')
task_log_reader = TaskLogReader()
if task_log_reader.supports_external_link:
external_log_name = task_log_reader.log_handler.log_name
else:
external_log_name = None
return self.render_template(
'airflow/graph.html',
dag=dag,
form=form,
width=request.args.get('width', "100%"),
height=request.args.get('height', "800"),
execution_date=dttm.isoformat(),
state_token=wwwutils.state_token(dt_nr_dr_data['dr_state']),
doc_md=doc_md,
arrange=arrange,
operators=sorted({op.task_type: op for op in dag.tasks}.values(), key=lambda x: x.task_type),
blur=blur,
root=root or '',
task_instances=task_instances,
tasks=tasks,
nodes=nodes,
edges=edges,
show_external_log_redirect=task_log_reader.supports_external_link,
external_log_name=external_log_name,
dag_run_state=dt_nr_dr_data['dr_state'],
)
@expose('/duration')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging # pylint: disable=too-many-locals
@provide_session # pylint: disable=too-many-locals
def duration(self, session=None):
"""Get Dag as duration graph."""
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
dag_id = request.args.get('dag_id')
try:
dag = current_app.dag_bag.get_dag(dag_id)
except airflow.exceptions.SerializedDagNotFound:
dag = None
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
if dag is None:
flash(f'DAG "{dag_id}" seems to be missing.', "error")
return redirect(url_for('Airflow.index'))
if base_date:
base_date = timezone.parse(base_date)
else:
base_date = dag.get_latest_execution_date() or timezone.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else timezone.utc_epoch()
root = request.args.get('root')
if root:
dag = dag.sub_dag(task_ids_or_regex=root, include_upstream=True, include_downstream=False)
chart_height = wwwutils.get_chart_height(dag)
chart = nvd3.lineChart(name="lineChart", x_is_date=True, height=chart_height, width="1200")
cum_chart = nvd3.lineChart(name="cumLineChart", x_is_date=True, height=chart_height, width="1200")
y_points = defaultdict(list)
x_points = defaultdict(list)
cumulative_y = defaultdict(list)
task_instances = dag.get_task_instances(start_date=min_date, end_date=base_date)
ti_fails = (
session.query(TaskFail)
.filter(
TaskFail.dag_id == dag.dag_id,
TaskFail.execution_date >= min_date,
TaskFail.execution_date <= base_date,
TaskFail.task_id.in_([t.task_id for t in dag.tasks]),
)
.all()
)
fails_totals = defaultdict(int)
for failed_task_instance in ti_fails:
dict_key = (
failed_task_instance.dag_id,
failed_task_instance.task_id,
failed_task_instance.execution_date,
)
if failed_task_instance.duration:
fails_totals[dict_key] += failed_task_instance.duration
for task_instance in task_instances:
if task_instance.duration:
date_time = wwwutils.epoch(task_instance.execution_date)
x_points[task_instance.task_id].append(date_time)
y_points[task_instance.task_id].append(float(task_instance.duration))
fails_dict_key = (task_instance.dag_id, task_instance.task_id, task_instance.execution_date)
fails_total = fails_totals[fails_dict_key]
cumulative_y[task_instance.task_id].append(float(task_instance.duration + fails_total))
# determine the most relevant time unit for the set of task instance
# durations for the DAG
y_unit = infer_time_unit([d for t in y_points.values() for d in t])
cum_y_unit = infer_time_unit([d for t in cumulative_y.values() for d in t])
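        # The inferred unit keeps the charts readable: e.g. durations in the thousands of
        # seconds are typically scaled to minutes or hours rather than plotted raw.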
# update the y Axis on both charts to have the correct time units
chart.create_y_axis('yAxis', format='.02f', custom_format=False, label=f'Duration ({y_unit})')
chart.axislist['yAxis']['axisLabelDistance'] = '-15'
cum_chart.create_y_axis('yAxis', format='.02f', custom_format=False, label=f'Duration ({cum_y_unit})')
cum_chart.axislist['yAxis']['axisLabelDistance'] = '-15'
for task_id in x_points:
chart.add_serie(
name=task_id,
x=x_points[task_id],
y=scale_time_units(y_points[task_id], y_unit),
)
cum_chart.add_serie(
name=task_id,
x=x_points[task_id],
y=scale_time_units(cumulative_y[task_id], cum_y_unit),
)
dates = sorted({ti.execution_date for ti in task_instances})
max_date = max([ti.execution_date for ti in task_instances]) if dates else None
session.commit()
form = DateTimeWithNumRunsForm(data={'base_date': max_date, 'num_runs': num_runs})
chart.buildcontent()
cum_chart.buildcontent()
s_index = cum_chart.htmlcontent.rfind('});')
cum_chart.htmlcontent = (
cum_chart.htmlcontent[:s_index]
+ "$( document ).trigger('chartload')"
+ cum_chart.htmlcontent[s_index:]
)
return self.render_template(
'airflow/duration_chart.html',
dag=dag,
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
chart=Markup(chart.htmlcontent),
cum_chart=Markup(cum_chart.htmlcontent),
)
@expose('/tries')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
@provide_session
def tries(self, session=None):
"""Shows all tries."""
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
dag_id = request.args.get('dag_id')
dag = current_app.dag_bag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
if base_date:
base_date = timezone.parse(base_date)
else:
base_date = dag.get_latest_execution_date() or timezone.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else timezone.utc_epoch()
root = request.args.get('root')
if root:
dag = dag.sub_dag(task_ids_or_regex=root, include_upstream=True, include_downstream=False)
chart_height = wwwutils.get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, y_axis_format='d', height=chart_height, width="1200"
)
for task in dag.tasks:
y_points = []
x_points = []
for ti in task.get_task_instances(start_date=min_date, end_date=base_date):
dttm = wwwutils.epoch(ti.execution_date)
x_points.append(dttm)
# y value should reflect completed tries to have a 0 baseline.
y_points.append(ti.prev_attempted_tries)
if x_points:
chart.add_serie(name=task.task_id, x=x_points, y=y_points)
tis = dag.get_task_instances(start_date=min_date, end_date=base_date)
tries = sorted({ti.try_number for ti in tis})
max_date = max([ti.execution_date for ti in tis]) if tries else None
chart.create_y_axis('yAxis', format='.02f', custom_format=False, label='Tries')
chart.axislist['yAxis']['axisLabelDistance'] = '-15'
session.commit()
form = DateTimeWithNumRunsForm(data={'base_date': max_date, 'num_runs': num_runs})
chart.buildcontent()
return self.render_template(
'airflow/chart.html',
dag=dag,
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
chart=Markup(chart.htmlcontent),
tab_title='Tries',
)
@expose('/landing_times')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
@provide_session
def landing_times(self, session=None):
"""Shows landing times."""
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
dag_id = request.args.get('dag_id')
dag = current_app.dag_bag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
if base_date:
base_date = timezone.parse(base_date)
else:
base_date = dag.get_latest_execution_date() or timezone.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else timezone.utc_epoch()
root = request.args.get('root')
if root:
dag = dag.sub_dag(task_ids_or_regex=root, include_upstream=True, include_downstream=False)
chart_height = wwwutils.get_chart_height(dag)
chart = nvd3.lineChart(name="lineChart", x_is_date=True, height=chart_height, width="1200")
y_points = {}
x_points = {}
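        # "Landing time" is measured from the schedule tick following the run's
        # execution_date to the task's end_date; e.g. on an @hourly DAG, a task for the
        # 00:00 run that finishes at 01:20 lands ~20 minutes after its 01:00 tick.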
for task in dag.tasks:
task_id = task.task_id
y_points[task_id] = []
x_points[task_id] = []
for ti in task.get_task_instances(start_date=min_date, end_date=base_date):
ts = ti.execution_date
if dag.schedule_interval and dag.following_schedule(ts):
ts = dag.following_schedule(ts)
if ti.end_date:
dttm = wwwutils.epoch(ti.execution_date)
secs = (ti.end_date - ts).total_seconds()
x_points[task_id].append(dttm)
y_points[task_id].append(secs)
# determine the most relevant time unit for the set of landing times
# for the DAG
y_unit = infer_time_unit([d for t in y_points.values() for d in t])
# update the y Axis to have the correct time units
chart.create_y_axis('yAxis', format='.02f', custom_format=False, label=f'Landing Time ({y_unit})')
chart.axislist['yAxis']['axisLabelDistance'] = '-15'
for task_id in x_points:
chart.add_serie(
name=task_id,
x=x_points[task_id],
y=scale_time_units(y_points[task_id], y_unit),
)
tis = dag.get_task_instances(start_date=min_date, end_date=base_date)
dates = sorted({ti.execution_date for ti in tis})
max_date = max([ti.execution_date for ti in tis]) if dates else None
session.commit()
form = DateTimeWithNumRunsForm(data={'base_date': max_date, 'num_runs': num_runs})
chart.buildcontent()
return self.render_template(
'airflow/chart.html',
dag=dag,
chart=Markup(chart.htmlcontent),
height=str(chart_height + 100) + "px",
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
tab_title='Landing times',
)
@expose('/paused', methods=['POST'])
@auth.has_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
]
)
@action_logging
def paused(self):
"""Toggle paused."""
dag_id = request.args.get('dag_id')
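        # The query arg carries the toggle state sent by the UI; the DAG ends up paused
        # exactly when the arg is the string "false".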
is_paused = request.args.get('is_paused') == 'false'
models.DagModel.get_dagmodel(dag_id).set_is_paused(is_paused=is_paused)
return "OK"
@expose('/refresh', methods=['POST'])
@auth.has_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
]
)
@action_logging
@provide_session
def refresh(self, session=None):
"""Refresh DAG."""
dag_id = request.values.get('dag_id')
orm_dag = session.query(DagModel).filter(DagModel.dag_id == dag_id).first()
if orm_dag:
orm_dag.last_expired = timezone.utcnow()
session.merge(orm_dag)
session.commit()
dag = current_app.dag_bag.get_dag(dag_id)
# sync dag permission
current_app.appbuilder.sm.sync_perm_for_dag(dag_id, dag.access_control)
flash(f"DAG [{dag_id}] is now fresh as a daisy")
return redirect(request.referrer)
@expose('/refresh_all', methods=['POST'])
@auth.has_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
]
)
@action_logging
def refresh_all(self):
"""Refresh everything"""
current_app.dag_bag.collect_dags_from_db()
# sync permissions for all dags
for dag_id, dag in current_app.dag_bag.dags.items():
current_app.appbuilder.sm.sync_perm_for_dag(dag_id, dag.access_control)
flash("All DAGs are now up to date")
return redirect(url_for('Airflow.index'))
@expose('/gantt')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
@provide_session
def gantt(self, session=None):
"""Show GANTT chart."""
dag_id = request.args.get('dag_id')
dag = current_app.dag_bag.get_dag(dag_id)
demo_mode = conf.getboolean('webserver', 'demo_mode')
root = request.args.get('root')
if root:
dag = dag.sub_dag(task_ids_or_regex=root, include_upstream=True, include_downstream=False)
dt_nr_dr_data = get_date_time_num_runs_dag_runs_form_data(request, session, dag)
dttm = dt_nr_dr_data['dttm']
form = DateTimeWithNumRunsWithDagRunsForm(data=dt_nr_dr_data)
form.execution_date.choices = dt_nr_dr_data['dr_choices']
tis = [ti for ti in dag.get_task_instances(dttm, dttm) if ti.start_date and ti.state]
tis = sorted(tis, key=lambda ti: ti.start_date)
ti_fails = list(
itertools.chain(
*[
(
session.query(TaskFail)
.filter(
TaskFail.dag_id == ti.dag_id,
TaskFail.task_id == ti.task_id,
TaskFail.execution_date == ti.execution_date,
)
.all()
)
for ti in tis
]
)
)
# determine bars to show in the gantt chart
gantt_bar_items = []
tasks = []
for ti in tis:
end_date = ti.end_date or timezone.utcnow()
# prev_attempted_tries will reflect the currently running try_number
# or the try_number of the last complete run
# https://issues.apache.org/jira/browse/AIRFLOW-2143
try_count = ti.prev_attempted_tries
gantt_bar_items.append((ti.task_id, ti.start_date, end_date, ti.state, try_count))
task_dict = alchemy_to_dict(ti)
task_dict['extraLinks'] = dag.get_task(ti.task_id).extra_links
tasks.append(task_dict)
tf_count = 0
try_count = 1
prev_task_id = ""
for failed_task_instance in ti_fails:
end_date = failed_task_instance.end_date or timezone.utcnow()
start_date = failed_task_instance.start_date or end_date
if tf_count != 0 and failed_task_instance.task_id == prev_task_id:
try_count += 1
else:
try_count = 1
prev_task_id = failed_task_instance.task_id
gantt_bar_items.append(
(failed_task_instance.task_id, start_date, end_date, State.FAILED, try_count)
)
tf_count += 1
task = dag.get_task(failed_task_instance.task_id)
task_dict = alchemy_to_dict(failed_task_instance)
task_dict['state'] = State.FAILED
task_dict['operator'] = task.task_type
task_dict['try_number'] = try_count
task_dict['extraLinks'] = task.extra_links
tasks.append(task_dict)
data = {
'taskNames': [ti.task_id for ti in tis],
'tasks': tasks,
'height': len(tis) * 25 + 25,
}
session.commit()
return self.render_template(
'airflow/gantt.html',
dag=dag,
execution_date=dttm.isoformat(),
form=form,
data=data,
base_date='',
demo_mode=demo_mode,
root=root,
)
@expose('/extra_links')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
def extra_links(self):
"""
        A RESTful endpoint that returns external links for a given Operator.
It queries the operator that sent the request for the links it wishes
to provide for a given external link name.
API: GET
Args: dag_id: The id of the dag containing the task in question
task_id: The id of the task in question
execution_date: The date of execution of the task
link_name: The name of the link reference to find the actual URL for
Returns:
200: {url: <url of link>, error: None} - returned when there was no problem
finding the URL
404: {url: None, error: <error message>} - returned when the operator does
not return a URL
"""
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
link_name = request.args.get('link_name')
dttm = timezone.parse(execution_date)
dag = current_app.dag_bag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
response = jsonify(
{
'url': None,
'error': f"can't find dag {dag} or task_id {task_id}",
}
)
response.status_code = 404
return response
task = dag.get_task(task_id)
try:
url = task.get_extra_links(dttm, link_name)
except ValueError as err:
response = jsonify({'url': None, 'error': str(err)})
response.status_code = 404
return response
if url:
response = jsonify({'error': None, 'url': url})
response.status_code = 200
return response
else:
response = jsonify({'url': None, 'error': f'No URL found for {link_name}'})
response.status_code = 404
return response
@expose('/object/task_instances')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
def task_instances(self):
"""Shows task instances."""
dag_id = request.args.get('dag_id')
dag = current_app.dag_bag.get_dag(dag_id)
dttm = request.args.get('execution_date')
if dttm:
dttm = timezone.parse(dttm)
else:
return "Error: Invalid execution_date"
task_instances = {ti.task_id: alchemy_to_dict(ti) for ti in dag.get_task_instances(dttm, dttm)}
return json.dumps(task_instances, cls=utils_json.AirflowJsonEncoder)
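    # Illustrative call (hypothetical identifiers):
    #   GET /object/task_instances?dag_id=example_dag&execution_date=2021-01-01T00:00:00+00:00
    # returns a JSON object keyed by task_id, each value being the serialized
    # TaskInstance row produced by alchemy_to_dict above.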
class ConfigurationView(AirflowBaseView):
"""View to show Airflow Configurations"""
default_view = 'conf'
class_permission_name = permissions.RESOURCE_CONFIG
base_permissions = [
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_ACCESS_MENU,
]
@expose('/configuration')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_CONFIG),
]
)
def conf(self):
"""Shows configuration."""
raw = request.args.get('raw') == "true"
title = "Airflow Configuration"
subtitle = AIRFLOW_CONFIG
# Don't show config when expose_config variable is False in airflow config
if conf.getboolean("webserver", "expose_config"):
with open(AIRFLOW_CONFIG) as file:
config = file.read()
table = [
(section, key, value, source)
for section, parameters in conf.as_dict(True, True).items()
for key, (value, source) in parameters.items()
]
else:
config = (
"# Your Airflow administrator chose not to expose the "
"configuration, most likely for security reasons."
)
table = None
if raw:
return Response(response=config, status=200, mimetype="application/text")
else:
code_html = Markup(
highlight(
config,
lexers.IniLexer(), # Lexer call pylint: disable=no-member
HtmlFormatter(noclasses=True),
)
)
return self.render_template(
'airflow/config.html',
pre_subtitle=settings.HEADER + " v" + airflow.__version__,
code_html=code_html,
title=title,
subtitle=subtitle,
table=table,
)
class RedocView(AirflowBaseView):
"""Redoc Open API documentation"""
default_view = 'redoc'
@expose('/redoc')
def redoc(self):
"""Redoc API documentation."""
openapi_spec_url = url_for("/api/v1./api/v1_openapi_yaml")
return self.render_template('airflow/redoc.html', openapi_spec_url=openapi_spec_url)
######################################################################################
# ModelViews
######################################################################################
class DagFilter(BaseFilter):
"""Filter using DagIDs"""
def apply(self, query, func): # noqa pylint: disable=redefined-outer-name,unused-argument
if current_app.appbuilder.sm.has_all_dags_access():
return query
filter_dag_ids = current_app.appbuilder.sm.get_accessible_dag_ids(g.user)
return query.filter(self.model.dag_id.in_(filter_dag_ids))
class AirflowModelView(ModelView): # noqa: D101
"""Airflow Mode View."""
list_widget = AirflowModelListWidget
page_size = PAGE_SIZE
CustomSQLAInterface = wwwutils.CustomSQLAInterface
class SlaMissModelView(AirflowModelView):
"""View to show SlaMiss table"""
route_base = '/slamiss'
datamodel = AirflowModelView.CustomSQLAInterface(SlaMiss) # noqa # type: ignore
class_permission_name = permissions.RESOURCE_SLA_MISS
method_permission_name = {
'list': 'read',
}
base_permissions = [
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_ACCESS_MENU,
]
list_columns = ['dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp']
add_columns = ['dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp']
edit_columns = ['dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp']
search_columns = ['dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date']
base_order = ('execution_date', 'desc')
base_filters = [['dag_id', DagFilter, lambda: []]]
formatters_columns = {
'task_id': wwwutils.task_instance_link,
'execution_date': wwwutils.datetime_f('execution_date'),
'timestamp': wwwutils.datetime_f('timestamp'),
'dag_id': wwwutils.dag_link,
}
class XComModelView(AirflowModelView):
"""View to show records from XCom table"""
route_base = '/xcom'
list_title = 'List XComs'
datamodel = AirflowModelView.CustomSQLAInterface(XCom)
class_permission_name = permissions.RESOURCE_XCOM
method_permission_name = {
'list': 'read',
'delete': 'delete',
'action_muldelete': 'delete',
}
base_permissions = [
permissions.ACTION_CAN_CREATE,
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_DELETE,
permissions.ACTION_CAN_ACCESS_MENU,
]
search_columns = ['key', 'value', 'timestamp', 'execution_date', 'task_id', 'dag_id']
list_columns = ['key', 'value', 'timestamp', 'execution_date', 'task_id', 'dag_id']
base_order = ('execution_date', 'desc')
base_filters = [['dag_id', DagFilter, lambda: []]]
formatters_columns = {
'task_id': wwwutils.task_instance_link,
'execution_date': wwwutils.datetime_f('execution_date'),
'timestamp': wwwutils.datetime_f('timestamp'),
'dag_id': wwwutils.dag_link,
}
@action('muldelete', 'Delete', "Are you sure you want to delete selected records?", single=False)
def action_muldelete(self, items):
"""Multiple delete action."""
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
def pre_add(self, item):
"""Pre add hook."""
item.execution_date = timezone.make_aware(item.execution_date)
item.value = XCom.serialize_value(item.value)
def pre_update(self, item):
"""Pre update hook."""
item.execution_date = timezone.make_aware(item.execution_date)
item.value = XCom.serialize_value(item.value)
class ConnectionModelView(AirflowModelView):
"""View to show records from Connections table"""
route_base = '/connection'
datamodel = AirflowModelView.CustomSQLAInterface(Connection) # noqa # type: ignore
class_permission_name = permissions.RESOURCE_CONNECTION
method_permission_name = {
'add': 'create',
'list': 'read',
'edit': 'edit',
'delete': 'delete',
'action_muldelete': 'delete',
}
base_permissions = [
permissions.ACTION_CAN_CREATE,
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_EDIT,
permissions.ACTION_CAN_DELETE,
permissions.ACTION_CAN_ACCESS_MENU,
]
extra_fields = [
'extra__jdbc__drv_path',
'extra__jdbc__drv_clsname',
'extra__google_cloud_platform__project',
'extra__google_cloud_platform__key_path',
'extra__google_cloud_platform__keyfile_dict',
'extra__google_cloud_platform__scope',
'extra__google_cloud_platform__num_retries',
'extra__grpc__auth_type',
'extra__grpc__credential_pem_file',
'extra__grpc__scopes',
'extra__yandexcloud__service_account_json',
'extra__yandexcloud__service_account_json_path',
'extra__yandexcloud__oauth',
'extra__yandexcloud__public_ssh_key',
'extra__yandexcloud__folder_id',
'extra__kubernetes__in_cluster',
'extra__kubernetes__kube_config',
'extra__kubernetes__namespace',
]
list_columns = ['conn_id', 'conn_type', 'host', 'port', 'is_encrypted', 'is_extra_encrypted']
add_columns = edit_columns = [
'conn_id',
'conn_type',
'host',
'schema',
'login',
'password',
'port',
'extra',
] + extra_fields
add_form = edit_form = ConnectionForm
add_template = 'airflow/conn_create.html'
edit_template = 'airflow/conn_edit.html'
base_order = ('conn_id', 'asc')
@action('muldelete', 'Delete', 'Are you sure you want to delete selected records?', single=False)
@auth.has_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
]
)
def action_muldelete(self, items):
"""Multiple delete."""
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
def process_form(self, form, is_created):
"""Process form data."""
formdata = form.data
if formdata['conn_type'] in ['jdbc', 'google_cloud_platform', 'grpc', 'yandexcloud', 'kubernetes']:
extra = {key: formdata[key] for key in self.extra_fields if key in formdata}
form.extra.data = json.dumps(extra)
def prefill_form(self, form, pk):
"""Prefill the form."""
try:
extra = form.data.get('extra')
if extra is None:
extra_dictionary = {}
else:
extra_dictionary = json.loads(extra)
except JSONDecodeError:
extra_dictionary = {}
if not isinstance(extra_dictionary, dict):
logging.warning('extra field for %s is not a dictionary', form.data.get('conn_id', '<unknown>'))
return
for field in self.extra_fields:
value = extra_dictionary.get(field, '')
if value:
field = getattr(form, field)
field.data = value
class PluginView(AirflowBaseView):
"""View to show Airflow Plugins"""
default_view = 'list'
class_permission_name = permissions.RESOURCE_PLUGIN
method_permission_name = {
'list': 'read',
}
base_permissions = [
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_ACCESS_MENU,
]
plugins_attributes_to_dump = [
"hooks",
"executors",
"macros",
"admin_views",
"flask_blueprints",
"menu_links",
"appbuilder_views",
"appbuilder_menu_items",
"global_operator_extra_links",
"operator_extra_links",
"source",
]
@expose('/plugin')
def list(self):
"""List loaded plugins."""
plugins_manager.ensure_plugins_loaded()
plugins_manager.integrate_executor_plugins()
plugins_manager.initialize_extra_operators_links_plugins()
plugins_manager.initialize_web_ui_plugins()
plugins = []
for plugin_no, plugin in enumerate(plugins_manager.plugins, 1):
plugin_data = {
'plugin_no': plugin_no,
'plugin_name': plugin.name,
'attrs': {},
}
for attr_name in self.plugins_attributes_to_dump:
attr_value = getattr(plugin, attr_name)
plugin_data['attrs'][attr_name] = attr_value
plugins.append(plugin_data)
title = "Airflow Plugins"
return self.render_template(
'airflow/plugin.html',
plugins=plugins,
title=title,
)
class PoolModelView(AirflowModelView):
"""View to show records from Pool table"""
route_base = '/pool'
datamodel = AirflowModelView.CustomSQLAInterface(models.Pool) # noqa # type: ignore
class_permission_name = permissions.RESOURCE_POOL
method_permission_name = {
'add': 'create',
'list': 'read',
'edit': 'edit',
'delete': 'delete',
'action_muldelete': 'delete',
}
base_permissions = [
permissions.ACTION_CAN_CREATE,
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_EDIT,
permissions.ACTION_CAN_DELETE,
permissions.ACTION_CAN_ACCESS_MENU,
]
list_columns = ['pool', 'slots', 'running_slots', 'queued_slots']
add_columns = ['pool', 'slots', 'description']
edit_columns = ['pool', 'slots', 'description']
base_order = ('pool', 'asc')
@action('muldelete', 'Delete', 'Are you sure you want to delete selected records?', single=False)
def action_muldelete(self, items):
"""Multiple delete."""
if any(item.pool == models.Pool.DEFAULT_POOL_NAME for item in items):
flash("default_pool cannot be deleted", 'error')
self.update_redirect()
return redirect(self.get_redirect())
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
def pool_link(self):
"""Pool link rendering."""
pool_id = self.get('pool') # noqa pylint: disable=no-member
if pool_id is not None:
url = url_for('TaskInstanceModelView.list', _flt_3_pool=pool_id)
return Markup("<a href='{url}'>{pool_id}</a>").format(url=url, pool_id=pool_id) # noqa
else:
return Markup('<span class="label label-danger">Invalid</span>')
def frunning_slots(self):
"""Running slots rendering."""
pool_id = self.get('pool') # noqa pylint: disable=no-member
running_slots = self.get('running_slots') # noqa pylint: disable=no-member
if pool_id is not None and running_slots is not None:
url = url_for('TaskInstanceModelView.list', _flt_3_pool=pool_id, _flt_3_state='running')
return Markup("<a href='{url}'>{running_slots}</a>").format( # noqa
url=url, running_slots=running_slots
)
else:
return Markup('<span class="label label-danger">Invalid</span>')
def fqueued_slots(self):
"""Queued slots rendering."""
pool_id = self.get('pool') # noqa pylint: disable=no-member
queued_slots = self.get('queued_slots') # noqa pylint: disable=no-member
if pool_id is not None and queued_slots is not None:
url = url_for('TaskInstanceModelView.list', _flt_3_pool=pool_id, _flt_3_state='queued')
return Markup("<a href='{url}'>{queued_slots}</a>").format( # noqa
url=url, queued_slots=queued_slots
)
else:
return Markup('<span class="label label-danger">Invalid</span>')
formatters_columns = {'pool': pool_link, 'running_slots': frunning_slots, 'queued_slots': fqueued_slots}
validators_columns = {'pool': [validators.DataRequired()], 'slots': [validators.NumberRange(min=-1)]}
class VariableModelView(AirflowModelView):
"""View to show records from Variable table"""
route_base = '/variable'
list_template = 'airflow/variable_list.html'
edit_template = 'airflow/variable_edit.html'
datamodel = AirflowModelView.CustomSQLAInterface(models.Variable) # noqa # type: ignore
class_permission_name = permissions.RESOURCE_VARIABLE
method_permission_name = {
'add': 'create',
'list': 'read',
'edit': 'edit',
'delete': 'delete',
'action_muldelete': 'delete',
'action_varexport': 'read',
'varimport': 'create',
}
base_permissions = [
permissions.ACTION_CAN_CREATE,
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_EDIT,
permissions.ACTION_CAN_DELETE,
permissions.ACTION_CAN_ACCESS_MENU,
]
list_columns = ['key', 'val', 'is_encrypted']
add_columns = ['key', 'val']
edit_columns = ['key', 'val']
search_columns = ['key', 'val']
base_order = ('key', 'asc')
def hidden_field_formatter(self):
"""Formats hidden fields"""
key = self.get('key') # noqa pylint: disable=no-member
val = self.get('val') # noqa pylint: disable=no-member
if wwwutils.should_hide_value_for_key(key):
return Markup('*' * 8)
if val:
return val
else:
return Markup('<span class="label label-danger">Invalid</span>')
formatters_columns = {
'val': hidden_field_formatter,
}
validators_columns = {'key': [validators.DataRequired()]}
def prefill_form(self, form, request_id): # pylint: disable=unused-argument
if wwwutils.should_hide_value_for_key(form.key.data):
form.val.data = '*' * 8
@action('muldelete', 'Delete', 'Are you sure you want to delete selected records?', single=False)
def action_muldelete(self, items):
"""Multiple delete."""
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
@action('varexport', 'Export', '', single=False)
def action_varexport(self, items):
"""Export variables."""
var_dict = {}
decoder = json.JSONDecoder()
for var in items:
try:
val = decoder.decode(var.val)
except Exception: # noqa pylint: disable=broad-except
val = var.val
var_dict[var.key] = val
response = make_response(json.dumps(var_dict, sort_keys=True, indent=4))
response.headers["Content-Disposition"] = "attachment; filename=variables.json"
response.headers["Content-Type"] = "application/json; charset=utf-8"
return response
@expose('/varimport', methods=["POST"])
@action_logging
def varimport(self):
"""Import variables"""
try:
out = request.files['file'].read()
if isinstance(out, bytes):
variable_dict = json.loads(out.decode('utf-8'))
else:
variable_dict = json.loads(out)
except Exception: # noqa pylint: disable=broad-except
self.update_redirect()
flash("Missing file or syntax error.", 'error')
return redirect(self.get_redirect())
else:
suc_count = fail_count = 0
for k, v in variable_dict.items():
try:
models.Variable.set(k, v, serialize_json=not isinstance(v, str))
except Exception as e: # pylint: disable=broad-except
logging.info('Variable import failed: %s', repr(e))
fail_count += 1
else:
suc_count += 1
flash(f"{suc_count} variable(s) successfully updated.")
if fail_count:
flash(f"{fail_count} variable(s) failed to be updated.", 'error')
self.update_redirect()
return redirect(self.get_redirect())
class JobModelView(AirflowModelView):
"""View to show records from Job table"""
route_base = '/job'
datamodel = AirflowModelView.CustomSQLAInterface(BaseJob) # noqa # type: ignore
class_permission_name = permissions.RESOURCE_JOB
method_permission_name = {
'list': 'read',
}
base_permissions = [
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_ACCESS_MENU,
]
list_columns = [
'id',
'dag_id',
'state',
'job_type',
'start_date',
'end_date',
'latest_heartbeat',
'executor_class',
'hostname',
'unixname',
]
search_columns = [
'id',
'dag_id',
'state',
'job_type',
'start_date',
'end_date',
'latest_heartbeat',
'executor_class',
'hostname',
'unixname',
]
base_order = ('start_date', 'desc')
base_filters = [['dag_id', DagFilter, lambda: []]]
formatters_columns = {
'start_date': wwwutils.datetime_f('start_date'),
'end_date': wwwutils.datetime_f('end_date'),
'hostname': wwwutils.nobr_f('hostname'),
'state': wwwutils.state_f,
'latest_heartbeat': wwwutils.datetime_f('latest_heartbeat'),
}
class DagRunModelView(AirflowModelView):
"""View to show records from DagRun table"""
route_base = '/dagrun'
datamodel = AirflowModelView.CustomSQLAInterface(models.DagRun) # noqa # type: ignore
class_permission_name = permissions.RESOURCE_DAG_RUN
method_permission_name = {
'add': 'create',
'list': 'read',
'action_muldelete': 'delete',
'action_set_running': 'edit',
'action_set_failed': 'edit',
'action_set_success': 'edit',
}
base_permissions = [
permissions.ACTION_CAN_CREATE,
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_EDIT,
permissions.ACTION_CAN_DELETE,
permissions.ACTION_CAN_ACCESS_MENU,
]
add_columns = ['state', 'dag_id', 'execution_date', 'run_id', 'external_trigger', 'conf']
list_columns = ['state', 'dag_id', 'execution_date', 'run_id', 'run_type', 'external_trigger', 'conf']
search_columns = ['state', 'dag_id', 'execution_date', 'run_id', 'run_type', 'external_trigger', 'conf']
base_order = ('execution_date', 'desc')
base_filters = [['dag_id', DagFilter, lambda: []]]
add_form = edit_form = DagRunForm
formatters_columns = {
'execution_date': wwwutils.datetime_f('execution_date'),
'state': wwwutils.state_f,
'start_date': wwwutils.datetime_f('start_date'),
'dag_id': wwwutils.dag_link,
'run_id': wwwutils.dag_run_link,
'conf': wwwutils.json_f('conf'),
}
@action('muldelete', "Delete", "Are you sure you want to delete selected records?", single=False)
@provide_session
def action_muldelete(self, items, session=None): # noqa # pylint: disable=unused-argument
"""Multiple delete."""
self.datamodel.delete_all(items)
self.update_redirect()
dirty_ids = []
for item in items:
dirty_ids.append(item.dag_id)
return redirect(self.get_redirect())
@action('set_running', "Set state to 'running'", '', single=False)
@provide_session
def action_set_running(self, drs, session=None):
"""Set state to running."""
try:
count = 0
dirty_ids = []
for dr in (
session.query(DagRun).filter(DagRun.id.in_([dagrun.id for dagrun in drs])).all()
): # noqa pylint: disable=no-member
dirty_ids.append(dr.dag_id)
count += 1
dr.start_date = timezone.utcnow()
dr.state = State.RUNNING
session.commit()
flash(f"{count} dag runs were set to running")
except Exception as ex: # pylint: disable=broad-except
flash(str(ex), 'error')
flash('Failed to set state', 'error')
return redirect(self.get_default_url())
@action(
'set_failed',
"Set state to 'failed'",
"All running task instances would also be marked as failed, are you sure?",
single=False,
)
@provide_session
def action_set_failed(self, drs, session=None):
"""Set state to failed."""
try:
count = 0
dirty_ids = []
altered_tis = []
for dr in (
session.query(DagRun).filter(DagRun.id.in_([dagrun.id for dagrun in drs])).all()
): # noqa pylint: disable=no-member
dirty_ids.append(dr.dag_id)
count += 1
altered_tis += set_dag_run_state_to_failed(
current_app.dag_bag.get_dag(dr.dag_id), dr.execution_date, commit=True, session=session
)
altered_ti_count = len(altered_tis)
flash(
"{count} dag runs and {altered_ti_count} task instances "
"were set to failed".format(count=count, altered_ti_count=altered_ti_count)
)
except Exception: # noqa pylint: disable=broad-except
flash('Failed to set state', 'error')
return redirect(self.get_default_url())
@action(
'set_success',
"Set state to 'success'",
"All task instances would also be marked as success, are you sure?",
single=False,
)
@provide_session
def action_set_success(self, drs, session=None):
"""Set state to success."""
try:
count = 0
dirty_ids = []
altered_tis = []
for dr in (
session.query(DagRun).filter(DagRun.id.in_([dagrun.id for dagrun in drs])).all()
): # noqa pylint: disable=no-member
dirty_ids.append(dr.dag_id)
count += 1
altered_tis += set_dag_run_state_to_success(
current_app.dag_bag.get_dag(dr.dag_id), dr.execution_date, commit=True, session=session
)
altered_ti_count = len(altered_tis)
flash(
"{count} dag runs and {altered_ti_count} task instances "
"were set to success".format(count=count, altered_ti_count=altered_ti_count)
)
except Exception: # noqa pylint: disable=broad-except
flash('Failed to set state', 'error')
return redirect(self.get_default_url())
@action('clear', "Clear the state", "All task instances would be cleared, are you sure?", single=False)
@provide_session
def action_clear(self, drs, session=None):
"""Clears the state."""
try:
count = 0
cleared_ti_count = 0
dag_to_tis = {}
for dr in session.query(DagRun).filter(DagRun.id.in_([dagrun.id for dagrun in drs])).all():
count += 1
dag = current_app.dag_bag.get_dag(dr.dag_id)
tis_to_clear = dag_to_tis.setdefault(dag, [])
tis_to_clear += dr.get_task_instances()
for dag, tis in dag_to_tis.items():
cleared_ti_count += len(tis)
models.clear_task_instances(tis, session, dag=dag)
flash(
"{count} dag runs and {altered_ti_count} task instances "
"were cleared".format(count=count, altered_ti_count=cleared_ti_count)
)
except Exception: # noqa pylint: disable=broad-except
flash('Failed to clear state', 'error')
return redirect(self.get_default_url())
class LogModelView(AirflowModelView):
"""View to show records from Log table"""
route_base = '/log'
datamodel = AirflowModelView.CustomSQLAInterface(Log) # noqa # type:ignore
class_permission_name = permissions.RESOURCE_AUDIT_LOG
method_permission_name = {
'list': 'read',
}
base_permissions = [
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_ACCESS_MENU,
]
list_columns = ['id', 'dttm', 'dag_id', 'task_id', 'event', 'execution_date', 'owner', 'extra']
search_columns = ['dag_id', 'task_id', 'event', 'execution_date', 'owner', 'extra']
base_order = ('dttm', 'desc')
base_filters = [['dag_id', DagFilter, lambda: []]]
formatters_columns = {
'dttm': wwwutils.datetime_f('dttm'),
'execution_date': wwwutils.datetime_f('execution_date'),
'dag_id': wwwutils.dag_link,
}
class TaskRescheduleModelView(AirflowModelView):
"""View to show records from Task Reschedule table"""
route_base = '/taskreschedule'
datamodel = AirflowModelView.CustomSQLAInterface(models.TaskReschedule) # noqa # type: ignore
class_permission_name = permissions.RESOURCE_TASK_RESCHEDULE
method_permission_name = {
'list': 'read',
}
base_permissions = [
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_ACCESS_MENU,
]
list_columns = [
'id',
'dag_id',
'task_id',
'execution_date',
'try_number',
'start_date',
'end_date',
'duration',
'reschedule_date',
]
search_columns = ['dag_id', 'task_id', 'execution_date', 'start_date', 'end_date', 'reschedule_date']
base_order = ('id', 'desc')
base_filters = [['dag_id', DagFilter, lambda: []]]
def duration_f(self):
"""Duration calculation."""
end_date = self.get('end_date') # noqa pylint: disable=no-member
duration = self.get('duration') # noqa pylint: disable=no-member
if end_date and duration:
return timedelta(seconds=duration)
return None
formatters_columns = {
'dag_id': wwwutils.dag_link,
'task_id': wwwutils.task_instance_link,
'start_date': wwwutils.datetime_f('start_date'),
'end_date': wwwutils.datetime_f('end_date'),
'execution_date': wwwutils.datetime_f('execution_date'),
'reschedule_date': wwwutils.datetime_f('reschedule_date'),
'duration': duration_f,
}
class TaskInstanceModelView(AirflowModelView):
"""View to show records from TaskInstance table"""
route_base = '/taskinstance'
datamodel = AirflowModelView.CustomSQLAInterface(models.TaskInstance) # noqa # type: ignore
class_permission_name = permissions.RESOURCE_TASK_INSTANCE
method_permission_name = {
'list': 'read',
'action_clear': 'edit',
'action_set_running': 'edit',
'action_set_failed': 'edit',
'action_set_success': 'edit',
'action_set_retry': 'edit',
}
base_permissions = [
permissions.ACTION_CAN_CREATE,
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_EDIT,
permissions.ACTION_CAN_DELETE,
permissions.ACTION_CAN_ACCESS_MENU,
]
page_size = PAGE_SIZE
list_columns = [
'state',
'dag_id',
'task_id',
'execution_date',
'operator',
'start_date',
'end_date',
'duration',
'job_id',
'hostname',
'unixname',
'priority_weight',
'queue',
'queued_dttm',
'try_number',
'pool',
'log_url',
]
order_columns = [item for item in list_columns if item not in ['try_number', 'log_url']]
search_columns = [
'state',
'dag_id',
'task_id',
'execution_date',
'hostname',
'queue',
'pool',
'operator',
'start_date',
'end_date',
]
base_order = ('job_id', 'asc')
base_filters = [['dag_id', DagFilter, lambda: []]]
def log_url_formatter(self):
"""Formats log URL."""
log_url = self.get('log_url') # noqa pylint: disable=no-member
return Markup( # noqa
'<a href="{log_url}"><span class="material-icons" aria-hidden="true">reorder</span></a>'
).format(log_url=log_url)
def duration_f(self):
"""Formats duration."""
end_date = self.get('end_date') # noqa pylint: disable=no-member
duration = self.get('duration') # noqa pylint: disable=no-member
if end_date and duration:
return timedelta(seconds=duration)
return None
formatters_columns = {
'log_url': log_url_formatter,
'task_id': wwwutils.task_instance_link,
'hostname': wwwutils.nobr_f('hostname'),
'state': wwwutils.state_f,
'execution_date': wwwutils.datetime_f('execution_date'),
'start_date': wwwutils.datetime_f('start_date'),
'end_date': wwwutils.datetime_f('end_date'),
'queued_dttm': wwwutils.datetime_f('queued_dttm'),
'dag_id': wwwutils.dag_link,
'duration': duration_f,
}
@provide_session
@action(
'clear',
lazy_gettext('Clear'),
lazy_gettext(
'Are you sure you want to clear the state of the selected task'
' instance(s) and set their dagruns to the running state?'
),
single=False,
)
def action_clear(self, task_instances, session=None):
"""Clears the action."""
try:
dag_to_tis = {}
for ti in task_instances:
dag = current_app.dag_bag.get_dag(ti.dag_id)
task_instances_to_clean = dag_to_tis.setdefault(dag, [])
task_instances_to_clean.append(ti)
for dag, task_instances_list in dag_to_tis.items():
models.clear_task_instances(task_instances_list, session, dag=dag)
session.commit()
flash("{} task instances have been cleared".format(len(task_instances)))
self.update_redirect()
return redirect(self.get_redirect())
except Exception: # noqa pylint: disable=broad-except
flash('Failed to clear task instances', 'error')
@provide_session
def set_task_instance_state(self, tis, target_state, session=None):
"""Set task instance state."""
try:
count = len(tis)
for ti in tis:
ti.set_state(target_state, session)
session.commit()
flash(
"{count} task instances were set to '{target_state}'".format(
count=count, target_state=target_state
)
)
except Exception: # noqa pylint: disable=broad-except
flash('Failed to set state', 'error')
@action('set_running', "Set state to 'running'", '', single=False)
@auth.has_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
]
)
def action_set_running(self, tis):
"""Set state to 'running'"""
self.set_task_instance_state(tis, State.RUNNING)
self.update_redirect()
return redirect(self.get_redirect())
@action('set_failed', "Set state to 'failed'", '', single=False)
@auth.has_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
]
)
def action_set_failed(self, tis):
"""Set state to 'failed'"""
self.set_task_instance_state(tis, State.FAILED)
self.update_redirect()
return redirect(self.get_redirect())
@action('set_success', "Set state to 'success'", '', single=False)
@auth.has_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
]
)
def action_set_success(self, tis):
"""Set state to 'success'"""
self.set_task_instance_state(tis, State.SUCCESS)
self.update_redirect()
return redirect(self.get_redirect())
@action('set_retry', "Set state to 'up_for_retry'", '', single=False)
@auth.has_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
]
)
def action_set_retry(self, tis):
"""Set state to 'up_for_retry'"""
self.set_task_instance_state(tis, State.UP_FOR_RETRY)
self.update_redirect()
return redirect(self.get_redirect())
class DagModelView(AirflowModelView):
"""View to show records from DAG table"""
route_base = '/dagmodel'
datamodel = AirflowModelView.CustomSQLAInterface(DagModel) # noqa # type: ignore
class_permission_name = permissions.RESOURCE_DAG
method_permission_name = {
'list': 'read',
'show': 'read',
}
base_permissions = [
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_EDIT,
permissions.ACTION_CAN_DELETE,
]
list_columns = [
'dag_id',
'is_paused',
'last_scheduler_run',
'last_expired',
'scheduler_lock',
'fileloc',
'owners',
]
formatters_columns = {'dag_id': wwwutils.dag_link}
base_filters = [['dag_id', DagFilter, lambda: []]]
def get_query(self):
"""Default filters for model"""
return (
super() # noqa pylint: disable=no-member
.get_query()
.filter(or_(models.DagModel.is_active, models.DagModel.is_paused))
.filter(~models.DagModel.is_subdag)
)
def get_count_query(self):
"""Default filters for model"""
return (
super() # noqa pylint: disable=no-member
.get_count_query()
.filter(models.DagModel.is_active)
.filter(~models.DagModel.is_subdag)
)
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
]
)
@provide_session
@expose('/autocomplete')
def autocomplete(self, session=None):
"""Autocomplete."""
query = unquote(request.args.get('query', ''))
        if not query:
            return wwwutils.json_response([])
# Provide suggestions of dag_ids and owners
dag_ids_query = session.query(DagModel.dag_id.label('item')).filter( # pylint: disable=no-member
~DagModel.is_subdag, DagModel.is_active, DagModel.dag_id.ilike('%' + query + '%')
) # noqa pylint: disable=no-member
owners_query = session.query(func.distinct(DagModel.owners).label('item')).filter(
~DagModel.is_subdag, DagModel.is_active, DagModel.owners.ilike('%' + query + '%')
) # noqa pylint: disable=no-member
# Hide DAGs if not showing status: "all"
status = flask_session.get(FILTER_STATUS_COOKIE)
if status == 'active':
dag_ids_query = dag_ids_query.filter(~DagModel.is_paused)
owners_query = owners_query.filter(~DagModel.is_paused)
elif status == 'paused':
dag_ids_query = dag_ids_query.filter(DagModel.is_paused)
owners_query = owners_query.filter(DagModel.is_paused)
filter_dag_ids = current_app.appbuilder.sm.get_accessible_dag_ids(g.user)
# pylint: disable=no-member
if permissions.RESOURCE_DAG not in filter_dag_ids:
dag_ids_query = dag_ids_query.filter(DagModel.dag_id.in_(filter_dag_ids))
owners_query = owners_query.filter(DagModel.dag_id.in_(filter_dag_ids))
# pylint: enable=no-member
payload = [row[0] for row in dag_ids_query.union(owners_query).limit(10).all()]
return wwwutils.json_response(payload)
|
apache-2.0
| 6,772,566,299,823,838,000
| 35.249329
| 110
| 0.57077
| false
| 3.892812
| false
| false
| false
|
neuronalmotion/plasmoid-amixer
|
src/contents/code/main.py
|
1
|
3785
|
# -*- coding: utf-8 -*-
# -----------------------#
# License: GPL #
# Author: NeuronalMotion #
# -----------------------#
from PyQt4.QtCore import Qt
from PyQt4.QtGui import QGraphicsLinearLayout
from PyKDE4.plasma import Plasma
from PyKDE4 import plasmascript
from PyKDE4 import kdecore
import subprocess
import re
import time
import os
class NMAmixer(plasmascript.Applet):
cardName = "DGX" #from cat /proc/asound/cards
mixerControlName = "Analog Output"
frontAmixerValue = "Stereo Headphones FP"
rearAmixerValue = "Stereo Headphones"
frontLabel = "HP"
rearLabel = "Rear"
frontPicture = "/images/headphones.png"
rearPicture = "/images/speaker.png"
cardIndex = 0
buttonSwitchOutput = None
rootPath = None
def __init__(self,parent,args=None):
plasmascript.Applet.__init__(self,parent)
def init(self):
self.rootPath = kdecore.KGlobal.dirs().locate("data", "plasma/plasmoids/nm-plasmoid-amixer/contents/")
self.setHasConfigurationInterface(False)
self.setAspectRatioMode(Plasma.Square)
self.searchCardIndex()
self.layout = QGraphicsLinearLayout(Qt.Vertical, self.applet)
self.buttonSwitchOutput = Plasma.PushButton(self.applet)
#self.buttonSwitchOutput.setText(self.getCurrentOutputLabel())
self.buttonSwitchOutput.setImage(self.getCurrentOutputPicture())
self.buttonSwitchOutput.clicked.connect(self.onClickButtonSwitchOutput)
self.layout.addItem(self.buttonSwitchOutput)
self.applet.setLayout(self.layout)
self.resize(75,150)
def searchCardIndex(self):
proc = subprocess.Popen(["cat /proc/asound/cards | grep %s" % self.cardName], shell = True, stdout = subprocess.PIPE)
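        # Expected /proc/asound/cards line shape (description text varies by driver),
        # e.g. " 0 [DGX            ]: ...": the digit before the '[' is the ALSA card
        # index that the regex below captures.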
for line in proc.stdout:
m = re.search("(\\d).*\\[", line)
self.cardIndex = m.group(1)
print "card index is %s" % self.cardIndex
def getCurrentOutputName(self):
output = ""
cli = "amixer -c %s sget \"%s\"" % (self.cardIndex, self.mixerControlName)
print cli
proc = subprocess.Popen([cli], shell = True, stdout = subprocess.PIPE)
for line in proc.stdout:
if "Item0" in line:
m = re.search("'(.*)'", line)
output = m.group(1)
print output
return output
def getCurrentOutputLabel(self):
label = "?"
outputName = self.getCurrentOutputName()
if outputName == self.frontAmixerValue:
label = self.frontLabel
elif outputName == self.rearAmixerValue:
label = self.rearLabel
return label
def getCurrentOutputPicture(self):
picture = ""
outputName = self.getCurrentOutputName()
if outputName == self.frontAmixerValue:
picture = self.frontPicture
elif outputName == self.rearAmixerValue:
picture = self.rearPicture
return self.rootPath + picture
def onClickButtonSwitchOutput(self):
outputName = self.getCurrentOutputName()
outputNameTarget = None
if outputName == self.frontAmixerValue:
outputNameTarget = self.rearAmixerValue
elif outputName == self.rearAmixerValue:
outputNameTarget = self.frontAmixerValue
cli = "amixer -c %s sset \"Analog Output\" \"%s\"" % (self.cardIndex, outputNameTarget)
print cli
subprocess.Popen([cli], shell = True, stdout = subprocess.PIPE)
# Avoid IOError: [Errno 4] Interrupted system call
time.sleep(1)
#self.buttonSwitchOutput.setText(self.getCurrentOutputLabel())
self.buttonSwitchOutput.setImage(self.getCurrentOutputPicture())
def CreateApplet(parent):
return NMAmixer(parent)
|
gpl-2.0
| -2,672,511,067,184,466,000
| 30.541667
| 125
| 0.643329
| false
| 3.736426
| false
| false
| false
|
bl4de/irc-client
|
irc_client.py
|
1
|
3291
|
#!/usr/bin/env python
# by bl4de | github.com/bl4de | twitter.com/_bl4de | hackerone.com/bl4de
import socket
import sys
import threading
def usage():
print "IRC simple Python client | by bl4de | github.com/bl4de | twitter.com/_bl4de | hackerone.com/bl4de\n"
print "$ ./irc_client.py USERNAME CHANNEL\n"
print "where: USERNAME - your username, CHANNEL - channel you'd like to join (eg. channelname or #channelname)"
def channel(channel):
if channel.startswith("#") == False:
return "#" + channel
return channel
def quit():
client.send_cmd("QUIT", "Good bye!")
print "Quitting ..."
exit(0)
class IRCSimpleClient:
def __init__(self, username, channel, server="irc.freenode.net", port=6667):
self.username = username
self.server = server
self.port = port
self.channel = channel
def connect(self):
self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.conn.connect((self.server, self.port))
def get_response(self):
return self.conn.recv(512)
def send_cmd(self, cmd, message):
command = "{} {}\r\n".format(cmd, message)
self.conn.send(command)
def send_message_to_channel(self, message):
command = "PRIVMSG {}".format(self.channel)
message = ":" + message
self.send_cmd(command, message)
def join_channel(self):
cmd = "JOIN"
channel = self.channel
self.send_cmd(cmd, channel)
def print_response(self):
resp = self.get_response()
if resp:
msg = resp.strip().split(":")
print "\n< {}> {}".format(msg[1].split("!")[0], msg[2].strip())
if __name__ == "__main__":
if len(sys.argv) != 3:
usage()
exit(0)
else:
username = sys.argv[1]
channel = channel(sys.argv[2])
cmd = ""
joined = False
client = IRCSimpleClient(username, channel)
client.connect()
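    # The handshake loop below keys off standard IRC numeric replies (RFC 1459):
    # 376 = end of MOTD (safe to JOIN), 433 = nickname already in use,
    # 366 = end of the NAMES list, i.e. the JOIN succeeded.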
while(joined == False):
resp = client.get_response()
print resp.strip()
if "No Ident response" in resp:
client.send_cmd("NICK", username)
client.send_cmd(
"USER", "{} * * :{}".format(username, username))
# we're accepted, now let's join the channel!
if "376" in resp:
client.join_channel()
# username already in use? try to use username with _
if "433" in resp:
username = "_" + username
client.send_cmd("NICK", username)
client.send_cmd(
"USER", "{} * * :{}".format(username, username))
# if PING send PONG with name of the server
if "PING" in resp:
client.send_cmd("PONG", ":" + resp.split(":")[1])
# we've joined
if "366" in resp:
joined = True
t = threading.Thread(target=client.print_response)
t.start()
try:
while(cmd != "/quit"):
cmd = raw_input("< {}> ".format(username)).strip()
if cmd == "/quit":
quit()
if cmd and len(cmd) > 0:
client.send_message_to_channel(cmd)
except KeyboardInterrupt:
quit()
t = threading.Thread(target=client.print_response)
t.start()
|
mit
| 8,789,912,966,740,696,000
| 28.657658
| 115
| 0.553327
| false
| 3.735528
| false
| false
| false
|
CroceRossaItaliana/jorvik
|
api/v1/views.py
|
1
|
4044
|
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import permissions
from oauth2_provider.ext.rest_framework import TokenHasScope
from api.settings import SCOPE_ANAGRAFICA_LETTURA_BASE, SCOPE_ANAGRAFICA_LETTURA_COMPLETA, SCOPE_APPARTENENZE_LETTURA
from api.v1 import serializzatori
from anagrafica.permessi.applicazioni import PERMESSI_NOMI_DICT
# /me/anagrafica/base/
class MiaAnagraficaBase(APIView):
"""
    A view that returns information about the identified person.
"""
permission_classes = (permissions.IsAuthenticated,
TokenHasScope)
required_scopes = [SCOPE_ANAGRAFICA_LETTURA_BASE]
def get(self, request, format=None):
dati = serializzatori.persona_anagrafica_base(request.user.persona)
return Response(dati)
# /me/anagrafica/completa/
class MiaAnagraficaCompleta(APIView):
"""
    A view that returns the complete personal record of the identified person
    (basic record plus additional data).
"""
permission_classes = (permissions.IsAuthenticated,
TokenHasScope)
required_scopes = [SCOPE_ANAGRAFICA_LETTURA_BASE,
SCOPE_ANAGRAFICA_LETTURA_COMPLETA]
def get(self, request, format=None):
dati = serializzatori.persona_anagrafica_completa(request.user.persona)
return Response(dati)
# /me/appartenenze/attuali/
class MieAppartenenzeAttuali(APIView):
"""
    A view that returns information about the current memberships (appartenenze).
"""
required_scopes = [SCOPE_APPARTENENZE_LETTURA]
def get(self, request, format=None):
me = request.user.persona
appartenenze = me.appartenenze_attuali()
appartenenze = [serializzatori.appartenenza(i) for i in appartenenze]
dati = {"appartenenze": appartenenze}
return Response(dati)
class MiaAppartenenzaComplaeta(APIView):
"""
    user ID,                     - Persona
    first name,                  - Persona
    surname,                     - Persona
    contact e-mail address       - Persona
    corresponding sede (branch) of membership, - Persona
    comitato (committee) ID,
    comitato name,
    comitato extension R/P/L/T,
    delega (delegation)
"""
permission_classes = (permissions.IsAuthenticated,
TokenHasScope)
required_scopes = [SCOPE_ANAGRAFICA_LETTURA_BASE,
SCOPE_ANAGRAFICA_LETTURA_COMPLETA,
SCOPE_APPARTENENZE_LETTURA]
def get(self, request, format=None):
me = request.user.persona
# Persona
dati = {
'id_persona': me.pk,
'nome': me.nome,
'cognome': me.cognome,
'data_di_nascita': me.data_nascita,
'codice_fiscale': me.codice_fiscale,
}
if me.email is not None:
dati['email'] = me.email
# Comitato
deleghe = me.deleghe_attuali()
l_deleghe = []
for delega in deleghe:
d_delega = {
'id': delega.id,
'tipo': PERMESSI_NOMI_DICT[delega.tipo],
'appartenenza': delega.oggetto.nome,
}
l_deleghe.append(d_delega)
dati['deleghe'] = l_deleghe
# appartenenze
appartenenze = me.appartenenze_attuali()
l_appartenenza = []
for appartenenza in appartenenze:
comitato = appartenenza.sede
l_appartenenza.append({
'id': comitato.id,
'nome': comitato.nome,
'tipo': {
'id': appartenenza.membro,
'descrizione': appartenenza.get_membro_display()
},
'comitato': {
'id': comitato.estensione,
'descrizione': comitato.get_estensione_display()
},
})
dati['appartenenze'] = l_appartenenza
return Response(dati)
#serializzatori._campo(comitato.estensione, comitato.get_estensione_display())
|
gpl-3.0
| 1,223,268,520,127,786,500
| 29.171642
| 117
| 0.606728
| false
| 3.153666
| false
| false
| false
|
vertelab/odoo-smart
|
smart_salary_simulator_se/models/hr_payroll.py
|
1
|
1318
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution, third party addon
# Copyright (C) 2004-2015 Vertel AB (<http://vertel.se>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api, _
from openerp.exceptions import except_orm, Warning, RedirectWarning
import logging
_logger = logging.getLogger(__name__)
class hr_salary_rule(models.Model):
_inherit = 'hr.salary.rule'
name = fields.Char(string="Name",required=True, translate=True, readonly=False)
|
agpl-3.0
| 6,327,244,166,136,757,000
| 42.933333
| 83
| 0.637329
| false
| 4.364238
| false
| false
| false
|
susca/funge
|
minibefu93.py
|
1
|
1657
|
#!/usr/bin/python3
# minibefu93.py -- a minimal Befunge93 interpreter written in Python
# usage: minibefu93.py <prog.fu>
import sys,random
o=sys.stdout
q=0,0
d=1,0
m=80,25
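# Variable glossary (inferred from the code; the interpreter is intentionally golfed):
# o = stdout, q = program counter (x, y), d = direction delta, m = playfield size (80x25),
# f = string-mode flag, s = stack (class S pops 0 from an empty stack),
# r = the program padded to an 80x25 grid; k() maps (x, y) to an index in r,
# g()/p() read and write cells, a() advances q with wraparound.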
def k(x,y):return x+m[0]*y+y
def g(s,p=None):
x,y=q if p is None else p
return s[k(x,y)]
def p(s,p,v):
o=k(*p)
return s[:o]+chr(v)+s[o+1:]
def a():return (q[0]+d[0])%m[0],(q[1]+d[1])%m[1]
class S(list):
def p(s,*a):return super().pop(*a) if s else 0
def a(s,v):s.append(v)
def __getitem__(s,key):return super().__getitem__(key) if s else 0
with open(sys.argv[1]) as f:r=f.read()
l=r.split('\n')
[l.append('') for _ in range(len(l),m[1])]
r='\n'.join(f'{s:<{m[0]}}' for s in l)
s=S()
f=False
while True:
c=g(r)
if c=='"':f=not f
elif f:s.a(ord(c))
elif c in '1234567890':s.a(int(c))
elif c=='>':d=(1,0)
elif c=='<':d=(-1,0)
elif c=='^':d=(0,-1)
elif c=='v':d=(0,1)
elif c=='?':d=random.choice(((0,1),(1,0),(-1,0),(0,-1)))
elif c=='#':q=a()
elif c=='+':s.a(s.p()+s.p())
elif c=='-':s.a(s.p(-2)-s.p())
elif c=='*':s.a(s.p()*s.p())
elif c=='/':s.a(int(s.p(-2) // s.p()))
elif c=='%':s.a(s.p(-2) % s.p())
elif c=='!':s.a(int(not bool(s.p())))
elif c=='`':s.a(int(s.p(-2)>s.p()))
elif c=='_':d=(-1,0) if s.p() else (1,0)
elif c=='|':d=(0,-1) if s.p() else (0,1)
elif c==':':s.a(s[-1])
elif c=='\\':i,j=s.p(),s.p();s.a(i);s.a(j)
elif c=='$':s.p()
elif c=='.':o.write(str(s.p()));o.flush()
elif c==',':o.write(chr(s.p()));o.flush()
elif c=='&':s.a(int(input()))
elif c=='~':s.a(ord(input()[0]))
elif c=='g':y,x=s.p(),s.p();s.a(ord(g(r,(x,y))))
elif c=='p':y,x,v=s.p(),s.p(),s.p();r=p(r,(x,y),v)
elif c=='@':break
q=a()
|
mit
| 7,724,251,767,620,839,000
| 28.070175
| 68
| 0.493663
| false
| 1.954009
| false
| false
| false
|
DanielJDufour/person-extractor
|
person_extractor/__init__.py
|
1
|
2950
|
from nltk.chunk import _MULTICLASS_NE_CHUNKER
from nltk.data import load
from nltk.tag.perceptron import PerceptronTagger
from nltk import ne_chunk, word_tokenize
from os import listdir
from os.path import dirname, realpath
from re import findall, finditer, MULTILINE, UNICODE
from re import compile as re_compile
flags = MULTILINE|UNICODE
directory_of_this_file = dirname(realpath(__file__))
# load patterns
directory_of_patterns = directory_of_this_file + "/prep/patterns"
language_pattern = {}
for filename in listdir(directory_of_patterns):
language = filename.split(".")[0]
with open(directory_of_patterns + "/" + language + ".txt") as f:
pattern_as_string = (f.read().decode("utf-8").strip())
#print "p"
#print pattern_as_string
pattern = re_compile(pattern_as_string, flags=flags)
language_pattern[language] = pattern
global tagger
tagger = None
global chunker
chunker = None
def loadTaggerIfNecessary():
global tagger
if tagger is None:
tagger = PerceptronTagger()
def loadChunkerIfNecessary():
global chunker
if chunker is None:
chunker = load(_MULTICLASS_NE_CHUNKER)
def flatten(lst):
result = []
for element in lst:
if hasattr(element, '__iter__'):
result.extend(flatten(element))
else:
result.append(element)
return result
def extract_people_quickly(text, language=None):
if isinstance(text, str):
text = text.decode("utf-8")
people = set()
    if language:
        # use the pattern compiled for the requested language (the bare name
        # `pattern` was a leftover loop variable from module loading)
        for mo in finditer(language_pattern[language], text):
            people.add(mo.group("person"))
else:
for pattern in language_pattern.values():
print "pattern is"
print pattern
for mo in finditer(pattern, text):
people.add(mo.group("person"))
return list(people)
def extract_person_quickly(text, language=None):
return (extract_people_quickly(text, language=language) or [None])[0]
def extract_people_slowly(text, language=None):
global tagger
loadTaggerIfNecessary()
global chunker
loadChunkerIfNecessary()
if isinstance(text, str):
text = text.decode("utf-8")
people = []
for tree in chunker.parse(tagger.tag(word_tokenize(text))).subtrees():
if tree.label() == "PERSON":
people.append(" ".join([leaf[0] for leaf in tree.leaves()]))
people = findall("(?:[A-Z][a-z]+ )?(?:" + "|".join(people) + ")(?: [A-Z][a-z]+)?", text)
return people
def extract_person_slowly(text):
    # guard against an empty result, mirroring extract_person_quickly
    return (extract_people(text) or [None])[0]
def extract_people(text, language=None, speed="slowly"):
if speed == "slowly":
return extract_people_slowly(text, language)
else:
return extract_people_quickly(text, language)
def extract_person(text, language=None, speed="slowly"):
return (extract_people(text, language, speed) or [None]) [0]
epq=extract_people_quickly
eps=extract_people_slowly
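# Minimal usage sketch (hypothetical input text; actual output depends on the
# NLTK models and the bundled per-language patterns):
#   from person_extractor import extract_people
#   extract_people("Barack Obama met Angela Merkel in Berlin.", speed="slowly")
#   # -> e.g. ["Barack Obama", "Angela Merkel"]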
|
apache-2.0
| 3,573,599,924,659,248,000
| 27.095238
| 92
| 0.654237
| false
| 3.53717
| false
| false
| false
|
gschizas/praw
|
praw/models/reddit/redditor.py
|
1
|
13544
|
"""Provide the Redditor class."""
from json import dumps
from typing import Any, Dict, Generator, List, Optional, TypeVar, Union
from ...const import API_PATH
from ...util.cache import cachedproperty
from ..listing.mixins import RedditorListingMixin
from ..util import stream_generator
from .base import RedditBase
from .mixins import FullnameMixin, MessageableMixin
_Redditor = TypeVar("_Redditor")
_RedditorStream = TypeVar("_RedditorStream")
Comment = TypeVar("Comment")
Multireddit = TypeVar("Multireddit")
Reddit = TypeVar("Reddit")
Submission = TypeVar("Submission")
Subreddit = TypeVar("Subreddit")
Trophy = TypeVar("Trophy")
class Redditor(
MessageableMixin, RedditorListingMixin, FullnameMixin, RedditBase
):
"""A class representing the users of reddit.
**Typical Attributes**
This table describes attributes that typically belong to objects of this
class. Since attributes are dynamically provided (see
:ref:`determine-available-attributes-of-an-object`), there is not a
guarantee that these attributes will always be present, nor is this list
comprehensive in any way.
.. note:: Shadowbanned accounts are treated the same as non-existent
accounts, meaning that they will not have any attributes.
.. note:: Suspended/banned accounts will only return the ``name`` and
``is_suspended`` attributes.
==================================== ======================================
Attribute Description
==================================== ======================================
``comment_karma`` The comment karma for the Redditor.
``comments`` Provide an instance of
:class:`.SubListing` for comment
access.
``created_utc`` Time the account was created,
represented in `Unix Time`_.
``has_verified_email`` Whether or not the Redditor has
verified their email.
``icon_img`` The url of the Redditors' avatar.
``id`` The ID of the Redditor.
``is_employee`` Whether or not the Redditor is a
Reddit employee.
``is_friend`` Whether or not the Redditor is friends
with the authenticated user.
``is_mod`` Whether or not the Redditor mods any
subreddits.
``is_gold`` Whether or not the Redditor has active
Reddit Premium status.
``is_suspended`` Whether or not the Redditor is
currently suspended.
``link_karma`` The link karma for the Redditor.
``name`` The Redditor's username.
``subreddit`` If the Redditor has created a
user-subreddit, provides a dictionary
of additional attributes. See below.
``subreddit['banner_img']`` The URL of the user-subreddit banner.
``subreddit['name']`` The fullname of the user-subreddit.
``subreddit['over_18']`` Whether or not the user-subreddit is
NSFW.
``subreddit['public_description']`` The public description of the user-
subreddit.
``subreddit['subscribers']`` The number of users subscribed to the
user-subreddit.
``subreddit['title']`` The title of the user-subreddit.
==================================== ======================================
.. _Unix Time: https://en.wikipedia.org/wiki/Unix_time
"""
STR_FIELD = "name"
@classmethod
def from_data(cls, reddit, data):
"""Return an instance of Redditor, or None from ``data``."""
if data == "[deleted]":
return None
return cls(reddit, data)
@cachedproperty
def stream(self) -> _RedditorStream:
"""Provide an instance of :class:`.RedditorStream`.
Streams can be used to indefinitely retrieve new comments made by a
redditor, like:
.. code-block:: python
for comment in reddit.redditor('spez').stream.comments():
print(comment)
Additionally, new submissions can be retrieved via the stream. In the
following example all submissions are fetched via the redditor
``spez``:
.. code-block:: python
for submission in reddit.redditor('spez').stream.submissions():
print(submission)
"""
return RedditorStream(self)
@property
def _kind(self):
"""Return the class's kind."""
return self._reddit.config.kinds["redditor"]
@property
def _path(self):
return API_PATH["user"].format(user=self)
def __init__(
self,
reddit: Reddit,
name: Optional[str] = None,
fullname: Optional[str] = None,
_data: Optional[Dict[str, Any]] = None,
):
"""Initialize a Redditor instance.
:param reddit: An instance of :class:`~.Reddit`.
:param name: The name of the redditor.
:param fullname: The fullname of the redditor, starting with ``t2_``.
Exactly one of ``name``, ``fullname`` or ``_data`` must be provided.
"""
if (name, fullname, _data).count(None) != 2:
raise TypeError(
"Exactly one of `name`, `fullname`, or `_data` must be "
"provided."
)
if _data:
assert (
isinstance(_data, dict) and "name" in _data
), "Please file a bug with PRAW"
super().__init__(reddit, _data=_data)
self._listing_use_sort = True
if name:
self.name = name
elif fullname:
self._fullname = fullname
def _fetch_username(self, fullname):
return self._reddit.get(
API_PATH["user_by_fullname"], params={"ids": fullname}
)[fullname]["name"]
def _fetch_info(self):
if hasattr(self, "_fullname"):
self.name = self._fetch_username(self._fullname)
return ("user_about", {"user": self.name}, None)
def _fetch_data(self):
name, fields, params = self._fetch_info()
path = API_PATH[name].format(**fields)
return self._reddit.request("GET", path, params)
def _fetch(self):
data = self._fetch_data()
data = data["data"]
other = type(self)(self._reddit, _data=data)
self.__dict__.update(other.__dict__)
self._fetched = True
def _friend(self, method, data):
url = API_PATH["friend_v1"].format(user=self)
self._reddit.request(method, url, data=dumps(data))
def block(self):
"""Block the Redditor.
For example, to block Redditor ``spez``:
.. code-block:: python
reddit.redditor("spez").block()
"""
self._reddit.post(
API_PATH["block_user"], params={"account_id": self.fullname}
)
def friend(self, note: str = None):
"""Friend the Redditor.
:param note: A note to save along with the relationship. Requires
Reddit Premium (default: None).
Calling this method subsequent times will update the note.
For example, to friend Redditor ``spez``:
.. code-block:: python
reddit.redditor("spez").friend()
To add a note to the friendship (requires Reddit Premium):
.. code-block:: python
reddit.redditor("spez").friend(note="My favorite admin")
"""
self._friend("PUT", data={"note": note} if note else {})
def friend_info(self) -> _Redditor:
"""Return a Redditor instance with specific friend-related attributes.
:returns: A :class:`.Redditor` instance with fields ``date``, ``id``,
and possibly ``note`` if the authenticated user has Reddit Premium.
For example, to get the friendship information of Redditor ``spez``:
.. code-block:: python
info = reddit.redditor("spez").friend_info
friend_data = info.date
"""
return self._reddit.get(API_PATH["friend_v1"].format(user=self))
def gild(self, months: int = 1):
"""Gild the Redditor.
:param months: Specifies the number of months to gild up to 36
(default: 1).
For example, to gild Redditor ``spez`` for 1 month:
.. code-block:: python
reddit.redditor("spez").gild(months=1)
"""
if months < 1 or months > 36:
raise TypeError("months must be between 1 and 36")
self._reddit.post(
API_PATH["gild_user"].format(username=self),
data={"months": months},
)
def moderated(self) -> List[Subreddit]:
"""Return a list of the redditor's moderated subreddits.
:returns: A ``list`` of :class:`~praw.models.Subreddit` objects.
Return ``[]`` if the redditor has no moderated subreddits.
.. note:: The redditor's own user profile subreddit will not be
returned, but other user profile subreddits they moderate
will be returned.
Usage:
.. code-block:: python
for subreddit in reddit.redditor('spez').moderated():
print(subreddit.display_name)
print(subreddit.title)
"""
modded_data = self._reddit.get(API_PATH["moderated"].format(user=self))
if "data" not in modded_data:
return []
else:
subreddits = [
self._reddit.subreddit(x["sr"]) for x in modded_data["data"]
]
return subreddits
def multireddits(self) -> List[Multireddit]:
"""Return a list of the redditor's public multireddits.
        For example, to get Redditor ``spez``'s multireddits:
.. code-block:: python
multireddits = reddit.redditor("spez").multireddits()
"""
return self._reddit.get(API_PATH["multireddit_user"].format(user=self))
def trophies(self) -> List[Trophy]:
"""Return a list of the redditor's trophies.
:returns: A ``list`` of :class:`~praw.models.Trophy` objects.
        Return ``[]`` if the redditor has no trophies.
Raise ``prawcore.exceptions.BadRequest`` if the redditor doesn't exist.
Usage:
.. code-block:: python
for trophy in reddit.redditor('spez').trophies():
print(trophy.name)
print(trophy.description)
"""
return list(self._reddit.get(API_PATH["trophies"].format(user=self)))
def unblock(self):
"""Unblock the Redditor.
For example, to unblock Redditor ``spez``:
.. code-block:: python
reddit.redditor("spez").unblock()
"""
data = {
"container": self._reddit.user.me().fullname,
"name": str(self),
"type": "enemy",
}
url = API_PATH["unfriend"].format(subreddit="all")
self._reddit.post(url, data=data)
def unfriend(self):
"""Unfriend the Redditor.
For example, to unfriend Redditor ``spez``:
.. code-block:: python
reddit.redditor("spez").unfriend()
"""
self._friend(method="DELETE", data={"id": str(self)})
class RedditorStream:
"""Provides submission and comment streams."""
def __init__(self, redditor: Redditor):
"""Create a RedditorStream instance.
:param redditor: The redditor associated with the streams.
"""
self.redditor = redditor
def comments(
self, **stream_options: Union[str, int, Dict[str, str]]
) -> Generator[Comment, None, None]:
"""Yield new comments as they become available.
Comments are yielded oldest first. Up to 100 historical comments will
initially be returned.
Keyword arguments are passed to :func:`.stream_generator`.
For example, to retrieve all new comments made by redditor ``spez``,
try:
.. code-block:: python
for comment in reddit.redditor('spez').stream.comments():
print(comment)
"""
return stream_generator(self.redditor.comments.new, **stream_options)
def submissions(
self, **stream_options: Union[str, int, Dict[str, str]]
) -> Generator[Submission, None, None]:
"""Yield new submissions as they become available.
Submissions are yielded oldest first. Up to 100 historical submissions
will initially be returned.
Keyword arguments are passed to :func:`.stream_generator`.
        For example, to retrieve all new submissions made by redditor
``spez``, try:
.. code-block:: python
for submission in reddit.redditor('spez').stream.submissions():
print(submission)
"""
return stream_generator(
self.redditor.submissions.new, **stream_options
)
|
bsd-2-clause
| -3,467,038,217,675,936,000
| 32.691542
| 79
| 0.551831
| false
| 4.455263
| false
| false
| false
|
glennhickey/hal
|
assemblyHub/wigTrack.py
|
1
|
7699
|
#!/usr/bin/env python3
#Copyright (C) 2013 by Ngan Nguyen
# Copyright (C) 2012-2019 by UCSC Computational Genomics Lab
#
#Released under the MIT license, see LICENSE.txt
"""Creating wiggle (annotation) tracks and lifted-over wiggle tracks for the hubs
"""
import os, re, time
from sonLib.bioio import system
from toil.job import Job
from optparse import OptionGroup
from hal.assemblyHub.assemblyHubCommon import *
class LiftoverWigFiles( Job ):
def __init__(self, indir, halfile, genome2seq2len, bigwigdir, noLiftover, outdir):
Job.__init__(self)
self.indir = indir
self.halfile = halfile
self.genome2seq2len = genome2seq2len
self.bigwigdir = bigwigdir
self.noLiftover = noLiftover
self.outdir = outdir
def run(self, fileStore):
        #wigdir has the hierarchy: indir/genome/chr1.wig, chr2.wig...
#for each genome in wigdir, liftover the wig records of that genome to the coordinate of all other genomes
#liftover wig file of each genome with available wigs to all genomes
genomes = list(self.genome2seq2len.keys())
tempwigs = []
for genome in os.listdir(self.indir):
if genome not in genomes:
continue
genomeindir = os.path.join(self.indir, genome)
assert os.path.isdir(genomeindir)
#Create wig directory for current genome
genomeoutdir = os.path.join(self.bigwigdir, genome)
system("mkdir -p %s" %genomeoutdir)
#get all the wig files (".wig" ext)
wigfiles = getFilesByExt(genomeindir, "wig")
#Concatenate all the input wig files and convert it into bigwig to outdir/genome/genome.bw
tempwig = "%s-temp.wig" % os.path.join(genomeoutdir, genome)
system( "cat %s/*wig > %s" %(genomeindir, tempwig) )
if os.stat(tempwig).st_size > 0:#make sure the file is not empty
outbigwig = os.path.join(genomeoutdir, "%s.bw" %genome)
chrsizefile = os.path.join(self.outdir, genome, "chrom.sizes")
system("wigToBigWig %s %s %s" %(tempwig, chrsizefile, outbigwig))
#Liftover to all other genomes:
if not self.noLiftover:
for othergenome in genomes:
if othergenome != genome:
self.addChild( LiftoverWig(genomeoutdir, tempwig, genome, othergenome, self.halfile, self.outdir) )
tempwigs.append( tempwig )
self.addFollowOn( CleanupFiles(tempwigs) )
class LiftoverWig( Job ):
def __init__(self, genomeoutdir, wig, genome, othergenome, halfile, outdir):
Job.__init__(self)
self.genomeoutdir = genomeoutdir
self.wig = wig
self.genome = genome
self.othergenome = othergenome
self.halfile = halfile
self.outdir = outdir
def run(self, fileStore):
liftovertempwig = "%s.wig" % os.path.join(self.genomeoutdir, self.othergenome)
system("halWiggleLiftover %s %s %s %s %s" %(self.halfile, self.genome, self.wig, self.othergenome, liftovertempwig))
outbigwig = os.path.join(self.genomeoutdir, "%s.bw" %self.othergenome)
chrsizefile = os.path.join(self.outdir, self.othergenome, "chrom.sizes")
if os.stat(liftovertempwig).st_size > 0:#make sure the file is not empty
system("wigToBigWig %s %s %s" %(liftovertempwig, chrsizefile, outbigwig))
#Cleanup:
system("rm %s" % liftovertempwig)
#def writeTrackDb_bigwigs(f, bigwigdir, genomes, subgenomes, currgenome, properName):
def writeTrackDb_bigwigs(f, bigwigdir, genomes, currgenome, properName):
annotation = os.path.basename(bigwigdir)
genome2priority = {}
for i, genome in enumerate(genomes):
if genome == currgenome:
genome2priority[genome] = 1
else:
genome2priority[genome] = i + 2
for genome in os.listdir(bigwigdir):
bwfile = os.path.join(bigwigdir, genome, "%s.bw" %currgenome)
if not os.path.exists(bwfile):
continue
#start writing track
genomeProperName = genome
if genome in properName:
genomeProperName = properName[genome]
priority = 1
if genome in genome2priority:
priority = genome2priority[genome]
f.write("\t\ttrack %s%s\n" % (annotation, genome))
if genome == currgenome:
f.write("\t\tlongLabel %s %s\n" % (genomeProperName, annotation))
else:
f.write("\t\tlongLabel %s Lifted-over %s\n" % (genomeProperName, annotation))
f.write("\t\tpriority %d\n" %priority)
f.write("\t\tshortLabel %s%s\n" % (genomeProperName, annotation))
f.write("\t\tbigDataUrl ../liftoverwig/%s\n" % os.path.join( annotation, genome, "%s.bw" % currgenome ) )
f.write("\t\ttype bigWig\n")
f.write("\t\tgroup annotation%s\n" %annotation)
f.write("\t\titemRgb On\n")
#if genome == currgenome or genome in subgenomes:
if genome == currgenome:
f.write("\t\tvisibility dense\n")
f.write("\t\tparent hubCentral%s\n"%annotation)
else:
f.write("\t\tvisibility hide\n")
f.write("\t\tparent hubCentral%s off\n"%annotation)
f.write("\t\twindowingFunction Mean\n")
f.write("\t\tautoScale On\n")
f.write("\t\tmaxHeightPixels 128:36:16\n")
f.write("\t\tgraphTypeDefault Bar\n")
f.write("\t\tgridDefault OFF\n")
f.write("\t\tcolor 0,0,0\n")
f.write("\t\taltColor 128,128,128\n")
f.write("\t\tviewLimits 30:70\n")
f.write("\t\tsubGroups view=%s orgs=%s\n" %(annotation, genome))
f.write("\n")
def addWigOptions(parser):
group = parser.add_argument_group("WIGGLE-FORMATTED ANNOTATIONS", "All annotations in wiggle or bigWig formats.")
    group.add_argument('--wigDirs', dest='wigdirs', help='comma separated list of directories containing wig files of the input genomes. Each directory represents a type of annotation. The annotations of each genome will then be lifted over to all other genomes in the MSA. Example: "genes,genomicIsland,tRNA". Format of each directory: wigDir/ then genome1/ then chr1.wig, chr2.wig... ' )
group.add_argument('--finalBigwigDirs', dest='bwdirs', help='comma separated list of directories containing final big wig files to be displayed. No liftover will be done for these files. Each directory represents a type of annotation. Example: "readCoverage,". Format of each directory: bwDir/ then queryGenome/ then targetGenome1.bw, targetGenome2.bw ... (so annotation of queryGenome has been mapped to targetGenomes and will be display on the targetGenome browsers). ' )
group.add_argument('--nowigLiftover', dest='noWigLiftover', action='store_true', default=False, help='If specified, will not lift over the wig annotations. ')
group = parser.add_argument_group(group)
def checkWigOptions(parser, options):
options.bigwigdirs = []
if options.wigdirs:
dirs = [d.rstrip('/') for d in options.wigdirs.split(',')]
options.wigdirs = dirs
for d in dirs:
if not os.path.exists(d) or not os.path.isdir(d):
parser.error("Wig directory %s does not exist or is not a directory.\n" %d)
if options.bwdirs:
dirs = [d.rstrip('/') for d in options.bwdirs.split(',')]
options.bwdirs = dirs
for d in dirs:
if not os.path.exists(d) or not os.path.isdir(d):
parser.error("Bigwig directory %s does not exist or is not a directory.\n" %d)
|
mit
| -1,007,999,811,680,412,400
| 47.727848
| 477
| 0.636446
| false
| 3.420258
| false
| false
| false
|
drabastomek/practicalDataAnalysisCookbook
|
Codes/Chapter07/ts_timeSeriesFunctions.py
|
1
|
1760
|
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
# change the font size
matplotlib.rc('xtick', labelsize=9)
matplotlib.rc('ytick', labelsize=9)
matplotlib.rc('font', size=14)
# time series tools
import statsmodels.api as sm
# folder with data
data_folder = '../../Data/Chapter07/'
# colors
colors = ['#FF6600', '#000000', '#29407C', '#660000']
# read the data
riverFlows = pd.read_csv(data_folder + 'combined_flow.csv',
index_col=0, parse_dates=[0])
# autocorrelation function
acf = {} # to store the results
f = {}
for col in riverFlows.columns:
acf[col] = sm.tsa.stattools.acf(riverFlows[col])
# partial autocorrelation function
pacf = {}
for col in riverFlows.columns:
pacf[col] = sm.tsa.stattools.pacf(riverFlows[col])
# periodogram (spectral density)
sd = {}
for col in riverFlows.columns:
sd[col] = sm.tsa.stattools.periodogram(riverFlows[col])
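# Added reference note: acf() estimates the sample autocorrelation
# rho(k) = Cov(y_t, y_{t-k}) / Var(y_t), pacf() the partial autocorrelation with
# shorter lags regressed out, and periodogram() the spectral density of each
# series; the three are plotted side by side below.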
# plot the data
fig, ax = plt.subplots(2, 3) # 2 rows and 3 columns
# set the size of the figure explicitly
fig.set_size_inches(12, 7)
# plot the charts for American
ax[0, 0].plot(acf['american_flow'], colors[0])
ax[0, 1].plot(pacf['american_flow'],colors[1])
ax[0, 2].plot(sd['american_flow'], colors[2])
ax[0, 2].yaxis.tick_right() # shows the numbers on the right
# plot the charts for Columbia
ax[1, 0].plot(acf['columbia_flow'], colors[0])
ax[1, 1].plot(pacf['columbia_flow'],colors[1])
ax[1, 2].plot(sd['columbia_flow'], colors[2])
ax[1, 2].yaxis.tick_right()
# set titles for columns
ax[0, 0].set_title('ACF')
ax[0, 1].set_title('PACF')
ax[0, 2].set_title('Spectral density')
# set titles for rows
ax[0, 0].set_ylabel('American')
ax[1, 0].set_ylabel('Columbia')
# save the chart
plt.savefig(data_folder + 'charts/acf_pacf_sd.png', dpi=300)
|
gpl-2.0
| 3,228,647,342,116,916,700
| 24.142857
| 60
| 0.688636
| false
| 2.6546
| false
| false
| false
|
Ictp/indico
|
indico/MaKaC/plugins/Collaboration/Vidyo/api/api.py
|
1
|
4270
|
# -*- coding: utf-8 -*-
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico;if not, see <http://www.gnu.org/licenses/>.
from contextlib import contextmanager
from MaKaC.common.logger import Logger
from MaKaC.plugins.Collaboration.Vidyo.api.client import AdminClient, UserClient
from suds import WebFault
from MaKaC.plugins.Collaboration.Vidyo.common import VidyoConnectionException
from urllib2 import URLError
AUTOMUTE_API_PROFILE = "NoAudioAndVideo"
class ApiBase(object):
""" Provides the _handleServiceCallException method
"""
@classmethod
def _handleServiceCallException(cls, e):
Logger.get("Vidyo").exception("Service call exception")
cause = e.args[0]
if type(cause) is tuple and cause[0] == 401:
raise VidyoConnectionException(e)
elif type(e) == URLError:
raise VidyoConnectionException(e)
else:
raise
@classmethod
def _api_operation(cls, service, *params, **kwargs):
try:
vidyoClient = cls.getVidyoClient()
except Exception, e:
raise VidyoConnectionException(e)
try:
return getattr(vidyoClient.service, service)(*params, **kwargs)
except WebFault, e:
raise
except Exception, e:
cls._handleServiceCallException(e)
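    # Added descriptive note: _api_operation is the single chokepoint for SOAP
    # calls. It obtains the client via getVidyoClient() (implemented by the
    # subclasses below), invokes the named service method, re-raises WebFault
    # unchanged so callers can inspect the SOAP fault, and maps connection-level
    # failures (HTTP 401 tuples, URLError) to VidyoConnectionException.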
class AdminApi(ApiBase):
""" This class performs low-level operations by getting the corresponding
client and calling a SOAP service.
We write info statements to the log with the details of what we are doing.
Each class method performs a single service call to Vidyo.
"""
@classmethod
def getVidyoClient(cls):
return AdminClient.getInstance()
@classmethod
def addRoom(cls, newRoom):
return cls._api_operation('addRoom', newRoom)
@classmethod
def updateRoom(cls, roomId, updatedRoom):
return cls._api_operation('updateRoom', roomId, updatedRoom)
@classmethod
def getRooms(cls, searchFilter):
return cls._api_operation('getRooms', searchFilter)
@classmethod
def getRoom(cls, roomId):
return cls._api_operation('getRoom', roomId)
@classmethod
def deleteRoom(cls, roomId):
return cls._api_operation('deleteRoom', roomId)
@classmethod
def setAutomute(cls, roomId, enabled):
if enabled:
return cls._api_operation('setRoomProfile', roomId, AUTOMUTE_API_PROFILE)
else:
return cls._api_operation('removeRoomProfile', roomId)
@classmethod
def getAutomute(cls, roomId):
answer = cls._api_operation('getRoomProfile', roomId)
if answer is None or answer == "":
return False
return answer.roomProfileName == AUTOMUTE_API_PROFILE
@classmethod
def setModeratorPIN(cls, roomId, moderatorPIN):
if moderatorPIN:
return cls._api_operation('createModeratorPIN', roomId, moderatorPIN)
else:
return cls._api_operation('removeModeratorPIN', roomId)
@classmethod
def connectRoom(cls, roomId, legacyMember):
return cls._api_operation('inviteToConference', roomId, entityID=legacyMember)
class UserApi(ApiBase):
""" This class performs low-level operations by getting the corresponding
client and calling a SOAP service.
We write info statements to the log with the details of what we are doing.
"""
@classmethod
def getVidyoClient(cls):
return UserClient.getInstance()
@classmethod
def search(cls, searchFilter):
return cls._api_operation('search', searchFilter)
|
gpl-3.0
| 1,933,884,575,633,309,700
| 32.359375
| 86
| 0.679625
| false
| 4.133591
| false
| false
| false
|
jon-jacky/PyModel
|
samples/Marquee/fsmpy/PeriodFiveFSM1.py
|
1
|
1330
|
# pma.py --maxTransitions 100 --output PeriodFiveFSM1 LoadFirst Marquee DisplayFive
# 6 states, 6 transitions, 6 accepting states, 0 unsafe states, 0 finished and 0 deadend states
# actions here are just labels, but must be symbols with __name__ attribute
def Load(): pass
def Shift(): pass
# states, key of each state here is its number in graph etc. below
states = {
0 : {'LoadFirst': 0, 'Marquee': {'display': '* * * * * * * * * * * * * '}},
1 : {'Marquee': {'display': 'Bye Bye Bye Bye Bye '}, 'LoadFirst': 1},
2 : {'Marquee': {'display': 'ye Bye Bye Bye Bye B'}, 'LoadFirst': 1},
3 : {'Marquee': {'display': 'e Bye Bye Bye Bye By'}, 'LoadFirst': 1},
4 : {'Marquee': {'display': ' Bye Bye Bye Bye Bye'}, 'LoadFirst': 1},
5 : {'Marquee': {'display': ' Bye Bye Bye Bye Bye '}, 'LoadFirst': 1},
}
# initial state, accepting states, unsafe states, frontier states, deadend states
initial = 0
accepting = [0, 1, 2, 3, 4, 5]
unsafe = []
frontier = []
finished = []
deadend = []
runstarts = [0]
# finite state machine, list of tuples: (current, (action, args, result), next)
graph = (
(0, (Load, ('Bye Bye Bye Bye Bye ',), None), 1),
(1, (Shift, (), None), 2),
(2, (Shift, (), None), 3),
(3, (Shift, (), None), 4),
(4, (Shift, (), None), 5),
(5, (Shift, (), None), 1),
)
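# Illustrative addition (not produced by pma.py): walk the FSM from the initial
# state by following the first matching transition at each step.
if __name__ == "__main__":
    state = initial
    for _ in range(8):
        for current, (action, args, result), nxt in graph:
            if current == state:
                print("%d --%s%s--> %d" % (current, action.__name__, args, nxt))
                state = nxt
                break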
|
bsd-3-clause
| 97,296,459,689,789,100
| 32.25
| 95
| 0.581955
| false
| 2.714286
| false
| false
| false
|
isb-cgc/ISB-CGC-Webapp
|
bq_data_access/v2/seqpeek/seqpeek_view.py
|
1
|
7709
|
#
# Copyright 2015-2019, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from builtins import map
from builtins import str
from builtins import object
from copy import deepcopy
import logging
from bq_data_access.v2.seqpeek.seqpeek_interpro import InterProDataProvider
logger = logging.getLogger('main_logger')
SAMPLE_ID_FIELD_NAME = 'sample_id'
TRACK_ID_FIELD = "tumor"
COORDINATE_FIELD_NAME = 'uniprot_aapos'
PROTEIN_ID_FIELD = 'ensg_id'
PROTEIN_DOMAIN_DB = 'PFAM'
SEQPEEK_VIEW_DEBUG_MODE = False
def get_number_of_unique_samples(track):
sample_ids = set()
for mutation in track['mutations']:
sample_ids.add(mutation[SAMPLE_ID_FIELD_NAME])
return len(sample_ids)
def get_number_of_mutated_positions(track):
sample_locations = set()
for mutation in track['mutations']:
sample_locations.add(mutation[COORDINATE_FIELD_NAME])
return len(sample_locations)
# TODO remove if not needed
def clean_track_mutations(mutations_array):
retval = []
for mutation in mutations_array:
cleaned = deepcopy(mutation)
cleaned[COORDINATE_FIELD_NAME] = int(mutation[COORDINATE_FIELD_NAME])
retval.append(cleaned)
return retval
def sort_track_mutations(mutations_array):
return sorted(mutations_array, key=lambda k: k[COORDINATE_FIELD_NAME])
def get_track_statistics_by_track_type(track, cohort_info_map):
track_id = track[TRACK_ID_FIELD]
result = {
'samples': {
'numberOf': get_number_of_unique_samples(track),
'mutated_positions': get_number_of_mutated_positions(track)
}
}
if track['type'] == 'tumor':
cohort_info = cohort_info_map[track_id]
result['cohort_size'] = cohort_info['size']
else:
# Do not assign cohort size for the 'COMBINED' track.
result['cohort_size'] = None
return result
def filter_protein_domains(match_array):
return [m for m in match_array if m['dbname'] == PROTEIN_DOMAIN_DB]
def get_table_row_id(tumor_type):
return "seqpeek_row_{0}".format(tumor_type)
def build_seqpeek_regions(protein_data):
return [{
'type': 'exon',
'start': 0,
'end': protein_data['length']
}]
def build_summary_track(tracks):
all = []
for track in tracks:
all.extend(track["mutations"])
return {
'mutations': all,
'label': 'COMBINED',
'tumor': 'none-combined',
'type': 'summary'
}
def get_track_label_and_cohort_information(track_id_value, cohort_info_map):
cohort_info = cohort_info_map[track_id_value]
label = cohort_info['name']
cohort_size = cohort_info['size']
return label, cohort_size
def get_track_label(track, cohort_info_array):
# The IDs in cohort_info_array are integers, whereas the track IDs are strings.
cohort_map = {str(item['id']): item['name'] for item in cohort_info_array}
return cohort_map[track[TRACK_ID_FIELD]]
def get_protein_domains(uniprot_id):
protein = InterProDataProvider().get_data(uniprot_id)
return protein
class MAFData(object):
def __init__(self, cohort_info, data):
self.cohort_info = cohort_info
self.data = data
@classmethod
def from_dict(cls, param):
return cls(param['cohort_set'], param['items'])
def build_track_data(track_id_list, all_tumor_mutations):
tracks = []
for track_id in track_id_list:
tracks.append({
TRACK_ID_FIELD: track_id,
'mutations': [m for m in all_tumor_mutations if int(track_id) in set(m['cohort'])]
})
return tracks
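# Added descriptive note: each entry returned above has the shape
# {'tumor': '<cohort id as a string>', 'mutations': [rows whose 'cohort' list
# contains that id]}; e.g. for a hypothetical track_id_list == ['42'], every
# mutation m with 42 in m['cohort'] lands in the single '42' track.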
def find_uniprot_id(mutations):
uniprot_id = None
for m in mutations:
if PROTEIN_ID_FIELD in m:
uniprot_id = m[PROTEIN_ID_FIELD]
break
return uniprot_id
def get_genes_tumors_lists_debug():
return {
'symbol_list': ['EGFR', 'TP53', 'PTEN'],
'disease_codes': ['ACC', 'BRCA', 'GBM']
}
def get_genes_tumors_lists_remote():
context = {
'symbol_list': [],
'track_id_list': []
}
return context
def get_genes_tumors_lists():
if SEQPEEK_VIEW_DEBUG_MODE:
return get_genes_tumors_lists_debug()
else:
return get_genes_tumors_lists_remote()
def get_track_id_list(param):
return list(map(str, param))
def format_removed_row_statistics_to_list(stats_dict):
result = []
for key, value in list(stats_dict.items()):
result.append({
'name': key,
'num': value
})
return result
class SeqPeekViewDataBuilder(object):
def build_view_data(self, hugo_symbol, filtered_maf_vector, seqpeek_cohort_info, cohort_id_list, removed_row_statistics, tables_used):
context = get_genes_tumors_lists()
cohort_info_map = {str(item['id']): item for item in seqpeek_cohort_info}
track_id_list = get_track_id_list(cohort_id_list)
# Since the gene (hugo_symbol) parameter is part of the GNAB feature ID,
# it will be sanity-checked in the SeqPeekMAFDataAccess instance.
uniprot_id = find_uniprot_id(filtered_maf_vector)
logging.info("UniProt ID: " + str(uniprot_id))
protein_data = get_protein_domains(uniprot_id)
track_data = build_track_data(track_id_list, filtered_maf_vector)
plot_data = {
'gene_label': hugo_symbol,
'tracks': track_data,
'protein': protein_data
}
# Pre-processing
# - Sort mutations by chromosomal coordinate
for track in plot_data['tracks']:
track['mutations'] = sort_track_mutations(track['mutations'])
# Annotations
# - Add label, possibly human readable
# - Add type that indicates whether the track is driven by data from search or
# if the track is aggregate
for track in plot_data['tracks']:
track['type'] = 'tumor'
label, cohort_size = get_track_label_and_cohort_information(track[TRACK_ID_FIELD], cohort_info_map)
track['label'] = label
# Display the "combined" track only if more than one cohort is visualized
if len(cohort_id_list) >= 2:
plot_data['tracks'].append(build_summary_track(plot_data['tracks']))
for track in plot_data['tracks']:
# Calculate statistics
track['statistics'] = get_track_statistics_by_track_type(track, cohort_info_map)
# Unique ID for each row
track['render_info'] = {
'row_id': get_table_row_id(track[TRACK_ID_FIELD])
}
plot_data['regions'] = build_seqpeek_regions(plot_data['protein'])
plot_data['protein']['matches'] = filter_protein_domains(plot_data['protein']['matches'])
tumor_list = ','.join(track_id_list)
context.update({
'plot_data': plot_data,
'hugo_symbol': hugo_symbol,
'tumor_list': tumor_list,
'cohort_id_list': track_id_list,
'removed_row_statistics': format_removed_row_statistics_to_list(removed_row_statistics),
'bq_tables': list(set(tables_used))
})
return context
|
apache-2.0
| -1,271,494,110,282,073,900
| 27.764925
| 138
| 0.634064
| false
| 3.478791
| false
| false
| false
|
marcosbontempo/inatelos
|
poky-daisy/bitbake/lib/bb/server/process.py
|
1
|
7728
|
#
# BitBake Process based server.
#
# Copyright (C) 2010 Bob Foerster <robert@erafx.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
This module implements a multiprocessing.Process based server for bitbake.
"""
import bb
import bb.event
import itertools
import logging
import multiprocessing
import os
import signal
import sys
import time
import select
from Queue import Empty
from multiprocessing import Event, Process, util, Queue, Pipe, queues, Manager
from . import BitBakeBaseServer, BitBakeBaseServerConnection, BaseImplServer
logger = logging.getLogger('BitBake')
class ServerCommunicator():
def __init__(self, connection, event_handle):
self.connection = connection
self.event_handle = event_handle
def runCommand(self, command):
# @todo try/except
self.connection.send(command)
while True:
# don't let the user ctrl-c while we're waiting for a response
try:
if self.connection.poll(20):
return self.connection.recv()
else:
bb.fatal("Timeout while attempting to communicate with bitbake server")
except KeyboardInterrupt:
pass
def getEventHandle(self):
return self.event_handle.value
class EventAdapter():
"""
Adapter to wrap our event queue since the caller (bb.event) expects to
call a send() method, but our actual queue only has put()
"""
def __init__(self, queue):
self.queue = queue
def send(self, event):
try:
self.queue.put(event)
except Exception as err:
print("EventAdapter puked: %s" % str(err))
class ProcessServer(Process, BaseImplServer):
profile_filename = "profile.log"
profile_processed_filename = "profile.log.processed"
def __init__(self, command_channel, event_queue, featurelist):
BaseImplServer.__init__(self)
Process.__init__(self)
self.command_channel = command_channel
self.event_queue = event_queue
self.event = EventAdapter(event_queue)
self.featurelist = featurelist
self.quit = False
self.quitin, self.quitout = Pipe()
self.event_handle = multiprocessing.Value("i")
def run(self):
for event in bb.event.ui_queue:
self.event_queue.put(event)
self.event_handle.value = bb.event.register_UIHhandler(self)
bb.cooker.server_main(self.cooker, self.main)
def main(self):
# Ignore SIGINT within the server, as all SIGINT handling is done by
# the UI and communicated to us
self.quitin.close()
signal.signal(signal.SIGINT, signal.SIG_IGN)
while not self.quit:
try:
if self.command_channel.poll():
command = self.command_channel.recv()
self.runCommand(command)
if self.quitout.poll():
self.quitout.recv()
self.quit = True
self.idle_commands(.1, [self.event_queue._reader, self.command_channel, self.quitout])
except Exception:
logger.exception('Running command %s', command)
self.event_queue.close()
bb.event.unregister_UIHhandler(self.event_handle.value)
self.command_channel.close()
self.cooker.shutdown(True)
def idle_commands(self, delay, fds = []):
nextsleep = delay
for function, data in self._idlefuns.items():
try:
retval = function(self, data, False)
if retval is False:
del self._idlefuns[function]
nextsleep = None
elif retval is True:
nextsleep = None
elif nextsleep is None:
continue
else:
fds = fds + retval
except SystemExit:
raise
except Exception:
logger.exception('Running idle function')
if nextsleep is not None:
select.select(fds,[],[],nextsleep)
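    # Added note on the idle-function protocol used above: a registered function
    # returning False is unregistered (and the loop skips sleeping), True means
    # "call me again immediately", and any other return value is treated as a
    # list of file descriptors appended to the select() call so the server wakes
    # as soon as one becomes readable.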
def runCommand(self, command):
"""
Run a cooker command on the server
"""
self.command_channel.send(self.cooker.command.runCommand(command))
def stop(self):
self.quitin.send("quit")
self.quitin.close()
class BitBakeProcessServerConnection(BitBakeBaseServerConnection):
def __init__(self, serverImpl, ui_channel, event_queue):
self.procserver = serverImpl
self.ui_channel = ui_channel
self.event_queue = event_queue
self.connection = ServerCommunicator(self.ui_channel, self.procserver.event_handle)
self.events = self.event_queue
def sigterm_terminate(self):
bb.error("UI received SIGTERM")
self.terminate()
def terminate(self):
def flushevents():
while True:
try:
event = self.event_queue.get(block=False)
except (Empty, IOError):
break
if isinstance(event, logging.LogRecord):
logger.handle(event)
signal.signal(signal.SIGINT, signal.SIG_IGN)
self.procserver.stop()
while self.procserver.is_alive():
flushevents()
self.procserver.join(0.1)
self.ui_channel.close()
self.event_queue.close()
self.event_queue.setexit()
# Wrap Queue to provide API which isn't server implementation specific
class ProcessEventQueue(multiprocessing.queues.Queue):
def __init__(self, maxsize):
multiprocessing.queues.Queue.__init__(self, maxsize)
self.exit = False
def setexit(self):
self.exit = True
def waitEvent(self, timeout):
if self.exit:
raise KeyboardInterrupt()
try:
return self.get(True, timeout)
except Empty:
return None
def getEvent(self):
try:
return self.get(False)
except Empty:
return None
class BitBakeServer(BitBakeBaseServer):
def initServer(self):
# establish communication channels. We use bidirectional pipes for
# ui <--> server command/response pairs
# and a queue for server -> ui event notifications
#
self.ui_channel, self.server_channel = Pipe()
self.event_queue = ProcessEventQueue(0)
self.serverImpl = ProcessServer(self.server_channel, self.event_queue, None)
def detach(self):
self.serverImpl.start()
return
def establishConnection(self, featureset):
self.connection = BitBakeProcessServerConnection(self.serverImpl, self.ui_channel, self.event_queue)
_, error = self.connection.connection.runCommand(["setFeatures", featureset])
if error:
logger.error("Unable to set the cooker to the correct featureset: %s" % error)
raise BaseException(error)
signal.signal(signal.SIGTERM, lambda i, s: self.connection.sigterm_terminate())
return self.connection
|
mit
| 239,827,762,069,532,380
| 31.745763
| 108
| 0.618659
| false
| 4.290949
| false
| false
| false
|
demelin/learning_reinforcement_learning
|
recurrent_deep_q_network/training_session.py
|
1
|
8324
|
import os
import tensorflow as tf
import numpy as np
from tensorflow.contrib.rnn import LSTMCell
from q_learning.q_network import MentorAgent, ExperienceBuffer, update_target_graph, perform_update, process_capture
import gym
import universe
tf.reset_default_graph()
env = gym.make('Pong-v0')
# Network constants
FILTER_DIMS = [[8, 8], [4, 4], [3, 3], [6, 6]]
FILTER_NUMS = [32, 64, 64, 512]
STRIDES = [[4, 4], [2, 2], [1, 1], [1, 1]]
HIDDEN_SIZE = 512
ACTION_NUM = 6 # According to documentation
LEARNING_RATE = 1e-4
BUFFER_SIZE = 1000
# Session constants
BATCH_SIZE = 4
TRACE_LENGTH = 8
UPDATE_FREQ = 5
TAU = 0.99 # Discount factor on target Q-values
START_RAND = 1.0
END_RAND = 0.1
ANN_STEPS = 10000
NUM_EPISODES = 10000
PRE_TRAIN_STEPS = 10000
LOAD_MODEL = False
PATH = os.curdir + '/rdqn/model'
MAX_EPISODE_LENGTH = 50
SUMMARY_LENGTH = 100
SAVING_FREQ = 10000
# Defines cells to be used in the actor and the target network
actor_cell = LSTMCell(num_units=HIDDEN_SIZE, state_is_tuple=True)
target_cell = LSTMCell(num_units=HIDDEN_SIZE, state_is_tuple=True)
# Initialize networks and buffer
actor_qn = MentorAgent(HIDDEN_SIZE, actor_cell, FILTER_DIMS, FILTER_NUMS, STRIDES, 'actor', ACTION_NUM, LEARNING_RATE)
target_qn = \
MentorAgent(HIDDEN_SIZE, target_cell, FILTER_DIMS, FILTER_NUMS, STRIDES, 'target', ACTION_NUM, LEARNING_RATE)
session_buffer = ExperienceBuffer(BUFFER_SIZE)
# Define target_qn update OPs to be used in the session (tf.trainable_variables() operates on the graph)
tvars = tf.trainable_variables()
actor_tvars, target_tvars = tvars[:len(tvars)//2], tvars[len(tvars)//2:]
target_ops = update_target_graph(actor_tvars, target_tvars, TAU)
saver = tf.train.Saver(max_to_keep=5)
# Scheduling e-greedy exploration
epsilon = START_RAND
drop_per_step = (START_RAND - END_RAND) / ANN_STEPS
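# Added note: after PRE_TRAIN_STEPS of purely random exploration, epsilon is
# annealed linearly inside the training loop below, roughly
# epsilon(t) = max(END_RAND, START_RAND - t * drop_per_step)
# where t counts post-pre-training environment steps.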
# Initialize tracking variables
steps_per_episode = list()
total_rewards = list()
total_steps = 0
# Make path for model saving
if not os.path.exists(PATH):
os.makedirs(PATH)
# Start the session
with tf.Session() as sess:
if LOAD_MODEL:
print('Loading model ... ')
checkpoint = tf.train.get_checkpoint_state(PATH)
saver.restore(sess, checkpoint.model_checkpoint_path)
sess.run(tf.global_variables_initializer())
# Set target network equal to the agent network
perform_update(target_ops, sess)
# Manage summaries
merged = tf.summary.merge_all()
training_writer = tf.summary.FileWriter('./train', sess.graph)
# Enter training loop
for i in range(NUM_EPISODES):
# Keep track of episodes and steps completed
print('Episode %d | Total steps taken: %d' % (i, total_steps))
episode_buffer = list()
# Get new observations
env_state = env.reset()
proc_env_state = process_capture(env_state)
done = False
running_reward = 0
step = 0
# Reset RNN hidden state
rnn_state = (np.zeros([1, HIDDEN_SIZE]), np.zeros([1, HIDDEN_SIZE]))
# Enter the Q-Network loop (play until a single game is completed, alternatively uncomment for max_ep_len)
# while step < MAX_EPISODE_LENGTH:
while True:
# step += 1
feed_dict = {actor_qn.scalar_input: proc_env_state, actor_qn.trace_length: 1,
actor_qn.state_in: rnn_state, actor_qn.batch_size: 1}
# Choose action following the e-greedy strategy
if np.random.rand(1) < epsilon or total_steps < PRE_TRAIN_STEPS:
# Take a random action
rnn_state_1 = sess.run(actor_qn.final_state, feed_dict=feed_dict)
action = np.random.randint(0, 3)
else:
# Obtain action from model
action, rnn_state_1 = sess.run([actor_qn.prediction, actor_qn.final_state], feed_dict=feed_dict)
action = action[0]
# Take a step in the environment
env_state_1, reward, done, _ = env.step(action)
proc_env_state_1 = process_capture(env_state_1)
total_steps += 1
# Add interaction to the episode buffer
episode_buffer.append(np.reshape([proc_env_state, action, reward, proc_env_state_1, done], [1, 5]))
# Proceed with exploitation once the exploration phase is concluded
if total_steps > PRE_TRAIN_STEPS:
if epsilon > END_RAND:
epsilon -= drop_per_step
# Update target network
if total_steps % (UPDATE_FREQ * 1000) == 0:
perform_update(target_ops, sess)
# Update agent network
if total_steps % UPDATE_FREQ == 0:
# Reset the RNN hidden state
rnn_state_train = (np.zeros([BATCH_SIZE, HIDDEN_SIZE]), np.zeros([BATCH_SIZE, HIDDEN_SIZE]))
# Get random batch of experiences from the experience buffer
train_batch = session_buffer.sample_experience(BATCH_SIZE, TRACE_LENGTH)
# Perform the Double-DQN update to the target Q-values
# Agent network
q_1 = sess.run(actor_qn.prediction,
feed_dict={actor_qn.scalar_input: (np.vstack(train_batch[:, 3]) / 255.0),
actor_qn.trace_length: TRACE_LENGTH,
actor_qn.state_in: rnn_state_train,
actor_qn.batch_size: BATCH_SIZE})
# Target network
q_2 = sess.run(target_qn.q_out,
feed_dict={target_qn.scalar_input: (np.vstack(train_batch[:, 3]) / 255.0),
target_qn.trace_length: TRACE_LENGTH,
target_qn.state_in: rnn_state_train,
target_qn.batch_size: BATCH_SIZE})
# Exclude final steps in each episode
end_multiplier = np.abs(train_batch[:, 4] - 1)
# Select q-values from target network based on actions predicted by the agent network
double_q = q_2[range(BATCH_SIZE * TRACE_LENGTH), q_1]
                    # See target-Q double-DQN update equation
target_q = train_batch[:, 2] + (TAU * double_q * end_multiplier)
# Update agent network with the so obtained target_q values
_ = sess.run(actor_qn.update_model,
feed_dict={actor_qn.scalar_input: (np.vstack(train_batch[:, 0]) / 255.0),
actor_qn.target_q_holder: target_q,
actor_qn.action_holder: train_batch[:, 1],
actor_qn.trace_length: TRACE_LENGTH,
actor_qn.state_in: rnn_state_train,
actor_qn.batch_size: BATCH_SIZE})
# Update environment interaction variables
running_reward += reward
proc_env_state = proc_env_state_1
env_state = env_state_1
rnn_state = rnn_state_1
# Terminate episode once done
if done:
break
# Add episode to the experience buffer
buffer_array = np.array(episode_buffer)
# episode_buffer = zip(buffer_array)
session_buffer.add_experience(buffer_array, TRACE_LENGTH, buffer_array.shape[0])
# Update tracking lists
steps_per_episode.append(step)
total_rewards.append(running_reward)
# Save model periodically
if i % SAVING_FREQ == 0 and i != 0:
saver.save(sess, PATH + '/model-' + str(i) + '.cptk')
print('Model saved after %d steps!' % i)
# Report on the training performance of the actor network
if i % SUMMARY_LENGTH == 0 and i != 0:
print('Episode: %d | Steps taken: %d | Average episodic reward: %.4f | epsilon value: %.4f'
% (i, total_steps, np.mean(total_rewards[-SUMMARY_LENGTH:]), epsilon))
# Save final model
saver.save(sess, PATH + '/model-final' + '.cptk')
|
mit
| -2,580,441,314,753,550,300
| 43.042328
| 118
| 0.575925
| false
| 3.737764
| false
| false
| false
|
qsantos/crpyt
|
digests/md2.py
|
1
|
3065
|
# crpyt: toy cryptographic python library
# Copyright (C) 2014 Quentin SANTOS
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# END LICENCE
from digest import Digest
# Reference: RFC 1319
# Note: "Set C[j] to S[c xor L]." should be "Set C[j] to C[j] xor S[c xor L]."
class MD2(Digest):
def __init__(self):
super(MD2,self).__init__(16, 1)
self.C = [0] * 16
self.X = [0] * 16
def pad(self,l):
rem = self.blocksize - (l%self.blocksize)
return [rem]*rem
def final(self):
self.round(self.C)
return self.X
def round(self, block):
X = self.X + block + [xi ^ bi for (xi,bi) in zip(self.X,block)]
t = 0;
for i in range(18):
for j in range(48):
X[j] ^= self.S[t]
t = X[j]
t = (t+i) & 0xff
self.X = X[:16]
def block(self, block):
L = self.C[15];
for i in range(16):
self.C[i] ^= self.S[block[i] ^ L]
L = self.C[i]
self.round(block)
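    # Added descriptive note (per RFC 1319): block() first folds the 16-byte
    # input block into the running checksum C through the S-box (using the
    # corrected recurrence noted at the top of this file), then round() expands
    # X || block || (X xor block) into a 48-byte buffer and applies 18 passes of
    # S-box mixing; final() feeds the checksum C through one last round before
    # returning the 16-byte state X.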
S = [
0x29, 0x2e, 0x43, 0xc9, 0xa2, 0xd8, 0x7c, 0x01, 0x3d, 0x36, 0x54, 0xa1, 0xec, 0xf0, 0x06, 0x13,
0x62, 0xa7, 0x05, 0xf3, 0xc0, 0xc7, 0x73, 0x8c, 0x98, 0x93, 0x2b, 0xd9, 0xbc, 0x4c, 0x82, 0xca,
0x1e, 0x9b, 0x57, 0x3c, 0xfd, 0xd4, 0xe0, 0x16, 0x67, 0x42, 0x6f, 0x18, 0x8a, 0x17, 0xe5, 0x12,
0xbe, 0x4e, 0xc4, 0xd6, 0xda, 0x9e, 0xde, 0x49, 0xa0, 0xfb, 0xf5, 0x8e, 0xbb, 0x2f, 0xee, 0x7a,
0xa9, 0x68, 0x79, 0x91, 0x15, 0xb2, 0x07, 0x3f, 0x94, 0xc2, 0x10, 0x89, 0x0b, 0x22, 0x5f, 0x21,
0x80, 0x7f, 0x5d, 0x9a, 0x5a, 0x90, 0x32, 0x27, 0x35, 0x3e, 0xcc, 0xe7, 0xbf, 0xf7, 0x97, 0x03,
0xff, 0x19, 0x30, 0xb3, 0x48, 0xa5, 0xb5, 0xd1, 0xd7, 0x5e, 0x92, 0x2a, 0xac, 0x56, 0xaa, 0xc6,
0x4f, 0xb8, 0x38, 0xd2, 0x96, 0xa4, 0x7d, 0xb6, 0x76, 0xfc, 0x6b, 0xe2, 0x9c, 0x74, 0x04, 0xf1,
0x45, 0x9d, 0x70, 0x59, 0x64, 0x71, 0x87, 0x20, 0x86, 0x5b, 0xcf, 0x65, 0xe6, 0x2d, 0xa8, 0x02,
0x1b, 0x60, 0x25, 0xad, 0xae, 0xb0, 0xb9, 0xf6, 0x1c, 0x46, 0x61, 0x69, 0x34, 0x40, 0x7e, 0x0f,
0x55, 0x47, 0xa3, 0x23, 0xdd, 0x51, 0xaf, 0x3a, 0xc3, 0x5c, 0xf9, 0xce, 0xba, 0xc5, 0xea, 0x26,
0x2c, 0x53, 0x0d, 0x6e, 0x85, 0x28, 0x84, 0x09, 0xd3, 0xdf, 0xcd, 0xf4, 0x41, 0x81, 0x4d, 0x52,
0x6a, 0xdc, 0x37, 0xc8, 0x6c, 0xc1, 0xab, 0xfa, 0x24, 0xe1, 0x7b, 0x08, 0x0c, 0xbd, 0xb1, 0x4a,
0x78, 0x88, 0x95, 0x8b, 0xe3, 0x63, 0xe8, 0x6d, 0xe9, 0xcb, 0xd5, 0xfe, 0x3b, 0x00, 0x1d, 0x39,
0xf2, 0xef, 0xb7, 0x0e, 0x66, 0x58, 0xd0, 0xe4, 0xa6, 0x77, 0x72, 0xf8, 0xeb, 0x75, 0x4b, 0x0a,
0x31, 0x44, 0x50, 0xb4, 0x8f, 0xed, 0x1f, 0x1a, 0xdb, 0x99, 0x8d, 0x33, 0x9f, 0x11, 0x83, 0x14,
]
|
gpl-3.0
| 9,166,256,507,868,525,000
| 42.169014
| 98
| 0.651876
| false
| 1.895485
| false
| false
| false
|
matslindh/codingchallenges
|
knowit2019/16.py
|
1
|
1520
|
def fjordify(f):
lines = [line.strip() for line in open(f).readlines()]
width = len(lines[0])
fjord = {
'map': [],
'boat': None,
}
for y, line in enumerate(lines):
row = [' '] * width
for x in range(0, len(line)):
if line[x] == '#':
row[x] = '#'
elif line[x] == 'B':
row[x] = 'B'
fjord['boat'] = (x, y)
fjord['map'].append(row)
return fjord
def navigate(fjord):
x, y = fjord['boat']
d = 'ne'
changed = 0
while True:
x += 1
if x == len(fjord['map'][0]):
break
if d == 'ne':
y -= 1
elif d == 'se':
y += 1
fjord['map'][y][x] = '/' if d == 'ne' else '\\'
if (d == 'ne' and fjord['map'][y-3][x] == '#') or \
(d == 'se' and fjord['map'][y+3][x] == '#'):
changed += 1
if d == 'ne':
y -= 1
d = 'se'
else:
d = 'ne'
y += 1
return changed + 1
def print_map(fjord):
print("\n")
for row in fjord['map']:
print(''.join(row))
def test_fjordify():
fjord = fjordify('input/fjord.test.txt')
assert len(fjord['map']) == 11
assert len(fjord['map'][0]) == 20
assert fjord['boat'] == (1, 8)
result = navigate(fjord)
assert 5 == result
if __name__ == '__main__':
fjord = fjordify('input/fjord.txt')
print(navigate(fjord))
|
mit
| -2,061,431,919,864,742,100
| 19
| 59
| 0.404605
| false
| 3.12115
| false
| false
| false
|
capitalone/cloud-custodian
|
tests/test_cwa.py
|
1
|
1155
|
# Copyright 2017 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from .common import BaseTest
class AlarmTest(BaseTest):
def test_delete(self):
alarm_name = "c7n-test-alarm-delete"
factory = self.replay_flight_data("test_alarm_delete")
client = factory().client("cloudwatch")
client.put_metric_alarm(
AlarmName=alarm_name,
MetricName="CPUUtilization",
Namespace="AWS/EC2",
Statistic="Average",
Period=3600,
EvaluationPeriods=5,
Threshold=10,
ComparisonOperator="GreaterThanThreshold",
)
p = self.load_policy(
{
"name": "delete-alarm",
"resource": "alarm",
"filters": [{"AlarmName": alarm_name}],
"actions": ["delete"],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(
client.describe_alarms(AlarmNames=[alarm_name])["MetricAlarms"], []
)
|
apache-2.0
| -5,600,428,226,361,943,000
| 29.394737
| 79
| 0.550649
| false
| 4.184783
| true
| false
| false
|
streed/PyEndicia
|
endicia/builders/ChangePassPhraseXmlBuilder.py
|
1
|
1454
|
from EndiciaXmlBuilder import EndiciaXmlBuilder
from EndiciaXmlBuilder import ValueToLongError
from lxml.builder import E
class ChangePassPhraseXmlBuilder( EndiciaXmlBuilder ):
xml = {}
def __init__( self ):
EndiciaXmlBuilder.__init__( self )
def setPartnerID( self, __id ):
if len( __id ) <= 50:
self.xml["RequesterID"] = __id
else:
raise ValueToLongError( "PartnerID", str( __id ) )
def setRequestID( self, __id ):
if len( __id ) <= 50:
self.xml["RequestID"] = __id
else:
raise ValueToLongError( "RequestID", str( __id ) )
def setAccountID( self, __id ):
if len( __id ) <= 6:
self.xml["AccountID"] = __id
else:
raise ValueToLongError( "AccountID", str( __id ) )
def setPassPhrase( self, passPhrase ):
if len( passPhrase ) <= 64:
self.xml["PassPhrase"] = passPhrase
else:
raise ValueToLongError( "PassPhrase", str( passPhrase ) )
def setNewPassPhrase( self, newPassPhrase ):
if len( newPassPhrase ) <= 64:
self.xml["NewPassPhrase"] = newPassPhrase
else:
raise ValueToLongError( "NewPassPhrase", str( newPassPhrase ) )
def to_xml( self ):
self.xmlString = (
E.ChangePassPhraseRequest(
E.RequesterID( self.xml["RequesterID"] ),
E.RequestID( self.xml["RequestID"] ),
E.CertifiedIntermediary(
E.AccountID( self.xml["AccountID"] ),
E.PassPhrase( self.xml["PassPhrase"] )
),
E.NewPassPhrase( self.xml["NewPassPhrase"] )
)
)
return self.xmlString
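# Hedged usage sketch (added; all values below are placeholders, not real
# Endicia credentials):
#   from lxml import etree
#   builder = ChangePassPhraseXmlBuilder()
#   builder.setPartnerID("PARTNER")
#   builder.setRequestID("REQ-1")
#   builder.setAccountID("123456")
#   builder.setPassPhrase("old phrase")
#   builder.setNewPassPhrase("new phrase")
#   print(etree.tostring(builder.to_xml(), pretty_print=True))
# which yields a <ChangePassPhraseRequest> element containing RequesterID,
# RequestID, a CertifiedIntermediary block (AccountID, PassPhrase) and
# NewPassPhrase, mirroring the E(...) calls above.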
|
gpl-3.0
| -5,050,795,239,346,306,000
| 25.436364
| 66
| 0.658184
| false
| 2.817829
| false
| false
| false
|
rafaelolg/visiondataset
|
visiondataset/datasets/util.py
|
1
|
1040
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
def base_name(filename):
"""
return the string filename without extensions nor directory path
>>> base_name('asdf.tar.gz')
'asdf'
>>> base_name('/root/ver_strange.dir/asdf.tar.gz')
'asdf'
    >>> base_name(r'c:\Windows With Space\asdf.tar.gz')
'asdf'
"""
s = re.split(r'[\\|/]', filename)[-1]
s = re.split(r'\.', s)[0]
return s
def extension_name(filename):
"""
return the extension of the file
>>> extension_name('asdf.tar.gz')
'tar.gz'
>>> extension_name('/root/ver_strange.dir/asdf.tar.gz')
'tar.gz'
>>> extension_name(r'c:\Windows With Spaces\asdf.tar.gz')
'tar.gz'
"""
s = re.split(r'[\\|/]', filename)[-1]
m = re.search(r'((\.\w\w?\w?)+)',s)
if m:
s = m.group(0)[1:]
else:
s = ''
return s
def listfy(e):
"""
Make sure e is inside a list. If e is a list returns e.
"""
if isinstance(e, list):
return e
else:
return [e]
|
gpl-3.0
| 8,852,463,943,848,875,000
| 20.666667
| 68
| 0.528846
| false
| 2.979943
| false
| false
| false
|
stvoutsin/pyrothorn
|
pyrothorn/pyroquery/atpy/votable.py
|
1
|
9010
|
import os
from distutils import version
import numpy as np
import warnings
from exceptions import TableException
import atpy
from helpers import smart_dtype
from decorators import auto_download_to_file, auto_decompress_to_fileobj, auto_fileobj_to_file
vo_minimum_version = version.LooseVersion('0.3')
try:
from vo.table import parse
from vo.tree import VOTableFile, Resource, Table, Field
vo_installed = True
except:
vo_installed = False
def _check_vo_installed():
if not vo_installed:
raise Exception("Cannot read/write VO table files - vo " + \
vo_minimum_version.vstring + " or later required")
# Define type conversion dictionary
type_dict = {}
type_dict[np.bool_] = "boolean"
type_dict[np.uint8] = "unsignedByte"
type_dict[np.int16] = "short"
type_dict[np.int32] = "int"
type_dict[np.int64] = "long"
type_dict[np.float32] = "float"
type_dict[np.float64] = "double"
type_dict[np.str] = "char"
type_dict[np.string_] = "char"
type_dict[str] = "char"
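# Added descriptive note: type_dict maps numpy scalar types to the VOTable
# "datatype" strings used when writing FIELD elements; integer types with no
# direct mapping here (int8, uint16, uint32) are widened with a warning in
# _to_table() below, and uint64 is rejected outright.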
def _list_tables(filename, pedantic=False):
votable = parse(filename, pedantic=pedantic)
tables = {}
for i, table in enumerate(votable.iter_tables()):
tables[i] = table.name
return tables
# VO can handle file objects, but because we need to read it twice we don't
# use that capability
@auto_download_to_file
@auto_decompress_to_fileobj
@auto_fileobj_to_file
def read(self, filename, pedantic=False, tid=-1, verbose=True):
'''
Read a table from a VOT file
Required Arguments:
*filename*: [ string ]
The VOT file to read the table from
Optional Keyword Arguments:
*tid*: [ integer ]
The ID of the table to read from the VO file (this is
only required if there are more than one table in the VO file)
*pedantic*: [ True | False ]
When *pedantic* is True, raise an error when the file violates
the VO Table specification, otherwise issue a warning.
'''
_check_vo_installed()
self.reset()
# If no table is requested, check that there is only one table
if tid==-1:
tables = _list_tables(filename, pedantic=pedantic)
if len(tables) == 1:
tid = 0
elif len(tables) == 0:
raise Exception("There are no tables present in this file")
else:
raise TableException(tables, 'tid')
votable = parse(filename, pedantic=pedantic)
for id, table in enumerate(votable.iter_tables()):
if id==tid:
break
if table.ID:
self.table_name = str(table.ID)
elif table.name:
self.table_name = str(table.name)
for field in table.fields:
if type(field.name) == str:
colname = field.name
else:
if type(field._ID) == str:
colname = field._ID
else:
raise Exception("Error reading in the VO table: no name or ID for field")
data = table.array[colname]
if len(data) > 0 and data.ndim == 1 and not np.all([np.isscalar(x) for x in data]):
warnings.warn("VO Variable length vector column detected (%s) - converting to string" % colname)
data = np.array([str(x) for x in data])
if self._masked:
self.add_column(colname, data, \
unit=field.unit, mask=table.mask[colname])
else:
self.add_column(colname, data, \
unit=field.unit)
def _to_table(self, VOTable):
'''
Return the current table as a VOT object
'''
table = Table(VOTable)
# Define some fields
n_rows = len(self)
fields = []
for i, name in enumerate(self.names):
data = self.data[name]
unit = self.columns[name].unit
dtype = self.columns[name].dtype
column_type = smart_dtype(dtype)
if data.ndim > 1:
arraysize = str(data.shape[1])
else:
arraysize = None
if column_type == np.string_:
arraysize = "1024"
if column_type in type_dict:
datatype = type_dict[column_type]
elif column_type == np.int8:
warnings.warn("int8 unsupported - converting to int16")
datatype = type_dict[np.int16]
elif column_type == np.uint16:
warnings.warn("uint16 unsupported - converting to int32")
datatype = type_dict[np.int32]
elif column_type == np.uint32:
warnings.warn("uint32 unsupported - converting to int64")
datatype = type_dict[np.int64]
elif column_type == np.uint64:
raise Exception("uint64 unsupported")
else:
raise Exception("cannot use numpy type " + str(column_type))
if column_type == np.float32:
precision = 'F9'
elif column_type == np.float64:
precision = 'F17'
else:
precision = None
fields.append(Field(VOTable, ID="col" + str(i), name=name, \
datatype=datatype, unit=unit, arraysize=arraysize, \
precision=precision))
table.fields.extend(fields)
table.create_arrays(n_rows)
# Character columns are stored as object columns in the VOTable
# instance. Leaving the type as string should work, but causes
# a segmentation fault on MacOS X with Python 2.6 64-bit so
# we force the conversion to object type columns.
for name in self.names:
dtype = self.columns[name].dtype
column_type = smart_dtype(dtype)
# Add data to the table
# At the moment, null values in VO table are dealt with via a
# 'mask' record array
if column_type == np.string_:
table.array[name] = self.data[name]
if self._masked:
table.mask[name] = self.data[name].mask.astype(np.object_)
else:
table.mask[name] = (self.data[name] == \
self.columns[name].null).astype(np.object_)
else:
table.array[name] = self.data[name]
if self._masked:
table.mask[name] = self.data[name].mask
else:
table.mask[name] = self.data[name] == \
self.columns[name].null
table.name = self.table_name
return table
def write(self, filename, votype='ascii', overwrite=False):
'''
Write the table to a VOT file
Required Arguments:
*filename*: [ string ]
The VOT file to write the table to
Optional Keyword Arguments:
*votype*: [ 'ascii' | 'binary' ]
Whether to write the table as ASCII or binary
'''
_check_vo_installed()
#if os.path.exists(filename):
# if overwrite:
# os.remove(filename)
# else:
# raise Exception("File exists: %s" % filename)
VOTable = VOTableFile()
resource = Resource()
VOTable.resources.append(resource)
resource.tables.append(_to_table(self, VOTable))
    if votype == 'binary':
VOTable.get_first_table().format = 'binary'
VOTable.set_all_tables_format('binary')
VOTable.to_xml(filename)
# VO can handle file objects, but because we need to read it twice we don't
# use that capability
@auto_download_to_file
@auto_decompress_to_fileobj
@auto_fileobj_to_file
def read_set(self, filename, pedantic=False, verbose=True):
'''
Read all tables from a VOT file
Required Arguments:
*filename*: [ string ]
The VOT file to read the tables from
Optional Keyword Arguments:
*pedantic*: [ True | False ]
When *pedantic* is True, raise an error when the file violates
the VO Table specification, otherwise issue a warning.
'''
_check_vo_installed()
self.reset()
for tid in _list_tables(filename, pedantic=pedantic):
t = atpy.Table()
read(t, filename, tid=tid, verbose=verbose, pedantic=pedantic)
self.append(t)
def write_set(self, filename, votype='ascii', overwrite=False):
'''
Write all tables to a VOT file
Required Arguments:
*filename*: [ string ]
The VOT file to write the tables to
Optional Keyword Arguments:
*votype*: [ 'ascii' | 'binary' ]
Whether to write the tables as ASCII or binary tables
'''
_check_vo_installed()
if os.path.exists(filename):
if overwrite:
os.remove(filename)
else:
raise Exception("File exists: %s" % filename)
VOTable = VOTableFile()
resource = Resource()
VOTable.resources.append(resource)
for table_key in self.tables:
resource.tables.append(_to_table(self.tables[table_key], VOTable))
    if votype == 'binary':
VOTable.get_first_table().format = 'binary'
VOTable.set_all_tables_format('binary')
VOTable.to_xml(filename)
|
gpl-3.0
| -2,821,985,157,562,026,000
| 26.723077
| 108
| 0.595117
| false
| 3.762004
| false
| false
| false
|
hecate-xw/Miscellaneous
|
TsinghuaCSLT/audioEmbedded/Mission/develop/MCLT.py
|
1
|
4754
|
#!/usr/bin/env python
#coding=utf-8
import numpy as np
import math
import disposeWav
pi = math.pi
def MCLT(x):
'''
Written by the MCLT definition, and it is slow.
'''
M = len(x)/2
#M = 8192
h = (2*M)*[0]
for n in range(M):
h[n] = -math.sin((2.0*n+1.0)*math.pi/(4.0*M))
h[2*M-n-1] = h[n]
X = []
for k in range(M):
X.append(0)
for n in range(2*M):
pc = math.sqrt(2.0/M) * h[n] * math.cos( (2.0*n+1.0+M)*(2.0*k+1)*pi/(4.0*M) )
ps = math.sqrt(2.0/M) * h[n] * math.sin( (2.0*n+1.0+M)*(2.0*k+1)*pi/(4.0*M) )
p = pc + 1j*ps
X[k] += x[n]*p
return X
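# Added reference: the loop above is a direct transcription of the MCLT analysis
# definition,
#   X(k) = sum_{n=0}^{2M-1} x(n) * sqrt(2/M) * h(n)
#          * [cos((2n+1+M)(2k+1)pi/(4M)) + j*sin((2n+1+M)(2k+1)pi/(4M))]
# with the sine window h(n) = -sin((2n+1)pi/(4M)), h(2M-1-n) = h(n).
# FastMCLT() below computes the same coefficients via a normalized FFT.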
def IMCLT(X):
'''
Written by IMCLT definition, and it is slow.
'''
M = len(X)
#M = 8192
h = (2*M)*[0]
for n in range(M):
h[n] = -math.sin((2.0*n+1.0)*math.pi/(4.0*M))
h[2*M-n-1] = h[n]
y = []
Bc = 1.0/2
    Bs = 1.0-Bc  # any choice with Bc + Bs = 1 will do
for n in range(2*M):
y.append(0)
for k in range(M):
pc = math.sqrt(2.0/M) * h[n] * math.cos( (2.0*n+1.0+M)*(2.0*k+1)*pi/(4.0*M) )
ps = math.sqrt(2.0/M) * h[n] * math.sin( (2.0*n+1.0+M)*(2.0*k+1)*pi/(4.0*M) )
#p = pc + 1j*ps
y[n] += (Bc*X[k].real*pc + Bs*X[k].imag*ps)
return y
def W(M,r): #Local function: complex exponential
e = math.e
w = e ** (-1j*2.0*pi*r/M)
return w
def FastMCLT(audio):
'''
Written by the paper 'http://research.microsoft.com/pubs/70128/tr-2005-02.pdf'
Prefer to use this.
'''
# determine subbands, M
L = len(audio)
M = L/2
# normalized FFT of input
U = []
for f in np.fft.fft(audio):
U.append(math.sqrt(1/(2.0*M)) * f)
# compute modulation function
c = []
for i in range(M+1):
c.append( W(8.0,2*i+1.0) * W(4.0*M,i) )
# modulate U into V
V = []
for i in range(M+1):
V.append( c[i] * U[i])
X = []
# compute MCLT coefficients
for each in range(M):
X.append( 1j * V[each] + V[each+1] )
return X
def FastIMCLT(X):
'''
Written by the paper 'http://research.microsoft.com/pubs/70128/tr-2005-02.pdf'
Prefer to use this.
'''
# determine subbands, M
M = len(X)
# compute modulation function
c = []
for i in range(M-1):
k = i+1
c.append( W(8,2*k+1) * W(4*M,k) )
# allocate vector Y
Y = (2*M)*[0]
# map X into Y
for j in range(M-1):
i = j+1
Y[i] = 1.0/4 * c[j].conjugate() * (X[j] - 1j * X[j+1])
# determine first and last Y values
Y[0] = math.sqrt(1.0/8) * (X[0].real + X[0].imag)
Y[M] = -math.sqrt(1.0/8) * (X[M-1].real + X[M-1].imag)
# complete vector Y via conjugate symmetry property for the
# FFT of a real vector (not needed if the inverse FFT
# routine is a "real FFT", which should take only as input
# only M+1 coefficients)
for i in range(M-1):
Y[i+M+1] = Y[M-i-1].conjugate()
# inverse normalized FFT to compute the output vector
# output of ifft should have zero imaginary part; but
# by calling real(.) we remove the small rounding noise
# that's present in the imaginary part
yt = []
for i in Y:
yt.append( math.sqrt(2*M) * i )
y = (np.fft.ifft(yt)).real
return y
def test():
nchannels, sampwidth, framerate, nframes, wave_data, time = disposeWav.read_wave_data("../wavFile/test1.wav")
x = range(4000,8000,2)
BL = 32
B = len(x)*2/BL - 1
for i in range(B-1):
if i == 0:
print x[:BL]
continue
X_prev = MCLT(x[(i-1)*BL/2:(i+1)*BL/2])
X_curr = MCLT(x[(i)*BL/2:(i+2)*BL/2])
X_next = MCLT(x[(i+1)*BL/2:(i+3)*BL/2])
X = X_curr
        y = IMCLT(X)  # inverse-transform the complex-domain data (after information embedding) back to the real domain
y_prev = IMCLT(X_prev)[BL/2:]
y_next = IMCLT(X_next)[:BL/2]
y = np.array(y_prev + y_next) + np.array(y)
print y[BL/2:]
print x[-BL/2:]
print "\n\n\n"
for i in range(B-1):
if i == 0:
print x[:BL]
continue
X_prev = FastMCLT(x[(i-1)*BL/2:(i+1)*BL/2])
X_curr = FastMCLT(x[(i)*BL/2:(i+2)*BL/2])
X_next = FastMCLT(x[(i+1)*BL/2:(i+3)*BL/2])
X = X_curr
        y = FastIMCLT(X)  # inverse-transform the complex-domain data (after information embedding) back to the real domain
y_prev = FastIMCLT(X_prev).tolist()[BL/2:]
y_next = FastIMCLT(X_next).tolist()[:BL/2]
y = np.array(y_prev + y_next) + y
print y[BL/2:]
print x[-BL/2:]
if __name__ == "__main__":
test()
|
mit
| 5,250,337,636,828,632,000
| 26.174419
| 113
| 0.482028
| false
| 2.508857
| false
| false
| false
|
socolofs/tamoc
|
bin/dbm/gas_bubbles.py
|
1
|
3178
|
"""
Gas fluid particles
===================
Use the ``TAMOC`` ``DBM`` to specify a natural gas bubble that can dissolve
and calculate all of its properties in deepwater conditions.
In particular, this script demonstrates the methods:
* `dbm.FluidParticle.mass_frac`
* `dbm.FluidParticle.density`
* `dbm.FluidParticle.mass_by_diameter`
* `dbm.FluidParticle.diameter`
* `dbm.FluidParticle.particle_shape`
* `dbm.FluidParticle.slip_velocity`
* `dbm.FluidParticle.surface_area`
* `dbm.FluidParticle.mass_transfer`
* `dbm.FluidParticle.heat_transfer`
* `dbm.FluidParticle.solubility`
"""
# S. Socolofsky, July 2013, Texas A&M University <socolofs@tamu.edu>.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from tamoc import dbm
from tamoc import seawater
import numpy as np
import matplotlib.pyplot as plt
if __name__ == '__main__':
# Define the composition of a natural gas
composition = ['methane', 'ethane', 'propane']
mol_frac = np.array([0.90, 0.07, 0.03])
# Specify that we are interested in properties for the gas phase
fl_type = 0
# Create a DBM FluidParticle object for this natural gas assuming zeros
# for all the binary interaction coefficients
delta = np.zeros((3,3))
ng = dbm.FluidParticle(composition, fl_type, delta)
# Specify some generic deepwater ocean conditions
P = 150.0 * 1.0e5
Ta = 273.15 + 4.0
Sa = 34.5
# Echo the ambient conditions to the screen
print('\nAmbient conditions: \n')
print(' P = %g (Pa)' % P)
print(' T = %g (K)' % Ta)
print(' S = %g (psu)' % Sa)
print(' rho_sw = %g (kg/m^3)' % (seawater.density(Ta, Sa, P)))
# Get the general properties of the gas
mf = ng.mass_frac(mol_frac)
T = 273.15 + 60.
print('\nBasic properties of gas: \n')
print(' T = %g (K)' % T)
print(' mol_frac = [' + ', '.join('%g' % mol_frac[i] for i in
range(ng.nc)) + '] (--)')
print(' mass_frac = [' + ', '.join('%g' % mf[i] for i in
range(ng.nc)) + '] (--)')
print(' rho_p = %g (kg/m^3) at %g (K) and %g (Pa)' %
(ng.density(mf, T, P), T, P))
# Get the masses in a 1.0 cm effective diameter bubble
de = 0.01
m = ng.masses_by_diameter(de, T, P, mol_frac)
# Echo the properties of the bubble to the screen
    print('\nBasic bubble properties: \n')
print(' de = %g (m)' % (ng.diameter(m, T, P)))
shape, de, rho_p, rho, mu_p, mu, sigma = ng.particle_shape(m, T, P, Sa,
Ta)
print(' shape = %g (1: Sphere, 2: Ellipsoid, 3: Spherical Cap)'
% shape)
print(' us = %g (m/s)' % (ng.slip_velocity(m, T, P, Sa, Ta)))
print(' A = %g (m^2)' % (ng.surface_area(m, T, P, Sa, Ta)))
beta = ng.mass_transfer(m, T, P, Sa, Ta)
print(' beta = [' + ', '.join('%g' % beta[i] for i in
range(ng.nc)) + '] (m/s)')
print(' beta_T = %g (m/s)' % (ng.heat_transfer(m, T, P, Sa, Ta)))
Cs = ng.solubility(m, T, P, Sa)
print(' Cs = [' + ', '.join('%g' % Cs[i] for i in
range(ng.nc)) + '] (kg/m^3)')
|
mit
| 8,101,110,010,766,100,000
| 33.923077
| 76
| 0.568911
| false
| 2.886467
| false
| false
| false
|
CNR-Engineering/TelTools
|
cli/slf_3d_to_2d.py
|
1
|
4035
|
#!/usr/bin/env python
"""
Perform a vertical operation on a 3D results file to get 2D
"""
import numpy as np
import sys
from tqdm import tqdm
from pyteltools.geom.transformation import Transformation
import pyteltools.slf.misc as operations
from pyteltools.slf import Serafin
from pyteltools.utils.cli_base import logger, PyTelToolsArgParse
def slf_3d_to_2d(args):
with Serafin.Read(args.in_slf, args.lang) as resin:
resin.read_header()
logger.info(resin.header.summary())
resin.get_time()
if resin.header.is_2d:
logger.critical('The input file is not 3D.')
sys.exit(1)
if 'Z' not in resin.header.var_IDs:
logger.critical('The elevation variable Z is not found in the Serafin file.')
sys.exit(1)
if args.layer is not None:
upper_plane = resin.header.nb_planes
if args.layer < 1 or args.layer > upper_plane:
logger.critical('Layer has to be in [1, %i]' % upper_plane)
sys.exit(1)
output_header = resin.header.copy_as_2d()
# Shift mesh coordinates if necessary
if args.shift:
output_header.transform_mesh([Transformation(0, 1, 1, args.shift[0], args.shift[1], 0)])
# Toggle output file endianness if necessary
if args.toggle_endianness:
output_header.toggle_endianness()
# Convert to single precision
if args.to_single_precision:
if resin.header.is_double_precision():
output_header.to_single_precision()
else:
logger.warn('Input file is already single precision! Argument `--to_single_precision` is ignored')
if args.aggregation is not None:
if args.aggregation == 'max':
operation_type = operations.MAX
elif args.aggregation == 'min':
operation_type = operations.MIN
else: # args.aggregation == 'mean'
operation_type = operations.MEAN
selected_vars = [var for var in output_header.iter_on_all_variables()]
vertical_calculator = operations.VerticalMaxMinMeanCalculator(operation_type, resin, output_header,
selected_vars, args.vars)
output_header.set_variables(vertical_calculator.get_variables()) # sort variables
# Add some elevation variables
for var_ID in args.vars:
output_header.add_variable_from_ID(var_ID)
with Serafin.Write(args.out_slf, args.lang, overwrite=args.force) as resout:
resout.write_header(output_header)
vars_2d = np.empty((output_header.nb_var, output_header.nb_nodes_2d), dtype=output_header.np_float_type)
for time_index, time in enumerate(tqdm(resin.time, unit='frame')):
if args.aggregation is not None:
vars_2d = vertical_calculator.max_min_mean_in_frame(time_index)
else:
for i, var in enumerate(output_header.var_IDs):
vars_2d[i, :] = resin.read_var_in_frame_as_3d(time_index, var)[args.layer - 1, :]
resout.write_entire_frame(output_header, time, vars_2d)
parser = PyTelToolsArgParse(description=__doc__, add_args=['in_slf', 'out_slf', 'shift'])
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--layer', help='layer number (1=lower, nb_planes=upper)', type=int, metavar=1)
group.add_argument('--aggregation', help='operation over the vertical', choices=('max', 'min', 'mean'))
parser.add_argument('--vars', nargs='+', help='variable(s) deduced from Z', default=[], choices=('B', 'S', 'H'))
parser.add_group_general(['force', 'verbose'])
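# Example invocations (hypothetical file names; assumes `in_slf` and `out_slf`
# are the positional arguments registered by PyTelToolsArgParse above):
#   slf_3d_to_2d.py r3d_tidal_flats.slf r2d_layer1.slf --layer 1
#   slf_3d_to_2d.py r3d_tidal_flats.slf r2d_mean.slf --aggregation mean --vars B S H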
if __name__ == '__main__':
args = parser.parse_args()
try:
slf_3d_to_2d(args)
except (Serafin.SerafinRequestError, Serafin.SerafinValidationError):
# Message is already reported by slf logger
sys.exit(1)
|
gpl-3.0
| 2,957,268,180,725,916,700
| 41.925532
| 116
| 0.615366
| false
| 3.695055
| false
| false
| false
|
storpool/python-storpool
|
storpool/sputils.py
|
1
|
1408
|
#
# Copyright (c) 2014 - 2019 StorPool.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Utility functions and constants for the StorPool API bindings. """
from __future__ import print_function
import os.path
import time
import six.moves
sec = 1.0
msec = 1.0e-3 * sec
usec = 1e-6 * sec
KB = 1024
MB = 1024 ** 2
GB = 1024 ** 3
TB = 1024 ** 4
def pr(x):
""" Display a value and return it; useful for lambdas. """
print(x)
return x
def pathPollWait(path, shouldExist, isLink, pollTime, maxTime):
""" Poll/listen for path to appear/disappear. """
for i in six.moves.range(int(maxTime / pollTime)):
pathExists = os.path.exists(path)
if pathExists and isLink:
assert os.path.islink(path)
if pathExists == shouldExist:
return True
else:
time.sleep(pollTime)
else:
return False
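# Usage sketch (illustrative path and timings): wait up to 10 seconds, polling
# every 0.5 seconds, for a path to appear:
#
#   appeared = pathPollWait('/dev/storpool/test', True, False, 0.5 * sec, 10 * sec)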
|
apache-2.0
| 3,257,509,325,098,804,700
| 24.6
| 74
| 0.674006
| false
| 3.638243
| false
| false
| false
|
loggrio/loggr-unit-raspberry
|
raspi_loggr/util.py
|
1
|
3137
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import subprocess
import logging
from enum import Enum
class LedStatusTypes(Enum):
"""Enum of led status types resp. colors"""
ok = 1 # green
sensor_broken = 2 # red
request_error = 3 # orange
pairing_succeeded = 4 # blue
class SensorTypes(Enum):
"""Enum of sensor types"""
temperature = 1
brightness = 2
humidity = 3
pressure = 4
def log_info(info):
"""Log info messages in logfile and console
Args:
info (str): info message which has to be logged
"""
logging.info(info)
print info
def log_error(err):
"""Log error messages in logfile and console
Args:
err (str): error message which has to be logged
"""
logging.error(err)
print err
def treat_sensor_errors(cpe):
"""Log sensor errors
Args:
cpe (subprocess.CalledProcessError): called process error exception object
"""
log_error('called process error: ' + str(cpe.cmd) + ' returned ' + str(cpe.returncode) + ': ' + cpe.output)
def treat_os_errors(ose):
"""Log os errors
Args:
ose (OSError): os error exception object
"""
log_error('oserror: ' + str(ose.strerror))
def treat_led_errors(cpe):
"""Log led errors
Args:
cpe (subprocess.CalledProcessError): called process error exception object
"""
if cpe.returncode == 1:
log_error('called process error: ' + str(cpe.cmd[0]) + ' returned 1: setup wiringPi failed')
elif cpe.returncode == 2:
log_error('called process error: ' + str(cpe.cmd[0]) + ' returned 2: invalid arguments')
def treat_requests_errors(re):
"""Log requests errors and set status led color to orange
Args:
re (requests.exceptions.RequestException): request exception object
"""
log_error('requests failure: ' + str(re))
set_status_led(LedStatusTypes.request_error.name)
def treat_sensor_broken_errors(sensortype):
"""Log broken sensor errors and set status led color to red
Args:
sensortype (str): type of sensor
"""
log_error(str(sensortype) + ' sensor broken')
set_status_led(LedStatusTypes.sensor_broken.name)
def treat_missing_config_errors():
"""Log missing config file errors"""
log_error('No valid config file found! Please start config server!')
def treat_pairing_errors():
"""Log pairing errors"""
log_error('No Token and/or UserId set in config file. Please pair your Raspberry Pi!')
def set_status_led(status):
"""Set status led color
Args:
status (LedStatusTypes): led status type resp. color of rgb led
"""
command = ['sensors/rgb.out', str(status)]
try:
subproc = subprocess.check_call(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except subprocess.CalledProcessError as cpe:
# catch invalid arguments errors
# catch wiringPi errors
treat_led_errors(cpe)
except OSError as ose:
# catch os errors, e.g. file-not-found
treat_os_errors(ose)
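# Usage sketch: signal a successful reading on the RGB status led; shown as a
# comment because set_status_led() shells out to sensors/rgb.out when called:
#
#   set_status_led(LedStatusTypes.ok.name)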
|
gpl-3.0
| -4,851,364,773,348,961,000
| 24.92562
| 111
| 0.629582
| false
| 3.8633
| false
| false
| false
|
wwrechard/pydlm
|
doc/source/conf.py
|
1
|
10025
|
# -*- coding: utf-8 -*-
#
# PyDLM documentation build configuration file, created by
# sphinx-quickstart on Fri Sep 9 23:34:57 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
import sys
# sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, '/Users/samuel/Documents/Github/PyDLM')
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
#collapse_navigation = True
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.githubpages',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PyDLM'
copyright = u'2016, Xiangyu Wang'
author = u'Xiangyu Wang'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'pyramid'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'PyDLM v0.1.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyDLMdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PyDLM.tex', u'PyDLM Documentation',
u'Xiangyu Wang', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pydlm', u'PyDLM Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PyDLM', u'PyDLM Documentation',
author, 'PyDLM', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
|
bsd-3-clause
| -657,461,185,511,227,300
| 27.724928
| 80
| 0.691671
| false
| 3.553704
| true
| false
| false
|
libyal/libyal
|
yaldevtools/source_generators/common.py
|
1
|
1219
|
# -*- coding: utf-8 -*-
"""The source file generator for common source files."""
from __future__ import unicode_literals
import os
from yaldevtools.source_generators import interface
class CommonSourceFileGenerator(interface.SourceFileGenerator):
"""Common source files generator."""
_AUTHORS = 'Joachim Metz <joachim.metz@gmail.com>'
_AUTHORS_SEPARATOR = ',\n * '
def Generate(self, project_configuration, output_writer):
"""Generates common source files.
Args:
project_configuration (ProjectConfiguration): project configuration.
output_writer (OutputWriter): output writer.
"""
template_mappings = self._GetTemplateMappings(
project_configuration, authors_separator=self._AUTHORS_SEPARATOR)
template_mappings['authors'] = self._AUTHORS
for directory_entry in os.listdir(self._template_directory):
template_filename = os.path.join(
self._template_directory, directory_entry)
if not os.path.isfile(template_filename):
continue
output_filename = os.path.join('common', directory_entry)
self._GenerateSection(
template_filename, template_mappings, output_writer, output_filename)
|
apache-2.0
| 6,604,040,997,463,279,000
| 31.945946
| 79
| 0.696473
| false
| 4.292254
| false
| false
| false
|
fniephaus/power-system-simulation
|
systems/producers.py
|
1
|
4740
|
import random
class GasPoweredGenerator(object):
def __init__(self, env):
self.env = env
self.gas_price_per_kwh = 0.0655 # Euro
self.running = False
self.workload = 0
self.current_gas_consumption = 0 # kWh
self.current_thermal_production = 0 # kWh
self.total_gas_consumption = 0.0 # kWh
self.total_thermal_production = 0.0 # kWh
def start(self):
self.running = True
def stop(self):
self.running = False
def consume_gas(self):
self.total_gas_consumption += self.current_gas_consumption
self.total_thermal_production += self.current_thermal_production
def get_operating_costs(self):
return self.total_gas_consumption * self.gas_price_per_kwh
class CogenerationUnit(GasPoweredGenerator):
def __init__(self, env, heat_storage, electrical_infeed):
GasPoweredGenerator.__init__(self, env)
self.heat_storage = heat_storage
# XRGI 15kW
self.max_gas_input = 49.0 # kW
self.electrical_efficiency = 0.3 # max 14.7 kW
self.thermal_efficiency = 0.62 # max 30.38 kW
self.maintenance_interval = 8500 # hours
self.electrical_infeed = electrical_infeed
self.minimal_workload = 40.0
self.noise = True
self.current_electrical_production = 0 # kWh
self.total_electrical_production = 0.0 # kWh
def calculate_workload(self):
calculated_workload = self.heat_storage.target_energy + \
self.minimal_workload - self.heat_storage.energy_stored()
if self.noise:
calculated_workload += random.random() - 0.5
# make sure that minimal_workload <= workload <= 99.0 or workload = 0
if calculated_workload >= self.minimal_workload:
self.workload = min(calculated_workload, 99.0)
else:
self.workload = 0.0
        # calculate current consumption and production values
self.current_gas_consumption = self.workload / \
99.0 * self.max_gas_input
self.current_electrical_production = self.current_gas_consumption * \
self.electrical_efficiency
self.current_thermal_production = self.current_gas_consumption * \
self.thermal_efficiency
def consume_gas(self):
super(CogenerationUnit, self).consume_gas()
self.total_electrical_production += self.current_electrical_production
def update(self):
self.env.log('Starting cogeneration unit...')
self.start()
while True:
if self.running:
self.calculate_workload()
self.env.log(
'CU workload:', '%f %%' % self.workload, 'Total:', '%f kWh (%f Euro)' %
(self.total_gas_consumption, self.get_operating_costs()))
self.electrical_infeed.add_energy(
self.current_electrical_production)
self.heat_storage.add_energy(self.current_thermal_production)
self.consume_gas()
else:
self.env.log('Cogeneration unit stopped')
yield self.env.timeout(3600)
class PeakLoadBoiler(GasPoweredGenerator):
def __init__(self, env, heat_storage):
GasPoweredGenerator.__init__(self, env)
self.heat_storage = heat_storage
self.max_gas_input = 100.0 # kW
self.thermal_efficiency = 0.8
def calculate_workload(self):
# turn on if heat_storage is undersupplied
if self.heat_storage.undersupplied():
self.workload = 99.0
# turn off if heat storage's target_energy is almost reached
elif self.heat_storage.energy_stored() + self.current_thermal_production >= self.heat_storage.target_energy:
self.workload = 0
        # calculate current consumption and production values
self.current_gas_consumption = self.workload / \
99.0 * self.max_gas_input
self.current_thermal_production = self.current_gas_consumption * \
self.thermal_efficiency
def update(self):
self.env.log('Starting peak load boiler...')
self.start()
while True:
if self.running:
self.calculate_workload()
self.env.log(
'PLB workload:', '%f %%' % self.workload, 'Total:', '%f kWh (%f Euro)' %
(self.total_gas_consumption, self.get_operating_costs()))
self.heat_storage.add_energy(self.current_thermal_production)
self.consume_gas()
else:
self.env.log('PLB stopped.')
self.env.log('=' * 80)
yield self.env.timeout(3600)
|
mit
| 1,032,790,854,854,087,200
| 33.59854
| 116
| 0.597679
| false
| 3.776892
| false
| false
| false
|
rusty1s/graph-based-image-classification
|
patchy/patchy.py
|
1
|
9132
|
import os
import sys
import json
import tensorflow as tf
from data import DataSet, Record, datasets
from data import iterator, read_tfrecord, write_tfrecord
from grapher import graphers
from .helper.labeling import labelings, scanline
from .helper.neighborhood_assembly import neighborhood_assemblies as neighb,\
neighborhoods_weights_to_root
from .helper.node_sequence import node_sequence
DATA_DIR = '/tmp/patchy_san_data'
FORCE_WRITE = False
WRITE_NUM_EPOCHS = 1
DISTORT_INPUTS = False
NUM_NODES = 100
NODE_STRIDE = 1
NEIGHBORHOOD_SIZE = 9
INFO_FILENAME = 'info.json'
TRAIN_FILENAME = 'train.tfrecords'
TRAIN_INFO_FILENAME = 'train_info.json'
TRAIN_EVAL_FILENAME = 'train_eval.tfrecords'
TRAIN_EVAL_INFO_FILENAME = 'train_eval_info.json'
EVAL_FILENAME = 'eval.tfrecords'
EVAL_INFO_FILENAME = 'eval_info.json'
class PatchySan(DataSet):
def __init__(self, dataset, grapher, data_dir=DATA_DIR,
force_write=FORCE_WRITE, write_num_epochs=WRITE_NUM_EPOCHS,
distort_inputs=DISTORT_INPUTS, node_labeling=None,
num_nodes=NUM_NODES, node_stride=NODE_STRIDE,
neighborhood_assembly=None,
neighborhood_size=NEIGHBORHOOD_SIZE):
node_labeling = scanline if node_labeling is None else node_labeling
neighborhood_assembly = neighborhoods_weights_to_root if\
neighborhood_assembly is None else neighborhood_assembly
self._dataset = dataset
self._grapher = grapher
self._num_nodes = num_nodes
self._neighborhood_size = neighborhood_size
self._distort_inputs = distort_inputs
super().__init__(data_dir)
if tf.gfile.Exists(data_dir) and force_write:
tf.gfile.DeleteRecursively(data_dir)
tf.gfile.MakeDirs(data_dir)
info_file = os.path.join(data_dir, INFO_FILENAME)
if not tf.gfile.Exists(info_file) or force_write:
with open(info_file, 'w') as f:
json.dump({'max_num_epochs': write_num_epochs,
'distort_inputs': distort_inputs,
'node_labeling': node_labeling.__name__,
'num_nodes': num_nodes,
'num_node_channels': grapher.num_node_channels,
'node_stride': node_stride,
'neighborhood_assembly':
neighborhood_assembly.__name__,
'neighborhood_size': neighborhood_size,
'num_edge_channels': grapher.num_edge_channels}, f)
train_file = os.path.join(data_dir, TRAIN_FILENAME)
train_info_file = os.path.join(data_dir, TRAIN_INFO_FILENAME)
if not tf.gfile.Exists(train_file):
_write(dataset, grapher, False, train_file, train_info_file,
write_num_epochs, distort_inputs, True, node_labeling,
num_nodes, node_stride, neighborhood_assembly,
neighborhood_size)
eval_file = os.path.join(data_dir, EVAL_FILENAME)
eval_info_file = os.path.join(data_dir, EVAL_INFO_FILENAME)
if not tf.gfile.Exists(eval_file):
_write(dataset, grapher, True, eval_file, eval_info_file,
1, distort_inputs, False, node_labeling, num_nodes,
node_stride, neighborhood_assembly, neighborhood_size)
train_eval_file = os.path.join(data_dir, TRAIN_EVAL_FILENAME)
train_eval_info_file = os.path.join(data_dir, TRAIN_EVAL_INFO_FILENAME)
if distort_inputs and not tf.gfile.Exists(train_eval_file):
_write(dataset, grapher, False, train_eval_file,
train_eval_info_file, 1, distort_inputs, False,
node_labeling, num_nodes, node_stride,
neighborhood_assembly, neighborhood_size)
@classmethod
def create(cls, config):
"""Static constructor to create a PatchySan dataset based on a json
object.
Args:
config: A configuration object with sensible defaults for
missing values.
Returns:
A PatchySan dataset.
"""
dataset_config = config['dataset']
grapher_config = config['grapher']
return cls(datasets[dataset_config['name']].create(dataset_config),
graphers[grapher_config['name']].create(grapher_config),
config.get('data_dir', DATA_DIR),
config.get('force_write', FORCE_WRITE),
config.get('write_num_epochs', WRITE_NUM_EPOCHS),
config.get('distort_inputs', DISTORT_INPUTS),
labelings.get(config.get('node_labeling')),
config.get('num_nodes', NUM_NODES),
config.get('node_stride', NODE_STRIDE),
neighb.get(config.get('neighborhood_assembly')),
config.get('neighborhood_size', NEIGHBORHOOD_SIZE))
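    # Illustrative `config` for create(); the nested dataset/grapher names are
    # hypothetical placeholders, all other keys fall back to the defaults above:
    #
    #   {
    #       "dataset": {"name": "cifar_10"},
    #       "grapher": {"name": "segmentation"},
    #       "num_nodes": 100,
    #       "neighborhood_size": 9
    #   }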
@property
def train_filenames(self):
return [os.path.join(self.data_dir, TRAIN_FILENAME)]
@property
def eval_filenames(self):
return [os.path.join(self.data_dir, EVAL_FILENAME)]
@property
def train_eval_filenames(self):
if self._distort_inputs:
return [os.path.join(self.data_dir, TRAIN_EVAL_FILENAME)]
else:
return [os.path.join(self.data_dir, TRAIN_FILENAME)]
@property
def labels(self):
return self._dataset.labels
@property
def num_examples_per_epoch_for_train(self):
with open(os.path.join(self._data_dir, TRAIN_INFO_FILENAME), 'r') as f:
count = json.load(f)['count']
return min(count, self._dataset.num_examples_per_epoch_for_train)
@property
def num_examples_per_epoch_for_eval(self):
with open(os.path.join(self._data_dir, EVAL_INFO_FILENAME), 'r') as f:
count = json.load(f)['count']
return min(count, self._dataset.num_examples_per_epoch_for_eval)
@property
def num_examples_per_epoch_for_train_eval(self):
if self._distort_inputs:
filename = os.path.join(self._data_dir, TRAIN_EVAL_INFO_FILENAME)
with open(filename, 'r') as f:
count = json.load(f)['count']
return min(count,
self._dataset.num_examples_per_epoch_for_train_eval)
else:
return self._dataset.num_examples_per_epoch_for_train
def read(self, filename_queue):
data, label = read_tfrecord(
filename_queue,
{'nodes': [-1, self._grapher.num_node_channels],
'neighborhood': [self._num_nodes, self._neighborhood_size]})
nodes = data['nodes']
# Convert the neighborhood to a feature map.
def _map_features(node):
i = tf.maximum(node, 0)
positive = tf.strided_slice(nodes, [i], [i+1], [1])
negative = tf.zeros([1, self._grapher.num_node_channels])
return tf.where(i < 0, negative, positive)
data = tf.reshape(data['neighborhood'], [-1])
data = tf.cast(data, tf.int32)
data = tf.map_fn(_map_features, data, dtype=tf.float32)
shape = [self._num_nodes, self._neighborhood_size,
self._grapher.num_node_channels]
data = tf.reshape(data, shape)
return Record(data, shape, label)
def _write(dataset, grapher, eval_data, tfrecord_file, info_file,
write_num_epochs, distort_inputs, shuffle,
node_labeling, num_nodes, node_stride, neighborhood_assembly,
neighborhood_size):
writer = tf.python_io.TFRecordWriter(tfrecord_file)
iterate = iterator(dataset, eval_data, distort_inputs=distort_inputs,
num_epochs=write_num_epochs, shuffle=shuffle)
def _before(image, label):
nodes, adjacencies = grapher.create_graph(image)
# Only take the first adjacency matrix.
count = tf.shape(adjacencies)[0]
adjacency = tf.strided_slice(
adjacencies, [0, 0, 0], [count, count, 1], [1, 1, 1])
adjacency = tf.squeeze(adjacency, axis=2)
sequence = node_labeling(adjacency)
sequence = node_sequence(sequence, num_nodes, node_stride)
neighborhood = neighborhood_assembly(adjacency, sequence,
neighborhood_size)
return [nodes, neighborhood, label]
def _each(output, index, last_index):
write_tfrecord(writer,
{'nodes': output[0], 'neighborhood': output[1]},
output[2])
sys.stdout.write(
'\r>> Saving graphs to {} {:.1f}%'
.format(tfrecord_file, 100.0 * index / last_index))
sys.stdout.flush()
def _done(index, last_index):
print('')
print('Successfully saved {} graphs to {}.'
.format(index, tfrecord_file))
with open(info_file, 'w') as f:
json.dump({'count': index}, f)
iterate(_each, _before, _done)
|
mit
| 1,591,233,635,957,854,200
| 36.892116
| 79
| 0.591984
| false
| 3.734969
| true
| false
| false
|
internap/almanach
|
tests/builder.py
|
1
|
4540
|
# Copyright 2016 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytz
from copy import copy
from datetime import datetime
from uuid import uuid4
from almanach.core.model import build_entity_from_dict, Instance, Volume, VolumeType
class Builder(object):
def __init__(self, dict_object):
self.dict_object = dict_object
class EntityBuilder(Builder):
def build(self):
return build_entity_from_dict(self.dict_object)
def with_id(self, entity_id):
self.dict_object["entity_id"] = entity_id
return self
def with_project_id(self, project_id):
self.dict_object["project_id"] = project_id
return self
def with_last_event(self, last_event):
self.dict_object["last_event"] = last_event
return self
def with_start(self, year, month, day, hour, minute, second):
self.with_datetime_start(datetime(year, month, day, hour, minute, second, tzinfo=pytz.utc))
return self
def with_datetime_start(self, date):
self.dict_object["start"] = date
return self
def with_end(self, year, month, day, hour, minute, second):
self.dict_object["end"] = datetime(year, month, day, hour, minute, second, tzinfo=pytz.utc)
return self
def with_no_end(self):
self.dict_object["end"] = None
return self
def with_flavor(self, flavor):
self.dict_object["flavor"] = flavor
return self
def with_metadata(self, metadata):
self.dict_object['metadata'] = metadata
return self
def build_from(self, other):
self.dict_object = copy(other.__dict__)
return self
def with_all_dates_in_string(self):
self.dict_object['start'] = self.dict_object['start'].strftime("%Y-%m-%dT%H:%M:%S.%fZ")
self.dict_object['last_event'] = self.dict_object['last_event'].strftime("%Y-%m-%dT%H:%M:%S.%fZ")
return self
class VolumeBuilder(EntityBuilder):
def with_attached_to(self, attached_to):
self.dict_object["attached_to"] = attached_to
return self
def with_no_attachment(self):
self.dict_object["attached_to"] = []
return self
def with_display_name(self, display_name):
self.dict_object["name"] = display_name
return self
def with_volume_type(self, volume_type):
self.dict_object["volume_type"] = volume_type
return self
class VolumeTypeBuilder(Builder):
def build(self):
return VolumeType(**self.dict_object)
def with_volume_type_id(self, volume_type_id):
self.dict_object["volume_type_id"] = volume_type_id
return self
def with_volume_type_name(self, volume_type_name):
self.dict_object["volume_type_name"] = volume_type_name
return self
def instance():
return EntityBuilder({
"entity_id": str(uuid4()),
"project_id": str(uuid4()),
"start": datetime(2014, 1, 1, 0, 0, 0, 0, pytz.utc),
"end": None,
"last_event": datetime(2014, 1, 1, 0, 0, 0, 0, pytz.utc),
"flavor": "A1.1",
"os": {
"os_type": "windows",
"distro": "windows",
"version": "2012r2"
},
"entity_type": Instance.TYPE,
"name": "some-instance",
"metadata": {
"a_metadata.to_filter": "include.this",
"a_metadata.to_exclude": "exclude.this"
}
})
def volume():
return VolumeBuilder({
"entity_id": str(uuid4()),
"project_id": str(uuid4()),
"start": datetime(2014, 1, 1, 0, 0, 0, 0, pytz.utc),
"end": None,
"last_event": datetime(2014, 1, 1, 0, 0, 0, 0, pytz.utc),
"volume_type": "SF400",
"size": 1000000,
"entity_type": Volume.TYPE,
"name": "some-volume",
"attached_to": None,
})
def volume_type():
return VolumeTypeBuilder({
"volume_type_id": str(uuid4()),
"volume_type_name": "a_type_name"
})
def a(builder):
return builder.build()
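# Usage sketch: builders are combined fluently and finalized with a(), e.g.
#
#   some_instance = a(instance().with_id('my-instance-id').with_no_end())
#   some_volume = a(volume().with_display_name('my-volume').with_no_attachment())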
|
apache-2.0
| 7,253,645,098,605,232,000
| 27.553459
| 105
| 0.609471
| false
| 3.57762
| false
| false
| false
|
crichardson17/starburst_atlas
|
SFH_comparison/data/Padova_inst/padova_inst_6/fullgrid/peaks_reader.py
|
1
|
5057
|
import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ---------------------------------------------------
headerloc = "/Users/helen/Documents/Elon/Thesis_Research/github_repo/starburst_atlas/headers_dir/headers.txt"
# ------------------------------------------------------------------------------------------------------
#data files' names from source directory constructed here. default source directory is working directory
numFiles = 3 #change this if you have more/less files
gridFiles = [None]*numFiles
emissionFiles = [None]*numFiles
for i in range(numFiles):
for file in os.listdir('.'):
if file.endswith("{:d}.grd".format(i+1)):
gridFiles[i] = file
#keep track of all the files you'll be importing by printing
#print file
if file.endswith("{:d}.txt".format(i+1)):
emissionFiles[i] = file
#keep track of all the files you'll be importing by printing
#print file
print ("File names constructed")
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
print("Beginning file import")
for i in range(numFiles):
gridI = [];
with open(gridFiles[i], 'rb') as f:
csvReader = csv.reader(f, delimiter='\t')
for row in csvReader:
gridI.append(row)
gridI = asarray(gridI)
gridI = gridI[1:,6:8]
if ( i == 0 ):
grid = gridI
else :
grid = concatenate((grid,gridI))
for i in range(numFiles):
emissionLineI = [];
with open(emissionFiles[i], 'rb') as f:
csvReader = csv.reader(f, delimiter='\t')
headers = csvReader.next()
for row in csvReader:
emissionLineI.append(row)
emissionLineI = asarray(emissionLineI)
emissionLineI = emissionLineI[:,1:]
if ( i == 0 ):
Emissionlines = emissionLineI
else :
Emissionlines = concatenate((Emissionlines,emissionLineI))
hdens_values = grid[:,1]
phi_values = grid[:,0]
print("Import files complete")
#To fix when hdens > 10
#many of my grids were run with hdens up to 12 so we needed to cut off part of the data
#first create temporary arrays
print("modifications begun")
hdens_values_2 = empty(shape=[0, 1])
phi_values_2 = empty(shape=[0, 1])
Emissionlines_2 = empty(shape=[0, len(Emissionlines[0,:])])
#save data in range desired to temp arrays
for i in range(len(hdens_values)):
if (float(hdens_values[i]) < 6.100) & (float(phi_values[i]) < 17.100) :
hdens_values_2 = append(hdens_values_2, hdens_values[i])
phi_values_2 = append(phi_values_2, phi_values[i])
Emissionlines_2 = vstack([Emissionlines_2, Emissionlines[i,:]])
#overwrite old arrays
hdens_values = hdens_values_2
phi_values = phi_values_2
Emissionlines = Emissionlines_2
print("modifications complete")
# ---------------------------------------------------
# ---------------------------------------------------
#there are the emission line names properly formatted
print("Importing headers from header file")
headersFile = open(headerloc,'r')
headers = headersFile.read().splitlines()
headersFile.close()
# ---------------------------------------------------
#constructing grid by scaling
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(concatenated_data[0]),4))
#for 4860
incident = concatenated_data[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
            concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
# ---------------------------------------------------
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print("data arranged")
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(concatenated_data),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
# ---------------------------------------------------
savetxt('peaks_Padova_inst_6', max_values, delimiter='\t')
|
gpl-2.0
| -5,176,362,807,463,443,000
| 32.490066
| 109
| 0.636741
| false
| 3.170533
| false
| false
| false
|
luoshao23/ML_algorithm
|
luolearn/metrics/classification.py
|
1
|
1875
|
import numpy as np
from scipy.sparse import csr_matrix
from ..utils import column_or_1d
from ..utils import check_consistent_length
from ..utils.multiclass import type_of_target
from ..utils.sparsefuncs import count_nonzero
def _check_targets(y_true, y_pred):
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true)
type_pred = type_of_target(y_pred)
y_type = set([type_true, type_pred])
if y_type == set(["binary", "multiclass"]):
y_type = set(["multiclass"])
if len(y_type) > 1:
        raise ValueError("Cannot handle mix of {0} and {1} targets".format(type_true, type_pred))
y_type = y_type.pop()
if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
        if y_type == "binary":
unique_values = np.union1d(y_true, y_pred)
if len(unique_values) > 2:
y_type = "multiclass"
if y_type.startswith('multilabel'):
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = 'multilabel-indicator'
return y_type, y_true, y_pred
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
differing_labels = count_nonzero(y_true - y_pred, axis=1)
score = differing_labels == 0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
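# Usage sketch (illustrative arrays):
#
#   y_true = np.array([0, 1, 1, 0])
#   y_pred = np.array([0, 1, 0, 0])
#   accuracy_score(y_true, y_pred)                   # fraction correct -> 0.75
#   accuracy_score(y_true, y_pred, normalize=False)  # number correct -> 3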
|
mit
| -3,364,809,980,190,395,400
| 30.25
| 72
| 0.629333
| false
| 3.301056
| false
| false
| false
|
amboycharlie/Child-Friendly-LCMS
|
leonardo/module/web/widgets/mixins.py
|
1
|
6783
|
from __future__ import unicode_literals
import json
from django.db import models
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from .const import PAGINATION_CHOICES
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
class ListWidgetMixin(models.Model):
    """Provide an API for listing items (a usage sketch follows after this class)
"""
objects_per_page = models.PositiveIntegerField(
verbose_name=_('Objects per page'), blank=True, default=6)
objects_per_row = models.PositiveIntegerField(
verbose_name=_('Objects per row'), blank=True, default=3)
pagination_style = models.CharField(
verbose_name=_("Pagination Style"), max_length=50,
choices=PAGINATION_CHOICES, default='paginator')
def get_items(self, request=None):
'''returns queryset or array of items for listing'''
raise NotImplementedError
def filter_items(self, items):
'''perform filtering items by specific criteria'''
return items
def set_items(self, items):
'''just setter for items'''
self._items = items
@cached_property
def items(self):
'''access for filtered items'''
if hasattr(self, '_items'):
return self.filter_items(self._items)
self._items = self.get_items()
return self.filter_items(self._items)
def populate_items(self, request):
'''populate and returns filtered items'''
self._items = self.get_items(request)
return self.items
@cached_property
def get_list_template(self):
'''returns base list template by pagination_style'''
return "base/widget/list/_%s.html" % self.pagination_style
@cached_property
def get_rows(self):
        '''returns items grouped into rows, e.g.
        [[item1, item2, item3], [item4, item5, ...]]'''
rows = []
row = []
for i, item in enumerate(self.items):
if self.objects_per_row == i:
rows.append(row)
row = []
i = 0
row.append(item)
rows.append(row)
return rows
@cached_property
def columns_classes(self):
        '''returns grid column width classes (md, sm, xs) derived from objects_per_row'''
md = 12 / self.objects_per_row
sm = None
if self.objects_per_row > 2:
sm = 12 / (self.objects_per_row / 2)
return md, (sm or md), 12
@cached_property
def get_pages(self):
'''returns pages with rows'''
pages = []
page = []
for i, item in enumerate(self.get_rows):
if self.objects_per_page == i:
pages.append(page)
page = []
i = 0
page.append(item)
pages.append(page)
return pages
@cached_property
def needs_pagination(self):
if self.objects_per_page == 0:
return False
if len(self.items) > self.objects_per_page \
or len(self.get_pages[0]) >= self.objects_per_page:
return True
return False
@cached_property
def get_item_template(self):
'''returns template for one item from queryset'''
return "widget/%s/_item.html" % self.widget_name
def __init__(self, *args, **kwargs):
super(ListWidgetMixin, self).__init__(*args, **kwargs)
get_items = getattr(self, 'get_items', None)
render = getattr(self, 'render', None)
if not callable(get_items) or not callable(render):
raise Exception('bases on ListWidgetMixin must '
'have implemented get_items or render method')
class Meta:
abstract = True
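# Usage sketch for ListWidgetMixin (the `Widget` base class and `Article` model
# are hypothetical placeholders; `render` is expected to come from the widget
# base class, while the mixin itself only needs `get_items`):
#
#   class ArticleListWidget(ListWidgetMixin, Widget):
#       def get_items(self, request=None):
#           return Article.objects.all()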
class ContentProxyWidgetMixin(models.Model):
"""Provide basic fields and routines
    for loading and caching data from an external resource.
    Define your implementation for getting the data in ``get_data``
    and use the ``data`` property in your templates.
    A concrete sketch follows after this class.
"""
source_address = models.CharField(
verbose_name=_("Source Address"), max_length=255)
cache_validity = models.PositiveIntegerField(
verbose_name=_('Cache validity'), default=3600)
cache_data = models.TextField(
verbose_name=_("Cache data"), blank=True)
cache_updated = models.DateTimeField(
verbose_name=_('Cache update'),
editable=False, null=True, blank=True)
@cached_property
def address_parser(self):
return urlparse(self.source_address)
@cached_property
def get_port(self):
"""returns parsed port from ``source_address``
"""
return self.address_parser.port
@cached_property
def get_host(self):
"""returns parsed host from ``source_address``
"""
return self.address_parser.hostname
    def is_obsolete(self):
        """returns True if the data is obsolete and needs revalidation
"""
if self.cache_updated:
now = timezone.now()
delta = now - self.cache_updated
if delta.seconds < self.cache_validity:
return False
return True
    def update_cache(self, data=None):
        """call with new data, or set ``self.cache_data`` yourself and then call this
"""
if data:
self.cache_data = data
self.cache_updated = timezone.now()
self.save()
def get_data(self, *args, **kwargs):
"""define your behavior for loading raw data
"""
raise NotImplementedError
@property
    def data(self):
        """this property just calls ``get_data``,
        but here you can serialize your data or render it as html;
        the result is saved to ``self.cache_data``
        and is also accessible from templates
"""
if self.is_obsolete():
self.update_cache(self.get_data())
return self.cache_data
class Meta:
abstract = True
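# Sketch of a concrete proxy widget (hypothetical endpoint; assumes the
# `requests` library would be available in a real implementation):
#
#   class FeedWidget(ContentProxyWidgetMixin):
#       def get_data(self):
#           return requests.get(self.source_address, timeout=5).text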
class JSONContentMixin(object):
    """expects JSON-serializable data from the ``get_data`` method
"""
@property
def data(self):
"""load and cache data in json format
"""
if self.is_obsolete():
self.cache_data = json.dumps(self.get_data())
self.update_cache()
return json.loads(self.cache_data)
class AuthContentProxyWidgetMixin(models.Model):
"""widget mixin for getting remote content with credentials
"""
username = models.CharField(
verbose_name=_("Username"), max_length=255, blank=True, null=True)
password = models.CharField(
verbose_name=_('Password'), max_length=255, blank=True, null=True)
token = models.CharField(
verbose_name=_('API Token'), max_length=255, blank=True, null=True)
class Meta:
abstract = True
|
apache-2.0
| 7,673,815,014,192,175,000
| 27.620253
| 75
| 0.600914
| false
| 4.290323
| false
| false
| false
|
AQuadroTeam/CellsCycle
|
doc/tests/list-communication/KeyTest.py
|
1
|
6180
|
from CellCycle.ChainModule.ListThread import *
from start import loadSettings
from start import loadLogger
from CellCycle.MemoryModule.calculateSon import calculateSonId
def add_check():
currentProfile = {"profile_name": "alessandro_fazio", "key_pair": "AWSCellCycle", "branch": "ListUtilities"}
settings_to_launch = loadSettings(currentProfile=currentProfile)
logger_to_launch = loadLogger(settings_to_launch)
n1 = Node("1", "172.10.1.1", settings_to_launch.getIntPort(), settings_to_launch.getExtPort(), "0", "19")
n2 = Node("2", "172.10.1.2", settings_to_launch.getIntPort(), settings_to_launch.getExtPort(), "20", "39")
n3 = Node("3", "172.10.1.3", settings_to_launch.getIntPort(), settings_to_launch.getExtPort(), "40", "59")
n4 = Node("4", "172.10.1.4", settings_to_launch.getIntPort(), settings_to_launch.getExtPort(), "60", "79")
n5 = Node("5", "172.10.1.5", settings_to_launch.getIntPort(), settings_to_launch.getExtPort(), "80", "99")
i3 = ListThread(master_of_master=n1, master=n2, myself=n3, slave=n4, slave_of_slave=n5, logger=logger_to_launch,
settings=settings_to_launch, name=n3.id)
i4 = ListThread(master_of_master=n2, master=n3, myself=n4, slave=n5, slave_of_slave=n1, logger=logger_to_launch,
settings=settings_to_launch, name=n4.id)
i5 = ListThread(master_of_master=n3, master=n4, myself=n5, slave=n1, slave_of_slave=n2, logger=logger_to_launch,
settings=settings_to_launch, name=n5.id)
i1 = ListThread(master_of_master=n4, master=n5, myself=n1, slave=n2, slave_of_slave=n3, logger=logger_to_launch,
settings=settings_to_launch, name=n1.id)
i2 = ListThread(master_of_master=n5, master=n1, myself=n2, slave=n3, slave_of_slave=n4, logger=logger_to_launch,
settings=settings_to_launch, name=n2.id)
# pretend that we add the new node
m_o = MemoryObject(n1, n2, n3, n4, n5)
new_min_max_key = keyCalcToCreateANewNode(m_o).newNode
new_node_id_to_add = str(calculateSonId(float(n3.id), float(n4.id)))
new_node_instance_to_add = Node(new_node_id_to_add, None, settings_to_launch.getIntPort(),
settings_to_launch.getExtPort(),
new_min_max_key.min_key, new_min_max_key.max_key)
'''
logger_to_launch.debug("########## BEFORE ADD ############")
i1.print_relatives()
i2.print_relatives()
i3.print_relatives()
i4.print_relatives()
i5.print_relatives()
'''
logger_to_launch.debug("########## AFTER ADD #############")
i4.change_added_keys_to(n3.id)
i4.test_update(source_id=n3.id, target_relative_id=n4.id, node_to_add=new_node_instance_to_add)
i4.change_parents_from_list()
i5.change_added_keys_to(n3.id)
i5.test_update(source_id=n3.id, target_relative_id=n4.id, node_to_add=new_node_instance_to_add)
i5.change_parents_from_list()
i1.change_added_keys_to(n3.id)
i1.test_update(source_id=n3.id, target_relative_id=n4.id, node_to_add=new_node_instance_to_add)
i1.change_parents_from_list()
i2.change_added_keys_to(n3.id)
i2.test_update(source_id=n3.id, target_relative_id=n4.id, node_to_add=new_node_instance_to_add)
i2.change_parents_from_list()
i3.change_added_keys_to(n3.id)
i3.test_update(source_id=n3.id, target_relative_id=n4.id, node_to_add=new_node_instance_to_add)
i3.change_parents_from_list()
i1.print_relatives()
i2.print_relatives()
i3.print_relatives()
i4.print_relatives()
i5.print_relatives()
def dead_check():
currentProfile = {"profile_name": "alessandro_fazio", "key_pair": "AWSCellCycle", "branch": "ListUtilities"}
settings_to_launch = loadSettings(currentProfile=currentProfile)
logger_to_launch = loadLogger(settings_to_launch)
n1 = Node("1", "172.10.1.1", settings_to_launch.getIntPort(), settings_to_launch.getExtPort(), "12", "19")
n2 = Node("2", "172.10.1.2", settings_to_launch.getIntPort(), settings_to_launch.getExtPort(), "20", "39")
n3 = Node("3", "172.10.1.3", settings_to_launch.getIntPort(), settings_to_launch.getExtPort(), "40", "59")
n4 = Node("4", "172.10.1.4", settings_to_launch.getIntPort(), settings_to_launch.getExtPort(), "60", "79")
n5 = Node("5", "172.10.1.5", settings_to_launch.getIntPort(), settings_to_launch.getExtPort(), "80", "11")
i3 = ListThread(master_of_master=n1, master=n2, myself=n3, slave=n4, slave_of_slave=n5, logger=logger_to_launch,
settings=settings_to_launch, name=n3.id)
i4 = ListThread(master_of_master=n2, master=n3, myself=n4, slave=n5, slave_of_slave=n1, logger=logger_to_launch,
settings=settings_to_launch, name=n4.id)
i5 = ListThread(master_of_master=n3, master=n4, myself=n5, slave=n1, slave_of_slave=n2, logger=logger_to_launch,
settings=settings_to_launch, name=n5.id)
i1 = ListThread(master_of_master=n4, master=n5, myself=n1, slave=n2, slave_of_slave=n3, logger=logger_to_launch,
settings=settings_to_launch, name=n1.id)
'''
logger_to_launch.debug("########## BEFORE ADD ############")
i1.print_relatives()
i2.print_relatives()
i3.print_relatives()
i4.print_relatives()
i5.print_relatives()
'''
logger_to_launch.debug("########## AFTER DEAD #############")
i4.change_dead_keys_to(n3.id)
i4.test_remove(target_id=n2.id, source_id=n3.id, target_relative_id=n1.id)
i4.change_parents_from_list()
i5.change_dead_keys_to(n3.id)
i5.test_remove(target_id=n2.id, source_id=n3.id, target_relative_id=n1.id)
i5.change_parents_from_list()
i1.change_dead_keys_to(n3.id)
i1.test_remove(target_id=n2.id, source_id=n3.id, target_relative_id=n1.id)
i1.change_parents_from_list()
i3.change_dead_keys_to(n3.id)
i3.test_remove(target_id=n2.id, source_id=n3.id, target_relative_id=n1.id)
i3.change_parents_from_list()
i1.print_relatives()
i3.print_relatives()
i4.print_relatives()
i5.print_relatives()
logger_to_launch.debug("this is the ip found {}".format((i1.node_list.find_memory_key(0)).target.ip))
|
mit
| -7,520,850,627,014,340,000
| 49.243902
| 116
| 0.652427
| false
| 2.71529
| true
| false
| false
|
sweon/edx-dl
|
edx_dl/common.py
|
1
|
5361
|
# -*- coding: utf-8 -*-
"""
Common type definitions and constants for edx-dl
The classes in this module represent the structure of courses in edX. The
structure is:
* A Course contains Sections
* Each Section contains Subsections
* Each Subsection contains Units
Notice that we don't represent the full tree structure for both performance
and UX reasons:
Course -> [Section] -> [SubSection] -> [Unit] -> [Video]
In the script the data structures used are:
1. The data structures to represent the course information:
Course, Section->[SubSection]
2. The data structures to represent the chosen courses and sections:
selections = {Course, [Section]}
3. The data structure of all the downloadable resources, which represents each
subsection via its URL and the resources that can be extracted from the
Units it contains:
all_units = {Subsection.url: [Unit]}
4. The units can contain multiple videos:
Unit -> [Video]
"""
class Course(object):
"""
Course class represents course information.
"""
def __init__(self, id, name, url, state):
"""
        @param id: The id of a course in edX is composed of the path
{organization}/{course_number}/{course_run}
@type id: str or None
@param name: Name of the course. The name is taken from course page
h3 header.
@type name: str
@param url: URL of the course.
@type url: str or None
@param state: State of the course. One of the following values:
* 'Not yet'
* 'Started'
@type state: str
"""
self.id = id
self.name = name
self.url = url
self.state = state
def __repr__(self):
url = self.url if self.url else "None"
return self.name + ": " + url
class Section(object):
"""
Representation of a section of the course.
"""
def __init__(self, position, name, url, navsections):
"""
@param position: Integer position of the section in the list of
sections. Starts at 1.
@type position: int
@param name: Name of the section.
@type name: str
@param url: URL of the section. None when section contains no
subsections.
@type url: str or None
@param navsections: List of navsections.
@type navsections: [NavSection]
"""
self.position = position
self.name = name
self.url = url
self.navsections = navsections
class NavSection(object):
def __init__(self, position, name, subsections):
self.position = position
self.name = name
self.subsections = subsections
class SubSection(object):
"""
Representation of a subsection in a section.
"""
def __init__(self, position, name, url):
"""
@param position: Integer position of the subsection in the subsection
list. Starts at 1.
@type position: int
@param name: Name of the subsection.
@type name: str
@param url: URL of the subsection.
@type url: str
"""
self.position = position
self.name = name
self.url = url
def __repr__(self):
return self.name + ": " + self.url
class Unit(object):
"""
Representation of a single unit of the course.
"""
def __init__(self, videos, resources_urls, position):
"""
@param videos: List of videos present in the unit.
@type videos: [Video]
@param resources_urls: List of additional resources that come along
with the unit. Resources include files with certain extensions
and youtube links.
@type resources_urls: [str]
@param position: Integer position of the subsection in the subsection
list. Starts at 1.
@type position: int
"""
self.position = position
self.videos = videos
self.resources_urls = resources_urls
class Video(object):
"""
Representation of a single video.
"""
def __init__(self, video_youtube_url, available_subs_url,
sub_template_url, mp4_urls):
"""
@param video_youtube_url: Youtube link (if any).
@type video_youtube_url: str or None
@param available_subs_url: URL to the available subtitles.
@type available_subs_url: str
@param sub_template_url: ???
@type sub_template_url: str
@param mp4_urls: List of URLs to mp4 video files.
@type mp4_urls: [str]
"""
self.video_youtube_url = video_youtube_url
self.available_subs_url = available_subs_url
self.sub_template_url = sub_template_url
self.mp4_urls = mp4_urls
class ExitCode(object):
"""
Class that contains all exit codes of the program.
"""
OK = 0
MISSING_CREDENTIALS = 1
WRONG_EMAIL_OR_PASSWORD = 2
MISSING_COURSE_URL = 3
INVALID_COURSE_URL = 4
UNKNOWN_PLATFORM = 5
NO_DOWNLOADABLE_VIDEO = 6
YOUTUBE_DL_CMD = ['youtube-dl', '--ignore-config']
DEFAULT_CACHE_FILENAME = 'edx-dl.cache'
DEFAULT_FILE_FORMATS = ['e?ps', 'pdf', 'txt', 'doc', 'xls', 'ppt',
'docx', 'xlsx', 'pptx', 'odt', 'ods', 'odp', 'odg',
'zip', 'rar', 'gz', 'mp3', 'R', 'Rmd', 'ipynb', 'py']
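# Illustrative sketch (not part of the original module): a minimal example of
# how the classes above are assumed to nest, following the module docstring.
# All literal values below are made-up placeholders.
if __name__ == "__main__":
    demo_video = Video(video_youtube_url=None, available_subs_url="",
                       sub_template_url="", mp4_urls=["http://example.com/v.mp4"])
    demo_unit = Unit(videos=[demo_video], resources_urls=[], position=1)
    demo_subsection = SubSection(position=1, name="Intro", url="http://example.com/sub")
    demo_navsection = NavSection(position=1, name="Week 1", subsections=[demo_subsection])
    demo_section = Section(position=1, name="Getting started",
                           url="http://example.com/sec", navsections=[demo_navsection])
    demo_course = Course(id="org/number/run", name="Demo course",
                         url="http://example.com/course", state="Started")
    # The selections and all_units shapes described in the docstring:
    selections = {demo_course: [demo_section]}
    all_units = {demo_subsection.url: [demo_unit]}
    print(selections, all_units)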
|
lgpl-3.0
| -4,726,805,692,538,409,000
| 27.365079
| 79
| 0.598769
| false
| 3.979955
| false
| false
| false
|
jbasko/pytest-random-order
|
random_order/bucket_types.py
|
1
|
1572
|
import functools
import os.path
from collections import OrderedDict
bucket_type_keys = OrderedDict()
def bucket_type_key(bucket_type):
"""
Registers a function that calculates test item key for the specified bucket type.
"""
def decorator(f):
@functools.wraps(f)
def wrapped(item, session):
key = f(item)
if session is not None:
for handler in session.random_order_bucket_type_key_handlers:
key = handler(item, key)
return key
bucket_type_keys[bucket_type] = wrapped
return wrapped
return decorator
@bucket_type_key('global')
def get_global_key(item):
return None
@bucket_type_key('package')
def get_package_key(item):
if not hasattr(item, "module"):
return os.path.split(item.location[0])[0]
return item.module.__package__
@bucket_type_key('module')
def get_module_key(item):
return item.location[0]
@bucket_type_key('class')
def get_class_key(item):
if not hasattr(item, "cls"):
return item.location[0]
if item.cls:
return item.module.__name__, item.cls.__name__
else:
return item.module.__name__
@bucket_type_key('parent')
def get_parent_key(item):
return item.parent
@bucket_type_key('grandparent')
def get_grandparent_key(item):
return item.parent.parent
@bucket_type_key('none')
def get_none_key(item):
raise RuntimeError('When shuffling is disabled (bucket_type=none), item key should not be calculated')
bucket_types = bucket_type_keys.keys()
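# Illustrative sketch (not part of the original module): registering an extra
# bucket key with the decorator above. The 'filename' bucket name and the key
# function are hypothetical; in real use the registration would happen before
# bucket_types is read, since on Python 2 .keys() above returns a snapshot.
@bucket_type_key('filename')
def get_filename_key(item):
    # Bucket tests by the file they were collected from.
    return item.location[0]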
|
mit
| 7,146,111,292,078,620,000
| 20.534247
| 106
| 0.646947
| false
| 3.62212
| false
| false
| false
|
okfse/froide
|
froide/foirequest/urls.py
|
1
|
3247
|
from django.utils.six import text_type as str
from django.core.urlresolvers import reverse
from django.conf.urls import patterns
from django.utils.translation import pgettext
from django.shortcuts import redirect
from .models import FoiRequest
urlpatterns = patterns("froide.foirequest.views",
(r'^%s/$' % pgettext('URL part', 'not-foi'), 'list_requests',
{'not_foi': True}, 'foirequest-list_not_foi'),
# Old feed URL
(r'^%s/feed/$' % pgettext('URL part', 'latest'),
lambda r: redirect(reverse('foirequest-list_feed_atom'), permanent=True),
{}, 'foirequest-feed_latest_atom'),
(r'^%s/rss/$' % pgettext('URL part', 'latest'),
lambda r: redirect(reverse('foirequest-list_feed'), permanent=True),
{}, 'foirequest-feed_latest'),
(r'^unchecked/$', 'list_unchecked', {}, 'foirequest-list_unchecked'),
# Translators: part in /request/to/public-body-slug URL
(r'^submit$', 'submit_request', {}, 'foirequest-submit_request'),
)
foirequest_urls = [
(r'^$', 'list_requests', {}, 'foirequest-list'),
(r'^feed/$', 'list_requests',
{'feed': 'atom'}, 'foirequest-list_feed_atom'),
(r'^rss/$', 'list_requests',
{'feed': 'rss'}, 'foirequest-list_feed'),
# Translators: part in request filter URL
(r'^%s/(?P<topic>[-\w]+)/$' % pgettext('URL part', 'topic'), 'list_requests', {},
'foirequest-list'),
(r'^%s/(?P<topic>[-\w]+)/feed/$' % pgettext('URL part', 'topic'), 'list_requests',
{'feed': 'atom'}, 'foirequest-list_feed_atom'),
(r'^%s/(?P<topic>[-\w]+)/rss/$' % pgettext('URL part', 'topic'), 'list_requests',
{'feed': 'rss'}, 'foirequest-list_feed'),
# Translators: part in request filter URL
(r'^%s/(?P<tag>[-\w]+)/$' % pgettext('URL part', 'tag'), 'list_requests', {},
'foirequest-list'),
(r'^%s/(?P<tag>[-\w]+)/feed/$' % pgettext('URL part', 'tag'), 'list_requests',
{'feed': 'atom'}, 'foirequest-list_feed_atom'),
(r'^%s/(?P<tag>[-\w]+)/rss/$' % pgettext('URL part', 'tag'), 'list_requests',
{'feed': 'rss'}, 'foirequest-list_feed'),
# Translators: part in request filter URL
(r'^%s/(?P<public_body>[-\w]+)/$' % pgettext('URL part', 'to'), 'list_requests', {},
'foirequest-list'),
(r'^%s/(?P<public_body>[-\w]+)/feed/$' % pgettext('URL part', 'to'), 'list_requests',
{'feed': 'atom'}, 'foirequest-list_feed_atom'),
(r'^%s/(?P<public_body>[-\w]+)/rss/$' % pgettext('URL part', 'to'), 'list_requests',
{'feed': 'rss'}, 'foirequest-list_feed'),
] + [(r'^(?P<status>%s)/$' % str(urlinfo[0]), 'list_requests', {},
'foirequest-list') for urlinfo in FoiRequest.STATUS_URLS
] + [(r'^(?P<status>%s)/feed/$' % str(urlinfo[0]), 'list_requests',
{'feed': 'atom'},
'foirequest-list_feed_atom') for urlinfo in FoiRequest.STATUS_URLS
] + [(r'^(?P<status>%s)/rss/$' % str(urlinfo[0]), 'list_requests',
{'feed': 'rss'},
'foirequest-list_feed') for urlinfo in FoiRequest.STATUS_URLS]
urlpatterns += patterns("froide.foirequest.views",
*foirequest_urls
)
urlpatterns += patterns("froide.foirequest.views",
*[(r'^(?P<jurisdiction>[-\w]+)/%s' % r[0][1:], r[1], r[2], r[3]) for r in foirequest_urls]
)
|
mit
| -6,307,762,861,912,977,000
| 42.293333
| 94
| 0.581152
| false
| 3.028918
| false
| false
| false
|
bluesquall/okeanidanalysis
|
examples/sensors/rowe-adcp-bottom-track-summary.py
|
1
|
3719
|
#!/bin/env python
"""
e.g.: $ python rowe-adcp-bottom-track-summary.py /mbari/LRAUV/makai/missionlogs/devel/20150617-ADCP-in-tank/20150617T172914/ADCP-2015061717.ENS.mat
"""
import numpy as np
import scipy as sp
import scipy.io
import matplotlib.pyplot as plt
def plot_adcp_bottom_track_summary(infile, save=True, show=True, autoscale_ylims=False):
if infile.endswith('ENS.mat'):
bt = sp.io.loadmat(infile)['E000010'].squeeze()
idx = 14 # TODO: There may be some sort of misalignment in ENS files...
vr = bt[:,idx:idx+4].squeeze()
snr = bt[:,idx+4:idx+8].squeeze()
amp = bt[:,idx+8:idx+12].squeeze()
cor = bt[:,idx+12:idx+16].squeeze()
bv = bt[:,idx+16:idx+20].squeeze()
bnum = bt[:,idx+20:idx+24].squeeze()
iv = bt[:,idx+24:idx+28].squeeze()
inum = bt[:,idx+28:idx+32].squeeze()
elif infile.endswith('mat'):
import okeanidanalysis
s = okeanidanalysis.logs.OkeanidLog(infile)
vr, t_vr = s.timeseries('Rowe_600.vertical_range')
snr, t_snr = s.timeseries('Rowe_600.signal_to_noise')
amp, t_amp = s.timeseries('Rowe_600.bottom_track_amplitude')
cor, t_cor = s.timeseries('Rowe_600.bottom_track_correlation')
bv, t_bv = s.timeseries('Rowe_600.bottom_track_beam_velocity')
iv, t_iv = s.timeseries('Rowe_600.bottom_track_instrument_velocity')
fig, axs = plt.subplots(6, 4, sharex=True, sharey='row', figsize=(6.5,9))
vrax = axs[0]
snrax = axs[1]
ampax = axs[2]
corax = axs[3]
bvax = axs[4]
ivax = axs[5]
for i in range(4):
vrax[i].plot(vr[:,i])
snrax[i].plot(snr[:,i])
ampax[i].plot(amp[:,i])
corax[i].plot(cor[:,i])
bvax[i].plot(bv[:,i])
ivax[i].plot(iv[:,i])
ylkw = dict(rotation='horizontal', horizontalalignment='right')
vrax[0].set_ylabel('vertical\nrange [m]', **ylkw)
snrax[0].set_ylabel('SNR [dB]', **ylkw)
ampax[0].set_ylabel('amplitude [dB]', **ylkw)
corax[0].set_ylabel('correlation [-]', **ylkw)
bvax[0].set_ylabel('beam\nvelocity [m/s]', **ylkw)
ivax[0].set_ylabel('instrument\nvelocity [m/s]', **ylkw)
ivax[0].set_xlabel('ensemble number')
for i, ax in enumerate(vrax): ax.set_title('beam {0}'.format(i))
if not autoscale_ylims:
vrax[0].set_ylim([0,125])
snrax[0].set_ylim([0,100])
ampax[0].set_ylim([0,200])
corax[0].set_ylim([0,1])
bvax[0].set_ylim([-2,2])
ivax[0].set_ylim([-2,2])
# TODO: Get the lines below to work.
#print([t.get_text() for t in ivax[0].xaxis.get_majorticklabels()])
#ivax[0].xaxis.set_ticklabels([t.get_text() for t in ivax[0].xaxis.get_majorticklabels()], rotation='vertical') # should propagate to the other x axes
for ax in ivax:
plt.sca(ax)
plt.setp(plt.xticks()[1], rotation=90, fontsize=6)
fig.suptitle(infile.rsplit('/')[-1])
plt.subplots_adjust(left=0.25, right=0.95, top=0.9, bottom=0.1, wspace=0)
if save: fig.savefig('/tmp/{0}.png'.format(infile.rsplit('/')[-1]))
if show: plt.show()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='plot summary of ADCP bottom track data')
parser.add_argument('-V', '--version', action='version',
version='%(prog)s 0.0.1',
help='display version information and exit')
parser.add_argument('infile', metavar='filename',
type=str, help='LRAUV slate or RTI .ENS unpacked into .mat')
parser.add_argument('-y', '--autoscale-ylims', action='store_true')
args = parser.parse_args()
plot_adcp_bottom_track_summary(**args.__dict__)
|
mit
| -4,899,205,760,434,004,000
| 37.340206
| 149
| 0.600161
| false
| 2.958632
| false
| false
| false
|
ofilipowicz/owndb
|
store/views.py
|
1
|
34136
|
from django.contrib.auth.models import User
from django.views.generic import ListView, DetailView, View, CreateView, DeleteView
from django.views.generic.base import TemplateView
from django.db.models import Q
from django.core.urlresolvers import reverse, reverse_lazy
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.http import HttpRequest, HttpResponse, HttpResponseRedirect, JsonResponse, Http404
from django.template.response import TemplateResponse as TR
from django.template import RequestContext
from django.template.defaultfilters import slugify
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.shortcuts import render, get_object_or_404, redirect
from allauth.account.decorators import verified_email_required
from friendship.models import Friend, Follow
from store.forms import FormShareForm # Check if guest is a logged user
from store import models
from datetime import datetime
import re, json
class VerifiedMixin(object):
@method_decorator(verified_email_required)
def dispatch(self, *args, **kwargs):
return super(VerifiedMixin, self).dispatch(*args, **kwargs)
class FormAdd(VerifiedMixin, TemplateView):
template_name = 'store/form_add.html'
def get(self, request, *args, **kwargs):
project = get_object_or_404(models.Project, pk=self.kwargs['project'])
if project.owner == request.user:
return super(FormAdd, self).get(request, *args, **kwargs)
else:
messages.error(request, _("You are not allowed to add form."))
raise Http404()
def get_context_data(self, **kwargs):
context = super(FormAdd, self).get_context_data(**kwargs)
context['project'] = models.Project.objects.get(pk=self.kwargs['project'])
return context
def post(self, request, *args, **kwargs):
context = self.get_context_data()
print(request.POST)
print(request.FILES)
if request.POST.get('connection') == "forms":
if request.POST.get('form'):
fields = "<ul>"
for field in models.FormField.objects.filter(form=request.POST.get('form')).exclude(type__pk__in=[6,8,9,10]).order_by('position'):
fields += '<li><input type="checkbox" class="spec_field" name="'+str(field.pk)+'" /> <span>' + field.caption + '</span></li>'
return HttpResponse(fields + "</ul>")
forms = ""
for form in models.Form.objects.filter(project=self.kwargs['project']):
forms += '<option value="' + str(form.pk) + '">' + form.title + '</option>'
return HttpResponse(forms)
else:
form_title = request.POST.get('title')
if form_title.isspace() or form_title=='':
return HttpResponse(_("Form name is invalid!"))
names = json.loads(request.POST.get('names'))
types = json.loads(request.POST.get('types'))
settings = json.loads(request.POST.get('settings'))
c = 0
for type in types:
if type == "LabelImage":
c = c + 1
if c > 0:
if request.FILES:
if len(request.FILES) < c:
return HttpResponse(_("You should provide all images for labels."))
else:
return HttpResponse(_("You should provide image for label."))
p = models.Project.objects.get(pk=self.kwargs['project'])
f = models.Form(
title=form_title,
project=p,
slug = slugify(form_title)
)
f.save()
try:
i = 0
for name in names:
t = models.Type.objects.get(name=types[i])
s = settings[i]
ff = models.FormField(
form=f,
type=t,
caption=name,
settings=s,
position=i
)
ff.save()
if (t.name == "LabelText"):
data = models.DataText(
formfield = ff,
data = s
)
data.save()
elif (t.name == "LabelImage"):
imgname = "labelimage" + str(i)
img = models.Image(
formfield=ff,
image=request.FILES[imgname]
)
img.save()
elif (t.name == "Connection"):
d = s.split(';')
cf = models.Form.objects.get(pk=d[0])
c = models.Connection(
formfield = ff,
form = cf
)
c.save()
i += 1
except:
f.delete()
return HttpResponse(_("Error occurred while creating form!"))
messages.success(request, _("Form successfully added!"))
return HttpResponse("OK")
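# Illustrative note (an assumption, not part of the original code): FormAdd.post
# above expects an AJAX payload in which 'names', 'types' and 'settings' are
# parallel JSON-encoded lists, roughly along the lines of
#   title=My form
#   names=["First name", "Photo"]
#   types=["Text", "LabelImage"]
#   settings=["", ""]
# plus a request.FILES entry named "labelimage<i>" for every LabelImage field.
# The concrete type names come from the models.Type table, so the values shown
# here are only guesses for illustration.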
class FormEdit(VerifiedMixin, TemplateView):
template_name = 'store/form_edit.html'
def get(self, request, *args, **kwargs):
form = get_object_or_404(models.Form, pk=self.kwargs['form'])
if form.project.owner == self.request.user and not models.FormInstance.objects.filter(form__pk=self.kwargs['form']).exists():
return super(FormEdit, self).get(request, *args, **kwargs)
else:
messages.error(request, _("You cannot edit this form."))
raise Http404()
def get_context_data(self, **kwargs):
context = super(FormEdit, self).get_context_data(**kwargs)
context['project'] = models.Project.objects.get(pk=self.kwargs['project'])
context['form'] = models.Form.objects.get(pk=self.kwargs['form'])
fields = models.FormField.objects.filter(form=self.kwargs['form']).order_by('position')
for field in fields:
if field.type.pk == 7:
v = field.settings.split(';')
fpk = v[0]
field.fpk = fpk
con = models.Form.objects.get(pk=fpk)
field.conname = con.title
del v[0]
field.visibles = models.FormField.objects.filter(form=fpk).exclude(type__pk__in=[6,8,9,10]).order_by('pk')
for vis in field.visibles:
if str(vis.pk) in v:
vis.checked = True
context['fields'] = fields
return context
def post(self, request, *args, **kwargs):
context = self.get_context_data()
print(request.POST)
print(request.FILES)
if request.POST.get('connection') == "forms":
if request.POST.get('form'):
fields = "<ul>"
for field in models.FormField.objects.filter(form=request.POST.get('form')).exclude(type__pk__in=[6,8,9,10]).order_by('position'):
fields += '<li><input type="checkbox" class="spec_field" name="'+str(field.pk)+'" /> <span>' + field.caption + '</span></li>'
return HttpResponse(fields + "</ul>")
forms = ""
for form in models.Form.objects.filter(project=self.kwargs['project']):
forms += '<option value="' + str(form.pk) + '">' + form.title + '</option>'
return HttpResponse(forms)
else:
form_title = request.POST.get('title')
if form_title.isspace() or form_title=='':
return HttpResponse(_("Form name is invalid!"))
names = json.loads(request.POST.get('names'))
types = json.loads(request.POST.get('types'))
settings = json.loads(request.POST.get('settings'))
c = 0
for type in types:
if type == "LabelImage":
c = c + 1
if c > 0:
if request.FILES:
if len(request.FILES) < c:
return HttpResponse(_("You should provide all images for labels."))
else:
return HttpResponse(_("You should provide image for label."))
f = models.Form.objects.get(pk=self.kwargs['form'])
f.title = form_title
f.slug = slugify(form_title)
f.save()
models.FormInstance.objects.filter(form=f).delete()
models.FormField.objects.filter(form=f).delete()
try:
i = 0
for name in names:
t = models.Type.objects.get(name=types[i])
s = settings[i]
ff = models.FormField(
form=f,
type=t,
caption=name,
settings=s,
position=i
)
ff.save()
if t.name == "LabelText":
data = models.DataText(
formfield = ff,
data = s
)
data.save()
elif t.name == "LabelImage":
imgname = "labelimage" + str(i)
img = models.Image(
formfield=ff,
image=request.FILES[imgname]
)
img.save()
elif t.name == "Connection":
d = s.split(';')
cf = models.Form.objects.get(pk=d[0])
c = models.Connection(
formfield = ff,
form = cf
)
c.save()
i += 1
for c in models.Connection.objects.filter(form=f):
models.FormField.objects.filter(pk=c.formfield.pk).delete()
except:
f.delete()
return HttpResponse(_("Error occurred while saving changes!"))
messages.success(request, _("Form updated successfully!"))
return HttpResponse("OK")
class FormInstanceAdd(VerifiedMixin, TemplateView):
template_name = 'store/forminstance_add.html'
def get(self, request, *args, **kwargs):
form = get_object_or_404(models.Form, pk=self.kwargs['form'])
sharing = models.Sharing.objects.filter(owner=self.request.user).filter(form=form)
if form.project.owner == self.request.user or sharing:
return super(FormInstanceAdd, self).get(request, *args, **kwargs)
else:
messages.error(request, _("You are not allowed to add instances to this form."))
raise Http404()
def get_context_data(self, **kwargs):
context = super(FormInstanceAdd, self).get_context_data(**kwargs)
context['project'] = models.Project.objects.get(pk=self.kwargs['project'])
context['form'] = models.Form.objects.get(pk=self.kwargs['form'])
fields = models.FormField.objects.filter(form=self.kwargs['form']).order_by('position')
for field in fields:
if models.Image.objects.filter(formfield=field, forminstance__isnull=True).exists():
field.labelimage = models.Image.objects.get(formfield=field, forminstance__isnull=True)
elif field.type.pk == 7:
field.fpk = field.settings.split(';')[0]
context['fields'] = fields
return context
def post(self, request, *args, **kwargs):
context = self.get_context_data()
print(request.POST)
print(request.FILES)
if request.POST.get('connection') == "instances":
visibles = models.FormField.objects.get(pk=request.POST.get('formfield')).settings.split(';')
del visibles[0]
fpk = request.POST.get('form')
forms = '<div class="table-responsive"><table class="instances table table-hover"><thead><tr>'
for field in models.FormField.objects.filter(form=fpk).order_by('position'):
if (str(field.pk) in visibles and field.type.pk != 5 and field.type.pk != 8 and field.type.pk != 9 and field.type.pk != 10):
forms += '<th>'+ field.caption +'</th>'
forms += "</tr></thead><tbody>"
i = 0
for instance in models.FormInstance.objects.filter(form=models.Form.objects.get(pk=fpk)).order_by('-pk'):
forms += '<tr class="cmodal-select" name="'+str(instance.pk)+'">'
for field in models.FormField.objects.filter(form=fpk).order_by('position'):
if (str(field.pk) in visibles and field.type.pk != 8 and field.type.pk != 9 and field.type.pk != 10):
if field.type.pk == 7:
insd = models.ConnectionInstance.objects.get(connection__formfield = field, forminstance = instance)
elif field.type.pk == 6:
insd = models.File.objects.get(formfield = field, forminstance = instance)
elif field.type.pk == 5:
insd = models.Image.objects.get(formfield = field, forminstance = instance)
else:
insd = models.DataText.objects.get(formfield = field, forminstance = instance)
forms += '<td>' + insd.display() + '</td>'
forms += '</tr>'
i += 1
forms += '</tbody></table></div>'
if i==0:
forms = _('Connected form is empty! There is no data to show.')
return HttpResponse(forms)
else:
fields = models.FormField.objects.filter(form=self.kwargs['form']).order_by('position')
contents = json.loads(request.POST.get('contents'))
i = 0
c = 0
for field in fields:
if field.type.pk == 6 or field.type.pk == 5:
c = c + 1
elif field.type.pk == 7:
if contents[i] == '':
return HttpResponse(_("You have to choose all instances!"))
i += 1
if c > 0:
if request.FILES:
if len(request.FILES) < c:
return HttpResponse(_("You should choose all images or files."))
else:
return HttpResponse(_("You should choose image or file."))
f = models.Form.objects.get(pk=self.kwargs['form'])
fi = models.FormInstance(
form = f,
user = self.request.user
)
if fi.form.project.owner != self.request.user:
fi.approved = False
fi.save()
i = 0
for field in fields:
if (field.type.pk != 8 and field.type.pk != 9 and field.type.pk != 10):
if field.type.pk == 7:
if contents[i] != '':
con = models.Connection.objects.get(formfield=field)
chfi = models.FormInstance.objects.get(pk=contents[i])
ins = models.ConnectionInstance(
connection=con,
forminstance = fi,
choseninstance = chfi
)
ins.save()
elif field.type.pk == 6:
filename = "file" + str(i)
file = models.File(
formfield=field,
forminstance = fi,
file=request.FILES[filename]
)
file.save()
elif field.type.pk == 5:
imgname = "image" + str(i)
img = models.Image(
formfield=field,
forminstance = fi,
image=request.FILES[imgname]
)
img.save()
else:
data = models.DataText(
formfield = field,
forminstance = fi,
data = contents[i]
)
data.save()
i += 1
messages.success(request, _("Form instance added successfully!"))
return HttpResponse("OK")
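# Illustrative note (an assumption, not part of the original code): the POST
# handler above expects 'contents' to be a JSON list aligned with the form's
# fields by position, e.g. contents=["John", "", "42"]. Connection fields carry
# the chosen FormInstance pk (an empty string when nothing is chosen), while
# image and file fields are uploaded separately in request.FILES under the
# names "image<i>" and "file<i>".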
class ProjectList(VerifiedMixin, ListView):
model = models.Project
paginate_by = 4
context_object_name = 'project_list'
def get_queryset(self):
q = self.request.GET.get('search')
if q:
ret = self.model.objects.filter(owner__pk=self.request.user.pk, title__icontains=q)
if not ret.exists():
messages.error(self.request, _("Projects with \"") + q + _("\" were not found!"))
else:
messages.success(self.request, _("List of projects with \"") + q + _("\" term."))
return ret
return self.model.objects.filter(owner__pk=self.request.user.pk).order_by('-pk')
def get_context_data(self, **kwargs):
context = super(ProjectList, self).get_context_data(**kwargs)
return context
class FormList(VerifiedMixin, ListView):
model = models.Form
paginate_by = 4
context_object_name = 'form_list'
def get(self, request, *args, **kwargs):
project = get_object_or_404(models.Project, pk=self.kwargs['project'])
if project.owner == request.user:
return super(FormList, self).get(request, *args, **kwargs)
else:
messages.error(request, _("You are not allowed to see this forms list because you are not an owner."))
raise Http404()
def get_queryset(self):
q = self.request.GET.get('search')
if q:
ret = self.model.objects.filter(project__pk=self.kwargs['project'], title__icontains=q)
if not ret.exists():
messages.error(self.request, _("Forms with \"") + q + _("\" were not found!"))
else:
messages.success(self.request, _("List of forms with \"") + q + _("\" term."))
return ret
return self.model.objects.filter(project__pk=self.kwargs['project']).order_by('-pk')
def get_context_data(self, **kwargs):
context = super(FormList, self).get_context_data(**kwargs)
context['project'] = models.Project.objects.get(pk=self.kwargs['project'])
return context
class SharingSomeones(VerifiedMixin, ListView):
model = models.Sharing
paginate_by = 2
context_object_name = 'sharedform_list'
template_name = 'store/sharing_someones.html'
def get_queryset(self):
return self.request.user.sharing_set.all()
class SharingMy(VerifiedMixin, ListView):
model = models.Sharing
paginate_by = 2
context_object_name = 'sharedform_list'
template_name = 'store/sharing_my.html'
def get_queryset(self):
return self.model.objects.filter(form__project__owner=self.request.user)
class SharingDelete(VerifiedMixin, DeleteView):
model = models.Sharing
slug_field = 'id'
slug_url_kwarg = 'shared_form'
success_url = reverse_lazy('project-list')
def get_success_url(self):
messages.success(self.request, _('Shared form successfully deleted!'))
return super(SharingDelete, self).get_success_url()
class FormInstanceList(VerifiedMixin, ListView):
model = models.FormInstance
paginate_by = 4
context_object_name = 'forminstance_list'
def get(self, request, *args, **kwargs):
form = get_object_or_404(models.Form, pk=self.kwargs['form'])
sharing = models.Sharing.objects.filter(owner=self.request.user).filter(
form=form)
if form.project.owner == self.request.user or sharing:
return super(FormInstanceList, self).get(request, *args, **kwargs)
else:
messages.error(request, _("You are not allowed to add instances to this form."))
raise Http404()
def get_queryset(self):
q = self.request.GET.get('search')
if q:
datalist = models.DataText.objects.filter(formfield__form__pk=self.kwargs['form'], forminstance__isnull=False, data__icontains=q)
instanceslist = []
for i in datalist:
instanceslist.append(i.forminstance.pk)
ret = self.model.objects.filter(Q(form__pk=self.kwargs['form']), Q(approved=True), Q(pk__in=instanceslist) | Q(date__icontains=q) | Q(user__username__icontains=q)).order_by('-pk')
if not ret.exists():
messages.error(self.request, _("Instances with \"") + q + _("\" were not found!"))
else:
messages.success(self.request, _("List of instances with \"") + q + _("\" term."))
return ret
return self.model.objects.filter(form__pk=self.kwargs['form'], approved=True).order_by('-pk')
def get_context_data(self, **kwargs):
context = super(FormInstanceList, self).get_context_data(**kwargs)
context['project'] = models.Project.objects.get(pk=self.kwargs['project'])
context['form'] = models.Form.objects.get(pk=self.kwargs['form'])
context['fields'] = models.FormField.objects.filter(form__pk=self.kwargs['form']).exclude(type__pk__in=[7,8,9,10]).order_by('position')
return context
class Dashboard(VerifiedMixin, ListView):
model = models.FormInstance
paginate_by = 10
context_object_name = 'instances'
template_name = 'store/dashboard.html'
def get_queryset(self):
return self.model.objects.filter(
form__project__owner=self.request.user,
approved=False)
@verified_email_required
def approve_instance(request, forminstance):
instance_obj = get_object_or_404(models.FormInstance, pk=forminstance)
if instance_obj.form.project.owner == request.user:
instance_obj.approved = True
instance_obj.save()
messages.success(request, _('Form instance approved'))
return HttpResponseRedirect(reverse_lazy('dashboard'))
else:
messages.error(request, _("You are not allowed to approve this instance."))
raise Http404()
class DeleteInstance(VerifiedMixin, DeleteView):
model = models.FormInstance
slug_field = 'id'
slug_url_kwarg = 'forminstance'
success_url = reverse_lazy('dashboard')
def get_success_url(self):
messages.success(self.request, _('Form instance deleted'))
return super(DeleteInstance, self).get_success_url()
class FormInstanceDetail(VerifiedMixin, DetailView):
model = models.FormInstance
context_object_name = 'forminstance'
slug_field = 'id'
slug_url_kwarg = 'forminstance'
def get(self, request, *args, **kwargs):
form = get_object_or_404(models.Form, pk=self.kwargs['form'])
if form.project.owner == self.request.user or self.get_object().user == self.request.user:
return super(FormInstanceDetail, self).get(request, *args, **kwargs)
else:
messages.error(request, _("You can't view this instance details because it wasn't added by you."))
raise Http404()
def get_context_data(self, **kwargs):
context = super(FormInstanceDetail, self).get_context_data(**kwargs)
context['project'] = models.Project.objects.get(pk=self.kwargs['project'])
context['form'] = models.Form.objects.get(pk=self.kwargs['form'])
formfield_list = models.FormField.objects.filter(form__pk=self.kwargs['form']).exclude(Q(type=10)).order_by('position')
for field in formfield_list:
if field.type.pk == 3 or field.type.pk == 4:
t = field.settings.split(';')
c = models.DataText.objects.get(formfield=field, forminstance=self.kwargs['forminstance']).data.split(';')
del t[0]
del c[0]
field.options = zip(t,c)
elif field.type.pk == 7:
field.fpk = field.settings.split(';')[0]
context['formfield_list'] = formfield_list
context['instances_count'] = models.FormInstance.objects.filter(form__pk=self.kwargs['form']).count()
return context
def post(self, request, *args, **kwargs):
form = get_object_or_404(models.Form, pk=self.kwargs['form'])
if not form.project.owner == self.request.user:
return HttpResponse(_("You can't update instances of this form because you are not an owner."))
print(request.POST)
print(request.FILES)
fields = models.FormField.objects.filter(form=self.kwargs['form']).order_by('position')
contents = json.loads(request.POST.get('contents'))
i = 0
for field in fields:
if (field.type.pk != 8 and field.type.pk != 9 and field.type.pk != 10):
if field.type.pk == 7:
if contents[i] != '':
chfi = models.FormInstance.objects.get(pk=contents[i])
con = models.ConnectionInstance.objects.get(connection__formfield=field, forminstance=self.kwargs['forminstance'])
con.choseninstance = chfi
con.save()
elif field.type.pk == 6:
filename = "file" + str(i)
if request.FILES.get(filename):
f = models.File.objects.get(formfield=field, forminstance=self.kwargs['forminstance'])
f.file.delete(save=False)
f.file=request.FILES[filename]
f.save()
elif field.type.pk == 5:
imgname = "image" + str(i)
if request.FILES.get(imgname):
f = models.Image.objects.get(formfield=field, forminstance=self.kwargs['forminstance'])
f.image.delete(save=False)
f.image=request.FILES[imgname]
f.save()
else:
f = models.DataText.objects.get(formfield=field, forminstance=self.kwargs['forminstance'])
f.data = contents[i]
f.save()
i += 1
messages.success(request, _("Instance successfully updated!"))
return HttpResponse("OK")
class FormInstanceDelete(VerifiedMixin, TemplateView):
template_name = 'store/forminstance_delete.html'
def get_context_data(self, **kwargs):
context = super(FormInstanceDelete, self).get_context_data(**kwargs)
context['project'] = models.Project.objects.get(pk=self.kwargs['project'])
context['form'] = models.Form.objects.get(pk=self.kwargs['form'])
context['forminstance'] = models.FormInstance.objects.get(pk=self.kwargs['forminstance'])
context['dependent_count'] = models.ConnectionInstance.objects.filter(choseninstance__pk=self.kwargs['forminstance']).count()
return context
def post(self, request, *args, **kwargs):
context = self.get_context_data()
try:
models.FormInstance.objects.filter(form__pk=self.kwargs['form'], pk=self.kwargs['forminstance']).delete()
messages.success(request, _("Form instance successfully deleted!"))
except:
messages.error(request, _("Error occurred while deleting form instance!"))
return HttpResponseRedirect(reverse('forminstance-list', kwargs={'project': self.kwargs['project'], 'form': self.kwargs['form'] } ))
class ProjectAdd(VerifiedMixin, TemplateView):
template_name = 'store/project_add.html'
def get_context_data(self, **kwargs):
context = super(ProjectAdd, self).get_context_data(**kwargs)
return context
def post(self, request, *args, **kwargs):
context = self.get_context_data()
name = self.request.POST.get('project_name')
if name.isspace() or name=='':
messages.error(request, _("Bad project name!"))
return HttpResponseRedirect(reverse('project-add'))
p = models.Project(
title=name,
owner=self.request.user,
slug=slugify(name)
)
p.save()
messages.success(request, _("Project successfully added!"))
return HttpResponseRedirect(reverse('form-list', kwargs={'project': p.pk} ))
class ProjectEdit(VerifiedMixin, TemplateView):
template_name = 'store/project_edit.html'
def get(self, request, *args, **kwargs):
project = get_object_or_404(models.Project, pk=self.kwargs['project'])
if project.owner == request.user:
return super(ProjectEdit, self).get(request, *args, **kwargs)
else:
messages.error(request, _("You are not allowed to see this forms list"))
raise Http404()
def get_context_data(self, **kwargs):
context = super(ProjectEdit, self).get_context_data(**kwargs)
context['project'] = models.Project.objects.get(pk=self.kwargs['project'])
return context
def post(self, request, *args, **kwargs):
context = self.get_context_data()
name = self.request.POST.get('project_name')
if name.isspace() or name=='':
messages.error(request, _("Bad project name!"))
return HttpResponseRedirect(reverse('project-edit', kwargs={'project': self.kwargs['project'] } ))
p = models.Project.objects.get(pk=self.kwargs['project'])
p.title = name
p.slug = slugify(name)
p.save()
messages.success(request, _("Project successfully updated!"))
return HttpResponseRedirect(reverse('form-list', kwargs={'project': self.kwargs['project'] } ))
class FormDelete(VerifiedMixin, TemplateView):
template_name = 'store/form_delete.html'
def get(self, request, *args, **kwargs):
project = get_object_or_404(models.Project, pk=self.kwargs['project'])
if project.owner == request.user:
return super(FormDelete, self).get(request, *args, **kwargs)
else:
messages.error(request, _("You are not allowed to delete this form."))
raise Http404()
def get_context_data(self, **kwargs):
context = super(FormDelete, self).get_context_data(**kwargs)
context['project'] = models.Project.objects.get(pk=self.kwargs['project'])
context['form'] = models.Form.objects.get(pk=self.kwargs['form'])
return context
def post(self, request, *args, **kwargs):
context = self.get_context_data()
try:
models.Form.objects.get(pk=self.kwargs['form']).delete()
messages.success(request, _("Form successfully deleted!"))
except:
messages.error(request, _("Error occurred while deleting form!"))
return HttpResponseRedirect(reverse('form-list', kwargs={'project': self.kwargs['project'] } ))
class ProjectDelete(VerifiedMixin, TemplateView):
template_name = 'store/project_delete.html'
def get(self, request, *args, **kwargs):
project = get_object_or_404(models.Project, pk=self.kwargs['project'])
if project.owner == request.user:
return super(ProjectDelete, self).get(request, *args, **kwargs)
else:
messages.error(request, _("You are not allowed to delete this project."))
raise Http404()
def get_context_data(self, **kwargs):
context = super(ProjectDelete, self).get_context_data(**kwargs)
context['project'] = models.Project.objects.get(pk=self.kwargs['project'])
return context
def post(self, request, *args, **kwargs):
context = self.get_context_data()
try:
models.Project.objects.get(pk=self.kwargs['project']).delete()
messages.success(request, _("Project successfully deleted!"))
except:
messages.error(request, _("Error occurred while deleting project!"))
return HttpResponseRedirect(reverse('project-list'))
class FormShare(VerifiedMixin, CreateView):
model = models.Sharing
template_name = 'store/form_share.html'
form_class = FormShareForm
def get(self, request, *args, **kwargs):
form = get_object_or_404(models.Form, pk=self.kwargs['form'])
if form.project.owner == self.request.user:
return super(FormShare, self).get(request, *args, **kwargs)
else:
messages.error(request, _("You can't share this form."))
raise Http404()
def get_context_data(self, **kwargs):
context = super(FormShare, self).get_context_data(**kwargs)
context['project'] = models.Project.objects.get(pk=self.kwargs['project'])
context['form_id'] = models.Form.objects.get(pk=self.kwargs['form'])
return context
def form_valid(self, form):
form.instance.form = models.Form.objects.get(pk=self.kwargs['form'])
form.instance.owner = User.objects.get(pk=form.cleaned_data.get('user'))
return super(FormShare, self).form_valid(form)
def get_success_url(self):
messages.success(self.request, _("Form successfully shared!"))
return reverse_lazy('forminstance-list', args=[self.kwargs['project'], self.kwargs['form']])
def get_form_kwargs(self):
kwargs = super(FormShare, self).get_form_kwargs()
kwargs['user'] = self.request.user
return kwargs
|
mit
| 6,421,516,021,437,837,000
| 44.031662
| 191
| 0.555516
| false
| 4.244467
| false
| false
| false
|
rapkis/prekiu-automatas
|
aparatas1.py
|
1
|
4408
|
#!/usr/bin/python
import MySQLdb
import datetime
prekes = [None] * 16
automatoId=1
data = str(datetime.date.today())
prekesId = None
prekesPav = "Niekas"
def prisijungimas():
db = MySQLdb.connect(host="pardavimuaparatas2.mysql.database.azure.com",
user="pi@pardavimuaparatas2",
passwd="Aparatas1",
db="prekiuautomatai")
cur = db.cursor()
return cur
def ikelimas():
db = MySQLdb.connect(host="pardavimuaparatas2.mysql.database.azure.com",
user="pi@pardavimuaparatas2",
passwd="Aparatas1",
db="prekiuautomatai")
cur = db.cursor()
#load the data with an SQL query
cur.execute("SELECT * FROM prekesautomate WHERE Automatai_id='%d';" % automatoId)
#write the rows into the list
i = 0
for row in cur.fetchall():
prekes[i] = int(row[3])
i = i + 1
#close the database connection
db.close()
def saugojimas():
db = MySQLdb.connect(host="pardavimuaparatas2.mysql.database.azure.com",
user="pi@pardavimuaparatas2",
passwd="Aparatas1",
db="prekiuautomatai")
cur = db.cursor()
#update the database table with the new data
for x in range(1, 17):
cur.execute("UPDATE prekesautomate SET Kiekis=%d WHERE Automatai_id = %d AND NumerisAutomate = %d;" % (prekes[x-1], automatoId, x))
cur.execute("INSERT INTO pardavimustatistika (Automatai_id, Prekes_pavadinimas, data) VALUES ('%d', '%s', '%s' );" % (automatoId, prekesPav, data))
#push to the database and close the connection
db.commit()
db.close()
#method for restoring the default stock quantities in the DB from the machine side
def reset():
db = MySQLdb.connect(host="pardavimuaparatas2.mysql.database.azure.com",
user="pi@pardavimuaparatas2",
passwd="Aparatas1",
db="prekiuautomatai")
cur = db.cursor()
for x in range(1, 17):
cur.execute("UPDATE prekesautomate SET Kiekis=10 WHERE Automatai_id = %d AND NumerisAutomate = %d;" % (automatoId, x))
db.commit()
db.close()
#read the data from a file
def nuskaitymas():
file = open("duomenys1.txt", "r")
temp = file.read().splitlines()
i = 0
for line in temp:
prekes[i] = int(line)
i = i + 1
file.close()
#enter the product number from the keyboard
def ivedimas():
db = MySQLdb.connect(host="pardavimuaparatas2.mysql.database.azure.com",
user="pi@pardavimuaparatas2",
passwd="Aparatas1",
db="prekiuautomatai")
cur = db.cursor()
try:
perkamaPreke = input('Iveskite perkamos prekes ID:')
except SyntaxError:
perkamaPreke = None
if perkamaPreke >= 1 and perkamaPreke <= len(prekes) and isinstance(perkamaPreke, int) and perkamaPreke != None:
print 'Perkamos prekes id: %s' % perkamaPreke
prekes[perkamaPreke-1] = prekes[perkamaPreke-1] - 1
cur.execute("SELECT Prekes_Pavadinimas FROM prekesautomate WHERE Automatai_id = %d AND NumerisAutomate = %d;" % (automatoId, perkamaPreke))
#prekesPav = str(cur.fetchone())
kiekis = 0
for row in cur.fetchall():
prekesPav = str(row[0])
#print(prekes)
#print(prekesPav)
else:
print('Neivestas arba neteisingai ivestas prekes kodas')
return ivedimas()
return prekesPav
#write the data to a file (kept as a backup copy)
def irasymas():
file = open("duomenys1.txt", "w")
for item in prekes:
file.write("%s\n" % item)
file.close()
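# Illustrative note (an assumption): duomenys1.txt used by nuskaitymas() and
# irasymas() above is a plain-text backup holding one stock quantity per line,
# one line for each of the 16 slots, for example:
#   10
#   10
#   (and so on, 16 lines in total)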
while True:
#reset()
ikelimas()
prekesPav = ivedimas()
#print(prekesPav)
irasymas()
saugojimas()
|
mit
| 849,941,584,960,501,600
| 35.675214
| 155
| 0.533348
| false
| 3.289552
| false
| false
| false
|
fulfilio/trytond-picking-list-report
|
picking_list_report.py
|
1
|
2960
|
# -*- coding: utf-8 -*-
"""
picking_list_report.py
"""
from trytond.pool import PoolMeta, Pool
from trytond.transaction import Transaction
from openlabs_report_webkit import ReportWebkit
__metaclass__ = PoolMeta
__all__ = ['PickingListReport']
class ReportMixin(ReportWebkit):
"""
Mixin Class to inherit from, for all HTML reports.
"""
@classmethod
def wkhtml_to_pdf(cls, data, options=None):
"""
Call wkhtmltopdf to convert the html to pdf
"""
Company = Pool().get('company.company')
company = ''
if Transaction().context.get('company'):
company = Company(Transaction().context.get('company')).party.name
options = {
'margin-bottom': '0.50in',
'margin-left': '0.50in',
'margin-right': '0.50in',
'margin-top': '0.50in',
'footer-font-size': '8',
'footer-left': company,
'footer-line': '',
'footer-right': '[page]/[toPage]',
'footer-spacing': '5',
'page-size': 'Letter',
}
return super(ReportMixin, cls).wkhtml_to_pdf(
data, options=options
)
class PickingListReport(ReportMixin):
"""
HTML Report for Picking List
"""
__name__ = 'stock.shipment.out.picking_list.html'
@classmethod
def parse(cls, report, records, data, localcontext):
compare_context = cls.get_compare_context(report, records, data)
sorted_moves = {}
for shipment in records:
sorted_moves[shipment.id] = sorted(
shipment.inventory_moves,
lambda x, y: cmp(
cls.get_compare_key(x, compare_context),
cls.get_compare_key(y, compare_context)
)
)
localcontext['moves'] = sorted_moves
return super(PickingListReport, cls).parse(
report, records, data, localcontext
)
@staticmethod
def get_compare_context(report, records, data):
Location = Pool().get('stock.location')
from_location_ids = set()
to_location_ids = set()
for record in records:
for move in record.inventory_moves:
from_location_ids.add(move.from_location)
to_location_ids.add(move.to_location)
from_locations = Location.browse(list(from_location_ids))
to_locations = Location.browse(list(to_location_ids))
return {
'from_location_ids': [l.id for l in from_locations],
'to_location_ids': [l.id for l in to_locations],
}
@staticmethod
def get_compare_key(move, compare_context):
from_location_ids = compare_context['from_location_ids']
to_location_ids = compare_context['to_location_ids']
return [from_location_ids.index(move.from_location.id),
to_location_ids.index(move.to_location.id)]
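# Illustrative note (not part of the original module): get_compare_key sorts
# inventory moves first by from_location and then by to_location, using each
# location's index in the lists built by get_compare_context, so the rendered
# picking list groups together moves that share a source location.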
|
bsd-3-clause
| -1,181,353,906,449,676,300
| 28.89899
| 78
| 0.571284
| false
| 3.889619
| false
| false
| false
|
Cecca/lydoc
|
setup.py
|
1
|
1336
|
from setuptools import setup, find_packages
import io
version = dict()
with io.open("lydoc/_version.py", "r", encoding='utf-8') as fp:
exec(fp.read(), version)
with io.open("README.rst", "r", encoding='utf-8') as fp:
long_desc = fp.read()
setup(
name='lydoc',
version=version['__version__'],
author='Matteo Ceccarello',
author_email='matteo.ceccarello@gmail.com',
license='GPLv3',
url='https://github.com/Cecca/lydoc',
description='An API documentation generator for Lilypond files',
long_description=long_desc,
packages=find_packages(exclude=['docs', 'tests*']),
include_package_data=True,
install_requires=[
'jinja2',
'grako'
],
extras_require={
'dev': ['pyinstaller'],
'test': ['coverage', 'nose'],
},
classifiers={
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Development Status :: 3 - Alpha",
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
},
entry_points={
'console_scripts': [
'lydoc=lydoc:main'
]
}
)
|
gpl-3.0
| 2,782,565,075,269,225,000
| 28.043478
| 75
| 0.583832
| false
| 3.680441
| false
| false
| false
|
susundberg/Networkdroid
|
src/main_client_speak.py
|
1
|
4093
|
import json
import argparse
import sys
import zmq
import datetime
from Sundberg.Logger import *
from subprocess import call
def get_command_line_arguments( ):
parser = argparse.ArgumentParser(description='Speak-aloud espeak client')
parser.add_argument("configfile", help = "Set configuration file to be used")
return parser.parse_args()
def main( args ):
log = Logger("client_speak.log")
log.info("Using configuration file: '%s'" % args.configfile )
with open( args.configfile ) as fid:
config = json.loads( fid.read() )
# Handle the module print messages
client = SpeakClient( config, log )
client.mainloop()
class SpeakClient:
def __init__(self, config, log ):
self.module_alives = {}
self.speak_lines = {
"pingalive" : "Connection to droid server",
"pingip" : "Connection to internet",
"pinghost" : "Domain name server",
"ssh" : "Nat compromised"
}
self.timeout = int( config["client_speak_timeout"] )
self.address = config["protocol"] + ":" + config["port_client_pub"]
self.log = log
self.speak_command = config["client_speak_cmd"]
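# Illustrative sketch (an assumption, not part of the original file): the JSON
# configuration file is expected to provide at least the keys read above, for
# example something like
#   {
#       "protocol": "tcp://127.0.0.1",
#       "port_client_pub": "5556",
#       "client_speak_timeout": "30000",
#       "client_speak_cmd": "espeak"
#   }
# Only the key names come from the code; the values here are made up.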
def mainloop(self):
context = zmq.Context(1)
receive_socket = context.socket(zmq.SUB)
receive_socket.connect( self.address )
receive_socket.setsockopt(zmq.SUBSCRIBE, "")
receive_socket.setsockopt(zmq.RCVTIMEO, self.timeout )
deadline = datetime.timedelta( milliseconds = self.timeout )
while( True ):
# We need to first check if we have messages waiting, if yes, process those
# If not, do dead module check and enter timeout receive
# We need to do this to avoid a) skipping the dead-module check and b) running the dead-module
# check while lines are still waiting to be processed (since speaking can take several seconds)
# First check if we have messages waiting
try:
message = receive_socket.recv( flags = zmq.NOBLOCK )
self.process_message( message )
continue
except zmq.ZMQError as error:
if error.errno != zmq.EAGAIN :
raise( error )
self.check_for_dead_modules( deadline )
# No messages ready, do timeout receive
try:
message = receive_socket.recv( )
self.process_message( message )
except zmq.ZMQError as error:
if error.errno != zmq.EAGAIN :
raise( error )
def process_message( self, message ):
fields = message.split(":")
if len(fields) == 2 and fields[1].strip() == "HB":
module_name = fields[0].strip().lower()
if module_name in self.speak_lines:
if module_name not in self.module_alives:
self.speak_aloud( self.speak_lines[ module_name ] + " OK" )
self.log.info("Module '%s' ONLINE" % module_name )
self.module_alives[ module_name ] = datetime.datetime.now()
else:
print "GOT LINE:" + message
def check_for_dead_modules(self, deadline ):
# Check for timeouted speak aloud 'connection dropped'
current_time = datetime.datetime.now()
to_remove = []
for module_name in self.module_alives:
elapsed_since_last_hb = current_time - self.module_alives[ module_name ]
if elapsed_since_last_hb > deadline:
self.speak_aloud( self.speak_lines[ module_name ] + " got dropped")
to_remove.append( module_name )
self.log.info("Module '%s' went offline" % module_name )
for module_name in to_remove:
del self.module_alives[ module_name ]
def speak_aloud( self, line ):
retcode = call( [ self.speak_command, line ] )
if retcode != 0 :
self.log.error("Call '%s' returned nonzero: %d" % ( self.speak_command, retcode ) )
self.log.debug("Speak aloud: " + line )
if __name__ == "__main__":
sys.exit( main( get_command_line_arguments() ) )
|
gpl-2.0
| -7,579,788,459,842,370,000
| 33.686441
| 113
| 0.597361
| false
| 3.832397
| true
| false
| false
|
open-synergy/opnsynid-stock-logistics-warehouse
|
stock_picking_type_accounting_configuration/models/stock_move_account_source.py
|
1
|
1177
|
# 2020 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import models, api, fields
from openerp.tools.safe_eval import safe_eval as eval
class StockMoveAccountSource(models.Model):
_name = "stock.move_account_source"
_description = "Stock Move Account Source"
name = fields.Char(
string="Source Name",
required=True,
)
active = fields.Boolean(
string="Active",
default=True,
)
note = fields.Text(
string="Note",
)
python_code = fields.Text(
string="Python Code for Account Source",
required=True,
default="result = False",
)
def _get_localdict(self, move):
self.ensure_one()
return {
"env": self.env,
"move": move,
}
@api.multi
def _get_account(self, move):
self.ensure_one()
localdict = self._get_localdict(move)
try:
eval(self.python_code,
localdict, mode="exec", nocopy=True)
result = localdict["result"]
except: # noqa: E722
result = False
return result
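# Illustrative sketch (an assumption, not part of the original module): the
# python_code of a source is executed with 'env' and 'move' in scope and must
# assign the resulting account (or False) to a variable named 'result', e.g.
#   result = env["account.account"].search([("code", "=", "110100")], limit=1)
# The account code used above is an arbitrary placeholder.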
|
agpl-3.0
| -1,646,716,042,506,528,800
| 24.586957
| 68
| 0.571793
| false
| 3.859016
| false
| false
| false
|
Juniper/tempest
|
tempest/api/compute/test_extensions.py
|
1
|
2103
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tempest.api.compute import base
from tempest.common import utils
from tempest import config
from tempest.lib import decorators
CONF = config.CONF
LOG = logging.getLogger(__name__)
class ExtensionsTest(base.BaseV2ComputeTest):
@decorators.idempotent_id('3bb27738-b759-4e0d-a5fa-37d7a6df07d1')
def test_list_extensions(self):
# List of all extensions
if not CONF.compute_feature_enabled.api_extensions:
raise self.skipException('There are not any extensions configured')
extensions = self.extensions_client.list_extensions()['extensions']
ext = CONF.compute_feature_enabled.api_extensions[0]
# Log extensions list
extension_list = map(lambda x: x['alias'], extensions)
LOG.debug("Nova extensions: %s", ','.join(extension_list))
if ext == 'all':
self.assertIn('Hosts', map(lambda x: x['name'], extensions))
elif ext:
self.assertIn(ext, extension_list)
else:
raise self.skipException('There are not any extensions configured')
@decorators.idempotent_id('05762f39-bdfa-4cdb-9b46-b78f8e78e2fd')
@utils.requires_ext(extension='os-consoles', service='compute')
def test_get_extension(self):
# get the specified extensions
extension = self.extensions_client.show_extension('os-consoles')
self.assertEqual('os-consoles', extension['extension']['alias'])
|
apache-2.0
| -5,326,168,622,325,801,000
| 37.236364
| 79
| 0.69805
| false
| 3.930841
| false
| false
| false
|
XianliangJ/collections
|
CNUpdates/updates/examples/experiment_base_waxman.py
|
1
|
1647
|
from waxman_topo import Topology
global networkSize
networkSize = None
def set_size(size):
global networkSize
networkSize = size
def links_to_remove(version, graph):
if version == 0:
return []
if len(graph.coreSwitches) < 2:
return []
return [ (graph.coreSwitches[idx], graph.edgeSwitches[idx+version]) for idx in range(len(graph.coreSwitches))]
def nodes_to_remove(version, graph):
if version == 0:
return []
return [ host for host in graph.hosts() if host % 10 == (version + 1) ]
edges_to_remove = [ [(101,107),(103,108),(104,108)],
[(101,108),(103,107),(105,108)],
[] ]
def switches_to_remove(version, graph):
if version == 0:
return []
return [ core for core in graph.coreSwitches if core % 5 == (version + 1) ]
def _topology1(version, topology=Topology):
global networkSize
graph = topology(networkSize).nx_graph()
graph.remove_nodes_from(nodes_to_remove(version, graph))
graph.remove_edges_from(edges_to_remove[0])
return graph
def _topology2(version, topology=Topology):
global networkSize
graph = topology(networkSize).nx_graph()
graph.remove_nodes_from(nodes_to_remove(0, graph))
graph.remove_edges_from(edges_to_remove[version])
return graph
def _topology3(version, topology=Topology):
global networkSize
graph = topology(networkSize).nx_graph()
graph.remove_nodes_from(nodes_to_remove(version, graph))
graph.remove_edges_from(edges_to_remove[version])
return graph
topologies = [ _topology1,
_topology2,
_topology3 ]
|
gpl-3.0
| -3,425,362,455,485,318,700
| 30.075472
| 114
| 0.645416
| false
| 3.557235
| false
| false
| false
|
egtaonline/quiesce
|
egta/script/innerloop.py
|
1
|
5342
|
"""Script utility for running inner loop"""
import asyncio
import json
import logging
from concurrent import futures
from gameanalysis import regret
from egta import innerloop
from egta import schedgame
from egta.script import schedspec
from egta.script import utils
def add_parser(subparsers):
"""Create innerloop parser"""
parser = subparsers.add_parser(
"quiesce",
help="""Compute equilibria using the quiesce procedure""",
description="""Samples profiles from small restricted strategy sets,
expanding set support by best responses to candidate restricted game
equilibria. For games with a large number of players, a reduction
should be specified. The result is a list where each element specifies
an "equilibrium".""",
)
parser.add_argument(
"scheduler",
metavar="<sched-spec>",
help="""A scheduler specification,
see `egta spec` for more info.""",
)
parser.add_argument(
"--regret-thresh",
metavar="<reg>",
type=float,
default=1e-3,
help="""Regret threshold for a mixture to be considered an equilibrium.
(default: %(default)g)""",
)
parser.add_argument(
"--dist-thresh",
metavar="<norm>",
type=float,
default=0.1,
help="""Norm threshold for two mixtures to be considered distinct.
(default: %(default)g)""",
)
parser.add_argument(
"--max-restrict-size",
metavar="<support>",
type=int,
default=3,
help="""Support size threshold, beyond which restricted games are not
required to be explored. (default: %(default)d)""",
)
parser.add_argument(
"--num-equilibria",
metavar="<num>",
type=int,
default=1,
help="""Number of equilibria requested to be found. This is mainly
useful when a game contains known degenerate equilibria, but those
strategies are still useful as deviating strategies. (default:
%(default)d)""",
)
parser.add_argument(
"--num-backups",
metavar="<num>",
type=int,
default=1,
help="""Number
of backup restricted strategy sets to pop at a time, when no equilibria
are confirmed in initial required set. When games get to this point
they can quiesce slowly because this by default pops one at a time.
Increasing this number can get games like this to quiesce more quickly,
but naturally, also schedules more, potentially unnecessary,
simulations. (default: %(default)d)""",
)
parser.add_argument(
"--dev-by-role",
action="store_true",
help="""Explore deviations in
role order instead of all at once. By default, when checking for
beneficial deviations, all role deviations are scheduled at the same
time. Setting this will check one role at a time. If a beneficial
deviation is found, then that restricted strategy set is scheduled
without exploring deviations from the other roles.""",
)
parser.add_argument(
"--style",
default="best",
choices=["fast", "more", "best", "one"],
help="""Style of equilibrium finding to use. `fast` is the fastests but
least thorough, `one` will guarantee an equilibrium is found in
potentially exponential time.""",
)
parser.add_argument(
"--procs",
type=int,
default=2,
metavar="<num-procs>",
help="""Number
of processes to use. This will speed up computation if doing
computationally intensive things simultaneously, i.e. nash finding.
(default: %(default)d)""",
)
utils.add_reductions(parser)
parser.run = run
async def run(args):
"""Entry point for cli"""
sched = await schedspec.parse_scheduler(args.scheduler)
red, red_players = utils.parse_reduction(sched, args)
agame = schedgame.schedgame(sched, red, red_players)
async def get_regret(eqm):
"""Gets the regret of an equilibrium"""
game = await agame.get_deviation_game(eqm > 0)
return float(regret.mixture_regret(game, eqm))
async with sched:
with futures.ProcessPoolExecutor(args.procs) as executor:
eqa = await innerloop.inner_loop(
agame,
regret_thresh=args.regret_thresh,
dist_thresh=args.dist_thresh,
restricted_game_size=args.max_restrict_size,
num_equilibria=args.num_equilibria,
num_backups=args.num_backups,
devs_by_role=args.dev_by_role,
style=args.style,
executor=executor,
)
regrets = await asyncio.gather(*[get_regret(eqm) for eqm in eqa])
logging.error(
"quiesce finished finding %d equilibria:\n%s",
eqa.shape[0],
"\n".join(
"{:d}) {} with regret {:g}".format(i, sched.mixture_to_repr(eqm), reg)
for i, (eqm, reg) in enumerate(zip(eqa, regrets), 1)
),
)
json.dump(
[
{"equilibrium": sched.mixture_to_json(eqm), "regret": reg}
for eqm, reg in zip(eqa, regrets)
],
args.output,
)
args.output.write("\n")
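# Illustrative usage sketch (an assumption, not part of the original module):
# the parser above registers the `quiesce` subcommand, so a typical invocation
# would look roughly like
#   egta quiesce <sched-spec> --regret-thresh 1e-3 --max-restrict-size 3
# where <sched-spec> is a scheduler specification as described by `egta spec`.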
|
apache-2.0
| 7,585,665,042,589,189,000
| 33.915033
| 82
| 0.60745
| false
| 4.001498
| false
| false
| false
|
MarkusHackspacher/PythonFarmGame
|
farmlib/gamemanager.py
|
1
|
8848
|
'''
Created on 17-07-2012
@author: orneo1212
'''
import os
import time
from farmlib import DictMapper
from farmlib.farm import FarmField, FarmObject, FarmTile, Seed, objects
from farmlib.player import Player
try:
xrange
except NameError:
xrange = range
class GameManager(object):
"""Game Manager class
"""
def __init__(self):
self.farms = []
self.gameseed = int(time.time())
self.gametime = int(time.time())
self.current_farm = 0
self.player = Player()
def getfarm(self, farmid=None):
"""getfarm
:param farmid:
:return:
"""
if farmid is None:
farmid = self.current_farm
if not self.farms:
self.addfarm()
try:
return self.farms[farmid]
except IndexError:
return None
def getfarmcount(self):
"""get farm count
:return:
"""
return len(self.farms)
def getcurrentfarmid(self):
"""get current farm id
:return:
"""
return self.current_farm
def getnextfarmcost(self):
"""get next farm cost
:return:
"""
farmcount = self.getfarmcount() - 1
cost = 10000 + 12000 * farmcount
return cost
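    # Worked example of the formula above (values follow directly from it):
    # with one farm owned, farmcount = 0 and the next farm costs 10000;
    # with two farms owned, farmcount = 1 and the next farm costs 22000.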
def addfarm(self):
"""add farm
:return:
"""
newfarm = FarmField(self)
self.farms.append(newfarm)
return newfarm
def setcurrentfarm(self, farmid):
"""set current farm
:param farmid:
:return:
"""
if farmid > self.getfarmcount():
farmid = self.getfarmcount() - 1
self.current_farm = farmid
return farmid
def getgameseed(self):
"""get game seed
:return:
"""
return self.gameseed
def setgameseed(self, newseed):
"""set game seed
:param newseed:
:return:
"""
self.gameseed = newseed
def getplayer(self):
"""get player
:return:
"""
return self.player
def update(self):
"""should be called 20 times per second"""
# update selected item
if self.player.selecteditem is not None and \
not self.player.item_in_inventory(self.player.selecteditem):
# clear the selected item if the player doesn't have it
self.player.selecteditem = None
# update farms
for farm in self.farms:
farm.update()
def start_new_game(self):
"""Start new game
:return:
"""
farm = self.getfarm(0)
farm.generate_random_stones()
farm.generate_random_planks()
def savegame(self):
"""save game
:return:
"""
self.save_gamestate(self.player)
def loadgame(self):
"""load game
:return:
"""
result = self.load_gamestate('field.json', self.player)
return result
def timeforward(self):
"""time forward
:return:
"""
farm = self.getfarm(0)
if farm.seconds_to_update > 1000:
farm.seconds_to_update = 1000
if farm.seconds_to_update:
# 1 second is equal 20 updates
for _ in xrange(farm.seconds_to_update):
self.update()
def save_gamestate(self, player):
"""Saving game state
:param player:
:return:
"""
print("Saving game state...")
data = DictMapper()
# Save player data
data["inventory"] = player.inventory
data["itemscounter"] = player.itemscounter
data["money"] = player.money
data["watercanuses"] = player.watercanuses
data["exp"] = player.exp
data["nextlvlexp"] = player.nextlvlexp
data["level"] = player.level
# Save time
data["gametime"] = int(time.time())
data["gameseed"] = self.getgameseed()
# save tiles
data["fields"] = []
try:
dict.iteritems
except AttributeError:
# Python 3
def listkeys(d):
"""listkeys
:param d:
:return:
"""
return list(d)
else:
# Python 2
def listkeys(d):
"""listkeys
:param d:
:return:
"""
return d.keys()
# fill tiles
for farmid in xrange(self.getfarmcount()):
farm = self.getfarm(farmid)
data["fields"].append({"tiles": []})
for ftt in listkeys(farm.farmtiles):
ft = farm.farmtiles[ftt]
# skip when no seed
if not ft['object']:
continue
gameobject = ft['object']
tile = {}
tile["px"] = int(ftt.split('x')[0])
tile["py"] = int(ftt.split('x')[1])
tile["water"] = ft["water"]
tile["object"] = {}
# seed data
tile["object"]["type"] = gameobject.type
tile["object"]['id'] = gameobject.id
if isinstance(gameobject, Seed):
tile["object"]['growstarttime'] = gameobject.growstarttime
tile["object"]['growendtime'] = gameobject.growendtime
tile["object"]['growing'] = bool(gameobject.growing)
tile["object"]['to_harvest'] = bool(gameobject.to_harvest)
tile["object"]['harvestcount'] = gameobject.harvestcount
# set tile
data["fields"][farmid]["tiles"].append(tile)
# save data
data.save("field.json")
return True
def load_gamestate(self, filename, player):
"""Loading game state
:param filename:
:param player:
:return:
"""
if not os.path.isfile(filename):
return False
print("Loading game state...")
data = DictMapper()
data.load(filename)
player.inventory = data["inventory"]
player.itemscounter = data["itemscounter"]
player.watercanuses = data.get("watercanuses", 100)
player.exp = data.get("exp", 0.0)
player.nextlvlexp = data.get("nextlvlexp", 100.0)
player.money = int(data.get("money", 1))
player.level = int(data.get("level", 1))
# load game time
self.seconds_to_update = int(time.time()) - data.get("gametime",
int(time.time()))
seed = data.get("gameseed", int(time.time()))
self.setgameseed(seed)
# Migrate old farm
if "fields" not in data.keys():
data["fields"] = []
data['fields'].append({})
data['fields'][0]["tiles"] = data["tiles"]
# load tiles
for farmid in xrange(len(data["fields"])):
farm = self.getfarm(farmid)
if farm is None:
farm = self.addfarm()
# Restore tiles
for tile in data["fields"][farmid]["tiles"]:
px = tile["px"]
py = tile["py"]
# Avoid null objects
if not tile["object"]:
continue
# Restore seed or object
if tile["object"]["type"] == "seed":
objectdata = tile["object"]
newobject = Seed()
newobject.id = objectdata["id"]
newobject.type = objectdata["type"]
newobject.to_harvest = objectdata["to_harvest"]
newobject.growing = objectdata["growing"]
newobject.growendtime = objectdata["growendtime"]
newobject.growstarttime = objectdata["growstarttime"]
farmtile = FarmTile(newobject)
farmtile["water"] = tile["water"]
# Apply global object data
newobject.apply_dict(objects[newobject.id])
# Restore harvest count
newobject.harvestcount = objectdata.get(
"harvestcount", 1)
newobject.requiredlevel = objectdata.get(
"requiredlevel", 1)
else:
newobject = FarmObject()
newobject.id = tile["object"]["id"]
newobject.type = tile["object"]["type"]
# apply dict
newobject.apply_dict(objects[newobject.id])
farmtile = FarmTile(newobject)
# set farmtile
farm.set_farmtile(px, py, farmtile)
# return
return True
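# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). It assumes the
# farmlib package is importable and that the caller provides the ~20 Hz game
# loop mentioned in update()'s docstring; the loop below only simulates one
# second of game time.
if __name__ == "__main__":
    manager = GameManager()
    if not manager.loadgame():      # fall back to a fresh game when no save exists
        manager.start_new_game()
    for _ in range(20):             # update() expects roughly 20 calls per second
        manager.update()
    manager.savegame()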
|
gpl-3.0
| -7,167,441,936,136,486,000
| 27.541935
| 78
| 0.493897
| false
| 4.219361
| false
| false
| false
|
oesteban/preprocessing-workflow
|
fmriprep/workflows/bold/util.py
|
1
|
18323
|
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Utility workflows
^^^^^^^^^^^^^^^^^
.. autofunction:: init_bold_reference_wf
.. autofunction:: init_enhance_and_skullstrip_bold_wf
.. autofunction:: init_skullstrip_bold_wf
"""
from packaging.version import parse as parseversion, Version
from pkg_resources import resource_filename as pkgr_fn
from nipype.pipeline import engine as pe
from nipype.interfaces import utility as niu, fsl, afni, ants
from templateflow.api import get as get_template
from niworkflows.engine.workflows import LiterateWorkflow as Workflow
from niworkflows.interfaces.ants import AI
from niworkflows.interfaces.fixes import (
FixHeaderRegistration as Registration,
FixHeaderApplyTransforms as ApplyTransforms,
)
from niworkflows.interfaces.images import ValidateImage, MatchHeader
from niworkflows.interfaces.masks import SimpleShowMaskRPT
from niworkflows.interfaces.registration import EstimateReferenceImage
from niworkflows.interfaces.utils import CopyXForm
DEFAULT_MEMORY_MIN_GB = 0.01
def init_bold_reference_wf(omp_nthreads, bold_file=None, pre_mask=False,
name='bold_reference_wf', gen_report=False):
"""
This workflow generates reference BOLD images for a series
The raw reference image is the target of :abbr:`HMC (head motion correction)`, and a
contrast-enhanced reference is the subject of distortion correction, as well as
boundary-based registration to T1w and template spaces.
.. workflow::
:graph2use: orig
:simple_form: yes
from fmriprep.workflows.bold import init_bold_reference_wf
wf = init_bold_reference_wf(omp_nthreads=1)
**Parameters**
bold_file : str
BOLD series NIfTI file
omp_nthreads : int
Maximum number of threads an individual process may use
name : str
Name of workflow (default: ``bold_reference_wf``)
gen_report : bool
Whether a mask report node should be appended in the end
**Inputs**
bold_file
BOLD series NIfTI file
bold_mask : bool
A tentative brain mask to initialize the workflow (requires ``pre_mask``
parameter set ``True``).
dummy_scans : int or None
Number of non-steady-state volumes specified by user at beginning of ``bold_file``
sbref_file
single band (as opposed to multi band) reference NIfTI file
**Outputs**
bold_file
Validated BOLD series NIfTI file
raw_ref_image
Reference image to which BOLD series is motion corrected
skip_vols
Number of non-steady-state volumes selected at beginning of ``bold_file``
algo_dummy_scans
Number of non-steady-state volumes algorithmically detected at
beginning of ``bold_file``
ref_image
Contrast-enhanced reference image
ref_image_brain
Skull-stripped reference image
bold_mask
Skull-stripping mask of reference image
validation_report
HTML reportlet indicating whether ``bold_file`` had a valid affine
**Subworkflows**
* :py:func:`~fmriprep.workflows.bold.util.init_enhance_and_skullstrip_wf`
"""
workflow = Workflow(name=name)
workflow.__desc__ = """\
First, a reference volume and its skull-stripped version were generated
using a custom methodology of *fMRIPrep*.
"""
inputnode = pe.Node(niu.IdentityInterface(fields=['bold_file', 'bold_mask', 'dummy_scans',
'sbref_file']),
name='inputnode')
outputnode = pe.Node(
niu.IdentityInterface(fields=['bold_file', 'raw_ref_image', 'skip_vols',
'algo_dummy_scans', 'ref_image', 'ref_image_brain',
'bold_mask', 'validation_report', 'mask_report']),
name='outputnode')
# Simplify manually setting input image
if bold_file is not None:
inputnode.inputs.bold_file = bold_file
validate = pe.Node(ValidateImage(), name='validate', mem_gb=DEFAULT_MEMORY_MIN_GB)
gen_ref = pe.Node(EstimateReferenceImage(), name="gen_ref",
mem_gb=1) # OE: 128x128x128x50 * 64 / 8 ~ 900MB.
enhance_and_skullstrip_bold_wf = init_enhance_and_skullstrip_bold_wf(
omp_nthreads=omp_nthreads, pre_mask=pre_mask)
calc_dummy_scans = pe.Node(niu.Function(function=_pass_dummy_scans,
output_names=['skip_vols_num']),
name='calc_dummy_scans',
run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB)
workflow.connect([
(inputnode, enhance_and_skullstrip_bold_wf, [('bold_mask', 'inputnode.pre_mask')]),
(inputnode, validate, [('bold_file', 'in_file')]),
(inputnode, gen_ref, [('sbref_file', 'sbref_file')]),
(inputnode, calc_dummy_scans, [('dummy_scans', 'dummy_scans')]),
(validate, gen_ref, [('out_file', 'in_file')]),
(gen_ref, enhance_and_skullstrip_bold_wf, [('ref_image', 'inputnode.in_file')]),
(validate, outputnode, [('out_file', 'bold_file'),
('out_report', 'validation_report')]),
(gen_ref, calc_dummy_scans, [('n_volumes_to_discard', 'algo_dummy_scans')]),
(calc_dummy_scans, outputnode, [('skip_vols_num', 'skip_vols')]),
(gen_ref, outputnode, [('ref_image', 'raw_ref_image'),
('n_volumes_to_discard', 'algo_dummy_scans')]),
(enhance_and_skullstrip_bold_wf, outputnode, [
('outputnode.bias_corrected_file', 'ref_image'),
('outputnode.mask_file', 'bold_mask'),
('outputnode.skull_stripped_file', 'ref_image_brain')]),
])
if gen_report:
mask_reportlet = pe.Node(SimpleShowMaskRPT(), name='mask_reportlet')
workflow.connect([
(enhance_and_skullstrip_bold_wf, mask_reportlet, [
('outputnode.bias_corrected_file', 'background_file'),
('outputnode.mask_file', 'mask_file'),
]),
])
return workflow
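# ---------------------------------------------------------------------------
# Minimal usage sketch (an assumption based on standard Nipype workflow
# semantics, not code taken from fMRIPrep itself): build the reference
# workflow, point the documented inputnode at a BOLD series, and run it.
# Both paths below are placeholders.
if __name__ == "__main__":
    wf = init_bold_reference_wf(omp_nthreads=1, gen_report=True)
    wf.base_dir = "/tmp/bold_reference_work"                        # hypothetical scratch dir
    wf.inputs.inputnode.bold_file = "sub-01_task-rest_bold.nii.gz"  # placeholder input
    wf.run()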
def init_enhance_and_skullstrip_bold_wf(
name='enhance_and_skullstrip_bold_wf',
pre_mask=False,
omp_nthreads=1):
"""
This workflow takes in a :abbr:`BOLD (blood-oxygen level-dependant)`
:abbr:`fMRI (functional MRI)` average/summary (e.g., a reference image
averaging non-steady-state timepoints), and sharpens the histogram
with the application of the N4 algorithm for removing the
:abbr:`INU (intensity non-uniformity)` bias field and calculates a signal
mask.
Steps of this workflow are:
1. Calculate a tentative mask by registering (9-parameters) to *fMRIPrep*'s
:abbr:`EPI (echo-planar imaging)` -*boldref* template, which
is in MNI space.
The tentative mask is obtained by resampling the MNI template's
brainmask into *boldref*-space.
2. Binary dilation of the tentative mask with a sphere of 3mm diameter.
3. Run ANTs' ``N4BiasFieldCorrection`` on the input
:abbr:`BOLD (blood-oxygen level-dependant)` average, using the
mask generated in 1) instead of the internal Otsu thresholding.
4. Calculate a loose mask using FSL's ``bet``, followed by a single
mathematical-morphology dilation with a 6mm sphere as the structuring element.
5. Mask the :abbr:`INU (intensity non-uniformity)`-corrected image
with the latest mask calculated in 3), then use AFNI's ``3dUnifize``
to *standardize* the T2* contrast distribution.
6. Calculate a mask using AFNI's ``3dAutomask`` after the contrast
enhancement of 4).
7. Calculate a final mask as the intersection of 4) and 6).
8. Apply final mask on the enhanced reference.
Step 1 can be skipped if the ``pre_mask`` argument is set to ``True`` and
a tentative mask is passed in to the workflow through the ``pre_mask``
Nipype input.
.. workflow ::
:graph2use: orig
:simple_form: yes
from fmriprep.workflows.bold.util import init_enhance_and_skullstrip_bold_wf
wf = init_enhance_and_skullstrip_bold_wf(omp_nthreads=1)
**Parameters**
name : str
Name of workflow (default: ``enhance_and_skullstrip_bold_wf``)
pre_mask : bool
Indicates whether the ``pre_mask`` input will be set (and thus, step 1
should be skipped).
omp_nthreads : int
number of threads available to parallel nodes
**Inputs**
in_file
BOLD image (single volume)
pre_mask : bool
A tentative brain mask to initialize the workflow (requires ``pre_mask``
parameter set ``True``).
**Outputs**
bias_corrected_file
the ``in_file`` after `N4BiasFieldCorrection`_
skull_stripped_file
the ``bias_corrected_file`` after skull-stripping
mask_file
mask of the skull-stripped input file
out_report
reportlet for the skull-stripping
.. _N4BiasFieldCorrection: https://hdl.handle.net/10380/3053
"""
workflow = Workflow(name=name)
inputnode = pe.Node(niu.IdentityInterface(fields=['in_file', 'pre_mask']),
name='inputnode')
outputnode = pe.Node(niu.IdentityInterface(fields=[
'mask_file', 'skull_stripped_file', 'bias_corrected_file']), name='outputnode')
# Dilate pre_mask
pre_dilate = pe.Node(fsl.DilateImage(
operation='max', kernel_shape='sphere', kernel_size=3.0,
internal_datatype='char'), name='pre_mask_dilate')
# Ensure mask's header matches reference's
check_hdr = pe.Node(MatchHeader(), name='check_hdr',
run_without_submitting=True)
# Run N4 normally, force num_threads=1 for stability (images are small, no need for >1)
n4_correct = pe.Node(ants.N4BiasFieldCorrection(
dimension=3, copy_header=True, bspline_fitting_distance=200),
name='n4_correct', n_procs=1)
# Create a generous BET mask out of the bias-corrected EPI
skullstrip_first_pass = pe.Node(fsl.BET(frac=0.2, mask=True),
name='skullstrip_first_pass')
bet_dilate = pe.Node(fsl.DilateImage(
operation='max', kernel_shape='sphere', kernel_size=6.0,
internal_datatype='char'), name='skullstrip_first_dilate')
bet_mask = pe.Node(fsl.ApplyMask(), name='skullstrip_first_mask')
# Use AFNI's unifize for T2 contrast & fix header
unifize = pe.Node(afni.Unifize(
t2=True, outputtype='NIFTI_GZ',
# Default -clfrac is 0.1, 0.4 was too conservative
# -rbt because I'm a Jedi AFNI Master (see 3dUnifize's documentation)
args='-clfrac 0.2 -rbt 18.3 65.0 90.0',
out_file="uni.nii.gz"), name='unifize')
fixhdr_unifize = pe.Node(CopyXForm(), name='fixhdr_unifize', mem_gb=0.1)
# Run AFNI's 3dAutomask to extract a refined brain mask
skullstrip_second_pass = pe.Node(afni.Automask(dilate=1,
outputtype='NIFTI_GZ'),
name='skullstrip_second_pass')
fixhdr_skullstrip2 = pe.Node(CopyXForm(), name='fixhdr_skullstrip2', mem_gb=0.1)
# Take intersection of both masks
combine_masks = pe.Node(fsl.BinaryMaths(operation='mul'),
name='combine_masks')
# Compute masked brain
apply_mask = pe.Node(fsl.ApplyMask(), name='apply_mask')
if not pre_mask:
bold_template = get_template(
'MNI152NLin2009cAsym', resolution=2, desc='fMRIPrep', suffix='boldref')
brain_mask = get_template(
'MNI152NLin2009cAsym', resolution=2, desc='brain', suffix='mask')
# Initialize transforms with antsAI
init_aff = pe.Node(AI(
fixed_image=str(bold_template),
fixed_image_mask=str(brain_mask),
metric=('Mattes', 32, 'Regular', 0.2),
transform=('Affine', 0.1),
search_factor=(20, 0.12),
principal_axes=False,
convergence=(10, 1e-6, 10),
verbose=True),
name='init_aff',
n_procs=omp_nthreads)
# Registration().version may be None
if parseversion(Registration().version or '0.0.0') > Version('2.2.0'):
init_aff.inputs.search_grid = (40, (0, 40, 40))
# Set up spatial normalization
norm = pe.Node(Registration(
from_file=pkgr_fn(
'fmriprep.data',
'epi_atlasbased_brainmask.json')),
name='norm',
n_procs=omp_nthreads)
norm.inputs.fixed_image = str(bold_template)
map_brainmask = pe.Node(
ApplyTransforms(interpolation='MultiLabel', float=True, input_image=str(brain_mask)),
name='map_brainmask'
)
workflow.connect([
(inputnode, init_aff, [('in_file', 'moving_image')]),
(inputnode, map_brainmask, [('in_file', 'reference_image')]),
(inputnode, norm, [('in_file', 'moving_image')]),
(init_aff, norm, [('output_transform', 'initial_moving_transform')]),
(norm, map_brainmask, [
('reverse_invert_flags', 'invert_transform_flags'),
('reverse_transforms', 'transforms')]),
(map_brainmask, pre_dilate, [('output_image', 'in_file')]),
])
else:
workflow.connect([
(inputnode, pre_dilate, [('pre_mask', 'in_file')]),
])
workflow.connect([
(inputnode, check_hdr, [('in_file', 'reference')]),
(pre_dilate, check_hdr, [('out_file', 'in_file')]),
(check_hdr, n4_correct, [('out_file', 'mask_image')]),
(inputnode, n4_correct, [('in_file', 'input_image')]),
(inputnode, fixhdr_unifize, [('in_file', 'hdr_file')]),
(inputnode, fixhdr_skullstrip2, [('in_file', 'hdr_file')]),
(n4_correct, skullstrip_first_pass, [('output_image', 'in_file')]),
(skullstrip_first_pass, bet_dilate, [('mask_file', 'in_file')]),
(bet_dilate, bet_mask, [('out_file', 'mask_file')]),
(skullstrip_first_pass, bet_mask, [('out_file', 'in_file')]),
(bet_mask, unifize, [('out_file', 'in_file')]),
(unifize, fixhdr_unifize, [('out_file', 'in_file')]),
(fixhdr_unifize, skullstrip_second_pass, [('out_file', 'in_file')]),
(skullstrip_first_pass, combine_masks, [('mask_file', 'in_file')]),
(skullstrip_second_pass, fixhdr_skullstrip2, [('out_file', 'in_file')]),
(fixhdr_skullstrip2, combine_masks, [('out_file', 'operand_file')]),
(fixhdr_unifize, apply_mask, [('out_file', 'in_file')]),
(combine_masks, apply_mask, [('out_file', 'mask_file')]),
(combine_masks, outputnode, [('out_file', 'mask_file')]),
(apply_mask, outputnode, [('out_file', 'skull_stripped_file')]),
(n4_correct, outputnode, [('output_image', 'bias_corrected_file')]),
])
return workflow
def init_skullstrip_bold_wf(name='skullstrip_bold_wf'):
"""
This workflow applies skull-stripping to a BOLD image.
It is intended to be used on an image that has previously been
bias-corrected with
:py:func:`~fmriprep.workflows.bold.util.init_enhance_and_skullstrip_bold_wf`
.. workflow ::
:graph2use: orig
:simple_form: yes
from fmriprep.workflows.bold.util import init_skullstrip_bold_wf
wf = init_skullstrip_bold_wf()
Inputs
in_file
BOLD image (single volume)
Outputs
skull_stripped_file
the ``in_file`` after skull-stripping
mask_file
mask of the skull-stripped input file
out_report
reportlet for the skull-stripping
"""
workflow = Workflow(name=name)
inputnode = pe.Node(niu.IdentityInterface(fields=['in_file']),
name='inputnode')
outputnode = pe.Node(niu.IdentityInterface(fields=['mask_file',
'skull_stripped_file',
'out_report']),
name='outputnode')
skullstrip_first_pass = pe.Node(fsl.BET(frac=0.2, mask=True),
name='skullstrip_first_pass')
skullstrip_second_pass = pe.Node(afni.Automask(dilate=1, outputtype='NIFTI_GZ'),
name='skullstrip_second_pass')
combine_masks = pe.Node(fsl.BinaryMaths(operation='mul'), name='combine_masks')
apply_mask = pe.Node(fsl.ApplyMask(), name='apply_mask')
mask_reportlet = pe.Node(SimpleShowMaskRPT(), name='mask_reportlet')
workflow.connect([
(inputnode, skullstrip_first_pass, [('in_file', 'in_file')]),
(skullstrip_first_pass, skullstrip_second_pass, [('out_file', 'in_file')]),
(skullstrip_first_pass, combine_masks, [('mask_file', 'in_file')]),
(skullstrip_second_pass, combine_masks, [('out_file', 'operand_file')]),
(combine_masks, outputnode, [('out_file', 'mask_file')]),
# Masked file
(inputnode, apply_mask, [('in_file', 'in_file')]),
(combine_masks, apply_mask, [('out_file', 'mask_file')]),
(apply_mask, outputnode, [('out_file', 'skull_stripped_file')]),
# Reportlet
(inputnode, mask_reportlet, [('in_file', 'background_file')]),
(combine_masks, mask_reportlet, [('out_file', 'mask_file')]),
(mask_reportlet, outputnode, [('out_report', 'out_report')]),
])
return workflow
def _pass_dummy_scans(algo_dummy_scans, dummy_scans=None):
"""
**Parameters**
algo_dummy_scans : int
number of volumes to skip determined by an algorithm
dummy_scans : int or None
number of volumes to skip determined by the user
**Returns**
skip_vols_num : int
number of volumes to skip
"""
return dummy_scans or algo_dummy_scans
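# Behaviour examples for the helper above (plain Python truthiness):
#   _pass_dummy_scans(algo_dummy_scans=3, dummy_scans=None) -> 3
#   _pass_dummy_scans(algo_dummy_scans=3, dummy_scans=5)    -> 5
#   _pass_dummy_scans(algo_dummy_scans=3, dummy_scans=0)    -> 3
# Note that an explicit ``dummy_scans=0`` is falsy, so the ``or`` falls back
# to the algorithmically detected value.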
|
bsd-3-clause
| 8,292,137,848,406,605,000
| 40.08296
| 97
| 0.601867
| false
| 3.655097
| false
| false
| false
|
googleapis/python-error-reporting
|
google/cloud/errorreporting_v1beta1/services/error_group_service/transports/grpc_asyncio.py
|
1
|
12859
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.errorreporting_v1beta1.types import common
from google.cloud.errorreporting_v1beta1.types import error_group_service
from .base import ErrorGroupServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import ErrorGroupServiceGrpcTransport
class ErrorGroupServiceGrpcAsyncIOTransport(ErrorGroupServiceTransport):
"""gRPC AsyncIO backend transport for ErrorGroupService.
Service for retrieving and updating individual error groups.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "clouderrorreporting.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "clouderrorreporting.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def get_group(
self,
) -> Callable[[error_group_service.GetGroupRequest], Awaitable[common.ErrorGroup]]:
r"""Return a callable for the get group method over gRPC.
Get the specified group.
Returns:
Callable[[~.GetGroupRequest],
Awaitable[~.ErrorGroup]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_group" not in self._stubs:
self._stubs["get_group"] = self.grpc_channel.unary_unary(
"/google.devtools.clouderrorreporting.v1beta1.ErrorGroupService/GetGroup",
request_serializer=error_group_service.GetGroupRequest.serialize,
response_deserializer=common.ErrorGroup.deserialize,
)
return self._stubs["get_group"]
@property
def update_group(
self,
) -> Callable[
[error_group_service.UpdateGroupRequest], Awaitable[common.ErrorGroup]
]:
r"""Return a callable for the update group method over gRPC.
Replace the data for the specified group.
Fails if the group does not exist.
Returns:
Callable[[~.UpdateGroupRequest],
Awaitable[~.ErrorGroup]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_group" not in self._stubs:
self._stubs["update_group"] = self.grpc_channel.unary_unary(
"/google.devtools.clouderrorreporting.v1beta1.ErrorGroupService/UpdateGroup",
request_serializer=error_group_service.UpdateGroupRequest.serialize,
response_deserializer=common.ErrorGroup.deserialize,
)
return self._stubs["update_group"]
__all__ = ("ErrorGroupServiceGrpcAsyncIOTransport",)
|
apache-2.0
| -2,248,714,581,095,441,200
| 43.649306
| 93
| 0.621199
| false
| 4.662437
| false
| false
| false
|
dnsforever/dnsforever-web
|
dnsforever/web/__init__.py
|
1
|
1055
|
from flask import Flask, g
from dnsforever.config import secret_key
from dnsforever.models import Session
from dnsforever.web.tools.session import get_user
blueprints = ['apis', 'index', 'account', 'domain',
'domain_a', 'domain_ddns', 'domain_aaaa',
'domain_cname', 'domain_mx',
'domain_txt', 'domain_subdomain']
def create_app():
app = Flask(__name__)
app.secret_key = secret_key
for name in blueprints:
app.register_blueprint(load_blueprint(name))
@app.before_request
def define_session():
g.service_name = 'DNS Forever beta'
g.session = Session()
g.user = get_user()
if g.user:
g.domain_list = [ownership.domain.name
for ownership in g.user.ownership]
else:
g.domain_list = []
g.debug = app.debug
return app
def load_blueprint(name):
module = __import__('dnsforever.web.' + name, None, None, ['app'])
blueprint = getattr(module, 'app')
return blueprint
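# ---------------------------------------------------------------------------
# Minimal way to serve the application locally (a sketch, not part of this
# module): build the app through the factory above and use Flask's built-in
# development server. dnsforever.config and dnsforever.models must already be
# importable for create_app() to succeed.
if __name__ == "__main__":
    app = create_app()
    app.run(debug=True)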
|
gpl-3.0
| 4,566,098,043,050,509,000
| 24.731707
| 70
| 0.589573
| false
| 3.822464
| false
| false
| false
|
FishyFing/FishBot
|
cogs/google.py
|
1
|
2253
|
import discord
from discord.ext import commands
from .utils import checks
import urllib.parse
class SimplyGoogle:
"""A non sarcastic google command"""
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context=True, no_pm=True)
async def google(self, ctx, text):
"""Its google, you search with it.
Example: google A french pug
Special search options are available: Image, Maps
Example: google image You know, for kids!
Another example: google maps New York"""
search_type = ctx.message.content[len(ctx.prefix+ctx.command.name)+1:].lower().split(" ")
#Start of Image
if search_type[0] == "image":
search_valid = str(ctx.message.content[len(ctx.prefix+ctx.command.name)+1:].lower())
if search_valid == "image":
await self.bot.say("Please actually search something")
else:
uri = "https://www.google.com/search?tbm=isch&q="
query = str(ctx.message.content[len(ctx.prefix+ctx.command.name)+7:].lower())
encode = urllib.parse.quote_plus(query, encoding='utf-8', errors='replace')
await self.bot.say(uri+encode)
#End of Image
#Start of Maps
elif search_type[0] == "maps":
search_valid = str(ctx.message.content[len(ctx.prefix+ctx.command.name)+1:].lower())
if search_valid == "maps":
await self.bot.say("Please actually search something")
else:
uri = "https://www.google.com/maps/search/"
query = str(ctx.message.content[len(ctx.prefix+ctx.command.name)+6:].lower())
encode = urllib.parse.quote_plus(query, encoding='utf-8', errors='replace')
await self.bot.say(uri+encode)
#End of Maps
#Start of generic search
else:
uri = "https://www.google.com/search?q="
query = str(ctx.message.content[len(ctx.prefix+ctx.command.name)+1:])
encode = urllib.parse.quote_plus(query, encoding='utf-8', errors='replace')
await self.bot.say(uri+encode)
#End of generic search
def setup(bot):
n = SimplyGoogle(bot)
bot.add_cog(n)
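# ---------------------------------------------------------------------------
# Self-contained illustration of the URL building used above: the query text
# is plus/percent-encoded with urllib.parse.quote_plus before being appended
# to the search URI. The sample query is arbitrary.
if __name__ == "__main__":
    import urllib.parse
    sample = "A french pug"
    print("https://www.google.com/search?q="
          + urllib.parse.quote_plus(sample, encoding='utf-8', errors='replace'))
    # -> https://www.google.com/search?q=A+french+pug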
|
gpl-3.0
| -4,040,341,036,851,990,500
| 41.509434
| 97
| 0.590324
| false
| 3.742525
| false
| false
| false
|
nomadicfm/pyramid-views
|
tests/_test_dates.py
|
1
|
31005
|
from __future__ import unicode_literals
import datetime
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, override_settings, skipUnlessDBFeature
from django.test.utils import requires_tz_support
from django.utils import timezone
from .models import Book, BookSigning
def _make_books(n, base_date):
for i in range(n):
Book.objects.create(
name='Book %d' % i,
slug='book-%d' % i,
pages=100 + i,
pubdate=base_date - datetime.timedelta(days=i))
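# For example, _make_books(3, base_date=datetime.date(2011, 12, 25)) creates
# 'Book 0' (100 pages, pubdate 2011-12-25), 'Book 1' (101 pages, 2011-12-24)
# and 'Book 2' (102 pages, 2011-12-23).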
class ArchiveIndexViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'generic_views.urls'
def test_archive_view(self):
res = self.client.get('/dates/books/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.all()))
self.assertTemplateUsed(res, 'tests:templates/book_archive.html')
def test_archive_view_context_object_name(self):
res = self.client.get('/dates/books/context_object_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['thingies']), list(Book.objects.all()))
self.assertFalse('latest' in res.context)
self.assertTemplateUsed(res, 'tests:templates/book_archive.html')
def test_empty_archive_view(self):
Book.objects.all().delete()
res = self.client.get('/dates/books/')
self.assertEqual(res.status_code, 404)
def test_allow_empty_archive_view(self):
Book.objects.all().delete()
res = self.client.get('/dates/books/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [])
self.assertTemplateUsed(res, 'tests:templates/book_archive.html')
def test_archive_view_template(self):
res = self.client.get('/dates/books/template_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.all()))
self.assertTemplateUsed(res, 'tests:templates/list.html')
def test_archive_view_template_suffix(self):
res = self.client.get('/dates/books/template_name_suffix/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.all()))
self.assertTemplateUsed(res, 'tests:templates/book_detail.html')
def test_archive_view_invalid(self):
self.assertRaises(ImproperlyConfigured, self.client.get, '/dates/books/invalid/')
def test_archive_view_by_month(self):
res = self.client.get('/dates/books/by_month/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'month', 'DESC')))
def test_paginated_archive_view(self):
_make_books(20, base_date=datetime.date.today())
res = self.client.get('/dates/books/paginated/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.all()[0:10]))
self.assertTemplateUsed(res, 'tests:templates/book_archive.html')
res = self.client.get('/dates/books/paginated/?page=2')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['page_obj'].number, 2)
self.assertEqual(list(res.context['latest']), list(Book.objects.all()[10:20]))
def test_paginated_archive_view_does_not_load_entire_table(self):
# Regression test for #18087
_make_books(20, base_date=datetime.date.today())
# 1 query for years list + 1 query for books
with self.assertNumQueries(2):
self.client.get('/dates/books/')
# same as above + 1 query to test if books exist + 1 query to count them
with self.assertNumQueries(4):
self.client.get('/dates/books/paginated/')
def test_no_duplicate_query(self):
# Regression test for #18354
with self.assertNumQueries(2):
self.client.get('/dates/books/reverse/')
def test_datetime_archive_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/')
self.assertEqual(res.status_code, 200)
@requires_tz_support
@skipUnlessDBFeature('has_zoneinfo_database')
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_archive_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/')
self.assertEqual(res.status_code, 200)
def test_date_list_order(self):
"""date_list should be sorted descending in index"""
_make_books(5, base_date=datetime.date(2011, 12, 25))
res = self.client.get('/dates/books/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(reversed(sorted(res.context['date_list']))))
class YearArchiveViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'generic_views.urls'
def test_year_view(self):
res = self.client.get('/dates/books/2008/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [datetime.date(2008, 10, 1)])
self.assertEqual(res.context['year'], datetime.date(2008, 1, 1))
self.assertTemplateUsed(res, 'tests:templates/book_archive_year.html')
# Since allow_empty=False, next/prev years must be valid (#7164)
self.assertEqual(res.context['next_year'], None)
self.assertEqual(res.context['previous_year'], datetime.date(2006, 1, 1))
def test_year_view_make_object_list(self):
res = self.client.get('/dates/books/2006/make_object_list/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [datetime.date(2006, 5, 1)])
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2006)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2006)))
self.assertTemplateUsed(res, 'tests:templates/book_archive_year.html')
def test_year_view_empty(self):
res = self.client.get('/dates/books/1999/')
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/1999/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [])
self.assertEqual(list(res.context['book_list']), [])
# Since allow_empty=True, next/prev are allowed to be empty years (#7164)
self.assertEqual(res.context['next_year'], datetime.date(2000, 1, 1))
self.assertEqual(res.context['previous_year'], datetime.date(1998, 1, 1))
def test_year_view_allow_future(self):
# Create a new book in the future
year = datetime.date.today().year + 1
Book.objects.create(name="The New New Testement", pages=600, pubdate=datetime.date(year, 1, 1))
res = self.client.get('/dates/books/%s/' % year)
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/%s/allow_empty/' % year)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [])
res = self.client.get('/dates/books/%s/allow_future/' % year)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [datetime.date(year, 1, 1)])
def test_year_view_paginated(self):
res = self.client.get('/dates/books/2006/paginated/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2006)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2006)))
self.assertTemplateUsed(res, 'tests:templates/book_archive_year.html')
def test_year_view_invalid_pattern(self):
res = self.client.get('/dates/books/no_year/')
self.assertEqual(res.status_code, 404)
def test_no_duplicate_query(self):
# Regression test for #18354
with self.assertNumQueries(4):
self.client.get('/dates/books/2008/reverse/')
def test_datetime_year_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/2008/')
self.assertEqual(res.status_code, 200)
@skipUnlessDBFeature('has_zoneinfo_database')
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_year_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/')
self.assertEqual(res.status_code, 200)
def test_date_list_order(self):
"""date_list should be sorted ascending in year view"""
_make_books(10, base_date=datetime.date(2011, 12, 25))
res = self.client.get('/dates/books/2011/')
self.assertEqual(list(res.context['date_list']), list(sorted(res.context['date_list'])))
class MonthArchiveViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'generic_views.urls'
def test_month_view(self):
res = self.client.get('/dates/books/2008/oct/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'tests:templates/book_archive_month.html')
self.assertEqual(list(res.context['date_list']), [datetime.date(2008, 10, 1)])
self.assertEqual(list(res.context['book_list']),
list(Book.objects.filter(pubdate=datetime.date(2008, 10, 1))))
self.assertEqual(res.context['month'], datetime.date(2008, 10, 1))
# Since allow_empty=False, next/prev months must be valid (#7164)
self.assertEqual(res.context['next_month'], None)
self.assertEqual(res.context['previous_month'], datetime.date(2006, 5, 1))
def test_month_view_allow_empty(self):
# allow_empty = False, empty month
res = self.client.get('/dates/books/2000/jan/')
self.assertEqual(res.status_code, 404)
# allow_empty = True, empty month
res = self.client.get('/dates/books/2000/jan/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [])
self.assertEqual(list(res.context['book_list']), [])
self.assertEqual(res.context['month'], datetime.date(2000, 1, 1))
# Since allow_empty=True, next/prev are allowed to be empty months (#7164)
self.assertEqual(res.context['next_month'], datetime.date(2000, 2, 1))
self.assertEqual(res.context['previous_month'], datetime.date(1999, 12, 1))
# allow_empty but not allow_future: next_month should be empty (#7164)
url = datetime.date.today().strftime('/dates/books/%Y/%b/allow_empty/').lower()
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_month'], None)
def test_month_view_allow_future(self):
future = (datetime.date.today() + datetime.timedelta(days=60)).replace(day=1)
urlbit = future.strftime('%Y/%b').lower()
b = Book.objects.create(name="The New New Testement", pages=600, pubdate=future)
# allow_future = False, future month
res = self.client.get('/dates/books/%s/' % urlbit)
self.assertEqual(res.status_code, 404)
# allow_future = True, valid future month
res = self.client.get('/dates/books/%s/allow_future/' % urlbit)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['date_list'][0], b.pubdate)
self.assertEqual(list(res.context['book_list']), [b])
self.assertEqual(res.context['month'], future)
# Since allow_future = True but not allow_empty, next/prev are not
# allowed to be empty months (#7164)
self.assertEqual(res.context['next_month'], None)
self.assertEqual(res.context['previous_month'], datetime.date(2008, 10, 1))
# allow_future, but not allow_empty, with a current month. So next
# should be in the future (yup, #7164, again)
res = self.client.get('/dates/books/2008/oct/allow_future/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_month'], future)
self.assertEqual(res.context['previous_month'], datetime.date(2006, 5, 1))
def test_month_view_paginated(self):
res = self.client.get('/dates/books/2008/oct/paginated/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2008, pubdate__month=10)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2008, pubdate__month=10)))
self.assertTemplateUsed(res, 'tests:templates/book_archive_month.html')
def test_custom_month_format(self):
res = self.client.get('/dates/books/2008/10/')
self.assertEqual(res.status_code, 200)
def test_month_view_invalid_pattern(self):
res = self.client.get('/dates/books/2007/no_month/')
self.assertEqual(res.status_code, 404)
def test_previous_month_without_content(self):
"Content can exist on any day of the previous month. Refs #14711"
self.pubdate_list = [
datetime.date(2010, month, day)
for month, day in ((9, 1), (10, 2), (11, 3))
]
for pubdate in self.pubdate_list:
name = str(pubdate)
Book.objects.create(name=name, slug=name, pages=100, pubdate=pubdate)
res = self.client.get('/dates/books/2010/nov/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['previous_month'], datetime.date(2010, 10, 1))
# The following test demonstrates the bug
res = self.client.get('/dates/books/2010/nov/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['previous_month'], datetime.date(2010, 10, 1))
# The bug does not occur here because a Book with pubdate of Sep 1 exists
res = self.client.get('/dates/books/2010/oct/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['previous_month'], datetime.date(2010, 9, 1))
def test_datetime_month_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 2, 1, 12, 0))
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
BookSigning.objects.create(event_date=datetime.datetime(2008, 6, 3, 12, 0))
res = self.client.get('/dates/booksignings/2008/apr/')
self.assertEqual(res.status_code, 200)
@skipUnlessDBFeature('has_zoneinfo_database')
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_month_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 2, 1, 12, 0, tzinfo=timezone.utc))
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
BookSigning.objects.create(event_date=datetime.datetime(2008, 6, 3, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/apr/')
self.assertEqual(res.status_code, 200)
def test_date_list_order(self):
"""date_list should be sorted ascending in month view"""
_make_books(10, base_date=datetime.date(2011, 12, 25))
res = self.client.get('/dates/books/2011/dec/')
self.assertEqual(list(res.context['date_list']), list(sorted(res.context['date_list'])))
class WeekArchiveViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'generic_views.urls'
def test_week_view(self):
res = self.client.get('/dates/books/2008/week/39/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'tests:templates/book_archive_week.html')
self.assertEqual(res.context['book_list'][0], Book.objects.get(pubdate=datetime.date(2008, 10, 1)))
self.assertEqual(res.context['week'], datetime.date(2008, 9, 28))
# Since allow_empty=False, next/prev weeks must be valid
self.assertEqual(res.context['next_week'], None)
self.assertEqual(res.context['previous_week'], datetime.date(2006, 4, 30))
def test_week_view_allow_empty(self):
# allow_empty = False, empty week
res = self.client.get('/dates/books/2008/week/12/')
self.assertEqual(res.status_code, 404)
# allow_empty = True, empty month
res = self.client.get('/dates/books/2008/week/12/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [])
self.assertEqual(res.context['week'], datetime.date(2008, 3, 23))
# Since allow_empty=True, next/prev are allowed to be empty weeks
self.assertEqual(res.context['next_week'], datetime.date(2008, 3, 30))
self.assertEqual(res.context['previous_week'], datetime.date(2008, 3, 16))
# allow_empty but not allow_future: next_week should be empty
url = datetime.date.today().strftime('/dates/books/%Y/week/%U/allow_empty/').lower()
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_week'], None)
def test_week_view_allow_future(self):
# January 7th always falls in week 1, given Python's definition of week numbers
future = datetime.date(datetime.date.today().year + 1, 1, 7)
future_sunday = future - datetime.timedelta(days=(future.weekday() + 1) % 7)
b = Book.objects.create(name="The New New Testement", pages=600, pubdate=future)
res = self.client.get('/dates/books/%s/week/1/' % future.year)
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/%s/week/1/allow_future/' % future.year)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [b])
self.assertEqual(res.context['week'], future_sunday)
# Since allow_future = True but not allow_empty, next/prev are not
# allowed to be empty weeks
self.assertEqual(res.context['next_week'], None)
self.assertEqual(res.context['previous_week'], datetime.date(2008, 9, 28))
# allow_future, but not allow_empty, with a current week. So next
# should be in the future
res = self.client.get('/dates/books/2008/week/39/allow_future/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_week'], future_sunday)
self.assertEqual(res.context['previous_week'], datetime.date(2006, 4, 30))
def test_week_view_paginated(self):
week_start = datetime.date(2008, 9, 28)
week_end = week_start + datetime.timedelta(days=7)
res = self.client.get('/dates/books/2008/week/39/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__gte=week_start, pubdate__lt=week_end)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__gte=week_start, pubdate__lt=week_end)))
self.assertTemplateUsed(res, 'tests:templates/book_archive_week.html')
def test_week_view_invalid_pattern(self):
res = self.client.get('/dates/books/2007/week/no_week/')
self.assertEqual(res.status_code, 404)
def test_week_start_Monday(self):
# Regression for #14752
res = self.client.get('/dates/books/2008/week/39/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['week'], datetime.date(2008, 9, 28))
res = self.client.get('/dates/books/2008/week/39/monday/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['week'], datetime.date(2008, 9, 29))
def test_datetime_week_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/2008/week/13/')
self.assertEqual(res.status_code, 200)
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_week_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/week/13/')
self.assertEqual(res.status_code, 200)
class DayArchiveViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'generic_views.urls'
def test_day_view(self):
res = self.client.get('/dates/books/2008/oct/01/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'tests:templates/book_archive_day.html')
self.assertEqual(list(res.context['book_list']),
list(Book.objects.filter(pubdate=datetime.date(2008, 10, 1))))
self.assertEqual(res.context['day'], datetime.date(2008, 10, 1))
# Since allow_empty=False, next/prev days must be valid.
self.assertEqual(res.context['next_day'], None)
self.assertEqual(res.context['previous_day'], datetime.date(2006, 5, 1))
def test_day_view_allow_empty(self):
# allow_empty = False, empty month
res = self.client.get('/dates/books/2000/jan/1/')
self.assertEqual(res.status_code, 404)
# allow_empty = True, empty month
res = self.client.get('/dates/books/2000/jan/1/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [])
self.assertEqual(res.context['day'], datetime.date(2000, 1, 1))
# Since it's allow empty, next/prev are allowed to be empty months (#7164)
self.assertEqual(res.context['next_day'], datetime.date(2000, 1, 2))
self.assertEqual(res.context['previous_day'], datetime.date(1999, 12, 31))
# allow_empty but not allow_future: next_month should be empty (#7164)
url = datetime.date.today().strftime('/dates/books/%Y/%b/%d/allow_empty/').lower()
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_day'], None)
def test_day_view_allow_future(self):
future = (datetime.date.today() + datetime.timedelta(days=60))
urlbit = future.strftime('%Y/%b/%d').lower()
b = Book.objects.create(name="The New New Testement", pages=600, pubdate=future)
# allow_future = False, future month
res = self.client.get('/dates/books/%s/' % urlbit)
self.assertEqual(res.status_code, 404)
# allow_future = True, valid future month
res = self.client.get('/dates/books/%s/allow_future/' % urlbit)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [b])
self.assertEqual(res.context['day'], future)
# allow_future but not allow_empty, next/prev must be valid
self.assertEqual(res.context['next_day'], None)
self.assertEqual(res.context['previous_day'], datetime.date(2008, 10, 1))
# allow_future, but not allow_empty, with a current month.
res = self.client.get('/dates/books/2008/oct/01/allow_future/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_day'], future)
self.assertEqual(res.context['previous_day'], datetime.date(2006, 5, 1))
# allow_future for yesterday, next_day is today (#17192)
today = datetime.date.today()
yesterday = today - datetime.timedelta(days=1)
res = self.client.get('/dates/books/%s/allow_empty_and_future/'
% yesterday.strftime('%Y/%b/%d').lower())
self.assertEqual(res.context['next_day'], today)
def test_day_view_paginated(self):
res = self.client.get('/dates/books/2008/oct/1/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2008, pubdate__month=10, pubdate__day=1)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2008, pubdate__month=10, pubdate__day=1)))
self.assertTemplateUsed(res, 'tests:templates/book_archive_day.html')
def test_next_prev_context(self):
res = self.client.get('/dates/books/2008/oct/01/')
self.assertEqual(res.content, b"Archive for Oct. 1, 2008. Previous day is May 1, 2006")
def test_custom_month_format(self):
res = self.client.get('/dates/books/2008/10/01/')
self.assertEqual(res.status_code, 200)
def test_day_view_invalid_pattern(self):
res = self.client.get('/dates/books/2007/oct/no_day/')
self.assertEqual(res.status_code, 404)
def test_today_view(self):
res = self.client.get('/dates/books/today/')
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/today/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['day'], datetime.date.today())
def test_datetime_day_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/2008/apr/2/')
self.assertEqual(res.status_code, 200)
@requires_tz_support
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_day_view(self):
bs = BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/apr/2/')
self.assertEqual(res.status_code, 200)
# 2008-04-02T00:00:00+03:00 (beginning of day) > 2008-04-01T22:00:00+00:00 (book signing event date)
bs.event_date = datetime.datetime(2008, 4, 1, 22, 0, tzinfo=timezone.utc)
bs.save()
res = self.client.get('/dates/booksignings/2008/apr/2/')
self.assertEqual(res.status_code, 200)
# 2008-04-03T00:00:00+03:00 (end of day) > 2008-04-02T22:00:00+00:00 (book signing event date)
bs.event_date = datetime.datetime(2008, 4, 2, 22, 0, tzinfo=timezone.utc)
bs.save()
res = self.client.get('/dates/booksignings/2008/apr/2/')
self.assertEqual(res.status_code, 404)
class DateDetailViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'generic_views.urls'
def test_date_detail_by_pk(self):
res = self.client.get('/dates/books/2008/oct/01/1/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Book.objects.get(pk=1))
self.assertEqual(res.context['book'], Book.objects.get(pk=1))
self.assertTemplateUsed(res, 'tests:templates/book_detail.html')
def test_date_detail_by_slug(self):
res = self.client.get('/dates/books/2006/may/01/byslug/dreaming-in-code/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['book'], Book.objects.get(slug='dreaming-in-code'))
def test_date_detail_custom_month_format(self):
res = self.client.get('/dates/books/2008/10/01/1/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['book'], Book.objects.get(pk=1))
def test_date_detail_allow_future(self):
future = (datetime.date.today() + datetime.timedelta(days=60))
urlbit = future.strftime('%Y/%b/%d').lower()
b = Book.objects.create(name="The New New Testement", slug="new-new", pages=600, pubdate=future)
res = self.client.get('/dates/books/%s/new-new/' % urlbit)
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/%s/%s/allow_future/' % (urlbit, b.id))
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['book'], b)
self.assertTemplateUsed(res, 'tests:templates/book_detail.html')
def test_invalid_url(self):
self.assertRaises(AttributeError, self.client.get, "/dates/books/2008/oct/01/nopk/")
def test_get_object_custom_query(self):
"""
Ensure that custom queries are used when provided to
BaseDateDetailView.get_object()
Refs #16918.
"""
res = self.client.get(
'/dates/books/get_object_custom_query/2006/may/01/2/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Book.objects.get(pk=2))
self.assertEqual(res.context['book'], Book.objects.get(pk=2))
self.assertTemplateUsed(res, 'tests:templates/book_detail.html')
res = self.client.get(
'/dates/books/get_object_custom_query/2008/oct/01/1/')
self.assertEqual(res.status_code, 404)
def test_datetime_date_detail(self):
bs = BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/2008/apr/2/%d/' % bs.pk)
self.assertEqual(res.status_code, 200)
@requires_tz_support
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_date_detail(self):
bs = BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/apr/2/%d/' % bs.pk)
self.assertEqual(res.status_code, 200)
# 2008-04-02T00:00:00+03:00 (beginning of day) > 2008-04-01T22:00:00+00:00 (book signing event date)
bs.event_date = datetime.datetime(2008, 4, 1, 22, 0, tzinfo=timezone.utc)
bs.save()
res = self.client.get('/dates/booksignings/2008/apr/2/%d/' % bs.pk)
self.assertEqual(res.status_code, 200)
# 2008-04-03T00:00:00+03:00 (end of day) > 2008-04-02T22:00:00+00:00 (book signing event date)
bs.event_date = datetime.datetime(2008, 4, 2, 22, 0, tzinfo=timezone.utc)
bs.save()
res = self.client.get('/dates/booksignings/2008/apr/2/%d/' % bs.pk)
self.assertEqual(res.status_code, 404)
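# Illustrative sketch (added; not part of the original test module): the kind of
# class-based view and URL pattern these day-archive tests exercise. The class
# name and URL regex below are assumptions, not the project's actual urls.py.
#
# from django.views.generic.dates import DayArchiveView
#
# class BookDayArchive(DayArchiveView):
#     model = Book
#     date_field = 'pubdate'
#     allow_empty = False    # empty days 404, as asserted above
#     allow_future = False   # future days 404 unless explicitly allowed
#
# url(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/$',
#     BookDayArchive.as_view())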
|
mit
| 4,528,030,635,448,044,000
| 48.608
| 140
| 0.652959
| false
| 3.480189
| true
| false
| false
|
evansd/django-envsettings
|
envsettings/cache.py
|
1
|
4531
|
from .base import URLSettingsBase, is_importable
class CacheSettings(URLSettingsBase):
REDIS_CONFIG = {'BACKEND': 'django_redis.cache.RedisCache', 'OPTIONS': {}}
CONFIG = {
'locmem': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'},
'file': {'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache'},
# Memcached backends are auto-selected based on what packages are installed
'memcached': {'BACKEND': None},
'memcached-binary': {'BACKEND': None, 'BINARY': True},
'redis': REDIS_CONFIG,
'rediss': REDIS_CONFIG
}
def handle_file_url(self, parsed_url, config):
if parsed_url.path == '/dev/null':
config['BACKEND'] = 'django.core.cache.backends.dummy.DummyCache'
else:
config['LOCATION'] = parsed_url.path
return config
def handle_locmem_url(self, parsed_url, config):
config['LOCATION'] = '{0}{1}'.format(
parsed_url.hostname or '', parsed_url.path or '')
return config
def handle_redis_url(self, parsed_url, config):
if not parsed_url.hostname:
parsed_url = parsed_url._replace(scheme='unix')
config['LOCATION'] = parsed_url.geturl()
return config
def handle_rediss_url(self, parsed_url, config):
return self.handle_redis_url(parsed_url, config)
def handle_memcached_url(self, parsed_url, config):
if parsed_url.hostname:
netloc = parsed_url.netloc.split('@')[-1]
if ',' in netloc:
location = netloc.split(',')
else:
location = '{}:{}'.format(
parsed_url.hostname,
parsed_url.port or 11211)
else:
location = 'unix:{}'.format(parsed_url.path)
config['LOCATION'] = location
if parsed_url.username:
config['USERNAME'] = parsed_url.username
if parsed_url.password:
config['PASSWORD'] = parsed_url.password
# Only auto-select backend if one hasn't been explicitly configured
if not config['BACKEND']:
self.set_memcached_backend(config)
return config
def handle_memcached_binary_url(self, parsed_url, config):
return self.handle_memcached_url(parsed_url, config)
def set_memcached_backend(self, config):
"""
Select the most suitable Memcached backend based on the config and
on what's installed
"""
# This is the preferred backend as it is the fastest and most fully
# featured, so we use this by default
config['BACKEND'] = 'django_pylibmc.memcached.PyLibMCCache'
if is_importable(config['BACKEND']):
return
# Otherwise, binary connections can use this pure Python implementation
if config.get('BINARY') and is_importable('django_bmemcached'):
config['BACKEND'] = 'django_bmemcached.memcached.BMemcached'
return
# For text-based connections without any authentication we can fall
# back to Django's core backends if the supporting libraries are
# installed
if not any([config.get(key) for key in ('BINARY', 'USERNAME', 'PASSWORD')]):
if is_importable('pylibmc'):
config['BACKEND'] = \
'django.core.cache.backends.memcached.PyLibMCCache'
elif is_importable('memcached'):
config['BACKEND'] = \
'django.core.cache.backends.memcached.MemcachedCache'
def auto_config_memcachier(self, env, prefix='MEMCACHIER'):
try:
servers, username, password = [
env[prefix + key] for key in [
'_SERVERS', '_USERNAME', '_PASSWORD']]
except KeyError:
return
return 'memcached-binary://{username}:{password}@{servers}/'.format(
servers=servers, username=username, password=password)
def auto_config_memcachedcloud(self, env):
return self.auto_config_memcachier(env, prefix='MEMCACHEDCLOUD')
def auto_config_redis_url(self, env):
return env.get('REDIS_URL')
def auto_config_redistogo(self, env):
return env.get('REDISTOGO_URL')
def auto_config_rediscloud(self, env):
return env.get('REDISCLOUD_URL')
def auto_config_openredis(self, env):
return env.get('OPENREDIS_URL')
def auto_config_redisgreen(self, env):
return env.get('REDISGREEN_URL')
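# Illustrative sketch (added; not part of the original module): the URL forms the
# handlers above understand and the cache config they derive from them. Hostnames
# and credentials are made-up placeholders.
#
#   locmem://my-namespace              -> LOCATION 'my-namespace'
#   file:///var/tmp/django_cache       -> LOCATION '/var/tmp/django_cache'
#   file:///dev/null                   -> DummyCache backend
#   redis://redis.example.com:6379/0   -> LOCATION 'redis://redis.example.com:6379/0'
#   memcached://user:pw@mc.example.com:11211/
#       -> LOCATION 'mc.example.com:11211', USERNAME/PASSWORD set, and a BACKEND
#          auto-selected by set_memcached_backend()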
|
mit
| 6,058,745,335,508,903,000
| 38.745614
| 84
| 0.601854
| false
| 4.134124
| true
| false
| false
|
akimtke/arke
|
textserver/main.py
|
1
|
2812
|
# -*- coding: utf-8 -*-
import pyjsonrpc
from gsmmodem.modem import GsmModem, SentSms, Sms
from gsmmodem.exceptions import TimeoutException, PinRequiredError, IncorrectPinError
def text(number, message, key):
if key.strip() == '9703BB8D5A':
print "Creating modem instance"
modem = GsmModem('/dev/ttyUSB0', 9600)
try:
print "Connecting modem"
modem.connect()
except PinRequiredError:
print "Pin required"
try:
print "Waiting for Network coverage info"
modem.waitForNetworkCoverage(5)
except TimeoutException:
print "Signal strength not strong enough"
return "No signal"
else:
try:
print "Sending %s to %s" % (message, number)
sms = modem.sendSms(number, message)
except TimeoutException:
print "Failed to send message"
return 'Error encountered'
print "Closing modem"
modem.close()
return True
else:
return 'Key is not correct'
def getUnreadText(key):
if key.strip() == '9703BB8D5A':
modem = GsmModem('/dev/ttyUSB0', 9600)
try:
print "Connecting mode"
modem.connect()
except:
return "Error connecting"
try:
messages = modem.listStoredSms(status=Sms.STATUS_RECEIVED_UNREAD)
except Exception as e:
return str(e)
modem.close()
retString = ""
print "Got %d messages" % len(messages)
for message in messages:
retString = retString + "%s : %s" % (message.number, message.text)
return retString
else:
return "Incorrect key"
def getAllText(key):
if key.strip() == '9703BB8D5A':
modem = GsmModem('/dev/ttyUSB0', 9600)
try:
print "Connecting modem"
modem.connect()
except Exception as e:
return str(e)
try:
messages = modem.listStoredSms()
except Exception as e:
return str(e)
modem.close()
retString = ""
print "Got %d messages" % len(messages)
for message in messages:
retString = retString + "%s : %s" % (message.number, message.text) + "\n"
return retString
else:
return "Incorrect key"
class RequestHandler(pyjsonrpc.HttpRequestHandler):
methods = {
"text": text,
"getUnreadText": getUnreadText,
"getAllText": getAllText
}
http_server = pyjsonrpc.ThreadingHttpServer(
server_address = ('192.168.0.20', 8081),
RequestHandlerClass = RequestHandler
)
print "Starting HTTP Server..."
http_server.serve_forever()
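# Illustrative client sketch (added; not part of the original script): calling the
# JSON-RPC methods registered above from another machine. The phone number is a
# made-up placeholder; the key mirrors the one checked by the handlers.
#
# client = pyjsonrpc.HttpClient(url="http://192.168.0.20:8081")
# print client.call("text", "09170000000", "hello from pyjsonrpc", "9703BB8D5A")
# print client.call("getUnreadText", "9703BB8D5A")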
|
gpl-3.0
| 7,310,618,908,439,127,000
| 26.841584
| 85
| 0.562945
| false
| 3.988652
| false
| false
| false
|
elifesciences/elife-metrics
|
src/article_metrics/ga_metrics/elife_v5.py
|
1
|
1848
|
"elife_v5, the addition of /executable paths"
from . import elife_v1
from article_metrics.utils import lfilter
import re
import logging
LOG = logging.getLogger(__name__)
event_counts_query = elife_v1.event_counts_query
event_counts = elife_v1.event_counts
# views counting
def path_counts_query(table_id, from_date, to_date):
"returns a query specific to this era that we can send to Google Analytics"
# use the v1 query as a template
new_query = elife_v1.path_counts_query(table_id, from_date, to_date)
new_query['filters'] = ','.join([
# ga:pagePath=~^/articles/50101$
r'ga:pagePath=~^/articles/[0-9]+$', # note: GA doesn't support {n,m} syntax ...
# ga:pagePath=~^/articles/50101/executable$
r'ga:pagePath=~^/articles/[0-9]+/executable$',
])
return new_query
# ...python *does* support {n,m} though, so we can filter bad article IDs in post
# parse the article ID from a path that may include an optional '/executable'
REGEX = r"/articles/(?P<artid>\d{1,5})(/executable)?$"
PATH_RE = re.compile(REGEX, re.IGNORECASE)
def path_count(pair):
"given a pair of (path, count), returns a triple of (art-id, art-type, count)"
path, count = pair
regex_obj = re.match(PATH_RE, path.lower())
if not regex_obj:
LOG.debug("skpping unhandled path %s", pair)
return
# "/articles/12345/executable" => {'artid': 12345}
data = regex_obj.groupdict()
count_type = 'full' # vs 'abstract' or 'digest' from previous eras
return data['artid'], count_type, int(count)
def path_counts(path_count_pairs):
"""takes raw path data from GA and groups by article, returning a
list of (artid, count-type, count)"""
path_count_triples = lfilter(None, [path_count(pair) for pair in path_count_pairs])
return elife_v1.group_results(path_count_triples)
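# Illustrative examples (added; not part of the original module) of what
# path_count() yields for raw GA (path, count) pairs:
#
#   path_count(('/articles/50101', '3'))            -> ('50101', 'full', 3)
#   path_count(('/articles/50101/executable', '7')) -> ('50101', 'full', 7)
#   path_count(('/articles/123456', '1'))           -> None (more than 5 digits,
#                                                     rejected by PATH_RE)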
|
gpl-3.0
| -3,663,562,856,745,356,300
| 36.714286
| 87
| 0.671537
| false
| 3.175258
| false
| false
| false
|
credativ/gofer
|
src/gofer/messaging/consumer.py
|
1
|
4309
|
# Copyright (c) 2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
from time import sleep
from logging import getLogger
from gofer.common import Thread, released
from gofer.messaging.model import InvalidDocument
from gofer.messaging.adapter.model import Reader
log = getLogger(__name__)
class ConsumerThread(Thread):
"""
An AMQP (abstract) consumer.
"""
def __init__(self, node, url, wait=3):
"""
:param node: An AMQP queue.
:type node: gofer.messaging.adapter.model.Node
:param url: The broker URL.
:type url: str
:param wait: Number of seconds to wait for a message.
:type wait: int
"""
Thread.__init__(self, name=node.name)
self.url = url
self.node = node
self.wait = wait
self.authenticator = None
self.reader = None
self.setDaemon(True)
def shutdown(self):
"""
Shutdown the consumer.
"""
self.abort()
@released
def run(self):
"""
Main consumer loop.
"""
self.reader = Reader(self.node, self.url)
self.reader.authenticator = self.authenticator
self.open()
try:
while not Thread.aborted():
self.read()
finally:
self.close()
def open(self):
"""
Open the reader.
"""
while not Thread.aborted():
try:
self.reader.open()
break
except Exception:
log.exception(self.getName())
sleep(30)
def close(self):
"""
Close the reader.
"""
try:
self.reader.close()
except Exception:
log.exception(self.getName())
def read(self):
"""
Read and process incoming documents.
"""
try:
wait = self.wait
reader = self.reader
message, document = reader.next(wait)
if message is None:
# wait expired
return
log.debug('{%s} read: %s', self.getName(), document)
self.dispatch(document)
message.ack()
except InvalidDocument, invalid:
self.rejected(invalid.code, invalid.description, invalid.document, invalid.details)
except Exception:
log.exception(self.getName())
sleep(60)
self.close()
self.open()
def rejected(self, code, description, document, details):
"""
Called to process the received (invalid) document.
This method is intended to be overridden by subclasses.
:param code: The rejection code.
:type code: str
:param description: rejection description
:type description: str
:param document: The received *json* document.
:type document: str
:param details: The explanation.
:type details: str
"""
log.debug('rejected: %s', document)
def dispatch(self, document):
"""
Called to process the received document.
This method is intended to be overridden by subclasses.
:param document: The received *json* document.
:type document: str
"""
log.debug('dispatched: %s', document)
class Consumer(ConsumerThread):
"""
An AMQP consumer.
Thread used to consume messages from the specified queue.
On receipt, each message is used to build a document
and passed to dispatch().
"""
def __init__(self, node, url=None):
"""
:param node: The AMQP node.
:type node: gofer.messaging.adapter.model.Node
:param url: The broker URL.
:type url: str
"""
super(Consumer, self).__init__(node, url)
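# Illustrative sketch (added; not part of the original module): a minimal Consumer
# subclass that overrides dispatch(). The node and broker URL are placeholders.
#
# class Printer(Consumer):
#     def dispatch(self, document):
#         log.info('got document: %s', document)
#
# consumer = Printer(node, url='amqp://localhost')
# consumer.start()       # Thread.start(); run() opens the reader and loops
# ...
# consumer.shutdown()    # aborts the loop; close() is called on exit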
|
lgpl-2.1
| 3,765,145,563,174,778,400
| 28.114865
| 95
| 0.576468
| false
| 4.40593
| false
| false
| false
|
pyslackers/sirbot-slack
|
sirbot/slack/store/channel.py
|
1
|
3727
|
import json
import logging
import time
from sirbot.core import registry
from .store import SlackStore, SlackChannelItem
from .. import database
logger = logging.getLogger(__name__)
class Channel(SlackChannelItem):
"""
Class representing a slack channel.
"""
def __init__(self, id_, raw=None, last_update=None):
"""
:param id_: id_ of the channel
"""
super().__init__(id_, raw, last_update)
@property
def member(self):
return self._raw.get('is_member', False)
@member.setter
def member(self, _):
raise NotImplementedError
class ChannelStore(SlackStore):
"""
Store for the slack channels
"""
def __init__(self, client, refresh=3600):
super().__init__(client, refresh)
async def all(self):
channels = list()
channels_raw = await self._client.get_channels()
for channel_raw in channels_raw:
channel = await self.get(channel_raw['id'])
channels.append(channel)
return channels
async def get(self, id_=None, name=None, fetch=False):
"""
Return a Channel from the Channel Manager
:param id_: id of the channel
:param name: name of the channel
:param fetch: query the slack api for updated channel info
:return: Channel
"""
if not id_ and not name:
raise SyntaxError('id_ or name must be supplied')
db = registry.get('database')
if name:
data = await database.__dict__[db.type].channel.find_by_name(db,
name)
else:
data = await database.__dict__[db.type].channel.find_by_id(db, id_)
if data and (
fetch or data['last_update'] < (time.time() - self._refresh)):
channel = await self._query_by_id(data['id'])
if channel:
await self._add(channel, db=db)
else:
await self._delete(data['id'], db=db)  # _delete expects the channel id
elif data:
channel = Channel(
id_=data['id'],
raw=json.loads(data['raw']),
last_update=data['last_update']
)
else:
logger.debug('Channel "%s" not found in the channel store. '
'Querying the Slack API', (id_ or name))
if id_:
channel = await self._query_by_id(id_)
else:
channel = await self._query_by_name(name)
if channel:
await self._add(channel, db=db)
return channel
async def _add(self, channel, db=None):
"""
Add a channel to the channel store
"""
if not db:
db = registry.get('database')
await database.__dict__[db.type].channel.add(db, channel)
await db.commit()
async def _delete(self, id_, db=None):
"""
Delete a channel from the channel store
:param id_: id of the channel
:return: None
"""
if not db:
db = registry.get('database')
await database.__dict__[db.type].channel.delete(db, id_)
await db.commit()
async def _query_by_id(self, id_):
raw = await self._client.get_channel(id_)
channel = Channel(
id_=id_,
raw=raw,
last_update=time.time(),
)
return channel
async def _query_by_name(self, name):
channels = await self._client.get_channels()
for channel in channels:
if channel['name'] == name:
c = await self.get(id_=channel['id'])
return c
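# Illustrative usage sketch (added; not part of the original module). `client` is
# whatever slack client the surrounding plugin wires in; it must expose the
# get_channel()/get_channels() coroutines used above.
#
# store = ChannelStore(client, refresh=3600)
# channel = await store.get(name='general')   # DB first, Slack API on miss/stale
# is_member = channel.member                  # read from the cached raw payload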
|
mit
| -1,458,524,759,698,175,000
| 26.007246
| 79
| 0.523209
| false
| 4.298731
| false
| false
| false
|
pjh/vm-analyze
|
app_scripts/app_python.py
|
1
|
5200
|
# Virtual memory analysis scripts.
# Developed 2012-2014 by Peter Hornyack, pjh@cs.washington.edu
# Copyright (c) 2012-2014 Peter Hornyack and University of Washington
# Python script to run another Python script as a test application.
#
# Build + setup instructions: do these once / occasionally by hand, not
# done automatically by this script.
# Ensure that py_version is in your path.
# ...
# Run the firefox app script first, then copy its trace-events-full
# file and its target_pids file to match the locations specified
# by py_inputpids and py_inputfile below.
#
#
# Note: this script uses timeout features that were added to Python 3.3
# (available in Ubuntu 13.04) - if this is a problem, they should be
# fairly easy to eliminate from the code, just search for "timeout".
from app_scripts.app_to_run_class import *
from trace.run_common import *
from trace.traceinfo_class import traceinfo
py_version = 'python3.3'
py_scriptname = "{}/analyze_trace.py".format(scripts_dir)
# Run the analyze_trace.py script in the vm-analyze repository
# directly; don't try to copy it to another location and run
# it from there, as it makes the module imports / dependencies
# too hard to deal with.
py_app_dir = "{}/pythonapp".format(appscripts_dir)
py_inputpids = "{}/ffox-target-pids".format(py_app_dir)
py_inputfile = "{}/ffox-trace-events-full".format(py_app_dir)
py_outputdir = py_app_dir
py_cmd = ("{} {} -a ffox -p {} {} {}").format(py_version,
py_scriptname, py_inputpids, py_inputfile, py_outputdir)
# Enable (or leave enabled) options that will require more memory:
# physical page events.
# As of 20140703, running analyze_trace.py on a 420 MB trace-events-full
# from a firefox run (visiting 30 websites) with Rss events enabled
# takes just over five minutes, with ~600 MB virtual and ~450 MB physical
# memory used during the analysis.
poll_period = 10
##############################################################################
# Tracing should already be activated when this method is called - it
# will call trace_wait() while the python script runs.
# Returns a tuple:
# (True on success, False on error;
# pid of the python process on success)
def run_py_script(outputdir, py_stdout, py_stderr, tracer):
tag = 'run_py_script'
# http://docs.python.org/3.2/library/subprocess.html
args = shlex.split(py_cmd)
print_debug(tag, ("executing py_cmd=\"{}\"").format(py_cmd))
py_p = subprocess.Popen(args, stdout=py_stdout, stderr=py_stderr)
if not py_p:
print_error(tag, ("subprocess.Popen returned None; "
"py_cmd={}").format(py_cmd))
return (False, -1)
if not tracer.perf_on():
print_error(tag, ("perf_on() failed, but continuing"))
prefix = 'py'
retcode = tracer.trace_wait(py_p, poll_period, prefix)
tracer.perf_off()
if retcode != "success":
# Assume that trace buffer filling up is an error for this app.
print_error(tag, ("trace_wait() returned {}, either due to process "
"error or trace error; py_p.returncode is {}").format(
retcode, py_p.returncode))
return (False, -1)
elif py_p.returncode is None:
print_error(tag, ("py process' returncode not set?!?").format())
return (False, -1)
elif py_p.returncode != 0:
print_error(tag, ("py process returned error {}").format(
py_p.returncode))
return (False, -1)
print_debug(tag, ("py process exited successfully, output is "
"in directory {}").format(outputdir))
return (True, py_p.pid)
def py_init(outputdir):
tag = 'py_init'
py_stdout_fname = "{}/python-stdout".format(outputdir)
py_stderr_fname = "{}/python-stderr".format(outputdir)
py_stdout = open(py_stdout_fname, 'w')
py_stderr = open(py_stderr_fname, 'w')
return (py_stdout, py_stderr)
def py_cleanup(files_to_close):
for f in files_to_close:
f.close()
return
# Returns: a target_pids list containing the top-level pid of the
# python process, or an empty list on error.
def py_exec(outputdir):
tag = 'py_exec'
target_pids = []
(py_stdout, py_stderr) = py_init(outputdir)
tracer = traceinfo('python')
success = tracer.trace_on(outputdir, "starting python")
if not success:
print_error(tag, ("trace_on failed, returning [] now").format())
py_cleanup([py_stdout, py_stderr])
return []
(success, py_pid) = run_py_script(outputdir, py_stdout, py_stderr,
tracer)
if success and py_pid > 1:
target_pids.append(py_pid)
print_debug(tag, ("run_py_script() successful, target_pids: "
"{}").format(target_pids))
else:
print_error(tag, ("run_py_script() returned {} and {}; will "
"return empty target_pids list").format(success, py_pid))
(tracesuccess, buffer_full) = tracer.trace_off(
descr="python done".format())
if not tracesuccess or buffer_full:
print_error(tag, ("trace buffer filled up before "
"tracing turned off - considering this an error "
"here, but echo {} > target_pids file to analyze "
"trace anyway").format(py_pid))
success = False
target_pids = []
py_cleanup([py_stdout, py_stderr])
return target_pids
# First arg is "appname" member: used to construct output directory.
python_app = app_to_run('python', py_exec)
if __name__ == '__main__':
print_error_exit("not an executable module")
|
bsd-3-clause
| 8,262,389,718,545,666,000
| 33.666667
| 78
| 0.688077
| false
| 3.128761
| false
| false
| false
|
BBN-Q/Quince
|
quince/node.py
|
1
|
24377
|
# coding: utf-8
# Raytheon BBN Technologies 2016
# Contributors: Graham Rowlands
#
# This file contains the node descriptions
from qtpy.QtGui import *
from qtpy.QtCore import *
from qtpy.QtWidgets import *
from .wire import *
class Node(QGraphicsRectItem):
"""docstring for Node"""
def __init__(self, name, scene, parent=None):
super(Node, self).__init__(parent=parent)
self.name = name
self.scene = scene
self.setFlag(QGraphicsItem.ItemIsMovable)
self.setFlag(QGraphicsItem.ItemIsSelectable)
self.setFlag(QGraphicsItem.ItemSendsGeometryChanges)
self.outputs = {}
self.inputs = {}
self.allowed_destinations = {}
self.parameters = {}
self.parameter_order = {}
self.collapsed = False
self.bg_color = self.default_bg_color = QColor(240,240,240,235)
self.edge_color = self.default_edge_color = QColor(200,200,200)
self.edge_thick = 0.75
self.setRect(0,0,100,30)
# Title bar
self.title_bar = QGraphicsRectItem(parent=self)
self.title_bar.setRect(0,0,100,20)
self.title_color = self.default_title_color = QColor(80,80,100)
self.label = TitleText(self.name, parent=self)
self.label.setDefaultTextColor(Qt.white)
# Glossy flair
shiny_part = QGraphicsPolygonItem(QPolygonF([QPointF(0,0), QPointF(120,0), QPointF(0,8)]),
parent=self)
shiny_part.setBrush(QBrush(QColor(200,200,250,50)))
shiny_part.setPen(QPen(Qt.NoPen))
# Enabled by default
self.enabled = True
# For auspex interoperability
self.type = None
self.is_instrument = False
self.auspex_object = None
# Any additional json we should retain
self.base_params = None
# Dividing line and collapse button
self.divider = QGraphicsLineItem(20, 0, self.rect().width()-5, 0, self)
self.collapse_box = CollapseBox(parent=self)
self.collapse_box.setX(10)
# Make sure things are properly sized
self.min_height = 30.0
self.min_width = 120.0
self.update_min_width()
# if self.label.boundingRect().topRight().x() > 120:
# self.min_width = self.label.boundingRect().topRight().x()+20
# self.setRect(0,0,self.label.boundingRect().topRight().x()+20,30)
# else:
# self.min_width = 120.0
# Resize Handle
self.resize_handle = ResizeHandle(parent=self)
self.resize_handle.setPos(self.rect().width()-8, self.rect().height()-8)
# Disable box
self.disable_box = None
# Synchronizing parameters
self.changing = False
# Set up hovering
self.setAcceptHoverEvents(True)
self.update_screen(self.scene.window.devicePixelRatio())
def update_screen(self, pixel_ratio):
if pixel_ratio < 2:
# Render a nice Drop Shadow
shadow = QGraphicsDropShadowEffect()
shadow.setBlurRadius(18.0)
shadow.setOffset(0.0, 10.0)
shadow.setColor(QColor("#99121212"))
self.setGraphicsEffect(shadow)
@property
def enabled(self):
return self._enabled
@enabled.setter
def enabled(self, value):
self._enabled = value
if value:
self.bg_color = self.default_bg_color
self.title_color = QColor(80,80,100)
else:
self.bg_color = QColor(140,140,140)
self.title_color = QColor(100,100,100)
self.update()
def hoverEnterEvent(self, event):
self.prev_edge_color = self.edge_color
self.prev_edge_thick = self.edge_thick
self.edge_color = QColor(247,247,247)
self.edge_thick = 1.5
self.update()
def hoverLeaveEvent(self, event):
self.edge_color = self.prev_edge_color
self.edge_thick = self.prev_edge_thick
self.update()
def update_min_width(self):
widths = [p.label.boundingRect().topRight().x() for p in self.parameters.values()]
widths.extend([o.label.boundingRect().topRight().x() for o in self.outputs.values()])
widths.extend([i.label.boundingRect().topRight().x() for i in self.inputs.values()])
widths.append(self.label.boundingRect().topRight().x())
self.min_width = max(widths)+20
if self.min_width < 120:
self.min_width = 120.0
self.itemResize(QPointF(self.min_width - self.rect().width(),0.0))
def value_changed(self, name):
# Update the sweep parameters accordingly
if self.name == "Sweep":
stop = self.parameters['Stop'].value()
start = self.parameters['Start'].value()
incr = self.parameters['Incr.'].value()
steps = self.parameters['Steps'].value()
if name == "Incr.":
if incr != 0.0:
steps = int(float(stop-start)/float(incr))
self.parameters['Steps'].set_value(steps if steps>0 else 1)
elif name == "Steps":
self.parameters['Incr.'].set_value((stop-start)/steps)
else:
self.parameters['Incr.'].set_value((stop-start)/steps)
self.changing = False
def add_output(self, connector):
connector.setParentItem(self)
connector.parent = self
connector.setPos(self.rect().width(),30+15*(len(self.outputs)+len(self.inputs)))
self.outputs[connector.name] = connector
self.change_collapsed_state(self.collapsed) # Just for resizing in this case
def add_input(self, connector):
connector.setParentItem(self)
connector.parent = self
connector.setPos(0,30+15*(len(self.inputs)+len(self.outputs)))
self.inputs[connector.name] = connector
self.change_collapsed_state(self.collapsed) # Just for resizing in this case
def add_parameter(self, param):
param.setParentItem(self)
param.parent = self
self.parameters[param.name] = param
self.parameter_order[len(self.parameter_order)] = param.name
self.change_collapsed_state(self.collapsed) # Just for resizing in this case
def change_collapsed_state(self, collapsed):
self.collapsed = collapsed
self.collapse_box.setRotation(0.0 if self.collapsed else 90.0)
# Update the positions
pos = 32+15*(len(self.inputs)+len(self.outputs))
if len(self.parameters) > 0:
self.divider.setY(pos)
self.collapse_box.setY(pos)
self.divider.setVisible(True)
self.collapse_box.setVisible(True)
pos += 10
else:
self.divider.setVisible(False)
self.collapse_box.setVisible(False)
for i in range(len(self.parameter_order)):
# We completely hide parameters without inputs
if not self.parameters[self.parameter_order[i]].has_input:
if self.collapsed:
self.parameters[self.parameter_order[i]].setVisible(False)
else:
self.parameters[self.parameter_order[i]].setPos(0, pos)
pos += self.parameters[self.parameter_order[i]].height
self.parameters[self.parameter_order[i]].setVisible(True)
else:
self.parameters[self.parameter_order[i]].setVisible(True)
self.parameters[self.parameter_order[i]].setPos(0, pos)
self.parameters[self.parameter_order[i]].set_collapsed(self.collapsed)
if self.collapsed:
pos += self.parameters[self.parameter_order[i]].height_collapsed
else:
pos += self.parameters[self.parameter_order[i]].height
self.setRect(0,0,self.rect().width(), pos)
self.min_height = pos
self.update_min_width()
self.itemResize(QPointF(0.0,0.0))
for k, v in self.parameters.items():
for w in v.wires_in:
w.set_end(v.scenePos())
def update_fields_from_connector(self):
# This is peculiar to the "Sweep Nodes"
wires_out = self.outputs['Swept Param.'].wires_out
if len(wires_out) > 0:
wire_end = wires_out[0].end_obj
self.parameters['Start'].datatype = wire_end.value_box.datatype
self.parameters['Start'].value_box.datatype = wire_end.value_box.datatype
self.parameters['Start'].value_box.min_value = wire_end.value_box.min_value
self.parameters['Start'].value_box.max_value = wire_end.value_box.max_value
self.parameters['Start'].value_box.increment = wire_end.value_box.increment
self.parameters['Start'].value_box.snap = wire_end.value_box.snap
self.parameters['Start'].value_box.set_value(self.parameters['Start'].value())
self.parameters['Stop'].datatype = wire_end.value_box.datatype
self.parameters['Stop'].value_box.datatype = wire_end.value_box.datatype
self.parameters['Stop'].value_box.min_value = wire_end.value_box.min_value
self.parameters['Stop'].value_box.max_value = wire_end.value_box.max_value
self.parameters['Stop'].value_box.increment = wire_end.value_box.increment
self.parameters['Stop'].value_box.snap = wire_end.value_box.snap
self.parameters['Stop'].value_box.set_value(self.parameters['Stop'].value())
self.parameters['Incr.'].datatype = wire_end.value_box.datatype
self.parameters['Incr.'].value_box.datatype = wire_end.value_box.datatype
self.parameters['Incr.'].value_box.min_value = -2*abs(wire_end.value_box.max_value)
self.parameters['Incr.'].value_box.max_value = 2*abs(wire_end.value_box.max_value)
self.parameters['Incr.'].value_box.increment = wire_end.value_box.increment
self.parameters['Incr.'].value_box.snap = wire_end.value_box.snap
self.parameters['Incr.'].value_box.set_value(self.parameters['Incr.'].value())
def update_parameters_from(self, other_node):
# Make sure they are of the same type
if other_node.name == self.name:
for k, v in other_node.parameters.items():
self.parameters[k].set_value(v.value())
def itemChange(self, change, value):
if change == QGraphicsItem.ItemPositionChange:
for k, v in self.outputs.items():
v.setX(self.rect().width())
for w in v.wires_out:
w.set_start(v.pos()+value)
for k, v in self.inputs.items():
for w in v.wires_in:
w.set_end(v.pos()+value)
for k, v in self.parameters.items():
for w in v.wires_in:
w.set_end(v.pos()+value)
elif change == QGraphicsItem.ItemSelectedChange:
if value:
self.edge_color = QColor(247,217,17)
self.edge_thick = 1.25
self.title_color = QColor(110,110,80)
self.prev_edge_color = self.edge_color
self.prev_edge_thick = self.edge_thick
else:
self.edge_color = self.default_edge_color
self.edge_thick = 0.75
self.title_color = self.default_title_color
return QGraphicsRectItem.itemChange(self, change, value)
def itemResize(self, delta):
# Keep track of actual change
actual_delta = QPointF(0,0)
r = self.rect()
if r.width()+delta.x() >= self.min_width:
r.adjust(0, 0, delta.x(), 0)
actual_delta.setX(delta.x())
if r.height()+delta.y() >= self.min_height:
r.adjust(0, 0, 0, delta.y())
actual_delta.setY(delta.y())
self.setRect(r)
delta.setY(0.0)
if hasattr(self, 'resize_handle'):
self.resize_handle.setPos(self.rect().width()-8, self.rect().height()-8)
if hasattr(self, 'title_bar'):
self.title_bar.setRect(0,0,self.rect().width(),20)
conn_delta = actual_delta.toPoint()
conn_delta.setY(0.0)
self.divider.setLine(20, 0, self.rect().width()-5, 0)
# Move the outputs
for k, v in self.outputs.items():
v.setX(self.rect().width())
for w in v.wires_out:
w.set_start(v.scenePos()+conn_delta)
# Resize the parameters
for k, v in self.parameters.items():
v.set_box_width(self.rect().width())
return actual_delta
def create_wire(self, parent):
return Wire(parent)
def paint(self, painter, options, widget):
painter.setPen(QPen(self.edge_color, self.edge_thick))
self.title_bar.setPen(QPen(self.edge_color, self.edge_thick))
self.title_bar.setBrush(QBrush(self.title_color))
painter.setBrush(QBrush(self.bg_color))
painter.drawRoundedRect(self.rect(), 5.0, 5.0)
def dict_repr(self):
# First spit out any json that can't be modified in Quince.
# Base_params holds any parameters that aren't flagged as
# being "quince_parameters" in the auspex filters.
if self.base_params is not None:
dict_repr = dict(self.base_params)
else:
dict_repr = {}
# Now update and of the parameters that are set within
# Quince.
for name, param in self.parameters.items():
dict_repr[name] = param.value()
# Find the name of the source connectors (assuming one connection)
# The default connector name is "source", in which case data_source
# is just the name of the node. Otherwise, we return a data_source
# of the form "node_name:connector_name", e.g.
# "averager:partial_averages"
# If we have multiple inputs, they are simply separated by commas
# and some arbitrary and optional amount of whitespace.
if ('sink' in self.inputs.keys()) and len(self.inputs['sink'].wires_in) > 0:
connectors = [w.start_obj for w in self.inputs['sink'].wires_in]
source_text = []
for conn in connectors:
node_name = conn.parent.label.toPlainText()
conn_name = conn.name
if conn_name == "source":
source_text.append(node_name)
else:
source_text.append(node_name + " " + conn_name)
dict_repr['source'] = ", ".join(source_text)
else:
dict_repr['source'] = ""
# data_source not applicable for digitizers
if self.is_instrument:
dict_repr.pop('source')
dict_repr['enabled'] = self.enabled
dict_repr['type'] = self.type
return dict_repr
class TitleText(QGraphicsTextItem):
'''QGraphicsTextItem with textChanged() signal.'''
textChanged = Signal(str)
def __init__(self, text, parent=None):
super(TitleText, self).__init__(text, parent)
self.setTextInteractionFlags(Qt.TextEditorInteraction)
self.setFlag(QGraphicsItem.ItemIsFocusable)
self._value = text
self.parent = parent
def setPlainText(self, text):
if hasattr(self.scene(), 'items'):
nodes = [i for i in self.scene().items() if isinstance(i, Node)]
nodes.remove(self.parent)
node_names = [n.label.toPlainText() for n in nodes]
if text in node_names:
self.scene().window.set_status("Node name already exists")
else:
# self.scene().inspector_change_name(self._value, text)
self._value = text
self.textChanged.emit(self.toPlainText())
else:
self._value = text
super(TitleText, self).setPlainText(self._value)
def focusOutEvent(self, event):
self.setPlainText(self.toPlainText())
super(TitleText, self).focusOutEvent(event)
self.clearFocus()
def focusInEvent(self, event):
super(TitleText, self).focusInEvent(event)
self.setFocus()
def keyPressEvent(self, event):
if event.key() == Qt.Key_Return or event.key() == Qt.Key_Enter:
c = self.textCursor()
c.clearSelection()
self.setTextCursor(c)
self.clearFocus()
else:
return super(TitleText, self).keyPressEvent(event)
class ResizeHandle(QGraphicsRectItem):
"""docstring for ResizeHandle"""
def __init__(self, parent=None):
super(ResizeHandle, self).__init__()
self.dragging = False
self.parent = parent
self.drag_start = None
self.setParentItem(parent)
self.setRect(0,0,5,5)
self.setBrush(QColor(20,20,20))
def mousePressEvent(self, event):
self.dragging = True
self.drag_start = event.scenePos()
def mouseMoveEvent(self, event):
if self.dragging:
delta = event.scenePos() - self.drag_start
actual_delta = self.parent.itemResize(delta)
self.drag_start = self.drag_start + actual_delta
def mouseReleaseEvent(self, event):
self.dragging = False
class CollapseBox(QGraphicsItem):
"""docstring for CollapseBox"""
def __init__(self, parent=None):
super(CollapseBox, self).__init__(parent=parent)
self.parent = parent
self.clicking = False
self.height = 8
self.width = 8
self.setRotation(90.0)
def paint(self, painter, options, widget):
# Draw our triangle
painter.setPen(QPen(QColor(0,0,0), 1.0))
painter.setBrush(QColor(160,200,220))
path = QPainterPath()
path.moveTo(-4,4)
path.lineTo(4,0)
path.lineTo(-4,-4)
path.lineTo(-4,4)
painter.drawPath(path)
def boundingRect(self):
return QRectF(QPointF(-5,-6), QSizeF(15, 15))
def shape(self):
p = QPainterPath()
p.addRect(-5, -6, 15, 15)
return p
def mousePressEvent(self, event):
self.clicking = True
def mouseReleaseEvent(self, event):
if self.clicking:
self.parent.change_collapsed_state(not self.parent.collapsed)
self.setRotation(0.0 if self.parent.collapsed else 90.0)
self.clicking = False
class CommandAddNode(QUndoCommand):
def __init__(self, node_name, create_func, scene):
super(CommandAddNode, self).__init__("Add node {}".format(node_name))
self.create_func = create_func
self.scene = scene
def redo(self):
self.new_node = self.create_func()
def undo(self):
self.scene.removeItem(self.new_node)
class CommandDeleteNodes(QUndoCommand):
def __init__(self, nodes, scene):
super(CommandDeleteNodes, self).__init__("Delete nodes {}".format(",".join([n.name for n in nodes])))
self.scene = scene
self.nodes = nodes
def redo(self):
self.output_wires = []
self.input_wires = []
self.parameter_wires = []
for node in self.nodes:
for k, v in node.outputs.items():
for w in v.wires_out:
w.end_obj.wires_in.pop(w.end_obj.wires_in.index(w))
self.output_wires.append(w)
self.scene.removeItem(w)
for k, v in node.inputs.items():
for w in v.wires_in:
w.start_obj.wires_out.pop(w.start_obj.wires_out.index(w))
self.input_wires.append(w)
self.scene.removeItem(w)
for k, v in node.parameters.items():
for w in v.wires_in:
w.end_obj.wires_in.pop(w.end_obj.wires_in.index(w))
self.parameter_wires.append(w)
self.scene.removeItem(w)
self.scene.removeItem(node)
node.update()
self.scene.update()
def undo(self):
for node in self.nodes:
self.scene.addItem(node)
for w in self.output_wires:
w.end_obj.wires_in.append(w)
self.scene.addItem(w)
for w in self.input_wires:
w.start_obj.wires_out.append(w)
self.scene.addItem(w)
for w in self.parameter_wires:
w.end_obj.wires_in.append(w)
self.scene.addItem(w)
self.output_wires = []
self.input_wires = []
self.parameter_wires = []
self.scene.update()
class CommandDuplicateNodes(QUndoCommand):
def __init__(self, nodes, scene):
super(CommandDuplicateNodes, self).__init__("Duplicate nodes {}".format(",".join([n.name for n in nodes])))
self.nodes = nodes
self.scene = scene
self.new_nodes = []
def redo(self):
old_to_new = {}
for sn in self.nodes:
node_names = [i.label.toPlainText() for i in self.scene.items() if isinstance(i, Node)]
new_node = self.scene.create_node_by_name(sn.name)
nan = next_available_name(node_names, strip_numbers(sn.label.toPlainText()))
new_node.label.setPlainText(nan)
# Set base parameters from old
new_node.update_parameters_from(sn)
if sn.base_params:
new_node.base_params = dict(sn.base_params)
new_node.enabled = sn.enabled
# Update the mapping
old_to_new[sn] = new_node
self.new_nodes.append(new_node)
# Stagger and update the selection to include the new nodes
new_node.setPos(sn.pos()+QPointF(20,20))
sn.setSelected(False)
new_node.setSelected(True)
# Rewire the new nodes according to the old nodes
for sn in self.nodes:
new_node = old_to_new[sn]
for k, v in sn.outputs.items():
for w in v.wires_out:
# See if the user wants to copy nodes on both ends, otherwise don't make a wire
if w.start_obj.parent in old_to_new:
if w.end_obj.parent in old_to_new:
# Create the wire and set the start
new_wire = Wire(new_node.outputs[w.start_obj.name])
new_wire.set_start(new_node.outputs[w.start_obj.name].scenePos())
new_node.outputs[w.start_obj.name].wires_out.append(new_wire)
end_conn_name = w.end_obj.name
end_node = old_to_new[w.end_obj.parent]
if end_conn_name in end_node.inputs.keys():
new_wire.end_obj = end_node.inputs[end_conn_name]
new_wire.set_end(end_node.inputs[end_conn_name].scenePos())
end_node.inputs[end_conn_name].wires_in.append(new_wire)
elif end_conn_name in end_node.parameters.keys():
new_wire.end_obj = end_node.parameters[end_conn_name]
new_wire.set_end(end_node.parameters[end_conn_name].scenePos())
end_node.parameters[end_conn_name].wires_in.append(new_wire)
self.scene.addItem(new_wire)
self.scene.update()
def undo(self):
for node in self.new_nodes:
for k, v in node.outputs.items():
for w in v.wires_out:
w.end_obj.wires_in.pop(w.end_obj.wires_in.index(w))
self.scene.removeItem(w)
for k, v in node.inputs.items():
for w in v.wires_in:
w.start_obj.wires_out.pop(w.start_obj.wires_out.index(w))
self.scene.removeItem(w)
for k, v in node.parameters.items():
for w in v.wires_in:
w.end_obj.wires_in.pop(w.end_obj.wires_in.index(w))
self.scene.removeItem(w)
self.scene.removeItem(node)
node.update()
for sn in self.nodes:
sn.setSelected(True)
self.scene.update()
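# Illustrative usage sketch (added; not part of the original module): the
# QUndoCommand subclasses above are intended to be pushed onto a QUndoStack,
# making node creation, deletion and duplication undoable. `scene` is the Quince
# graphics scene assumed to provide create_node_by_name(), as used above.
#
# undo_stack = QUndoStack()
# undo_stack.push(CommandAddNode('Average', lambda: scene.create_node_by_name('Average'), scene))
# undo_stack.undo()   # removes the node created by redo()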
|
apache-2.0
| 7,390,747,057,331,082,000
| 38.254428
| 115
| 0.57345
| false
| 3.696846
| false
| false
| false
|
orcasgit/py-wsse
|
wsse/encryption.py
|
1
|
11246
|
"""Functions for WS-Security (WSSE) encryption and decryption.
Heavily based on test examples in https://github.com/mehcode/python-xmlsec as
well as the xmlsec documentation at https://www.aleksey.com/xmlsec/. Some
functions from https://github.com/mvantellingen/py-soap-wsse.
Reading the xmldsig, xmlenc, and ws-security standards documents, though
admittedly painful, will likely assist in understanding the code in this
module.
"""
import base64
from lxml import etree
from OpenSSL import crypto
import xmlsec
from .constants import BASE64B, X509TOKEN, DS_NS, ENC_NS, SOAP_NS, WSSE_NS
from .xml import ensure_id, ns
def encrypt(envelope, certfile):
"""Encrypt body contents of given SOAP envelope using given X509 cert.
Currently only encrypts the first child node of the body, so doesn't really
support a body with multiple child nodes (the later ones won't be
encrypted), and doesn't support encryption of multiple nodes.
Expects to encrypt an incoming document something like this (xmlns
attributes omitted for readability):
<soap:Envelope>
<soap:Header>
<wsse:Security mustUnderstand="true">
<wsu:Timestamp>
<wsu:Created>2015-06-25T21:53:25.246276+00:00</wsu:Created>
<wsu:Expires>2015-06-25T21:58:25.246276+00:00</wsu:Expires>
</wsu:Timestamp>
</wsse:Security>
</soap:Header>
<soap:Body>
...
</soap:Body>
</soap:Envelope>
Encryption results in an XML structure something like this (note the added
wsse:BinarySecurityToken and xenc:EncryptedKey nodes in the wsse:Security
header, and that the contents of the soap:Body have now been replaced by a
wsse:EncryptedData node):
<soap:Envelope>
<soap:Header>
<wsse:Security mustUnderstand="true">
<wsse:BinarySecurityToken
wsu:Id="id-31e55a42-adef-4312-aa02-6da738177b25"
EncodingType="...-wss-soap-message-security-1.0#Base64Binary"
ValueType=".../oasis-200401-wss-x509-token-profile-1.0#X509v3">
MIIGRTCC...7RaVeFVB/w==
</wsse:BinarySecurityToken>
<xenc:EncryptedKey>
<xenc:EncryptionMethod
Algorithm="http://www.w3.org/2001/04/xmlenc#rsa-oaep-mgf1p"/>
<ds:KeyInfo>
<wsse:SecurityTokenReference
wsse:TokenType="...wss-x509-token-profile-1.0#X509v3">
<wsse:Reference
ValueType="...-wss-x509-token-profile-1.0#X509v3"
URI="#id-31e55a42-adef-4312-aa02-6da738177b25"
/>
</wsse:SecurityTokenReference>
</ds:KeyInfo>
<xenc:CipherData>
<xenc:CipherValue>0m23u5UVh...YLcEcmgzng==</xenc:CipherValue>
</xenc:CipherData>
<xenc:ReferenceList>
<xenc:DataReference
URI="#id-094305bf-f73e-4940-88d9-00688bc78718"/>
</xenc:ReferenceList>
</xenc:EncryptedKey>
<wsu:Timestamp wsu:Id="id-d449ec14-f31c-4174-b51c-2a56843eeda5">
<wsu:Created>2015-06-25T22:26:57.618091+00:00</wsu:Created>
<wsu:Expires>2015-06-25T22:31:57.618091+00:00</wsu:Expires>
</wsu:Timestamp>
</wsse:Security>
</soap:Header>
<soap:Body wsu:Id="id-73bc3f79-1597-4e35-91d5-354fc6197858">
<xenc:EncryptedData
Type="http://www.w3.org/2001/04/xmlenc#Element"
wsu:Id="id-094305bf-f73e-4940-88d9-00688bc78718">
<xenc:EncryptionMethod
Algorithm="http://www.w3.org/2001/04/xmlenc#tripledes-cbc"/>
<xenc:CipherData>
<xenc:CipherValue>rSJC8m...js2RQfw/5</xenc:CipherValue>
</xenc:CipherData>
</xenc:EncryptedData>
</soap:Body>
</soap:Envelope>
(In practice, we'll generally be encrypting an already-signed document, so
the Signature node would also be present in the header, but we aren't
encrypting it and for simplicity it's omitted in this example.)
"""
doc = etree.fromstring(envelope)
header = doc.find(ns(SOAP_NS, 'Header'))
security = header.find(ns(WSSE_NS, 'Security'))
# Create a keys manager and load the cert into it.
manager = xmlsec.KeysManager()
key = xmlsec.Key.from_file(certfile, xmlsec.KeyFormat.CERT_PEM, None)
manager.add_key(key)
# Encrypt first child node of the soap:Body.
body = doc.find(ns(SOAP_NS, 'Body'))
target = body[0]
# Create the EncryptedData node we will replace the target node with,
# and make sure it has the contents XMLSec expects (a CipherValue node,
# a KeyInfo node, and an EncryptedKey node within the KeyInfo which
# itself has a CipherValue).
enc_data = xmlsec.template.encrypted_data_create(
doc,
xmlsec.Transform.DES3,
type=xmlsec.EncryptionType.ELEMENT,
ns='xenc',
)
xmlsec.template.encrypted_data_ensure_cipher_value(enc_data)
key_info = xmlsec.template.encrypted_data_ensure_key_info(
enc_data, ns='dsig')
enc_key = xmlsec.template.add_encrypted_key(
key_info, xmlsec.Transform.RSA_OAEP)
xmlsec.template.encrypted_data_ensure_cipher_value(enc_key)
enc_ctx = xmlsec.EncryptionContext(manager)
# Generate a per-session DES key (will be encrypted using the cert).
enc_ctx.key = xmlsec.Key.generate(
xmlsec.KeyData.DES, 192, xmlsec.KeyDataType.SESSION)
# Ask XMLSec to actually do the encryption.
enc_data = enc_ctx.encrypt_xml(enc_data, target)
# XMLSec inserts the EncryptedKey node directly within EncryptedData,
# but WSSE wants it in the Security header instead, and referencing the
# EncryptedData as well as the actual cert in a BinarySecurityToken.
# Move the EncryptedKey node up into the wsse:Security header.
security.insert(0, enc_key)
# Create a wsse:BinarySecurityToken node containing the cert and add it
# to the Security header.
cert_bst = create_binary_security_token(certfile)
security.insert(0, cert_bst)
# Create a ds:KeyInfo node referencing the BinarySecurityToken we just
# created, and insert it into the EncryptedKey node.
enc_key.insert(1, create_key_info_bst(cert_bst))
# Add a DataReference from the EncryptedKey node to the EncryptedData.
add_data_reference(enc_key, enc_data)
# Remove the now-empty KeyInfo node from EncryptedData (it used to
# contain EncryptedKey, but we moved that up into the Security header).
enc_data.remove(key_info)
return etree.tostring(doc)
def decrypt(envelope, keyfile):
"""Decrypt all EncryptedData, using EncryptedKey from Security header.
EncryptedKey should be a session key encrypted for given ``keyfile``.
Expects XML similar to the example in the ``encrypt`` docstring.
"""
# Create a key manager and load our key into it.
manager = xmlsec.KeysManager()
key = xmlsec.Key.from_file(keyfile, xmlsec.KeyFormat.PEM)
manager.add_key(key)
doc = etree.fromstring(envelope)
header = doc.find(ns(SOAP_NS, 'Header'))
security = header.find(ns(WSSE_NS, 'Security'))
enc_key = security.find(ns(ENC_NS, 'EncryptedKey'))
# Find each referenced encrypted block (each DataReference in the
# ReferenceList of the EncryptedKey) and decrypt it.
ref_list = enc_key.find(ns(ENC_NS, 'ReferenceList'))
for ref in ref_list:
# Find the EncryptedData node referenced by this DataReference.
ref_uri = ref.get('URI')
referenced_id = ref_uri[1:]
enc_data = doc.xpath(
"//enc:EncryptedData[@Id='%s']" % referenced_id,
namespaces={'enc': ENC_NS},
)[0]
# XMLSec doesn't understand WSSE, therefore it doesn't understand
# SecurityTokenReference. It expects to find EncryptedKey within the
# KeyInfo of the EncryptedData. So we get rid of the
# SecurityTokenReference and replace it with the EncryptedKey before
# trying to decrypt.
key_info = enc_data.find(ns(DS_NS, 'KeyInfo'))
key_info.remove(key_info[0])
key_info.append(enc_key)
# When XMLSec decrypts, it automatically replaces the EncryptedData
# node with the decrypted contents.
ctx = xmlsec.EncryptionContext(manager)
ctx.decrypt(enc_data)
return etree.tostring(doc)
def add_data_reference(enc_key, enc_data):
"""Add DataReference to ``enc_data`` in ReferenceList of ``enc_key``.
``enc_data`` should be an EncryptedData node; ``enc_key`` an EncryptedKey
node.
Add a wsu:Id attribute to the EncryptedData if it doesn't already have one,
so the EncryptedKey's URI attribute can reference it.
(See the example XML in the ``encrypt()`` docstring.)
Return created DataReference node.
"""
# Ensure the target EncryptedData has a wsu:Id.
data_id = ensure_id(enc_data)
# Ensure the EncryptedKey has a ReferenceList.
ref_list = ensure_reference_list(enc_key)
# Create the DataReference, with URI attribute referencing the target
# node's id, add it to the ReferenceList, and return it.
data_ref = etree.SubElement(ref_list, ns(ENC_NS, 'DataReference'))
data_ref.set('URI', '#' + data_id)
return data_ref
def ensure_reference_list(encrypted_key):
"""Ensure that given EncryptedKey node has a ReferenceList node.
Return the found or created ReferenceList node.
"""
ref_list = encrypted_key.find(ns(ENC_NS, 'ReferenceList'))
if ref_list is None:
ref_list = etree.SubElement(encrypted_key, ns(ENC_NS, 'ReferenceList'))
return ref_list
def create_key_info_bst(security_token):
"""Create and return a KeyInfo node referencing given BinarySecurityToken.
(See the example XML in the ``encrypt()`` docstring.)
Modified from https://github.com/mvantellingen/py-soap-wsse.
"""
# Create the KeyInfo node.
key_info = etree.Element(ns(DS_NS, 'KeyInfo'), nsmap={'ds': DS_NS})
# Create a wsse:SecurityTokenReference node within KeyInfo.
sec_token_ref = etree.SubElement(
key_info, ns(WSSE_NS, 'SecurityTokenReference'))
sec_token_ref.set(
ns(WSSE_NS, 'TokenType'), security_token.get('ValueType'))
# Add a Reference to the BinarySecurityToken in the SecurityTokenReference.
bst_id = ensure_id(security_token)
reference = etree.SubElement(sec_token_ref, ns(WSSE_NS, 'Reference'))
reference.set('ValueType', security_token.get('ValueType'))
reference.set('URI', '#%s' % bst_id)
return key_info
def create_binary_security_token(certfile):
"""Create a BinarySecurityToken node containing the x509 certificate.
Modified from https://github.com/mvantellingen/py-soap-wsse.
"""
# Create the BinarySecurityToken node with appropriate attributes.
node = etree.Element(ns(WSSE_NS, 'BinarySecurityToken'))
node.set('EncodingType', BASE64B)
node.set('ValueType', X509TOKEN)
# Set the node contents.
with open(certfile) as fh:
cert = crypto.load_certificate(crypto.FILETYPE_PEM, fh.read())
node.text = base64.b64encode(
crypto.dump_certificate(crypto.FILETYPE_ASN1, cert))
return node
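# Illustrative round-trip sketch (added; not part of the original module). The
# file names are placeholders: the cert is the recipient's X509 certificate and
# the key is the matching private key.
#
# with open('signed_envelope.xml', 'rb') as fh:
#     envelope = fh.read()
# encrypted = encrypt(envelope, 'recipient_cert.pem')   # adds EncryptedKey + EncryptedData
# decrypted = decrypt(encrypted, 'recipient_key.pem')   # restores the original body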
|
bsd-3-clause
| 482,553,226,064,850,500
| 36.738255
| 79
| 0.669749
| false
| 3.492547
| false
| false
| false
|
alanfbaird/PyTASA
|
tests/anisotropy_index_tests.py
|
1
|
1660
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test the IO routines - based on the MSAT test cases
"""
import unittest
import numpy as np
import pytasa.anisotropy_index
class TestAnisotropyIndex(unittest.TestCase):
def setUp(self):
"""Some useful matricies for testing"""
self.olivine = np.array([[320.5, 68.1, 71.6, 0.0, 0.0, 0.0],
[68.1, 196.5, 76.8, 0.0, 0.0, 0.0],
[71.6, 76.8, 233.5, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 64.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 77.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 78.7]])
self.isotropic = np.array([[166.6667, 66.6667, 66.6667, 0.0, 0.0, 0.0],
[66.6667, 166.6667, 66.6667, 0.0, 0.0, 0.0],
[66.6667, 66.6667, 166.6667, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 50.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 50.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 50.0]])
def test_isotropic_zenner(self):
"""Test from MSAT - are isotropic results isotropic"""
np.testing.assert_almost_equal(pytasa.anisotropy_index.zenerAniso(
self.isotropic), [1.0, 0.0])
def test_isotropic_universal(self):
"""Test from MSAT - are isotropic results isotropic"""
np.testing.assert_almost_equal(pytasa.anisotropy_index.uAniso(
self.isotropic), [0.0, 0.0])
def suite():
return unittest.makeSuite(TestAnisotropyIndex, 'test')
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
mit
| 8,203,978,501,412,045,000
| 35.086957
| 79
| 0.487349
| false
| 2.743802
| true
| false
| false
|
kingsdigitallab/tvof-kiln
|
preprocess/prepare/xml_parser.py
|
1
|
11492
|
# -*- coding: utf-8 -*-
import sys
import os
import re
import glob
import datetime
import xml.etree.ElementTree as ET
class XMLParser(object):
suppressed_output = False
default_output = u'parsed.xml'
# TODO: implement this, but not possible with ElementTree...
is_output_beautified = False
def __init__(self):
self.convert_only = False
self.xml_comments = []
self.reset()
def reset(self):
self.paragraphs = []
self.namespaces_implicit = {
'xml': 'http://www.w3.org/XML/1998/namespace',
}
self.xml = None
def has_xml(self):
return self.xml is not None
def transform(self, xml_path, xsl_path):
import lxml.etree as LET
dom = LET.parse(xml_path)
xslt = LET.parse(xsl_path)
trans = LET.XSLT(xslt)
newdom = trans(dom)
ret = LET.tostring(newdom, pretty_print=True)
# 2-space indent -> 4-space indent
ret = re.sub(r'(?m)^ +', lambda m: ' ' * (2 * len(m.group(0))), ret)
return ret
@classmethod
def run(cls, args=None):
if args is None and cls.__module__ != '__main__':
return
script_file = '%s.py' % cls.__module__
if args is None:
args = sys.argv
script_file = args.pop(0)
parser = cls()
print('python2 %s %s' % (script_file, ' '.join(args)))
if len(args) < 1:
print(
'Usage: {} INPUT.xml [-o OUTPUT.xml] [-r PARA_RANGE]'.format(
os.path.basename(script_file)))
exit()
output_path = cls.default_output
input_str = []
input_path_list = []
while len(args):
arg = (args.pop(0)).strip()
if arg.strip() == '-r':
if len(args) > 0:
arg = args.pop(0)
parser.set_paragraphs(arg)
elif arg.strip() == '-o':
if len(args) > 0:
arg = args.pop(0)
output_path = arg
elif arg.strip() == '-m':
if len(args) > 0:
arg = args.pop(0)
parser.ms_name = arg
elif arg.strip() == '-c':
# aggregate and convert only, don't tokenise or kwic
# TODO: this should really go into doall.py
parser.convert_only = True
else:
input_str.append(arg)
for input_paths in cls.get_expanded_paths(arg):
input_path_list += glob.glob(input_paths)
if input_path_list:
parser.run_custom(input_path_list, output_path)
if parser.has_xml():
parser.write_xml(output_path)
print('written %s' % output_path)
else:
if not getattr(cls, 'suppressed_output', False):
print(
'WARNING: Nothing to output, please check the input argument (%s)' %
', '.join(input_str))
print('done')
def set_paragraphs(self, paragraphs_string=None):
ret = []
if paragraphs_string:
# edfr20125_00589 in range '589-614'
for paras in paragraphs_string.strip().split(','):
paras = paras.split('-')
if len(paras) < 2:
paras[-1] = paras[0]
ret += range(int(paras[0]), int(paras[-1]) + 1)
self.paragraphs = ret
return ret
def is_para_in_range(self, parentid):
ret = False
if not self.paragraphs:
return True
if parentid:
# edfr20125_00589 in range '589-614'
para = re.findall('\d+$', parentid)
if para:
ret = int(para[0]) in self.paragraphs
return ret
@classmethod
def get_expanded_paths(cls, path):
# get_expanded_paths
# e.g. {16,18}X => [16X, 18X]
# e.g. {16-18}X => [16X, 17X, 18X]
ret = [path]
parts = re.findall(ur'^(.*)\{([-\d,]+)\}(.*)$', path)
if parts:
parts = parts[0]
ranges = parts[1].split(',')
for range in ranges:
ends = range.split('-')
if len(ends) == 1:
ends.append(ends[0])
ends = [int(end) for end in ends]
ends[-1] += 1
for end in xrange(*ends):
ret.append(ur'%s%s%s' % (parts[0], end, parts[-1]))
return ret
def set_namespaces_from_unicode(self, xml_string):
# grab all the namespaces
self.namespaces = {
prefix: uri
for definition, prefix, uri
in re.findall(ur'(xmlns:?(\w+)?\s*=\s*"([^"]+)")', xml_string)
}
self.namespaces.update(self.namespaces_implicit)
def set_xml_from_unicode(self, xml_string):
# grab all the namespaces
self.set_namespaces_from_unicode(xml_string)
# remove the default namespace definition
# to simplify parsing
# we'll put it back in get_unicode_from_xml()
xml_string = re.sub(ur'\sxmlns="[^"]+"', '', xml_string, count=1)
# note that ET takes a utf-8 encoded string
try:
self.xml = ET.fromstring(xml_string.encode('utf-8'))
except Exception as e:
f = open('error.log', 'w')
f.write(xml_string.encode('utf-8'))
f.close()
raise e
def get_unicode_from_xml(self, xml=None):
if xml is None:
for prefix, url in self.namespaces.iteritems():
# skip xml namespace, it's implicitly defined
if prefix == 'xml':
continue
aprefix = 'xmlns'
if prefix:
aprefix += ':' + prefix
self.xml.set(aprefix, url)
if xml is None:
xml = self.xml
return ET.tostring(xml, encoding='utf-8').decode('utf-8')
def read_xml(self, filepath):
ret = True
import codecs
with codecs.open(filepath, 'r', 'utf-8') as f:
content = f.read()
content = self.save_xml_comments(content)
try:
self.set_xml_from_unicode(content)
# self.is_wellformed(self.get_unicode_from_xml())
except ET.ParseError as e:
print(e)
ret = False
return ret
def forget_xml_comments(self):
self.xml_comments = []
def restore_xml_comments(self, content):
# xml.etree.ElementTree does NOT preserve <!-- -->
# We could use lxml but that would mean asking project partners
# to install that... let's do it manually.
# return content
def replace_comment(match):
ret = ur''
if self.xml_comments:
ret = self.xml_comments[int(match.group(1))]
return ret
return re.sub(ur'(?musi)<comment\s*id\s*=\s*"c-(\d+)"\s*/>',
replace_comment, content)
def save_xml_comments(self, content):
# xml.etree.ElementTree does NOT preserve <!-- -->
# We could use lxml but that would mean asking project partners
# to install that... let's do it manually.
# TODO: Alternatively
# https://stackoverflow.com/questions/33573807/faithfully-preserve-comments-in-parsed-xml-python-2-7
# return content
first_element_index = (re.search(ur'<\s*\w', content)).start()
def replace_comment(match):
ret = match.group(0)
if match.start() > first_element_index:
commentid = len(self.xml_comments)
self.xml_comments.append(ret)
ret = ur'<comment id="c-%s"/>' % commentid
return ret
return re.sub(ur'(?musi)<!--.*?-->', replace_comment, content)
def write_xml(self, file_path, encoding='utf-8'):
f = open(file_path, 'wb')
content = u'<?xml version="1.0" encoding="{}"?>\n'.format(encoding)
# insert the generated date
date_generated = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
content += u'<!-- AUTO-GENERATED by {} - {} -->\n'.format(
self.__class__.__name__,
date_generated
)
#
content += self.get_unicode_from_xml()
# insert date in tei headers
content = re.sub(
ur'(?musi)(\s+)(<publicationStmt>)(.*?)(</publicationStmt>)',
ur'\1\2\1 <publisher>King' + "'" +
ur's Digital Laboratory</publisher>\1 <date>{}</date>\1\4'.format(
date_generated
),
content
)
content = self.restore_xml_comments(content)
content = content.encode(encoding)
f.write(content)
f.close()
def get_element_text(self, element, recursive=False):
if recursive:
ret = element.itertext()
else:
ret = [(element.text or u'')] +\
[(child.tail or u'') for child in list(element)]
return u''.join(ret)
def expand_prefix(self, expression):
expression = re.sub(
ur'(\w+):',
lambda m: ur'{%s}' % self.namespaces[m.group(1)],
expression
)
return expression
def is_wellformed(self, xml_string):
ret = True
try:
xml_string = ET.fromstring(xml_string.encode('utf-8'))
except ET.ParseError as e:
print(u'%s' % e)
# (3056, 242) = (line, char)
lines = xml_string.split('\n')
print(lines[e.position[0] - 1])
print((' ' * (e.position[1] - 1)) + '^')
ret = False
return ret
def remove_elements(self, filters, condition_function=None):
        # Remove all elements in the xml that match any of the given filters.
# e.g. filters = ['del', 'orig', 'seg[@type="semi-dip"]', 'sic', 'pb']
# self.remove_elements(filters)
if condition_function is None:
def condition_function(parent, element): return True
for filter in filters:
c = 0
matches = re.findall('^([^\[]*)(\[.*\])?', filter)
tag, condition = matches[0]
for parent in self.xml.findall(ur'.//*[' + tag + ur']'):
# slower version that completely removes the elements
elements = parent.findall(filter)
if len(elements):
previous = None
for element in list(parent):
if element in elements and condition_function(
parent, element):
# make sure we keep the tail
tail = element.tail
parent.remove(element)
c += 1
if tail:
if previous is not None:
previous.tail = (
previous.tail or ur'') + tail
else:
parent.text = (
parent.text or ur'') + tail
else:
previous = element
print('\t removed %s %s' % (c, filter))
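# Hedged sketch (not in the original file): XMLParser.run() expects a
# subclass that provides run_custom(input_path_list, output_path); the names
# MyParser and the argument values below are illustrative assumptions only.
#
# class MyParser(XMLParser):
#     def run_custom(self, input_path_list, output_path):
#         for input_path in input_path_list:
#             if self.read_xml(input_path):
#                 # transform self.xml here; run() then calls write_xml()
#                 # automatically because has_xml() is True
#                 pass
#
# MyParser.run(['input_{1-3}.xml', '-o', 'parsed.xml', '-r', '589-614'])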
|
apache-2.0
| -6,888,239,231,041,055,000
| 31.463277
| 108
| 0.488688
| false
| 4.072289
| false
| false
| false
|
novapost/django-email-change
|
src/email_change/urls.py
|
1
|
1900
|
# -*- coding: utf-8 -*-
#
# This file is part of django-email-change.
#
# django-email-change adds support for email address change and confirmation.
#
# Development Web Site:
# - http://www.codetrax.org/projects/django-email-change
# Public Source Code Repository:
# - https://source.codetrax.org/hgroot/django-email-change
#
# Copyright 2010 George Notaras <gnot [at] g-loaded.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from django.conf.urls.defaults import *
from django.views.generic import TemplateView
urlpatterns = patterns('',
url(r'^email/change/$', 'email_change.views.email_change_view', name='email_change'),
url(r'^email/verification/sent/$',
TemplateView.as_view(template_name='email_change/email_verification_sent.html'),
name='email_verification_sent'),
# Note taken from django-registration
# Verification keys get matched by \w+ instead of the more specific
# [a-fA-F0-9]{40} because a bad verification key should still get to the view;
# that way it can return a sensible "invalid key" message instead of a
# confusing 404.
url(r'^email/verify/(?P<verification_key>\w+)/$', 'email_change.views.email_verify_view', name='email_verify'),
url(r'^email/change/complete/$',
TemplateView.as_view(template_name='email_change/email_change_complete.html'),
name='email_change_complete'),
)
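# Hedged usage sketch (an assumption, not part of this package): these
# patterns are meant to be included from a project URLconf, using the same
# old-style Django 1.x API as above. The '^accounts/' prefix is illustrative.
#
#   from django.conf.urls.defaults import include, patterns, url
#   urlpatterns = patterns('',
#       url(r'^accounts/', include('email_change.urls')),
#   )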
|
apache-2.0
| -2,762,859,119,669,229,000
| 41.222222
| 115
| 0.715263
| false
| 3.619048
| false
| false
| false
|
shacknetisp/vepybot
|
plugins/protocols/irc/core/users.py
|
1
|
8208
|
# -*- coding: utf-8 -*-
import bot
import time
class Whois:
def __init__(self):
self.time = time.time()
self.channels = {}
ident = ""
host = ""
name = ""
auth = ""
idle = 0
signon = 0
server = ""
class M_Whois(bot.Module):
index = "whois"
hidden = False
def register(self):
self.addhook("recv", "recv", self.recv)
self.addhook("whois.fromcontext", "fromcontext", self.fromcontext)
self.addhook("whois.fromtuple", "fromtuple", self.fromtuple)
self.addhook("chanmodes", "chanmodes", self.chanmodes)
self.addtimer(self.timer, "whois", 60 * 1000)
self.addtimer(self.chantimer, "chantimer", 60 * 1000)
self.whois = {}
self.server.whois = self.whois
self.tmp = {}
self.addcommand(self.getwhois, "whois",
"Get information about a nick. Space-delimited values."
" Values can be: nick, ident, host, or auth.",
["nick", "[values...]"])
self.addcommand(self.runwhois, "authme",
"Auth via WHOIS.", [])
self.serverset('whois.updatechannels', self.updatechannels)
def runwhois(self, context, args):
if not self.server.settings.get('server.whois'):
return "WHOIS is disabled."
self.server.send("WHOIS %s" % context.user[0])
return "Done, check your rights with: rights get"
def getwhois(self, context, args):
args.default('values', 'nick host channels')
info = {}
if args.getstr("nick") in self.whois:
w = self.whois[args.getstr("nick")]
info['nick'] = args.getstr("nick")
info['ident'] = w.ident
info['name'] = w.name
info['host'] = w.host
info['auth'] = w.auth
info['idstring'] = "irc:{nick}!{ident}@{host}!{auth}".format(**info)
else:
return "Nick not found."
out = []
values = args.getstr("values").split(' ')
for v in values:
if v in info and type(info[v]) in [str, int]:
if len(values) == 1:
out.append(str(info[v]))
else:
out.append("%s: %s" % (v, str(info[v])))
return ', '.join(out) or "No results."
def updatechannels(self, snick=None):
nicks = {}
for chan in self.server.channels:
v = self.server.channels[chan].names
for n in v:
if snick is not None and n not in snick:
continue
if n not in nicks:
nicks[n] = {}
nicks[n][chan] = v[n]
for nick in nicks:
if nick in self.whois:
self.whois[nick].channels = nicks[nick]
def chantimer(self):
self.updatechannels()
def timer(self):
tod = []
for w in self.whois:
if time.time() - self.whois[w].time > 250:
tod.append(w)
for d in tod:
self.whois.pop(d)
def fromcontext(self, context):
nicks = [context.user[0]]
if context.code('nick'):
nicks.append(context.rawsplit[2])
for nick in nicks:
if nick:
if nick not in self.whois:
self.whois[nick] = Whois()
w = self.whois[nick]
w.ident = context.user[1]
w.host = context.user[2]
def fromtuple(self, t):
nick = t[0]
if nick:
if nick not in self.whois:
self.whois[nick] = Whois()
w = self.whois[nick]
w.ident = t[1]
w.host = t[2]
self.server.dohook('whois.found', nick,
"%s!%s@%s!%s" % (nick, w.ident, w.host, w.auth),
w)
def recv(self, context):
if context.code("311"):
self.tmp[context.rawsplit[3]] = Whois()
w = self.tmp[context.rawsplit[3]]
w.ident = context.rawsplit[4]
w.host = context.rawsplit[5]
w.name = context.text
elif context.code("312"):
w = self.tmp[context.rawsplit[3]]
w.server = context.rawsplit[4]
elif context.code("317"):
w = self.tmp[context.rawsplit[3]]
w.idle = int(context.rawsplit[4])
w.signon = int(context.rawsplit[5])
elif context.code("318"):
if context.rawsplit[3] in self.whois:
self.tmp[context.rawsplit[3]].channels = self.whois[
context.rawsplit[3]].channels
self.whois[context.rawsplit[3]] = self.tmp[context.rawsplit[3]]
self.server.dohook("whois", context.rawsplit[3])
w = self.whois[context.rawsplit[3]]
self.server.dohook('whois.found', context.rawsplit[3],
"%s!%s@%s!%s" % (context.rawsplit[3], w.ident, w.host, w.auth),
w)
elif context.code("330"):
# Freenode
w = self.tmp[context.rawsplit[3]]
w.auth = context.rawsplit[4]
elif context.code("JOIN"):
self.handlejoin(context)
elif context.code("PART"):
self.handlepart(context)
elif context.code("QUIT"):
self.fromcontext(context)
w = self.whois[context.user[0]]
self.server.dohook('log', 'quit', (context.rawsplit[0],
context.user),
(list(w.channels.keys()), context.text))
elif context.code("MODE"):
self.handlemode(context)
def handlejoin(self, context):
self.fromcontext(context)
w = self.whois[context.user[0]]
channel = context.rawsplit[2].strip(':')
if channel not in w.channels:
w.channels[channel] = []
self.server.dohook('join', channel, context.user[0])
self.server.dohook('log', 'join', context.user,
channel)
self.server.dohook('whois.lightfound', context.user[0],
"%s!%s@%s!%s" % (context.user[0], w.ident, w.host, w.auth),
w)
def handlepart(self, context):
self.fromcontext(context)
w = self.whois[context.user[0]]
channel = context.rawsplit[2]
if channel in w.channels:
w.channels.pop(channel)
self.server.dohook('log', 'part', context.user,
(channel, context.text))
def handlemode(self, context):
channel = context.rawsplit[2]
modes = context.rawsplit[3]
final = {}
nicks = context.rawsplit[4:]
for n in nicks:
final[n] = []
now = ''
idx = 0
for cchar in modes:
if cchar in '-+':
now = cchar
elif now and idx in range(len(nicks)):
                final[nicks[idx]].append(now + cchar)
                idx += 1  # each parameterised mode flag consumes the next argument
self.server.dohook('chanmodes', channel, final)
self.server.dohook('log', 'mode', context.rawsplit[0],
(channel, modes, ' '.join(nicks)))
def chanmodes(self, channel, modes):
for target in modes:
if target not in self.whois:
continue
w = self.whois[target]
if channel not in w.channels:
w.channels[channel] = []
if channel not in self.server.channels:
c = channel.strip(':')
self.server.channels[c] = self.server.Channel(self.server, c)
names = self.server.channels[channel].names
for mode in modes[target]:
for mchar in 'ov':
if mode == '+%s' % mchar:
if mchar not in w.channels[channel]:
w.channels[channel].append(mchar)
elif mode == '-%s' % mchar:
if mchar in w.channels[channel]:
i = w.channels[channel].index(mchar)
w.channels[channel].pop(i)
names[target] = w.channels[channel]
bot.register.module(M_Whois)
|
mit
| -1,344,099,156,478,500,600
| 35
| 80
| 0.500731
| false
| 3.756522
| false
| false
| false
|
pascalgutjahr/Praktikum-1
|
V704_Absorp_Beta_Gamma/gammaPB.py
|
1
|
1439
|
import matplotlib as mpl
from scipy.optimize import curve_fit
mpl.use('pgf')
import matplotlib.pyplot as plt
plt.rcParams['lines.linewidth'] = 1
import numpy as np
mpl.rcParams.update({
'font.family': 'serif',
'text.usetex': True,
'pgf.rcfonts': False,
'pgf.texsystem': 'lualatex',
'pgf.preamble': r'\usepackage{unicode-math}\usepackage{siunitx}'
})
N0 = 1113 # for t = 1100 s
N_0 = N0 / 1100 # for t = 1 s
N_gamma = 131.62 # for t = 1 s
N = np.array([6803, 10219, 7897, 7889, 6041, 3363, 3232, 2065, 1911, 1684])
d = np.array([0.1, 0.4, 1.03, 1.3, 2.01, 3.06, 3.45, 4.06, 4.56, 5.10])
t = np.array([60, 110, 180, 230, 400, 600, 700, 800, 900, 1000])
N = (N-N_0) / t # normalised to t = 1 s and background (null effect) subtracted
N_log = np.log(N/N_gamma)
F = np.sqrt(N) / t
print('Fehler Wurzel N durch t:', np.vstack(F))
def f(d, a, b):
return a * d + b
params, covariance = curve_fit(f, d, N_log)
errors = np.sqrt(np.diag(covariance))
print('a =', params[0], '±', errors[0])
print('b =', params[1], '±', errors[1])
plt.plot(d, f(d,*params), '-',color='deeppink', label='Lineare Ausgleichsrechnung')
plt.plot(d, N_log, 'bx', label='Messwerte für Bleiabsorber')
plt.grid()
plt.xlabel(r'$d \,/\, \si{\centi\meter}$')
plt.ylabel(r'$\log{(\frac{N-N_0}{N_\symup{ohne}})}$') # N_ohne: count rate without absorber, N_0: count rate without radiation source
plt.legend(loc='best')
plt.tight_layout()
plt.savefig('bilder/gammaPB.pdf')
plt.show()
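# Hedged note (not in the original script): the fitted line corresponds to the
# attenuation law N(d) = N_ohne * exp(-mu * d), so log(N/N_ohne) = -mu*d + b
# and the absorption coefficient of lead follows from the fitted slope as
#
#   mu = -a   (in 1/cm, since d is given in cm)
#
# with the uncertainty of mu equal to the uncertainty of a printed above.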
|
mit
| -4,312,687,204,794,683,000
| 27.62
| 115
| 0.638015
| false
| 2.246468
| false
| false
| false
|
marcuskelly/recover
|
app/manage_commands.py
|
1
|
1623
|
# This file defines command line commands for manage.py
import datetime
from app.init_app import app, db, manager
from app.models import User, Role
@manager.command
def init_db():
""" Initialize the database."""
# Create all tables
db.create_all()
# Add all Users
add_users()
def add_users():
""" Create users when app starts """
# Adding roles
admin_role = find_or_create_role('admin', u'Admin')
# Add users
#user = find_or_create_user(u'Admin', u'Example', u'admin@example.com', 'Password1', admin_role)
#user = find_or_create_user(u'Admin1', u'Example', u'admin1@example.com', 'Password1', admin_role)
#user = find_or_create_user(u'User', u'Example', u'user@example.com', 'Password1')
# Save to DB
db.session.commit()
def find_or_create_role(name, label):
""" Find existing role or create new role """
role = Role.query.filter(Role.name == name).first()
if not role:
role = Role(name=name, label=label)
db.session.add(role)
return role
def find_or_create_user(user_name, dob, email, password, role=None):
""" Find existing user or create new user """
user = User.query.filter(User.email == email).first()
if not user:
user = User(username='Example',
email=email,
dob=datetime.datetime.utcnow(),
password=app.user_manager.hash_password(password),
active=True,
confirmed_at=datetime.datetime.utcnow())
if role:
user.roles.append(role)
db.session.add(user)
return user
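# Hedged usage note (assumes the Flask-Script manager defined in
# app.init_app is wired up in a manage.py entry point, which is not shown
# here): the command above would typically be invoked as
#
#   python manage.py init_db
#
# which creates all tables and the 'admin' role; the example users stay
# commented out in add_users() until real credentials are configured.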
|
bsd-2-clause
| -1,381,855,864,617,946,000
| 27.982143
| 102
| 0.612446
| false
| 3.614699
| false
| false
| false
|
lifemapper/LmQGIS
|
scripts/createQgisPackage.py
|
1
|
4111
|
"""
@summary: This module creates a zip file for a plugin that can be uploaded to
the QGIS repository.
@author: CJ Grady
@status: alpha
@version: 1.0
@license: gpl2
@copyright: Copyright (C) 2014, University of Kansas Center for Research
Lifemapper Project, lifemapper [at] ku [dot] edu,
Biodiversity Institute,
1345 Jayhawk Boulevard, Lawrence, Kansas, 66045, USA
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or (at
your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
"""
import fnmatch
import os
import re
from zipfile import ZipFile
import ConfigParser
import itertools
import StringIO
from LmCommon.common.config import Config
# Jeff: Change these to the locations on your system
IN_DIR = "/home/jcavner/workspace/lm3/components/LmClient/LmQGIS/V2/"
OUT_LOCATION = "/home/jcavner/plugin/V2/lifemapperTools_Testdfsdf2.zip"
#CONFIG_LOCATION = "/home/jcavner/workspace/lm3/components/config/lmconfigfile.jeff"
#SITE_CONFIG = "/home/jcavner/workspace/lm3/components/config/config.site.ini"
SECTIONS = ['LmClient - contact','LmCommon - common','LmClient - Open Tree of Life','SiteConfig']
EXCLUDES = ['.svn', '*.pyc','*.ini']
# .............................................................................
def getFilenames(inDir):
"""
@summary: Gets all of the files and directories in the input directory that
don't match the exclude patterns
@param inDir: The input directory to find files in
"""
excludes = r'|'.join([fnmatch.translate(x) for x in EXCLUDES]) or r'$.'
matches = []
for root, dirnames, fns in os.walk(inDir, topdown=True, followlinks=True):
dirnames[:] = [d for d in dirnames if d not in excludes]
files = [os.path.join(root, f) for f in fns]
files = [f for f in files if not re.match(excludes, f)]
matches.extend(files)
return matches
# .............................................................................
def createZipFile(matches, inDir, outFn, configStrIO):
"""
@summary: Creates a zip file containing all of the files in matches
@param matches: Files to include in the zip file
@param inDir: The base directory for these files. The zip file will store
the directory structure under this location
@param outFn: The output zip file name to use
"""
with ZipFile(outFn, mode='w') as zf:
for fn in matches:
zf.write(fn, fn[len(inDir):])
zf.writestr('lifemapperTools/config/config.ini', configStrIO.getvalue())
def getConfigSections():
#config = ConfigParser.SafeConfigParser()
config = Config().config
#config.read(CONFIG_LOCATION)
#config.read(SITE_CONFIG)
allSec = {}
for sec in SECTIONS:
allSec[sec] = config.items(sec)
return allSec
def createNewConfig(sections):
newConfig = ConfigParser.SafeConfigParser()
for key in sections.keys():
newConfig.add_section(key)
for k,v in sections[key]:
newConfig.set(key,k,v)
output = StringIO.StringIO()
newConfig.write(output)
return output
# .............................................................................
if __name__ == "__main__":
#print Config().config
filenames = getFilenames(IN_DIR)
sections = getConfigSections()
configStr = createNewConfig(sections)
createZipFile(filenames, IN_DIR, OUT_LOCATION,configStr)
|
gpl-2.0
| 5,150,867,926,875,265,000
| 37.064815
| 97
| 0.640477
| false
| 3.987391
| true
| false
| false
|
klenwell/mushpup-demo
|
main.py
|
1
|
1071
|
"""`main` is the top level module for your Flask application."""
# Imports
from os.path import dirname, join
from datetime import date
from flask import Flask
import jinja2
# Constants
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(join(dirname(__file__), 'templates')),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
# Note: We don't need to call run() since our application is embedded within
# the App Engine WSGI application server.
app = Flask(__name__)
@app.route('/')
def index():
"""Return a friendly HTTP greeting."""
template = JINJA_ENVIRONMENT.get_template('index.html')
return template.render(year=date.today().year)
@app.route('/hello')
def hello():
"""Return a friendly HTTP greeting."""
return 'Hello World!'
@app.errorhandler(404)
def page_not_found(e):
"""Return a custom 404 error."""
return 'Sorry, Nothing at this URL.', 404
@app.errorhandler(500)
def application_error(e):
"""Return a custom 500 error."""
return 'Sorry, unexpected error: {}'.format(e), 500
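# Hedged addition for local testing only (an assumption, not part of the
# original App Engine setup): when the module is executed directly instead
# of being imported by the GAE WSGI server, start Flask's development server.
# Host and port values are illustrative.
if __name__ == '__main__':
    app.run(host='127.0.0.1', port=8080, debug=True)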
|
apache-2.0
| -4,553,135,595,806,245,000
| 24.5
| 76
| 0.69281
| false
| 3.731707
| false
| false
| false
|
Royal-Society-of-New-Zealand/NZ-ORCID-Hub
|
orcid_api_v3/models/researcher_url_v30_rc1.py
|
1
|
9643
|
# coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.created_date_v30_rc1 import CreatedDateV30Rc1 # noqa: F401,E501
from orcid_api_v3.models.last_modified_date_v30_rc1 import LastModifiedDateV30Rc1 # noqa: F401,E501
from orcid_api_v3.models.source_v30_rc1 import SourceV30Rc1 # noqa: F401,E501
from orcid_api_v3.models.url_v30_rc1 import UrlV30Rc1 # noqa: F401,E501
class ResearcherUrlV30Rc1(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'created_date': 'CreatedDateV30Rc1',
'last_modified_date': 'LastModifiedDateV30Rc1',
'source': 'SourceV30Rc1',
'url_name': 'str',
'url': 'UrlV30Rc1',
'visibility': 'str',
'path': 'str',
'put_code': 'int',
'display_index': 'int'
}
attribute_map = {
'created_date': 'created-date',
'last_modified_date': 'last-modified-date',
'source': 'source',
'url_name': 'url-name',
'url': 'url',
'visibility': 'visibility',
'path': 'path',
'put_code': 'put-code',
'display_index': 'display-index'
}
def __init__(self, created_date=None, last_modified_date=None, source=None, url_name=None, url=None, visibility=None, path=None, put_code=None, display_index=None): # noqa: E501
"""ResearcherUrlV30Rc1 - a model defined in Swagger""" # noqa: E501
self._created_date = None
self._last_modified_date = None
self._source = None
self._url_name = None
self._url = None
self._visibility = None
self._path = None
self._put_code = None
self._display_index = None
self.discriminator = None
if created_date is not None:
self.created_date = created_date
if last_modified_date is not None:
self.last_modified_date = last_modified_date
if source is not None:
self.source = source
if url_name is not None:
self.url_name = url_name
if url is not None:
self.url = url
if visibility is not None:
self.visibility = visibility
if path is not None:
self.path = path
if put_code is not None:
self.put_code = put_code
if display_index is not None:
self.display_index = display_index
@property
def created_date(self):
"""Gets the created_date of this ResearcherUrlV30Rc1. # noqa: E501
:return: The created_date of this ResearcherUrlV30Rc1. # noqa: E501
:rtype: CreatedDateV30Rc1
"""
return self._created_date
@created_date.setter
def created_date(self, created_date):
"""Sets the created_date of this ResearcherUrlV30Rc1.
:param created_date: The created_date of this ResearcherUrlV30Rc1. # noqa: E501
:type: CreatedDateV30Rc1
"""
self._created_date = created_date
@property
def last_modified_date(self):
"""Gets the last_modified_date of this ResearcherUrlV30Rc1. # noqa: E501
:return: The last_modified_date of this ResearcherUrlV30Rc1. # noqa: E501
:rtype: LastModifiedDateV30Rc1
"""
return self._last_modified_date
@last_modified_date.setter
def last_modified_date(self, last_modified_date):
"""Sets the last_modified_date of this ResearcherUrlV30Rc1.
:param last_modified_date: The last_modified_date of this ResearcherUrlV30Rc1. # noqa: E501
:type: LastModifiedDateV30Rc1
"""
self._last_modified_date = last_modified_date
@property
def source(self):
"""Gets the source of this ResearcherUrlV30Rc1. # noqa: E501
:return: The source of this ResearcherUrlV30Rc1. # noqa: E501
:rtype: SourceV30Rc1
"""
return self._source
@source.setter
def source(self, source):
"""Sets the source of this ResearcherUrlV30Rc1.
:param source: The source of this ResearcherUrlV30Rc1. # noqa: E501
:type: SourceV30Rc1
"""
self._source = source
@property
def url_name(self):
"""Gets the url_name of this ResearcherUrlV30Rc1. # noqa: E501
:return: The url_name of this ResearcherUrlV30Rc1. # noqa: E501
:rtype: str
"""
return self._url_name
@url_name.setter
def url_name(self, url_name):
"""Sets the url_name of this ResearcherUrlV30Rc1.
:param url_name: The url_name of this ResearcherUrlV30Rc1. # noqa: E501
:type: str
"""
self._url_name = url_name
@property
def url(self):
"""Gets the url of this ResearcherUrlV30Rc1. # noqa: E501
:return: The url of this ResearcherUrlV30Rc1. # noqa: E501
:rtype: UrlV30Rc1
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this ResearcherUrlV30Rc1.
:param url: The url of this ResearcherUrlV30Rc1. # noqa: E501
:type: UrlV30Rc1
"""
self._url = url
@property
def visibility(self):
"""Gets the visibility of this ResearcherUrlV30Rc1. # noqa: E501
:return: The visibility of this ResearcherUrlV30Rc1. # noqa: E501
:rtype: str
"""
return self._visibility
@visibility.setter
def visibility(self, visibility):
"""Sets the visibility of this ResearcherUrlV30Rc1.
:param visibility: The visibility of this ResearcherUrlV30Rc1. # noqa: E501
:type: str
"""
allowed_values = ["LIMITED", "REGISTERED_ONLY", "PUBLIC", "PRIVATE"] # noqa: E501
if visibility not in allowed_values:
raise ValueError(
"Invalid value for `visibility` ({0}), must be one of {1}" # noqa: E501
.format(visibility, allowed_values)
)
self._visibility = visibility
@property
def path(self):
"""Gets the path of this ResearcherUrlV30Rc1. # noqa: E501
:return: The path of this ResearcherUrlV30Rc1. # noqa: E501
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this ResearcherUrlV30Rc1.
:param path: The path of this ResearcherUrlV30Rc1. # noqa: E501
:type: str
"""
self._path = path
@property
def put_code(self):
"""Gets the put_code of this ResearcherUrlV30Rc1. # noqa: E501
:return: The put_code of this ResearcherUrlV30Rc1. # noqa: E501
:rtype: int
"""
return self._put_code
@put_code.setter
def put_code(self, put_code):
"""Sets the put_code of this ResearcherUrlV30Rc1.
:param put_code: The put_code of this ResearcherUrlV30Rc1. # noqa: E501
:type: int
"""
self._put_code = put_code
@property
def display_index(self):
"""Gets the display_index of this ResearcherUrlV30Rc1. # noqa: E501
:return: The display_index of this ResearcherUrlV30Rc1. # noqa: E501
:rtype: int
"""
return self._display_index
@display_index.setter
def display_index(self, display_index):
"""Sets the display_index of this ResearcherUrlV30Rc1.
:param display_index: The display_index of this ResearcherUrlV30Rc1. # noqa: E501
:type: int
"""
self._display_index = display_index
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ResearcherUrlV30Rc1, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResearcherUrlV30Rc1):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
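# Hedged usage sketch (not generated by Swagger): construct a researcher URL
# record and serialise it. UrlV30Rc1 is assumed to take a 'value' keyword,
# which is the usual convention in this generated client but is not shown in
# this file, so that line stays commented out.
if __name__ == '__main__':
    ru = ResearcherUrlV30Rc1(url_name='Homepage', visibility='PUBLIC',
                             display_index=1)
    # ru.url = UrlV30Rc1(value='https://example.org')  # assumed constructor
    print(ru.to_dict())
    print(ru == ResearcherUrlV30Rc1(url_name='Homepage'))  # False, other attributes differ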
|
mit
| -3,776,811,176,968,661,000
| 28.31003
| 182
| 0.583843
| false
| 3.717425
| false
| false
| false
|
pudo-attic/docstash
|
docstash/collection.py
|
1
|
2373
|
from os import path, walk, close, unlink
from tempfile import mkstemp
import shutil
from docstash import util
from docstash.document import Document
class Collection(object):
def __init__(self, stash, name):
self.stash = stash
self.name = name
@property
def path(self):
return path.join(self.stash.path, self.name)
def exists(self):
return path.isdir(self.path)
def documents(self):
if self.exists():
for (dirpath, dirnames, filenames) in walk(self.path):
if util.MANIFEST_FILE in filenames:
content_id = path.basename(dirpath)
yield self.get(content_id)
def get(self, content_id):
return Document(self, content_id)
def __iter__(self):
return self.documents()
def ingest(self, something, _move=False, **kwargs):
return util.ingest_misc(self, something, _move=_move, **kwargs)
def ingest_fileobj(self, file_obj, _move=False, **kwargs):
if 'file' not in kwargs:
kwargs['file'] = None
sysfd, path = mkstemp()
with open(path, 'wb') as fh:
fh.write(file_obj.read())
close(sysfd)
return self.ingest_file(path, _move=True, **kwargs)
def ingest_file(self, file_path, _move=False, **kwargs):
file_path = util.fullpath(file_path)
if not _move:
kwargs['source_path'] = file_path
file_name = kwargs.get('file', file_path)
kwargs['file'] = util.filename(file_name)
if 'hash' not in kwargs:
kwargs['hash'] = util.checksum(file_path)
doc = Document(self, kwargs['hash'], **kwargs)
if file_path != doc.file:
if not path.exists(doc.file):
if _move:
shutil.move(file_path, doc.file)
else:
shutil.copyfile(file_path, doc.file)
elif _move:
unlink(file_path)
doc.save()
return doc
def ingest_dir(self, dir_path, **kwargs):
for (dirpath, dirnames, file_names) in walk(dir_path):
for file_name in file_names:
file_path = path.join(dirpath, file_name)
self.ingest_file(file_path, **kwargs)
def __repr__(self):
return '<Collection(%s, %s)>' % (self.stash.path, self.name)
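# Hedged usage sketch (commented out; the Stash object that owns a collection
# is defined elsewhere in docstash, so its construction is an assumption here):
#
#   coll = Collection(stash, 'web')              # stash: an existing docstash Stash
#   doc = coll.ingest_file('/tmp/report.pdf',    # hypothetical path
#                          source='crawler')     # extra kwargs are passed to Document
#   for document in coll:
#       print(document)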
|
mit
| 2,439,511,797,433,378,000
| 31.067568
| 71
| 0.564686
| false
| 3.766667
| false
| false
| false
|
Smart-Green/needle
|
setup.py
|
1
|
1056
|
#!/usr/bin/env python
"""
Setup file for pyhaystack
"""
#from pyhaystack.client.HaystackConnection import HaystackConnection
#from pyhaystack.client.NiagaraAXConnection import NiagaraAXConnection
#from pyhaystack import pyhaystack as ph
import pyhaystack.info as info
#from setuptools import setup
from distutils.core import setup
import re
import os
import requests
os.environ['COPY_EXTENDED_ATTRIBUTES_DISABLE'] = 'true'
os.environ['COPYFILE_DISABLE'] = 'true'
setup(name='pyhaystack',
version=info.__version__,
description='Python Haystack Utility',
author='Christian Tremblay',
author_email='christian.tremblay@servisys.com',
url='http://www.project-haystack.com/',
long_description = "\n".join(info.__doc__.split('\n')),
install_requires = ['requests','setuptools','pandas','numpy'],
packages=['pyhaystack', 'pyhaystack.client', 'pyhaystack.haystackIO','pyhaystack.history','pyhaystack.util','pyhaystack.server',],
entry_points={
'console_scripts': ['pyhaystack=pyhaystack:main'],
},
)
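# Hedged usage note (standard packaging workflow, not specific to this file):
#
#   python setup.py sdist     # build a source distribution
#   python setup.py install   # install into the active environment
#
# Note that install_requires and entry_points are setuptools features; the
# plain distutils.core.setup imported here only warns about such unknown
# options, so this script effectively assumes setuptools is available
# (e.g. installing via pip).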
|
apache-2.0
| 794,013,742,930,574,100
| 31.030303
| 136
| 0.721591
| false
| 3.26935
| false
| false
| false
|
marble/Toolchain_RenderDocumentation
|
18-Make-and-build/42-Latex/run_45-Copy-latex-typo3-stuff.py
|
1
|
5072
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import print_function
from __future__ import absolute_import
import os
import re
import shutil
import stat
import sys
import tct
from os.path import exists as ospe, join as ospj
from tct import deepget
params = tct.readjson(sys.argv[1])
binabspath = sys.argv[2]
facts = tct.readjson(params['factsfile'])
milestones = tct.readjson(params['milestonesfile'])
reason = ''
resultfile = params['resultfile']
result = tct.readjson(resultfile)
loglist = result['loglist'] = result.get('loglist', [])
toolname = params['toolname']
toolname_pure = params['toolname_pure']
workdir = params['workdir']
exitcode = CONTINUE = 0
# ==================================================
# Make a copy of milestones for later inspection?
# --------------------------------------------------
if 0 or milestones.get('debug_always_make_milestones_snapshot'):
tct.make_snapshot_of_milestones(params['milestonesfile'], sys.argv[1])
# ==================================================
# Helper functions
# --------------------------------------------------
def lookup(D, *keys, **kwdargs):
result = deepget(D, *keys, **kwdargs)
loglist.append((keys, result))
return result
# ==================================================
# define
# --------------------------------------------------
copied_latex_resources = []
run_latex_make_sh_file = None
xeq_name_cnt = 0
# ==================================================
# Check params
# --------------------------------------------------
if exitcode == CONTINUE:
loglist.append('CHECK PARAMS')
make_latex = lookup(milestones, 'make_latex', default=None)
if not make_latex:
        CONTINUE = -2
reason = 'Nothing to do'
if exitcode == CONTINUE:
build_latex = lookup(milestones, 'build_latex', default=None)
builder_latex_folder = lookup(milestones, 'builder_latex_folder', default=None)
latex_contrib_typo3_folder = lookup(milestones,
'latex_contrib_typo3_folder',
default=None)
if not (1
and build_latex
and builder_latex_folder
and latex_contrib_typo3_folder):
CONTINUE = -2
reason = 'Bad params or nothing to do'
if exitcode == CONTINUE:
loglist.append('PARAMS are ok')
else:
loglist.append('Bad PARAMS or nothing to do')
# ==================================================
# work
# --------------------------------------------------
if exitcode == CONTINUE:
if not os.path.isdir(latex_contrib_typo3_folder):
exitcode = 22
reason = 'Folder does not exist'
if exitcode == CONTINUE:
foldername = os.path.split(latex_contrib_typo3_folder)[1]
destpath = ospj(builder_latex_folder, foldername)
shutil.copytree(latex_contrib_typo3_folder, destpath)
if exitcode == CONTINUE:
run_latex_make_sh_file = ospj(builder_latex_folder, 'run-make.sh')
f2text = (
"#!/bin/bash\n"
"\n"
"# This is run-make.sh\n"
"\n"
'scriptdir=$( cd $(dirname "$0") ; pwd -P )'
"\n"
"# cd to this dir\n"
"pushd \"$scriptdir\" >/dev/null\n"
"\n"
"# set environment var pointing to the folder and run make\n"
"TEXINPUTS=::texmf_typo3 make\n"
"\n"
"popd >/dev/null\n"
"\n"
)
with open(run_latex_make_sh_file, 'w') as f2:
f2.write(f2text)
file_permissions = (os.stat(run_latex_make_sh_file).st_mode | stat.S_IXUSR
| stat.S_IXGRP
| stat.S_IXOTH)
os.chmod(run_latex_make_sh_file, file_permissions)
if exitcode == CONTINUE:
makefile_path = ospj(builder_latex_folder, 'Makefile')
makefile_original_path = makefile_path + '.original'
if ospe(makefile_path) and not ospe(makefile_original_path):
shutil.copy2(makefile_path, makefile_original_path)
with open(makefile_path, 'rb') as f1:
data = f1.read()
data, cnt = re.subn("LATEXMKOPTS[ ]*=[ ]*\n", "\n\n\n\nLATEXMKOPTS = -interaction=nonstopmode\n\n\n\n\n", data)
if cnt:
with open(makefile_path, 'wb') as f2:
f2.write(data)
# ==================================================
# Set MILESTONE
# --------------------------------------------------
if copied_latex_resources:
result['MILESTONES'].append({'copied_latex_resources':
copied_latex_resources})
if run_latex_make_sh_file:
result['MILESTONES'].append({'run_latex_make_sh_file':
run_latex_make_sh_file})
# ==================================================
# save result
# --------------------------------------------------
tct.save_the_result(result, resultfile, params, facts, milestones, exitcode, CONTINUE, reason)
# ==================================================
# Return with proper exitcode
# --------------------------------------------------
sys.exit(exitcode)
|
mit
| 4,781,026,684,952,183,000
| 29.554217
| 115
| 0.51321
| false
| 3.723935
| false
| false
| false
|
WarmMe/alpha1
|
WarmMe/thermostat/Activator.py
|
1
|
3097
|
#!/usr/bin/python
# set gpio18 to high when content of file state is 'ON'
import RPi.GPIO as GPIO
import time
import MySQLdb as mdb
import sys
import time
# set GPIO (pin 12) that command the releais
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(12, GPIO.OUT)
def run():
# Vary
curTime = time.strftime("%H:%M:%S")
try:
global con
con = mdb.connect('localhost', 'root', 'warmme', 'warmme');
cur = con.cursor()
# Current temp
cur.execute("SELECT value from sensorMonitorLast")
curTemp = cur.fetchone()
print 'Current temperature: ' + str(curTemp[0])
# Activation type
cur.execute("SELECT type from activationTarget")
qryResult = cur.fetchone()
if qryResult[0] == 'MANUAL':
print 'Activation type: MANUAL'
# Get manual activator
cur = con.cursor()
cur.execute("SELECT tempValue from activationManual")
manualActivator = cur.fetchone()
if manualActivator is None:
print "No manual temp set, set GPIO to low"
turnOff(curTemp[0]);
else:
print 'Target temperature: ' + str(manualActivator[0])
heatMe(curTemp[0], manualActivator[0])
elif qryResult[0] == 'SCHEDULE':
print 'Activation type: SCHEDULE'
# Get shcedule activator
cur = con.cursor()
qry = "SELECT tempValue from activationSchedule where (startTime <= '" + str(curTime) + "' and endTime >= '" + str(curTime) + "') or ((endTime - startTime) < 0 and (('" + str(curTime) + "' >= startTime and '" + str(curTime) + "' < '23:59:59') or ('" + str(curTime) + "' < endTime)))"
cur.execute(qry)
scheduleActivator = cur.fetchone()
if scheduleActivator is None:
print "No schedule, set GPIO to low"
turnOff(curTemp[0]);
else:
print 'Target temperature: ' + str(scheduleActivator[0])
heatMe(curTemp[0], scheduleActivator[0])
elif qryResult[0] == 'OFF':
print 'Activation type: OFF'
print "set GPIO to low"
turnOff(curTemp[0]);
except mdb.Error, e:
print "Error %d: %s" % (e.args[0],e.args[1])
sys.exit(1)
finally:
if con:
con.close()
def heatMe(curTemp, target):
cur = con.cursor()
if curTemp <= target - .3:
print 'status: HIGH'
GPIO.output(12, GPIO.HIGH)
cur.execute("INSERT into activationStatus (state,tempValue,zone_id) values ('ON',"+str(curTemp)+",1)")
con.commit()
cur = con.cursor()
cur.execute("Update activationStatusLast set state = 'ON', tempValue = "+str(curTemp))
con.commit()
elif curTemp > target + .3:
print 'status: LOW'
GPIO.output(12, GPIO.LOW)
cur = con.cursor()
cur.execute("INSERT into activationStatus (state,tempValue,zone_id) values ('OFF',"+str(curTemp)+",1)")
con.commit()
cur = con.cursor()
cur.execute("Update activationStatusLast set state = 'OFF', tempValue = "+str(curTemp))
con.commit()
def turnOff(curTemp):
GPIO.output(12, GPIO.LOW)
cur = con.cursor()
cur.execute("INSERT into activationStatus (state,tempValue,zone_id) values ('OFF',"+str(curTemp)+",1)")
cur.execute("Update activationStatusLast set state = 'OFF', tempValue = "+str(curTemp))
con.commit()
if __name__ == "__main__":
run()
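# Hedged deployment note (path and interval are illustrative assumptions):
# this script evaluates the thermostat state once per invocation, so it is
# typically scheduled periodically, e.g. from the crontab of a user with
# GPIO access:
#
#   * * * * * /usr/bin/python /home/pi/WarmMe/thermostat/Activator.py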
|
gpl-3.0
| -5,307,279,268,875,629,000
| 29.362745
| 286
| 0.659671
| false
| 2.963636
| false
| false
| false
|
Couby/rocket-bike-challenge
|
hardware/rpm.py
|
1
|
1834
|
#!/usr/bin/python3
from gpiozero import Button
from http.server import BaseHTTPRequestHandler, HTTPServer
import time
import threading
hostName = "192.168.1.27"
hostPort = 9000
turns = 0
# HTTP server class.
class MyServer(BaseHTTPRequestHandler):
def do_GET(self):
global turns
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(bytes("<html><head><title>Rocket bike challenge</title></head>", "utf-8"))
self.wfile.write(bytes('<body>{"turns": %d}' % turns, "utf-8"))
# Thread class that permanently checks the reed sensor status.
class ThreadingExample(object):
def __init__(self):
self.button = Button(2)
#self.turns = 0
thread = threading.Thread(target=self.run, args=())
thread.daemon = True # Daemonize thread
thread.start() # Start the execution
def run(self):
global turns
while True:
if self.button.is_pressed == True:
turns = turns + 1
print("Button was pressed {} times".format(turns))
while self.button.is_pressed:
pass
# Main function, launches the thread and the HTTP server
if __name__ == '__main__':
print("Launching counter thread...")
thread = ThreadingExample()
print("Thread launched, launchint HTTP server...")
myServer = HTTPServer((hostName, hostPort), MyServer)
print(time.asctime(), "Server Starts - %s:%s" % (hostName, hostPort))
try:
myServer.serve_forever()
except KeyboardInterrupt:
pass
myServer.server_close()
print(time.asctime(), "Server Stops - %s:%s" % (hostName, hostPort))
|
mit
| -7,046,286,909,193,763,000
| 30.75
| 99
| 0.585605
| false
| 3.978308
| false
| false
| false
|
NeCTAR-RC/heat
|
heat/tests/test_fault_middleware.py
|
1
|
6446
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common import exception as heat_exc
from heat.openstack.common.rpc import common as rpc_common
from heat.tests.common import HeatTestCase
from oslo.config import cfg
import heat.api.middleware.fault as fault
class StackNotFoundChild(heat_exc.StackNotFound):
pass
class FaultMiddlewareTest(HeatTestCase):
def test_openstack_exception_with_kwargs(self):
wrapper = fault.FaultWrapper(None)
msg = wrapper._error(heat_exc.StackNotFound(stack_name='a'))
expected = {'code': 404,
'error': {'message': 'The Stack (a) could not be found.',
'traceback': None,
'type': 'StackNotFound'},
'explanation': 'The resource could not be found.',
'title': 'Not Found'}
self.assertEqual(expected, msg)
def test_openstack_exception_without_kwargs(self):
wrapper = fault.FaultWrapper(None)
msg = wrapper._error(heat_exc.StackResourceLimitExceeded())
expected = {'code': 500,
'error': {'message': 'Maximum resources '
'per stack exceeded.',
'traceback': None,
'type': 'StackResourceLimitExceeded'},
'explanation': 'The server has either erred or is '
'incapable of performing the requested '
'operation.',
'title': 'Internal Server Error'}
self.assertEqual(expected, msg)
def test_exception_with_non_ascii_chars(self):
# We set debug to true to test the code path for serializing traces too
cfg.CONF.set_override('debug', True)
msg = u'Error with non-ascii chars \x80'
class TestException(heat_exc.HeatException):
msg_fmt = msg
wrapper = fault.FaultWrapper(None)
msg = wrapper._error(TestException())
expected = {'code': 500,
'error': {'message': u'Error with non-ascii chars \x80',
'traceback': 'None\n',
'type': 'TestException'},
'explanation': ('The server has either erred or is '
'incapable of performing the requested '
'operation.'),
'title': 'Internal Server Error'}
self.assertEqual(expected, msg)
def test_remote_exception(self):
# We want tracebacks
cfg.CONF.set_override('debug', True)
error = heat_exc.StackNotFound(stack_name='a')
exc_info = (type(error), error, None)
serialized = rpc_common.serialize_remote_exception(exc_info)
remote_error = rpc_common.deserialize_remote_exception(cfg.CONF,
serialized)
wrapper = fault.FaultWrapper(None)
msg = wrapper._error(remote_error)
expected_message, expected_traceback = str(remote_error).split('\n', 1)
expected = {'code': 404,
'error': {'message': expected_message,
'traceback': expected_traceback,
'type': 'StackNotFound'},
'explanation': 'The resource could not be found.',
'title': 'Not Found'}
self.assertEqual(expected, msg)
def test_should_not_ignore_parent_classes(self):
wrapper = fault.FaultWrapper(None)
msg = wrapper._error(StackNotFoundChild(stack_name='a'))
expected = {'code': 404,
'error': {'message': 'The Stack (a) could not be found.',
'traceback': None,
'type': 'StackNotFoundChild'},
'explanation': 'The resource could not be found.',
'title': 'Not Found'}
self.assertEqual(expected, msg)
def test_internal_server_error_when_exeption_and_parents_not_mapped(self):
wrapper = fault.FaultWrapper(None)
class NotMappedException(Exception):
pass
msg = wrapper._error(NotMappedException('A message'))
expected = {'code': 500,
'error': {'message': u'A message',
'traceback': None,
'type': 'NotMappedException'},
'explanation': ('The server has either erred or is '
'incapable of performing the requested '
'operation.'),
'title': 'Internal Server Error'}
self.assertEqual(expected, msg)
def test_should_not_ignore_parent_classes_even_for_remote_ones(self):
# We want tracebacks
cfg.CONF.set_override('debug', True)
cfg.CONF.set_override('allowed_rpc_exception_modules',
['heat.tests.test_fault_middleware'])
error = StackNotFoundChild(stack_name='a')
exc_info = (type(error), error, None)
serialized = rpc_common.serialize_remote_exception(exc_info)
remote_error = rpc_common.deserialize_remote_exception(cfg.CONF,
serialized)
wrapper = fault.FaultWrapper(None)
msg = wrapper._error(remote_error)
expected_message, expected_traceback = str(remote_error).split('\n', 1)
expected = {'code': 404,
'error': {'message': expected_message,
'traceback': expected_traceback,
'type': 'StackNotFoundChild'},
'explanation': 'The resource could not be found.',
'title': 'Not Found'}
self.assertEqual(expected, msg)
|
apache-2.0
| 1,952,317,835,905,729,500
| 44.394366
| 79
| 0.547626
| false
| 4.767751
| true
| false
| false
|
brokenseal/local
|
server/local/local/messages.py
|
1
|
1789
|
from __future__ import unicode_literals
import logging
import pymongo
from django.core import signing
from django.conf import settings
from . import models, exceptions
SECRET_KEY = "test"
routing = {}
logger = logging.getLogger(__name__)
def resolve(name):
return routing[name]
def event(message_or_func):
if callable(message_or_func):
message = ':'.join(message_or_func.__name__.split('_'))
routing[message] = message_or_func
return message_or_func
def _wrapper(func):
routing[message_or_func] = func
return func
return _wrapper
def _get_messages_table():
return pymongo.MongoClient(settings.MONGO_DB_URL).mongodb.default.messages
@event
def authentication_authenticate(connection, token, salt):
try:
channel = signing.loads(
token,
key=SECRET_KEY,
salt=salt,
max_age=settings.CHANNEL_MAX_AGE,
)
except (signing.BadSignature, KeyError):
logging.debug('Authentication error: invalid token')
raise exceptions.AuthenticationError('Invalid token')
else:
connection.channel = channel
logging.debug('Authentication successful')
connection.emit(name='authentication:success')
@event
def message_create(connection, author, text, client_id, **kwargs):
messages = _get_messages_table()
message_id = messages.insert(dict(
author=author,
text=text,
))
new_message = messages.find_one(dict(_id=message_id))
return dict(
name='message:created',
data=dict(
message=new_message,
client_id=client_id,
)
)
@event
def bootstrap(connection):
connection.emit(name="message:list", data=list(_get_messages_table().find()))
|
mit
| 3,372,694,716,418,899,000
| 22.539474
| 81
| 0.644494
| false
| 4.002237
| false
| false
| false
|
ganga-devs/ganga
|
ganga/GangaCore/Core/GangaRepository/GangaRepositoryXML.py
|
1
|
53576
|
# Note: Following stuff must be considered in a GangaRepository:
#
# * lazy loading
# * locking
from GangaCore.Core.GangaRepository import GangaRepository, RepositoryError, InaccessibleObjectError
from GangaCore.Utility.Plugin import PluginManagerError
import os
import os.path
import time
import errno
import copy
import threading
from GangaCore import GANGA_SWAN_INTEGRATION
from GangaCore.Core.GangaRepository.SessionLock import SessionLockManager, dry_run_unix_locks
from GangaCore.Core.GangaRepository.FixedLock import FixedLockManager
import GangaCore.Utility.logging
from GangaCore.Core.GangaRepository.PickleStreamer import to_file as pickle_to_file
from GangaCore.Core.GangaRepository.PickleStreamer import from_file as pickle_from_file
from GangaCore.Core.GangaRepository.VStreamer import to_file as xml_to_file
from GangaCore.Core.GangaRepository.VStreamer import from_file as xml_from_file
from GangaCore.Core.GangaRepository.VStreamer import XMLFileError
from GangaCore.GPIDev.Base.Objects import Node
from GangaCore.Core.GangaRepository.SubJobXMLList import SubJobXMLList
from GangaCore.GPIDev.Base.Proxy import isType, stripProxy, getName
from GangaCore.Utility.Config import getConfig
logger = GangaCore.Utility.logging.getLogger()
save_all_history = False
def check_app_hash(obj):
"""Writes a file safely, raises IOError on error
Args:
obj (GangaObject): This is an object which has a prepared application
"""
isVerifiableApp = False
isVerifiableAna = False
if hasattr(obj, 'application'):
if hasattr(obj.application, 'hash'):
if obj.application.hash is not None:
isVerifiableApp = True
elif hasattr(obj, 'analysis'):
if hasattr(obj.analysis, 'application'):
if hasattr(obj.analysis.application, 'hash'):
if obj.analysis.application.hash is not None:
isVerifiableAna = True
if isVerifiableApp is True:
hashable_app = stripProxy(obj.application)
elif isVerifiableAna is True:
hashable_app = stripProxy(obj.analysis.application)
else:
hashable_app = None
if hashable_app is not None:
if not hashable_app.calc_hash(True):
try:
logger.warning("%s" % hashable_app)
logger.warning('Protected attribute(s) of %s application (associated with %s #%s) changed!' % (getName(hashable_app), getName(obj), obj._registry_id))
except AttributeError as err:
logger.warning('Protected attribute(s) of %s application (associated with %s) changed!!!!' % (getName(hashable_app), getName(obj)))
logger.warning("%s" % err)
jobObj = stripProxy(hashable_app).getJobObject()
if jobObj is not None:
logger.warning('Job: %s is now possibly corrupt!' % jobObj.getFQID('.'))
logger.warning('If you knowingly circumvented the protection, ignore this message (and, optionally,')
logger.warning('re-prepare() the application). Otherwise, please file a bug report at:')
logger.warning('https://github.com/ganga-devs/ganga/issues/')
def safe_save(fn, _obj, to_file, ignore_subs=''):
"""Try to save the XML for this object in as safe a way as possible
Args:
fn (str): This is the name of the file we are to save the object to
_obj (GangaObject): This is the object which we want to save to the file
        to_file (callable): This is the function we want to use to write the object to the file
ignore_subs (str): This is the name(s) of the attribute of _obj we want to ignore in writing to disk
"""
# Add a global lock to make absolutely sure we don't have multiple threads writing files
# See Github Issue 185
with safe_save.lock:
obj = stripProxy(_obj)
check_app_hash(obj)
# Create the dirs
dirname = os.path.dirname(fn)
if not os.path.exists(dirname):
os.makedirs(dirname)
# Prepare new data file
new_name = fn + '.new'
with open(new_name, "w") as tmpfile:
to_file(obj, tmpfile, ignore_subs)
# everything ready so create new data file and backup old one
if os.path.exists(new_name):
# Do we have an old one to backup?
if os.path.exists(fn):
os.rename(fn, fn + "~")
os.rename(new_name, fn)
# Global lock for above function - See issue #185
safe_save.lock = threading.Lock()
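# Hedged usage sketch (illustrative; the path below is hypothetical and real
# callers are the repository's own save path rather than user code):
#
#   safe_save('/path/to/repo/jobs/6.0/jobs/0xxx/0/data',
#             job_object, xml_to_file, ignore_subs='subjobs')
#
# This writes <fn>.new first, then renames any previous file to <fn>~ and the
# new file to <fn>, all while holding safe_save.lock.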
def rmrf(name, count=0):
"""
Safely recursively remove a file/folder from disk by first moving it then removing it
calls self and will only attempt to move/remove a file 3 times before giving up
Args:
count (int): This function calls itself recursively 3 times then gives up, this increments on each call
"""
if count != 0:
logger.debug("Trying again to remove: %s" % name)
if count == 3:
logger.error("Tried 3 times to remove file/folder: %s" % name)
from GangaCore.Core.exceptions import GangaException
raise GangaException("Failed to remove file/folder: %s" % name)
if os.path.isdir(name):
try:
remove_name = name
if not remove_name.endswith('__to_be_deleted'):
remove_name += '_%s__to_be_deleted_' % time.time()
os.rename(name, remove_name)
#logger.debug("Move completed")
except OSError as err:
if err.errno != errno.ENOENT:
logger.debug("rmrf Err: %s" % err)
logger.debug("name: %s" % name)
raise
return
for sfn in os.listdir(remove_name):
try:
rmrf(os.path.join(remove_name, sfn), count)
except OSError as err:
if err.errno == errno.EBUSY:
logger.debug("rmrf Remove err: %s" % err)
logger.debug("name: %s" % remove_name)
## Sleep 2 sec and try again
time.sleep(2.)
rmrf(os.path.join(remove_name, sfn), count+1)
try:
os.removedirs(remove_name)
except OSError as err:
if err.errno == errno.ENOTEMPTY:
rmrf(remove_name, count+1)
elif err.errno != errno.ENOENT:
logger.debug("%s" % err)
raise
return
else:
try:
remove_name = name + "_" + str(time.time()) + '__to_be_deleted_'
os.rename(name, remove_name)
except OSError as err:
if err.errno not in [errno.ENOENT, errno.EBUSY]:
raise
logger.debug("rmrf Move err: %s" % err)
logger.debug("name: %s" % name)
if err.errno == errno.EBUSY:
rmrf(name, count+1)
return
try:
os.remove(remove_name)
except OSError as err:
if err.errno != errno.ENOENT:
logger.debug("%s" % err)
logger.debug("name: %s" % remove_name)
raise
return
class GangaRepositoryLocal(GangaRepository):
"""GangaRepository Local"""
def __init__(self, registry):
"""
Initialize a Repository from within a Registry and keep a reference to the Registry which 'owns' it
Args:
Registry (Registry): This is the registry which manages this Repo
"""
super(GangaRepositoryLocal, self).__init__(registry)
self.dataFileName = "data"
self.sub_split = "subjobs"
self.root = os.path.join(self.registry.location, "6.0", self.registry.name)
self.lockroot = os.path.join(self.registry.location, "6.0")
self.saved_paths = {}
self.saved_idxpaths = {}
self._cache_load_timestamp = {}
self.printed_explanation = False
self._fully_loaded = {}
def startup(self):
""" Starts a repository and reads in a directory structure.
Raise RepositoryError"""
self._load_timestamp = {}
# New Master index to speed up loading of many, MANY files
self._cache_load_timestamp = {}
self._cached_cat = {}
self._cached_cls = {}
self._cached_obj = {}
self._master_index_timestamp = 0
self.known_bad_ids = []
if "XML" in self.registry.type:
self.to_file = xml_to_file
self.from_file = xml_from_file
elif "Pickle" in self.registry.type:
self.to_file = pickle_to_file
self.from_file = pickle_from_file
else:
raise RepositoryError(self, "Unknown Repository type: %s" % self.registry.type)
if getConfig('Configuration')['lockingStrategy'] == "UNIX":
# First test the UNIX locks are working as expected
try:
dry_run_unix_locks(self.lockroot)
except Exception as err:
# Locking has not worked, lets raise an error
logger.error("Error: %s" % err)
msg="\n\nUnable to launch due to underlying filesystem not working with unix locks."
msg+="Please try launching again with [Configuration]lockingStrategy=FIXED to start Ganga without multiple session support."
raise RepositoryError(self, msg)
# Locks passed test so lets continue
self.sessionlock = SessionLockManager(self, self.lockroot, self.registry.name)
elif getConfig('Configuration')['lockingStrategy'] == "FIXED":
self.sessionlock = FixedLockManager(self, self.lockroot, self.registry.name)
else:
raise RepositoryError(self, "Unable to launch due to unknown file-locking Strategy: \"%s\"" % getConfig('Configuration')['lockingStrategy'])
self.sessionlock.startup()
# Load the list of files, this time be verbose and print out a summary
# of errors
self.update_index(True, True)
logger.debug("GangaRepositoryLocal Finished Startup")
def shutdown(self):
"""Shutdown the repository. Flushing is done by the Registry
Raise RepositoryError
Write an index file for all new objects in memory and master index file of indexes"""
from GangaCore.Utility.logging import getLogger
logger = getLogger()
logger.debug("Shutting Down GangaRepositoryLocal: %s" % self.registry.name)
for k in self._fully_loaded:
try:
self.index_write(k, True)
except Exception as err:
logger.error("Warning: problem writing index object with id %s" % k)
try:
self._write_master_cache(True)
except Exception as err:
logger.warning("Warning: Failed to write master index due to: %s" % err)
self.sessionlock.shutdown()
def get_fn(self, this_id):
""" Returns the file name where the data for this object id is saved
Args:
this_id (int): This is the object id we want the XML filename for
"""
if this_id not in self.saved_paths:
self.saved_paths[this_id] = os.path.join(self.root, "%ixxx" % int(this_id * 0.001), "%i" % this_id, self.dataFileName)
return self.saved_paths[this_id]
    def get_idxfn(self, this_id):
        """ Returns the file name where the index file for this object id is saved
Args:
this_id (int): This is the object id we want the index filename for
"""
if this_id not in self.saved_idxpaths:
self.saved_idxpaths[this_id] = os.path.join(self.root, "%ixxx" % int(this_id * 0.001), "%i.index" % this_id)
return self.saved_idxpaths[this_id]
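    # Hedged illustration (added; not part of the original class): the two
    # helpers above shard objects into id//1000 buckets named 'NNxxx'. The id
    # used here is an invented example.
    def _demo_path_layout(self):
        this_id = 12345
        data_fn = self.get_fn(this_id)      # .../12xxx/12345/data
        index_fn = self.get_idxfn(this_id)  # .../12xxx/12345.index
        return data_fn, index_fn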
def index_load(self, this_id):
""" load the index file for this object if necessary
Loads if never loaded or timestamp changed. Creates object if necessary
Returns True if this object has been changed, False if not
Raise IOError on access or unpickling error
Raise OSError on stat error
Raise PluginManagerError if the class name is not found
Args:
this_id (int): This is the id for which we want to load the index file from disk
"""
#logger.debug("Loading index %s" % this_id)
fn = self.get_idxfn(this_id)
# index timestamp changed
fn_ctime = os.stat(fn).st_ctime
cache_time = self._cache_load_timestamp.get(this_id, 0)
if cache_time != fn_ctime:
logger.debug("%s != %s" % (cache_time, fn_ctime))
try:
with open(fn, 'rb') as fobj:
cat, cls, cache = pickle_from_file(fobj)[0]
except EOFError:
pass
except Exception as x:
logger.warning("index_load Exception: %s" % x)
raise IOError("Error on unpickling: %s %s" %(getName(x), x))
if this_id in self.objects:
obj = self.objects[this_id]
setattr(obj, "_registry_refresh", True)
else:
try:
obj = self._make_empty_object_(this_id, cat, cls)
except Exception as err:
raise IOError('Failed to Parse information in Index file: %s. Err: %s' % (fn, err))
this_cache = obj._index_cache
this_data = this_cache if this_cache else {}
for k, v in cache.items():
this_data[k] = v
#obj.setNodeData(this_data)
obj._index_cache = cache
self._cache_load_timestamp[this_id] = fn_ctime
self._cached_cat[this_id] = cat
self._cached_cls[this_id] = cls
self._cached_obj[this_id] = cache
return True
elif this_id not in self.objects:
self.objects[this_id] = self._make_empty_object_(this_id, self._cached_cat[this_id], self._cached_cls[this_id])
self.objects[this_id]._index_cache = self._cached_obj[this_id]
setattr(self.objects[this_id], '_registry_refresh', True)
return True
else:
logger.debug("Doubly loading of object with ID: %s" % this_id)
logger.debug("Just silently continuing")
return False
def index_write(self, this_id, shutdown=False):
""" write an index file for this object (must be locked).
Should not raise any Errors,
Args:
this_id (int): This is the index for which we want to write the index to disk
shutdown (bool): True causes this to always be written regardless of any checks"""
if this_id in self.incomplete_objects:
return
logger.debug("Writing index: %s" % this_id)
obj = self.objects[this_id]
try:
ifn = self.get_idxfn(this_id)
new_idx_cache = self.registry.getIndexCache(stripProxy(obj))
if not os.path.exists(ifn) or shutdown:
new_cache = new_idx_cache
with open(ifn, "wb") as this_file:
new_index = (obj._category, getName(obj), new_cache)
logger.debug("Writing: %s" % str(new_index))
pickle_to_file(new_index, this_file)
self._cached_obj[this_id] = new_cache
obj._index_cache = {}
self._cached_obj[this_id] = new_idx_cache
except IOError as err:
logger.error("Index saving to '%s' failed: %s %s" % (ifn, getName(err), err))
def get_index_listing(self):
"""Get dictionary of possible objects in the Repository: True means index is present,
False if not present
Raise RepositoryError"""
try:
if not os.path.exists(self.root):
os.makedirs(self.root)
obj_chunks = [d for d in os.listdir(self.root) if d.endswith("xxx") and d[:-3].isdigit()]
except OSError as err:
logger.debug("get_index_listing Exception: %s" % err)
raise RepositoryError(self, "Could not list repository '%s'!" % (self.root))
objs = {} # True means index is present, False means index not present
for c in obj_chunks:
try:
listing = os.listdir(os.path.join(self.root, c))
except OSError as err:
                logger.debug("get_index_listing Exception: %s" % err)
raise RepositoryError(self, "Could not list repository '%s'!" % (os.path.join(self.root, c)))
objs.update(dict([(int(l), False) for l in listing if l.isdigit()]))
for l in listing:
if l.endswith(".index") and l[:-6].isdigit():
this_id = int(l[:-6])
if this_id in objs:
objs[this_id] = True
else:
try:
rmrf(self.get_idxfn(this_id))
logger.warning("Deleted index file without data file: %s" % self.get_idxfn(this_id))
except OSError as err:
logger.debug("get_index_listing delete Exception: %s" % err)
return objs
def _read_master_cache(self):
"""
read in the master cache to reduce significant I/O over many indexes separately on startup
"""
try:
_master_idx = os.path.join(self.root, 'master.idx')
if os.path.isfile(_master_idx):
logger.debug("Reading Master index")
self._master_index_timestamp = os.stat(_master_idx).st_ctime
with open(_master_idx, 'rb') as input_f:
this_master_cache = pickle_from_file(input_f)[0]
for this_cache in this_master_cache:
if this_cache[1] >= 0:
this_id = this_cache[0]
self._cache_load_timestamp[this_id] = this_cache[1]
self._cached_cat[this_id] = this_cache[2]
self._cached_cls[this_id] = this_cache[3]
self._cached_obj[this_id] = this_cache[4]
else:
logger.debug("Not Reading Master Index")
except Exception as err:
GangaCore.Utility.logging.log_unknown_exception()
logger.debug("Master Index corrupt, ignoring it")
logger.debug("Exception: %s" % err)
self._clear_stored_cache()
finally:
rmrf(os.path.join(self.root, 'master.idx'))
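    # Hedged illustration (added; not part of the original class): each record
    # of the pickled master index read above is a flat list written by
    # _write_master_cache below as [object id, index-file ctime, category,
    # class name, index-cache dict]. The values here are invented examples.
    def _demo_master_index_record(self):
        return [42, 1500000000.0, 'jobs', 'Job', {'status': 'completed'}]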
def _clear_stored_cache(self):
"""
clear the master cache(s) which have been stored in memory
"""
        # Emptying the dicts directly avoids mutating them while iterating,
        # which raises a RuntimeError on Python 3.
        self._cache_load_timestamp.clear()
        self._cached_cat.clear()
        self._cached_cls.clear()
        self._cached_obj.clear()
def _write_master_cache(self, shutdown=False):
"""
write a master index cache once per 300sec
Args:
            shutdown (bool): True causes this to be written now
"""
try:
_master_idx = os.path.join(self.root, 'master.idx')
this_master_cache = []
if os.path.isfile(_master_idx) and not shutdown:
if abs(self._master_index_timestamp - os.stat(_master_idx).st_ctime) < 300:
return
items_to_save = iter(self.objects.items())
for k, v in items_to_save:
if k in self.incomplete_objects:
continue
try:
if k in self._fully_loaded:
# Check and write index first
obj = v#self.objects[k]
new_index = None
if obj is not None:
new_index = self.registry.getIndexCache(stripProxy(obj))
if new_index is not None:
#logger.debug("k: %s" % k)
arr_k = [k]
if len(self.lock(arr_k)) != 0:
self.index_write(k)
self.unlock(arr_k)
self._cached_obj[k] = new_index
except Exception as err:
logger.debug("Failed to update index: %s on startup/shutdown" % k)
logger.debug("Reason: %s" % err)
iterables = iter(self._cache_load_timestamp.items())
for k, v in iterables:
if k in self.incomplete_objects:
continue
cached_list = []
cached_list.append(k)
try:
fn = self.get_idxfn(k)
if os.path.isfile(fn):
time = os.stat(fn).st_ctime
else:
time = -1
except OSError as err:
logger.debug("_write_master_cache: %s" % err)
logger.debug("_cache_load_timestamp: %s" % self._cache_load_timestamp)
import errno
if err.errno == errno.ENOENT: # If file is not found
time = -1
else:
raise
if time > 0:
cached_list.append(time)
cached_list.append(self._cached_cat[k])
cached_list.append(self._cached_cls[k])
cached_list.append(self._cached_obj[k])
this_master_cache.append(cached_list)
try:
with open(_master_idx, 'wb') as of:
pickle_to_file(this_master_cache, of)
except IOError as err:
logger.debug("write_master: %s" % err)
try:
os.remove(os.path.join(self.root, 'master.idx'))
except OSError as x:
GangaCore.Utility.logging.log_user_exception(True)
except Exception as err:
logger.debug("write_error2: %s" % err)
GangaCore.Utility.logging.log_unknown_exception()
return
def updateLocksNow(self):
"""
Trigger the session locks to all be updated now
This is useful when the SessionLock is updating either too slowly or has gone to sleep when there are multiple sessions
"""
self.sessionlock.updateNow()
def update_index(self, this_id=None, verbose=False, firstRun=False):
""" Update the list of available objects
Raise RepositoryError
TODO avoid updating objects which haven't changed as this causes un-needed I/O
Args:
this_id (int): This is the id we want to explicitly check the index on disk for
verbose (bool): Should we be verbose
            firstRun (bool): If this is the call from the Repo startup then load the master index for a performance boost
"""
# First locate and load the index files
logger.debug("updating index...")
objs = self.get_index_listing()
changed_ids = []
deleted_ids = set(self.objects.keys())
summary = []
if firstRun:
self._read_master_cache()
logger.debug("Iterating over Items")
locked_ids = self.sessionlock.locked
for this_id in objs:
deleted_ids.discard(this_id)
# Make sure we do not overwrite older jobs if someone deleted the
# count file
if this_id > self.sessionlock.count:
self.sessionlock.count = this_id + 1
# Locked IDs can be ignored
if this_id in locked_ids:
continue
# Skip corrupt IDs
if this_id in self.incomplete_objects:
continue
# Now we treat unlocked IDs
try:
# if this succeeds, all is well and we are done
if self.index_load(this_id):
changed_ids.append(this_id)
continue
except IOError as err:
logger.debug("IOError: Failed to load index %i: %s" % (this_id, err))
except OSError as err:
logger.debug("OSError: Failed to load index %i: %s" % (this_id, err))
except PluginManagerError as err:
# Probably should be DEBUG
logger.debug("PluginManagerError: Failed to load index %i: %s" % (this_id, err))
# This is a FATAL error - do not try to load the main file, it
# will fail as well
summary.append((this_id, err))
continue
# this is bad - no or corrupted index but object not loaded yet!
# Try to load it!
if not this_id in self.objects:
try:
logger.debug("Loading disk based Object: %s from %s as indexes were missing" % (this_id, self.registry.name))
self.load([this_id])
changed_ids.append(this_id)
# Write out a new index if the file can be locked
if len(self.lock([this_id])) != 0:
if this_id not in self.incomplete_objects:
# If object is loaded mark it dirty so next flush will regenerate XML,
# otherwise just go about fixing it
if not self.isObjectLoaded(self.objects[this_id]):
self.index_write(this_id)
else:
self.objects[this_id]._setDirty()
#self.unlock([this_id])
except KeyError as err:
logger.debug("update Error: %s" % err)
# deleted job
if this_id in self.objects:
self._internal_del__(this_id)
changed_ids.append(this_id)
except (InaccessibleObjectError, ) as x:
logger.debug("update_index: Failed to load id %i: %s" % (this_id, x))
summary.append((this_id, x))
logger.debug("Iterated over Items")
# Check deleted files:
for this_id in deleted_ids:
self._internal_del__(this_id)
changed_ids.append(this_id)
if len(deleted_ids) > 0:
logger.warning("Registry '%s': Job %s externally deleted." % (self.registry.name, ",".join(map(str, list(deleted_ids)))))
if len(summary) > 0:
cnt = {}
examples = {}
for this_id, x in summary:
if this_id in self.known_bad_ids:
continue
cnt[getName(x)] = cnt.get(getName(x), []) + [str(this_id)]
examples[getName(x)] = str(x)
self.known_bad_ids.append(this_id)
# add object to incomplete_objects
if not this_id in self.incomplete_objects:
logger.error("Adding: %s to Incomplete Objects to avoid loading it again in future" % this_id)
self.incomplete_objects.append(this_id)
for exc, ids in cnt.items():
logger.error("Registry '%s': Failed to load %i jobs (IDs: %s) due to '%s' (first error: %s)" % (self.registry.name, len(ids), ",".join(ids), exc, examples[exc]))
if self.printed_explanation is False:
logger.error("If you want to delete the incomplete objects, you can type:\n")
logger.error("'for i in %s.incomplete_ids(): %s(i).remove()'\n (then press 'Enter' twice)" % (self.registry.name, self.registry.name))
logger.error("WARNING!!! This will result in corrupt jobs being completely deleted!!!")
self.printed_explanation = True
logger.debug("updated index done")
if len(changed_ids) != 0:
isShutdown = not firstRun
self._write_master_cache(isShutdown)
return changed_ids
def add(self, objs, force_ids=None):
""" Add the given objects to the repository, forcing the IDs if told to.
Raise RepositoryError
Args:
objs (list): GangaObject-s which we want to add to the Repo
force_ids (list, None): IDs to assign to object, None for auto-assign
"""
logger.debug("add")
if force_ids not in [None, []]: # assume the ids are already locked by Registry
if not len(objs) == len(force_ids):
raise RepositoryError(self, "Internal Error: add with different number of objects and force_ids!")
ids = force_ids
else:
ids = self.sessionlock.make_new_ids(len(objs))
logger.debug("made ids")
for i in range(0, len(objs)):
fn = self.get_fn(ids[i])
try:
os.makedirs(os.path.dirname(fn))
except OSError as e:
if e.errno != errno.EEXIST:
raise RepositoryError( self, "OSError on mkdir: %s" % (e))
self._internal_setitem__(ids[i], objs[i])
# Set subjobs dirty - they will not be flushed if they are not.
if self.sub_split and hasattr(objs[i], self.sub_split):
try:
sj_len = len(getattr(objs[i], self.sub_split))
if sj_len > 0:
for j in range(sj_len):
getattr(objs[i], self.sub_split)[j]._dirty = True
except AttributeError as err:
logger.debug("RepoXML add Exception: %s" % err)
logger.debug("Added")
return ids
def _safe_flush_xml(self, this_id):
"""
        Flush XML to disk whilst checking for the relevant SubJobXMLList which handles subjobs now
flush for "this_id" in the self.objects list
Args:
this_id (int): This is the id of the object we want to flush to disk
"""
fn = self.get_fn(this_id)
obj = self.objects[this_id]
from GangaCore.Core.GangaRepository.VStreamer import EmptyGangaObject
if not isType(obj, EmptyGangaObject):
split_cache = None
has_children = getattr(obj, self.sub_split, False)
if has_children:
logger.debug("has_children")
if hasattr(getattr(obj, self.sub_split), 'flush'):
# I've been read from disk in the new SubJobXMLList format I know how to flush
getattr(obj, self.sub_split).flush()
else:
# I have been constructed in this session, I don't know how to flush!
if hasattr(getattr(obj, self.sub_split)[0], "_dirty"):
split_cache = getattr(obj, self.sub_split)
for i in range(len(split_cache)):
if not split_cache[i]._dirty:
continue
sfn = os.path.join(os.path.dirname(fn), str(i), self.dataFileName)
if not os.path.exists(os.path.dirname(sfn)):
logger.debug("Constructing Folder: %s" % os.path.dirname(sfn))
os.makedirs(os.path.dirname(sfn))
else:
logger.debug("Using Folder: %s" % os.path.dirname(sfn))
safe_save(sfn, split_cache[i], self.to_file)
split_cache[i]._setFlushed()
# Now generate an index file to take advantage of future non-loading goodness
tempSubJList = SubJobXMLList(os.path.dirname(fn), self.registry, self.dataFileName, False, obj)
## equivalent to for sj in job.subjobs
tempSubJList._setParent(obj)
job_dict = {}
for sj in getattr(obj, self.sub_split):
job_dict[sj.id] = stripProxy(sj)
tempSubJList._reset_cachedJobs(job_dict)
tempSubJList.flush(ignore_disk=True)
del tempSubJList
safe_save(fn, obj, self.to_file, self.sub_split)
# clean files not in subjobs anymore... (bug 64041)
for idn in os.listdir(os.path.dirname(fn)):
split_cache = getattr(obj, self.sub_split)
if idn.isdigit() and int(idn) >= len(split_cache):
rmrf(os.path.join(os.path.dirname(fn), idn))
else:
logger.debug("not has_children")
safe_save(fn, obj, self.to_file, "")
# clean files leftover from sub_split
for idn in os.listdir(os.path.dirname(fn)):
if idn.isdigit():
rmrf(os.path.join(os.path.dirname(fn), idn))
if this_id not in self.incomplete_objects:
self.index_write(this_id)
else:
raise RepositoryError(self, "Cannot flush an Empty object for ID: %s" % this_id)
if this_id not in self._fully_loaded:
self._fully_loaded[this_id] = obj
def flush(self, ids):
"""
flush the set of "ids" to disk and write the XML representing said objects in self.objects
NB: This adds the given objects corresponding to ids to the _fully_loaded dict
Args:
ids (list): List of integers, used as keys to objects in the self.objects dict
"""
logger.debug("Flushing: %s" % ids)
#import traceback
#traceback.print_stack()
for this_id in ids:
if this_id in self.incomplete_objects:
logger.debug("Should NEVER re-flush an incomplete object, it's now 'bad' respect this!")
continue
try:
logger.debug("safe_flush: %s" % this_id)
self._safe_flush_xml(this_id)
self._cache_load_timestamp[this_id] = time.time()
self._cached_cls[this_id] = getName(self.objects[this_id])
self._cached_cat[this_id] = self.objects[this_id]._category
self._cached_obj[this_id] = self.objects[this_id]._index_cache
try:
self.index_write(this_id)
except:
logger.debug("Index write failed")
pass
if this_id not in self._fully_loaded:
self._fully_loaded[this_id] = self.objects[this_id]
subobj_attr = getattr(self.objects[this_id], self.sub_split, None)
sub_attr_dirty = getattr(subobj_attr, '_dirty', False)
if sub_attr_dirty:
if hasattr(subobj_attr, 'flush'):
subobj_attr.flush()
self.objects[this_id]._setFlushed()
except (OSError, IOError, XMLFileError) as x:
raise RepositoryError(self, "Error of type: %s on flushing id '%s': %s" % (type(x), this_id, x))
def _check_index_cache(self, obj, this_id):
"""
Checks the index cache of "this_id" against the index cache generated from the "obj"ect
        If there is a disagreement the index is corrected (or the object marked dirty); nothing is done if everything agrees here
TODO CHECK IF THIS IS VALID GIVEN WE DYNAMICALLY GENERATE INDEX FOR LOADED OBJECTS
Args:
obj (GangaObject): This is the object which we've loaded from disk
this_id (int): This is the object id which is the objects key in the objects dict
"""
new_idx_cache = self.registry.getIndexCache(stripProxy(obj))
if new_idx_cache != obj._index_cache:
logger.debug("NEW: %s" % new_idx_cache)
logger.debug("OLD: %s" % obj._index_cache)
# index is wrong! Try to get read access - then we can fix this
if len(self.lock([this_id])) != 0:
if this_id not in self.incomplete_objects:
# Mark as dirty if loaded, otherwise load and fix
if not self.isObjectLoaded(self.objects[this_id]):
self.index_write(this_id)
else:
self.objects[this_id]._setDirty()
# self.unlock([this_id])
old_idx_subset = all((k in new_idx_cache and new_idx_cache[k] == v) for k, v in obj._index_cache.items())
if not old_idx_subset:
# Old index cache isn't subset of new index cache
new_idx_subset = all((k in obj._index_cache and obj._index_cache[k] == v) for k, v in new_idx_cache.items())
else:
# Old index cache is subset of new index cache so no need to check
new_idx_subset = True
if not old_idx_subset and not new_idx_subset:
if not GANGA_SWAN_INTEGRATION:
logger.warning("Incorrect index cache of '%s' object #%s was corrected!" % (self.registry.name, this_id))
logger.debug("old cache: %s\t\tnew cache: %s" % (obj._index_cache, new_idx_cache))
self.unlock([this_id])
else:
pass
# if we cannot lock this, the inconsistency is
# most likely the result of another ganga
# process modifying the repo
def _parse_xml(self, fn, this_id, load_backup, has_children, tmpobj):
"""
If we must actually load the object from disk then we end up here.
This replaces the attrs of "objects[this_id]" with the attrs from tmpobj
If there are children then a SubJobXMLList is created to manage them.
        The fn of the job is passed to the SubJobXMLList, along with whether we should be loading the backup or not
Args:
fn (str): This is the path to the data file for this object in the XML
this_id (int): This is the integer key of the object in the self.objects dict
load_backup (bool): This reflects whether we are loading the backup 'data~' or normal 'data' XML file
has_children (bool): This contains the result of the decision as to whether this object actually has children
tmpobj (GangaObject): This contains the object which has been read in from the fn file
"""
# If this_id is not in the objects add the object we got from reading the XML
need_to_copy = True
if this_id not in self.objects:
self.objects[this_id] = tmpobj
need_to_copy = False
obj = self.objects[this_id]
        # If the object was already in the objects dict (i.e. a cache object), replace the schema content whilst avoiding R/O checks and such
# The end goal is to keep the object at this_id the same object in memory but to make it closer to tmpobj.
# TODO investigate changing this to copyFrom
# The temp object is from disk so all contents have correctly passed through sanitising via setattr at least once by now so this is safe
if need_to_copy:
for key, val in tmpobj._data.items():
obj.setSchemaAttribute(key, val)
for attr_name, attr_val in obj._schema.allItems():
if attr_name not in tmpobj._data:
obj.setSchemaAttribute(attr_name, obj._schema.getDefaultValue(attr_name))
if has_children:
logger.debug("Adding children")
# NB Keep be a SetSchemaAttribute to bypass the list manipulation which will put this into a list in some cases
obj.setSchemaAttribute(self.sub_split, SubJobXMLList(os.path.dirname(fn), self.registry, self.dataFileName, load_backup, obj))
else:
if obj._schema.hasAttribute(self.sub_split):
# Infinite loop if we use setattr btw
def_val = obj._schema.getDefaultValue(self.sub_split)
if def_val == []:
from GangaCore.GPIDev.Lib.GangaList.GangaList import GangaList
def_val = GangaList()
obj.setSchemaAttribute(self.sub_split, def_val)
from GangaCore.GPIDev.Base.Objects import do_not_copy
for node_key, node_val in obj._data.items():
if isType(node_val, Node):
if node_key not in do_not_copy:
node_val._setParent(obj)
# Check if index cache; if loaded; was valid:
if obj._index_cache not in [{}]:
self._check_index_cache(obj, this_id)
obj._index_cache = {}
if this_id not in self._fully_loaded:
self._fully_loaded[this_id] = obj
def _load_xml_from_obj(self, fobj, fn, this_id, load_backup):
"""
        This method loads the job from fn via the fobj file handler using the self.from_file method; _parse_xml is then called to replace
        self.objects[this_id] with the correct attributes. We also preserve knowledge of whether we're being asked to load a backup or not.
Args:
fobj (file handler): This is the file handler for the fn
fn (str): fn This is the name of the file which contains the XML data
this_id (int): This is the key of the object in the objects dict where the output will be stored
load_backup (bool): This reflects whether we are loading the backup 'data~' or normal 'data' XML file
"""
b4=time.time()
tmpobj, errs = self.from_file(fobj)
a4=time.time()
logger.debug("Loading XML file for ID: %s took %s sec" % (this_id, a4-b4))
if len(errs) > 0:
logger.error("#%s Error(s) Loading File: %s" % (len(errs), fobj.name))
for err in errs:
logger.error("err: %s" % err)
raise InaccessibleObjectError(self, this_id, errs[0])
logger.debug("Checking children: %s" % str(this_id))
#logger.debug("Checking in: %s" % os.path.dirname(fn))
#logger.debug("found: %s" % os.listdir(os.path.dirname(fn)))
has_children = SubJobXMLList.checkJobHasChildren(os.path.dirname(fn), self.dataFileName)
logger.debug("Found children: %s" % str(has_children))
self._parse_xml(fn, this_id, load_backup, has_children, tmpobj)
if hasattr(self.objects[this_id], self.sub_split):
sub_attr = getattr(self.objects[this_id], self.sub_split)
if sub_attr is not None and hasattr(sub_attr, '_setParent'):
sub_attr._setParent(self.objects[this_id])
self._load_timestamp[this_id] = os.fstat(fobj.fileno()).st_ctime
logger.debug("Finished Loading XML")
def _open_xml_file(self, fn, this_id, _copy_backup=False):
"""
        This opens the XML file "fn" for the job "this_id" in self.objects; _copy_backup controls whether a missing 'data' file may be recovered from its 'data~' backup
Args:
fn (str): This is the full XML filename for the given id
this_id (int): This is the key for the object in the objects dict
_copy_backup (bool): Should we use the backup file 'data~' (True) or the 'data' file (False)
"""
fobj = None
has_loaded_backup = False
try:
if not os.path.isfile(fn) and _copy_backup:
if os.path.isfile(fn + '~'):
logger.warning("XML File: %s missing, recovering from backup, recent changes may have been lost!" % fn)
has_loaded_backup = True
try:
from shutil import copyfile
copyfile(fn+'~', fn)
except:
logger.warning("Error Recovering the backup file! loading of Job may Fail!")
fobj = open(fn, "r")
except IOError as x:
if x.errno == errno.ENOENT:
# remove index so we do not continue working with wrong information
try:
# remove internal representation
self._internal_del__(this_id)
rmrf(os.path.dirname(fn) + ".index")
except OSError as err:
logger.debug("load unlink Error: %s" % err)
pass
raise KeyError(this_id)
else:
raise RepositoryError(self, "IOError: %s" % x)
finally:
try:
if os.path.isdir(os.path.dirname(fn)):
ld = os.listdir(os.path.dirname(fn))
if len(ld) == 0:
os.rmdir(os.path.dirname(fn))
logger.warning("No job index or data found, removing empty directory: %s" % os.path.dirname(fn))
except Exception as err:
logger.debug("load error %s" % err)
pass
return fobj, has_loaded_backup
def load(self, ids, load_backup=False):
"""
Load the following "ids" from disk
If we want to load the backup files for these ids then use _copy_backup
        Correctly loaded objects are marked as flushed (clean); objects loaded from a backup for whatever reason are marked dirty
Args:
ids (list): The object keys which we want to iterate over from the objects dict
load_backup (bool): This reflects whether we are loading the backup 'data~' or normal 'data' XML file
"""
#print("load: %s " % ids)
#import traceback
#traceback.print_stack()
#print("\n")
logger.debug("Loading Repo object(s): %s" % ids)
for this_id in ids:
if this_id in self.incomplete_objects:
raise RepositoryError(self, "Trying to re-load a corrupt repository id: %s" % this_id)
fn = self.get_fn(this_id)
if load_backup:
has_loaded_backup = True
fn = fn + "~"
else:
has_loaded_backup = False
try:
fobj, has_loaded_backup2 = self._open_xml_file(fn, this_id, True)
if has_loaded_backup2:
has_loaded_backup = has_loaded_backup2
except Exception as err:
logger.debug("XML load: Failed to load XML file: %s" % fn)
logger.debug("Error was:\n%s" % err)
logger.error("Adding id: %s to Corrupt IDs will not attempt to re-load this session" % this_id)
self.incomplete_objects.append(this_id)
raise
try:
self._load_xml_from_obj(fobj, fn, this_id, load_backup)
except RepositoryError as err:
logger.debug("Repo Exception: %s" % err)
logger.error("Adding id: %s to Corrupt IDs will not attempt to re-load this session" % this_id)
self.incomplete_objects.append(this_id)
raise
except Exception as err:
should_continue = self._handle_load_exception(err, fn, this_id, load_backup)
if should_continue is True:
has_loaded_backup = True
continue
else:
logger.error("Adding id: %s to Corrupt IDs will not attempt to re-load this session" % this_id)
self.incomplete_objects.append(this_id)
raise
finally:
fobj.close()
subobj_attr = getattr(self.objects[this_id], self.sub_split, None)
sub_attr_dirty = getattr(subobj_attr, '_dirty', False)
if has_loaded_backup:
self.objects[this_id]._setDirty()
else:
self.objects[this_id]._setFlushed()
if sub_attr_dirty:
getattr(self.objects[this_id], self.sub_split)._setDirty()
logger.debug("Finished 'load'-ing of: %s" % ids)
def _handle_load_exception(self, err, fn, this_id, load_backup):
"""
This method does a lot of the handling of an exception thrown from the load method
We will return True/False here, True if the error can be correctly caught and False if this is terminal and we couldn't load the object
Args:
err (exception): This is the original exception loading the XML data from disk
fn (str): This is the filename which was used to load the file from disk
this_id (int): This is the key of the object in the objects dict
load_backup (bool): This reflects whether we are loading the backup 'data~' or normal 'data' XML file
"""
if isType(err, XMLFileError):
logger.error("XML File failed to load for Job id: %s" % this_id)
logger.error("Actual Error was:\n%s" % err)
if load_backup:
logger.debug("Could not load backup object #%s: %s" % (this_id, err))
raise InaccessibleObjectError(self, this_id, err)
logger.debug("Could not load object #%s: %s" % (this_id, err))
# try loading backup
try:
self.load([this_id], True)
logger.warning("Object '%s' #%s loaded from backup file - recent changes may be lost." % (self.registry.name, this_id))
return True
except Exception as err2:
logger.debug("Exception when loading backup: %s" % err2 )
logger.error("XML File failed to load for Job id: %s" % this_id)
logger.error("Actual Error was:\n%s" % err)
# add object to incomplete_objects
if not this_id in self.incomplete_objects:
logger.error("Loading: %s into incomplete_objects to avoid loading it again in future" % this_id)
self.incomplete_objects.append(this_id)
# remove index so we do not continue working with wrong
# information
rmrf(os.path.dirname(fn) + ".index")
raise InaccessibleObjectError(self, this_id, err)
return False
def delete(self, ids):
"""
This is the method to 'delete' an object from disk, it's written in python and starts with the indexes first
Args:
ids (list): The object keys which we want to iterate over from the objects dict
"""
for this_id in ids:
# First remove the index, so that it is gone if we later have a
# KeyError
fn = self.get_fn(this_id)
try:
rmrf(os.path.dirname(fn) + ".index")
except OSError as err:
logger.debug("Delete Error: %s" % err)
self._internal_del__(this_id)
rmrf(os.path.dirname(fn))
if this_id in self._fully_loaded:
del self._fully_loaded[this_id]
if this_id in self.objects:
del self.objects[this_id]
def lock(self, ids):
"""
Request a session lock for the following ids
Args:
ids (list): The object keys which we want to iterate over from the objects dict
"""
return self.sessionlock.lock_ids(ids)
def unlock(self, ids):
"""
Unlock (release file locks of) the following ids
Args:
ids (list): The object keys which we want to iterate over from the objects dict
"""
released_ids = self.sessionlock.release_ids(ids)
if len(released_ids) < len(ids):
logger.error("The write locks of some objects could not be released!")
def get_lock_session(self, this_id):
"""get_lock_session(id)
        Tries to determine the session that holds the lock on id for information purposes, and returns an informative string.
Returns None on failure
Args:
this_id (int): Get the id of the session which has a lock on the object with this id
"""
return self.sessionlock.get_lock_session(this_id)
def get_other_sessions(self):
"""get_session_list()
Tries to determine the other sessions that are active and returns an informative string for each of them.
"""
return self.sessionlock.get_other_sessions()
def reap_locks(self):
"""reap_locks() --> True/False
Remotely clear all foreign locks from the session.
WARNING: This is not nice.
Returns True on success, False on error."""
return self.sessionlock.reap_locks()
def clean(self):
"""clean() --> True/False
Clear EVERYTHING in this repository, counter, all jobs, etc.
WARNING: This is not nice."""
self.shutdown()
try:
rmrf(self.root)
except Exception as err:
logger.error("Failed to correctly clean repository due to: %s" % err)
self.startup()
def isObjectLoaded(self, obj):
"""
This will return a true false if an object has been fully loaded into memory
Args:
obj (GangaObject): The object we want to know if it was loaded into memory
"""
try:
_id = next(id_ for id_, o in self._fully_loaded.items() if o is obj)
return True
except StopIteration:
return False
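# Hedged end-to-end sketch (added; not part of the original module). A
# GangaRepositoryLocal is normally owned by a Registry, so both the registry
# and the job object below are assumed to already exist; only methods defined
# in the class above are used.
def _demo_repository_lifecycle(registry, job):
    repo = GangaRepositoryLocal(registry)
    repo.startup()                  # read/create the on-disk structure and session locks
    (new_id,) = repo.add([job])     # allocate an id and create its bucketed folder
    repo.flush([new_id])            # write the XML data file and the per-object index
    repo.load([new_id])             # read it back, marking the object as fully loaded
    repo.shutdown()                 # write the master index and release the locks
    return new_id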
|
gpl-2.0
| -6,180,378,785,507,544,000
| 43.646667
| 177
| 0.559075
| false
| 4.13427
| false
| false
| false
|
google/starthinker
|
dags/bigquery_function_dag.py
|
1
|
4467
|
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
'''
--------------------------------------------------------------
Before running this Airflow module...
Install StarThinker in cloud composer ( recommended ):
From Release: pip install starthinker
From Open Source: pip install git+https://github.com/google/starthinker
Or push local code to the cloud composer plugins directory ( if pushing local code changes ):
source install/deploy.sh
4) Composer Menu
l) Install All
--------------------------------------------------------------
If any recipe task has "auth" set to "user" add user credentials:
1. Ensure an RECIPE['setup']['auth']['user'] = [User Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_user", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/deploy_commandline.md#optional-setup-user-credentials
--------------------------------------------------------------
If any recipe task has "auth" set to "service" add service credentials:
1. Ensure an RECIPE['setup']['auth']['service'] = [Service Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_service", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/cloud_service.md
--------------------------------------------------------------
BigQuery Function
Add a custom function or table to a dataset.
- Specify the dataset, and the function or table will be added.
- Pearson Significance Test: Check if a correlation is significant.
- RGB To HSV: Convert color values for analysis.
--------------------------------------------------------------
This StarThinker DAG can be extended with any additional tasks from the following sources:
- https://google.github.io/starthinker/
- https://github.com/google/starthinker/tree/master/dags
'''
from starthinker.airflow.factory import DAG_Factory
INPUTS = {
'auth': 'service', # Credentials used for writing function.
'function': 'Pearson Significance Test', # Function or table to create.
'dataset': '', # Existing BigQuery dataset.
}
RECIPE = {
'tasks': [
{
'bigquery': {
'auth': {
'field': {
'name': 'auth',
'kind': 'authentication',
'order': 0,
'default': 'service',
'description': 'Credentials used for writing function.'
}
},
'function': {
'field': {
'name': 'function',
'kind': 'choice',
'order': 1,
'choices': [
'Pearson Significance Test',
'RGB To HSV'
],
'default': 'Pearson Significance Test',
'description': 'Function or table to create.'
}
},
'to': {
'dataset': {
'field': {
'name': 'dataset',
'kind': 'string',
'order': 1,
'default': '',
'description': 'Existing BigQuery dataset.'
}
}
}
}
}
]
}
dag_maker = DAG_Factory('bigquery_function', RECIPE, INPUTS)
dag = dag_maker.generate()
if __name__ == "__main__":
dag_maker.print_commandline()
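# Hedged helper (added; not part of the generated recipe): shows how the same
# RECIPE can be pointed at a concrete dataset by overriding INPUTS before the
# factory runs. The dataset name is an illustrative placeholder.
def _example_dag(dataset_name='my_reporting_dataset'):
    example_inputs = dict(INPUTS, dataset=dataset_name, function='RGB To HSV')
    return DAG_Factory('bigquery_function_example', RECIPE, example_inputs).generate()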
|
apache-2.0
| -8,183,038,106,305,137,000
| 32.088889
| 145
| 0.570405
| false
| 4.324298
| false
| false
| false
|
eggsandbeer/scheduler
|
synergy/db/model/daemon_process_entry.py
|
1
|
2920
|
__author__ = 'Bohdan Mushkevych'
from odm.document import BaseDocument
from odm.fields import StringField, DictField, ListField
from synergy.scheduler.scheduler_constants import TYPE_MANAGED, TYPE_FREERUN, TYPE_GARBAGE_COLLECTOR, EXCHANGE_UTILS, \
TYPE_DAEMON
PROCESS_NAME = 'process_name'
CLASSNAME = 'classname'
MQ_QUEUE = 'mq_queue'
MQ_EXCHANGE = 'mq_exchange'
MQ_ROUTING_KEY = 'mq_routing_key'
ARGUMENTS = 'arguments'
TOKEN = 'token'
PROCESS_TYPE = 'process_type'
LOG_FILENAME = 'log_filename'
LOG_TAG = 'log_tag'
PID_FILENAME = 'pid_filename'
PRESENT_ON_BOXES = 'present_on_boxes' # list of boxes where this process is monitored by the Supervisor
class DaemonProcessEntry(BaseDocument):
""" Non-persistent model. This class presents Process Context Entry record """
process_name = StringField(PROCESS_NAME)
classname = StringField(CLASSNAME)
token = StringField(TOKEN)
mq_queue = StringField(MQ_QUEUE)
mq_exchange = StringField(MQ_EXCHANGE)
mq_routing_key = StringField(MQ_ROUTING_KEY)
arguments = DictField(ARGUMENTS)
process_type = StringField(PROCESS_TYPE, choices=[TYPE_MANAGED, TYPE_FREERUN, TYPE_DAEMON, TYPE_GARBAGE_COLLECTOR])
present_on_boxes = ListField(PRESENT_ON_BOXES)
pid_filename = StringField(PID_FILENAME)
log_filename = StringField(LOG_FILENAME)
@BaseDocument.key.getter
def key(self):
return self.process_name
@key.setter
def key(self, value):
""" :param value: name of the process """
self.process_name = value
def daemon_context_entry(process_name,
classname,
token,
exchange=EXCHANGE_UTILS,
present_on_boxes=None,
arguments=None,
queue=None,
routing=None,
process_type=TYPE_DAEMON,
pid_file=None,
log_file=None):
""" forms process context entry """
_ROUTING_PREFIX = 'routing_'
_QUEUE_PREFIX = 'queue_'
_SUFFIX = '_daemon'
if queue is None:
queue = _QUEUE_PREFIX + token + _SUFFIX
if routing is None:
routing = _ROUTING_PREFIX + token + _SUFFIX
if pid_file is None:
pid_file = token + _SUFFIX + '.pid'
if log_file is None:
log_file = token + _SUFFIX + '.log'
if arguments is None:
arguments = dict()
else:
assert isinstance(arguments, dict)
process_entry = DaemonProcessEntry(
process_name=process_name,
classname=classname,
token=token,
mq_queue=queue,
mq_routing_key=routing,
mq_exchange=exchange,
present_on_boxes=present_on_boxes,
arguments=arguments,
process_type=process_type,
log_filename=log_file,
pid_filename=pid_file)
return process_entry
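# Hedged usage sketch (added; not part of the original module). The process
# and class names are invented; with only the required arguments the helper
# derives 'queue_gc_daemon', 'routing_gc_daemon', 'gc_daemon.pid' and
# 'gc_daemon.log' from the token.
def _demo_daemon_context_entry():
    entry = daemon_context_entry(process_name='GarbageCollectorDaemon',
                                 classname='workers.garbage_collector.GarbageCollectorWorker',
                                 token='gc')
    return entry.key, entry.mq_queue, entry.mq_routing_key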
|
bsd-3-clause
| 1,068,565,629,341,200,000
| 31.444444
| 119
| 0.621575
| false
| 3.80704
| false
| false
| false
|
updownlife/multipleK
|
bin/reads2kmer/reads2kmer.py
|
1
|
1433
|
#!/usr/bin/env python
import sys
from Bio import SeqIO
from optparse import OptionParser
parser = OptionParser()
parser.add_option('-o','--output', dest = "outputFile", help = "Name of the output file")
parser.add_option('-k','--klength', dest = "kmerLength", help = "Length of kmer")
parser.add_option('-r','--readsfile', dest = "readsFilename", help = "Name of the reads file")
(options, args) = parser.parse_args(sys.argv[1:])
kmerFilename = options.outputFile
readsFilename = options.readsFilename
kmerLength = int(options.kmerLength)
read_id = -1;
if readsFilename[-1] == 'a':
formatStr = 'fasta'
else:
formatStr = 'fastq'
kmer_file = open(kmerFilename, 'w')
kmer_list=""
buffer_size = 255
count = 0
for seq_record in SeqIO.parse(readsFilename,formatStr):
read_id += 1
cur = 0
cur_max = len(seq_record) - kmerLength
    # include the final window so a read of length L yields L - k + 1 k-mers
    for cur in range(0, cur_max + 1):
kmer_seq = str(seq_record.seq[cur:cur+kmerLength]);
kmer = '>' + str(read_id) +' '+ seq_record.id + '\n' + kmer_seq + '\n'
# kmer = SeqIO.SeqRecord(kmer_seq, id=seq_record.id, description="")
# kmer = SeqIO.SeqRecord(kmer_seq, id=seq_record.id, name=seq_record.name, description=seq_record.description)
kmer_list += kmer
count += 1;
if count > buffer_size:
kmer_file.write(kmer_list);
count = 0
kmer_list = "";
# SeqIO.write(kmer_list, kmer_file, "fasta");
if count != 0:
kmer_file.write(kmer_list)
kmer_file.close()
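# Hedged sketch (added; not part of the original script): the sliding window
# above reduced to a plain function on a string, so the FASTA/FASTQ handling
# can be ignored. The sequence in the comment is an invented example.
def kmers_of(sequence, k):
    return [sequence[i:i + k] for i in range(len(sequence) - k + 1)]
# kmers_of('ACGTAC', 4) -> ['ACGT', 'CGTA', 'GTAC']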
|
gpl-2.0
| -4,149,227,004,103,901,000
| 32.325581
| 111
| 0.665736
| false
| 2.866
| false
| false
| false
|
f3at/feat
|
src/feat/agencies/messaging/emu.py
|
1
|
6790
|
# F3AT - Flumotion Asynchronous Autonomous Agent Toolkit
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# See "LICENSE.GPL" in the source distribution for more information.
# Headers in this file shall remain intact.
# -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
import functools
from zope.interface import implements
from feat.common import log, defer
from feat.agencies.messaging.rabbitmq import Connection, Queue
from feat.agencies.message import BaseMessage
from feat.agencies import common
from feat.agencies.messaging.interface import IMessagingClient
class DirectExchange(object):
def __init__(self, name):
self.name = name
# key -> [ list of queues ]
self._bindings = {}
def bind(self, queue, key):
assert isinstance(queue, Queue)
list_for_key = self._bindings.get(key, [])
if not queue in list_for_key:
list_for_key.append(queue)
self._bindings[key] = list_for_key
def unbind(self, queue, key):
list_for_key = self._bindings.get(key, [])
if queue in list_for_key:
list_for_key.remove(queue)
if len(list_for_key) == 0:
del(self._bindings[key])
def publish(self, message, key):
assert message is not None
list_for_key = self._bindings.get(key, [])
for queue in list_for_key:
queue.enqueue(message)
class FanoutExchange(object):
def __init__(self, name):
self.name = name
# [ list of queues ]
self._bindings = []
def bind(self, queue, key=None):
assert isinstance(queue, Queue), type(Queue)
if key is not None:
raise AttributeError("Specified key for fanout exchange. Key: %r" %
(key, ))
if queue not in self._bindings:
self._bindings.append(queue)
def unbind(self, queue, key=None):
if key is not None:
raise AttributeError("Specified key for fanout exchange. Key: %r" %
(key, ))
try:
self._bindings.remove(queue)
except ValueError:
            self.error("Queue %r not bound to exchange %r" % (queue, self))
def publish(self, message, key=None):
assert message is not None
if key is not None:
raise AttributeError("Specified key for fanout exchange. Key: %r" %
(key, ))
for queue in self._bindings:
queue.enqueue(message)
class RabbitMQ(common.ConnectionManager, log.Logger, log.LogProxy,
common.Statistics):
implements(IMessagingClient)
log_category = "emu-rabbitmq"
exchange_factories = {'fanout': FanoutExchange,
'direct': DirectExchange}
def __init__(self):
common.ConnectionManager.__init__(self)
log_keeper = log.get_default() or log.FluLogKeeper()
log.LogProxy.__init__(self, log_keeper)
log.Logger.__init__(self, self)
common.Statistics.__init__(self)
# name -> queue
self._queues = {}
# name -> exchange
self._exchanges = {}
self._on_connected()
self._enabled = True
### called by simulation driver ###
def disable(self):
self._enabled = False
def enable(self):
self._enabled = True
### IMessagingClient ###
def is_idle(self):
return all(q.is_idle() for q in self._queues.itervalues())
# is_disconnected() from common.ConnectionManager
# wait_connected() from common.ConnectionManager
def disconnect(self):
# nothing to do here
pass
def new_channel(self, sink, queue_name=None):
return Connection(self, sink, queue_name)
def connect(self):
# nothing to do here, in future here implement timouts and/or failures
pass
# add_disconnected_cb() from common.ConnectionManager
# add_reconnected_cb() from common.ConnectionManager
### eoi ###
def define_exchange(self, name, exchange_type=None):
assert name is not None
factory = self.exchange_factories[exchange_type]
exchange = self._get_exchange(name)
if not exchange:
self.log("Defining exchange: %r" % name)
self.increase_stat('exchanges declared')
exchange = factory(name)
self._exchanges[name] = exchange
return exchange
def define_queue(self, name):
assert name is not None
queue = self._get_queue(name)
if not queue:
self.increase_stat('queues created')
queue = Queue(name, on_deliver=functools.partial(
self.increase_stat, 'messages delivered'))
self._queues[name] = queue
self.log("Defining queue: %r" % name)
return queue
def publish(self, key, shard, message):
assert isinstance(message, BaseMessage), str(type(message))
if not self._enabled:
self.log("RabbitMQ is disabled, message will not be really sent")
return defer.succeed(message)
exchange = self._get_exchange(shard)
if exchange:
self.increase_stat('messages published')
exchange.publish(message, key)
else:
self.error("Exchange %r not found!" % shard)
return defer.succeed(message)
def create_binding(self, exchange, queue, key=None):
ex = self._get_exchange(exchange)
if ex is None:
exchange_type = 'direct' if key is not None else 'fanout'
ex = self.define_exchange(exchange, exchange_type)
que = self._get_queue(queue)
ex.bind(que, key)
def delete_binding(self, exchange, queue, key=None):
ex = self._get_exchange(exchange)
que = self._get_queue(queue)
ex.unbind(que, key)
### private ###
def _get_exchange(self, name):
return self._exchanges.get(name, None)
def _get_queue(self, name):
return self._queues.get(name, None)
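# Hedged usage sketch (added; not part of the original module): wiring the
# in-memory broker by hand. The exchange, queue and routing-key names are
# invented, and 'message' is assumed to be a BaseMessage instance as required
# by publish() above.
def _demo_emu_routing(message):
    broker = RabbitMQ()
    broker.define_queue('demo-queue')
    broker.create_binding('demo-shard', 'demo-queue', key='demo-key')  # declares a direct exchange
    return broker.publish('demo-key', 'demo-shard', message)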
|
gpl-2.0
| -3,548,642,203,115,684,400
| 29.863636
| 79
| 0.614286
| false
| 4.056153
| false
| false
| false
|
michaeltchapman/flaskmon
|
app.py
|
1
|
6975
|
from flask import Flask, render_template, request, jsonify
from sqlalchemy import create_engine, Table, MetaData
from sqlalchemy.orm import sessionmaker
from rrdtool import fetch
import time
from os import listdir
graph_height = 50.0
app = Flask(__name__)
### SqlAlchemy stuff for accessing Openstack State ###
db = create_engine('postgresql://nova:testing@os-sql.os/nova')
Session = sessionmaker(bind=db)
session = Session()
metadata = MetaData(db)
sqlservices = Table('services', metadata, autoload=True)
sqlinstances = Table('instances', metadata, autoload=True)
# TODO split nodes by domain/cluster
domains = dict()
nodes = dict()
domains["openstack"] = nodes
@app.route('/')
def index():
return render_template('index.html')
green = '#3B8020'
yellow = '#bfbf00'
orange = '#f07a13'
red = '#bd3838'
@app.route('/fluid')
def fluid():
nodelist = listdir('/var/lib/ganglia/rrds/unspecified')
for node in nodelist:
if node != '__SummaryInfo__':
nodes[node.split('.')[0]] = dict()
nodes[node.split('.')[0]]['domain'] = node.split('.')[1]
nodes[node.split('.')[0]]['f'] = dict()
nodes[node.split('.')[0]]['s'] = dict()
#### SERVICES ####
# retrieve service statuses from nova database
# should do this from a metric
#for row in session.query(sqlservices):
# if row.host.encode("utf-8") not in nodes:
# print row.host.encode("utf-8")
# pass
# nodes[row.host.encode("utf-8")][s][row.topic.encode("utf-8") + '-disabled'] = row.disabled
# query sql server status
# do this from a local metric instead of here.
# r = session.query("Tuples Returned", "Tuples Fetched", "Transactions Committed", "Blocks Fetched", "Block Cache Hits").from_statement('select pg_stat_get_db_tuples_returned(1) as "Tuples Returned", pg_stat_get_db_tuples_fetched(1) as "Tuples Fetched", pg_stat_get_db_xact_commit(1) as "Transactions Committed", pg_stat_get_db_blocks_fetched(1) as "Blocks Fetched", pg_stat_get_db_blocks_hit(1) as "Block Cache Hits"').all()[0]
#d = dict()
#for row in r.keys():
# d[row] = r.__dict__[row]
#nodes['os-sql'] = d
#### LOAD ####
# use rrdtool to get load of each server
res = 60 # 1 minute
t = int(time.mktime(time.localtime(time.time())))
# need to move things out of 'unspecified" at some point...
# grab 10 minutes because fetch is a bit buggy
for node in nodes:
metrics = listdir('/var/lib/ganglia/rrds/unspecified/' + node + '.' + nodes[node]['domain'])
load_raw = fetch('/var/lib/ganglia/rrds/unspecified/'
+ node + '.' + nodes[node]['domain'] + '/'
+ 'load_one.rrd', 'AVERAGE', '-r ' + str(res),
'-s e-10m', '-e ' + str(t/res*res))[2]
cpus_raw = fetch('/var/lib/ganglia/rrds/unspecified/'
+ node + '.' + nodes[node]['domain'] + '/'
+ 'cpu_num.rrd', 'AVERAGE', '-r ' + str(res),
'-s e-10m', '-e ' + str(t/res*res))[2]
# If we are in the middle of a given
# minute there will be a null value
# so check back a couple of times to see
# if we hit a real value, then mark the
# host as down if that doesn't work
load = load_raw[-2:-1][0][0]
if load == None:
load = load_raw[-3:-2][0][0]
if load == None:
load = load_raw[-4:-3][0][0]
if load == None:
load = -1.0
cpus = cpus_raw[-2:-1][0][0]
if cpus == None:
cpus = cpus_raw[-3:-2][0][0]
if cpus == None:
cpus = cpus_raw[-4:-3][0][0]
if cpus == None:
cpus = -1.0;
if load > 0:
load = load / cpus
        # classify the normalised load into a colour band; an elif chain keeps
        # boundary values such as 0.25 from falling through with no colour
        if load < 0:
            nodes[node.split('.')[0]]['s']['load'] = 'down'
        elif load < 0.25:
            nodes[node.split('.')[0]]['s']['load'] = 'green'
        elif load < 0.5:
            nodes[node.split('.')[0]]['s']['load'] = 'yellow'
        elif load < 0.75:
            nodes[node.split('.')[0]]['s']['load'] = 'orange'
        else:
            nodes[node.split('.')[0]]['s']['load'] = 'red'
return render_template('fluid.html', nodes=nodes)
# ajax route for node metric div
@app.route('/get_metric')
def get_metric():
node = request.args.get('node', 0, type=str)
# list of nodes avabilable from ganglia
nodelist = listdir('/var/lib/ganglia/rrds/unspecified')
for n in nodelist:
if n != '__SummaryInfo__':
nodes[n.split('.')[0]] = dict()
nodes[n.split('.')[0]]['domain'] = n.split('.')[1]
nodes[node.split('.')[0]]['f'] = dict()
nodes[node.split('.')[0]]['s'] = dict()
# use rrdtool to get load of server
res = 600 # 5 minutes
t = int(time.mktime(time.localtime(time.time())))
# need to move things out of 'unspecified" at some point...
metrics = listdir('/var/lib/ganglia/rrds/unspecified/' + node + '.' + nodes[node]['domain'])
for metric in metrics:
rawdata = fetch('/var/lib/ganglia/rrds/unspecified/'
+ node + '.' + nodes[node]['domain'] + '/'
+ metric, 'AVERAGE', '-r ' + str(res),
'-s e-30m', '-e ' + str(t/res*res))[2]
# find maximum
m = 0.0
for datapoint in rawdata:
if isinstance(datapoint[0], float):
if datapoint[0] > m:
m = datapoint[0]
if m == 0:
ratio = 1
else:
ratio = graph_height/m
data = list()
for i, datapoint in enumerate(rawdata):
if isinstance(datapoint[0], float) and i < 6: # Maybe remove size limit...
value = datapoint[0] * ratio
point = value
if '.' in str(value):
point = str(value).split('.')[0]# + "." + str(value).split('.')[1][:2] # round to 2 decimal places
data.append([str(point), datapoint[0]]) # append the normalised value for display plus the actual value for diagnosis
if isinstance(datapoint[0], str):
data.append(datapoint[0])
# TODO Handle string metrics here
if isinstance(rawdata[0][0], float):
nodes[node]['f'][metric.split('.')[0]] = data
if isinstance(rawdata[0][0], str):
nodes[node]['s'][metric.split('.')[0]] = data
instances = [ instance for instance in session.query(sqlinstances) if instance.deleted == False]
for instance in instances:
print instance.host
return jsonify(metrics=nodes[node])
if __name__ == '__main__':
app.run(host='0.0.0.0')
#app.run(host='172.22.1.205', debug=True)
|
bsd-3-clause
| 2,899,113,168,013,195,000
| 35.139896
| 435
| 0.531613
| false
| 3.553235
| false
| false
| false
|
easel/gamestats
|
src/django/gamestats/loot/submission.py
|
1
|
1871
|
from xml.etree import ElementTree
from django.contrib.auth.models import User
from gamestats.loot.models import Character, Item, Loot, Attendance, Kill, LootType
def parse_xml(xml):
"""
Parse an XML submission
"""
root = ElementTree.fromstring(xml)
submitter = User.objects.get(username__iexact=root.get('submitter'))
elem = root.find('loot')
for child in elem.getchildren():
character, _ = Character.objects.get_or_create(name=child.get('looter'))
item, _ = Item.objects.get_or_create(name=child.get('item'))
timestamp = child.get('timestamp').replace('T', ' ')
Loot.objects.get_or_create(
submitter=submitter,
timestamp=timestamp,
character=character,
item=item,
defaults = {
'lootType': LootType.objects.get(name='Unknown')
}
)
# elem = tree.getroot().find('attendance')
# for child in elem.getchildren():
# character = Character.objects.get(name=child.get('name'))
# start_time = child.get('start_time').replace('T', ' ')
# end_time = child.get('end_time').replace('T', ' ')
# Attendance.objects.get_or_create(
# submitter = submitter,
# attendee = character,
# start_time = start_time,
# end_time = end_time
# )
# db.addAttendee(userid, characterid, start_time, end_time)
    elem = root.find('kills')
    for child in elem.getchildren():
        # get_or_create returns an (object, created) tuple, so unpack it as above
        killer, _ = Character.objects.get_or_create(name=child.get('killer'))
        killee, _ = Character.objects.get_or_create(name=child.get('killee'))
timestamp = child.get('timestamp').replace('T', ' ')
Kill.objects.add_if_new(
submitter = submitter,
killer = killer,
killee = killee,
timestamp = timestamp
)
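# Hedged illustration (added; not part of the original module): an XML payload
# shaped the way parse_xml above expects. The element names 'submission',
# 'drop' and 'kill' and all attribute values are invented; only the attribute
# names are taken from the parser.
EXAMPLE_SUBMISSION = """
<submission submitter="raidleader">
  <loot>
    <drop looter="Thrall" item="Epic Sword" timestamp="2010-01-01T20:30:00"/>
  </loot>
  <kills>
    <kill killer="Thrall" killee="Onyxia" timestamp="2010-01-01T20:35:00"/>
  </kills>
</submission>
"""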
|
mit
| -6,596,245,406,597,632,000
| 35.686275
| 83
| 0.590593
| false
| 3.577438
| false
| false
| false
|
sharonlev/pyLoggingExtras
|
test/OutputSetter.py
|
1
|
2060
|
__author__ = 'Sharon Lev'
__email__ = 'sharon_lev@yahoo.com'
__date__ = '10/25/16'
import sys
from StringIO import StringIO
from unittest import TestCase
from logging import root
from json import dumps
class OutputSetter(TestCase):
    """
    Base TestCase that redirects stdout into a StringIO buffer so that log
    output produced by the code under test can be captured and validated.
    """
temp_stdout = None
@classmethod
def setUpClass(cls):
for handler in root.handlers[:]:
root.removeHandler(handler)
cls.temp_stdout = sys.stdout
sys.stdout = StringIO()
def setUp(self):
sys.stdout.truncate(0)
def tearDown(self):
content = sys.stdout.getvalue()
sys.stderr.writelines(content)
@classmethod
def tearDownClass(cls):
cls.tmp = sys.stdout
sys.stdout = cls.temp_stdout
print 'done!'
def get_log_lines_parts(self):
"""
:return: list of logged lines separated by separator ":"
"""
output = sys.stdout.getvalue().splitlines()
return [line.split(":") for line in output]
def validate_output(self, level, method, expected_output, *wargs, **kwargs):
"""
        :param level: expected log level name prefixing the entering/exiting lines
        :param method: name of the logged method expected in each line
        :param expected_output: return value expected to appear JSON-encoded in the exiting line
        :param wargs: positional arguments expected to appear in the entering and call lines
        :param kwargs: keyword arguments expected to appear in the entering and call lines
        :return: the captured log lines, each split on ":"
        """
output = self.get_log_lines_parts()
self.assertEqual(len(output), 3, output)
for line in [output[0]] + [output[2]]:
self.assertEqual(line[0], level)
self.assertEqual(line[1], method)
self.assertIn(line[2].split()[0], ['entering', 'exiting'])
if wargs:
for arg in wargs:
self.assertIn(str(arg), output[0][2])
self.assertIn(str(arg), output[1][1])
if kwargs:
for key, value in kwargs.iteritems():
self.assertIn(str(key), ':'.join(output[0]))
self.assertIn(str(value), ':'.join(output[0]))
self.assertIn(str(value), output[1][1])
if expected_output:
self.assertIn("%s" % dumps(expected_output, ensure_ascii=False), ":".join(output[2]))
return output
|
gpl-3.0
| -1,023,077,614,002,122,000
| 27.625
| 97
| 0.562136
| false
| 3.931298
| false
| false
| false
|
vaishaksuresh/udacity_data_analyst
|
P2/ProblemSets_2_to_4/P2_02.py
|
1
|
1339
|
import pandas
import pandasql
def max_temp_aggregate_by_fog(filename):
'''
This function should run a SQL query on a dataframe of
weather data. The SQL query should return two columns and
two rows - whether it was foggy or not (0 or 1) and the max
maxtempi for that fog value (i.e., the maximum max temperature
for both foggy and non-foggy days). The dataframe will be
titled 'weather_data'. You'll need to provide the SQL query.
You might also find that interpreting numbers as integers or floats may not
work initially. In order to get around this issue, it may be useful to cast
these numbers as integers. This can be done by writing cast(column as integer).
So for example, if we wanted to cast the maxtempi column as an integer, we would actually
write something like where cast(maxtempi as integer) = 76, as opposed to simply
where maxtempi = 76.
You can see the weather data that we are passing in below:
https://www.dropbox.com/s/7sf0yqc9ykpq3w8/weather_underground.csv
'''
weather_data = pandas.read_csv(filename)
q = """
select fog, max(cast (maxtempi as integer)) from weather_data group by fog;
"""
# Execute the SQL query against the pandas dataframe.
max_temp_by_fog = pandasql.sqldf(q, locals())
return max_temp_by_fog
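# Minimal usage sketch, not part of the original exercise: the CSV filename
# below is an assumption; any weather_underground.csv with 'fog' and
# 'maxtempi' columns should work.
if __name__ == '__main__':
    print(max_temp_aggregate_by_fog('weather_underground.csv'))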
|
gpl-2.0
| -4,712,227,669,270,800,000
| 38.382353
| 93
| 0.704257
| false
| 3.847701
| false
| false
| false
|
maas/maas
|
src/maasserver/api/tests/test_domains.py
|
1
|
9830
|
# Copyright 2016 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Tests for Domain API."""
import http.client
import json
import random
from django.conf import settings
from django.urls import reverse
from testtools.matchers import ContainsDict, Equals
from maasserver.models import GlobalDefault
from maasserver.models.dnspublication import zone_serial
from maasserver.models.domain import Domain
from maasserver.sequence import INT_MAX
from maasserver.testing.api import APITestCase
from maasserver.testing.factory import factory
from maasserver.utils.orm import reload_object
def get_domains_uri():
"""Return a Domain's URI on the API."""
return reverse("domains_handler", args=[])
def get_domain_uri(domain):
"""Return a Domain URI on the API."""
return reverse("domain_handler", args=[domain.id])
class TestDomainsAPI(APITestCase.ForUser):
def test_handler_path(self):
self.assertEqual("/MAAS/api/2.0/domains/", get_domains_uri())
def test_read(self):
for _ in range(3):
factory.make_Domain()
uri = get_domains_uri()
response = self.client.get(uri)
self.assertEqual(
http.client.OK, response.status_code, response.content
)
expected_ids = [domain.id for domain in Domain.objects.all()]
result_ids = [
domain["id"]
for domain in json.loads(
response.content.decode(settings.DEFAULT_CHARSET)
)
]
self.assertItemsEqual(expected_ids, result_ids)
def test_create(self):
self.become_admin()
domain_name = factory.make_name("domain")
uri = get_domains_uri()
response = self.client.post(uri, {"name": domain_name})
self.assertEqual(
http.client.OK, response.status_code, response.content
)
self.assertEqual(
domain_name,
json.loads(response.content.decode(settings.DEFAULT_CHARSET))[
"name"
],
)
def test_create_admin_only(self):
domain_name = factory.make_name("domain")
uri = get_domains_uri()
response = self.client.post(uri, {"name": domain_name})
self.assertEqual(
http.client.FORBIDDEN, response.status_code, response.content
)
def test_create_requires_name(self):
self.become_admin()
uri = get_domains_uri()
response = self.client.post(uri, {})
self.assertEqual(
http.client.BAD_REQUEST, response.status_code, response.content
)
def test_can_set_serial(self):
zone_serial.create_if_not_exists()
self.become_admin()
uri = get_domains_uri()
serial = random.randint(1, INT_MAX)
response = self.client.post(
uri, {"op": "set_serial", "serial": str(serial)}
)
self.assertEqual(
http.client.OK, response.status_code, response.content
)
# The handler forces a DNS reload by creating a new DNS publication,
# so the serial has already been incremented.
self.assertEqual(serial + 1, next(zone_serial))
def test_set_serial_rejects_serials_less_than_1(self):
zone_serial.create_if_not_exists()
self.become_admin()
uri = get_domains_uri()
# A serial of 1 is fine.
response = self.client.post(uri, {"op": "set_serial", "serial": "1"})
self.assertEqual(
http.client.OK, response.status_code, response.content
)
# A serial of 0 is rejected.
response = self.client.post(uri, {"op": "set_serial", "serial": "0"})
self.assertEqual(
http.client.BAD_REQUEST, response.status_code, response.content
)
def test_set_serial_rejects_serials_greater_than_4294967295(self):
zone_serial.create_if_not_exists()
self.become_admin()
uri = get_domains_uri()
# A serial of 4294967295 is fine.
response = self.client.post(
uri, {"op": "set_serial", "serial": "4294967295"}
)
self.assertEqual(
http.client.OK, response.status_code, response.content
)
# A serial of 4294967296 is rejected.
response = self.client.post(
uri, {"op": "set_serial", "serial": "4294967296"}
)
self.assertEqual(
http.client.BAD_REQUEST, response.status_code, response.content
)
class TestDomainAPI(APITestCase.ForUser):
def test_handler_path(self):
domain = factory.make_Domain()
self.assertEqual(
"/MAAS/api/2.0/domains/%s/" % domain.id, get_domain_uri(domain)
)
def test_read(self):
domain = factory.make_Domain()
for _ in range(3):
factory.make_DNSData(domain=domain)
uri = get_domain_uri(domain)
response = self.client.get(uri)
self.assertEqual(
http.client.OK, response.status_code, response.content
)
parsed_domain = json.loads(
response.content.decode(settings.DEFAULT_CHARSET)
)
self.assertThat(
parsed_domain,
ContainsDict(
{
"id": Equals(domain.id),
"name": Equals(domain.get_name()),
"resource_record_count": Equals(3),
}
),
)
def test_read_includes_default_domain(self):
defaults = GlobalDefault.objects.instance()
old_default = Domain.objects.get_default_domain()
domain = factory.make_Domain()
defaults.domain = domain
defaults.save()
uri = get_domain_uri(domain)
response = self.client.get(uri)
self.assertEqual(
http.client.OK, response.status_code, response.content
)
parsed_domain = json.loads(
response.content.decode(settings.DEFAULT_CHARSET)
)
self.assertThat(
parsed_domain, ContainsDict({"is_default": Equals(True)})
)
uri = get_domain_uri(old_default)
response = self.client.get(uri)
self.assertEqual(
http.client.OK, response.status_code, response.content
)
parsed_domain = json.loads(
response.content.decode(settings.DEFAULT_CHARSET)
)
self.assertThat(
parsed_domain, ContainsDict({"is_default": Equals(False)})
)
def test_read_404_when_bad_id(self):
uri = reverse("domain_handler", args=[random.randint(100, 1000)])
response = self.client.get(uri)
self.assertEqual(
http.client.NOT_FOUND, response.status_code, response.content
)
def test_update(self):
self.become_admin()
authoritative = factory.pick_bool()
domain = factory.make_Domain(authoritative=authoritative)
new_name = factory.make_name("domain")
new_ttl = random.randint(10, 1000)
new_auth = not authoritative
uri = get_domain_uri(domain)
response = self.client.put(
uri, {"name": new_name, "authoritative": new_auth, "ttl": new_ttl}
)
self.assertEqual(
http.client.OK, response.status_code, response.content
)
ret = json.loads(response.content.decode(settings.DEFAULT_CHARSET))
domain = reload_object(domain)
self.assertEqual(new_name, ret["name"])
self.assertEqual(new_name, domain.name)
self.assertEqual(new_ttl, ret["ttl"])
self.assertEqual(new_ttl, domain.ttl)
self.assertEqual(new_auth, ret["authoritative"])
self.assertEqual(new_auth, domain.authoritative)
def test_update_admin_only(self):
domain = factory.make_Domain()
new_name = factory.make_name("domain")
uri = get_domain_uri(domain)
response = self.client.put(uri, {"name": new_name})
self.assertEqual(
http.client.FORBIDDEN, response.status_code, response.content
)
def test_set_default(self):
self.become_admin()
domain = factory.make_Domain()
self.assertEqual(False, domain.is_default())
uri = get_domain_uri(domain)
response = self.client.post(uri, {"op": "set_default"})
self.assertEqual(
http.client.OK, response.status_code, response.content
)
ret = json.loads(response.content.decode(settings.DEFAULT_CHARSET))
domain = reload_object(domain)
self.assertEqual(True, ret["is_default"])
self.assertEqual(True, domain.is_default())
def test_set_default_admin_only(self):
domain = factory.make_Domain()
uri = get_domain_uri(domain)
self.client.post(uri, {"op": "set_default"})
def test_delete_deletes_domain(self):
self.become_admin()
domain = factory.make_Domain()
uri = get_domain_uri(domain)
response = self.client.delete(uri)
self.assertEqual(
http.client.NO_CONTENT, response.status_code, response.content
)
self.assertIsNone(reload_object(domain))
def test_delete_403_when_not_admin(self):
domain = factory.make_Domain()
uri = get_domain_uri(domain)
response = self.client.delete(uri)
self.assertEqual(
http.client.FORBIDDEN, response.status_code, response.content
)
self.assertIsNotNone(reload_object(domain))
def test_delete_404_when_invalid_id(self):
self.become_admin()
uri = reverse("domain_handler", args=[random.randint(100, 1000)])
response = self.client.delete(uri)
self.assertEqual(
http.client.NOT_FOUND, response.status_code, response.content
)
|
agpl-3.0
| -6,530,128,466,336,639,000
| 34.232975
| 78
| 0.605595
| false
| 3.943041
| true
| false
| false
|
TUBvision/hrl
|
misc/old/simpledisplay.py
|
1
|
1532
|
import hrl
import pygame as pg
from pygame.locals import *
from OpenGL.GL import *
from patterns import *
from random import shuffle,randint
ntls = 4
wdth = 1024
hght = 766
def circle(y,ptch):
y = np.mod(y,2) - 1
x = np.sin(-np.pi * y)/2 + 0.5
y = np.abs(y)
return np.round((wdth-ptch.wdth)*x),np.round((hght-ptch.hght)*y)
def circleTileDraw(pstd,ptst):
itst = 2
stp = 2.0/ntls
for n in range(ntls):
if n == itst:
ptst.draw(circle(stp*n,ptst))
else:
pstd.draw(circle(stp*n,pstd))
def main():
pg.init()
hrl.initializeOpenGL(1024,766)
dpx = hrl.initializeDPX()
done = False
im1 = hrl.Texture('data/alien.png')
im2 = hrl.Texture('data/cow.png')
#im = hrl.Texture(flatGradient(1024,766),dpx=True)
#im = hrl.Texture('data/linear_rg_gradient.bmp')
while not done:
circleTileDraw(im1,im2)
#im.draw()
#im1.draw((0,0),300,300)
#im1.draw((300,550),200,200)
#im2.draw((550,300),200,200)
#im2.draw((300,50),200,200)
#im2.draw((50,300),200,200)
pg.display.flip()
eventlist = pg.event.get()
for event in eventlist:
if event.type == QUIT \
or event.type == KEYDOWN and event.key == K_ESCAPE:
done = True
if __name__ == '__main__':
main()
### pygame.time.Clock() objects can be used to measure the amount of time between events. ###
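# Hedged sketch for the note above, not wired into main(): Clock.tick()
# returns the milliseconds elapsed since the previous tick() call, which is
# one way to measure the time between frames or events.
def _frame_timing_demo(frames=10):
    clock = pg.time.Clock()
    for _ in range(frames):
        elapsed_ms = clock.tick()  # ms since the previous tick()
        print('frame took %d ms' % elapsed_ms)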
|
lgpl-2.1
| -4,229,200,050,737,147,000
| 22.709677
| 93
| 0.54765
| false
| 2.980545
| false
| false
| false
|
Mellthas/quodlibet
|
quodlibet/quodlibet/player/xinebe/cdefs.py
|
1
|
9844
|
# Copyright 2006 Lukas Lalinsky
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import ctypes
from quodlibet.util import load_library
try:
_libxine, name = load_library(["libxine.so.2", "libxine.so.1"])
except OSError as e:
raise ImportError(e)
if name.endswith("2"):
_version = 2
else:
_version = 1
class xine_event_t(ctypes.Structure):
if _version == 1:
_fields_ = [
('type', ctypes.c_int),
('stream', ctypes.c_void_p),
('data', ctypes.c_void_p),
('data_length', ctypes.c_int),
]
elif _version == 2:
_fields_ = [
('stream', ctypes.c_void_p),
('data', ctypes.c_void_p),
('data_length', ctypes.c_int),
('type', ctypes.c_int),
]
class xine_ui_message_data_t(ctypes.Structure):
_fields_ = [
('compatibility_num_buttons', ctypes.c_int),
('compatibility_str_len', ctypes.c_int),
('compatibility_str', 256 * ctypes.c_char),
('type', ctypes.c_int),
('explanation', ctypes.c_int),
('num_parameters', ctypes.c_int),
('parameters', ctypes.c_void_p),
('messages', ctypes.c_char),
]
# event listener callback type
xine_event_listener_cb_t = ctypes.CFUNCTYPE(
ctypes.c_void_p, ctypes.c_void_p,
ctypes.POINTER(xine_event_t))
# event types
XINE_EVENT_UI_PLAYBACK_FINISHED = 1
XINE_EVENT_UI_CHANNELS_CHANGED = 2
XINE_EVENT_UI_SET_TITLE = 3
XINE_EVENT_UI_MESSAGE = 4
XINE_EVENT_FRAME_FORMAT_CHANGE = 5
XINE_EVENT_AUDIO_LEVEL = 6
XINE_EVENT_QUIT = 7
XINE_EVENT_PROGRESS = 8
# stream parameters
XINE_PARAM_SPEED = 1 # see below
XINE_PARAM_AV_OFFSET = 2 # unit: 1/90000 sec
XINE_PARAM_AUDIO_CHANNEL_LOGICAL = 3 # -1 => auto, -2 => off
XINE_PARAM_SPU_CHANNEL = 4
XINE_PARAM_VIDEO_CHANNEL = 5
XINE_PARAM_AUDIO_VOLUME = 6 # 0..100
XINE_PARAM_AUDIO_MUTE = 7 # 1=>mute, 0=>unmute
XINE_PARAM_AUDIO_COMPR_LEVEL = 8 # <100=>off, % compress otherwise
XINE_PARAM_AUDIO_AMP_LEVEL = 9 # 0..200, 100=>100% (default)
XINE_PARAM_AUDIO_REPORT_LEVEL = 10 # 1=>send events, 0=> don't
XINE_PARAM_VERBOSITY = 11 # control console output
XINE_PARAM_SPU_OFFSET = 12 # unit: 1/90000 sec
XINE_PARAM_IGNORE_VIDEO = 13 # disable video decoding
XINE_PARAM_IGNORE_AUDIO = 14 # disable audio decoding
XINE_PARAM_IGNORE_SPU = 15 # disable spu decoding
XINE_PARAM_BROADCASTER_PORT = 16 # 0: disable, x: server port
XINE_PARAM_METRONOM_PREBUFFER = 17 # unit: 1/90000 sec
XINE_PARAM_EQ_30HZ = 18 # equalizer gains -100..100
XINE_PARAM_EQ_60HZ = 19 # equalizer gains -100..100
XINE_PARAM_EQ_125HZ = 20 # equalizer gains -100..100
XINE_PARAM_EQ_250HZ = 21 # equalizer gains -100..100
XINE_PARAM_EQ_500HZ = 22 # equalizer gains -100..100
XINE_PARAM_EQ_1000HZ = 23 # equalizer gains -100..100
XINE_PARAM_EQ_2000HZ = 24 # equalizer gains -100..100
XINE_PARAM_EQ_4000HZ = 25 # equalizer gains -100..100
XINE_PARAM_EQ_8000HZ = 26 # equalizer gains -100..100
XINE_PARAM_EQ_16000HZ = 27 # equalizer gains -100..100
XINE_PARAM_AUDIO_CLOSE_DEVICE = 28 # force closing audio device
XINE_PARAM_AUDIO_AMP_MUTE = 29 # 1=>mute, 0=>unmute
XINE_PARAM_FINE_SPEED = 30 # 1.000.000 => normal speed
XINE_PARAM_EARLY_FINISHED_EVENT = 31 # send event when demux finishes
XINE_PARAM_GAPLESS_SWITCH = 32 # next stream only gapless switch
XINE_PARAM_DELAY_FINISHED_EVENT = 33 # 1/10sec, 0=>disable, -1=>forever
# speeds
XINE_SPEED_PAUSE = 0
XINE_SPEED_SLOW_4 = 1
XINE_SPEED_SLOW_2 = 2
XINE_SPEED_NORMAL = 4
XINE_SPEED_FAST_2 = 8
XINE_SPEED_FAST_4 = 16
# metadata
XINE_META_INFO_TITLE = 0
XINE_META_INFO_COMMENT = 1
XINE_META_INFO_ARTIST = 2
XINE_META_INFO_GENRE = 3
XINE_META_INFO_ALBUM = 4
XINE_META_INFO_YEAR = 5
XINE_META_INFO_VIDEOCODEC = 6
XINE_META_INFO_AUDIOCODEC = 7
XINE_META_INFO_SYSTEMLAYER = 8
XINE_META_INFO_INPUT_PLUGIN = 9
# statuses
XINE_STATUS_IDLE = 0
XINE_STATUS_STOP = 1
XINE_STATUS_PLAY = 2
XINE_STATUS_QUIT = 3
XINE_MSG_NO_ERROR = 0 # (messages to UI)
XINE_MSG_GENERAL_WARNING = 1 # (warning message)
XINE_MSG_UNKNOWN_HOST = 2 # (host name)
XINE_MSG_UNKNOWN_DEVICE = 3 # (device name)
XINE_MSG_NETWORK_UNREACHABLE = 4 # none
XINE_MSG_CONNECTION_REFUSED = 5 # (host name)
XINE_MSG_FILE_NOT_FOUND = 6 # (file name or mrl)
XINE_MSG_READ_ERROR = 7 # (device/file/mrl)
XINE_MSG_LIBRARY_LOAD_ERROR = 8 # (library/decoder)
XINE_MSG_ENCRYPTED_SOURCE = 9 # none
XINE_MSG_SECURITY = 10 # (security message)
XINE_MSG_AUDIO_OUT_UNAVAILABLE = 11 # none
XINE_MSG_PERMISSION_ERROR = 12 # (file name or mrl)
XINE_MSG_FILE_EMPTY = 13 # file is empty
XINE_MSG_AUTHENTICATION_NEEDED = 14 # (mrl, likely http); added in 1.2
# xine_t *xine_new(void)
xine_new = _libxine.xine_new
xine_new.restype = ctypes.c_void_p
# void xine_init(xine_t *self)
xine_init = _libxine.xine_init
xine_init.argtypes = [ctypes.c_void_p]
# void xine_exit(xine_t *self)
xine_exit = _libxine.xine_exit
xine_exit.argtypes = [ctypes.c_void_p]
# void xine_config_load(xine_t *self, const char *cfg_filename)
xine_config_load = _libxine.xine_config_load
xine_config_load.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
# const char *xine_get_homedir(void)
xine_get_homedir = _libxine.xine_get_homedir
xine_get_homedir.restype = ctypes.c_char_p
# xine_audio_port_t *xine_open_audio_driver(xine_t *self, const char *id,
# void *data)
xine_open_audio_driver = _libxine.xine_open_audio_driver
xine_open_audio_driver.argtypes = [ctypes.c_void_p,
ctypes.c_char_p, ctypes.c_void_p]
xine_open_audio_driver.restype = ctypes.c_void_p
# void xine_close_audio_driver(xine_t *self, xine_audio_port_t *driver)
xine_close_audio_driver = _libxine.xine_close_audio_driver
xine_close_audio_driver.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
# xine_stream_t *xine_stream_new(xine_t *self,
# xine_audio_port_t *ao, xine_video_port_t *vo)
xine_stream_new = _libxine.xine_stream_new
xine_stream_new.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_void_p]
xine_stream_new.restype = ctypes.c_void_p
# void xine_close(xine_stream_t *stream)
xine_close = _libxine.xine_close
xine_close.argtypes = [ctypes.c_void_p]
# int xine_open (xine_stream_t *stream, const char *mrl)
xine_open = _libxine.xine_open
xine_open.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
xine_open.restype = ctypes.c_int
# int xine_play(xine_stream_t *stream, int start_pos, int start_time)
xine_play = _libxine.xine_play
xine_play.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int]
xine_play.restype = ctypes.c_int
# void xine_stop(xine_stream_t *stream)
xine_stop = _libxine.xine_stop
xine_stop.argtypes = [ctypes.c_void_p]
# void xine_dispose(xine_stream_t *stream)
xine_dispose = _libxine.xine_dispose
xine_dispose.argtypes = [ctypes.c_void_p]
# xine_event_queue_t *xine_event_new_queue(xine_stream_t *stream)
xine_event_new_queue = _libxine.xine_event_new_queue
xine_event_new_queue.argtypes = [ctypes.c_void_p]
xine_event_new_queue.restype = ctypes.c_void_p
# void xine_event_dispose_queue(xine_event_queue_t *queue)
xine_event_dispose_queue = _libxine.xine_event_dispose_queue
xine_event_dispose_queue.argtypes = [ctypes.c_void_p]
# void xine_event_create_listener_thread(xine_event_queue_t *queue,
# xine_event_listener_cb_t callback,
# void *user_data)
xine_event_create_listener_thread = _libxine.xine_event_create_listener_thread
xine_event_create_listener_thread.argtypes = [ctypes.c_void_p,
ctypes.c_void_p, ctypes.c_void_p]
xine_usec_sleep = _libxine.xine_usec_sleep
xine_usec_sleep.argtypes = [ctypes.c_int]
xine_set_param = _libxine.xine_set_param
xine_set_param.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int]
xine_get_param = _libxine.xine_get_param
xine_get_param.argtypes = [ctypes.c_void_p, ctypes.c_int]
xine_get_param.restype = ctypes.c_int
xine_get_meta_info = _libxine.xine_get_meta_info
xine_get_meta_info.argtypes = [ctypes.c_void_p, ctypes.c_int]
xine_get_meta_info.restype = ctypes.c_char_p
xine_get_status = _libxine.xine_get_status
xine_get_status.argtypes = [ctypes.c_void_p]
xine_get_status.restype = ctypes.c_int
xine_get_pos_length = _libxine.xine_get_pos_length
xine_get_pos_length.argtypes = [ctypes.c_void_p,
ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int),
ctypes.POINTER(ctypes.c_int)]
xine_get_version_string = _libxine.xine_get_version_string
xine_get_version_string.restype = ctypes.c_char_p
xine_get_file_extensions = _libxine.xine_get_file_extensions
xine_get_file_extensions.argtypes = [ctypes.c_void_p]
xine_get_file_extensions.restype = ctypes.c_char_p
xine_get_mime_types = _libxine.xine_get_mime_types
xine_get_mime_types.argtypes = [ctypes.c_void_p]
xine_get_mime_types.restype = ctypes.c_char_p
xine_list_input_plugins = _libxine.xine_list_input_plugins
xine_list_input_plugins.argtypes = [ctypes.c_void_p]
xine_list_input_plugins.restype = ctypes.POINTER(ctypes.c_char_p)
xine_check_version = _libxine.xine_check_version
xine_check_version.argtypes = [ctypes.c_int, ctypes.c_int,
ctypes.c_int]
xine_check_version.restype = ctypes.c_int
_callbacks = []
def xine_event_create_listener_thread(queue, callback, user_data):
cb = xine_event_listener_cb_t(callback)
_callbacks.append(cb)
_libxine.xine_event_create_listener_thread(queue, cb, user_data)
def xine_get_pos_length(stream):
_pos_stream = ctypes.c_int()
_pos_time = ctypes.c_int()
_length_time = ctypes.c_int()
result = _libxine.xine_get_pos_length(stream, ctypes.byref(_pos_stream),
ctypes.byref(_pos_time), ctypes.byref(_length_time))
if result:
return _pos_stream.value, _pos_time.value, _length_time.value
else:
return 0, 0, 0
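# Illustrative sketch only, not part of the original bindings: one way the
# ctypes wrappers above might be chained to play a single mrl. The "auto"
# audio driver name and the example mrl are assumptions, and error handling
# is omitted.
def _xine_playback_sketch(mrl=b"file:///tmp/example.ogg"):
    xine = xine_new()
    xine_init(xine)
    audio_port = xine_open_audio_driver(xine, b"auto", None)
    stream = xine_stream_new(xine, audio_port, None)
    if xine_open(stream, mrl) and xine_play(stream, 0, 0):
        print(xine_get_meta_info(stream, XINE_META_INFO_TITLE))
        xine_stop(stream)
    xine_close(stream)
    xine_dispose(stream)
    xine_close_audio_driver(xine, audio_port)
    xine_exit(xine)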
|
gpl-2.0
| 7,453,561,007,139,033,000
| 33.907801
| 78
| 0.701849
| false
| 2.598733
| false
| false
| false
|
ctalbert/mozharness
|
configs/signing/android_mozilla-esr10.py
|
1
|
4482
|
#!/usr/bin/env python
LOCALES = ["en-US", "multi"]
# override tag for all repos
TAG = None
#AUS_SERVER = "dev-stage01.build.mozilla.org"
AUS_SERVER = "aus3-staging.mozilla.org"
#FTP_SERVER = "dev-stage01.build.mozilla.org"
FTP_SERVER = "stage.mozilla.org"
AUS_UPLOAD_BASE_DIR = "/opt/aus2/snippets/staging"
AUS_DIR_BASE_NAME = "Fennec-%(version)s-build%(buildnum)d"
FTP_UPLOAD_BASE_DIR = "/pub/mozilla.org/mobile/candidates/%(version)s-candidates/build%(buildnum)d"
#DOWNLOAD_BASE_URL = "http://%s%s" % (FTP_SERVER, FTP_UPLOAD_BASE_DIR)
DOWNLOAD_BASE_URL = "http://ftp.mozilla.org/pub/mozilla.org/mobile/candidates/%(version)s-candidates/build%(buildnum)d"
APK_BASE_NAME = "fennec-%(version)s.%(locale)s.android-arm.apk"
BUILDID_BASE_URL = DOWNLOAD_BASE_URL + "/%(platform)s_info.txt"
STAGE_SSH_KEY = '~/.ssh/ffxbld_dsa'
#STAGE_SSH_KEY = '~/staging_ssh/ffxbld_dsa'
AUS_SSH_KEY = '~/.ssh/auspush'
#AUS_SSH_KEY = '~/staging_ssh/id_rsa'
RELEASE_UPDATE_URL = "http://download.mozilla.org/?product=fennec-%(version)s-complete&os=%(platform)s&lang=%(locale)s"
BETATEST_UPDATE_URL = "http://stage.mozilla.org/pub/mozilla.org/mobile/candidates/%(version)s-candidates/build%(buildnum)d/%(apk_name)s"
SNIPPET_TEMPLATE = """version=1
type=complete
url=%(url)s
hashFunction=sha512
hashValue=%(sha512_hash)s
size=%(size)d
build=%(buildid)s
appv=%(version)s
extv=%(version)s
"""
KEYSTORE = "/home/cltsign/.android/android-release.keystore"
JAVA_HOME = "/tools/jdk-1.6.0_17"
JARSIGNER = "%s/bin/jarsigner" % JAVA_HOME
KEY_ALIAS = "release"
config = {
"log_name": "sign_android_esr10",
"work_dir": "esr10",
"locales": LOCALES,
"locales_file": "buildbot-configs/mozilla/l10n-changesets_mobile-esr10.json",
"release_config_file": "buildbot-configs/mozilla/release-fennec-mozilla-esr10.py",
"platforms": ['android',],
"platform_config": {
'android': {},
'android-xul': {
'locales': ['en-US', 'multi'],
},
'android-armv6': {
'locales': ['en-US'],
'apk_base_name': "fennec-%(version)s.%(locale)s.android-arm-armv6.apk"
},
},
"update_platforms": [],
"update_platform_map": {
'android': 'Android_arm-eabi-gcc3',
'android-xul': 'Android_arm-eabi-gcc3-xul',
'android-armv6': 'Android_arm-eabi-gcc3-armv6',
},
"update_channels": {
'release': {
'url': RELEASE_UPDATE_URL,
'template': SNIPPET_TEMPLATE,
'dir_base_name': AUS_DIR_BASE_NAME,
},
'betatest': {
'url': BETATEST_UPDATE_URL,
'template': SNIPPET_TEMPLATE,
'dir_base_name': '%s-test' % AUS_DIR_BASE_NAME,
},
'releasetest': {
'url': RELEASE_UPDATE_URL,
'template': SNIPPET_TEMPLATE,
'dir_base_name': '%s-test' % AUS_DIR_BASE_NAME,
},
},
"ftp_upload_base_dir": FTP_UPLOAD_BASE_DIR,
# These should be from release_config, but that has stage-ffxbld
# which doesn't work with dev-stage01.
"ftp_ssh_key": STAGE_SSH_KEY,
"ftp_user": "ffxbld",
"aus_ssh_key": AUS_SSH_KEY,
"aus_upload_base_dir": AUS_UPLOAD_BASE_DIR,
"apk_base_name": APK_BASE_NAME,
"unsigned_apk_base_name": APK_BASE_NAME,
"download_base_url": DOWNLOAD_BASE_URL,
"download_unsigned_base_subdir": "unsigned/%(platform)s/%(locale)s",
"download_signed_base_subdir": "%(platform)s/%(locale)s",
"buildid_base_url": BUILDID_BASE_URL,
"old_buildid_base_url": BUILDID_BASE_URL,
"actions": [
"passphrase",
"clobber",
"pull",
"download-unsigned-bits",
"sign",
"verify-signatures",
"upload-signed-bits",
],
"keystore": KEYSTORE,
"key_alias": KEY_ALIAS,
"env": {
"PATH": JAVA_HOME + "/bin:%(PATH)s",
},
"exes": {
"jarsigner": JARSIGNER,
"zipalign": "/tools/android-sdk-r8/tools/zipalign",
},
"signature_verification_script": "tools/release/signing/verify-android-signature.sh",
"user_repo_override": "build",
"tag_override": TAG,
"repos": [{
"repo": "http://hg.mozilla.org/%(user_repo_override)s/tools",
"dest": "tools",
"revision": "default",
},{
"repo": "http://hg.mozilla.org/%(user_repo_override)s/buildbot-configs",
"dest": "buildbot-configs",
"revision": "production",
}],
}
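# Illustration only, not part of the shipped config: rendering the AUS
# snippet template with made-up placeholder values shows the layout the
# signing scripts would upload; none of these values describe a real build.
if __name__ == "__main__":
    print(SNIPPET_TEMPLATE % {
        "url": "http://example.invalid/fennec-complete.apk",
        "sha512_hash": "0" * 128,
        "size": 12345678,
        "buildid": "20120301000000",
        "version": "10.0.1esr",
    })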
|
mpl-2.0
| 8,020,089,138,990,125,000
| 33.21374
| 136
| 0.598394
| false
| 2.939016
| true
| false
| false
|