| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
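Each row holds one Python source file split into a fill-in-the-middle triple: concatenating the prefix, middle, and suffix columns reconstructs the original file. A minimal sketch of reading a row this way, assuming the corpus is published as a Hugging Face dataset (the dataset id below is a placeholder, not the real repository name):

from datasets import load_dataset

# Placeholder dataset id; substitute the actual repository name.
ds = load_dataset("example/python-fim-corpus", split="train")
row = ds[0]
# prefix + middle + suffix reassembles the original source file.
source = row["prefix"] + row["middle"] + row["suffix"]
print(row["repo_name"], row["path"], row["license"], len(source))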
| alobbs/autome | plugins/telegram.py | Python | mit | 5,837 | 0.000171 |
import glob
import os
import tempfile
import urllib.parse
import plugin
import pluginconf
import requests
import telepot
util = plugin.get("util")
ERROR_USER_UNKNOWN = ("I don't know you, but I'll tell you someth
|
ing. "
"Sometimes, I use words that I don't "
"know to make me seem more… photosynthesis.")
ERROR_NO_USERID = ("You gotta set yourself a name alias "
"if you wanna talk to me.")
class Keyboard:
def __init__(self, items_per_line=1):
self._markup = None
self._keyboard = []
self.items_per_line = items_per_line
# Guts
def _get_markup(self):
if self._markup:
return self._markup
if self._keyboard:
# Split in rows
n = max(1, self.items_per_line)
kb = [self._keyboard[i:i + n]
for i in range(0, len(self._keyboard), n)]
# Send keyboard
return telepot.namedtuple.ReplyKeyboardMarkup(keyboard=kb)
def get_message_params(self, text):
return dict(text=text, reply_markup=self._get_markup())
# Public
def hide_keyboard(self):
self._markup = telepot.namedtuple.ReplyKeyboardHide()
def add(self, text, request_location=False, request_contact=False,
callback_data=None, url=None):
if request_location:
button = telepot.namedtuple.KeyboardButton(text=text, request_location=True)
return self._keyboard.append(button)
if request_contact:
button = dict(text=text, request_contact=True)
return self._keyboard.append(button)
if url:
button = dict(text=text, url=url)
return self._keyboard.append(button)
if callback_data:
button = dict(text=text, callback_data=callback_data)
return self._keyboard.append(button)
return self._keyboard.append(text)
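# Illustrative usage sketch (an assumption, not part of the original plugin):
# build a two-column reply keyboard and attach it to an outgoing message.
#
#   kb = Keyboard(items_per_line=2)
#   kb.add("Yes")
#   kb.add("No")
#   params = kb.get_message_params("Pick one")  # {'text': ..., 'reply_markup': ...}
#   # bot.sendMessage(chat_id, **params)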
class Telegram:
# Embedded class refs
Keyboard = Keyboard
def __init__(self):
# Read configuration
self.conf = pluginconf.get('telegram')
# Instance
self.bot = telepot.Bot(self.conf['BOT_TOKEN'])
self.bot.message_loop(self._msg_received)
self._youtube_dl_sites = None
@property
def youtube_dl_sites(self):
if not self._youtube_dl_sites:
with os.popen("youtube-dl --list-extractors", 'r') as f:
tmp_all = [d.strip().split(':')[0] for d in f.readlines()]
self._youtube_dl_sites = list(set(tmp_all))
return self._youtube_dl_sites
def reply_command(self, userid, command, live=True):
# All in one message
if not live:
with os.popen(command) as f:
self.send_msg(userid, f.read())
return
# Message per line
with os.popen(command) as f:
for line in f:
if len(line.strip()):
self.send_msg(userid, line)
def msg_received_cb(self, msg):
None
def _msg_received(self, msg):
userid = msg['from'].get('id')
username = msg['from'].get('username')
if not username:
self.send_picture(userid, "static/NoAlias.jpg")
self.send_msg(userid, ERROR_NO_USERID)
return
if username != self.conf['ME_USER']:
self.bot.sendMessage(userid, ERROR_USER_UNKNOWN)
return
self.msg_received_cb(msg)
"""
first = msg['text'].split(' ')[0]
if first == "ping":
self.reply_command(userid, "ping -c 2 home.corp.redhat.com", False)
else:
self.bot.sendMessage(userid, "Am not sure what you want me to do")
"""
def send_msg(self, user_id, msg):
self.bot.sendMessage(user_id, msg)
def send_video(self, user_id, path, caption=None):
# Youtube-dl
if 'http' in path:
supported = any([d in path for d in self.youtube_dl_sites])
if supported:
with util.tmpdir_fp() as tmpdir:
# Download video
dest = '{}/%(title)s-%(id)s.%(ext)s'.format(tmpdir)
cmd = "youtube-dl '{}' -o '{}'".format(path, dest)
os.system(cmd)
# Send video
for fp in glob.glob('%s/*' % tmpdir):
with open(fp, 'rb') as f:
self.bot.sendVideo(user_id, f, caption=caption)
return
# Local file
with open(path, 'rb') as f:
self.bot.sendVideo(user_id, f, caption=caption)
def send_picture(self, user_id, path, caption=None, tmp_suffix=None):
# HTTP
if path.startswith('http://') or path.startswith("https://"):
r = requests.get(path)
p = urllib.parse.urlparse(path)
filename = os.path.basename(p.path)
if not tmp_suffix:
tmp_suffix = filename.split('.')[-1]
tmp_dir = tempfile.gettempdir()
tmp_fp = os.path.join(tmp_dir, filename + '.%s' % tmp_suffix)
with open(tmp_fp, 'w+b') as f:
f.write(r.content)
f.seek(0)
self.bot.sendPhoto(user_id, f, caption=caption)
os.unlink(tmp_fp)
return
# Local file
with open(path, 'rb') as f:
self.bot.sendPhoto(user_id, f, caption=caption)
# Me
#
def send_me_msg(self, *args, **kwargs):
self.send_msg(self.conf['ME_ID'], *args, **kwargs)
def send_me_picture(self, *args, **kwargs):
return self.send_picture(self.conf['ME_ID'], *args, **kwargs)
def send_me_video(self, *args, **kwargs):
return self.send_video(self.conf['ME_ID'], *args, **kwargs)
| frappe/frappe | frappe/www/profile.py | Python | mit | 165 | 0.024242 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
no_cache = 1
def get_context(context):
context.show_sidebar=True
| mark47/OESandbox | liquibase/OE2.8/testCatalogCI_LNSP/scripts/testPanel.py | Python | mpl-2.0 | 1,391 | 0.007908 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
panel_file = open('panels.txt','r')
name_file = open('testName.txt','r')
sample_type_file = open("sampleType.txt")
test_panel_results = open("output/testPanelResults.txt", 'w')
panel = []
type = []
test_names = []
def get_split_names( name ):
split_name_list = name.split("/")
for i in range(0, len(split_name_list)):
split_name_list[i] = split_name_list[i].strip()
return split_name_list
def esc_char(name):
if "'" in name:
return "$$" + name + "$$"
else:
return "'" + name + "'"
for line in panel_file:
panel.append(line.strip())
panel_file.close()
for line in sample_type_file:
type.append(line.strip())
sample_type_file.close()
for line in name_file:
test_names.append(line.strip())
name_file.close()
test_panel_results.write("Below should be pasted to TestPanel.csv\n\n")
for row in range(0, len(test_names)):
if len(panel[row]) > 1:
test_description = esc_char(test_names[row] + "(" + type[row] + ")")
test_panel_results.write("nextval( 'panel_item_seq' ) , (select id from panel where name = '" + panel[row] + "')")
test_panel_results.write(" , (select id from test where description = " + test_description + ") , null , now() \n")
test_panel_results.close()
print "Done look for results in testPanelResults.txt"
| morta-code/IntelliStock | intellistock/ui/navigatorplotwidget.py | Python | gpl-2.0 | 3,270 | 0.003671 |
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 4 00:18:49 2014
@author: Polcz Péter <ppolcz@gmail.com>
"""
import sys
import numpy as np
from PyQt4.QtGui import QVBoxLayout, QMessageBox, QApplication
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from intellistock.ui.plotwidget import PlotWidget
class NavigatorPlotWidget(PlotWidget):
def __init__(self, parent=None, cols=1, rows=1):
""""""
PlotWidget.__init__(self, parent, cols, rows)
self.mpl_toolbar = None
def setup_ui(self):
# Bind the 'pick' event for clicking on one of the bars
self.canvas.mpl_connect('pick_event', self.on_pick)
# Create the navigation toolbar, tied to the canvas
self.mpl_toolbar = NavigationToolbar(self.canvas, self)
layout = QVBoxLayout()
layout.addWidget(self.canvas)
layout.addWidget(self.mpl_toolbar)
self.setLayout(layout)
def on_pick(self, event):
# The event received here is of the type
# matplotlib.backend_bases.PickEvent
#
# It carries lots of information, of which we're using
# only a small amount here.
#
box_points = event.artist.get_bbox().get_points()
msg = "You've clicked on
|
a bar with coordinates:\n %s" % box_points
QMessageBox.information(self, "Click!", msg)
def on_draw(self):
self.canvas.draw()
# -------------------------------------------------------------------------------------------------------------------- #
# -- TEST ------------------------------------------------------------------------------------------------------------ #
# -------------------------------------------------------------------------------------------------------------------- #
def main_test():
# Start the Qt framework (widgets cannot be created without it)
app = QApplication(sys.argv)
w = NavigatorPlotWidget()
w.show()
t = np.linspace(0, 2*np.pi, 1000)
t2 = np.linspace(0.4, 1.5, 20)
x = np.sin(t * 13)
if not w.subplot(2, rows=2, cols=2):
print("Assertion error: subplot - resplit figure")
# test plot()
w.clear()
p = w.plot(t, x)[0]
w.plot(t, x*2)
w.plot(t, x*2, 'g')
q = w.plot(t2, np.ones_like(t2) * 0.5)[0]
w.draw()
# test erase_line()
if not w.erase_line(ploth=p):
print("Assertion error: erase_line by ploth")
if not w.erase_line(plotnr=0):
print("Assertion error: erase_line by plotnr")
w.draw()
# test hide_line()
if not w.hide_line(ploth=q):
print("Assertion error: erase_line by ploth")
if not w.hide_line(plotnr=1, hide=False):
print("Assertion error: erase_line by plotnr, set visible")
w.plot(t, 0.5*x+0.5, 'b')
w.draw()
if not w.subplot(1):
print("Assertion error: subplot - switch subplot")
w.plot(t, x)
w.draw()
w.subplot(3)
t = np.linspace(0, 10, 1000)
x = np.sin(t) + np.cos(t * 10) * 0.5 + np.cos(t * 10 + 0.2) * 0.5 + np.cos(t * 11 + 0.2) * 0.5 + np.random.rand(x.shape[0])
w.plot(t, x)
# Run the Qt framework (main loop)
app.exec_()
if __name__ == "__main__":
main_test()
| kmatzen/ansible | lib/ansible/__init__.py | Python | gpl-3.0 | 888 | 0.001126 |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
__version__ = '2.0.2.0'
__author__ = 'Ansible, Inc.'
| yoyo2k/l10n-romania | partner_create_by_vat/res_partner.py | Python | agpl-3.0 | 9,585 | 0.00407 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# Romanian accounting localization for OpenERP V7
# @author - Fekete Mihai, Tatár Attila <atta@nvm.ro>
# Copyright (C) 2011-2013 TOTAL PC SYSTEMS (http://www.www.erpsystems.ro).
# Copyright (C) 2013 Tatár Attila
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import time
from string import maketrans
import requests
from stdnum.eu.vat import check_vies
from lxml import html
from openerp import models, fields, api, _
from openerp.exceptions import Warning
CEDILLATRANS = maketrans(u'\u015f\u0163\u015e\u0162'.encode(
'utf8'), u'\u0219\u021b\u0218\u021a'.encode('utf8'))
def getMfinante(cod):
headers = {
"User-Agent": "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0)",
"Content-Type": "multipart/form-data;"
}
params = {'cod': cod}
res = requests.get(
'http://www.mfinante.ro/infocodfiscal.html',
params=params,
headers=headers
)
res.raise_for_status()
htm = html.fromstring(res.text)
# there are 2 tables; the first one is the important one
table = htm.xpath("//div[@id='main']//center/table")[0]
result = dict()
for tr in table.iterchildren():
key = ' '.join([x.strip() for x in tr.getchildren()[
0].text_content().split('\n') if x.strip() != ''])
val = ' '.join([x.strip() for x in tr.getchildren()[
1].text_content().split('\n') if x.strip() != ''])
result[key] = val.encode('utf8').translate(CEDILLATRANS).decode('utf8')
return result
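# Illustrative result shape (values are made up; the keys mirror the labels
# scraped from the mfinante.ro page and are the ones consumed further below):
#   {'Denumire platitor:': 'EXAMPLE SRL',
#    'Adresa:': 'Str. Exemplu Nr. 1, Bucuresti',
#    'Codul postal:': '010101',
#    'Telefon:': '0211234567',
#    'Taxa pe valoarea adaugata (data luarii in evidenta):': 'NU'}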
class res_partner(models.Model):
_name = "res.partner"
_inherit = "res.partner"
name = fields.Char('Name', required=True, select=True, default=' ')
@api.one
def button_get_partner_data(self):
def _check_vat_ro(vat):
return bool(len(part.name.strip()) > 2 and
part.name.strip().upper()[:2] == 'RO' and
part.name.strip()[2:].isdigit())
part = self[0]
vat = part.vat
if vat:
self.write({'vat': part.vat.upper().replace(" ","")})
elif part.name and len(part.name.strip())>2 and part.name.strip().upper()[:2]=='RO' and part.name.strip()[2:].isdigit():
self.write( {'vat': part.name.upper().replace(" ","")})
if not part.vat and part.name:
try:
vat_country, vat_number = self._split_vat(part.name.upper().replace(" ",""))
valid = self.vies_vat_check(vat_country, vat_number)
if valid:
self.write( {'vat': part.name.upper().replace(" ","")})
except:
raise Warning(_("No VAT number found"))
vat_country, vat_number = self._split_vat(part.vat)
if part.vat_subjected:
self.write({'vat_subjected': False})
if vat_number and vat_country:
self.write({
'is_company': True,
'country_id': self.env['res.country'].search(
[('code', 'ilike', vat_country)])[0].id
})
if vat_country == 'ro':
try:
nrc_key = 'Numar de inmatriculare la Registrul Comertului:'
tva_key = 'Taxa pe valoarea adaugata (data luarii in evidenta):'
result = getMfinante(vat_number)
name = nrc = adresa = tel = fax = False
zip1 = vat_s = state = False
if 'Denumire platitor:' in result.keys():
name = result['Denumire platitor:'].upper()
if 'Adresa:' in result.keys():
adresa = result['Adresa:'].title() or ''
if nrc_key in result.keys():
nrc = result[nrc_key].replace(' ', '')
if nrc == '-/-/-':
nrc = ''
if 'Codul postal:' in result.keys():
zip1 = result['Codul postal:'] or ''
if 'Judetul:' in result.keys():
jud = result['Judetul:'].title() or ''
if jud.lower().startswith('municip'):
jud = ' '.join(jud.split(' ')[1:])
if jud != '':
state = self.env['res.country.state'].search(
[('name', 'ilike', jud)])
if state:
state = state[0].id
if 'Telefon:' in result.keys():
tel = result['Telefon:'].replace('.', '') or ''
if 'Fax:' in result.keys():
fax = result['Fax:'].replace('.', '') or ''
if tva_key in result.keys():
vat_s = bool(
result[tva_key] != 'NU')
self.write({
'name': name or '',
'nrc': nrc or '',
'street': adresa or '',
'phone': tel or '',
'fax': fax or '',
'zip': zip1 or '',
'vat_subjected': vat_s or False,
'state_id': state,
})
except:
headers = {
"User-Agent": "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0)",
"Content-Type": "application/json;"
}
res = requests.post(
'https://webservicesp.anaf.ro:/PlatitorTvaRest/api/v1/ws/tva',
json=[{'cui': vat_number, 'data': fields.Date.today()}],
headers = headers)
if res.status_code == 200:
res = res.json()
if res['found'] and res['found'][0]:
datas = res['found'][0]
if datas['data_sfarsit'] and datas['data_sfarsit'] != ' ':
res = requests.post(
'https://webservicesp.anaf.ro:/PlatitorTvaRest/api/v1/ws/tva',
json=[{'cui': vat_number, 'data': datas['data_sfarsit']}],
headers = headers)
if res.status_code == 200:
res = res.json()
if res['found'] and res['found'][0]:
datas = res['found'][0]
if res['notfound'] and res['notfound'][0]:
datas = res['notfound'][0]
if datas['data_sfarsit'] and datas['data_sfarsit'] != ' ':
res = requests.post(
'https://webservicesp.anaf.ro:/PlatitorTvaRest/api/v1/ws/tva',
json=[{'cui': vat_number, 'data': datas['data_sfarsit']}],
headers = headers)
if res.status_code == 200:
res = res.json()
if res['found'] and res['found'][0]:
| VirusTotal/content | Packs/PrismaCloud/Integrations/RedLock/RedLock_test.py | Python | mit | 5,638 | 0.004966 |
import pytest
from freezegun import freeze_time
import demistomock as demisto
integration_params = {
'url': 'http://test.com',
'credentials': {'identifier': 'test', 'password': 'pass'},
'fetch_time': '3 days',
'proxy': 'false',
'unsecure': 'false',
}
@pytest.fixture(autouse=True)
def set_mocks(mocker):
mocker.patch.object(demisto, 'params', return_value=integration_params)
@freeze_time("2021-07-10T16:34:14.758295 UTC+1")
def test_fetch_incidents_first_time_fetch(mocker):
"""
Given
- fetch incidents command
- command args
When
- mock the integration parameters
Then
- Validate that the last_time equals the current time (not changed, and not taken from the incident)
"""
mocker.patch.object(demisto, 'command', return_value='fetch-incidents')
from RedLock import fetch_incidents
mocker.patch('RedLock.req', return_value=[])
_, next_run = fetch_incidents()
assert next_run == 1625938454758
def test_redlock_list_scans(mocker):
"""
Given
- The response from the API call of redlock-list-scans command.
When
- calling redlock-list-scans
Then
- Validate that the readable output and the context entry of the command is as expected
"""
from RedLock import redlock_list_scans
list_scans_response = {
'data': [{
'id': '111111111',
'attributes': {
'name': ['test name'],
'type': ['test type'],
'user': ['test user'],
'scanTime': '2021-10-18T14:38:53.654174'
}
}]
}
expected_readable_output = '### Scans List:\n|ID|Name|Scan Time|Type|User|\n|---|---|---|---|---|\n| 111111111 |' \
' test name | 2021-10-18T14:38:53.654174 | test type | test user |\n'
expected_context_entry = {'Redlock.Scans(val.id == obj.id)': [{'id': '111111111',
'name': ['test name'],
'type': ['test type'],
'user': ['test user'],
'scanTime': '2021-10-18T14:38:53.654174'}]}
mocker.patch('RedLock.req', return_value=list_scans_response)
mocker.patch.object(demisto, 'results')
redlock_list_scans()
assert demisto.results.call_args[0][0].get('HumanReadable') == expected_readable_output
assert demisto.results.call_args[0][0].get('EntryContext') == expected_context_entry
def test_redlock_get_scan_status(mocker):
"""
Given
- The response from the API call of redlock-get-scan-status command.
When
- calling redlock-get-scan-status
Then
- Validate that the readable output and the context entry of the command is as expected
"""
from RedLock import redlock_get_scan_status
get_status_response = {
'data': {
'id': '111111111',
'attributes': {
'status': 'test'
}
}
}
expected_readable_output = '### Scan Status:\n|ID|Status|\n|---|---|\n| 111111111 | test |\n'
expected_context_entry = {'Redlock.Scans(val.id == obj.id)': {'id': '111111111',
'status': 'test'}}
mocker.patch('RedLock.req', return_value=get_status_response)
mocker.patch.object(demisto, 'results')
redlock_get_scan_status()
assert demisto.results.call_args[0][0].get('HumanReadable') == expected_readable_output
assert demisto.results.call_args[0][0].get('EntryContext') == expected_context_entry
def test_redlock_get_scan_results(mocker):
"""
Given
- The response from the API call of redlock-get-scan-result command.
When
- calling redlock-get-scan-result
Then
- Validate that the readable output and the context entry of the command is as expected
"""
from RedLock import redlock_get_scan_results
get_result_response = {
'data': [{
'id': '111111111',
'attributes': {
'name': 'test',
'policyId': '2222',
'desc': 'test',
'severity': 'high'
}}]
}
expected_readable_output = '### Scan Results:\n|Description|ID|Name|Policy ID|Severity|\n|---|---|---|---|---|\n|' \
' test | 111111111 | test | 2222 | high |\n'
expected_context_entry = {'Redlock.Scans(val.id == obj.id)': {'id': None,
'results': [
{'id': '111111111',
'attributes': {'name': 'test',
'policyId': '2222',
'desc': 'test',
'severity': 'high'}}]}}
mocker.patch('RedLock.req', return_value=get_result_response)
mocker.patch.object(demisto, 'results')
redlock_get_scan_results()
assert demisto.results.call_args[0][0].get('HumanReadable') == expected_readable_output
assert demisto.results.call_args[0][0].get('EntryContext') == expected_context_entry
| Connexions/cnx-publishing | cnxpublishing/main.py | Python | agpl-3.0 | 838 | 0 |
# -*- coding: utf-8 -*-
# ###
# Copyright (c) 2013, Rice University
# This software is subject to the provisions of the GNU Affero General
# Public License version 3 (AGPLv3).
# See LICENCE.txt for details.
# ###
import os
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
__name__ = 'cnxpublishing'
def find_migrations_directory(): # pragma: no cover
"""Finds and returns the location of the database migrations directory.
This function is used from a setuptools entry-point for db-migrator.
"""
here = os.path.abspath(os.path.dirname(__file__))
return os.path.join(here, 'sql/migrations')
def make_wsgi_app(global_config, **settings): # pragma: no cover
"""Application factory"""
from .config import configure
return configure(settings).make_wsgi_app()
| marktoakley/LamarckiAnt | SCRIPTS/AMBER/symmetrise_prmtop/perm-prmtop.ff03ua.py | Python | gpl-3.0 | 31,681 | 0.023137 |
#!/usr/bin/env python
import os
import os.path
import sys
import string
###############################################################
## #
## Edyta Malolepsza #
## David Wales' group, University of Cambridge #
## in case of problems please send email: em427@cam.ac.uk #
## #
###############################################################
## #
## program finds in prmtop file from LEaP wrong defined order #
## of atoms in IMPROPER, permutes appropriate atoms and write #
## new prmtop file for united-atom force field ff03ua #
## #
## how to use: #
## ./perm-prmtop.ff03ua.py OLD_PRMTOP NEW_PRMTOP #
## #
## IMPORTANT: #
## 0. copy /home/em427/amber9/leap into your AMBER #
## directory since in default AMBER9 there are #
## missing parameters #
## 1. change names of terminal amino acid residues #
## according to warnings below #
## 2. change path to libraries #
## 3. program changes the atom order ONLY for amino acid #
## #
###############################################################
# khs26> changed the path to use the $AMBERHOME environment variable
amberhome = os.environ["AMBERHOME"]
path = os.path.join(amberhome, "dat/leap/lib")
#########################
## some useful functions
#########################
def exchange_atoms(atom_type, a, aa, residue, dihedrals, currentAtomNumber):
find_atom = a[aa.index(residue)].index(atom_type)
atomNumber = find_atom+currentAtomNumber
atomNumberIndex = atomNumber*3
for j in range(len(dihedrals)):
if (dihedrals[j][1]==str(atomNumberIndex)):
d1 = dihedrals[j][0]
d2 = dihedrals[j][1]
dihedrals[j][0] = d2
dihedrals[j][1] = d1
# ('OD1', aTypes, aaNames, residueLabel[i], dihedralsWithoutHydrogen, currentAtomNumber)
def exchange_atoms_asp(atom_type, a, aa, residue, dihedrals, currentAtomNumber):
find_atom = a[aa.index(residue)].index(atom_type)
atomNumber = find_atom+currentAtomNumber
atomNumberIndex = atomNumber*3
for j in range(len(dihedrals)):
print dihedrals[j][1], atomNumberIndex,
if (dihedrals[j][1]==str(atomNumberIndex)):
print dihedrals[j][0], dihedrals[j][1], dihedrals[j][2], dihedrals[j][3]
d1 = dihedrals[j][0]
d2 = dihedrals[j][1]
dihedrals[j][0] = d2
dihedrals[j][1] = d1
print dihedrals[j][0], dihedrals[j][1], dihedrals[j][2], dihedrals[j][3]
def exchange_atoms_nt(atom_type, a, aa, residue, dihedrals):
find_atom = a[aa.index(residue)].index(atom_type)
for j in range(len(dihedrals)):
if (dihedrals[j][1]==str(atomIndex[find_atom])):
d1 = dihedrals[j][0]
d2 = dihedrals[j][1]
dihedrals[j][0] = d2
dihedrals[j][1] = d1
def exchange_atoms_nglu(atom_type, a, aa, residue, dihedrals,n5):
find_atom = a[aa.index(residue)].index(atom_type)
atomNumber = find_atom+currentAtomNumber
atomNumberIndex = atomNumber*3
for j in range(len(dihedrals)):
# print dihedrals[j][1], atomNumberIndex
if (dihedrals[j][1]==str(atomNumberIndex)):
# print dihedrals[j][0], dihedrals[j][1], dihedrals[j][2], dihedrals[j][3]
# d1 = dihedrals[j][0]
# d2 = dihedrals[j][1]
# dihedrals[j][0] = d2
# dihedrals[j][1] = d1
# print dihedrals[j][0], dihedrals[j][1], dihedrals[j][2], dihedrals[j][3]
## find_atom = a[aa.index(residue)].index(atom_type)
## for j in range(len(dihedrals)):
## if (dihedrals[j][1]==str(atomIndex[find_atom])):
## d1 = dihedrals[j][0]
## d2 = dihedrals[j][1]
## dihedrals[j][0] = d2
## dihedrals[j][1] = d1
if (n5==0):
continue
elif (n5==1):
d1 = '-'+dihedrals[j][1]
d2 = dihedrals[j][3][1:]
dihedrals[j][1] = d2
dihedrals[j][3] = d1
elif (n5==2):
d1 = dihedrals[j][0]
d2 = dihedrals[j][1]
dihedrals[j][0] = d2
dihedrals[j][1] = d1
elif (n5==3):
d0 = '-'+dihedrals[j][0]
d1 = dihedrals[j][1]
d3 = dihedrals[j][3][1:]
dihedrals[j][0] = d1
dihedrals[j][1] = d3
dihedrals[j][3] = d0
elif (n5==4):
d0 = dihedrals[j][0]
d1 = '-'+dihedrals[j][1]
d3 = dihedrals[j][3][1:]
dihedrals[j][0] = d3
dihedrals[j][1] = d0
dihedrals[j][3] = d1
elif (n5==5):
d1 = '-'+dihedrals[j][0]
d2 = dihedrals[j][3][1:]
dihedrals[j][0] = d2
dihedrals[j][3] = d1
def exchange_atoms_arg(a, aa, residue, dihedrals, currentAtomNumber):
## IMPROPER responsible for trouble with NH2 group permutation:
find_atom1 = a[aa.index(residue)].index('NE')
atomNumber1 = find_atom1+currentAtomNumber
atomNumberIndex1 = atomNumber1*3
find_atom2 = a[aa.index(residue)].index('NH1')
atomNumber2 = find_atom2+currentAtomNumber
atomNumberIndex2 = atomNumber2*3
find_atom3 = a[aa.index(residue)].index('CZ')
atomNumber3 = find_atom3+currentAtomNumber
atomNumberIndex3 = atomNumber3*3
find_atom4 = a[aa.index(residue)].index('NH2')
atomNumber4 = find_atom4+currentAtomNumber
atomNumberIndex4 = atomNumber4*3
for j in range(len(dihedrals)):
if ((dihedrals[j][0]==str(atomNumberIndex1)) and (dihedrals[j][1]==str(atomNumberIndex2))):
d0 = dihedrals[j][0]
d1 = dihedrals[j][1]
dihedrals[j][0] = d1
dihedrals[j][1] = d0
def exchange_atoms_ring1_phe(a, aa, residue, dihedrals):
# n - different orders of atoms in IMPROPER (the third atom is always fixed)
# 0: 0-1-3
# 1: 0-3-1
# 2: 1-0-3
# 3: 1-3-0
# 4: 3-0-1
# 5: 3-1-0
find_atom1 = a[aa.index(residue)].index('CD1')
atomNumber1 = find_atom1+currentAtomNumber
atomNumberIndex1 = atomNumber1*3
find_atom2 = a[aa.index(residue)].index('CD2')
atomNumber2 = find_atom2+currentAtomNumber
atomNumberIndex2 = atomNumber2*3
find_atom3 = a[aa.index(residue)].index('CG')
atomNumber3 = find_atom3+currentAtomNumber
atomNumberIndex3 = atomNumber3*3
find_atom4 = a[aa.index(residue)].index('CB')
atomNumber4 = find_atom4+currentAtomNumber
atomNumberIndex4 = atomNumber4*3
n5 = 0
for j in range(len(dihedrals)):
if ((dihedrals[j][0]==str(atomNumberIndex4)) and (dihedrals[j][1]==str(atomNumberIndex1)) and (dihedrals[j][2]=="-"+str(atomNumberIndex3)) and (dihedrals[j][3]=="-"+str(atomNumberIndex2))):
# print "n5 = 4"
n5 = 4
elif ((dihedrals[j][0]==str(atomNumberIndex4)) and (dihedrals[j][1]==str(atomNumberIndex2)) and (dihedrals[j][2]=="-"+str(atomNumberIndex3)) and (dihedrals[j][3]=="-"+str(atomNumberIndex1))):
# print "n5 = 2"
n5 = 2
elif ((dihedrals[j][0]==str(atomNumberIndex1)) and (dihedrals[j][1]==str(atomNumberIndex4)) and (dihedrals[j][2]=="-"+str(atomNumberIndex3)) and (dihedrals[j][3]=="-"+str(atomNumberIndex2))):
# print "n5 = 5"
n5 = 5
elif ((dihedrals[j][0]==str(atomNumberIndex1)) and (dihedrals[j][1]==str(atomNumberIndex2)) and (dihedrals[j][2]=="-"+str(atomNumberIndex3)) and (dihedrals[j][3]=="-"+str(atomNumberIndex4))):
# print "n5 = 3"
n5 = 3
elif ((dihedrals[j][0]==str(atomNumberIndex2)) and (dihedrals[j][1]==str(atomNumberIndex1)) and (dihedrals[j][2]=="-"+str(atomNumberIndex3)) and (dihedrals[j][3]=="-"+str(atomNumberIndex4))):
# print "n5 = 1"
n5 = 1
elif ((dihedrals[j][0]==str(atomNumberIndex2)) and (dihedrals[j][1]==str(atomNumberIndex4)) and (dihedrals[j][2]=="-"+str(atomNumberIndex3)) and (dihedrals[j][3]=="-"+str(atomNumberIndex1))):
# print "n5 = 0"
n5 = 0
else: continue
if (n5==
| klarnemann/jagust_rsfmri | rsfmri/utils.py | Python | mit | 19,059 | 0.007346 |
import os, sys
import datetime
from glob import glob
import json
import logging  # used by zip_files() for error reporting
import numpy as np
import pandas
from skimage.morphology import binary_erosion
from nitime.timeseries import TimeSeries
from nitime.analysis import SpectralAnalyzer, FilterAnalyzer
import nibabel
import nipype.interfaces.spm as spm
from nipype.interfaces.base import CommandLine
import nipype.interfaces.fsl as fsl
from nipype.utils import filemanip
import nipype.interfaces.afni as afni
## deal with relative import for now
cwd = os.getcwd()
sys.path.insert(0, cwd)
import nipype_ext
########################
## naming structure used in scripts to make subdirectories
defaults = {
'rawdir': 'raw',
'func_glob': 'B*func4d.nii*',
'despiked_func_glob' : 'dsB*func4d.nii*',
'anat_glob' : 'brainmask.nii*',
'aparc_glob' : 'aparcaseg.nii*',
'aligned' : 'align4d_{0}.nii*',
'realign_ants':'ants_realign',
'realign_spm': 'spm_realign_slicetime',
'despike' : 'despike_',
'coreg' : 'coreg_masks',
'bandpass' : 'bandpass',
'model_fsl': 'model_fsl',
'wm_labels': [2,41, 77,78,79],
'vent_labels': [4,5,14,15,28,43,44,60,72,75,76],
'movement_names' : ['mc{}.1D'.format(x+1) for x in xrange(6)],
'noise_names' : ['wm.1D', 'csf.1D', 'global.1D']
}
def get_files(dir, globstr):
"""
uses glob to find dir/globstr
returns sorted list; number of files
"""
searchstr = os.path.join(dir, globstr)
files = glob(searchstr)
files.sort()
return files, len(files)
def make_datestr():
now = datetime.datetime.now()
return now.strftime('%Y_%m_%d_%H_%S')
def make_dir(base_dir, dirname='newdir'):
""" makes a new directory if it doesnt alread exist
returns full path
Parameters
----------
base_dir : str
the root directory
dirname : str (default 'newdir')
new directory name
Returns
-------
newdir : str
full path of new directory
"""
newdir = os.path.join(base_dir,dirname)
if not os.path.isdir(base_dir):
raise IOError('ERROR: base dir %s DOES NOT EXIST'%(base_dir))
directory_exists = os.path.isdir(newdir)
if not directory_exists:
os.mkdir(newdir)
return newdir, directory_exists
def fsl_make4d(infiles, newfile):
"""a list of files is passed, a 4D volume will be created
in the same directory as the original files"""
if not hasattr(infiles, '__iter__'):
raise IOError('expected list,not %s'%(infiles))
startdir = os.getcwd()
pth, nme = os.path.split(infiles[0])
os.chdir(pth)
merge = fsl.Merge()
merge.inputs.in_files = infiles
merge.inputs.dimension = 't'
merge.inputs.merged_file = newfile
out = merge.run()
os.chdir(startdir)
if not out.runtime.returncode == 0:
print out.runtime.stderr
return None
else:
return out.outputs.merged_file
def fsl_split4d(infile, outdir, sid):
""" uses fsl to split 4d file into parts
based on sid, puts resulting files in outdir
"""
startdir = os.getcwd()
pth, nme = os.path.split(infile)
os.chdir(outdir)
im = fsl.Split()
im.inputs.in_file = infile
im.inputs.dimension = 't'
im.inputs.out_base_name = sid
im.inputs.output_type = 'NIFTI'
out = im.run()
os.chdir(startdir)
if not out.runtime.returncode == 0:
print out.runtime.stderr
return None
else:
# fsl split may include input file as an output
## bad globbing...
# remove it here
outfiles = out.outputs.out_files
outfiles = [x for x in outfiles if not x == im.inputs.in_file]
return outfiles
def get_slicetime(nslices):
"""
If TOTAL # SLICES = EVEN, then the excitation order when interleaved
is EVENS first, ODDS second.
If TOTAL # SLICES = ODD, then the excitation order when interleaved is
ODDS first, EVENS second.
Returns:
sliceorder: list
list containing the order of slice acquisition used for slicetime
correction
"""
if np.mod(nslices,2) == 0:
sliceorder = np.concatenate((np.arange(2,nslices+1,2),
np.arange(1,nslices+1,2)))
else:
sliceorder = np.concatenate((np.arange(1,nslices+1,2),
np.arange(2,nslices+1,2)))
# cast to a list for use with interface
return list(sliceorder)
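# Worked example (illustrative): with an even slice count the even slices are
# excited first, with an odd count the odd slices come first:
#   get_slicetime(4) -> [2, 4, 1, 3]
#   get_slicetime(5) -> [1, 3, 5, 2, 4]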
def get_slicetime_vars(infiles, TR=None):
"""
uses nibabel to get slicetime variables
Returns:
dict: dict
nslices : number of slices
TA : acquisition Time
TR: repetition Time
sliceorder : array with slice order to run slicetime correction
"""
if hasattr(infiles, '__iter__'):
img = nibabel.load(infiles[0])
else:
img = nibabel.load(infiles)
hdr = img.get_header()
if TR is None:
raise RuntimeError('TR is not defined ')
shape = img.get_shape()
nslices = shape[2]
TA = TR - TR/nslices
sliceorder = get_slicetime(nslices)
return dict(nslices=nslices,
TA = TA,
TR = TR,
sliceorder = sliceorder)
def save_json(inobj, outfile):
''' save inobj to outfile using json'''
try:
json.dump(inobj, open(outfile,'w+'))
except:
raise IOError('Unable to save %s to %s (json)'%(inobj, outfile))
def load_json(infile):
''' use json to load objects in json file'''
try:
result = json.load(open(infile))
except:
raise IOError('Unable to load %s' %infile)
return result
def zip_files(files):
if not hasattr(files, '__iter__'):
files = [files]
for f in files:
base, ext = os.path.splitext(f)
if 'gz' in ext:
# file already gzipped
continue
cmd = CommandLine('gzip %s' % f)
cout = cmd.run()
if not cout.runtime.returncode == 0:
logging.error('Failed to zip %s'%(f))
def unzip_file(infile):
""" looks for gz at end of file,
unzips and returns unzipped filename"""
base, ext = os.path.splitext(infile)
if not ext == '.gz':
return infile
else:
if os.path.isfile(base):
return base
cmd = CommandLine('gunzip %s' % infile)
cout = cmd.run()
if not cout.runtime.returncode == 0:
print 'Failed to unzip %s'%(infile)
return None
else:
return base
def copy_file(infile, newdir):
""" copy infile to new directory
return full path of new file
"""
cl = CommandLine('cp %s %s'%(infile, newdir))
out = cl.run()
if not out.runtime.returncode == 0:
print 'failed to copy %s' % infile
print out.runtime.stderr
return None
else:
basenme = os.path.split(infile)[1]
newfile = os.path.join(newdir, basenme)
return newfile
def copy_files(infiles, newdir):
"""wraps copy file to run across multiple files
returns list"""
newfiles = []
for f in infiles:
newf = copy_file(f, newdir)
newfiles.append(newf)
return newfiles
def remove_files(files):
"""removes files """
if not hasattr(files, '__iter__'):
cl = CommandLine('rm %s'% files)
out = cl.run()
if not out.runtime.returncode == 0:
print 'failed to delete %s' % files
print out.runtime.stderr
return
for f in files:
cl = CommandLine('rm %s'% f)
out = cl.run()
if not out.runtime.returncode == 0:
print 'failed to delete %s' % f
print out.runtime.stderr
def afni_despike(in4d):
""" uses afni despike to despike a 4D dataset
saves as ds_<filename>"""
dspike = afni.Despike()
dspike.inputs.in_file = in4d
dspike.inputs.outputtype = 'NIFTI_GZ'
dspike.inputs.ignore_exception = True
outfile = filemanip.fname_presuffix(in4d, 'ds')
dspike.inputs.out_file = outfile
res = dspike.run()
return res.runtime.returncode, res
def spm_realign(infiles, matlab='matlab-spm8'):
""" Uses SPM to realign files"""
startdir = os.getcwd()
pth, _ = os.path.split
| nnsnodnb/django-ios-notifications | notification/views.py | Python | mit | 3,036 | 0.000988 |
from django.shortcuts import render, redirect
from django.http.response import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from .forms import CertFileUploadForm
from .models import DeviceToken, CertFile
from .utils import send_notification, upload_certificate
import json
@csrf_exempt
def device_token_receive(request):
if request.method != 'PUT':
return HttpResponse(status=405)
if request.body == b'':
return JsonResponse({'error': 'Bad Request'}, status=400)
query_dict = request.body.decode('utf-8')
body = json.loads(query_dict)
error = False
if 'device_token' not in body:
error = True
if 'uuid' not in body:
error = True
if 'sandbox' not in body:
error = True
if error:
return JsonResponse({'error': 'Bad Request'}, status=400)
device_token = body['device_token']
uuid = body['uuid']
sandbox = body['sandbox']
if DeviceToken.objects.filter(uuid=uuid).count() != 0:
token = DeviceToken.objects.get(uuid=uuid)
token.device_token = device_token
token.use_sandbox = sandbox
token.save()
else:
token = DeviceToken()
token.device_token = device_token
token.uuid = uuid
token.use_sandbox = sandbox
token.save()
return JsonResponse({'result': 'success'}, status=200)
def send_notification_with_device_token(request, mode, device_token, execute=True):
# mode: 0 or 1
# 0: develop target
# 1: product target
if request.user is None or not request.user.is_superuser:
return HttpResponse('Please login for admin user.', status=401)
if int(mode) > 1:
return HttpResponse('check your mode number(0 or 1).', status=400)
message = 'This is test push notification.'
if 'message' in request.GET:
message = request.GET['message']
try:
device_token = DeviceToken.objects.get(device_token=device_token)
if not execute:
return HttpResponse('End process.', status=200)
send_notification(message=message,
device_token=device_token.device_token,
use_sandbox=True if int(mode) == 0 else False)
return HttpResponse('Successful sending.', status=200)
except DeviceToken.DoesNotExist:
return HttpResponse('Not found. Your device token.', status=404)
def cert_upload(request):
if not request.user.is_superuser:
return redirect('notification:login')
if request.method == 'POST':
form = CertFileUploadForm(request.POST, request.FILES)
if form.is_valid():
result = upload_certificate(request.FILES['cert_file'], target_mode=int(request.POST['target']))
return render(request, 'upload.html', result)
else:
return render(request, 'upload.html', {'error': 'invalid'}, status=400)
else:
form = CertFileUploadForm()
return render(request, 'upload.html', {'form': form})
| bc-jackiehuynh/bigcommerce-api-python | bigcommerce/connection.py | Python | mit | 4,916 | 0.008544 |
"""
Simple wrapper around requests library, mostly for configuring connection
and usage by resources.
The get, post, put, and delete methods of the class could be used directly.
_run_method doc:
Runs given method (requests.post, .get, .put, .delete)
with given req_path (the part after /api/v2), and the
given options keyword args as the query string.
If content is received in response, returns it as
parsed JSON or raw XML (or other raw data).
"""
import requests
from httpexception import *
API_HOST = 'http://store.mybigcommerce.com'
API_PATH = '/api/v2'
API_USER = 'admin'
API_KEY = 'yourpasswordhere'
class Connection(object):
"""
Makes connections according to configuration.
Generally, only host, user, api_key needs to be changed.
Proxies can be defined by doing:
Connection.proxies = {"http": "http://10.10.1.10:3128",
"https": "http://10.10.1.10:1080"}
Custom headers can also be defined (requests tends to handle everything):
Connection.headers = {'content-type' : 'application/xml'}
Set back to None to disable.
The four methods corresponding to the http methods return the
JSON of the response data (or None if no data received), or raise
an exception if the request failed (see HttpException).
"""
prtcl_str = "https://"
host = API_HOST
base_path = API_PATH
user = API_USER
api_key = API_KEY
proxies = None
headers = None
# instead of using requests.get methods, can create and hold session instance
# (as class variable) on demand and use that (which is what requests does anyway)
# and let user close it if they wish
# -> would user ever really want to manually close sessions? does requests ever do that automatically?
# also see (for session objects):
# Note that connections are only released back to the pool for reuse once all body data has been read;
# be sure to either set stream to False or read the content property of the Response object.
@classmethod
def auth_pair(cls):
return (cls.user, cls.api_key)
@classmethod
def full_path(cls, req_path):
return cls.prtcl_str + cls.host + cls.base_path + req_path
@classmethod
def _join_options(cls, path, options):
query_str = '&'.join(['='.join(map(str, item)) for item in options.iteritems()])
return path + '?' + query_str
@classmethod
def _run_method(cls, method, req_path, data, **options):
"""
Runs given method (requests.post, .get, .put, .delete)
with given req_path (the part after /api/v2), and the
given options keyword args as the query string.
If content is received in response, returns it as
parsed JSON or raw XML (or other raw data).
"""
#TODO: in which cases would users not want parsed JSON returned?
if options: req_path = cls._join_options(req_path, options)
r = method(cls.full_path(req_path), auth=cls.auth_pair(), data=data,
proxies=cls.proxies,
headers=cls.headers)
ex = cls._check_response(r)
if ex:
ex.message = r.request.method + " request failed:" + ex.message
raise ex
else:
if r.content:
if r.headers['content-type'] == 'application/json':
return r.json()
else:
return r.content
#TODO: maybe CRUD naming would be better... not important?
@classmethod
def get(cls, req_path, **options):
return cls._run_method(requests.get, req_path, None, **options)
@classmethod
def delete(cls, req_path, **options):
"""
No return value. Exception if not successful.
"""
return cls._run_method(requests.delete, req_path, None, **options)
@classmethod
def post(cls, req_path, data, **options):
return cls._run_method(requests.post, req_path, data, **options)
@classmethod
def put(cls, req_path, data, **options):
return cls._run_method(requests.put, req_path, data, **options)
@classmethod
def _check_response(cls, r):
"""
Returns an appropriate HttpException object for
status codes other than 2xx, and None otherwise.
"""
ex = None
if not r.status_code in (200, 201, 202, 204):
if r.status_code >= 500:
ex = ServerException(str(r.content), r.status_code, r.headers, r.content)
elif r.status_code >= 400:
ex = ClientRequestException(str(r.content), r.status_code, r.headers, r.content)
elif r.status_code >= 300:
ex = RedirectionException(str(r.content), r.status_code, r.headers, r.content)
return ex
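# Illustrative usage sketch (an assumption, not part of the original module):
# point the class-level settings at a store, then call the HTTP wrappers.
#
#   Connection.host = 'store-xyz.mybigcommerce.com'   # hypothetical store host
#   Connection.user = 'admin'
#   Connection.api_key = 'app-api-key'
#   products = Connection.get('/products', limit=5)   # GET /api/v2/products?limit=5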
| AusTac/parma | b3/parsers/moh.py | Python | gpl-2.0 | 24,337 | 0.012204 |
#
# Medal of Honor Parser for BigBrotherBot(B3) (www.bigbrotherbot.net)
# Copyright (C) 2010 James 'Bakes' Baker (bakes@bigbrotherbot.net)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
#
# CHANGELOG
# 2010/11/07 - 0.10 - Courgette
# * add new maps info
# 2010/11/08 - 0.9.2 - GrosBedo
# * messages can now be empty (no message broadcasted on kick/tempban/ban/unban)
# 2010/10/27 - 0.9.1 - GrosBedo
# * messages now support named $variables instead of %s
# 2010/10/27 - 0.9 - Courgette
# * when banning, also kick to take over MoH engine failure to enforce bans. This
# will need more test to determine how to make the MoH engine enforce temp bans.
# 2010/10/24 - 0.8 - Courgette
# * fix OnServerRoundover and OnServerRoundoverplayers
# 2010/10/24 - 0.7 - Courgette
# * add missing getTeam() method
# 2010/10/24 - 0.6 - Courgette
# * minor fixes
# 2010/10/23 - 0.5 - Courgette
# * create specific events : EVT_GAME_ROUND_PLAYER_SCORES and EVT_GAME_ROUND_TEAM_SCORES
# * now fires native B3 event EVT_GAME_ROUND_END
# * manage team changed event correctly
# 2010/10/23 - 0.4 - Courgette
# * refactor inheriting from frostbite AbstratParser
# * change available server var list
# 2010/10/10 - 0.3 - Bakes
# * getEasyName is now implemented and working, getHardName is implemented
# but not working.
# 2010/10/07 - 0.2 - Courgette
# * add gameName property. Fix SAY_LINE_MAX_LENGTH
# 2010/09/25 - 0.1 - Bakes
# * Initial version of MoH parser - hasn't been tested with OnKill events yet
# but basic commands seem to work.
# 2010-11-21 - 1.0 - Courgette
# * add rotateMap and changeMap to fix !maprotate and !map#
# 2011-02-01 - 1.1 - xlr8or
# * adapted to server R9 version 615937 - fixed onPlayerSpawn and vars.noCrosshairs errors
# 2011-03-05 - 1.2 - xlr8or
# * admin.kickPlayer after ban now in try/except to avoid error msg when player is already gone
# 2011-04-09 - 1.2.1 - Courgette
# * import missing time module
# 2011-05-22 - 1.2.2 - Courgette
# * move events EVT_GAME_ROUND_PLAYER_SCORES and EVT_GAME_ROUND_TEAM_SCORES to abstract Frostbite parser
# as they also exist in BFBC2
# 2011-06-04 - 1.3.0 - Courgette
# makes use of the new pluginsStarted parser hook
# 2011-06-05 - 1.4.0 - Courgette
# * change data format for EVT_CLIENT_BAN_TEMP and EVT_CLIENT_BAN events
# 2012-10-60 - 1.5 - Courgette
# reflect changes in abstract parser 1.6
#
__author__ = 'Bakes, Courgette'
__version__ = '1.5'
import time
import b3.events
from b3.parsers.frostbite.abstractParser import AbstractParser
from b3.parsers.frostbite.util import PlayerInfoBlock
import b3.functions
SAY_LINE_MAX_LENGTH = 100
class MohParser(AbstractParser):
gameName = 'moh'
_gameServerVars = (
'serverName', # vars.serverName [name] Set the server name
'gamePassword', # vars.gamePassword [password] Set the game password for the server
'punkBuster', # vars.punkBuster [enabled] Set if the server will use PunkBuster or not
'hardCore', # vars.hardCore[enabled] Set hardcore mode
'ranked', # vars.ranked [enabled] Set ranked or not
'skillLimit', # vars.skillLimit [lower, upper] Set the skill limits allowed on to the server
'noUnlocks', # vars.noUnlocks [enabled] Set if unlocks should be disabled
'noAmmoPickups', # vars.noAmmoPickups [enabled] Set if pickups should be disabled
'realisticHealth', # vars.realisticHealth [enabled] Set if health should be realistic
'supportAction', # vars.supportAction [enabled] Set if support action should be enabled
'preRoundLimit', # vars.preRoundLimit [upper, lower] Set pre round limits. Setting both to zero means the game uses whatever settings are used on the specific levels. On ranked servers, the lowest values allowed are lower = 2 and upper = 4.
'roundStartTimerPlayersLimit', # vars.roundStartTimerPlayersLimit [limit] Get/Set the number of players that need to spawn on each team for the round start timer to start counting down.
'roundStartTimerDelay', # vars.roundStartTimerDelay [delay] If set to other than -1, this value overrides the round start delay set on the individual levels.
'tdmScoreCounterMaxScore', # vars.tdmScoreCounterMaxScore [score] If set to other than -1, this value overrides the score needed to win a round of Team Assault, Sector Control or Hot Zone.
'clanTeams', # vars.clanTeams [enabled] Set if clan teams should be used
'friendlyFire', # vars.friendlyFire [enabled] Set if the server should allow team damage
'currentPlayerLimit', # vars.currentPlayerLimit Retrieve the current maximum number of players
'maxPlayerLimit', # vars.maxPlayerLimit Retrieve the server-enforced maximum number of players
'playerLimit', # vars.playerLimit [nr of players] Set desired maximum number of players
'bannerUrl', # vars.bannerUrl [url] Set banner url
'serverDescription', # vars.serverDescription [description] Set server description
'noCrosshairs', # vars.noCrosshairs [enabled] Set if crosshairs for all weapons is hidden
'noSpotting', # vars.noSpotting [enabled] Set if spotted targets are disabled in the 3d-world
'teamKillCountForKick', # vars.teamKillCountForKick [count] Set number of teamkills allowed during a round
'teamKillValueForKick', # vars.teamKillValueForKick [count] Set max kill-value allowed for a player before he/she is kicked
'teamKillValueIncrease', # vars.teamKillValueIncrease [count] Set kill-value increase for a teamkill
'teamKillValueDecreasePerSecond', # vars.teamKillValueDecreasePerSecond [count] Set kill-value decrease per second
'idleTimeout', # vars.idleTimeout [time] Set idle timeout vars.profanityFilter [enabled] Set if profanity filter is enabled
)
def startup(self):
AbstractParser.startup(self)
# create the 'Server' client
self.clients.newClient('Server', guid='Server', name='Server', hide=True, pbid='Server', team=b3.TEAM_UNKNOWN)
if self.config.has_option('moh', 'max_say_line_length'):
try:
maxlength = self.config.getint('moh', 'max_say_line_length')
if maxlength > SAY_LINE_MAX_LENGTH:
self.warning('max_say_line_length cannot be greater than %s' % SAY_LINE_MAX_LENGTH)
maxlength = SAY_LINE_MAX_LENGTH
if maxlength < 20:
self.warning('max_say_line_length is way too short. using default')
maxlength = self._settings['line_length']
self._settings['line_length'] = maxlength
self._settings['min_wrap_length'] = maxlength
except Exception, err:
self.error('failed to read max_say_line_length setting "%s" : %s' % (self.config.get('moh', 'max_say_line_length'), err))
self.debug('line_length: %s' % self._settings['line_length'])
self.verbose('GameType: %s, Map: %s' %(self.game.gameType, self.game.mapName))
def pluginsStarted(self):
self.info('connecting all players...')
plist = self.getPlayerList()
for cid, p in plist.iteritems():
client = self.clients.getByCID(cid)
if not client:
#self.clients.newClient(playerdata['cid'], guid=playerdata['guid'], name=playerdata['name'], team=playerdata['team'], squad=playerdata['squad'])
name = p['name']
if 'clanT
| Jellofishi/solartron | qtsolartron.py | Python | lgpl-3.0 | 12,301 | 0.028453 |
#!/bin/env python
r"""Solartron Orbit network reader GUI."""
# python modules
import sys
from time import sleep
# external modules
from serial.serialutil import SerialException
from PyQt4 import QtCore, QtGui
# app modules
from solartron import Driver
from sderrors import *
from pedal import DummyPedal as Pedal
from config import Config
class MainWindow(QtGui.QWidget):
"""Main app window."""
def __init__(self, config, *args):
"""Build main gui."""
QtGui.QWidget.__init__(self, *args)
self.setWindowTitle(self.tr("Solartron Reader"))
self.dataView = QtGui.QTableView()
self.dataView.setShowGrid(True)
self.getDataBtn = QtGui.QPushButton(self.tr("Get data"))
self.connect(self.getDataBtn, QtCore.SIGNAL("pressed()"), self.aquireData)
self.setZeroBtn = QtGui.QPushButton(self.tr("Set Zero"))
self.copyBtn = QtGui.QPushButton(self.tr("Copy"))
self.exportBtn = QtGui.QPushButton(self.tr("Save"))
self.configBtn = QtGui.QPushButton(self.tr("Config"))
self.clearBtn = QtGui.QPushButton(self.tr("Clear"))
self.probes = []
i = 1
for probe in config.probes:
probeOutput = SolartronProbe(i)
i += 1
self.probes.append(probeOutput)
self.actionsLayout = QtGui.QVBoxLayout()
for probe in self.probes:
self.actionsLayout.addWidget(probe)
self.actionsLayout.addWidget(self.getDataBtn)
self.actionsLayout.addWidget(self.setZeroBtn)
self.actionsLayout.addWidget(self.copyBtn)
self.actionsLayout.addWidget(self.configBtn)
#self.actionsLayout.addWidget(self.exportBtn)
self.actionsLayout.addWidget(self.clearBtn)
self.readerLayout = QtGui.QHBoxLayout()
self.readerLayout.addWidget(self.dataView, 4)
self.readerLayout.addLayout(self.actionsLayout)
self.statusBar = QtGui.QStatusBar()
self.mainLayout = QtGui.QVBoxLayout()
self.mainLayout.addLayout(self.readerLayout)
self.mainLayout.addWidget(self.statusBar)
self.mainLayout.setContentsMargins(0,0,0,0)
self.setLayout(self.mainLayout)
def updateProbe(self, uid, value):
"""Update probe value in text boxs."""
self.probes[uid-1].setValue(value)
def statusMsgUpdate(self, message):
"""Update message in status bar."""
self.statusBar.showMessage(message)
def setModel(self, model):
"""Set table view model."""
self.dataView.setModel(model)
def aquireData(self):
"""Request data from the polling thread."""
self.emit(QtCore.SIGNAL("dataRequested"), [probe.text() for probe in self.probes])
def keyPressEvent(self, event):
"""Enter key used to aquire data."""
#FIXME: This doesn't work.
key = event.key()
if key == QtCore.Qt.Key_Space:
self.aquireData()
else:
QtGui.QWidget.keyPressEvent(self, event)
def scrollTo(self, index, start_row, end_row):
"""Scroll table to view new data."""
self.dataView.scrollTo(self.dataView.model().index(start_row, 0))
class ConfigWindow(QtGui.QWidget):
"""Configuration window."""
def __init__(self, config, *args):
"""Build configuration setting gui."""
QtGui.QWidget.__init__(self, *args)
def save(self):
"""Save configuration."""
pass
def reload(self):
"""Reload configuration."""
pass
class SolartronProbe(QtGui.QWidget):
"""Probe readeout widget."""
def __init__(self, pid, *args):
QtGui.QWidget.__init__(self, *args)
self.label = QtGui.QLabel(chr(pid + ord('A') - 1))
self.output = QtGui.QLineEdit()
self.output.setReadOnly(1)
self.layout = QtGui.QHBoxLayout()
self.layout.addWidget(self.label)
self.layout.addWidget(self.output)
self.layout.setContentsMargins(0,0,0,0)
self.setLayout(self.layout)
def setValue(self, value):
"""Update probe readout value."""
self.output.setText(str(value))
def text(self):
"""Get probe readout value as text."""
return self.output.text()
class PedalPoller(QtCore.QThread):
"""Poll pedal driver."""
def __init__(self, *args):
QtCore.QThread.__init__(self, *args)
self.running_mutex = QtCore.QMutex()
self.running = True
self.pedal = Pedal()
def run(self):
"""Poll pedal."""
while self.getRunning():
if self.pedal.waitForPress():
self.emit(QtCore.SIGNAL("pedalPressed"))
def getRunning(self):
"""Get application running status."""
self.running_mutex.lock()
running = self.running
self.running_mutex.unlock()
return running
def exit(self):
"""Stop polling pedal."""
self.running_mutex.lock()
self.running = False
self.running_mutex.unlock()
class DataPooler(QtCore.QThread):
"""Poll device for values."""
def __init__(self, config, *args):
QtCore.QThread.__init__(self, *args)
self.running_mutex = QtCore.QMutex()
self.zero_mutex = QtCore.QMutex()
self.config = config
self.running = True
def status(self, message):
self.emit(QtCore.SIGNAL("status"), message)
def pconnect(self):
try:
self.driver.disconnect()
except: # Don't care if couldn't disconnect properly
pass
self.driver = Driver(readTimeout=self.config.timeout, writeTimeout=self.config.timeout)
retry_connection = True
# TODO replace reconnection loop with a reconnect timer and signal (emit a reconnect signal which
# will start a timer and start the connection process when timer ends.
while retry_connection and self.getRunning():
try:
if len(self.config.probes) == 0:
# If no probes specified in config. Get from search.
(prefix, self.count) = self.driver.setup()
self.config.probes = ["%s%02d" % (prefix, i) for i in range(1, self.count+1)]
else:
(prefix, self.count) = self.driver.setup(probes=self.config.probes)
self.setZeroValues(probe=False)
retry_connection = False
self.status("Connected.")
except SerialException:
self.status("Connection failed. Check hardware. Retrying in %d seconds." % self.config.retry)
sleep(self.config.retry)
except ValueError:
self.status("Setup failure. Retrying in %d seconds." % self.config.retry)
sleep(self.config.retry)
def run(self):
"""Probe polling loop."""
self.pconnect()
while self.getRunning():
for i in range(1, self.count+1):
self.running_mutex.lock()
try:
result = self.driver.read("1", i)
except OSError:
self.status("Disconnected.")
self.pconnect()
except OrbitError:
self.status("Read Error.")
self.pconnect()
if isinstance(result, float):
if isinstance(self.zero[i-1], float):
result -= self.zero[i-1]
result = float("%.*f" % (self.config.precision, result))
self.running_mutex.unlock()
self.emit(QtCore.SIGNAL("dataRetrieved"), i, result)
sleep(self.config.delay)
self.driver.disconnect()
def setZeroValues(self, probe=True):
self.zero_mutex.lock()
try:
if probe:
self.zero = [self.driver.read("1", i) for i in range(1, self.count+1)]
else:
self.zero = [0.0 for i in range(1, self.count+1)]
except AttributeError:
self.status("Error settings zero point. Please try again.")
self.zero_mutex.unlock()
def getRunning(self):
"""Get application running status."""
self.running_mutex.lock()
running = self.running
self.running_mutex.unlock()
return running
def exit(self):
"""Stop polling device."""
self.running_mutex.lock()
self.running = False
self.running_mutex.unlock()
class Measurements(QtCore.QAbstractTableModel):
"""Probe measurements data.
values = Contains sets of measurements. All probes are sampled at once.
"""
def __init__(self, config, parent=None):
QtCore.QAbstractTableModel.__init__(self, parent)
self.config = config
self.values = []
colors = ((255,255,255), (200,255,255))
self.brush = [QtCore.QVariant(QtGui.QBrush(QtGui.QColor(*color))) for color in colors]
self.clipboard = QtGui.QApplication.clipboard()
self.value_count = len(self.config.probes)
def createTable(self, columns):
self.values = []
self.groups = columns
def addMeasurement(self, data):
"""Add values from all probes to measurements list."""
# Set data
new_cell = self.getTableIndex(len(self.values),0)
self.values.append(data)
        # If at the beginning of a new row, insert a row.
if (new_cell[
|
octarin/PySrt
|
PySrt.py
|
Python
|
gpl-3.0
| 3,556
| 0.005624
|
import re
import sys
class SrtSection:
"""
    This class is used to store a section from an srt file (a subtitle frame).
- self.beginning is the time (in seconds) where the subframe begins
- self.duration is the duration (in seconds) of the subframe
- self.content is the content of the subframe
"""
def __init__(self, beginning, duration, content):
self.beginning = beginning
self.duration = duration
self.content = content
def __repr__(self):
return '({0}, {1}), "{2}"'.format(self.beginning, self.duration, self.content.encode("unicode_escape").decode())
def export(self):
"""
Exports the section to a formatted string
"""
return self.__export_tdata() + '\n' + self.content
def __export_tdata(self):
"""
Writes the time section in the srt syntax from the tuple
(beginning, duration)
"""
        # Calculate the start and end moments
beginning, end = self.beginning, self.beginning + self.duration
times = []
for temps in beginning, end:
hours = int(temps // 3600)
temps %= 3600
minutes = int(temps // 60)
temps %= 60
seconds = int(temps)
miliseconds = int(round(temps - seconds, 3)*1000)
times.append('{0}:{1}:{2},{3}'.format(hours, minutes, seconds,
miliseconds))
return ' --> '.join(times)
class SrtSubs:
"""
    This class is used to store and manipulate sections from an srt file.
    self.sections, where all the data is stored, is a list of SrtSections.
"""
def __init__(self, string):
"""
|
string is the content of the srt file.
"""
self.rawsections = [s.strip() for s in string.split("\n\n") if s != '']
self.sections = self.__extract_sections()
def __extract_sections(self):
"""
        Extracts all the information from a list containing all the
sections of the file, in the form of a list of tuples :
((beginning, duration), content)
with
beginning and duration i
|
n seconds
content the sub to show at this time
"""
sections = []
for section in self.rawsections:
lines = section.split('\n')
beginning, duration = self.__extract_tdata(lines[1])
content = "\n".join(lines[2:])
sections.append(SrtSection(beginning, duration, content))
return sections
def export_sections(self):
"""
Writes the sections to a string to be written to the subs file
"""
secs = []
for number, section in enumerate(self.sections):
sec = str(number+1)+'\n'
sec += section.export()
secs.append(sec)
return '\n\n'.join(secs)
def __extract_tdata(self, timesection):
"""
Returns a tuple (beginning, duration) from
the %H:%M:%S --> %H:%M:%S line.
"""
tparts = timesection.split(" --> ")
beginning_end = []
for sec in tparts:
hours, minutes, seconds, miliseconds = tuple(map(int, re.split("[:,]", sec)))
beginning_end.append(3600 * hours + 60 * minutes + seconds + miliseconds/1000)
beginning, end = tuple(beginning_end)
duration = end - beginning
return beginning, round(duration)
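
# Minimal usage sketch (added; assumes the two classes above are importable
# as-is): parse a small srt string, delay every subtitle by two seconds and
# re-export it, as the docstrings describe.
if __name__ == '__main__':
    raw = ("1\n"
           "00:00:01,000 --> 00:00:03,500\n"
           "Hello world\n"
           "\n"
           "2\n"
           "00:00:04,000 --> 00:00:06,000\n"
           "Second line\n")
    subs = SrtSubs(raw)
    for section in subs.sections:
        section.beginning += 2.0  # shift every subtitle two seconds later
    print(subs.export_sections())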
|
jghenry/standard_deviation
|
setup.py
|
Python
|
mit
| 187
| 0.005348
|
from distutils.core import setup
setup(
name='standard_deviation',
version='0.1dev',
packages=['sd_py',],
|
license='MIT',
long_description=open('README.md').read(),
|
)
|
tweemeterjop/thug
|
thug/DOM/W3C/HTML/HTMLFontElement.py
|
Python
|
gpl-2.0
| 352
| 0.008523
|
#!/usr/bin/env pyt
|
hon
from .HTMLElement import HTMLElement
from .attr_property import attr_property
class HTMLFontElement(HTMLElement):
def __init__(self, doc, tag):
HTMLElement.__init__(self, doc, tag)
color = attr_property("color")
face = attr_p
|
roperty("face")
size = attr_property("size")
|
Comunitea/CMNT_00098_2017_JIM_addons
|
custom_sale_order_variant_mgmt/models/sale_order.py
|
Python
|
agpl-3.0
| 13,669
| 0.001171
|
# -*- coding: utf-8 -*-
# © 2017 Comunitea
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import models, fields, api, _
from odoo.tools import float_compare
from lxml import etree
class SaleOrderLineTemplate(models.Model):
_name = 'sale.order.line.template'
_inherit = 'sale.order.line'
product_template = fields.Many2one(
'product.template', string='Product',
domain=[('sale_ok', '=', True), ('product_attribute_count', '=', 0)],
change_default=True, ondelete='restrict', required=True)
order_lines = fields.One2many('sale.order.line', 'template_line',
copy=True)
lines_qty = fields.Integer(compute='_compute_order_lines_qty')
price_subtotal = fields.Monetary(
compute='_compute_amount', string='Subtotal', readonly=True,
store=True)
global_available_stock = fields.\
Float('Stock', related='product_template.global_available_stock')
@api.depends('order_lines.price_subtotal')
def _compute_amount(self):
for line in self:
line.price_subtotal = sum(
[x.price_subtotal for x in line.order_lines])
@api.multi
def unlink(self):
if not self._context.get('unlink_product_line', False):
ctx = self._context.copy()
ctx.update(unlink_template_line=True)
self.mapped('order_lines').with_context(ctx).unlink()
return super(SaleOrderLineTemplate, self).unlink()
@api.multi
def write(self, vals):
for template in self:
line_vals = vals.copy()
if template.lines_qty > 1:
line_vals.pop('product_id', False)
#line_vals.pop('price_unit', False)
line_vals.pop('product_uom_qty', False)
line_vals.pop('purchase_price', False)
line_vals.pop('name', False)
line_vals.pop('sequence', False)
template.order_lines.write(line_vals)
return super(models.Model, self).write(vals)
@api.model
def create(self, vals):
        # Handle create() with order_lines because, when duplicating an
        # order, the line vals arrive without an order_id
if vals.get('order_lines', False):
for line_vals in vals['order_lines']:
if line_vals[0] == 0:
line_vals[2]['order_id'] = vals.get('order_id', False)
if not self._context.get('no_create_line', False):
            # Make sure the sale.order.line name is the correct one
            # (with the variant's reference and attributes)
line_vals = vals.copy()
template_product = self.env['product.template'].browse(vals['product_template'])
if template_product.display_name == line_vals['name']:
product_vals = self.env['product.product'].browse(
line_vals['product_id'])
line_vals['name'] = product_vals.display_name
new_line = self.env['sale.order.line'].with_context(
no_create_template_line=True).create(line_vals)
vals['order_lines'] = [(6, 0, [new_line.id])]
vals['name'] = template_product.display_name
return super(
SaleOrderLineTemplate,
self.with_context(no_create_template_line=True)).create(vals)
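    # Note (added for clarity): the Odoo x2many "command" tuples used in the
    # create() override above mean:
    #   (0, 0, vals) -> create a new linked record from vals
    #   (6, 0, ids)  -> replace the whole set with exactly these ids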
@api.model
def create_mal(self, vals):
        ## TODO: REVIEW (KIKO). The price of the first variant is not carried over
ctx = self._context.copy()
        # Handle create() with order_lines because, when duplicating an
        # order, the line vals arrive without an order_id
order_id = vals.get('order_id', False)
if vals.get('order_lines', False):
for line_vals in vals['order_lines']:
if line_vals[0] == 0:
line_vals[2]['order_id'] = vals.get('order_id', False)
if not self._context.get('no_create_line', False):
            # Make sure the sale.order.line name is the correct one
            # (with the variant's reference and attributes)
ctx.update(no_create_template_line=True)
line_vals = vals.copy()
orig = True
if orig:
line_vals = vals.copy()
template_product = self.env['product.template'].browse(vals['product_template'])
if template_product.display_name == line_vals['name']:
product_vals = self.env['product.product'].browse(
line_vals['product_id'])
line_vals['name'] = product_vals.display_name
new_line = self.env['sale.order.line'].with_context(ctx).create(line_vals)
vals['order_lines'] = [(6, 0, [new_line.id])]
else:
new_line_ids = self.env['sale.order.line']
template_product = self.env['product.template'].browse(vals['product_template'])
|
product_id = self.env['product.product'].browse(line_vals['product_id'])
if template_product.display_name == line_vals['name']:
line_vals['name'] = product_id.display_name
line_vals.update({
|
'product_id': product_id.id,
'product_uom': product_id.uom_id,
'order_id': order_id,
})
order_line = self.env['sale.order.line'].with_context(ctx).new(line_vals)
order_line.product_id_change()
order_line_vals = order_line._convert_to_write(order_line._cache)
new_line_ids |= new_line_ids.with_context(ctx).create(order_line_vals)
vals['order_lines'] = [(6, 0, new_line_ids.ids)]
return super(
SaleOrderLineTemplate,
self.with_context(no_create_template_line=True)).create(vals)
def _compute_order_lines_qty(self):
for template in self:
template.lines_qty = len(template.order_lines)
@api.onchange('product_template')
def onchange_template(self):
if not self.product_template:
return
self.product_id = self.product_template.product_variant_ids[0]
# @api.onchange('product_uom_qty', 'product_uom', 'route_id')
# def _onchange_product_id_check_availability(self):
# return
#
# @api.onchange('product_id')
# def _onchange_product_id_uom_check_availability(self):
# return
#
# @api.onchange('product_uom_qty')
# def _onchange_product_uom_qty(self):
# return
#
# @api.onchange('product_id')
# def _onchange_product_id_set_customer_lead(self):
# return
class SaleOrderLine(models.Model):
_inherit = 'sale.order.line'
@api.multi
@api.depends('product_id')
def _get_global_stock(self):
for line in self:
if line.product_id:
line.global_available_stock = \
line.product_id.web_global_stock
else:
line.global_available_stock = 0.0
template_line = fields.Many2one('sale.order.line.template')
global_available_stock = fields.Float('Stock', readonly=True,
compute="_get_global_stock",
store=True)
note = fields.Text("Notas")
partner_id = fields.Many2one(related='order_id.partner_id', string='partner', store=True, readonly=True)
    pricelist_id = fields.Many2one(related='order_id.pricelist_id', string='pricelist', store=True, readonly=True)
check_edit = fields.Boolean(compute='_compute_check_edit')
@api.depends('template_line.lines_qty', 'product_id.product_tmpl_id.product_attribute_count')
def _compute_check_edit(self):
for line in self:
check_edit = True
if line.product_id.product_tmpl_id.product_attribute_count > 0:
check_edit = False
if line.template_line.lines_qty > 1:
check_edit = False
line.check_edit = check_edit
@api.model
def create(self, vals):
if self._context.get('template_line', False):
|
nvbn/coviolations_web
|
violations/coverage.py
|
Python
|
mit
| 1,087
| 0
|
from django.template.loader import render_to_string
from tasks.const import STATUS_SUCCESS
from .base import library
@library.register('coverage')
def coverage_violation(data):
"""Coverage violation parser
:param data: task data
:type data: dict
:returns: dict
"""
data['status'] = STATUS_SUCCESS
line = data['raw'].sp
|
lit('\n')[-2]
statements, miss, cover = [
part for part in line.split(' ')
if len(part) > 0 and 'TOTAL' not in part
]
each_file = [
filter(len, line.split(' '))
for line in data['raw'].split('\n')[2:-3]
]
data['preview'] = render_to_string('violations/coverage/preview.html', {
'statements': statements,
'miss': miss,
'cover': cover,
})
data['prepared'] = render_to_string('violations/coverage
|
/prepared.html', {
'statements': statements,
'miss': miss,
'cover': cover,
'each_file': each_file,
})
data['plot'] = {
'cover': int(cover[:-1]),
}
data['success_percent'] = int(cover[:-1])
return data
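# Hypothetical input sketch (added; assumes data['raw'] holds standard
# `coverage report` text).  The parser above reads the second-to-last line,
# i.e. the TOTAL row, for the summary figures:
#
#   Name         Stmts   Miss  Cover
#   ----------------------------------
#   app/views.py    40      4    90%
#   app/utils.py    60     16    73%
#   ----------------------------------
#   TOTAL          100     20    80%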
|
criswell/noink
|
src/noink/role_db.py
|
Python
|
agpl-3.0
| 7,210
| 0.000832
|
"""
##BOILERPLATE_COPYRIGHT
##BOILERPLATE_COPYRIGHT
"""
import datetime
from types import IntType
from noink import mainDB
from noink.data_models import Role, RoleMapping
from noink.user_db import UserDB
from noink.activity_table import get_activity_dict
from noink.exceptions import DuplicateRole, RoleNotFound
from noink.event_log import EventLog
from noink.pickler import pickle, depickle
from noink.util import string_types
class RoleDB:
__borg_state = {}
def __init__(self):
self.__dict__ = self.__borg_state
try:
self._setup
except AttributeError:
self._setup = False
if not self._setup:
self.eventLog = EventLog()
self._setup = True
def find_role_by_name(self, rolename):
'''
        Finds a role by its name.
@param rolename: The name of the role.
@return The role object found.
'''
return Role.query.filter_by(name=rolename).first()
def find_role_by_id(self, rid):
'''
Find a role by its role ID.
@param rid: The role ID to find.
@return The role object found.
'''
return Role.query.get(rid)
def get_role(self, role):
"""
Given a role identifier, return the role object.
@param role: The role. Can be role object, rid, or string name.
"""
r = role
if isinstance(role, IntType):
r = self.find_role_by_id(role)
elif isinstance(role, string_types):
r = self.find_role_by_name(role)
return r
def get_rolemapping(self, user, group, role):
"""
Given a user, group and role, will return the rolemap of the three,
if it exists. Otherwise will return None.
FIXME - Docstring
"""
r = self.get_role(role)
user_db = UserDB()
u = user_db.get_user(user)
g = user_db.get_group(group)
return RoleMapping.query.filter_by(user=u).filter_by(
group=g).filter_by(role=r).first()
def add_role(self, name, description, activities=None):
'''
Add a new role to the DB.
@param name: Short, descriptive name of the role. Must be unique.
@param description: Longer description of the role.
@param activities: An activity dict defining the role's activities.
If parameter is omitted, then a default dict is used.
'''
try:
exists = self.find_role_by_name(name)
except:
exists = False
if exists:
raise DuplicateRole("{0} already exists as a role with id "\
"'{1}'".format(name, str(exists)))
if activities is None:
activities = get_activity_dict(False)
now = datetime.datetime.now()
pact = pickle(activities)
role = Role(name, description, pact, now)
mainDB.session.add(role)
mainDB.session.commit()
blob = pickle({'id' : role.id})
# XXX - Do we wan
|
t to use the user ID of the person adding this role?
self.eventLog.add('add_role', -1, True, blob, role.name)
|
return role
def update_role(self, role):
"""
Given a role object, update the database with whatever changes it
contains.
"""
if isinstance(role, Role):
exists = Role.query.get(role.id)
            if exists is None:
mainDB.session.add(role)
mainDB.session.commit()
def create_temp_empty_role(self):
"""
Returns a temporary, empty role object.
"""
pact = pickle(get_activity_dict(False))
return Role(None, None, pact, None)
def update_temp_role_activities(self, role, acts):
"""
        Given a temporary role and updated activities for it, update it.
        Returns the updated role.
"""
pact = pickle(acts)
role.activities = pact
return role
def get_activities(self, role):
'''
Given a role, return the activities that role can do.
@param role: The role to use. Can be a role object, a role.id, or a
role name.
@return Decoded/decoupled activity dictionary
'''
r = self.get_role(role)
if r is not None:
return depickle(r.activities)
else:
return None
def assign_role(self, user, group, role):
'''
Given a user, group and role, assign the user as the role when part of
the group.
@param user: The user. Can be user object, uid, or string name of the
user.
@param group: The group. Can be group object, gid, or string name.
@param role: The role. Can be role object, rid, or string name.
'''
userDB = UserDB()
u = userDB.get_user(user)
g = userDB.get_group(group)
r = self.get_role(role)
exist = RoleMapping.query.filter_by(user=u).filter_by(
group=g).filter_by(role=r).all()
if exist == []:
rm = RoleMapping(r, u, g)
mainDB.session.add(rm)
mainDB.session.commit()
def revoke_role(self, user, group, role):
"""
Given a user, group and role, revoke the user's rights to that role
when part of the group.
@param user: The user. Can be a user object, uid, or string name of
the user.
@param group: The group. Can be a group object, gid, or string name.
@param role: The role. Can be role object, rid, or string name.
"""
user_db = UserDB()
u = user_db.get_user(user)
g = user_db.get_group(group)
r = self.get_role(role)
rmaps = RoleMapping.query.filter_by(user=u).filter_by(
group=g).filter_by(role=r).all()
for rm in rmaps:
mainDB.session.delete(rm)
mainDB.session.commit()
def delete_role(self, role):
"""
Given a role, delete it from the database. Role can be integer,
string or role object.
"""
r = self.get_role(role)
        if r is not None:
rid = int(r.id)
rname = r.name
mainDB.session.delete(r)
mainDB.session.commit()
self.eventLog.add('del_role', rid, True, None, rname)
else:
raise RoleNotFound('Role not found in database')
def get_roles(self, user, group=None):
'''
Get the roles a given user has. Optionally, limit by group.
@param user: The user. Can be user object, uid, or string name.
@param group: Group to limit by. Can be group object, gid, or string
name.
@return A list of role mappings.
'''
userDB = UserDB()
u = userDB.get_user(user)
rm = RoleMapping.query.filter_by(user=u)
if group is not None:
g = userDB.get_group(group)
rm = rm.filter_by(group=g)
return rm.all()
def get_all_roles(self):
'''
Get all the available roles
'''
return Role.query.all()
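# Usage sketch (added, not from the original module); the method names are
# those defined above, the user/group/role names are placeholders:
#
#   role_db = RoleDB()
#   editor = role_db.add_role("editor", "Can edit entries")
#   role_db.assign_role("alice", "writers", "editor")
#   mappings = role_db.get_roles("alice", group="writers")
#   role_db.revoke_role("alice", "writers", "editor")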
|
christianmemije/kolibri
|
kolibri/plugins/learn/viewsets.py
|
Python
|
mit
| 2,084
| 0.00048
|
from .serializers import LearnerClassroomSerializer
from django.db.models.query import F
from kolibri.auth.api import KolibriAuthPermissionsFilter
from kolibri.auth.filters import HierarchyRelationsFilter
from kolibri.auth.models import Classroom
from kolibri.auth.serializers import ClassroomSerializer
from kolibri.core.lessons.models import Lesson
from kolibri.core.lessons.models import LessonAssignment
from kolibri.core.lessons.serializers import LessonSerializer
from rest_framework.permissions import IsAuthenticated
from rest_framework.viewsets import ReadOnlyModelViewSet
class LearnerClassroomViewset(ReadOnlyModelViewSet):
"""
Returns all Classrooms for which the requesting User is a member.
Use the ?no_assignments flag to just get the name and ID of the Classroom
(e.g. when listing classes in which User is enrolled)
"""
filter_backends = (KolibriAuthPermissionsFilter,)
permission_classes = (IsAuthenticated,)
def get_queryset(self):
current_user = self.request.user
memberships = current_user.memberships.filter(
collection__kind='classroom',
).values('collection_id')
return Classroom.objects.filter(id__in=memberships)
def get_serializer_class(self):
if ('no_assignments' in self.request.query_params):
return ClassroomSerializer
else:
return LearnerClassroomSerializer
class LearnerLessonViewset(ReadOnlyModelViewSet):
"""
Special Viewset for Learners to view Lessons to which they are assigned.
The core Lesson Viewset is locked down to Admin users only.
"""
serializer_class = LessonSerializer
permission_classes = (IsAuthenticated,)
def ge
|
t_queryset(self):
assignments = HierarchyRelationsFilter(LessonAssignment.objects.all()) \
.filter_by_hierarchy(
target_user=self.request.user,
ancestor_collection=F('collection')
)
return Lesson.objects.filter(
lesson_assignments__in=assignments,
|
is_active=True
)
|
taylorhardy/ConferenceApp
|
conference.py
|
Python
|
apache-2.0
| 34,962
| 0.003976
|
"""
conference.py -- Udacity conference server-side Python App Engine API;
uses Google Cloud Endpoints
$Id: conference.py,v 1.25 2014/05/24 23:42:19 wesc Exp wesc $
created by wesc on 2014 apr 21
"""
__author__ = 'wesc+api@google.com (Wesley Chun)'
from datetime import datetime, timedelta, time as timed
import json
import os
import time
import logging
import endpoints
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
from models import ConflictException
from models import Profile
from models import ProfileMiniForm
from models import ProfileForm
from models import StringMessage
from models import BooleanMessage
from models import Conference
from models import ConferenceForm
from models import ConferenceForms
from models import Session
from models import SessionForm
from models import SessionForms
from models import SpeakerForm
from models import ConferenceQueryForm
from models import ConferenceQueryForms
from models import TeeShirtSize
from settings import WEB_CLIENT_ID
from settings import ANDROID_CLIENT_ID
from settings import IOS_CLIENT_ID
from settings import ANDROID_AUDIENCE
EMAIL_SCOPE = endpoints.EMAIL_SCOPE
API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID
MEMCACHE_ANNOUNCEMENTS_KEY = "RECENT_ANNOUNCEMENTS"
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
DEFAULTS = {
"city": "Default City",
"maxAttendees": 0,
"seatsAvailable": 0,
"topics": [ "Default", "Topic" ],
}
OPERATORS = {
'EQ': '=',
'GT': '>',
'GTEQ': '>=',
'LT': '<',
'LTEQ': '<=',
'NE': '!='
}
FIELDS = {
'CITY': 'city',
'TOPIC': 'topics',
'MONTH': 'month',
'MAX_ATTENDEES': 'maxAttendees',
}
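# Note (added): OPERATORS/FIELDS above map an inbound query filter onto a
# Datastore filter; a typical filter dict (assumed shape) such as
#   {"field": "CITY", "operator": "EQ", "value": "London"}
# becomes the ndb filter `Conference.city == "London"`.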
CONF_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeConferenceKey=messages.StringField(1),
)
CONF_POST_REQUEST = endpoints.ResourceContainer(
ConferenceForm,
websafeConferenceKey=messages.StringField(1),
)
SESSION_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeConferenceKey=messages.StringField(1, required=True),
typeOfSession=messages.StringField(2)
)
SESSION_POST_REQUEST = endpoints.ResourceContainer(
SessionForm,
websafeConferenceKey=messages.StringField(1, required=True),
)
SPEAKER_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
speaker=messages.StringField(1, required=True),
)
WISHLIST_POST_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeSessionKey=messages.StringField(1, required=True),
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def _getUserId():
"""A workaround implementation for getting userid."""
auth = os.getenv('HTTP_AUTHORIZATION')
bearer, token = auth.split()
token_type = 'id_token'
if 'OAUTH_USER_ID' in os.environ:
token_type = 'access_token'
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?%s=%s'
% (token_type, token))
user = {}
wait = 1
for i in range(3):
resp = urlfet
|
ch.fetch(url)
if resp.status_code == 200:
user = json.loads(resp.content)
break
elif resp.status_code == 400 and 'invalid_token' in resp.content:
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?%s=%s'
% ('access_token', token))
else:
|
time.sleep(wait)
wait = wait + i
return user.get('user_id', '')
@endpoints.api(name='conference', version='v1', audiences=[ANDROID_AUDIENCE],
allowed_client_ids=[WEB_CLIENT_ID, API_EXPLORER_CLIENT_ID, ANDROID_CLIENT_ID, IOS_CLIENT_ID],
scopes=[EMAIL_SCOPE])
class ConferenceApi(remote.Service):
"""Conference API v0.1"""
# - - - Conference objects - - - - - - - - - - - - - - - - -
def _copyConferenceToForm(self, conf, displayName):
"""Copy relevant fields from Conference to ConferenceForm."""
cf = ConferenceForm()
for field in cf.all_fields():
if hasattr(conf, field.name):
# convert Date to date string; just copy others
if field.name.endswith('Date'):
setattr(cf, field.name, str(getattr(conf, field.name)))
else:
setattr(cf, field.name, getattr(conf, field.name))
elif field.name == "websafeKey":
setattr(cf, field.name, conf.key.urlsafe())
if displayName:
setattr(cf, 'organizerDisplayName', displayName)
cf.check_initialized()
return cf
def _createConferenceObject(self, request):
"""Create or update Conference object, returning ConferenceForm/request."""
# preload necessary data items
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = _getUserId()
if not request.name:
raise endpoints.BadRequestException("Conference 'name' field required")
# copy ConferenceForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
del data['websafeKey']
del data['organizerDisplayName']
# add default values for those missing (both data model & outbound Message)
for df in DEFAULTS:
if data[df] in (None, []):
data[df] = DEFAULTS[df]
setattr(request, df, DEFAULTS[df])
# convert dates from strings to Date objects; set month based on start_date
if data['startDate']:
data['startDate'] = datetime.strptime(data['startDate'][:10], "%Y-%m-%d").date()
data['month'] = data['startDate'].month
else:
data['month'] = 0
if data['endDate']:
data['endDate'] = datetime.strptime(data['endDate'][:10], "%Y-%m-%d").date()
# set seatsAvailable to be same as maxAttendees on creation
if data["maxAttendees"] > 0:
data["seatsAvailable"] = data["maxAttendees"]
# generate Profile Key based on user ID and Conference
# ID based on Profile key get Conference key from ID
p_key = ndb.Key(Profile, user_id)
c_id = Conference.allocate_ids(size=1, parent=p_key)[0]
c_key = ndb.Key(Conference, c_id, parent=p_key)
data['key'] = c_key
data['organizerUserId'] = request.organizerUserId = user_id
# create Conference, send email to organizer confirming
# creation of Conference & return (modified) ConferenceForm
Conference(**data).put()
taskqueue.add(params={'email': user.email(),
'conferenceInfo': repr(request)},
url='/tasks/send_confirmation_email'
)
return request
@ndb.transactional()
def _updateConferenceObject(self, request):
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = _getUserId()
# copy ConferenceForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
# update existing conference
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
# check that conference exists
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % request.websafeConferenceKey)
# check that user is owner
if user_id != conf.organizerUserId:
raise endpoints.ForbiddenException(
'Only the owner can update the conference.')
# Not getting all the fields, so don't create a new object; just
# copy relevant fields from ConferenceForm to Conference object
for field in request.all_fields():
d
|
Glottotopia/aagd
|
moin/local/moin/build/lib.linux-x86_64-2.6/MoinMoin/support/passlib/exc.py
|
Python
|
mit
| 7,667
| 0.004826
|
"""passlib.exc -- exceptions & warnings raised by passlib"""
#=============================================================================
# exceptions
#=============================================================================
class MissingBackendError(RuntimeError):
"""Error raised if multi-backend handler has no available backends;
or if specifically req
|
uested backend is not available.
:exc:`!MissingBackendError` derives
from :exc:`RuntimeError`, since it usually indicates
lack of an external library or OS feature.
This is primarily raised by handlers which depend on
external libraries (which is currently just
|
:class:`~passlib.hash.bcrypt`).
"""
class PasswordSizeError(ValueError):
"""Error raised if a password exceeds the maximum size allowed
by Passlib (4096 characters).
Many password hash algorithms take proportionately larger amounts of time and/or
memory depending on the size of the password provided. This could present
a potential denial of service (DOS) situation if a maliciously large
password is provided to an application. Because of this, Passlib enforces
a maximum size limit, but one which should be *much* larger
than any legitimate password. :exc:`!PasswordSizeError` derives
from :exc:`!ValueError`.
.. note::
Applications wishing to use a different limit should set the
``PASSLIB_MAX_PASSWORD_SIZE`` environmental variable before
Passlib is loaded. The value can be any large positive integer.
.. versionadded:: 1.6
"""
def __init__(self):
ValueError.__init__(self, "password exceeds maximum allowed size")
# this also prevents a glibc crypt segfault issue, detailed here ...
# http://www.openwall.com/lists/oss-security/2011/11/15/1
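# Illustrative sketch (added, not part of passlib): the pattern the docstring
# above describes -- raise the limit via the environment variable *before*
# passlib is imported.
#
#   import os
#   os.environ["PASSLIB_MAX_PASSWORD_SIZE"] = "16384"
#   import passlib.hash  # must happen after the variable is set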
#=============================================================================
# warnings
#=============================================================================
class PasslibWarning(UserWarning):
"""base class for Passlib's user warnings.
.. versionadded:: 1.6
"""
class PasslibConfigWarning(PasslibWarning):
"""Warning issued when non-fatal issue is found related to the configuration
of a :class:`~passlib.context.CryptContext` instance.
This occurs primarily in one of two cases:
* The CryptContext contains rounds limits which exceed the hard limits
imposed by the underlying algorithm.
* An explicit rounds value was provided which exceeds the limits
imposed by the CryptContext.
In both of these cases, the code will perform correctly & securely;
but the warning is issued as a sign the configuration may need updating.
"""
class PasslibHashWarning(PasslibWarning):
"""Warning issued when non-fatal issue is found with parameters
or hash string passed to a passlib hash class.
This occurs primarily in one of two cases:
* A rounds value or other setting was explicitly provided which
exceeded the handler's limits (and has been clamped
by the :ref:`relaxed<relaxed-keyword>` flag).
* A malformed hash string was encountered which (while parsable)
should be re-encoded.
"""
class PasslibRuntimeWarning(PasslibWarning):
"""Warning issued when something unexpected happens during runtime.
The fact that it's a warning instead of an error means Passlib
    was able to correct for the issue, but that it's anomalous enough
that the developers would love to hear under what conditions it occurred.
"""
class PasslibSecurityWarning(PasslibWarning):
"""Special warning issued when Passlib encounters something
that might affect security.
"""
#=============================================================================
# error constructors
#
# note: these functions are used by the hashes in Passlib to raise common
# error messages. They are currently just functions which return ValueError,
# rather than subclasses of ValueError, since the specificity isn't needed
# yet; and who wants to import a bunch of error classes when catching
# ValueError will do?
#=============================================================================
def _get_name(handler):
return handler.name if handler else "<unnamed>"
#------------------------------------------------------------------------
# generic helpers
#------------------------------------------------------------------------
def type_name(value):
"return pretty-printed string containing name of value's type"
cls = value.__class__
if cls.__module__ and cls.__module__ not in ["__builtin__", "builtins"]:
return "%s.%s" % (cls.__module__, cls.__name__)
elif value is None:
return 'None'
else:
return cls.__name__
def ExpectedTypeError(value, expected, param):
"error message when param was supposed to be one type, but found another"
# NOTE: value is never displayed, since it may sometimes be a password.
name = type_name(value)
return TypeError("%s must be %s, not %s" % (param, expected, name))
def ExpectedStringError(value, param):
"error message when param was supposed to be unicode or bytes"
return ExpectedTypeError(value, "unicode or bytes", param)
#------------------------------------------------------------------------
# encrypt/verify parameter errors
#------------------------------------------------------------------------
def MissingDigestError(handler=None):
"raised when verify() method gets passed config string instead of hash"
name = _get_name(handler)
return ValueError("expected %s hash, got %s config string instead" %
(name, name))
def NullPasswordError(handler=None):
"raised by OS crypt() supporting hashes, which forbid NULLs in password"
name = _get_name(handler)
return ValueError("%s does not allow NULL bytes in password" % name)
#------------------------------------------------------------------------
# errors when parsing hashes
#------------------------------------------------------------------------
def InvalidHashError(handler=None):
"error raised if unrecognized hash provided to handler"
return ValueError("not a valid %s hash" % _get_name(handler))
def MalformedHashError(handler=None, reason=None):
"error raised if recognized-but-malformed hash provided to handler"
text = "malformed %s hash" % _get_name(handler)
if reason:
text = "%s (%s)" % (text, reason)
return ValueError(text)
def ZeroPaddedRoundsError(handler=None):
"error raised if hash was recognized but contained zero-padded rounds field"
return MalformedHashError(handler, "zero-padded rounds")
#------------------------------------------------------------------------
# settings / hash component errors
#------------------------------------------------------------------------
def ChecksumSizeError(handler, raw=False):
"error raised if hash was recognized, but checksum was wrong size"
# TODO: if handler.use_defaults is set, this came from app-provided value,
# not from parsing a hash string, might want different error msg.
checksum_size = handler.checksum_size
unit = "bytes" if raw else "chars"
reason = "checksum must be exactly %d %s" % (checksum_size, unit)
return MalformedHashError(handler, reason)
#=============================================================================
# eof
#=============================================================================
|
googleads/google-ads-python
|
google/ads/googleads/v8/services/services/gender_view_service/transports/grpc.py
|
Python
|
apache-2.0
| 10,280
| 0.000875
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v8.resources.types import gender_view
from google.ads.googleads.v8.services.types import gender_view_service
from .base import GenderViewServiceTransport, DEFAULT_CLIENT_INFO
class GenderViewServiceGrpcTransport(GenderViewServiceTransport):
"""gRPC backend transport for GenderViewService.
Service to manage gender views.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure
|
that channel and credentials are not both
# provided.
credentials = False
# If a channel
|
was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = google.auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
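    # Construction sketch (added): the two paths the __init__ docstring above
    # describes.  `creds` is a placeholder google.auth credentials object.
    #
    #   channel = GenderViewServiceGrpcTransport.create_channel(
    #       "googleads.googleapis.com:443", credentials=creds)
    #   transport = GenderViewServiceGrpcTransport(channel=channel)
    #
    #   # ...or let the transport create its own secure channel:
    #   transport = GenderViewServiceGrpcTransport(
    #       host="googleads.googleapis.com", credentials=creds)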
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
            host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
scopes (Optional[Sequence[str]]): A o
|
disabler/isida3
|
lib/chardet/sbcsgroupprober.py
|
Python
|
gpl-3.0
| 2,948
| 0.010855
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Origi
|
nal Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of t
|
he License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import constants, sys
from charsetgroupprober import CharSetGroupProber
from sbcharsetprober import SingleByteCharSetProber
from langcyrillicmodel import Win1251CyrillicModel, Koi8rModel, Latin5CyrillicModel, MacCyrillicModel, Ibm866Model, Ibm855Model
from langgreekmodel import Latin7GreekModel, Win1253GreekModel
from langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel
from langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel
from langthaimodel import TIS620ThaiModel
from langhebrewmodel import Win1255HebrewModel
from hebrewprober import HebrewProber
class SBCSGroupProber(CharSetGroupProber):
def __init__(self):
CharSetGroupProber.__init__(self)
self._mProbers = [ \
SingleByteCharSetProber(Win1251CyrillicModel),
SingleByteCharSetProber(Koi8rModel),
SingleByteCharSetProber(Latin5CyrillicModel),
SingleByteCharSetProber(MacCyrillicModel),
SingleByteCharSetProber(Ibm866Model),
SingleByteCharSetProber(Ibm855Model),
SingleByteCharSetProber(Latin7GreekModel),
SingleByteCharSetProber(Win1253GreekModel),
SingleByteCharSetProber(Latin5BulgarianModel),
SingleByteCharSetProber(Win1251BulgarianModel),
SingleByteCharSetProber(Latin2HungarianModel),
SingleByteCharSetProber(Win1250HungarianModel),
SingleByteCharSetProber(TIS620ThaiModel),
]
hebrewProber = HebrewProber()
logicalHebrewProber = SingleByteCharSetProber(Win1255HebrewModel, constants.False, hebrewProber)
visualHebrewProber = SingleByteCharSetProber(Win1255HebrewModel, constants.True, hebrewProber)
hebrewProber.set_model_probers(logicalHebrewProber, visualHebrewProber)
self._mProbers.extend([hebrewProber, logicalHebrewProber, visualHebrewProber])
self.reset()
|
StartupsPoleEmploi/labonneboite
|
labonneboite/web/admin/views/index.py
|
Python
|
agpl-3.0
| 619
| 0
|
"""
The admin interface is built on top of Flask-Admin:
- https://flask-admin.readthed
|
ocs.io/en/latest/
- http://flask-admin.readthedocs.io/en/latest/api/mod_model/
- http://mrjoes.github.io/2015/06/17/flask-admin-120.html
"""
from flask_admin import AdminIndexView, expose
|
from labonneboite.web.admin.utils import abort_if_not_admin, user_is_admin
class LbbAdminIndexView(AdminIndexView):
"""
A custom index view class for the LBB admin.
"""
@expose('/')
def index(self):
if user_is_admin():
return super(LbbAdminIndexView, self).index()
return abort_if_not_admin()
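# Registration sketch (added; assumes a Flask `app` object and Flask-Admin):
#
#   from flask_admin import Admin
#   admin = Admin(app, index_view=LbbAdminIndexView())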
|
sfstpala/robot-ci
|
robot/tests/test_tasks.py
|
Python
|
gpl-3.0
| 7,594
| 0
|
import urllib.parse
import unittest.mock
import tornado.httpclient
import robot.tests
class TasksHandlerTest(robot.tests.TestCase):
@unittest.mock.patch("robot.tasks.TasksHandler.fetch")
def test_get(self, fetch):
fetch.return_value = robot.tests.future(unittest.mock.Mock(json={
"rows": [self.task],
"prev": None,
"next": None,
}))
res = self.fetch("/tasks", authenticated=True)
self.assertEqual(res.code, 200)
@unittest.mock.patch("robot.tasks.TasksHandler.fetch")
def test_get_bad_request(self, fetch):
res = self.fetch("/tasks?limit=None", authenticated=True)
self.assertEqual(res.code, 400)
res = self.fetch("/tasks?sort=broken", authenticated=True)
self.assertEqual(res.code, 400)
fetch.return_value = robot.tests.future(
exception=tornado.httpclient.HTTPError(400))
res = self.fetch("/tasks", authenticated=True)
self.assertEqual(res.code, 400)
@unittest.mock.patch("robot.tasks.TasksHandler.fetch")
def test_post(self, fetch):
fetch.return_value = robot.tests.future(unittest.mock.Mock(json={
"id": self.task["id"],
}))
body = urllib.parse.urlencode({
"name": "test",
"lang": "bash",
"text": "echo hello",
})
res = self.fetch(
"/tasks", method="POST", body=body,
authenticated=True, follow_redirects=False)
self.assertEqual(res.code, 302)
self.assertEqual(res.headers["Location"], "/tasks/" + self.task["id"])
fetch.assert_called_once_with("/api/t
|
asks", method="POST", body={
"lang": "bash", "text": "echo hello", "name": "test"})
@unittest.mock.patch("robot.tasks.TasksHandler.fetch")
def test_post_bad_request(self, fetch):
fetch.return_value = robot.tests.future(
exception=tornado.httpclient.HTTPError(400))
body = url
|
lib.parse.urlencode({
"name": "test",
"lang": "",
"text": "",
})
res = self.fetch(
"/tasks", method="POST", body=body,
authenticated=True, follow_redirects=False)
self.assertEqual(res.code, 400)
class TaskHandlerTest(robot.tests.TestCase):
@unittest.mock.patch("robot.tasks.TaskHandler.fetch")
def test_get(self, fetch):
task = self.task
fetch.return_value = robot.tests.future(unittest.mock.Mock(json=task))
res = self.fetch("/tasks/" + self.task["id"], authenticated=True)
self.assertEqual(res.code, 200)
@unittest.mock.patch("robot.tasks.TaskHandler.fetch")
def test_get_not_found(self, fetch):
fetch.return_value = robot.tests.future(
exception=tornado.httpclient.HTTPError(404))
res = self.fetch("/tasks/test", authenticated=True)
self.assertEqual(res.code, 404)
@unittest.mock.patch("robot.tasks.TaskHandler.fetch")
def test_post(self, fetch):
fetch.return_value = robot.tests.future(unittest.mock.Mock(json={}))
body = urllib.parse.urlencode({
"name": "test",
"lang": "bash",
"text": "echo hello",
})
res = self.fetch(
"/tasks/" + self.task["id"], method="POST", body=body,
authenticated=True, follow_redirects=False)
self.assertEqual(res.code, 302)
self.assertEqual(res.headers["Location"], "/tasks/" + self.task["id"])
fetch.assert_called_once_with(
"/api/tasks/" + self.task["id"], method="PUT", body={
"lang": "bash", "text": "echo hello", "name": "test"})
@unittest.mock.patch("robot.tasks.TaskHandler.fetch")
def test_post_not_found(self, fetch):
fetch.return_value = robot.tests.future(
exception=tornado.httpclient.HTTPError(404))
body = urllib.parse.urlencode({
"name": "test",
"lang": "bash",
"text": "echo hello",
})
res = self.fetch(
"/tasks/" + self.task["id"], method="POST", body=body,
authenticated=True, follow_redirects=False)
self.assertEqual(res.code, 404)
class BuildTaskHandlerTest(robot.tests.TestCase):
def test_get(self):
res = self.fetch("/tasks/build", authenticated=True)
self.assertEqual(res.code, 200)
class RunTaskHandlerTest(robot.tests.TestCase):
@unittest.mock.patch("robot.tasks.RunTaskHandler.fetch")
def test_post(self, fetch):
result = self.result
result["task"] = self.task["id"]
fetch.return_value = robot.tests.future(
unittest.mock.Mock(json=result))
res = self.fetch("/tasks/" + self.task["id"] + "/run", method="POST",
body="", authenticated=True, follow_redirects=False)
self.assertEqual(res.code, 302)
self.assertEqual(
res.headers["Location"], "/results/" + self.result["id"])
fetch.assert_called_once_with(
"/api/tasks/" + self.task["id"] + "/run", body={}, method="POST")
@unittest.mock.patch("robot.tasks.RunTaskHandler.fetch")
def test_post_bad_request(self, fetch):
fetch.return_value = robot.tests.future(
exception=tornado.httpclient.HTTPError(400))
res = self.fetch("/tasks/" + self.task["id"] + "/run", method="POST",
body="", authenticated=True, follow_redirects=False)
self.assertEqual(res.code, 400)
fetch.assert_called_once_with(
"/api/tasks/" + self.task["id"] + "/run", body={}, method="POST")
class EditTaskHandlerTest(robot.tests.TestCase):
@unittest.mock.patch("robot.tasks.EditTaskHandler.fetch")
def test_get(self, fetch):
task = self.task
fetch.return_value = robot.tests.future(unittest.mock.Mock(json=task))
res = self.fetch(
"/tasks/" + self.task["id"] + "/edit", authenticated=True)
self.assertEqual(res.code, 200)
@unittest.mock.patch("robot.tasks.EditTaskHandler.fetch")
def test_get_not_found(self, fetch):
fetch.return_value = robot.tests.future(
exception=tornado.httpclient.HTTPError(404))
res = self.fetch(
"/tasks/" + self.task["id"] + "/edit", authenticated=True)
self.assertEqual(res.code, 404)
class DeleteTaskHandlerTest(robot.tests.TestCase):
@unittest.mock.patch("robot.tasks.DeleteTaskHandler.fetch")
def test_post(self, fetch):
task = self.task
task["task"] = self.task["id"]
fetch.return_value = robot.tests.future(
unittest.mock.Mock(json=task))
res = self.fetch("/tasks/" + self.task["id"] + "/delete",
method="POST", body="", authenticated=True,
follow_redirects=False)
self.assertEqual(res.code, 302)
self.assertEqual(res.headers["Location"], "/tasks")
fetch.assert_called_once_with(
"/api/tasks/" + self.task["id"], method="DELETE")
@unittest.mock.patch("robot.tasks.DeleteTaskHandler.fetch")
def test_post_bad_request(self, fetch):
fetch.return_value = robot.tests.future(
exception=tornado.httpclient.HTTPError(400))
res = self.fetch("/tasks/" + self.task["id"] + "/delete",
method="POST", body="", authenticated=True,
follow_redirects=False)
self.assertEqual(res.code, 400)
fetch.assert_called_once_with(
"/api/tasks/" + self.task["id"], method="DELETE")
|
mbohlool/client-python
|
kubernetes/test/test_v1beta1_http_ingress_path.py
|
Python
|
apache-2.0
| 1,011
| 0.004946
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.
|
client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_http_ingress_path import V1beta1HTTPIngressPath
class TestV1beta1HTTPIngressPath(unittest.TestCase):
""" V1beta1HTTPIngressPath unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1HTTPIngressPath(self):
"""
Test V1beta1HTTPIngressPath
"""
# FIXME: construct object with mandatory attributes with e
|
xample values
#model = kubernetes.client.models.v1beta1_http_ingress_path.V1beta1HTTPIngressPath()
pass
if __name__ == '__main__':
unittest.main()
|
astroufsc/python-si-tcpclient
|
si/commands/__init__.py
|
Python
|
gpl-2.0
| 22
| 0
|
from came
|
ra import *
|
|
lukehsiao/RobotSoccer
|
MotionControl/scripts/kalman_filter/LocationFilter.py
|
Python
|
mit
| 821
| 0.013398
|
#!/usr/bin/env python
from gamepieces.HomeRobot import *
from gamepieces.Ball import *
import cPickle as pickle
class LocationFilter:
def __init__(self):
self.lastSample = None
def callback(self,data):
# parse message
measuredLocations = Locati
|
ons()
measuredLocations.setDataFromSample(data)
measuredRobotLocation = RobotLocation(measuredSample.time, measuredSample.home1_x
|
,
newSample.home1_y, newSample.home1_theta)
newBallLocation = BallLocation(newSample.time, newSample.ball_x, newSample.ball_y)
updatedRobotLocation = self.filterRobotLocation(newRobotLocation)
self.ball.time = newSample.time
self.ball.point.x = newSample.ball_x
self.ball.point.y = newSample.ball_y
self.lastSample = newSample
self.postSamplingSemaphore()
|
keishi/chromium
|
chrome/test/functional/autofill.py
|
Python
|
bsd-3-clause
| 41,065
| 0.004359
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import pickle
import re
import simplejson
import autofill_dataset_converter
import autofill_dataset_generator
import pyauto_functional # Must be imported before pyauto
import pyauto
import test_utils
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from webdriver_pages import settings
class AutofillTest(pyauto.PyUITest):
"""Tests that autofill works correctly"""
def setUp(self):
pyauto.PyUITest.setUp(self)
self._driver = self.NewWebDriver()
def Debug(self):
"""Test method for experimentation.
This method will not run automatically.
"""
while True:
raw_input('Hit <enter> to dump info.. ')
self.pprint(self.GetAutofillProfile())
def testFillProfile(self):
"""Test filling profiles and overwriting with new profiles."""
profiles = [{'NAME_FIRST': ['Bob',],
'NAME_LAST': ['Smith',], 'ADDRESS_HOME_ZIP': ['94043',],},
{'EMAIL_ADDRESS': ['sue@example.com',],
'COMPANY_NAME': ['Company X',],}]
credit_cards = [{'CREDIT_CARD_NUMBER': '6011111111111117',
'CREDIT_CARD_EXP_MONTH': '12',
'CREDIT_CARD_EXP_4_DIGIT_YEAR': '2011'},
{'CREDIT_CARD_NAME': 'Bob C. Smith'}]
self.FillAutofillProfile(profiles=profiles, credit_cards=credit_cards)
profile = self.GetAutofillProfile()
self.assertEqual(profiles, profile['profiles'])
self.assertEqual(credit_cards, profile['credit_cards'])
profiles = [ {'NAME_FIRST': ['Larry']}]
self.FillAutofillProfile(profiles=profiles)
profile = self.GetAutofillProfile()
self.assertEqual(profiles, profile['profiles'])
self.assertEqual(credit_cards, profile['credit_cards'])
def testFillProfileMultiValue(self):
"""Test filling a profile with multi-value data."""
profile_expected = [{'NAME_FIRST': ['Bob', 'Joe'],
'NAME_LAST': ['Smith', 'Jones'],
'ADDRESS_HOME_ZIP': ['94043',],},]
self.FillAutofillProfile(profiles=profile_expected)
profile_actual = self.GetAutofillProfile()
self.assertEqual(profile_expected, profile_actual['profiles'])
def testFillProfileCrazyCharacters(self):
"""Test filling profiles with unicode strings and crazy characters."""
# Adding autofill profiles.
file_path = os.path.join(self.DataDir(), 'autofill', 'functional',
'crazy_autofill.txt')
profiles = self.EvalDataFrom(file_path)
self.FillAutofillProfile(profiles=profiles)
self.assertEqual(profiles, self.GetAutofillProfile()['profiles'],
msg='Autofill profile data does not match.')
# Adding credit cards.
file_path = os.path.join(self.DataDir(), 'autofill', 'functional',
'crazy_creditcards.txt')
test_data = self.EvalDataFrom(file_path)
credit_cards_input = test_data['input']
self.FillAutofillProfile(credit_cards=credit_cards_input)
self.assertEqual(test_data['expected'],
self.GetAutofillProfile()['credit_cards'],
msg='Autofill credit card data does not match.')
def testGetProfilesEmpty(self):
"""Test getting profiles when none have been filled."""
profile = self.GetAutofillProfile()
self.assertEqual([], profile['profiles'])
self.assertEqual([], profile['credit_cards'])
def testAutofillInvalid(self):
"""Test filling in invalid values for profiles are saved as-is.
Phone information entered into the prefs UI is not validated or rejected
except for duplicates.
"""
# First try profiles with invalid ZIP input.
without_invalid = {'NAME_FIRST': ['Will',],
'ADDRESS_HOME_CITY': ['Sunnyvale',],
'ADDRESS_HOME_STATE': ['CA',],
'ADDRESS_HOME_ZIP': ['my_zip',],
'ADDRESS_HOME_COUNTRY': ['United States',]}
# Add invalid data for phone field.
with_invalid = without_invalid.copy()
with_invalid['PHONE_HOME_WHOLE_NUMBER'] = ['Invalid_Phone_Number',]
self.FillAutofillProfile(profiles=[with_invalid])
self.assertNotEqual(
[without_invalid], self.GetAutofillProfile()['profiles'],
msg='Phone data entered into prefs UI is validated.')
def testAutofillPrefsStringSavedAsIs(self):
"""Test invalid credit card numbers typed in prefs should be saved as-is."""
credit_card = {'CREDIT_CARD_NUMBER': 'Not_0123-5Checked'}
self.FillAutofillProfile(credit_cards=[credit_card])
self.assertEqual([credit_card],
self.GetAutofillProfile()['credit_cards'],
msg='Credit card number in prefs not saved as-is.')
def _WaitForWebpageFormReadyToFillIn(self, form_profile, tab_index, windex):
"""Waits until an autofill form on a webpage is ready to be filled in.
A call to NavigateToURL() may return before all form elements on the page
are ready to be accessed. This function waits until they are ready to be
filled in.
Args:
form_profile: A dictionary representing an autofill profile in which the
keys are strings corresponding to webpage element IDs.
tab_index: The index of the tab containing the webpage form
|
to check.
windex: The index of the window containing the webpage form to check.
"""
field_check_code = ''.join(
['if (!document.getElementById("%s")) ready = "false";' %
key for key in form_profile.keys()])
js = """
var ready = 'true';
if (!document.getElementById("testform"))
ready = 'false';
%s
window.domAutomationController.send(ready);
""" % field_check_code
self.assertTrue(
self.WaitUntil(lambda: self.Execut
|
eJavascript(js, tab_index, windex),
expect_retval='true'),
msg='Timeout waiting for webpage form to be ready to be filled in.')
def _FillFormAndSubmit(self, datalist, filename, tab_index=0, windex=0):
"""Navigate to the form, input values into the fields, and submit the form.
If multiple profile dictionaries are specified as input, this function will
repeatedly navigate to the form, fill it out, and submit it, once for each
specified profile dictionary.
Args:
datalist: A list of dictionaries, where each dictionary represents the
key/value pairs for profiles or credit card values.
filename: HTML form website file. The file is the basic file name and not
the path to the file. File is assumed to be located in
autofill/functional directory of the data folder.
tab_index: Integer index of the tab to work on; defaults to 0 (first tab).
windex: Integer index of the browser window to work on; defaults to 0
(first window).
"""
url = self.GetHttpURLForDataPath('autofill', 'functional', filename)
for profile in datalist:
self.NavigateToURL(url)
self._WaitForWebpageFormReadyToFillIn(profile, tab_index, windex)
# Fill in and submit the form.
js = ''.join(['document.getElementById("%s").value = "%s";' %
(key, value) for key, value in profile.iteritems()])
js += 'document.getElementById("testform").submit();'
self.SubmitAutofillForm(js, tab_index=tab_index, windex=windex)
def _LuhnCreditCardNumberValidator(self, number):
"""Validates whether a number is valid or invalid using the Luhn test.
Validation example:
1. Example number: 49927398716
2. Reverse the digits: 61789372994
3. Sum the digits in the odd-numbered position for s1:
6 + 7 + 9 + 7 + 9 + 4 = 42
4. Take the digits in the even-numbered position: 1, 8, 3, 2, 9
4.1. Two times each digit in the even-numbered position: 2, 16, 6, 4, 18
4.2. For each resulting value that is now 2 digits, add the digits
|
openstack/python-openstacksdk
|
openstack/baremetal/v1/node.py
|
Python
|
apache-2.0
| 38,102
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.baremetal.v1 import _common
from openstack import exceptions
from openstack import resource
from openstack import utils
class ValidationResult(object):
"""Result of a single interface validation.
:ivar result: Result of a validation, ``True`` for success, ``False`` for
failure, ``None`` for unsupported interface.
:ivar reason: If ``result`` is ``False`` or ``None``, explanation of
the result.
"""
def __init__(self, result, reason):
self.result = result
self.reason = reason
class Node(_common.ListMixin, resource.Resource):
resources_key = 'nodes'
base_path = '/nodes'
# capabilities
allow_create = True
allow_fetch = True
allow_commit = True
allow_delete = True
allow_list = True
allow_patch = True
commit_method = 'PATCH'
commit_jsonpatch = True
_query_mapping = resource.QueryParameters(
'associated', 'conductor_group', 'driver', 'fault',
'provision_state', 'resource_class',
fields={'type': _common.fields_type},
instance_id='instance_uuid',
is_maintenance='maintenance',
)
# The allocation_uuid field introduced in 1.52 (Stein).
_max_microversion = '1.52'
# Properties
#: The UUID of the allocation associated with this node. Added in API
#: microversion 1.52.
allocation_id = resource.Body("allocation_uuid")
#: A string or UUID of the tenant who owns the baremetal node. Added in API
#: microversion 1.50.
owner = resource.Body("owner")
    #: The UUID of the chassis associated with this node. Can be empty or None.
chassis_id = resource.Body("chassis_uuid")
#: The current clean step.
clean_step = resource.Body("clean_step")
    #: Hostname of the conductor currently handling this node. Added in API
    #: microversion 1.49.
conductor = resource.Body("conductor")
#: Conductor group this node is managed by. Added in API microversion 1.46.
conductor_group = resource.Body("conductor_group")
#: Timestamp at which the node was last updated.
created_at = resource.Body("created_at")
#: The current deploy step. Added in API microversion 1.44.
deploy_step = resource.Body("deploy_step")
#: The name of the driver.
driver = resource.Body("driver")
#: All the metadata required by the driver to manage this node. List of
#: fields varies between drivers, and can be retrieved from the
#: :class:`openstack.baremetal.v1.driver.Driver` resource.
driver_info = resource.Body("driver_info", type=dict)
#: Internal metadata set and stored by node's driver. This is read-only.
driver_internal_info = resource.Body("driver_internal_info", type=dict)
#: A set of one or more arbitrary metadata key and value pairs.
extra = resource.Body("extra")
#: Fault type that caused the node to enter maintenance mode.
#: Introduced in API microversion 1.42.
fault = resource.Body("fault")
#: The UUID of the node resource.
id = resource.Body("uuid", alternate_id=True)
#: Information used to customize the deployed image, e.g. size of root
#: partition, config drive in the form of base64 encoded string and other
#: metadata.
instance_info = resource.Body("instance_info")
#: UUID of the nova instance associated with this node.
instance_id = resource.Body("instance_uuid")
#: Override enabling of automated cleaning. Added in API microversion 1.47.
is_automated_clean_enabled = resource.Body("automated_clean", type=bool)
#: Whether console access is enabled on this node.
is_console_enabled = resource.Body("console_enabled", type=bool)
#: Whether node is currently in "maintenance mode". Nodes put into
#: maintenance mode are removed from the available resource pool.
is_maintenance = resource.Body("maintenance", type=bool)
# Whether the node is protected from undeploying. Added in API microversion
# 1.48.
is_protected = resource.Body("protected", type=bool)
#: Any error from the most recent transaction that started but failed to
#: finish.
last_error = resource.Body("last_error")
#: A list of relative links, including self and bookmark links.
links = resource.Body("links", type=list)
#: user settable description of the reason why the node was placed into
#: maintenance mode.
maintenance_reason = resource.Body("maintenance_reason")
#: Human readable identifier for the node. May be undefined. Certain words
#: are reserved. Added in API microversion 1.5
name = resource.Body("name")
#: Links to the collection of ports on this node.
ports = resource.Body("ports", type=list)
#: Links to the collection of portgroups on this node. Available since
#: API microversion 1.24.
port_groups = resource.Body("portgroups", type=list)
#: The current power state. Usually "power on" or "power off", but may be
#: "None" if service is unable to determine the power state.
power_state = resource.Body("power_state")
#: Physical characteristics of the node. Content populated by the service
#: during inspection.
properties = resource.Body("properties", type=dict)
# The reason why this node is protected. Added in API microversion 1.48.
protected_reason = resource.Body("protected_reason")
#: The current provisioning state of the node.
provision_state = resource.Body("provision_state")
#: The current RAID configuration of the node.
raid_config = resource.Body("raid_config")
    #: The name of a service conductor host which is holding a lock on this
#: node, if a lock is held.
reservation = resource.Body("reservation")
#: A string to be used by external schedulers to identify this node as a
#: unit of a specific type of resource. Added in API microversion 1.21.
resource_class = resource.Body("resource_class")
#: Links to the collection of states.
states = resource.Body("states", type=list)
#: The requested state if a provisioning action has been requested. For
    #: example, ``AVAILABLE``, ``DEPLOYING``, ``DEPLOYWAIT``, ``DEPLOYING``,
#: ``ACTIVE`` etc.
target_provision_state = resource.Body("target_provision_state")
#: The requested state during a state transition.
target_power_state = resource.Body("target_power_state")
#: The requested RAID configuration of the node which will be applied when
#: the node next transitions through the CLEANING state.
target_raid_config = resource.Body("target_raid_config")
#: Traits of the node. Introduced in API microversion 1.37.
traits = resource.Body("traits", type=list)
#: Timestamp at which the node was last updated.
updated_at = resource.Body("updated_at")
# Hardware interfaces grouped together for convenience.
#: BIOS interface to use when setting BIOS properties of the node.
#: Introduced in API microversion 1.40.
bios_interface = resource.Body("bios_interface")
#: Boot interface to use when configuring boot of the node.
#: Introduced in API microversion 1.31.
boot_interface = resource.Body("boot_interface")
#: Console interface to use when working with serial console.
#: Introduced in API microversion 1.31.
console_interface = resource.Body("console_interface")
#: Deploy interface to use when deploying the node.
#: Introduced in API microversion 1.31.
deploy_interface = resource.Body("deploy_interface")
#: Inspect interface to use when inspecting the node.
#: Introduced in API microversion 1.31.
inspect_interface = resource.Body
|
royharoush/rtools
|
dnc.py
|
Python
|
gpl-2.0
| 13,314
| 0.026288
|
#! /usr/bin/env python
# Copyright (C) 2009 Sebastian Garcia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#
# Author:
# Sebastian Garcia eldraco@gmail.com
#
# Based on code from Twisted examples.
# Copyright (c) Twisted Matrix Laboratories.
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
#
# CHANGELOG
# 0.6
# - Added some more chars to the command injection prevention.
# - Clients decide the nmap scanning rate.
# - If the server sends a --min-rate parameter, we now delete it. WE control the scan speed.
# - Clients decide the nmap scanning rate.
# - Exit if nmap is not installed
# - Stop sending the euid, it was a privacy violation. Now we just say if we are root or not.
#
# TODO
# - privileges on nmap
#
try:
from OpenSSL import SSL
except:
print 'You need openssl libs for python. apt-get install python-openssl'
exit(-1)
import sys
try:
from twisted.internet.protocol import ClientFactory, ReconnectingClientFactory
from twisted.protocols.basic import LineReceiver
from twisted.internet import ssl, reactor
except:
print 'You need twisted libs for python. apt-get install python-twisted'
exit(-1)
import time, getopt, shlex
from subprocess import Popen
from subprocess import PIPE
import os
import random
# Global variables
server_ip = False
server_port = 46001
vernum = '0.6'
# Your name alias defaults to anonymous
alias='Anonymous'
debug=False
# Do not use a max rate by default
maxrate = False
# End global variables
# Print version information and exit
def version():
print "+----------------------------------------------------------------------+"
print "| dnmap Client Version "+ vernum +" |"
print "| This program is free software; you can redistribute it and/or modify |"
print "| it under the terms of the GNU General Public License as published by |"
print "| the Free Software Foundation; either version 2 of the License, or |"
print "| (at your option) any later version. |"
print "| |"
print "| Author: Garcia Sebastian, eldraco@gmail.com |"
print "| www.mateslab.com.ar |"
print "+----------------------------------------------------------------------+"
print
# Print help information and exit:
def usage():
print "+----------------------------------------------------------------------+"
print "| dnmap Client Version "+ vernum +" |"
print "| This program is free software; you can redistribute it and/or modify |"
print "| it under the terms of the GNU General Public License as published by |"
print "| the Free Software Foundation; either version 2 of the License, or |"
print "| (at your option) any later version. |"
print "| |"
print "| Author: Garcia Sebastian, eldraco@gmail.com |"
print "| www.mateslab.com.ar |"
print "+----------------------------------------------------------------------+"
print "\nusage: %s <options>" % sys.argv[0]
print "options:"
print " -s, --server-ip IP address of dnmap server."
print " -p, --server-port Port of dnmap server. Dnmap port defaults to 46001"
print " -
|
a, --alias Your name alias so we can give credit to you for your help. Optional"
print " -d, --debug Debuging."
print " -m, --max-rate Force nmaps commands to use at most this rate. Useful to slow
|
nmap down. Adds the --max-rate parameter."
print
sys.exit(1)
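# check_clean() below is a small command-injection guard: it rejects any command
# line containing one of the shell metacharacters listed in outbound_chars.
# For example (illustrative):
#   check_clean('nmap -sS 10.0.0.1')            -> True
#   check_clean('nmap -sS 10.0.0.1; rm -rf /')  -> False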
def check_clean(line):
global debug
try:
outbound_chars = [';', '#', '`']
ret = True
for char in outbound_chars:
if char in line:
ret = False
return ret
except Exception as inst:
print 'Problem in dataReceived function'
print type(inst)
print inst.args
print inst
class NmapClient(LineReceiver):
def connectionMade(self):
global client_id
global alias
global debug
print 'Client connected succesfully...'
print 'Waiting for more commands....'
if debug:
print ' -- Your client ID is: {0} , and your alias is: {1}'.format(str(client_id), str(alias))
euid = os.geteuid()
# Do not send the euid, just tell if we are root or not.
if euid==0:
# True
iamroot = 1
else:
# False
iamroot = 0
# 'Client ID' text must be sent to receive another command
line = 'Starts the Client ID:{0}:Alias:{1}:Version:{2}:ImRoot:{3}'.format(str(client_id),str(alias),vernum,iamroot)
if debug:
print ' -- Line sent: {0}'.format(line)
self.sendLine(line)
#line = 'Send more commands to Client ID:{0}:Alias:{1}:\0'.format(str(client_id),str(alias))
line = 'Send more commands'
if debug:
print ' -- Line sent: {0}'.format(line)
self.sendLine(line)
def dataReceived(self, line):
global debug
global client_id
global alias
        # If a wait is received, just wait.
if 'Wait' in line:
sleeptime = int(line.split(':')[1])
time.sleep(sleeptime)
# Ask for more
# line = 'Send more commands to Client ID:{0}:Alias:{1}:'.format(str(client_id),str(alias))
line = 'Send more commands'
if debug:
print ' -- Line sent: {0}'.format(line)
self.sendLine(line)
else:
# dataReceived does not wait for end of lines or CR nor LF
if debug:
print "\tCommand Received: {0}".format(line.strip('\n').strip('\r'))
# A little bit of protection from the server
if check_clean(line):
# Store the nmap output file so we can send it to the server later
try:
nmap_output_file = line.split('-oA ')[1].split(' ')[0].strip(' ').strip("\n")
except IndexError:
random_file_name = str(random.randrange(0, 100000000, 1))
print '+ No -oA given. We add it anyway so not to lose the results. Added -oA ' + random_file_name
line = line + '-oA ' + random_file_name
nmap_output_file = line.split('-oA ')[1].split(' ')[0].strip(' ').strip("\n")
try:
nmap_returncode = -1
# Check for rate commands
                    # Verify that the server is NOT trying to force us to be faster. NMAP PARAMETER DEPENDENCE
if 'min-rate' in line:
temp_vect = shlex.split(line)
word_index = temp_vect.index('--min-rate')
# Just delete the --min-rate parameter with its value
nmap_command = temp_vect[0:word_index] + temp_vect[word_index + 1:]
else:
nmap_command = shlex.split(line)
# Do we have to add a max-rate parameter?
if maxrate:
nmap_command.append('--max-rate')
nmap_command.append(str((maxrate)))
                    # Strip the command, so we can control that only nmap is really executed
nmap_command = nmap_command[1:]
nmap_command.insert(0, 'nmap')
# Recreate the final command to show it
nmap_command_string = ''
for i in nmap_command:
nmap_command_string = nmap_command_string + i + ' '
print "\tCommand Executed: {0}".format(nmap_command_string)
# For some reason this executable thing does not work! seems to change nmap sP for sS
# nmap_process = Popen(nmap_command,executable='nmap',stdout=PIPE)
nmap_process = Popen(nmap_command, stdout=PIPE)
raw_nmap_output = nmap_process.communicate()[0]
nmap_returncode = nmap_process.returncode
except OSError:
print 'You don\'t have nmap installed. You ca
|
katemsu/kate_website
|
kate3/mobile_apps/migrations/0002_auto__add_field_app_created_at.py
|
Python
|
mit
| 2,545
| 0.007859
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'App.created_at'
db.add_column('mobile_apps_app', 'created_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'App.created_at'
db.delete_column('mobile_apps_app', 'created_at')
models = {
'core.level': {
'Meta': {'ordering': "['order']", 'object_name': 'Level'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'mobile_apps.app': {
'Meta': {'object_name': 'App'},
'content_areas': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'content_areas'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Level']"}),
'cost': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'levels': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'levels'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Level']"}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mobile_apps.Type']"})
},
'mobile_apps.type': {
'Meta': {'object_name': 'Type'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['mobile_apps']
|
FilipeMaia/afnumpy
|
afnumpy/core/shape_base.py
|
Python
|
bsd-2-clause
| 1,076
| 0.009294
|
from . import numeric as _nx
from .numeric import asanyarray, newaxis
def atleast_1d(*arys):
res = []
for ary in arys:
ary = asanyarray(ary)
if len(ary.shape) == 0 :
result = ary.reshape(1)
else :
result = ary
res.append(result)
if len(res) == 1:
return res[0]
else:
return res
def atleast_2d(*arys):
res = []
for ary in arys:
ary = asanyarray(ary)
if len(ary.shape) == 0 :
result = ary.reshape(1, 1)
elif len(ary.shape) == 1 :
result = ary[newaxis,:]
else :
result = ary
res.append(result)
if len(res) == 1:
return res[0]
else:
return res
def vstack(tup):
return _nx.concatenate([atleast_2d(_m) for _m in tup], 0)
def hstack(tup):
    arrs = [atleast_1d(_m) for _m in tup]
# As a special case, dimension 0 of 1-dimensional arrays is "horizontal"
if arrs[0].ndim == 1:
return _nx.concatenate(arrs, 0)
else:
return _nx.concatenate(arrs, 1)
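# Illustrative behaviour sketch (mirroring numpy's semantics, not part of the
# original module): hstack of two 1-D arrays of length 3 yields one length-6
# array, while hstack of two (2, 3) arrays concatenates columns into shape (2, 6).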
|
Crimson-Star-Software/data-combine
|
datacombine/datacombine/migrations/0001_initial.py
|
Python
|
mit
| 7,003
| 0.003427
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-25 16:19
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Address',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('address_type', models.CharField(choices=[('BU', 'Business'), ('PE', 'Personal')], max_length=2)),
('city', models.CharField(max_length=32, null=True)),
('country_code', models.CharField(max_length=2, null=True)),
('cc_id', models.CharField(max_length=36)),
('line1', models.CharField(max_length=100, null=True)),
('line2', models.CharField(max_length=100, null=True)),
('line3', models.CharField(max_length=100, null=True)),
('postal_code', models.CharField(max_length=10, null=True)),
('state', models.CharField(max_length=20, null=True)),
('state_code', models.CharField(max_length=2, null=True)),
('sub_postal_code', models.CharField(max_length=20, null=True)),
],
),
migrations.CreateModel(
name='ConstantContactList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cc_id', models.IntegerField()),
('status', models.CharField(choices=[('AC', 'Active'), ('HI', 'Hidden')], max_length=2)),
('name', models.CharField(max_length=48)),
('created_date', models.DateTimeField()),
('modified_date', models.DateTimeField()),
],
),
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('confirmed', models.NullBooleanField()),
('company_name', models.CharField(max_length=100, null=True)),
('created_date', models.DateTimeField()),
('first_name', models.CharField(max_length=50, null=True)),
('middle_name', models.CharField(max_length=50, null=True)),
('last_name', models.CharField(max_length=50, null=True)),
('cc_id', models.IntegerField()),
('cc_modified_date', models.DateTimeField()),
('prefix_name', models.CharField(max_length=10, null=True)),
('job_title', models.CharField(max_length=50, null=True)),
('source', models.CharField(max_length=50, null=True)),
('status', models.CharField(choices=[('UN', 'Unconfirmed'), ('AC', 'Active'), ('OP', 'Optout'), ('RE', 'Removed'), ('NO', 'Non Subscriber')], max_length=2)),
('addresses', models.ManyToManyField(to='datacombine.Address')),
],
),
migrations.CreateModel(
name='EmailAddress',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('confirm_status', models.CharField(choices=[('CO', 'Confirmed'), ('NC', 'No Confirmation Required')], max_length=3)),
('cc_id', models.CharField(max_length=36)),
('status', models.CharField(choices=[('UN', 'Unconfirmed'), ('AC', 'Active'), ('OP', 'Optout'), ('RE', 'Removed'), ('NO', 'Non Subscriber')], max_length=2)),
('opt_in_date', models.DateTimeField(null=True)),
('opt_out_date', models.DateTimeField(null=True)),
('email_address', models.EmailField(max_length=254)),
('opt_in_source', models.CharField(choices=[('AO', 'Action by Owner'), ('AV', 'Action by Visitor')], max_length=2)),
],
),
migrations.CreateModel(
name='Note',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField()),
('cc_id', models.CharField(max_length=36)),
('modified_date', models.DateTimeField()),
('note', models.TextField()),
],
),
migrations.CreateModel(
name='Phone',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('area_code', models.CharField(max_length=3, null=True)),
('number', models.CharField(max_length=7)),
('extension', models.CharField(max_length=7, null=True)),
],
),
migrations.CreateModel(
name='UserStatusOnCCList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(choices=[('AC', 'Active'), ('HI', 'Hidden')], max_length=2)),
                ('cclist', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='datacombine.ConstantContactList')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='datacombine.Contact')),
],
),
migrations.AddField(
model_name='contact',
name='cc_lists',
field=models.ManyToManyField(through='datacombine.UserStatusOnCCList', to='datacombine.ConstantContactList'),
),
migrations.AddField(
model_name='contact',
name='cell_phone',
field=models.ManyToManyField(related_name='_contact_cell_phone_+', to='datacombine.Phone'),
),
migrations.AddField(
model_name='contact',
name='email_addresses',
field=models.ManyToManyField(to='datacombine.EmailAddress'),
),
migrations.AddField(
model_name='contact',
name='fax',
field=models.ManyToManyField(related_name='_contact_fax_+', to='datacombine.Phone'),
),
migrations.AddField(
model_name='contact',
name='home_phone',
field=models.ManyToManyField(related_name='_contact_home_phone_+', to='datacombine.Phone'),
),
migrations.AddField(
model_name='contact',
name='notes',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='datacombine.Note'),
),
migrations.AddField(
model_name='contact',
name='work_phone',
field=models.ManyToManyField(related_name='_contact_work_phone_+', to='datacombine.Phone'),
),
]
|
allegro/django-powerdns-dnssec
|
powerdns/migrations/0015_auto_20151214_0632.py
|
Python
|
bsd-2-clause
| 1,537
| 0.002602
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
import powerdns.models.powerdns
class Migration(migrations.Migration):
dependencies = [
('powerdns', '0014_auto_20151124_0505'),
]
operations = [
migrations.AddField(
model_name='domain',
name='unrestricted',
            field=models.BooleanField(default=False, verbose_name='Unrestricted', help_text="Can users that are not owners of this domain add recordsto it without owner's permission?"),
),
migrations.AddField(
model_name='domainrequest',
name='unrestricted',
            field=models.BooleanField(default=False, verbose_name='Unrestricted', help_text="Can users that are not owners of this domain add recordsto it without owner's permission?"),
),
migrations.AddField(
model_name='domaintemplate',
name='unrestricted',
field=models.BooleanField(default=False, verbose_name='Unrestricted', help_text="Can users that are not owners of this domain add recordsto it without owner's permission?"),
),
migrations.AlterField(
model_name='domain',
name='name',
field=models.CharField(max_length=255, validators=[django.core.validators.RegexValidator('^(\\*\\.)?([_A-Za-z0-9-]+\\.)*([A-Za-z0-9])+$'), powerdns.models.powerdns.SubDomainValidator()], unique=True, verbose_name='name'),
),
]
|
jlopezpena/bearcart
|
docs/conf.py
|
Python
|
mit
| 7,820
| 0.007417
|
# -*- coding: utf-8 -*-
#
# bearcart documentation build configuration file, created by
# sphinx-quickstart on Mon May 6 20:08:35 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../bearcart'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
autoclass_content = 'both'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'bearcart'
copyright = u'2013, Rob Story'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.2'
# The full version, including alpha/beta/rc tags.
release = '0.1.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'bearcartdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'bearcart.tex', u'bearcart Documentation',
u'Rob Story', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bearcart', u'bearcart Documentation',
[u'Rob Story'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'bearcart', u'bearcart Documentation',
u'Rob Story', 'bearcart', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
avishek-r-kumar/dfitools
|
MD_DFI_analysis/makecpptraj.py
|
Python
|
bsd-3-clause
| 1,742
| 0.030425
|
#!/usr/bin/env python
"""
Makecpptraj
===========
Usage
-----
./makecpptraj.py title trajlen interval parm traj
"""
import sys
def getwindow(trajlen,interval=2000):
i = trajlen
timewindows=[]
while i > 0:
timewindows.append(i)
i = i - interval
timewindows.append(0)
return timewindows[::-1]
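# Illustrative example (not in the original script): getwindow(6000, 2000)
# returns [0, 2000, 4000, 6000], i.e. the window boundaries from 0 up to the
# trajectory length, spaced by `interval`.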
def makecpptrajscript(title,start,end,parm="0.prmtop.parm7",traj="0.mdtrj.crd.gz"):
cpptrajdic={'title':title, 'start':start, 'end':end, 'parm':parm, 'traj':traj}
cppscript="""
parm {parm}
trajin {traj} {start} {end} 1
center
rms first @CA,C,N
matrix mwcovar name mwcovarmat @CA out {title}_{start}_{end}_mwcovarmat.dat
analyze matrix mwcovarmat name evecs out {title}_{start}_{end}_evecs.dat vecs 30
run
quit
"""
return cppscript.format(**cpptrajdic)
def outputcpptrajscript(title,trajlen,interval=2000,parm="0.prmtop.parm7",traj="0.mdtrj.crd.gz"):
trajcuts = getwindow(trajlen,interval=interval)
for i in range(len(trajcuts)):
        if i+1 >= len(trajcuts):
break
fname="{title}_{start}_{end}.cpptraj".format(title=title,start=trajcuts[i],end=trajcuts[i+1])
with open(fname,'w') as outfile:
print "Writing out to:",fname
outfile.write( makecpptrajscript(title,trajcuts[i], trajcuts[i+1],parm,traj) )
if __name__ == "__main__":
if(len(sys.argv)) < 2:
print __doc__
        sys.exit()
title=sys.argv[1]
trajlen=int(sys.argv[2])
interval=int(sys.argv[3])
if(len(sys.argv)) > 4:
parm = sys.argv[4]
traj = sys.argv[5]
outputcpptrajscript(title,trajlen,interval,parm,traj)
else:
outputcpptrajscript(title,trajlen,interval)
|
juice-ryang/online-judge
|
OnlineJudgeServer/process_capsule.py
|
Python
|
gpl-3.0
| 5,696
| 0
|
"""Process Capsule: The PExpect Wrapper."""
from errno import ESRCH as NoSuchProcess
from os import kill, environ
from os.path import join
from signal import SIGTERM as CTRL_C
from chardet import detect as Chardet
from pexpect import (
spawn,
TIMEOUT,
EOF,
)
__author__ = "Minho Ryang (minhoryang@gmail.com)"
class ProcessCapsule(object):
"""Process Capsule: The PExpect Wrapper.
It's designed for
- receiving stderr
- detecting segfault++
- dying gracefully.
>>> with ProcessCapsule('a.out') as process:
... process.run()
... process.read()
"""
_SEGFAULT = '.*Segmentation fault.*'
_CONDITIONS = [_SEGFAULT, EOF, TIMEOUT]
_TIMEOUT = .05
def __init__(self, program, logfile=None):
self.program = program
self.logfile = logfile
self._readpos = 0
self._runtime = None
self._initialized_pid = None
def __del__(self):
"""Rest in peace, you're going to **die gracefully**."""
if self._initialized_pid:
            try:
                kill(self._initialized_pid, CTRL_C)
except OSError as exc:
if exc.errno != NoSuchProcess:
raise exc
def __enter__(self):
return self
def __exit__(self, *exc):
self.__del__()
def __cmd__(self):
return 'bash -c "./%s 2>&1 #%s"' % (self.program, self)
def run(self, with_check=True, flush_by_read=True, timeout=None):
"""First of all, **Show must go on**, whether you like it or not."""
if self._initialized_pid:
raise self.ALREADYLAUNCHED()
self._runtime = spawn(
self.__cmd__(),
logfile=self.logfile,
ignore_sighup=False)
self._initialized_pid = self._runtime.pid
if with_check:
return self.__try_read__(with_read=flush_by_read, timeout=timeout)
def read(self, timeout=None):
"""Returns the text from stdin/stdout/stderr streams."""
try:
if self._initialized_pid:
self.__try_read__(with_read=False, timeout=timeout)
else:
self.run(flush_by_read=False)
except self.Exceptions as handling_my_excptions_only:
raise handling_my_excptions_only
return self.__readpos__()
def write(self, this="", response=True, timeout=None):
"""Returns with/without @response."""
if not self._initialized_pid:
self.run() # flushed
retval = self._runtime.sendline(this)
if response:
return (retval, self.__try_read__(timeout=timeout))
return (retval, None)
def expect(self, queries, where=None, timeout=None):
"""Returns expected (@query, @where)."""
if not self._initialized_pid:
self.run() # flushed
text = where if where else self.__try_read__(timeout=timeout)
if isinstance(queries, (list, tuple)):
for query in queries:
if query in text:
return (query, text)
elif isinstance(queries, str):
for queries in text:
return (queries, text)
return (None, text)
def __try_read__(self, with_read=True, timeout=None):
"""Every steps you take, watch out! (SegFault, Dead, ...)"""
if not self._initialized_pid:
self.run(with_check=False)
selected = self._runtime.expect(
self._CONDITIONS,
timeout=self._TIMEOUT if not timeout else timeout
)
if self._CONDITIONS[selected] == self._SEGFAULT:
self._runtime.close()
# TODO: Propagate self._runtime.exitstatus .signalstatus
raise self.SEGFAULT(self.__readpos__())
elif self._CONDITIONS[selected] == EOF:
raise self.DEAD(self.__readpos__())
elif with_read:
return self.read()
def __readpos__(self):
"""Read from **just before**."""
current = len(self._runtime.before)
wanted = self._runtime.before[self._readpos:current]
self._readpos = current
det = Chardet(wanted)
if det['encoding']:
return wanted.decode(det['encoding'])
return wanted.decode('utf-8') # TODO
def is_dead(self):
return not self._runtime.isalive()
class Exceptions(Exception):
"""Grouping all exceptions controlled by here."""
class SEGFAULT(Exceptions):
"""Fired when SegFault detected."""
class DEAD(Exceptions):
"""Fired when dead unexpectedly."""
class ALREADYLAUNCHED(Exceptions):
"""Fired when calling run() more than once."""
DEFAULT_PYTHON = 'python'
if environ['VIRTUAL_ENV']:
DEFAULT_PYTHON = join(environ['VIRTUAL_ENV'], 'bin/python')
class PythonCapsule(ProcessCapsule):
def __init__(self, program, logfile=None, python=DEFAULT_PYTHON):
super().__init__(program, logfile=logfile)
self.python = python
def __cmd__(self):
return 'bash -c "%s -u %s 2>&1 #%s"' % (
self.python,
self.program,
self,
)
def prompt_spliter(result, cmd='', prompt='', splits='\n'):
"""Split the output without prompt environment.
For removing all empty results, using filter method.
Learned from:
stackoverflow.com/questions/3845423/remove-empty-strings-from-a-list-of-strings
"""
output = []
for i in result.split(splits):
output.append(i.strip())
for _ in range(output.count(cmd)):
output.remove(cmd)
for _ in range(output.count(prompt)):
output.remove(prompt)
return list(filter(None, output))
|
marscher/PyEMMA
|
pyemma/coordinates/tests/test_stride.py
|
Python
|
lgpl-3.0
| 4,463
| 0.002241
|
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
from __future__ import absolute_import
import unittest
import os
import tempfile
import numpy as np
import mdtraj
import pyemma.coordinates as coor
class TestStride(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.dim = 3 # dimension (must be divisible by 3)
N_trajs = 10 # number of trajectories
# create topology file
cls.temppdb = tempfile.mktemp('.pdb')
with open(cls.temppdb, 'w') as f:
for i in range(cls.dim//3):
print(('ATOM %5d C ACE A 1 28.490 31.600 33.379 0.00 1.00' % i), file=f)
cls.trajnames = [] # list of xtc file names
cls.data = []
for i in range(N_trajs):
# set up data
N = int(np.random.rand()*1000+1000)
xyz = np.random.randn(N, cls.dim//3, 3).astype(np.float32)
cls.data.append(xyz)
t = np.arange(0, N)
# create trajectory file
traj = mdtraj.load(cls.temppdb)
traj.xyz = xyz
traj.time = t
tempfname = tempfile.mktemp('.xtc')
traj.save(tempfname)
cls.trajnames.append(tempfname)
def test_length_and_content_feature_reader_and_TICA(self):
for stride in range(1, 100, 23):
r = coor.source(self.trajnames, top=self.temppdb)
t = coor.tica(data=r, lag=2, dim=2)
# subsample data
out_tica = t.get_output(stride=stride)
out_reader = r.get_output(stride=stride)
# get length in different ways
len_tica = [x.shape[0] for x in out_tica]
len_reader = [x.shape[0] for x in out_reader]
len_trajs = t.trajectory_lengths(stride=stride)
len_ref = [(x.shape[0]-1)//stride+1 for x in self.data]
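            # e.g. (illustrative) a 1000-frame trajectory read with stride 24 keeps (1000-1)//24 + 1 = 42 frames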
# print 'len_ref', len_ref
# compare length
np.testing.assert_equal(len_trajs, len_ref)
self.assertTrue(len_ref == len_tica)
self.assertTrue(len_ref == len_reader)
# compare content (reader)
for ref_data, test_data in zip(self.data, out_reader):
ref_data_reshaped = ref_data.reshape((ref_data.shape[0], ref_data.shape[1]*3))
self.assertTrue(np.allclose(ref_data_reshaped[::stride, :], test_data, atol=1E-3))
def test_content_data_in_memory(self):
# prepare test data
N_trajs = 10
d = []
for _ in range(N_trajs):
N = int(np.random.rand()*1000+10)
d.append(np.random.randn(N, 10).astype(np.float32))
# read data
reader = coor.source(d)
# compare
for stride in range(1, 10, 3):
out_reader = reader.get_output(stride=stride)
for ref_data, test_data in zip(d, out_reader):
self.assertTrue(np.all(ref_data[::stride] == test_data)) # here we can test exact equality
    def test_parametrize_with_stride(self):
for stride in range(1, 100, 23):
r = coor.source(self.trajnames, top=self.temppdb)
tau = 5
try:
t = coor.tica(r, lag=tau, stride=stride, dim=2)
# force_eigenvalues_le_one=True enables an internal consistency check in TICA
                self.assertTrue(np.all(t.eigenvalues <= 1.0+1.E-12))
except RuntimeError:
assert tau % stride != 0
@classmethod
def tearDownClass(cls):
for fname in cls.trajnames:
os.unlink(fname)
os.unlink(cls.temppdb)
super(TestStride, cls).tearDownClass()
if __name__ == "__main__":
unittest.main()
|
bright-pan/my_data_sync
|
old2new.py
|
Python
|
gpl-2.0
| 11,662
| 0.017869
|
# -*- coding: utf-8 -*-
__author__ = 'Bright Pan'
# Note: the postdate column of the yehnet_customer table in the old database must be renamed to add_time
import types
import urllib.request
try:
import simplejson as json
except Exception:
import json
import pymysql
import datetime
import sys
print(sys.getdefaultencoding())
def f(x):
    if isinstance(x, str):
return x.encode("gbk", "ignore").decode("gbk")
return x
class DB(object):
def __init__(self, host="localhost", user="root", passwd="", old_db="", new_db="",charset="utf8"):
self.new_conn=pymysql.connect(host=host,user=user,passwd=passwd,db=new_db,charset=charset, cursorclass=pymysql.cursors.DictCursor)
        self.old_conn=pymysql.connect(host=host,user=user,passwd=passwd,db=old_db,charset=charset, cursorclass=pymysql.cursors.DictCursor)
self.new_cur = self.new_conn.cursor()
self.old_cur = self.old_conn.cursor()
def db_clear_data(self):
self.new_cur.execute("show tables")
qs_new = self.new_cur.fetchall()
print(qs_new)
for each in qs_new:
self.new_cur.execute("truncate %s" % each['Tables_in_new'])
self.new_conn.commit()
def db_commit(self):
self.new_conn.commit()
self.old_conn.commit()
def table_copy_process(self, new_table="", old_table=""):
result = self.new_cur.execute("show columns from %s" % new_table)
qs_new = self.new_cur.fetchall()
new_cols = {}
for each in qs_new:
new_cols[each['Field']] = each
result = self.old_cur.execute("select * from %s" % old_table)
qs_old = self.old_cur.fetchall()
for each in qs_old:
sql_key = ""
sql_value = ""
for key,value in each.items():
#print(type(key),type(value))
cols_attr = new_cols.get(key)
#print(cols_attr)
if cols_attr:
if value is None:
#if cols_attr['Default'] is None:
#sql += "%s=''," % (key)
#sql_key += key + ','
pass
else:
if not cols_attr['Type'].find("date"):
#print(cols_attr)
if isinstance(value, datetime.datetime):
value = value.strftime("%Y-%m-%d %H:%M:%S")
else:
value = datetime.datetime.fromtimestamp(value).strftime("%Y-%m-%d %H:%M:%S")
#sql += "%s='%s'," % (key, value)
sql_key += "`%s`," % key
if isinstance(value, str):
sql_value += "\"%s\"," % pymysql.escape_string(value)
else:
sql_value += "\"%s\"," % value
sql = "INSERT INTO %s (%s) VALUES (%s)" % (new_table,sql_key[:-1],sql_value[:-1])
try:
self.new_cur.execute(sql)
#print(sql)
except pymysql.err.IntegrityError as e:
pass
#print(e)
#print(sql)
            except pymysql.err.ProgrammingError as e:  # catch SQL programming errors
print(e)
self.db_commit()
def db_copy_process(self):
self.db_clear_data()
result = self.new_cur.execute("show tables")
new_tables = self.new_cur.fetchall()
new_tables_list = []
for each in new_tables:
new_tables_list.append(each["Tables_in_new"])
print(new_tables_list)
result = self.old_cur.execute("show tables")
old_tables = self.old_cur.fetchall()
old_tables_list = []
for each in old_tables:
old_tables_list.append(each["Tables_in_old"])
print(old_tables_list)
for each in new_tables_list:
print(each)
try:
old_tables_list.index(each)
new_tables_list.index(each)
except ValueError:
continue
self.table_copy_process(each, each)
def process_update(self, from_table='yehnet_sales', to_table="yehnet_admin", set_dict={}, where_condition=("adminid","id")):
map_dict = {"username":(0,"username"),
"email":(0,"email"),"phone":(0,"phone"),"company":(0,"company"),"department":(0,"department"),"job":(0,"job"),"add_time":(1,"add_time")}
if set_dict:
map_dict = set_dict
self.old_cur.execute("select * from %s" % from_table)
qs_old = self.old_cur.fetchall()
# self.new_cur.execute("select * from yehnet_admin")
# qs_new = self.new_cur.fetchall()
# new_dict = {}
# for each in qs_new:
# new_dict[each['admin_id']] = each
for each in qs_old:
#print(each)
sql = "UPDATE `%s` SET " % to_table
for from_cols, to_cols in map_dict.items():
key = to_cols[1]
value = each[from_cols]
if value is None:
pass
else:
if to_cols[0]:
if isinstance(value, str):
value = int(value)
value = datetime.datetime.fromtimestamp(value).strftime("%Y-%m-%d %H:%M:%S")
if isinstance(value, str):
sql += "`%s` = \"%s\"," % (key, pymysql.escape_string(value))
else:
sql += "`%s` = \"%s\"," % (key, value)
sql = sql[:-1] + " WHERE `%s` = \"%s\"" % (where_condition[1], each[where_condition[0]])
try:
#print(sql)
self.new_cur.execute(sql)
except pymysql.err.IntegrityError as e:
pass
#print(e)
#print(sql)
            except pymysql.err.ProgrammingError as e:  # catch SQL programming errors
print(e)
self.db_commit()
def process_insert(self, from_table='yehnet_sales', to_table="yehnet_admin", set_dict={}, project=False):
map_dict = {"username":(0,"username"),
"email":(0,"email"),"phone":(0,"phone"),"company":(0,"company"),"department":(0,"department"),"job":(0,"job"),"add_time":(1,"add_time")}
if set_dict:
map_dict = set_dict
if project:
sql = "select * from %s " % (from_table)
sql += project
self.old_cur.execute(sql)
else:
self.old_cur.execute("select * from %s" % from_table)
qs_old = self.old_cur.fetchall()
# self.new_cur.execute("select * from yehnet_admin")
# qs_new = self.new_cur.fetchall()
# new_dict = {}
# for each in qs_new:
# new_dict[each['admin_id']] = each
for each in qs_old:
#print(list(map(f,list(each.keys()))))
sql_key = ""
sql_value = ""
for from_cols, to_cols in map_dict.items():
#print(from_cols,to_cols)
key = to_cols[1]
value = each[from_cols]
if value is None:
pass
else:
                    if to_cols[0] == 1:
if isinstance(value, str):
value = int(value)
value = datetime.datetime.fromtimestamp(value).strftime("%Y-%m-%d %H:%M:%S")
                    if to_cols[0] == 2:
value = to_cols[2]
                    if to_cols[0] == 3:
value = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
sql_key += "`%s`," % key
if isinstance(value, str):
sql_value += "\"%s\"," % pymysql.escape_string(value)
else:
sql_value += "\"%s\"," % value
# if isinstance(value, str):
# sql += "`%s` = \"%s\"," % (key, pymysql.escape_string(value))
# else:
# sql += "`%s` = \"%s\"," % (key, value)
|
J535D165/recordlinkage
|
tests/test_measures.py
|
Python
|
bsd-3-clause
| 4,180
| 0.000478
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import recordlinkage as rl
import numpy
import pandas
FULL_INDEX = pandas.MultiIndex.from_product(
[[1, 2, 3], [1, 2, 3]], # 3x3 matrix
names=['first', 'second'])
LINKS_TRUE = pandas.MultiIndex.from_tuples(
[(1, 1), (2, 2), (3, 3)], # the diagonal
names=['first', 'second'])
LINKS_PRED = pandas.MultiIndex.from_tuples(
[(1, 1), (2, 1), (3, 1), (1, 2)], # L shape
names=['first', 'second'])
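# Worked example (illustrative): with the 3x3 full index, the diagonal as true links
# and the L-shaped predictions above, TP = 1 (only (1, 1)), FN = 2, FP = 3 and TN = 3,
# which is exactly the [[1, 2], [3, 3]] confusion matrix asserted in the tests below.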
class TestMeasures(object):
def test_confusion_matrix(self):
result_len = rl.confusion_matrix(LINKS_TRUE, LINKS_PRED, len(FULL_INDEX))
result_full_index = rl.confusion_matrix(LINKS_TRUE, LINKS_PRED, FULL_INDEX)
expected = numpy.array([[1, 2], [3, 3]])
numpy.testing.assert_array_equal(result_len, expected)
numpy.testing.assert_array_equal(result_full_index, expected)
def test_tp_fp_tn_fn(self):
tp = rl.true_positives(LINKS_TRUE, LINKS_PRED)
assert tp == 1
fp = rl.false_positives(LINKS_TRUE, LINKS_PRED)
assert fp == 3
tn = rl.true_negatives(LINKS_TRUE, LINKS_PRED, len(FULL_INDEX))
assert tn == 3
fn = rl.false_negatives(LINKS_TRUE, LINKS_PRED)
assert fn == 2
def test_recall(self):
# confusion matrix
cm = rl.confusion_matrix(LINKS_TRUE, LINKS_PRED)
assert rl.recall(LINKS_TRUE, LINKS_PRED) == 1 / 3
assert rl.recall(cm) == 1 / 3
def test_precision(self):
# confusion matrix
cm = rl.confusion_matrix(LINKS_TRUE, LINKS_PRED, len(FULL_INDEX))
assert rl.precision(LINKS_TRUE, LINKS_PRED) == 1 / 4
assert rl.precision(cm) == 1 / 4
def test_accuracy(self):
# confusion matrix
cm = rl.confusion_matrix(LINKS_TRUE, LINKS_PRED, len(FULL_INDEX))
assert rl.accuracy(LINKS_TRUE, LINKS_PRED, len(FULL_INDEX)) == 4 / 9
assert rl.accuracy(cm) == 4 / 9
assert rl.accuracy(LINKS_TRUE, LINKS_PRED, FULL_INDEX) == 4 / 9
def test_specificity(self):
# confusion matrix
cm = rl.confusion_matrix(LINKS_TRUE, LINKS_PRED, len(FULL_INDEX))
assert rl.specificity(LINKS_TRUE, LINKS_PRED, len(FULL_INDEX)) == 1 / 2
assert rl.specificity(cm) == 1 / 2
assert rl.specificity(LINKS_TRUE, LINKS_PRED, FULL_INDEX) == 1 / 2
def test_fscore(self):
# confusion matrix
cm = rl.confusion_matrix(LINKS_TRUE, LINKS_PRED, len(FULL_INDEX))
prec = rl.precision(LINKS_TRUE, LINKS_PRED)
rec = rl.recall(LINKS_TRUE, LINKS_PRED)
        expected = float(2 * prec * rec / (prec + rec))
assert rl.fscore(LINKS_TRUE, LINKS_PRED) == expected
assert rl.fscore(cm) == expected
    def test_full_index_size(self):
df_a = pandas.DataFrame(numpy.arange(10))
df_b = pandas.DataFrame(numpy.arange(10))
assert rl.full_index_size(df_a) == 45
assert rl.full_index_size(len(df_a)) == 45
assert rl.full_index_size((len(df_a))) == 45
assert rl.full_index_size([len(df_a)]) == 45
assert rl.full_index_size(df_a, df_b) == 100
assert rl.full_index_size(len(df_a), len(df_b)) == 100
assert rl.full_index_size((len(df_a), len(df_b))) == 100
assert rl.full_index_size([len(df_a), len(df_b)]) == 100
def test_reduction_ratio(self):
df_a = pandas.DataFrame(numpy.arange(10))
df_b = pandas.DataFrame(numpy.arange(10))
candidate_pairs_link = pandas.MultiIndex.from_product(
[[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]])
candidate_pairs_dedup = pandas.MultiIndex.from_arrays(
[[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]])
assert rl.reduction_ratio(candidate_pairs_dedup, df_a) == 8 / 9
assert rl.reduction_ratio(candidate_pairs_dedup, (df_a)) == 8 / 9
assert rl.reduction_ratio(candidate_pairs_dedup, (df_a, )) == 8 / 9
assert rl.reduction_ratio(candidate_pairs_link, df_a, df_b) == 3 / 4
assert rl.reduction_ratio(candidate_pairs_link, (df_a, df_b)) == 3 / 4
assert rl.reduction_ratio(candidate_pairs_link, [df_a, df_b]) == 3 / 4
|
mdhaman/superdesk-core
|
superdesk/io/commands/add_provider.py
|
Python
|
agpl-3.0
| 2,040
| 0.003431
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import superdesk
from superdesk import get_resource_service
from superdesk.errors import ProviderError
class AddProvider(superdesk.Command):
"""Add ingest provider.
Example:
::
$ python manage.py ingest:provider --provider='{"update_schedule" : { "minutes" : 5, "seconds" : 0 },
"idle_time" : { "hours" : 0, "minutes" : 0 }, "content_expiry" : 2880, "name" : "aap-demo",
"source" : "aap-demo", "feeding_service" : "rss",
"config" : { "url" : "https://abcnews.go.com/abcnews/primetimeheadlines", "field_aliases" : [ ] },
"feed_parser" : null, "content_types" : [ "text" ]}'
"""
option_list = {
superdesk.Option('--provider', '-p', dest='provider', required=True),
}
def run(self, provider):
try:
data = {}
data = superdesk.json.loads(provider)
data.setdefault('content_expiry', superdesk.app.config['INGEST_EXPIRY_MINUTES'])
            validator = superdesk.app.validator(superdesk.app.config['DOMAIN']['ingest_providers']['schema'],
'ingest_providers')
validation = validator.validate(data)
if validation:
get_resource_service('ingest_providers').post([data])
return data
else:
ex = Exception('Failed to add Provider as the data provided is invalid. Errors: {}'
.format(str(validator.errors)))
raise ProviderError.providerAddError(exception=ex, provider=data)
except Exception as ex:
raise ProviderError.providerAddError(ex, data)
superdesk.command('ingest:provider', AddProvider())
|
naturalness/unnaturalcode
|
unnaturalcode/pythonSource.py
|
Python
|
agpl-3.0
| 4,032
| 0.010665
|
#!/usr/bin/python
# Copyright 2013, 2014 Joshua Charles Campbell, Alex Wilson, Eddie Santos
#
# This file is part of UnnaturalCode.
#
# UnnaturalCode is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# UnnaturalCode is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with UnnaturalCode. If not, see <http://www.gnu.org/licenses/>.
from unnaturalcode.ucUtil import *
from unnaturalcode.unnaturalCode import *
from logging import debug, info, warning, error
from unnaturalcode import flexibleTokenize
import sys, re, token, tokenize, zmq
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
COMMENT = 53
ws = re.compile(r'\s')
# TODO: Refactor so base class is genericSource
class pythonLexeme(ucLexeme):
@classmethod
def stringify_build(cls, t, v):
"""
Stringify a lexeme: produce a string describing it.
        In the case of comments, strings, indents, dedents, newlines, and
the endmarker, a string with '<CATEGORY-NAME>' is returned. Else, its
actual text is returned.
"""
if t == 'COMMENT':
return '<'+t+'>'
        # Josh thought this would be a good idea for some strange reason:
elif len(v) > 20 :
return '<'+t+'>'
elif ws.match(str(v)) :
return '<'+t+'>'
elif t == 'STRING' :
return '<'+t+'>'
elif len(v) > 0 :
return v
else:
# This covers for <DEDENT> case, and also, probably some other
# special cases...
return '<' + t + '>'
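    # Illustrative examples (added for clarity; not in the original source):
    #   pythonLexeme.stringify_build('NAME', 'foo')     -> 'foo'
    #   pythonLexeme.stringify_build('COMMENT', '# hi') -> '<COMMENT>'
    #   pythonLexeme.stringify_build('STRING', "'hi'")  -> '<STRING>'
    #   pythonLexeme.stringify_build('NEWLINE', '\n')   -> '<NEWLINE>'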
@classmethod
def fromTuple(cls, tup):
if isinstance(tup[0], int):
t0 = token.tok_name[tup[0]]
else:
t0 = tup[0]
new = tuple.__new__(cls, (t0, tup[1], ucPos(tup[2]), ucPos(tup[3]), cls.stringify_build(t0, tup[1])))
return new
def comment(self):
return (self.ltype == 'COMMENT')
class pythonSource(ucSource):
lexemeClass = pythonLexeme
def lex(self, code, mid_line=False):
        tokGen = flexibleTokenize.generate_tokens(StringIO(code).readline, mid_line)
return [pythonLexeme.fromTuple(t) for t in tokGen]
def unCommented(self):
assert len(self)
return filter(lambda a: not a.comment(), copy(self))
def scrubbed(self):
"""Clean up python source code removing extra whitespace tokens and comments"""
ls = copy(self)
assert len(ls)
i = 0
r = []
for i in range(0, len(ls)):
if ls[i].comment():
continue
elif ls[i].ltype == 'NL':
continue
elif ls[i].ltype == 'NEWLINE' and i < len(ls)-1 and ls[i+1].ltype == 'NEWLINE':
continue
elif ls[i].ltype == 'NEWLINE' and i < len(ls)-1 and ls[i+1].ltype == 'INDENT':
continue
else:
r.append(ls[i])
assert len(r)
return pythonSource(r)
class LexPyMQ(object):
def __init__(self, lexer):
self.lexer = lexer
self.zctx = zmq.Context()
self.socket = self.zctx.socket(zmq.REP)
def run(self):
self.socket.bind("tcp://lo:32132")
while True:
msg = self.socket.recv_json(0)
# there are definitely new lines in the code
assert msg.get('python'), 'received non-python code'
code = msg.get('body', '')
self.socket.send_json(list(tokenize.generate_tokens(StringIO(code).readline)))
if __name__ == '__main__':
LexPyMQ(LexPy()).run()
|
tweekmonster/django-userjs
|
userjs/utils.py
|
Python
|
bsd-3-clause
| 1,325
| 0
|
import six
import json
from userjs.userjs_settings import JSON_HANDLERS
def _json_handlers(obj):
"""Extra handlers that JSON aren't able to parse.
The only built-in conversion is for datetime. User configured handlers
are tried for other types. If they all fail, raise TypeError.
"""
if hasattr(obj, 'isoformat'):
return obj.isoformat()
elif JSON_HANDLERS:
for handler in JSON_HANDLERS:
try:
return handler(obj)
except TypeError:
pass
raise TypeError('%s is not JSON serializable' % repr(obj))
def jsondumps(obj):
"""Creates a JSON string that can handle datetime objects.
"""
return json.dumps(obj, separators=(',', ':'), default=_json_handlers)
def get_field_value(obj, field_name):
"""Get a value from an object
This tries to get a value from an object that's similar to the way Django's
queries work. If it's unresolvable, return None instead of raising an
exception.
Not sure if there's a utility like
|
this in Django that's available to use.
"""
value = obj
    for field in field_name.split('__'):
if not hasattr(value, field):
return None
value = getattr(value, field)
if six.callable(value):
return value()
return value
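# --- Illustrative usage (added for clarity; not part of the original module). ---
# The Author/Post classes below are made-up stand-ins for Django model instances.
if __name__ == '__main__':
    from datetime import datetime

    class Author(object):
        name = 'Ada'

        def nickname(self):
            return 'ada'

    class Post(object):
        author = Author()

    # datetimes go through the isoformat() handler in _json_handlers
    print(jsondumps({'when': datetime(2020, 1, 1)}))    # {"when":"2020-01-01T00:00:00"}
    # double-underscore paths are resolved like Django query lookups
    print(get_field_value(Post(), 'author__name'))      # Ada
    print(get_field_value(Post(), 'author__nickname'))  # ada  (callables are called)
    print(get_field_value(Post(), 'author__missing'))   # None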
|
Aladom/django-mailing
|
django_mailing/wsgi.py
|
Python
|
mit
| 405
| 0
|
"""
WSGI config for django_mailing project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_mailing.settings")
application = get_wsgi_application()
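# Typically this module is handed to a WSGI server; for example (sketch, the choice
# of server is an assumption): gunicorn django_mailing.wsgi:application --bind 0.0.0.0:8000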
|
googleads/google-ads-python
|
google/ads/googleads/v8/enums/types/campaign_experiment_traffic_split_type.py
|
Python
|
apache-2.0
| 1,292
| 0.000774
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v8.enums",
marshal="google.ads.googleads.v8",
manifest={"CampaignExperimentTrafficSplitTypeEnum",},
)
class CampaignExperimentTrafficSplitTypeEnum(proto.Message):
r"""Container for enum describing campaign experiment traffic
split type.
"""
class CampaignExperimentTrafficSplitType(proto.Enum):
r"""Enum of strategies for splitting traffic between base and
experiment campaigns in campaign experiment.
"""
UNSPECIFIED = 0
UNKNOWN = 1
RANDOM_QUERY = 2
COOKIE = 3
__all__ = tuple(sorted(__protobuf__.manifest))
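# Illustrative usage (not part of the generated file): proto-plus exposes the nested
# enum as an int-valued member, e.g.
#   CampaignExperimentTrafficSplitTypeEnum.CampaignExperimentTrafficSplitType.COOKIE == 3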
|
wuzheng-sjtu/FastFPN
|
libs/preprocessings/city_v1.py
|
Python
|
apache-2.0
| 3,441
| 0.011334
|
#!/usr/bin/env python
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import tensorflow as tf
import libs.configs.config_v1 as cfg
from . import utils as preprocess_utils
FLAGS = tf.app.flags.FLAGS
def preprocess_image(image, gt_boxes, is_training=False):
"""preprocess image for coco
1. random flipping
2. min size resizing
3. zero mean
4. ...
"""
if is_training:
return preprocess_for_training(image, gt_boxes)
else:
return preprocess_for_test(image, gt_boxes)
def preprocess_for_training(image, gt_boxes):
ih, iw = tf.shape(image)[0], tf.shape(image)[1]
## random flipping
coin = tf.to_float(tf.random_uniform([1]))[0]
image, gt_boxes =\
tf.cond(tf.greater_equal(coin, 0.5),
lambda: (preprocess_utils.flip_image(image),
preprocess_utils.flip_gt_boxes(gt_boxes, ih, iw)),
lambda: (image, gt_boxes))
## min size resizing
new_ih, new_iw = preprocess_utils._smallest_size_at_least(ih, iw, cfg.FLAGS.image_min_size)
    image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(image, [new_ih, new_iw], align_corners=False)
image = tf.squeeze(image, axis=[0])
#gt_masks = tf.expand_dims(gt_masks, -1)
#gt_masks = tf.cast(gt_masks, tf.float32)
    #gt_masks = tf.image.resize_nearest_neighbor(gt_masks, [new_ih, new_iw], align_corners=False)
#gt_masks = tf.cast(gt_masks, tf.int32)
#gt_masks = tf.squeeze(gt_masks, axis=[-1])
scale_ratio = tf.to_float(new_ih) / tf.to_float(ih)
gt_boxes = preprocess_utils.resize_gt_boxes(gt_boxes, scale_ratio)
## random flip image
# val_lr = tf.to_float(tf.random_uniform([1]))[0]
# image = tf.cond(val_lr > 0.5, lambda: preprocess_utils.flip_image(image), lambda: image)
# gt_masks = tf.cond(val_lr > 0.5, lambda: preprocess_utils.flip_gt_masks(gt_masks), lambda: gt_masks)
# gt_boxes = tf.cond(val_lr > 0.5, lambda: preprocess_utils.flip_gt_boxes(gt_boxes, new_ih, new_iw), lambda: gt_boxes)
## zero mean image
image = tf.cast(image, tf.float32)
image = image / 256.0
image = (image - 0.5) * 2.0
image = tf.expand_dims(image, axis=0)
## rgb to bgr
image = tf.reverse(image, axis=[-1])
return image, gt_boxes
def preprocess_for_test(image, gt_boxes):
ih, iw = tf.shape(image)[0], tf.shape(image)[1]
## min size resizing
new_ih, new_iw = preprocess_utils._smallest_size_at_least(ih, iw, cfg.FLAGS.image_min_size)
image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(image, [new_ih, new_iw], align_corners=False)
image = tf.squeeze(image, axis=[0])
#gt_masks = tf.expand_dims(gt_masks, -1)
#gt_masks = tf.cast(gt_masks, tf.float32)
#gt_masks = tf.image.resize_nearest_neighbor(gt_masks, [new_ih, new_iw], align_corners=False)
#gt_masks = tf.cast(gt_masks, tf.int32)
#gt_masks = tf.squeeze(gt_masks, axis=[-1])
scale_ratio = tf.to_float(new_ih) / tf.to_float(ih)
gt_boxes = preprocess_utils.resize_gt_boxes(gt_boxes, scale_ratio)
## zero mean image
image = tf.cast(image, tf.float32)
image = image / 256.0
image = (image - 0.5) * 2.0
image = tf.expand_dims(image, axis=0)
## rgb to bgr
image = tf.reverse(image, axis=[-1])
return image, gt_boxes
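# Minimal wiring sketch (added for clarity; TF1-style graph code as above, and the
# placeholder shapes are assumptions):
#   image_ph = tf.placeholder(tf.uint8, [None, None, 3])
#   boxes_ph = tf.placeholder(tf.float32, [None, 5])
#   train_image, train_boxes = preprocess_image(image_ph, boxes_ph, is_training=True)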
|
mvesper/invenio-circulation
|
tests/test_api.py
|
Python
|
gpl-2.0
| 3,899
| 0
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Module tests."""
from __future__ import absolute_import, print_function
import pytest
from invenio_circulation import InvenioCirculation
from utils import (_create_dates, _create_test_data, _delete_test_data,
current_app, rec_uuids, state)
def test_create(current_app, rec_uuids):
import invenio_circulation.api as api
import invenio_circulation.models as models
with current_app.app_context():
cl, clr, clrm, cu, ci = _create_test_data(rec_uuids)
creates = {'loan_rule': ['default', 'period', 28,
True, True, True, True],
'loan_rule_match': [clr.id, '*', '*', '*', True],
'location': ['CCL', 'CERN CENTRAL LIBRARY', ''],
'mail_template': ['foo', 'foo', 'foo', 'foo'],
'user': [1, 934657, 'John Doe', '3 1-014', 'C27800',
'john.doe@cern.ch', '+41227141337', '',
models.CirculationUser.GROUP_DEFAULT]}
changes = {'loan_rule': 'name',
'loan_rule_match': 'item_type',
'location': 'name',
'mail_template': 'template_name',
'user': 'name'}
objs = []
for key, val in creates.items():
# Test create
            _api = getattr(api, key)
            obj = _api.create(*val)
_id = obj.id
assert obj.get(_id)
            # Test update
_api.update(obj, **dict([(changes[key], 'bar')]))
assert getattr(obj.get(_id), changes[key]) == 'bar'
# Test delete
_api.delete(obj)
try:
obj.get(_id)
raise AssertionError('The object should not be there anymore.')
except Exception:
pass
def test_event_create(current_app, rec_uuids):
import invenio_circulation.api as api
import invenio_circulation.models as models
with current_app.app_context():
ce = api.event.create()
assert models.CirculationEvent.get(ce.id)
_delete_test_data(ce)
def test_event_update(current_app, rec_uuids):
import invenio_circulation.api as api
import invenio_circulation.models as models
with current_app.app_context():
ce = api.event.create()
try:
api.event.update(ce)
raise AssertionError('Updating an event should not be possible.')
except Exception as e:
pass
_delete_test_data(ce)
def test_event_delete(current_app, rec_uuids):
import invenio_circulation.api as api
import invenio_circulation.models as models
with current_app.app_context():
ce = api.event.create()
try:
api.event.delete(ce)
raise AssertionError('Deleting an event should not be possible.')
except Exception as e:
pass
|
ddevlin/GitSavvy
|
common/interwebs.py
|
Python
|
mit
| 2,509
| 0.001594
|
"
|
""
A simple HTTP interface for making GET, PUT and POST requests.
"""
import http.client
import json
from urllib.parse import urlparse, urlencode # NOQA
from base64 import b64encode
from functools import partial
from collections import namedtuple
Response = namedtuple("Response", ("payload", "headers", "status", "is_json"))
def request(verb, host, port, path, payload=None, https=False, headers=None, auth=None, redirect=True):
"""
Make an HTTP(S) request with the provided HTTP verb, host FQDN, port number, path,
payload, protocol, headers, and auth information. Return a response object with
payload, headers, JSON flag, and HTTP status number.
"""
if not headers:
headers = {}
headers["User-Agent"] = "GitSavvy Sublime Plug-in"
if auth:
username_password = "{}:{}".format(*auth).encode("ascii")
headers["Authorization"] = "Basic {}".format(b64encode(username_password).decode("ascii"))
connection = (http.client.HTTPSConnection(host, port)
if https
else http.client.HTTPConnection(host, port))
connection.request(verb, path, body=payload, headers=headers)
response = connection.getresponse()
response_payload = response.read()
response_headers = dict(response.getheaders())
status = response.status
is_json = "application/json" in response_headers["Content-Type"]
if is_json:
response_payload = json.loads(response_payload.decode("utf-8"))
response.close()
connection.close()
    if redirect and verb == "GET" and status in (301, 302):
return request_url(
verb,
response_headers["Location"],
headers=headers,
auth=auth
)
return Response(response_payload, response_headers, status, is_json)
def request_url(verb, url, payload=None, headers=None, auth=None):
parsed = urlparse(url)
https = parsed.scheme == "https"
return request(
verb,
parsed.hostname,
parsed.port or 443 if https else 80,
parsed.path,
payload=payload,
https=https,
headers=headers,
auth=([parsed.username, parsed.password]
if parsed.username and parsed.password
else None)
)
get = partial(request, "GET")
post = partial(request, "POST")
put = partial(request, "PUT")
get_url = partial(request_url, "GET")
post_url = partial(request_url, "POST")
put_url = partial(request_url, "PUT")
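# --- Illustrative usage (added for clarity; not part of the original module). ---
# The URL below is only an example endpoint; any HTTPS JSON API would behave similarly.
if __name__ == "__main__":
    response = get_url("https://api.github.com/repos/python/cpython")
    if response.is_json:
        print(response.status, response.payload.get("full_name"))
    else:
        print(response.status, "non-JSON payload of", len(response.payload), "bytes")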
|
Shrews/PyGerrit
|
webapp/django/contrib/gis/utils/layermapping.py
|
Python
|
apache-2.0
| 29,814
| 0.005601
|
# LayerMapping -- A Django Model/OGR Layer Mapping Utility
"""
The LayerMapping class provides a way to map the contents of OGR
vector files (e.g. SHP files) to Geographic-enabled Django models.
This grew out of my personal needs, specifically the code repetition
that went into pulling geometries and fields out of an OGR layer,
converting to another coordinate system (e.g. WGS84), and then inserting
into a GeoDjango model.
Please report any bugs encountered using this utility.
Requirements: OGR C Library (from GDAL) required.
Usage:
lm = LayerMapping(model, source_file, mapping) where,
model:
GeoDjango model (not an instance)
data:
OGR-supported data source file (e.g. a shapefile) or
gdal.DataSource instance
mapping:
A python dictionary, keys are strings corresponding
to the GeoDjango model field, and values correspond to
string field names for the OGR feature, or if the model field
is a geographic then it should correspond to the OGR
geometry type, e.g. 'POINT', 'LINESTRING', 'POLYGON'.
Keyword Args:
layer:
The index of the layer to use from the Data Source (defaults to 0)
source_srs:
Use this to specify the source SRS manually (for example,
some shapefiles don't come with a '.prj' file). An integer SRID,
a string WKT, and SpatialReference objects are valid parameters.
encoding:
Specifies the encoding of the string in the OGR data source.
For example, 'latin-1', 'utf-8', and 'cp437' are all valid
encoding parameters.
transaction_mode:
May be 'commit_on_success' (default) or 'autocommit'.
transform:
Setting this to False will disable all coordinate transformations.
unique:
Setting this to the name, or a tuple of names, from the given
model will create models unique only to the given name(s).
        Geometries from each feature will be added into the collection
associated with the unique model. Forces transaction mode to
be 'autocommit'.
Example:
1. You need a GDAL-supported data source, like a shapefile.
Assume we're using the test_poly SHP file:
>>> from django.contrib.gis.gdal import DataSource
>>> ds = DataSource('test_poly.shp')
>>> layer = ds[0]
>>> print layer.fields # Exploring the fields in the layer, we only want the 'str' field.
['float', 'int', 'str']
>>> print len(layer) # getting the number of features in the layer (should be 3)
3
>>> print layer.geom_type # Should be 3 (a Polygon)
3
>>> print layer.srs # WGS84
GEOGCS["GCS_WGS_1984",
DATUM["WGS_1984",
SPHEROID["WGS_1984",6378137,298.257223563]],
PRIMEM["Greenwich",0],
UNIT["Degree",0.017453292519943295]]
2. Now we define our corresponding Django model (make sure to use syncdb):
from django.contrib.gis.db import models
class TestGeo(models.Model, models.GeoMixin):
name = models.CharField(maxlength=25) # corresponds to the 'str' field
poly = models.PolygonField(srid=4269) # we want our model in a different SRID
objects = models.GeoManager()
def __str__(self):
return 'Name: %s' % self.name
3. Use LayerMapping to extract all the features and place them in the database:
>>> from django.contrib.gis.utils import LayerMapping
>>> from geoapp.models import TestGeo
>>> mapping = {'name' : 'str', # The 'name' model field maps to the 'str' layer field.
'poly' : 'POLYGON', # For geometry fields use OGC name.
} # The mapping is a dictionary
>>> lm = LayerMapping(TestGeo, 'test_poly.shp', mapping)
>>> lm.save(verbose=True) # Save the layermap, imports the data.
Saved: Name: 1
Saved: Name: 2
Saved: Name: 3
LayerMapping just transformed the three geometries from the SHP file from their
source spatial reference system (WGS84) to the spatial reference system of
the GeoDjango model (NAD83). If no spatial reference system is defined for
the layer, use the `source_srs` keyword with a SpatialReference object to
specify one.
"""
import sys
from datetime import date, datetime
from decimal import Decimal
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.gis.db.models import GeometryField
from django.contrib.gis.db.backend import SpatialBackend
from django.contrib.gis.gdal import CoordTransform, DataSource, \
OGRException, OGRGeometry, OGRGeomType, SpatialReference
from django.contrib.gis.gdal.field import \
OFTDate, OFTDateTime, OFTInteger, OFTReal, OFTString, OFTTime
from django.contrib.gis.models import GeometryColumns, SpatialRefSys
from django.db import models, transaction
# LayerMapping exceptions.
class LayerMapError(Exception): pass
class InvalidString(LayerMapError): pass
class InvalidDecimal(LayerMapError): pass
class InvalidInteger(LayerMapError): pass
class MissingForeignKey(LayerMapError): pass
class LayerMapping(object):
"A class that maps OGR Layers to GeoDjango Models."
# Acceptable 'base' types for a multi-geometry type.
MULTI_TYPES = {1 : OGRGeomType('MultiPoint'),
2 : OGRGeomType('MultiLineString'),
3 : OGRGeomType('MultiPolygon'),
}
# Acceptable Django field types and corresponding acceptable OGR
# counterparts.
FIELD_TYPES = {
models.AutoField : OFTInteger,
models.IntegerField : (OFTInteger, OFTReal, OFTString),
models.FloatField : (OFTInteger, OFTReal),
models.DateField : OFTDate,
models.DateTimeField : OFTDateTime,
models.EmailField : OFTString,
models.TimeField : OFTTime,
models.DecimalField : (OFTInteger, OFTReal),
models.CharField : OFTString,
models.SlugField : OFTString,
models.TextField : OFTString,
models.URLField : OFTString,
models.USStateField : OFTString,
models.XMLField : OFTString,
models.SmallIntegerField : (OFTInteger, OFTReal, OFTString),
models.PositiveSmallIntegerField : (OFTInteger, OFTReal, OFTString),
}
# The acceptable transaction modes.
TRANSACTION_MODES = {'autocommit' : transaction.autocommit,
'commit_on_success' : transaction.commit_on_success,
}
def __init__(self, model, data, mapping, layer=0,
source_srs=None, encoding=None,
transaction_mode='commit_on_success',
transform=True, unique=None):
"""
A LayerMapping object is initialized using the given Model (not an instance),
a DataSource (or string path to an OGR-supported data file), and a mapping
dictionary. See the module level docstring for more details and keyword
argument usage.
"""
# Getting the DataSource and the associated Layer.
if isinstance(data, basestring):
self.ds = DataSource(data)
else:
self.ds = data
self.layer = self.ds[layer]
# Setting the mapping
self.mapping = mapping
# Setting the model, and getting the geometry column associated
# with the model (an exception will be raised if there is no
# geometry column).
self.model = model
self.geo_col = self.geometry_column()
# Checking the source spatial reference system, and getting
# the coordinate transformation object (unless the `transform`
# keyword is set to False)
if transform:
self.source_srs = self.check_srs(source_srs)
self.transform = self.coord_transform()
else:
self.transform = transform
        # Checking the layer -- initialization of the object will fail if
        # things don't check out beforehand.
self.check_layer()
# Setting the encoding for OFTString fields, if specified.
if encoding:
# Making sure the encoding exists, if not a LookupError
# exception will be thrown.
from codecs import lookup
lookup(encoding)
self.encoding = encoding
else:
self.encoding = None
if unique:
self.check_unique(unique)
|
tersmitten/ansible
|
lib/ansible/modules/database/mysql/mysql_db.py
|
Python
|
gpl-3.0
| 15,596
| 0.002501
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Mark Theunissen <mark.theunissen@gmail.com>
# Sponsored by Four Kitchens http://fourkitchens.com.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: mysql_db
short_description: Add or remove MySQL databases from a remote host.
description:
- Add or remove MySQL databases from a remote host.
version_added: "0.6"
options:
name:
description:
- name of the database to add or remove
- name=all May only be provided if I(state) is C(dump) or C(import).
- if name=all Works like --all-databases option for mysqldump (Added in 2.0)
required: true
aliases: [ db ]
state:
description:
- The database state
default: present
choices: [ "present", "absent", "dump", "import" ]
collation:
description:
    - Collation mode (sorting). This only applies to new tables/databases and does not update existing ones; this is a limitation of MySQL.
encoding:
description:
- Encoding mode to use, examples include C(utf8) or C(latin1_swedish_ci)
target:
description:
- Location, on the remote host, of the dump file to read from or write to. Uncompressed SQL
files (C(.sql)) as well as bzip2 (C(.bz2)), gzip (C(.gz)) and xz (Added in 2.0) compressed files are supported.
single_transaction:
description:
- Execute the dump in a single transaction
type: bool
default: 'no'
version_added: "2.1"
quick:
description:
- Option used for dumping large tables
type: bool
default: 'yes'
version_added: "2.1"
ignore_tables:
description:
- A list of table names that will be ignored in the dump of the form database_name.table_name
required: false
default: []
version_added: "2.7"
author: "Ansible Core Team"
requirements:
- mysql (command line binary)
- mysqldump (command line binary)
notes:
- Requires the mysql and mysqldump binaries on the remote host.
- This module is B(not idempotent) when I(state) is C(import), and will import the dump file each time if run more than once.
extends_documentation_fragment: mysql
'''
EXAMPLES = r'''
- name: Create a new database with name 'bobdata'
mysql_db:
name: bobdata
state: present
# Copy database dump file to remote host and restore it to database 'my_db'
- name: Copy database dump file
copy:
src: dump.sql.bz2
dest: /tmp
- name: Restore database
mysql_db:
name: my_db
state: import
target: /tmp/dump.sql.bz2
- name: Dump all databases to hostname.sql
mysql_db:
state: dump
name: all
target: /tmp/{{ inventory_hostname }}.sql
- name: Import file.sql similar to mysql -u <username> -p <password> < hostname.sql
mysql_db:
state: import
name: all
target: /tmp/{{ inventory_hostname }}.sql
'''
import os
import pipes
import subprocess
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.database import mysql_quote_identifier
from ansible.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg
from ansible.module_utils._text import to_native
# ===========================================
# MySQL module specific support methods.
#
def db_exists(cursor, db):
res = cursor.execute("SHOW DATABASES LIKE %s", (db.replace("_", r"\_"),))
return bool(res)
def db_delete(cursor, db):
query = "DROP DATABASE %s" % mysql_quote_identifier(db, 'database')
cursor.execute(query)
return True
def db_dump(module, host, user, password, db_name, target, all_databases, port, config_file, socket=None, ssl_cert=None, ssl_key=None, ssl_ca=None,
            single_transaction=None, quick=None, ignore_tables=None):
cmd = module.get_bin_path('mysqldump', True)
# If defined, mysqldump demands --defaults-extra-file be the first option
if config_file:
cmd += " --defaults-extra-file=%s" % pipes.quote(config_file)
    if user is not None:
        cmd += " --user=%s" % pipes.quote(user)
if password is not None:
cmd += " --password=%s" % pipes.quote(password)
if ssl_cert is not None:
cmd += " --ssl-cert=%s" % pipes.quote(ssl_cert)
if ssl_key is not None:
cmd += " --ssl-key=%s" % pipes.quote(ssl_key)
if ssl_ca is not None:
cmd += " --ssl-ca=%s" % pipes.quote(ssl_ca)
if socket is not None:
cmd += " --socket=%s" % pipes.quote(socket)
else:
cmd += " --host=%s --port=%i" % (pipes.quote(host), port)
if all_databases:
cmd += " --all-databases"
else:
cmd += " %s" % pipes.quote(db_name)
if single_transaction:
cmd += " --single-transaction=true"
if quick:
cmd += " --quick"
if ignore_tables:
for an_ignored_table in ignore_tables:
cmd += " --ignore-table={0}".format(an_ignored_table)
path = None
if os.path.splitext(target)[-1] == '.gz':
path = module.get_bin_path('gzip', True)
elif os.path.splitext(target)[-1] == '.bz2':
path = module.get_bin_path('bzip2', True)
elif os.path.splitext(target)[-1] == '.xz':
path = module.get_bin_path('xz', True)
if path:
cmd = '%s | %s > %s' % (cmd, path, pipes.quote(target))
else:
cmd += " > %s" % pipes.quote(target)
rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
return rc, stdout, stderr
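# For illustration (not part of the original module), a call such as
#   db_dump(module, 'db.example.com', 'root', 's3cret', 'shop', '/tmp/shop.sql.gz',
#           False, 3306, None, single_transaction=True, quick=True)
# assembles roughly:
#   mysqldump --user=root --password=s3cret --host=db.example.com --port=3306 shop \
#       --single-transaction=true --quick | gzip > /tmp/shop.sql.gz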
def db_import(module, host, user, password, db_name, target, all_databases, port, config_file, socket=None, ssl_cert=None, ssl_key=None, ssl_ca=None):
if not os.path.exists(target):
return module.fail_json(msg="target %s does not exist on the host" % target)
cmd = [module.get_bin_path('mysql', True)]
# --defaults-file must go first, or errors out
if config_file:
cmd.append("--defaults-extra-file=%s" % pipes.quote(config_file))
if user:
cmd.append("--user=%s" % pipes.quote(user))
if password:
cmd.append("--password=%s" % pipes.quote(password))
if ssl_cert is not None:
cmd.append("--ssl-cert=%s" % pipes.quote(ssl_cert))
if ssl_key is not None:
cmd.append("--ssl-key=%s" % pipes.quote(ssl_key))
if ssl_ca is not None:
cmd.append("--ssl-ca=%s" % pipes.quote(ssl_ca))
if socket is not None:
cmd.append("--socket=%s" % pipes.quote(socket))
else:
cmd.append("--host=%s" % pipes.quote(host))
cmd.append("--port=%i" % port)
if not all_databases:
cmd.append("-D")
cmd.append(pipes.quote(db_name))
comp_prog_path = None
if os.path.splitext(target)[-1] == '.gz':
comp_prog_path = module.get_bin_path('gzip', required=True)
elif os.path.splitext(target)[-1] == '.bz2':
comp_prog_path = module.get_bin_path('bzip2', required=True)
elif os.path.splitext(target)[-1] == '.xz':
comp_prog_path = module.get_bin_path('xz', required=True)
if comp_prog_path:
p1 = subprocess.Popen([comp_prog_path, '-dc', target], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p2 = subprocess.Popen(cmd, stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout2, stderr2) = p2.communicate()
p1.stdout.close()
p1.wait()
if p1.returncode != 0:
stderr1 = p1.stderr.read()
return p1.returncode, '', stderr1
else:
return p2.returncode, stdout2, stderr2
else:
cmd = ' '.join(cmd)
cmd += " < %s" % pipes.quote(target)
rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
return rc, stdout, stderr
def db_create(cursor, db, encoding, collation):
query_params = dict(enc=encoding, collate=collation)
query = ['CREATE DATABASE %s' % mysql_quote_identifier(db, 'database')]
if encoding:
query.append("CHARACTER SET %(enc)s")
if collation:
|
scop/bash-completion
|
test/t/test_kpdf.py
|
Python
|
gpl-2.0
| 502
| 0
|
import pytest
from conftest import assert_complete, create_dummy_filedirs
@pytest.mark.bashcomp(temp_cwd=True)
class TestKpdf:
def test_1(self, bash):
        files, dirs = create_dummy_filedirs(
            ".eps .EPS .pdf .PDF .ps .PS .txt".split(),
"foo".split(),
)
completion = assert_complete(bash, "kpdf ")
assert completion == [
x
for x in sorted(files + ["%s/" % d for d in dirs])
if x.lower() != ".txt"
]
|
0x1306e6d/Baekjoon
|
baekjoon/2439.py
|
Python
|
gpl-2.0
| 326
| 0.003125
|
"""
2439 : 별 찍기 - 2 (Print Stars - 2)
URL : https://www.acmicpc.net/problem/2439
Input :
5
Output :
*
**
***
****
*****
"""
from itertools import repeat
N = int(input())
for i in range(1, N + 1):
    print(''.join(list(repeat(' ', N - i)) + list(repeat('*', i))))
|
sivakuna-aap/superdesk
|
server/apps/ldap/commands.py
|
Python
|
agpl-3.0
| 2,913
| 0.004806
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
from flask import current_app as app
from superdesk.errors import SuperdeskApiError
import superdesk
from .ldap import ADAuth, add_default_values, get_user_query
logger = logging.getLogger(__name__)
class ImportUserProfileFromADCommand(superdesk.Command):
"""
Responsible for importing a user profile from Active Directory (AD) to Mongo.
    This command runs on the assumption that the user executing it and the user
    whose profile is to be imported need not be the same. Uses ad_username and
    ad_password to bind to AD and then searches for a user identified by
    username_to_import and, if found, imports the profile into Mongo.
"""
option_list = (
superdesk.Option('--ad_username', '-adu', dest='ad_username', required=True),
superdesk.Option('--ad_password', '-adp', dest='ad_password', required=True),
superdesk.Option('--username_to_import', '-u', dest='username', required=True),
superdesk.Option('--admin', '-a', dest='admin', required=False),
)
def run(self, ad_username, ad_password, username, admin='false'):
"""
Imports or Updates a User Profile from AD to Mongo.
:param ad_username: Active Directory Username
:param ad_password: Password of Active Directory Username
:param username: Username as in Active Directory whose profile needs to be imported to Superdesk.
:return: User Profile.
"""
# force type conversion to boolean
user_type = 'administrator' if admin is not None and admin.lower() == 'true' else 'user'
# Authenticate and fetch profile from AD
settings = app.settings
ad_auth = ADAuth(settings['LDAP_SERVER'], settings['LDAP_SERVER_PORT'], settings['LDAP_BASE_FILTER'],
settings['LDAP_USER_FILTER'], settings['LDAP_USER_ATTRIBUTES'], settings['LDAP_FQDN'])
user_data = ad_auth.authenticate_and_fetch_profile(ad_username, ad_password, username)
if len(user_data) == 0:
raise SuperdeskApiError.notFoundError('Username not found')
# Check if User Profile already exists in Mongo
user = superdesk.get_resource_service('users').find_one(req=None, **get_user_query(username))
if user:
superdesk.get_resource_service('users').patch(user.get('_id'), user_data)
else:
add_default_values(user_data, username, user_type=user_type)
superdesk.get_resource_service('users').post([user_data])
return user_data
superdesk.command('users:copyfromad', ImportUserProfileFromADCommand())
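# Illustrative invocation (sketch; option names as declared in option_list above):
#   python manage.py users:copyfromad --ad_username admin@example.org \
#       --ad_password secret --username_to_import jdoe --admin true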
|
appsembler/awstrial
|
awstrial/trial/context_processors.py
|
Python
|
agpl-3.0
| 532
| 0.003759
|
'''
Context processors for AWSTrial pages
'''
from django.conf import settings
def google_analytics_id(request):
"""Adds the google analytics id to the context if it's present."""
return {
        'google_analytics_id': getattr(settings, 'GOOGLE_ANALYTICS_ID', None),
}
def theme_assets_root(request):
"""Adds the ASSET_ROOT variable from the settings file""
|
"
root = getattr(settings, 'ASSET_ROOT', '')
if root.endswith('/'):
root = root[:-1]
return {
'ASSET_ROOT': root,
}
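# These are enabled by listing their dotted paths in the Django settings, e.g.
# (sketch; the exact setting name and import path depend on the Django version
# and how the app is installed):
#   TEMPLATE_CONTEXT_PROCESSORS += (
#       'trial.context_processors.google_analytics_id',
#       'trial.context_processors.theme_assets_root',
#   )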
|
nathanbullock/pymtp-nkb
|
pymtp/main.py
|
Python
|
gpl-3.0
| 23,914
| 0.031237
|
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
#
# PyMTP
# Developed by: Nick Devito (nick@nick125.com)
# (c) 2008 Nick Devito
# Released under the GPLv3 or later.
#
import os
import ctypes
import ctypes.util
from models import *
from constants import *
from errors import *
_module_path = ctypes.util.find_library("mtp")
_libmtp = ctypes.CDLL(_module_path)
# Initialize LibMTP (Why are we doing this here? Just to make sure that
# it only gets initialized once)
_libmtp.LIBMTP_Init()
# ----------
# Type Definitions
# ----------
_libmtp.LIBMTP_Detect_Raw_Devices = ctypes.c_int
_libmtp.LIBMTP_Get_Friendlyname.restype = ctypes.c_char_p
_libmtp.LIBMTP_Get_Serialnumber.restype = ctypes.c_char_p
_libmtp.LIBMTP_Get_Modelname.restype = ctypes.c_char_p
_libmtp.LIBMTP_Get_Manufacturername.restype = ctypes.c_char_p
_libmtp.LIBMTP_Get_Deviceversion.restype = ctypes.c_char_p
_libmtp.LIBMTP_Get_Filelisting_With_Callback.restype = ctypes.POINTER(LIBMTP_File)
_libmtp.LIBMTP_Get_Tracklisting_With_Callback.restype = ctypes.POINTER(LIBMTP_Track)
_libmtp.LIBMTP_Get_Filetype_Description.restype = ctypes.c_char_p
_libmtp.LIBMTP_Get_Filemetadata.restype = ctypes.POINTER(LIBMTP_File)
_libmtp.LIBMTP_Get_Trackmetadata.restype = ctypes.POINTER(LIBMTP_Track)
_libmtp.LIBMTP_Get_First_Device.restype = ctypes.POINTER(LIBMTP_MTPDevice)
_libmtp.LIBMTP_Get_Playlist_List.restype = ctypes.POINTER(LIBMTP_Playlist)
_libmtp.LIBMTP_Get_Playlist.restype = ctypes.POINTER(LIBMTP_Playlist)
_libmtp.LIBMTP_Get_Folder_List.restype = ctypes.POINTER(LIBMTP_Folder)
_libmtp.LIBMTP_Find_Folder.restype = ctypes.POINTER(LIBMTP_Folder)
_libmtp.LIBMTP_Get_Errorstack.restype = ctypes.POINTER(LIBMTP_Error)
# This is for callbacks with the type of LIBMTP_progressfunc_t
Progressfunc = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_uint64, ctypes.c_uint64)
# ----------
# End Type Definitions
# ----------
class MTPConnectionManager(object):
"""
MTPConnectionManager
Provides facilities for managing connections to MTP devices
"""
def __init__(self):
"""
Initializes the internal structures and variables
"""
self._mtp = _libmtp
self.connections = {}
def connect(self, device):
"""
Connects to an MTP device
@type device: L{MTPRawDevice}
@param device: The L{MTPRawDevice} to connect to
@rtype: L{MTPObject}
@return: A fresh MTPObject, already connected.
"""
if not device:
raise ValueError
if device.device_id in self.connections:
raise AlreadyConnected
obj = MTPObject(self, device)
obj.connect()
return obj
def _register_object(self, device, obj):
"""
Registers an object with the internal connections list
so we don't reinitialize an MTPObject for that device
"""
self.connections[device.device_id] = obj
def _unregister_object(self, device):
"""
Unregisters an object after being disconnected
"""
del self.connections[device.device_id]
def detect_devices(self):
"""
Detects the MTP devices on the USB bus that we can connect to
@rtype: L{MTPRawDevices}
@return: An array/list of L{MTPRawDevice} objects
"""
numdevices = ctypes.c_int(0)
devices = ctypes.POINTER(LIBMTP_RawDevice)
ret = self._mtp.LIBMTP_Detect_Raw_Devices(ctypes.byref(devices),
ctypes.byref(numdevices))
if ret != 0:
raise
class MTP:
"""
The MTP object
This is the main wrapper around libmtp
"""
def __init__(self):
"""
Initializes the MTP object
@rtype: None
@return: None
"""
self.mtp = _libmtp
self.mtp.LIBMTP_Init()
self.device = None
def debug_stack(self):
"""
Checks if __DEBUG__ is set, if so, prints and clears the
errorstack.
@rtype: None
@return: None
"""
if __DEBUG__:
self.mtp.LIBMTP_Dump_Errorstack()
#self.mtp.LIBMTP_Clear_Errorstack()
def connect(self):
"""
Initializes the MTP connection to the device
@rtype: None
@return: None
"""
if (self.device != None):
raise AlreadyConnected
self.device = self.mtp.LIBMTP_Get_First_Device()
if not self.device:
self.device = None
raise NoDeviceConnected
def disconnect(self):
"""
Disconnects the MTP device and deletes the self.device object
@rtype: None
@return: None
"""
if (self.device == None):
raise NotConnected
self.mtp.LIBMTP_Release_Device(self.device)
del self.device
self.device = None
def get_devicename(self):
"""
Returns the connected device's 'friendly name' (or
known as the owner name)
@rtype: string
@return: The connected device's 'friendly name'
"""
if (self.device == None):
raise NotConnected
return self.mtp.LIBMTP_Get_Friendlyname(self.device)
def set_devicename(self, name):
"""
Changes the connected device's 'friendly name' to name
@type name: string
@param name: The name to change the connected device's
'friendly name' to
@rtype: None
@return: None
"""
if (self.device == None):
raise NotConnected
ret = self.mtp.LIBMTP_Set_Friendlyname(self.device, name)
if (ret != 0):
self.debug_stack()
raise CommandFailed
def get_serialnumber(self):
"""
Returns the connected device's serial number
@rtype: string
@return: The connected device's serial number
"""
if (self.device == None):
raise NotConnected
return self.mtp.LIBMTP_Get_Serialnumber(self.device)
def get_manufacturer(self):
"""
Return the connected device's manufacturer
@rtype: string
@return: The connected device's manufacturer
"""
if (self.device == None):
raise NotConnected
return self.mtp.LIBMTP_Get_Manufacturername(self.device)
def get_batterylevel(self):
"""
Returns the connected device's maximum and current
battery levels
@rtype: tuple
@return: The connected device's maximum and current
battery levels ([0] is maximum, [1] is current)
"""
if (self.device == None):
raise NotConnected
maximum_level = ctypes.c_uint8()
current_level = ctypes.c_uint8()
ret = self.mtp.LIBMTP_Get_Batterylevel(self.device, \
ctypes.byref(maximum_level), ctypes.byref(current_level))
if (ret != 0):
raise CommandFailed
return (maximum_level.value, current_level.value)
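    # Illustrative use of the calls above (not part of the original source):
    #   device = MTP()
    #   device.connect()
    #   maximum, current = device.get_batterylevel()
    #   print("battery: %d / %d" % (current, maximum))
    #   device.disconnect()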
def get_modelname(self):
"""
Returns the connected device's model name (such
as "Zen V Plus")
@rtype: string
@return: The connected device's model name
"""
if (self.device == None):
raise NotConnected
return self.mtp.LIBMTP_Get_Modelname(self.device)
def get_deviceversion(self):
"""
Returns the connected device's version (such as
firmware/hardware version)
@rtype: string
@return: Returns the connect device's version
information
"""
if (self.device == None):
raise NotConnected
return self.mtp.LIBMTP_Get_Deviceversion(self.device)
def get_filelisting(self, callback=None):
"""
Returns the connected device's file listing as a tuple,
containing L{LIBMTP_File} objects.
@type callback: function or None
@param callback: The function provided to libmtp to
receive callbacks from ptp. Callback must take two
arguments, total and sent (in bytes)
@rtype: tuple
@return: Returns the connect device file listing tuple
"""
if (self.device == None):
raise NotConnected
if (callback != None):
callback = Progressfunc(callback)
files = self.mtp.LIBMTP_Get_Filelisting_With_Callback(self.device, callback, None)
ret = []
next = files
while next:
ret.append(next.contents)
if (next.contents.next == None):
break
next = next.contents.next
return ret
def get_filetype_description(self, filetype):
"""
Returns the description of the filetype
@type filetype: int
@param filetype: The MTP filetype integer
@rtype: string
@return: The file type information
"""
if (self.device == None):
raise NotConnected
return self.mtp.LIBMTP_Get_Filetype_Description(filetype)
def
|
pocketone/django-shoppy
|
shoppy/shop/management/commands/shoppy_updatePermissions.py
|
Python
|
bsd-3-clause
| 893
| 0.003359
|
#!/usr/bin/python
# -*- coding: iso-8859-15 -*-
from django.core.management.base import NoArgsCommand
class Command(NoArgsCommand):
help = "Update permissions without using syncdb."
    def handle_noargs(self, **options):
from django.core.management import setup_environ
try:
import settings
except ImportError:
import sys
            sys.stderr.write("Couldn't find the settings.py module.")
sys.exit(1)
setup_environ(settings)
# Add any missing content types
from django.contrib.contenttypes.management import update_all_contenttypes
update_all_contenttypes()
# Add any missing permissions
from django.contrib.auth.management import create_permissions
from django.db.models import get_apps
for app in get_apps():
create_permissions(app, None, 2)
|
claimsmall/waterbug
|
app_config.py
|
Python
|
mit
| 5,956
| 0.002015
|
#!/usr/bin/env python
"""
Project-wide application configuration.
DO NOT STORE SECRETS, PASSWORDS, ETC. IN THIS FILE.
They will be exposed to users. Use environment variables instead.
See get_secrets() below for a fast way to access them.
"""
import os
from authomatic.providers import oauth2
from authomatic import Authomatic
"""
NAMES
"""
# Project name to be used in urls
# Use dashes, not underscores!
PROJECT_SLUG = 'twitterbug'
# Project name to be used in file paths
PROJECT_FILENAME = 'twitterbug'
# The name of the repository containing the source
REPOSITORY_NAME = 'waterbug'
GITHUB_USERNAME = 'nprapps'
REPOSITORY_URL = 'git@github.com:%s/%s.git' % (GITHUB_USERNAME, REPOSITORY_NAME)
REPOSITORY_ALT_URL = None # 'git@bitbucket.org:nprapps/%s.git' % REPOSITORY_NAME'
# Project name used for assets rig
# Should stay the same, even if PROJECT_SLUG changes
ASSETS_SLUG = 'watermarker'
"""
DEPLOYMENT
"""
PRODUCTION_S3_BUCKET = {
'bucket_name': 'apps.npr.org',
'region': 'us-east-1'
}
STAGING_S3_BUCKET = {
'bucket_name': 'stage-apps.npr.org',
'region': 'us-east-1'
}
ASSETS_S3_BUCKET = {
'bucket_name': 'assets.apps.npr.org',
'region': 'us-east-1'
}
FILE_SERVER = 'tools.apps.npr.org'
DEFAULT_MAX_AGE = 20
PRODUCTION_SERVERS = ['cron.nprapps.org']
STAGING_SERVERS = ['cron-staging.nprapps.org']
# Should code be deployed to the web/cron servers?
DEPLOY_TO_SERVERS = False
SERVER_USER = 'ubuntu'
SERVER_PYTHON = 'python2.7'
SERVER_PROJECT_PATH = '/home/%s/apps/%s' % (SERVER_USER, PROJECT_FILENAME)
SERVER_REPOSITORY_PATH = '%s/repository' % SERVER_PROJECT_PATH
SERVER_VIRTUALENV_PATH = '%s/virtualenv' % SERVER_PROJECT_PATH
# Should the crontab file be installed on the servers?
# If True, DEPLOY_TO_SERVERS must also be True
DEPLOY_CRONTAB = False
# Should the service configurations be installed on the servers?
# If True, DEPLOY_TO_SERVERS must also be True
DEPLOY_SERVICES = False
UWSGI_SOCKET_PATH = '/tmp/%s.uwsgi.sock' % PROJECT_FILENAME
# Services are the server-side services we want to enable and configure.
# A three-tuple following this format:
# (service name, service deployment path, service config file extension)
SERVER_SERVICES = [
('app', SERVER_REPOSITORY_PATH, 'ini'),
('uwsgi', '/etc/init', 'conf'),
('nginx', '/etc/nginx/locations-enabled', 'conf'),
]
# These variables will be set at runtime. See configure_targets() below
S3_BUCKET = None
S3_BASE_URL = None
S3_DEPLOY_URL = None
SERVERS = []
SERVER_BASE_URL = None
SERVER_LOG_PATH = None
DEBUG = True
"""
COPY EDITING
"""
COPY_GOOGLE_DOC_KEY = '0AqjLQISCZzBkdGJqbFQ5TGhVOEpnRkptSFJvaE1FRUE'
COPY_PATH = 'data/copy.xlsx'
"""
SHARING
"""
SHARE_URL = 'http://%s/%s/' % (PRODUCTION_S3_BUCKET['bucket_name'], PROJECT_SLUG)
"""
ADS
"""
NPR_DFP = {
'STORY_ID': '1002',
'TARGET': 'homepage',
'ENVIRONMENT': 'NPRTEST',
'TESTSERVER': 'false'
}
"""
SERVICES
"""
GOOGLE_ANALYTICS = {
'ACCOUNT_ID': 'UA-5828686-4',
'DOMAIN': PRODUCTION_S3_BUCKET['bucket_name'],
'TOPICS': '' # e.g. '[1014,3,1003,1002,1001]'
}
DISQUS_API_KEY = 'tIbSzEhGBE9NIptbnQWn4wy1gZ546CsQ2IHHtxJiYAceyyPoAkDkVnQfCifmCaQW'
DISQUS_UUID = '$NEW_DISQUS_UUID'
"""
OAUTH
"""
GOOGLE_OAUTH_CREDENTIALS_PATH = '~/.google_oauth_credentials'
authomatic_config = {
'google': {
'id': 1,
'class_': oauth2.Google,
        'consumer_key': os.environ.get('GOOGLE_OAUTH_CLIENT_ID'),
'consumer_secret': os.environ.get('GOOGLE_OAUTH_CONSUMER_SECRET'),
'scope': ['https://www.googleapis.com/auth/drive', 'https://www.googleapis.com/auth/userinfo.email'],
'offline': True,
},
}
authomatic = Authomatic(authomatic_config, os.environ.get('AUTHOMATIC_SALT'))
DEFAULT_IMAGE = 'assets/test-kitten.jpg'
"""
Utilities
"""
def get_secrets():
"""
    A method for accessing our secrets.
"""
secrets_dict = {}
for k,v in os.environ.items():
if k.startswith(PROJECT_SLUG):
k = k[len(PROJECT_SLUG) + 1:]
secrets_dict[k] = v
return secrets_dict
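# For example (illustrative): with PROJECT_SLUG == 'twitterbug', an environment
# variable named 'twitterbug_TWITTER_API_KEY' shows up here as
# get_secrets()['TWITTER_API_KEY'].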
def configure_targets(deployment_target):
"""
Configure deployment targets. Abstracted so this can be
overriden for rendering before deployment.
"""
global S3_BUCKET
global S3_BASE_URL
global S3_DEPLOY_URL
global SERVERS
global SERVER_BASE_URL
global SERVER_LOG_PATH
global DEBUG
global DEPLOYMENT_TARGET
global DISQUS_SHORTNAME
global ASSETS_MAX_AGE
if deployment_target == 'production':
S3_BUCKET = PRODUCTION_S3_BUCKET
S3_BASE_URL = 'http://%s/%s' % (S3_BUCKET['bucket_name'], PROJECT_SLUG)
S3_DEPLOY_URL = 's3://%s/%s' % (S3_BUCKET['bucket_name'], PROJECT_SLUG)
SERVERS = PRODUCTION_SERVERS
SERVER_BASE_URL = 'http://%s/%s' % (SERVERS[0], PROJECT_SLUG)
SERVER_LOG_PATH = '/var/log/%s' % PROJECT_FILENAME
DISQUS_SHORTNAME = 'npr-news'
DEBUG = False
ASSETS_MAX_AGE = 86400
elif deployment_target == 'staging':
S3_BUCKET = STAGING_S3_BUCKET
S3_BASE_URL = 'http://%s/%s' % (S3_BUCKET['bucket_name'], PROJECT_SLUG)
S3_DEPLOY_URL = 's3://%s/%s' % (S3_BUCKET['bucket_name'], PROJECT_SLUG)
SERVERS = STAGING_SERVERS
SERVER_BASE_URL = 'http://%s/%s' % (SERVERS[0], PROJECT_SLUG)
SERVER_LOG_PATH = '/var/log/%s' % PROJECT_FILENAME
DISQUS_SHORTNAME = 'nprviz-test'
DEBUG = True
ASSETS_MAX_AGE = 20
else:
S3_BUCKET = None
S3_BASE_URL = 'http://127.0.0.1:8000'
S3_DEPLOY_URL = None
SERVERS = []
SERVER_BASE_URL = 'http://127.0.0.1:8001/%s' % PROJECT_SLUG
SERVER_LOG_PATH = '/tmp'
DISQUS_SHORTNAME = 'nprviz-test'
DEBUG = True
ASSETS_MAX_AGE = 20
DEPLOYMENT_TARGET = deployment_target
"""
Run automated configuration
"""
DEPLOYMENT_TARGET = os.environ.get('DEPLOYMENT_TARGET', None)
configure_targets(DEPLOYMENT_TARGET)
|
47lining/ansible-modules-core
|
network/basics/get_url.py
|
Python
|
gpl-3.0
| 11,195
| 0.004288
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# see examples/playbooks/get_url.yml
import shutil
import datetime
import re
import tempfile
DOCUMENTATION = '''
---
module: get_url
short_description: Downloads files from HTTP, HTTPS, or FTP to node
description:
- Downloads files from HTTP, HTTPS, or FTP to the remote server. The remote
server I(must) have direct access to the remote resource.
- By default, if an environment variable C(<protocol>_proxy) is set on
the target host, requests will be sent through that proxy. This
behaviour can be overridden by setting a variable for this task
(see `setting the environment
<http://docs.ansible.com/playbooks_environment.html>`_),
or by using the use_proxy option.
version_added: "0.6"
options:
url:
description:
- HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path
required: true
default: null
aliases: []
dest:
description:
- absolute path of where to download the file to.
- If C(dest) is a directory, either the server provided filename or, if
none provided, the base name of the URL on the remote server will be
used. If a directory, C(force) has no effect.
If C(dest) is a directory, the file will always be
downloaded (regardless of the force option), but replaced only if the contents changed.
required: true
default: null
force:
description:
- If C(yes) and C(dest) is not a directory, will download the file every
time and replace the file if the contents change. If C(no), the file
will only be downloaded if the destination does not exist. Generally
should be C(yes) only for small local files. Prior to 0.6, this module
behaved as if C(yes) was the default.
version_added: "0.7"
required: false
choices: [ "yes", "no" ]
default: "no"
aliases: [ "thirsty" ]
sha256sum:
description:
- If a SHA-256 checksum is passed to this parameter, the digest of the
destination file will be calculated after it is downloaded to ensure
its integrity and verify that the transfer completed successfully.
version_added: "1.3"
required: false
default: null
use_proxy:
description:
- if C(no), it will not use a proxy, even if one is defined in
an environment variable on the target hosts.
required: false
default: 'yes'
choices: ['yes', 'no']
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
timeout:
description:
- Timeout for URL request
required: false
default: 10
version_added: '1.8'
url_username:
description:
- The username for use in HTTP basic authentication. This parameter can be used
without C(url_password) for sites that allow empty passwords.
required: false
version_added: '1.6'
url_password:
description:
- The password for use in HTTP basic authentication. If the C(url_username)
parameter is not specified, the C(url_password) parameter will not be used.
    required: false
    version_added: '1.6'
others:
description:
- all arguments accepted by the M(file) module also work here
required: false
notes:
    - This module doesn't yet support configuration for proxies.
# informational: requirements for nodes
requirements: [ urllib2, urlparse ]
author: Jan-Piet Mens
'''
EXAMPLES='''
- name: download foo.conf
get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf mode=0440
- name: download file with sha256 check
get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf sha256sum=b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c
'''
try:
import hashlib
HAS_HASHLIB=True
except ImportError:
HAS_HASHLIB=False
# ==============================================================
# url handling
def url_filename(url):
fn = os.path.basename(urlparse.urlsplit(url)[2])
if fn == '':
return 'index.html'
return fn
def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10):
"""
Download data from the url and store in a temporary file.
Return (tempfile, info about the request)
"""
rsp, info = fetch_url(module, url, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout)
if info['status'] == 304:
module.exit_json(url=url, dest=dest, changed=False, msg=info.get('msg', ''))
# create a temporary file and copy content to do checksum-based replacement
if info['status'] != 200:
module.fail_json(msg="Request failed", status_code=info['status'], response=info['msg'], url=url, dest=dest)
fd, tempname = tempfile.mkstemp()
f = os.fdopen(fd, 'wb')
try:
shutil.copyfileobj(rsp, f)
except Exception, err:
os.remove(tempname)
module.fail_json(msg="failed to create temporary content file: %s" % str(err))
f.close()
rsp.close()
return tempname, info
def extract_filename_from_headers(headers):
"""
Extracts a filename from the given dict of HTTP headers.
Looks for the content-disposition header and applies a regex.
Returns the filename if successful, else None."""
cont_disp_regex = 'attachment; ?filename="?([^"]+)'
res = None
if 'content-disposition' in headers:
cont_disp = headers['content-disposition']
match = re.match(cont_disp_regex, cont_disp)
if match:
res = match.group(1)
# Try preventing any funny business.
res = os.path.basename(res)
return res
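# Illustrative behaviour (not part of the original module):
#   extract_filename_from_headers({'content-disposition': 'attachment; filename="report.csv"'})
# returns 'report.csv'; headers without a content-disposition entry return None.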
# ==============================================================
# main
def main():
argument_spec = url_argument_spec()
argument_spec.update(
url = dict(required=True),
dest = dict(required=True),
sha256sum = dict(default=''),
timeout = dict(required=False, type='int', default=10),
)
module = AnsibleModule(
# not checking because of daisy chain to file module
argument_spec = argument_spec,
add_file_common_args=True
)
url = module.params['url']
dest = os.path.expanduser(module.params['dest'])
force = module.params['force']
sha256sum = module.params['sha256sum']
use_proxy = module.params['use_proxy']
timeout = module.params['timeout']
dest_is_dir = os.path.isdir(dest)
last_mod_time = None
if not dest_is_dir and os.path.exists(dest):
if not force:
module.exit_json(msg="file already exists", dest=dest, url=url, changed=False)
# If the file already exists, prepare the last modified time for the
# request.
mtime = os.path.getmtime(dest)
last_mod_time = datetime.datetime.utcfromtimestamp(mtime)
# download to tmpsrc
tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force, timeout)
# Now the request has completed, we can finally generate the final
# destination file name from the info dict.
if dest_is_dir:
filename = extract_filename_from_headers(info)
if not filename:
# Fall back to extracting the filename from the URL.
# Pluck the URL from the info, since a redirect coul
|
dotKom/onlineweb4
|
apps/sso/endpoints.py
|
Python
|
mit
| 1,048
| 0.001908
|
# -*- coding: utf8 -*-
#
# Created by 'myth' on 6/27/15
from django.http import JsonResponse
from oauth2_provider.decorators import protected_resource
from apps.sso.userinfo import Onlineweb4Userinfo
USERINFO_SCOPES = [
'authentication.onlineuser.username.read',
'authentication.onlineuser.first_name.read',
'authentication.onlineuser.last_name.read',
'authentication.onlineuser.email.read',
'authentication.onlineuser.is_member.read',
'authentication.onlineuser.is_staff.read',
'authentication.onlineuser.is_superuser.read',
'authentication.onlineuser.field_of_study.read',
'authentication.onlineuser.nickname.read',
'authentication.onlineuser.rfid.read'
]
@protected_resource(USERINFO_SCOPES)
def oauth2_provider_userinfo(request):
"""
    Basic user information, based on the Bearer Token provided by an SSO application.
    :param request: The Django Request object
:return: An HTTP response
"""
return JsonResponse(status=200, data=Onlineweb4Userinfo(request.user).oauth2())
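# Illustrative request (sketch; the mount point of this view is an assumption, and the
# token must cover the scopes listed above):
#   curl -H "Authorization: Bearer <access_token>" https://<host>/sso/user/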
|
uclouvain/osis
|
ddd/logic/preparation_programme_annuel_etudiant/domain/service/get_programme_inscription_cours.py
|
Python
|
agpl-3.0
| 8,497
| 0.002707
|
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2022 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import itertools
from typing import List, Union
from ddd.logic.preparation_programme_annuel_etudiant.commands import GetProgrammeInscriptionCoursCommand
from ddd.logic.preparation_programme_annuel_etudiant.domain.model.groupement_ajuste_inscription_cours import \
GroupementAjusteInscriptionCours
from ddd.logic.preparation_programme_annuel_etudiant.domain.model.unite_enseignement_ajoutee import \
UniteEnseignementAjoutee
from ddd.logic.preparation_programme_annuel_etudiant.domain.service.i_catalogue_formations import \
ICatalogueFormationsTranslator
from ddd.logic.preparation_programme_annuel_etudiant.domain.service.i_catalogue_unites_enseignement import \
ICatalogueUnitesEnseignementTranslator
from ddd.logic.preparation_programme_annuel_etudiant.dtos import ProgrammeInscriptionCoursDTO, \
GroupementInscriptionCoursDTO, UniteEnseignementProgrammeDTO, ContenuGroupementCatalogueDTO, \
UniteEnseignementAjouteeDTO, UniteEnseignementCatalogueDTO
from ddd.logic.preparation_programme_annuel_etudiant.repository.i_groupement_ajuste_inscription_cours import \
IGroupementAjusteInscriptionCoursRepository
from education_group.ddd.domain.group import GroupIdentity
from osis_common.ddd import interface
class GetProgrammeInscriptionCours(interface.DomainService):
@classmethod
def get_programme_inscription_cours(
cls,
cmd: 'GetProgrammeInscriptionCoursCommand',
groupement_ajuste_repository: 'IGroupementAjusteInscriptionCoursRepository',
catalogue_formations_translator: 'ICatalogueFormationsTranslator',
catalogue_unites_enseignement_translator: 'ICatalogueUnitesEnseignementTranslator'
):
formation = catalogue_formations_translator.get_formation(
code_programme=cmd.code_programme,
annee=cmd.annee,
)
groupements_ajustes = groupement_ajuste_repository.search(
groupement_id=GroupIdentity(
year=cmd.annee,
code=cmd.code_programme,
)
)
unite_enseignements_ajoutes_dto = cls.rechercher_unites_enseignement_ajoutees_catalogue_dto(
groupements_ajustes,
catalogue_unites_enseignement_translator
)
return ProgrammeInscriptionCoursDTO(
uuid='uuid-1234',
code=formation.racine.groupement_contenant.code,
annee=cmd.annee,
sigle=formation.sigle,
version=formation.version,
transition_name=formation.transition_name,
intitule_complet_formation=formation.intitule_formation,
racine=cls.__build_contenu(
[formation.racine],
groupements_ajustes,
unite_enseignements_ajoutes_dto
)[0],
)
@classmethod
def rechercher_unites_enseignement_ajoutees_catalogue_dto(
cls,
groupements_ajustes: List['GroupementAjusteInscriptionCours'],
catalogue_unites_enseignement_translator: 'ICatalogueUnitesEnseignementTranslator'
) -> List['UniteEnseignementCatalogueDTO']:
unites_enseignement_ajoutees = itertools.chain.from_iterable(
[
groupement.unites_enseignement_ajoutees
for groupement in groupements_ajustes
]
)
entity_id_unites_enseignement = [
unite_enseignement.unite_enseignement_identity
for unite_enseignement in unites_enseignement_ajoutees
]
return catalogue_unites_enseignement_translator.search(
entity_ids=entity_id_unites_enseignement
)
@classmethod
def __build_contenu(
cls,
contenu_ordonne_catalogue: List[Union['UniteEnseignementCatalogueDTO', 'ContenuGroupementCatalogueDTO']],
groupements_ajustes: List['GroupementAjusteInscriptionCours'],
unite_enseignement_ajoutes_dto: List['UniteEnseignementCatalogueDTO']
) -> List[Union['UniteEnseignementProgrammeDTO', 'GroupementInscriptionCoursDTO']]:
contenu = []
for element in contenu_ordonne_catalogue:
if isinstance(element, UniteEnseignementCatalogueDTO):
contenu.append(
UniteEnseignementProgrammeDTO(
code=element.code,
intitule=element.intitule_complet,
obligatoire=element.obligatoire,
bloc=element.bloc,
)
)
elif isinstance(element, ContenuGroupementCatalogueDTO):
contenu.append(
GroupementInscriptionCoursDTO(
intitule_complet=element.groupement_contenant.intitule_complet,
obligatoire=element.groupement_contenant.obligatoire,
code=element.groupement_contenant.code,
unites_enseignement_ajoutees=cls.__build_unite_enseignement_ajoute_dtos(
element,
groupements_ajustes,
unite_enseignement_ajoutes_dto
),
contenu=cls.__build_contenu(
element.contenu_ordonne_catalogue,
groupements_ajustes,
unite_enseignement_ajoutes_dto
)
)
)
return contenu
@classmethod
def __build_unite_enseignement_ajoute_dtos(
cls,
groupement: 'ContenuGroupementCatalogueDTO',
groupements_ajustes: List['GroupementAjusteInscriptionCours'],
unite_enseignement_ajoutes_dto: List['UniteEnseignementCatalogueDTO']
) -> List['UniteEnseignementAjouteeDTO']:
groupement_ajuste_correspondant = next(
(
groupement_ajuste
for groupement_ajuste in groupements_ajustes
if groupement_ajuste.groupement_id.code == groupement.groupement_contenant.code
),
None
)
unites_enseignemnts_ajoutes = groupement_ajuste_correspondant.unites_enseignement_ajoutees if \
groupement_ajuste_correspondant else []
return [
cls.__build_unite_enseignement_ajoute_dto(unite_enseignement, unite_enseignement_ajoutes_dto)
for unite_enseignement in unites_enseignemnts_ajoutes
]
@classmethod
def __build_unite_enseignement_ajoute_dto(
cls,
unite_enseignement_ajoute: 'UniteEnseignementAjoutee',
unite_enseignement_dtos: List['UniteEnseignementCatalogueDTO']
) -> 'UniteEnseignementAjouteeDTO':
unite_enseignement_dto_correspondant = next(
dto for dto in unite_enseignement_dtos if dto.code == unite_enseignement_ajoute.co
|
cwmartin/rez
|
src/rez/release_vcs.py
|
Python
|
lgpl-3.0
| 8,750
| 0.000457
|
from rez.exceptions import ReleaseVCSError
from rez.packages_ import get_developer_package
from rez.util import which
from rez.utils.system import popen
from rez.utils.logging_ import print_debug
from rez.utils.filesystem import walk_up_dirs
from pipes import quote
import subprocess
def get_release_vcs_types():
"""Returns the available VCS implementations - git, hg etc."""
from rez.plugin_managers import plugin_manager
return plugin_manager.get_plugins('release_vcs')
def create_release_vcs(path, vcs_name=None):
"""Return a new release VCS that can release from this source path."""
from rez.plugin_managers import plugin_manager
vcs_types = get_release_vcs_types()
if vcs_name:
if vcs_name not in vcs_types:
raise ReleaseVCSError("Unknown version control system: %r" % vcs_name)
cls = plugin_manager.get_plugin_class('release_vcs', vcs_name)
        return cls(path)
classes_by_level = {}
for vcs_name in vcs_types:
        cls = plugin_manager.get_plugin_class('release_vcs', vcs_name)
result = cls.find_vcs_root(path)
if not result:
continue
vcs_path, levels_up = result
classes_by_level.setdefault(levels_up, []).append((cls, vcs_path))
if not classes_by_level:
raise ReleaseVCSError("No version control system for package "
"releasing is associated with the path %s" % path)
# it's ok to have multiple results, as long as there is only one at the
# "closest" directory up from this dir - ie, if we start at:
# /blah/foo/pkg_root
# and these dirs exist:
# /blah/.hg
# /blah/foo/.git
# ...then this is ok, because /blah/foo/.git is "closer" to the original
# dir, and will be picked. However, if these two directories exist:
# /blah/foo/.git
# /blah/foo/.hg
# ...then we error, because we can't decide which to use
lowest_level = sorted(classes_by_level)[0]
clss = classes_by_level[lowest_level]
if len(clss) > 1:
clss_str = ", ".join(x[0].name() for x in clss)
raise ReleaseVCSError("Several version control systems are associated "
"with the path %s: %s. Use rez-release --vcs to "
"choose." % (path, clss_str))
else:
cls, vcs_root = clss[0]
return cls(pkg_root=path, vcs_root=vcs_root)
class ReleaseVCS(object):
"""A version control system (VCS) used to release Rez packages.
"""
def __init__(self, pkg_root, vcs_root=None):
if vcs_root is None:
result = self.find_vcs_root(pkg_root)
if not result:
raise ReleaseVCSError("Could not find %s repository for the "
"path %s" % (self.name(), pkg_root))
vcs_root = result[0]
else:
assert(self.is_valid_root(vcs_root))
self.vcs_root = vcs_root
self.pkg_root = pkg_root
self.package = get_developer_package(pkg_root)
self.type_settings = self.package.config.plugins.release_vcs
self.settings = self.type_settings.get(self.name())
@classmethod
def name(cls):
"""Return the name of the VCS type, eg 'git'."""
raise NotImplementedError
@classmethod
def find_executable(cls, name):
exe = which(name)
if not exe:
raise ReleaseVCSError("Couldn't find executable '%s' for VCS '%s'"
% (name, cls.name()))
return exe
@classmethod
def is_valid_root(cls, path):
"""Return True if the given path is a valid root directory for this
version control system.
Note that this is different than whether the path is under the
control of this type of vcs; to answer that question,
use find_vcs_root
"""
raise NotImplementedError
@classmethod
def search_parents_for_root(cls):
"""Return True if this vcs type should check parent directories to
find the root directory
"""
raise NotImplementedError
@classmethod
def find_vcs_root(cls, path):
"""Try to find a version control root directory of this type for the
given path.
If successful, returns (vcs_root, levels_up), where vcs_root is the
path to the version control root directory it found, and levels_up is an
integer indicating how many parent directories it had to search through
to find it, where 0 means it was found in the indicated path, 1 means it
        was found in that path's parent, etc. If not successful, returns None
"""
if cls.search_parents_for_root():
valid_dirs = walk_up_dirs(path)
else:
valid_dirs = [path]
for i, current_path in enumerate(valid_dirs):
if cls.is_valid_root(current_path):
return current_path, i
return None
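    # Illustrative note (not from the original source): for a hypothetical
    # git-backed subclass with a repository rooted at /work/proj and a package
    # under /work/proj/pkgs/foo, find_vcs_root("/work/proj/pkgs/foo") would
    # return ("/work/proj", 2) -- the root it found plus the two parent
    # directories it had to walk through to reach it.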
def validate_repostate(self):
"""Ensure that the VCS working copy is up-to-date."""
raise NotImplementedError
def get_current_revision(self):
"""Get the current revision, this can be any type (str, dict etc)
appropriate to your VCS implementation.
Note:
You must ensure that a revision contains enough information to
clone/export/checkout the repo elsewhere - otherwise you will not
be able to implement `export`.
"""
raise NotImplementedError
def get_changelog(self, previous_revision=None, max_revisions=None):
"""Get the changelog text since the given revision.
If previous_revision is not an ancestor (for example, the last release
was from a different branch) you should still return a meaningful
changelog - perhaps include a warning, and give changelog back to the
last common ancestor.
Args:
previous_revision: The revision to give the changelog since. If
None, give the entire changelog.
Returns:
Changelog, as a string.
"""
raise NotImplementedError
def tag_exists(self, tag_name):
"""Test if a tag exists in the repo.
Args:
tag_name (str): Tag name to check for.
Returns:
bool: True if the tag exists, False otherwise.
"""
raise NotImplementedError
def create_release_tag(self, tag_name, message=None):
"""Create a tag in the repo.
Create a tag in the repository representing the release of the
given version.
Args:
tag_name (str): Tag name to write to the repo.
message (str): Message string to associate with the release.
"""
raise NotImplementedError
@classmethod
def export(cls, revision, path):
"""Export the repository to the given path at the given revision.
Note:
The directory at `path` must not exist, but the parent directory
must exist.
Args:
revision (object): Revision to export; current revision if None.
path (str): Directory to export the repository to.
"""
raise NotImplementedError
def _cmd(self, *nargs):
"""Convenience function for executing a program such as 'git' etc."""
cmd_str = ' '.join(map(quote, nargs))
if self.package.config.debug("package_release"):
print_debug("Running command: %s" % cmd_str)
p = popen(nargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=self.pkg_root)
out, err = p.communicate()
if p.returncode:
print_debug("command stdout:")
print_debug(out)
print_debug("command stderr:")
print_debug(err)
raise ReleaseVCSError("command failed: %s\n%s" % (cmd_str, err))
out = out.strip()
if out:
return [x.rstrip() for x in out.split('\n')]
else:
return []
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the
|
anubhavshrimal/Data_Structures_Algorithms_In_Python
|
Tree/BinarySearchTree/Lowest_Common_Ancestor.py
|
Python
|
mit
| 1,935
| 0
|
# Find the Lowest Common Ancestor (LCA) in a Binary Search Tree
# A Binary Search Tree node
class Node:
# Constructor to initialise node
def __init__(self, data):
self.data = data
self.left = None
self.right = None
class BST:
def __init__(self):
self.root = None
def insert_node(self, data):
if self.root is None:
self.root = Node(data)
else:
self._insert(data, self.root)
def _insert(self, data, current_node):
if data <= current_node.data:
if current_node.left is not None:
self._insert(data, current_node.left)
else:
current_node.left = Node(data)
else:
if current_node.right is not None:
self._insert(data, current_node.right)
else:
current_node.right = Node(data)
def inorder(self):
current_node = self.root
self._inorder(current_node)
print('End')
def _inorder(self, current_node):
if current_node is None:
return
self._inorder(current_node.left)
print(current_node.data, " -> ", end='')
self._inorder(current_node.right)
# assuming both nodes are present in the tree
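# Walk down from the root: when both values are greater than the current node
# the LCA must lie in the right subtree, when both are smaller it must lie in
# the left subtree, and otherwise the current node separates the two values
# (or equals one of them) and is therefore their lowest common ancestor.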
def lca_bst(root, value1, value2):
while root is not None:
if value2 > root.data < value1:
            root = root.right
elif value2 < root.data > value1:
root = root.left
else:
return root.data
if __name__ == '__main__':
tree = BST()
tree.insert_node(6)
tree.insert_node(8)
tree.insert_node(9)
tree.insert_node(6)
tree.insert_node(5)
tree.insert_node(7)
tree.insert_node(3)
tree.insert_node(2)
tree.insert_node(4)
print(lca_bst(tree.root, 4, 2))
"""
given tree:
                6
          6           8
      5           7       9
    3
  2     4
"""
|
gangadhar-kadam/sapphite_lib
|
webnotes/model/doc.py
|
Python
|
mit
| 20,173
| 0.044218
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# MIT License. See license.txt
from __future__ import unicode_literals
"""
Contains the Document class representing an object / record
"""
_toc = ["webnotes.model.doc.Document"]
import webnotes
import webnotes.model.meta
import MySQLdb
from webnotes.utils import *
valid_fields_map = {}
class Document:
"""
The wn(meta-data)framework equivalent of a Database Record.
	Stores, retrieves, and updates the record in the corresponding table.
Runs the triggers required.
The `Document` class represents the basic Object-Relational Mapper (ORM). The object type is defined by
`DocType` and the object ID is represented by `name`::
	Please note the anomaly in the Web Notes Framework that `ID` is always called `name`
If both `doctype` and `name` are specified in the constructor, then the object is loaded from the database.
If only `doctype` is given, then the object is not loaded
	If `fielddata` is specified, then the object is created from the given dictionary.
**Note 1:**
The getter and setter of the object are overloaded to map to the fields of the object that
are loaded when it is instantiated.
For example: doc.name will be the `name` field and doc.owner will be the `owner` field
**Note 2 - Standard Fields:**
* `name`: ID / primary key
* `owner`: creator of the record
* `creation`: datetime of creation
* `modified`: datetime of last modification
* `modified_by` : last updating user
* `docstatus` : Status 0 - Saved, 1 - Submitted, 2- Cancelled
* `parent` : if child (table) record, this represents the parent record
* `parenttype` : type of parent record (if any)
* `parentfield` : table fieldname of parent record (if any)
* `idx` : Index (sequence) of the child record
"""
def __init__(self, doctype = None, name = None, fielddata = None, prefix='tab'):
self._roles = []
self._perms = []
self._user_defaults = {}
self._prefix = prefix
if isinstance(doctype, dict):
fielddata = doctype
doctype = None
if fielddata:
self.fields = webnotes._dict(fielddata)
else:
self.fields = webnotes._dict()
if not self.fields.has_key('name'):
self.fields['name']='' # required on save
if not self.fields.has_key('doctype'):
self.fields['doctype']='' # required on save
if not self.fields.has_key('owner'):
self.fields['owner']='' # required on save
if doctype:
self.fields['doctype'] = doctype
if name:
self.fields['name'] = name
self.__initialized = 1
if (doctype and name):
self._loadfromdb(doctype, name)
else:
if not fielddata:
self.fields['__islocal'] = 1
if not self.fields.docstatus:
self.fields.docstatus = 0
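	# Illustrative usage sketch (the doctype and field names are hypothetical,
	# not taken from the original source):
	#   doc = Document('Task', 'TASK-0001')  # loads the record from the database
	#   print doc.owner                       # field read via the overloaded getattr
	#   doc.status = 'Open'                   # field write via the overloaded setattr
	#   new_doc = Document('Task')            # unsaved record, marked __islocal = 1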
def __nonzero__(self):
return True
def __str__(self):
return str(self.fields)
def __repr__(self):
return repr(self.fields)
def __unicode__(self):
return unicode(self.fields)
def __eq__(self, other):
if isinstance(other, Document):
return self.fields == other.fields
else:
return False
def __getstate__(self):
return self.fields
def __setstate__(self, d):
self.fields = d
def encode(self, encoding='utf-8'):
"""convert all unicode values to utf-8"""
from webnotes.utils import encode_dict
encode_dict(self.fields)
def _loadfromdb(self, doctype = None, name = None):
if name: self.name = name
if doctype: self.doctype = doctype
is_single = False
try:
is_single = webnotes.model.meta.is_single(self.doctype)
except Exception, e:
pass
if is_single:
self._loadsingle()
else:
try:
dataset = webnotes.conn.sql('select * from `%s%s` where name="%s"' % (self._prefix, self.doctype, self.name.replace('"', '\"')))
except MySQLdb.ProgrammingError, e:
if e.args[0]==1146:
dataset = None
else:
raise e
if not dataset:
raise webnotes.DoesNotExistError, '[WNF] %s %s does not exist' % (self.doctype, self.name)
self._load_values(dataset[0], webnotes.conn.get_description())
def _load_values(self, data, description):
if '__islocal' in self.fields:
del self.fields['__islocal']
for i in range(len(description)):
v = data[i]
self.fields[description[i][0]] = webnotes.conn.convert_to_simple_type(v)
def _merge_values(self, data, description):
for i in range(len(description)):
v = data[i]
if v: # only if value, over-write
self.fields[description[i][0]] = webnotes.conn.convert_to_simple_type(v)
def _loadsingle(self):
self.name = self.doctype
self.fields.update(getsingle(self.doctype))
def __setattr__(self, name, value):
# normal attribute
if not self.__dict__.has_key('_Document__initialized'):
self.__dict__[name] = value
elif self.__dict__.has_key(name):
self.__dict__[name] = value
else:
# field attribute
f = self.__dict__['fields']
f[name] = value
def __getattr__(self, name):
if self.__dict__.has_key(name):
return self.__dict__[name]
elif self.fields.has_key(name):
return self.fields[name]
else:
return ''
def _get_amended_name(self):
am_id = 1
am_prefix = self.amended_from
if webnotes.conn.sql('select amended_from from `tab%s` where name = "%s"' % (self.doctype, self.amended_from))[0][0] or '':
am_id = cint(self.amended_from.split('-')[-1]) + 1
am_prefix = '-'.join(self.amended_from.split('-')[:-1]) # except the last hyphen
self.name = am_prefix + '-' + str(am_id)
def _set_name(self, autoname, istable):
self.localname = self.name
# get my object
import webnotes.model.code
so = webnotes.model.code.get_server_obj(self, [])
# amendments
if self.amended_from:
self._get_amended_name()
# by method
elif so and hasattr(so, 'autoname'):
r = webnotes.model.code.run_server_obj(so, 'autoname')
if r: return r
# based on a field
elif autoname and autoname.startswith('field:'):
n = self.fields[autoname[6:]]
if not n:
				raise Exception, 'Name is required'
self.name = n.strip()
elif autoname and autoname.startswith("naming_series:"):
			self.set_naming_series()
if not self.naming_series:
webnotes.msgprint(webnotes._("Naming Series mandatory"), raise_exception=True)
self.name = make_autoname(self.naming_series+'.#####')
# based on expression
elif autoname and autoname.startswith('eval:'):
doc = self # for setting
self.name = eval(autoname[5:])
# call the method!
elif autoname and autoname!='Prompt':
self.name = make_autoname(autoname, self.doctype)
# given
elif self.fields.get('__newname',''):
self.name = self.fields['__newname']
# default name for table
elif istable:
self.name = make_autoname('#########', self.doctype)
# unable to determine a name, use a serial number!
if not self.name:
self.name = make_autoname('#########', self.doctype)
def set_naming_series(self):
if not self.naming_series:
# pick default naming series
from webnotes.model.doctype import get_property
self.naming_series = get_property(self.doctype, "options", "naming_series")
if self.naming_series:
self.naming_series = self.naming_series.split("\n")
self.naming_series = self.naming_series[0] or self.naming_series[1]
def _insert(self, autoname, istable, case='', make_autoname=1, keep_timestamps=False):
# set name
if make_autoname:
self._set_name(autoname, istable)
# validate name
self.name = validate_name(self.doctype, self.name, case)
# insert!
if not keep_timestamps:
if not self.owner:
self.owner = webnotes.session['user']
self.modified_by = webnotes.session['user']
if not self.creation:
self.creation = self.modified = now()
else:
self.modified = now()
webnotes.conn.sql("insert into `tab%(doctype)s`" % self.fields \
+ """ (name, owner, creation, modified, modified_by)
values (%(name)s, %(owner)s, %(creation)s, %(modified)s,
%(modified_by)s)""", self.fields)
def _update_single(self, link_list):
self.modified = now()
update_str, values = [], []
webnotes.conn.sql("delete from tabSingles where doctype='%s'" % self.doctype)
f
|
lukas-hetzenecker/home-assistant
|
homeassistant/components/plex/server.py
|
Python
|
apache-2.0
| 25,421
| 0.001377
|
"""Shared class to maintain Plex server instances."""
import logging
import ssl
import time
from urllib.parse import urlparse
from plexapi.client import PlexClient
from plexapi.exceptions import BadRequest, NotFound, Unauthorized
import plexapi.myplex
import plexapi.playqueue
import plexapi.server
from requests import Session
import requests.exceptions
from homeassistant.components.media_player import DOMAIN as MP_DOMAIN
from homeassistant.components.media_player.const import (
MEDIA_TYPE_EPISODE,
MEDIA_TYPE_MOVIE,
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_PLAYLIST,
MEDIA_TYPE_VIDEO,
)
from homeassistant.const import CONF_CLIENT_ID, CONF_TOKEN, CONF_URL, CONF_VERIFY_SSL
from homeassistant.core import callback
from homeassistant.helpers.debounce import Debouncer
from homeassistant.helpers.dispatcher import async_dispatcher_send
from .const import (
CONF_IGNORE_NEW_SHARED_USERS,
CONF_IGNORE_PLEX_WEB_CLIENTS,
CONF_MONITORED_USERS,
CONF_SERVER,
CONF_USE_EPISODE_ART,
DEBOUNCE_TIMEOUT,
DEFAULT_VERIFY_SSL,
DOMAIN,
GDM_DEBOUNCER,
GDM_SCANNER,
PLAYER_SOURCE,
PLEX_NEW_MP_SIGNAL,
PLEX_UPDATE_MEDIA_PLAYER_SESSION_SIGNAL,
PLEX_UPDATE_MEDIA_PLAYER_SIGNAL,
PLEX_UPDATE_SENSOR_SIGNAL,
PLEXTV_THROTTLE,
X_PLEX_DEVICE_NAME,
X_PLEX_PLATFORM,
X_PLEX_PRODUCT,
X_PLEX_VERSION,
)
from .errors import (
MediaNotFound,
NoServersFound,
ServerNotSpecified,
ShouldUpdateConfigEntry,
)
from .media_search import lookup_movie, lookup_music, lookup_tv
from .models import PlexSession
_LOGGER = logging.getLogger(__name__)
# Set default headers sent by plexapi
plexapi.X_PLEX_DEVICE_NAME = X_PLEX_DEVICE_NAME
plexapi.X_PLEX_PLATFORM = X_PLEX_PLATFORM
plexapi.X_PLEX_PRODUCT = X_PLEX_PRODUCT
plexapi.X_PLEX_VERSION = X_PLEX_VERSION
class PlexServer:
"""Manages a single Plex server connection."""
def __init__(
self, hass, server_config, known_server_id=None, options=None, entry_id=None
):
"""Initialize a Plex server instance."""
self.hass = hass
self.entry_id = entry_id
self.active_sessions = {}
self._plex_account = None
self._plex_server = None
self._created_clients = set()
self._known_clients = set()
self._known_idle = set()
self._url = server_config.get(CONF_URL)
self._token = server_config.get(CONF_TOKEN)
self._server_name = server_config.get(CONF_SERVER)
self._verify_ssl = server_config.get(CONF_VERIFY_SSL, DEFAULT_VERIFY_SSL)
self._server_id = known_server_id
self.options = options
self.server_choice = None
self._accounts = []
self._owner_username = None
self._plextv_clients = None
self._plextv_client_timestamp = 0
self._client_device_cache = {}
self._use_plex_tv = self._token is not None
self._version = None
self.async_update_platforms = Debouncer(
hass,
_LOGGER,
cooldown=DEBOUNCE_TIMEOUT,
immediate=True,
function=self._async_update_platforms,
).async_call
self.thumbnail_cache = {}
# Header conditionally added as it is not available in config entry v1
if CONF_CLIENT_ID in server_config:
plexapi.X_PLEX_IDENTIFIER = server_config[CONF_CLIENT_ID]
plexapi.myplex.BASE_HEADERS = plexapi.reset_base_headers()
plexapi.server.BASE_HEADERS = plexapi.reset_base_headers()
@property
def account(self):
"""Return a MyPlexAccount instance."""
if not self._plex_account and self._use_plex_tv:
try:
self._plex_account = plexapi.myplex.MyPlexAccount(token=self._token)
except (BadRequest, Unauthorized):
self._use_plex_tv = False
_LOGGER.error("Not authorized to access plex.tv with provided token")
raise
return self._plex_account
def plextv_clients(self):
"""Return available clients linked to Plex account."""
if self.account is None:
return []
now = time.time()
if now - self._plextv_client_timestamp > PLEXTV_THROTTLE:
self._plextv_client_timestamp = now
self._plextv_clients = [
x
for x in self.account.resources()
if "player" in x.provides and x.presence and x.publicAddressMatches
]
_LOGGER.debug(
"Current available clients from plex.tv: %s", self._plextv_clients
)
return self._plextv_clients
def connect(self):
"""Connect to a Plex server directly, obtaining direct URL if necessary."""
config_entry_update_needed = False
def _connect_with_token():
all_servers = [
x for x in self.account.resources() if "server" in x.provides
]
servers = [x for x in all_servers if x.presence] or all_servers
available_servers = [(x.name, x.clientIdentifier) for x in servers]
if not available_servers:
raise NoServersFound
if not self._server_name and len(available_servers) > 1:
raise ServerNotSpecified(available_servers)
self.server_choice = (
self._server_name if self._server_name else available_servers[0][0]
)
self._plex_server = self.account.resource(self.server_choice).connect(
timeout=10
)
def _connect_with_url():
session = None
if self._url.startswith("https") and not self._verify_ssl:
session = Session()
session.verify = False
self._plex_server = plexapi.server.PlexServer(
self._url, self._token, session
)
def _update_plexdirect_hostname():
matching_servers = [
x.name
for x in self.account.resources()
if x.clientIdentifier == self._server_id
]
if matching_servers:
self._plex_server = self.account.resource(matching_servers[0]).connect(
timeout=10
)
return True
_LOGGER.error("Attempt to update plex.direct hostname failed")
return False
if self._url:
try:
_connect_with_url()
except requests.exceptions.SSLError as error:
while error and not isinstance(error, ssl.SSLCertVerificationError):
error = error.__context__
if isinstance(error, ssl.SSLCertVerificationError):
domain = urlparse(self._url).netloc.split(":")[0]
if domain.endswith("plex.direct") and error.args[0].startswith(
f"hostname '{domain}' doesn't match"
):
_LOGGER.warning(
"Plex SSL certificate's hostname changed, updating"
)
if _update_plexdirect_hostname():
config_entry_update_needed = True
else:
raise Unauthorized( # pylint: disable=raise-missing-from
"New certificate cannot be validated with provided token"
)
else:
raise
else:
raise
else:
_connect_with_token()
try:
system_accounts = self._plex_server.systemAccounts()
shared_users = self.account.users() if self.account else []
except Unauthorized:
_LOGGER.warning(
"Plex account has limited permissions, shared account filtering will not be available"
)
else:
self._accounts = []
for user in shared_users:
for shared_server in user.servers:
if shared_server.machineIdentifier == self.machine_
|
mellis13/moose
|
scripts/memory_logger.py
|
Python
|
lgpl-2.1
| 43,940
| 0.013359
|
#!/usr/bin/env python
from tempfile import TemporaryFile, SpooledTemporaryFile
import os, sys, re, socket, time, pickle, csv, uuid, subprocess, argparse, decimal, select, platform
class LLDB:
def __init__(self):
self.debugger = lldb.SBDebugger.Create()
self.command_interpreter = self.debugger.GetCommandInterpreter()
self.target = self.debugger.CreateTargetWithFileAndArch(None, None)
self.listener = lldb.SBListener("event_listener")
self.error = lldb.SBError()
def __del__(self):
lldb.SBDebugger.Destroy(self.debugger)
def _parseStackTrace(self, gibberish):
return gibberish
def _run_commands(self, commands):
tmp_text = ''
return_obj = lldb.SBCommandReturnObject()
for command in commands:
self.command_interpreter.HandleCommand(command, return_obj)
if return_obj.Succeeded():
if command == 'process status':
tmp_text += '\n########################################################\n## Process Status:\n##\n'
tmp_text += return_obj.GetOutput()
elif command == 'bt':
tmp_text += '\n########################################################\n## Backtrace:\n##\n'
tmp_text += return_obj.GetOutput()
return tmp_text
def getStackTrace(self, pid):
event = lldb.SBEvent()
lldb_results = ''
state = 0
attach_info = lldb.SBAttachInfo(int(pid))
process = self.target.Attach(attach_info, self.error)
process.GetBroadcaster().AddListener(self.listener, lldb.SBProcess.eBroadcastBitStateChanged)
done = False
while not done:
if self.listener.WaitForEvent(lldb.UINT32_MAX, event):
state = lldb.SBProcess.GetStateFromEvent(event)
if state == lldb.eStateExited:
done = True
elif state == lldb.eStateStopped:
lldb_results = self._run_commands(['process status', 'bt', 'cont'])
done = True
elif state == lldb.eStateRunning:
self._run_commands(['process interrupt'])
if state == lldb.eStateCrashed or state == lldb.eStateInvalid or state == lldb.eStateExited:
return 'Binary exited before sample could be taken'
time.sleep(0.03)
# Due to some strange race condition we have to wait until eState is running
# before we can pass the 'detach, quit' command. Why we can not do this all in
# one go... bug?
done = False
while not done:
if self.listener.WaitForEvent(lldb.UINT32_MAX, event):
state = lldb.SBProcess.GetStateFromEvent(event)
if state == lldb.eStateRunning:
self._run_commands(['detach', 'quit'])
done = True
if state == lldb.eStateCrashed or state == lldb.eStateInvalid or state == lldb.eStateExited:
return 'Binary exited before sample could be taken'
time.sleep(0.03)
return self._parseStackTrace(lldb_results)
class GDB:
def _parseStackTrace(self, gibberish):
not_gibberish = re.findall(r'\(gdb\) (#.*)\(gdb\)', gibberish, re.DOTALL)
if len(not_gibberish) != 0:
return not_gibberish[0]
else:
return 'Stack Trace failed:', gibberish
def _waitForResponse(self, wait=True):
while wait:
self.gdb_stdout.seek(self.last_position)
for line in self.gdb_stdout:
if line == '(gdb) ':
self.last_position = self.gdb_stdout.tell()
return True
time.sleep(0.05)
time.sleep(0.05)
return True
def getStackTrace(self, pid):
gdb_commands = [ 'attach ' + pid + '\n', 'set verbose off\n', 'thread\n', 'apply\n', 'all\n', 'bt\n', 'quit\n', 'y\n' ]
self.gdb_stdout = SpooledTemporaryFile()
self.last_position = 0
gdb_process = subprocess.Popen([which('gdb'), '-nx'], stdin=subprocess.PIPE, stdout=self.gdb_stdout, stderr=self.gdb_stdout)
while gdb_process.poll() == None:
for command in gdb_commands:
if command == gdb_commands[-1]:
gdb_commands = []
elif self._waitForResponse():
# I have seen GDB exit out from under us
try:
gdb_process.stdin.write(command)
except:
pass
self.gdb_stdout.seek(0)
stack_trace = self._parseStackTrace(self.gdb_stdout.read())
self.gdb_stdout.close()
return stack_trace
class Server:
def __init__(self, arguments):
self.arguments = arguments
self.arguments.cwd = os.getcwd()
# Test to see if we are starting as a server
if self.arguments.pbs == True:
if os.getenv('PBS_NODEFILE') != None:
# Initialize an agent, strictly for holding our stdout logs. Give it the UUID of 'server'
self.agent = Agent(self.arguments, 'server')
if self.arguments.recover:
self.logfile = WriteCSV(self.arguments.outfile[0], False)
else:
self.logfile = WriteCSV(self.arguments.outfile[0], True)
self.client_connections = []
self.startServer()
else:
print 'I could not find your PBS_NODEFILE. Is PBS loaded?'
sys.exit(1)
# If we are not a server, start the single client
else:
self.startClient()
def startServer(self):
# Setup the TCP socket
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server_socket.bind((socket.gethostname(), 0))
self.server_socket.listen(5)
(self.host, self.port) = self.server_socket.getsockname()
# We will store all connections (sockets objects) made to the server in a list
self.client_connections.append(self.server_socket)
# Launch the actual binary we want to track
self._launchJob()
# Now launch all pbs agents
self._launchClients()
# This is a try so we can handle a keyboard ctrl-c
try:
# Continue to listen and accept active connections from agents
# until all agents report a STOP command.
AGENTS_ACTIVE = True
while AGENTS_ACTIVE:
read_sockets, write_sockets, error_sockets = select.select(self.client_connections,[],[])
for sock in read_sockets:
if sock == self.server_socket:
                        # Accept an incoming connection
self.client_connections.append(self.server_socket.accept()[0])
else:
# Deal with the data being sent to the server by its agents
self.handleAgent()
# Check to see if _all_ agents are telling the server to stop
agent_count = len(self.agent.agent_data.keys())
current_count = 0
for agent in self.agent.agent_data.keys():
if self.agent.agent_data[agent]['STOP']:
current_count += 1
# if All Agents have reported a STOP command, begin to exit
if current_count == agent_count:
AGENTS_ACTIVE = False
# Gotta get out of the for loop somehow...
break
# Sleep a bit before reading additional data
time.sleep(self.arguments.repeat_rate[-1])
# Close the server socket
self.server_socket.close()
# Close the logfile as the server is about to exit
self.logfile.close()
# Cancel server operations if ctrl-c was pressed
except KeyboardInterrupt:
print 'Canceled by user. Wrote log:', self.arguments.outfile[0]
sys.exit(0)
# Normal exiting procedures
print '\n\nAll agents have stopped. Log file saved to:', self.arguments.outfile[0]
sys.exit(0)
def startClient(self):
Client(self.arguments)
def _launchClients(self):
# Read the environment PBS_NODEFILE
self._PBS_NODEFILE = open(os.getenv('PBS_NODEFILE'), 'r')
nodes = set(self._PBS_NODEFILE.read().split())
# Print some useful information about our setup
print 'Memory Logger running on Host:', self.host, 'Port:', self.port, '\nNodes:', ', '.join(nodes), '\nSample rate (including stdout):', self.arguments.repeat_rate[-1], 's (use --repeat-rate to adjust)\nRemote agents delaying', self.arguments.pbs_delay[-1], 'second/s before tracking. (use --pbs-delay to adjust)\n'
# Build our command list based on the PBS_NODEFILE
command = []
for node in nodes:
command.append([ 'ssh', node,
'bash --login -c "source /etc/profile && ' \
|
labordoc/labordoc-next
|
modules/bibformat/lib/elements/bfe_ILO_conventions_link.py
|
Python
|
gpl-2.0
| 1,688
| 0.013626
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
#<BFE_ILO_CONVENTIONS prefix='<br/><small class="quicknote">' suffix="</small>"
#
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints link to ILO convention collections
"""
import cgi
import re
from invenio.urlutils import create_html_link
#from invenio.messages import gettext_set_language
from invenio.config import CFG_SITE_URL
def format_element(bfo, prefix, suffix):
conv = bfo.field('970__a')
convno = bfo.field('980__n')
lang = bfo.lang
target = '<a href="%s/search?jrec=1&ln=%s&cc=%s">' % (CFG_SITE_URL, lang, convno)
text = 'More background documents on ' + convno
if conv.startswith('ILOCONV') and len(convno) > 0:
return target + text + "</a>"
else:
return ''
def escape_values(bfo):
"""
Called by BibFormat in order to check if output of this element
should be escaped.
"""
return 0
|
gangadhar-kadam/hrshop
|
shopping_cart/config/desktop.py
|
Python
|
agpl-3.0
| 176
| 0.039773
|
data = {
	"Shopping Cart": {
		"color": "#B7E090",
"icon": "icon-shopping-cart",
"label": "Shopping Cart",
"link": "Form/Shopping Cart Settings",
"type": "module"
}
}
|
ssorgatem/pulsar
|
pulsar/client/transport/poster.py
|
Python
|
apache-2.0
| 1,308
| 0.002294
|
from __future__ import absolute_import
import logging
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
try:
from urllib2 import Request
except ImportError:
from urllib.request import Request
try:
from galaxy import eggs
eggs.require("poster")
except ImportError:
pass
try:
import poster
except ImportError:
poster = None
POSTER_UNAVAILABLE_MESSAGE = "Pulsar configured to use poster module - but it is unavailable. Please install poster."
log = logging.getLogger(__name__)
if poster is not None:
poster.streaminghttp.register_openers()
def post_file(url, path):
__ensure_poster()
try:
datagen, headers = poster.encode.multipart_encode({"file": open(path, "rb")})
request = Request(url, datagen, headers)
return urlopen(request).read()
except:
log.exception("problem")
raise
def get_file(url, path):
__ensure_poster()
request = Request(url=url)
response = urlopen(request)
with open(path, 'wb') as output:
while True:
buffer = response.read(1024)
if not buffer:
break
output.write(buffer)
def __ensure_poster():
if poster is None:
raise ImportError(POSTER_UNAVAILABLE_MESSAGE)
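# Hedged usage sketch (not part of the original module); the URL and file
# paths below are placeholders chosen purely for illustration.
if __name__ == "__main__":
    # Upload a local file as a streaming multipart POST, then save a download.
    post_file("http://localhost:8913/files/upload", "/tmp/job_input.dat")
    get_file("http://localhost:8913/files/download", "/tmp/job_output.dat")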
|
google/llvm-propeller
|
mlir/integration_test/lit.cfg.py
|
Python
|
apache-2.0
| 1,725
| 0
|
# -*- Python -*-
import os
import platform
import re
import subprocess
import tempfile
import lit.formats
import lit.util
from lit.llvm import llvm_config
from lit.llvm.subst import ToolSubst
# Configuration file for the 'lit' integration test runner.
# name: The name of this integration test suite.
config.name = 'MLIR_INTEGRATION'
config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell)
# suffixes: A list of file extensions to treat as integration test files.
config.suffixes = ['.mlir']
# test_source_root: The root path where integration tests are located.
config.test_source_root = os.path.dirname(__file__)
# test_exec_root: The root path where integration tests should be run.
config.test_exec_root = os.path.join(config.mlir_obj_root, 'integration_test')
config.substitutions.append(('%PATH%', config.environment['PATH']))
config.substitutions.append(('%shlibext', config.llvm_shlib_ext))
config.substitutions.append(('%mlir_src_root', config.mlir_src_root))
llvm_config.with_system_environment(['HOME', 'INCLUDE', 'LIB', 'TMP', 'TEMP'])
llvm_config.use_default_substitutions()
# excludes: A list of directories to exclude from the integration testsuite.
config.excludes = ['CMakeLists.txt', 'README.txt', 'LICENSE.txt']
# Tweak the PATH to include the tools dir.
llvm_config.with_environment('PATH', config.llvm_tools_dir, append_path=True)
tool_dirs = [config.mlir_tools_dir, config.llvm_tools_dir]
tools = [
'mlir-opt',
'mlir-cpu-runner',
]
# The following tools are optional.
tools.extend([
ToolSubst(
'%mlir_integration_test_dir',
config.mlir_integration_test_dir,
unresolved='ignore'),
])
llvm_config.add_tool_substitutions(tools, tool_dirs)
|
BansheeMediaPlayer/bockbuild
|
packages/mono-addins.py
|
Python
|
mit
| 144
| 0.0625
|
GitHubPackage ('mono', 'mono-addins', '0.6.2',
'adcd75bb47ffc1665c8c410f44dad3511dec0da0',
	configure = './autogen.sh --prefix="%{prefix}"'
)
|
druss316/G-Harvestor
|
html/udPassword.py
|
Python
|
gpl-3.0
| 2,697
| 0.001854
|
#!/usr/bin/python
"""
Copyright 2016 Paul Willworth <ioscode@gmail.com>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import cgi
import Cookie
import hashlib
import MySQLdb
import dbSession
import dbShared
sys.path.append("../")
import dbInfo
# Get current url
try:
url = os.environ['SCRIPT_NAME']
except KeyError:
url = ''
form = cgi.FieldStorage()
# Get Cookies
errorstr = ''
cookies = Cookie.SimpleCookie()
try:
cookies.load(os.environ['HTTP_COOKIE'])
except KeyError:
errorstr = 'no cookies\n'
if errorstr == '':
try:
currentUser = cookies['userID'].value
except KeyError:
currentUser = ''
try:
loginResult = cookies['loginAttempt'].value
except KeyError:
loginResult = 'success'
try:
sid = cookies['gh_sid'].value
except KeyError:
sid = form.getfirst('gh_sid', '')
else:
currentUser = ''
loginResult = 'success'
sid = form.getfirst('gh_sid', '')
userpass = form.getfirst("userpass")
# escape input to prevent sql injection
sid = dbShared.dbInsertSafe(sid)
userpass = dbShared.dbInsertSafe(userpass)
# Get a session
logged_state = 0
linkappend = ''
sess = dbSession.getSession(sid, 2592000)
if (sess != ''):
logged_state = 1
currentUser = sess
linkappend = 'gh_sid=' + sid
# Check for errors
errstr='';
if (len(userpass) < 6):
errstr = errstr + "Your password must be at least 6 characters. \r\n"
if (logged_state == 0):
errstr = errstr + "You must be logged in to update your password. \r\n"
if (errstr != ''):
result = "Your Password could not be updated because of the following errors:\r\n" + errstr
else:
crypt_pass = hashlib.sha1(dbInfo.DB_KEY3 + userpass).hexdigest()
conn = dbShared.ghConn()
cursor = conn.cursor()
cursor.execute("UPDATE tUsers SET userPassword='" + crypt_pass + "', lastReset=NOW() WHERE userID='" + currentUser + "';")
cursor.close()
conn.close()
result = "Password Updated"
print "Content-Type: text/html\n"
print result
|
rahulsharma1991/frontera
|
frontera/contrib/canonicalsolvers/__init__.py
|
Python
|
bsd-3-clause
| 78
| 0.012821
|
# -*- coding: utf-8 -*-
from . import basic
Basic = basic.BasicCanonicalSolver
|
chuckus/chromewhip
|
chromewhip/base.py
|
Python
|
mit
| 1,195
| 0.001674
|
# https://stackoverflow.com/questions/30155138/how-can-i-write-asyncio-coroutines-that-optionally-act-as-regular-functions
import asyncio
class SyncAdder(type):
""" A metaclass which adds synchronous version of coroutines.
This metaclass finds all coroutine functions defined on a class
and adds a synchronous version with a '_s' suffix appended to the
original function name.
"""
def __new__(cls, clsname, bases, dct, **kwargs):
new_dct = {}
for name,val in dct.items():
            # Make a sync version of all coroutine functions
if asyncio.iscoroutinefunction(val):
meth = cls.sync_maker(name)
syncname = '{}_s'.format(name)
meth.__name__ = syncname
meth.__qualname__ = '{}.{}'.format(clsname, syncname)
new_dct[syncname] = meth
dct.update(new_dct)
return super().__new__(cls, clsname, bases, dct)
@staticmethod
def sync_maker(func):
def sync_func(self, *args, **kwargs):
meth = getattr(self, func)
return asyncio.get_event_loop().run_until_complete(meth(*args, **kwargs))
return sync_func
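# Hedged usage sketch (not part of the original module): a hypothetical class
# built with the SyncAdder metaclass gains a blocking `fetch_s` wrapper next
# to the original `fetch` coroutine.
if __name__ == "__main__":
    class Client(metaclass=SyncAdder):
        async def fetch(self, value):
            await asyncio.sleep(0)  # stand-in for real asynchronous work
            return value * 2
    client = Client()
    print(client.fetch_s(21))  # runs the coroutine to completion and prints 42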
|
goldhand/art-portfolio
|
art-portfolio/imagestore/urls.py
|
Python
|
bsd-3-clause
| 1,663
| 0.00902
|
from django.conf.urls import patterns, url
from views import (AlbumListView, ImageListView, UpdateImage, UpdateAlbum, CreateImage, CreateAlbum, DeleteImage, \
DeleteAlbum, ImageView, AlbumList)
urlpatterns = patterns('imagestore.views',
url(r'^$', AlbumListView.as_view(), name='index'),
url(r'^album/add/$', CreateAlbum.as_view(), name='create-album'),
                       url(r'^album/(?P<album_id>\d+)/$', ImageListView.as_view(), name='album'),
url(r'^album/(?P<pk>\d+)/edit/$', UpdateAlbum.as_view(), name='update-album'),
url(r'^album/(?P<pk>\d+)/delete/$', DeleteAlbum.as_view(), name='delete-album'),
url(r'^tag/(?P<tag>[^/]+)/$', ImageListView.as_view(), name='tag'),
url(r'^user/(?P<username>\w+)/albums/', AlbumListView.as_view(), name='user'),
url(r'^user/(?P<username>\w+)/$', ImageListView.as_view(), name='user-images'),
url(r'^upload/$', CreateImage.as_view(), name='upload'),
url(r'^image/(?P<pk>\d+)/$', ImageView.as_view(), name='image'),
url(r'^album/(?P<album_id>\d+)/image/(?P<pk>\d+)/$', ImageView.as_view(), name='image-album'),
url(r'^tag/(?P<tag>[^/]+)/image/(?P<pk>\d+)/$', ImageView.as_view(), name='image-tag'),
url(r'^image/(?P<pk>\d+)/delete/$', DeleteImage.as_view(), name='delete-image'),
url(r'^image/(?P<pk>\d+)/update/$', UpdateImage.as_view(), name='update-image'),
)
|
ledbutter/ProjectEulerPython
|
Problem46.py
|
Python
|
mit
| 1,921
| 0.032795
|
# It was proposed by Christian Goldbach that every odd composite number can be written as the sum of a prime and twice a square.
# 9 = 7 + 2x1^2
# 15 = 7 + 2x2^2
# 21 = 3 + 2x3^2
# 25 = 7 + 2x3^2
# 27 = 19 + 2x2^2
# 33 = 31 + 2x1^2
# It turns out that the conjecture was false.
# What is the smallest odd composite that cannot be written as the sum of a prime and twice a square?
#here's a more pythony way of doing things
from itertools import count, dropwhile
from Utils import is_prime
def twice_squares(limit):
for i in range(limit):
yield 2*(i**2)
def not_right(num):
for n in twice_squares(num):
prime = num - n
if is_prime(prime):
return True
return False
composite_odds = filter(lambda x: not is_prime(x), count(35, 2))
print(next(dropwhile(not_right, composite_odds)))
#5777
#mine: works fairly fast
# from itertools import count, takewhile
# from Utils import is_prime, primes_sieve
# #first come up with iterator to generate all odd composite numbers
# composite_odds = filter(lambda x: not is_prime(x), count(35, 2))
# # i = 0
# # for c in composite_odds:
# # print(c)
# # i += 1
# # if i == 10:
# # break
# #generate the twice squared values
# # def twice_squares(limit):
# # for i in range(limit):
# # yield 2*(i**2)
# twice_squares = list(2*(n**2) for n in range(1, 100))
# primes = list(primes_sieve(10000))
# def find_answer():
# for c in composite_odds:
# found_combo = False
# #find all primes smaller than c
# for p in primes:#primes_sieve(c):
# for t in twice_squares:#twice_squares((c-p)//2):
# if p + t == c:
# #print("Composite odd %i can be expressed as %i and %i" % (c, p, t))
# found_combo = True
# if found_combo == True:
# break
# if found_combo == True:
# break
# if found_combo == False:
# return c
# print(find_answer())
#5777
|
googleads/googleads-adxbuyer-examples
|
python/samples/v2_x/list_invitations.py
|
Python
|
apache-2.0
| 2,388
| 0.0067
|
#!/usr/bin/python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example lists the invitations sent out for a given client buyer."""
import argparse
import pprint
import sys
import os
sys.path.insert(0, os.path.abspath('..'))
from googleapiclient.errors import HttpError
import samples_util
DEFAULT_ACCOUNT_ID = 'ENTER_ACCOUNT_ID_HERE'
DEFAULT_CLIENT_BUYER_ID = 'ENTER_CLIENT_BUYER_ID_HERE'
def main(ad_exchange_buyer, account_id, client_account_id):
try:
response = ad_exchange_buyer.accounts().clients().invitations().list(
accountId=account_id, clientAccountId=client_account_id).execute()
invitations = response['invitations']
if invitations:
print(f'Invitations for Account ID "{account_id}" and Client Account Id: '
f'"{client_account_id}"')
for invitation in invitations:
pprint.pprint(invitation)
else:
print(f'No invitations for Account ID "{account_id}" and Client Account '
f'Id: "{client_account_id}"')
except HttpError as e:
print(e)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Lists invitations sent out '
'for a given client buyer.')
  parser.add_argument(
'-a', '--account_id', default=DEFAULT_ACCOUNT_ID, type=int,
      help='The integer id of the Authorized Buyers account.')
parser.add_argument(
'-c', '--client_buyer_id', default=DEFAULT_CLIENT_BUYER_ID, type=int,
help='The integer id of the client buyer.')
args = parser.parse_args()
try:
service = samples_util.GetService('v2beta1')
except IOError as ex:
print(f'Unable to create adexchangebuyer service - {ex}')
print('Did you specify the key file in samples_util.py?')
sys.exit(1)
main(service, args.account_id, args.client_buyer_id)
|
vishesh/pycket
|
pycket/test/test_struct.py
|
Python
|
mit
| 17,186
| 0.0032
|
from pycket.test.testhelper import *
from pycket.values import *
import pytest
skip = pytest.mark.skipif("True")
# Creating Structure Types
def test_make_struct_type(doctest):
"""
> (define-values (struct:a make-a a? a-ref a-set!)
(make-struct-type 'a #f 2 1 'uninitialized))
> (define an-a (make-a 'x 'y))
> (a-ref an-a 1)
'y
> (a-ref an-a 2)
'uninitialized
> (define a-first (make-struct-field-accessor a-ref 0))
> (a-first an-a)
'x
> (define-values (struct:b make-b b? b-ref b-set!)
(make-struct-type 'b struct:a 1 2 'b-uninitialized))
> (define a-b (make-b 'x 'y 'z))
> (a-ref a-b 1)
'y
> (a-ref a-b 2)
'uninitialized
> (b-ref a-b 0)
'z
    > (b-ref a-b 1)
'b-uninitialized
> (b-ref a-b 2)
'b-uninitialized
;;;;;;;;;;;;;;;;
> (define p1 #s(p a b c))
> (define-values (struct:p make-p p? p-ref p-set!)
        (make-struct-type 'p #f 3 0 #f null 'prefab #f '(0 1 2)))
> (p? p1)
#t
> (p-ref p1 0)
'a
> (make-p 'x 'y 'z)
'#s(p x y z)
"""
assert doctest
def test_make_struct_type2(doctest):
"""
! (require racket/private/generic-interfaces)
> (struct color (r g b) #:constructor-name -color)
> (struct rectangle (w h color) #:extra-constructor-name rect)
> (rectangle 13 50 (-color 192 157 235))
> (rect 50 37 (-color 35 183 252))
> (struct circle (radius) #:reflection-name '<circle>)
> (circle 15)
;#<|<circle>|>
"""
assert doctest
def test_struct_main_functions(source):
"""
(struct posn (x y))
(let* ([p (posn 1 2)]
[p? (posn? p)]
[notp? (posn? 0)]
[x (posn-x p)]
[y (posn-y p)])
(and p? (not notp?) (= x 1) (= y 2)))
"""
result = run_mod_expr(source, wrap=True)
assert result == w_true
def test_struct_inheritance(source):
"""
(struct posn (x y))
(struct 3d-posn posn (z))
(let* ([p (3d-posn 1 2 3)]
[p? (posn? p)]
[x (posn-x p)]
[z (3d-posn-z p)])
(and p? (= x 1) (= z 3)))
"""
result = run_mod_expr(source, wrap=True)
assert result == w_true
def test_struct_inheritance2():
m = run_mod(
"""
#lang pycket
(require racket/private/kw)
(struct posn (x y))
(define (raven-constructor super-type)
(struct raven ()
#:super super-type
#:transparent
#:property prop:procedure (lambda (self) 'nevermore)) raven)
(define r ((raven-constructor struct:posn) 1 2))
(define x (posn-x r))
""")
ov = m.defs[W_Symbol.make("x")]
assert ov.value == 1
def test_struct_comparison(source):
"""
(struct glass (width height) #:transparent #:mutable)
(struct lead (width height))
(define slab (lead 1 2))
(let* ([glass_test (equal? (glass 1 2) (glass 1 2))]
[slab (lead 1 2)]
[lead_test1 (equal? slab slab)]
[lead_test2 (equal? slab (lead 1 2))])
(and glass_test lead_test1 (not lead_test2)))
"""
result = run_mod_expr(source, wrap=True)
assert result == w_true
def test_struct_comparison2():
m = run_mod(
"""
#lang pycket
(require racket/private/generic-interfaces)
(struct lead (width height)
#:methods
gen:equal+hash
[(define (equal-proc a b equal?-recur)
; compare a and b
(and (equal?-recur (lead-width a) (lead-width b))
(equal?-recur (lead-height a) (lead-height b))))
(define (hash-proc a hash-recur)
; compute primary hash code of a
(+ (hash-recur (lead-width a))
(* 3 (hash-recur (lead-height a)))))
(define (hash2-proc a hash2-recur)
; compute secondary hash code of a
(+ (hash2-recur (lead-width a))
(hash2-recur (lead-height a))))])
(define result (equal? (lead 1 2) (lead 1 2)))
""")
assert m.defs[W_Symbol.make("result")] == w_true
def test_struct_mutation(source):
"""
(struct dot (x y) #:mutable)
(let* ([d (dot 1 2)]
[dx0 (dot-x d)]
[m (set-dot-x! d 10)]
[dx1 (dot-x d)])
(and (= dx0 1) (= dx1 10)))
"""
result = run_mod_expr(source, wrap=True)
assert result == w_true
def test_struct_auto_values(source):
"""
(struct p3 (x y [z #:auto]) #:transparent #:auto-value 0)
(struct p4 p3 (t))
(let* ([p (p3 1 2)]
[4dp (p4 1 2 4)]
[pz (p3-z p)]
[4pdt (p4-t 4dp)])
(and (= pz 0) (= 4pdt 4)))
"""
result = run_mod_expr(source, wrap=True)
assert result == w_true
def test_struct_guard():
run(
"""
((lambda (name) (struct thing (name) #:transparent #:guard
(lambda (name type-name) (cond
[(string? name) name]
[else (error type-name \"bad name: ~e\" name)])))
(thing? (thing name))) \"apple\")
""", w_true)
e = pytest.raises(SchemeException, run,
"""
((lambda (name) (struct thing (name) #:transparent #:guard
(lambda (name type-name) (cond
[(string? name) name]
[else (error type-name "bad name")])))
(thing? (thing name))) 1)
""")
assert "bad name" in e.value.msg
def test_struct_guard2():
m = run_mod(
"""
#lang pycket
(define-values (s:o make-o o? o-ref o-set!)
(make-struct-type 'o #f 1 0 'odefault null (make-inspector) #f null (lambda (o n) (+ o 1))))
(define x (o-ref (make-o 10) 0))
""")
ov = m.defs[W_Symbol.make("x")]
assert ov.value == 11
def test_struct_guard3():
m = run_mod(
"""
#lang pycket
(define got null)
(define-values (s:a make-a a? a-ref a-set!)
(make-struct-type 'a #f 2 1 'adefault null (make-inspector) #f null
(lambda (a b n) (set! got (cons (list a b n) got)) (values 1 2))))
(define-values (s:b make-b b? b-ref b-set!)
(make-struct-type 'b s:a 1 2 'bdefault null (make-inspector) #f null
(lambda (a b c n) (set! got (cons (list a b c n) got)) (values 10 20 30))))
(define x (a-ref (make-b 'x 'y 'z) 0))
""")
ov = m.defs[W_Symbol.make("x")]
assert ov.value == 1
def test_struct_prefab():
m = run_mod(
"""
#lang pycket
(require racket/private/kw)
(define lunch '#s(sprout bean))
(struct sprout (kind) #:prefab)
(define t (sprout? lunch))
(define f (sprout? #s(sprout bean #f 17)))
(define result (and (not f) t))
""")
assert m.defs[W_Symbol.make("result")] == w_true
def test_unsafe():
m = run_mod(
"""
#lang pycket
(struct posn ([x #:mutable] [y #:mutable]) #:transparent)
(struct 3dposn posn ([z #:mutable]))
(define p (3dposn 1 2 3))
(unsafe-struct*-set! p 2 4)
(define x (unsafe-struct*-ref p 2))
""")
ov = m.defs[W_Symbol.make("x")]
assert ov.value == 4
def test_unsafe_impersonators():
m = run_mod(
"""
#lang pycket
(struct posn ([x #:mutable] [y #:mutable]) #:transparent)
(define a (posn 1 1))
(define b (impersonate-struct a))
(unsafe-struct-set! b 1 2)
(define x (unsafe-struct-ref b 1))
""")
ov = m.defs[W_Symbol.make("x")]
assert ov.value == 2
# Structure Type Properties
def test_struct_prop_procedure():
m = run_mod(
"""
#lang pycket
(require racket/private/kw)
(require (prefix-in k: '#%kernel))
(struct x() #:property prop:procedure (lambda _ 1))
(struct y() #:property k:prop:procedure (lambda _ 2))
(define xval ((x)))
(define yval ((y)))
""")
assert m.defs[W_Symbol.make("xval")].value == 1
assert m.defs[W_Symbol.make("yval")].value == 2
def test_struct_prop_procedure_inheritance():
m = run_mod(
"""
#lang pycket
(require racket/private/kw)
(struct x (proc) #:property prop:procedure 0)
(struct y x ())
(define b (y (lambda (x) x)))
(define val (b 10))
""")
assert m.defs[W_Symbol.make("val")].value == 10
def test_struct_prop_procedure_fail():
e = pytest.raises(SchemeException, run_mod,
"""
#lang pycket
(require racket/private/kw)
(require (prefix-in k: '#%kernel))
|
heathseals/CouchPotatoServer
|
libs/pyutil/scripts/time_comparisons.py
|
Python
|
gpl-3.0
| 8,758
| 0.003882
|
# If you run this file, it will make up a random secret and then crack it
# using timing information from a string comparison function. Maybe--if it
# gets lucky. It takes a long, long time to work.
# So, the thing I need help with is statistics. The way this thing works is
# extremely stupid. Suppose you want to know which function invocation takes
# longer: comparison(secret, guess1) or comparison(secret, guess2)?
# If you can correctly determine that one of them takes longer than the
# other, then (a) you can use that to crack the secret, and (b) this is a
# unit test demonstrating that comparison() is not timing-safe.
# So how does this script do it? Extremely stupidly. First of all, you can't
# reliably measure tiny times, so to measure the time that a function takes,
# we run that function 10,000 times in a row, measure how long that took, and
# divide by 10,000 to estimate how long any one run would have taken.
# Then, we do that 100 times in a row, and take the fastest of 100 runs. (I
# also experimented with taking the mean of 100 runs instead of the fastest.)
# Then, we just say whichever comparison took longer (for its fastest run of
# 100 runs of 10,000 executions per run) is the one we think is a closer
# guess to the secret.
# Now I would *like* to think that there is some kind of statistical analysis
# more sophisticated than "take the slowest of the fastest of 100 runs of
# 10,000 executions". Such improved statistical analysis would hopefully be
# able to answer these two questions:
# 1. Are these two function calls -- comparison(secret, guess1) and
# comparison(secret, guess2) -- drawing from the same distribution or
# different? If you can answer that question, then you've answered the
# question of whether "comparison" is timing-safe or not.
# And, this would also allow the cracker to recover from a false step. If it
# incorrectly decides that the prefix of the secret is ABCX, when the real
# secret is ABCD, then after that every next step it takes will be the
# "drawing from the same distribution" kind -- any difference between ABCXQ
# and ABCXR will be just due to noise, since both are equally far from the
# correct answer, which starts with ABCD. If it could realize that there is
# no real difference between the distributions, then it could back-track and
# recover.
# 2. Given the ability to measure, noisily, the time taken by comparison(),
# how can you most efficiently figure out which guess takes the longest? If
# you can do that more efficiently, you can crack secrets more efficiently.
# The script takes two arguments. The first is how many symbols in the
# secret, and the second is how big the alphabet from which the symbols are
# drawn. To prove that this script can *ever* work, try passing length 5 and
# alphabet size 2. Also try editing the code to let it use sillycomp. That'll
# definitely make it work. If you can improve this script (as per the thing
# above about "needing better statistics") to the degree that it can crack a
# secret with length 32 and alphabet size 256, then that would be awesome.
# See the result of this commandline:
# $ python -c 'import time_comparisons ; time_comparisons.print_measurements()'
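# A minimal sketch of the measurement idea described above -- not used by the
# rest of this script (which relies on pyutil.benchutil), just an illustration,
# and the helper name is made up: call the comparison n times per trial, repeat
# the trial reps times, and keep the fastest per-call estimate in nanoseconds.
from timeit import default_timer as _timer
def fastest_ns_per_call(f, a, b, n=10**4, reps=10**2):
    best = None
    for _ in range(reps):
        start = _timer()
        for _ in range(n):
            f(a, b)               # run many calls so the elapsed time is measurable
        per_call = (_timer() - start) / n * 1e9
        if best is None or per_call < best:
            best = per_call       # keep the fastest (least noisy) trial
    return best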
from pyutil import benchutil
import hashlib, random, os
from decimal import Decimal
D=Decimal
p1 = 'a'*32
p1a = 'a'*32
p2 = 'a'*31+'b' # close, but no cigar
p3 = 'b'*32 # different in the first byte
def randstr(n, alphabetsize):
alphabet = [ chr(x) for x in range(alphabetsize) ]
return ''.join([random.choice(alphabet) for i in range(n)])
def compare(n, f, a, b):
for i in xrange(n):
f(a, b)
def eqeqcomp(a, b):
return a == b
def sillycomp(a, b):
# This exposes a lot of information in its timing about how many leading bytes match.
for i in range(len(a)):
if a[i] != b[i]:
return False
for i in xrange(2**9):
pass
if len(a) == len(b):
return True
else:
return False
def hashcomp(a, b):
    # Brian Warner invented this for Tahoe-LAFS. It seems like it should be very safe against timing leakage of any kind, because of the inclusion of a new random randkey every time. Note that exposing the value of the hash (i.e. the output of md5(randkey+secret)) is *not* a security problem. You can post that on your web site and let all attackers have it, no problem. (Provided that the value of "randkey" remains secret.)
randkey = os.urandom(32)
return hashlib.md5(randkey+ a).digest() == hashlib.md5(randkey+b).digest()
def xorcomp(a, b):
# This appears to be the most popular timing-insensitive string comparison function. I'm not completely sure it is fully timing-insensitive. (There are all sorts of funny things inside Python, such as caching of integer objects < 100...)
if len(a) != len(b):
return False
result = 0
for x, y in zip(a, b):
result |= ord(x) ^ ord(y)
return result == 0
def print_measurements():
N=10**4
REPS=10**2
print "all times are in nanoseconds per comparison (in scientific notation)"
print
for comparator in [eqeqcomp, hashcomp, xorcomp, sillycomp]:
print "using comparator ", comparator
# for (a, b, desc) in [(p1, p1a, 'same'), (p1, p2, 'close'), (p1, p3, 'far')]:
trials = [(p1, p1a, 'same'), (p1, p2, 'close'), (p1, p3, 'far')]
random.shuffle(trials)
for (a, b, desc) in trials:
print "comparing two strings that are %s to each other" % (desc,)
def f(n):
compare(n, comparator, a, b)
benchutil.rep_bench(f, N, UNITS_PER_SECOND=10**9, MAXREPS=REPS)
print
def try_to_crack_secret(cracker, comparator, secretlen, alphabetsize):
secret = randstr(secretlen, alphabetsize)
def test_guess(x):
return comparator(secret, x)
print "Giving cracker %s a chance to figure out the secret. Don't tell him, but the secret is %s. Whenever he makes a guess, we'll use comparator %s to decide if his guess is right ..." % (cracker, secret.encode('hex'), comparator,)
guess = cracker(test_guess, secretlen, alphabetsize)
print "Cracker %s guessed %r" % (cracker, guess,)
if guess == secret:
print "HE FIGURED IT OUT!? HOW DID HE DO THAT."
else:
print "HAHA. Our secret is safe."
def byte_at_a_time_cracker(test_guess, secretlen, alphabetsize):
# If we were cleverer, we'd add some backtracking behaviour where, if we can't find any x such that ABCx stands out from the crowd as taking longer than all the other ABCy's, then we start to think that we've taken a wrong step and we go back to trying ABy's. Make sense? But we're not that clever. Once we take a step, we don't backtrack.
print
guess=[]
while len(guess) < secretlen:
best_next_byte = None
best_next_byte_time = None
# For each possible byte...
for next_byte in range(alphabetsize):
c = chr(next_byte)
# Construct a guess with our best candidate so far...
candidate_guess = guess[:]
# Plus that byte...
candidate_guess.append(c)
s = ''.join(candidate_guess)
# Plus random bytes...
s += os.urandom(32 - len(s))
# And see how long it takes the test_guess to consider it...
def f(n):
for i in xrange(n):
test_guess(s)
times = benchutil.rep_bench(f, 10**7, MAXREPS=10**3, quiet=True)
fastesttime = times['mean']
print "%s..."%(c.encode('hex'),),
if best_next_byte is None or fastesttime > best_next_byte_time:
print "new candidate for slowest next-char: %s, took: %s" % (c.encode('hex'), fastesttime,),
best_next_byte_time = fastesttime
best_next_byte = c
# Okay we've tried all possible next bytes. Our guess is this one (the one that took longest to be tested by test_guess):
guess.append(best_next_byte)
print "SLOWEST next-char %s! Current guess at secret: %s" % (best_next_byte.encode('hex'), ''.join(guess).encode(
|
dracos/QGIS
|
python/plugins/processing/gui/EditScriptAction.py
|
Python
|
gpl-2.0
| 2,221
| 0
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
EditScriptAction.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from processing.gui.ContextAction import ContextAction
from processing.gui.ScriptEditorDialog import ScriptEditorDialog
from processing.algs.r.RAlgorithm import RAlgorithm
from processing.script.ScriptAlgorithm import ScriptAlgorithm
class EditScriptAction(ContextAction):
SCRIPT_PYTHON = 0
SCRIPT_R = 1
def __init__(self, scriptType):
self.name = self.tr('Edit script', 'EditScriptAction')
self.scriptType = scriptType
def isEnabled(self):
        if self.scriptType == ScriptEditorDialog.SCRIPT_PYTHON:
return isinstance(self.alg, ScriptAlgorithm) and self.alg.allowEdit
elif self.scriptType == ScriptEditorDialog.SCRIPT_R:
return isinstance(self.alg, RAlgorithm)
def execute(self):
dlg = ScriptEditorDialog(self.scriptType, self.alg)
dlg.show()
dlg.exec_()
if dlg.update:
if self.scriptType == ScriptEditorDialog.SCRIPT_PYTHON:
self.toolbox.updateProvider('script')
elif self.scriptType == ScriptEditorDialog.SCRIPT_R:
self.toolbox.updateProvider('r')
|
vishalsahu5/carpool
|
journeys/__init__.py
|
Python
|
mit
| 51
| 0
|
default_app_config = 'journeys.apps.JourneyConfig'
|
conikuvat/edegal
|
backend/larppikuvat/migrations/0001_initial.py
|
Python
|
mit
| 1,162
| 0.001721
|
# Generated by Django 3.0.7 on 2020-09-27 13:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('edegal', '0025_photographer_flickr_handle'),
]
operations = [
migrations.CreateModel(
name='LarppikuvatPhotographerProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('contact', models.TextField(blank=True)),
('hours', models.TextField(blank=True)),
('delivery_schedule', models.TextField(blank=True)),
('delivery_practice', models.TextField(blank=True)),
                ('delivery_method', models.TextField(blank=True)),
('copy_protection', models.TextField(blank=True)),
('expected_compensation', models.TextField(blank=True)),
('photographer', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='larppikuvat_profile', to='edegal.Photographer')),
],
),
]
|
felipegerard/arte_mexicano_antiguo
|
montactuaria/Analisis_access_log/luigi/ functions/functions.py
|
Python
|
agpl-3.0
| 362
| 0.005525
|
import shutil
from pprint import pprint
import pandas as pd
import csv
import pickle
import inspect, os
import requests
from os import listdir
import numpy as np
import subprocess
from luigi import six
from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.naive_bayes import MultinomialNB
|
jhprinz/openpathsampling
|
openpathsampling/high_level/transition.py
|
Python
|
lgpl-2.1
| 19,446
| 0.002263
|
import logging
import numpy as np
import openpathsampling as paths
from openpathsampling.numerics import (
Histogram, histograms_to_pandas_dataframe, LookupFunction, Histogrammer
)
from openpathsampling.numerics import WHAM
from openpathsampling.netcdfplus import StorableNamedObject
from openpathsampling.analysis.tools import (
pathlength, max_lambdas, guess_interface_lambda, minus_sides_summary,
sampleset_sample_generator
)
logger = logging.getLogger(__name__)
class Transition(StorableNamedObject):
"""
Describes (in general) a transition between two states.
"""
def __init__(self, stateA, stateB):
super(Transition, self).__init__()
self.stateA = stateA
self.stateB = stateB
# whoops: can't be set here, but must be set in subclass
# TODO: make that work in a more sensible way
#self.ensembles = []
@property
def all_ensembles(self):
return self.ensembles
def to_dict(self):
return {
'stateA' : self.stateA,
'stateB' : self.stateB,
}
@classmethod
def from_dict(cls, dct):
return Transition(
stateA=dct['stateA'],
stateB=dct['stateB']
)
class TPSTransition(Transition):
"""
Transition using TPS ensembles
"""
def __init__(self, stateA, stateB, name=None):
super(TPSTransition, self).__init__(stateA, stateB)
if name is not None:
self.name = name
if not hasattr(self, "ensembles"):
self.ensembles = [self._tps_ensemble(stateA, stateB)]
def to_dict(self):
return {
'stateA' : self.stateA,
'stateB' : self.stateB,
'ensembles' : self.ensembles,
'name' : self.name
}
@classmethod
def from_dict(cls, dct):
mytrans = TPSTransition(dct['stateA'], dct['stateB'], dct['name'])
mytrans.ensembles = dct['ensembles']
return mytrans
def _tps_ensemble(self, stateA, stateB):
return paths.SequentialEnsemble([
paths.AllInXEnsemble(stateA) & paths.LengthEnsemble(1),
paths.AllOutXEnsemble(stateA | stateB),
paths.AllInXEnsemble(stateB) & paths.LengthEnsemble(1)
])
def add_transition(self, stateA, stateB):
new_ens = self._tps_ensemble(stateA, stateB)
try:
self.ensembles[0] = self.ensembles[0] | new_ens
except AttributeError:
self.ensembles = [new_ens]
class FixedLengthTPSTransition(TPSTransition):
"""Transition using fixed length TPS ensembles"""
def __init__(self, stateA, stateB, length, name=None):
self.length = length
super(FixedLengthTPSTransition, self).__init__(stateA, stateB, name)
def to_dict(self):
dct = super(FixedLengthTPSTransition, self).to_dict()
dct['length'] = self.length
return dct
@classmethod
def from_dict(cls, dct):
mytrans = super(FixedLengthTPSTransition, cls).from_dict(dct)
mytrans.length = dct['length']
return mytrans
def _tps_ensemble(self, stateA, stateB):
return paths.SequentialEnsemble([
paths.LengthEnsemble(1) & paths.AllInXEnsemble(stateA),
paths.LengthEnsemble(self.length - 2),
paths.LengthEnsemble(1) & paths.AllInXEnsemble(stateB)
])
class TISTransition(Transition):
"""
Transition using TIS ensembles.
The additional information from the TIS ensembles allows us to set up
    all the analysis (assuming we built these as proper TIS ensembles,
    which we DO in the initialization!)
Parameters
----------
stateA : Volume
Volume for the state from which the transition begins
stateB : Volume
Volume for the state in which the transition ends
interfaces : list of Volume
Volumes for the interfaces
orderparameter : CollectiveVariable
order parameter to be used in the analysis (does not need to be the
parameter which defines the interfaces, although it usually is)
name : string
name for the transition
"""
def __init__(self, stateA, stateB, interfaces, orderparameter=None, name=None):
super(TISTransition, self).__init__(stateA, stateB)
self.stateA = stateA
self.stateB = stateB
self.interfaces = interfaces
if name is not None:
self.name = name
# If we reload from a storage file, we want to use the
# ensembles from the file, not the automatically generated
# ones here
# build ensembles if we don't already have them
self.orderparameter = orderparameter
if not hasattr(self, "ensembles"):
self.build_ensembles(self.stateA, self.stateB,
self.interfaces, self.orderparameter)
self.default_orderparameter = self.orderparameter
self.total_crossing_probability_method = "wham"
self.histograms = {}
# caches for the results of our calculation
self._flux = None
self._rate = None
self.hist_args = {} # shortcut to ensemble_histogram_info[].hist_args
self.ensemble_histogram_info = {
'max_lambda' : Histogrammer(
f=max_lambdas,
f_args={'orderparameter' : self.orderparameter},
hist_args={}
),
'pathlength' : Histogrammer(
f=pathlength,
f_args={},
hist_args={}
)
}
self.minus_ensemble = paths.MinusInterfaceEnsemble(
state_vol=stateA,
innermost_vols=interfaces[0]
).named("Out " + stateA.name + " minus")
def copy(self, with_results=True):
        copy = self.from_dict(self.to_dict())
copy.copy_analysis_from(self)
return copy
def copy_analysis_from(self, other):
self.default_orderparameter = other.default_orderparameter
self.total_crossing_probability_method = other.total_crossing_probability_method
self.hist_args = other.hist_args
self.ensemble_histogram_info = other.ensemble_histogram_info
self.histograms = other.histograms
self._flux = other._flux
self._rate = other._rate
try:
self.tcp = other.tcp
except AttributeError:
pass
try:
self.ctp = other.ctp
except AttributeError:
pass
def __str__(self):
mystr = str(self.__class__.__name__) + ": " + str(self.name) + "\n"
mystr += (str(self.stateA.name) + " -> " + str(self.stateA.name)
+ " or " + str(self.stateB.name) + "\n")
for iface in self.interfaces:
mystr += "Interface: " + str(iface.name) + "\n"
return mystr
def build_ensembles(self, stateA, stateB, interfaces, orderparameter):
self.ensembles = paths.EnsembleFactory.TISEnsembleSet(
stateA, stateB, self.interfaces, orderparameter
)
for ensemble in self.ensembles:
ensemble.named(self.name + " " +
str(self.ensembles.index(ensemble)))
# parameters for different types of output
def ensemble_statistics(self, ensemble, samples, weights=None, force=False):
"""Calculate stats for a given ensemble: path length, crossing prob
In general we do all of these at once because the extra cost of
running through the samples twice is worse than doing the extra
calculations.
Parameters
----------
ensemble: Ensemble
samples : iterator over samples
"""
# figure out which histograms need to updated for this ensemble
run_it = []
if not force:
# TODO figure out which need to be rerun
pass
else:
run_it = list(self.ensemble_histogram_info.keys())
for hist in run_it:
hist_info = self.ensemble_histogram_info[hist]
if hist_info.hist_args == {} and self.hist_args[hist] != {}:
                hist_info.hist_args = self.hist_args[hist]
|
avalentino/PyTables
|
examples/carray1.py
|
Python
|
bsd-3-clause
| 468
| 0
|
import numpy as np
import tables as tb
fileName = 'carray1.h5'
shape = (200, 300)
atom = tb.UInt8Atom()
filters = tb.Filters(complevel=5, complib='zlib')
h5f = tb.open_file(fileName, 'w')
ca = h5f.create_carray(h5f.root, 'carray', atom, shape, filters=filters)
# Fill a hyperslab in ``ca``.
ca[10:60, 20:70] = np.ones((50, 50))
h5f.close()
# Re-open and read another hyperslab
h5f = tb.open_file(fileName)
print(h5f)
print(h5f.root.carray[8:12, 18:22])
h5f.close()
|
|
deployed/django-emailtemplates
|
emailtemplates/migrations/0006_auto_20201110_1151.py
|
Python
|
mit
| 689
| 0.002903
|
# Generated by Django 3.1.2 on 2020-11-10 11:51
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('emailtemplates', '0005_auto_20201110_1115'),
]
operations = [
migrations.AlterField(
            model_name='emailtemplate',
name='created',
field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='created'),
),
migrations.AlterField(
model_name='emailtemplate',
name='modified',
field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='modified'),
),
]
|
lcpt/xc
|
python_modules/import_export/sciaXML/scia_loads/LoadCaseContainer.py
|
Python
|
gpl-3.0
| 2,570
| 0.040109
|
# -*- coding: utf-8 -*-
#Based on sXML-master projet on gitHub
__author__= "Luis C. Pérez Tato (LCPT)"
__copyright__= "Copyright 2015 LCPT"
__license__= "GPL"
__version__= "3.0"
__email__= "l.pereztato@gmail.com"
__author__= "Luis C. Pérez Tato (LCPT)"
__copyright__= "Copyright 2015 LCPT"
__license__= "GPL"
__version__= "3.0"
__email__= "l.pereztato@gmail.com"
from import_export.sciaXML.xml_basics import Container as ctr
from import_export.sciaXML.xml_basics import TableXMLNodes as tb
from import_export.sciaXML.xml_basics import Header as hdr
from import_export.sciaXML.xml_basics import HeaderItem as hi
from import_export.sciaXML.xml_basics import Object as obj
from import_export.sciaXML.xml_basics import ObjectItem as oI
from import_export.sciaXML.xml_basics import Row as rw
import LoadGroupContainer as lgc
import LoadCaseProperties as lcp
import uuid
idLoadCaseContainer= lcp.containerId
tLoadCaseContainer= lcp.tbProgId
idLoadCaseContainerTb= lcp.tbId
tLoadCaseContainerTb= lcp.tbProgId
loadCasePrefix= 'LC'
def getLoadTypeName(ltype):
if(ltype==0):
return 'Poids propre'
elif(ltype==1):
return 'Standard'
elif(ltype==2):
return 'Effet primaire'
def getActionTypeCode(actionType):
if(actionType=='Permanent'):
return 0
elif(actionType=='Variable'):
return 1
else:
    print actionType, "Unknown action type (not permanent, not variable)."
return 0
def getLoadCaseObject(loadCase):
retval= obj.Object()
id= str(loadCase.id)
retval.setId(id)
  name= loadCase.name
if(name==''):
name= loadCasePrefix+id
retval.setNm(name)
retval.setP0(oI.ObjectItem(name)) #Name
retval.setP1(oI.ObjectItem('{'+str(uuid.uuid4())+'}')) # Unique id
tmp= oI.ObjectItem(str(getActionTypeCode(loadCase.actionType)))
tmp.t= loadCase.actionType
retval.setP2(tmp) #??
retval.setP3(oI.ObjectItem(loadCase.desc)) #Description
gId= str(loadCase.loadGroupId)
gName= lgc.loadGroupPrefix+gId
tmp= oI.ObjectItem('',gId)
tmp.n= gName
retval.setP4(tmp)
ltyp= loadCase.ltyp
ltypName= getLoadTypeName(ltyp)
tmp= oI.ObjectItem(str(ltyp))
tmp.t= ltypName
retval.setP5(tmp) #??
return retval
class LoadCaseContainer(ctr.Container):
def __init__(self,loadCasesDict):
super(LoadCaseContainer,self).__init__(idLoadCaseContainer,tLoadCaseContainer)
loadCases= list()
for key in sorted(loadCasesDict):
ns= loadCasesDict[key]
loadCases.append(getLoadCaseObject(ns))
self.appendTable(tb.TableXMLNodes(idLoadCaseContainerTb,tLoadCaseContainerTb, 'Load cases', None,loadCases))
|
FRosner/drunken-data-quality
|
python/setup.py
|
Python
|
apache-2.0
| 1,956
| 0.001022
|
#!/usr/bin/env python
"""
Setup file for pyddq.
This file was generated with PyScaffold 2.5.6, a tool that easily
puts up a scaffold for your new Python project. Learn more under:
http://pyscaffold.readthedocs.org/
"""
import sys
import os
import glob
import subprocess
from setuptools import setup, Command
class IntegrationTestCommand(Command):
description = "A command to run integration tests"
user_options = [("jar=", None, "Path to Drunken Data Quality jar")]
jar = None
addopts = None
def initialize_options(self):
pass
    def finalize_options(self):
if self.addopts is None:
exit("error: option addopts should be specified in setup.cfg")
elif self.jar is None:
exit("error: path to Drunken Data Quality jar should be specified")
def run(self):
log4j_path = os.path.abspath("../src/test/resources/log4j.properties")
result = 0
try:
for filename in glob.glob(os.path.join(self.addopts, "test_*.py")):
result = result or subprocess.call([
"spark-submit",
"--driver-java-options",
'"-Dlog4j.configuration=file://{path}"'.format(path=log4j_path),
"--driver-class-path",
self.jar,
filename
])
except OSError as e:
if e.errno == os.errno.ENOENT:
exit("spark-submit is not found!")
else:
exit(str(e))
exit(result)
def setup_package():
needs_sphinx = {'build_sphinx', 'upload_docs'}.intersection(sys.argv)
sphinx = ['sphinx'] if needs_sphinx else []
setup(setup_requires=['six', 'pyscaffold>=2.5a0,<2.6a0'] + sphinx,
use_pyscaffold=True,
cmdclass={
"integration_test": IntegrationTestCommand
})
if __name__ == "__main__":
setup_package()
|
OpenUpgrade-dev/OpenUpgrade
|
openerp/addons/test_inherit/models.py
|
Python
|
agpl-3.0
| 2,104
| 0.000951
|
# -*- coding: utf-8 -*-
from openerp import models, fields, api, osv
# We just create a new model
class mother(models.Model):
_name = 'test.inherit.mother'
_columns = {
# check interoperability of field inheritance with old-style fields
'name': osv.fields.char('Name', required=True),
}
surname = fields.Char(compute='_compute_surname')
state = fields.Selection([('a', 'A'), ('b', 'B')])
@api.one
@api.depends('name')
def _compute_surname(self):
self.surname = self.name or ''
# We want to inherit from the parent model and we add some fields
# in the child object
class daughter(models.Model):
_name = 'test.inherit.daughter'
_inherits = {'test.inherit.mother': 'template_id'}
template_id = fields.Many2one('test.inherit.mother', 'Template',
required=True, ondelete='cascade')
field_in_daughter = fields.Char('Field1')
# We add a new field in the parent object. Because of a recent refactoring,
# this feature was broken.
# This test and these models try to show the bug and fix it.
class mother(models.Model):
_inherit = 'test.inherit.mother'
field_in_mother = fields.Char()
# extend the name field by adding a default value
name = fields.Char(default='Unknown')
# extend the selection of the state field
state = fields.Selection(selection_add=[('c', 'C')])
# override the computed field, and extend its dependencies
@api.one
@api.depends('field_in_mother')
def _compute_surname(self):
if self.field_in_mother:
            self.surname = self.field_in_mother
else:
super(mother, self)._compute_surname()
class mother(models.Model):
_inherit = 'test.inherit.mother'
    # extend again the selection of the state field
state = fields.Selection(selection_add=[('d', 'D')])
class daughter(models.Model):
_inherit = 'test.inherit.daughter'
# simply redeclare the field without adding any option
template_id = fields.Many2one()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Xilinx/meta-petalinux
|
lib/devtool/plnx-sdk-update.py
|
Python
|
mit
| 6,269
| 0.005104
|
#*******************************************************************************
#
# Copyright (C) 2019 Xilinx, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ******************************************************************************
import os
import subprocess
import logging
import glob
import shutil
import errno
import sys
import tempfile
import re
from devtool import exec_build_env_command, setup_tinfoil, parse_recipe, DevtoolError
logger = logging.getLogger('devtool')
def plnx_sdk_update(args, config, basepath, workspace):
"""Entry point for devtool plnx-sdk-update command"""
updateserver = ''.join(args.updateserver)
if not updateserver:
updateserver = config.get('SDK', 'updateserver', '')
logger.debug("updateserver: %s" % updateserver)
sys.path.insert(0, os.path.join(basepath, 'layers/core/scripts/lib/devtool'))
from sdk import check_manifest, generate_update_dict, get_sstate_objects, install_sstate_objects
# Make sure we are using sdk-update from within SDK
logger.debug("basepath = %s" % basepath)
old_locked_sig_file_path = os.path.join(basepath, 'conf/locked-sigs.inc')
if not os.path.exists(old_locked_sig_file_path):
logger.error("Not using devtool's sdk-update command from within an extensible SDK. Please specify correct basepath via --basepath option")
return -1
else:
logger.debug("Found conf/locked-sigs.inc in %s" % basepath)
layers_dir = os.path.join(basepath, 'layers')
conf_dir = os.path.join(basepath, 'conf')
# Fetch manifest from server
tmpmanifest = os.path.join(updateserver, 'conf/sdk-conf-manifest')
changedfiles = check_manifest(tmpmanifest, basepath)
if not changedfiles:
logger.info("Already up-to-date")
return 0
#fetch sstate-cache
new_locked_sig_file_path = os.path.join(updateserver, 'conf/locked-sigs.inc')
if not os.path.exists(new_locked_sig_file_path):
logger.error("%s doesn't exist or is not an extensible SDK" % updateserver)
return -1
else:
logger.debug("Found conf/locked-sigs.inc in %s" % updateserver)
update_dict = generate_update_dict(new_locked_sig_file_path, old_locked_sig_file_path)
logger.debug("update_dict = %s" % update_dict)
newsdk_path = updateserver
sstate_dir = os.path.join(newsdk_path, 'sstate-cache')
if not os.path.exists(sstate_dir):
logger.error("sstate-cache directory not found under %s" % newsdk_path)
return 1
sstate_objects = get_sstate_objects(update_dict, sstate_dir)
logger.debug("sstate_objects = %s" % sstate_objects)
if len(sstate_objects) == 0:
logger.info("No need to update.")
logger.info("Installing sstate objects into %s", basepath)
install_sstate_objects(sstate_objects, updateserver.rstrip('/'), basepath)
# Check if UNINATIVE_CHECKSUM changed
uninative = False
if 'conf/local.conf' in changedfiles:
def read_uninative_checksums(fn):
chksumitems = []
with open(fn, 'r') as f:
for line in f:
if line.startswith('UNINATIVE_CHECKSUM'):
splitline = re.split(r'[\[\]"\']', line)
if len(splitline) > 3:
chksumitems.append((splitline[1], splitline[3]))
return chksumitems
        oldsums = read_uninative_checksums(os.path.join(basepath, 'conf/local.conf'))
newsums = read_uninative_checksums(os.path.join(updateserver, 'conf/local.conf'))
if oldsums != newsums:
uninative = True
if uninative:
shutil.rmtree(os.path.join(basepath, 'downloads', 'uninative'))
shutil.move(os.path.join(updateserver, 'downloads', 'uninative'), os.path.join(basepath, 'downloads'))
logger.info("Upd
|
ating configuration files")
new_conf_dir = os.path.join(updateserver, 'conf')
shutil.rmtree(conf_dir)
shutil.copytree(new_conf_dir, conf_dir)
logger.info("Updating layers")
new_layers_dir = os.path.join(updateserver, 'layers')
shutil.rmtree(layers_dir)
ret = subprocess.call("cp -a %s %s" % (new_layers_dir, layers_dir), shell=True)
if ret != 0:
logger.error("Copying %s to %s failed" % (new_layers_dir, layers_dir))
return ret
def register_commands(subparsers, context):
"""Register devtool subcommands from the sdk plugin"""
if context.fixed_setup:
parser_plnx_sdk_update = subparsers.add_parser('plnx-sdk-update',
help='Update file based SDK components',
description='Updates installed SDK components from a local file path',
group='sdk')
updateserver = context.config.get('SDK', 'updateserver', '')
parser_plnx_sdk_update.add_argument('updateserver', help='The update server to fetch latest SDK components from (default %s)' % updateserver, nargs='+')
parser_plnx_sdk_update.add_argument('--skip-prepare', action="store_true", help='Skip re-preparing the build system after updating (for debugging only)')
parser_plnx_sdk_update.set_defaults(func=plnx_sdk_update)
|
nferch/acd_cli
|
acdcli/api/metadata.py
|
Python
|
gpl-2.0
| 10,176
| 0.002752
|
"""Node metadata operations"""
import json
import logging
import http.client
from collections import namedtuple
from .common import *
logger = logging.getLogger(__name__)
ChangeSet = namedtuple('Changes', ['nodes', 'purged_nodes', 'checkpoint', 'reset'])
class MetadataMixin(object):
def get_node_list(self, **params) -> list:
""":param params: may include tempLink='True'"""
return self.BOReq.paginated_get(self.metadata_url + 'nodes', params)
def get_file_list(self) -> list:
return self.get_node_list(filters='kind:FILE')
def get_folder_list(self) -> list:
return self.get_node_list(filters='kind:FOLDER')
def get_asset_list(self) -> list:
return self.get_node_list(filters='kind:ASSET')
def get_trashed_folders(self) -> list:
return self.get_node_list(filters='status:TRASH AND kind:FOLDER')
def get_trashed_files(self) -> list:
return self.get_node_list(filters='status:TRASH AND kind:FILE')
def get_changes(self, checkpoint='', include_purged=False) -> 'Generator[ChangeSet]':
""" Generates a ChangeSet for each checkpoint in changes response. See
`<https://developer.amazon.com/public/apis/experience/cloud-drive/content/changes>`_."""
logger.info('Getting changes with checkpoint "%s".' % checkpoint)
body = {}
if checkpoint:
body['checkpoint'] = checkpoint
if include_purged:
body['includePurged'] = 'true'
r = self.BOReq.post(self.metadata_url + 'changes', data=json.dumps(body), stream=True)
if r.status_code not in OK_CODES:
r.close()
raise RequestError(r.status_code, r.text)
try:
for cs in self._iter_changes_lines(r):
yield cs
except (http.client.IncompleteRead, requests.exceptions.ChunkedEncodingError) as e:
logger.info(str(e))
raise RequestError(RequestError.CODE.INCOMPLETE_RESULT,
'[acd_api] reading changes terminated prematurely.')
except:
raise
finally:
r.close()
@staticmethod
def _iter_changes_lines(r: requests.Response) -> 'Generator[ChangeSet]':
"""Generates a ChangeSet per line in changes response
the expected return format should be:
{"checkpoint": str, "reset": bool, "nodes": []}
{"checkpoint": str, "reset": false, "nodes": []}
{"end": true}"""
end = False
pages = -1
for line in r.iter_lines(chunk_size=10 * 1024 ** 2, decode_unicode=False):
# filter out keep-alive new lines
if not line:
continue
reset = False
pages += 1
nodes = []
purged_nodes = []
try:
o = json.loads(line.decode('utf-8'))
except ValueError:
raise RequestError(RequestError.CODE.INCOMPLETE_RESULT,
'[acd_api] Invalid JSON in change set, page %i.' % pages)
try:
if o['end']:
end = True
continue
except KeyError:
pass
if o['reset']:
logger.info('Found "reset" tag in changes.')
reset = True
# could this actually happen?
if o['statusCode'] not in OK_CODES:
raise RequestError(RequestError.CODE.FAILED_SUBREQUEST,
'[acd_api] Partial failure in change request.')
for node in o['nodes']:
if node['status'] == 'PURGED':
purged_nodes.append(node['id'])
else:
nodes.append(node)
checkpoint = o['checkpoint']
logger.debug('Checkpoint: %s' % checkpoint)
yield ChangeSet(nodes, purged_nodes, checkpoint, reset)
logger.info('%i page(s) in changes.' % pages)
if not end:
logger.warning('End of change request not reached.')
def get_metadata(self, node_id: str, assets=False, temp_link=True) -> dict:
"""Gets a node's metadata."""
params = {'tempLink': 'true' if temp_link else 'false',
'asset': 'ALL' if assets else 'NONE'}
r = self.BOReq.get(self.metadata_url + 'nodes/' + node_id, params=params)
if r.status_code not in OK_CODES:
raise RequestError(r.status_code, r.text)
return r.json()
# this will increment the node's version attribute
def update_metadata(self, node_id: str, properties: dict) -> dict:
"""Update a node's properties like name, description, status, parents, ..."""
body = json.dumps(properties)
r = self.BOReq.patch(self.metadata_url + 'nodes/' + node_id, data=body)
if r.status_code not in OK_CODES:
raise RequestError(r.status_code, r.text)
return r.json()
def get_root_id(self) -> str:
"""Gets the ID of the root node
:returns: the topmost folder id"""
params = {'filters': 'isRoot:true'}
        r = self.BOReq.get(self.metadata_url + 'nodes', params=params)
if r.status_code not in OK_CODES:
raise RequestError(r.status_code, r.text)
data = r.json()
if 'id' in data['data'][0]:
return data['data'][0]['id']
def list_children(self, node_id: str) -> list:
l = self.BOReq.paginated_get(self.metadata_url + 'nodes/' + node_id + '/children')
return l
def add_child(self, parent_id: str, child_id: str) -> dict:
"""Adds node with ID *child_id* to folder with ID *parent_id*.
:returns: updated child node dict"""
r = self.BOReq.put(self.metadata_url + 'nodes/' + parent_id + '/children/' + child_id)
if r.status_code not in OK_CODES:
logger.error('Adding child failed.')
raise RequestError(r.status_code, r.text)
return r.json()
def remove_child(self, parent_id: str, child_id: str) -> dict:
""":returns: updated child node dict"""
r = self.BOReq.delete(
self.metadata_url + 'nodes/' + parent_id + "/children/" + child_id)
# contrary to response code stated in API doc (202 ACCEPTED)
if r.status_code not in OK_CODES:
logger.error('Removing child failed.')
raise RequestError(r.status_code, r.text)
return r.json()
def move_node_from(self, node_id: str, old_parent_id: str, new_parent_id: str) -> dict:
"""Moves node with given ID from old parent to new parent.
Not tested with multi-parent nodes.
:returns: changed node dict"""
data = {'fromParent': old_parent_id, 'childId': node_id}
r = self.BOReq.post(self.metadata_url + 'nodes/' + new_parent_id + '/children',
data=json.dumps(data))
if r.status_code not in OK_CODES:
raise RequestError(r.status_code, r.text)
return r.json()
def move_node(self, node_id: str, parent_id: str) -> dict:
return self.update_metadata(node_id, {'parents': [parent_id]})
def rename_node(self, node_id: str, new_name: str) -> dict:
properties = {'name': new_name}
return self.update_metadata(node_id, properties)
def set_available(self, node_id: str) -> dict:
"""Sets node status from 'PENDING' to 'AVAILABLE'."""
properties = {'status': 'AVAILABLE'}
return self.update_metadata(node_id, properties)
def get_owner_id(self):
"""Provisional function for retrieving the security profile's name, a.k.a. owner id."""
node = self.create_file('acd_cli_get_owner_id')
self.move_to_trash(node['id'])
return node['createdBy']
def list_properties(self, node_id: str, owner_id: str) -> dict:
"""This will always return an empty dict if the accessor is not the owner.
:param owner_id: owner ID (return status 404 if empty)"""
r = self.BOReq.get(self.metadata_url + 'nodes/' + node_id + '/properties/' + owner_id)
        if r.status_code not in OK_CODES:
|
asajeffrey/servo
|
etc/start_servo.py
|
Python
|
mpl-2.0
| 992
| 0.001008
|
# Copyright 2018 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
"""
Created on Mon Mar 26 20:08:25 2018
@author: Pranshu Sinha, Abhay Soni, Aayushi Agrawal
The script is intended to start servo on localhost:7002
"""
import subprocess
def start_servo(port, resolution):
# Use the below command if you are running this script on windows
    # cmds = 'mach.bat run --webdriver ' + port + ' --resolution ' + resolution
cmds = './mach run --webdriver=' + port + ' --resolution ' + resolution
process = subprocess.Popen(cmds, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return process
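# A minimal usage sketch (the port and resolution values below are assumptions,
# not mandated by this script):
if __name__ == '__main__':
    servo_process = start_servo('7002', '1024x740')
    print(servo_process.pid)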
|
janezd/kontrabant
|
kontrabant.py
|
Python
|
gpl-3.0
| 40,758
| 0.000589
|
## Unquill: Copyright (C) 2003 Janez Demsar
##
## During development I peeked a lot at Unquill from John Elliott, 1996-2000.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
import pickle
import time
from PyQt5 import QtCore, QtWidgets
from random import randint
class Quill:
class Event:
NIL, LOC, MSG, OBJ, SWAP, PLC = tuple(range(100, 106))
cond_ops = [("AT", "data.location_no == param1"),
("NOT AT", "data.location_no != param1"),
("AT GT", "data.location_no > param1"),
("AT LT", "data.location_no < param1"),
("PRESENT",
"data.objects[param1].location == data.location_no"),
("ABSENT",
"data.objects[param1].location != data.location_no"),
("WORN",
"data.objects[param1].location == data.Object.WORN"),
("NOT WORN",
"data.objects[param1].location != data.Object.WORN"),
("CARRIED",
"data.objects[param1].location == data.Object.CARRIED"),
("NOT CARR",
"data.objects[param1].location != data.Object.CARRIED"),
("CHANCE", "param1 < randint(1, 100)"),
("ZERO", "not data.flags[param1]"),
("NOT ZERO", "data.flags[param1]"),
("EQ", "data.flags[param1]==param2"),
("GT", "data.flags[param1]>param2"),
("LT", "data.flags[param1]<param2")]
ptas = {
0: (["INVEN", "DESC", "QUIT", "END", "DONE", "OK",
"ANYKEY", "SAVE", "LOAD", "TURNS", "SCORE",
"PAUSE", "GOTO", "MESSAGE", "REMOVE", "GET",
"DROP", "WEAR", "DESTROY", "CREATE", "SWAP",
"SET", "CLEAR", "PLUS", "MINUS", "LET", "BEEP"],
[0] * 11 + [1] * 9 + [2, 1, 1] + [2]*16,
[NIL] * 12 + [LOC, MSG] + [OBJ] * 6 + [SWAP] + [NIL] * 18),
5: (["INVEN", "DESC", "QUIT", "END", "DONE", "OK",
"ANYKEY", "SAVE", "LOAD", "TURNS", "SCORE",
"CLS", "DROPALL", "PAUSE", "PAPER", "INK",
"BORDER", "GOTO", "MESSAGE", "REMOVE", "GET",
"DROP", "WEAR", "DESTROY", "CREATE", "SWAP",
"PLACE", "SET", "CLEAR", "PLUS", "MINUS",
"LET", "BEEP"],
[0] * 13 + [1] * 12 + [2, 2, 1, 1] + [2] * 10,
[NIL] * 17 + [LOC, MSG] + [OBJ] * 6 + [SWAP, PLC] + [NIL]*12),
7: (["INVEN", "DESC", "QUIT", "END", "DONE", "OK",
"ANYKEY", "SAVE", "LOAD", "TURNS", "SCORE",
"CLS", "DROPALL", "AUTOG", "AUTOD", "AUTOW",
"AUTOR", "PAUSE", "PAPER", "INK", "BORDER",
"GOTO", "MESSAGE", "REMOVE", "GET", "DROP",
"WEAR", "DESTROY", "CREATE", "SWAP", "PLACE",
"SET", "CLEAR", "PLUS", "MINUS", "LET", "BEEP"],
[0] * 17 + [1] * 12 + [2, 2, 1] + [2] * 7,
[NIL] * 21 + [LOC, MSG] + [OBJ] * 6 + [SWAP, PLC] + [NIL] * 8)}
def __init__(self, sna, ptr, dbver=0):
self.act_ops, self.nparams, self.types = self.ptas[dbver]
self.word1 = sna[ptr]
self.word2 = sna[ptr + 1]
p = sna[ptr + 2] + 256 * sna[ptr + 3]
self.conditions = []
while sna[p] != 0xff:
opcode = sna[p]
param1 = sna[p + 1]
if opcode > 12:
param2 = sna[p + 2]
p += 3
else:
param2 = None
p += 2
self.conditions.append((opcode, param1, param2))
p += 1
self.actions = []
while sna[p] != 0xff:
opcode = sna[p]
nparams = self.nparams[opcode]
params = tuple(sna[p + 1:p + 1 + nparams])
self.actions.append((opcode, params))
p += 1 + nparams
# returns: -1 for error,
# 0 for not matching,
# 1 for matching and done (no further processing),
# 2 for matching, but process further
def __call__(self, data, system, word1, word2):
def match(w, sw):
return w == sw or (not w and sw == 255)
if system or match(word1, self.word1) and match(word2, self.word2):
for op, param1, param2 in self.conditions:
if not eval(self.cond_ops[op][1]):
return 0
for action in self.actions:
meth = getattr(data,
"do_" + self.act_ops[action[0]].lower())
res = meth(*action[1])
if res:
return res
return 2
class Location:
def __init__(self, description, conn=None):
self.description = description
self.connections = conn or {}
class Object:
INVALID, CARRIED, WORN, NOT_CREATED = 0xff, 0xfe, 0xfd, 0xfc
def __init__(self, description, initial=NOT_CREATED):
self.description = description
self.initial = self.location = initial
#######################################
# Actions
def do_get(self, param1):
loc = self.objects[param1].location
if loc == self.Object.WORN or loc == self.Object.CARRIED:
self.printout("To vendar že nosim!")
return -1
elif loc != self.location_no:
self.printout("Saj ni tukaj.")
return -1
elif self.flags[1] == self.nobjects_carry:
return -1
else:
self.objects[param1].location = self.Object.CARRIED
self.flags[1] += 1
def do_wear(self, param1):
loc = self.objects[param1].location
if loc == self.Object.WORN:
self.printout("To vendar že nosim!")
return -1
elif loc != self.Object.CARRIED:
self.printout("Tega sploh nimam!")
return -1
else:
self.objects[param1].location = self.Object.WORN
def do_drop(self, param1):
loc = self.objects[param1].location
        if (loc == self.Object.WORN) or (loc == self.Object.CARRIED):
self.objects[param1].location = self.location_no
else:
self.printout("Tega sploh nimam.")
return -1
def do_remove(self, param1):
loc = self.objects[param1].location
if loc != self.Object.WORN:
self.printout("Tega sploh ne nosim!")
return -1
        else:
self.objects[param1].location = self.Object.CARRIED
def do_dropall(self):
for obj in self.objects:
if obj.location == self.Object.WORN or \
obj.location == self.Object.CARRIED:
obj.location = self.location_no
self.flags[1] = 0
def do_goto(self, locno):
self.location = self.locations[locno]
self.location_no = locno
self.flags[2] = locno
def do_create(self, objno):
loc = self.objects[objno].location
if loc == self.Object.WORN or loc == self.Object.CARRIED:
self.flags[1] -= 1
self.objects[objno].location = self.location_no
def do_destroy(self, objno):
        loc = self.objects[objno].location
|
JonasWallin/logisticnormal
|
logisticnormal/PurePython/priors.py
|
Python
|
gpl-3.0
| 5,934
| 0.004887
|
'''
Created on Jul 5, 2014
updated: May 28, 2015: added AMCMC
@author: jonaswallin
'''
import numpy as np
import numpy.random as npr
from ..utils.gammad import ln_gamma_d
def f_prior_nu(x, nu=0.01):
"""
    default prior for nu which is gamma
returns : log of prior
"""
return -nu*x
class nu_class(object):
'''
    Class object for sampling the prior parameter of a Wishart distribution
'''
def __init__(self, nu0=None, param=None, prior=None, prior_func=None, AMCMC=True):
'''
        param - dict with ['Q'] or better ['detQ'] (which contains the log det of Q)
        prior is empty
        prior_func - function representing the prior, should return the log of the prior;
                     if None, use an exponential prior with lambda 0.01
        AMCMC - use adaptive MCMC to calibrate sigma to get a fixed acceptance prob (default True)
'''
self.log2 = np.log(2)
self.n = 0
self.d = 0
if not param is None:
self.set_param(param)
self.nu = nu0
self.ln_gamma_d = None
self.acc = 0.
self.iter = 0.
self.calc_lik = False
self.sigma = 5
self.iterations = 5
if prior_func is None:
self.prior = {'nu': 0.01}
self.prior_func = f_prior_nu
self.AMCMC = AMCMC
if self.AMCMC:
self.set_AMCMC()
self.amcmc_count = 0.
self.amcmc_accept = 0.
def set_val(self, nu):
self.nu = nu
self.calc_lik = False
def set_d(self, d):
"""
Set dimension
"""
        self.calc_lik = False
self.d = d
if self.nu is None:
self.nu = 2*self.d
self.ln_gamma_d = ln_gamma_d(self.d)
def set_parameter(self, param):
"""
        param - dict with ['Q'] or better ['detQ'] (which contains the log det of Q)
"""
#print param['Q']
if 'detQ' not in param:
self.logDetQ = np.log(np.linalg.det(param['Q']))
else:
self.logDetQ = param['detQ']
self.calc_lik = False
def set_prior(self, *args):
"""
dont have prior for this class
"""
pass
def set_data(self, data=None, det_data=None):
"""
data is a list of covariances
and det_data is the list of the log determinant of data
"""
self.calc_lik = False
self.logDetSigma = 0
if det_data is None:
self.n = len(data)
for Sigma in data:
self.logDetSigma += np.log(np.linalg.det(Sigma))
else:
self.n = len(det_data)
for det in det_data:
self.logDetSigma += det
def set_MH_param(self, sigma=5, iterations=5):
"""
        setting the parameters for the MH algorithm
        sigma - the sigma in the MH algorithm on the natural-number line
        iterations - number of times to sample using the MH algorithm
"""
self.sigma = sigma
self.iterations = iterations
def sample(self):
"""
Samples a metropolis hastings random walk proposal
on N^+
"""
for i in range(self.iterations): # @UnusedVariable
self.sample_()
if self.AMCMC:
self.update_AMCMC()
return self.nu
def sample_(self):
self.iter += 1
self.amcmc_count += 1
nu_star = npr.randint(self.nu - self.sigma, self.nu + self.sigma + 1) # rounding
if nu_star == self.nu:
self.acc += 1
self.amcmc_accept += 1
return
if nu_star <= self.d + 1:
return
loglik_star = self.loglik(nu_star)
loglik = self.__call__()
#print "***********"
#print "nu = %d"%self.nu
#print "nus = %d"%nu_star
#print "loglik_star = %.2f"%loglik_star
#print "loglik = %.2f"%loglik
#print "log[Sigma] = %.2f"%self.logDetSigma
#print "n*log[Q] = %.2f"%(self.n * self.logDetQ)
#print "***********"
if np.log(npr.rand()) < loglik_star - loglik:
self.acc += 1
self.amcmc_accept += 1
self.loglik_val = loglik_star
self.nu = nu_star
def __call__(self):
if self.calc_lik == False:
self.loglik_val = self.loglik(self.nu)
self.calc_lik = True
return self.loglik_val
def loglik(self, nu):
nud2 = 0.5 * nu
loglik = -nud2*self.d*self.n*self.log2
loglik -= self.n * self.ln_gamma_d(nud2)
loglik -= nud2 * self.logDetSigma
loglik += nud2 * self.n * self.logDetQ
if not self.prior_func is None:
loglik += self.prior_func(nu, **self.prior)
return loglik
def set_AMCMC(self, batch=50, accpate=0.3, delta_rate=.5):
"""
Using AMCMC
batch - (int) how often to update sigma_MCMC
        accpate - [0,1] desired acceptance rate (0.3)
delta_rate - [0,1] updating ratio for the amcmc
"""
self.amcmc_delta_max = 0.1
self.amcmc_desired_accept = accpate
self.amcmc_batch = batch
self.amcmc_delta_rate = delta_rate
self.AMCMC = True
def update_AMCMC(self):
"""
        Using Roberts and Rosenthal's method for tuning the acceptance rate
"""
if (self.amcmc_count + 1) % self.amcmc_batch == 0:
delta = np.min([self.amcmc_delta_max, (self.amcmc_count/self.amcmc_batch)**(-self.amcmc_delta_rate)])
if self.amcmc_accept / self.amcmc_batch > self.amcmc_desired_accept:
self.sigma *= np.exp(delta)
else:
self.sigma /= np.exp(delta)
#self.amcmc_count = 0.
self.amcmc_accept = 0.
|
googleapis/python-service-directory
|
samples/generated_samples/servicedirectory_v1_generated_registration_service_list_services_sync.py
|
Python
|
apache-2.0
| 1,551
| 0.000645
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListServices
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-service-directory
# [START servicedirectory_v1_generated_RegistrationService_ListServices_sync]
from google.cloud import servicedirectory_v1
def sample_list_services():
# Create a client
client = servicedirectory_v1.RegistrationServiceClient()
# Initialize request argument(s)
request = servicedirectory_v1.ListServicesRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_services(request=request)
# Handle the response
for response in page_result:
print(response)
# [END servicedirectory_v1_generated_RegistrationService_ListServices_sync]
|
biswajitsahu/kuma
|
vendor/packages/translate/filters/spelling.py
|
Python
|
mpl-2.0
| 2,109
| 0.000474
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2007 Zuza Software Foundation
# 2013 F Wolff
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""An API to provide spell checking for use in checks or elsewhere."""
import logging
logger = logging.getLogger(__name__)
available = False
try:
# Enchant
from enchant import checker, Error as EnchantError
available = True
checkers = {}
    def _get_checker(lang):
if not lang in checkers:
try:
checkers[lang] = checker.SpellChecker(lang)
# some versions only report an error when checking something
checkers[lang].check(u'bla')
except EnchantError as e:
# sometimes this is raised instead of DictNotFoundError
logger.error(str(e))
checkers[lang] = None
return checkers[lang]
def check(text, lang):
spellchecker = _get_checker(lang)
if not spellchecker:
return
spellchecker.set_text(unicode(text))
for err in spellchecker:
yield err.word, err.wordpos, err.suggest()
def simple_check(text, lang):
spellchecker = _get_checker(lang)
if not spellchecker:
return
spellchecker.set_text(unicode(text))
for err in spellchecker:
yield err.word
except ImportError:
def check(text, lang):
return []
def simple_check(text, lang):
return []
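# A minimal usage sketch (hypothetical helper; the sample text and the "en_US"
# language code are assumptions). Suggestions are only produced when pyenchant
# and a matching dictionary are available; otherwise check() yields nothing.
def _example_usage():
    for word, pos, suggestions in check(u"Thsi sentense has erors", "en_US"):
        logger.warning("misspelt %r at %d, suggestions: %s", word, pos, suggestions)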
|
drxaero/calibre
|
src/calibre/gui2/dbus_export/menu.py
|
Python
|
gpl-3.0
| 14,935
| 0.00221
|
#!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
# Support for exporting Qt's MenuBars/Menus over DBUS. The API is defined in
# dbus-menu.xml from the libdbusmenu project https://launchpad.net/libdbusmenu
import dbus, sip
from PyQt5.Qt import (
QApplication, QMenu, QIcon, QKeySequence, QObject, QEvent, QTimer, pyqtSignal, Qt)
from calibre.utils.dbus_service import Object, BusName, method as dbus_method, dbus_property, signal as dbus_signal
from calibre.gui2.dbus_export.utils import (
setup_for_cli_run, swap_mnemonic_char, key_sequence_to_dbus_shortcut, icon_to_dbus_menu_icon)
null = object()
def PropDict(mapping=()):
return dbus.Dictionary(mapping, signature='sv')
def create_properties_for_action(ac, previous=None):
ans = PropDict()
if ac.isSeparator():
ans['type'] = 'separator'
if not ac.isVisible():
ans['visible'] = False
return ans
text = ac.text() or ac.iconText()
if text:
ans['label'] = swap_mnemonic_char(text)
if not ac.isEnabled():
ans['enabled'] = False
if not ac.isVisible() or ac.property('blocked') is True:
ans['visible'] = False
if ac.menu() is not None:
ans['children-display'] = 'submenu'
if ac.isCheckable():
exclusive = ac.actionGroup() is not None and ac.actionGroup().isExclusive()
ans['toggle-type'] = 'radio' if exclusive else 'checkmark'
ans['toggle-state'] = int(ac.isChecked())
shortcuts = ac.shortcuts()
if shortcuts:
sc = dbus.Array(signature='as')
for s in shortcuts:
if not s.isEmpty():
for x in key_sequence_to_dbus_shortcut(s):
sc.append(dbus.Array(x, signature='s'))
if sc:
ans['shortcut'] = sc[:1] # Unity fails to display the shortcuts at all if more than one is specified
if ac.isIconVisibleInMenu():
icon = ac.icon()
if previous and previous.get('x-qt-icon-cache-key') == icon.cacheKey():
for x in 'icon-data x-qt-icon-cache-key'.split():
ans[x] = previous[x]
else:
data = icon_to_dbus_menu_icon(ac.icon())
if data is not None:
ans['icon-data'] = data
ans['x-qt-icon-cache-key'] = icon.cacheKey()
return ans
def menu_actions(menu):
try:
return menu.actions()
except TypeError:
if isinstance(menu, QMenu):
return QMenu.actions(menu)
raise
class DBusMenu(QObject):
handle_event_signal = pyqtSignal(object, object, object, object)
def __init__(self, object_path, parent=None, bus=None):
QObject.__init__(self, parent)
        # Unity barfs if the Event DBUS method does not return immediately, so
# handle it asynchronously
self.handle_event_signal.connect(self.handle_event, type=Qt.QueuedConnection)
self.dbus_api = DBusMenuAPI(self, object_path, bus=bus)
self.set_status = self.dbus_api.set_status
self._next_id = 0
self.action_changed_timer = t = QTimer(self)
t.setInterval(0), t.setSingleShot(True), t.timeout.connect(self.actions_changed)
self.layout_changed_timer = t = QTimer(self)
t.setInterval(0), t.setSingleShot(True), t.timeout.connect(self.layouts_changed)
self.init_maps()
@property
def object_path(self):
return self.dbus_api._object_path
def init_maps(self, qmenu=None):
self.action_changes = set()
self.layout_changes = set()
self.qmenu = qmenu
self._id_to_action, self._action_to_id = {}, {}
self._action_properties = {}
@property
def next_id(self):
self._next_id += 1
return self._next_id
def id_to_action(self, action_id):
if self.qmenu is None:
return None
return self._id_to_action.get(action_id)
def action_to_id(self, action):
if self.qmenu is None:
return None
return self._action_to_id.get(action)
def action_properties(self, action_id, restrict_to=None):
if self.qmenu is None:
return {}
ans = self._action_properties.get(action_id, PropDict())
if restrict_to:
ans = PropDict({k:v for k, v in ans.iteritems() if k in restrict_to})
return ans
def publish_new_menu(self, qmenu=None):
self.init_maps(qmenu)
if qmenu is not None:
qmenu.destroyed.connect(lambda obj=None:self.publish_new_menu())
ac = qmenu.menuAction()
self.add_action(ac)
self.dbus_api.LayoutUpdated(self.dbus_api.revision, 0)
    def set_visible(self, visible):
ac = self.id_to_action(0)
if ac is not None and self.qmenu is not None:
changed = False
blocked = not visible
for ac in menu_actions(ac.menu()):
ac_id = self.action_to_id(ac)
if ac_id is not None:
old = ac.property('blocked')
if old is not blocked:
ac.setProperty('blocked', blocked)
self.action_changes.add(ac_id)
changed = True
if changed:
self.action_changed_timer.start()
def add_action(self, ac):
ac_id = 0 if ac.menu() is self.qmenu else self.next_id
self._id_to_action[ac_id] = ac
self._action_to_id[ac] = ac_id
self._action_properties[ac_id] = create_properties_for_action(ac)
if ac.menu() is not None:
self.add_menu(ac.menu())
def add_menu(self, menu):
menu.installEventFilter(self)
for ac in menu_actions(menu):
self.add_action(ac)
def eventFilter(self, obj, ev):
ac = getattr(obj, 'menuAction', lambda : None)()
ac_id = self.action_to_id(ac)
if ac_id is not None:
etype = ev.type()
if etype == QEvent.ActionChanged:
ac_id = self.action_to_id(ev.action())
self.action_changes.add(ac_id)
self.action_changed_timer.start()
elif etype == QEvent.ActionAdded:
self.layout_changes.add(ac_id)
self.layout_changed_timer.start()
self.add_action(ev.action())
elif etype == QEvent.ActionRemoved:
self.layout_changes.add(ac_id)
self.layout_changed_timer.start()
self.action_removed(ev.action())
return False
def actions_changed(self):
updated_props = dbus.Array(signature='(ia{sv})')
removed_props = dbus.Array(signature='(ias)')
for ac_id in self.action_changes:
ac = self.id_to_action(ac_id)
if ac is None:
continue
old_props = self.action_properties(ac_id)
new_props = self._action_properties[ac_id] = create_properties_for_action(ac, old_props)
removed = set(old_props) - set(new_props)
if removed:
removed_props.append((ac_id, dbus.Array(removed, signature='as')))
updated = PropDict({k:v for k, v in new_props.iteritems() if v != old_props.get(k, null)})
if updated:
updated_props.append((ac_id, updated))
self.action_changes = set()
if updated_props or removed_props:
self.dbus_api.ItemsPropertiesUpdated(updated_props, removed_props)
return updated_props, removed_props
def layouts_changed(self):
changes = set()
for ac_id in self.layout_changes:
if ac_id in self._id_to_action:
changes.add(ac_id)
self.layout_changes = set()
if changes:
self.dbus_api.revision += 1
for change in changes:
self.dbus_api.LayoutUpdated(self.dbus_api.revision, change)
return changes
def action_is_in_a_menu(self, ac):
if sip.isdeleted(ac):

ebrehault/diazo | docs/conf.py | Python | bsd-3-clause | 6,987 | 0.006441

# -*- coding: utf-8 -*-
#
# Diazo documentation build configuration file, created by
# sphinx-quickstart on Tue Nov 2 18:58:07 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Diazo'
copyright = u'2011, Plone Foundation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0b1'
# The full version, including alpha/beta/rc tags.
release = '1.0b1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'haiku'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "Diazo"
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = "images/logo.jpg"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Diazodoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Diazo.tex', u'Diazo Documentation',
u'Plone Foundation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'diazo', u'Diazo Documentation',
[u'Plone Foundation'], 1)
]
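# A build sketch (assuming Sphinx is installed and this conf.py lives in the
# docs/ directory): running
#
#     sphinx-build -b html docs docs/_build/html
#
# from the project root should render the HTML documentation with the settings
# above; ``-b latex`` or ``-b man`` would use the latex_documents / man_pages
# entries instead.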