| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
ChameleonCloud/horizon
|
horizon/tables/__init__.py
|
Python
|
apache-2.0
| 1,788
| 0
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this fi
|
le except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed
|
to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Convenience imports for public API components.
# Importing non-modules that are not used explicitly
from horizon.tables.actions import Action
from horizon.tables.actions import BatchAction
from horizon.tables.actions import DeleteAction
from horizon.tables.actions import FilterAction
from horizon.tables.actions import FixedFilterAction
from horizon.tables.actions import LinkAction
from horizon.tables.actions import NameFilterAction
from horizon.tables.base import Column
from horizon.tables.base import DataTable
from horizon.tables.base import Row
from horizon.tables.base import WrappingColumn
from horizon.tables.views import DataTableView
from horizon.tables.views import MixedDataTableView
from horizon.tables.views import MultiTableMixin
from horizon.tables.views import MultiTableView
from horizon.tables.views import PagedTableMixin
__all__ = [
'Action',
'BatchAction',
'DeleteAction',
'FilterAction',
'FixedFilterAction',
'LinkAction',
'NameFilterAction',
'Column',
'DataTable',
'Row',
'WrappingColumn',
'DataTableView',
'MixedDataTableView',
'MultiTableMixin',
'MultiTableView',
'PagedTableMixin',
]
|
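The prefix, middle, and suffix columns appear to be fill-in-the-middle style splits of each source file: the split points fall mid-token (e.g. "this fi" / "le except ..." in the row above), so concatenating the three fields should reproduce the original text. A minimal reassembly sketch, with the field names assumed to match the schema header and the sample record purely illustrative:

```python
def reassemble(record):
    """Rebuild the original file text from one dataset record."""
    # No separator is needed: the split boundaries fall inside the
    # original text, as the mid-word breaks in the rows above show.
    return record["prefix"] + record["middle"] + record["suffix"]


# Illustrative record (not copied from the dataset).
row = {"prefix": "def gre", "middle": "et():\n    return ", "suffix": "'hello'"}
print(reassemble(row))  # def greet(): return 'hello'
```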
tehmaze/natural
|
setup.py
|
Python
|
mit
| 1,143
| 0.011374
|
#!/usr/bin/env python
try:
from setuptools import setup
except:
from distutils.core import setup
setup(name='natural',
version='0.2.0',
description='Convert data to their natural (human-readable) format',
long_description='''
Example Usage
=============
Basic usage::
>>> from natural.file import accessed
>>> print accessed(__file__)
just now
We spe
|
ak your language (with `your support`_)::
>>> import locale
>>> locale.setlocale(locale.LC_MESSAGES, 'nl_NL')
>>> print accessed(__file__)
zojuist
Bugs/Features
=============
You can issue a ticket in GitHub: https://github.com/tehmaze/natural/issues
Documentation
=============
The project documentation can be found at http://natural.rtfd.org/
.. _your support: http://natural.readt
|
hedocs.org/en/latest/locales.html
''',
author='Wijnand Modderman-Lenstra',
author_email='maze@pyth0n.org',
license='MIT',
keywords='natural data date file number size',
url='https://github.com/tehmaze/natural',
packages=['natural'],
package_data={'natural': ['locale/*/LC_MESSAGES/*.mo']},
install_requires=['six'],
)
|
corpnewt/CorpBot.py
|
Cogs/Monitor.py
|
Python
|
mit
| 2,040
| 0.036275
|
import asyncio
import discord
from discord.ext import commands
def setup(bot):
# Disabled for now
return
# Add the bot and deps
settings = bot.get_cog("Settings")
bot.add_cog(Monitor(bot, settings))
# This is the Monitor module. It keeps track of how many commands fail
class Monitor(commands.Cog):
# Init with the bot reference
def __init__(self, bot, settings):
self.bot = bot
self.settings = settings
self.commands = []
self.commandCount = 25 # Keep 25 commands in the list max
self.threshold = 0.9 # If we fall below 90% success - reboot the bot
async def oncommand(self, command, ctx):
# Check previous commands and see if we need to reboot
passed = 0
checked = 0
for command in self.commands:
checked += 1
if command['Success'] == True:
passed += 1
if checked > 1 and float(passed/checked) < self.threshold:
# We checked at least one command - and are below threshold
print('Command success below threshold - rebooting...')
self.settings.flushSettings(self.settings.file, True)
# Logout, stop the event loop, close the loop, quit
try:
tasks = asyncio.Task.all_tasks()
except AttributeError:
tasks = asyncio.all_tasks()
for task in tasks:
try:
task.cancel()
except Exception:
continue
try:
await self.bot.logout()
self.bot.loop.stop()
self.bot.loop.close()
except Exception:
pass
try:
await exit(0)
except Exception:
|
pass
# Once we're here - we add our new command
# Save the command to a list with t
|
he message
newCommand = { 'Message': ctx.message, 'Success': False }
self.commands.append(newCommand)
while len(self.commands) > self.commandCount:
# Remove the first item in the array until we're at our limit
self.commands.pop(0)
async def oncommandcompletion(self, command, ctx):
for command in self.commands:
# command passed
if command['Message'] == ctx.message:
command['Success'] = True
|
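To make the reboot rule in Monitor.oncommand concrete (the numbers below are chosen for illustration, not taken from the repository): with the default commandCount of 25 and threshold of 0.9, the check trips as soon as more than two of the tracked commands have failed.

```python
# Mirror of the success-ratio check in Monitor.oncommand above.
passed, checked = 22, 25              # three of the last 25 commands failed
threshold = 0.9
print(passed / checked)               # 0.88
print(passed / checked < threshold)   # True -> the cog would flush settings and reboot
```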
laenderoliveira/exerclivropy
|
exercicios_resolvidos/capitulo 05/exercicio-05-17.py
|
Python
|
mit
| 695
| 0.002941
|
##############################################################################
# Part of the book Introdução à Programação com Python
# Author: Nilo Ney Coutinho Menezes
# Editora Novatec (c) 2010-2014
# First edition - November/2010 - ISBN 978-85-7522-250-8
# First reprint - October/2011
# Second reprint - November/2012
# Third reprin
|
t - August/2013
# Second edition - June/2014 - ISBN 978-85-7522-408-3
# Site: http://python.nilo.pro.br/
#
# File: exercicios_resolvidos\capitulo 05\exercicio-05-17.py
#######################################
|
#######################################
# The program stops right after printing the number of R$50.00 bills
|
loggerhead/dianping_crawler
|
dianping_crawler/spiders/base_spider.py
|
Python
|
mit
| 1,618
| 0
|
# -*- coding: utf-8 -*-
import scrapy
import logging
from datetime import datetime
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
from .delta_helper import DeltaHelper
class BaseSpider(scrapy.Spider):
# need overwrite
|
in subclass
logger = logging.getLogger(__name__)
def init(self):
self.delta = DeltaHelper(self)
self.delta.connect_db()
def extract_int(self, text):
i = -1
for i in range(len(text)):
|
if text[i].isdigit():
break
j = len(text) if i >= 0 else -1
for j in range(i + 1, len(text)):
if not text[j].isdigit():
break
try:
return int(text[i:j])
except ValueError:
self.logger.warning('cannot extract integer from "%s"', text)
return None
def aa2urls(self, aa):
urls = []
for a in aa:
urls.append(a.attrib['href'])
return urls
def add_host(self, s):
return urljoin(self.settings['HOST'], s)
# return the index of the first class that exists
def find_classes_exists(self, d, classes):
for i in range(len(classes)):
if d(classes[i]):
return i
return None
def text2date(self, date):
if date.count('-') == 1:
date = '{}-{}'.format(datetime.now().year % 100, date)
try:
date = datetime.strptime(date, '%y-%m-%d')
except ValueError:
self.logger.warning('not a valid date: "%s"', date)
date = None
return date
|
WangYihang/Webshell-Sniper
|
core/utils/string_utils/random_string.py
|
Python
|
gpl-3.0
| 211
| 0.009479
|
#!/usr/bin/env python
# encoding: utf-8
fro
|
m random import choice
def random_string(length, random_range):
result = ""
for i in range(length):
result += choic
|
e(random_range)
return result
|
williechen/DailyApp
|
18/py201501/sample/sample.py
|
Python
|
lgpl-3.0
| 1,423
| 0.015544
|
'''
Created on 2015-01-19
@author: Guan-yu Willie Chen
'''
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.support.select import Select
from selenium.webdriver.common.keys import Keys
import time
#browser = webdriver.Firefox()
#browser = webdriver.Ie()
browser = webdriver.Chrome("chromedriver.exe")
URL = ""
browser.get(URL+"/insurance/gs/sp/spLogin")
# Log in
browser.find_element_by_xpath("//input[@id='login:userName']").send_keys('')
browser.find_element_by_xpath("//input[@id='login:password']").send_keys('' + Keys.RETURN)
# Enter the front end
browser.find_element_by_xpath("//img[@name='Adminstration']").click()
# Go to the barcode printing page
browser.get(URL+"insurance/eclaim/qrcodePrint.do")
# Select the starting claim number
claimStartNo = browser.find_element_by_name("claimStartNo").send_keys("CLBR14V000000")
# Select the document name
docId = browser.find_element_by_name("docId")
for n in enumerate(docId.text.split("\n")):
print(n)
select = Select(docId)
select.select_by_index(1)
# Query
browser.find_element_by_xpath("//input[@name='queryBtn']").click()
# Pagination
browser.find_element
|
_by_xpath("//input[@id='gotoPageNo']").send_keys(Keys.BACKSPACE)
browser.find_element_by_xpath("//input[@id='gotoPageNo']").send_keys("3")
browser.find_element_by_xpath("//div[
|
@id='turnpage']/table/tbody/tr/td/input[@value='跳至']").click()
|
stephenmcd/ratemyflight
|
ratemyflight/scripts/create_project.py
|
Python
|
bsd-2-clause
| 1,581
| 0.003163
|
#!/usr/bin/env python
import os
import shutil
import sys
import ratemyflight
class ProjectException(Exception):
pass
def create_project():
"""
Copies the contents of the project_template directory to a new directory
specified as an argument to the command line.
"""
# Ensure a directory name is specified.
script_name = os.path.basename(sys.argv[0])
usage_text = "Usage: ratemyflight project_name"
usage_text += "\nProject names beginning with \"-\" are illegal."
if len(sys.argv) != 2:
raise ProjectException(usage_text)
project_name = sys.argv[1]
if project_name.startswith("-"):
raise ProjectException(usage_text)
# Ensure the given directory name doesn't clash with an existing Python
# package/module.
try:
__import__(project_name)
except ImportError:
pass
else:
raise ProjectException("'%s' conflicts with the name of an existing
|
"
"Python module and cannot be used as a project name. Please try "
"another name." % project_name)
ratemyflight_path = os.path.dirname(os.path.abspath(ratemyflight.__file__))
from_path = os.path.join(ratemyflight_path, "project_template")
to_path = o
|
s.path.join(os.getcwd(), project_name)
shutil.copytree(from_path, to_path)
shutil.move(os.path.join(to_path, "local_settings.py.template"),
os.path.join(to_path, "local_settings.py"))
if __name__ == "__main__":
try:
create_project()
except ProjectException, e:
print
print e
print
|
MostlyOpen/odoo_addons
|
myo_survey/__openerp__.py
|
Python
|
agpl-3.0
| 1,729
| 0
|
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
|
by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Th
|
is program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
{
'name': 'Survey',
'summary': 'Survey Module used by MostlyOpen Solutions.',
'version': '2.0.0',
'author': 'Carlos Eduardo Vercelino - CLVsol',
'category': 'Generic Modules/Others',
'license': 'AGPL-3',
'website': 'http://mostlyopen.org',
'depends': [
'survey',
'myo_base',
],
'data': [
'security/survey_security.xml',
'views/survey_survey_view.xml',
'views/survey_page_view.xml',
'views/survey_question_view.xml',
'views/survey_label_view.xml',
'views/survey_user_input_view.xml',
'wizard/survey_update_wizard_view.xml',
],
'demo': [],
'test': [],
'init_xml': [],
'test': [],
'update_xml': [],
'installable': True,
'application': False,
'active': False,
'css': [],
}
|
arthepsy/ssh-audit
|
test/test_output.py
|
Python
|
mit
| 4,631
| 0.038221
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
# pylint: disable=attribute-defined-outside-init
class TestOutput(object):
@pytest.fixture(autouse=True)
def init(self, ssh_audit):
self.Output = ssh_audit.Output
self.OutputBuffer = ssh_audit.OutputBuffer
def test_output_buffer_no_lines(self, output_spy):
output_spy.begin()
with self.OutputBuffer() as obuf:
pass
assert output_spy.flush() == []
output_spy.begin()
with self.OutputBuffer() as obuf:
pass
obuf.flush()
assert output_spy.flush() == []
def test_output_buffer_no_flush(self, output_spy):
output_spy.begin()
with self.OutputBuffer():
print(u'abc')
assert output_spy.flush() == []
def test_output_buffer_flush(self, output_spy):
output_spy.begin()
with self.OutputBuffer() as obuf:
print(u'abc')
print()
print(u'def')
obuf.flush()
assert output_spy.flush() == [u'abc', u'', u'def']
def test_output_defaults(self):
out = self.Output()
# default: on
assert out.batch is False
assert out.colors is True
assert out.minlevel == 'info'
def test_output_colors(self, output_spy):
out = self.Output()
# test without colors
out.colors = False
output_spy.begin()
out.info('info color')
assert output_spy.flush() == [u'info color']
output_spy.begin()
out.head('head color')
assert output_spy.flush() == [u'head color']
output_spy.begin()
out.good('good color')
assert output_spy.flush() == [u'good color']
output_spy.begin()
out.warn('warn color')
assert output_spy.flush() == [u'warn color']
output_spy.begin()
out.fail('fail color')
assert output_spy.flush() == [u'fail color']
if not out.colors_supported:
return
# test with colo
|
rs
out.colors = True
output_spy.begin()
out.info('info color')
assert output_spy.flush() == [u'info color']
output_spy.begin()
out.head('head color')
assert output_spy.flush() == [u'\x1b[0;36mhead color\x1b[0m']
output_spy.begin()
out.good('good color')
assert output_spy.flush() == [u'\x1b[0;32mgood color\x1b[0m']
output_spy.begin()
out.warn('warn color')
assert output_spy.flush() ==
|
[u'\x1b[0;33mwarn color\x1b[0m']
output_spy.begin()
out.fail('fail color')
assert output_spy.flush() == [u'\x1b[0;31mfail color\x1b[0m']
def test_output_sep(self, output_spy):
out = self.Output()
output_spy.begin()
out.sep()
out.sep()
out.sep()
assert output_spy.flush() == [u'', u'', u'']
def test_output_levels(self):
out = self.Output()
assert out.getlevel('info') == 0
assert out.getlevel('good') == 0
assert out.getlevel('warn') == 1
assert out.getlevel('fail') == 2
assert out.getlevel('unknown') > 2
def test_output_minlevel_property(self):
out = self.Output()
out.minlevel = 'info'
assert out.minlevel == 'info'
out.minlevel = 'good'
assert out.minlevel == 'info'
out.minlevel = 'warn'
assert out.minlevel == 'warn'
out.minlevel = 'fail'
assert out.minlevel == 'fail'
out.minlevel = 'invalid level'
assert out.minlevel == 'unknown'
def test_output_minlevel(self, output_spy):
out = self.Output()
# visible: all
out.minlevel = 'info'
output_spy.begin()
out.info('info color')
out.head('head color')
out.good('good color')
out.warn('warn color')
out.fail('fail color')
assert len(output_spy.flush()) == 5
# visible: head, warn, fail
out.minlevel = 'warn'
output_spy.begin()
out.info('info color')
out.head('head color')
out.good('good color')
out.warn('warn color')
out.fail('fail color')
assert len(output_spy.flush()) == 3
# visible: head, fail
out.minlevel = 'fail'
output_spy.begin()
out.info('info color')
out.head('head color')
out.good('good color')
out.warn('warn color')
out.fail('fail color')
assert len(output_spy.flush()) == 2
# visible: head
out.minlevel = 'invalid level'
output_spy.begin()
out.info('info color')
out.head('head color')
out.good('good color')
out.warn('warn color')
out.fail('fail color')
assert len(output_spy.flush()) == 1
def test_output_batch(self, output_spy):
out = self.Output()
# visible: all
output_spy.begin()
out.minlevel = 'info'
out.batch = False
out.info('info color')
out.head('head color')
out.good('good color')
out.warn('warn color')
out.fail('fail color')
assert len(output_spy.flush()) == 5
# visible: all except head
output_spy.begin()
out.minlevel = 'info'
out.batch = True
out.info('info color')
out.head('head color')
out.good('good color')
out.warn('warn color')
out.fail('fail color')
assert len(output_spy.flush()) == 4
|
TheAlgorithms/Python
|
graphs/minimum_spanning_tree_kruskal2.py
|
Python
|
mit
| 4,095
| 0.000488
|
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")
class DisjointSetTreeNode(Generic[T]):
# Disjoint Set Node to store the parent and rank
def __init__(self, data: T) -> None:
self.data = data
self.parent = self
self.rank = 0
class DisjointSetTree(Generic[T]):
# Disjoint Set DataStructure
def __init__(self) -> None:
# map from node name to the node object
self.map: dict[T, DisjointSetTreeNode[T]] = {}
def make_set(self, data: T) -> None:
# create a new set with x as its member
self.map[data] = DisjointSetTreeNode(data)
def find_set(self, data: T) -> DisjointSetTreeNode[T]:
# find the set x belongs to (with path-compression)
elem_ref = self.map[data]
if elem_ref != elem_ref.parent:
elem_ref.parent = self.find_set(elem_ref.parent.data)
return elem_ref.parent
def link(
self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]
) -> None:
# helper function for union operation
if node1.rank > node2.rank:
node2.parent = node1
else:
node1.parent = node2
if node1.rank == node2.rank:
node2.rank += 1
def union(self, data1: T, data2: T) -> None:
# merge 2 disjoint sets
self.link(self.find_set(data1), self.find_set(data2))
class GraphUndirectedWeighted(Generic[T]):
def __init__(self) -> None:
# connections: map fr
|
om the node to the neighbouring nodes (with weights)
self.conne
|
ctions: dict[T, dict[T, int]] = {}
def add_node(self, node: T) -> None:
# add a node ONLY if its not present in the graph
if node not in self.connections:
self.connections[node] = {}
def add_edge(self, node1: T, node2: T, weight: int) -> None:
# add an edge with the given weight
self.add_node(node1)
self.add_node(node2)
self.connections[node1][node2] = weight
self.connections[node2][node1] = weight
def kruskal(self) -> GraphUndirectedWeighted[T]:
# Kruskal's Algorithm to generate a Minimum Spanning Tree (MST) of a graph
"""
Details: https://en.wikipedia.org/wiki/Kruskal%27s_algorithm
Example:
>>> g1 = GraphUndirectedWeighted[int]()
>>> g1.add_edge(1, 2, 1)
>>> g1.add_edge(2, 3, 2)
>>> g1.add_edge(3, 4, 1)
>>> g1.add_edge(3, 5, 100) # Removed in MST
>>> g1.add_edge(4, 5, 5)
>>> assert 5 in g1.connections[3]
>>> mst = g1.kruskal()
>>> assert 5 not in mst.connections[3]
>>> g2 = GraphUndirectedWeighted[str]()
>>> g2.add_edge('A', 'B', 1)
>>> g2.add_edge('B', 'C', 2)
>>> g2.add_edge('C', 'D', 1)
>>> g2.add_edge('C', 'E', 100) # Removed in MST
>>> g2.add_edge('D', 'E', 5)
>>> assert 'E' in g2.connections["C"]
>>> mst = g2.kruskal()
>>> assert 'E' not in mst.connections['C']
"""
# getting the edges in ascending order of weights
edges = []
seen = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start))
edges.append((start, end, self.connections[start][end]))
edges.sort(key=lambda x: x[2])
# creating the disjoint set
disjoint_set = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(node)
# MST generation
num_edges = 0
index = 0
graph = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections) - 1:
u, v, w = edges[index]
index += 1
parent_u = disjoint_set.find_set(u)
parent_v = disjoint_set.find_set(v)
if parent_u != parent_v:
num_edges += 1
graph.add_edge(u, v, w)
disjoint_set.union(u, v)
return graph
|
litecoin-project/litecoin
|
test/functional/rpc_getchaintips.py
|
Python
|
mit
| 2,291
| 0.011349
|
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the getchaintips RPC.
- introduce a network split
- work on chains of different lengths
- join the network together again
- verify that getchaintips now returns two chain tips.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class GetChainTipsTest (BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
def run_test(self):
tips = self.nodes[0].getchaintips()
assert_equal(len(tips), 1)
assert_equal(tips[0]['branchlen'], 0)
assert_equal(tips[0]['height'], 200)
assert_equal(tips[0]['status'], 'active')
# Split the network and build two chains of different lengths.
self.split_network()
self.nodes[0].generatetoaddress(10, self.nodes[0].get_deterministic_priv_key().address)
self.nodes[2].generatetoaddress(20, self.nodes[2].get_deterministic_priv_key().address)
self.sync_all([self.nodes[:2], self.nodes[2:]])
tips = self.nodes[1].getch
|
aintips ()
assert_equal (len (tips), 1)
shortTip = tips[0]
assert_equal (shortTip['branchlen'], 0)
assert_equal (shortTip['height'], 210)
assert_equal (tips[0]['status'], 'active')
|
tips = self.nodes[3].getchaintips ()
assert_equal (len (tips), 1)
longTip = tips[0]
assert_equal (longTip['branchlen'], 0)
assert_equal (longTip['height'], 220)
assert_equal (tips[0]['status'], 'active')
# Join the network halves and check that we now have two tips
# (at least at the nodes that previously had the short chain).
self.join_network ()
tips = self.nodes[0].getchaintips ()
assert_equal (len (tips), 2)
assert_equal (tips[0], longTip)
assert_equal (tips[1]['branchlen'], 10)
assert_equal (tips[1]['status'], 'valid-fork')
tips[1]['branchlen'] = 0
tips[1]['status'] = 'active'
assert_equal (tips[1], shortTip)
if __name__ == '__main__':
GetChainTipsTest ().main ()
|
mozilla/zamboni
|
mkt/webapps/migrations/0009_remove_webapp_hosted_url.py
|
Python
|
bsd-3-clause
| 356
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependen
|
cies = [
('webapps', '0008_remove_china_queue'),
]
operations = [
migrations.RemoveField(
model_name='webapp',
name='hosted_url',
|
),
]
|
robertpyke/PyThesis
|
thesis/tests/gridded_mappable_point.py
|
Python
|
mit
| 6,740
| 0.003709
|
import unittest
import transaction
import os
import csv
from pyramid import testing
from thesis.models import DBSession
from sqlalchemy import create_engine
from thesis.models import (
Base,
GriddedMappablePoint,
Layer
)
class TestGriddedMappableItem(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
engine = create_engine('postgresql+psycopg2://thesis_db_user:_89_hHh_989g2988h08g2As@127.0.0.1:5432/thesis_test_db')
DBSession.configure(bind=engine)
Base.metadata.create_all(engine)
with transaction.manager:
# Add TestLayer1
test_layer_1 = Layer(name='TestLayer1')
test_layer_1.mappable_points = [
GriddedMappablePoint('Point(30 10)'),
GriddedMappablePoint('Point(20 10)'),
]
DBSession.add(test_layer_1)
# Add TestLayer2
test_layer_2 = Layer(name='TestLayer2')
test_layer_2.mappable_points = [
GriddedMappablePoint('Point(10 15)'),
GriddedMappablePoint('Point(10 15)'),
GriddedMappablePoint('Point(30 15)'),
]
DBSession.add(test_layer_2)
# Add Emu Layer
tests_path = os.path.dirname(os.path.abspath(__file__))
test_fixtures_path = os.path.join(tests_path, 'fixtures')
emu_csv_path = os.path.join(test_fixtures_path, 'emu.csv')
emu_layer = Layer(name='Emu')
with open(emu_csv_path, 'rb') as csvfile:
emu_reader = csv.reader(csvfile)
rownum = 0
header = None
for row in emu_reader:
# Save header row.
if rownum == 0:
header = row
else:
colnum = 0
latitude = 0
longitude = 0
for col in row:
column_label = header[colnum]
if column_label == "LNGDEC":
longitude = col
elif column_label == "LATDEC":
latitude = col
# print '%-8s: %s' % (column_label, col)
colnum += 1
if longitude and latitude:
mappable_point = GriddedMappablePoint('Point(%s %s)' % (longitude, latitude))
emu_layer.mappable_points.append(mappable_point)
rownum += 1
DBSession.add(emu_layer)
def tearDown(self):
DBSession.remove()
testing.tearDown()
engine = create_engine('postgresql+psycopg2://thesis_db_user:_89_hHh_989g2988h08g2As@127.0.0.1:5432/thesis_test_db')
DBSession.configure(bind=engine)
# Drop all the models
Base.metadata.drop_all(engine)
def test_search_layers_by_name(self):
test_layer_1 = DBSession.query(Layer).\
filter_by(name='TestLayer1').one()
self.assertEqual(test_layer_1.name, 'TestLayer1')
self.assertEqual(len(test_layer_1.mappable_points), 2)
test_layer_2 = DBSession.query(Layer).\
filter_by(name='TestLayer2').one()
self.assertEqual(test_layer_2.name, 'TestLayer2')
self.assertEqual(len(test_layer_2.mappable_points), 3)
def test_emu_fixure_loaded(self):
test_emu_layer = DBSession.query(Layer).\
filter_by(name='Emu').one()
self.assertGreater(len(test_emu_layer.mappable_points), 5)
def test_get_layer_points_as_geo_json(self):
test_layer_1 = DBSession.query(Layer).filter_by(name='TestLayer1').one()
test_layer_2 = DBSession.query(Layer).filter_by(name='TestLayer2').one()
q = GriddedMappablePoint.get_points_as_geojson(test_layer_1, grid_size=1)
result = q.all()
# self.assertEqual(result[0].locations, '{"type":"MultiPoint","coordinates":[[20,10]]}')
# self.assertEqual(result[1].locations, '{"type":"MultiPoint","coordinates":[[30,10]]}')
self.assertEqual(result[0].cluster_size, 1)
self.assertEqual(result[1].cluster_size, 1)
q2 = GriddedMappablePoint.get_points_as_geojson(test_layer_1, grid_size=100)
result2 = q2.all()
# self.assertEqual(result2[0].locations, '{"type":"MultiPoint","coordinates":[[30,10],[20,10]]}')
self.assertEqual(result2[0].cluster_size, 2)
q3 = GriddedMappablePoint.get_points_as_geojson(test_layer_2, grid_size=1)
result3 = q3.all()
# self.assertEqual(result3[0].locations, '{"type":"MultiPoint","coordinates":[[10,15],[10,15]]}')
# self.assertEqual(result3[1].locations, '{"type":"MultiPoint","coordinates":[[30,15]]}')
self.assertEqual(
|
result3[0].cluster_size
|
, 2)
self.assertEqual(result3[1].cluster_size, 1)
def test_get_cluster_centroids_as_geo_json(self):
test_layer_1 = DBSession.query(Layer).filter_by(name='TestLayer1').one()
test_layer_2 = DBSession.query(Layer).filter_by(name='TestLayer2').one()
q = GriddedMappablePoint.get_points_as_geojson(test_layer_1, grid_size=1)
result = q.all()
self.assertEqual(result[0].centroid, '{"type":"Point","coordinates":[20,10]}')
self.assertEqual(result[1].centroid, '{"type":"Point","coordinates":[30,10]}')
q2 = GriddedMappablePoint.get_points_as_geojson(test_layer_1, grid_size=100)
result2 = q2.one()
self.assertEqual(result2.centroid, '{"type":"Point","coordinates":[25,10]}')
q3 = GriddedMappablePoint.get_points_as_geojson(test_layer_2, grid_size=100)
result3 = q3.one()
self.assertEqual(result3.centroid, '{"type":"Point","coordinates":[16.6666666666667,15]}')
def test_get_layer_points_as_wkt(self):
test_layer_1 = DBSession.query(Layer).filter_by(name='TestLayer1').one()
q = GriddedMappablePoint.get_points_as_wkt(test_layer_1, grid_size=1)
result = q.all()
# self.assertEqual(result[0].locations, 'MULTIPOINT(20 10)')
# self.assertEqual(result[1].locations, 'MULTIPOINT(30 10)')
def test_normalise_grid_size(self):
grid_size_1 = GriddedMappablePoint.normalise_grid_size(10)
self.assertEqual(grid_size_1, 8)
grid_size_2 = GriddedMappablePoint.normalise_grid_size(0.00001)
self.assertEqual(grid_size_2, 0)
grid_size_3 = GriddedMappablePoint.normalise_grid_size(0.9)
self.assertEqual(grid_size_3, 0.5)
grid_size_4 = GriddedMappablePoint.normalise_grid_size(1.1)
self.assertEqual(grid_size_4, 1)
|
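The expected values in test_normalise_grid_size suggest that grid sizes are snapped to the largest power of two not exceeding the requested size, with very small values collapsing to zero. A rough sketch of that rule, inferred from the asserted values rather than taken from the project's implementation (the cut-off constant is a guess):

```python
import math


def normalise_grid_size(grid_size, cutoff=0.001):
    # Collapse very small grid sizes to zero (cut-off value assumed).
    if grid_size < cutoff:
        return 0
    # Largest power of two not exceeding grid_size:
    # 10 -> 8, 1.1 -> 1, 0.9 -> 0.5, matching the assertions above.
    return 2 ** math.floor(math.log(grid_size, 2))
```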
MiniSEC/GRR_clone
|
lib/aff4.py
|
Python
|
apache-2.0
| 74,406
| 0.007701
|
#!/usr/bin/env python
"""AFF4 interface implementation.
This contains an AFF4 data model implementation.
"""
import __builtin__
import abc
import StringIO
import time
import zlib
import logging
from grr.lib import access_control
from grr.lib import config_lib
from grr.lib import data_store
from grr.lib import lexer
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib import utils
from grr.lib.rdfvalues import grr_rdf
config_lib.DEFINE_integer(
"AFF4.cache_age", 5,
"The number of seconds AFF4 objects live in the cache.")
config_lib.DEFINE_integer(
"AFF4.notification_rules_cache_age", 60,
"The number of seconds AFF4 notification rules are cached.")
# Factor to convert from seconds to microseconds
MICROSECONDS = 1000000
# Age specifications for opening AFF4 objects.
NEWEST_TIME = "NEWEST_TIME"
ALL_TIMES = "ALL_TIMES"
# Just something to write on an index attribute to make it exist.
EMPTY_DATA = "X"
AFF4_PREFIXES = ["aff4:.*", "metadata:.*"]
class Error(Exception):
pass
class LockError(Error):
pass
class InstanciationError(Error, IOError):
pass
class LockContextManager(object):
def __init__(self, aff4_obj, sync):
self.aff4_obj = aff4_obj
self.sync = sync
def __enter__(self):
return self.aff4_obj
def __exit__(self, unused_type, unused_value, unused_traceback):
self.aff4_obj.Close(sync=self.sync)
class Factory(object):
"""A central factory for AFF4 objects."""
def __init__(self):
# This is a relatively short lived cache of objects.
self.cache = utils.AgeBasedCache(
max_size=10000,
max_age=config_lib.CONFIG["AFF4.cache_age"])
self.intermediate_cache = utils.FastStore(2000)
# Create a token for system level actions:
self.root_token = access_control.ACLToken(username="system",
reason="Maintainance")
self.root_token.supervisor = True
self.notification_rules = []
self.notification_rules_timestamp = 0
@classmethod
def ParseAgeSpecification(cls, age):
"""Parses an aff4 age and returns a datastore age specification."""
try:
return (0, int(age))
except (ValueError, TypeError):
pass
if age == NEWEST_TIME:
return data_store.DB.NEWEST_TIMESTAMP
elif age == ALL_TIMES:
return data_store.DB.ALL_TIMESTAMPS
elif len(age) == 2:
start, end = age
return (int(start), int(end))
raise RuntimeError("Unknown age specification: %s" % age)
def GetAttributes(self, urns, ignore_cache=False, token=None,
age=NEWEST_TIME):
"""Retrieves all the attributes for all the urns."""
urns = [utils.SmartUnicode(u) for u in set(urns)]
try:
if not ignore_cache:
result = []
for subject in urns:
key = self._MakeCacheInvariant(subject, token, age)
result.append((subject, self.cache.Get(key)))
return result
except KeyError:
pass
subjects = []
result = {}
# If there are any cache misses, we need to go to the data store. So we
# might as well just re-fetch all the urns again in a single data store
# round trip.
for subject, values in data_store.DB.MultiResolveRegex(
urns, AFF4_PREFIXES,
timestamp=self.ParseAgeSpecification(age),
token=token, limit=None).items():
# Ensure the values are sorted.
values.sort(key=lambda x: x[-1], reverse=True)
key = self._MakeCacheInvariant(subject, token, age)
self.cache.Put(key, values)
result[utils.SmartUnicode(subject)] = values
subjects.append(subject)
return result.items()
def SetAttributes(self, urn, attributes, to_delete, sync=False, token=None):
"""Sets the attributes in the data store and update the cache."""
# Force a data_store lookup next.
try:
# Expire all entries in the cache for this urn (for all tokens, and
# timestamps)
self.cache.ExpirePrefix(utils.SmartStr(urn) + ":")
except KeyError:
pass
attributes[AFF4Object.SchemaCls.LAST] = [
rdfvalue.RDFDatetime().Now().SerializeToDataStore()]
to_delete.add(AFF4Object.SchemaCls.LAST)
data_store.DB.MultiSet(urn, attributes, token=token,
replace=False, sync=sync, to_delete=to_delete)
# TODO(user): This can run in the thread pool since its not time
# critical.
self._UpdateIndex(urn, attributes, token)
def _UpdateIndex(self, urn, attributes, token):
"""Updates any indexes we need."""
index = {}
for attribute, values in attributes.items():
if attribute.index:
for value, _ in values:
index.setdefault(attribute.index, []).append((attribute, value))
if index:
for index_urn, index_data in index.items():
aff4index = self.Create(index_urn, "AFF4Index", mode="w", token=token)
for attribute, value in index_data:
aff4index.Add(urn, attribute, value)
aff4index.Close()
self._UpdateChildIndex(urn, token)
def _UpdateChildIndex(self, urn, token):
"""Update the child indexes.
This function maintains the index for direct child relations. When we set
an AFF4 path, we always add an attribute like
index:dir/%(childname)s to its parent. This is written
asynchronously to its parent.
In order to query for all direct children of an AFF4 object, we then simply
get the attributes which match the regex index:dir/.+ which are the
direct children.
Args:
urn: The AFF4 object for which we update the index.
token: The token to use.
"""
try:
# Create navigation aids by touching intermediate subject names.
while urn.Path() != "/":
basename = urn.Basename()
dirname = rdfvalue.RDFURN(urn.Dirname())
try:
self.intermediate_cache.Get(urn.Path())
return
except KeyError:
data_store.DB.MultiSet(dirname, {
AFF4Object.SchemaCls.LAST: [
rdfvalue.RDFDatetime().Now().SerializeToDataStore()],
# This updates the directory index.
"index:dir/%s" % utils.SmartStr(basename): [EMPTY_DATA],
},
token=token, replace=True, sync=False)
self.intermediate_cache.Put(urn.Path(), 1)
urn = dirname
except access_control.UnauthorizedAccess:
pass
def _DeleteChildFromIndex(self, urn, token):
try:
# Create navigation aids by touching intermediate subject names.
basename = urn.Basename()
dirname = rdfvalue.RDFURN(urn.Dirname())
try:
self.intermediate_cache.ExpireObject(urn.Path())
except KeyError:
pass
data_store.DB.DeleteAttributes(
dirname, ["index:dir/%s" % utils.SmartStr(basename)], token=token,
sync=False)
data_store.DB.MultiSet(dirname, {
AFF4Object.SchemaCls.LAST: [
rdfvalue.RDFDatetime().Now().SerializeToDataStore()],
}, token=token,
|
replace=True, sync=False)
except access_control.UnauthorizedAccess:
pass
def _ExpandURNComponents(self, urn, unique_urns):
"""This expands URNs.
This met
|
hod breaks the urn into all the urns from its path components and
adds them to the set unique_urns.
Args:
urn: An RDFURN.
unique_urns: A set to add the components of the urn to.
"""
x = ROOT_URN
for component in rdfvalue.RDFURN(urn).Path().split("/"):
if component:
x = x.Add(component)
unique_urns.add(x)
def _MakeCacheInvariant(self, urn, token, age):
"""Returns an invariant key for an AFF4 object.
The object will be cached based on this key. This function is specifically
extracted to ensure that we encapsulate all security critical aspects of the
AFF4 object so that objects do not leak across security boundaries.
Args:
urn: The urn of the object.
token: The access token used to receive the object.
age: The age policy used to build this object. Should be one
of ALL_TIMES, NEWEST_TIME or a range.
Returns:
A key
|
vecnet/simulation-manager
|
sim_manager/tests/test_submit_group.py
|
Python
|
mpl-2.0
| 7,814
| 0.003583
|
# This file is part of the Simulation Manager project for VecNet.
# For copyright and lic
|
ensing information about this project, see the
# NOTICE.txt and LICENSE.md files in its top-level directory; they are
# available at https://github.com/vecnet/simulation-manager
#
# This Source Code Form is subject to the terms of the Mozill
|
a Public
# License (MPL), version 2.0. If a copy of the MPL was not distributed
# with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Tests for the submit_group.py script.
"""
import random
import sys
from crc_nd.utils.test_io import WritesOutputFiles
from django.test import LiveServerTestCase
from mock import patch
from path import path
from vecnet.simulation import ExecutionRequest, sim_model, Simulation, SimulationGroup as SimGroup, submission_status
from .constants import TEST_OUTPUT_ROOT
from .mixins import UsesDatabaseApi
from sim_manager import scripts, working_dirs
from sim_manager.models import SimulationGroup
from sim_manager.scripts import api_urls, batch, input_files, submit_group
from sim_manager.scripts.batch import test_utils
from sim_manager.scripts.constants import SIMULATION_DEFINITION_FILENAME, SIMULATION_SCRIPT
class MainTests(LiveServerTestCase, UsesDatabaseApi, WritesOutputFiles):
"""
Tests for the script's main function.
"""
@classmethod
def setUpClass(cls):
super(MainTests, cls).setUpClass()
cls.setup_database_api_user()
cls.set_output_root(TEST_OUTPUT_ROOT)
working_dirs.TestingApi.use_testing_root()
# Add the scripts package's directory to the module search path so the loading of the batch system in the
# submit_group.py script works. When the script is executed at the command line, the package directory will
# automatically be added to the search path. But here in the test suite, the package is imported, so its
# directory is not added automatically. Therefore, we explicitly add it.
scripts_dir = path(scripts.__file__).dirname()
sys.path.append(scripts_dir)
cls.simulation_script = scripts_dir / SIMULATION_SCRIPT
@classmethod
def tearDownClass(cls):
cls.remove_database_api_user()
working_dirs.TestingApi.reset_root_to_default()
sys.path.pop()
@patch('sim_manager.scripts.submit_group.BATCH_SYSTEM', batch.MOCK)
def test_run_script(self):
group = SimulationGroup.objects.create(submitter=self.test_user)
self.group_id = group.id
self.assertEqual(group.script_status, submission_status.READY_TO_RUN)
self.sim_group = SimGroup()
simulation_1 = Simulation(model=sim_model.OPEN_MALARIA, model_version='32', id_on_client='349',
output_url='http://ingestor.example.com/output-files/')
simulation_1.input_files['scenario.xml'] = 'http://www.example.com/data/scenarios/1234/scenario.xml'
simulation_2 = Simulation(model=sim_model.EMOD, model_version='1.6', cmd_line_args=['--foo', 'bar'],
id_on_client='350', output_url=simulation_1.output_url)
simulation_2.input_files['config.json'] = 'https://files.vecnet.org/4710584372'
simulation_2.input_files['campaign.json'] = 'https://files.vecnet.org/678109'
self.sim_group.simulations = [simulation_1, simulation_2]
self.execution_request = ExecutionRequest(simulation_group=self.sim_group)
group.setup_working_dir(self.execution_request)
group_url = self.live_server_url + ('/api/v1/sim-groups/%s/' % group.id)
simulations_url = self.live_server_url + '/api/v1/simulations/'
api_urls.write_for_group(group.working_dir, group_url, simulations_url)
self.check_expected_state = self.expect_script_started
group.working_dir.chdir()
self.initialize_output_dir()
stdout = self.get_output_dir() / 'stdout.txt'
with stdout.open('w') as f:
exit_status = submit_group.main('foo', 'bar', stdout=f, test_callback=self.callback)
self.assertEqual(exit_status, 0)
group = SimulationGroup.objects.get(id=group.id)
self.assertEqual(group.script_status, submission_status.SCRIPT_DONE)
def callback(self):
if self.check_expected_state:
self.check_expected_state()
else:
self.fail('callback unexpectedly called')
def expect_script_started(self):
"""
Confirm that the submission script was started.
"""
self.assertGroupScriptStatus(submission_status.STARTED_SCRIPT)
self.check_expected_state = self.expect_cached_files
def expect_cached_files(self):
"""
Confirm that the submission script cached input files.
"""
self.assertGroupScriptStatus(submission_status.CACHING_FILES)
self.assertTrue(input_files.TestingApi.add_to_cache_mock.called)
args, kwargs = input_files.TestingApi.add_to_cache_mock.call_args
self.assertEqual((self.execution_request.input_files,), args)
self.check_expected_state = self.expect_simulation_created
self.simulations_created = 0
test_utils.Mocks.submit_job.reset_mock()
test_utils.Mocks.submit_job.return_value = generate_job_id()
def expect_simulation_created(self):
"""
Confirm that the submission script has created a new simulation in the database.
"""
self.assertGroupScriptStatus(submission_status.SUBMITTING_JOBS)
group = SimulationGroup.objects.get(id=self.group_id)
self.assertEqual(group.simulation_set.count(), self.simulations_created + 1)
self.simulations_created += 1
# Check that the working directory is set up properly for the simulation that was just created
simulation = group.simulation_set.order_by('created_when').last()
self.assertTrue(simulation.working_dir.isdir())
sim_definition_path = simulation.working_dir / SIMULATION_DEFINITION_FILENAME
self.assertTrue(sim_definition_path.isfile())
sim_definition = Simulation.read_json_file(sim_definition_path)
expected_sim_definition = self.sim_group.simulations[self.simulations_created - 1]
self.assertEqual(sim_definition.model, expected_sim_definition.model)
self.assertEqual(sim_definition.model_version, expected_sim_definition.model_version)
self.assertEqual(sim_definition.input_files, expected_sim_definition.input_files)
self.assertEqual(sim_definition.cmd_line_args, expected_sim_definition.cmd_line_args)
self.assertEqual(sim_definition.id_on_client, expected_sim_definition.id_on_client)
self.assertEqual(sim_definition.output_url, expected_sim_definition.output_url)
# Check that the simulation was submitted to the batch system.
self.assertTrue(test_utils.Mocks.submit_job.called)
args, kwargs = test_utils.Mocks.submit_job.call_args
executable, working_dir, cmd_args = args[0], args[1], args[2:]
self.assertEqual(executable, sys.executable)
self.assertEqual(working_dir, simulation.working_dir)
self.assertEqual(list(cmd_args), [self.simulation_script])
self.assertEqual(simulation.batch_job_id, test_utils.Mocks.submit_job.return_value)
test_utils.Mocks.submit_job.reset_mock()
if self.simulations_created < len(self.sim_group.simulations):
test_utils.Mocks.submit_job.return_value = generate_job_id()
else:
self.check_expected_state = None
def assertGroupScriptStatus(self, expected_status):
group = SimulationGroup.objects.get(id=self.group_id)
self.assertEqual(group.script_status, expected_status)
def generate_job_id():
return str(random.randint(1, 100000))
|
flosch/simpleapi
|
tests/settings.py
|
Python
|
mit
| 18
| 0.055556
|
SE
|
CRET_KEY = "foo
|
"
|
OSMNames/OSMNames
|
osmnames/logger.py
|
Python
|
gpl-2.0
| 788
| 0.001269
|
from subprocess import check_call
import logging
import datetime
def setup(name):
formatter = logging.Formatter(fmt='%(asctime)s - %(levelname)s - %(messag
|
e)s')
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
timestamp = datetime.datetime.now().strftime('%Y_%m_%d-%H%M')
file_handler = logging.FileHandler("data/logs/{}.log".format(timestamp))
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
return logger
log = setup(__name__)
def l
|
ogged_check_call(parameters):
log.info("run {command}".format(command=' '.join(parameters)))
check_call(parameters)
log.info("finished")
|
stscieisenhamer/ginga
|
ginga/cvw/CvHelp.py
|
Python
|
bsd-3-clause
| 4,985
| 0.002207
|
#
# CvHelp.
|
py -- help classes for the Cv drawing
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
import math
import n
|
umpy
import cv2
from ginga import colors
class Pen(object):
def __init__(self, color='black', linewidth=1, alpha=1.0):
self.color = color
self.linewidth = linewidth
self.alpha = alpha
class Brush(object):
def __init__(self, color='black', fill=False, alpha=1.0):
self.color = color
self.fill = fill
self.alpha = alpha
class Font(object):
def __init__(self, fontname='ariel', fontsize=12.0, color='black',
linewidth=1, alpha=1.0):
self.fontname = fontname
self.fontsize = fontsize
self.color = color
self.linewidth = linewidth
# scale relative to a 12pt font
self.scale = fontsize / 12.0
self.alpha = alpha
# TODO: currently there is only support for some simple built-in
# fonts. What kind of fonts/lookup can we use for this?
self.font = cv2.FONT_HERSHEY_SIMPLEX
class CvContext(object):
def __init__(self, canvas):
self.canvas = canvas
def set_canvas(self, canvas):
self.canvas = canvas
def get_color(self, color, alpha=1.0):
if isinstance(color, str) or isinstance(color, type(u"")):
r, g, b = colors.lookup_color(color)
elif isinstance(color, tuple):
# color is assumed to be a 3-tuple of RGB values as floats
# between 0 and 1
r, g, b = color
else:
r, g, b = 1.0, 1.0, 1.0
# According to documentation, OpenCV expects colors as BGRA tuple
# BUT, seems we need to specify RGBA--I suppose we need to match
# what is defined as _rgb_order attribute in ImageViewCv class
#return (int(alpha*255), int(b*255), int(g*255), int(r*255))
return (int(r*255), int(g*255), int(b*255), int(alpha*255))
def get_pen(self, color, linewidth=1, alpha=1.0):
# if hasattr(self, 'linestyle'):
# if self.linestyle == 'dash':
# cr.set_dash([ 3.0, 4.0, 6.0, 4.0], 5.0)
#op = int(alpha * 255)
color = self.get_color(color, alpha=alpha)
return Pen(color=color, linewidth=linewidth, alpha=alpha)
def get_brush(self, color, alpha=1.0):
color = self.get_color(color, alpha=alpha)
return Brush(color=color, fill=True, alpha=alpha)
def get_font(self, name, size, color, linewidth=1, alpha=1.0):
color = self.get_color(color, alpha=alpha)
return Font(fontname=name, fontsize=size, color=color,
linewidth=linewidth, alpha=alpha)
def text_extents(self, text, font):
## retval, baseline = cv2.getTextSize(text, font.font, font.fontsize,
## font.linewidth)
retval, baseline = cv2.getTextSize(text, font.font, font.scale,
font.linewidth)
wd, ht = retval
return wd, ht
def text(self, pt, text, font):
x, y = pt
## cv2.putText(self.canvas, text, (x, y), font.font, font.scale,
## font.color, thickness=font.linewidth,
## lineType=cv2.CV_AA)
cv2.putText(self.canvas, text, (x, y), font.font, font.scale,
font.color, thickness=font.linewidth)
def line(self, pt1, pt2, pen):
x1, y1 = int(round(pt1[0])), int(round(pt1[1]))
x2, y2 = int(round(pt2[0])), int(round(pt2[1]))
cv2.line(self.canvas, (x1, y1), (x2, y2), pen.color, pen.linewidth)
def circle(self, pt, radius, pen, brush):
x, y = pt
radius = int(radius)
if (brush is not None) and brush.fill:
cv2.circle(self.canvas, (x, y), radius, brush.color, -1)
cv2.circle(self.canvas, (x, y), radius, pen.color, pen.linewidth)
def rectangle(self, pt1, pt2, pen, brush):
x1, y1 = pt1
x2, y2 = pt2
cv2.rectangle(self.canvas, (x1, y1), (x2, y2), pen.color, pen.linewidth)
def ellipse(self, pt, xr, yr, theta, pen, brush):
x, y = pt
if (brush is not None) and brush.fill:
cv2.ellipse(self.canvas, (x, y), (xr, yr), theta, 0.0, 360.0,
brush.color, -1)
cv2.ellipse(self.canvas, (x, y), (xr, yr), theta, 0.0, 360.0,
pen.color, pen.linewidth)
def polygon(self, points, pen, brush):
pts = numpy.array(points, numpy.int32)
pts = pts.reshape((-1, 1, 2))
cv2.polylines(self.canvas, [pts], True, pen.color, pen.linewidth)
if (brush is not None) and brush.fill:
cv2.fillPoly(self.canvas, [pts], brush.color)
def path(self, points, pen):
pts = numpy.array(points, numpy.int32)
pts = pts.reshape((-1, 1, 2))
cv2.polylines(self.canvas, [pts], False, pen.color, pen.linewidth)
#END
|
simone-campagna/petra
|
petra/net.py
|
Python
|
apache-2.0
| 7,080
| 0.00339
|
__all__ = (
'Net',
)
import builtins
import collections
import itertools
from .errors import InternalError, NodeError
from .marking import Marking
from .net_element import NamedNetElement
from .node import Node
from .place import Place
from .transition import Transition
class Net(NamedNetElement):
__dict_fact
|
ory__ = collections.OrderedDict
__defaultdict_factory__ = collections.defaultdict
__globals__ = {attr_name: getattr(builtins, attr_name) for attr_name in dir(builtins)}
def __init__(self, name=None, globals_d=None):
super().__init__(name=name, net=self)
self._places = self.__dict_factory__()
self._transitions = self.__dict_factory__()
self._engines = []
if globals_d is None:
globals_d = self.__globals
|
__.copy()
self._globals_d = globals_d
### declare names:
def declare(self, name, value):
self._globals_d[name] = value
@property
def globals_d(self):
return self._globals_d
### add engine:
def add_engine(self, engine):
self._engines.append(engine)
def engines(self):
yield from self._engines
### add nodes:
def _check_node(self, node_type, node, **kwargs):
if isinstance(node, str):
node = node_type(net=self, name=node, **kwargs)
elif isinstance(node, node_type):
if node.net is not None and node.net is not self:
raise NodeError("cannot add {!r}: already bound".format(node))
if kwargs:
node.update(**kwargs)
else:
raise NodeError("cannot add {!r}: not a {}".format(node, node_type.__name__))
if node.name in self._places or node.name in self._transitions:
raise NodeError("cannot add {!r}: a node with the same name already exists".format(node))
node.bind(net=self)
return node
def add_place(self, place, tokens=None, **kwargs):
self.notify_net_changed()
place = self._check_node(Place, place, tokens=tokens, **kwargs)
self._places[place.name] = place
return place
def add_transition(self, transition, guard=None, **kwargs):
self.notify_net_changed()
transition = self._check_node(Transition, transition, guard=guard, **kwargs)
self._transitions[transition.name] = transition
return transition
def add_node(self, node):
if isinstance(node, Place):
return self.add_place(node)
elif isinstance(node, Transition):
return self.add_transition(node)
else:
raise NodeError("cannot add {!r}: not a valid node".format(node))
### get nodes:
def place(self, name):
return self._places[name]
def places(self):
yield from self._places.values()
def transition(self, name):
return self._transitions[name]
def transitions(self):
yield from self._transitions.values()
def node(self, name):
if name in self._places:
return self._places[name]
elif name in self._transitions:
return self._transitions[name]
else:
raise KeyError(name)
def nodes(self):
yield from self.places()
yield from self.transitions()
### add arcs:
def add_input(self, place, transition, annotation):
self.notify_net_changed()
if isinstance(place, str):
place = self.place(place)
if isinstance(transition, str):
transition = self.transition(transition)
arc = transition.add_input(place, annotation=annotation)
return arc
def add_output(self, place, transition, annotation):
self.notify_net_changed()
if isinstance(place, str):
place = self.place(place)
if isinstance(transition, str):
transition = self.transition(transition)
arc = transition.add_output(place, annotation=annotation)
return arc
def _get_node(self, node):
if isinstance(node, Node):
if node.net is not self:
raise NodeError("{!r}: node {!r} not bound to this net".format(self, node))
else:
node = self.node(node)
return node
def input_arcs(self, node):
node = self._get_node(node)
return node.input_arcs()
def inputs(self, node):
node = self._get_node(node)
return node.inputs()
def output_arcs(self, node):
node = self._get_node(node)
return node.output_arcs()
def outputs(self, node):
node = self._get_node(node)
return node.outputs()
### root nodes:
def root_places(self):
for node in self.places():
if len(node.inputs()) == 0:
yield node
def root_transitions(self):
for node in self.transitions():
if len(node.inputs()) == 0:
yield node
def root_nodes(self):
yield from self.root_places()
yield from self.root_transitions()
### dict interface:
def __getitem__(self, name):
return self.node(name)
def __iter__(self):
yield from self.nodes()
def __len__(self):
return len(self._places) + len(self._transitions)
### walk:
def walk(self, *, depth_first=False, first_only=True):
if first_only:
seen = set()
def not_seen(node):
b = node not in seen
if b:
seen.add(node)
return b
else:
not_seen = lambda node: True
nodes = itertools.chain(self.root_nodes())
while True:
if depth_first:
try:
node = next(nodes)
except StopIteration:
break
yield node
next_nodes = itertools.chain(filter(not_seen, self.outputs(node)), nodes)
nodes = iter(next_nodes)
else:
next_nodes = []
for node in nodes:
yield node
new_nodes = filter(not_seen, self.outputs(node))
next_nodes.extend(new_nodes)
if not next_nodes:
break
nodes = iter(next_nodes)
### marking:
def get_marking(self):
marking = Marking()
for place in self._places.values():
if place.tokens:
marking[place.name] = place.tokens.copy()
return marking
def set_marking(self, marking):
for place in self._places.values():
tokens = marking.get(place.name)
place.tokens.clear()
if tokens:
place.tokens.extend(tokens)
### notifications:
def notify_transition_fired(self, transition):
for engine in self._engines:
engine.notify_transition_fired(transition)
def notify_net_changed(self):
for engine in self._engines:
engine.notify_net_changed()
|
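A small construction sketch for the Net class above, using only calls that appear in this excerpt; the import path is assumed from the file path shown, and the token and annotation values are placeholders whose real types are not visible here:

```python
from petra.net import Net  # import path assumed from petra/net.py

net = Net(name="demo")
net.add_place("p_in", tokens=[1])                   # token contents are a placeholder
net.add_place("p_out")
net.add_transition("t_move")
net.add_input("p_in", "t_move", annotation="x")     # annotation value is a placeholder
net.add_output("p_out", "t_move", annotation="x")

for node in net.walk():          # breadth-first from the root nodes
    print(node.name)
print(net.get_marking())         # marking built from the places' tokens
```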
dichen001/Go4Jobs
|
JackChen/minimax/375. Guess Number Higher or Lower II.py
|
Python
|
gpl-3.0
| 1,292
| 0.004651
|
"""
We are playing the Guess Game. The game is as follows:
I pick a number from 1 to n. You have to guess which number I picked.
Every time you guess wrong, I'll tell you whether the number I picked is higher or lower.
However, when you guess a particular number x, and you guess wrong, you pay $x. You win the game when you guess the number I picked.
Example:
n = 10, I pick 8.
First round: You guess 5, I tell you that it's higher. You pay $5.
Second round: You guess 7, I tell you that it's
|
higher. You pay $7.
Third round: You guess 9, I tell you that it's lower. You pay $9.
Game over. 8 is the number I picked.
You end up paying $5 + $7 + $9 = $21.
Given a particular n ≥ 1, find out how much money you need to have to guarantee a win.
"""
class Solution(object):
def getMoneyAmount(self, n):
"""
:type n: int
:rtype: int
"""
|
self.dp = [[0] * (n + 1) for _ in range(n + 1)]
return self.helper(1, n)
def helper(self, s, e):
if s >= e:
return 0
if self.dp[s][e] != 0:
return self.dp[s][e]
res = float('inf')
for i in range(s, e + 1):
res = min(res, i + max(self.helper(s, i - 1), self.helper(i + 1, e)))
self.dp[s][e] = res
return res
|
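As a quick check of the memoised minimax above (the concrete values are worked out by hand here, not quoted from the source): for n = 3 the best opening guess is 2, which either wins outright or identifies the answer, so $2 suffices; for n = 4, guessing 1 and then 3 bounds the worst case at $1 + $3 = $4.

```python
# Sanity checks against hand-computed values for small n.
s = Solution()
print(s.getMoneyAmount(1))   # 0 -- a single candidate can never be guessed wrong
print(s.getMoneyAmount(3))   # 2 -- open with 2
print(s.getMoneyAmount(4))   # 4 -- e.g. open with 1, then guess 3
```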
ScreamingUdder/mantid
|
scripts/Diffraction/isis_powder/pearl.py
|
Python
|
gpl-3.0
| 9,681
| 0.005991
|
from __future__ import (absolute_import, division, print_function)
import mantid.simpleapi as mantid
from isis_powder.routines import common, instrument_settings
from isis_powder.abstract_inst import AbstractInst
from isis_powder.pearl_routines import pearl_advanced_config, pearl_algs, pearl_calibration_algs, pearl_output, \
pearl_param_mapping
class Pearl(AbstractInst):
def __init__(self, **kwargs):
self._inst_settings = instrument_settings.InstrumentSettings(
param_map=pearl_param_mapping.attr_mapping, adv_conf_dict=pearl_advanced_config.get_all_adv_variables(),
kwargs=kwargs)
super(Pearl, self).__init__(user_name=self._inst_settings.user_name,
calibration_dir=self._inst_settings.calibration_dir,
output_dir=self._inst_settings.output_dir, inst_prefix="PEARL")
self._cached_run_details = {}
def focus(self, **kwargs):
self._switch_long_mode_inst_settings(kwargs.get("long_mode"))
self._inst_settings.update_attributes(kwargs=kwargs)
return self._focus(run_number_string=self._inst_settings.run_number,
do_absorb_corrections=self._inst_settings.absorb_corrections,
do_van_normalisation=self._inst_settings.van_norm)
def create_vanadium(self, **kwargs):
self._switch_long_mode_inst_settings(kwargs.get("long_mode"))
kwargs["perform_attenuation"] = None # Hard code this off as we do not need an attenuation file
self._inst_settings.update_attributes(kwargs=kwargs)
if str(self._inst_settings.tt_mode).lower() == "all":
for new_tt_mode in ["tt35", "tt70", "tt88"]:
self._inst_settings.tt_mode = new_tt_mode
self._run_create_vanadium()
else:
self._run_create_vanadium()
def create_cal(self, **kwargs):
self._switch_long_mode_inst_settings(kwargs.get("long_mode"))
self._inst_settings.update_attributes(kwargs=kwargs)
run_details = self._get_run_details(self._inst_settings.run_number)
cross_correlate_params = {"ReferenceSpectra": self._inst_settings.reference_spectra,
"WorkspaceIndexMin": self._inst_settings.cross_corr_ws_min,
"WorkspaceIndexMax": self._inst_settings.cross_corr_ws_max,
"XMin": self._inst_settings.cross_corr_x_min,
"XMax": self._inst_settings.cross_corr_x_max}
get_detector_offsets_params = {"DReference": self._inst_settings.d_reference,
"Step": self._inst_settings.get_det_offsets_step,
"XMin": self._inst_settings.get_det_offsets_x_min,
"XMax": self._inst_settings.get_det_offsets_x_max}
return pearl_calibration_algs.create_calibration(calibration_runs=self._inst_settings.run_number,
instrument=self,
offset_file_name=run_details.offset_file_path,
grouping_file_name=run_details.grouping_f
|
ile_path,
calibration_dir=self._inst_settings.calibration_dir,
|
rebin_1_params=self._inst_settings.cal_rebin_1,
rebin_2_params=self._inst_settings.cal_rebin_2,
cross_correlate_params=cross_correlate_params,
get_det_offset_params=get_detector_offsets_params)
def _run_create_vanadium(self):
# Provides a minimal wrapper so if we have tt_mode 'all' we can loop round
return self._create_vanadium(run_number_string=self._inst_settings.run_in_range,
do_absorb_corrections=self._inst_settings.absorb_corrections)
def _get_run_details(self, run_number_string):
run_number_string_key = self._generate_run_details_fingerprint(run_number_string,
self._inst_settings.file_extension,
self._inst_settings.tt_mode)
if run_number_string_key in self._cached_run_details:
return self._cached_run_details[run_number_string_key]
self._cached_run_details[run_number_string_key] = pearl_algs.get_run_details(
run_number_string=run_number_string, inst_settings=self._inst_settings, is_vanadium_run=self._is_vanadium)
return self._cached_run_details[run_number_string_key]
def _generate_output_file_name(self, run_number_string):
inst = self._inst_settings
return pearl_algs.generate_out_name(run_number_string=run_number_string,
long_mode_on=inst.long_mode, tt_mode=inst.tt_mode)
def _normalise_ws_current(self, ws_to_correct):
monitor_spectra = self._inst_settings.monitor_spec_no
monitor_ws = common.extract_single_spectrum(ws_to_process=ws_to_correct,
spectrum_number_to_extract=monitor_spectra)
normalised_ws = pearl_algs.normalise_ws_current(ws_to_correct=ws_to_correct, monitor_ws=monitor_ws,
spline_coeff=self._inst_settings.monitor_spline,
integration_range=self._inst_settings.monitor_integration_range,
lambda_values=self._inst_settings.monitor_lambda,
ex_regions=self._inst_settings.monitor_mask_regions)
common.remove_intermediate_workspace(monitor_ws)
return normalised_ws
def _get_current_tt_mode(self):
return self._inst_settings.tt_mode
def _spline_vanadium_ws(self, focused_vanadium_spectra):
focused_vanadium_spectra = pearl_algs.strip_bragg_peaks(focused_vanadium_spectra)
splined_list = common.spline_workspaces(focused_vanadium_spectra=focused_vanadium_spectra,
num_splines=self._inst_settings.spline_coefficient)
# Ensure the name is unique if we are in tt_mode all
new_workspace_names = []
for ws in splined_list:
new_name = ws.getName() + '_' + self._inst_settings.tt_mode
new_workspace_names.append(mantid.RenameWorkspace(InputWorkspace=ws, OutputWorkspace=new_name))
return new_workspace_names
def _output_focused_ws(self, processed_spectra, run_details, output_mode=None):
if not output_mode:
output_mode = self._inst_settings.focus_mode
if self._inst_settings.perform_atten:
attenuation_path = self._inst_settings.attenuation_file_path
else:
attenuation_path = None
output_spectra = \
pearl_output.generate_and_save_focus_output(self, processed_spectra=processed_spectra,
run_details=run_details, focus_mode=output_mode,
attenuation_filepath=attenuation_path)
group_name = "PEARL" + str(run_details.output_run_string)
group_name += '_' + self._inst_settings.tt_mode + "-Results-D-Grp"
grouped_d_spacing = mantid.GroupWorkspaces(InputWorkspaces=output_spectra, OutputWorkspace=group_name)
return grouped_d_spacing, None
def _crop_banks_to_user_tof(self, focused_banks):
return common.crop_banks_using_crop_list(focused_banks, self._inst_settings.tof_cropping_values)
def _crop_raw_to_expected_tof_range(self, ws_to_crop):
out_ws = common.crop_in_tof(ws_to_crop=ws_to_crop, x_min=self._inst_settings.raw_data_crop_vals[0],
|
wmayner/pyphi
|
pyphi/compute/__init__.py
|
Python
|
gpl-3.0
| 1,545
| 0
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# compute/__init__.py
"""
See |compute.subsystem|, |compute.network|, |compute.distance|, and
|compute.parallel| for documentation.
Attributes:
all_complexes: Alias for :func:`pyphi.compute.network.all_complexes`.
ces: Alias for :func:`pyphi.compute.subsystem.ces`.
ces_distance: Alias for :func:`pyphi.compute.distance.ces_distance`.
complexes: Alias for :func:`py
|
phi.compute.network.complexes`.
concept_distance: Alias for
:func:`pyphi.compute.distance.concept_distance`.
conceptual_info: Alias for :
|
func:`pyphi.compute.subsystem.conceptual_info`.
condensed: Alias for :func:`pyphi.compute.network.condensed`.
evaluate_cut: Alias for :func:`pyphi.compute.subsystem.evaluate_cut`.
major_complex: Alias for :func:`pyphi.compute.network.major_complex`.
phi: Alias for :func:`pyphi.compute.subsystem.phi`.
possible_complexes: Alias for
:func:`pyphi.compute.network.possible_complexes`.
sia: Alias for :func:`pyphi.compute.subsystem.sia`.
subsystems: Alias for :func:`pyphi.compute.network.subsystems`.
"""
# pylint: disable=unused-import
from .distance import ces_distance, concept_distance
from .network import (
all_complexes,
complexes,
condensed,
major_complex,
possible_complexes,
subsystems,
)
from .subsystem import (
ConceptStyleSystem,
SystemIrreducibilityAnalysisConceptStyle,
ces,
concept_cuts,
conceptual_info,
evaluate_cut,
phi,
sia,
sia_concept_style,
)
|
mgedmin/zest.releaser
|
zest/releaser/lasttagdiff.py
|
Python
|
gpl-2.0
| 903
| 0
|
# GPL, (c) Reinout van Rees
#
# Script to show the diff with the last relevant tag.
import logging
import sys
import zest.releaser.choose
from zest.releaser.utils import system
from zest.releaser import utils
logger = logging.getLogger(__name__)
def main():
logging.basicConfig(level=utils.loglevel(),
format="%(levelname)s: %(message)s")
vcs = zest.releaser.choose.version_control()
if len(sys.argv) > 1:
found = sys.argv[-1]
else:
found = utils.get_last_tag(vcs)
name = vcs.name
full_tag = vcs.tag_url(found)
logger.debug("Picked tag %r for %s (currently at %r).",
full_tag, name, vcs.version)
logger.info("Showing
|
differences from the last commit against tag %s",
full_tag)
diff_
|
command = vcs.cmd_diff_last_commit_against_tag(found)
print diff_command
print system(diff_command)
|
meisamhe/GPLshared
|
Programming/MPI — AMath 483 583, Spring 2013 1.0 documentation_files/matrix_rotation_constant.py
|
Python
|
gpl-3.0
| 1,490
| 0
|
import copy
import sys
import random
import itertools
def rotate_matrix(A):
for i in range(len(A) // 2):
for j in range(i, len(A) - i - 1):
temp = A[i][j]
A[i][j] = A[-1 - j][i]
A[-1 - j][i] = A[-1 - i][-1 - j]
A[-1 - i][-1 - j] = A[j][-1 - i]
A[j][-1 - i] = temp
# @include
class RotatedMatrix:
def __init__(self, square_matrix):
self._square_matrix = square_matrix
def read_entry(self, i, j):
        # Note that A[~i] for i in [0, len(A) - 1] is A[-(i + 1)].
return self._square_matrix[~j][i]
def write_entry(self, i, j, v):
self._square_matrix[~j][i] = v
# @exclude
def check_answer(A, B):
rA = RotatedMatrix(A)
for i in rang
|
e(len(A)):
for j in range(len(
|
A)):
assert rA.read_entry(i, j) == B[i][j]
def main():
if len(sys.argv) == 2:
n = int(sys.argv[1])
k = itertools.count(1)
A = []
for _ in range(1 << n):
A.append([next(k) for _ in range(1 << n)])
B = copy.deepcopy(A)
rotate_matrix(B)
check_answer(A, B)
else:
for _ in range(100):
n = random.randint(1, 10)
k = itertools.count(1)
A = []
for _ in range(1 << n):
A.append([next(k) for _ in range(1 << n)])
B = copy.deepcopy(A)
rotate_matrix(B)
check_answer(A, B)
if __name__ == '__main__':
main()
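# --- Editor's illustrative sketch (not part of the original source file) ---
# Tiny worked example contrasting the two approaches above: rotate_matrix
# rewrites A in place (90 degrees clockwise), while RotatedMatrix keeps the
# original storage untouched and remaps indices on every read/write.
def _rotation_demo():
    A = [[1, 2],
         [3, 4]]
    view = RotatedMatrix([row[:] for row in A])  # lazy rotated view of a copy
    rotate_matrix(A)                             # eager in-place rotation
    assert A == [[3, 1],
                 [4, 2]]
    assert all(view.read_entry(i, j) == A[i][j]
               for i in range(2) for j in range(2))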
|
our-city-app/oca-backend
|
src/solutions/common/to/pharmacy/order.py
|
Python
|
apache-2.0
| 1,626
| 0
|
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a co
|
py of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing
|
permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from mcfw.properties import unicode_property, long_property
class SolutionPharmacyOrderTO(object):
key = unicode_property('1')
description = unicode_property('2')
status = long_property('3')
sender_name = unicode_property('4')
sender_avatar_url = unicode_property('5')
timestamp = long_property('6')
picture_url = unicode_property('7')
remarks = unicode_property('8')
solution_inbox_message_key = unicode_property('9')
@staticmethod
def fromModel(model):
to = SolutionPharmacyOrderTO()
to.key = unicode(model.solution_order_key)
to.description = model.description
to.status = model.status
to.sender_name = model.get_sender().name
to.sender_avatar_url = model.get_sender().avatar_url
to.timestamp = model.timestamp
to.picture_url = model.picture_url
to.remarks = model.remarks
to.solution_inbox_message_key = model.solution_inbox_message_key
return to
|
ph1l/halo_radio
|
HaloRadio/PlaylistListMaker.py
|
Python
|
gpl-2.0
| 1,038
| 0.012524
|
#
#
# Copyright (C) 2004 Philip J Freeman
#
# This file is part of halo_radio
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A
|
PARTICULAR PURP
|
OSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import HaloRadio.TopListMaker as TopListMaker
class PlaylistListMaker(TopListMaker.TopListMaker):
"""
- PlaylistListMaker -
This class is for working with lists of playlists
"""
def __init__( self ):
self.list = [ ]
self.tablename = "playlists"
return
|
rcbops/python-novaclient-buildpackage
|
tests/v1_1/utils.py
|
Python
|
apache-2.0
| 814
| 0
|
from nose.tools import ok_
|
def fail(msg):
raise AssertionError(msg)
def assert_in(thing, seq, msg=None):
msg = msg or "'%s' not found in %s" % (thing, seq)
ok_(thing in seq, msg)
def assert_not_in(thing, seq, msg=Non
|
e):
msg = msg or "unexpected '%s' found in %s" % (thing, seq)
ok_(thing not in seq, msg)
def assert_has_keys(dict, required=[], optional=[]):
keys = dict.keys()
for k in required:
assert_in(k, keys, "required key %s missing from %s" % (k, dict))
allowed_keys = set(required) | set(optional)
extra_keys = set(keys).difference(set(required + optional))
if extra_keys:
fail("found unexpected keys: %s" % list(extra_keys))
def assert_isinstance(thing, kls):
ok_(isinstance(thing, kls), "%s is not an instance of %s" % (thing, kls))
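# --- Editor's illustrative sketch (not part of the original source file) ---
# Example of how these helpers read in a test; the `server` dict below is
# made-up sample data, not taken from the real novaclient fixtures.
def _assert_helpers_example():
    server = {'id': 1234, 'name': 'sample-server', 'status': 'BUILD'}
    assert_isinstance(server, dict)
    assert_in('status', server)
    assert_not_in('flavor', server)
    assert_has_keys(server, required=['id', 'name'], optional=['status'])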
|
johntellsall/shotglass
|
shotglass/app/tests/test_render.py
|
Python
|
mit
| 881
| 0
|
import pytest
# TODO: re-enable
# from app import render
# from app.models import SourceLine
class AttrDict(dict):
__getattr
|
__ = dict.__getitem__
# http://stackoverflow.com/a/11924754/143880
def is_subset(d1, obj):
d2 = var
|
s(obj)
return set(d1.items()).issubset(set(d2.items()))
@pytest.mark.skip("OLD: uses SourceLine")
@pytest.mark.django_db
def test_make_skeleton():
symbols = [
SourceLine(path="a.py", length=3),
SourceLine(path="a.py", length=3),
SourceLine(path="b.py", length=3),
]
for sym in symbols:
sym.line_number = -1
SourceLine.objects.bulk_create(symbols)
skeleton = list(render.make_skeleton(SourceLine.objects.all()))
result = [(sk.position, sk.x, sk.y) for sk in skeleton]
# X BUG: should be two-pixel "smudge" after a.py ends
assert result == [(0, 0, 0), (3, 0, 1), (6, 1, 3)]
|
bringsvor/bc_korps
|
utils/parse_members.py
|
Python
|
agpl-3.0
| 2,410
| 0.048608
|
#!/usr/bin/env python
#-.- encoding: utf-8 -.-
import csv,re
#medlemsfil = 'medlemmer_20032014.csv'
#medlemsfil = 'Medlemsliste 08.03.2015.csv'
medlemsfil = 'Medlemsliste 03.09.2015.csv'
def parse():
f = open(medlemsfil)
r = csv.reader(f)
index = 0
headings = None
members = None
category = None
for row in r:
print "ROW", row
if row[0] == 'Hjelpekorps':
headings.append('category_id')
return headings, members
if row[0].find('korps')!=-1:
category = row[0]
if members != None:
if category != None:
row.append(category)
members.append(row)
if index == 5:
headings = row
members = []
index += 1
return headings,members
postnr_re = re.compile('(\d{4}) ')
klasse_til_dato = {'2' : '2. kl',
'3' : '3. kl',
'4' : '4. kl',
'5' : '5. kl',
'6' : '6. kl',
'8' : '8. kl',
'9' : '9. kl',
'VGS' : 'VGS'}
def hent_postnr(postnr):
if not postnr:
return '1406 Ski'
postnr = postnr_re.search(postnr).group(1)
return postnr
def get_members(headings, members):
for m in members:
oerp = {}
o = zip(headings, m)
d = {}
for k,v in o:
d[k] = v
if not d['Etternavn'] or d['Etternavn']=='Etternavn' \
or d['Etternavn'] == 'Aspirantkorps' or d['Etternavn'] == 'Juniorkorps':
continue
"""
{'Etternavn': 'Refsdal', 'Postnr': '1400 Ski', 'Postadresse': 'Lysneveien 8 A', 'Telefon': '97544646', 'Kl': '3', 'Instrument': 'Slagverk', 'Start\xc3\xa5r': '2012', 'Fornavn': 'Adrian Normann', 'Epost': 'mona@refsdal.org'}
"""
oerp['name'] = ' '.join([d['Fornavn'], d['Etternavn']])
oerp['street'] = d['Postadresse']
oerp['city'] = 'Ski'
oerp['zip'] = hent_postnr(d['Postnr'])
#oerp['email'] = d['Epost']
epost = d['Epost'].split('/')
oerp['email'] = epost[0]
if len(epost)>1:
oerp['email2'] = epost[1]
tlf = d['Telefon']
|
.split('/')
print "TLF", d['Telefon'], tlf, d['Telefon'].split('/')
oerp['mobile'] = tlf[0]
if len(tlf)>1:
oerp['mobile2'] = tlf[1]
print "D_CATEG", d['category_id']
oerp['c
|
ategory_id'] = d['category_id']
# Startår
joined = d['Startår']
print "STARTÅR", joined
oerp['join_date'] = '01-01-%s' % joined
# Kl
oerp['birthdate'] = klasse_til_dato[d['Kl']]
oerp['instrument'] = d['Instrument']
#print "OERP", oerp
yield oerp
if __name__=='__main__':
headings, members = parse()
print "HE", headings
#
for mem in get_members(headings, members):
print mem
|
justinjfu/doodad
|
testing/remote/test_gcp.py
|
Python
|
gpl-3.0
| 1,085
| 0.003687
|
"""
|
Instructions:
1) Set up testing/config.py (copy from config.py.example and fill in the fields)
2) Run this script
3) Look inside your GCP_BUCKET under test_doodad and you should see
|
results in secret.txt
"""
import os
import doodad
from doodad.utils import TESTING_DIR
from testing.config import GCP_PROJECT, GCP_BUCKET, GCP_IMAGE
def run():
gcp_mount = doodad.MountGCP(
gcp_path='secret_output',
mount_point='/output'
)
local_mount = doodad.MountLocal(
local_dir=TESTING_DIR,
mount_point='/data',
output=False
)
mounts = [local_mount, gcp_mount]
launcher = doodad.GCPMode(
gcp_bucket=GCP_BUCKET,
gcp_log_path='test_doodad/gcp_test',
gcp_project=GCP_PROJECT,
instance_type='f1-micro',
zone='us-west1-a',
gcp_image=GCP_IMAGE,
gcp_image_project=GCP_PROJECT
)
doodad.run_command(
command='cat /data/secret.txt > /output/secret.txt',
mode=launcher,
mounts=mounts,
verbose=True
)
if __name__ == '__main__':
run()
|
vechnoe/products
|
src/urls.py
|
Python
|
mit
| 476
| 0
|
from django.c
|
onf.urls import include, url
from django.contrib import admin
from django.core.urlresolvers import reverse_lazy
from django.views.generic import RedirectView
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('products.urls', namespace='products')),
url(r'^', include('users.urls', namespace='users')),
url(r'^$', RedirectView.as_view(permanent=True,
url=reverse_lazy('products:products_list')), name='home')
|
,
]
|
stvstnfrd/edx-platform
|
cms/djangoapps/contentstore/views/tests/test_certificates.py
|
Python
|
agpl-3.0
| 31,931
| 0.001848
|
#-*- coding: utf-8 -*-
"""
Certificates Tests.
"""
import itertools
import json
import ddt
import mock
import six
from django.conf import settings
from django.test.utils import override_settings
from opaque_keys.edx.keys import AssetKey
from six.moves import range
from cms.djangoapps.contentstore.tests.utils import CourseTestCase
from cms.djangoapps.contentstore.utils import get_lms_link_for_certificate_web_view, reverse_course_url
from common.djangoapps.course_modes.tests.factories import CourseModeFactory
from common.djangoapps.student.models import CourseEnrollment
from common.djangoapps.student.roles import CourseInstructorRole, CourseStaffRole
from common.djangoapps.student.tests.factories import UserFactory
from common.djangoapps.util.testing import EventTestMixin, UrlResetMixin
from xmodule.contentstore.content import StaticContent
from xmodule.contentstore.django import contentstore
from xmodule.exceptions import NotFoundError
from ..certificates import CERTIFICATE_SCHEMA_VERSION, CertificateManager
FEATURES_WITH_CERTS_ENABLED = settings.FEATURES.copy()
FEATURES_WITH_CERTS_ENABLED['CERTIFICATES_HTML_VIEW'] = True
CERTIFICATE_JSON = {
u'name': u'Test certificate',
u'description': u'Test description',
u'is_active': True,
u'version': CERTIFICATE_SCHEMA_VERSION,
}
CERTIFICATE_JSON_WITH_SIGNATORIES = {
u'name': u'Test certificate',
u'description': u'Test description',
u'version': CERTIFICATE_SCHEMA_VERSION,
u'course_title': 'Course Title Override',
u'is_active': True,
u'signatories': [
{
"name": "Bob Smith",
"title": "The DEAN.",
"signature_image_path": "/c4x/test/CSS101/asset/Signature.png"
}
]
}
C4X_SIGNATORY_PATH = '/c4x/test/CSS101/asset/Signature{}.png'
SIGNATORY_PATH = 'asset-v1:test+CSS101+SP2017+type@asset+block@Signature{}.png'
# pylint: disable=no-member
class HelperMethods(object):
"""
Mixin that provides useful methods for certificate configuration tests.
"""
def _create_fake_images(self, asset_keys):
"""
Creates fake image files for a list of asset_keys.
"""
for asset_key_string in asset_keys:
asset_key = AssetKey.from_string(asset_key_string)
content = StaticContent(
asset_key, "Fake asset", "image/png", "data",
)
contentstore().save(content)
def _add_course_certificates(self, count=1, signatory_count=0, is_active=False,
asset_path_format=C4X_SIGNATORY_PATH):
"""
Create certificate for the course.
"""
signatories = [
{
'name': 'Name ' + str(i),
'title': 'Title ' + str(i),
'signature_image_path': asset_path_format.format(i),
'id': i
} for i in range(signatory_count)
]
# create images for signatory signatures except the last signatory
self._create_fake_images(signatory['signature_image_path'] for signatory in signatories[:-1])
certificates = [
{
'id': i,
'name': 'Name ' + str(i),
'description': 'Description ' + str(i),
'signatories': signatories,
'version': CERTIFICATE_SCHEMA_VERSION,
'is_active': is_active
} for i in range(count)
]
self.course.certificates = {'certificates': certificates}
self.save_course()
# pylint: disable=no-member
class CertificatesBaseTestCase(object):
"""
Mixin with base test cases for the certificates.
"""
def _remove_ids(self, content):
"""
Remove ids from the response. We cannot predict IDs, because they're
generated randomly.
We us
|
e this method to clean up response when creating new certificate.
"""
certificate_id = content.pop("id")
return certificate_id
def test_required_fields_are_absent(self):
"""
Test required fields are absent.
"""
bad_jsons = [
# must have name of the certificate
{
u'description': 'Test description',
u'version': CERTIFICATE_S
|
CHEMA_VERSION
},
# an empty json
{},
]
for bad_json in bad_jsons:
response = self.client.post(
self._url(),
data=json.dumps(bad_json),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
self.assertEqual(response.status_code, 400)
self.assertNotIn("Location", response)
content = json.loads(response.content.decode('utf-8'))
self.assertIn("error", content)
def test_invalid_json(self):
"""
Test invalid json handling.
"""
# Invalid JSON.
invalid_json = u"{u'name': 'Test Name', u'description': 'Test description'," \
u" u'version': " + str(CERTIFICATE_SCHEMA_VERSION) + ", []}"
response = self.client.post(
self._url(),
data=invalid_json,
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
self.assertEqual(response.status_code, 400)
self.assertNotIn("Location", response)
content = json.loads(response.content.decode('utf-8'))
self.assertIn("error", content)
def test_certificate_data_validation(self):
#Test certificate schema version
json_data_1 = {
u'version': 100,
u'name': u'Test certificate',
u'description': u'Test description'
}
with self.assertRaises(Exception) as context:
CertificateManager.validate(json_data_1)
self.assertIn(
"Unsupported certificate schema version: 100. Expected version: 1.",
str(context.exception)
)
#Test certificate name is missing
json_data_2 = {
u'version': CERTIFICATE_SCHEMA_VERSION,
u'description': u'Test description'
}
with self.assertRaises(Exception) as context:
CertificateManager.validate(json_data_2)
self.assertIn('must have name of the certificate', str(context.exception))
@ddt.ddt
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
class CertificatesListHandlerTestCase(
EventTestMixin, CourseTestCase, CertificatesBaseTestCase, HelperMethods, UrlResetMixin
):
"""
Test cases for certificates_list_handler.
"""
def setUp(self): # lint-amnesty, pylint: disable=arguments-differ
"""
Set up CertificatesListHandlerTestCase.
"""
super(CertificatesListHandlerTestCase, self).setUp('cms.djangoapps.contentstore.views.certificates.tracker') # lint-amnesty, pylint: disable=super-with-arguments
self.reset_urls()
def _url(self):
"""
Return url for the handler.
"""
return reverse_course_url('certificates_list_handler', self.course.id)
def test_can_create_certificate(self):
"""
Test that you can create a certificate.
"""
expected = {
u'version': CERTIFICATE_SCHEMA_VERSION,
u'name': u'Test certificate',
u'description': u'Test description',
u'is_active': True,
u'signatories': []
}
response = self.client.ajax_post(
self._url(),
data=CERTIFICATE_JSON
)
self.assertEqual(response.status_code, 201)
self.assertIn("Location", response)
content = json.loads(response.content.decode('utf-8'))
certificate_id = self._remove_ids(content)
self.assertEqual(content, expected)
self.assert_event_emitted(
'edx.certificate.configuration.created',
course_id=six.text_type(self.course.id),
configuration_id=certificate_id,
|
nikitos/npui
|
netprofile_access/netprofile_access/models.py
|
Python
|
agpl-3.0
| 23,665
| 0.046146
|
#!/usr/bin/env python
# -*- coding: utf-8; tab-width: 4; indent-tabs-mode: t -*-
#
# NetProfile: Access module - Models
# © Copyright 2013-2015 Alex 'Unik' Unigovsky
#
# This file is part of NetProfile.
# NetProfile is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later
# version.
#
# NetProfile is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General
# Public License along with NetProfile. If not, see
# <http://www.gnu.org/licenses/>.
from __future__ import (
unicode_literals,
print_function,
absolute_import,
division
)
__all__ = [
'AccessBlock',
'AccessEntity',
'AccessEntityLink',
'AccessEntityLinkType',
'PerUserRateModifier',
'AcctAddProcedure',
'AcctAuthzProcedure',
'AcctPollProcedure',
'AcctRateModsProcedure',
'AcctRollbackProcedure',
'CheckAuthFunction',
'AccessblockExpireEvent',
'AcctPollEvent'
]
import datetime as dt
from sqlalchemy import (
Boolean,
Column,
DateTime,
FetchedValue,
ForeignKey,
Index,
Sequence,
TIMESTAMP,
Unicode,
UnicodeText,
func,
text
)
from sqlalchemy.orm import (
backref,
relationship
)
from sqlalchemy.ext.associationproxy import association_proxy
from netprofile.db.connection import (
Base,
DBSession
)
from netprofile.db.fields import (
ASCIIString,
DeclEnum,
Money,
NPBoolean,
Traffic,
UInt8,
UInt16,
UInt32,
UInt64,
npbool
)
from netprofile.db.ddl import (
Comment,
CurrentTimestampDefault,
InArgument,
InOutArgument,
OutArgument,
SQLEvent,
SQLFunction,
SQLFunctionArgument,
Trigger
)
from netprofile.ext.columns import MarkupColumn
from netprofile.ext.wizards import (
SimpleWizard,
Wizard,
Step,
ExternalWizardField
)
from netprofile.ext.data import (
ExtModel,
_name_to_class
)
from netprofile.common.hooks import register_hook
from pyramid.i18n import (
TranslationStringFactory,
get_localizer
)
from pyramid.threadlocal import get_current_request
from netprofile_entities.models import (
Entity,
EntityType
)
_ = TranslationStringFactory('netprofile_access')
EntityType.add_symbol('access', ('access', _('Access'), 50))
@register_hook('np.wizard.init.entities.Entity')
def _wizcb_aent_init(wizard, model, req):
def _wizcb_aent_submit(wiz, em, step, act, val, req):
sess = DBSession()
em = ExtModel(AccessEntity)
obj = AccessEntity()
# Work around field name clash
if 'state' in val:
del val['state']
em.set_values(obj, val, req, True)
sess.add(obj)
return {
'do' : 'close',
'reload' : True
}
wizard.steps.append(Step(
ExternalWizardField('AccessEntity', 'password'),
ExternalWizardField('AccessEntity', 'stash'),
ExternalWizardField('AccessEntity', 'rate'),
id='ent_access1', title=_('Access entity properties'),
on_prev='generic',
on_submit=_wizcb_aent_submit
))
class AccessState(DeclEnum):
"""
Enumeration of access entity status codes
"""
ok = 0, _('OK'), 10
block_auto = 1, _('Blocked automatically'), 20
block_manual = 2, _('Blocked manually'), 30
block_maxsim = 3, _('Blocked after reaching max sessions'), 40
block_rejected = 4, _('Rejected'), 50
block_inactive = 5, _('Inactive'), 60
error = 99, _('Error'), 70
class AccessBlockState(DeclEnum):
planned = 'planned', _('Planned'), 10
active = 'active', _('Active'), 20
expired = 'expired', _('Expired'), 30
class AccessEntity(Entity):
"""
Access entity object.
"""
DN_ATTR = 'uid'
__tablename__ = 'entities_access'
__table_args__ = (
Comment('Access entities'),
Index('entities_access_i_stashid', 'stashid'),
Index('entities_access_i_rateid', 'rateid'),
Index('entities_access_i_aliasid', 'aliasid'),
Index('entities_access_i_ipaddrid', 'ipaddrid'),
Index('entities_access_i_ip6addrid', 'ip6addrid'),
Index('entities_access_i_nextrateid', 'nextrateid'),
Trigger('before', 'insert', 't_entities_access_bi'),
Trigger('before', 'update', 't_entities_access_bu'),
Trigger('after', 'update', 't_entities_access_au'),
Trigger('after', 'delete', 't_entities_access_ad'),
{
'mysql_engine' : 'InnoDB',
'mysql_charset' : 'utf8',
'info' : {
'cap_menu' : 'BASE_ENTITIES',
'cap_read' : 'ENTITIES_LIST',
'cap_create' : 'ENTITIES_CREATE',
'cap_edit' : 'ENTITIES_EDIT',
'cap_delete' : 'ENTITIES_DELETE',
'show_in_menu' : 'modules',
'menu_name' : _('Access Entities'),
'menu_parent' : 'entities',
'default_sort' : ({ 'property': 'nick' ,'direction': 'ASC' },),
'grid_view' : (
MarkupColumn(
name='icon',
header_string=' ',
help_text=_('Entity icon'),
column_width=22,
column_name=_('Icon'),
column_resizable=False,
cell_class='np-nopad',
template='<img class="np-block-img" src="{grid_icon}" />'
),
'entityid',
'nick', 'stash', 'rate'
),
'grid_hidden' : ('entityid',),
'form_view' : (
'nick', 'parent', 'state', 'flags',
'password', 'stash', 'rate', 'next_rate', #'alias_of',
'ipv4_address', 'ipv6_address',
'ut_ingress', 'ut_egress', 'u_sec',
'qpend', 'access_state',
'pol_ingress', 'pol_egress',
'bcheck', 'pcheck',
'descr'
),
'easy_search' : ('nick',),
'extra_data' : ('grid_icon',),
'detail_pane' : ('netprofile_core.views', 'dpane_simple'),
'create_wizard' : Wizard(
Step(
'nick', 'parent', 'state',
'flags', 'descr',
id='generic', title=_('Generic entity properties'),
),
Step(
'password', 'stash', 'rate',
id='ent_access1', title=_('Access entity properties'),
),
title=_('Add new access entity'), validator='CreateAccessEntity'
)
}
}
)
__mapper_args__ = {
'polymorphic_identity' : EntityType.access
}
id = Column(
'entityid',
UInt32(),
ForeignKey('entities_def.entityid', name='entities_access_fk_entityid', ondelete='CASCADE', onupdate='CASCADE'),
Comment('Entity ID'),
primary_key=True,
nullable=False,
info={
'header_string' : _('ID')
}
)
password = Column(
Unicode(255),
Comment('Cleartext password'),
nullable=False,
info={
'header_string' : _('Password'),
'secret_value' : True,
'editor_xtype' : 'passwordfie
|
ld'
}
)
stash_id = Column(
'stashid',
UInt32(),
ForeignKey('stashes_def.stashid', name='entities_access_fk_stashid', onupdate='CASCADE'),
Comment('Used stash ID'),
nullable=False,
inf
|
o={
'header_string' : _('Stash'),
'column_flex' : 3
}
)
rate_id = Column(
'rateid',
UInt32(),
ForeignKey('rates_def.rateid', name='entities_access_fk_rateid', onupdate='CASCADE'),
Comment('Used rate ID'),
nullable=False,
info={
'header_string' : _('Rate'),
'column_flex' : 2
}
)
alias_of_id = Column(
'aliasid',
UInt32(),
ForeignKey('entities_access.entityid', name='entities_access_fk_aliasid', ondelete='CASCADE', onupdate='CASCADE'),
Comment('Aliased access entity ID'),
nullable=True,
default=None,
server_default=text('NULL'),
info={
'header_string' : _('Alias Of'),
'filter_type' : 'none'
}
)
next_rate_id = Column(
'nextrateid',
UInt32(),
ForeignKey('rates_def.rateid', name='entities_access_fk_nextrateid', ondelete='SET NULL', onupdate='CASCADE'),
Comment('Next rate ID'),
nullable=True,
default=None,
server_default=text('NULL'),
info={
'header_string' : _('Next Rate')
}
)
ipv4_address_id = Column(
'ipaddrid',
UInt32(),
ForeignKey('ipaddr_def.ipaddrid', name='entities_access_fk_ipaddrid', ondelete='SET NULL', onupdate='CASCADE'),
Comment('IPv4 address ID'),
nullable=True,
default=None,
server_default=text('NULL'),
info={
|
hackerspace/memberportal
|
payments/common.py
|
Python
|
gpl-2.0
| 1,647
| 0.004857
|
from datetime import date
class YearInfo(object):
def __init__(self, year, months_ok, months_na):
self.year = year
self.months = set(range(1, 13))
self.months_ok = set(months_ok)
self.months_na = set(months_na)
self.months_er = self.months - (self.months_ok | self.month
|
s_na)
today = date.today()
if self.year == today.year:
self.months_er -= set(range(today.month, 13))
def __unicode__(self):
return u'%s' % self.year
def missing(self):
return len(self.months_er) != 0
def payments
|
_by_month(payments_list):
monthly_data = set()
if not payments_list:
return []
for payment in payments_list:
for m in payment.formonths():
monthly_data.add(m)
since_year = payment.user.date_joined.year
since_month = payment.user.date_joined.month
years = set(range(since_year, date.today().year+1))
out = []
for y in years:
ok = map(lambda x: x[1],
filter(lambda x: x[0] == y, monthly_data))
na = []
if y == since_year:
na = range(1, since_month)
yi = YearInfo(y, ok, na)
out.append(yi)
return out
def no_missing_payments(payments_list):
plist = payments_by_month(payments_list)
for year in plist:
if year.missing():
return False
return True
def missing_months(payments_list):
plist = payments_by_month(payments_list)
missing = []
for yi in plist:
if yi.missing():
for month in yi.months_er:
missing.append((yi.year, month))
return missing
|
viktorki/Discrete-Distributions
|
manage.py
|
Python
|
gpl-3.0
| 264
| 0.003788
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__m
|
ain__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "DiscreteDistributions.settings")
|
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
uclmr/inferbeddings
|
inferbeddings/models/base.py
|
Python
|
mit
| 7,187
| 0.005148
|
# -*- coding: utf-8 -*-
import abc
import tensorflow as tf
from inferbeddi
|
ngs.models import embeddings
import sys
class BaseModel(metaclass=abc.ABCMeta):
def __init__(self, entity_embeddings=None, predicate_embeddings=
|
None, similarity_function=None,
reuse_variables=False, *args, **kwargs):
"""
Abstract class inherited by all models.
:param entity_embeddings: (batch_size, 2, entity_embedding_size) Tensor.
:param predicate_embeddings: (batch_size, walk_size, predicate_embedding_size) Tensor.
:param similarity_function: similarity function.
:param reuse_variables: States whether the variables within the model need to be reused.
"""
self.entity_embeddings = entity_embeddings
self.predicate_embeddings = predicate_embeddings
self.similarity_function = similarity_function
self.reuse_variables = reuse_variables
@abc.abstractmethod
def __call__(self):
raise NotImplementedError
@property
def parameters(self):
return []
class TranslatingModel(BaseModel):
def __init__(self, *args, **kwargs):
"""
Implementation of a compositional extension of the Translating Embeddings model [1].
[1] Bordes, A. et al. - Translating Embeddings for Modeling Multi-relational Data - NIPS 2013
"""
super().__init__(*args, **kwargs)
def __call__(self):
"""
:return: (batch_size) Tensor containing the scores associated by the models to the walks.
"""
subject_embedding, object_embedding = self.entity_embeddings[:, 0, :], self.entity_embeddings[:, 1, :]
walk_embedding = embeddings.additive_walk_embedding(self.predicate_embeddings)
translated_subject_embedding = subject_embedding + walk_embedding
return self.similarity_function(translated_subject_embedding, object_embedding)
class BilinearDiagonalModel(BaseModel):
def __init__(self, *args, **kwargs):
"""
Implementation of a compositional extension of the Bilinear-Diagonal model [1]
[1] Yang, B. et al. - Embedding Entities and Relations for Learning and Inference in Knowledge Bases - ICLR 2015
"""
super().__init__(*args, **kwargs)
def __call__(self):
"""
:return: (batch_size) Tensor containing the scores associated by the models to the walks.
"""
subject_embedding, object_embedding = self.entity_embeddings[:, 0, :], self.entity_embeddings[:, 1, :]
walk_embedding = embeddings.bilinear_diagonal_walk_embedding(self.predicate_embeddings)
scaled_subject_embedding = subject_embedding * walk_embedding
return self.similarity_function(scaled_subject_embedding, object_embedding)
class BilinearModel(BaseModel):
def __init__(self, *args, **kwargs):
"""
Implementation of a compositional extension of the Bilinear model [1]
[1] Nickel, M. et al. - A Three-Way Model for Collective Learning on Multi-Relational Data - ICML 2011
"""
super().__init__(*args, **kwargs)
def __call__(self):
"""
:return: (batch_size) Tensor containing the scores associated by the models to the walks.
"""
subject_embedding, object_embedding = self.entity_embeddings[:, 0, :], self.entity_embeddings[:, 1, :]
entity_embedding_size = subject_embedding.get_shape()[-1].value
walk_embedding = embeddings.bilinear_walk_embedding(self.predicate_embeddings, entity_embedding_size)
es = tf.expand_dims(subject_embedding, 1)
sW = tf.matmul(es, walk_embedding)[:, 0, :]
return self.similarity_function(sW, object_embedding)
class ComplexModel(BaseModel):
def __init__(self, *args, **kwargs):
"""
Implementation of a compositional extension of the ComplEx model [1]
[1] Trouillon, T. et al. - Complex Embeddings for Simple Link Prediction - ICML 2016
"""
super().__init__(*args, **kwargs)
def __call__(self):
"""
:return: (batch_size) Tensor containing the scores associated by the models to the walks.
"""
subject_embedding, object_embedding = self.entity_embeddings[:, 0, :], self.entity_embeddings[:, 1, :]
walk_embedding = embeddings.complex_walk_embedding(self.predicate_embeddings)
es_re, es_im = tf.split(value=subject_embedding, num_or_size_splits=2, axis=1)
eo_re, eo_im = tf.split(value=object_embedding, num_or_size_splits=2, axis=1)
ew_re, ew_im = tf.split(value=walk_embedding, num_or_size_splits=2, axis=1)
def dot3(arg1, rel, arg2):
return self.similarity_function(arg1 * rel, arg2)
score = dot3(es_re, ew_re, eo_re) + dot3(es_re, ew_im, eo_im) + dot3(es_im, ew_re, eo_im) - dot3(es_im, ew_im, eo_re)
return score
class ERMLP(BaseModel):
def __init__(self, hidden_size=None, f=tf.tanh, *args, **kwargs):
"""
Implementation of the ER-MLP model described in [1, 2]
[1] Dong, X. L. et al. - Knowledge Vault: A Web-Scale Approach to Probabilistic Knowledge Fusion - KDD 2014
[2] Nickel, M. et al. - A Review of Relational Machine Learning for Knowledge Graphs - IEEE 2016
"""
super().__init__(*args, **kwargs)
self.f = f
# ent_emb_size, pred_emb_size = self.entity_embeddings_size, self.predicate_embeddings_size
ent_emb_size = self.entity_embeddings.get_shape()[-1].value
pred_emb_size = self.predicate_embeddings.get_shape()[-1].value
input_size = ent_emb_size + ent_emb_size + pred_emb_size
with tf.variable_scope("ERMLP", reuse=self.reuse_variables) as _:
self.C = tf.get_variable('C', shape=[input_size, hidden_size], initializer=tf.contrib.layers.xavier_initializer())
self.w = tf.get_variable('w', shape=[hidden_size, 1], initializer=tf.contrib.layers.xavier_initializer())
def __call__(self):
"""
:return: (batch_size) Tensor containing the scores associated by the models to the walks.
"""
subject_embedding, object_embedding = self.entity_embeddings[:, 0, :], self.entity_embeddings[:, 1, :]
# This model is non-compositional in nature, so it might not be trivial to represent a walk embedding
walk_embedding = self.predicate_embeddings[:, 0, :]
e_ijk = tf.concat(values=[subject_embedding, object_embedding, walk_embedding], axis=1)
h_ijk = tf.matmul(e_ijk, self.C)
f_ijk = tf.squeeze(tf.matmul(self.f(h_ijk), self.w), axis=1)
return f_ijk
@property
def parameters(self):
params = super().parameters + [self.C, self.w]
return params
# Aliases
TransE = TranslatingEmbeddings = TranslatingModel
DistMult = BilinearDiagonal = BilinearDiagonalModel
RESCAL = Bilinear = BilinearModel
ComplEx = ComplexE = ComplexModel
ER_MLP = ERMLP
def get_function(function_name):
this_module = sys.modules[__name__]
if not hasattr(this_module, function_name):
raise ValueError('Unknown model: {}'.format(function_name))
return getattr(this_module, function_name)
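# --- Editor's illustrative sketch (not part of the original source file) ---
# get_function lets a configuration string pick the scoring model by class
# name or alias; both resolve to the same callables defined above.
def _resolve_model_example():
    assert get_function('TransE') is TranslatingModel
    assert get_function('BilinearDiagonalModel') is DistMult
    try:
        get_function('NoSuchModel')
    except ValueError as err:
        assert 'Unknown model' in str(err)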
|
pythonpro-dev/pp-testing
|
tests/unit/test_sample.py
|
Python
|
bsd-3-clause
| 415
| 0
|
# $HeadUR
|
L$
import sys
def test_import():
""" Test to make sure the project imports OK.
"""
import pp.testing
def test_app():
""" Test the command-line app runs OK.
"""
from pp.testing.sc
|
ripts import app
sys.argv = []
app.main()
if __name__ == '__main__':
    # Run this test file through py.test if executed on the cmdline
import pytest
pytest.main(args=[sys.argv[0]])
|
ryfeus/lambda-packs
|
pytorch/source/torch/optim/sparse_adam.py
|
Python
|
mit
| 4,595
| 0.001741
|
import math
import torch
from .optimizer import Optimizer
class SparseAdam(Optimizer):
r"""Implements lazy version of Adam algorithm suitable for sparse tensors.
In this variant, only moments that show up in the gradient get updated, and
only those portions of the gradient get applied to the parameters.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8):
if not 0.0 < lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 < eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps)
super(SparseAdam, self).__init__(params, defaults)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if not grad.is_sparse:
raise RuntimeError('SparseAdam does not support dense gradients, please consider Adam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
state['step'] += 1
grad = grad.coalesce() # the update is non-linear so indices must be unique
grad_indices = grad._indices()
grad_values = grad._values()
size = grad.size()
def make_sparse(values):
constructor = grad.new
if grad_indices.dim() == 0 or values.dim() == 0:
return constructor().resize_as_(grad)
return constructor(grad_indices, values, size)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
# Decay the first and second moment running average coefficient
|
# old <- b * old + (1 - b) * new
# <==> old += (1 - b) * (new - old)
old_exp_avg_values = exp_avg.sparse_mask(grad)._values()
exp_avg_u
|
pdate_values = grad_values.sub(old_exp_avg_values).mul_(1 - beta1)
exp_avg.add_(make_sparse(exp_avg_update_values))
old_exp_avg_sq_values = exp_avg_sq.sparse_mask(grad)._values()
exp_avg_sq_update_values = grad_values.pow(2).sub_(old_exp_avg_sq_values).mul_(1 - beta2)
exp_avg_sq.add_(make_sparse(exp_avg_sq_update_values))
# Dense addition again is intended, avoiding another sparse_mask
numer = exp_avg_update_values.add_(old_exp_avg_values)
exp_avg_sq_update_values.add_(old_exp_avg_sq_values)
denom = exp_avg_sq_update_values.sqrt_().add_(group['eps'])
del exp_avg_update_values, exp_avg_sq_update_values
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
p.data.add_(make_sparse(-step_size * numer.div_(denom)))
return loss
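# --- Editor's illustrative sketch (not part of the original source file) ---
# Typical pairing: SparseAdam with a sparse embedding table, which is the
# main producer of sparse gradients; a dense gradient would trigger the
# RuntimeError raised in step() above.
def _sparse_adam_usage_example():
    emb = torch.nn.Embedding(1000, 16, sparse=True)
    opt = SparseAdam(emb.parameters(), lr=1e-3)
    ids = torch.randint(0, 1000, (32,))
    loss = emb(ids).pow(2).sum()
    loss.backward()  # emb.weight.grad is a torch sparse tensor
    opt.step()       # only the rows indexed by `ids` are updated
    opt.zero_grad()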
|
springer-math/Mathematics-of-Epidemics-on-Networks
|
docs/examples/fig4p11.py
|
Python
|
mit
| 2,565
| 0.015595
|
import EoN
import networkx as nx
import matplotlib.pyplot as plt
import scipy
import random
print(r"Warning, book says \tau=2\gamma/<K>, but it's really 1.5\gamma/<K>")
print(r"Warning - for the power law graph the text says k_{max}=110, but I believe it is 118.")
N=1000
gamma = 1.
iterations = 200
rho = 0.05
tmax = 15
tcount = 101
kave = 20
tau = 1.5*gamma/kave
def simulate_process(graph_function, iterations, tmax, tcount, rho, kave, tau, gamma, symbol):
Isum = scipy.zeros(tcount)
report_times = scipy.linspace(0,tmax,tcount)
for counter in range(iterations):
G = graph_function()
t, S, I = EoN.fast_SIS(G, tau, gamma, rho=rho, tmax=tmax)
I = EoN.subsample(report_times, t, I)
Isum += I
plt.plot(report_times, Isum*1./(N*iterations), symbol)
#regular
symbol = 'o'
graph_function = lambda : nx.configuration_model(N*[kave])
simulate_process(graph_function, iterations, tmax, tcount, rho, kave, tau,
|
gamma, symbol)
#bimodal
symbol='x'
graph_function = lambda: nx.configuration_model([5,35]*int(N/2+0.01))
simulate_process(graph_function, i
|
terations, tmax, tcount, rho, kave, tau, gamma, symbol)
#erdos-renyi
symbol = 's'
graph_function = lambda : nx.fast_gnp_random_graph(N, kave/(N-1.))
simulate_process(graph_function, iterations, tmax, tcount, rho, kave, tau, gamma, symbol)
symbol = 'd'
pl_kmax = 118
pl_kmin = 7
pl_alpha = 2.
Pk={}
for k in range(pl_kmin, pl_kmax+1):
Pk[k] = k**(-pl_alpha)
valsum = sum(Pk.values())
for k in Pk.keys():
Pk[k] /= valsum
#print sum(k*Pk[k] for k in Pk.keys())
def generate_sequence(Pk, N):
while True:
sequence = []
for counter in range(N):
r = random.random()
for k in Pk.keys():
if r< Pk[k]:
break
else:
r-=Pk[k]
sequence.append(k)
if sum(sequence)%2==0:
break
return sequence
graph_function = lambda : nx.configuration_model(generate_sequence(Pk,N))
simulate_process(graph_function, iterations, tmax, tcount, rho, kave, tau, gamma, symbol)
symbol = '--'
S0 = (1-rho)*N
I0 = rho*N
t, S, I = EoN.SIS_homogeneous_meanfield(S0, I0, kave, tau, gamma, tmax=tmax, tcount=tcount)
plt.plot(t, I/N, symbol)
symbol = '-'
S0 = (1-rho)*N
I0 = rho*N
SI0 = (1-rho)*N*kave*rho
SS0 = (1-rho)*N*kave*(1-rho)
t, S, I = EoN.SIS_homogeneous_pairwise(S0, I0, SI0, SS0, kave, tau, gamma, tmax=tmax, tcount=tcount)
plt.plot(t, I/N, symbol)
plt.xlabel('$t$')
plt.ylabel('Prevalence')
plt.savefig('fig4p11.png')
|
UdK-VPT/Open_eQuarter
|
mole/stat_corr/window_wall_ratio_east_SDH_by_building_age_lookup.py
|
Python
|
gpl-2.0
| 1,995
| 0.175527
|
# coding: utf8
# OeQ autogenerated lookup function for 'Window/Wall Ratio East in correlation to year of construction, based on the source data of the survey for the "German Building Typology developed by the
|
"Institut für Wohnen und Umwelt", Darmstadt/Germany, 2011-2013'
import math
import numpy as np
import oeqLookuptable as oeq
def get(*xin):
l_lookup = oeq.lookuptable(
[
1849,0,
1850,0,
1851,0,
1852,0,
1853,0,
1854,0,
1855,0,
1856,0,
1857,0,
1858,0,
1859,0,
1860,0,
1861,0,
1862,0,
1863,0,
1864,0,
1865,0,
1866,0,
1867,0,
1868,0,
1869,0,
1870,0,
1871,0,
1872,0,
1873,0,
1874,0,
1875,0,
1876,0,
1877,0,
1878,0,
1879,0,
1880,0,
1881,0,
1882,0,
1883,0,
|
1884,0,
1885,0,
1886,0,
1887,0,
1888,0,
1889,0,
1890,0,
1891,0,
1892,0,
1893,0,
1894,0,
1895,0,
1896,0,
1897,0,
1898,0,
1899,0,
1900,0,
1901,0,
1902,0,
1903,0,
1904,0,
1905,0,
1906,0,
1907,0,
1908,0,
1909,0,
1910,0,
1911,0,
1912,0,
1913,0,
1914,0,
1915,0,
1916,0,
1917,0,
1918,0,
1919,0,
1920,0,
1921,0,
1922,0,
1923,0,
1924,0,
1925,0,
1926,0,
1927,0,
1928,0,
1929,0,
1930,0,
1931,0,
1932,0,
1933,0,
1934,0,
1935,0,
1936,0,
1937,0,
1938,0,
1939,0,
1940,0,
1941,0,
1942,0,
1943,0,
1944,0,
1945,0,
1946,0,
1947,0,
1948,0,
1949,0,
1950,0,
1951,0,
1952,0,
1953,0,
1954,0,
1955,0,
1956,0,
1957,0,
1958,0.001,
1959,0.002,
1960,0.002,
1961,0,
1962,0,
1963,0,
1964,0,
1965,0,
1966,0.019,
1967,0.046,
1968,0.077,
1969,0.11,
1970,0.141,
1971,0.169,
1972,0.195,
1973,0.22,
1974,0.22,
1975,0.22,
1976,0.22,
1977,0.22,
1978,0.161,
1979,0.089,
1980,0.028,
1981,0,
1982,0.019,
1983,0.07,
1984,0.131,
1985,0.18,
1986,0.2,
1987,0.199,
1988,0.188,
1989,0.18,
1990,0.184,
1991,0.192,
1992,0.195,
1993,0.18,
1994,0.142,
1995,0.09,
1996,0.038,
1997,0,
1998,0,
1999,0,
2000,0.007,
2001,0.025,
2002,0.038,
2003,0.045,
2004,0.049,
2005,0.05,
2006,0.05,
2007,0.051,
2008,0.05,
2009,0.05,
2010,0.05,
2011,0.05,
2012,0.05,
2013,0.05,
2014,0.05,
2015,0.05,
2016,0.05,
2017,0.05,
2018,0.05,
2019,0.05,
2020,0.05,
2021,0.05])
return(l_lookup.lookup(xin))
|
google-research/google-research
|
f_net/models_test.py
|
Python
|
apache-2.0
| 13,843
| 0.00354
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for f_net.models."""
from typing import Any, Dict, Mapping, Sequence
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax import numpy as jnp
import ml_collections
from f_net import models
from f_net.configs import base as base_config
from f_net.configs.base import HybridAttentionLayout
from f_net.configs.base import ModelArchitecture
# Type Stubs
PRNGKey = Any
Model = Any
def dummy_config(model_arch):
"""Creates a dummy model config that can be used by all tests."""
config = base_config.get_config()
config.model_arch = model_arch
config.d_emb = 8
config.d_model = 8
config.d_ff = 8
config.max_seq_length = 16
config.num_heads = 1
config.num_layers = 2
config.vocab_size = 280
config.train_batch_size = 3
config.eval_batch_size = 2
config.use_fft = True
return config
def dummy_inputs(
key,
config):
"""Creates a dummy model base inputs."""
input_ids = jax.random.randint(
key, (config.train_batch_size, config.max_seq_length),
minval=0,
maxval=10)
return {
"input_ids":
input_ids,
"input_mask": (input_ids > 0).astype(jnp.int32),
"type_ids":
jax.random.randint(
key, (config.train_batch_size, config.max_seq_length),
minval=0,
maxval=config.type_vocab_size)
}
def init_encoder_batch(
config):
"""Creates a batch of inputs used to initialize the models.EncoderModel."""
return {
"input_ids": jnp.ones((1, config.max_seq_length), jnp.int32),
"input_mask": jnp.ones((1, config.max_seq_length), jnp.int32),
"type_ids": jnp.ones((1, config.max_seq_length), jnp.int32)
}
def init_model_params(
key, model,
init_batch):
"""Initializes model parameters."""
key, dropout_key = jax.random.split(key)
jit_init = jax.jit(model.init)
initial_variables = jit_init({
"params": key,
"dropout": dropout_key
}, **init_batch)
return initial_variables["params"]
class ModelsTest(parameterized.TestCase):
@parameterized.parameters(ModelArchitecture.F_NET, ModelArchitecture.FF_ONLY,
ModelArchitecture.RANDOM)
def test_unparametrized_mixing_encoder(self, model_arch):
config = dummy_config(model_arch=model_arch)
frozen_config = ml_collections.FrozenConfigDict(config)
encoder = models.EncoderModel(config=frozen_config)
rng = jax.random.PRNGKey(0)
init_batch = init_encoder_batch(config)
params = init_model_params(rng, encoder, init_batch)
# Unparameterized mixing encoders do not have any parameters in their mixing
# layers, so their mixing layer names do not show up in params.
expected_keys = {
"embedder", "encoder_0", "encoder_1", "feed_forward_0",
"feed_forward_1", "pooler"
}
self.assertEqual(params.keys(), expected_keys)
inputs = dummy_inputs(rng, config)
hidden_states, pooled_output = encoder.apply({"params": params},
rngs={"dropout": rng},
**inputs)
expected_hidden_states_shape = (config.train_batch_size,
config.max_seq_length, config.d_model)
self.assertEqual(hidden_states.shape, expected_hidden_states_shape)
expected_pooled_output_shape = (config.train_batch_size, config.d_model)
self.assertEqual(pooled_output.shape, expected_pooled_output_shape)
def test_f_net_encoder_bad_long_seq(self):
config = dummy_config(model_arch=ModelArchitecture.F_NET)
with config.unlocked():
config.max_seq_length = 8194
frozen_config = ml_collections.FrozenConfigDict(config)
encoder = models.EncoderModel(config=frozen_config)
rng = jax.random.PRNGKey(0)
init_batch = init_encoder_batch(config)
with self.assertRaisesRegex(
ValueError,
"must be a power of 2 to take advantage of FFT optimizations"):
_ = init_model_params(rng, encoder, init_batch)
@parameterized.parameters(
dict(
model_arch=ModelArchitecture.BERT,
mixing_layer_name="self_attention"),
dict(
model_arch=ModelArchitecture.LINEAR,
mixing_layer_name="linear_transform"))
def test_parameterized_mixing_encoder(self, model_arch,
mixing_layer_name):
config = dummy_config(model_arch=model_arch)
frozen_config = ml_collections.FrozenConfigDict(config)
encoder = models.EncoderModel(config=frozen_config)
rng = jax.random.PRNGKey(0)
init_batch = init_encoder_batch(config)
params = init_model_params(rng, encoder, init_batch)
expected_keys = {
"embedder", "encoder_0", "encoder_1", "feed_forward_0",
"feed_forward_1", f"{mixing_layer_name}_0", f"{mixing_layer_name}_1",
"pooler"
}
self.assertEqual(params.keys(), expected_keys)
inputs = dummy_inputs(rng, config)
hidden_states, pooled_output = encoder.apply({"params": params},
rngs={"dropout": rng},
**inputs)
expected_hidden_states_shape = (config.train_batch_size,
config.max_seq_length, config.d_model)
self.assertEqual(hidden_states.shape, expected_hidden_states_shape)
expected_pooled_output_shape = (config.train_batch_size, config.d_model)
self.assertEqual(pooled_output.shape, expected_pooled_output_shape)
@parameterized.parameters(
dict(
attention_layout=Hy
|
bridAttentionLayout.BOTTOM,
num_attention_layers=0,
expected_attention_layers=[]),
dict(
|
attention_layout=HybridAttentionLayout.MIDDLE,
num_attention_layers=2,
expected_attention_layers=[1, 2]),
dict(
attention_layout=HybridAttentionLayout.MIXED,
num_attention_layers=2,
expected_attention_layers=[0, 2]),
dict(
attention_layout=HybridAttentionLayout.TOP,
num_attention_layers=1,
expected_attention_layers=[3]))
def test_hybrid_encoder(self, attention_layout,
num_attention_layers,
expected_attention_layers):
config = dummy_config(model_arch=ModelArchitecture.F_NET)
with config.unlocked():
config.num_layers = 4
config.attention_layout = attention_layout
config.num_attention_layers = num_attention_layers
frozen_config = ml_collections.FrozenConfigDict(config)
encoder = models.EncoderModel(config=frozen_config)
rng = jax.random.PRNGKey(0)
init_batch = init_encoder_batch(config)
params = init_model_params(rng, encoder, init_batch)
expected_keys = {
"embedder", "encoder_0", "encoder_1", "encoder_2", "encoder_3",
"feed_forward_0", "feed_forward_1", "feed_forward_2", "feed_forward_3",
"pooler"
}
for expected_attention_layer in expected_attention_layers:
expected_keys.add(f"self_attention_{expected_attention_layer}")
self.assertEqual(params.keys(), expected_keys)
inputs = dummy_inputs(rng, config)
hidden_states, pooled_output = encoder.apply({"params": params},
rngs={"dropout": rng},
**inputs)
expected_hidden_states_shape = (config.train_batch_size,
config.max_seq_length, config.d_model)
self.assertEqual(hidden_states.shape, expected_hidden_s
|
Laisky/laisky-blog
|
gargantua/apis/__init__.py
|
Python
|
apache-2.0
| 130
| 0
|
# import ipdb; ipdb.set_trace()
from .posts
|
import PostAPIHandler, PostCategoriesAPI
|
Handler
from .tweets import TweetsAPIHandler
|
kubeflow/kfp-tekton-backend
|
sdk/python/kfp/notebook/__init__.py
|
Python
|
apache-2.0
| 598
| 0
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0
|
(the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the Li
|
cense.
from . import _magic
|
nikhilponnuru/codeCrumbs
|
code/create_file.py
|
Python
|
mit
| 4,006
| 0.01972
|
#!/usr/bin/env python
#to create a file in codesnippets folder
import pyperclip
import os
import re
import subprocess
def get_extension(file_name):
if file_name.find('.')!=-1:
ext = file_name.split('.')
return (ext[1])
else:
return 'txt'
def cut(str, len1):
return str[len1 + 1:] #to remove first line which is meant for reading from which file
#for displaying contents
def find(name, path):
for root, dirs, files in os.walk(path):
if name in files:
return os.path.join(root, name)
#ubuntu notification (message sending)
def sendmessage(mes
|
sage):
subprocess.Popen(['notify-send', message])
return
while True:
str = pyperclip.paste()
if (str==" "):
continue
str_low = str.lower()
str_lower=str_low.split("\n") #this is to ensure that only create a file if "add to code snippets" line is first line since if this line is present with other text which is not intended to be saved (i.e in btw that unwanted text
#as we are using regular expression it checks a pattern in
|
a given text so "add to code snippets " must be definitely first line
if(str_lower[0]=="stop -safe"):
sendmessage("Stopped the background process for code snippet management...byebye")
os.exit()
if (str_lower[0].find("add") != -1 and str_lower[0].find("code")!=-1 and
str_lower[0].find("snippets") !=-1 and str_lower[0].find("-safe") !=-1 ):
if re.search(r'\w+\.[a-z,A-Z]',str_lower[0])==None:
sendmessage("SPECIFY FILEEXTENSION (default file type is txt)")
str1 = str.split('\n')
str2 = str1[0].split(' ')
length = len(str2)
file_name = str2[length - 2]
new_str = cut(str, len(str1[0]))
# until here we removed first line which contains " add this to code snippet filename"
# print new_str
# creating a file with the above name
try:
# code_snippets is the head folder
if not os.path.exists('/home/nikhil/code_snippets'):
os.makedirs('/home/nikhil/code_snippets') # creating the directory if not exists
extension = get_extension(file_name)
# creating a folder with respective extenion names in uppercase
if not os.path.exists('/home/nikhil/code_snippets/'
+ extension.upper()):
os.makedirs('/home/nikhil/code_snippets/' + extension.upper())
print
# creating a file in respective folder
if not os.path.exists('/home/nikhil/code_snippets/' + extension.upper() + '/'
+ file_name):
name = open('/home/nikhil/code_snippets/' + extension.upper() + '/'
+ file_name, 'w')
name.write(new_str)
name.truncate()
name.close()
sendmessage("successfully added to code snippets collection")
pyperclip.copy(" ")
except Exception:
try:
already_exists = open('/home/nikhil/code_snippets/' + extension.upper() + '/'
+ file_name, 'a+')
#new_str = cut(str, len(str1[0]))
str_from_file = already_exists.read()
#already_exists.seek(0) #http://stackoverflow.com/questions/6648493/open-file-for-both-reading-and-writing#answer-15976014
already_exists.write('\n\n@@\n'+new_str)
already_exists.truncate()
already_exists.close()
sendmessage("successfully added to code snippets collection (code has been appended to already existing file with same name)")
str=pyperclip.copy(" ")
except:
print "oops some error in finding file to append content"
sendmessage("ERROR OCCURED")
pyperclip.copy(" ")
os.system('python /home/nikhil/Desktop/haha.py')
|
Nimmard/james-olson.com
|
main/migrations/0001_initial.py
|
Python
|
gpl-2.0
| 1,003
| 0.018943
|
# encoding: utf8
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = []
operations = [
migrations.CreateModel(
fields = [(u'id', models.AutoField(verbose_name=u'ID', serialize=False, auto_created=True, primary_key=True),), ('name', models.CharField(max_length=255),), ('email', models.EmailField(max_length=75),), ('message', models.TextField(),), ('date', models.DateField(auto_now=True),)],
bases = (models.Model,),
options = {},
name = 'Contact',
),
migrations.CreateModel(
|
fields = [(u'id', models.AutoField(verbose_name=u'ID', serialize=False, auto_created=True, primary_key=True),), ('date', models.DateTimeField(),), ('title', models.CharField(max_length=255),), ('code', models.CharField(max_length=255),), ('summary', models.TextField(),)],
bases = (models.Model,),
options = {},
name = 'Commits',
),
]
|
msabramo/django-netezza
|
netezza/pyodbc/introspection.py
|
Python
|
bsd-3-clause
| 4,001
| 0.002999
|
from django.db.backends import BaseDatabaseIntrospection
import pyodbc as Database
import types
import datetime
import decimal
class DatabaseIntrospection(BaseDatabaseIntrospection):
# Map type codes to Django Field types.
data_types_reverse = {
types.StringType: 'TextField',
types.UnicodeType: 'TextField',
types.LongType: 'IntegerField',
types.IntType: 'IntegerField',
types.BooleanType: 'BooleanField',
types.FloatType: 'FloatField',
datetime.datetime: 'DateTimeField',
datetime.date: 'DateField',
datetime.time: 'TimeField',
decimal.Decimal: 'DecimalField',
}
def get_table_list(self, cursor):
"""
Returns a list of table names in the current database.
"""
# db = cursor.db.alias
# if db == 'default':
db = 'public'
cursor.execute("""
SELECT distinct objname
FROM _v_obj_relation
WHERE objclass IN (4905,4906,4908,4907,4909,4940,4911,4913,4953);""")
return [row[0] for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name, identity_check=True):
"Returns a description of the table, with the DB-API cursor.description interface."
cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
return cursor.description
def _name_to_index(self, cursor, table_name):
"""
Returns a dictionary of {field_name: field_index} for the given table.
Indexes are 0-based.
"""
return dict([(d[0], i) for i, d in enumerate(self.get_table_description(cursor, table_name, identity_check=False))])
def get_relations(self, cursor, table_name):
return []
"""
Returns a dictionary of {field_index: (field_index_other_table, other_table)}
representing all relationships to the given table. Indexes are 0-based.
"""
cursor.execute("""
SELECT fk.ORDINAL_POSITION, col.ORDINAL_POSITION, fk.REFERENCE_TABLE_NAME
FROM FOREIGN_KEYS fk
INNER JOIN COLUMNS col on fk.REFERENCE_COLUMN_NAME = col.COLUMN_NAME
and fk.REFERENCE_TABLE_NAME = col.TABLE_NAME
WHERE fk.TABLE_NAME = %s
""", [table_name])
relations = {}
for row in cursor.fetchall():
# row[0] and row[1] are like "{2}", so strip the curly braces.
relations[row[0]] = (row[1], row[2])
return relations
def get_indexes(self, cursor, table_name):
return []
"""
Returns a dictionary of fieldname -> infodict for the given table,
where each infodict is in the format:
{'primary_key': boolean representing whether it's the primary key,
'unique': boolean representing whether it's a unique index,
'db_index': boolean representing whether it's a non-unique index}
"""
cursor.execute("""
SELECT col.COLUMN_NAME,pk.CONSTRAINT_TYPE
FROM V_CATALOG.COLUMNS col
left join V_CATALOG.PRIMARY_KEYS pk
ON col.TABLE_NAME = pk.TABLE_NAME AND col.COLUMN_NAME = pk.COLUMN_NAME
WHERE col.TABLE_NAME = %s""", [table_name])
indexes = {}
for row in cursor.fetchall():
indexes[row[0]] = {'primary_key': row[1] == 'p', 'unique': False}
return indexes
def get_field_type(self, data_type, description):
"""Hook for a database backend to use the cursor description to
match a Django field type to a database column.
For Oracle, the column data_type on its own is insufficient to
distinguish between a FloatField and IntegerField, for example."""
try:
return self.data_types_reverse[data_type]
except:
print '*' * 10,'DEBUG add the type', data_type, 'to introspection.py'
raise
|
jogral/tigris-python-sdk
|
tigrissdk/auth/permission.py
|
Python
|
apache-2.0
| 3,756
| 0
|
# coding: utf-8
from __future__ import unicode_literals, absolute_import
from ..exception import TigrisException
import urllib.parse
class Permission(object):
""" Tigris Permission object """
BASE_ENDPOINT = 'permissions'
def __init__(self, permission_obj, session):
"""
:param permission_obj:
The permission data.
:type permission_obj:
`dict`
:param session:
The network session.
:type session:
:class:`TigrisSession`
"""
self._session = session
self._populate(permission_obj)
@property
def id(self):
return self._id
def _populate(self, permission_obj):
try:
self._id = permission_obj['id']
except KeyError:
self._id = False
try:
self.name = permission_obj['name']
except KeyError:
self.name = None
try:
self.description = permission_obj['description']
|
except KeyError:
self.description = None
try:
self.is_active = permission_obj['is_active']
except KeyError:
self.is_active = None
def activate(self):
"""
Changes `is_active` to `True`
"""
if not self._id:
raise TigrisException(
'ERROR: You are activate an unsaved permission. '
'Please save it first, then activate.')
self.is_active = True
query = '?' + urllib.parse.urlencode({'activate': self.is_active})
url = '{0}/{1}'.format(self.BASE_ENDPOINT, self._id)
url = url + query
self._session._put(url, data={})
def deactivate(self):
"""
Changes `is_active` to `False`
"""
if not self._id:
raise TigrisException(
'ERROR: You are activate an unsaved permission. '
'Please save it first, then activate.')
self.is_active = False
query = '?' + urllib.parse.urlencode({'activate': self.is_active})
url = '{0}/{1}'.format(self.BASE_ENDPOINT, self._id)
url = url + query
self._session._put(url, data={})
def destroy(self):
"""
Deletes Permission
"""
url = '{0}/{1}'.format(self.BASE_ENDPOINT, self._id)
self._session._delete(url)
def get(self):
"""
Retrieves Permission
:rtype:
`dict`
"""
if self._id:
url = '{0}/{1}'.format(self.BASE_ENDPOINT, self._id)
content, status_code, headers = self._session._get(url)
self._populate(content)
return content
else:
return None
def save(self, new=False):
"""
Upserts the User object.
:param new:
Determines whether or not this User is to be inserted or updated.
:type new:
`bool`
:rtype:
`dict`
"""
permission_obj = dict(vars(self))
del permission_obj['_session']
del permission_obj['_id']
if new:
content, status_code, headers = self._session._post(
self.BASE_ENDPOINT,
data={'fields': permission_obj})
if 'error' in content:
raise TigrisException(content['error'])
self._populate(content)
else:
url = '{0}/{1}'.format(self.BASE_ENDPOINT, self._id)
content, status_code, headers = self._session._patch(
url,
data={'fields': permission_obj})
if 'error' in content:
raise TigrisException(content['error'])
self.get()
return self
|
ionrock/json_url_rewriter
|
json_url_rewriter/middleware.py
|
Python
|
bsd-3-clause
| 2,398
| 0.000417
|
import json
from json_url_rewriter import config
from json_url_rewriter.rewrite import URLRewriter
class HeaderToPathPrefixRewriter(object):
"""
A rewriter to take the value of a header and prefix any path.
"""
def __init__(self, keys, base, header_name):
self.keys = keys
self.base = base
self.header_name = header_name
@property
def regex(self):
return '(%s)(.*)' % self.base
def header(self):
|
return 'HTTP_' + self.header_name.upper().replace('-', '_')
def __call__(self, doc, environ):
key = self.header()
if not key in environ:
return doc
prefix = environ[key]
def replacement(match):
base, path = match.groups()
return '%s/%s%s' % (base, prefix, path)
rewriter = URLRewriter(self.keys, self.regex, replacement)
return rewriter(doc)
class RewriteMiddleware(object):
def __init__(self, app, rewriter):
self.app = app
self.rewriter = rewriter
@staticmethod
def content_type(headers):
return dict([(k.lower(), v) for k, v in headers]).get('content-type')
def is_json(self, headers):
return 'json' in self.content_type(headers)
@staticmethod
def ok(status):
return status.startswith('20')
def rewrite(self, resp, environ):
doc = self.rewriter(self.json(resp), environ)
return json.dumps(doc)
def json(self, resp):
return json.loads(''.join(resp))
def __call__(self, environ, start_response):
# Set a local variable for the request
self.do_rewrite = False
# Our request local start response wrapper to grab the
# response headers
def sr(status, response_headers, exc_info=None):
if self.ok(status) and self.is_json(response_headers):
self.do_rewrite = True
# Call the original start_response
return start_response(status, response_headers, exc_info)
# call our app
resp = self.app(environ, sr)
# Our local variable should have been set to True if we should
# rewrite
if self.do_rewrite:
return [self.rewrite(resp, environ)]
return resp
def json_url_rewriter_filter_factory(global_conf, *args, **kw):
print(global_conf, args, kw)
raise Exception('Blastoff')
|
Sergiy-DBX/pynet_test-
|
Files/Ex_1.py
|
Python
|
unlicense
| 661
| 0
|
#!/usr/bin/env python
from __future__ import print_function
with open("../File_example.txt")
|
as file_in:
for line in file_in:
print(line.strip())
print('#' * 40)
file_to_write = open("../File_example.txt", "wt")
print(file_to_write)
file_to_write.write("Line one\nLine two\nLine three\n")
file_to_write.flush()
file_to_write.close()
print('#' * 40)
append_file = open("../File_example.txt", "at")
append_file.write("Line Three and Half\n")
append_file.flush()
append_file.seek(0)
append_file.write("Line addition\n")
append_file.flush()
append_file.close()
print(append_file)
file_read = open("../File_example.txt")
print(file_read.read())
|
powervm/pypowervm
|
pypowervm/tests/utils/test_uuid.py
|
Python
|
apache-2.0
| 2,049
| 0
|
# Copyright 2015, 2018 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from pypowervm.utils import uuid as uuid_utils
import unittest
class TestUUID(unittest.TestCase):
"""Unit tests for the uuid."""
def test_uuid_conversion(self):
uuid = '089ffb20-5d19-4a8c-bb80-13650627d985'
pvm_uuid = uuid_utils.convert_uuid_to_pvm(uuid)
self.assertEqual(uuid, pvm_uuid)
uuid = '989ffb20-5d19-4a8c-bb80-13650627d985'
pvm_uuid = uuid_utils.convert_uuid_to_pvm(uuid)
self.assertEqual('1' + uuid[1:], pvm_uuid)
uuid = 'c89ffb20-5d19-4a8c-bb80-13650627d985'
pvm_uuid = uuid_utils.convert_uuid_to_pvm(uuid)
self.assertEqual('4' + uuid[1:], pvm_uuid)
def test_id_or_uuid(self):
self.assertEqual((False, 123), uuid_utils.id_or_uuid(123))
# Test all stringish permutations
converters = [lambda x: x, six.text_type]
for conv in converters:
self.assertEqual((False, 123), uuid_utils.id_or_uuid(conv('123')))
uuid = conv('12345678-abcd-ABCD-0000-0a1B2c3D4e5F')
self.assertEqual((True, uuid), uuid_utils.id_or_uuid(uuid))
uuid = conv('12345678abcdABCD00000a1B2c3D4e5F')
self.assertEqual((True, uuid), uuid_utils.id_or_uuid(uuid))
# This one has too many digits
self.assertRaises(ValueError, uuid_utils.id_or_uuid,
conv('12345678-abcd-ABCD-0000-0a1B2c3D4e5F0'))
|
baike21/blog
|
blogadmin/migrations/0007_auto_20170828_2317.py
|
Python
|
gpl-3.0
| 3,940
| 0.005076
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-28 15:17
from __future__ import unicode_literals
import DjangoUeditor.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blogadmin', '0006_auto_20170827_1142'),
]
operations = [
migrations.CreateModel(
name='BookReview',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=128, null=True, verbose_name='\u6807\u9898')),
('tag', models.CharField(blank=True, max_length=32, null=True, verbose_name='\u6807\u7b7e')),
('pub_time', models.DateTimeField(auto_now_add=True, verbose_name='\u53d1\u5e03\u65f6\u95f4')),
('update_time', models.DateTimeField(auto_now=True, null=True, verbose_name='\u66f4\u65b0\u65f6\u95f4')),
('content', DjangoUeditor.models.UEditorField(blank=True, default='', verbose_name='\u6b63\u6587')),
],
options={
'ordering': ['-update_time'],
'verbose_name': '\u4e66\u520a\u8bc4\u8bba',
'verbose_name_plural': '\u4e66\u520a\u8bc4\u8bba',
},
),
migrations.CreateModel(
name='Essay',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=128, null=True, verbose_name='\u6807\u9898')),
('tag', models.CharField(blank=True, max_length=32, null=True, verbose_name='\u6807\u7b7e')),
('pub_time', models.DateTimeField(auto_now_add=True, verbose_name='\u53d1\u5e03\u65f6\u95f4')),
('update_time', models.DateTimeField(auto_now=True, null=True, verbose_name='\u66f4\u65b0\u65f6\u95f4')),
('content', DjangoUeditor.models.UEditorField(blank=True, default='', verbose_name='\u6b63\u6587')),
],
options={
'ordering': ['-update_time'],
|
'verbose_name': '\u6742\u6587',
'verbose_name_plural': '\u6742\u6587',
},
),
migrations.CreateModel(
name='FilmReview',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=128, null=True, verbose_name='\u6807\u9898')),
('tag', models.CharField(blank=True, max_length=32, null=True, verbose_name='\u6807\u7b7e')),
('pub_time', models.DateTimeField(auto_now_add=True, verbose_name='\u53d1\u5e03\u65f6\u95f4')),
('update_time', models.DateTimeField(auto_now=True, null=True, verbose_name='\u66f4\u65b0\u65f6\u95f4')),
('content', DjangoUeditor.models.UEditorField(blank=True, default='', verbose_name='\u6b63\u6587')),
],
options={
'ordering': ['-update_time'],
'verbose_name': '\u5f71\u89c6\u8bc4\u8bba',
'verbose_name_plural': '\u5f71\u89c6\u8bc4\u8bba',
},
),
migrations.AlterModelOptions(
name='article',
options={'ordering': ['-update_time'], 'verbose_name': '\u6280\u672f\u7c7b\u535a\u5ba2', 'verbose_name_plural': '\u6280\u672f\u7c7b\u535a\u5ba2'},
),
migrations.AlterField(
model_name='article',
name='category',
field=models.CharField(choices=[('web', 'Web\u5f00\u53d1'), ('linux', '\u7cfb\u7edf\u8fd0\u7ef4'), ('algorithm', '\u7b97\u6cd5'), ('language', '\u7f16\u7a0b\u8bed\u8a00'), ('others', '\u5176\u4ed6')], default='web', max_length=64, verbose_name='\u7c7b\u522b'),
),
]
|
stefanklug/mapnik
|
scons/scons-local-2.3.6/SCons/Tool/pdf.py
|
Python
|
lgpl-2.1
| 3,010
| 0.007641
|
"""SCons.Tool.pdf
Common PDF Builder definition for various other Tool modules that use it.
Add an explicit action to run epstopdf to convert .eps files to .pdf
"""
#
# Copyright (c) 2001 - 2015 The SCons Foundation
|
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/pdf.py rel_2.3.5:3347:d31d5a4e74b6 2015/07/31 14:36:10 bdbaddog"
import SCons.Builder
import SCons.Tool
PDFBuilder = None
EpsPdfAction = SCons.Action.Action('$EPSTOPDFCOM', '$EPSTOPDFCOMSTR')
def generate(env):
try:
env['BUILDERS']['PDF']
except KeyError:
global PDFBuilder
if PDFBuilder is None:
PDFBuilder = SCons.Builder.Builder(action = {},
source_scanner = SCons.Tool.PDFLaTeXScanner,
prefix = '$PDFPREFIX',
suffix = '$PDFSUFFIX',
emitter = {},
source_ext_match = None,
single_source=True)
env['BUILDERS']['PDF'] = PDFBuilder
env['PDFPREFIX'] = ''
env['PDFSUFFIX'] = '.pdf'
# put the epstopdf builder in this routine so we can add it after
# the pdftex builder so that one is the default for no source suffix
def generate2(env):
bld = env['BUILDERS']['PDF']
#bld.add_action('.ps', EpsPdfAction) # this is covered by direct Ghostcript action in gs.py
bld.add_action('.eps', EpsPdfAction)
env['EPSTOPDF'] = 'epstopdf'
env['EPSTOPDFFLAGS'] = SCons.Util.CLVar('')
env['EPSTOPDFCOM'] = '$EPSTOPDF $EPSTOPDFFLAGS ${SOURCE} --outfile=${TARGET}'
def exists(env):
# This only puts a skeleton Builder in place, so if someone
# references this Tool directly, it's always "available."
return 1
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
analysiscenter/dataset
|
batchflow/tests/config_test.py
|
Python
|
apache-2.0
| 2,864
| 0.000349
|
# pylint: disable=redefined-outer-name, missing-docstring
import sys
import pytest
sys.path.append('..')
from batchflow import Config
@pytest.fixture
def config():
_config = dict(key1='val1', key2=dict())
_config['key2']['subkey1'] = 'val21'
return Config(_config)
class TestConfig:
def test_getitem_key(self, config):
assert config['key1'] == config.config['key1']
def test_getitem_missing_key(self, config):
with pytest.raises(KeyError):
_ = config['missing key']
def test_getitem_nested_key(self, config):
assert config['key2/subkey1'] == config.config['key2']['subkey1']
def test_get_key(self, config):
assert config.get('key1') == config.config.get('key1')
def test_get_nested_key(self, config):
assert config.get('key2/subkey1') == config.config['key2']['subkey1']
def test_get_missing_key(self, config):
assert config.get('missing key') is None
def test_get_missing_key_with_default(self, config):
assert config.get('missing key', default=1) == 1
def test_get_nested_missing_key_with_default(self, config):
assert config.get('key2/missing key', default=1) == 1
def test_pop_key(self, config):
val = config.config.get('key1')
assert config.pop('key1') == val
assert 'key1' not in config, 'key should have been deleted'
def test_pop_nested_key(self, config):
val = config.config['key2']['subkey1']
assert config.pop('key2/subkey1') == val
assert 'subkey1' not in config, 'nested key should have been deleted'
assert 'key2' in config, 'outer key should remain'
def test_pop_missing_key(self, config):
with pytest.raises(KeyError):
_ = config.pop('missing key')
def test_pop_missing_key_with_default(self, config):
assert config.pop('missing key', default=1) == 1
def test_pop_nested_missing_key_with_default(self, config):
assert config.pop('key2/missing key', default=1) == 1
def test_setitem_key(self, config):
config['key1'] = 'new_val1'
assert config['key1'] == config.config['key1']
assert config.config['key1'] == 'new_val1'
def test_setitem_nested_key(self, config):
config['key2/subkey1'] = 'new_val21'
assert config['key2/subkey1'] == config.config['key2']['subkey1']
assert config.config['key2']['subkey1'] == 'new_val21'
def test_setitem_new_key(self, config):
config['key0'] = 'new_val0'
assert config['key0'] == config.config['key0']
assert config.config['key0'] == 'new_val0'
def test_setitem_nested_new_key(self, config):
config['key2/subkey2'] = 'new_val22'
assert config['key2/subkey2'] == config.config['key2']['subkey2']
assert config.config['key2']['subkey2'] == 'new_val22'
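# Editor's sketch (not part of the original tests): a compact, self-contained
# illustration of the slash-separated access the tests above exercise.
def _example_nested_access():
    cfg = Config(dict(key2=dict(subkey1='val21')))
    assert cfg['key2/subkey1'] == 'val21'      # nested read via "outer/inner"
    cfg['key2/subkey2'] = 'new_val22'          # setting a nested key creates it
    assert cfg.pop('key2/subkey2') == 'new_val22'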
|
flaub/plaidml
|
plaidml/context.py
|
Python
|
agpl-3.0
| 865
| 0
|
# Copyright Vertex.AI
import ctypes
import json
class Context(object):
def __init__(self, lib):
self._as_parameter_ = lib.vai_alloc_ctx()
if not self._as_parameter_:
raise MemoryError('PlaidML operation context')
self._free = lib.vai_free_ctx
self._cancel = lib.vai_cancel_ctx
self._set_eventlog = lib.vai_set_eventlog
def __del__(self):
self.shutdown()
def cancel(self):
self._cancel(self)
def set_eventlog_filename(self, filename):
|
config = {
'@type': 'type.vertex.ai/vertexai.eventing.file.proto.EventLog',
'filename': filename
}
self._set_eventlog(self, json.dumps(config))
def shutdown(self):
if hasattr(self, '_free') and self._as_parameter_:
self._free(self)
self._as_parameter_ = None
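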
|
cwacek/python-jsonschema-objects
|
test/test_regression_156.py
|
Python
|
mit
| 1,032
| 0
|
import pytest # noqa
import python_jsonschema_objects as pjo
def test_regression_156(markdown_examples):
builder = pjo.ObjectBuilder(
markdown_examples["MultipleObjects"], resolved=markdown_examples
)
classes = builder.build_classes(named_only=True)
er = classes.ErrorResponse(message="Danger!", status=99)
vgr = classes.VersionGetResponse(local=False, version="1.2.3")
# round-trip serialize-deserialize into named classes
classes.ErrorResponse.from_json(er.serialize())
classes.VersionGetResponse.from_json(vgr.serialize())
# round-trip serialize-deserialize into class defined with `oneOf`
classes.Multipleobjects.from_json(er.serialize())
classes.Multipleobjects.from_json(vgr.serialize())
def test_toplevel_oneof_gets_a_name(markdown_examples):
builder = pjo.ObjectBuilder(
markdown_examples["MultipleObjects"], resolved=markdown_examples
)
classes = builder.build_classes(named_only=True)
assert classes.Multipleobjects.__title__ is not None
|
kreatorkodi/repository.torrentbr
|
plugin.video.yatp/site-packages/hachoir_parser/misc/gnome_keyring.py
|
Python
|
gpl-2.0
| 6,255
| 0.003357
|
"""
Gnome keyring parser.
Sources:
- Gnome Keyring source code,
function generate_file() in keyrings/gkr-keyring.c,
Author: Victor Stinner
Creation date: 2008-04-09
"""
from hachoir_core.tools import paddingSize
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet,
Bit, NullBits, NullBytes,
UInt8, UInt32, String, RawBytes, Enum,
TimestampUnix64, CompressedField,
SubFile)
from hachoir_core.endian import BIG_ENDIAN
try:
import hashlib
def sha256(data):
hash = hashlib.new('sha256')
hash.update(data)
return hash.digest()
except ImportError:
def sha256(data):
raise ImportError("hashlib module is missing")
try:
from Crypto.Cipher import AES
class DeflateStream:
def __init__(self, stream):
hash_iterations = 1234
password = "x" * 8
salt = "\0" * 8
key, iv = generate_key(password, salt, hash_iterations)
self.cipher = AES.new(key, AES.MODE_CBC, iv)
def __call__(self, size, data=None):
if data is None:
return ''
return self.cipher.decrypt(data)
def Deflate(field):
CompressedField(field, DeflateStream)
return field
except ImportError:
def Deflate(field):
return field
class KeyringString(FieldSet):
def createFields(self):
yield UInt32(self, "length")
length = self["length"].value
if length == 0xffffffff:
return
yield String(self, "text", length, charset="UTF-8")
def createValue(self):
if "text" in self:
return self["text"].value
else:
return u''
def createDescription(self):
if "text" in self:
return self["text"].value
else:
return u"(empty string)"
class Attribute(FieldSet):
def createFields(self):
yield KeyringString(self, "name")
yield UInt32(self, "type")
type = self["type"].value
if type == 0:
yield KeyringString(self, "value")
elif type == 1:
yield UInt32(self, "value")
else:
raise TypeError("Unknown attribute type (%s)" % type)
def createDescription(self):
return 'Attribute "%s"' % self["name"].value
class ACL(FieldSet):
def createFields(self):
yield UInt32(self, "types_allowed")
yield KeyringString(self, "display_name")
yield KeyringString(self, "pathname")
yield KeyringString(self, "reserved[]")
yield UInt32(self, "reserved[]")
class Item(FieldSet):
def createFields(self):
yield UInt32(self, "id")
yield UInt32(self, "type")
yield UInt32(self, "attr_count")
for index in xrange(self["attr_count"].value):
yield Attribute(self, "attr[]")
def createDescription(self):
return "Item #%s: %s attributes" % (self["id"].value, self["attr_count"].value)
class Items(FieldSet):
def createFields(self):
yield UInt32(self, "count")
for index in xrange(self["count"].value):
yield Item(self, "item[]")
class EncryptedItem(FieldSet):
def createFields(self):
yield KeyringString(self, "display_name")
yield KeyringString(self, "secret")
yield TimestampUnix64(self, "mtime")
yield TimestampUnix64(self, "ctime")
yield KeyringString(self, "reserved[]")
for index in xrange(4):
yield UInt32(self, "reserved[]")
yield UInt32(self, "attr_count")
for index in xrange(self["attr_count"].value):
yield Attribute(self, "attr[]")
yield UInt32(self, "acl_count")
for index in xrange(self["acl_count"].value):
yield ACL(self, "acl[]")
# size = 8 # paddingSize((self.stream.size - self.current_size) // 8, 16)
# if size:
# yield NullBytes(self, "hash_padding", size, "16 bytes alignment")
class EncryptedData(Parser):
PARSER_TAGS = {
"id": "gnomeencryptedkeyring",
"min_size": 16*8,
"description": u"Gnome encrypted keyring",
}
endian = BIG_ENDIAN
def validate(self):
return True
def createFields(self):
yield RawBytes(self, "md5", 16)
while True:
size = (self.size - self.current_size) // 8
if size < 77:
break
yield EncryptedItem(self, "item[]")
size = paddingSize(self.current_size // 8, 16)
if size:
yield NullBytes(self, "padding_align", size)
class GnomeKeyring(Parser):
MAGIC = "GnomeKeyring\n\r\0\n"
PARSER_TAGS = {
"id": "gnomekeyring",
"category": "misc",
"magic": ((MAGIC, 0),),
"min_size": 47*8,
"description": u"Gnome keyring",
}
CRYPTO_NAMES = {
0: u"AEL",
}
HASH_NAMES = {
0: u"MD5",
}
endian = BIG_ENDIAN
def validate(self):
if self.stream.readBytes(0, len(self.MAGIC)) != self.MAGIC:
return u"Invalid magic string"
return True
def createFields(self):
yield String(self, "magic", len(self.MAGIC), 'Magic string (%r)' % self.MAGIC, charset="ASCII")
yield UInt8(self, "major_version")
yield UInt8(self, "minor_version")
yield Enum(UInt8(self, "crypto"), self.CRYPTO_NAMES)
yield Enum(UInt8(self, "hash"), self.HASH_NAMES)
yield KeyringString(self, "keyring_name")
yield TimestampUnix64(self, "mtime")
yield TimestampUnix64(self, "ctime")
yield Bit(self, "lock_on_idle")
yield NullBits(self, "reserved[]", 31, "Reserved for future flags")
yield UInt32(self, "lock_timeout")
yield UInt32(self, "hash_iterations")
yield RawBytes(self, "salt", 8)
yield NullBytes(self, "reserved[]", 16)
yield Items(self, "items")
yield UInt32(self, "encrypted_size")
yield Deflate(SubFile(self, "encrypted", self["encrypted_size"].value, "AES128 CBC", parser_class=EncryptedData))
def generate_key(password, salt, hash_iterations):
sha = sha256(password+salt)
for index in xrange(hash_iterations-1):
sha = sha256(sha)
return sha[:16], sha[16:]
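# Editor's note (not part of the original parser): generate_key derives the AES
# key and IV by iterating SHA-256 over password+salt and splitting the digest.
# For the hard-coded values used by DeflateStream above that is, roughly:
#   digest = sha256("x" * 8 + "\0" * 8)
#   repeat digest = sha256(digest) a further 1233 times (1234 iterations total)
#   key, iv = digest[:16], digest[16:]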
|
beebyte/irisett
|
irisett/contact.py
|
Python
|
mit
| 17,565
| 0.003985
|
"""Basic contact management functions.
Contacts are linked to monitors and are used to determine where to send
alerts for monitors.
Contacts are basic name/email/phone sets.
Contacts are only stored in the database and not in memory, they are loaded
from the database each time an alert is sent.
"""
from typing import Dict, Iterable, Optional, Any, Set
from irisett.sql import DBConnection, Cursor
from irisett import (
errors,
object_models,
)
from irisett.object_exists import (
contact_exists,
active_monitor_exists,
contact_group_exists,
)
async def create_contact(dbcon: DBConnection, name: Optional[str], email: Optional[str],
phone: Optional[str], active: bool) -> str:
"""Add a contact to the database."""
q = """insert into contacts (name, email, phone, active) values (%s, %s, %s, %s)"""
q_args = (name, email, phone, active)
contact_id = await dbcon.operation(q, q_args)
return contact_id
async def update_contact(dbcon: DBConnection, contact_id: int, data: Dict[str, str]) -> None:
"""Update a contacts information in the database.
Data is a dict with name/email/phone/active values that
will be updated.
"""
async def _run(cur: Cursor) -> None:
for key, value in data.items():
if key not in ['name', 'email', 'phone', 'active']:
raise errors.IrisettError('invalid contact key %s' % key)
q = """update contacts set %s=%%s where id=%%s""" % key
q_args = (value, contact_id)
await cur.execute(q, q_args)
if not await contact_exists(dbcon, contact_id):
raise errors.InvalidArguments('contact does not exist')
await dbcon.transact(_run)
async def delete_contact(dbcon: DBConnection, contact_id: int) -> None:
"""Remove a contact from the database."""
if not await contact_exists(dbcon, contact_id):
raise errors.InvalidArguments('contact does not exist')
q = """delete from contacts where id=%s"""
await dbcon.operation(q, (contact_id,))
async def create_contact_group(dbcon: DBConnection, name: str, active: bool) -> str:
"""Add a contact group to the database."""
q = """insert into contact_groups (name, active) values (%s, %s)"""
q_args = (name, active)
contact_group_id = await dbcon.operation(q, q_args)
return contact_group_id
async def update_contact_group(dbcon: DBConnection, contact_group_id: int, data: Dict[str, str]) -> None:
"""Update a contact groups information in the database.
Data is a dict with name/active values that will be updated.
"""
async def _run(cur: Cursor) -> None:
for key, value in data.items():
if key not in ['name', 'active']:
raise errors.IrisettError('invalid contact key %s' % key)
q = """update contact_groups set %s=%%s where id=%%s""" % key
q_args = (value, contact_group_id)
await cur.execute(q, q_args)
if not await contact_group_exists(dbcon, contact_group_id):
raise errors.InvalidArguments('contact group does not exist')
await dbcon.transact(_run)
async def delete_contact_group(dbcon: DBConnection, contact_group_id: int) -> None:
"""Remove a contact group from the database."""
if not await contact_group_exists(dbcon, contact_group_id):
raise errors.InvalidArguments('contact group does not exist')
q = """delete from contact_groups where id=%s"""
await dbcon.operation(q, (contact_group_id,))
async def get_all_contacts_for_active_monitor(dbcon: DBConnection, monitor_id: int) -> Iterable[object_models.Contact]:
"""Get a list of all contacts for an active monitor.
This includes directly attached contacts, contacts from contact groups,
monitor groups etc.
"""
contacts = set()
contacts.update(await _active_monitor_contacts(dbcon, monitor_id))
contacts.update(await _active_monitor_contact_groups(dbcon, monitor_id))
contacts.update(await _active_monitor_monitor_group_contacts(dbcon, monitor_id))
contacts.update(await _active_monitor_monitor_group_contact_groups(dbcon, monitor_id))
return list(contacts)
async def _active_monitor_contacts(dbcon: DBConnection, monitor_id: int) -> Set[object_models.Contact]:
# Get contacts directly connected to the monitor.
q = """select
contacts.id, contacts.name, contacts.email, contacts.phone, contacts.active
from active_monitor_contacts, contacts
where active_monitor_contacts.active_monitor_id = %s
and active_monitor_contacts.contact_id = contacts.id
and contacts.active = true"""
return {object_models.Contact(*row) for row in await dbcon.fetch_all(q, (monitor_id,))}
async def _active_monitor_contact_groups(dbcon: DBConnection, monitor_id: int) -> Set[object_models.Contact]:
# Get contacts connected to the monitor via a contact group.
q = """select contacts.id, contacts.name, contacts.email, contacts.phone, contacts.active
from active_monitor_contact_groups, contact_groups, contact_group_contacts, contacts
where active_monitor_contact_groups.active_monitor_id = %s
and active_monitor_contact_groups.contact_group_id = contact_groups.id
and contact_groups.active = true
and contact_groups.id = contact_group_contacts.contact_group_id
and contact_group_contacts.contact_id = contacts.id
and contacts.active = true"""
return {object_models.Contact(*row) for row in await dbcon.fetch_all(q, (monitor_id,))}
async def _active_monitor_monitor_group_contacts(dbcon: DBConnection, monitor_id: int) -> Set[object_models.Contact]:
# Get contacts connected to the monitor via monitor group -> contacts
q = """select contacts.id, contacts.name, contacts.email, contacts.phone, contacts.active
from monitor_group_active_monitors
left join monitor_groups on monitor_group_active_monitors.monitor_group_id=monitor_groups.id
left join monitor_group_contacts on monitor_group_contacts.monitor_group_id=monitor_groups.id
left join contacts on contacts.id=monitor_group_contacts.contact_id
where monitor_group_active_monitors.active_monitor_id=%s and contacts.active = true"""
return {object_models.Contact(*row) for row in await dbcon.fetch_all(q, (monitor_id,))}
async def _active_monitor_monitor_group_contact_groups(
dbcon: DBConnection, monitor_id: int) -> Set[object_models.Contact]:
# Get contacts connected to the monitor via monitor group -> contact group -> contacts
q = """select contacts.id, contacts.name, contacts.email, contacts.phone, contacts.active
from monitor_group_active_monitors
left join monitor_groups on monitor_group_active_monitors.monitor_group_id=monitor_groups.id
left join monitor_group_contact_groups on monitor_group_contact_groups.monitor_group_id=monitor_groups.id
left join contact_groups on contact_groups.id=monitor_group_contact_groups.contact_group_id
left join contact_group_contacts on contact_group_contacts.contact_group_id=contact_groups.id
left join contacts on contacts.id=contact_group_contacts.contact_id
where monitor_group_active_monitors.active_monitor_id=%s
and contact_groups.active=true
and contacts.active=true"""
return {object_models.Contact(*row) for row in await dbcon.fetch_all(q, (monitor_id,))}
async def get_contact_dict_for_active_monitor(dbcon: DBConnection, monitor_id: int) -> Dict[str, set]:
"""Get all contact addresses/numbers for a specific active monitor.
Return: Dict[str, Set(str)] for 'email' and 'phone'.
"""
ret = {
'email': set(),
'phone': set(),
} # type: Dict[str, set]
contacts = await get_all_contacts_for_active_monitor(dbcon, monitor_id)
for contact in contacts:
if contact.email:
ret['email'].add(contact.email)
if contact.phone:
ret['phone'].add(contact.phone)
return ret
async def add_contact_to_active_monitor(dbcon: DBConnection, contact_id: int, monitor_id: int) -> None:
"""Connect a contact and an active m
|
nicolashainaux/mathmaker
|
mathmaker/lib/old_style_sheet/AlgebraMiniTest0.py
|
Python
|
gpl-3.0
| 2,680
| 0
|
# -*- coding: utf-8 -*-
# Mathmaker creates automatically maths exercises sheets
# with their answers
# Copyright 2006-2017 Nicolas Hainaux <nh.techn@gmail.com>
# This file is part of Mathmaker.
# Mathmaker is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
# Mathmaker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Mathmaker; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from . import exercise
from .S_Structure import S_Structure
FONT_SIZE_OFFSET = -1
SHEET_LAYOUT_TYPE = 'default'
SHEET_LAYOUT_UNIT = "cm"
# EXAMPLE OF A SHEET NOT USING ANY LAYOUT
# ----------------------- lines_nb col_widths exercises
SHEET_LAYOUT = {'exc': [None, 'all'],
'ans': [None, 'all']
}
# ------------------------------------------------------------------------------
# --------------------------------------------------------------------------
# ------------------------------------------------------------------------------
##
# @class AlgebraMiniTest0
# @brief A simple algebra mini-test
class AlgebraMiniTest0(S_Structure):
# --------------------------------------------------------------------------
##
# @brief Constructor
# @param **options Any options
# @return One instance of sheet.Model
def __init__(self, **options):
self.derived = True
S_Structure.__init__(self, FONT_SIZE_OFFSET,
SHEET_LAYOUT_UNIT, SHEET_LAYOUT,
SHEET_LAYOUT_TYPE)
# BEGINING OF THE ZONE TO REWRITE (see explanations below) ------------
self.header = ""
# self.title = _("Training exercises sheet:")
self.title = ""
self.subtitle = ""
self.text = ""
self.answers_title = _("Examples of answers")
# For instance:
# ex1 = exercise.ProductReduction(many=30)
# self.exercises_list.append(ex1)
for i in range(10):
ex1 = exercise.X_AlgebraExpressionExpansion(x_kind='mini_test',
x_subkind='two_'
'randomly')
self.exercises_list.append(ex1)
|
LYZhelloworld/Courses
|
50.020/08/ecb.py
|
Python
|
mit
| 1,521
| 0.011834
|
# ECB wrapper skeleton file for 50.020 Security
# Oka, SUTD, 2014
from present import *
import argparse
nokeybits=80
blocksize=64
def ecb(infile,outfile,keyfile,mode):
key = 0x0
with open(keyfile, 'rb') as fkey:
|
for i in range(nokeybits / 8):
key |= ord(fkey.read(1)) << i * 8
with open(infile, 'rb') as fin:
with open(outfile, 'wb') as fout:
while True:
buf = fin.read(blocksize / 8)
chunk = 0x0
if buf == '':
break
if len(buf) != blocksize / 8:
buf += '\0' * (blocksize / 8 - len(buf))
for i in range(blocksize / 8):
|
chunk |= ord(buf[i]) << i * 8
if mode == 'c':
result = present(chunk, key)
else:
result = present_inv(chunk, key)
for i in range(blocksize / 8):
fout.write(chr((result >> i * 8) & 0xff))
if __name__=="__main__":
parser=argparse.ArgumentParser(description='Block cipher using ECB mode.')
parser.add_argument('-i', dest='infile',help='input file')
parser.add_argument('-o', dest='outfile',help='output file')
parser.add_argument('-k', dest='keyfile',help='key file')
parser.add_argument('-m', dest='mode',help='mode')
args=parser.parse_args()
infile=args.infile
outfile=args.outfile
keyfile=args.keyfile
mode=args.mode
ecb(infile, outfile, keyfile, mode)
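# Editor's note (illustrative, not part of the original file): with a 10-byte
# (80-bit) key file and the PRESENT implementation from present.py available,
# the script is meant to be driven from the command line (file names are made up):
#   python ecb.py -i plain.bin -o cipher.bin -k key.bin -m c    # encrypt
#   python ecb.py -i cipher.bin -o plain.out -k key.bin -m d    # any mode other than 'c' decrypts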
|
EricMuller/mynotes-backend
|
requirements/twisted/Twisted-17.1.0/src/twisted/mail/test/test_mail.py
|
Python
|
mit
| 84,944
| 0.00292
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for large portions of L{twisted.mail}.
"""
import os
import errno
import shutil
import pickle
import StringIO
import email.message
import email.parser
import tempfile
import signal
import time
from hashlib import md5
from zope.interface.verify import verifyClass
from zope.interface import Interface, implementer
from twisted.trial import unittest
from twisted.mail import smtp
from twisted.mail import pop3
from twisted.names import dns
from twisted.internet import protocol
from twisted.internet import defer
from twisted.internet.defer import Deferred
from twisted.internet import reactor
from twisted.internet import interfaces
from twisted.internet import task
from twisted.internet.error import DNSLookupError, CannotListenError
from twisted.internet.error import ProcessDone, ProcessTerminated
from twisted.internet import address
from twisted.python import failure
from twisted.python.filepath import FilePath
from twisted.python import log
from twisted.mail.relaymanager import _AttemptManager
from twisted.test.proto_helpers import MemoryReactorClock, StringTransport
from twisted import mail
import twisted.mail.mail
import twisted.mail.maildir
import twisted.mail.relay
import twisted.mail.relaymanager
import twisted.mail.protocols
import twisted.mail.alias
from twisted.names.error import DNSNameError
from twisted.names.dns import RRHeader, Record_CNAME, Record_MX
from twisted import cred
import twisted.cred.credentials
import twisted.cred.checkers
import twisted.cred.portal
from twisted.test.proto_helpers import LineSendingProtocol
class DomainWithDefaultsTests(unittest.TestCase):
def testMethods(self):
d = dict([(x, x + 10) for x in range(10)])
d = mail.mail.DomainWithDefaultDict(d, 'Default')
self.assertEqual(len(d), 10)
self.assertEqual(list(iter(d)), range(10))
self.assertEqual(list(d.iterkeys()), list(iter(d)))
items = list(d.iteritems())
items.sort()
self.assertEqual(items, [(x, x + 10) for x in range(10)])
values = list(d.itervalues())
values.sort()
self.assertEqual(values, range(10, 20))
items = d.items()
items.sort()
self.assertEqual(items, [(x, x + 10) for x in range(10)])
values = d.values()
values.sort()
self.assertEqual(values, range(10, 20))
for x in range(10):
self.assertEqual(d[x], x + 10)
self.assertEqual(d.get(x), x + 10)
self.assertTrue(x in d)
del d[2], d[4], d[6]
self.assertEqual(len(d), 7)
self.assertEqual(d[2], 'Default')
self.assertEqual(d[4], 'Default')
self.assertEqual(d[6], 'Default')
d.update({'a': None, 'b': (), 'c': '*'})
self.assertEqual(len(d), 10)
self.assertEqual(d['a'], None)
self.assertEqual(d['b'], ())
self.assertEqual(d['c'], '*')
d.clear()
self.assertEqual(len(d), 0)
self.assertEqual(d.setdefault('key', 'value'), 'value')
self.assertEqual(d['key'], 'value')
self.assertEqual(d.popitem(), ('key', 'value'))
self.assertEqual(len(d), 0)
dcopy = d.copy()
self.assertEqual(d.domains, dcopy.domains)
self.assertEqual(d.default, dcopy.default)
def _stringificationTest(self, stringifier):
"""
Assert that the class name of a L{mail.mail.DomainWithDefaultDict}
instance and the string-formatted underlying domain dictionary both
appear in the string produced by the given string-returning function.
@type stringifier: one-argument callable
@param stringifier: either C{str} or C{repr}, to be used to get a
string to make assertions against.
"""
domain = mail.mail.DomainWithDefaultDict({}, 'Default')
self.assertIn(domain.__class__.__name__, stringifier(domain))
domain['key'] = 'value'
self.assertIn(str({'key': 'value'}), stringifier(domain))
def test_str(self):
"""
L{DomainWithDefaultDict.__str__} should return a string including
the class name and the domain mapping held by the instance.
"""
self._stringificationTest(str)
def test_repr(self):
"""
L{DomainWithDefaultDict.__repr__} should return a string including
the class name and the domain mapping held by the instance.
"""
self._stringificationTest(repr)
def test_has_keyDeprecation(self):
"""
has_key is now deprecated.
"""
sut = mail.mail.DomainWithDefaultDict({}, 'Default')
sut.has_key('anything')
message = (
'twisted.mail.mail.DomainWithDefaultDict.has_key was deprecated '
'in Twisted 16.3.0. Use the `in` keyword instead.'
)
warnings = self.flushWarnings(
[self.test_has_keyDeprecation])
self.assertEqual(1, len(warnings))
self.assertEqual(DeprecationWarning, warnings[0]['category'])
self.assertEqual(message, warnings[0]['message'])
class BounceTests(unittest.TestCase):
def setUp(self):
self.domain = mail.mail.BounceDomain()
def testExists(self):
self.assertRaises(smtp.AddressError, self.domain.exists, "any user")
def testRelay(self):
self.assertEqual(
self.domain.willRelay("random q emailer", "protocol"),
False
)
def testAddUser(self):
self.domain.addUser("bob", "password")
self.assertRaises(smtp.SMTPBadRcpt, self.domain.exists, "bob")
class BounceWithSMTPServerTests(unittest.TestCase):
"""
Tests for L{twisted.mail.mail.BounceDomain} with
L{twisted.mail.smtp.SMTPServer}.
"""
def test_rejected(self):
"""
Incoming emails to a SMTP server with L{twisted.mail.mail.BounceDomain}
are rejected.
"""
service = mail.mail.MailService()
domain = mail.mail.BounceDomain()
service.addDomain(b'foo.com', domain)
factory = mail.protocols.SMTPFactory(service)
protocol = factory.buildProtocol(None)
deliverer = mail.protocols.SMTPDomainDelivery(service, None, None)
protocol.delivery = deliverer
transport = StringTransport()
protocol.makeConnection(transport)
protocol.lineReceived(b'HELO baz.net')
protocol.lineReceived(b'MAIL FROM:<a@baz.net>')
protocol.lineReceived(b'RCPT TO:<any@foo.com>')
protocol.lineReceived(b'QUIT')
self.assertTrue(transport.disconnecting)
protocol.connectionLost(None)
self.assertEqual(transport.value().strip().split(b'\r\n')[-2],
b'550 Cannot receive for specified address')
class FileMessageTests(unittest.TestCase):
def setUp(self):
self.name = "fileMessage.testFile"
self.final = "final.fileMessage.testFile"
self.f = open(self.name, 'w')
self.fp = mail.mail.FileMessage(self.f, self.name, self.final)
def tearDown(self):
try:
self.f.close()
except:
pass
try:
os.remove(self.name)
except:
pass
try:
os.remove(self.final)
except:
pass
def testFinalName(self):
return self.fp.eomReceived().addCallback(self._cbFinalName)
def _cbFinalName(self, result):
self.assertEqual(result, self.final)
self.assertTrue(self.f.closed)
self.assertFalse(os.path.exists(self.name))
def testContents(self):
contents = "first line\nsecond line\nthird line\n"
for line in contents.splitlines():
self.fp.lineReceived(line)
self.fp.eomReceived()
with open(self.final) as f:
self.assertEqual(f.read(), contents)
def testInterrupted(self):
contents = "first line\nsecond line\n"
for line in contents.splitlines():
self.fp.lineReceived(line)
self.fp.connectionLost()
self.assertFalse(os.path.exists(self.name))
self
|
healthchecks/healthchecks
|
hc/accounts/management/commands/pruneusers.py
|
Python
|
bsd-3-clause
| 1,501
| 0
|
from datetime import timedelta
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from django.db.models import Count, F
from django.utils.timezone import now
from hc.accounts.models import Profile
class Command(BaseCommand):
help = """Prune old, inactive user accounts.
Conditions for removing a user account:
- created 1 month ago and never logged in. Does not belong
to any team.
Use case: visitor types in their email at the website but
never follows through with login.
"""
def handle(self, *args, **options):
month_ago = now() - timedelta(days=30)
# Old accounts, never logged in, no team memberships
q = User.objects.order_by("id")
q = q.annotate(n_teams=Count("memberships"))
q = q.filter(date_joined__lt=month_ago, last_login=None, n_teams=0)
n, summary = q.delete()
count = summary.get("auth.User", 0)
self.stdout.write("Pruned %d never-logged-in user accounts." % count)
# Profiles scheduled for deletion
q = Profile.objects.order_by("id")
q = q.filter(deletion_notice_date__lt=month_ago)
# Exclude users who have logged in after receiving deletion notice
q = q.exclude(user__last_login__gt=F("deletion_notice_date"))
for profile in q:
self.stdout.write("Deleting inactive %s" % profile.user.email)
profile.user.delete()
return "Done!"
|
timlev/Proxy-Hours
|
main.py
|
Python
|
mit
| 1,572
| 0.022901
|
#!/usr/bin/python
import Proxy_Hours, proxyhours_gather_all_data
try:
from PyQt4 import QtCore, QtGui
qtplatform = "PyQt4"
except:
from PySide import QtCore, QtGui
qtplatform = "PySide"
import os
def which(pgm):
path=os.getenv('PATH')
for p in path.split(os.path.pathsep):
p=os.path.join(p,pgm)
if os.path.exists(p) and os.access(p,os.X_OK):
return p
os.which=which
print os.which("pdftohtml")
def selectFile():
name = QtGui.QFileDialog.getOpenFileName()
if qtplatform == "PySide":
name = name[0]
print name
ui.FilelineEdit.setText(name)
nametxt = str(ui.FilelineEdit.text())
nametxt = os.path.abspath(nametxt)
print "Nametxt:", namet
|
xt
write_out_0, write_out_1, write_out_2, write_out_3 = proxyhours_gather_all_data.proxy_hours(nametxt)
ui.log_lineEdit.setText(write_out_1)
ui.all_data_lineEdit.setText(write_out_2)
ui.time_lineEdit.setText(write_out_3)
ui.tableWidget.setRowCount(len(write_out_0))
for pos, row in enumerate(write_out_0):
add_row(pos,row)
def add_row(pos,row):
r = pos
for c, t in enumerate(row):
ui.tableWidget.setItem(r,c,QtGui.QTableWidgetItem(str(t)))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
MainWindow = Proxy_Hours.QtGui.QMainWindow()
ui = Proxy_Hours.Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
ui.OpenpushButton.clicked.connect(selectFile)
ui.actionOpen.triggered.connect(selectFile)
ui.actionQuit.triggered.connect(QtCore.QCoreApplication.instance().quit)
sys.exit(app.exec_())
|
riga/luigi
|
test/config_toml_test.py
|
Python
|
apache-2.0
| 3,012
| 0
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 Vote inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from luigi.configuration import LuigiTomlParser, get_config, add_config_path
from helpers import LuigiTestCase
class TomlConfigParserTest(LuigiTestCase):
@classmethod
def setUpClass(cls):
add_config_path('test/testconfig/luigi.toml')
add_config_path('test/testconfig/luigi_local.toml')
def setUp(self):
LuigiTomlParser._instance = None
super(TomlConfigParserTest, self).setUp()
def test_get_config(self):
config = get_config('toml')
self.assertIsInstance(config, LuigiTomlParser)
def test_file_reading(self):
config = get_config('toml')
self.assertIn('hdfs', config.data)
def test_get(self):
config = get_config('toml')
# test getting
self.assertEqual(config.get('hdfs', 'client'), 'hadoopcli')
self.assertEqual(config.get('hdfs', 'client', 'test'), 'hadoopcli')
# test default
self.assertEqual(config.get('hdfs', 'test', 'check'), 'check')
with self.assertRaises(KeyError):
config.get('hdfs', 'test')
# test override
self.assertEqual(config.get('hdfs', 'namenode_host'), 'localhost')
# test non-string values
self.assertEqual(config.get('hdfs', 'namenode_port'), 50030)
def test_set(self):
config = get_config('toml')
self.assertEqual(config.get('hdfs', 'client'), 'hadoopcli')
config.set('hdfs', 'client', 'test')
self.assertEqual(config.get('hdfs', 'client'), 'test')
config.set('hdfs', 'check', 'test me')
self.assertEqual(config.get('hdfs', 'check'), 'test me')
def test_has_option(self):
config = get_config('toml')
|
self.assertTrue(config.has_option('hdfs', 'client'))
self.assertFalse(config.has_option('hdfs', 'nope'))
self.assertFalse(config.has_option('nope', 'client'))
class HelpersTest(LuigiTestCase):
def test_add_without_install(self):
enabled = LuigiTomlParser.enabled
LuigiTomlParser.enabled = False
with self.assertRaises(ImportError):
add_config_path('test/testconfig/luigi.toml')
LuigiTomlParser.enabled = enabled
def test_get_without_install(self):
enabled = LuigiTomlParser.enabled
LuigiTomlParser.enabled = False
with self.assertRaises(ImportError):
get_config('toml')
LuigiTomlParser.enabled = enabled
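# Editor's note (not taken from the repository's fixtures): a minimal
# test/testconfig/luigi.toml consistent with the assertions above could look
# like the sketch below; the host value is assumed, and luigi_local.toml is
# expected to override namenode_host with "localhost".
#
#   [hdfs]
#   client = "hadoopcli"
#   namenode_host = "namenode.example.org"
#   namenode_port = 50030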
|
Dangetsu/vnr
|
Frameworks/Sakura/py/libs/qtbrowser/qtplayer.py
|
Python
|
gpl-3.0
| 2,795
| 0.025045
|
# coding: utf8
# qtplayer.py
# 10/1/2014 jichi
__all__ = 'HiddenPlayer',
from PySide.QtCore import QUrl
from sakurakit.skdebug import dprint
class _HiddenPlayer:
def __init__(self, parent):
self.parent = parent # QWidget
self._webView = None # QWebView
@property
def webView(self):
if not self._webView:
dprint("create web view")
from PySide.QtWebKit import QWebView
self._webView = QWebView(self.parent)
update_web_settings(self._webView.settings())
self._webView.resize(0, 0) # zero size
return self._webView
def setParent(self, value):
self.parent = value
if self._webView:
self._webView.setParent(value)
def stop(self):
if self._webView:
self._webView.stop()
class HiddenPlayer(object):
def __init__(self, parent=None):
self.__d = _HiddenPlayer(parent)
def parentWidget(self): return self.__d.parent
def setParentWidget(self, value): self.__d.setParent(value)
def webView(self): return self.__d.webView
def stop(self):
self.__d.stop()
def play(self, url, **kwargs):
"""
@param url str or QUrl
"""
if not isinstance(url, QUrl):
url = QUrl(url)
for k,v in kwargs.iteritems():
#url.addQueryItem(k, v)
if not isinstance(v, basestring):
v = "%s" % v
url.addEncodedQueryItem(k, QUrl.toPercentEncoding(v))
self.__d.webView.load(url)
def update_web_settings(settings=None):
"""
@param settings QWebSettings or None
"""
from PySide.QtWebKit import QWebSettings
ws = settings or QWebSettings.globalSettings()
ws.setAttribute(QWebSettings.PluginsEnabled, True)
ws.setAttribute(QWebSettings.JavaEnabled, True)
ws.setAttribute(QWebSettings.DnsPrefetchEnabled, True) # better performance
ws.setAttribute(QWebSettings.AutoLoadImages, False) # do NOT load images
#ws.setAttribute(QWebSettings.JavascriptCanOpenWindows, True)
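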
#ws.setAttribute(QWebSettings.JavascriptCanAccessClipboard, True)
#ws.setAttribute(QWebSettings.DeveloperExtrasEnabled, True)
#ws.setAttribute(QWebSettings.OfflineStorageDatabaseEnabled, True)
#ws.setAttribute(QWebSettings.OfflineWebApplicationCacheEnabled, True)
#ws.setAttribute(QWebSettings.LocalStorageEnabled, True)
#ws.setAttribute(QWebSettings.LocalContentCanAccessRemoteUrls, True)
#ws.setAttribute(QWebSettings.ZoomTextOnly, False)
#ws.setDefaultTextEncoding("SHIFT-JIS")
#ws.setDefaultTextEncoding("EUC-JP")
#ws.setLocalStoragePath(G_PATH_CACHES)
#QWebSettings.setIconDatabasePath(G_PATH_CACHES)
#QWebSettings.setOfflineStoragePath(G_PATH_CACHES)
#QWebSettings.setOfflineWebApplicationCachePath(G_PATH_CACHES)
# See: http://webkit.org/blog/427/webkit-page-cache-i-the-basics/
ws.setMaximumPagesInCache(10) # do not cache lots of pages
# EOF
|
totoro72/pt1
|
ep/item_26_multiple_inheritance_for_mixin_only.py
|
Python
|
mit
| 2,705
| 0.001479
|
import json
from collections import abc
# item 26: use muptiple inheritance for mixin only
# a mixin that transforms a python object to a dictionary that's ready for seralization
class ToDictMixin(object):
def to_dict(self):
"""Return a dictionary representation of this object"""
return self._traverse('none', self.__dict__)
def _traverse(self, key, obj):
"""Return a dictionary representation of this obj"""
if isinstance(obj, ToDictMixin):
return obj.to_dict()
if isinstance(obj, dict):
return {k: self._traverse(k, v) for k, v in obj.items()}
if isinstance(obj, tuple) or isinstance(obj, list):
return [self._traverse(key, item) for item in obj]
# if it's any other object with __dict__ attr, use it!
if hasattr(obj, '__dict__'):
return self._traverse(key, obj.__dict__)
return obj
class BinaryTreeNode(ToDictMixin):
def __init__(self, value, left=None, right=None):
self.value = value
self.left = left
self.right = right
class BinaryTreeWithParent(BinaryTreeNode):
def __init__(self, value, left=None, right=None, parent=None):
super().__init__(value, left, right)
self.parent = parent
# override so the backref to parent does not cause infinite recursion
def _traverse(self, key, obj):
# if the key is parent, stop the recursion and return parent's value instead
if key == 'parent' and isinstance(obj, BinaryTreeNode):
return obj.value
return super()._traverse(key, obj)
class NamedSubTree(ToDictMixin):
def __init__(self, name, tree):
self.name = name
self.tree = tree
# Mixins can also play together
class ToJsonMixin(object):
@classmethod
def from_json(cls, kwargs):
"""given kwargs in json format, get it into dictionary format"""
kwargs = json.loads(kwargs)
return cls(**kwargs)
def to_json(self):
d = self.to_dict()
return json.dumps(d)
class BinaryTreeWithJson(BinaryTreeNode, ToJsonMixin):
pass
class EqualityMixin(object):
def __eq__(self, other):
return self.__dict__ == other.__dict__
class Switch(EqualityMixin):
def __init__(self, ports, speed):
self.ports = ports
self.speed = speed
class Machine(EqualityMixin):
def __init__(self, ram, cpu, disk):
self.ram = ram
self.cpu = cpu
self.disk = disk
class DatacenterRack(ToJsonMixin, ToDictMixin, EqualityMixin):
def __init__(self, switch, machines):
self.switch = Switch(**switch)
self.machines = [Machine(**kwargs) for kwargs in machines]
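# Editor's sketch (not part of the original file): a round-trip through the
# mixins defined above; the concrete port/speed/ram values are made up.
if __name__ == '__main__':
    tree = BinaryTreeNode(10, left=BinaryTreeNode(7), right=BinaryTreeNode(13))
    print(NamedSubTree('example', tree).to_dict())          # nested dict via ToDictMixin
    rack = DatacenterRack(switch={'ports': 5, 'speed': 1e9},
                          machines=[{'ram': 32, 'cpu': 4, 'disk': 512}])
    print(rack.to_json())                                   # serialize via ToJsonMixin
    # deserialize and compare via EqualityMixin
    assert rack == DatacenterRack.from_json(rack.to_json())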
|
IAryan/edCTF
|
edctf/api/views/challengeboard.py
|
Python
|
apache-2.0
| 2,095
| 0.012411
|
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from edctf.api.models import challengeboard, category, challenge
from edctf.api.serializers import challengeboard_serializer, category_serializer, challenge_serializer
class challengeboard_view(APIView):
"""
Manages challengeboard requests
"""
permission_classes = (IsAuthenticated,)
def get(self, request, id=None, format=None):
"""
Gets all challengeboards or gets one challengeboard via
challengeboards/:id.
"""
# If challengeboard id was requested, return that challengeboard
# else return list of all challengeboards in the database.
if id:
# Retrieve and serialize the requested challengeboard data.
challengeboards = challengeboard.objects.filter(id=id)
challengeboards_serializer = challengeboard_serializer(challengeboards, many=True, context={'request': request})
# Retrieve and serialize the categories in the challengeboard.
categories = category.objects.filter(challengeboard=challengeboards.first())
categories_serializer = category_serializer(categories, many=True, context={'request': request})
# Retrieve and serialize the challenges in each category.
challenges = []
for cat in categories:
challenges += challenge.objects.filter(category=cat)
challenges_serializer = challenge_serializer(challenges, many=True, context={'request': request})
# Return the serialized dat
|
a.
return Response({
'challengeboards': challengeboards_serializer.data,
'categories': categories_serializer.data,
'challenges': challenges_serializer.data,
})
else:
# Retrieve and serialize the requested challengeboard data.
challengeboards = challengeboard.objects.all()
serializer = challengeboard_serializer(challengeboards, many=True, context={'request': request})
      # Return the serialized data.
return Response({
'challengeboards': serializer.data,
})
|
gmartinvela/Incubator
|
Incubator/wsgi.py
|
Python
|
mit
| 1,428
| 0.0007
|
"""
WSGI config for Incubator project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "Incubator.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Incubator.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
cgvarela/vitess
|
test/custom_sharding.py
|
Python
|
bsd-3-clause
| 7,223
| 0.005399
|
#!/usr/bin/env python
#
# Copyright 2015, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
import unittest
import environment
import utils
import tablet
# shards
shard_0_master = tablet.Tablet()
shard_0_rdonly = tablet.Tablet()
shard_1_master = tablet.Tablet()
shard_1_rdonly = tablet.Tablet()
def setUpModule():
try:
environment.topo_server().setup()
setup_procs = [
shard_0_master.init_mysql(),
shard_0_rdonly.init_mysql(),
shard_1_master.init_mysql(),
shard_1_rdonly.init_mysql(),
]
utils.Vtctld().start()
utils.VtGate().start()
utils.wait_procs(setup_procs)
except:
tearDownModule()
raise
def tearDownModule():
if utils.options.skip_teardown:
return
teardown_procs = [
shard_0_master.teardown_mysql(),
shard_0_rdonly.teardown_mysql(),
shard_1_master.teardown_mysql(),
shard_1_rdonly.teardown_mysql(),
]
utils.wait_procs(teardown_procs, raise_on_error=False)
environment.topo_server().teardown()
utils.kill_sub_processes()
utils.remove_tmp_files()
shard_0_master.remove_tree()
shard_0_rdonly.remove_tree()
shard_1_master.remove_tree()
shard_1_rdonly.remove_tree()
class TestCustomSharding(unittest.TestCase):
def _insert_data(self, shard, start, count, table='data'):
sql = 'insert into %s(id, name) values (:id, :name)' % table
for x in xrange(count):
bindvars = {
'id': start+x,
'name': 'row %d' % (start+x),
}
utils.vtgate.execute_shard(sql, 'test_keyspace', shard,
bindvars=bindvars)
def _check_data(self, shard, start, count, table='data'):
sql = 'select name from %s where id=:id' % table
for x in xrange(count):
bindvars = {
'id': start+x,
}
qr = utils.vtgate.execute_shard(sql, 'test_keyspace', shard,
bindvars=bindvars)
self.assertEqual(len(qr['Rows']), 1)
v = qr['Rows'][0][0]
self.assertEqual(v, 'row %d' % (start+x))
def test_custom_end_to_end(self):
"""Runs through the common operations of a custom sharded keyspace.
Tests creation with one shard, schema change, reading / writing
data, adding one more shard, reading / writing data from both
shards, applying schema changes again, and reading / writing data
from both shards again.
"""
utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
# start the first shard only for now
shard_0_master.init_tablet('master', 'test_keyspace', '0')
shard_0_rdonly.init_tablet('rdonly', 'test_keyspace', '0')
for t in [shard_0_master, shard_0_rdonly]:
t.create_db('vt_test_keyspace')
t.start_vttablet(wait_for_state=None)
for t in [shard_0_master, shard_0_rdonly]:
t.wait_for_vttablet_state('SERVING')
utils.run_vtctl(['InitShardMaster', 'test_keyspace/0',
shard_0_master.tablet_alias], auto_log=True)
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
ks = utils.run_vtctl_json(['GetSrvKeyspace', 'test_nj', 'test_keyspace'])
self.assertEqual(len(ks['Partitions']['master']['ShardReferences']), 1)
self.assertEqual(len(ks['Partitions']['rdonly']['ShardReferences']), 1)
s = utils.run_vtctl_json(['GetShard', 'test_keyspace/0'])
self.assertEqual(len(s['served_types']), 3)
# create a table on shard 0
sql = '''create table data(
id bigint auto_increment,
name varchar(64),
primary key (id)
) Engine=InnoDB'''
utils.run_vtctl(['ApplySchema', '-sql=' + sql, 'test_keyspace'],
auto_log=True)
# insert data on shard 0
self._insert_data('0', 100, 10)
# re-read shard 0 data
self._check_data('0', 100, 10)
# create shard 1
shard_1_master.init_tablet('master', 'test_keyspace', '1')
    shard_1_rdonly.init_tablet('rdonly', 'test_keyspace', '1')
for t in [shard_1_master, shard_1_rdonly]:
t.start_vttablet(wait_for_state=None)
for t in [shard_1_master, shard_1_rdonly]:
t.wait_for_vttablet_state('NOT_SERVING')
s = utils.run_vtctl_json(['GetShard', 'test_keyspace/1'])
self.assertEqual(len(s['served_types']), 3)
utils.run_vtctl(['InitShardMaster', 'test_keyspace/1',
shard_1_master.tablet_alias], auto_log=True)
    utils.run_vtctl(['CopySchemaShard', shard_0_rdonly.tablet_alias,
'test_keyspace/1'], auto_log=True)
for t in [shard_1_master, shard_1_rdonly]:
utils.run_vtctl(['RefreshState', t.tablet_alias], auto_log=True)
t.wait_for_vttablet_state('SERVING')
# rebuild the keyspace serving graph now that the new shard was added
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
# insert data on shard 1
self._insert_data('1', 200, 10)
# re-read shard 1 data
self._check_data('1', 200, 10)
# create a second table on all shards
sql = '''create table data2(
id bigint auto_increment,
name varchar(64),
primary key (id)
) Engine=InnoDB'''
utils.run_vtctl(['ApplySchema', '-sql=' + sql, 'test_keyspace'],
auto_log=True)
# insert and read data on all shards
self._insert_data('0', 300, 10, table='data2')
self._insert_data('1', 400, 10, table='data2')
self._check_data('0', 300, 10, table='data2')
self._check_data('1', 400, 10, table='data2')
# reload schema everywhere so the QueryService knows about the tables
for t in [shard_0_master, shard_0_rdonly, shard_1_master, shard_1_rdonly]:
utils.run_vtctl(['ReloadSchema', t.tablet_alias], auto_log=True)
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
ks = utils.run_vtctl_json(['GetSrvKeyspace', 'test_nj', 'test_keyspace'])
self.assertEqual(len(ks['Partitions']['master']['ShardReferences']), 2)
self.assertEqual(len(ks['Partitions']['rdonly']['ShardReferences']), 2)
# Now test SplitQuery API works (used in MapReduce usually, but bringing
# up a full MR-capable cluster is too much for this test environment)
sql = 'select id, name from data'
s = utils.vtgate.split_query(sql, 'test_keyspace', 4)
self.assertEqual(len(s), 4)
shard0count = 0
shard1count = 0
for q in s:
if q['QueryShard']['Shards'][0] == '0':
shard0count += 1
if q['QueryShard']['Shards'][0] == '1':
shard1count += 1
self.assertEqual(shard0count, 2)
self.assertEqual(shard1count, 2)
# run the queries, aggregate the results, make sure we have all rows
rows = {}
for q in s:
qr = utils.vtgate.execute_shard(
q['QueryShard']['Sql'],
'test_keyspace', ','.join(q['QueryShard']['Shards']),
tablet_type='master', bindvars=q['QueryShard']['BindVariables'])
for r in qr['Rows']:
id = int(r[0])
rows[id] = r[1]
self.assertEqual(len(rows), 20)
expected = {}
for i in xrange(10):
expected[100 + i] = 'row %d' % (100 + i)
expected[200 + i] = 'row %d' % (200 + i)
self.assertEqual(rows, expected)
if __name__ == '__main__':
utils.main()
|
opethe1st/CompetitiveProgramming
|
Hackerrank/WeekOfCode/WoC29/minimalbruteforce.py
|
Python
|
gpl-3.0
| 1,231
| 0.028432
|
#!/bin/python
import sys
from decimal import Decimal, getcontext,Context
from math import pi as PI
pi = Context(prec=60).create_decimal('3.1415926535897932384626433832795028841971693993751')
PI = pi
def calc(fun, n):
temp = Decimal("0.0")
for ni in xrange(n+1, 0, -1):
(a, b) = fun(ni)
temp = Decimal(b) / (a + temp)
return fun(0)[0] + temp
def fpi(n):
    return (6 if n > 0 else 3, (2 * n - 1) ** 2)
#print "%.50f"%(calc(fpi, 1001))
#mini,maxi = raw_input().strip().split(' ')
mini,maxi = 200,231#[long(mini),long(maxi)]
# your code goes here
minifraction = (3,1)
minidecimal = Decimal(3.0)
#print PI
for d in xrange(mini,maxi+1):
#print d
n = int(pi*d)
d1 = n/Decimal(d)
d2 = (n+1)/Decimal(d)
#print n,d,d1,d2
if abs(d1-pi)<abs(d2-pi):
if abs(d1-pi)<abs(minidecimal-pi):
minifraction = (n,d)
minidecimal = n/Decimal(d)
if abs(d1-pi)>abs(d2-pi):
if abs(d2-pi)<abs(minidecimal-pi):
#print n,d,d1,d2
minifraction = (n+1,d)
minidecimal = (n+1)/Decimal(d)
#print minifraction
print "%d/%d"%(minifraction[0],minifraction[1])
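As a cross-check of the same idea (not part of the original submission; written for Python 3 with exact fractions instead of Decimal, and the function name is illustrative):

from fractions import Fraction
import math

PI_FRAC = Fraction(math.pi)  # float pi as an exact fraction; precise enough for this range

def best_pi_fraction(dmin, dmax):
    # try every denominator in [dmin, dmax] and keep the n/d pair closest to pi
    best = (3, 1)
    for d in range(dmin, dmax + 1):
        n = round(PI_FRAC * d)  # nearest integer numerator for this denominator
        if abs(Fraction(n, d) - PI_FRAC) < abs(Fraction(*best) - PI_FRAC):
            best = (n, d)
    return best

n, d = best_pi_fraction(200, 231)
print("%d/%d" % (n, d))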
|
marcydoty/geraldo
|
site/newsite/site-geraldo/django/db/backends/sqlite3/client.py
|
Python
|
lgpl-3.0
| 283
| 0.003534
|
from django.db.backends import BaseDatabaseClient
from django.conf import settings
import os
class DatabaseClient(BaseDatabaseClient):
executable_name = 'sqlite3'
def runshell(self):
args = ['', settings.DATABASE_NAME]
        os.execvp(self.executable_name, args)
|
abadger/stellarmagnate
|
magnate/ui/urwid/numbers.py
|
Python
|
agpl-3.0
| 1,371
| 0.001459
|
# Stellar Magnate - A space-themed commodity trading game
# Copyright (C) 2017 Toshio Kuratomi <toshio@fedoraproject.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Utility functions for dealing with numbers"""
import locale
def format_number(number, max_chars=7):
"""
Format a number in a human readable form. This adds locale-specific separators.
    If the number is too long, use scientific notation.
:kwarg max_chars: The maximum number of characters a number can take on
the screen before it is turned into scientific notation.
"""
formatted_number = locale.format('%d', number, grouping=True)
    if len(formatted_number) > max_chars:
formatted_number = '{:.1E}'.format(number)
return formatted_number
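Illustrative behaviour with the default max_chars (not part of the original module; assumes an English locale with comma grouping is installed):

import locale
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')  # assumed to be available on the system

print(format_number(12345))    # '12,345'  -- the grouped form fits within 7 characters
print(format_number(1234567))  # '1.2E+06' -- '1,234,567' would be 9 characters, so scientific notation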
|
madbook/reddit-plugin-adzerk
|
reddit_adzerk/lib/events.py
|
Python
|
bsd-3-clause
| 7,482
| 0.000668
|
from baseplate.events import FieldKind
from pylons import app_globals as g
from r2.lib.eventcollector import (
EventQueue,
Event,
squelch_exceptions,
)
from r2.lib.utils import sampled
from r2.models import (
FakeSubreddit,
)
class AdEvent(Event):
@classmethod
def get_context_data(cls, request, context):
data = super(AdEvent, cls).get_context_data(request, context)
dnt_header = request.headers.get("DNT", None)
if dnt_header is not None:
data["dnt"] = dnt_header == "1"
return data
class AdzerkAPIEvent(Event):
def add_target_fields(self, thing):
self.add("target_fullname", thing._fullname)
self.add("target_type", thing.__class__.__name__)
self.add("is_deleted", thing._deleted)
def add_caller_fields(self, user):
if user:
self.add("caller_user_id", user._id)
self.add("caller_user_name", user.name)
else:
self.add("is_automated", True)
def add_error_fields(self, error):
if error:
self.add("error_status_code", error.status_code)
self.add("error_body", error.response_body)
class AdEventQueue(EventQueue):
@squelch_exceptions
@sampled("events_collector_ad_serving_sample_rate")
def ad_request(
self,
keywords,
properties,
platform,
placements,
is_refresh,
subreddit=None,
request=None,
context=None,
):
"""Create an `ad_request` for event-collector.
keywords: Array of keywords used to select the ad.
        properties: Object containing custom targeting parameters.
platform: The platform the ad was requested for.
placements: Array of placement objects (name, types) to be filled.
        is_refresh: Whether or not the request is for the initial ad or a
refresh after refocusing the page.
        subreddit: The Subreddit the ad was displayed on.
request, context: Should be pylons.request & pylons.c respectively;
"""
event = AdEvent(
topic="ad_serving_events",
event_type="ss.ad_request",
request=request,
context=context,
)
# keywords are case insensitive, normalize and sort them
# for easier equality testing.
keywords = sorted(k.lower() for k in keywords)
event.add("keywords", keywords)
event.add("properties", properties)
event.add("platform", platform)
event.add("placements", placements)
event.add("is_refresh", is_refresh)
if not isinstance(subreddit, FakeSubreddit):
event.add_subreddit_fields(subreddit)
self.save_event(event)
@squelch_exceptions
@sampled("events_collector_ad_serving_sample_rate")
def ad_response(
self,
keywords,
properties,
platform,
placement_name,
placement_type,
adserver_ad_id,
adserver_campaign_id,
adserver_creative_id,
adserver_flight_id,
impression_id,
matched_keywords,
rate_type,
clearing_price,
link_fullname=None,
campaign_fullname=None,
subreddit=None,
priority=None,
ecpm=None,
request=None,
context=None,
):
"""Create an `ad_response` for event-collector.
keywords: Array of keywords used to select the ad.
        properties: Object containing custom targeting parameters.
platform: The platform the ad was requested for.
placement_name: The identifier of the placement.
placement_type: The type of placement the ad is.
adserver_ad_id: Unique id of the ad response (from the ad server).
adserver_campaign_id: Unique id of the ad campaign (from the ad server).
adserver_creative_id: Unique id of the ad creative (from the ad server).
adserver_flight_id: Unique id of the ad flight (from the ad server).
impression_id: Unique id of the impression.
matched_keywords: An array of the keywords which matched for the ad.
rate_type: Flat/CPM/CPC/etc.
clearing_price: What was paid for the rate type.
link_fullname: The fullname of the promoted link.
campaign_fullname: The fullname of the PromoCampaign.
        subreddit: The Subreddit the ad was displayed on.
priority: The priority name of the ad.
ecpm: The effective cpm of the ad.
request, context: Should be pylons.request & pylons.c respectively;
"""
event = AdEvent(
topic="ad_serving_events",
event_type="ss.ad_response",
request=request,
context=context,
)
event.add("properties", properties)
event.add("platform", platform)
event.add("placement_name", placement_name)
event.add("placement_type", placement_type)
event.add("adserver_ad_id", adserver_ad_id)
event.add("adserver_campaign_id", adserver_campaign_id)
event.add("adserver_creative_id", adserver_creative_id)
event.add("adserver_flight_id", adserver_flight_id)
event.add("impression_id",
impression_id, kind=FieldKind.HIGH_CARDINALITY)
event.add("rate_type", rate_type)
event.add("clearing_price", clearing_price)
event.add("link_fullname", link_fullname)
event.add("campaign_fullname", campaign_fullname)
event.add("priority", priority)
event.add("ecpm", ecpm)
# keywords are case insensitive, normalize and sort them
# for easier equality testing.
keywords = sorted(k.lower() for k in keywords)
event.add("keywords", keywords)
# don't send empty arrays.
if matched_keywords:
matched_keywords = sorted(k.lower() for k in matched_keywords)
event.add("matched_keywords", matched_keywords)
if not isinstance(subreddit, FakeSubreddit):
event.add_subreddit_fields(subreddit)
self.save_event(event)
@squelch_exceptions
def adzerk_api_request(
self,
request_type,
thing,
request_body,
triggered_by=None,
additional_data=None,
request_error=None,
):
"""
Create an `adzerk_api_events` event for event-collector.
request_type: The type of request being made
thing: The `Thing` which the request data is derived from
request_body: The JSON payload to be sent to adzerk
triggered_by: The user who triggered the API call
additional_data: A dict of any additional meta data that may be
relevant to the request
request_error: An `adzerk_api.AdzerkError` if the request fails
"""
event = AdzerkAPIEvent(
topic='adzerk_api_events',
event_type='ss.%s_request' % request_type,
)
event.add_target_fields(thing)
event.add_caller_fields(triggered_by)
event.add_error_fields(request_error)
event.add("request_body", request_body)
if additional_data:
for key, value in additional_data.iteritems():
event.add(key, value)
self.save_event(event)
|
viswimmer1/PythonGenerator
|
data/python_files/30552411/__init__.py
|
Python
|
gpl-2.0
| 4,785
| 0.002508
|
import inspect
import os.path
import django
import SocketServer
import sys
from django.conf import settings
from django.views.debug import linebreak_iter
# Figure out some paths
django_path = os.path.realpath(os.path.dirname(django.__file__))
socketserver_path = os.path.realpath(os.path.dirname(SocketServer.__file__))
def ms_from_timedelta(td):
"""
Given a timedelta object, returns a float representing milliseconds
"""
return (td.seconds * 1000) + (td.microseconds / 1000.0)
def tidy_stacktrace(stack):
"""
Clean up stacktrace and remove all entries that:
1. Are part of Django (except contrib apps)
2. Are part of SocketServer (used by Django's dev server)
3. Are the last entry (which is part of our stacktracing code)
``stack`` should be a list of frame tuples from ``inspect.stack()``
"""
trace = []
for frame, path, line_no, func_name, text in (f[:5] for f in stack):
s_path = os.path.realpath(path)
# Support hiding of frames -- used in various utilities that provide
# inspection.
if '__traceback_hide__' in frame.f_locals:
continue
if getattr(settings, 'DEBUG_TOOLBAR_CONFIG', {}).get('HIDE_DJANGO_SQL', True) \
and django_path in s_path and not 'django/contrib' in s_path:
continue
if socketserver_path in s_path:
continue
if not text:
text = ''
else:
text = (''.join(text)).strip()
trace.append((path, line_no, func_name, text))
return trace
def get_template_info(source, context_lines=3):
line = 0
upto = 0
source_lines = []
# before = during = after = ""
origin, (start, end) = source
template_source = origin.reload()
for num, next in enumerate(linebreak_iter(template_source)):
if start >= upto and end <= next:
line = num
# before = template_source[upto:start]
# during = template_source[start:end]
# after = template_source[end:next]
source_lines.append((num, template_source[upto:next]))
upto = next
top = max(1, line - context_lines)
bottom = min(len(source_lines), line + 1 + context_lines)
context = []
for num, content in source_lines[top:bottom]:
context.append({
'num': num,
'content': content,
'highlight': (num == line),
})
return {
'name': origin.name,
'context': context,
}
def get_name_from_obj(obj):
if hasattr(obj, '__name__'):
name = obj.__name__
    elif hasattr(obj, '__class__') and hasattr(obj.__class__, '__name__'):
name = obj.__class__.__name__
else:
name = '<unknown>'
if hasattr(obj, '__module__'):
module = obj.__module__
name = '%s.%s' % (module, name)
return name
def getframeinfo(frame, context=1):
"""
Get information about a frame or traceback object.
A tuple of five things is returned: the filename, the line number of
    the current line, the function name, a list of lines of context from
the source code, and the index of the current line within that list.
The optional second argument specifies the number of lines of context
to return, which are centered around the current line.
This originally comes from ``inspect`` but is modified to handle issues
with ``findsource()``.
"""
if inspect.istraceback(frame):
lineno = frame.tb_lineno
frame = frame.tb_frame
else:
lineno = frame.f_lineno
if not inspect.isframe(frame):
raise TypeError('arg is not a frame or traceback object')
filename = inspect.getsourcefile(frame) or inspect.getfile(frame)
if context > 0:
start = lineno - 1 - context//2
try:
lines, lnum = inspect.findsource(frame)
except (IOError, IndexError):
lines = index = None
else:
start = max(start, 1)
start = max(0, min(start, len(lines) - context))
lines = lines[start:start+context]
index = lineno - 1 - start
else:
lines = index = None
return inspect.Traceback(filename, lineno, frame.f_code.co_name, lines, index)
def get_stack(context=1):
"""
Get a list of records for a frame and all higher (calling) frames.
Each record contains a frame object, filename, line number, function
name, a list of lines of context, and index within the context.
Modified version of ``inspect.stack()`` which calls our own ``getframeinfo()``
"""
frame = sys._getframe(1)
framelist = []
while frame:
framelist.append((frame,) + getframeinfo(frame, context))
frame = frame.f_back
return framelist
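An illustration of get_name_from_obj (added for clarity, not part of the original module; assumes this module imports inside a configured Django project):

def handler():
    pass

class FancyView(object):
    pass

print(get_name_from_obj(handler))      # e.g. '__main__.handler'   -- functions expose __name__
print(get_name_from_obj(FancyView()))  # e.g. '__main__.FancyView' -- instances fall back to their class
print(get_name_from_obj(42))           # 'int' -- int instances carry no __module__ attribute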
|
ringw/MetaOMR
|
metaomr_tests/eval_kanungo_est.py
|
Python
|
gpl-3.0
| 1,954
| 0.005629
|
import env
import numpy as np
import metaomr
import metaomr.kanungo as kan
from metaomr.page import Page
import glob
import pandas as pd
import itertools
import os.path
import sys
from datetime import datetime
from random import random, randint
IDEAL = [path for path in sorted(glob.glob('testset/modern/*.png'))
if 'nostaff' not in path]
def random_params():
if random() < 0.25:
nu = 0
else:
nu = random() * 0.05
if random() < 0.25:
a0 = a = 0
else:
a0 = random() * 0.2
a = 0.5 + random() * 2
if random() < 0.25:
b0 = b = 0
else:
b0 = random() * 0.2
b = 0.5 + random() * 2
k = randint(0, 4)
return nu, a0, a, b0, b, k
columns = pd.MultiIndex.from_product([['real', 'estimate'], 'nu a0 a b0 b k'.split()])
columns = columns.append(pd.MultiIndex.from_product([['estimate'],['stat','time','status','nfev']]))
cols = []
results = []
fun = 'ks'
method = 'Nelder-Mead'
for image in IDEAL:
name = os.path.basename(image).split('.')[0]
page, = metaomr.open(image)
kimg = kan.KanungoImage(kan.normalized_page(page)[0])
for i in xrange(3):
params = random_params()
synth = Page(kimg.degrade(params))
synth.staff_dist = 8
for maxfev in [25, 50]:
start = datetime.now()
est_params = kan.est_parameters(synth, test_fn=kan.test_hists_ks if fun == 'ks' else kan.test_hists_chisq, opt_method=method, maxfev=maxfev)
end = datetime.now()
cols.append((name, fun, maxfev, i))
results.append(list(params) + list(est_params.x) + [est_params.fun, (end - start).total_seconds(), est_params.status, est_params.nfev])
sys.stderr.write('.')
res = pd.DataFrame(results, columns=columns)
res.index = pd.MultiIndex.from_tuples(cols)
res.index.names = 'doc test maxfev num'.split()
res.to_csv('kanungo_eval.csv')
sys.stderr.write('\n')
|
crisis-economics/CRISIS
|
CRISIS/test/eu/crisis_economics/abm/household/market.py
|
Python
|
gpl-3.0
| 1,926
| 0.021807
|
#!/bin/env/python
#
# This file is part of CRISIS, an economics simulator.
#
# Copyright (C) 2015 John Kieran Phillips
#
# CRISIS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CRISIS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CRISIS. If not, see <http://www.gnu.org/licenses/>.
import math
class Market:
def __init__(self, buyer):
        self.buyer = buyer
self.askPrices = {}
self.bidPrices = {}
self.supply = {}
self.demand = {}
def setAskPrice(self, type, price):
self.askPrices[type] = price
def setBidPrice(self, type, price):
self.bidPrices[type] = price
def setSupply(self, type, supply):
self.supply[type] = supply
def setDemand(self, type, demand):
self.demand[type] = demand
def process(self, type):
demandFor = (self.demand[type] if self.demand.has_key(type) else 0)
supplyOf = self.supply[type] if self.supply.has_key(type) else 0
askPrice = self.askPrices[type] if self.askPrices.has_key(type) else 0
bidPrice = self.bidPrices[type] if self.bidPrices.has_key(type) else 0
price = (bidPrice + askPrice) / 2.
trade = min(demandFor, supplyOf)
self.buyer.credit(price * trade)
self.buyer.addGoods(type, trade)
print("bought {} units of goods, total cost {}".format(price * trade, trade))
def clear():
self.askPrices.clear()
self.bidPrices.clear()
self.supply.clear()
self.demand.clear()
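A toy interaction (the buyer class below is invented for illustration and is not part of CRISIS; like the module itself it targets Python 2 because of dict.has_key):

class ToyBuyer:
    def __init__(self):
        self.spent = 0.0
        self.goods = {}
    def credit(self, amount):
        self.spent += amount
    def addGoods(self, type, amount):
        self.goods[type] = self.goods.get(type, 0) + amount

market = Market(ToyBuyer())
market.setAskPrice("grain", 12.0)
market.setBidPrice("grain", 10.0)
market.setSupply("grain", 50)
market.setDemand("grain", 30)
market.process("grain")  # trades min(50, 30) = 30 units at the mid price (10 + 12) / 2 = 11.0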
|
datacommonsorg/data
|
scripts/eurostat/regional_statistics_by_nuts/population_density/csv_template_mcf_compatibility_checker.py
|
Python
|
apache-2.0
| 976
| 0
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
def test_col_names(cleaned_csv, tmcf):
"""Check if all the column names specified in the template mcf
is found in the CSV file."""
cols = pd.read_csv(cleaned_csv, nrows=0).columns
with open(tmcf, "r") as file:
for line in file:
if " C:" in line:
col_name = line[:-1].split("->")[1]
assert col_name in cols
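A minimal self-check (file names and contents below are illustrative, not taken from the real import) exercises test_col_names with temporary files:

import os
import tempfile

csv_content = "observationAbout,value,observationDate\nnuts/AT1,100.5,2018\n"
tmcf_content = (
    "Node: E:pop_density->E0\n"
    "typeOf: dcs:StatVarObservation\n"
    "observationAbout: C:pop_density->observationAbout\n"
    "value: C:pop_density->value\n"
    "observationDate: C:pop_density->observationDate\n"
)

with tempfile.TemporaryDirectory() as tmp:
    csv_path = os.path.join(tmp, "cleaned.csv")
    tmcf_path = os.path.join(tmp, "pop_density.tmcf")
    with open(csv_path, "w") as f:
        f.write(csv_content)
    with open(tmcf_path, "w") as f:
        f.write(tmcf_content)
    test_col_names(csv_path, tmcf_path)  # passes silently when every " C:" column exists in the CSV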
|
C3BI-pasteur-fr/Galaxy-playbook
|
galaxy-pasteur/roles/galaxy_tools/files/install_tool_shed_tools.py
|
Python
|
gpl-2.0
| 28,649
| 0.001571
|
"""
A script to automate installation of tool repositories from a Galaxy Tool Shed
into an instance of Galaxy.
Galaxy instance details and the installed tools can be provided in one of three
ways:
1. In the YAML format via dedicated files (see ``tool_list.yaml.sample`` for a
sample of such a file)
2. On the command line as dedicated script options (see the usage help).
3. As a single composite parameter to the script. The parameter must be a
single, YAML-formatted string with the keys corresponding to the keys
available for use in the YAML formatted file (for example:
`--yaml_tool "{'owner': 'kellrott', 'tool_shed_url':
'https://testtoolshed.g2.bx.psu.edu', 'tool_panel_section_id':
'peak_calling', 'name': 'synapse_interface'}"`).
Only one of the methods can be used with each invocation of the script but if
more than one are provided, precedence will correspond to the order
of the items in the list above.
When installing tools, Galaxy expects any `tool_panel_section_id` provided when
installing a tool to already exist in the configuration. If the section
does not exist, the tool will be installed outside any section. See
`shed_tool_conf.xml.sample` in this directory for a sample of such file. Before
running this script to install the tools, make sure to place such file into
Galaxy's configuration directory and set Galaxy configuration option
`tool_config_file` to include it.
Usage:
python install_tool_shed_tools.py [-h]
Required libraries:
bioblend, pyyaml
"""
import datetime as dt
import logging
import time
import yaml
from argparse import ArgumentParser
from bioblend.galaxy import GalaxyInstance
from bioblend.galaxy.toolshed import ToolShedClient
from bioblend.toolshed import ToolShedInstance
from bioblend.galaxy.client import ConnectionError
# Omit (most of the) logging by external libraries
logging.getLogger('bioblend').setLevel(logging.ERROR)
logging.getLogger('requests').setLevel(logging.ERROR)
try:
logging.captureWarnings(True) # Capture HTTPS warngings from urllib3
except AttributeError:
pass
MTS = 'https://toolshed.g2.bx.psu.edu/' # Main Tool Shed
class ProgressConsoleHandler(logging.StreamHandler):
"""
A handler class which allows the cursor to stay on
one line for selected messages
"""
on_same_line = False
def emit(self, record):
try:
msg = self.format(record)
stream = self.stream
same_line = hasattr(record, 'same_line')
if self.on_same_line and not same_line:
stream.write('\r\n')
stream.write(msg)
if same_line:
stream.write('.')
self.on_same_line = True
else:
stream.write('\r\n')
self.on_same_line = False
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def _setup_global_logger():
formatter = logging.Formatter('%(asctime)s %(levelname)-5s - %(message)s')
progress = ProgressConsoleHandler()
file_handler = logging.FileHandler('/tmp/galaxy_tool_install.log')
console = logging.StreamHandler()
console.setFormatter(formatter)
logger = logging.getLogger('test')
logger.setLevel(logging.DEBUG)
logger.addHandler(progress)
logger.addHandler(file_handler)
return logger
def log_tool_install_error(tool, start, end, e, errored_tools):
"""
Log failed tool installations
"""
log.error("\t* Error installing a tool (after %s)! Name: %s," "owner: %s, "
"revision: %s, error: %s" % (tool['name'], str(end - start),
tool['owner'], tool['revision'],
e.body))
errored_tools.append({'name': tool['name'], 'owner': tool['owner'],
'revision': tool['revision'], 'error': e.body})
def log_tool_install_success(tool, start, end, installed_tools):
"""
    Log successful tool installation.
    Tools that finish in error still count as successful installs currently.
"""
installed_tools.append({'name': tool['name'], 'owner': tool['owner'],
'revision': tool['revision']})
log.debug("\tTool %s installed successfully (in %s) at revision %s" %
(tool['name'], str(end - start), tool['revision']))
def load_input_file(tool_list_file='tool_list.yaml'):
"""
Load YAML from the `tool_list_file` and return a dict with the content.
"""
with open(tool_list_file, 'r') as f:
tl = yaml.load(f)
return tl
def dump_to_yaml_file(content, file_name):
"""
Dump YAML-compatible `content` to `file_name`.
"""
with open(file_name, 'w') as f:
yaml.dump(content, f, default_flow_style=False)
def galaxy_instance(url=None, api_key=None):
"""
Get an instance of the `GalaxyInstance` object. If the arguments are not
provided, load the default values using `load_input_file` method.
"""
if not (url and api_key):
        tl = load_input_file()
url = tl['galaxy_instance']
        api_key = tl['api_key']
return GalaxyInstance(url, api_key)
def tool_shed_client(gi=None):
"""
Get an instance of the `ToolShedClient` on a given Galaxy instance. If no
value is provided for the `galaxy_instance`, use the default provided via
`load_input_file`.
"""
if not gi:
gi = galaxy_instance()
return ToolShedClient(gi)
def the_same_tool(tool_1_info, tool_2_info):
"""
Given two dicts containing info about tools, determine if they are the same
tool.
Each of the dicts must have the following keys: `name`, `owner`, and
(either `tool_shed` or `tool_shed_url`).
"""
t1ts = tool_1_info.get('tool_shed', tool_1_info.get('tool_shed_url', None))
t2ts = tool_2_info.get('tool_shed', tool_2_info.get('tool_shed_url', None))
if tool_1_info.get('name') == tool_2_info.get('name') and \
tool_1_info.get('owner') == tool_2_info.get('owner') and \
(t1ts in t2ts or t2ts in t1ts):
return True
return False
def installed_tool_revisions(gi=None, omit=None):
"""
Get a list of tool revisions installed from a Tool Shed on a Galaxy instance.
Included are all the tool revisions that were installed from a Tool
Shed and are available from `/api/tool_shed_repositories` url on the
given instance of Galaxy.
:type gi: GalaxyInstance object
:param gi: A GalaxyInstance object as retured by `galaxy_instance` method.
:type omit: list of strings
:param omit: A list of strings that, if found in a tool name, will result
in the tool not being included in the returned list.
:rtype: list of dicts
:return: Each dict in the returned list will have the following keys:
`name`, `owner`, `tool_shed_url`, `revisions`.
.. seealso:: this method returns a subset of data returned by
`installed_tools` function
"""
if not omit:
omit = []
tsc = tool_shed_client(gi)
installed_revisions_list = []
itl = tsc.get_repositories()
for it in itl:
if it['status'] == 'Installed':
skip = False
# Check if we already processed this tool and, if so, add the new
# revision to the existing list entry
for ir in installed_revisions_list:
if the_same_tool(it, ir):
ir['revisions'].append(it.get('changeset_revision', None))
skip = True
# Check if the repo name is contained in the 'omit' list
for o in omit:
if o in it['name']:
skip = True
# We have not processed this tool so create a list entry
if not skip:
ti = {'name': it['name'],
'owner': it['owner'],
'revisions': [it.get('changeset_revision', None)],
'tool_shed_url': 'https://' + it['tool_shed']}
installed_revisions_list.append(ti)
    return installed_revisions_list
|
tshi04/machine-learning-codes
|
deliberation_network/utils.py
|
Python
|
gpl-3.0
| 9,232
| 0.005199
|
import numpy as np
import torch
import time
from torch.autograd import Variable
'''
fast beam search
'''
def repackage_hidden(h):
"""Wraps hidden states in new Variables, to detach them from their history."""
if type(h) == Variable:
return Variable(h.data)
else:
return tuple(repackage_hidden(v) for v in h)
def tensor_transformer(seq0, batch_size, beam_size):
seq = seq0.unsqueeze(2)
seq = seq.repeat(1, 1, beam_size, 1)
seq = seq.contiguous().view(batch_size, beam_size*beam_size, seq.size(3))
return seq
'''
First beam search
'''
def fast_beam_search_1(
model_emb,
model_s2s,
src_text_rep,
vocab2id,
batch_size,
beam_size,
trg_len,
encoder_hy,
hidden_,
h_attn_new,
p_gen_new,
past_attn_new,
pt_idx
):
(h0_new, c0_new) = hidden_
beam_seq = Variable(torch.LongTensor(
batch_size, beam_size, trg_len+1).fill_(vocab2id['<pad>'])).cuda()
beam_seq[:, :, 0] = vocab2id['<s>']
beam_prb = torch.FloatTensor(batch_size, beam_size).fill_(0.0)
last_wd = Variable(torch.LongTensor(
batch_size, beam_size, 1).fill_(vocab2id['<s>'])).cuda()
beam_h_attn = Variable(torch.FloatTensor(
trg_len, batch_size, beam_size, h_attn_new.size(1)).fill_(0.0)).cuda()
for j in range(trg_len):
last_emb = model_emb(last_wd.view(-1, 1))
output_s2s, (h0, c0), h_attn, past_attn = model_s2s.forward_onestep_decoder1(
j,
last_emb,
(h0_new, c0_new),
h_attn_new,
encoder_hy,
p_gen_new,
past_attn_new,
pt_idx
)
p_gen_new.fill_(0.0)
(h0, c0) = repackage_hidden((h0, c0))
prob, wds = output_s2s.data.topk(k=beam_size)
prob = prob.view(batch_size, beam_size, prob.size(1), prob.size(2))
wds = wds.view(batch_size, beam_size, wds.size(1), wds.size(2))
if j == 0:
beam_prb = prob[:, 0, 0]
beam_seq[:, :, 1] = wds[:, 0, 0]
last_wd = Variable(wds[:, 0, 0].unsqueeze(2).clone()).cuda()
h0_new = h0
c0_new = c0
h_attn_new = h_attn
past_attn_new = past_attn
beam_h_attn[j] = h_attn_new.view(batch_size, beam_size, h_attn_new.size(-1))
continue
cand_seq = tensor_transformer(beam_seq, batch_size, beam_size)
cand_seq[:, :, j+1] = wds.squeeze(2).view(batch_size, -1)
cand_last_wd = wds.squeeze(2).view(batch_size, -1)
cand_prob = beam_prb.unsqueeze(1).repeat(1, beam_size, 1).transpose(1,2)
cand_prob += prob[:, :, 0]
cand_prob = cand_prob.contiguous().view(batch_size, beam_size*beam_size)
h0_new = h0_new.view(batch_size, beam_size, h0_new.size(-1))
c0_new = c0_new.view(batch_size, beam_size, c0_new.size(-1))
h_attn_new = h_attn_new.view(batch_size, beam_size, h_attn_new.size(-1))
past_attn_new = past_attn_new.view(batch_size, beam_size, past_attn_new.size(-1))
h0 = h0.view(batch_size, beam_size, h0.size(-1))
h0 = tensor_transformer(h0, batch_size, beam_size)
c0 = c0.view(batch_size, beam_size, c0.size(-1))
c0 = tensor_transformer(c0, batch_size, beam_size)
h_attn = h_attn.view(batch_size, beam_size, h_attn.size(-1))
h_attn = tensor_transformer(h_attn, batch_size, beam_size)
past_attn = past_attn.view(batch_size, beam_size, past_attn.size(-1))
past_attn = tensor_transformer(past_attn, batch_size, beam_size)
tmp_prb, tmp_idx = cand_prob.topk(k=beam_size, dim=1)
for x in range(batch_size):
for b in range(beam_size):
last_wd[x, b] = cand_last_wd[x, tmp_idx[x, b]]
beam_seq[x, b] = cand_seq[x, tmp_idx[x, b]]
beam_prb[x, b] = tmp_prb[x, b]
h0_new[x, b] = h0[x, tmp_idx[x, b]]
c0_new[x, b] = c0[x, tmp_idx[x, b]]
h_attn_new[x, b] = h_attn[x, tmp_idx[x, b]]
past_attn_new[x, b] = past_attn[x, tmp_idx[x, b]]
beam_h_attn[j] = h_attn_new
h0_new = h0_new.view(-1, h0_new.size(-1))
c0_new = c0_new.view(-1, c0_new.size(-1))
h_attn_new = h_attn_new.view(-1, h_attn_new.size(-1))
past_attn_new = past_attn_new.view(-1, past_attn_new.size(-1))
return beam_seq, beam_prb, beam_h_attn
'''
second beam search
'''
def fast_beam_search_2(
model_emb,
model_s2s,
src_text_rep,
vocab2id,
batch_size,
beam_size,
trg_len,
encoder_hy,
hidden_,
h_attn21_new,
h_attn22_new,
p_gen21_new,
past_attn21_new,
past_attn22_new,
beam_h_attn1,
pt_idx
):
(h0_new, c0_new) = hidden_
beam_seq = Variable(torch.LongTensor(batch_size, beam_size, trg_len+1).fill_(vocab2id['<pad>'])).cuda()
beam_seq[:, :, 0] = vocab2id['<s>']
beam_prb = torch.FloatTensor(batch_size, beam_size).fill_(0.0)
last_wd = Variable(torch.LongTensor(batch_size, beam_size, 1).fill_(vocab2id['<s>'])).cuda()
for j in range(trg_len):
last_emb = model_emb(last_wd.view(-1, 1))
output_s2s, (h0, c0), h_attn21, h_attn22, past_attn21, past_attn22 = model_s2s.forward_onestep_decoder2(
j,
last_emb,
(h0_new, c0_new),
h_attn21_new,
h_attn22_new,
encoder_hy,
p_gen21_new,
past_attn21_new,
past_attn22_new,
beam_h_attn1,
pt_idx
)
p_gen21_new.fill_(0.0)
(h0, c0) = repackage_hidden((h0, c0))
prob, wds = output_s2s.data.topk(k=beam_size)
prob = prob.view(batch_size, beam_size, prob.size(1), prob.size(2))
wds = wds.view(batch_size, beam_size, wds.size(1), wds.size(2))
if j == 0:
beam_prb = prob[:, 0, 0]
beam_seq[:, :, 1] = wds[:, 0, 0]
last_wd = Variable(wds[:, 0, 0].unsqueeze(2).clone()).cuda()
h0_new = h0
c0_new = c0
h_attn21_new = h_attn21
h_attn22_new = h_attn22
past_attn21_new = past_attn21
past_attn22_new = past_attn22
continue
cand_seq = tensor_transformer(beam_seq, batch_size, beam_size)
cand_seq[:, :, j+1] = wds.squeeze(2).view(batch_size, -1)
cand_last_wd = wds.squeeze(2).view(batch_size, -1)
cand_prob = beam_prb.unsqueeze(1).repeat(1, beam_size, 1).transpose(1,2)
cand_prob += prob[:, :, 0]
cand_prob = cand_prob.contiguous().view(batch_size, beam_size*beam_size)
h0_new = h0_new.view(batch_size, beam_size, h0_new.size(-1))
c0_new = c0_new.view(batch_size, beam_size, c0_new.size(-1))
h_attn21_new = h_attn21_new.view(batch_size, beam_size, h_attn21_new.size(-1))
h_attn22_new = h_attn22_new.view(batch_size, beam_size, h_attn22_new.size(-1))
past_attn21_new = past_attn21_new.view(batch_size, beam_size, past_attn21_new.size(-1))
past_attn22_new = past_attn22_new.view(batch_size, beam_size, past_attn22_new.size(-1))
h0 = h0.view(batch_size, beam_size, h0.size(-1))
h0 = tensor_transformer(h0, batch_size, beam_size)
c0 = c0.view(batch_size, beam_size, c0.size(-1))
c0 = tensor_transformer(c0, batch_size, beam_size)
h_attn21 = h_attn21.view(batch_size, beam_size, h_attn21.size(-1))
h_attn21 = tensor_transformer(h_attn21, batch_size, beam_size)
h_attn22 = h_attn22.view(batch_size, beam_size, h_attn22.size(-1))
h_attn22 = tensor_transformer(h_attn22, batch_size, beam_size)
past_attn21 = past_attn21.view(batch_size, beam_size, past_attn21.size(-1))
past_attn21 = tensor_transformer(past_attn21, batch_size, beam_size)
past_attn22 = past_attn22.view(batch_size, beam_size, past_attn22.size(-1))
past_attn22 = tensor_transformer(past_attn22, batch_size, beam_size)
tmp_prb, tmp_idx = cand_prob.topk(k=beam_size, dim=1)
for x in range
|
dev1x-org/python-example
|
lib/model/task.py
|
Python
|
mit
| 1,058
| 0.006616
|
#coding:utf-8
"""
"""
class Task(object):
def __init__(self, id_, project_name, title, serial_no, timelimit, timestamp, note, status):
self.id_ = id_
self.project_name = project_name
self.title = title
self.serial_no = serial_no
self.timelimit = timelimit
self.timestamp = timestamp
self.note = note
self.status = status
def __str__(self):
values = (self.id_, self.project_name, self.title, self.serial_no, self.timelimit, self.timestamp, self.note, self.status)
return "[%s, %s, %s, %s, %s, %s, %s, %s]" % values
def get_id(self):
return self.id_
def get_project_name(self):
return self.project_name
def get_serial_no(self):
return self.serial_no
def get_title(self):
return self.title
def get_timelimit(self):
return self.timelimit
def get_status(self):
        return self.status
def get_note(self):
return self.note
def get_created(self):
return self.timestamp
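A quick illustration of the string form (all field values below are made up):

t = Task(1, "demo-project", "Write report", "T-0001",
         "2016-01-31", "2016-01-10 09:00", "first draft", "open")
print(t)               # [1, demo-project, Write report, T-0001, 2016-01-31, 2016-01-10 09:00, first draft, open]
print(t.get_status())  # open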
|
hookehu/utility
|
editors/studio/core/logic_center.py
|
Python
|
gpl-2.0
| 88
| 0.056818
|
# -*- coding:utf-8 -*-
import wx
if evt_handler == None:
evt_handler = wx.EvtHandler()
|
kidburglar/audible-activator
|
unused/extract-activation-bytes.py
|
Python
|
gpl-3.0
| 1,379
| 0.00145
|
#!/usr/bin/env python
import traceback
import binascii
import sys
if __name__ == "__main__":
if len(sys.argv) < 2:
sys.stderr.write("Usage: %s <licenseForCustomerToken file>\n"
% sys.argv[0])
sys.exit(-1)
try:
data = open(sys.argv[1], "rb").read()
if (b"BAD_LOGIN" in data or b"Whoops" in data) or \
b"group_id" not in data:
print(data)
print("\nActivation failed! ;(")
sys.exit(-1)
k = data.rfind(b"group_id")
l = data[k:].find(b")")
keys = data[k + l + 1 + 1:]
output_keys = []
# each key is of 70 bytes
for i in range(0, 8):
            key = keys[i * 70 + i:(i + 1) * 70 + i]
h = binascii.hexlify(bytes(key))
h = [h[i:i+2] for i in range(0, len(h), 2)]
h = b",".join(h)
output_keys.append(h)
except SystemExit as e:
sys.exit(e)
except:
traceback.print_exc()
# only 4 bytes of output_keys[0] are necessary for decryption! ;)
activation_bytes = output_keys[0].replace(b",", b"")[0:8]
# get the endianness right (reverse string in pairs of 2)
activation_bytes = "".join(reversed([activation_bytes[i:i+2] for i in
range(0, len(activation_bytes), 2)]))
print(activation_bytes)
|
xeroc/uptick
|
uptick/htlc.py
|
Python
|
mit
| 3,974
| 0.001761
|
import click
from bitshares.amount import Amount
from .decorators import online, unlock
from .main import main, config
from .ui import print_tx
@main.group()
def htlc():
pass
@htlc.command()
@click.argument("to")
@click.argument("amount")
@click.argument("symbol")
@click.option(
"--type", type=click.Choice(["ripemd160", "sha1", "sha256", "hash160"]),
default="sha256", prompt="Hash algorithm", show_default=True,
help="Hash algorithm"
)
@click.option(
"--hash", prompt="Hash (hex string)", hide_input=False, confirmation_prompt=True,
help="Hash value as string of hex digits"
)
@click.option(
"--expiration", default=60 * 60, prompt="Expiration (seconds)",
help="Duration of HTLC in seconds"
)
@click.option(
"--length", help="Length of PREIMAGE (not of hash). Generally OK " +
"to leave this as 0 for unconstrained.", default=0, show_default=True
)
@click.option("--account")
@click.pass_context
@online
@unlock
def create(ctx, to, amount, symbol, type, hash, expiration, length, account):
""" Create an HTLC contract from a hash and lock-time
"""
ctx.blockchain.blocking = True
tx = ctx.blockchain.htlc_create(
Amount(amount, symbol),
to,
hash_type=type,
hash_hex=hash,
expiration=expiration,
account=account,
preimage_length=length
)
tx.pop("trx", None)
print_tx(tx)
results = tx.get("operation_results", {})
if results:
htlc_id = results[0][1]
print("Your htlc_id is: {}".format(htlc_id))
@htlc.command()
@click.argument("to")
@click.argument("amount")
@click.argument("symbol")
@click.option(
"--type", type=click.Choice(["ripemd160", "sha1", "sha256", "hash160"]),
default="sha256", prompt="Hash algorithm", show_default=True,
help="Hash algorithm"
)
@click.option(
"--secret", prompt="Redeem Password", hide_input=True, confirmation_prompt=True,
help="Ascii-text preimage"
)
@click.option("--expiration", default=60 * 60, prompt="Expiration (seconds)",
help="Duration of HTLC in seconds"
)
@click.option(
"--length", help="Length of PREIMAGE (not of hash). Generally OK " +
"to leave this as 0 for unrestricted. If non-zero, must match length " +
"of provided preimage", default=0, show_default=True
)
@click.option("--account")
@click.pass_context
@online
@unlock
def create_from_secret(ctx, to, amount, symbol, type, secret, expiration,
length, account):
"""Create an HTLC contract from a secret preimage
    If you are the party choosing the preimage, this version of
    htlc_create will compute the hash for you from the supplied
preimage, and create the HTLC with the resulting hash.
"""
if length != 0 and length != len(secret):
raise ValueError("Length must be zero or agree with actual preimage length")
ctx.blockchain.blocking = True
tx = ctx.blockchain.htlc_create(
Amount(amount, symbol),
to,
preimage=secret,
preimage_length=length,
hash_type=type,
expiration=expiration,
account=account,
)
tx.pop("trx", None)
print_tx(tx)
results = tx.get("operation_results", {})
if results:
htlc_id = results[0][1]
print("Your htlc_id is: {}".format(htlc_id))
@htlc.command()
@click.argument("htlc_id")
@click.option(
"--secret", prompt="Redeem Password", hide_input=False, confirmation_prompt=False,
type=str, help="The preimage, as ascii-text, unless --hex is passed"
)
@click.option(
"--hex", is_flag=True, help="Interpret preimage as hex-encoded bytes"
)
@click.option("--account")
@click.pass_context
@online
@unlock
def redeem(ctx, htlc_id, secret, hex, account):
""" Redeem an HTLC contract by providing preimage
"""
encoding = "hex" if hex else "utf-8"
print_tx(ctx.blockchain.htlc_redeem(htlc_id, secret, encoding=encoding,
account=account)
)
|
bioasp/shogen
|
setup.py
|
Python
|
gpl-3.0
| 1,416
| 0.029661
|
# Copyright (c) 2014, Sven Thiele <sthiele78@gmail.com>
#
# This file is part of shogen.
#
# shogen is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# shogen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with shogen. If not, see <http://www.gnu.org/licenses/>.
# -*- coding: utf-8 -*-
from setuptools import setup
setup(
name = 'shogen',
version = '2.0.0',
url = 'http://pypi.python.org/pypi/shogen/',
license = 'GPLv3+',
    description = 'Finding shortest genome segments that regulate metabolic pathways',
long_description = open('README.rst').read(),
author = 'Sven Thiele',
author_email = 'sthiele78@gmail.com',
packages = ['__shogen__'],
package_dir = {'__shogen__' : 'src'},
package_data = {'__shogen__' : ['encodings/*.lp']},
scripts = ['shogen.py'],
install_requires = ['pyasp == 1.4' ]
)
|
bairdj/beveridge
|
src/scrapy/afltables/afltables/common.py
|
Python
|
mit
| 1,563
| 0.007678
|
team_mapping = {
"SY": "Sydney",
"WB": "Western Bulldogs",
"WC": "West Coast",
"HW": "Hawthorn",
"GE": "Geelong",
"FR": "Fremantle",
"RI": "Richmond",
"CW": "Collingwood",
"CA": "Carlton",
"GW": "Greater Western Sydney",
"AD": "Adelaide",
"GC": "Gold Coast",
"ES": "Essendon",
"ME": "Melbourne",
"NM": "North Melbourne",
"PA": "Port Adelaide",
"BL": "Brisbane Lions",
"SK": "St Kilda"
}
def get_team_name(code):
return team_mapping[code]
def get_team_code(full_name):
for code, name in team_mapping.items():
if name == full_name:
return code
return full_name
def get_match_description(response):
    match_container = response.xpath("//td[@colspan = '5' and @align = 'center']")[0]
match_details = match_container.xpath(".//text()").extract()
return {
"round": match_details[1],
"venue": match_details[3],
"date": match_details[6],
"attendance": match_details[8],
"homeTeam": response.xpath("(//a[contains(@href, 'teams/')])[1]/text()").extract_first(),
"awayTeam": response.xpath("(//a[contains(@href, 'teams/')])[2]/text()").extract_first(),
"homeScore": int(response.xpath("//table[1]/tr[2]/td[5]/b/text()").extract_first()),
"awayScore": int(response.xpath("//table[1]/tr[3]/td[5]/b/text()").extract_first())
}
def get_match_urls(response):
for match in response.xpath("//a[contains(@href, 'stats/games/')]/@href").extract():
yield response.urljoin(match)
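The two mapping helpers work in both directions; for example (illustrative only):

print(get_team_name("SY"))       # 'Sydney'
print(get_team_code("Geelong"))  # 'GE'
print(get_team_code("Fitzroy"))  # 'Fitzroy' -- unknown names fall through unchanged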
|
project-zerus/blade
|
src/blade/configparse.py
|
Python
|
bsd-3-clause
| 11,273
| 0.003193
|
# Copyright (c) 2011 Tencent Inc.
# All rights reserved.
#
# Author: Michaelpeng <michaelpeng@tencent.com>
# Date: January 09, 2012
"""
This is the configuration parse module which parses
the BLADE_ROOT as a configuration file.
"""
import os
import sys
import console
from blade_util import var_to_list
from cc_targets import HEAP_CHECK_VALUES
from proto_library_target import ProtocPlugin
# Global config object
blade_config = None
def config_items(**kwargs):
"""Used in config functions for config file, to construct a appended
items dict, and then make syntax more pretty
"""
return kwargs
class BladeConfig(object):
"""BladeConfig. A configuration parser class. """
def __init__(self, current_source_dir):
self.current_source_dir = current_source_dir
self.current_file_name = ''
self.configs = {
'global_config' : {
'build_path_template': 'build${m}_${profile}',
'duplicated_source_action': 'warning', # Can be 'warning', 'error', 'none'
'test_timeout': None,
},
'cc_test_config': {
'dynamic_link': False,
'heap_check': '',
'gperftools_libs': [],
'gperftools_debug_libs': [],
'gtest_libs': [],
'gtest_main_libs': [],
'pprof_path': '',
},
'cc_binary_config': {
'extra_libs': [],
'run_lib_paths' : [],
},
'distcc_config': {
'enabled': False
},
'link_config': {
'link_on_tmp': False,
'enable_dccc': False
},
'java_config': {
'version': '1.6',
'source_version': '',
'target_version': '',
'maven': 'mvn',
'maven_central': '',
            'warnings':['-Werror', '-Xlint:all'],
'source_encoding': None,
'java_home':''
},
'java_binary_config': {
'one_jar_boot_jar' : '',
},
'java_test_config': {
'junit_libs' : [],
'jacoco_home' : '',
'coverage_reporter' : '',
},
'scala_config': {
'scala_home' : '',
'target_platform' : '',
'warnings' : '',
'source_encoding' : None,
},
'scala_test_config': {
'scalatest_libs' : '',
},
'go_config' : {
'go' : '',
'go_home' : '', # GOPATH
},
'thrift_config': {
'thrift': 'thrift',
'thrift_libs': [],
'thrift_incs': [],
},
'fbthrift_config': {
'fbthrift1': 'thrift1',
'fbthrift2': 'thrift2',
'fbthrift_libs': [],
'fbthrift_incs': [],
},
'proto_library_config': {
'protoc': 'thirdparty/protobuf/bin/protoc',
'protoc_java': '',
'protobuf_libs': [],
'protobuf_path': '',
'protobuf_incs': [],
'protobuf_php_path': '',
'protoc_php_plugin': '',
'protobuf_java_libs' : [],
'protoc_go_plugin': '',
# All the generated go source files will be placed
# into $GOPATH/src/protobuf_go_path
'protobuf_go_path': '',
},
'protoc_plugin_config' : {
},
'cc_config': {
'extra_incs': [],
'cppflags': [],
'cflags': [],
'cxxflags': [],
'linkflags': [],
'c_warnings': [],
'cxx_warnings': [],
'warnings': [],
'cpplint': 'cpplint.py',
'optimize': [],
'benchmark_libs': [],
'benchmark_main_libs': [],
'securecc' : None,
},
'cc_library_config': {
'generate_dynamic' : None,
# Options passed to ar/ranlib to control how
# the archive is created, such as, let ar operate
# in deterministic mode discarding timestamps
'arflags': [],
'ranlibflags': [],
}
}
def _try_parse_file(self, filename):
"""load the configuration file and parse. """
try:
self.current_file_name = filename
if os.path.exists(filename):
execfile(filename)
except SystemExit:
console.error_exit('Parse error in config file %s, exit...' % filename)
def parse(self):
"""load the configuration file and parse. """
self._try_parse_file(os.path.join(os.path.dirname(sys.argv[0]), 'blade.conf'))
self._try_parse_file(os.path.expanduser('~/.bladerc'))
self._try_parse_file(os.path.join(self.current_source_dir, 'BLADE_ROOT'))
def update_config(self, section_name, append, user_config):
"""update config section by name. """
config = self.configs.get(section_name, {})
if config:
if append:
self._append_config(section_name, config, append)
self._replace_config(section_name, config, user_config)
else:
console.error('%s: %s: unknown config section name' % (
self.current_file_name, section_name))
def _append_config(self, section_name, config, append):
"""Append config section items"""
if not isinstance(append, dict):
console.error('%s: %s: append must be a dict' %
(self.current_file_name, section_name))
else:
for k in append:
if k in config:
if isinstance(config[k], list):
config[k] += var_to_list(append[k])
else:
console.warning('%s: %s: config item %s is not a list' %
(self.current_file_name, section_name, k))
else:
console.warning('%s: %s: unknown config item name: %s' %
(self.current_file_name, section_name, k))
def _replace_config(self, section_name, config, user_config):
"""Replace config section items"""
unknown_keys = []
for k in user_config:
if k in config:
if isinstance(config[k], list):
user_config[k] = var_to_list(user_config[k])
else:
console.warning('%s: %s: unknown config item name: %s' %
(self.current_file_name, section_name, k))
unknown_keys.append(k)
for k in unknown_keys:
del user_config[k]
config.update(user_config)
def get_config(self, section_name):
"""get config section, returns default values if not set """
return self.configs.get(section_name, {})
def cc_test_config(append=None, **kwargs):
"""cc_test_config section. """
heap_check = kwargs.get('heap_check')
if heap_check is not None and heap_check not in HEAP_CHECK_VALUES:
console.error_exit('cc_test_config: heap_check can only be in %s' %
HEAP_CHECK_VALUES)
blade_config.update_config('cc_test_config', append, kwargs)
def cc_binary_config(append=None, **kwargs):
"""cc_binary_config section. """
blade_config.update_config('cc_binary_config', append, kwargs)
def cc_library_config(append=None, **kwargs):
"""cc_library_config section. """
blade_config.update_config('cc_library_config', append, kwargs)
__DUPLICATED_SOURCE_ACTION_VALUES = set(['warning', 'error', 'none', None])
def global_config(append=None, **kwargs):
"""global_config section. """
duplicated_source_action = kwargs.get('duplicated_source_action
|
gmathers/iii-addons
|
mrp_custom/__openerp__.py
|
Python
|
agpl-3.0
| 1,656
| 0.004227
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'MRP-Custom Module',
'version': '1.1',
'category': 'mrp_repair',
# 'sequence': 19,
# 'summary': 'Purchase Orders, Receptions, Supplier Invoices',
'description': """
For customized Partner & product screen with button link to Repair Order.
""",
'author': '4devnet.com',
'website': 'http://www.4devnet.com',
    # 'images' : ['images/purchase_order.jpeg', 'images/purchase_analysis.jpeg', 'images/request_for_quotation.jpeg'],
    'depends': ['product','base','mrp_repair'],
'data': [
#'abc_report.xml',
'mrp_custom.xml'
],
'installable': True,
'auto_install': False,
'application': True,
}
akrherz/pyWWA | parsers/pywwa/workflows/afos_dump.py | Python | mit | 2,891 | 0
"""AFOS Database Workflow."""
# 3rd Party
from twisted.internet import reactor
from txyam.client import YamClient
from pyiem.util import LOG
from pyiem.nws import product
# Local
from pywwa import common
from pywwa.ldm import bridge
from pywwa.database import get_database
DBPOOL = get_database("afos", cp_max=5)
MEMCACHE_EXCLUDE = [
"RR1",
"RR2",
"RR3",
"RR4",
"RR5",
"RR6",
"RR7",
"RR8",
"RR9",
"ROB",
"HML",
]
MEMCACHE_CLIENT = YamClient(reactor, ["tcp:iem-memcached3:11211"])
MEMCACHE_CLIENT.connect()
def process_data(data):
"""Process the product"""
defer = DBPOOL.runInteraction(real_parser, data)
defer.addCallback(write_memcache)
defer.addErrback(common.email_error, data)
defer.addErrback(LOG.error)
def write_memcache(nws):
"""write our TextProduct to memcached"""
if nws is None:
return
# 10 minutes should be enough time
LOG.debug("writing %s to memcache", nws.get_product_id())
df = MEMCACHE_CLIENT.set(
nws.get_product_id().encode("utf-8"),
nws.unixtext.replace("\001\n", "").encode("utf-8"),
expireTime=600,
)
df.addErrback(LOG.error)
def real_parser(txn, buf):
"""Actually do something with the buffer, please"""
if buf.strip() == "":
return None
utcnow = common.utcnow()
nws = product.TextProduct(buf, utcnow=utcnow, parse_segments=False)
# When we are in realtime processing, do not consider old data, typically
# when a WFO fails to update the date in their MND
if not common.replace_enabled() and (
(utcnow - nws.valid).days > 180 or (utcnow - nws.valid).days < -180
):
raise Exception(f"Very Latent Product! {nws.valid}")
if nws.warnings:
common.email_error("\n".join(nws.warnings), buf)
if nws.afos is None:
if nws.source[0] not in ["K", "P"]:
return None
raise Exception("TextProduct.afos is null")
    if common.replace_enabled():
args = [nws.afos.strip(), nws.source, nws.valid]
bbb = ""
if nws.bbb:
bbb = " and bbb = %s "
args.append(nws.bbb)
txn.execute(
"DELETE from products where pil = %s and source = %s and "
f"entered = %s {bbb}",
args,
)
LOG.info("Removed %s rows for %s", txn.rowcount, nws.get_product_id())
txn.execute(
"INSERT into products (pil, data, entered, "
"source, wmo, bbb) VALUES(%s, %s, %s, %s, %s, %s)",
(nws.afos.strip(), nws.text, nws.valid, nws.source, nws.wmo, nws.bbb),
)
if nws.afos[:3] in MEMCACHE_EXCLUDE:
return None
return nws
def main():
"""Fire up our workflow."""
common.main(with_jabber=False)
bridge(process_data)
reactor.run() # @UndefinedVariable
# See how we are called.
if __name__ == "__main__":
main()
camilonova/sentry | src/sentry/models/file.py | Python | bsd-3-clause | 3,050 | 0.000328
"""
sentry.models.file
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2015 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from django.conf import settings
from django.core.files.storage import get_storage_class
from django.db import models
from django.utils import timezone
from hashlib import md5
from uuid import uuid4
from sentry.db.models import (
BoundedPositiveIntegerField, GzippedDictField, Model
)
ONE_DAY = 60 * 60 * 24
class File(Model):
name = models.CharField(max_length=128)
storage = models.CharField(max_length=128, null=True)
storage_options = GzippedDictField()
path = models.TextField(null=True)
type = models.CharField(max_length=64)
size = BoundedPositiveIntegerField(null=True)
checksum = models.CharField(max_length=32, null=True)
timestamp = models.DateTimeField(default=timezone.now, db_index=True)
class Meta:
unique_together = (('name', 'checksum'),)
app_label = 'sentry'
db_table = 'sentry_file'
def delete(self, *args, **kwargs):
if self.path:
self.deletefile(commit=False)
super(File, self).delete(*args, **kwargs)
def generate_unique_path(self):
pieces = self.type.split('.')
pieces.extend(map(str, divmod(int(self.timestamp.strftime('%s')), ONE_DAY)))
pieces.append('%s-%s' % (uuid4().hex, self.name))
return '/'.join(pieces)
def get_storage(self):
backend = self.storage
options = self.storage_options
        storage = get_storage_class(backend)
return storage(**options)
def deletefile(self, commit=False):
assert self.path
storage = self.get_storage()
storage.delete(self.path)
self.path = None
if commit:
self.save()
def putfile(self, fileobj, commit=True):
"""
Upload this given File's contents.
A file's content is idempotent and you may not re-save a given file.
>>> my_file = File(name='app.dsym', type='objc.dsym')
>>> my_file.putfile(fileobj, commit=False)
>>> my_file.save()
"""
assert not self.path
self.path = self.generate_unique_path()
self.storage = settings.SENTRY_FILESTORE
self.storage_options = settings.SENTRY_FILESTORE_OPTIONS
checksum = md5('')
for chunk in fileobj.chunks():
checksum.update(chunk)
self.checksum = checksum.hexdigest()
storage = self.get_storage()
storage.save(self.path, fileobj)
if commit:
self.save()
def getfile(self):
"""
Return a file-like object for this File's content.
>>> fileobj = my_file.getfile()
>>> with open('/tmp/localfile', 'wb') as fp:
>>> for chunk in fileobj.chunks():
>>> fp.write(chunk)
"""
assert self.path
storage = self.get_storage()
return storage.open(self.path)
bcoca/ansible-modules-extras | database/misc/redis.py | Python | gpl-3.0 | 10,653 | 0.00169
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: redis
short_description: Various redis commands, slave and flush
description:
- Unified utility to interact with redis instances.
'slave' sets a redis instance in slave or master mode.
'flush' flushes all the instance or a specified db.
'config' (new in 1.6), ensures a configuration setting on an instance.
version_added: "1.3"
options:
command:
description:
- The selected redis command
required: true
default: null
choices: [ "slave", "flush", "config" ]
login_password:
description:
- The password used to authenticate with (usually not used)
required: false
default: null
login_host:
description:
      - The host running the database
required: false
default: localhost
login_port:
description:
- The port to connect to
required: false
default: 6379
master_host:
description:
- The host of the master instance [slave command]
required: false
default: null
master_port:
description:
- The port of the master instance [slave command]
    required: false
default: null
slave_mode:
description:
- the mode of the redis instance [slave command]
required: false
default: slave
choices: [ "master", "slave" ]
db:
description:
- The database to flush (used in db mode) [flush command]
required: false
default: null
flush_mode:
description:
- Type of flush (all the dbs in a redis instance or a specific one)
[flush command]
required: false
default: all
choices: [ "all", "db" ]
name:
version_added: 1.6
description:
- A redis config key.
required: false
default: null
value:
version_added: 1.6
description:
- A redis config value.
required: false
default: null
notes:
- Requires the redis-py Python package on the remote host. You can
install it with pip (pip install redis) or with a package manager.
https://github.com/andymccurdy/redis-py
- If the redis master instance we are making slave of is password protected
this needs to be in the redis.conf in the masterauth variable
requirements: [ redis ]
author: "Xabier Larrakoetxea (@slok)"
'''
EXAMPLES = '''
# Set local redis instance to be slave of melee.island on port 6377
- redis: command=slave master_host=melee.island master_port=6377
# Deactivate slave mode
- redis: command=slave slave_mode=master
# Flush all the redis db
- redis: command=flush flush_mode=all
# Flush only one db in a redis instance
- redis: command=flush db=1 flush_mode=db
# Configure local redis to have 10000 max clients
- redis: command=config name=maxclients value=10000
# Configure local redis to have lua time limit of 100 ms
- redis: command=config name=lua-time-limit value=100
'''
try:
import redis
except ImportError:
redis_found = False
else:
redis_found = True
# ===========================================
# Redis module specific support methods.
#
def set_slave_mode(client, master_host, master_port):
try:
return client.slaveof(master_host, master_port)
except Exception:
return False
def set_master_mode(client):
try:
return client.slaveof()
except Exception:
return False
def flush(client, db=None):
try:
if type(db) != int:
return client.flushall()
else:
# The passed client has been connected to the database already
return client.flushdb()
except Exception:
return False
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec = dict(
command=dict(default=None, choices=['slave', 'flush', 'config']),
login_password=dict(default=None, no_log=True),
login_host=dict(default='localhost'),
login_port=dict(default=6379, type='int'),
master_host=dict(default=None),
master_port=dict(default=None, type='int'),
slave_mode=dict(default='slave', choices=['master', 'slave']),
db=dict(default=None, type='int'),
flush_mode=dict(default='all', choices=['all', 'db']),
name=dict(default=None),
value=dict(default=None)
),
supports_check_mode = True
)
if not redis_found:
module.fail_json(msg="python redis module is required")
login_password = module.params['login_password']
login_host = module.params['login_host']
login_port = module.params['login_port']
command = module.params['command']
# Slave Command section -----------
if command == "slave":
master_host = module.params['master_host']
master_port = module.params['master_port']
mode = module.params['slave_mode']
#Check if we have all the data
if mode == "slave": # Only need data if we want to be slave
if not master_host:
module.fail_json(
msg='In slave mode master host must be provided')
if not master_port:
module.fail_json(
msg='In slave mode master port must be provided')
#Connect and check
r = redis.StrictRedis(host=login_host,
port=login_port,
password=login_password)
try:
r.ping()
except Exception:
e = get_exception()
module.fail_json(msg="unable to connect to database: %s" % e)
#Check if we are already in the mode that we want
info = r.info()
if mode == "master" and info["role"] == "master":
module.exit_json(changed=False, mode=mode)
elif mode == "slave" and\
info["role"] == "slave" and\
info["master_host"] == master_host and\
info["master_port"] == master_port:
status = {
'status': mode,
'master_host': master_host,
'master_port': master_port,
}
module.exit_json(changed=False, mode=status)
else:
# Do the stuff
# (Check Check_mode before commands so the commands aren't evaluated
# if not necessary)
if mode == "slave":
if module.check_mode or\
set_slave_mode(r, master_host, master_port):
info = r.info()
status = {
'status': mode,
'master_host': master_host,
'master_port': master_port,
}
module.exit_json(changed=True, mode=status)
else:
module.fail_json(msg='Unable to set slave mode')
else:
if module.check_mode or set_master_mode(r):
module.exit_json(changed=True, mode=mode)
else:
module.fail_json(msg='Unable to set master mode')
# flush Command section -----------
elif command == "flush":
db = module.params['db']
        mode = module.params['flush_mode']
Antikythera/hoot | Application/central_service/settings.py | Python | gpl-2.0 | 3,154 | 0
"""
Django settings for central_service project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PROJECT_PATH = os.path.dirname(os.path.realpath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'd$^6$7ybljkbz@b#7j&4cz_46dhe$=uiqnxuz+h3yoyj6u$$fk'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
# Application definition
INSTALLED_APPS = (
'django_admin_bootstrapped',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'services',
'service_pages',
'rest_framework',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'central_service.urls'
WSGI_APPLICATION = 'central_service.wsgi.application'
# Templates
TEMPLATE_DIRS = (
os.path.join(PROJECT_PATH, 'templates'),
)
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Cache
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
# Used in production to define where collectstatic stores stuff
STATIC_ROOT = os.path.join(PROJECT_PATH, '../static')
ADMIN_MEDIA_PREFIX = '/static/admin/'
# STATICFILES_FINDERS = (
# 'django.contrib.staticfiles.finders.FileSystemFinder',
# 'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# )
# Used in development to force django to serve static files
STATICFILES_DIRS = [
os.path.join(PROJECT_PATH, "static"),
]
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/'
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAdminUser',
]
}
charanpald/wallhack | wallhack/clusterexp/LaplacianExp.py | Python | gpl-3.0 | 2,822 | 0.013466
"""
Observe the effect in the perturbations of Laplacians
"""
import sys
import logging
import numpy
import scipy
import itertools
import copy
import matplotlib.pyplot as plt
from apgl.graph import *
from sandbox.util.PathDefaults import PathDefaults
from sandbox.misc.IterativeSpectralClustering import IterativeSpectralClustering
from apgl.graph.GraphUtils import GraphUtils
from apgl.generator.SmallWorldGenerator import SmallWorldGenerator
from apgl.generator.ErdosRenyiGenerator import ErdosRenyiGenerator
from sandbox.util.Util import Util
from wallhack.clusterexp.BoundGraphIterator import BoundGraphIterator
numpy.random.seed(21)
#numpy.seterr("raise")
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
numpy.set_printoptions(suppress=True, linewidth=200, precision=3)
k1 = 3
k2 = 3
logging.debug("k=" + str(k1))
numRepetitions = 50
numGraphs = 80
saveResults = False
resultsDir = PathDefaults.getOutputDir() + "cluster/"
fileName = resultsDir + "ErrorBoundTheorem44.npy"
if saveResults:
errors = numpy.zeros((numGraphs, numRepetitions))
    allBoundLists = numpy.zeros((numRepetitions, numGraphs, 5))
for r in range(numRepetitions):
iterator = BoundGraphIterator(numGraphs=numGraphs)
clusterer = IterativeSpectralClustering(k1, k2, T=100, computeBound=True, alg="IASC")
clusterer.nb_iter_kmeans = 20
logging.debug("Starting clustering")
clusterList, timeList, boundList = clusterer.clusterFromIterator(iterator, verbose=True)
allBoundLists[r, :, :] = numpy.array(boundList)
for i in range(len(clusterList)):
errors[i, r] = GraphUtils.randIndex(clusterList[i], iterator.realClustering)
print(allBoundLists.mean(0))
numpy.save(fileName, allBoundLists)
logging.debug("Saved results as " + fileName)
else:
allBoundLists = numpy.load(fileName)
boundList = allBoundLists.mean(0)
stdBoundList = allBoundLists.std(0)
stdBoundList[:, 0] = boundList[:, 0]
plotStyles1 = ['k-', 'k--', 'k-.', 'k:', 'b--', 'b-.', 'g-', 'g--', 'g-.', 'r-', 'r--', 'r-.']
print(boundList)
print(stdBoundList)
plt.figure(0)
plt.plot(boundList[:, 0], boundList[:, 1], plotStyles1[0], label="Frobenius approx")
plt.plot(boundList[:, 0], boundList[:, 2], plotStyles1[1], label="2-norm approx")
plt.plot(boundList[:, 0], boundList[:, 3], plotStyles1[2], label="Frobenius precise")
plt.plot(boundList[:, 0], boundList[:, 4], plotStyles1[3], label="2-norm precise")
plt.xlabel("Graph no.")
plt.ylabel(r"$||\sin \; \Theta(\mathcal{R}(U_k), \mathcal{R}(V_k) )||$")
plt.legend(loc="upper left")
plt.grid(True)
#plt.figure(1)
#plt.plot(numpy.arange(errors.shape[0]), errors)
plt.show()
adsabs/ADSDeploy | ADSDeploy/pipeline/workers.py | Python | gpl-3.0 | 216 | 0.00463
# encoding: utf-8
"""
Place holder for all workers
"""
from .integration_tester import IntegrationTestWorker
from .db_writer import DatabaseWriterWorker
from .deploy import BeforeDeploy, Deploy, Restart, GithubDeploy
weissercn/learningml | learningml/GoF/optimisation_and_evaluation/automatisation_gaussian_same_projection/automatisation_Gaussian_same_projection_optimisation_and_evaluation_euclidean.py | Python | mit | 3,583 | 0.017304
import numpy as np
import math
import sys
import os
sys.path.insert(0,os.environ['learningml']+'/GoF/')
import classifier_eval
from classifier_eval import name_to_nclf, nclf, experiment, make_keras_model
from sklearn import tree
from sklearn.ensemble import AdaBoostClassifier
from sklearn.svm import SVC
from rep.estimators import XGBoostClassifier
from keras.wrappers.scikit_learn import KerasClassifier
import time
#nclf_list = [nclf()]
#nclf_list = [nclf(), name_to_nclf("bdt"), nclf('xgb',XGBoostClassifier(),['n_estimators','eta'], [[10,1000],[0.01,1.0]]) ]
#nclf_list = [nclf('xgb',XGBoostClassifier(),['n_estimators','eta'], [[10,1000],[0.01,1.0]], param_opt=[1000.,0.9738])]
#nclf_list = [nclf('nn',"no classifier needed for nn", ['n_hidden_layers','dimof_middle'], [[0,1],[100,500]],param_opt=[0,500])]
#nclf_list = [name_to_nclf("nn")]
#nclf_list = [name_to_nclf("bdt"), name_to_nclf("xgb"), name_to_nclf("svm"), name_to_nclf("nn")]
#nclf_list = [name_to_nclf("bdt"), name_to_nclf("xgb"), name_to_nclf("nn")]
nclf_list = [name_to_nclf("svm")]
#nclf_list = [nclf('bdt',AdaBoostClassifier(base_estimator=tree.DecisionTreeClassifier(max_depth=2)), ['learning_rate','n_estimators'], [[0.01,2.0],[1,1000]], param_opt=[1.181, 319]), nclf('xgb',XGBoostClassifier(), ['n_estimators','eta'], [[10,1000],[0.01,1.0]], param_opt=[524, 0.151]), nclf('nn',"no classifier needed for nn", ['n_hidden_layers','dimof_middle'], [[0,1],[100,500]],param_opt=[0,455])]
#nclf_list = [nclf('nn',"no classifier needed for nn", ['n_hidden_layers','dimof_middle'], [[0,1],[100,500]],param_opt=[0,455])]
systematics_fraction = 0.01
file_name_patterns= [ os.environ['learningml']+"/GoF/data/gaussian_same_projection_on_each_axis/gauss_data/gaussian_same_projection_on_each_axis_{0}D_10000_0.0_1.0_1.0_{1}_euclidean.txt", os.environ['learningml']+"/GoF/data/gaussian_same_projection_on_each_axis/gauss_data/gaussian_same_projection_on_each_axis_{0}D_10000_0.0_0.95_0.95_{1}_euclidean.txt" ]
#file_name_patterns= [ os.environ['learningml']+"/GoF/data/gaussian_same_projection_on_each_axis/gauss_data/gaussian_same_projection_on_each_axis_redefined_{0}D_10000_0.0_1.0_1.0_optimisation_{1}.txt", os.environ['learningml']+"/GoF/data/gaussian_same_projection_on_each_axis/gauss_data/gaussian_same_projection_on_each_axis_redefined_{0}D_10000_0.0_1.0_0.9_optimisation_{1}.txt" ]
name_CPV= "{0}Dgauss__0_95__0_95_CPV_not_redefined_euclidean"
name_noCPV= "{0}Dgauss__1_0__1_0_noCPV_not_redefined_euclidean"
#name_CPV= "{0}Dgauss__1_0__0_95_CPV_chi2scoringopt"
#name_noCPV= "{0}Dgauss__1_0__1_0_noCPV_chi2scoringopt"
title_CPV = "Gauss 0.95 0.95 euclidean"
title_noCPV="Gauss 1.0 1.0 euclidean"
directory_name = "_0_95__0_95_not_redefined_euclidean"
expt = experiment(nclf_list=nclf_list, file_name_patterns=file_name_patterns, scoring='chi2',single_no_bins_list = [5], systematics_fraction = systematics_fraction, only_mod=False, title_CPV=title_CPV, title_noCPV=title_noCPV, name_CPV=name_CPV, name_noCPV=name_noCPV, directory_name=directory_name)
start_time = time.time()
expt.optimise(optimisation_dimension = 4, keras_optimisation_dimension = 1, number_of_iterations=50)
#optimisation gave nn param_opt
evaluation_start_time = time.time()
print(50*"-"+"\noptimisation took ", (evaluation_start_time - start_time)/60. , " minutes\n" +50*"-")
expt.evaluate(evaluation_dimensions = range(1,11), keras_evaluation_dimensions = [1]*10, number_of_evaluations=100)
end_time = time.time()
print(50*"-"+"\nevaluation took ", (end_time - evaluation_start_time)/60. , " minutes\n" +50*"-")
wgwoods/anaconda | pyanaconda/ui/gui/spokes/advstorage/zfcp.py | Python | gpl-2.0 | 6,251 | 0.0016
# zFCP configuration dialog
#
# Copyright (C) 2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Samantha N. Bueno <sbueno@redhat.com>
#
import gi
gi.require_version("BlockDev", "1.0")
from gi.repository import BlockDev as blockdev
from pyanaconda.ui.gui import GUIObject
from pyanaconda.ui.gui.utils import gtk_action_nowait
from pyanaconda.storage_utils import try_populate_devicetree
__all__ = ["ZFCPDialog"]
class ZFCPDialog(GUIObject):
""" Gtk dialog which allows users to manually add zFCP devices without
having previously specified them in a parm file.
.. inheritance-diagram:: ZFCPDialog
:parts: 3
"""
builderObjects = ["zfcpDialog"]
mainWidgetName = "zfcpDialog"
uiFile = "spokes/advstorage/zfcp.glade"
def __init__(self, data, storage):
GUIObject.__init__(self, data)
self.storage = storage
self.zfcp = self.storage.zfcp()
self._discoveryError = None
self._update_devicetree = False
# grab all of the ui objects
self._configureGrid = self.builder.get_object("configureGrid")
self._conditionNotebook = self.builder.get_object("conditionNotebook")
self._startButton = self.builder.get_object("startButton")
self._okButton = self.builder.get_object("okButton")
self._cancelButton = self.builder.get_object("cancelButton")
self._retryButton = self.builder.get_object("retryButton")
self._deviceEntry = self.builder.get_object("deviceEntry")
self._wwpnEntry = self.builder.get_object("wwpnEntry")
self._lunEntry = self.builder.get_object("lunEntry")
def refresh(self):
self._deviceEntry.set_text("")
self._deviceEntry.set_sensitive(True)
self._startButton.set_sensitive(True)
def run(self):
rc = self.window.run()
self.window.destroy()
# We need to call this to get the device nodes to show up
# in our devicetree.
if self._update_devicetree:
try_populate_devicetree(self.storage.devicetree)
return rc
def _set_configure_sensitive(self, sensitivity):
""" Set entries to a given sensitivity. """
for child in self._configureGrid.get_children():
child.set_sensitive(sensitivity)
def on_start_clicked(self, *args):
""" Go through the process of validating entry contents and then
attempt to add the device.
"""
# First update widgets
self._startButton.hide()
self._cancelButton.set_sensitive(False)
self._okButton.set_sensitive(False)
self._conditionNotebook.set_current_page(1)
self._set_configure_sensitive(False)
self._deviceEntry.set_sensitive(False)
self._conditionNotebook.set_current_page(1)
# below really, really is ugly and needs to be re-factored, but this
# should give a good base idea as far as expected behavior should go
try:
device = blockdev.s390.sanitize_dev_input(self._deviceEntry.get_text())
wwpn = blockdev.s390.zfcp_sanitize_wwpn_input(self._wwpnEntry.get_text())
lun = blockdev.s390.zfcp_sanitize_lun_input(self._lunEntry.get_text())
except blockdev.S390Error as err:
_config_error = str(err)
self.builder.get_object("deviceErrorLabel").set_text(_config_error)
self._conditionNotebook.set_current_page(2)
spinner = self.builder.get_object("waitSpinner")
spinner.start()
self._discover(device, wwpn, lun)
self._check_discover()
@gtk_action_nowait
def _check_discover(self, *args):
""" After the zFCP discover thread runs, check to see whether a valid
device was discovered. Display an error message if not.
"""
spinner = self.builder.get_object("waitSpinner")
spinner.stop()
if self._discoveryError:
# Failure, display a message and leave the user on the dialog so
# they can try again (or cancel)
self.builder.get_object("deviceErrorLabel").set_text(self._discoveryError)
self._discoveryError = None
self._conditionNotebook.set_current_page(2)
self._set_configure_sensitive(True)
else:
# Great success. Just return to the advanced storage window and let the
# UI update with the newly-added device
self.window.response(1)
return True
self._cancelButton.set_sensitive(True)
return False
def _discover(self, *args):
""" Given the configuration options from a user, attempt to discover
a zFCP device. This includes searching black-listed devices.
"""
# attempt to add the device
try:
self.zfcp.addFCP(args[0], args[1], args[2])
self._update_devicetree = True
except ValueError as e:
self._discoveryError = str(e)
return
    def on_entry_activated(self, entry, user_data=None):
# When an entry is activated, press the discover or retry button
current_page = self._conditionNotebook.get_current_page()
if current_page == 0:
self._startButton.clicked()
elif current_page == 2:
self._retryButton.clicked()
rsnakamura/iperflexer | tests/testoatbran.py | Python | mit | 4,746 | 0.001896
from unittest import TestCase
import re
from iperflexer import oatbran
bran = oatbran
COW = 'cow'
class TestOatBran(TestCase):
def test_brackets(self):
L_BRACKET = '['
R_BRACKET = "]"
self.assertRegexpMatches(L_BRACKET, bran.L_BRACKET)
self.assertNotRegexpMatches(R_BRACKET, bran.L_BRACKET)
self.assertRegexpMatches(R_BRACKET, bran.R_BRACKET)
self.assertNotRegexpMatches(L_BRACKET, bran.R_BRACKET)
return
def test_spaces(self):
space = ' '
empty_string = ''
spaces = ' '
self.assertRegexpMatches(space, bran.SPACE)
self.assertNotRegexpMatches(empty_string, bran.SPACE)
self.assertNotRegexpMatches(COW, bran.SPACE)
self.assertRegexpMatches(spaces, bran.SPACE)
self.assertRegexpMatches(spaces, bran.SPACES)
self.assertNotRegexpMatches(empty_string, bran.SPACES)
self.assertNotRegexpMatches(COW, bran.SPACES)
self.assertRegexpMatches(spaces, bran.OPTIONAL_SPACES)
self.assertRegexpMatches(empty_string, bran.OPTIONAL_SPACES)
self.assertRegexpMatches(COW, bran.OPTIONAL_SPACES)
return
def test_named(self):
name = "boy"
expression = COW
match = re.search(bran.NAMED(n=name, e=expression), "a cow for liebowitz")
self.assertEqual(expression, match.group(name))
return
def test_digit(self):
digits = "1 2 3 4 5 6 7 8 9 0".split()
for digit in digits:
self.assertRegexpMatches(digit, bran.DIGIT)
self.assertNotRegexpMatches(COW, bran.DIGIT)
return
def test_integer(self):
n1 = "112345"
n2 = "0.1"
self.assertRegexpMatches(n1, bran.INTEGER)
match = re.search(bran.GROUP(e=bran.INTEGER), n2)
self.assertIsNone(match)
return
def test_float(self):
n1 = '12.3'
n2 = "11"
self.assertRegexpMatches(n1, bran.FLOAT)
self.assertNotRegexpMatches(n2, bran.FLOAT)
return
def test_real(self):
n1 = "0.340"
n2 = "123"
match = re.search(bran.GROUP(e=bran.REAL), n1)
self.assertEqual(n1, match.groups()[0])
self.assertRegexpMatches(n2, bran.REAL)
self.assertNotRegexpMatches(COW, bran.REAL)
return
def test_class(self):
s = "Bboy"
e = bran.CLASS(e="Bb") + "boy"
self. assertRegexpMatches(s, e)
def test_single_digit(self):
self.assertRegexpMatches("0", bran.SINGLE_DIGIT)
return
def test_two_digits(self):
self.assertRegexpMatches("19", bran.TWO_DIGITS)
self.assertRegexpMatches("99", bran.TWO_DIGITS)
        self.assertNotRegexpMatches("9", bran.TWO_DIGITS)
self.assertNotRegexpMatches("100", bran.TWO_DIGITS)
return
def test_zero_or_one(self):
s = "Gb"
s2 = "Gab"
s3 = "Gaab"
e = "G(a)" + bran.ZERO_OR_ONE + 'b'
self.assertRegexpMatches(s, e)
match = re.search(e, s)
self.assertIsNone(match.groups()[0])
self.assertRegexpMatches(s2, e)
match = re.search(e, s2)
        self.assertEqual("a", match.groups()[0])
self.assertNotRegexpMatches(s3, e)
return
def test_range(self):
s = "1"
s3 = "315"
s2 = "a" + s3 + "21"
e = bran.NAMED(n="octet", e=bran.M_TO_N(m=1, n=3, e=bran.DIGIT))
self.assertRegexpMatches(s, e)
self.assertRegexpMatches(s2, e)
self.assertRegexpMatches(s3,e)
match = re.search(e, s2)
self.assertEqual(s3, match.group("octet"))
return
def test_absolute_range(self):
s = "a123"
e = bran.NAMED(n="octet", e=bran.M_TO_N_ONLY(m=1, n=3, e=bran.DIGIT))
self.assertNotRegexpMatches(s, e)
return
def test_octet(self):
name = "octet"
e = re.compile(bran.NAMED(name,bran.OCTET))
sources = (str(i) for i in range(256))
for source in sources:
match = e.search(source)
self.assertEqual(source, match.group(name))
s = "256"
self.assertNotRegexpMatches(s, bran.OCTET)
return
def test_ip_address(self):
s = "0.0.0.0"
self.assertRegexpMatches(s, bran.IP_ADDRESS)
self.assertNotRegexpMatches("256.255.255.255", bran.IP_ADDRESS)
return
def test_not(self):
source = ",,323.5,"
match = re.search(bran.NAMED('not',bran.NOT(",")), source)
self.assertEqual(match.group('not'), '323.5')
self.assertRegexpMatches(",,3,", bran.NOT(","))
self.assertNotRegexpMatches(",,,,,", bran.NOT(','))
# end class TestOatBran
mrDoctorWho/vk4xmpp | library/longpoll.py | Python | mit | 10,264 | 0.029529
# coding: utf-8
# © simpleApps, 2014 — 2016.
__authors__ = ("Al Korgun <alkorgun@gmail.com>", "John Smith <mrdoctorwho@gmail.com>")
__version__ = "2.3"
__license__ = "MIT"
"""
Implements a single-threaded longpoll client
"""
import select
import socket
import json
import httplib
import threading
import time
import vkapi as api
import utils
from __main__ import *
SOCKET_CHECK_TIMEOUT = 10
LONGPOLL_RETRY_COUNT = 10
LONGPOLL_RETRY_TIMEOUT = 10
SELECT_WAIT = 25
OPENER_LIFETIME = 60
CODE_SKIP = -1
CODE_FINE = 0
CODE_ERROR = 1
TYPE_MSG = 4
TYPE_MSG_EDIT = 5
TYPE_MSG_READ_IN = 6 # we read the message
TYPE_MSG_READ_OUT = 7 # they read the message
TYPE_PRS_IN = 8
TYPE_PRS_OUT = 9
TYPE_TYPING = 61
FLAG_OUT = 2
FLAG_CHAT = 16
MIN_CHAT_UID = 2000000000
TCP_KEEPINTVL = 60
TCP_KEEPIDLE = 60
def debug(message, *args):
if DEBUG_POLL:
logger.debug(message, *args)
def read(opener, source):
"""
Read a socket ignoring errors
Args:
opener: a socket to read
source: the user's jid
Returns:
JSON data or an empty string
"""
try:
data = opener.read()
except (httplib.BadStatusLine, socket.error, socket.timeout) as e:
data = ""
logger.warning("longpoll: got error `%s` (jid: %s)", e.message, source)
return data
def processPollResult(user, data):
"""
Processes a poll result
Decides whether to send a chat/groupchat message or presence or just pass the iteration
Args:
user: the User object
data: a valid json with poll result
Returns:
CODE_SKIP: just skip iteration, not adding the user to poll again
CODE_FINE: add user for the next iteration
CODE_ERROR: user should be added to the init buffer
"""
debug("longpoll: processing result (jid: %s)", user.source)
retcode = CODE_FINE
try:
data = json.loads(data)
except ValueError:
logger.error("longpoll: no data. Gonna request again (jid: %s)",
user.source)
retcode = CODE_ERROR
return retcode
if "failed" in data:
logger.debug("longpoll: failed. Searching for a new server (jid: %s)", user.source)
retcode = CODE_ERROR
else:
user.vk.pollConfig["ts"] = data["ts"]
for evt in data.get("updates", ()):
typ = evt.pop(0)
debug("longpoll: got updates, processing event %s with arguments %s (jid: %s)",
typ, str(evt), user.source)
if typ == TYPE_MSG: # new message
message = None
mid, flags, uid, date, body, subject, attachments = evt
if subject:
subject = subject.get("title")
out = flags & FLAG_OUT
chat = (uid > MIN_CHAT_UID) # a groupchat always has uid > 2000000000
# there is no point to request messages if there's only a single emoji attachment
# we actually only need to request for new messages if there are complex attachments in it (e.g. photos)
if len(attachments) == 1 and "emoji" in attachments:
attachments = None
if not out:
if not attachments and not chat:
message = [{"out": 0, "from_id": uid, "id": mid, "date": date, "text": body}]
# we substract 1 from msg id b/c VK now has reverse history so we need to ask what happened before this exact message
utils.runThread(user.sendMessages, (False, message, mid - 1, uid), "sendMessages-%s" % user.source)
elif typ == TYPE_MSG_READ_OUT:
uid, mid, _ = evt
cache = user.msgCacheByUser.get(uid)
if cache:
xmppMID = cache["xmpp"]
cache.clear()
sendChatMarker(user.source, vk2xmpp(uid), xmppMID)
elif typ == TYPE_PRS_IN: # user has joined
uid = abs(evt[0])
sendPresence(user.source, vk2xmpp(uid), hash=USER_CAPS_HASH)
elif typ == TYPE_PRS_OUT: # user has left
uid = abs(evt[0])
sendPresence(user.source, vk2xmpp(uid), "unavailable")
elif typ == TYPE_TYPING: # user is typing
uid = evt[0]
if uid not in user.typing:
sendMessage(user.source, vk2xmpp(uid), typ="composing")
user.typing[uid] = time.time()
retcode = CODE_FINE
return retcode
def configureSocket(sock):
# see man(7) tcp
debug("setting socket parameters...")
try:
# enable keepalive probes
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
# the interval between subsequential keepalive probes, regardless of what the connection has exchanged in the meantime
# overrides tcp_keepalive_intvl
sock.setsockopt(socket.SOL_TCP, socket.TCP_KEEPINTVL, TCP_KEEPINTVL)
# the interval between the last data packet sent (simple ACKs are not considered data) and the first keepalive probe;
# after the connection is marked to need keepalive, this counter is not used any further
# overrides tcp_keepalive_time
sock.setsockopt(socket.SOL_TCP, socket.TCP_KEEPIDLE, TCP_KEEPIDLE)
except (AttributeError, OSError):
debug("u
|
nable to set socket parameters")
# TODO: make it abstract, to reuse in Steampunk
class Poll(object):
"""
    Class used to handle longpoll
"""
__list = {}
__buff = set()
__lock = threading.Lock()
clear = staticmethod(__list.clear)
watchdogRunning = False
@classmethod
def init(cls):
cls.watchdogRunning ^= True
cls.watchdog()
@classmethod
def __add(cls, user):
"""
Issues a readable socket to use it in select()
        Adds the user to the buffer if an error occurred
        Adds the user to cls.__list if no errors occurred
"""
if user.source in Users:
# in case the new instance was created
user = Users[user.source]
opener = user.vk.makePoll()
debug("longpoll: user has been added to poll (jid: %s)", user.source)
if opener:
sock = opener.sock
configureSocket(sock)
cls.__list[sock] = (user, opener)
return opener
logger.warning("longpoll: got null opener! (jid: %s)", user.source)
cls.__addToBuffer(user)
@classmethod
def add(cls, some_user):
"""
Adds the User class object to poll
"""
debug("longpoll: adding user to poll (jid: %s)", some_user.source)
with cls.__lock:
if some_user in cls.__buff:
return None
# check if someone is trying to add an already existing user
for sock, (user, opener) in cls.__list.iteritems():
if some_user == user:
break
else:
try:
cls.__add(some_user)
except api.LongPollError as e:
logger.debug("longpoll: failed to make poll: %s (jid: %s)", e.message, some_user.source)
cls.__addToBuffer(some_user)
except Exception:
crashLog("poll.add")
@classmethod
def __addToBuffer(cls, user):
"""
Adds user to the list of "bad" users
The list is mostly contain users whose poll
request was failed for some reasons
Args:
user: the user object
"""
cls.__buff.add(user)
logger.debug("longpoll: adding user to the init buffer (jid: %s)", user.source)
utils.runThread(cls.handleUser, (user,), "handleBuffer-%s" % user.source)
@classmethod
def __removeFromBuffer(cls, user):
"""
Instantly removes a user from the buffer
Args:
user: the user object
"""
if user in cls.__buff:
cls.__buff.remove(user)
@classmethod
def removeFromBuffer(cls, user):
"""
Removes a user from the buffer
Args:
user: the user object
"""
with cls.__lock:
cls.__removeFromBuffer(user)
@classmethod
def handleUser(cls, user):
"""
        Tries to reinitialize the poll up to LONGPOLL_RETRY_COUNT times, every LONGPOLL_RETRY_TIMEOUT seconds
As soon as poll is initialized the user will be removed from buffer
Args:
user: the user object
"""
for _ in xrange(LONGPOLL_RETRY_COUNT):
if user.source in Users:
user = Users[user.source] # we might have a new instance here
if user.vk.initPoll():
with cls.__lock:
logger.debug("longpoll: successfully initialized longpoll (jid: %s)",
user.source)
cls.__add(user)
cls.__removeFromBuffer(user)
break
else:
logger.debug("longpoll: while we were wasting our time"
", the user has left (jid: %s)", user.source)
cls.removeFromBuffer(user)
return None
time.sleep(LONGPOLL_RETRY_TIMEOUT)
else:
cls.removeFromBuffer(user)
logger.error("longpoll: failed to add user to poll in 10 retries"
" (jid: %s)", user.source)
@classmethod
def process(cls):
"""
Processes poll sockets by select.select()
As soon as socket will be ready for reading, user.processPollResult() is called
Read processPollResult.__doc__ to learn more about status codes
"""
wh
ianstalk/Flexget | flexget/tests/test_content_filter.py | Python | mit | 3,857 | 0.001296
import pytest
@pytest.mark.usefixtures('tmpdir')
@pytest.mark.filecopy('test.torrent', '__tmp__/')
class TestContentFilter:
config = """
tasks:
test_reject1:
mock:
- {title: 'test', file: '__tmp__/test.torrent'}
accept_all: yes
content_filter:
reject: '*.iso'
test_reject2:
mock:
- {title: 'test', file: '__tmp__/test.torrent'}
accept_all: yes
content_filter:
reject: '*.avi'
test_require1:
mock:
- {title: 'test', file: '__tmp__/test.torrent'}
accept_all: yes
content_filter:
require:
- '*.bin'
- '*.iso'
test_require2:
mock:
- {title: 'test', file: '__tmp__/test.torrent'}
accept_all: yes
content_filter:
require: '*.avi'
test_require_all1:
mock:
- {title: 'test', file: '__tmp__/test.torrent'}
accept_all: yes
content_filter:
require_all:
- 'ubu*'
- '*.iso'
test_require_all2:
mock:
- {title: 'test', file: '__tmp__/test.torrent'}
accept_all: yes
content_filter:
require_all:
- '*.iso'
- '*.avi'
test_strict:
mock:
- {title: 'test'}
accept_all: yes
content_filter:
require: '*.iso'
strict: true
test_cache:
mock:
- {title: 'test', url: 'http://localhost/', file: '__tmp__/test.torrent'}
accept_all: yes
content_filter:
reject: ['*.iso']
"""
def test_reject1(self, execute_task):
task = execute_task('test_reject1')
assert task.find_entry('rejected', title='test'), 'should have rejected, contains *.iso'
def test_reject2(self, execute_task):
task = execute_task('test_reject2')
        assert task.find_entry(
            'accepted', title='test'
        ), 'should have accepted, doesn\'t contain *.avi'
def test_require1(self, execute_task):
task = execute_task('test_require1')
assert task.find_entry('accepted', title='test'), 'should have accepted, contains *.iso'
def test_require2(self, execute_task):
task = execute_task('test_require2')
assert task.find_entry(
            'rejected', title='test'
        ), 'should have rejected, doesn\'t contain *.avi'
def test_require_all1(self, execute_task):
task = execute_task('test_require_all1')
assert task.find_entry(
'accepted', title='test'
), 'should have accepted, both masks are satisfied'
def test_require_all2(self, execute_task):
task = execute_task('test_require_all2')
assert task.find_entry(
'rejected', title='test'
), 'should have rejected, one mask isn\'t satisfied'
def test_strict(self, execute_task):
"""Content Filter: strict enabled"""
task = execute_task('test_strict')
assert task.find_entry('rejected', title='test'), 'should have rejected non torrent'
def test_cache(self, execute_task):
"""Content Filter: caching"""
task = execute_task('test_cache')
assert task.find_entry('rejected', title='test'), 'should have rejected, contains *.iso'
# Test that remember_rejected rejects the entry before us next time
task = execute_task('test_cache')
assert task.find_entry(
'rejected', title='test', rejected_by='remember_rejected'
), 'should have rejected, content files present from the cache'
vladikoff/fxa-mochitest | tests/mozbase/manifestparser/tests/test_default_skipif.py | Python | mpl-2.0 | 1,518 | 0.006588
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import unittest
from manifestparser import ManifestParser
here = os.path.dirname(os.path.abspath(__file__))
class TestDefaultSkipif(unittest.TestCase):
"""test applying a skip-if condition in [DEFAULT] and || with the value for the test"""
def test_defaults(self):
default = os.path.join(here, 'default-skipif.ini')
parser = ManifestParser(manifests=(default,))
for test in parser.tests:
if test['name'] == 'test1':
self.assertEqual(test['skip-if'], "(os == 'win' && debug ) || (debug)")
elif test['name'] == 'test2':
self.assertEqual(test['skip-if'], "(os == 'win' && debug ) || (os == 'linux')")
elif test['name'] == 'test3':
self.assertEqual(test['skip-if'], "(os == 'win' && debug ) || (os == 'win')")
elif test['name'] == 'test4':
self.assertEqual(test['skip-if'], "(os == 'win' && debug )
|
|| (os == 'win' && debug)")
elif test['name'] == 'test5':
self.assertEqual(test['skip-if'], "os == 'win' && debug # a pesky comment")
elif test['name'] == 'test6':
self.assertEqual(test['skip-if'], "(os == 'win' && debug ) || (debug )")
if __name__ == '__main__':
unittest.main()
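# --- Added illustration (not the actual default-skipif.ini fixture, which is not
# --- included here): a sketch of the [DEFAULT] merging these assertions exercise.
# --- The parser ORs a test's own skip-if onto the condition inherited from [DEFAULT]:
#   [DEFAULT]
#   skip-if = os == 'win' && debug
#   [test1]
#   skip-if = debug            # parsed as "(os == 'win' && debug ) || (debug)"
#   [test2]
#   skip-if = os == 'linux'    # parsed as "(os == 'win' && debug ) || (os == 'linux')"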
anaruse/chainer | chainer/functions/connection/shift.py | Python | mit | 4,492 | 0
import numpy
from chainer.backends import cuda
from chainer import function_node
from chainer.utils import type_check
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return x, x
class Shift(function_node.FunctionNode):
def __init__(self, ksize=3, dilate=1):
super(Shift, self).__init__()
self.kh, self.kw = _pair(ksize)
if self.kh % 2 != 1:
raise ValueError('kh must be odd')
if self.kw % 2 != 1:
raise ValueError('kw must be odd')
self.dy, self.dx = _pair(dilate)
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(n_in == 1)
x_type = in_types[0]
type_check.expect(
x_type.dtype.kind == 'f',
x_type.ndim == 4,
x_type.shape[1] >= self.kh * self.kw,
)
def forward_cpu(self, inputs):
x = inputs[0]
b, c, h, w = x.shape
py = self.kh // 2 * abs(self.dy)
px = self.kw // 2 * abs(self.dx)
x = numpy.pad(x, ((0, 0), (0, 0), (py, py), (px, px)),
'constant')
n_groups = self.kh * self.kw
group_size = c // n_groups
ret = []
for i, group_idx in enumerate(range(n_groups)):
# Make sure that center group is last
if group_idx == (n_groups - 1) // 2:
group_idx = n_groups - 1
elif group_idx == (n_groups - 1):
group_idx = (n_groups - 1) // 2
ky = (group_idx // self.kw) - py // abs(self.dy)
kx = (group_idx % self.kw) - px // abs(self.dx)
hs = py + -ky * self.dy
ws = px + -kx * self.dx
he = hs + h
we = ws + w
cs = i * group_size
ce = (i + 1) * group_size if i < n_groups - 1 else None
ret.append(x[:, cs:ce, hs:he, ws:we])
return numpy.concatenate(ret, axis=1),
def forward_gpu(self, inputs):
x = inputs[0]
b, c, h, w = x.shape
y = cuda.cupy.empty_like(x)
cuda.elementwise(
'raw T x, int32 c, int32 h, int32 w,'
'int32 kh, int32 kw,'
'int32 dy, int32 dx',
'T y',
'''
int b0 = i / (c * h * w);
int rest = i % (c * h * w);
int c0 = rest / (h * w);
rest %= h * w;
int out_row = rest / w;
int out_col = rest % w;
int n_groups = kh * kw;
int group_size = c / n_groups;
int group_idx = c0 / group_size;
// Make sure that center group is last
if (group_idx == (n_groups - 1) / 2) {
group_idx = n_groups - 1;
} else if (group_idx == n_groups - 1) {
group_idx = (n_groups - 1) / 2;
}
int ky = (group_idx / kw) - kh / 2;
int kx = (group_idx % kw) - kw / 2;
if (group_idx >= n_groups) {
ky = 0;
kx = 0;
}
int in_row = -ky * dy + out_row;
int in_col = -kx * dx + out_col;
if (in_row >= 0 && in_row < h && in_col >= 0 && in_col < w) {
y = x[b0 * c * h * w + c0 * h * w + in_row * w + in_col];
} else {
y = 0;
}
''',
'shift_gpu')(x, c, h, w, self.kh, self.kw, self.dy, self.dx, y)
return y,
def backward(self, indexes, grad_outputs):
return shift(grad_outputs[0], ksize=(self.kh, self.kw),
dilate=(-self.dy, -self.dx)),
def shift(x, ksize=3, dilate=1):
"""Shift function.
See: `Shift: A Zero FLOP, Zero Parameter Alternative to Spatial \
Convolutions <https://arxiv.org/abs/1711.08141>`_
Args:
x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`):
Input variable of shape :math:`(n, c, h, w)`.
ksize (int or pair of ints): Size of filters (a.k.a. kernels).
``ksize=k`` and ``ksize=(k, k)`` are equivalent.
dilate (int or pair of ints): Dilation factor of filter applications.
``dilate=d`` and ``dilate=(d, d)`` are equivalent.
Returns:
~chainer.Variable:
Output variable of same shape as ``x``.
"""
fnode = Shift(ksize, dilate)
y, = fnode.apply((x,))
return y
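# --- Added illustration (not part of the original module): a minimal usage sketch of
# --- shift() on a random NCHW array; nine channels satisfies the
# --- x.shape[1] >= ksize * ksize check enforced in Shift.check_type_forward.
if __name__ == '__main__':
    import numpy as np
    x = np.random.randn(2, 9, 8, 8).astype(np.float32)  # (n, c, h, w)
    y = shift(x, ksize=3, dilate=1)
    assert y.shape == x.shape  # shift keeps the input shape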