text stringlengths 4 1.02M | meta dict |
|---|---|
from spotify.utils.cache import Cache
class IntegrationTestCase(object):
    # Mixin base for integration tests: resets the shared Cache before and
    # after every test so cached state cannot leak between test cases.
    def setUp(self):
        Cache.clear()
    def tearDown(self):
        Cache.clear() | {
"content_hash": "106dca3f28b6d1bdb476f15cc4cefc1f",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 37,
"avg_line_length": 18.22222222222222,
"alnum_prop": 0.6646341463414634,
"repo_name": "husman/WoTrack",
"id": "921b78d2af8e7856b3f6de9038ca41d59063fadb",
"size": "164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/spotify/spotify/tests/integration/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "549"
},
{
"name": "HTML",
"bytes": "2910"
},
{
"name": "Python",
"bytes": "16380"
}
],
"symlink_target": ""
} |
import qpid
from qpid.client import Client
from qpid.content import Content
class QpidAMQP08Hub(BaseAMQPHub):
client = None
def __init__(self, broker, username=None, password=None, ssl=False):
"""
Initialize the Moksha Hub.
`broker`
[amqps://][<user>[/<password>]@]<host>[:<port>]
"""
self.set_broker(broker)
self.init_qpid_connection()
# We need 0.8 for RabbitMQ
self.amqp_spec=qpid.spec08.load('/usr/share/amqp/amqp.0-8.xml')
def set_broker(self, broker):
self.url = URL(broker)
self.user = self.url.password or 'guest'
self.password = self.url.password or 'guest'
self.host = self.url.host
if self.url.scheme == URL.AMQPS:
self.ssl = True
default_port = 5671
else:
self.ssl = False
default_port = 5672
self.port = self.url.port or default_port
def init_qpid_connection(self):
self.client = Client(self.host, self.port, spec=self.amqp_spec)
self.client.start({'LOGIN': self.user, 'PASSWORD': self.password})
self.conn = self.client.channel(1)
self.conn.channel_open()
print "opened channel!"
def create_queue(self, queue, routing_key, exchange='amq.topic',
auto_delete=False, durable=True, **kw):
self.conn.queue_declare(queue=queue, auto_delete=auto_delete,
durable=durable, **kw)
self.conn.queue_bind(queue=queue, exchange=exchange,
routing_key=routing_key)
print "Created %s queue" % queue
def send_message(self, message, exchange='amq.topic', routing_key=''):
self.conn.basic_publish(routing_key=routing_key,
content=Content(message),
exchange=exchange)
def get(self, queue):
t = self.conn.basic_consume(queue=queue, no_ack=True)
print "t.consumer_tag =", t.consumer_tag
q = self.client.queue(t.consumer_tag)
msg = q.get()
print "got message: ", msg
return msg.content.body
q.close()
def close(self):
if self.conn:
print "Closing connection"
self.conn.close()
| {
"content_hash": "ebbf4a5bf20f94dc1e25c9a2746ef5cd",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 74,
"avg_line_length": 33.76470588235294,
"alnum_prop": 0.568815331010453,
"repo_name": "pombredanne/moksha",
"id": "bd802c4d59d8fbba13d09e2bdca1bf845092ba0a",
"size": "2988",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "moksha.hub/moksha/hub/amqp/qpid08.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "21145"
},
{
"name": "HTML",
"bytes": "14308"
},
{
"name": "JavaScript",
"bytes": "628040"
},
{
"name": "Makefile",
"bytes": "18249"
},
{
"name": "Python",
"bytes": "244340"
},
{
"name": "Shell",
"bytes": "4024"
}
],
"symlink_target": ""
} |
'''
Created on 21 janv. 2014
@author: Nicolas Carre
'''
from AbstractDAO import AbstractDAO
from system.Qualification import Qualification
from interfacebdd.CraftDAO import CraftDAO
from string import Template
class QualificationDAO(AbstractDAO):
    '''
    Data-access object for the QUALIFICATION table.
    Inherits from AbstractDAO.
    '''

    # DB column name -> object attribute name
    MAPPING_COLS = {"ID": "num",
                    "NAME": "name"}
    # object attribute name -> DB column name
    MAPPING_FIL = {"num": "ID",
                   "name": "NAME"}

    def __init__(self):
        super(QualificationDAO, self).__init__(tableName="QUALIFICATION", mappingCols=self.MAPPING_COLS, mappingFil=self.MAPPING_FIL)

    def _createElement(self):
        '''
        Create an empty Qualification instance (used for row hydration).
        '''
        return Qualification()

    def insert(self, co=None, Obj=None):
        '''
        Insert Obj unless a qualification with the same name already exists;
        in that case return the existing row's id instead of duplicating it.

        `co`  -- database connection/cursor passed through to the DAO layer
        `Obj` -- Qualification instance to persist
        '''
        # Reuse self rather than instantiating a second, identical DAO.
        q = self.getByFilter(co, False, [], ("name", Obj.name))
        if q is not None:
            return q.num
        return super(QualificationDAO, self).insert(co, Obj)
| {
"content_hash": "bcda06ede4ad729507e948edc43f7bf1",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 133,
"avg_line_length": 26.51219512195122,
"alnum_prop": 0.5749770009199632,
"repo_name": "gpierre42/optraj",
"id": "cc8a0c68d160abb0ce71c6a9f84ed203b3e72cce",
"size": "1087",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vagrant/optraj.istic.univ-rennes1.fr/src/interfacebdd/QualificationDAO.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "135378"
},
{
"name": "HTML",
"bytes": "5529538"
},
{
"name": "JavaScript",
"bytes": "2013225"
},
{
"name": "PHP",
"bytes": "112377"
},
{
"name": "Python",
"bytes": "354555"
},
{
"name": "Ruby",
"bytes": "479"
},
{
"name": "Shell",
"bytes": "5936"
}
],
"symlink_target": ""
} |
import unittest
from leetcode.no1_two_sum import Solution
class Test(unittest.TestCase):
    """Unit tests for leetcode problem #1 (Two Sum)."""

    def setUp(self):
        # Fresh solver for every test.
        self.solution = Solution()

    def test_two_sum(self):
        # Simple case: 2 + 4 == 6 at indices 1 and 2.
        self.assertEqual(self.solution.twoSum([3, 2, 4], 6), [1, 2])

    def test_two_sum_2(self):
        # Duplicate values: both 4s sum to 8.
        self.assertEqual(
            self.solution.twoSum([2, 1, 9, 4, 4, 56, 90, 3], 8), [3, 4])

    def test_none(self):
        # Empty input yields no answer.
        self.assertIsNone(self.solution.twoSum([], 0))
| {
"content_hash": "9bd5714df77ea7056dd2323f4029daef",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 68,
"avg_line_length": 25.26086956521739,
"alnum_prop": 0.5834767641996558,
"repo_name": "minfun/leetcode",
"id": "5b627ad33c5ed3f8fd642c0baf9485ba82841a28",
"size": "712",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_no1_two_sum.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "51797"
}
],
"symlink_target": ""
} |
import os
from office365.sharepoint.file import File
from office365.sharepoint.file_creation_information import FileCreationInformation
from tests import random_seed
from tests.sharepoint_case import SPTestCase
from tests.test_methods import (
read_file_as_binary,
read_file_as_text,
ensure_list)
from office365.sharepoint.list_creation_information import ListCreationInformation
from office365.sharepoint.list_template_type import ListTemplateType
from office365.sharepoint.template_file_type import TemplateFileType
def normalize_response(response):
    """Decode a raw byte payload and strip one pair of matching wrapping
    quotes (single or double), if present.

    :param response: raw bytes returned by the service
    :return: the decoded string, unquoted when it was quote-wrapped
    """
    content = response.decode("utf-8")
    # Guard against empty payloads: content[0] / content[-1] would raise
    # IndexError on "".
    if content and content[0] == content[-1] and content.startswith(("'", '"')):
        return content[1:-1]
    return content
def upload_file(list_object, url, content):
    """Upload `content` into `list_object`'s root folder under the name
    `url`, overwriting any existing file, and return the new file object."""
    creation_info = FileCreationInformation()
    creation_info.url = url
    creation_info.content = content
    creation_info.overwrite = True
    new_file = list_object.rootFolder.files.add(creation_info)
    list_object.context.execute_query()
    return new_file
class TestSharePointFile(SPTestCase):
    """End-to-end tests for SharePoint file operations (upload, list, update,
    download, copy, move, recycle, template creation, delete).

    The tests are order-dependent (test1..test10) and share a single document
    library created in setUpClass and removed in tearDownClass."""
    # Content written by test3 and verified on download by test5.
    content_placeholder = "1234567890 abcdABCD %s" % random_seed
    # Local fixture files (under ./data) uploaded by test1.
    file_entries = [
        {"Name": "Sample.txt", "Type": "Text"},
        {"Name": "SharePoint User Guide.docx", "Type": "Binary"}
    ]
    target_list = None

    @classmethod
    def setUpClass(cls):
        """Create (or reuse) the document library all tests operate on."""
        super(TestSharePointFile, cls).setUpClass()
        cls.target_list = ensure_list(cls.client.web,
                                      ListCreationInformation(
                                          "Archive Documents N%s" % random_seed,
                                          None,
                                          ListTemplateType.DocumentLibrary))

    @classmethod
    def tearDownClass(cls):
        """Delete the shared document library."""
        cls.target_list.delete_object()
        cls.client.execute_query()

    def test1_upload_file(self):
        """Upload each fixture file and verify it keeps its name."""
        for entry in self.file_entries:
            path = "{0}/data/{1}".format(os.path.dirname(__file__), entry["Name"])
            if entry["Type"] == "Binary":
                file_content = read_file_as_binary(path)
            else:
                file_content = read_file_as_text(path)
            uploaded_file = upload_file(self.__class__.target_list, entry["Name"], file_content)
            self.assertEqual(uploaded_file.properties["Name"], entry["Name"])

    def test2_list_files(self):
        """List files (top 1) and load the associated list item metadata."""
        files = self.__class__.target_list.rootFolder.files.top(1)
        self.client.load(files)
        self.client.execute_query()
        files_items = list(files)
        self.assertEqual(len(files_items), 1)
        first_item = files[0].listItemAllFields
        self.client.load(first_item, ["File"])
        self.client.execute_query()
        self.assertIsNotNone(first_item.file)

    def test3_update_file_content(self):
        """Overwrite every uploaded file's content with content_placeholder."""
        files = self.__class__.target_list.rootFolder.files
        self.client.load(files)
        self.client.execute_query()
        for file_upload in files:
            response = File.save_binary(self.client, file_upload.properties["ServerRelativeUrl"],
                                        self.content_placeholder)
            self.assertTrue(response.ok)

    def test4_update_file_metadata(self):
        """Update a file's list-item metadata (Title field)."""
        files = self.__class__.target_list.rootFolder.files.top(1)
        self.client.load(files)
        self.client.execute_query()
        first_file = files[0]
        list_item = first_file.listItemAllFields  # get metadata
        list_item.set_property('Title', 'Updated')
        list_item.update()
        self.client.execute_query()

    def test5_download_file(self):
        """Download each file and verify it matches what test3 wrote."""
        files = self.__class__.target_list.rootFolder.files
        self.client.load(files)
        self.client.execute_query()
        for file_download in files:
            content = file_download.read()
            enc_content = normalize_response(content)
            self.assertEqual(enc_content, self.content_placeholder)

    def test6_copy_file(self):
        """Copy every file to a 'copied_'-prefixed name and verify the copy."""
        files = self.__class__.target_list.rootFolder.files
        self.client.load(files)
        self.client.execute_query()
        for cur_file in files:
            file_url = cur_file.properties["ServerRelativeUrl"]
            path, file_name = os.path.split(file_url)
            new_file_url = '/'.join([path, "copied_" + file_name])
            cur_file.copyto(new_file_url, True)
            self.client.execute_query()
            moved_file = self.client.web.get_file_by_server_relative_url(new_file_url)
            self.client.load(moved_file)
            self.client.execute_query()
            self.assertEqual(new_file_url, moved_file.properties["ServerRelativeUrl"])

    def test7_move_file(self):
        """Move every file to a 'moved_'-prefixed name and verify the move."""
        files = self.__class__.target_list.rootFolder.files
        self.client.load(files)
        self.client.execute_query()
        for cur_file in files:
            file_url = cur_file.properties["ServerRelativeUrl"]
            path, file_name = os.path.split(file_url)
            new_file_url = '/'.join([path, "moved_" + file_name])
            cur_file.moveto(new_file_url, 1)
            self.client.execute_query()
            moved_file = self.client.web.get_file_by_server_relative_url(new_file_url)
            self.client.load(moved_file)
            self.client.execute_query()
            self.assertEqual(new_file_url, moved_file.properties["ServerRelativeUrl"])

    def test8_recycle_first_file(self):
        """Recycle (soft-delete) the first file; the count must drop by one."""
        files = self.__class__.target_list.rootFolder.files
        self.client.load(files)
        self.client.execute_query()
        files_count = len(files)
        if files_count > 0:
            first_file = files[0]
            first_file.recycle()
            self.client.execute_query()
            files_after = self.__class__.target_list.rootFolder.files
            self.client.load(files_after)
            self.client.execute_query()
            # `files` still holds the pre-recycle snapshot, so this compares
            # the old count minus one against the freshly loaded count.
            self.assertEqual(len(files) - 1, len(files_after))

    def test9_create_template_file(self):
        """Create a wiki-page template file in the library's root folder."""
        target_folder = self.__class__.target_list.rootFolder
        self.client.load(target_folder)
        self.client.execute_query()
        file_url = '/'.join([target_folder.properties["ServerRelativeUrl"], "WikiPage.aspx"])
        file_new = self.__class__.target_list.rootFolder.files.add_template_file(file_url, TemplateFileType.WikiPage)
        self.client.execute_query()
        self.assertEqual(file_new.properties["ServerRelativeUrl"], file_url)

    def test10_delete_file(self):
        """Delete every remaining file and verify the folder is empty."""
        files_to_delete = self.__class__.target_list.rootFolder.files
        self.client.load(files_to_delete)
        self.client.execute_query()
        for file_to_delete in files_to_delete:
            file_to_delete.delete_object()
            self.client.execute_query()
        result = self.__class__.target_list.rootFolder.files
        self.client.load(result)
        self.client.execute_query()
        files_items = list(result)
        self.assertEqual(len(files_items), 0)
| {
"content_hash": "59812ce1d9bdf73cf8e5c1c0651c722d",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 117,
"avg_line_length": 40.25,
"alnum_prop": 0.6198475437605873,
"repo_name": "vgrem/SharePointOnline-REST-Python-Client",
"id": "320b5f43b988f260fb3c579c122e4610e30c7bf2",
"size": "7084",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_sharepoint_file.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "64358"
}
],
"symlink_target": ""
} |
# Interactive command-line wrapper around the coin daemon's JSON-RPC API.
# (Python 2 script: uses `print` statements and raw_input throughout.)
from jsonrpc import ServiceProxy
import sys
import string  # NOTE(review): appears unused in this file -- confirm before removing
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
# Connect to the local daemon on port 13144, with credentials when configured.
if rpcpass == "":
    access = ServiceProxy("http://127.0.0.1:13144")
else:
    access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:13144")
# Dispatch on the first CLI argument; each branch prompts interactively for
# the RPC call's parameters.  Branches with optional parameters use a nested
# try: call with arguments first, fall back to the zero-argument form.
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
    try:
        path = raw_input("Enter destination path/filename: ")
        print access.backupwallet(path)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getaccount":
    try:
        addr = raw_input("Enter a Bitcoin address: ")
        print access.getaccount(addr)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
    try:
        acct = raw_input("Enter an account name: ")
        print access.getaccountaddress(acct)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
    try:
        acct = raw_input("Enter an account name: ")
        print access.getaddressesbyaccount(acct)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getbalance":
    try:
        acct = raw_input("Enter an account (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getbalance(acct, mc)
        except:
            print access.getbalance()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
    try:
        height = raw_input("Height: ")
        print access.getblockbycount(height)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getblockcount":
    try:
        print access.getblockcount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
    try:
        print access.getblocknumber()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
    try:
        print access.getconnectioncount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
    try:
        print access.getdifficulty()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getgenerate":
    try:
        print access.getgenerate()
    except:
        print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
    try:
        print access.gethashespersec()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getinfo":
    try:
        print access.getinfo()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
    try:
        acct = raw_input("Enter an account name: ")
        try:
            print access.getnewaddress(acct)
        except:
            print access.getnewaddress()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
    try:
        acct = raw_input("Enter an account (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getreceivedbyaccount(acct, mc)
        except:
            print access.getreceivedbyaccount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
    try:
        addr = raw_input("Enter a Bitcoin address (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getreceivedbyaddress(addr, mc)
        except:
            print access.getreceivedbyaddress()
    except:
        print "\n---An error occurred---\n"
elif cmd == "gettransaction":
    try:
        txid = raw_input("Enter a transaction ID: ")
        print access.gettransaction(txid)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
    try:
        cmd = raw_input("Command (optional): ")
        try:
            print access.help(cmd)
        except:
            print access.help()
    except:
        print "\n---An error occurred---\n"
elif cmd == "listaccounts":
    try:
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.listaccounts(mc)
        except:
            print access.listaccounts()
    except:
        print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
    try:
        mc = raw_input("Minimum confirmations (optional): ")
        incemp = raw_input("Include empty? (true/false, optional): ")
        try:
            print access.listreceivedbyaccount(mc, incemp)
        except:
            print access.listreceivedbyaccount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
    try:
        mc = raw_input("Minimum confirmations (optional): ")
        incemp = raw_input("Include empty? (true/false, optional): ")
        try:
            print access.listreceivedbyaddress(mc, incemp)
        except:
            print access.listreceivedbyaddress()
    except:
        print "\n---An error occurred---\n"
elif cmd == "listtransactions":
    try:
        acct = raw_input("Account (optional): ")
        count = raw_input("Number of transactions (optional): ")
        frm = raw_input("Skip (optional):")
        try:
            print access.listtransactions(acct, count, frm)
        except:
            print access.listtransactions()
    except:
        print "\n---An error occurred---\n"
elif cmd == "move":
    try:
        frm = raw_input("From: ")
        to = raw_input("To: ")
        amt = raw_input("Amount:")
        mc = raw_input("Minimum confirmations (optional): ")
        comment = raw_input("Comment (optional): ")
        try:
            print access.move(frm, to, amt, mc, comment)
        except:
            print access.move(frm, to, amt)
    except:
        print "\n---An error occurred---\n"
elif cmd == "sendfrom":
    try:
        frm = raw_input("From: ")
        to = raw_input("To: ")
        amt = raw_input("Amount:")
        mc = raw_input("Minimum confirmations (optional): ")
        comment = raw_input("Comment (optional): ")
        commentto = raw_input("Comment-to (optional): ")
        try:
            print access.sendfrom(frm, to, amt, mc, comment, commentto)
        except:
            print access.sendfrom(frm, to, amt)
    except:
        print "\n---An error occurred---\n"
elif cmd == "sendmany":
    try:
        frm = raw_input("From: ")
        to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
        mc = raw_input("Minimum confirmations (optional): ")
        comment = raw_input("Comment (optional): ")
        try:
            print access.sendmany(frm,to,mc,comment)
        except:
            print access.sendmany(frm,to)
    except:
        print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
    try:
        to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
        amt = raw_input("Amount:")
        comment = raw_input("Comment (optional): ")
        commentto = raw_input("Comment-to (optional): ")
        try:
            print access.sendtoaddress(to,amt,comment,commentto)
        except:
            print access.sendtoaddress(to,amt)
    except:
        print "\n---An error occurred---\n"
elif cmd == "setaccount":
    try:
        addr = raw_input("Address: ")
        acct = raw_input("Account:")
        print access.setaccount(addr,acct)
    except:
        print "\n---An error occurred---\n"
elif cmd == "setgenerate":
    try:
        gen= raw_input("Generate? (true/false): ")
        cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
        try:
            print access.setgenerate(gen, cpus)
        except:
            print access.setgenerate(gen)
    except:
        print "\n---An error occurred---\n"
elif cmd == "settxfee":
    try:
        amt = raw_input("Amount:")
        print access.settxfee(amt)
    except:
        print "\n---An error occurred---\n"
elif cmd == "stop":
    try:
        print access.stop()
    except:
        print "\n---An error occurred---\n"
elif cmd == "validateaddress":
    try:
        addr = raw_input("Address: ")
        print access.validateaddress(addr)
    except:
        print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
    try:
        pwd = raw_input("Enter wallet passphrase: ")
        # Unlocks the wallet for 60 seconds.
        access.walletpassphrase(pwd, 60)
        print "\n---Wallet unlocked---\n"
    except:
        print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
    try:
        pwd = raw_input("Enter old wallet passphrase: ")
        pwd2 = raw_input("Enter new wallet passphrase: ")
        access.walletpassphrasechange(pwd, pwd2)
        print
        print "\n---Passphrase changed---\n"
    except:
        print
        print "\n---An error occurred---\n"
        print
else:
    print "Command not found or not supported" | {
"content_hash": "cb9c94483671aa33532fdb3f80623a55",
"timestamp": "",
"source": "github",
"line_count": 324,
"max_line_length": 79,
"avg_line_length": 24.188271604938272,
"alnum_prop": 0.661860405767513,
"repo_name": "LiveChains/Live-Coin",
"id": "a8846ab7f42b525c49c3e90ec595e9b3985a5f92",
"size": "7837",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/bitrpc/bitrpc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "102129"
},
{
"name": "C++",
"bytes": "2429380"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "IDL",
"bytes": "13827"
},
{
"name": "Objective-C",
"bytes": "2734"
},
{
"name": "Python",
"bytes": "37297"
},
{
"name": "Shell",
"bytes": "2535"
},
{
"name": "TypeScript",
"bytes": "5236802"
}
],
"symlink_target": ""
} |
import sys
import os.path
import warnings
import unittest
def addParentToPath():
    """
    Add the parent directory to sys.path to make it importable.
    """
    # __file__ is absent when run via some exec mechanisms; fall back to argv.
    try:
        here = os.path.dirname(__file__)
    except NameError:
        here = os.path.dirname(sys.argv[0])
    parent = os.path.normpath(os.path.join(here, '..'))
    if parent in sys.path:
        return
    print("adding {} to sys.path".format(parent))
    sys.path.insert(0, parent)
def specialCaseTests():
    """
    Some modules have an explicit `test` function that collects tests --
    collect these together as a suite.
    """
    suite = unittest.TestSuite()
    for short_name in ('cryptutil', 'oidutil', 'dh'):
        full_name = 'openid.test.' + short_name
        try:
            test_mod = __import__(full_name, {}, {}, [None])
        except ImportError:
            print(('Failed to import test %r' % (full_name, )))
        else:
            suite.addTest(unittest.FunctionTestCase(test_mod.test))
    return suite
def pyUnitTests():
    """
    Aggregate unit tests from modules, including a few special cases, and
    return a suite.
    """
    # Plain unittest-style modules: loadable directly via a TestLoader.
    standard_names = [
        'server',
        'consumer',
        'message',
        'symbol',
        'etxrd',
        'xri',
        'xrires',
        'association_response',
        'auth_request',
        'negotiation',
        'verifydisco',
        'sreg',
        'ax',
        'pape',
        'pape_draft2',
        'pape_draft5',
        'rpverify',
        'extension',
        'codecutil',
    ]
    modules = [
        __import__('openid.test.test_{}'.format(name), {}, {}, ['unused'])
        for name in standard_names
    ]
    try:
        from openid.test import test_examples
    except ImportError:
        # This is very likely due to twill being unimportable, since it's
        # ancient and unmaintained. Until the examples are reimplemented using
        # something else, we just need to skip it
        warnings.warn("Could not import twill; skipping test_examples.")
    else:
        modules.append(test_examples)
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    for module in modules:
        suite.addTest(loader.loadTestsFromModule(module))
    # Data-driven modules build their own suites via a module-level
    # pyUnitTests() function instead of exposing TestCase classes.
    custom_names = [
        'kvform',
        'linkparse',
        'oidutil',
        'storetest',
        'test_accept',
        'test_association',
        'test_discover',
        'test_fetchers',
        'test_htmldiscover',
        'test_nonce',
        'test_openidyadis',
        'test_parsehtml',
        'test_urinorm',
        'test_yadis_discover',
        'trustroot',
    ]
    for name in custom_names:
        module = __import__('openid.test.{}'.format(name), {}, {}, ['unused'])
        try:
            suite.addTest(module.pyUnitTests())
        except AttributeError:
            # because the AttributeError doesn't actually say which
            # object it was.
            print(("Error loading tests from %s:" % (name, )))
            raise
    return suite
def _import_djopenid():
    """
    Import djopenid from the examples directory without putting it in sys.path
    permanently (which we don't really want to do as we don't want namespace
    conflicts)
    """
    # examples/ lives three levels above this file.
    base = os.path.join(__file__, "..", "..", "..")
    examples_dir = os.path.join(os.path.abspath(base), "examples")
    sys.path.append(examples_dir)
    import djopenid
    sys.path.remove(examples_dir)
def djangoExampleTests():
    """
    Run tests from examples/djopenid.

    @return: number of failed tests.
    """
    # Django uses this to find out where its settings are.
    os.environ['DJANGO_SETTINGS_MODULE'] = 'djopenid.settings'
    _import_djopenid()
    try:
        import django.test.simple
    except ImportError:
        raise unittest.SkipTest("Skipping django examples. "
                                "django.test.simple not found.")
    import djopenid.server.models
    import djopenid.consumer.models
    print("Testing Django examples:")
    # These tests do get put into a test suite, so we could run them with the
    # other tests, but django also establishes a test database for them, so we
    # let it do that thing instead.  (A leftover, unreachable second
    # django.test.simple.run_tests(...) call after this return was removed.)
    runner = django.test.simple.DjangoTestSuiteRunner()
    return runner.run_tests(['server', 'consumer'])
def test_suite():
    """
    Collect all of the tests together in a single suite.
    """
    addParentToPath()
    combined = unittest.TestSuite()
    for subsuite in (specialCaseTests(), pyUnitTests()):
        combined.addTests(subsuite)
    # The django examples run as one opaque callable.
    combined.addTest(unittest.FunctionTestCase(djangoExampleTests))
    return combined
| {
"content_hash": "0744717818a72583651bd2610303e72a",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 78,
"avg_line_length": 28.23497267759563,
"alnum_prop": 0.607122121153474,
"repo_name": "necaris/python3-openid",
"id": "ca0d036c53ac86659d0caf84c9f210f70a6e9901",
"size": "5167",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "openid/test/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2892"
},
{
"name": "Makefile",
"bytes": "885"
},
{
"name": "Python",
"bytes": "902717"
}
],
"symlink_target": ""
} |
from django.db import models
from django.contrib.sites.models import Site
from shoppy.l10n.models import Language
class Link(models.Model):
    """
    Simple link object. multi-site is supported.
    """
    # The linked URL itself.
    url = models.URLField()
    # Set once, automatically, when the row is first saved.
    added_date = models.DateTimeField(auto_now_add=True)
    # The django.contrib.sites Site this link belongs to.
    site = models.ForeignKey(Site)
    def __unicode__(self):
        return u'%s' % (self.url)
class LinkTranslation(models.Model):
    """
    Per-language presentation of a Link.  For example this is used to define
    the title-tag and <a>name</a> for the link and selected language.
    """
    # The language this translation applies to.
    language = models.ForeignKey(Language)
    # String reference: Link is declared earlier in this module.
    link = models.ForeignKey('Link')
    # Visible anchor text for the link in this language.
    name = models.CharField(max_length=150)
    # Optional title/description text.
    description = models.TextField(max_length=150, null=True, blank=True)
    def __unicode__(self):
        return u'%s %s' % (self.language.code, self.link) | {
"content_hash": "79ac2ea5b49f7c986bfa8cbf37d2f16e",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 76,
"avg_line_length": 32.03846153846154,
"alnum_prop": 0.673469387755102,
"repo_name": "pocketone/django-shoppy",
"id": "72cf892f4191c2749030a4725e984a61a3926f1e",
"size": "833",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shoppy/links/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
Django settings for rooster project.
Generated by 'django-admin startproject' using Django 1.9.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'uylb@tox(j5r2=0i7il7xd2buk4-tmmx39%=(^=z(-4f^2ihnn'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'scheduler',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'rooster.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'rooster', 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'rooster.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| {
"content_hash": "858f90efcf98ebb2d7352c907ed5090d",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 91,
"avg_line_length": 26.491803278688526,
"alnum_prop": 0.6896658415841584,
"repo_name": "thomasleese/rooster",
"id": "1af2c09967fbc83b469aa44685d17cada69381b2",
"size": "3232",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rooster/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "358"
},
{
"name": "HTML",
"bytes": "3182"
},
{
"name": "Python",
"bytes": "58736"
},
{
"name": "Shell",
"bytes": "2438"
}
],
"symlink_target": ""
} |
# Authentication token shared between client and server.
# NOTE(review): looks like a 32-char hex (MD5-style) digest -- confirm how
# it is derived/verified against the code that consumes it.
AUTH = 'b9c39a336bb97a9c9bda2b82bdaacff3'
# directory to save output files to
ARCHIVE_DIR = 'archive'
#
# The below configs let you bake in the server IP and beacon interval
# into the final executable so it can simply be executed without supplying
# command line arguments.
#
# server IP
#
# if this is None, it *must* be specified as a command line argument
# when client is executed
#
# SERVER_IP = '1.2.3.4' # example
SERVER_IP = None
# client beacon interval
#
# if this is None, it *may* be specified as a command line argument,
# otherwise, it will take the default value
#
# BEACON_INTERVAL = 300 # example
BEACON_INTERVAL = None
| {
"content_hash": "c42a4062091bd2c50afcaa507a3038e3",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 74,
"avg_line_length": 24.76923076923077,
"alnum_prop": 0.734472049689441,
"repo_name": "mossberg/poet",
"id": "22ba1046cffc1942c326b5bdc6a74de8c8c6c39d",
"size": "744",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "poet/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2419"
},
{
"name": "Python",
"bytes": "35599"
}
],
"symlink_target": ""
} |
import zmq
import argparse
def main():
    """Forward messages from a django publisher to tornado subscribers.

    Binds a SUB socket (subscribed to everything) at --sub and a PUB socket
    at --pub, then runs a blocking zmq FORWARDER device between them until
    interrupted with Ctrl-C.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--sub', help='django subscription socket', type=str, required=True)
    parser.add_argument('-p', '--pub', help='tornado publication socket', type=str, required=True)
    args = parser.parse_args()
    ctx = zmq.Context()
    django_sub = ctx.socket(zmq.SUB)
    django_sub.bind(args.sub)
    # Empty prefix == subscribe to every message.
    django_sub.setsockopt(zmq.SUBSCRIBE, "")
    tornado_pub = ctx.socket(zmq.PUB)
    tornado_pub.bind(args.pub)
    try:
        # Blocks forever; the previously unused `dev = ...` binding removed.
        zmq.device(zmq.FORWARDER, django_sub, tornado_pub)
    except KeyboardInterrupt:
        pass
    finally:
        # Close sockets and terminate the context so the process exits
        # cleanly instead of leaking zmq resources.
        django_sub.close()
        tornado_pub.close()
        ctx.term()
if __name__ == '__main__':
    main()
| {
"content_hash": "4c6eccba80d0524633ff67593bcafec7",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 98,
"avg_line_length": 25.923076923076923,
"alnum_prop": 0.6439169139465876,
"repo_name": "mike-grayhat/djazator",
"id": "a47f24c01ad42fe3df5400c74b016c6c9f4a853b",
"size": "674",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/djazator/mq.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9045"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from tgext.ecommerce.lib.exceptions import CategoryAssignedToProductException
from stroller2.lib import get_new_category_form, get_edit_category_form
from tg import expose, TGController, app_globals, validate, redirect, flash
import tg
from tgext.pluggable import plug_url
from tg.i18n import lazy_ugettext as l_, ugettext as _
class ManageCategoryController(TGController):
    """CRUD controller for shop categories backed by ``app_globals.shop``."""
    @expose('genshi:stroller2.templates.manage.category.index')
    def index(self, **kw):
        # list every known category for the index template
        all_categories = app_globals.shop.category.get_all().all()
        return dict(categories=all_categories)
    @expose('genshi:stroller2.templates.manage.category.new')
    def new(self, **kw):
        create_url = plug_url('stroller2', '/manage/category/create')
        return dict(form=get_new_category_form(), action=create_url)
    @expose()
    @validate(get_new_category_form(), error_handler=new)
    def create(self, **kw):
        # swap the submitted parent_id for the resolved parent category object
        kw['parent'] = app_globals.shop.category.get(kw.pop('parent_id'))
        app_globals.shop.category.create(**kw)
        flash(_('Category created'))
        return redirect(plug_url('stroller2', '/manage/category/index'))
    @expose('genshi:stroller2.templates.manage.category.edit')
    def edit(self, **kw):
        category = app_globals.shop.category.get(kw['category_id'])
        # NOTE(review): parent_id is seeded with the parent object itself —
        # confirm the edit form expects the object rather than its _id
        prefill = dict(category_id=category._id,
                       name=category.name[tg.config.lang],
                       parent_id=category.parent)
        return dict(form=get_edit_category_form(),
                    action=plug_url('stroller2', '/manage/category/save'),
                    value=prefill)
    @expose()
    @validate(get_edit_category_form(), error_handler=edit)
    def save(self, **kw):
        # resolve foreign keys, then hand the rest of kw to the shop layer
        kw['parent'] = app_globals.shop.category.get(kw.pop('parent_id'))
        kw['_id'] = kw.pop('category_id')
        app_globals.shop.category.edit(**kw)
        flash(_('Category edited'))
        return redirect(plug_url('stroller2', '/manage/category/index'))
    @expose()
    def delete(self, category_id, **kw):
        # deletion is refused while a product still references the category
        try:
            app_globals.shop.category.delete(category_id)
            flash(_('Category deleted'))
        except CategoryAssignedToProductException:
            flash(_('Is impossible to delete a category assigned to product'), 'error')
        return redirect(plug_url('stroller2', '/manage/category/index'))
| {
"content_hash": "aaf6c87b2a68e2eaba3a862bef061c9f",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 106,
"avg_line_length": 44.38461538461539,
"alnum_prop": 0.6650779896013865,
"repo_name": "gasbasd/tgapp-stroller2",
"id": "6575a6e3c4c5e0f3ddc2b4fc860faef145a86d9f",
"size": "2323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stroller2/controllers/manage/category.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3359"
},
{
"name": "Python",
"bytes": "74545"
}
],
"symlink_target": ""
} |
from datetime import datetime
def convert_date(datestring):
    """Parse a timestamp such as 'Jun 01 2005 01:33PM' into a datetime."""
    fmt = '%b %d %Y %I:%M%p'
    return datetime.strptime(datestring, fmt)
def convert_to_json(lineEntry):
    """Serialize one raw line from data.txt as a JSON string.

    The original definition had no body (a syntax error in the file as
    checked in); this minimal implementation wraps the stripped line in a
    one-key JSON object so the call sites below at least run.

    :param lineEntry: one line read from the data file (may end in a newline)
    :returns: a JSON string of the form ``{"entry": "<stripped line>"}``
    """
    import json  # local import: keeps the module's top-of-file imports untouched
    return json.dumps({'entry': lineEntry.strip()})
with open('data.txt') as handle:
    raw_lines = handle.readlines()
# echo everything read, then convert line by line (results are discarded,
# exactly as in the original script)
print(raw_lines)
for raw in raw_lines:
    convert_to_json(raw)
| {
"content_hash": "18890400874f388f0748141d30dd9c65",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 57,
"avg_line_length": 15.705882352941176,
"alnum_prop": 0.7078651685393258,
"repo_name": "apokellypse/shout-webapp",
"id": "1421d09fa4017a95dea759ddf3cc459eb0786849",
"size": "327",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shout/models/markers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "112"
},
{
"name": "HTML",
"bytes": "35655"
},
{
"name": "JavaScript",
"bytes": "5346"
},
{
"name": "Python",
"bytes": "3657"
}
],
"symlink_target": ""
} |
from node import hex, nullid, nullrev, short
from lock import release
from i18n import _, gettext
import os, re, sys, difflib, time, tempfile
import hg, util, revlog, bundlerepo, extensions, copies, error
import patch, help, mdiff, url, encoding, templatekw
import archival, changegroup, cmdutil, sshserver, hbisect
from hgweb import server
import merge as merge_
import minirst
# Commands start here, listed alphabetically
def add(ui, repo, *pats, **opts):
    """add the specified files on the next commit
    Schedule files to be version controlled and added to the
    repository.
    The files will be added to the repository at the next commit. To
    undo an add before that, see hg forget.
    If no names are given, add all files to the repository.
    .. container:: verbose
       An example showing how new (unknown) files are added
       automatically by ``hg add``::
         $ ls
         foo.c
         $ hg status
         ? foo.c
         $ hg add
         adding foo.c
         $ hg status
         A foo.c
    """
    bad = []
    names = []
    m = cmdutil.match(repo, pats, opts)
    oldbad = m.bad
    # chain onto the matcher's bad-file callback: record the failure for
    # the exit code AND still report it through the original callback
    m.bad = lambda x, y: bad.append(x) or oldbad(x, y)
    for f in repo.walk(m):
        exact = m.exact(f)
        # add files named explicitly even if already tracked; otherwise
        # only files the dirstate does not know about
        if exact or f not in repo.dirstate:
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % m.rel(f))
    if not opts.get('dry_run'):
        # repo.add returns files it could not add; only the explicitly
        # named ones count toward the failure exit status
        bad += [f for f in repo.add(names) if f in m.files()]
    return bad and 1 or 0
def addremove(ui, repo, *pats, **opts):
    """add all new files, delete all missing files
    Add all new files and remove all missing files from the
    repository.
    New files are ignored if they match any of the patterns in
    .hgignore. As with add, these changes take effect at the next
    commit.
    Use the -s/--similarity option to detect renamed files. With a
    parameter greater than 0, this compares every removed file with
    every added file and records those similar enough as renames. This
    option takes a percentage between 0 (disabled) and 100 (files must
    be identical) as its parameter. Detecting renamed files this way
    can be expensive.
    """
    raw = opts.get('similarity') or 0
    try:
        similarity = float(raw)
    except ValueError:
        raise util.Abort(_('similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise util.Abort(_('similarity must be between 0 and 100'))
    # cmdutil expects the similarity threshold as a 0..1 fraction
    return cmdutil.addremove(repo, pats, opts, similarity=similarity / 100.0)
def annotate(ui, repo, *pats, **opts):
    """show changeset information by line for each file
    List changes in files, showing the revision id responsible for
    each line
    This command is useful for discovering when a change was made and
    by whom.
    Without the -a/--text option, annotate will avoid processing files
    it detects as binary. With -a, annotate will annotate the file
    anyway, although the results will probably be neither useful
    nor desirable.
    """
    if opts.get('follow'):
        # --follow is deprecated and now just an alias for -f/--file
        # to mimic the behavior of Mercurial before version 1.5
        opts['file'] = 1
    datefunc = ui.quiet and util.shortdate or util.datestr
    getdate = util.cachefunc(lambda x: datefunc(x[0].date()))
    if not pats:
        raise util.Abort(_('at least one filename or pattern is required'))
    # each enabled option below contributes one output column; x is a
    # (fctx, linenumber) pair produced by fctx.annotate()
    opmap = [('user', lambda x: ui.shortuser(x[0].user())),
             ('number', lambda x: str(x[0].rev())),
             ('changeset', lambda x: short(x[0].node())),
             ('date', getdate),
             ('file', lambda x: x[0].path()),
            ]
    if (not opts.get('user') and not opts.get('changeset')
        and not opts.get('date') and not opts.get('file')):
        # no column requested: fall back to the revision number column
        opts['number'] = 1
    linenumber = opts.get('line_number') is not None
    if linenumber and (not opts.get('changeset')) and (not opts.get('number')):
        raise util.Abort(_('at least one of -n/-c is required for -l'))
    funcmap = [func for op, func in opmap if opts.get(op)]
    if linenumber:
        # fold the line number into the last column's rendering
        lastfunc = funcmap[-1]
        funcmap[-1] = lambda x: "%s:%s" % (lastfunc(x), x[1])
    ctx = repo[opts.get('rev')]
    m = cmdutil.match(repo, pats, opts)
    follow = not opts.get('no_follow')
    for abs in ctx.walk(m):
        fctx = ctx[abs]
        if not opts.get('text') and util.binary(fctx.data()):
            ui.write(_("%s: binary file\n") % ((pats and m.rel(abs)) or abs))
            continue
        lines = fctx.annotate(follow=follow, linenumber=linenumber)
        pieces = []
        # right-align every column to its widest entry before printing
        for f in funcmap:
            l = [f(n) for n, dummy in lines]
            if l:
                ml = max(map(len, l))
                pieces.append(["%*s" % (ml, x) for x in l])
        if pieces:
            for p, l in zip(zip(*pieces), lines):
                ui.write("%s: %s" % (" ".join(p), l[1]))
def archive(ui, repo, dest, **opts):
    '''create an unversioned archive of a repository revision
    By default, the revision used is the parent of the working
    directory; use -r/--rev to specify a different revision.
    To specify the type of archive to create, use -t/--type. Valid
    types are:
    :``files``: a directory full of files (default)
    :``tar``: tar archive, uncompressed
    :``tbz2``: tar archive, compressed using bzip2
    :``tgz``: tar archive, compressed using gzip
    :``uzip``: zip archive, uncompressed
    :``zip``: zip archive, compressed using deflate
    The exact name of the destination archive or directory is given
    using a format string; see 'hg help export' for details.
    Each member added to an archive file has a directory prefix
    prepended. Use -p/--prefix to specify a format string for the
    prefix. The default is the basename of the archive, with suffixes
    removed.
    '''
    ctx = repo[opts.get('rev')]
    if not ctx:
        raise util.Abort(_('no working directory: please specify a revision'))
    node = ctx.node()
    # expand %-format sequences (e.g. %h) in the destination name
    dest = cmdutil.make_filename(repo, dest, node)
    if os.path.realpath(dest) == repo.root:
        raise util.Abort(_('repository root cannot be destination'))
    matchfn = cmdutil.match(repo, [], opts)
    kind = opts.get('type') or 'files'
    prefix = opts.get('prefix')
    if dest == '-':
        # streaming to stdout: a plain directory of files makes no sense
        if kind == 'files':
            raise util.Abort(_('cannot archive plain files to stdout'))
        dest = sys.stdout
        if not prefix:
            prefix = os.path.basename(repo.root) + '-%h'
    prefix = cmdutil.make_filename(repo, prefix, node)
    archival.archive(repo, dest, node, kind, not opts.get('no_decode'),
                     matchfn, prefix)
def backout(ui, repo, node=None, rev=None, **opts):
    '''reverse effect of earlier changeset
    Commit the backed out changes as a new changeset. The new
    changeset is a child of the backed out changeset.
    If you backout a changeset other than the tip, a new head is
    created. This head will be the new tip and you should merge this
    backout changeset with another head.
    The --merge option remembers the parent of the working directory
    before starting the backout, then merges the new head with that
    changeset afterwards. This saves you from doing the merge by hand.
    The result of this merge is not committed, as with a normal merge.
    See 'hg help dates' for a list of formats valid for -d/--date.
    '''
    # node and rev are aliases for the same positional argument
    if rev and node:
        raise util.Abort(_("please specify just one revision"))
    if not rev:
        rev = node
    if not rev:
        raise util.Abort(_("please specify a revision to backout"))
    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)
    cmdutil.bail_if_changed(repo)
    node = repo.lookup(rev)
    # op1/op2: working-directory parents before we touch anything
    op1, op2 = repo.dirstate.parents()
    a = repo.changelog.ancestor(op1, node)
    if a != node:
        raise util.Abort(_('cannot backout change on a different branch'))
    p1, p2 = repo.changelog.parents(node)
    if p1 == nullid:
        raise util.Abort(_('cannot backout a change with no parents'))
    if p2 != nullid:
        # merge changeset: the user must say which parent to revert toward
        if not opts.get('parent'):
            raise util.Abort(_('cannot backout a merge changeset without '
                               '--parent'))
        p = repo.lookup(opts['parent'])
        if p not in (p1, p2):
            raise util.Abort(_('%s is not a parent of %s') %
                             (short(p), short(node)))
        parent = p
    else:
        if opts.get('parent'):
            raise util.Abort(_('cannot use --parent on non-merge changeset'))
        parent = p1
    # the backout should appear on the same branch
    branch = repo.dirstate.branch()
    hg.clean(repo, node, show_stats=False)
    repo.dirstate.setbranch(branch)
    # revert the working directory to the chosen parent, then commit the
    # result as the backout changeset
    revert_opts = opts.copy()
    revert_opts['date'] = None
    revert_opts['all'] = True
    revert_opts['rev'] = hex(parent)
    revert_opts['no_backup'] = None
    revert(ui, repo, **revert_opts)
    commit_opts = opts.copy()
    commit_opts['addremove'] = False
    if not commit_opts['message'] and not commit_opts['logfile']:
        # we don't translate commit messages
        commit_opts['message'] = "Backed out changeset %s" % short(node)
        commit_opts['force_editor'] = True
    commit(ui, repo, **commit_opts)
    def nice(node):
        # short human-readable "rev:hash" form for status messages
        return '%d:%s' % (repo.changelog.rev(node), short(node))
    ui.status(_('changeset %s backs out changeset %s\n') %
              (nice(repo.changelog.tip()), nice(node)))
    if op1 != node:
        # restore the pre-backout working directory, optionally merging
        # the freshly committed backout head into it
        hg.clean(repo, op1, show_stats=False)
        if opts.get('merge'):
            ui.status(_('merging with changeset %s\n')
                      % nice(repo.changelog.tip()))
            hg.merge(repo, hex(repo.changelog.tip()))
        else:
            ui.status(_('the backout changeset is a new head - '
                        'do not forget to merge\n'))
            ui.status(_('(use "backout --merge" '
                        'if you want to auto-merge)\n'))
def bisect(ui, repo, rev=None, extra=None, command=None,
           reset=None, good=None, bad=None, skip=None, noupdate=None):
    """subdivision search of changesets
    This command helps to find changesets which introduce problems. To
    use, mark the earliest changeset you know exhibits the problem as
    bad, then mark the latest changeset which is free from the problem
    as good. Bisect will update your working directory to a revision
    for testing (unless the -U/--noupdate option is specified). Once
    you have performed tests, mark the working directory as good or
    bad, and bisect will either update to another candidate changeset
    or announce that it has found the bad revision.
    As a shortcut, you can also use the revision argument to mark a
    revision as good or bad without checking it out first.
    If you supply a command, it will be used for automatic bisection.
    Its exit status will be used to mark revisions as good or bad:
    status 0 means good, 125 means to skip the revision, 127
    (command not found) will abort the bisection, and any other
    non-zero exit status means the revision is bad.
    """
    def print_result(nodes, good):
        # report the culprit (or the candidate set when skips made the
        # answer ambiguous)
        displayer = cmdutil.show_changeset(ui, repo, {})
        if len(nodes) == 1:
            # narrowed it down to a single revision
            if good:
                ui.write(_("The first good revision is:\n"))
            else:
                ui.write(_("The first bad revision is:\n"))
            displayer.show(repo[nodes[0]])
        else:
            # multiple possible revisions
            if good:
                ui.write(_("Due to skipped revisions, the first "
                           "good revision could be any of:\n"))
            else:
                ui.write(_("Due to skipped revisions, the first "
                           "bad revision could be any of:\n"))
            for n in nodes:
                displayer.show(repo[n])
        displayer.close()
    def check_state(state, interactive=True):
        # a bisection needs at least one good and one bad revision; in
        # interactive mode a partial state is fine while marking
        if not state['good'] or not state['bad']:
            if (good or bad or skip or reset) and interactive:
                return
            if not state['good']:
                raise util.Abort(_('cannot bisect (no known good revisions)'))
            else:
                raise util.Abort(_('cannot bisect (no known bad revisions)'))
        return True
    # backward compatibility
    if rev in "good bad reset init".split():
        ui.warn(_("(use of 'hg bisect <cmd>' is deprecated)\n"))
        cmd, rev, extra = rev, extra, None
        if cmd == "good":
            good = True
        elif cmd == "bad":
            bad = True
        else:
            reset = True
    elif extra or good + bad + skip + reset + bool(command) > 1:
        raise util.Abort(_('incompatible arguments'))
    if reset:
        # --reset: forget the saved bisection state and stop
        p = repo.join("bisect.state")
        if os.path.exists(p):
            os.unlink(p)
        return
    state = hbisect.load_state(repo)
    if command:
        # automatic mode: run the command, translate its exit status
        # into a good/bad/skip mark, and repeat until narrowed down
        changesets = 1
        try:
            while changesets:
                # update state
                status = util.system(command)
                if status == 125:
                    transition = "skip"
                elif status == 0:
                    transition = "good"
                # status < 0 means process was killed
                elif status == 127:
                    raise util.Abort(_("failed to execute %s") % command)
                elif status < 0:
                    raise util.Abort(_("%s killed") % command)
                else:
                    transition = "bad"
                ctx = repo[rev or '.']
                state[transition].append(ctx.node())
                ui.status(_('Changeset %d:%s: %s\n') % (ctx, ctx, transition))
                check_state(state, interactive=False)
                # bisect
                nodes, changesets, good = hbisect.bisect(repo.changelog, state)
                # update to next check
                cmdutil.bail_if_changed(repo)
                hg.clean(repo, nodes[0], show_stats=False)
        finally:
            # persist whatever was learned even if the command aborted
            hbisect.save_state(repo, state)
        return print_result(nodes, good)
    # update state
    node = repo.lookup(rev or '.')
    if good or bad or skip:
        if good:
            state['good'].append(node)
        elif bad:
            state['bad'].append(node)
        elif skip:
            state['skip'].append(node)
        hbisect.save_state(repo, state)
    if not check_state(state):
        return
    # actually bisect
    nodes, changesets, good = hbisect.bisect(repo.changelog, state)
    if changesets == 0:
        print_result(nodes, good)
    else:
        assert len(nodes) == 1 # only a single node can be tested next
        node = nodes[0]
        # compute the approximate number of remaining tests
        tests, size = 0, 2
        while size <= changesets:
            tests, size = tests + 1, size * 2
        rev = repo.changelog.rev(node)
        ui.write(_("Testing changeset %d:%s "
                   "(%d changesets remaining, ~%d tests)\n")
                 % (rev, short(node), changesets, tests))
        if not noupdate:
            cmdutil.bail_if_changed(repo)
            return hg.clean(repo, node)
def branch(ui, repo, label=None, **opts):
    """set or show the current branch name
    With no argument, show the current branch name. With one argument,
    set the working directory branch name (the branch will not exist
    in the repository until the next commit). Standard practice
    recommends that primary development take place on the 'default'
    branch.
    Unless -f/--force is specified, branch will not let you set a
    branch name that already exists, even if it's inactive.
    Use -C/--clean to reset the working directory branch to that of
    the parent of the working directory, negating a previous branch
    change.
    Use the command 'hg update' to switch to an existing branch. Use
    'hg commit --close-branch' to mark this branch as closed.
    """
    if opts.get('clean'):
        # --clean: revert the dirstate branch to the first parent's branch
        label = repo[None].parents()[0].branch()
        repo.dirstate.setbranch(label)
        ui.status(_('reset working directory to branch %s\n') % label)
    elif label:
        # branch names are stored UTF-8; convert from the local encoding
        utflabel = encoding.fromlocal(label)
        if not opts.get('force') and utflabel in repo.branchtags():
            if label not in [p.branch() for p in repo.parents()]:
                raise util.Abort(_('a branch of the same name already exists'
                                   " (use 'hg update' to switch to it)"))
        repo.dirstate.setbranch(utflabel)
        ui.status(_('marked working directory as branch %s\n') % label)
    else:
        # no argument: just print the current branch, back in local encoding
        ui.write("%s\n" % encoding.tolocal(repo.dirstate.branch()))
def branches(ui, repo, active=False, closed=False):
    """list repository named branches
    List the repository's named branches, indicating which ones are
    inactive. If -c/--closed is specified, also list branches which have
    been marked closed (see hg commit --close-branch).
    If -a/--active is specified, only show active branches. A branch
    is considered active if it contains repository heads.
    Use the command 'hg update' to switch to an existing branch.
    """
    hexfunc = ui.debugflag and hex or short
    activebranches = [repo[n].branch() for n in repo.heads()]
    def testactive(tag, node):
        # active = the branch has a repository head AND this node is an
        # open (non-closed) head of that branch
        realhead = tag in activebranches
        open = node in repo.branchheads(tag, closed=False)
        return realhead and open
    # sort: active branches first, then by descending revision number
    branches = sorted([(testactive(tag, node), repo.changelog.rev(node), tag)
                       for tag, node in repo.branchtags().items()],
                      reverse=True)
    for isactive, node, tag in branches:
        if (not active) or isactive:
            encodedtag = encoding.tolocal(tag)
            if ui.quiet:
                ui.write("%s\n" % encodedtag)
            else:
                hn = repo.lookup(node)
                if isactive:
                    notice = ''
                elif hn not in repo.branchheads(tag, closed=False):
                    # no open head: closed branch; hide unless --closed
                    if not closed:
                        continue
                    notice = _(' (closed)')
                else:
                    notice = _(' (inactive)')
                # pad so the rev:hash column lines up across branch names
                rev = str(node).rjust(31 - encoding.colwidth(encodedtag))
                data = encodedtag, rev, hexfunc(hn), notice
                ui.write("%s %s:%s%s\n" % data)
def bundle(ui, repo, fname, dest=None, **opts):
    """create a changegroup file
    Generate a compressed changegroup file collecting changesets not
    known to be in another repository.
    If you omit the destination repository, then hg assumes the
    destination will have all the nodes you specify with --base
    parameters. To create a bundle containing all changesets, use
    -a/--all (or --base null).
    You can change compression method with the -t/--type option.
    The available compression methods are: none, bzip2, and
    gzip (by default, bundles are compressed using bzip2).
    The bundle file can then be transferred using conventional means
    and applied to another repository with the unbundle or pull
    command. This is useful when direct push and pull are not
    available or when exporting an entire repository is undesirable.
    Applying bundles preserves all changeset contents including
    permissions, copy/rename information, and revision history.
    """
    revs = opts.get('rev') or None
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    if opts.get('all'):
        base = ['null']
    else:
        base = opts.get('base')
    if base:
        if dest:
            raise util.Abort(_("--base is incompatible with specifying "
                               "a destination"))
        base = [repo.lookup(rev) for rev in base]
        # create the right base
        # XXX: nodesbetween / changegroup* should be "fixed" instead
        # walk from the requested heads toward the roots, collecting
        # every node not already reachable from a --base node
        o = []
        has = set((nullid,))
        for n in base:
            has.update(repo.changelog.reachable(n))
        if revs:
            visit = list(revs)
            has.difference_update(revs)
        else:
            visit = repo.changelog.heads()
        seen = {}
        while visit:
            n = visit.pop(0)
            parents = [p for p in repo.changelog.parents(n) if p not in has]
            if len(parents) == 0:
                # all parents are on the base side: n is an outgoing root
                if n not in has:
                    o.append(n)
            else:
                for p in parents:
                    if p not in seen:
                        seen[p] = 1
                        visit.append(p)
    else:
        # no --base: ask the remote which changesets it is missing
        dest = ui.expandpath(dest or 'default-push', dest or 'default')
        dest, branches = hg.parseurl(dest, opts.get('branch'))
        other = hg.repository(cmdutil.remoteui(repo, opts), dest)
        revs, checkout = hg.addbranchrevs(repo, other, branches, revs)
        o = repo.findoutgoing(other, force=opts.get('force'))
    if not o:
        ui.status(_("no changes found\n"))
        return
    if revs:
        cg = repo.changegroupsubset(o, revs, 'bundle')
    else:
        cg = repo.changegroup(o, 'bundle')
    # map the user-visible compression name to the on-disk bundle header
    bundletype = opts.get('type', 'bzip2').lower()
    btypes = {'none': 'HG10UN', 'bzip2': 'HG10BZ', 'gzip': 'HG10GZ'}
    bundletype = btypes.get(bundletype)
    if bundletype not in changegroup.bundletypes:
        raise util.Abort(_('unknown bundle type specified with --type'))
    changegroup.writebundle(cg, fname, bundletype)
def cat(ui, repo, file1, *pats, **opts):
    """output the current or given revision of files
    Print the specified files as they were at the given revision. If
    no revision is given, the parent of the working directory is used,
    or tip if no revision is checked out.
    Output may be to a file, in which case the name of the file is
    given using a format string. The formatting rules are the same as
    for the export command, with the following additions:
    :``%s``: basename of file being printed
    :``%d``: dirname of file being printed, or '.' if in repository root
    :``%p``: root-relative path name of file being printed
    """
    ctx = repo[opts.get('rev')]
    # exit status 1 until at least one file is actually printed
    err = 1
    m = cmdutil.match(repo, (file1,) + pats, opts)
    for abs in ctx.walk(m):
        # one output stream per file (stdout or a %-formatted filename)
        fp = cmdutil.make_file(repo, opts.get('output'), ctx.node(), pathname=abs)
        data = ctx[abs].data()
        if opts.get('decode'):
            # apply decode filters (e.g. EOL conversion) as on checkout
            data = repo.wwritedata(abs, data)
        fp.write(data)
        err = 0
    return err
def clone(ui, source, dest=None, **opts):
    """make a copy of an existing repository
    Create a copy of an existing repository in a new directory.
    If no destination directory name is specified, it defaults to the
    basename of the source.
    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls.
    See 'hg help urls' for valid source format details.
    It is possible to specify an ``ssh://`` URL as the destination, but no
    .hg/hgrc and working directory will be created on the remote side.
    Please see 'hg help urls' for important details about ``ssh://`` URLs.
    A set of changesets (tags, or branch names) to pull may be specified
    by listing each changeset (tag, or branch name) with -r/--rev.
    If -r/--rev is used, the cloned repository will contain only a subset
    of the changesets of the source repository. Only the set of changesets
    defined by all -r/--rev options (including all their ancestors)
    will be pulled into the destination repository.
    No subsequent changesets (including subsequent tags) will be present
    in the destination.
    Using -r/--rev (or 'clone src#rev dest') implies --pull, even for
    local source repositories.
    For efficiency, hardlinks are used for cloning whenever the source
    and destination are on the same filesystem (note this applies only
    to the repository data, not to the working directory). Some
    filesystems, such as AFS, implement hardlinking incorrectly, but
    do not report errors. In these cases, use the --pull option to
    avoid hardlinking.
    In some cases, you can clone repositories and the working directory
    using full hardlinks with ::
      $ cp -al REPO REPOCLONE
    This is the fastest way to clone, but it is not always safe. The
    operation is not atomic (making sure REPO is not modified during
    the operation is up to you) and you have to make sure your editor
    breaks hardlinks (Emacs and most Linux Kernel tools do so). Also,
    this is not compatible with certain extensions that place their
    metadata under the .hg directory, such as mq.
    Mercurial will update the working directory to the first applicable
    revision from this list:
    a) null if -U or the source repository has no changesets
    b) if -u . and the source repository is local, the first parent of
       the source repository's working directory
    c) the changeset specified with -u (if a branch name, this means the
       latest head of that branch)
    d) the changeset specified with -r
    e) the tipmost head specified with -b
    f) the tipmost head specified with the url#branch source syntax
    g) the tipmost head of the default branch
    h) tip
    """
    if opts.get('noupdate') and opts.get('updaterev'):
        raise util.Abort(_("cannot specify both --noupdate and --updaterev"))
    # all real work is delegated to hg.clone; this wrapper only maps
    # command-line options onto its keyword arguments
    hg.clone(cmdutil.remoteui(ui, opts), source, dest,
             pull=opts.get('pull'),
             stream=opts.get('uncompressed'),
             rev=opts.get('rev'),
             update=opts.get('updaterev') or not opts.get('noupdate'),
             branch=opts.get('branch'))
def commit(ui, repo, *pats, **opts):
    """commit the specified files or all outstanding changes
    Commit changes to the given files into the repository. Unlike a
    centralized RCS, this operation is a local operation. See hg push
    for a way to actively distribute your changes.
    If a list of files is omitted, all changes reported by "hg status"
    will be committed.
    If you are committing the result of a merge, do not provide any
    filenames or -I/-X filters.
    If no commit message is specified, the configured editor is
    started to prompt you for a message.
    See 'hg help dates' for a list of formats valid for -d/--date.
    """
    extra = {}
    if opts.get('close_branch'):
        extra['close'] = 1
    e = cmdutil.commiteditor
    if opts.get('force_editor'):
        e = cmdutil.commitforceeditor
    def commitfunc(ui, repo, message, match, opts):
        # actual commit, with the chosen editor and extra metadata
        return repo.commit(message, opts.get('user'), opts.get('date'), match,
                           editor=e, extra=extra)
    node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
    if not node:
        ui.status(_("nothing changed\n"))
        return
    cl = repo.changelog
    rev = cl.rev(node)
    parents = cl.parentrevs(rev)
    if rev - 1 in parents:
        # one of the parents was the old tip
        pass
    elif (parents == (nullrev, nullrev) or
          len(cl.heads(cl.node(parents[0]))) > 1 and
          (parents[1] == nullrev or len(cl.heads(cl.node(parents[1]))) > 1)):
        # the new changeset did not extend an existing head
        ui.status(_('created new head\n'))
    if ui.debugflag:
        ui.write(_('committed changeset %d:%s\n') % (rev, hex(node)))
    elif ui.verbose:
        ui.write(_('committed changeset %d:%s\n') % (rev, short(node)))
def copy(ui, repo, *pats, **opts):
    """mark files as copied for the next commit
    Mark dest as having copies of source files. If dest is a
    directory, copies are put in that directory. If dest is a file,
    the source must be a single file.
    By default, this command copies the contents of files as they
    exist in the working directory. If invoked with -A/--after, the
    operation is recorded, but no copying is performed.
    This command takes effect with the next commit. To undo a copy
    before that, see hg revert.
    """
    # hold the working-directory lock while the copy is recorded
    lock = repo.wlock(False)
    try:
        result = cmdutil.copy(ui, repo, pats, opts)
    finally:
        lock.release()
    return result
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    if len(args) == 3:
        # explicit index file given: operate without a repository
        index, rev1, rev2 = args
        rlog = revlog.revlog(util.opener(os.getcwd(), audit=False), index)
        lookup = rlog.lookup
    elif len(args) == 2:
        # no index file: fall back to the current repository's changelog
        if not repo:
            raise util.Abort(_("There is no Mercurial repository here "
                               "(.hg not found)"))
        rev1, rev2 = args
        rlog = repo.changelog
        lookup = repo.lookup
    else:
        raise util.Abort(_('either two or three arguments required'))
    ancestor = rlog.ancestor(lookup(rev1), lookup(rev2))
    ui.write("%d:%s\n" % (rlog.rev(ancestor), hex(ancestor)))
def debugcommands(ui, cmd='', *args):
    # dump every known command with its long option names, one per line
    # (renamed the loop variable: the original shadowed the cmd parameter)
    for name, entry in sorted(table.iteritems()):
        name = name.split('|')[0].strip('^')
        flags = ', '.join([opt[1] for opt in entry[1]])
        ui.write('%s: %s\n' % (name, flags))
def debugcomplete(ui, cmd='', **opts):
    """returns the completion list associated with the given command"""
    if opts.get('options'):
        # --options: complete option names (global + command-specific)
        options = []
        otables = [globalopts]
        if cmd:
            aliases, entry = cmdutil.findcmd(cmd, table, False)
            otables.append(entry[1])
        for t in otables:
            for o in t:
                # o[0] is the short option letter, o[1] the long name
                if o[0]:
                    options.append('-%s' % o[0])
                options.append('--%s' % o[1])
        ui.write("%s\n" % "\n".join(options))
        return
    # otherwise complete command names matching the given prefix
    cmdlist = cmdutil.findpossible(cmd, table)
    if ui.verbose:
        cmdlist = [' '.join(c[0]) for c in cmdlist.values()]
    ui.write("%s\n" % "\n".join(sorted(cmdlist)))
def debugfsinfo(ui, path = "."):
    # probe filesystem capabilities via a scratch file in the cwd
    def yesno(flag):
        return flag and 'yes' or 'no'
    open('.debugfsinfo', 'w').write('')
    ui.write('exec: %s\n' % yesno(util.checkexec(path)))
    ui.write('symlink: %s\n' % yesno(util.checklink(path)))
    ui.write('case-sensitive: %s\n' % yesno(util.checkcase('.debugfsinfo')))
    os.unlink('.debugfsinfo')
def debugrebuildstate(ui, repo, rev="tip"):
    """rebuild the dirstate as it would look like for the given revision"""
    target = repo[rev]
    # rebuilding mutates the dirstate, so hold the working-directory lock
    lock = repo.wlock()
    try:
        repo.dirstate.rebuild(target.node(), target.manifest())
    finally:
        lock.release()
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate"""
    parent1, parent2 = repo.dirstate.parents()
    m1 = repo[parent1].manifest()
    m2 = repo[parent2].manifest()
    errors = 0
    # cross-check each dirstate entry against the parent manifests;
    # states: n=normal, a=added, r=removed, m=merged
    for f in repo.dirstate:
        state = repo.dirstate[f]
        if state in "nr" and f not in m1:
            ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
            errors += 1
        if state in "a" and f in m1:
            ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
            errors += 1
        if state in "m" and f not in m1 and f not in m2:
            ui.warn(_("%s in state %s, but not in either manifest\n") %
                    (f, state))
            errors += 1
    # reverse direction: every manifest file must appear in the dirstate
    for f in m1:
        state = repo.dirstate[f]
        if state not in "nrm":
            ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
            errors += 1
    if errors:
        error = _(".hg/dirstate inconsistent with current parent's manifest")
        raise util.Abort(error)
def showconfig(ui, repo, *values, **opts):
    """show combined config settings from all hgrc files
    With no arguments, print names and values of all config items.
    With one argument of the form section.name, print just the value
    of that config item.
    With multiple arguments, print names and values of all config
    items with matching section names.
    With --debug, the source (filename and line number) is printed
    for each config item.
    """
    untrusted = bool(opts.get('untrusted'))
    if values:
        # at most one fully-qualified "section.name" argument is allowed
        if len([v for v in values if '.' in v]) > 1:
            raise util.Abort(_('only one config item permitted'))
    for section, name, value in ui.walkconfig(untrusted=untrusted):
        sectname = section + '.' + name
        if values:
            for v in values:
                if v == section:
                    # bare section name: print every item in that section
                    ui.debug('%s: ' %
                             ui.configsource(section, name, untrusted))
                    ui.write('%s=%s\n' % (sectname, value))
                elif v == sectname:
                    # exact section.name match: print only the value
                    ui.debug('%s: ' %
                             ui.configsource(section, name, untrusted))
                    ui.write(value, '\n')
        else:
            ui.debug('%s: ' %
                     ui.configsource(section, name, untrusted))
            ui.write('%s=%s\n' % (sectname, value))
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory
    This is useful for writing repository conversion tools, but should
    be used with care.
    """
    # a missing second revision means the null revision
    second = rev2 or hex(nullid)
    lock = repo.wlock()
    try:
        repo.dirstate.setparents(repo.lookup(rev1), repo.lookup(second))
    finally:
        lock.release()
def debugstate(ui, repo, nodates=None):
    """show the contents of the current dirstate"""
    timestr = ""
    showdate = not nodates
    # each dirstate entry is (state, mode, size, mtime)
    for file_, ent in sorted(repo.dirstate._map.iteritems()):
        if showdate:
            if ent[3] == -1:
                # Pad or slice to locale representation
                locale_len = len(time.strftime("%Y-%m-%d %H:%M:%S ",
                                               time.localtime(0)))
                timestr = 'unset'
                timestr = (timestr[:locale_len] +
                           ' ' * (locale_len - len(timestr)))
            else:
                timestr = time.strftime("%Y-%m-%d %H:%M:%S ",
                                        time.localtime(ent[3]))
        # 020000 is the symlink bit in the stored mode (Python 2 octal)
        if ent[1] & 020000:
            mode = 'lnk'
        else:
            mode = '%3o' % (ent[1] & 0777)
        ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
    for f in repo.dirstate.copies():
        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
def debugsub(ui, repo, rev=None):
    # an empty revision string means "no revision given"
    if rev == '':
        rev = None
    # print every subrepo entry: path, source URL, pinned revision
    for path, info in sorted(repo[rev].substate.items()):
        ui.write('path %s\n source   %s\n revision %s\n'
                 % (path, info[0], info[1]))
def debugdata(ui, file_, rev):
    """dump the contents of a data file revision"""
    # Swap the trailing ".d" of the data file for ".i" to open the
    # corresponding revlog index.
    rlog = revlog.revlog(util.opener(os.getcwd(), audit=False),
                         file_[:-2] + ".i")
    try:
        ui.write(rlog.revision(rlog.lookup(rev)))
    except KeyError:
        raise util.Abort(_('invalid revision identifier %s') % rev)
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    # NOTE: the parameter name 'range' shadows the builtin; kept for
    # interface compatibility with the command table.
    if opts["extended"]:
        # --extended accepts the additional, more permissive formats.
        parsed = util.parsedate(date, util.extendeddateformats)
    else:
        parsed = util.parsedate(date)
    ui.write("internal: %s %s\n" % parsed)
    ui.write("standard: %s\n" % util.datestr(parsed))
    if range:
        matcher = util.matchdate(range)
        ui.write("match: %s\n" % matcher(parsed[0]))
def debugindex(ui, file_):
    """dump the contents of an index file"""
    r = revlog.revlog(util.opener(os.getcwd(), audit=False), file_)
    ui.write(" rev offset length base linkrev"
             " nodeid p1 p2\n")
    for i in r:
        node = r.node(i)
        try:
            pp = r.parents(node)
        except Exception:
            # A damaged index can make the parent lookup blow up; fall
            # back to null parents so the rest of the dump still prints.
            # (Was a bare "except:", which also swallowed
            # KeyboardInterrupt/SystemExit.)
            pp = [nullid, nullid]
        ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
            i, r.start(i), r.length(i), r.base(i), r.linkrev(i),
            short(node), short(pp[0]), short(pp[1])))
def debugindexdot(ui, file_):
    """dump an index DAG as a graphviz dot file"""
    rlog = revlog.revlog(util.opener(os.getcwd(), audit=False), file_)
    ui.write("digraph G {\n")
    for idx in rlog:
        parents = rlog.parents(rlog.node(idx))
        # One edge per parent; the second parent is only present for
        # merges (non-null).
        ui.write("\t%d -> %d\n" % (rlog.rev(parents[0]), idx))
        if parents[1] != nullid:
            ui.write("\t%d -> %d\n" % (rlog.rev(parents[1]), idx))
    ui.write("}\n")
def debuginstall(ui):
    '''test Mercurial installation'''
    def writetemp(contents):
        # Write contents to a fresh temporary file and return its name;
        # the caller is responsible for unlinking it.
        (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
        f = os.fdopen(fd, "wb")
        f.write(contents)
        f.close()
        return name
    # Running tally of failed checks; doubles as the exit status.
    problems = 0
    # encoding
    ui.status(_("Checking encoding (%s)...\n") % encoding.encoding)
    try:
        encoding.fromlocal("test")
    except util.Abort, inst:
        ui.write(" %s\n" % inst)
        ui.write(_(" (check that your locale is properly set)\n"))
        problems += 1
    # compiled modules: importing the C extensions verifies they were
    # built for this interpreter.
    ui.status(_("Checking extensions...\n"))
    try:
        import bdiff, mpatch, base85
    except Exception, inst:
        ui.write(" %s\n" % inst)
        ui.write(_(" One or more extensions could not be found"))
        ui.write(_(" (check that you compiled the extensions)\n"))
        problems += 1
    # templates: instantiating the default map checks the template
    # files were installed where we expect them.
    ui.status(_("Checking templates...\n"))
    try:
        import templater
        templater.templater(templater.templatepath("map-cmdline.default"))
    except Exception, inst:
        ui.write(" %s\n" % inst)
        ui.write(_(" (templates seem to have been installed incorrectly)\n"))
        problems += 1
    # patch: round-trip a tiny diff through the configured patch tool
    # and verify the patched file matches the expected result.
    ui.status(_("Checking patch...\n"))
    patchproblems = 0
    a = "1\n2\n3\n4\n"
    b = "1\n2\n3\ninsert\n4\n"
    fa = writetemp(a)
    d = mdiff.unidiff(a, None, b, None, os.path.basename(fa),
        os.path.basename(fa))
    fd = writetemp(d)
    files = {}
    try:
        patch.patch(fd, ui, cwd=os.path.dirname(fa), files=files)
    except util.Abort, e:
        ui.write(_(" patch call failed:\n"))
        ui.write(" " + str(e) + "\n")
        patchproblems += 1
    else:
        if list(files) != [os.path.basename(fa)]:
            ui.write(_(" unexpected patch output!\n"))
            patchproblems += 1
        a = open(fa).read()
        if a != b:
            ui.write(_(" patch test failed!\n"))
            patchproblems += 1
    if patchproblems:
        if ui.config('ui', 'patch'):
            ui.write(_(" (Current patch tool may be incompatible with patch,"
                       " or misconfigured. Please check your .hgrc file)\n"))
        else:
            ui.write(_(" Internal patcher failure, please report this error"
                       " to http://mercurial.selenic.com/bts/\n"))
    problems += patchproblems
    os.unlink(fa)
    os.unlink(fd)
    # editor: the configured commit editor must resolve to an executable.
    ui.status(_("Checking commit editor...\n"))
    editor = ui.geteditor()
    # Try the full command first, then just its first word (the editor
    # setting may include arguments).
    cmdpath = util.find_exe(editor) or util.find_exe(editor.split()[0])
    if not cmdpath:
        if editor == 'vi':
            ui.write(_(" No commit editor set and can't find vi in PATH\n"))
            ui.write(_(" (specify a commit editor in your .hgrc file)\n"))
        else:
            ui.write(_(" Can't find editor '%s' in PATH\n") % editor)
            ui.write(_(" (specify a commit editor in your .hgrc file)\n"))
            problems += 1
    # check username: committing requires a configured username.
    ui.status(_("Checking username...\n"))
    try:
        user = ui.username()
    except util.Abort, e:
        ui.write(" %s\n" % e)
        ui.write(_(" (specify a username in your .hgrc file)\n"))
        problems += 1
    if not problems:
        ui.status(_("No problems detected\n"))
    else:
        ui.write(_("%s problems detected,"
                   " please check your install!\n") % problems)
    # Non-zero return signals failure to the command dispatcher.
    return problems
def debugrename(ui, repo, file1, *pats, **opts):
    """dump rename information"""
    ctx = repo[opts.get('rev')]
    matcher = cmdutil.match(repo, (file1,) + pats, opts)
    for path in ctx.walk(matcher):
        fctx = ctx[path]
        # renamed() yields a (source path, source filenode) pair, or a
        # false value when the file was not copied/renamed.
        renamed = fctx.filelog().renamed(fctx.filenode())
        rel = matcher.rel(path)
        if renamed:
            ui.write(_("%s renamed from %s:%s\n")
                     % (rel, renamed[0], hex(renamed[1])))
        else:
            ui.write(_("%s not renamed\n") % rel)
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    matcher = cmdutil.match(repo, pats, opts)
    matched = list(repo.walk(matcher))
    if not matched:
        return
    # Size the two columns to the longest absolute and relative names.
    abswidth = max([len(path) for path in matched])
    relwidth = max([len(matcher.rel(path)) for path in matched])
    fmt = 'f %%-%ds %%-%ds %%s' % (abswidth, relwidth)
    for path in matched:
        flag = matcher.exact(path) and 'exact' or ''
        line = fmt % (path, matcher.rel(path), flag)
        ui.write("%s\n" % line.rstrip())
def diff(ui, repo, *pats, **opts):
    """diff repository (or selected files)
    Show differences between revisions for the specified files.
    Differences between files are shown using the unified diff format.
    NOTE: diff may generate unexpected results for merges, as it will
    default to comparing against the working directory's first parent
    changeset if no revisions are specified.
    When two revision arguments are given, then changes are shown
    between those revisions. If only one revision is specified then
    that revision is compared to the working directory, and, when no
    revisions are specified, the working directory files are compared
    to its parent.
    Alternatively you can specify -c/--change with a revision to see
    the changes in that changeset relative to its first parent.
    Without the -a/--text option, diff will avoid generating diffs of
    files it detects as binary. With -a, diff will generate a diff
    anyway, probably with undesirable results.
    Use the -g/--git option to generate diffs in the git extended diff
    format. For more information, read 'hg help diffs'.
    """
    revs = opts.get('rev')
    change = opts.get('change')
    stat = opts.get('stat')
    reverse = opts.get('reverse')
    # --rev and --change are mutually exclusive ways of picking the
    # comparison endpoints.
    if revs and change:
        msg = _('cannot specify --rev and --change at the same time')
        raise util.Abort(msg)
    elif change:
        # --change REV: diff REV against its first parent.
        node2 = repo.lookup(change)
        node1 = repo[node2].parents()[0].node()
    else:
        node1, node2 = cmdutil.revpair(repo, revs)
    if reverse:
        node1, node2 = node2, node1
    if stat:
        # diffstat wants zero lines of context.
        opts['unified'] = '0'
    diffopts = patch.diffopts(ui, opts)
    m = cmdutil.match(repo, pats, opts)
    it = patch.diff(repo, node1, node2, match=m, opts=diffopts)
    if stat:
        # Default to 80 columns; query the terminal only when output is
        # not forced plain.
        width = 80
        if not ui.plain():
            width = util.termwidth()
        ui.write(patch.diffstat(util.iterlines(it), width=width,
                                git=diffopts.git))
    else:
        # Stream the diff chunks straight through.
        for chunk in it:
            ui.write(chunk)
def export(ui, repo, *changesets, **opts):
    """dump the header and diffs for one or more changesets
    Print the changeset header and diffs for one or more revisions.
    The information shown in the changeset header is: author, date,
    branch name (if non-default), changeset hash, parent(s) and commit
    comment.
    NOTE: export may generate unexpected diff output for merge
    changesets, as it will compare the merge changeset against its
    first parent only.
    Output may be to a file, in which case the name of the file is
    given using a format string. The formatting rules are as follows:
    :``%%``: literal "%" character
    :``%H``: changeset hash (40 bytes of hexadecimal)
    :``%N``: number of patches being generated
    :``%R``: changeset revision number
    :``%b``: basename of the exporting repository
    :``%h``: short-form changeset hash (12 bytes of hexadecimal)
    :``%n``: zero-padded sequence number, starting at 1
    :``%r``: zero-padded changeset revision number
    Without the -a/--text option, export will avoid generating diffs
    of files it detects as binary. With -a, export will generate a
    diff anyway, probably with undesirable results.
    Use the -g/--git option to generate diffs in the git extended diff
    format. See 'hg help diffs' for more information.
    With the --switch-parent option, the diff will be against the
    second parent. It can be useful to review a merge.
    """
    # Revisions may come positionally or via --rev; merge both sources.
    changesets += tuple(opts.get('rev', []))
    if not changesets:
        raise util.Abort(_("export requires at least one changeset"))
    revs = cmdutil.revrange(repo, changesets)
    if len(revs) > 1:
        ui.note(_('exporting patches:\n'))
    else:
        ui.note(_('exporting patch:\n'))
    # patch.export does the actual formatting/writing, honoring the
    # --output template and --switch-parent.
    patch.export(repo, revs, template=opts.get('output'),
                 switch_parent=opts.get('switch_parent'),
                 opts=patch.diffopts(ui, opts))
def forget(ui, repo, *pats, **opts):
    """forget the specified files on the next commit
    Mark the specified files so they will no longer be tracked
    after the next commit.
    This only removes files from the current branch, not from the
    entire project history, and it does not delete them from the
    working directory.
    To undo a forget before the next commit, see hg add.
    """
    if not pats:
        raise util.Abort(_('no files specified'))
    m = cmdutil.match(repo, pats, opts)
    s = repo.status(match=m, clean=True)
    # s is presumably the standard status tuple (modified, added,
    # removed, deleted, unknown, ignored, clean); forget everything
    # currently tracked: modified, added, deleted and clean files.
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    for f in m.files():
        # Warn about explicitly named files that are not tracked;
        # directories are silently skipped.
        if f not in repo.dirstate and not os.path.isdir(m.rel(f)):
            ui.warn(_('not removing %s: file is already untracked\n')
                    % m.rel(f))
    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))
    # unlink=False: stop tracking but keep the working copy of the file.
    repo.remove(forget, unlink=False)
def grep(ui, repo, pattern, *pats, **opts):
    """search for a pattern in specified files and revisions
    Search revisions of files for a regular expression.
    This command behaves differently than Unix grep. It only accepts
    Python/Perl regexps. It searches repository history, not the
    working directory. It always prints the revision number in which a
    match appears.
    By default, grep only prints output for the first revision of a
    file in which it finds a match. To get it to print every revision
    that contains a change in match status ("-" for a match that
    becomes a non-match, or "+" for a non-match that becomes a match),
    use the --all flag.
    """
    reflags = 0
    if opts.get('ignore_case'):
        reflags |= re.I
    try:
        regexp = re.compile(pattern, reflags)
    except Exception, inst:
        ui.warn(_("grep: invalid match pattern: %s\n") % inst)
        return None
    # Field separator and line terminator; --print0 makes both NUL.
    sep, eol = ':', '\n'
    if opts.get('print0'):
        sep = eol = '\0'
    # Cache filelog objects: the same file is consulted for many revs.
    getfile = util.lrucachefunc(repo.file)
    def matchlines(body):
        # Yield (linenum, colstart, colend, line) for each regexp match
        # in body, scanning forward match by match.
        begin = 0
        linenum = 0
        while True:
            match = regexp.search(body, begin)
            if not match:
                break
            mstart, mend = match.span()
            linenum += body.count('\n', begin, mstart) + 1
            lstart = body.rfind('\n', begin, mstart) + 1 or begin
            begin = body.find('\n', mend) + 1 or len(body)
            lend = begin - 1
            yield linenum, mstart - lstart, mend - lstart, body[lstart:lend]
    class linestate(object):
        # One matching line within a revision of a file.  Equality and
        # hashing are content-based so that match lists of adjacent
        # revisions can be diffed with SequenceMatcher.
        def __init__(self, line, linenum, colstart, colend):
            self.line = line
            self.linenum = linenum
            self.colstart = colstart
            self.colend = colend
        def __hash__(self):
            return hash((self.linenum, self.line))
        def __eq__(self, other):
            return self.line == other.line
    # matches: rev -> filename -> [linestate]; copies: rev -> fn -> source
    matches = {}
    copies = {}
    def grepbody(fn, rev, body):
        # Record all matching lines of fn at rev into matches[rev][fn].
        matches[rev].setdefault(fn, [])
        m = matches[rev][fn]
        for lnum, cstart, cend, line in matchlines(body):
            s = linestate(line, lnum, cstart, cend)
            m.append(s)
    def difflinestates(a, b):
        # Yield ('+'/'-', linestate) pairs describing how the set of
        # matching lines changed from a (parent) to b (child).
        sm = difflib.SequenceMatcher(None, a, b)
        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
            if tag == 'insert':
                for i in xrange(blo, bhi):
                    yield ('+', b[i])
            elif tag == 'delete':
                for i in xrange(alo, ahi):
                    yield ('-', a[i])
            elif tag == 'replace':
                for i in xrange(alo, ahi):
                    yield ('-', a[i])
                for i in xrange(blo, bhi):
                    yield ('+', b[i])
    def display(fn, ctx, pstates, states):
        # Print the matches for one file at one revision, assembling
        # the columns requested by the options.  Returns True if any
        # line was written.
        rev = ctx.rev()
        datefunc = ui.quiet and util.shortdate or util.datestr
        found = False
        filerevmatches = {}
        if opts.get('all'):
            iter = difflinestates(pstates, states)
        else:
            iter = [('', l) for l in states]
        for change, l in iter:
            cols = [fn, str(rev)]
            if opts.get('line_number'):
                cols.append(str(l.linenum))
            if opts.get('all'):
                cols.append(change)
            if opts.get('user'):
                cols.append(ui.shortuser(ctx.user()))
            if opts.get('date'):
                cols.append(datefunc(ctx.date()))
            if opts.get('files_with_matches'):
                # Only one line of output per (file, rev) pair.
                c = (fn, rev)
                if c in filerevmatches:
                    continue
                filerevmatches[c] = 1
            else:
                cols.append(l.line)
            ui.write(sep.join(cols), eol)
            found = True
        return found
    # skip: files whose first match was already reported (non --all mode)
    skip = {}
    revfiles = {}
    matchfn = cmdutil.match(repo, pats, opts)
    found = False
    follow = opts.get('follow')
    def prep(ctx, fns):
        # walkchangerevs callback: collect the match states for every
        # touched file at ctx and at its first parent, tracking copies
        # when --follow is given.
        rev = ctx.rev()
        pctx = ctx.parents()[0]
        parent = pctx.rev()
        matches.setdefault(rev, {})
        matches.setdefault(parent, {})
        files = revfiles.setdefault(rev, [])
        for fn in fns:
            flog = getfile(fn)
            try:
                fnode = ctx.filenode(fn)
            except error.LookupError:
                continue
            copied = flog.renamed(fnode)
            copy = follow and copied and copied[0]
            if copy:
                copies.setdefault(rev, {})[fn] = copy
            if fn in skip:
                if copy:
                    skip[copy] = True
                continue
            files.append(fn)
            if fn not in matches[rev]:
                grepbody(fn, rev, flog.read(fnode))
            # Also grep the parent revision (following the copy source
            # if any) so display() can diff the two match sets.
            pfn = copy or fn
            if pfn not in matches[parent]:
                try:
                    fnode = pctx.filenode(pfn)
                    grepbody(pfn, parent, flog.read(fnode))
                except error.LookupError:
                    pass
    for ctx in cmdutil.walkchangerevs(repo, matchfn, opts, prep):
        rev = ctx.rev()
        parent = ctx.parents()[0].rev()
        for fn in sorted(revfiles.get(rev, [])):
            states = matches[rev][fn]
            copy = copies.get(rev, {}).get(fn)
            if fn in skip:
                if copy:
                    skip[copy] = True
                continue
            pstates = matches.get(parent, {}).get(copy or fn, [])
            if pstates or states:
                r = display(fn, ctx, pstates, states)
                found = found or r
                if r and not opts.get('all'):
                    # Without --all, a file is reported only once.
                    skip[fn] = True
                    if copy:
                        skip[copy] = True
        # Release per-revision state as soon as we are done with it.
        del matches[rev]
        del revfiles[rev]
def heads(ui, repo, *branchrevs, **opts):
    """show current repository heads or show branch heads
    With no arguments, show all repository branch heads.
    Repository "heads" are changesets with no child changesets. They are
    where development generally takes place and are the usual targets
    for update and merge operations. Branch heads are changesets that have
    no child changeset on the same branch.
    If one or more REVs are given, only branch heads on the branches
    associated with the specified changesets are shown.
    If -c/--closed is specified, also show branch heads marked closed
    (see hg commit --close-branch).
    If STARTREV is specified, only those heads that are descendants of
    STARTREV will be displayed.
    If -t/--topo is specified, named branch mechanics will be ignored and only
    changesets without children will be shown.
    """
    if opts.get('rev'):
        start = repo.lookup(opts['rev'])
    else:
        start = None
    if opts.get('topo'):
        # Topological heads: changesets without children, branches ignored.
        heads = [repo[h] for h in repo.heads(start)]
    else:
        heads = []
        for b, ls in repo.branchmap().iteritems():
            if start is None:
                heads += [repo[h] for h in ls]
                continue
            # With STARTREV, keep only branch heads descending from it.
            startrev = repo.changelog.rev(start)
            descendants = set(repo.changelog.descendants(startrev))
            descendants.add(startrev)
            rev = repo.changelog.rev
            heads += [repo[h] for h in ls if rev(h) in descendants]
    if branchrevs:
        decode, encode = encoding.fromlocal, encoding.tolocal
        branches = set(repo[decode(br)].branch() for br in branchrevs)
        heads = [h for h in heads if h.branch() in branches]
    if not opts.get('closed'):
        heads = [h for h in heads if not h.extra().get('close')]
    if opts.get('active') and branchrevs:
        dagheads = repo.heads(start)
        heads = [h for h in heads if h.node() in dagheads]
    if branchrevs:
        haveheads = set(h.branch() for h in heads)
        if branches - haveheads:
            headless = ', '.join(encode(b) for b in branches - haveheads)
            msg = _('no open branch heads found on branches %s')
            if opts.get('rev'):
                # Translate first, then interpolate: interpolating inside
                # _() defeats the gettext catalog lookup (the formatted
                # string never matches the msgid).
                msg += _(' (started at %s)') % opts['rev']
            ui.warn((msg + '\n') % headless)
    if not heads:
        return 1
    # Newest first.
    heads = sorted(heads, key=lambda x: -x.rev())
    displayer = cmdutil.show_changeset(ui, repo, opts)
    for ctx in heads:
        displayer.show(ctx)
    displayer.close()
def help_(ui, name=None, with_version=False, unknowncmd=False):
    """show help for a given topic or a help overview
    With no arguments, print a list of commands with short help messages.
    Given a topic, extension, or command name, print help for that
    topic."""
    # Accumulates (title, options) pairs; rendered at the very end.
    option_lists = []
    textwidth = util.termwidth() - 2
    def addglobalopts(aliases):
        # Append the global-options section (verbose) or a hint telling
        # the user how to see more (non-verbose).
        if ui.verbose:
            option_lists.append((_("global options:"), globalopts))
            if name == 'shortlist':
                option_lists.append((_('use "hg help" for the full list '
                                       'of commands'), ()))
        else:
            if name == 'shortlist':
                msg = _('use "hg help" for the full list of commands '
                        'or "hg -v" for details')
            elif aliases:
                msg = _('use "hg -v help%s" to show aliases and '
                        'global options') % (name and " " + name or "")
            else:
                msg = _('use "hg -v help %s" to show global options') % name
            option_lists.append((msg, ()))
    def helpcmd(name):
        # Show help for a single command: synopsis, aliases, doc, options.
        if with_version:
            version_(ui)
            ui.write('\n')
        try:
            aliases, entry = cmdutil.findcmd(name, table, strict=unknowncmd)
        except error.AmbiguousCommand, inst:
            # py3k fix: except vars can't be used outside the scope of the
            # except block, nor can be used inside a lambda. python issue4617
            prefix = inst.args[0]
            select = lambda c: c.lstrip('^').startswith(prefix)
            helplist(_('list of commands:\n\n'), select)
            return
        # check if it's an invalid alias and display its error if it is
        if getattr(entry[0], 'badalias', False):
            if not unknowncmd:
                entry[0](ui)
            return
        # synopsis
        if len(entry) > 2:
            if entry[2].startswith('hg'):
                ui.write("%s\n" % entry[2])
            else:
                ui.write('hg %s %s\n' % (aliases[0], entry[2]))
        else:
            ui.write('hg %s\n' % aliases[0])
        # aliases
        if not ui.quiet and len(aliases) > 1:
            ui.write(_("\naliases: %s\n") % ', '.join(aliases[1:]))
        # description
        doc = gettext(entry[0].__doc__)
        if not doc:
            doc = _("(no help text available)")
        if hasattr(entry[0], 'definition'):  # aliased command
            doc = _('alias for: hg %s\n\n%s') % (entry[0].definition, doc)
        if ui.quiet:
            # Quiet mode: only the one-line summary.
            doc = doc.splitlines()[0]
        keep = ui.verbose and ['verbose'] or []
        formatted, pruned = minirst.format(doc, textwidth, keep=keep)
        ui.write("\n%s\n" % formatted)
        if pruned:
            ui.write(_('\nuse "hg -v help %s" to show verbose help\n') % name)
        if not ui.quiet:
            # options
            if entry[1]:
                option_lists.append((_("options:\n"), entry[1]))
            addglobalopts(False)
    def helplist(header, select=None):
        # Show a table of commands, optionally filtered by select().
        h = {}
        cmds = {}
        for c, e in table.iteritems():
            # Command table keys look like "name|alias1|alias2"; the
            # leading "^" marks commands shown in the short list.
            f = c.split("|", 1)[0]
            if select and not select(f):
                continue
            if (not select and name != 'shortlist' and
                e[0].__module__ != __name__):
                continue
            if name == "shortlist" and not f.startswith("^"):
                continue
            f = f.lstrip("^")
            if not ui.debugflag and f.startswith("debug"):
                continue
            doc = e[0].__doc__
            if doc and 'DEPRECATED' in doc and not ui.verbose:
                continue
            doc = gettext(doc)
            if not doc:
                doc = _("(no help text available)")
            h[f] = doc.splitlines()[0].rstrip()
            cmds[f] = c.lstrip("^")
        if not h:
            ui.status(_('no commands defined\n'))
            return
        ui.status(header)
        fns = sorted(h)
        m = max(map(len, fns))
        for f in fns:
            if ui.verbose:
                commands = cmds[f].replace("|",", ")
                ui.write(" %s:\n %s\n"%(commands, h[f]))
            else:
                ui.write(' %-*s %s\n' % (m, f, util.wrap(h[f], m + 4)))
        if not ui.quiet:
            addglobalopts(True)
    def helptopic(name):
        # Show help for a named help topic from help.helptable.
        for names, header, doc in help.helptable:
            if name in names:
                break
        else:
            raise error.UnknownCommand(name)
        # description
        if not doc:
            doc = _("(no help text available)")
        if hasattr(doc, '__call__'):
            # Topic bodies may be callables producing the text lazily.
            doc = doc()
        ui.write("%s\n\n" % header)
        ui.write("%s\n" % minirst.format(doc, textwidth, indent=4))
    def helpext(name):
        # Show help for an extension (enabled or known-but-disabled).
        try:
            mod = extensions.find(name)
            doc = gettext(mod.__doc__) or _('no help text available')
        except KeyError:
            mod = None
            doc = extensions.disabledext(name)
            if not doc:
                raise error.UnknownCommand(name)
        # First line is the summary; the rest is detail.
        if '\n' not in doc:
            head, tail = doc, ""
        else:
            head, tail = doc.split('\n', 1)
        ui.write(_('%s extension - %s\n\n') % (name.split('.')[-1], head))
        if tail:
            ui.write(minirst.format(tail, textwidth))
            ui.status('\n\n')
        if mod:
            try:
                ct = mod.cmdtable
            except AttributeError:
                ct = {}
            modcmds = set([c.split('|', 1)[0] for c in ct])
            helplist(_('list of commands:\n\n'), modcmds.__contains__)
        else:
            ui.write(_('use "hg help extensions" for information on enabling '
                       'extensions\n'))
    def helpextcmd(name):
        # Show which disabled extension provides the given command.
        cmd, ext, mod = extensions.disabledcmd(name, ui.config('ui', 'strict'))
        doc = gettext(mod.__doc__).splitlines()[0]
        msg = help.listexts(_("'%s' is provided by the following "
                              "extension:") % cmd, {ext: doc}, len(ext),
                            indent=4)
        ui.write(minirst.format(msg, textwidth))
        ui.write('\n\n')
        ui.write(_('use "hg help extensions" for information on enabling '
                   'extensions\n'))
    if name and name != 'shortlist':
        i = None
        # Try each resolver in turn until one recognizes the name.
        if unknowncmd:
            queries = (helpextcmd,)
        else:
            queries = (helptopic, helpcmd, helpext, helpextcmd)
        for f in queries:
            try:
                f(name)
                i = None
                break
            except error.UnknownCommand, inst:
                i = inst
        if i:
            raise i
    else:
        # program name
        if ui.verbose or with_version:
            version_(ui)
        else:
            ui.status(_("Mercurial Distributed SCM\n"))
        ui.status('\n')
        # list of commands
        if name == "shortlist":
            header = _('basic commands:\n\n')
        else:
            header = _('list of commands:\n\n')
        helplist(header)
        if name != 'shortlist':
            exts, maxlength = extensions.enabled()
            text = help.listexts(_('enabled extensions:'), exts, maxlength)
            if text:
                ui.write("\n%s\n" % minirst.format(text, textwidth))
    # list all option lists
    opt_output = []
    for title, options in option_lists:
        opt_output.append(("\n%s" % title, None))
        for shortopt, longopt, default, desc in options:
            if _("DEPRECATED") in desc and not ui.verbose:
                continue
            opt_output.append(("%2s%s" % (shortopt and "-%s" % shortopt,
                                          longopt and " --%s" % longopt),
                               "%s%s" % (desc,
                                         default
                                         and _(" (default: %s)") % default
                                         or "")))
    if not name:
        # Append the list of additional help topics.
        ui.write(_("\nadditional help topics:\n\n"))
        topics = []
        for names, header, doc in help.helptable:
            topics.append((sorted(names, key=len, reverse=True)[0], header))
        topics_len = max([len(s[0]) for s in topics])
        for t, desc in topics:
            ui.write(" %-*s %s\n" % (topics_len, t, desc))
    if opt_output:
        # Align option descriptions to the widest option name.
        opts_len = max([len(line[0]) for line in opt_output if line[1]] or [0])
        for first, second in opt_output:
            if second:
                second = util.wrap(second, opts_len + 3)
                ui.write(" %-*s %s\n" % (opts_len, first, second))
            else:
                ui.write("%s\n" % first)
def identify(ui, repo, source=None,
             rev=None, num=None, id=None, branch=None, tags=None):
    """identify the working copy or specified revision
    With no revision, print a summary of the current state of the
    repository.
    Specifying a path to a repository root or Mercurial bundle will
    cause lookup to operate on that repository/bundle.
    This summary identifies the repository state using one or two
    parent hash identifiers, followed by a "+" if there are
    uncommitted changes in the working directory, a list of tags for
    this revision and a branch name for non-default branches.
    """
    if not repo and not source:
        raise util.Abort(_("There is no Mercurial repository here "
                           "(.hg not found)"))
    hexfunc = ui.debugflag and hex or short
    # With no display flags at all, print the default summary.
    default = not (num or id or branch or tags)
    output = []
    revs = []
    if source:
        # Operate on another repository or bundle instead of the local one.
        source, branches = hg.parseurl(ui.expandpath(source))
        repo = hg.repository(ui, source)
        revs, checkout = hg.addbranchrevs(repo, repo, branches, None)
    if not repo.local():
        # Remote repositories can only be asked for a changeset hash.
        if not rev and revs:
            rev = revs[0]
        if not rev:
            rev = "tip"
        if num or branch or tags:
            raise util.Abort(
                "can't query remote revision number, branch, or tags")
        output = [hexfunc(repo.lookup(rev))]
    elif not rev:
        # Working directory: it may have one or two parents (merge).
        ctx = repo[None]
        parents = ctx.parents()
        changed = False
        if default or id or num:
            changed = util.any(repo.status())
        if default or id:
            # A trailing "+" marks uncommitted local changes.
            output = ["%s%s" % ('+'.join([hexfunc(p.node()) for p in parents]),
                                (changed) and "+" or "")]
        if num:
            output.append("%s%s" % ('+'.join([str(p.rev()) for p in parents]),
                                    (changed) and "+" or ""))
    else:
        ctx = repo[rev]
        if default or id:
            output = [hexfunc(ctx.node())]
        if num:
            output.append(str(ctx.rev()))
    if repo.local() and default and not ui.quiet:
        # Append branch (if non-default) and tags to the summary.
        b = encoding.tolocal(ctx.branch())
        if b != 'default':
            output.append("(%s)" % b)
        # multiple tags for a single parent separated by '/'
        t = "/".join(ctx.tags())
        if t:
            output.append(t)
    if branch:
        output.append(encoding.tolocal(ctx.branch()))
    if tags:
        output.extend(ctx.tags())
    ui.write("%s\n" % ' '.join(output))
def import_(ui, repo, patch1, *patches, **opts):
    """import an ordered set of patches
    Import a list of patches and commit them individually (unless
    --no-commit is specified).
    If there are outstanding changes in the working directory, import
    will abort unless given the -f/--force flag.
    You can import a patch straight from a mail message. Even patches
    as attachments work (to use the body part, it must have type
    text/plain or text/x-patch). From and Subject headers of email
    message are used as default committer and commit message. All
    text/plain body parts before first diff are added to commit
    message.
    If the imported patch was generated by hg export, user and
    description from patch override values from message headers and
    body. Values given on command line with -m/--message and -u/--user
    override these.
    If --exact is specified, import will set the working directory to
    the parent of each patch before applying it, and will abort if the
    resulting changeset has a different ID than the one recorded in
    the patch. This may happen due to character set problems or other
    deficiencies in the text patch format.
    With -s/--similarity, hg will attempt to discover renames and
    copies in the patch in the same way as 'addremove'.
    To read a patch from standard input, use "-" as the patch name. If
    a URL is specified, the patch will be downloaded from it.
    See 'hg help dates' for a list of formats valid for -d/--date.
    """
    patches = (patch1,) + patches
    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)
    # --similarity is a percentage; validate it up front.
    try:
        sim = float(opts.get('similarity') or 0)
    except ValueError:
        raise util.Abort(_('similarity must be a number'))
    if sim < 0 or sim > 100:
        raise util.Abort(_('similarity must be between 0 and 100'))
    if opts.get('exact') or not opts.get('force'):
        cmdutil.bail_if_changed(repo)
    d = opts["base"]
    strip = opts["strip"]
    wlock = lock = None
    def tryone(ui, hunk):
        # Apply a single extracted patch hunk; returns a short commit id
        # (or the 'to working directory' marker) when something was
        # applied, None when the hunk contained no patch.
        tmpname, message, user, date, branch, nodeid, p1, p2 = \
            patch.extract(ui, hunk)
        if not tmpname:
            return None
        commitid = _('to working directory')
        try:
            # Commit message priority: -m/--message, then the message
            # embedded in the patch, then the interactive editor.
            cmdline_message = cmdutil.logmessage(opts)
            if cmdline_message:
                # pickup the cmdline msg
                message = cmdline_message
            elif message:
                # pickup the patch msg
                message = message.strip()
            else:
                # launch the editor
                message = None
            ui.debug('message:\n%s\n' % message)
            wp = repo.parents()
            if opts.get('exact'):
                # --exact: update the working directory to the patch's
                # recorded parent before applying.
                if not nodeid or not p1:
                    raise util.Abort(_('not a Mercurial patch'))
                p1 = repo.lookup(p1)
                p2 = repo.lookup(p2 or hex(nullid))
                if p1 != wp[0].node():
                    hg.clean(repo, p1)
                repo.dirstate.setparents(p1, p2)
            elif p2:
                # Patch of a merge: restore both parents if the first
                # one matches the working directory.
                try:
                    p1 = repo.lookup(p1)
                    p2 = repo.lookup(p2)
                    if p1 == wp[0].node():
                        repo.dirstate.setparents(p1, p2)
                except error.RepoError:
                    pass
            if opts.get('exact') or opts.get('import_branch'):
                repo.dirstate.setbranch(branch or 'default')
            files = {}
            try:
                patch.patch(tmpname, ui, strip=strip, cwd=repo.root,
                            files=files, eolmode=None)
            finally:
                # Always sync the dirstate with what the patch touched,
                # applying rename/copy detection when requested.
                files = patch.updatedir(ui, repo, files,
                                        similarity=sim / 100.0)
            if not opts.get('no_commit'):
                if opts.get('exact'):
                    m = None
                else:
                    m = cmdutil.matchfiles(repo, files or [])
                n = repo.commit(message, opts.get('user') or user,
                                opts.get('date') or date, match=m,
                                editor=cmdutil.commiteditor)
                if opts.get('exact'):
                    # The resulting changeset must reproduce the recorded
                    # node id exactly, otherwise roll back.
                    if hex(n) != nodeid:
                        repo.rollback()
                        raise util.Abort(_('patch is damaged'
                                           ' or loses information'))
                # Force a dirstate write so that the next transaction
                # backups an up-do-date file.
                repo.dirstate.write()
            if n:
                commitid = short(n)
            return commitid
        finally:
            os.unlink(tmpname)
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        lastcommit = None
        for p in patches:
            pf = os.path.join(d, p)
            if pf == '-':
                ui.status(_("applying patch from stdin\n"))
                pf = sys.stdin
            else:
                ui.status(_("applying %s\n") % p)
                pf = url.open(ui, pf)
            # An input may bundle several patches; apply each in turn.
            haspatch = False
            for hunk in patch.split(pf):
                commitid = tryone(ui, hunk)
                if commitid:
                    haspatch = True
                    if lastcommit:
                        ui.status(_('applied %s\n') % lastcommit)
                    lastcommit = commitid
            if not haspatch:
                raise util.Abort(_('no diffs found'))
    finally:
        release(lock, wlock)
def incoming(ui, repo, source="default", **opts):
    """show new changesets found in source
    Show new changesets found in the specified path/URL or the default
    pull location. These are the changesets that would have been pulled
    if a pull at the time you issued this command.
    For remote repository, using --bundle avoids downloading the
    changesets twice if the incoming is followed by a pull.
    See pull for valid source format details.
    """
    limit = cmdutil.loglimit(opts)
    source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
    other = hg.repository(cmdutil.remoteui(repo, opts), source)
    ui.status(_('comparing with %s\n') % url.hidepassword(source))
    revs, checkout = hg.addbranchrevs(repo, other, branches, opts.get('rev'))
    if revs:
        revs = [other.lookup(rev) for rev in revs]
    common, incoming, rheads = repo.findcommonincoming(other, heads=revs,
                                                       force=opts["force"])
    if not incoming:
        # Nothing to show; remove any stale --bundle file.  (Was a bare
        # "except: pass" — only a missing/unwritable file is expected
        # here, so catch OSError specifically.)
        try:
            os.unlink(opts["bundle"])
        except OSError:
            pass
        ui.status(_("no changes found\n"))
        return 1
    cleanup = None
    try:
        fname = opts["bundle"]
        if fname or not other.local():
            # create a bundle (uncompressed if other repo is not local)
            if revs is None and other.capable('changegroupsubset'):
                revs = rheads
            if revs is None:
                cg = other.changegroup(incoming, "incoming")
            else:
                cg = other.changegroupsubset(incoming, revs, 'incoming')
            bundletype = other.local() and "HG10BZ" or "HG10UN"
            fname = cleanup = changegroup.writebundle(cg, fname, bundletype)
            # keep written bundle?
            if opts["bundle"]:
                cleanup = None
            if not other.local():
                # use the created uncompressed bundlerepo
                other = bundlerepo.bundlerepository(ui, repo.root, fname)
        # Display the incoming changesets, honoring --limit,
        # --no-merges and --newest-first.
        o = other.changelog.nodesbetween(incoming, revs)[0]
        if opts.get('newest_first'):
            o.reverse()
        displayer = cmdutil.show_changeset(ui, other, opts)
        count = 0
        for n in o:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])
        displayer.close()
    finally:
        # Close the (possibly bundle-backed) peer and delete any
        # temporary bundle file we created.
        if hasattr(other, 'close'):
            other.close()
        if cleanup:
            os.unlink(cleanup)
def init(ui, dest=".", **opts):
    """create a new repository in the given directory
    Initialize a new repository in the given directory. If the given
    directory does not exist, it will be created.
    If no directory is given, the current directory is used.
    It is possible to specify an ``ssh://`` URL as the destination.
    See 'hg help urls' for more information.
    """
    # Go through the generic repository factory so that remote (ssh)
    # destinations work exactly like local paths.
    hg.repository(cmdutil.remoteui(ui, opts), dest, create=True)
def locate(ui, repo, *pats, **opts):
    """locate files matching specific patterns
    Print files under Mercurial control in the working directory whose
    names match the given patterns.
    By default, this command searches all directories in the working
    directory. To search just the current directory and its
    subdirectories, use "--include .".
    If no patterns are given to match, this command prints the names
    of all files under Mercurial control in the working directory.
    If you want to feed the output of this command into the "xargs"
    command, use the -0 option to both this command and "xargs". This
    will avoid the problem of "xargs" treating single filenames that
    contain whitespace as multiple filenames.
    """
    if opts.get('print0'):
        end = '\0'
    else:
        end = '\n'
    rev = opts.get('rev') or None
    matcher = cmdutil.match(repo, pats, opts, default='relglob')
    # Silence complaints about patterns that match nothing.
    matcher.bad = lambda x, y: False
    found = False
    for path in repo[rev].walk(matcher):
        if not rev and path not in repo.dirstate:
            # Without an explicit revision, ignore files that are not
            # tracked in the dirstate.
            continue
        if opts.get('fullpath'):
            ui.write(repo.wjoin(path), end)
        else:
            ui.write(((pats and matcher.rel(path)) or path), end)
        found = True
    # Exit status 0 if anything was printed, 1 otherwise.
    if found:
        return 0
    return 1
def log(ui, repo, *pats, **opts):
    """show revision history of entire repository or files
    Print the revision history of the specified files or the entire
    project.
    File history is shown without following rename or copy history of
    files. Use -f/--follow with a filename to follow history across
    renames and copies. --follow without a filename will only show
    ancestors or descendants of the starting revision. --follow-first
    only follows the first parent of merge revisions.
    If no revision range is specified, the default is tip:0 unless
    --follow is set, in which case the working directory parent is
    used as the starting revision.
    See 'hg help dates' for a list of formats valid for -d/--date.
    By default this command prints revision number and changeset id,
    tags, non-trivial parents, user, date and time, and a summary for
    each commit. When the -v/--verbose switch is used, the list of
    changed files and full commit message are shown.
    NOTE: log -p/--patch may generate unexpected diff output for merge
    changesets, as it will only compare the merge changeset against
    its first parent. Also, only files different from BOTH parents
    will appear in files:.
    """
    matchfn = cmdutil.match(repo, pats, opts)
    limit = cmdutil.loglimit(opts)
    count = 0
    endrev = None
    if opts.get('copies') and opts.get('rev'):
        # bound rename detection to just past the highest requested rev
        endrev = max(cmdutil.revrange(repo, opts.get('rev'))) + 1
    # df: date-filter predicate built from -d/--date, or False (no filter)
    df = False
    if opts["date"]:
        df = util.matchdate(opts["date"])
    displayer = cmdutil.show_changeset(ui, repo, opts, True, matchfn)
    def prep(ctx, fns):
        # Per-changeset hook called by walkchangerevs; returning without
        # calling displayer.show() drops the changeset from the output.
        rev = ctx.rev()
        parents = [p for p in repo.changelog.parentrevs(rev)
                   if p != nullrev]
        # a merge is a changeset with two non-null parents
        if opts.get('no_merges') and len(parents) == 2:
            return
        if opts.get('only_merges') and len(parents) != 2:
            return
        if opts.get('only_branch') and ctx.branch() not in opts['only_branch']:
            return
        if df and not df(ctx.date()[0]):
            return
        if opts['user'] and not [k for k in opts['user'] if k in ctx.user()]:
            return
        if opts.get('keyword'):
            # keep the changeset only if some keyword matches the user,
            # description or file list (case-insensitive)
            for k in [kw.lower() for kw in opts['keyword']]:
                if (k in ctx.user().lower() or
                    k in ctx.description().lower() or
                    k in " ".join(ctx.files()).lower()):
                    break
            else:
                return
        copies = None
        if opts.get('copies') and rev:
            # collect (file, rename-source) pairs for display
            copies = []
            getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
            for fn in ctx.files():
                rename = getrenamed(fn, rev)
                if rename:
                    copies.append((fn, rename[0]))
        displayer.show(ctx, copies=copies)
    for ctx in cmdutil.walkchangerevs(repo, matchfn, opts, prep):
        if count == limit:
            break
        # flush() reports whether the changeset was actually displayed;
        # only displayed changesets count against --limit
        if displayer.flush(ctx.rev()):
            count += 1
    displayer.close()
def manifest(ui, repo, node=None, rev=None):
    """output the current or given revision of the project manifest
    Print a list of version controlled files for the given revision.
    If no revision is given, the first parent of the working directory
    is used, or the null revision if no revision is checked out.
    With -v, print file permissions, symlink and executable bits.
    With --debug, print file revision hashes.
    """
    # A revision may arrive positionally or via -r, but not both.
    if rev and node:
        raise util.Abort(_("please specify just one revision"))
    node = node or rev
    # Map a file's flag character to the mode/type prefix shown with -v.
    modeflags = {'l':'644 @ ', 'x':'755 * ', '':'644 '}
    ctx = repo[node]
    for path in ctx:
        if ui.debugflag:
            # --debug: prefix each file with its 40-hex file revision hash
            ui.write("%40s " % hex(ctx.manifest()[path]))
        if ui.verbose:
            ui.write(modeflags[ctx.flags(path)])
        ui.write("%s\n" % path)
def merge(ui, repo, node=None, **opts):
    """merge working directory with another revision
    The current working directory is updated with all changes made in
    the requested revision since the last common predecessor revision.
    Files that changed between either parent are marked as changed for
    the next commit and a commit must be performed before any further
    updates to the repository are allowed. The next commit will have
    two parents.
    If no revision is specified, the working directory's parent is a
    head revision, and the current branch contains exactly one other
    head, the other head is merged with by default. Otherwise, an
    explicit revision with which to merge with must be provided.
    """
    # The revision may be given positionally or via -r, but not both.
    if opts.get('rev') and node:
        raise util.Abort(_("please specify just one revision"))
    if not node:
        node = opts.get('rev')
    if not node:
        # No revision given: pick the other head of the current branch,
        # but only when that choice is unambiguous.
        branch = repo.changectx(None).branch()
        bheads = repo.branchheads(branch)
        if len(bheads) > 2:
            ui.warn(_("abort: branch '%s' has %d heads - "
                      "please merge with an explicit rev\n")
                    % (branch, len(bheads)))
            ui.status(_("(run 'hg heads .' to see heads)\n"))
            return False
        parent = repo.dirstate.parents()[0]
        if len(bheads) == 1:
            if len(repo.heads()) > 1:
                # i18n fix: format with % AFTER translating.  Previously
                # "..." % branch was evaluated inside _(), so the already
                # formatted string was looked up in the message catalog
                # and could never match a translation.
                ui.warn(_("abort: branch '%s' has one head - "
                          "please merge with an explicit rev\n") % branch)
                ui.status(_("(run 'hg heads' to see all heads)\n"))
                return False
            msg = _('there is nothing to merge')
            if parent != repo.lookup(repo[None].branch()):
                msg = _('%s - use "hg update" instead') % msg
            raise util.Abort(msg)
        if parent not in bheads:
            raise util.Abort(_('working dir not at a head rev - '
                               'use "hg update" or merge with an explicit rev'))
        # merge with whichever branch head is not the current parent
        node = parent == bheads[0] and bheads[-1] or bheads[0]
    if opts.get('preview'):
        # find nodes that are ancestors of p2 but not of p1
        p1 = repo.lookup('.')
        p2 = repo.lookup(node)
        nodes = repo.changelog.findmissing(common=[p1], heads=[p2])
        displayer = cmdutil.show_changeset(ui, repo, opts)
        for node in nodes:
            displayer.show(repo[node])
        displayer.close()
        return 0
    return hg.merge(repo, node, force=opts.get('force'))
def outgoing(ui, repo, dest=None, **opts):
    """show changesets not found in the destination
    Show changesets not found in the specified destination repository
    or the default push location. These are the changesets that would
    be pushed if a push was requested.
    See pull for details of valid destination formats.
    """
    limit = cmdutil.loglimit(opts)
    # Resolve the destination: explicit argument, then the configured
    # default-push path, then the default path.
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest, opts.get('branch'))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.repository(cmdutil.remoteui(repo, opts), dest)
    ui.status(_('comparing with %s\n') % url.hidepassword(dest))
    o = repo.findoutgoing(other, force=opts.get('force'))
    if not o:
        ui.status(_("no changes found\n"))
        return 1
    o = repo.changelog.nodesbetween(o, revs)[0]
    if opts.get('newest_first'):
        o.reverse()
    displayer = cmdutil.show_changeset(ui, repo, opts)
    shown = 0
    for node in o:
        if limit is not None and shown >= limit:
            break
        # skip merge changesets (two non-null parents) with --no-merges
        if opts.get('no_merges'):
            nonnull = [p for p in repo.changelog.parents(node) if p != nullid]
            if len(nonnull) == 2:
                continue
        shown += 1
        displayer.show(repo[node])
    displayer.close()
def parents(ui, repo, file_=None, **opts):
    """show the parents of the working directory or revision
    Print the working directory's parent revisions. If a revision is
    given via -r/--rev, the parent of that revision will be printed.
    If a file argument is given, the revision in which the file was
    last changed (before the working directory revision or the
    argument to --rev if given) is printed.
    """
    rev = opts.get('rev')
    if rev:
        ctx = repo[rev]
    else:
        # no -r: use the working directory context
        ctx = repo[None]
    if file_:
        m = cmdutil.match(repo, (file_,), opts)
        if m.anypats() or len(m.files()) != 1:
            raise util.Abort(_('can only specify an explicit filename'))
        file_ = m.files()[0]
        # find the file's node in each parent, skipping parents that
        # do not contain the file
        filenodes = []
        for cp in ctx.parents():
            if not cp:
                continue
            try:
                filenodes.append(cp.filenode(file_))
            except error.LookupError:
                pass
        if not filenodes:
            raise util.Abort(_("'%s' not found in manifest!") % file_)
        # map each file node back to the changeset that introduced it
        # via the filelog's linkrev
        fl = repo.file(file_)
        p = [repo.lookup(fl.linkrev(fl.rev(fn))) for fn in filenodes]
    else:
        p = [cp.node() for cp in ctx.parents()]
    displayer = cmdutil.show_changeset(ui, repo, opts)
    for n in p:
        # the null parent is never displayed
        if n != nullid:
            displayer.show(repo[n])
    displayer.close()
def paths(ui, repo, search=None):
    """show aliases for remote repositories
    Show definition of symbolic path name NAME. If no name is given,
    show definition of all available names.
    Path names are defined in the [paths] section of /etc/mercurial/hgrc
    and $HOME/.hgrc. If run inside a repository, .hg/hgrc is used, too.
    The path names ``default`` and ``default-push`` have a special
    meaning. When performing a push or pull operation, they are used
    as fallbacks if no location is specified on the command-line.
    When ``default-push`` is set, it will be used for push and
    ``default`` will be used for pull; otherwise ``default`` is used
    as the fallback for both. When cloning a repository, the clone
    source is written as ``default`` in ``.hg/hgrc``. Note that
    ``default`` and ``default-push`` apply to all inbound (e.g. ``hg
    incoming``) and outbound (e.g. ``hg outgoing``, ``hg email`` and
    ``hg bundle``) operations.
    See 'hg help urls' for more information.
    """
    entries = ui.configitems("paths")
    if not search:
        # no name given: dump every configured alias, passwords masked
        for name, path in entries:
            ui.write("%s = %s\n" % (name, url.hidepassword(path)))
        return
    for name, path in entries:
        if name == search:
            ui.write("%s\n" % url.hidepassword(path))
            return
    # named alias was not configured anywhere
    ui.warn(_("not found!\n"))
    return 1
def postincoming(ui, repo, modheads, optupdate, checkout):
    # Shared tail for commands that add changesets (pull/unbundle):
    # update the working directory when requested and safe, otherwise
    # print a hint about what to run next.
    if modheads == 0:
        return
    newheads = modheads > 1
    if optupdate:
        # Updating automatically is safe when an explicit checkout
        # target was given, no new heads appeared, or the current
        # branch still has a single head.
        if checkout or not newheads or len(repo.branchheads()) == 1:
            return hg.update(repo, checkout)
        ui.status(_("not updating, since new heads added\n"))
    if newheads:
        ui.status(_("(run 'hg heads' to see heads, 'hg merge' to merge)\n"))
    else:
        ui.status(_("(run 'hg update' to get a working copy)\n"))
def pull(ui, repo, source="default", **opts):
    """pull changes from the specified source
    Pull changes from a remote repository to a local one.
    This finds all changes from the repository at the specified path
    or URL and adds them to a local repository (the current one unless
    -R is specified). By default, this does not update the copy of the
    project in the working directory.
    Use hg incoming if you want to see what would have been added by a
    pull at the time you issued this command. If you then decide to
    added those changes to the repository, you should use pull -r X
    where X is the last changeset listed by hg incoming.
    If SOURCE is omitted, the 'default' path will be used.
    See 'hg help urls' for more information.
    """
    source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
    other = hg.repository(cmdutil.remoteui(repo, opts), source)
    ui.status(_('pulling from %s\n') % url.hidepassword(source))
    revs, checkout = hg.addbranchrevs(repo, other, branches, opts.get('rev'))
    if revs:
        try:
            # translate the requested revisions to nodes on the remote
            revs = [other.lookup(rev) for rev in revs]
        except error.CapabilityError:
            err = _("Other repository doesn't support revision lookup, "
                    "so a rev cannot be specified.")
            raise util.Abort(err)
    # modheads: the remote's report of how many heads changed locally
    modheads = repo.pull(other, heads=revs, force=opts.get('force'))
    if checkout:
        # resolve the branch checkout target to a local revision number
        checkout = str(repo.changelog.rev(other.lookup(checkout)))
    return postincoming(ui, repo, modheads, opts.get('update'), checkout)
def push(ui, repo, dest=None, **opts):
    """push changes to the specified destination
    Push changes from the local repository to the specified destination.
    This is the symmetrical operation for pull. It moves changes from
    the current repository to a different one. If the destination is
    local this is identical to a pull in that directory from the
    current one.
    By default, push will refuse to run if it detects the result would
    increase the number of remote heads. This generally indicates the
    user forgot to pull and merge before pushing.
    If -r/--rev is used, the named revision and all its ancestors will
    be pushed to the remote repository.
    Please see 'hg help urls' for important details about ``ssh://``
    URLs. If DESTINATION is omitted, a default path will be used.
    """
    # resolve destination: argument, then default-push, then default
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest, opts.get('branch'))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
    other = hg.repository(cmdutil.remoteui(repo, opts), dest)
    ui.status(_('pushing to %s\n') % url.hidepassword(dest))
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    # push subrepos depth-first for coherent ordering
    c = repo['']
    subs = c.substate # only repos that are committed
    for s in sorted(subs):
        # a subrepo push that fails aborts the whole push
        if not c.sub(s).push(opts.get('force')):
            return False
    r = repo.push(other, opts.get('force'), revs=revs)
    # translate repo.push's integer result into a boolean success flag
    return r == 0
def recover(ui, repo):
    """roll back an interrupted transaction
    Recover from an interrupted commit or pull.
    This command tries to fix the repository status after an
    interrupted operation. It should only be necessary when Mercurial
    suggests it.
    """
    # A failed recovery exits with status 1; after a successful one we
    # re-verify the repository and propagate its exit status.
    if not repo.recover():
        return 1
    return hg.verify(repo)
def remove(ui, repo, *pats, **opts):
    """remove the specified files on the next commit
    Schedule the indicated files for removal from the repository.
    This only removes files from the current branch, not from the
    entire project history. -A/--after can be used to remove only
    files that have already been deleted, -f/--force can be used to
    force deletion, and -Af can be used to remove files from the next
    revision without deleting them from the working directory.
    The following table details the behavior of remove for different
    file states (columns) and option combinations (rows). The file
    states are Added [A], Clean [C], Modified [M] and Missing [!] (as
    reported by hg status). The actions are Warn, Remove (from branch)
    and Delete (from disk)::
    A C M !
    none W RD W R
    -f R RD RD R
    -A W W W R
    -Af R R R R
    This command schedules the files to be removed at the next commit.
    To undo a remove before that, see hg revert.
    """
    after, force = opts.get('after'), opts.get('force')
    if not pats and not after:
        raise util.Abort(_('no files specified'))
    m = cmdutil.match(repo, pats, opts)
    s = repo.status(match=m, clean=True)
    # pick the modified/added/deleted/clean lists out of the status tuple
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]
    # warn about explicitly named files that are not tracked at all
    for f in m.files():
        if f not in repo.dirstate and not os.path.isdir(m.rel(f)):
            ui.warn(_('not removing %s: file is untracked\n') % m.rel(f))
    def warn(files, reason):
        # explain why each file in 'files' is being skipped
        for f in files:
            ui.warn(_('not removing %s: file %s (use -f to force removal)\n')
                    % (m.rel(f), reason))
    # decide which files to remove (delete + untrack) vs merely forget
    # (untrack only), per the option table in the docstring
    if force:
        remove, forget = modified + deleted + clean, added
    elif after:
        remove, forget = deleted, []
        warn(modified + added + clean, _('still exists'))
    else:
        remove, forget = deleted + clean, []
        warn(modified, _('is modified'))
        warn(added, _('has been marked for add'))
    for f in sorted(remove + forget):
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))
    repo.forget(forget)
    # with --after the files are already gone, so do not unlink again
    repo.remove(remove, unlink=not after)
def rename(ui, repo, *pats, **opts):
    """rename files; equivalent of copy + remove
    Mark dest as copies of sources; mark sources for deletion. If dest
    is a directory, copies are put in that directory. If dest is a
    file, there can only be one source.
    By default, this command copies the contents of files as they
    exist in the working directory. If invoked with -A/--after, the
    operation is recorded, but no copying is performed.
    This command takes effect at the next commit. To undo a rename
    before that, see hg revert.
    """
    # Hold the working-directory lock for the whole copy+remove so the
    # dirstate cannot change underneath us.
    lock = repo.wlock(False)
    try:
        # a rename is implemented as a copy that also drops the source
        return cmdutil.copy(ui, repo, pats, opts, rename=True)
    finally:
        lock.release()
def resolve(ui, repo, *pats, **opts):
    """various operations to help finish a merge
    This command includes several actions that are often useful while
    performing a merge, after running ``merge`` but before running
    ``commit``. (It is only meaningful if your working directory has
    two parents.) It is most relevant for merges with unresolved
    conflicts, which are typically a result of non-interactive merging with
    ``internal:merge`` or a command-line merge tool like ``diff3``.
    The available actions are:
    1) list files that were merged with conflicts (U, for unresolved)
    and without conflicts (R, for resolved): ``hg resolve -l``
    (this is like ``status`` for merges)
    2) record that you have resolved conflicts in certain files:
    ``hg resolve -m [file ...]`` (default: mark all unresolved files)
    3) forget that you have resolved conflicts in certain files:
    ``hg resolve -u [file ...]`` (default: unmark all resolved files)
    4) discard your current attempt(s) at resolving conflicts and
    restart the merge from scratch: ``hg resolve file...``
    (or ``-a`` for all unresolved files)
    Note that Mercurial will not let you commit files with unresolved merge
    conflicts. You must use ``hg resolve -m ...`` before you can commit
    after a conflicting merge.
    """
    all, mark, unmark, show, nostatus = \
        [opts.get(o) for o in 'all mark unmark list no_status'.split()]
    # the list/mark/unmark actions are mutually exclusive
    if (show and (mark or unmark)) or (mark and unmark):
        raise util.Abort(_("too many options specified"))
    if pats and all:
        raise util.Abort(_("can't specify --all and patterns"))
    if not (all or pats or show or mark or unmark):
        raise util.Abort(_('no files or directories specified; '
                           'use --all to remerge all files'))
    # mergestate records per-file resolution status ("u"/"r")
    ms = merge_.mergestate(repo)
    m = cmdutil.match(repo, pats, opts)
    for f in ms:
        if m(f):
            if show:
                if nostatus:
                    ui.write("%s\n" % f)
                else:
                    ui.write("%s %s\n" % (ms[f].upper(), f))
            elif mark:
                ms.mark(f, "r")
            elif unmark:
                ms.mark(f, "u")
            else:
                # default action: re-run the merge for this file
                wctx = repo[None]
                mctx = wctx.parents()[-1]
                # backup pre-resolve (merge uses .orig for its own purposes)
                a = repo.wjoin(f)
                util.copyfile(a, a + ".resolve")
                # resolve file
                ms.resolve(f, wctx, mctx)
                # replace filemerge's .orig file with our resolve file
                util.rename(a + ".resolve", a + ".orig")
def revert(ui, repo, *pats, **opts):
    """restore individual files or directories to an earlier state
    (Use update -r to check out earlier revisions, revert does not
    change the working directory parents.)
    With no revision specified, revert the named files or directories
    to the contents they had in the parent of the working directory.
    This restores the contents of the affected files to an unmodified
    state and unschedules adds, removes, copies, and renames. If the
    working directory has two parents, you must explicitly specify a
    revision.
    Using the -r/--rev option, revert the given files or directories
    to their contents as of a specific revision. This can be helpful
    to "roll back" some or all of an earlier change. See 'hg help
    dates' for a list of formats valid for -d/--date.
    Revert modifies the working directory. It does not commit any
    changes, or change the parent of the working directory. If you
    revert to a revision other than the parent of the working
    directory, the reverted files will thus appear modified
    afterwards.
    If a file has been deleted, it is restored. If the executable mode
    of a file was changed, it is reset.
    If names are given, all files matching the names are reverted.
    If no arguments are given, no files are reverted.
    Modified files are saved with a .orig suffix before reverting.
    To disable these backups, use --no-backup.
    """
    # --date is translated into the matching --rev; both together abort
    if opts["date"]:
        if opts["rev"]:
            raise util.Abort(_("you can't specify a revision and a date"))
        opts["rev"] = cmdutil.finddate(ui, repo, opts["date"])
    if not pats and not opts.get('all'):
        raise util.Abort(_('no files or directories specified; '
                           'use --all to revert the whole repo'))
    parent, p2 = repo.dirstate.parents()
    if not opts.get('rev') and p2 != nullid:
        raise util.Abort(_('uncommitted merge - please provide a '
                           'specific revision'))
    # ctx/node/mf describe the revision we are reverting TO
    ctx = repo[opts.get('rev')]
    node = ctx.node()
    mf = ctx.manifest()
    if node == parent:
        pmf = mf
    else:
        # parent manifest is loaded lazily, only if needed (see below)
        pmf = None
    # need all matching names in dirstate and manifest of target rev,
    # so have to walk both. do not print errors if files exist in one
    # but not other.
    names = {}
    wlock = repo.wlock()
    try:
        # walk dirstate.
        m = cmdutil.match(repo, pats, opts)
        m.bad = lambda x, y: False
        for abs in repo.walk(m):
            names[abs] = m.rel(abs), m.exact(abs)
        # walk target manifest.
        def badfn(path, msg):
            # only complain about paths not already seen in the dirstate
            # walk (directly or as a directory prefix)
            if path in names:
                return
            path_ = path + '/'
            for f in names:
                if f.startswith(path_):
                    return
            ui.warn("%s: %s\n" % (m.rel(path), msg))
        m = cmdutil.match(repo, pats, opts)
        m.bad = badfn
        for abs in repo[node].walk(m):
            if abs not in names:
                names[abs] = m.rel(abs), m.exact(abs)
        m = cmdutil.matchfiles(repo, names)
        changes = repo.status(match=m)[:4]
        modified, added, removed, deleted = map(set, changes)
        # if f is a rename, also revert the source
        cwd = repo.getcwd()
        for f in added:
            src = repo.dirstate.copied(f)
            if src and src not in names and repo.dirstate[src] == 'r':
                removed.add(src)
                names[src] = (repo.pathto(src, cwd), True)
        def removeforget(abs):
            # choose the user-visible verb for the remove action
            if repo.dirstate[abs] == 'a':
                return _('forgetting %s\n')
            return _('removing %s\n')
        # each action is a (file list, status message) pair; handle()
        # below appends to the list and prints the message
        revert = ([], _('reverting %s\n'))
        add = ([], _('adding %s\n'))
        remove = ([], removeforget)
        undelete = ([], _('undeleting %s\n'))
        disptable = (
            # dispatch table:
            #   file state
            #   action if in target manifest
            #   action if not in target manifest
            #   make backup if in target manifest
            #   make backup if not in target manifest
            (modified, revert, remove, True, True),
            (added, revert, remove, True, False),
            (removed, undelete, None, False, False),
            (deleted, revert, remove, False, False),
            )
        for abs, (rel, exact) in sorted(names.items()):
            mfentry = mf.get(abs)
            target = repo.wjoin(abs)
            def handle(xlist, dobackup):
                # record the file for the chosen action, optionally
                # saving a .orig backup first
                xlist[0].append(abs)
                if dobackup and not opts.get('no_backup') and util.lexists(target):
                    bakname = "%s.orig" % rel
                    ui.note(_('saving current version of %s as %s\n') %
                            (rel, bakname))
                    if not opts.get('dry_run'):
                        util.copyfile(target, bakname)
                if ui.verbose or not exact:
                    msg = xlist[1]
                    if not isinstance(msg, basestring):
                        msg = msg(abs)
                    ui.status(msg % rel)
            for table, hitlist, misslist, backuphit, backupmiss in disptable:
                if abs not in table:
                    continue
                # file has changed in dirstate
                if mfentry:
                    handle(hitlist, backuphit)
                elif misslist is not None:
                    handle(misslist, backupmiss)
                break
            else:
                # file did not match any status set above
                if abs not in repo.dirstate:
                    if mfentry:
                        handle(add, True)
                    elif exact:
                        ui.warn(_('file not managed: %s\n') % rel)
                    continue
                # file has not changed in dirstate
                if node == parent:
                    if exact:
                        ui.warn(_('no changes needed to %s\n') % rel)
                    continue
                if pmf is None:
                    # only need parent manifest in this unlikely case,
                    # so do not read by default
                    pmf = repo[parent].manifest()
                if abs in pmf:
                    if mfentry:
                        # if version of file is same in parent and target
                        # manifests, do nothing
                        if (pmf[abs] != mfentry or
                            pmf.flags(abs) != mf.flags(abs)):
                            handle(revert, False)
                    else:
                        handle(remove, False)
        if not opts.get('dry_run'):
            # apply the collected actions to the working directory
            def checkout(f):
                fc = ctx[f]
                repo.wwrite(f, fc.data(), fc.flags())
            audit_path = util.path_auditor(repo.root)
            for f in remove[0]:
                if repo.dirstate[f] == 'a':
                    repo.dirstate.forget(f)
                    continue
                audit_path(f)
                try:
                    util.unlink(repo.wjoin(f))
                except OSError:
                    pass
                repo.dirstate.remove(f)
            normal = None
            if node == parent:
                # We're reverting to our parent. If possible, we'd like status
                # to report the file as clean. We have to use normallookup for
                # merges to avoid losing information about merged/dirty files.
                if p2 != nullid:
                    normal = repo.dirstate.normallookup
                else:
                    normal = repo.dirstate.normal
            for f in revert[0]:
                checkout(f)
                if normal:
                    normal(f)
            for f in add[0]:
                checkout(f)
                repo.dirstate.add(f)
            normal = repo.dirstate.normallookup
            if node == parent and p2 == nullid:
                normal = repo.dirstate.normal
            for f in undelete[0]:
                checkout(f)
                normal(f)
    finally:
        wlock.release()
def rollback(ui, repo):
    """roll back the last transaction
    This command should be used with care. There is only one level of
    rollback, and there is no way to undo a rollback. It will also
    restore the dirstate at the time of the last transaction, losing
    any dirstate changes since that time. This command does not alter
    the working directory.
    Transactions are used to encapsulate the effects of all commands
    that create new changesets or propagate existing changesets into a
    repository. For example, the following commands are transactional,
    and their effects can be rolled back:
    - commit
    - import
    - pull
    - push (with this repository as the destination)
    - unbundle
    This command is not intended for use on public repositories. Once
    changes are visible for pull by other users, rolling a transaction
    back locally is ineffective (someone else may already have pulled
    the changes). Furthermore, a race is possible with readers of the
    repository; for example an in-progress pull from the repository
    may fail if a rollback is performed.
    """
    # the actual undo work lives entirely in the repository object
    repo.rollback()
def root(ui, repo):
    """print the root (top) of the current working directory
    Print the root directory of the current repository.
    """
    # emit the repository root followed by a newline
    ui.write("%s\n" % repo.root)
def serve(ui, repo, **opts):
    """export the repository via HTTP
    Start a local HTTP repository browser and pull server. You can use
    this for ad-hoc sharing and browing of repositories. It is
    recommended to use a real web server to serve a repository for
    longer periods of time.
    Please note that the server does not implement access control.
    This means that, by default, anybody can read from the server and
    nobody can write to it by default. Set the ``web.allow_push``
    option to ``*`` to allow everybody to push to the server. You
    should use a real web server if you need to authenticate users.
    By default, the server logs accesses to stdout and errors to
    stderr. Use the -A/--accesslog and -E/--errorlog options to log to
    files.
    """
    if opts["stdio"]:
        # --stdio: speak the ssh wire protocol on stdin/stdout instead
        # of starting an HTTP server
        if repo is None:
            raise error.RepoError(_("There is no Mercurial repository here"
                                    " (.hg not found)"))
        s = sshserver.sshserver(ui, repo)
        s.serve_forever()
    baseui = repo and repo.baseui or ui
    # copy command-line web options into the [web] config section so the
    # server code reads them like any other configuration
    optlist = ("name templates style address port prefix ipv6"
               " accesslog errorlog webdir_conf certificate encoding")
    for o in optlist.split():
        if opts.get(o, None):
            baseui.setconfig("web", o, str(opts[o]))
        if (repo is not None) and (repo.ui != baseui):
            repo.ui.setconfig("web", o, str(opts[o]))
    if repo is None and not ui.config("web", "webdir_conf"):
        raise error.RepoError(_("There is no Mercurial repository here"
                                " (.hg not found)"))
    class service(object):
        # small adapter so cmdutil.service can daemonize the HTTP server
        def init(self):
            util.set_signal_handler()
            self.httpd = server.create_server(baseui, repo)
            if not ui.verbose:
                return
            # report the address the server ended up bound to
            if self.httpd.prefix:
                prefix = self.httpd.prefix.strip('/') + '/'
            else:
                prefix = ''
            port = ':%d' % self.httpd.port
            if port == ':80':
                port = ''
            bindaddr = self.httpd.addr
            if bindaddr == '0.0.0.0':
                bindaddr = '*'
            elif ':' in bindaddr: # IPv6
                bindaddr = '[%s]' % bindaddr
            fqaddr = self.httpd.fqaddr
            if ':' in fqaddr:
                fqaddr = '[%s]' % fqaddr
            ui.status(_('listening at http://%s%s/%s (bound to %s:%d)\n') %
                      (fqaddr, port, prefix, bindaddr, self.httpd.port))
        def run(self):
            self.httpd.serve_forever()
    service = service()
    cmdutil.service(opts, initfn=service.init, runfn=service.run)
def status(ui, repo, *pats, **opts):
    """show changed files in the working directory
    Show status of files in the repository. If names are given, only
    files that match are shown. Files that are clean or ignored or
    the source of a copy/move operation, are not listed unless
    -c/--clean, -i/--ignored, -C/--copies or -A/--all are given.
    Unless options described with "show only ..." are given, the
    options -mardu are used.
    Option -q/--quiet hides untracked (unknown and ignored) files
    unless explicitly requested with -u/--unknown or -i/--ignored.
    NOTE: status may appear to disagree with diff if permissions have
    changed or a merge has occurred. The standard diff format does not
    report permission changes and diff only reports changes relative
    to one merge parent.
    If one revision is given, it is used as the base revision.
    If two revisions are given, the differences between them are
    shown. The --change option can also be used as a shortcut to list
    the changed files of a revision from its first parent.
    The codes used to show the status of files are::
    M = modified
    A = added
    R = removed
    C = clean
    ! = missing (deleted by non-hg command, but still tracked)
    ? = not tracked
    I = ignored
    = origin of the previous file listed as A (added)
    """
    revs = opts.get('rev')
    change = opts.get('change')
    if revs and change:
        msg = _('cannot specify --rev and --change at the same time')
        raise util.Abort(msg)
    elif change:
        # --change REV: compare REV against its first parent
        node2 = repo.lookup(change)
        node1 = repo[node2].parents()[0].node()
    else:
        node1, node2 = cmdutil.revpair(repo, revs)
    # print paths relative to the cwd only when patterns were given
    cwd = (pats and repo.getcwd()) or ''
    end = opts.get('print0') and '\0' or '\n'
    copy = {}
    states = 'modified added removed deleted unknown ignored clean'.split()
    # show: the subset of states selected by the options
    show = [k for k in states if opts.get(k)]
    if opts.get('all'):
        show += ui.quiet and (states[:4] + ['clean']) or states
    if not show:
        # default selection: -mard, plus unknown unless --quiet
        show = ui.quiet and states[:4] or states[:5]
    stat = repo.status(node1, node2, cmdutil.match(repo, pats, opts),
                       'ignored' in show, 'clean' in show, 'unknown' in show)
    changestates = zip(states, 'MAR!?IC', stat)
    if (opts.get('all') or opts.get('copies')) and not opts.get('no_status'):
        # build the copy map so copy/rename sources can be shown
        ctxn = repo[nullid]
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        added = stat[1]
        if node2 is None:
            added = stat[0] + stat[1] # merged?
        for k, v in copies.copies(repo, ctx1, ctx2, ctxn)[0].iteritems():
            if k in added:
                copy[k] = v
            elif v in added:
                copy[v] = k
    for state, char, files in changestates:
        if state in show:
            format = "%s %%s%s" % (char, end)
            if opts.get('no_status'):
                format = "%%s%s" % end
            for f in files:
                ui.write(format % repo.pathto(f, cwd))
                if f in copy:
                    # print the copy source indented under the file
                    ui.write(' %s%s' % (repo.pathto(copy[f], cwd), end))
def summary(ui, repo, **opts):
    """summarize working directory state
    This generates a brief summary of the working directory state,
    including parents, branch, commit status, and available updates.
    With the --remote option, this will check the default paths for
    incoming and outgoing changes. This can be time-consuming.
    """
    ctx = repo[None]
    parents = ctx.parents()
    pnode = parents[0].node()
    tags = repo.tags()
    for p in parents:
        # gather every tag pointing at this parent for the summary line
        t = ' '.join([t for t in tags if tags[t] == p.node()])
        if p.rev() == -1:
            if not len(repo):
                t += _(' (empty repository)')
            else:
                t += _(' (no revision checked out)')
        ui.write(_('parent: %d:%s %s\n') % (p.rev(), str(p), t))
        if p.description():
            ui.status(' ' + p.description().splitlines()[0].strip() + '\n')
    branch = ctx.branch()
    bheads = repo.branchheads(branch)
    m = _('branch: %s\n') % branch
    if branch != 'default':
        ui.write(m)
    else:
        # the default branch name is low-signal; status() hides it in
        # quiet mode while write() would always show it
        ui.status(m)
    # st: modified/added/removed/deleted/unknown/ignored, plus a final
    # list of unresolved merge-state files appended below
    st = list(repo.status(unknown=True))[:6]
    ms = merge_.mergestate(repo)
    st.append([f for f in ms if ms[f] == 'u'])
    labels = [_('%d modified'), _('%d added'), _('%d removed'),
              _('%d deleted'), _('%d unknown'), _('%d ignored'),
              _('%d unresolved')]
    t = []
    for s, l in zip(st, labels):
        if s:
            t.append(l % len(s))
    t = ', '.join(t)
    cleanworkdir = False
    if len(parents) > 1:
        t += _(' (merge)')
    elif branch != parents[0].branch():
        t += _(' (new branch)')
    elif (not st[0] and not st[1] and not st[2]):
        t += _(' (clean)')
        cleanworkdir = True
    elif pnode not in bheads:
        t += _(' (new branch head)')
    if cleanworkdir:
        ui.status(_('commit: %s\n') % t.strip())
    else:
        ui.write(_('commit: %s\n') % t.strip())
    # all ancestors of branch heads - all ancestors of parent = new csets
    new = [0] * len(repo)
    cl = repo.changelog
    for a in [cl.rev(n) for n in bheads]:
        new[a] = 1
    for a in cl.ancestors(*[cl.rev(n) for n in bheads]):
        new[a] = 1
    for a in [p.rev() for p in parents]:
        if a >= 0:
            new[a] = 0
    for a in cl.ancestors(*[p.rev() for p in parents]):
        new[a] = 0
    new = sum(new)
    if new == 0:
        ui.status(_('update: (current)\n'))
    elif pnode not in bheads:
        ui.write(_('update: %d new changesets (update)\n') % new)
    else:
        ui.write(_('update: %d new changesets, %d branch heads (merge)\n') %
                 (new, len(bheads)))
    if opts.get('remote'):
        t = []
        # check the default pull path for incoming changesets
        source, branches = hg.parseurl(ui.expandpath('default'))
        other = hg.repository(cmdutil.remoteui(repo, {}), source)
        revs, checkout = hg.addbranchrevs(repo, other, branches, opts.get('rev'))
        ui.debug('comparing with %s\n' % url.hidepassword(source))
        # buffer ui output so discovery chatter does not leak into the
        # summary
        repo.ui.pushbuffer()
        common, incoming, rheads = repo.findcommonincoming(other)
        repo.ui.popbuffer()
        if incoming:
            t.append(_('1 or more incoming'))
        # check the default push path for outgoing changesets
        dest, branches = hg.parseurl(ui.expandpath('default-push', 'default'))
        revs, checkout = hg.addbranchrevs(repo, repo, branches, None)
        other = hg.repository(cmdutil.remoteui(repo, {}), dest)
        ui.debug('comparing with %s\n' % url.hidepassword(dest))
        repo.ui.pushbuffer()
        o = repo.findoutgoing(other)
        repo.ui.popbuffer()
        o = repo.changelog.nodesbetween(o, None)[0]
        if o:
            t.append(_('%d outgoing') % len(o))
        if t:
            ui.write(_('remote: %s\n') % (', '.join(t)))
        else:
            ui.status(_('remote: (synced)\n'))
def tag(ui, repo, name1, *names, **opts):
    """add one or more tags for the current or given revision
    Name a particular revision using <name>.
    Tags are used to name particular revisions of the repository and are
    very useful to compare different revisions, to go back to significant
    earlier versions or to mark branch points as releases, etc.
    If no revision is given, the parent of the working directory is
    used, or tip if no revision is checked out.
    To facilitate version control, distribution, and merging of tags,
    they are stored as a file named ".hgtags" which is managed
    similarly to other project files and can be hand-edited if
    necessary. The file '.hg/localtags' is used for local tags (not
    shared among repositories).
    See 'hg help dates' for a list of formats valid for -d/--date.
    """
    all_names = (name1,) + names
    # Reject duplicate and reserved tag names up front.
    if len(set(all_names)) != len(all_names):
        raise util.Abort(_('tag names must be unique'))
    for name in all_names:
        if name in ('tip', '.', 'null'):
            raise util.Abort(_('the name \'%s\' is reserved') % name)
    if opts.get('rev') and opts.get('remove'):
        raise util.Abort(_("--rev and --remove are incompatible"))

    # Default to the working directory parent unless -r was given.
    rev_ = opts['rev'] if opts.get('rev') else "."
    message = opts.get('message')

    if opts.get('remove'):
        # Removal: every named tag must exist and match the requested scope.
        expectedtype = 'local' if opts.get('local') else 'global'
        for name in all_names:
            if not repo.tagtype(name):
                raise util.Abort(_('tag \'%s\' does not exist') % name)
            if repo.tagtype(name) != expectedtype:
                if expectedtype == 'global':
                    msg = _('tag \'%s\' is not a global tag')
                else:
                    msg = _('tag \'%s\' is not a local tag')
                raise util.Abort(msg % name)
        # A tag pointing at nullid is the on-disk representation of removal.
        rev_ = nullid
        if not message:
            # we don't translate commit messages
            message = 'Removed tag %s' % ', '.join(all_names)
    elif not opts.get('force'):
        for name in all_names:
            if name in repo.tags():
                raise util.Abort(_('tag \'%s\' already exists '
                                   '(use -f to force)') % name)

    if not rev_ and repo.dirstate.parents()[1] != nullid:
        raise util.Abort(_('uncommitted merge - please provide a '
                           'specific revision'))
    node = repo[rev_].node()
    if not message:
        # we don't translate commit messages
        message = ('Added tag %s for changeset %s' %
                   (', '.join(all_names), short(node)))

    date = opts.get('date')
    if date:
        date = util.parsedate(date)
    repo.tag(all_names, node, message, opts.get('local'),
             opts.get('user'), date)
def tags(ui, repo):
    """list repository tags
    This lists both regular and local tags. When the -v/--verbose
    switch is used, a third column "local" is printed for local tags.
    """
    hexfunc = ui.debugflag and hex or short
    tagtype = ""
    for t, n in reversed(repo.tagslist()):
        if ui.quiet:
            ui.write("%s\n" % t)
            continue
        try:
            hn = hexfunc(n)
            r = "%5d:%s" % (repo.changelog.rev(n), hn)
        except error.LookupError:
            # The tag points at a node missing from the changelog.  hn was
            # assigned before the failing rev() lookup, so it is valid here.
            r = " ?:%s" % hn
        # Previously this output code sat in the try's `else` clause, so tags
        # whose node failed the lookup were silently skipped even though `r`
        # had been prepared for exactly that case.  Emit a line for every tag.
        spaces = " " * (30 - encoding.colwidth(t))
        if ui.verbose:
            if repo.tagtype(t) == 'local':
                tagtype = " local"
            else:
                tagtype = ""
        ui.write("%s%s %s%s\n" % (t, spaces, r, tagtype))
def tip(ui, repo, **opts):
    """show the tip revision
    The tip revision (usually just called the tip) is the changeset
    most recently added to the repository (and therefore the most
    recently changed head).
    If you have just made a commit, that commit will be the tip. If
    you have just pulled changes from another repository, the tip of
    that repository becomes the current tip. The "tip" tag is special
    and cannot be renamed or assigned to a different changeset.
    """
    # The tip is always the highest-numbered revision: len(repo) - 1.
    tiprev = len(repo) - 1
    displayer = cmdutil.show_changeset(ui, repo, opts)
    displayer.show(repo[tiprev])
    displayer.close()
def unbundle(ui, repo, fname1, *fnames, **opts):
    """apply one or more changegroup files
    Apply one or more compressed changegroup files generated by the
    bundle command.
    """
    # At least one file is guaranteed by the required fname1 argument;
    # modheads below is therefore always bound after the loop.
    bundlefiles = (fname1,) + fnames
    lock = repo.lock()
    try:
        for bundlefile in bundlefiles:
            fh = url.open(ui, bundlefile)
            gen = changegroup.readbundle(fh, bundlefile)
            modheads = repo.addchangegroup(gen, 'unbundle',
                                           'bundle:' + bundlefile)
    finally:
        lock.release()
    return postincoming(ui, repo, modheads, opts.get('update'), None)
def update(ui, repo, node=None, rev=None, clean=False, date=None, check=False):
    """update working directory
    Update the repository's working directory to the specified
    changeset.
    If no changeset is specified, attempt to update to the head of the
    current branch. If this head is a descendant of the working
    directory's parent, update to it, otherwise abort.
    The following rules apply when the working directory contains
    uncommitted changes:
    1. If neither -c/--check nor -C/--clean is specified, and if
    the requested changeset is an ancestor or descendant of
    the working directory's parent, the uncommitted changes
    are merged into the requested changeset and the merged
    result is left uncommitted. If the requested changeset is
    not an ancestor or descendant (that is, it is on another
    branch), the update is aborted and the uncommitted changes
    are preserved.
    2. With the -c/--check option, the update is aborted and the
    uncommitted changes are preserved.
    3. With the -C/--clean option, uncommitted changes are discarded and
    the working directory is updated to the requested changeset.
    Use null as the changeset to remove the working directory (like 'hg
    clone -U').
    If you want to update just one file to an older changeset, use 'hg revert'.
    See 'hg help dates' for a list of formats valid for -d/--date.
    """
    # `node` and `rev` are two spellings of the same thing; fold them.
    if rev and node:
        raise util.Abort(_("please specify just one revision"))
    if not rev:
        rev = node
    if check and clean:
        raise util.Abort(_("cannot specify both -c/--check and -C/--clean"))

    if check:
        # we could use dirty() but we can ignore merge and branch trivia
        wctx = repo[None]
        if wctx.modified() or wctx.added() or wctx.removed():
            raise util.Abort(_("uncommitted local changes"))

    if date:
        if rev:
            raise util.Abort(_("you can't specify a revision and a date"))
        rev = cmdutil.finddate(ui, repo, date)

    # --clean discards local changes; --check verified above there were none,
    # so both paths can safely use hg.clean.
    if clean or check:
        return hg.clean(repo, rev)
    return hg.update(repo, rev)
def verify(ui, repo):
    """verify the integrity of the repository
    Verify the integrity of the current repository.
    This will perform an extensive check of the repository's
    integrity, validating the hashes and checksums of each entry in
    the changelog, manifest, and tracked files, as well as the
    integrity of their crosslinks and indices.
    """
    # Thin CLI wrapper: all verification work is delegated to hg.verify.
    return hg.verify(repo)
def version_(ui):
    """output version and copyright information"""
    # Version line is always written; the copyright notice goes through
    # ui.status so --quiet suppresses it.
    ui.write(_("Mercurial Distributed SCM (version %s)\n")
             % util.version())
    notice = _(
        "\nCopyright (C) 2005-2010 Matt Mackall <mpm@selenic.com> and others\n"
        "This is free software; see the source for copying conditions. "
        "There is NO\nwarranty; "
        "not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
    )
    ui.status(notice)
# Command options and aliases are listed here, alphabetically

# Each option below is a fancyopts-style tuple:
#   (short flag, long name, default value, help text)
# The shared groups are concatenated into per-command option lists in
# the `table` definition further down.

# Options accepted by every command.
globalopts = [
    ('R', 'repository', '',
     _('repository root directory or name of overlay bundle file')),
    ('', 'cwd', '', _('change working directory')),
    ('y', 'noninteractive', None,
     _('do not prompt, assume \'yes\' for any required answers')),
    ('q', 'quiet', None, _('suppress output')),
    ('v', 'verbose', None, _('enable additional output')),
    ('', 'config', [],
     _('set/override config option (use \'section.name=value\')')),
    ('', 'debug', None, _('enable debugging output')),
    ('', 'debugger', None, _('start debugger')),
    ('', 'encoding', encoding.encoding, _('set the charset encoding')),
    ('', 'encodingmode', encoding.encodingmode,
     _('set the charset encoding mode')),
    ('', 'traceback', None, _('always print a traceback on exception')),
    ('', 'time', None, _('time how long the command takes')),
    ('', 'profile', None, _('print command execution profile')),
    ('', 'version', None, _('output version information and exit')),
    ('h', 'help', None, _('display help and exit')),
]

dryrunopts = [('n', 'dry-run', None,
               _('do not perform actions, just print output'))]

# Options for commands that talk to a remote repository.
remoteopts = [
    ('e', 'ssh', '', _('specify ssh command to use')),
    ('', 'remotecmd', '', _('specify hg command to run on the remote side')),
]

# File-selection options shared by commands that walk the working directory.
walkopts = [
    ('I', 'include', [], _('include names matching the given patterns')),
    ('X', 'exclude', [], _('exclude names matching the given patterns')),
]

commitopts = [
    ('m', 'message', '', _('use <text> as commit message')),
    ('l', 'logfile', '', _('read commit message from <file>')),
]

commitopts2 = [
    ('d', 'date', '', _('record datecode as commit date')),
    ('u', 'user', '', _('record the specified user as committer')),
]

templateopts = [
    ('', 'style', '', _('display using template map file')),
    ('', 'template', '', _('display with template')),
]

logopts = [
    ('p', 'patch', None, _('show patch')),
    ('g', 'git', None, _('use git extended diff format')),
    ('l', 'limit', '', _('limit number of changes displayed')),
    ('M', 'no-merges', None, _('do not show merges')),
] + templateopts

diffopts = [
    ('a', 'text', None, _('treat all files as text')),
    ('g', 'git', None, _('use git extended diff format')),
    ('', 'nodates', None, _('omit dates from diff headers'))
]

# Additional diff-formatting options layered on top of diffopts.
diffopts2 = [
    ('p', 'show-function', None, _('show which function each change is in')),
    ('', 'reverse', None, _('produce a diff that undoes the changes')),
    ('w', 'ignore-all-space', None,
     _('ignore white space when comparing lines')),
    ('b', 'ignore-space-change', None,
     _('ignore changes in the amount of white space')),
    ('B', 'ignore-blank-lines', None,
     _('ignore changes whose lines are all blank')),
    ('U', 'unified', '', _('number of lines of context to show')),
    ('', 'stat', None, _('output diffstat-style summary of changes')),
]

similarityopts = [
    ('s', 'similarity', '',
     _('guess renamed files by similarity (0<=s<=100)'))
]
# The command dispatch table: maps a "name|alias|..." key to a tuple of
# (command function, option list, synopsis string).  Entries with only
# two elements omit the synopsis.
# NOTE(review): a leading '^' on a key appears to mark commands shown in
# the short help listing -- confirm against the help/dispatch machinery,
# which is outside this chunk.
table = {
    "^add": (add, walkopts + dryrunopts, _('[OPTION]... [FILE]...')),
    "addremove":
        (addremove, similarityopts + walkopts + dryrunopts,
         _('[OPTION]... [FILE]...')),
    "^annotate|blame":
        (annotate,
         [('r', 'rev', '', _('annotate the specified revision')),
          ('', 'follow', None,
           _('follow copies/renames and list the filename (DEPRECATED)')),
          ('', 'no-follow', None, _("don't follow copies and renames")),
          ('a', 'text', None, _('treat all files as text')),
          ('u', 'user', None, _('list the author (long with -v)')),
          ('f', 'file', None, _('list the filename')),
          ('d', 'date', None, _('list the date (short with -q)')),
          ('n', 'number', None, _('list the revision number (default)')),
          ('c', 'changeset', None, _('list the changeset')),
          ('l', 'line-number', None,
           _('show line number at the first appearance'))
         ] + walkopts,
         _('[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...')),
    "archive":
        (archive,
         [('', 'no-decode', None, _('do not pass files through decoders')),
          ('p', 'prefix', '', _('directory prefix for files in archive')),
          ('r', 'rev', '', _('revision to distribute')),
          ('t', 'type', '', _('type of distribution to create')),
         ] + walkopts,
         _('[OPTION]... DEST')),
    "backout":
        (backout,
         [('', 'merge', None,
           _('merge with old dirstate parent after backout')),
          ('', 'parent', '', _('parent to choose when backing out merge')),
          ('r', 'rev', '', _('revision to backout')),
         ] + walkopts + commitopts + commitopts2,
         _('[OPTION]... [-r] REV')),
    "bisect":
        (bisect,
         [('r', 'reset', False, _('reset bisect state')),
          ('g', 'good', False, _('mark changeset good')),
          ('b', 'bad', False, _('mark changeset bad')),
          ('s', 'skip', False, _('skip testing changeset')),
          ('c', 'command', '', _('use command to check changeset state')),
          ('U', 'noupdate', False, _('do not update to target'))],
         _("[-gbsr] [-U] [-c CMD] [REV]")),
    "branch":
        (branch,
         [('f', 'force', None,
           _('set branch name even if it shadows an existing branch')),
          ('C', 'clean', None, _('reset branch name to parent branch name'))],
         _('[-fC] [NAME]')),
    "branches":
        (branches,
         [('a', 'active', False,
           _('show only branches that have unmerged heads')),
          ('c', 'closed', False,
           _('show normal and closed branches'))],
         _('[-ac]')),
    "bundle":
        (bundle,
         [('f', 'force', None,
           _('run even when the destination is unrelated')),
          ('r', 'rev', [],
           _('a changeset intended to be added to the destination')),
          ('b', 'branch', [],
           _('a specific branch you would like to bundle')),
          ('', 'base', [],
           _('a base changeset assumed to be available at the destination')),
          ('a', 'all', None, _('bundle all changesets in the repository')),
          ('t', 'type', 'bzip2', _('bundle compression type to use')),
         ] + remoteopts,
         _('[-f] [-t TYPE] [-a] [-r REV]... [--base REV]... FILE [DEST]')),
    "cat":
        (cat,
         [('o', 'output', '', _('print output to file with formatted name')),
          ('r', 'rev', '', _('print the given revision')),
          ('', 'decode', None, _('apply any matching decode filter')),
         ] + walkopts,
         _('[OPTION]... FILE...')),
    "^clone":
        (clone,
         [('U', 'noupdate', None,
           _('the clone will include an empty working copy (only a repository)')),
          ('u', 'updaterev', '',
           _('revision, tag or branch to check out')),
          ('r', 'rev', [],
           _('include the specified changeset')),
          ('b', 'branch', [],
           _('clone only the specified branch')),
          ('', 'pull', None, _('use pull protocol to copy metadata')),
          ('', 'uncompressed', None,
           _('use uncompressed transfer (fast over LAN)')),
         ] + remoteopts,
         _('[OPTION]... SOURCE [DEST]')),
    "^commit|ci":
        (commit,
         [('A', 'addremove', None,
           _('mark new/missing files as added/removed before committing')),
          ('', 'close-branch', None,
           _('mark a branch as closed, hiding it from the branch list')),
         ] + walkopts + commitopts + commitopts2,
         _('[OPTION]... [FILE]...')),
    "copy|cp":
        (copy,
         [('A', 'after', None, _('record a copy that has already occurred')),
          ('f', 'force', None,
           _('forcibly copy over an existing managed file')),
         ] + walkopts + dryrunopts,
         _('[OPTION]... [SOURCE]... DEST')),
    "debugancestor": (debugancestor, [], _('[INDEX] REV1 REV2')),
    "debugcheckstate": (debugcheckstate, [], ''),
    "debugcommands": (debugcommands, [], _('[COMMAND]')),
    "debugcomplete":
        (debugcomplete,
         [('o', 'options', None, _('show the command options'))],
         _('[-o] CMD')),
    "debugdate":
        (debugdate,
         [('e', 'extended', None, _('try extended date formats'))],
         _('[-e] DATE [RANGE]')),
    "debugdata": (debugdata, [], _('FILE REV')),
    "debugfsinfo": (debugfsinfo, [], _('[PATH]')),
    "debugindex": (debugindex, [], _('FILE')),
    "debugindexdot": (debugindexdot, [], _('FILE')),
    "debuginstall": (debuginstall, [], ''),
    "debugrebuildstate":
        (debugrebuildstate,
         [('r', 'rev', '', _('revision to rebuild to'))],
         _('[-r REV] [REV]')),
    "debugrename":
        (debugrename,
         [('r', 'rev', '', _('revision to debug'))],
         _('[-r REV] FILE')),
    "debugsetparents":
        (debugsetparents, [], _('REV1 [REV2]')),
    "debugstate":
        (debugstate,
         [('', 'nodates', None, _('do not display the saved mtime'))],
         _('[OPTION]...')),
    "debugsub":
        (debugsub,
         [('r', 'rev', '', _('revision to check'))],
         _('[-r REV] [REV]')),
    "debugwalk": (debugwalk, walkopts, _('[OPTION]... [FILE]...')),
    "^diff":
        (diff,
         [('r', 'rev', [], _('revision')),
          ('c', 'change', '', _('change made by revision'))
         ] + diffopts + diffopts2 + walkopts,
         _('[OPTION]... ([-c REV] | [-r REV1 [-r REV2]]) [FILE]...')),
    "^export":
        (export,
         [('o', 'output', '', _('print output to file with formatted name')),
          ('', 'switch-parent', None, _('diff against the second parent')),
          ('r', 'rev', [], _('revisions to export')),
         ] + diffopts,
         _('[OPTION]... [-o OUTFILESPEC] REV...')),
    "^forget":
        (forget,
         [] + walkopts,
         _('[OPTION]... FILE...')),
    "grep":
        (grep,
         [('0', 'print0', None, _('end fields with NUL')),
          ('', 'all', None, _('print all revisions that match')),
          ('f', 'follow', None,
           _('follow changeset history,'
             ' or file history across copies and renames')),
          ('i', 'ignore-case', None, _('ignore case when matching')),
          ('l', 'files-with-matches', None,
           _('print only filenames and revisions that match')),
          ('n', 'line-number', None, _('print matching line numbers')),
          ('r', 'rev', [], _('only search files changed within revision range')),
          ('u', 'user', None, _('list the author (long with -v)')),
          ('d', 'date', None, _('list the date (short with -q)')),
         ] + walkopts,
         _('[OPTION]... PATTERN [FILE]...')),
    "heads":
        (heads,
         [('r', 'rev', '', _('show only heads which are descendants of REV')),
          ('t', 'topo', False, _('show topological heads only')),
          ('a', 'active', False,
           _('show active branchheads only [DEPRECATED]')),
          ('c', 'closed', False,
           _('show normal and closed branch heads')),
         ] + templateopts,
         _('[-ac] [-r STARTREV] [REV]...')),
    "help": (help_, [], _('[TOPIC]')),
    "identify|id":
        (identify,
         [('r', 'rev', '', _('identify the specified revision')),
          ('n', 'num', None, _('show local revision number')),
          ('i', 'id', None, _('show global revision id')),
          ('b', 'branch', None, _('show branch')),
          ('t', 'tags', None, _('show tags'))],
         _('[-nibt] [-r REV] [SOURCE]')),
    "import|patch":
        (import_,
         [('p', 'strip', 1,
           _('directory strip option for patch. This has the same '
             'meaning as the corresponding patch option')),
          ('b', 'base', '', _('base path')),
          ('f', 'force', None,
           _('skip check for outstanding uncommitted changes')),
          ('', 'no-commit', None,
           _("don't commit, just update the working directory")),
          ('', 'exact', None,
           _('apply patch to the nodes from which it was generated')),
          ('', 'import-branch', None,
           _('use any branch information in patch (implied by --exact)'))] +
         commitopts + commitopts2 + similarityopts,
         _('[OPTION]... PATCH...')),
    "incoming|in":
        (incoming,
         [('f', 'force', None,
           _('run even if remote repository is unrelated')),
          ('n', 'newest-first', None, _('show newest record first')),
          ('', 'bundle', '', _('file to store the bundles into')),
          ('r', 'rev', [],
           _('a remote changeset intended to be added')),
          ('b', 'branch', [],
           _('a specific branch you would like to pull')),
         ] + logopts + remoteopts,
         _('[-p] [-n] [-M] [-f] [-r REV]...'
           ' [--bundle FILENAME] [SOURCE]')),
    "^init":
        (init,
         remoteopts,
         _('[-e CMD] [--remotecmd CMD] [DEST]')),
    "locate":
        (locate,
         [('r', 'rev', '', _('search the repository as it is in REV')),
          ('0', 'print0', None,
           _('end filenames with NUL, for use with xargs')),
          ('f', 'fullpath', None,
           _('print complete paths from the filesystem root')),
         ] + walkopts,
         _('[OPTION]... [PATTERN]...')),
    "^log|history":
        (log,
         [('f', 'follow', None,
           _('follow changeset history,'
             ' or file history across copies and renames')),
          ('', 'follow-first', None,
           _('only follow the first parent of merge changesets')),
          ('d', 'date', '', _('show revisions matching date spec')),
          ('C', 'copies', None, _('show copied files')),
          ('k', 'keyword', [], _('do case-insensitive search for a keyword')),
          ('r', 'rev', [], _('show the specified revision or range')),
          ('', 'removed', None, _('include revisions where files were removed')),
          ('m', 'only-merges', None, _('show only merges')),
          ('u', 'user', [], _('revisions committed by user')),
          ('b', 'only-branch', [],
           _('show only changesets within the given named branch')),
          ('P', 'prune', [],
           _('do not display revision or any of its ancestors')),
         ] + logopts + walkopts,
         _('[OPTION]... [FILE]')),
    "manifest":
        (manifest,
         [('r', 'rev', '', _('revision to display'))],
         _('[-r REV]')),
    "^merge":
        (merge,
         [('f', 'force', None, _('force a merge with outstanding changes')),
          ('r', 'rev', '', _('revision to merge')),
          ('P', 'preview', None,
           _('review revisions to merge (no merge is performed)'))],
         _('[-P] [-f] [[-r] REV]')),
    "outgoing|out":
        (outgoing,
         [('f', 'force', None,
           _('run even when the destination is unrelated')),
          ('r', 'rev', [],
           _('a changeset intended to be included in the destination')),
          ('n', 'newest-first', None, _('show newest record first')),
          ('b', 'branch', [],
           _('a specific branch you would like to push')),
         ] + logopts + remoteopts,
         _('[-M] [-p] [-n] [-f] [-r REV]... [DEST]')),
    "parents":
        (parents,
         [('r', 'rev', '', _('show parents of the specified revision')),
         ] + templateopts,
         _('[-r REV] [FILE]')),
    "paths": (paths, [], _('[NAME]')),
    "^pull":
        (pull,
         [('u', 'update', None,
           _('update to new branch head if changesets were pulled')),
          ('f', 'force', None,
           _('run even when remote repository is unrelated')),
          ('r', 'rev', [],
           _('a remote changeset intended to be added')),
          ('b', 'branch', [],
           _('a specific branch you would like to pull')),
         ] + remoteopts,
         _('[-u] [-f] [-r REV]... [-e CMD] [--remotecmd CMD] [SOURCE]')),
    "^push":
        (push,
         [('f', 'force', None, _('force push')),
          ('r', 'rev', [],
           _('a changeset intended to be included in the destination')),
          ('b', 'branch', [],
           _('a specific branch you would like to push')),
         ] + remoteopts,
         _('[-f] [-r REV]... [-e CMD] [--remotecmd CMD] [DEST]')),
    "recover": (recover, []),
    "^remove|rm":
        (remove,
         [('A', 'after', None, _('record delete for missing files')),
          ('f', 'force', None,
           _('remove (and delete) file even if added or modified')),
         ] + walkopts,
         _('[OPTION]... FILE...')),
    "rename|mv":
        (rename,
         [('A', 'after', None, _('record a rename that has already occurred')),
          ('f', 'force', None,
           _('forcibly copy over an existing managed file')),
         ] + walkopts + dryrunopts,
         _('[OPTION]... SOURCE... DEST')),
    "resolve":
        (resolve,
         [('a', 'all', None, _('select all unresolved files')),
          ('l', 'list', None, _('list state of files needing merge')),
          ('m', 'mark', None, _('mark files as resolved')),
          ('u', 'unmark', None, _('unmark files as resolved')),
          ('n', 'no-status', None, _('hide status prefix'))]
         + walkopts,
         _('[OPTION]... [FILE]...')),
    "revert":
        (revert,
         [('a', 'all', None, _('revert all changes when no arguments given')),
          ('d', 'date', '', _('tipmost revision matching date')),
          ('r', 'rev', '', _('revert to the specified revision')),
          ('', 'no-backup', None, _('do not save backup copies of files')),
         ] + walkopts + dryrunopts,
         _('[OPTION]... [-r REV] [NAME]...')),
    "rollback": (rollback, []),
    "root": (root, []),
    "^serve":
        (serve,
         [('A', 'accesslog', '', _('name of access log file to write to')),
          ('d', 'daemon', None, _('run server in background')),
          ('', 'daemon-pipefds', '', _('used internally by daemon mode')),
          ('E', 'errorlog', '', _('name of error log file to write to')),
          ('p', 'port', 0, _('port to listen on (default: 8000)')),
          ('a', 'address', '',
           _('address to listen on (default: all interfaces)')),
          ('', 'prefix', '',
           _('prefix path to serve from (default: server root)')),
          ('n', 'name', '',
           _('name to show in web pages (default: working directory)')),
          ('', 'webdir-conf', '', _('name of the webdir config file'
                                    ' (serve more than one repository)')),
          ('', 'pid-file', '', _('name of file to write process ID to')),
          ('', 'stdio', None, _('for remote clients')),
          ('t', 'templates', '', _('web templates to use')),
          ('', 'style', '', _('template style to use')),
          ('6', 'ipv6', None, _('use IPv6 in addition to IPv4')),
          ('', 'certificate', '', _('SSL certificate file'))],
         _('[OPTION]...')),
    "showconfig|debugconfig":
        (showconfig,
         [('u', 'untrusted', None, _('show untrusted configuration options'))],
         _('[-u] [NAME]...')),
    "^summary|sum":
        (summary,
         [('', 'remote', None, _('check for push and pull'))], '[--remote]'),
    "^status|st":
        (status,
         [('A', 'all', None, _('show status of all files')),
          ('m', 'modified', None, _('show only modified files')),
          ('a', 'added', None, _('show only added files')),
          ('r', 'removed', None, _('show only removed files')),
          ('d', 'deleted', None, _('show only deleted (but tracked) files')),
          ('c', 'clean', None, _('show only files without changes')),
          ('u', 'unknown', None, _('show only unknown (not tracked) files')),
          ('i', 'ignored', None, _('show only ignored files')),
          ('n', 'no-status', None, _('hide status prefix')),
          ('C', 'copies', None, _('show source of copied files')),
          ('0', 'print0', None,
           _('end filenames with NUL, for use with xargs')),
          ('', 'rev', [], _('show difference from revision')),
          ('', 'change', '', _('list the changed files of a revision')),
         ] + walkopts,
         _('[OPTION]... [FILE]...')),
    "tag":
        (tag,
         [('f', 'force', None, _('replace existing tag')),
          ('l', 'local', None, _('make the tag local')),
          ('r', 'rev', '', _('revision to tag')),
          ('', 'remove', None, _('remove a tag')),
          # -l/--local is already there, commitopts cannot be used
          ('m', 'message', '', _('use <text> as commit message')),
         ] + commitopts2,
         _('[-f] [-l] [-m TEXT] [-d DATE] [-u USER] [-r REV] NAME...')),
    "tags": (tags, [], ''),
    "tip":
        (tip,
         [('p', 'patch', None, _('show patch')),
          ('g', 'git', None, _('use git extended diff format')),
         ] + templateopts,
         _('[-p] [-g]')),
    "unbundle":
        (unbundle,
         [('u', 'update', None,
           _('update to new branch head if changesets were unbundled'))],
         _('[-u] FILE...')),
    "^update|up|checkout|co":
        (update,
         [('C', 'clean', None, _('discard uncommitted changes (no backup)')),
          ('c', 'check', None, _('check for uncommitted changes')),
          ('d', 'date', '', _('tipmost revision matching date')),
          ('r', 'rev', '', _('revision'))],
         _('[-c] [-C] [-d DATE] [[-r] REV]')),
    "verify": (verify, []),
    "version": (version_, []),
}
# Space-separated command-name lists, presumably consumed by the command
# dispatcher (defined outside this chunk -- verify there): 'norepo'
# commands run without a repository; 'optionalrepo' commands use one
# only when available.  Both are single strings, not tuples.
norepo = ("clone init version help debugcommands debugcomplete debugdata"
          " debugindex debugindexdot debugdate debuginstall debugfsinfo")
optionalrepo = ("identify paths serve showconfig debugancestor")
| {
"content_hash": "6f4212f01acec1e741fa6bce7404b55a",
"timestamp": "",
"source": "github",
"line_count": 3866,
"max_line_length": 83,
"avg_line_length": 37.14692188308329,
"alnum_prop": 0.5599331522874451,
"repo_name": "joewalnes/idea-community",
"id": "d3135d385e60ad23e9896978692d5441462dc212",
"size": "143852",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/hg4idea/testData/bin/mercurial/commands.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "387"
},
{
"name": "C",
"bytes": "136045"
},
{
"name": "C#",
"bytes": "103"
},
{
"name": "C++",
"bytes": "40449"
},
{
"name": "Emacs Lisp",
"bytes": "2507"
},
{
"name": "Erlang",
"bytes": "10"
},
{
"name": "Groovy",
"bytes": "361320"
},
{
"name": "Java",
"bytes": "89694599"
},
{
"name": "JavaScript",
"bytes": "978"
},
{
"name": "Objective-C",
"bytes": "1877"
},
{
"name": "PHP",
"bytes": "145"
},
{
"name": "Perl",
"bytes": "6523"
},
{
"name": "Python",
"bytes": "1699274"
},
{
"name": "Shell",
"bytes": "6965"
},
{
"name": "VimL",
"bytes": "5950"
}
],
"symlink_target": ""
} |
from .sub_resource import SubResource
class InboundNatRule(SubResource):
    """Inbound NAT rule of the load balancer.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    :param id: Resource ID.
    :type id: str
    :param frontend_ip_configuration: A reference to frontend IP addresses.
    :type frontend_ip_configuration:
    ~azure.mgmt.network.v2016_12_01.models.SubResource
    :ivar backend_ip_configuration: A reference to a private IP address
    defined on a network interface of a VM. Traffic sent to the frontend port
    of each of the frontend IP configurations is forwarded to the backed IP.
    :vartype backend_ip_configuration:
    ~azure.mgmt.network.v2016_12_01.models.NetworkInterfaceIPConfiguration
    :param protocol: The transport protocol for the endpoint. Possible values
    are: 'Udp' or 'Tcp'. Possible values include: 'Udp', 'Tcp'
    :type protocol: str or
    ~azure.mgmt.network.v2016_12_01.models.TransportProtocol
    :param frontend_port: The port for the external endpoint. Port numbers for
    each rule must be unique within the Load Balancer. Acceptable values range
    from 1 to 65534.
    :type frontend_port: int
    :param backend_port: The port used for the internal endpoint. Acceptable
    values range from 1 to 65535.
    :type backend_port: int
    :param idle_timeout_in_minutes: The timeout for the TCP idle connection.
    The value can be set between 4 and 30 minutes. The default value is 4
    minutes. This element is only used when the protocol is set to TCP.
    :type idle_timeout_in_minutes: int
    :param enable_floating_ip: Configures a virtual machine's endpoint for the
    floating IP capability required to configure a SQL AlwaysOn Availability
    Group. This setting is required when using the SQL AlwaysOn Availability
    Groups in SQL server. This setting can't be changed after you create the
    endpoint.
    :type enable_floating_ip: bool
    :param provisioning_state: Gets the provisioning state of the public IP
    resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    :param name: Gets name of the resource that is unique within a resource
    group. This name can be used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource
    is updated.
    :type etag: str
    """

    _validation = {
        'backend_ip_configuration': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'frontend_ip_configuration': {'key': 'properties.frontendIPConfiguration', 'type': 'SubResource'},
        'backend_ip_configuration': {'key': 'properties.backendIPConfiguration', 'type': 'NetworkInterfaceIPConfiguration'},
        'protocol': {'key': 'properties.protocol', 'type': 'str'},
        'frontend_port': {'key': 'properties.frontendPort', 'type': 'int'},
        'backend_port': {'key': 'properties.backendPort', 'type': 'int'},
        'idle_timeout_in_minutes': {'key': 'properties.idleTimeoutInMinutes', 'type': 'int'},
        'enable_floating_ip': {'key': 'properties.enableFloatingIP', 'type': 'bool'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }

    # Caller-settable attributes copied verbatim from kwargs in __init__.
    _SETTABLE_ATTRS = (
        'frontend_ip_configuration',
        'protocol',
        'frontend_port',
        'backend_port',
        'idle_timeout_in_minutes',
        'enable_floating_ip',
        'provisioning_state',
        'name',
        'etag',
    )

    def __init__(self, **kwargs):
        super(InboundNatRule, self).__init__(**kwargs)
        # Copy each optional keyword argument onto the instance, defaulting
        # to None when absent.
        for attr in self._SETTABLE_ATTRS:
            setattr(self, attr, kwargs.get(attr, None))
        # Server-populated and read-only (see _validation); never taken
        # from kwargs.
        self.backend_ip_configuration = None
| {
"content_hash": "28b7ccc0efd0e8d49ea7013cee4681cc",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 124,
"avg_line_length": 51.02469135802469,
"alnum_prop": 0.6803774497943382,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "a83812a22842f1c0ba7e9e462c34aa03d8d90133",
"size": "4607",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2016_12_01/models/inbound_nat_rule.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from contextlib import contextmanager
from pants.backend.jvm.targets.jvm_binary import JvmBinary
from pants.backend.jvm.tasks.jar_task import JarBuilderTask
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnitLabel
def is_jvm_binary(target):
  """Return True iff ``target`` is a JvmBinary target."""
  return isinstance(target, JvmBinary)
def is_java_library(target):
  """Return whether ``target`` owns any '.java' sources (delegates to target.has_sources)."""
  return target.has_sources('.java')
def is_scala_library(target):
  """Return whether ``target`` owns any '.scala' sources (delegates to target.has_sources)."""
  return target.has_sources('.scala')
def is_jvm_library(target):
  """Return whether ``target`` should be jarred by JarCreate: a java or scala
  source owner, or a jvm binary that carries resources.

  Note: returns the first truthy operand of the or-chain, not necessarily a
  bool -- callers use it only as a predicate (see JarCreate.execute).
  """
  return (is_java_library(target)
          or is_scala_library(target)
          or (is_jvm_binary(target) and target.has_resources))
class JarCreate(JarBuilderTask):
  """Jars jvm libraries and optionally their sources and their docs."""

  @classmethod
  def register_options(cls, register):
    # --compressed is fingerprinted: changing it must invalidate cached jars.
    super(JarCreate, cls).register_options(register)
    register('--compressed', default=True, type=bool,
             fingerprint=True,
             help='Create compressed jars.')

  @classmethod
  def product_types(cls):
    # This task produces the 'jars' product consumed by downstream tasks.
    return ['jars']

  @classmethod
  def prepare(cls, options, round_manager):
    super(JarCreate, cls).prepare(options, round_manager)
    cls.JarBuilder.prepare(round_manager)

  def __init__(self, *args, **kwargs):
    super(JarCreate, self).__init__(*args, **kwargs)
    self.compressed = self.get_options().compressed
    # Maps output jar path -> target, to detect two targets claiming the
    # same jar file name (see create_jar).
    self._jars = {}

  @property
  def cache_target_dirs(self):
    return True

  def execute(self):
    """Build one <target name>.jar per invalid jvm library target and
    register every jar (fresh or cached) under the 'jars' product."""
    # NB: Invalidating dependents transitively is more than is strictly necessary, but
    # we know that JarBuilderTask touches (at least) the direct dependencies of targets (in
    # the case of resources). One of these tasks could implement an FingerprintStrategy that
    # would attempt to hash the relevant dependencies, but that is really error prone, and
    # this task is more than fast enough to re-run (JarTool "copies" pre-zipped data from input
    # zip/jar files).
    with self.invalidated(self.context.targets(is_jvm_library),
                          invalidate_dependents=True) as invalidation_check:
      with self.context.new_workunit(name='jar-create', labels=[WorkUnitLabel.MULTITOOL]):
        jar_mapping = self.context.products.get('jars')

        for vt in invalidation_check.all_vts:
          jar_name = vt.target.name + '.jar'
          jar_path = os.path.join(vt.results_dir, jar_name)

          # Closure over the current vt/jar_name; only ever called within
          # this iteration, so the late binding is safe.
          def add_jar_to_products():
            jar_mapping.add(vt.target, vt.results_dir).append(jar_name)

          if vt.valid:
            # Cached result: only register it if the jar actually exists.
            if os.path.exists(jar_path):
              add_jar_to_products()
          else:
            with self.create_jar(vt.target, jar_path) as jarfile:
              with self.create_jar_builder(jarfile) as jar_builder:
                if jar_builder.add_target(vt.target):
                  add_jar_to_products()

  @contextmanager
  def create_jar(self, target, path):
    """Open ``path`` for writing as a jar for ``target``, aborting if a
    different target already claimed the same output path."""
    existing = self._jars.setdefault(path, target)
    if target != existing:
      raise TaskError(
        'Duplicate name: target {} tried to write {} already mapped to target {}'
        .format(target, path, existing))
    # Redundant with the setdefault above (which already stored the mapping),
    # but kept byte-identical.
    self._jars[path] = target
    with self.open_jar(path, overwrite=True, compressed=self.compressed) as jar:
      yield jar
| {
"content_hash": "7375a7bf7cb439ab574680dbfa56bde5",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 95,
"avg_line_length": 34.541666666666664,
"alnum_prop": 0.6776236429433052,
"repo_name": "twitter/pants",
"id": "bb227f74bc463982c7aa56b52672328b3e7e641b",
"size": "3463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/pants/backend/jvm/tasks/jar_create.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "655"
},
{
"name": "C++",
"bytes": "2010"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Dockerfile",
"bytes": "5639"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "2765"
},
{
"name": "HTML",
"bytes": "85294"
},
{
"name": "Java",
"bytes": "498956"
},
{
"name": "JavaScript",
"bytes": "22906"
},
{
"name": "Python",
"bytes": "6700799"
},
{
"name": "Rust",
"bytes": "765598"
},
{
"name": "Scala",
"bytes": "89346"
},
{
"name": "Shell",
"bytes": "94395"
},
{
"name": "Thrift",
"bytes": "2953"
}
],
"symlink_target": ""
} |
import datetime
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.contrib import admin
from dox.models import Page, PageStatus, File
from dox.forms import PageForm
# https://bitbucket.org/codekoala/django-articles/src/fc6a1ae96dc8/articles/admin.py
#--------------------------------------------------------------------------
#
# Model inlines:
#
#--------------------------------------------------------------------------
class FileInline(admin.TabularInline):
    """Tabular inline for attaching File rows to a parent change form."""
    model = File
    fields = ('file', 'caption')
    extra = 1
#--------------------------------------------------------------------------
#
# Admin models:
#
#--------------------------------------------------------------------------
class PageAdmin(admin.ModelAdmin):
    """Admin configuration for Page: grouped fieldsets, bulk status actions,
    and per-user queryset restriction for non-superusers."""
    #----------------------------------
    # Fields:
    #----------------------------------
    fieldsets = [
        (None, {
            'fields': [
                'name',
                'url',
                'template_name',
                'status',
            ],
        }),
        ('Scheduling', {
            'fields': [
                'is_active',
                'publish_date',
                'expiration_date',
            ],
        }),
        ('Head', {
            'fields': [
                'head_title',
                'head_title_extra',
                'head_description',
                'head_keywords',
                'head_extra',
            ],
        }),
        ('Body', {
            'fields': [
                'title',
                'description_short',
                'description_long',
                'content',
                'extra',
            ],
        }),
        ('Advanced', {
            'fields': [
                'login_required',
                'author',
                'notes',
            ], 'classes': ['collapse'],
        }),
    ]
    #----------------------------------
    # Forms:
    #----------------------------------
    form = PageForm
    #----------------------------------
    # Inlines:
    #----------------------------------
    inlines = [
        FileInline,
    ]
    #----------------------------------
    # Change lists:
    #----------------------------------
    list_display = ('name', 'url', 'template_name', 'status', 'author', 'publish_date', 'expiration_date', 'is_active',)
    list_display_links = ('name', 'status', 'is_active', 'publish_date', 'expiration_date',)
    list_editable = ('url', 'template_name', 'author',)
    list_filter = ('author', 'status', 'is_active', 'publish_date', 'expiration_date',)
    ordering = ('name',)
    date_hierarchy = 'publish_date'
    search_fields = (
        'notes',
        'name',
        'url',
        'title',
        'content',
        'template_name',
        'head_title',
        'head_title_extra',
        'head_description',
        'head_keywords',
        'head_extra',
        'description_short',
        'description_long',
        'extra',
    )
    actions = ['mark_active', 'mark_inactive',]
    actions_on_top = True
    actions_on_bottom = True
    actions_selection_counter = True
    #----------------------------------
    # Change list actions:
    #----------------------------------
    def mark_active(self, request, queryset):
        """Bulk action: activate the selected pages."""
        queryset.update(is_active=True)
    # NOTE(review): "select" likely meant "selected" in these labels — user-facing
    # strings left untouched here.
    mark_active.short_description = _(u'Mark select articles as active')
    def mark_inactive(self, request, queryset):
        """Bulk action: deactivate the selected pages."""
        queryset.update(is_active=False)
    mark_inactive.short_description = _(u'Mark select articles as inactive')
    def get_actions(self, request):
        """Extend the static actions with one 'set status' action per PageStatus row.

        Queries PageStatus on every call, so newly added statuses appear
        without a restart.
        """
        actions = super(PageAdmin, self).get_actions(request)
        # Factory binds name/status per iteration, avoiding the
        # late-binding-closure pitfall.
        def dynamic_status(name, status):
            def status_func(self, request, queryset):
                queryset.update(status=status)
            status_func.__name__ = name
            status_func.short_description = _(u'Set status of selected to "%s"' % status)
            return status_func
        for status in PageStatus.objects.all():
            name = 'mark_status_%i' % status.id
            actions[name] = (dynamic_status(name, status), name, _(u'Set status of selected to "%s"' % status))
        return actions
    #----------------------------------
    # Change forms:
    #----------------------------------
    save_on_top = True
    save_as = True
    #----------------------------------
    # Methods:
    #----------------------------------
    # NOTE(review): in modern Django this hook is named get_queryset; this code
    # targets the older queryset() API — confirm against the project's Django version.
    def queryset(self, request):
        """
        Limit the list of articles to article posted by this user unless they're a superuser.
        """
        if request.user.is_superuser:
            return self.model._default_manager.all()
        else:
            return self.model._default_manager.filter(author=request.user)
    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        """
        Restrict the list of authors to Super Users and Staff only.
        """
        if db_field.name == 'author':
            '''
            Query filters out DTI Django Users and alphabetizes result.
            '''
            kwargs['queryset'] = User.objects.filter(is_staff=True, email__contains='registerguard.com').order_by('username')
        return super(PageAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
class PageStatusAdmin(admin.ModelAdmin):
    """Minimal admin for PageStatus: name listing with live-flag filter."""
    #----------------------------------
    # Change lists:
    #----------------------------------
    list_display = ('name', 'is_live')
    list_filter = ('is_live',)
    search_fields = ('name',)
class FileAdmin(admin.ModelAdmin):
    # Default ModelAdmin behavior is sufficient for File objects.
    pass
#--------------------------------------------------------------------------
#
# Registrations:
#
#--------------------------------------------------------------------------
# Attach the admin classes above to Django's default admin site.
admin.site.register(Page, PageAdmin)
admin.site.register(PageStatus, PageStatusAdmin)
admin.site.register(File, FileAdmin)
"content_hash": "c04da6d067c7aa863ee29e139507006d",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 126,
"avg_line_length": 28.626168224299064,
"alnum_prop": 0.4286647078028077,
"repo_name": "mhulse/django-dox",
"id": "530b5e17fadf0a662e04ab7d5d9b1aa08de21db6",
"size": "6126",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dox/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "52"
},
{
"name": "JavaScript",
"bytes": "45"
},
{
"name": "Python",
"bytes": "21455"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import autoslug.fields
class Migration(migrations.Migration):
    """Auto-generated migration: make Goal.slug a non-editable AutoSlugField."""
    dependencies = [
        ('momentum', '0006_auto_20150315_1329'),
    ]
    operations = [
        migrations.AlterField(
            model_name='goal',
            name='slug',
            field=autoslug.fields.AutoSlugField(editable=False),
        ),
    ]
| {
"content_hash": "c994b6df3d3699592c8c638ad314621a",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 64,
"avg_line_length": 21.68421052631579,
"alnum_prop": 0.616504854368932,
"repo_name": "mod2/momentum",
"id": "dd3605842ed3a9372a7d7fac2e647187587e4dad",
"size": "436",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "momentum/migrations/0007_auto_20150315_1331.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6918"
},
{
"name": "HTML",
"bytes": "15738"
},
{
"name": "JavaScript",
"bytes": "6934"
},
{
"name": "Python",
"bytes": "48321"
}
],
"symlink_target": ""
} |
"""Options manager for :class:`~diofant.polys.polytools.Poly` and public API functions."""
from __future__ import annotations
import re
from ..core import Basic, I
from ..core.sympify import sympify
from ..utilities import has_dups, numbered_symbols, topological_sort
from .polyerrors import FlagError, GeneratorsError, OptionError
__all__ = 'Options', 'Order'
class Option:
    """Base class for all kinds of options."""
    # Keyword name under which the option is looked up, set by subclasses.
    option: str
    is_Flag = False
    # Dependency metadata consumed by Options._init_dependencies_order and
    # the excludes check in Options.__init__.
    requires: list[str] = []
    excludes: list[str] = []
    after: list[str] = []
    before: list[str] = []
    @classmethod
    def default(cls):
        # None means "no default value"; subclasses override.
        return
    @classmethod
    def preprocess(cls, option):
        return  # pragma: no cover
    @classmethod
    def postprocess(cls, options):
        return
class Flag(Option):
    """Base class for all kinds of flags."""
    is_Flag = True
class BooleanOption(Option):
    """An option that must have a boolean value or equivalent assigned."""
    @classmethod
    def preprocess(cls, option):
        # Equality-based membership: 1 and 0 are accepted as True/False too.
        if option in [True, False]:
            return bool(option)
        else:
            raise OptionError(f"'{cls.option}' must have a boolean value "
                              f'assigned, got {option}')
class OptionType(type):
    """Metaclass that registers every option class on :class:`Options`."""
    def __init__(cls, *args, **kwargs):
        super().__init__(cls)
        @property
        def getter(a):
            # Fall back to the class default when the option was not supplied.
            try:
                return a[cls.option]
            except KeyError:
                return cls.default()
        # Expose the option as an attribute of Options and record the class
        # in the global registry used for preprocessing and ordering.
        setattr(Options, cls.option, getter)
        Options.__options__[cls.option] = cls
class Options(dict):
    """
    Options manager for polynomial manipulation module.
    Examples
    ========
    >>> Options((x, y, z), {'domain': 'ZZ'})
    {'auto': False, 'domain': ZZ, 'gens': (x, y, z)}
    >>> build_options((x, y, z), {'domain': 'ZZ'})
    {'auto': False, 'domain': ZZ, 'gens': (x, y, z)}
    **Options**
    * Expand --- boolean option
    * Gens --- option
    * Wrt --- option
    * Sort --- option
    * Order --- option
    * Field --- boolean option
    * Greedy --- boolean option
    * Domain --- option
    * Split --- boolean option
    * Gaussian --- boolean option
    * Extension --- option
    * Modulus --- option
    * Symmetric --- boolean option
    * Strict --- boolean option
    **Flags**
    * Auto --- boolean flag
    * Frac --- boolean flag
    * Formal --- boolean flag
    * Polys --- boolean flag
    * Include --- boolean flag
    * All --- boolean flag
    * Gen --- flag
    """
    # Processing order of the registered options (resolved lazily by
    # _init_dependencies_order from each option's after/before lists).
    __order__: list[str] | None = None
    # option name -> Option subclass, populated by the OptionType metaclass.
    __options__: dict[str, type[Option]] = {}
    def __init__(self, gens, args, flags=None, strict=False):
        dict.__init__(self)
        if gens and args.get('gens', ()):
            raise OptionError("both '*gens' and keyword "
                              "argument 'gens' supplied")
        if gens:
            args = dict(args)
            args['gens'] = gens
        defaults = args.pop('defaults', {})
        def preprocess_options(args):
            # Validate each keyword against the registry and store its
            # preprocessed value; flags may be disallowed in strict mode.
            for option, value in args.items():
                try:
                    cls = self.__options__[option]
                except KeyError as exc:
                    raise OptionError(f"'{option}' is not a "
                                      'valid option') from exc
                if issubclass(cls, Flag):
                    if strict and (flags is None or option not in flags):
                        raise OptionError(f"'{option}' flag is not "
                                          'allowed in this context')
                if value is not None:
                    self[option] = cls.preprocess(value)
        preprocess_options(args)
        # Drop defaults that were set explicitly or that conflict with an
        # already-set option via its excludes list.
        for key in dict(defaults):
            if key in self:
                del defaults[key]
            else:
                for option in self:
                    cls = self.__options__[option]
                    if key in cls.excludes:
                        del defaults[key]
                        break
        preprocess_options(defaults)
        # Enforce mutual exclusion between the options actually present.
        for option in self:
            cls = self.__options__[option]
            for exclude_option in cls.excludes:
                if self.get(exclude_option) is not None:
                    raise OptionError(f"'{option}' option is not allowed together with '{exclude_option}'")
        # Postprocess in dependency order so derived options (e.g. domain
        # from modulus) are computed after their prerequisites.
        for option in self.__order__:  # pylint: disable=not-an-iterable
            self.__options__[option].postprocess(self)
    @classmethod
    def _init_dependencies_order(cls):
        """Resolve the order of options' processing."""
        if cls.__order__ is None:
            vertices, edges = [], set()
            for name, option in cls.__options__.items():
                vertices.append(name)
                for _name in option.after:
                    edges.add((_name, name))
                for _name in option.before:
                    edges.add((name, _name))
            try:
                cls.__order__ = topological_sort((vertices, list(edges)))
            except ValueError as exc:
                raise RuntimeError('cycle detected in diofant.polys'
                                   ' options framework') from exc
    def clone(self, updates={}):
        """Clone ``self`` and update specified options."""
        # NB: bypasses __init__ intentionally — no re-validation on clone.
        obj = dict.__new__(self.__class__)
        for option, value in self.items():
            obj[option] = value
        for option, value in updates.items():
            obj[option] = value
        return obj
    def __setattr__(self, attr, value):
        # Registered options are stored as dict entries, not attributes.
        if attr in self.__options__:
            self[attr] = value
        else:
            super().__setattr__(attr, value)
    @property
    def args(self):
        # Everything set except flags and the generators themselves.
        args = {}
        for option, value in self.items():
            if value is not None and option != 'gens':
                cls = self.__options__[option]
                if not issubclass(cls, Flag):
                    args[option] = value
        return args
    @property
    def options(self):
        # All non-flag options, including defaults for unset ones.
        options = {}
        for option, cls in self.__options__.items():
            if not issubclass(cls, Flag):
                options[option] = getattr(self, option)
        return options
    @property
    def flags(self):
        # All flags, including defaults for unset ones.
        flags = {}
        for option, cls in self.__options__.items():
            if issubclass(cls, Flag):
                flags[option] = getattr(self, option)
        return flags
class Expand(BooleanOption, metaclass=OptionType):
    """``expand`` option to polynomial manipulation functions."""
    option = 'expand'
    @classmethod
    def default(cls):
        # Input expressions are expanded by default.
        return True
class Gens(Option, metaclass=OptionType):
    """``gens`` option to polynomial manipulation functions."""
    option = 'gens'
    @classmethod
    def default(cls):
        return ()
    @classmethod
    def preprocess(cls, option):
        # A single expression is promoted to a 1-tuple of generators.
        if isinstance(option, Basic):
            option = option,
        if option == (None,):
            return ()
        elif has_dups(option):
            raise GeneratorsError(f'duplicated generators: {option}')
        elif any(gen.is_commutative is False for gen in option):
            raise GeneratorsError(f'non-commutative generators: {option}')
        else:
            return tuple(option)
class Wrt(Option, metaclass=OptionType):
    """``wrt`` option to polynomial manipulation functions."""
    option = 'wrt'
    # Splits on commas (with optional surrounding spaces) or bare whitespace.
    _re_split = re.compile(r'\s*,\s*|\s+')
    @classmethod
    def preprocess(cls, option):
        if isinstance(option, Basic):
            return [str(option)]
        elif isinstance(option, str):
            option = option.strip()
            # A trailing comma means a parameter was omitted, e.g. 'x,'.
            if option.endswith(','):
                raise OptionError('Bad input: missing parameter.')
            if not option:
                return []
            return list(cls._re_split.split(option))
        elif hasattr(option, '__getitem__'):
            return list(map(str, option))
        else:
            raise OptionError("invalid argument for 'wrt' option")
class Sort(Option, metaclass=OptionType):
    """``sort`` option to polynomial manipulation functions."""
    option = 'sort'
    @classmethod
    def default(cls):
        return []
    @classmethod
    def preprocess(cls, option):
        # 'x > y > z' style strings become ['x', 'y', 'z'].
        if isinstance(option, str):
            return [gen.strip() for gen in option.split('>')]
        elif hasattr(option, '__getitem__'):
            return list(map(str, option))
        else:
            raise OptionError("invalid argument for 'sort' option")
class Order(Option, metaclass=OptionType):
    """``order`` option to polynomial manipulation functions."""
    option = 'order'
    @classmethod
    def default(cls):
        # Imported lazily to avoid a circular import at module load time.
        from .orderings import lex
        return lex
    @classmethod
    def preprocess(cls, option):
        from .orderings import monomial_key
        return monomial_key(option)
class Field(BooleanOption, metaclass=OptionType):
    """``field`` option to polynomial manipulation functions."""
    option = 'field'
    excludes = ['domain', 'split', 'gaussian']
class Greedy(BooleanOption, metaclass=OptionType):
    """``greedy`` option to polynomial manipulation functions."""
    option = 'greedy'
    excludes = ['domain', 'split', 'gaussian', 'extension', 'modulus']
class Composite(BooleanOption, metaclass=OptionType):
    """``composite`` option to polynomial manipulation functions."""
    option = 'composite'
    @classmethod
    def default(cls):
        # No fixed default: None leaves the choice to later processing.
        return
    excludes = ['domain', 'split', 'gaussian', 'modulus']
class Domain(Option, metaclass=OptionType):
    """``domain`` option to polynomial manipulation functions."""
    option = 'domain'
    excludes = ['field', 'greedy', 'split', 'gaussian', 'extension']
    # Generators must be known before the domain can be cross-checked.
    after = ['gens']
    # Textual domain specifications, e.g. 'RR_53', 'FF(7)', 'QQ[x,y]',
    # 'ZZ(x)', 'QQ<sqrt(2)>'.
    _re_realfield = re.compile(r'^(R|RR)(_(\d+))?$')
    _re_complexfield = re.compile(r'^(C|CC)(_(\d+))?$')
    _re_finitefield = re.compile(r'^(FF|GF)\((\d+)\)$')
    _re_polynomial = re.compile(r'^(Z|ZZ|Q|QQ)\[(.+)\]$')
    _re_fraction = re.compile(r'^(Z|ZZ|Q|QQ)\((.+)\)$')
    _re_algebraic = re.compile(r'^(Q|QQ)\<(.+)\>$')
    @classmethod
    def preprocess(cls, option):
        """Accept a Domain instance as-is, or parse a string specification."""
        from .. import domains
        if isinstance(option, domains.Domain):
            return option
        elif isinstance(option, str):
            if option in ['Z', 'ZZ']:
                return domains.ZZ
            if option in ['Q', 'QQ']:
                return domains.QQ
            if option == 'EX':
                return domains.EX
            r = cls._re_realfield.match(option)
            if r is not None:
                _, _, prec = r.groups()
                if prec is None:
                    return domains.RR
                else:
                    return domains.RealField(int(prec))
            r = cls._re_complexfield.match(option)
            if r is not None:
                _, _, prec = r.groups()
                if prec is None:
                    return domains.CC
                else:
                    return domains.ComplexField(int(prec))
            r = cls._re_finitefield.match(option)
            if r is not None:
                return domains.FF(int(r.groups()[1]))
            r = cls._re_polynomial.match(option)
            if r is not None:
                # Polynomial ring over ZZ or QQ in the listed generators.
                ground, gens = r.groups()
                gens = list(map(sympify, gens.split(',')))
                if ground in ['Z', 'ZZ']:
                    return domains.ZZ.inject(*gens)
                else:
                    return domains.QQ.inject(*gens)
            r = cls._re_fraction.match(option)
            if r is not None:
                # Rational function field over ZZ or QQ.
                ground, gens = r.groups()
                gens = list(map(sympify, gens.split(',')))
                if ground in ['Z', 'ZZ']:
                    return domains.ZZ.inject(*gens).field
                else:
                    return domains.QQ.inject(*gens).field
            r = cls._re_algebraic.match(option)
            if r is not None:
                gens = list(map(sympify, r.groups()[1].split(',')))
                return domains.QQ.algebraic_field(*gens)
        # Reached for non-Domain, non-str input and for unparsable strings.
        raise OptionError('expected a valid domain specification, '
                          f'got {option}')
    @classmethod
    def postprocess(cls, options):
        from .. import domains
        from ..domains.compositedomain import CompositeDomain
        # A symbol may not appear both in the ground domain and in gens.
        if 'gens' in options and 'domain' in options and isinstance(options['domain'], CompositeDomain) and \
                (set(options['domain'].symbols) & set(options['gens'])):
            raise GeneratorsError('ground domain and generators '
                                  'interfere together')
        # The EX domain cannot infer generators, so they must be explicit.
        if ('gens' not in options or not options['gens']) and \
                'domain' in options and options['domain'] == domains.EX:
            raise GeneratorsError('you have to provide generators because'
                                  ' EX domain was requested')
class Split(BooleanOption, metaclass=OptionType):
    """``split`` option to polynomial manipulation functions."""
    option = 'split'
    excludes = ['field', 'greedy', 'domain', 'gaussian', 'extension', 'modulus']
    @classmethod
    def postprocess(cls, options):
        # Accepted syntactically, but there is no implementation yet.
        if 'split' in options:
            raise NotImplementedError("'split' option is not implemented yet")
class Gaussian(BooleanOption, metaclass=OptionType):
    """``gaussian`` option to polynomial manipulation functions."""
    option = 'gaussian'
    excludes = ['field', 'greedy', 'domain', 'split', 'extension', 'modulus']
    @classmethod
    def postprocess(cls, options):
        # gaussian=True is shorthand for extension={I}.
        if 'gaussian' in options and options['gaussian'] is True:
            options['extension'] = {I}
            Extension.postprocess(options)
class Extension(Option, metaclass=OptionType):
    """``extension`` option to polynomial manipulation functions."""
    option = 'extension'
    excludes = ['greedy', 'domain', 'split', 'gaussian', 'modulus']
    @classmethod
    def preprocess(cls, option):
        # 1/0 (or True/False) toggle the option without naming extension
        # elements; other values become a set of extension elements
        # (an empty iterable is normalized to None).
        if option == 1:
            return bool(option)
        elif option == 0:
            return bool(option)
        else:
            if not hasattr(option, '__iter__'):
                option = {option}
            else:
                if not option:
                    option = None
                else:
                    option = set(option)
            return option
    @classmethod
    def postprocess(cls, options):
        from .. import domains
        # A concrete element set fixes the ground domain to the matching
        # algebraic number field over QQ.
        if 'extension' in options and options['extension'] not in (True, False):
            options['domain'] = domains.QQ.algebraic_field(
                *options['extension'])
class Modulus(Option, metaclass=OptionType):
    """``modulus`` option to polynomial manipulation functions.

    A positive integer ``m`` selects computation over the finite
    field ``FF(m)`` (set in :meth:`postprocess`).
    """
    option = 'modulus'
    excludes = ['greedy', 'split', 'domain', 'gaussian', 'extension']
    @classmethod
    def preprocess(cls, option):
        """Validate and convert ``option`` to a plain positive ``int``."""
        option = sympify(option)
        if option.is_Integer and option > 0:
            return int(option)
        else:
            # Fixed error-message grammar: was "'modulus' must a positive integer".
            raise OptionError(
                f"'modulus' must be a positive integer, got {option}")
    @classmethod
    def postprocess(cls, options):
        """Derive the ground domain ``FF(modulus)`` from the validated modulus."""
        from .. import domains
        if 'modulus' in options:
            modulus = options['modulus']
            options['domain'] = domains.FF(modulus)
class Strict(BooleanOption, metaclass=OptionType):
    """``strict`` option to polynomial manipulation functions."""
    option = 'strict'
    @classmethod
    def default(cls):
        return True
class Auto(BooleanOption, Flag, metaclass=OptionType):
    """``auto`` flag to polynomial manipulation functions."""
    option = 'auto'
    # Must be postprocessed after the domain-determining options.
    after = ['field', 'domain', 'extension', 'gaussian']
    @classmethod
    def default(cls):
        return True
    @classmethod
    def postprocess(cls, options):
        # An explicit domain/field choice disables automatic domain
        # selection unless 'auto' itself was given explicitly.
        if ('domain' in options or 'field' in options) and 'auto' not in options:
            options['auto'] = False
class Frac(BooleanOption, Flag, metaclass=OptionType):
    """``frac`` option to polynomial manipulation functions."""
    option = 'frac'
    @classmethod
    def default(cls):
        return False
class Formal(BooleanOption, Flag, metaclass=OptionType):
    """``formal`` flag to polynomial manipulation functions."""
    option = 'formal'
    @classmethod
    def default(cls):
        return False
class Polys(BooleanOption, Flag, metaclass=OptionType):
    """``polys`` flag to polynomial manipulation functions."""
    # No default() override: inherits Option.default, which returns None.
    option = 'polys'
class Include(BooleanOption, Flag, metaclass=OptionType):
    """``include`` flag to polynomial manipulation functions."""
    option = 'include'
    @classmethod
    def default(cls):
        return False
class All(BooleanOption, Flag, metaclass=OptionType):
    """``all`` flag to polynomial manipulation functions."""
    option = 'all'
    @classmethod
    def default(cls):
        return False
class Gen(Flag, metaclass=OptionType):
    """``gen`` flag to polynomial manipulation functions."""
    option = 'gen'
    @classmethod
    def default(cls):
        return 0
    @classmethod
    def preprocess(cls, option):
        # Accepts either a symbolic generator or an index into the generators.
        if isinstance(option, (Basic, int)):
            return option
        else:
            raise OptionError("invalid argument for 'gen' option")
class Symbols(Flag, metaclass=OptionType):
    """``symbols`` flag to polynomial manipulation functions."""
    option = 'symbols'
    @classmethod
    def default(cls):
        # Infinite stream of fresh symbols s1, s2, s3, ...
        return numbered_symbols('s', start=1)
    @classmethod
    def preprocess(cls, option):
        if hasattr(option, '__iter__'):
            return iter(option)
        else:
            raise OptionError('expected an iterator or '
                              f'iterable container, got {option}')
class Method(Flag, metaclass=OptionType):
    """``method`` flag to polynomial manipulation functions."""
    option = 'method'
    @classmethod
    def preprocess(cls, option):
        # Method names are case-insensitive; normalize to lowercase.
        if isinstance(option, str):
            return option.lower()
        else:
            raise OptionError(f'expected a string, got {option}')
def build_options(gens, args=None):
    """Construct options from keyword arguments or ... options."""
    if args is None:
        # Single-argument form: the sole positional is the args mapping.
        gens, args = (), gens
    # A lone {'opt': Options(...)} with no generators passes through as-is;
    # anything else goes through the Options constructor.
    if len(args) == 1 and 'opt' in args and not gens:
        return args['opt']
    return Options(gens, args)
def allowed_flags(args, flags):
    """
    Allow specified flags to be used in the given context.
    Examples
    ========
    >>> allowed_flags({'domain': ZZ}, [])
    >>> allowed_flags({'domain': ZZ, 'frac': True}, [])
    Traceback (most recent call last):
    ...
    FlagError: 'frac' flag is not allowed in this context
    >>> allowed_flags({'domain': ZZ, 'frac': True}, ['frac'])
    """
    permitted = set(flags)
    for name in args:
        try:
            option_cls = Options.__options__[name]
        except KeyError as exc:
            raise OptionError(f"'{name}' is not a valid option") from exc
        if option_cls.is_Flag and name not in permitted:
            raise FlagError(f"'{name}' flag is not allowed "
                            'in this context')
def set_defaults(options, **defaults):
    """Update options with default values."""
    # If defaults were already attached, return the mapping untouched.
    if 'defaults' in options:
        return options
    updated = dict(options)
    updated['defaults'] = defaults
    return updated
# Resolve option processing order now that all Option subclasses are registered.
Options._init_dependencies_order()
| {
"content_hash": "9ad6b5e109973941cc5f6bd0dedee57f",
"timestamp": "",
"source": "github",
"line_count": 724,
"max_line_length": 109,
"avg_line_length": 26.97099447513812,
"alnum_prop": 0.5562042300404568,
"repo_name": "diofant/diofant",
"id": "2c77391895ca9e1700cffe8d14859589bb462031",
"size": "19527",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "diofant/polys/polyoptions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "9063539"
}
],
"symlink_target": ""
} |
class RdopkgException(Exception):
    """Base exception whose subclasses define ``msg_fmt`` with %(name)s placeholders.

    The message is rendered as ``msg_fmt % kwargs``; if that fails (kwargs
    don't match the placeholders), the raw template is used instead.
    """
    msg_fmt = "An unknown error occurred"
    def __init__(self, msg=None, **kwargs):
        self.kwargs = kwargs
        if not msg:
            try:
                msg = self.msg_fmt % kwargs
            except Exception:
                # kwargs doesn't match those in message.
                # Returning this is still better than nothing.
                # Bug fix: the original assigned the fallback to an unused
                # local 'message', leaving msg as None (str(exc) == 'None').
                msg = self.msg_fmt
        super(RdopkgException, self).__init__(msg)
class CommandFailed(RdopkgException):
    """An external command returned failure; 'cmd' kwarg names it."""
    msg_fmt = "Command failed: %(cmd)s"
class SpecFileNotFound(RdopkgException):
    """No .spec file was found where one was expected."""
    msg_fmt = "No .spec files found."
class IncompleteChangelog(RdopkgException):
    """The %%changelog section lacks a description of changes."""
    msg_fmt = "Description of changes is missing in %%changelog."
class MultipleSpecFilesFound(RdopkgException):
    """More than one .spec file was found; exactly one is required."""
    msg_fmt = "Multiple .spec files found. Expected only one."
class SpecFileParseError(RdopkgException):
    """A .spec file could not be parsed; takes 'spec_fn' and 'error' kwargs."""
    msg_fmt = "Error parsing .spec file '%(spec_fn)s': %(error)s"
class ModuleNotAvailable(RdopkgException):
    """A required Python module is not importable; 'module' kwarg names it."""
    msg_fmt = "Module %(module)s is not available. Unable to continue."
class RpmModuleNotAvailable(ModuleNotAvailable):
    msg_fmt = ("Module rpm is not available. It is required to parse .spec "
               "files. Pro tip: `yum install rpm-python`")
class KojiModuleNotAvailable(ModuleNotAvailable):
    msg_fmt = ("Module koji is not available. It is required by "
               "koji-scratch build source. Pro tip: `yum install koji`")
class InvalidAction(RdopkgException):
    """An unrecognized action name was requested; 'action' kwarg names it."""
    msg_fmt = "Invalid action: %(action)s"
| {
"content_hash": "7e236a78af57ba9ceac52134170e40ff",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 76,
"avg_line_length": 30.372549019607842,
"alnum_prop": 0.6610716591349257,
"repo_name": "yac/rdoupdate",
"id": "aa90c665e2e16a64795268dd16fe4a2c7e1b97a7",
"size": "1549",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rdoupdate/utils/exception.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "433"
},
{
"name": "Python",
"bytes": "48351"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from django.contrib.contenttypes import models as ct_models
from django.contrib.contenttypes.admin import GenericTabularInline
from django.forms import ModelForm
from waldur_core.core.admin import ReversionAdmin
from waldur_core.quotas import models, utils
class QuotaScopeClassListFilter(admin.SimpleListFilter):
    """Admin list filter narrowing quotas by the content type of their scope."""
    # Human-readable title
    title = 'Scope class'
    # Parameter for the filter that will be used in the URL query
    parameter_name = 'scope_class'
    def lookups(self, request, model_admin):
        """Return (content_type_id, model name) choices for every quota-aware model."""
        # Renamed local (was 'models') to stop shadowing the imported
        # waldur_core.quotas.models module within this method.
        quota_models = utils.get_models_with_quotas()
        return [(ct_models.ContentType.objects.get_for_model(m).id, m.__name__)
                for m in quota_models]
    def queryset(self, request, queryset):
        """Filter by the selected content type; unfiltered when nothing is selected."""
        content_type_id = self.value()
        if content_type_id:
            return queryset.filter(content_type_id=content_type_id)
        return queryset
class QuotaFieldTypeLimit(object):
    """Mixin sharing the quota field layout between admin views.

    ``quota_field_type`` renders the class name of the quota's field
    definition, or an empty string when the quota has no field.
    """
    fields = ('name', 'limit', 'usage', 'quota_field_type')
    readonly_fields = ('name', 'usage', 'quota_field_type')
    def quota_field_type(self, obj):
        field = obj.get_field()
        return field.__class__.__name__ if field else ''
class QuotaForm(ModelForm):
    """ModelForm for Quota exposing the name, limit and usage fields."""
    class Meta:
        model = models.Quota
        fields = ('name', 'limit', 'usage')
    # TODO: Temporarily disabled because it breaks validation
    # def __init__(self, *args, **kwargs):
    #     super(QuotaForm, self).__init__(*args, **kwargs)
    #
    #     if (self.instance
    #             and self._is_backend_quota_field(self.instance)
    #             and not settings.WALDUR_CORE['BACKEND_FIELDS_EDITABLE']):
    #         self.fields['limit'].widget = ReadonlyTextWidget()
    #
    # def _is_backend_quota_field(self, quota):
    #     if not quota.scope:
    #         return False
    #
    #     field = getattr(quota.scope.Quotas, quota.name)
    #     return field.is_backend
class QuotaAdmin(QuotaFieldTypeLimit, ReversionAdmin):
    # Top-level quota change list, filterable by name and by scope class.
    list_display = ['scope', 'name', 'limit', 'usage']
    list_filter = ['name', QuotaScopeClassListFilter]
class QuotaInline(QuotaFieldTypeLimit, GenericTabularInline):
    # Generic inline so quotas can be edited on their scope's admin page;
    # rows can be neither added (extra=0) nor deleted here.
    model = models.Quota
    form = QuotaForm
    extra = 0
    can_delete = False
admin.site.register(models.Quota, QuotaAdmin)
| {
"content_hash": "5946698d7c95fc31d19979a2f19a467e",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 96,
"avg_line_length": 31.52054794520548,
"alnum_prop": 0.6549326379834854,
"repo_name": "opennode/nodeconductor",
"id": "dc3d92b6a12c0cf2e06f50fd93ac41a3e226dfca",
"size": "2301",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "waldur_core/quotas/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1877"
},
{
"name": "HTML",
"bytes": "17528"
},
{
"name": "JavaScript",
"bytes": "248900"
},
{
"name": "Python",
"bytes": "1254720"
}
],
"symlink_target": ""
} |
"""
Example to show managing rule entities under a ServiceBus Subscription, including
- Create a rule
- Create a rule with sql filter
- Get rule properties and runtime information
- Update a rule
- Delete a rule
- List rules under the given ServiceBus Subscription
"""
import os
import uuid
from azure.servicebus.management import (
ServiceBusAdministrationClient,
SqlRuleFilter
)
CONNECTION_STR = os.environ['SERVICEBUS_CONNECTION_STR']
TOPIC_NAME = os.environ['SERVICEBUS_TOPIC_NAME']
SUBSCRIPTION_NAME = os.environ['SERVICEBUS_SUBSCRIPTION_NAME']
RULE_NAME = "sb_mgmt_rule" + str(uuid.uuid4())
RULE_WITH_SQL_FILTER_NAME = "sb_sql_rule" + str(uuid.uuid4())
def create_rule(servicebus_mgmt_client):
    """Create a plain rule, then a rule carrying a parametrized SQL filter."""
    print("-- Create Rule")
    servicebus_mgmt_client.create_rule(TOPIC_NAME, SUBSCRIPTION_NAME, RULE_NAME)
    print("Rule {} is created.".format(RULE_NAME))
    print("")
    print("-- Create Rule with SQL Filter")
    filter_parameters = {"@param1": "value", "@param2": 1}
    sql_filter_parametrized = SqlRuleFilter(
        "property1 = @param1 AND property2 = @param2",
        parameters=filter_parameters)
    servicebus_mgmt_client.create_rule(
        TOPIC_NAME, SUBSCRIPTION_NAME, RULE_WITH_SQL_FILTER_NAME,
        filter=sql_filter_parametrized)
    print("Rule {} is created.".format(RULE_WITH_SQL_FILTER_NAME))
    print("")
def delete_rule(servicebus_mgmt_client):
    """Delete both rules created by create_rule."""
    print("-- Delete Rule")
    for rule_name in (RULE_NAME, RULE_WITH_SQL_FILTER_NAME):
        servicebus_mgmt_client.delete_rule(TOPIC_NAME, SUBSCRIPTION_NAME, rule_name)
        print("Rule {} is deleted.".format(rule_name))
    print("")
def list_rules(servicebus_mgmt_client):
    """Print the name of every rule under the subscription."""
    print("-- List Rules")
    rules = servicebus_mgmt_client.list_rules(TOPIC_NAME, SUBSCRIPTION_NAME)
    for rule_properties in rules:
        print("Rule Name:", rule_properties.name)
    print("")
def get_and_update_rule(servicebus_mgmt_client):
    """Fetch the rule, then demonstrate both update styles (model vs kwargs)."""
    print("-- Get and Update Rule")
    rule_properties = servicebus_mgmt_client.get_rule(TOPIC_NAME, SUBSCRIPTION_NAME, RULE_NAME)
    print("Rule Name:", rule_properties.name)
    print("Please refer to RuleProperties for complete available properties.")
    print("")
    # Style 1: mutate the fetched model, then pass it back.
    rule_properties.filter = SqlRuleFilter(
        "property1 = @param1 AND property2 = @param2",
        parameters={"@param1": "value2", "@param2": 2})
    servicebus_mgmt_client.update_rule(TOPIC_NAME, SUBSCRIPTION_NAME, rule_properties)
    # Style 2: re-fetch and override a property via keyword argument.
    rule_properties = servicebus_mgmt_client.get_rule(TOPIC_NAME, SUBSCRIPTION_NAME, RULE_NAME)
    replacement_filter = SqlRuleFilter(
        "property1 = @param1 AND property2 = @param2",
        parameters={"@param1": "value3", "@param2": 3})
    servicebus_mgmt_client.update_rule(
        TOPIC_NAME, SUBSCRIPTION_NAME, rule_properties,
        filter=replacement_filter)
# Demo driver: create, list, inspect/update, then clean up the rules.
with ServiceBusAdministrationClient.from_connection_string(CONNECTION_STR) as servicebus_mgmt_client:
    create_rule(servicebus_mgmt_client)
    list_rules(servicebus_mgmt_client)
    get_and_update_rule(servicebus_mgmt_client)
    delete_rule(servicebus_mgmt_client)
| {
"content_hash": "b0cc44ee94a71ad853a3a1329d7245a6",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 128,
"avg_line_length": 34.58762886597938,
"alnum_prop": 0.6742175856929955,
"repo_name": "Azure/azure-sdk-for-python",
"id": "3f6721d71a9554debb8fa38b8f769ed48dc669f2",
"size": "3724",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/servicebus/azure-servicebus/samples/sync_samples/mgmt_rule.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
"""OAuth 2.0 Authentication"""
from hashlib import sha256
from urlparse import parse_qsl
from simplejson import dumps
from django.conf import settings
from django.http import HttpResponse
from oauth2app.exceptions import OAuth2Exception
from oauth2app.models import AccessToken, AccessRange, TimestampGenerator
from oauth2app.consts import REALM, AUTHENTICATION_METHOD, MAC, BEARER
class AuthenticationException(OAuth2Exception):
    """Base class for every error raised while authenticating a request."""
class InvalidRequest(AuthenticationException):
    """Raised for a malformed request: a required parameter is missing,
    an unsupported parameter or value is supplied, a parameter is
    repeated, or more than one token-delivery method is used."""
    error = 'invalid_request'
class InvalidToken(AuthenticationException):
    """Raised when the supplied access token is expired, revoked,
    malformed, or otherwise unusable."""
    error = 'invalid_token'
class InsufficientScope(AuthenticationException):
    """Raised when the access token does not carry the scope that this
    request requires."""
    error = 'insufficient_scope'
class UnvalidatedRequest(OAuth2Exception):
    """Raised when a property that needs a validated request is accessed
    before validate() has succeeded."""
class Authenticator(object):
    """Django HttpRequest authenticator.

    Checks a request for valid OAuth2 credentials and (optionally) scope.

    **Kwargs:**

    * *scope:* An iterable of oauth2app.models.AccessRange objects
      representing the scope the authenticator will authenticate.
      *Default None*
    * *authentication_method:* Accepted authentication methods. Possible
      values are: oauth2app.consts.MAC, oauth2app.consts.BEARER,
      oauth2app.consts.MAC | oauth2app.consts.BEARER.
      *Default oauth2app.consts.BEARER*
    """
    # Class-level defaults; validate() mutates these per-instance.
    valid = False
    access_token = None
    auth_type = None
    auth_value = None
    error = None
    attempted_validation = False

    def __init__(
            self,
            scope=None,
            authentication_method=AUTHENTICATION_METHOD):
        if authentication_method not in [BEARER, MAC, BEARER | MAC]:
            raise OAuth2Exception("Possible values for authentication_method"
                " are oauth2app.consts.MAC, oauth2app.consts.BEARER, "
                "oauth2app.consts.MAC | oauth2app.consts.BEARER")
        self.authentication_method = authentication_method
        # Normalize scope to a set of AccessRange keys (or None = any scope).
        if scope is None:
            self.authorized_scope = None
        elif isinstance(scope, AccessRange):
            self.authorized_scope = set([scope.key])
        else:
            self.authorized_scope = set([x.key for x in scope])

    def validate(self, request):
        """Validate the request. Raises an AuthenticationException if the
        request fails authentication.

        **Args:**

        * *request:* Django HttpRequest object.

        *Returns None*"""
        self.request = request
        self.bearer_token = request.REQUEST.get('bearer_token')
        # Prefer the Authorization header; fall back to an access_token
        # query parameter on GET requests.
        if "HTTP_AUTHORIZATION" in self.request.META:
            auth = self.request.META["HTTP_AUTHORIZATION"].split()
            self.auth_type = auth[0].lower()
            self.auth_value = " ".join(auth[1:]).strip()
        elif self.request.META.get('REQUEST_METHOD') == 'GET':
            self.auth_type = 'bearer'
            self.auth_value = request.REQUEST.get('access_token')
        self.request_hostname = self.request.META.get("REMOTE_HOST")
        self.request_port = self.request.META.get("SERVER_PORT")
        try:
            self._validate()
        except AuthenticationException as e:  # was py2-only "except X, e"
            # Remember the failure so error_response() can describe it.
            self.error = e
            raise e
        self.valid = True

    def _validate(self):
        """Validate the request credentials, scope, and token expiry."""
        # Check for Bearer or Mac authorization
        if self.auth_type in ["bearer", "mac"]:
            self.attempted_validation = True
            if self.auth_type == "bearer":
                self._validate_bearer(self.auth_value)
            elif self.auth_type == "mac":
                self._validate_mac(self.auth_value)
            self.valid = True
        # Check for posted/paramaterized bearer token.
        elif self.bearer_token is not None:
            self.attempted_validation = True
            self._validate_bearer(self.bearer_token)
            self.valid = True
            # NOTE(review): this early return skips the scope and expiry
            # checks below for parameter-supplied tokens, which looks like
            # a security gap — confirm intent before removing it.
            return
        else:
            raise InvalidRequest("Request authentication failed, no "
                "authentication credentials provided.")
        # Enforce the authenticator's required scope, if any.
        if self.authorized_scope is not None:
            token_scope = set([x.key for x in self.access_token.scope.all()])
            new_scope = self.authorized_scope - token_scope
            if len(new_scope) > 0:
                raise InsufficientScope(("Access token has insufficient "
                    "scope: %s") % ','.join(self.authorized_scope))
        # Reject expired tokens.
        now = TimestampGenerator()()
        if self.access_token.expire < now:
            raise InvalidToken("Token is expired")

    def _validate_bearer(self, token):
        """Validate Bearer token."""
        if self.authentication_method & BEARER == 0:
            raise InvalidToken("Bearer authentication is not supported.")
        try:
            self.access_token = AccessToken.objects.get(token=token)
        except AccessToken.DoesNotExist:
            if token:
                raise InvalidToken("Access token doesn't exist")
            else:
                raise InvalidToken("Access token was missing from the request")

    def _validate_mac(self, mac_header):
        """Validate MAC authentication. Not implemented."""
        if self.authentication_method & MAC == 0:
            raise InvalidToken("MAC authentication is not supported.")
        # Authorization header parameters arrive as 'k="v", k="v"'; turn
        # them into a dict of stripped key/value pairs.
        mac_header = parse_qsl(mac_header.replace(",", "&").replace('"', ''))
        mac_header = dict([(x[0].strip(), x[1].strip()) for x in mac_header])
        for parameter in ["id", "nonce", "mac"]:
            # BUG FIX: previously tested the literal string "parameter",
            # so missing required parameters were never detected.
            if parameter not in mac_header:
                raise InvalidToken("MAC Authorization header does not contain"
                    " required parameter '%s'" % parameter)
        if "bodyhash" in mac_header:
            bodyhash = mac_header["bodyhash"]
        else:
            bodyhash = ""
        if "ext" in mac_header:
            ext = mac_header["ext"]
        else:
            ext = ""
        if self.request_hostname is None:
            raise InvalidRequest("Request does not contain a hostname.")
        if self.request_port is None:
            raise InvalidRequest("Request does not contain a port.")
        nonce_timestamp, nonce_string = mac_header["nonce"].split(":")
        # Partial normalized-request MAC computation ("XXX" is a
        # placeholder for the request-URI); implementation is unfinished.
        mac = sha256("\n".join([
            mac_header["nonce"],  # The nonce value generated for the request
            self.request.method.upper(),  # The HTTP request method
            "XXX",  # The HTTP request-URI
            self.request_hostname,  # The hostname included in the HTTP request
            self.request_port,  # The port as included in the HTTP request
            bodyhash,
            ext])).hexdigest()
        raise NotImplementedError()
        # Todo:
        # 1. Recalculate the request body hash (if included in the request) as
        #    described in Section 3.2 and request MAC as described in
        #    Section 3.3 and compare the request MAC to the value received
        #    from the client via the "mac" attribute.
        # 2. Ensure that the combination of nonce and MAC key identifier
        #    received from the client has not been used before in a previous
        #    request (the server MAY reject requests with stale timestamps;
        #    the determination of staleness is left up to the server to
        #    define).
        # 3. Verify the scope and validity of the MAC credentials.

    def _get_user(self):
        """The user associated with the valid access token.

        *django.auth.User object*"""
        if not self.valid:
            raise UnvalidatedRequest("This request is invalid or has not "
                "been validated.")
        return self.access_token.user

    user = property(_get_user)

    def _get_scope(self):
        """The client scope associated with the valid access token.

        *QuerySet of AccessRange objects.*"""
        if not self.valid:
            raise UnvalidatedRequest("This request is invalid or has not "
                "been validated.")
        return self.access_token.scope.all()

    scope = property(_get_scope)

    def _get_client(self):
        """The client associated with the valid access token.

        *oauth2app.models.Client object*"""
        if not self.valid:
            raise UnvalidatedRequest("This request is invalid or has not "
                "been validated.")
        return self.access_token.client

    client = property(_get_client)

    def error_response(self,
            content='',
            mimetype=None,
            content_type=settings.DEFAULT_CONTENT_TYPE):
        """Error response generator. Returns a Django HttpResponse with status
        401 and the approproate headers set. See Django documentation for details.
        https://docs.djangoproject.com/en/dev/ref/request-response/#django.http.HttpResponse.__init__

        **Kwargs:**

        * *content:* See Django docs. *Default ''*
        * *mimetype:* See Django docs. *Default None*
        * *content_type:* See Django docs. *Default DEFAULT_CONTENT_TYPE*
        """
        response = HttpResponse(
            content=content,
            mimetype=mimetype,
            content_type=content_type)
        if not self.attempted_validation:
            # No credentials were presented at all: bare challenge.
            response['WWW-Authenticate'] = 'Bearer realm="%s"' % REALM
            response.status_code = 401
            return response
        else:
            # Describe the recorded failure per RFC 6750 WWW-Authenticate.
            if self.error is not None:
                error = getattr(self.error, "error", "invalid_request")
                error_description = self.error.message
            else:
                error = "invalid_request"
                error_description = "Invalid Request."
            header = [
                'Bearer realm="%s"' % REALM,
                'error="%s"' % error,
                'error_description="%s"' % error_description]
            if isinstance(self.error, InsufficientScope):
                header.append('scope=%s' % ' '.join(self.authorized_scope))
                response.status_code = 403
            elif isinstance(self.error, InvalidToken):
                response.status_code = 401
            elif isinstance(self.error, InvalidRequest):
                response.status_code = 400
            else:
                response.status_code = 401
            response['WWW-Authenticate'] = ', '.join(header)
            return response
class JSONAuthenticator(Authenticator):
    """Wraps Authenticator, adding JSONP callback support and JSON
    convenience responses.

    **Kwargs:**

    * *scope:* An iterable of oauth2app.models.AccessRange objects.
    """
    callback = None

    def __init__(self, scope=None):
        Authenticator.__init__(self, scope=scope)

    def validate(self, request):
        """Remember the JSONP callback parameter, then delegate."""
        self.callback = request.REQUEST.get('callback')
        return Authenticator.validate(self, request)

    def response(self, data):
        """Returns a HttpResponse object of JSON serialized data.

        **Args:**

        * *data:* Object to be JSON serialized and returned.
        """
        json_data = dumps(data)
        if self.callback is not None:
            # Wrap in the JSONP callback: callback({...});
            json_data = "%s(%s);" % (self.callback, json_data)
        response = HttpResponse(
            content=json_data,
            content_type='application/json')
        return response

    def error_response(self):
        """Returns a HttpResponse object of JSON error data."""
        if self.error is not None:
            content = dumps({
                "error": getattr(self.error, "error", "invalid_request"),
                "error_description": self.error.message})
        else:
            # BUG FIX: this branch previously assigned a plain dict
            # (missing dumps()), so the response body was a Python repr
            # rather than JSON.
            content = dumps({
                "error": "invalid_request",
                "error_description": "Invalid Request."})
        if self.callback is not None:
            content = "%s(%s);" % (self.callback, content)
        response = Authenticator.error_response(
            self,
            content=content,
            content_type='application/json')
        if self.callback is not None:
            # JSONP scripts only execute on HTTP 200.
            response.status_code = 200
        return response
| {
"content_hash": "69475b2c3f01151125b9e53205156ccd",
"timestamp": "",
"source": "github",
"line_count": 333,
"max_line_length": 101,
"avg_line_length": 37.74174174174174,
"alnum_prop": 0.6066995544239338,
"repo_name": "mliu7/django-oauth2app",
"id": "3045725b3cb933a8919c13cf1e4fdf62105ab03e",
"size": "12568",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "oauth2app/authenticate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "32230"
},
{
"name": "Python",
"bytes": "130581"
},
{
"name": "Shell",
"bytes": "101"
}
],
"symlink_target": ""
} |
import os

import pyntofdjango

# Absolute path of this build file; pyntofdjango uses it to locate the project.
MODULE_PATH = os.path.abspath(__file__)
# Must run before the pyntofdjango.tasks import below — the tasks are
# configured from the pod set up here.
pyntofdjango.setup_pod(MODULE_PATH)

from pynt import task
from pyntofdjango.tasks import python, pip, clean, delete_venv, create_venv, recreate_venv, manage, test_tox, \
    runserver, dumpdata, migrate, docs, venv_bin
from pyntofdjango import utils, project, paths
from pyntcontrib import safe_cd
@task()
def test_venv():
    """Run the andablog app tests and then the project tests on the venv."""
    with safe_cd('demo'):
        for manage_args in (('test', 'andablog'), ('test',)):
            project.execute_manage(*manage_args)
@task()
def loadalldatas():
    """Load every demo fixture, app by app, in dependency order."""
    for app_name in ('common', 'profiles', 'blog', 'democomments'):
        project.recursive_load(os.path.join(paths.project_paths.manage_root, app_name))
@task()
def reset_db():
    """Drop and recreate the development database without prompting."""
    # Delegates to django-extensions' reset_db management command.
    project.execute_manage('reset_db', '--noinput')
@task()
def rebuild_db():
    """Rebuild the database from scratch: wipe, migrate, load fixtures."""
    for step in (reset_db, migrate, loadalldatas):
        step()
@task()
def rundocserver():
    """Serve the docs with live rebuilds via sphinx-autobuild."""
    autobuild_cmd = ('sphinx-autobuild', '.', '_build/html')
    with safe_cd('docs'):
        project.venv_execute(*autobuild_cmd)
| {
"content_hash": "cf582d77dbe8b1aea2f5cca09f388dce",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 111,
"avg_line_length": 25.574468085106382,
"alnum_prop": 0.6697171381031614,
"repo_name": "WimpyAnalytics/django-andablog",
"id": "ef06c09d073838b19f404f3a7ae8d1c81bc4b829",
"size": "1220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "1068"
},
{
"name": "HTML",
"bytes": "5151"
},
{
"name": "Python",
"bytes": "31536"
}
],
"symlink_target": ""
} |
"""
identitycrisis.py
Created by Sandro Gauci on 2014-04-23.
Copyright (c) 2014 Enable Security ltd. All rights reserved.
Inspired from Chris John Riley's excellent UATester:
<http://blog.c22.cc/toolsscripts/>
"""
from difflib import SequenceMatcher
import threading
from burp import IBurpExtender
from burp import IContextMenuFactory
from burp import IScanIssue
from javax.swing import JMenuItem
from javax.swing import JMenu
from javax.swing import JFileChooser
from java.util import ArrayList
from java.io import PrintWriter
# Name shown in Burp's extension list and context menu.
EXTENSION_NAME = 'Identity Crisis (User-Agent Tester)'
__VERSION__ = 0.2
# When True, run_test runs synchronously instead of on a worker thread.
DEBUG = False
class Similarity:
    """Learn a similarity baseline ("norm") from repeated samples, then
    flag content whose match ratio falls below that baseline.

    Only a 1 KiB window of each sample is compared: the head by default,
    or the tail when ``lower=True``.
    """

    def __init__(self, initdata, lower=False):
        self.lower = lower
        self.initdata = self.getsection(initdata)
        # Start above any possible ratio so the first train() always lowers it.
        self.norm = 10.0

    def getsection(self, data):
        """Return the slice of *data* used for comparison."""
        if not self.lower:
            return data[:1024]
        # Tail mode is only meaningful when there is more than 1 KiB.
        return data[-1024:] if len(data) > 1024 else ''

    def train(self, newdata):
        """Lower the norm to the smallest ratio observed so far."""
        candidate = self.get_ratio(newdata)
        if candidate < self.norm:
            self.set_norm(candidate)

    def get_ratio(self, newdata):
        """Similarity ratio (0..1) between the baseline and *newdata*."""
        matcher = SequenceMatcher(lambda ch: ch == " ",
                                  self.initdata,
                                  self.getsection(newdata))
        return matcher.ratio()

    def set_norm(self, norm):
        self.norm = norm

    def is_different(self, newdata):
        """True when *newdata* matches the baseline worse than the norm."""
        return self.get_ratio(newdata) < self.norm
class BurpExtender(IBurpExtender, IContextMenuFactory):
    """Burp extension entry point: adds a context menu that replays the
    selected request(s) with many User-Agent values and raises a scan
    issue whenever the response differs from a trained baseline."""

    # Volatile response headers excluded from comparison, so routine churn
    # (dates, cookies, cache tags) is not mistaken for a UA-dependent change.
    banned_headers = ['expires', 'vtag', 'etag', 'date', 'time', 'set-cookie', 'x-transaction', 'x-cache', 'age']

    # Menu label -> list of User-Agent strings to try.
    useragents = dict()
    useragents['Desktop Web Browsers'] = \
        [
            "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)",
            "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0)",
            "Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)",
            "Mozilla/5.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727)",
            "Mozilla/4.0 (compatible;MSIE 5.5; Windows 98)",
            "Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2.3) Gecko/20100401 Firefox/4.0 (.NET CLR 3.5.30729)",
            "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100922 Firefox/4.0.1",
            "Mozilla/5.0 (Windows; U; Windows NT 5.2; rv:1.9.2) Gecko/20100101 Firefox/3.6",
            "Mozilla/5.0 (X11; U; SunOS sun4v; en-US; rv:1.8.1.3) Gecko/20070321 Firefox/2.0.0.3",
            "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7",
            "Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/525.13 (KHTML, like Gecko) Chrome/0.2.149.27 Safari/525.13",
            "Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/533.17.8 (KHTML, like Gecko) Version/5.0.1 Safari/533.17.8",
            "Opera/9.99 (Windows NT 5.1; U; pl) Presto/9.9.9",
            'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E;',
        ]
    useragents['Mobile Web Browsers'] = \
        [
            'Mozilla/5.0 (Linux; Android 4.4; Nexus 7 Build/KOT24) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.105 Safari/537.36',
            'Mozilla/5.0 (iPhone; U; CPU iPhone OS 3_0 like Mac OS X; en-us) AppleWebKit/528.18 (KHTML, like Gecko) Version/4.0 Mobile/7A341 Safari/528.16',
            'Nokia7650/1.0 Symbian-QP/6.1 Nokia/2.1',
        ]
    useragents['Crawlers and bots'] = \
        [
            "Googlebot/2.1 (+http://www.google.com/bot.html)",
            "Googlebot-Image/1.0",
            "Mediapartners-Google",
            "Mozilla/2.0 (compatible; Ask Jeeves)",
            "msnbot-Products/1.0 (+http://search.msn.com/msnbot.htm)",
            "mmcrawler",
            "TrackBack/1.02",
        ]
    useragents['Devices and non-browsers'] = \
        [
            "Windows-Media-Player/9.00.00.4503",
            "Mozilla/5.0 (PLAYSTATION 3; 2.00)",
            "wispr",
            "Wget 1.9cvs-stable",
            "Lynx (textmode)",
            'Mozilla/5.0',
        ]
    useragents['Attack strings'] = \
        [
            "<script>alert('123')</script>",
            "'",
            "' or 22=22'--",
            "%0d%0a",
            "../../../../../../etc/passwd",
            "../../../../../boot.ini",
            "Mozilla/4.75 (Nikto/2.01)",
            "curl/7.7.2 (powerpc-apple-darwin6.0) libcurl 7.7.2 (OpenSSL 0.9.6b)",
            "w3af.sourceforge.net",
            "HTTrack",
            ".nasl",
            "paros",
            "webinspect",
            "brutus",
            "java",
        ]

    def __init__(self, ):
        # menuitems maps JMenuItem -> its label (a useragents key).
        self.menuitems = dict()
        self.custom_file_text = 'Load custom user-agent strings from file ...'
        self.generate_menu_items()

    def generate_menu_items(self):
        """Build the menu entries: every UA category, an aggregated
        'All tests' entry, and the custom-file loader entry."""
        all_tests = 'All tests'
        if all_tests not in self.useragents.keys():
            # Aggregate every category's strings into one list.
            _tmparray = list()
            for v in self.useragents.values():
                _tmparray.extend(v)
            self.useragents[all_tests] = _tmparray
        if self.custom_file_text not in self.useragents.keys():
            # Placeholder; populated by get_custom_headers_file().
            self.useragents[self.custom_file_text] = []
        for k in self.useragents.keys():
            menuitem = JMenuItem(k, actionPerformed=self.menuItemClicked)
            self.menuitems[menuitem] = k

    def registerExtenderCallbacks(self, callbacks):
        """Burp entry point: stash callbacks/helpers, register the
        extension name and context-menu factory, set up stdout."""
        self.menuitems = dict()
        self._callbacks = callbacks
        self._helpers = callbacks.getHelpers()
        callbacks.setExtensionName(EXTENSION_NAME)
        callbacks.registerContextMenuFactory(self)
        self._contextMenuData = None
        # Auto-flushing writer onto Burp's extension output tab.
        self._stdout = PrintWriter(callbacks.getStdout(), True)
        self.generate_menu_items()
        return

    def createMenuItems(self, contextMenuInvocation):
        """IContextMenuFactory hook: remember the selected messages and
        return our submenu of test categories."""
        menuItemList = ArrayList()
        self._contextMenuData = contextMenuInvocation.getSelectedMessages()
        submenu = JMenu(EXTENSION_NAME)
        for menuitem in sorted(self.menuitems):
            submenu.add(menuitem)
        menuItemList.add(submenu)
        return menuItemList

    def menuItemClicked(self, event):
        """Menu handler: resolve the clicked label to a UA list and run
        the test (on a daemon thread unless DEBUG)."""
        if self._contextMenuData is None:
            return
        menutext = event.getSource().getText()
        if menutext == self.custom_file_text:
            # Populates useragents[custom_file_text] before the lookup below.
            self.get_custom_headers_file(event)
        useragents = self.useragents[menutext]
        if len(useragents) == 0:
            return
        if DEBUG:
            self.run_test(self._contextMenuData, useragents)
        else:
            t = threading.Thread(target=self.run_test, args=[self._contextMenuData, useragents])
            t.daemon = True
            t.start()

    def train(self, httpService, requestBytes):
        """Re-issue the request once and feed the response into all three
        similarity trainers (headers, upper body, lower body)."""
        httpRequestResponse = self._callbacks.makeHttpRequest(httpService, requestBytes)
        httpResponseBytes = self._helpers.bytesToString(httpRequestResponse.getResponse())
        bodyOffset = self._helpers.analyzeRequest(httpResponseBytes).getBodyOffset()
        httpResponseBody = self._helpers.bytesToString(httpResponseBytes[bodyOffset:])
        self.similaritytest_headers.train(self.get_clean_headers(httpResponseBytes))
        self.similaritytest_upperbody.train(httpResponseBody)
        self.similaritytest_lowerbody.train(httpResponseBody)

    def initialise(self, baseRequestResponse):
        """Create fresh Similarity baselines from *baseRequestResponse*."""
        httpResponseBytes = self._helpers.bytesToString(baseRequestResponse.getResponse())
        bodyOffset = self._helpers.analyzeRequest(httpResponseBytes).getBodyOffset()
        httpResponseBody = self._helpers.bytesToString(httpResponseBytes[bodyOffset:])
        self.similaritytest_headers = Similarity(self.get_clean_headers(httpResponseBytes))
        self.similaritytest_upperbody = Similarity(httpResponseBody)
        self.similaritytest_lowerbody = Similarity(httpResponseBody, lower=True)

    def get_clean_headers_list(self, httpResponseBytes, banned_headers):
        """Return the response's header lines minus any whose name is in
        *banned_headers* (case-insensitive)."""
        httpResponseInfo = self._helpers.analyzeResponse(httpResponseBytes)
        httpResponseHeaders = httpResponseInfo.getHeaders()
        newHttpResponseHeaders = list()
        for hdr in httpResponseHeaders:
            # NOTE(review): relies on map() returning a list (Jython/py2).
            _tmp = map(lambda x: x.strip().lower(), hdr.split(':', 1))
            if len(_tmp) == 2:
                k, v = _tmp
                if not k in banned_headers:
                    newHttpResponseHeaders.append(hdr)
            else:
                # Status line (no colon) is always kept.
                newHttpResponseHeaders.append(hdr)
        return newHttpResponseHeaders

    def get_clean_headers(self, httpResponseBytes, banned_headers=banned_headers):
        """Return just the head section of the response, rebuilt without
        the banned (volatile) headers."""
        newHttpResponseHeaders = self.get_clean_headers_list(httpResponseBytes, banned_headers)
        httpResponseInfo = self._helpers.analyzeResponse(httpResponseBytes)
        bodyOffset = httpResponseInfo.getBodyOffset()
        httpResponseBody = httpResponseBytes[bodyOffset:]
        # NOTE(review): slices the rebuilt message at the ORIGINAL body
        # offset; after removing headers the head is shorter, so this may
        # include part of the body — confirm whether that is intended.
        newhttpResponseBytes = self._helpers.buildHttpMessage(newHttpResponseHeaders, httpResponseBody)[:bodyOffset]
        return newhttpResponseBytes

    def choose_file(self, event):
        """Show a Swing file picker; returns the chosen path as a string
        (the string 'None' when the dialog is cancelled)."""
        chooseFile = JFileChooser()
        chooseFile.showOpenDialog(None)
        chosenFile = chooseFile.getSelectedFile()
        return str(chosenFile)

    def get_custom_headers_file(self, event):
        """Reset and reload the custom UA list, one string per line."""
        self.useragents[self.custom_file_text] = list()
        customheadersfn = self.choose_file(event)
        if customheadersfn is None or customheadersfn == 'None':
            return
        with open(customheadersfn,'r') as customheadersf:
            for line in customheadersf:
                self.useragents[self.custom_file_text].append(line.strip())

    def run_test(self, _contextMenuData, useragents):
        """For each selected message: train a stability baseline, then
        replay the request with each UA string and report responses that
        differ from the baseline as scan issues."""
        for baseRequestResponse in _contextMenuData:
            httpService = baseRequestResponse.getHttpService()
            requestBytes = baseRequestResponse.getRequest()
            targeturl = self._helpers.analyzeRequest(baseRequestResponse).getUrl()
            self._stdout.println('Target request for this URL: ' + targeturl.toString())
            if baseRequestResponse.getResponse() == None:
                self._stdout.println('No response for this request .. issuing a new request')
                baseRequestResponse = self._callbacks.makeHttpRequest(httpService, requestBytes)
                if baseRequestResponse.getResponse() == None:
                    self._stdout.println('Did you get a response')
                    continue
            # first we do a test to see how stable the responses are
            # make a first request to compare to
            self.initialise(baseRequestResponse)
            # and we check if the response is totally way off from the one previously recorded
            # if it is then we have to issue a new request - might be an expired session
            self.train(httpService, requestBytes)
            if (self.similaritytest_upperbody.norm < 0.6) and \
                    (self.similaritytest_lowerbody.norm < 0.6) and \
                    (self.similaritytest_headers.norm < 0.6):
                self._stdout.println('The previous request appears to grown fungii.. refreshing')
                baseRequestResponse = self._callbacks.makeHttpRequest(httpService, requestBytes)
                self.initialise(baseRequestResponse)
            # run the same request and analyse the responses to get a standard deviation
            # using the train() function which finds the lowest ratio
            self._stdout.println('training ...')
            for _ in range(10):
                self.train(httpService, requestBytes)
            # reduce norm by 0.05 to account for future changes
            for obj in [self.similaritytest_upperbody, self.similaritytest_lowerbody, self.similaritytest_headers]:
                obj.norm -= 0.05
            # now we check what is the lowest ratio number and use that as a normality tester
            if (self.similaritytest_upperbody.norm < 0.6) and (self.similaritytest_lowerbody.norm < 0.6) and (
                    self.similaritytest_headers.norm < 0.6):
                self._stdout.println('The site does not appear stable enough for our tests')
                continue
            # Only comparison channels that proved stable get tested below.
            dotest = list()
            if (self.similaritytest_lowerbody.norm >= 0.6):
                dotest.append('lowerbody')
            if (self.similaritytest_upperbody.norm >= 0.6):
                dotest.append('upperbody')
            if (self.similaritytest_headers.norm >= 0.6):
                dotest.append('headers')
            self._stdout.println("Norm for headers: " + str(self.similaritytest_headers.norm))
            self._stdout.println("Norm for upper body: " + str(self.similaritytest_upperbody.norm))
            self._stdout.println("Norm for lower body: " + str(self.similaritytest_lowerbody.norm))
            requestInfo = self._helpers.analyzeRequest(requestBytes)
            origRequestBody = requestBytes[requestInfo.getBodyOffset():]
            # remove the user-agent header so that we can add our own later
            newRequestHeaders = self.get_clean_headers_list(requestBytes, banned_headers=['user-agent'])
            # get all reported issues for the target URL so that we later check if the issue is a dupe
            scanIssues = self._callbacks.getScanIssues(None)
            for ua in useragents:
                if len(ua) > 25:
                    # Truncate long UA strings in the issue title.
                    issueName = "User-Agent dependent response (%s...%s)" % (ua[:30], ua[-12:])
                else:
                    issueName = "User-Agent dependent response (%s)" % (ua,)
                issueDetail = "Web location responds differently when User-Agent header is set to \"%s\"." % ua
                alreadyscanned = False
                for scanIssue in scanIssues:
                    # NOTE(review): `continue` here scans the whole list even
                    # after a match; `break` would be cheaper — confirm.
                    if (scanIssue.getIssueDetail() == issueDetail) and (scanIssue.getUrl() == targeturl):
                        alreadyscanned = True
                        continue
                if alreadyscanned:
                    self._stdout.println("User-agent: %s is already an issue .. skipping" % ua)
                    continue
                # add the new user agent
                _newRequestHeaders = newRequestHeaders[:]
                _newRequestHeaders.append(u'User-Agent: ' + ua)
                # build and send http request
                httpMessage = self._helpers.buildHttpMessage(_newRequestHeaders, origRequestBody)
                httpRequestResponse = self._callbacks.makeHttpRequest(httpService, httpMessage)
                # get the response
                httpResponseBytes = self._helpers.bytesToString(httpRequestResponse.getResponse())
                # split up the head from the body
                bodyOffset = self._helpers.analyzeRequest(httpResponseBytes).getBodyOffset()
                httpResponseBody = self._helpers.bytesToString(httpResponseBytes[bodyOffset:])
                change_hdr = None
                change_upperbody = None
                change_lowerbody = None
                # only test stable ones
                if 'headers' in dotest:
                    change_hdr = self.similaritytest_headers.is_different(self.get_clean_headers(httpResponseBytes))
                if 'upperbody' in dotest:
                    change_upperbody = self.similaritytest_upperbody.is_different(httpResponseBody)
                if 'lowerbody' in dotest:
                    change_lowerbody = self.similaritytest_lowerbody.is_different(httpResponseBody)
                if change_lowerbody or change_upperbody or change_hdr:
                    # Any stable channel changed: report a scan issue.
                    self._stdout.println(
                        "%s - headers: %s upperbody: %s lowerbody: %s" \
                        % (ua, change_hdr, change_upperbody, change_lowerbody))
                    httpmsgs = [baseRequestResponse, httpRequestResponse]
                    issue = ScanIssue(httpService,
                                      self._helpers.analyzeRequest(httpRequestResponse).getUrl(),
                                      httpmsgs,
                                      issueName,
                                      issueDetail,
                                      "Information",
                                      "Firm",
                                      None,
                                      None,
                                      None,
                                      )
                    self._callbacks.addScanIssue(issue)
                else:
                    self._stdout.println('No changes detected with "%s"' % ua)
        self._stdout.println('Done')
class ScanIssue(IScanIssue):
    """Concrete IScanIssue carrying the details of a User-Agent dependent
    response finding; every getter simply echoes a constructor argument."""

    def __init__(self, httpService, url, httpMessages, issueName, issueDetail,
                 severity, confidence, issueBackground, remediationDetail,
                 remediationBackground):
        self._service = httpService
        self._location = url
        self._messages = httpMessages
        self._name = issueName
        self._detail = issueDetail
        self._severity = severity
        self._confidence = confidence
        self._background = issueBackground
        self._remediation_detail = remediationDetail
        self._remediation_background = remediationBackground

    def getConfidence(self):
        return self._confidence

    def getHttpMessages(self):
        return self._messages

    def getHttpService(self):
        return self._service

    def getIssueBackground(self):
        return self._background

    def getIssueDetail(self):
        return self._detail

    def getIssueName(self):
        return self._name

    def getIssueType(self):
        # Fixed issue type for this extension's findings.
        return 0

    def getRemediationBackground(self):
        return self._remediation_background

    def getRemediationDetail(self):
        return self._remediation_detail

    def getSeverity(self):
        return self._severity

    def getUrl(self):
        return self._location

    def getHost(self):
        return 'localhost'

    def getPort(self):
        return 80
"content_hash": "9d58d20a7c5eb35ca0ce39083dd67845",
"timestamp": "",
"source": "github",
"line_count": 409,
"max_line_length": 156,
"avg_line_length": 45.0317848410758,
"alnum_prop": 0.6018568791399718,
"repo_name": "sandrogauci/Identity-Crisis",
"id": "561ec06a6e9c769fbb6b898a2402007c34220e1a",
"size": "18458",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "identitycrisis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18458"
}
],
"symlink_target": ""
} |
"""Default settings for a pydozeoff presentation; individual
presentations override these values."""

# Presentation metadata (filled in per presentation).
TITLE = u""
SPEAKER = u""
COMPANY = u""
LOCATION = u""
DATE = u""

# Directory structure (relative to ROOT_DIR).
ROOT_DIR = "."
SLIDES_DIR = "slides"
MEDIA_DIR = "media"
THEMES_DIR = "themes"

# Engine settings
THEME = "default"
CONTROL_VIS = "hidden"
DEFAULT_VIEW = "slideshow"
ENCODING = "utf-8"

# Syntax highlight settings
SYNTAX_HIGHLIGHT_OPTIONS = {
    "style": "default",
}

# Template engine extensions, filters and tests
TEMPLATE_ENGINE_EXTENSIONS = [
    "pydozeoff.template.ext.code",
    "pydozeoff.template.ext.code_style",
]
TEMPLATE_ENGINE_FILTERS = {}
TEMPLATE_ENGINE_TESTS = {}

# Slides: sequence of slide definitions, empty by default.
SLIDES = ()
| {
"content_hash": "a6a1c347a956236fe5cd843abe847f22",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 47,
"avg_line_length": 17.942857142857143,
"alnum_prop": 0.64171974522293,
"repo_name": "danielfm/pydozeoff",
"id": "30d4284f9fc38cb2a23939d854e1d11b136f4a14",
"size": "698",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pydozeoff/conf/global_settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "29242"
}
],
"symlink_target": ""
} |
"""
========
numpydoc
========
Sphinx extension that handles docstrings in the Numpy standard format. [1]
It will:
- Convert Parameters etc. sections to field lists.
- Convert See Also section to a See also entry.
- Renumber references.
- Extract the signature from the docstring, if it can't be determined
otherwise.
.. [1] https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
"""
from __future__ import division, absolute_import, print_function
import sys
import re
import pydoc
import sphinx
import inspect
try:
from collections.abc import Callable
except ImportError: # Python 2
from collections import Callable
# Guard against too-old Sphinx.
# NOTE(review): plain string comparison of version strings — fragile for
# some version shapes; confirm against the supported Sphinx range.
if sphinx.__version__ < '1.0.1':
    raise RuntimeError("Sphinx 1.0.1 or newer is required")

from docscrape_sphinx import get_doc_object, SphinxDocString

# sixu(s): unicode-literal helper — identity on Python 3, decodes
# escape sequences to unicode on Python 2.
if sys.version_info[0] >= 3:
    sixu = lambda s: s
else:
    sixu = lambda s: unicode(s, 'unicode_escape')
def mangle_docstrings(app, what, name, obj, options, lines,
                      reference_offset=[0]):
    """autodoc-process-docstring hook: rewrite *lines* in place.

    Modules get their top title stripped; everything else is re-rendered
    through the numpydoc SphinxDocString machinery. Reference labels are
    then renumbered so they stay unique across the whole build.

    NOTE: ``reference_offset=[0]`` is an intentional mutable default — a
    counter that persists across calls so successive docstrings never
    reuse a reference number.
    """
    cfg = {'use_plots': app.config.numpydoc_use_plots,
           'show_class_members': app.config.numpydoc_show_class_members,
           'show_inherited_class_members':
           app.config.numpydoc_show_inherited_class_members,
           'class_members_toctree': app.config.numpydoc_class_members_toctree}

    u_NL = sixu('\n')
    if what == 'module':
        # Strip top title
        pattern = '^\\s*[#*=]{4,}\\n[a-z0-9 -]+\\n[#*=]{4,}\\s*'
        title_re = re.compile(sixu(pattern), re.I | re.S)
        lines[:] = title_re.sub(sixu(''), u_NL.join(lines)).split(u_NL)
    else:
        doc = get_doc_object(obj, what, u_NL.join(lines), config=cfg)
        if sys.version_info[0] >= 3:
            doc = str(doc)
        else:
            doc = unicode(doc)
        lines[:] = doc.split(u_NL)

    if (app.config.numpydoc_edit_link and hasattr(obj, '__name__') and
            obj.__name__):
        if hasattr(obj, '__module__'):
            v = dict(full_name=sixu("%s.%s") % (obj.__module__, obj.__name__))
        else:
            v = dict(full_name=obj.__name__)
        lines += [sixu(''), sixu('.. htmlonly::'), sixu('')]
        lines += [sixu(' %s') % x for x in
                  (app.config.numpydoc_edit_link % v).split("\n")]

    # replace reference numbers so that there are no duplicates
    references = []
    for line in lines:
        line = line.strip()
        # BUG FIX: the character class needs '+' — without it only
        # single-character labels (e.g. [1]_) were ever collected, so
        # multi-character labels like [R2]_ were never renumbered.
        m = re.match(sixu('^.. \\[([a-z0-9_.-]+)\\]'), line, re.I)
        if m:
            references.append(m.group(1))

    # start renaming from the longest string, to avoid overwriting parts
    references.sort(key=lambda x: -len(x))
    if references:
        for i, line in enumerate(lines):
            for r in references:
                if re.match(sixu('^\\d+$'), r):
                    # Numeric labels become R<offset + n>.
                    new_r = sixu("R%d") % (reference_offset[0] + int(r))
                else:
                    # Named labels get the offset appended.
                    new_r = sixu("%s%d") % (r, reference_offset[0])
                lines[i] = lines[i].replace(sixu('[%s]_') % r,
                                            sixu('[%s]_') % new_r)
                lines[i] = lines[i].replace(sixu('.. [%s]') % r,
                                            sixu('.. [%s]') % new_r)

    reference_offset[0] += len(references)
def mangle_signature(app, what, name, obj, options, sig, retann):
    """autodoc-process-signature hook.

    Returns ``(sig, retann)`` extracted from the docstring's numpydoc
    'Signature' section when present; ``('', '')`` to suppress the
    signature of classes without a real ``__init__``; bare ``None``
    (no override) otherwise.
    """
    # Do not try to inspect classes that don't define `__init__`
    if (inspect.isclass(obj) and
            (not hasattr(obj, '__init__') or
             'initializes x; see ' in pydoc.getdoc(obj.__init__))):
        return '', ''

    if not (isinstance(obj, Callable) or
            hasattr(obj, '__argspec_is_invalid_')):
        return

    if not hasattr(obj, '__doc__'):
        return

    doc = SphinxDocString(pydoc.getdoc(obj))
    if doc['Signature']:
        # Drop everything before the opening parenthesis (the name).
        sig = re.sub(sixu("^[^(]*"), sixu(""), doc['Signature'])
        return sig, sixu('')
def setup(app, get_doc_object_=get_doc_object):
    """Sphinx extension entry point: register numpydoc hooks.

    Connects the docstring/signature mangling callbacks, declares the
    numpydoc config values, and installs the extra mangling domains.
    """
    if not hasattr(app, 'add_config_value'):
        return  # probably called by nose, better bail out
    # Allow callers to swap the docstring parser factory used by
    # mangle_docstrings.
    global get_doc_object
    get_doc_object = get_doc_object_
    app.connect('autodoc-process-docstring', mangle_docstrings)
    app.connect('autodoc-process-signature', mangle_signature)
    app.add_config_value('numpydoc_edit_link', None, False)
    app.add_config_value('numpydoc_use_plots', None, False)
    app.add_config_value('numpydoc_show_class_members', True, True)
    app.add_config_value('numpydoc_show_inherited_class_members', True, True)
    app.add_config_value('numpydoc_class_members_toctree', True, True)
    # Extra mangling domains
    app.add_domain(NumpyPythonDomain)
    app.add_domain(NumpyCDomain)
# ------------------------------------------------------------------------------
# Docstring-mangling domains
# ------------------------------------------------------------------------------
from docutils.statemachine import ViewList
from sphinx.domains.c import CDomain
from sphinx.domains.python import PythonDomain
class ManglingDomainBase(object):
    """Mixin that rewraps a domain's directives so their content is run
    through numpydoc's docstring mangling first.

    Subclasses provide ``directive_mangling_map`` (directive name ->
    objtype handed to the mangler).
    """

    directive_mangling_map = {}

    def __init__(self, *a, **kw):
        super(ManglingDomainBase, self).__init__(*a, **kw)
        self.wrap_mangling_directives()

    def wrap_mangling_directives(self):
        # Snapshot the items since we mutate self.directives in place.
        for directive_name, objtype in list(self.directive_mangling_map.items()):
            original = self.directives[directive_name]
            self.directives[directive_name] = wrap_mangling_directive(
                original, objtype)
class NumpyPythonDomain(ManglingDomainBase, PythonDomain):
    """Sphinx Python domain ('np') whose directives mangle numpydoc
    docstrings before normal processing."""
    name = 'np'
    # directive name -> objtype passed to mangle_docstrings
    directive_mangling_map = {
        'function': 'function',
        'class': 'class',
        'exception': 'class',
        'method': 'function',
        'classmethod': 'function',
        'staticmethod': 'function',
        'attribute': 'attribute',
    }
    # This domain contributes no indices of its own.
    indices = []
class NumpyCDomain(ManglingDomainBase, CDomain):
    """Sphinx C domain ('np-c') whose directives mangle numpydoc
    docstrings before normal processing."""
    name = 'np-c'
    # directive name -> objtype passed to mangle_docstrings
    directive_mangling_map = {
        'function': 'function',
        'member': 'attribute',
        'macro': 'function',
        'type': 'class',
        'var': 'object',
    }
def wrap_mangling_directive(base_directive, objtype):
    """Return a subclass of *base_directive* whose content is run
    through mangle_docstrings (as *objtype*) before delegating to the
    original directive.
    """
    class directive(base_directive):
        def run(self):
            env = self.state.document.settings.env

            # Extract the object name from the first argument: an
            # optional dotted/module prefix, the name itself, then an
            # optional trailing "(...)" signature.
            name = None
            if self.arguments:
                m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0])
                name = m.group(2).strip()
                if not name:
                    name = self.arguments[0]

            lines = list(self.content)
            mangle_docstrings(env.app, objtype, name, None, None, lines)
            # ViewList preserves source/line mapping for error reporting.
            self.content = ViewList(lines, self.content.parent)
            return base_directive.run(self)

    return directive
| {
"content_hash": "716b2ea4865af7611a07b77b3343ddd5",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 80,
"avg_line_length": 32.51219512195122,
"alnum_prop": 0.567741935483871,
"repo_name": "kaushik94/sympy",
"id": "f526d2e468216d7398ae76f950b319143e03e439",
"size": "6665",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "doc/ext/numpydoc.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "5094"
},
{
"name": "Python",
"bytes": "13553568"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "4008"
},
{
"name": "TeX",
"bytes": "32356"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
} |
import ConfigParser
import boto.exception
import boto.s3.connection
import bunch
import itertools
import os
import random
import string
from .utils import region_sync_meta
# Connections, per-section user config, and region targets; all
# populated by setup() from the S3TEST_CONF file.
s3 = bunch.Bunch()
config = bunch.Bunch()
targets = bunch.Bunch()

# this will be assigned by setup()
prefix = None

# Supported boto calling-format styles, selectable per config section
# via the 'calling_format' option.
calling_formats = dict(
    ordinary=boto.s3.connection.OrdinaryCallingFormat(),
    subdomain=boto.s3.connection.SubdomainCallingFormat(),
    vhost=boto.s3.connection.VHostCallingFormat(),
    )
def get_prefix():
    """Return the bucket-name prefix chosen by setup()."""
    # setup() must have run already and assigned the module-level prefix.
    assert prefix is not None
    return prefix
def is_slow_backend():
    """Whether the config marked this backend as slow (set by setup())."""
    return slow_backend
def choose_bucket_prefix(template, max_len=30):
    """
    Choose a prefix for our test buckets, so they're easy to identify.

    Fills *template*'s ``{random}`` placeholder with random lowercase
    alphanumeric filler, trimming the filler until the result fits in
    *max_len* characters.

    Raises RuntimeError if even an empty filler makes the template too
    long.
    """
    alphabet = string.ascii_lowercase + string.digits
    filler = ''.join(random.choice(alphabet) for _ in range(255))
    while filler:
        candidate = template.format(random=filler)
        if len(candidate) <= max_len:
            return candidate
        filler = filler[:-1]
    raise RuntimeError(
        'Bucket prefix template is impossible to fulfill: {template!r}'.format(
            template=template,
            ),
        )
def nuke_prefixed_buckets_on_conn(prefix, name, conn):
    """Delete every bucket (and all its keys) whose name starts with
    *prefix* on the given connection.

    Buckets we do not own (AccessDenied) are skipped silently.
    """
    print 'Cleaning buckets from connection {name} prefix {prefix!r}.'.format(
        name=name,
        prefix=prefix,
        )
    for bucket in conn.get_all_buckets():
        if bucket.name.startswith(prefix):
            print 'Cleaning bucket {bucket}'.format(bucket=bucket)
            try:
                # Reset the ACL first so we can list/delete keys even if
                # a test left the bucket with a restrictive ACL.
                bucket.set_canned_acl('private')
                for key in bucket.list():
                    print 'Cleaning bucket {bucket} key {key}'.format(
                        bucket=bucket,
                        key=key,
                        )
                    key.set_canned_acl('private')
                    key.delete()
                bucket.delete()
            except boto.exception.S3ResponseError as e:
                if e.error_code != 'AccessDenied':
                    print 'GOT UNWANTED ERROR', e.error_code
                    raise
                # seems like we're not the owner of the bucket; ignore
                pass
def nuke_prefixed_buckets(prefix):
    """Delete all prefixed test buckets on every configured connection.

    In a multi-region setup: clean the master first, run a region
    metadata sync so the deletes propagate, then clean the secondaries.
    """
    # If no regions are specified, use the simple method
    if targets.main.master == None:
        for name, conn in s3.items():
            print 'Deleting buckets on {name}'.format(name=name)
            nuke_prefixed_buckets_on_conn(prefix, name, conn)
    else:
        # First, delete all buckets on the master connection
        for name, conn in s3.items():
            if conn == targets.main.master.connection:
                print 'Deleting buckets on {name} (master)'.format(name=name)
                nuke_prefixed_buckets_on_conn(prefix, name, conn)
        # Then sync to propagate deletes to secondaries
        region_sync_meta(targets.main, targets.main.master.connection)
        print 'region-sync in nuke_prefixed_buckets'
        # Now delete remaining buckets on any other connection
        for name, conn in s3.items():
            if conn != targets.main.master.connection:
                print 'Deleting buckets on {name} (non-master)'.format(name=name)
                nuke_prefixed_buckets_on_conn(prefix, name, conn)
    print 'Done with cleanup of test buckets.'
class TargetConfig:
    """Configuration for a single region/endpoint, parsed from one
    config-file section.

    Optional settings keep their defaults when absent; ``host`` is
    required and ``calling_format`` must be one of the keys in the
    module-level ``calling_formats`` map.
    """
    def __init__(self, cfg, section):
        # Defaults for all optional settings.
        self.port = None
        self.api_name = ''
        self.is_master = False
        self.is_secure = False
        self.sync_agent_addr = None
        self.sync_agent_port = 0
        self.sync_meta_wait = 0
        try:
            self.api_name = cfg.get(section, 'api_name')
        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
            pass
        try:
            self.port = cfg.getint(section, 'port')
        except ConfigParser.NoOptionError:
            pass
        try:
            self.host=cfg.get(section, 'host')
        except ConfigParser.NoOptionError:
            # host is the one mandatory setting.
            raise RuntimeError(
                'host not specified for section {s}'.format(s=section)
                )
        try:
            self.is_master=cfg.getboolean(section, 'is_master')
        except ConfigParser.NoOptionError:
            pass
        try:
            self.is_secure=cfg.getboolean(section, 'is_secure')
        except ConfigParser.NoOptionError:
            pass
        try:
            raw_calling_format = cfg.get(section, 'calling_format')
        except ConfigParser.NoOptionError:
            raw_calling_format = 'ordinary'
        try:
            self.sync_agent_addr = cfg.get(section, 'sync_agent_addr')
        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
            pass
        try:
            self.sync_agent_port = cfg.getint(section, 'sync_agent_port')
        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
            pass
        try:
            self.sync_meta_wait = cfg.getint(section, 'sync_meta_wait')
        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
            pass
        # Resolve the calling-format name to the actual boto object.
        try:
            self.calling_format = calling_formats[raw_calling_format]
        except KeyError:
            raise RuntimeError(
                'calling_format unknown: %r' % raw_calling_format
                )
class TargetConnection:
    """Pairs a TargetConfig with the boto connection built from it."""
    def __init__(self, conf, conn):
        # conf: TargetConfig for the endpoint; conn: boto S3Connection.
        self.conf = conf
        self.connection = conn
class RegionsInfo:
    """Registry of region TargetConfigs, tracking which one is the
    master and which are secondaries.

    At most one region may be flagged ``is_master``; a second one makes
    add() raise RuntimeError.
    """
    def __init__(self):
        self.m = bunch.Bunch()
        self.master = None
        self.secondaries = []

    def add(self, name, region_config):
        """Register *region_config* under *name*."""
        self.m[name] = region_config
        if region_config.is_master:
            if self.master is not None:
                raise RuntimeError(
                    'multiple regions defined as master'
                    )
            self.master = region_config
        else:
            self.secondaries.append(region_config)

    def get(self, name=None):
        """Return the config registered under *name*, or the whole
        name->config mapping when called without arguments.

        BUG FIX: the class previously defined ``get`` twice, so the
        one-argument by-name lookup was silently shadowed by the
        zero-argument variant; this merged signature supports both.
        """
        if name is None:
            return self.m
        return self.m[name]

    def iteritems(self):
        return self.m.iteritems()
# Module-level registry of region configurations, filled by setup().
regions = RegionsInfo()
class RegionsConn:
    """Collection of live TargetConnections for one credentials section,
    tracking the default, the master, and the secondaries."""

    def __init__(self):
        self.m = bunch.Bunch()
        self.default = None
        self.master = None
        self.secondaries = []

    def iteritems(self):
        return self.m.iteritems()

    def set_default(self, conn):
        self.default = conn

    def add(self, name, conn):
        """Register *conn* under *name*; the first one added becomes the
        default, and master/secondary status follows its config flag."""
        self.m[name] = conn
        if not self.default:
            self.default = conn
        if conn.conf.is_master:
            self.master = conn
        else:
            self.secondaries.append(conn)
# nosetests --processes=N with N>1 is safe: each test uses uniquely
# prefixed buckets, so parallel workers do not interfere.
_multiprocess_can_split_ = True
def setup():
    """Nose setup hook: read S3TEST_CONF, build connections for every
    region/credentials combination, and nuke leftover test buckets.

    Populates the module globals ``prefix``, ``slow_backend``, ``s3``,
    ``config``, ``targets`` and ``regions``.
    """
    cfg = ConfigParser.RawConfigParser()
    try:
        path = os.environ['S3TEST_CONF']
    except KeyError:
        raise RuntimeError(
            'To run tests, point environment '
            + 'variable S3TEST_CONF to a config file.',
            )
    with file(path) as f:
        cfg.readfp(f)
    global prefix
    global targets
    global slow_backend
    try:
        template = cfg.get('fixtures', 'bucket prefix')
    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
        template = 'test-{random}-'
    prefix = choose_bucket_prefix(template=template)
    try:
        slow_backend = cfg.getboolean('fixtures', 'slow backend')
    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
        slow_backend = False
    # pull the default_region out, if it exists
    try:
        default_region = cfg.get('fixtures', 'default_region')
    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
        default_region = None
    s3.clear()
    config.clear()
    # First pass: collect all '[region <name>]' sections.
    for section in cfg.sections():
        try:
            (type_, name) = section.split(None, 1)
        except ValueError:
            continue
        if type_ != 'region':
            continue
        regions.add(name, TargetConfig(cfg, section))
    # Second pass: for each '[s3 <name>]' credentials section, build a
    # connection to every known region.
    for section in cfg.sections():
        try:
            (type_, name) = section.split(None, 1)
        except ValueError:
            continue
        if type_ != 's3':
            continue
        # With no explicit regions, treat this section as one region.
        if len(regions.get()) == 0:
            regions.add("default", TargetConfig(cfg, section))
        config[name] = bunch.Bunch()
        for var in [
            'user_id',
            'display_name',
            'email',
            ]:
            try:
                config[name][var] = cfg.get(section, var)
            except ConfigParser.NoOptionError:
                pass
        targets[name] = RegionsConn()
        for (k, conf) in regions.iteritems():
            conn = boto.s3.connection.S3Connection(
                aws_access_key_id=cfg.get(section, 'access_key'),
                aws_secret_access_key=cfg.get(section, 'secret_key'),
                is_secure=conf.is_secure,
                port=conf.port,
                host=conf.host,
                # TODO test vhost calling format
                calling_format=conf.calling_format,
                )
            temp_targetConn = TargetConnection(conf, conn)
            targets[name].add(k, temp_targetConn)
            # Explicitly test for and set the default region, if specified.
            # If it was not specified, use the 'is_master' flag to set it.
            # NOTE(review): this compares default_region against the s3
            # section name, not the region key `k` -- confirm intended.
            if default_region:
                if default_region == name:
                    targets[name].set_default(temp_targetConn)
            elif conf.is_master:
                targets[name].set_default(temp_targetConn)
        s3[name] = targets[name].default.connection
    # WARNING! we actively delete all buckets we see with the prefix
    # we've chosen! Choose your prefix with care, and don't reuse
    # credentials!
    # We also assume nobody else is going to use buckets with that
    # prefix. This is racy but given enough randomness, should not
    # really fail.
    nuke_prefixed_buckets(prefix=prefix)
def teardown():
    """Nose teardown hook: delete any test buckets left behind."""
    # remove our buckets here also, to avoid littering
    nuke_prefixed_buckets(prefix=prefix)
# Monotonic counter used to make each generated bucket name unique.
bucket_counter = itertools.count(1)
def get_new_bucket_name():
    """
    Get a bucket name that probably does not exist.

    We make every attempt to use a unique random prefix, so if a
    bucket by this name happens to exist, it's ok if tests give
    false negatives.
    """
    serial = next(bucket_counter)
    return '%s%d' % (prefix, serial)
def get_new_bucket(target=None, name=None, headers=None):
    """
    Get a bucket that exists and is empty.

    Always recreates a bucket from scratch. This is useful to also
    reset ACLs and such.
    """
    if target is None:
        target = targets.main.default
    if name is None:
        name = get_new_bucket_name()
    # The only way this fails with a pre-existing bucket is if someone
    # raced us between setup's nuke_prefixed_buckets and here; that is
    # astronomically unlikely, so just let create_bucket proceed.
    return target.connection.create_bucket(
        name, location=target.conf.api_name, headers=headers)
| {
"content_hash": "156c23e7988d0615af48baff8eef760b",
"timestamp": "",
"source": "github",
"line_count": 371,
"max_line_length": 91,
"avg_line_length": 30.315363881401616,
"alnum_prop": 0.5899350938027919,
"repo_name": "SUSE/s3-tests",
"id": "51dd5067a690194c43c1d13ebc76426ce0f2a906",
"size": "11247",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "s3tests/functional/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "260358"
},
{
"name": "Shell",
"bytes": "1513"
}
],
"symlink_target": ""
} |
"""Commandline utility to run operator benchmarks"""
import argparse
import logging
import os
import sys
import mxnet as mx
from benchmark.opperf.nd_operations.unary_operators import run_mx_unary_operators_benchmarks
from benchmark.opperf.nd_operations.binary_operators import run_mx_binary_broadcast_operators_benchmarks, \
run_mx_binary_element_wise_operators_benchmarks, run_mx_binary_misc_operators_benchmarks
from benchmark.opperf.nd_operations.gemm_operators import run_gemm_operators_benchmarks
from benchmark.opperf.nd_operations.random_sampling_operators import run_mx_random_sampling_operators_benchmarks
from benchmark.opperf.nd_operations.reduction_operators import run_mx_reduction_operators_benchmarks
from benchmark.opperf.nd_operations.sorting_searching_operators import run_sorting_searching_operators_benchmarks
from benchmark.opperf.nd_operations.nn_activation_operators import run_activation_operators_benchmarks
from benchmark.opperf.nd_operations.nn_conv_operators import run_pooling_operators_benchmarks, \
run_convolution_operators_benchmarks, run_transpose_convolution_operators_benchmarks
from benchmark.opperf.nd_operations.nn_basic_operators import run_nn_basic_operators_benchmarks
from benchmark.opperf.nd_operations.nn_optimizer_operators import run_optimizer_operators_benchmarks
from benchmark.opperf.nd_operations.indexing_routines import run_indexing_routines_benchmarks
from benchmark.opperf.nd_operations.nn_loss_operators import run_loss_operators_benchmarks
from benchmark.opperf.nd_operations.linalg_operators import run_linalg_operators_benchmarks
from benchmark.opperf.nd_operations.misc_operators import run_mx_misc_operators_benchmarks
from benchmark.opperf.nd_operations.array_manipulation_operators import run_rearrange_operators_benchmarks, \
run_shape_operators_benchmarks, run_expanding_operators_benchmarks, run_rounding_operators_benchmarks, \
run_join_split_operators_benchmarks
from benchmark.opperf.utils.common_utils import merge_map_list, save_to_file
from benchmark.opperf.utils.op_registry_utils import get_operators_with_no_benchmark, \
get_current_runtime_features
def run_all_mxnet_operator_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', int64_tensor='off', warmup=25, runs=100):
    """Run all the MXNet operators (NDArray) benchmarks.

    Parameters
    ----------
    ctx : mx.Context
        Context (mx.cpu() or mx.gpu(id)) to run the benchmarks on.
    dtype : str
        Precision of the benchmark inputs, e.g. 'float32'.
    profiler : str
        'native' for the built-in CPP profiler, 'python' for time-based.
    int64_tensor : str
        'on' to use large (int64-indexed) tensor inputs, 'off' otherwise.
    warmup : int
        Number of warmup iterations before measuring.
    runs : int
        Number of measured iterations.

    Returns
    -------
    Dictionary of benchmark results.
    """
    # All suites receive the exact same keyword arguments; the previous
    # implementation repeated them on every call line.
    common_kwargs = dict(ctx=ctx, dtype=dtype, profiler=profiler,
                         int64_tensor=int64_tensor, warmup=warmup, runs=runs)

    # Suites run in this exact order (same as the original call order).
    benchmark_runners = [
        # ********************* MXNET TENSOR OPERATOR BENCHMARKS *********************
        run_mx_unary_operators_benchmarks,
        run_mx_binary_broadcast_operators_benchmarks,
        run_mx_binary_element_wise_operators_benchmarks,
        run_mx_binary_misc_operators_benchmarks,
        run_gemm_operators_benchmarks,
        run_mx_random_sampling_operators_benchmarks,
        run_mx_reduction_operators_benchmarks,
        run_sorting_searching_operators_benchmarks,
        run_indexing_routines_benchmarks,
        run_rearrange_operators_benchmarks,
        run_shape_operators_benchmarks,
        run_expanding_operators_benchmarks,
        run_rounding_operators_benchmarks,
        run_join_split_operators_benchmarks,
        # ************************ MXNET NN OPERATOR BENCHMARKS **********************
        run_nn_basic_operators_benchmarks,
        run_activation_operators_benchmarks,
        run_pooling_operators_benchmarks,
        run_convolution_operators_benchmarks,
        run_optimizer_operators_benchmarks,
        run_transpose_convolution_operators_benchmarks,
        run_loss_operators_benchmarks,
        run_mx_misc_operators_benchmarks,
    ]

    # Linear Algebra operators do not work with int64 tensor data.
    # Issue tracked here: https://github.com/apache/incubator-mxnet/issues/17716
    if int64_tensor == 'off':
        benchmark_runners.append(run_linalg_operators_benchmarks)

    mxnet_operator_benchmark_results = [runner(**common_kwargs)
                                        for runner in benchmark_runners]

    # ****************************** PREPARE FINAL RESULTS ********************************
    return merge_map_list(mxnet_operator_benchmark_results)
def _parse_mxnet_context(ctx):
if not ctx:
raise ValueError("Context cannot be null or empty")
if ctx.lower() in ['cpu', 'gpu']:
return mx.context.Context(ctx)
elif ctx.lower().startwith('gpu('):
device_id = int(ctx[4:-1])
return mx.gpu(device_id)
def main():
    """Parse CLI options, run the requested operator benchmarks, and
    save the sorted results to the chosen output file.

    Returns 0 on success. Refuses to overwrite an existing output file.
    """
    # 1. GET USER INPUTS
    parser = argparse.ArgumentParser(description='Run all the MXNet operator benchmarks')
    parser.add_argument('--ctx', type=str, default='cpu',
                        help='Global context to run all benchmarks. By default, cpu on a '
                             'CPU machine, gpu(0) on a GPU machine. '
                             'Valid Inputs - cpu, gpu, gpu(0), gpu(1)...')
    parser.add_argument('--dtype', type=str, default='float32', help='DType (Precision) to run benchmarks. By default, '
                                                                     'float32. Valid Inputs - float32, float64, int32, '
                                                                     'int64')
    parser.add_argument('-f', '--output-format', type=str, default='json',
                        choices=['json', 'md'],
                        help='Benchmark result output format. By default, json. '
                             'Valid Inputs - json, md')
    parser.add_argument('-o', '--output-file', type=str, default='./mxnet_operator_benchmarks.json',
                        help='Name and path for the '
                             'output file.')
    parser.add_argument('-p', '--profiler', type=str, default='native',
                        help='Use built-in CPP profiler (native) or Python'
                             'time module.'
                             'Valid Inputs - native, python')
    parser.add_argument('--int64-tensor', type=str, default='off',
                        help='Run performance tests with large tensor input'
                             'data (dimension >= 2**32) or standard input data.'
                             'Valid Inputs - on, off')
    parser.add_argument('-w', '--warmup', type=int, default=25,
                        help='Number of times to run for warmup.'
                             'Valid Inputs - positive integers')
    parser.add_argument('-r', '--runs', type=int, default=100,
                        help='Number of runs to capture benchmark results.'
                             'Valid Inputs - positive integers')
    args = parser.parse_args()
    logging.info(f"Running MXNet operator benchmarks with the following options: {args}")
    # Never clobber a previous results file.
    assert not os.path.isfile(args.output_file),\
        f"Output file {args.output_file} already exists."
    # 2. RUN BENCHMARKS
    ctx = _parse_mxnet_context(args.ctx)
    dtype = args.dtype
    profiler = args.profiler
    int64_tensor = args.int64_tensor
    warmup = args.warmup
    runs = args.runs
    benchmark_results = run_all_mxnet_operator_benchmarks(ctx=ctx, dtype=dtype, profiler=profiler, int64_tensor=int64_tensor, warmup=warmup, runs=runs)
    # Sort benchmark results alphabetically by op name
    final_benchmark_results = dict()
    for key in sorted(benchmark_results.keys()):
        final_benchmark_results[key] = benchmark_results[key]
    # 3. PREPARE OUTPUTS
    run_time_features = get_current_runtime_features()
    save_to_file(final_benchmark_results, args.output_file, args.output_format, run_time_features, profiler)
    # 4. Generate list of MXNet operators not covered in benchmarks
    ops_not_covered = get_operators_with_no_benchmark(final_benchmark_results.keys())
    for idx, op in enumerate(ops_not_covered):
        print(f"{idx}. {op}")
    return 0
# Script entry point: propagate main()'s return code as the exit status.
if __name__ == '__main__':
    sys.exit(main())
| {
"content_hash": "181bf22bb65761bda65e36d806399665",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 185,
"avg_line_length": 61.95631067961165,
"alnum_prop": 0.6992086500039175,
"repo_name": "apache/incubator-mxnet",
"id": "8cc48694d5ad23da29b5f00d5d8fe61ebe525b45",
"size": "13600",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "benchmark/opperf/opperf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "151356"
},
{
"name": "C++",
"bytes": "12059300"
},
{
"name": "CMake",
"bytes": "213440"
},
{
"name": "Cuda",
"bytes": "1528224"
},
{
"name": "Cython",
"bytes": "26285"
},
{
"name": "Dockerfile",
"bytes": "54893"
},
{
"name": "Groovy",
"bytes": "132682"
},
{
"name": "Jupyter Notebook",
"bytes": "1889643"
},
{
"name": "Makefile",
"bytes": "8991"
},
{
"name": "PowerShell",
"bytes": "6699"
},
{
"name": "Python",
"bytes": "8626713"
},
{
"name": "Shell",
"bytes": "172547"
}
],
"symlink_target": ""
} |
from oneview_redfish_toolkit import category_resource
from oneview_redfish_toolkit import multiple_oneview
def first_parameter_resource(resource, function, *args, **kwargs):
    """Dispatch a query whose first positional argument is the resource
    id, and record which category/function served that id."""
    resource_id = args[0]
    response = multiple_oneview.query_ov_client_by_resource(
        resource_id, resource, function, *args, **kwargs)
    # Remember the category/function that resolved this id for reuse.
    category_resource.set_map_category_resources_entry(
        resource_id, resource, function)
    return response
def multiple_parameter_resource(resource, function, *args, **kwargs):
    """Dispatch a query where args[0] is a URI or id used only to locate
    the owning OneView; the remaining args go to *function*."""
    uri_or_id = args[0]
    # URIs contain 'rest'; plain ids do not.
    lookup = 'get_by_uri' if 'rest' in uri_or_id else 'get_by_id'
    remaining_args = args[1:]
    resource_object = multiple_oneview.query_ov_client_by_resource(
        uri_or_id, resource, lookup, uri_or_id)
    return multiple_oneview.execute_query_function(
        resource_object, function, *remaining_args, **kwargs)
def filter_uuid_parameter_resource(resource, function, *args, **kwargs):
    """Route a filtered query to the OneView owning the id embedded in
    the ``filter`` kwarg; without a filter, query every OneView."""
    if 'filter' not in kwargs:
        return all_oneviews_resource(resource, function, *args, **kwargs)
    resource_id = _get_resource_id_by_filter(kwargs['filter'], None)
    return multiple_oneview.query_ov_client_by_resource(
        resource_id, resource, function, *args, **kwargs)
def all_oneviews_resource(resource, function, *args, **kwargs):
    """Run *function* of *resource* across every registered OneView and
    return the aggregated results."""
    return multiple_oneview.search_resource_multiple_ov(
        resource, function, None, None, *args, **kwargs)
def spt_get_all_with_filter(resource, function, *args, **kwargs):
    """Query server profile templates, routed by the enclosureGroupUri
    value in the ``filter`` kwarg; without a filter, query every
    OneView."""
    if 'filter' not in kwargs:
        return all_oneviews_resource(resource, function, *args, **kwargs)
    enclosure_group_uri = _get_resource_id_by_filter(
        kwargs['filter'], 'enclosureGroupUri')
    return multiple_oneview.query_ov_client_by_resource(
        enclosure_group_uri, resource, function, *args, **kwargs)
def drive_enclosures_get_all_with_filter(resource, function, *args, **kwargs):
    """Query drive enclosures, routed by the locationUri value in the
    ``filter`` kwarg; without a filter, query every OneView."""
    if 'filter' not in kwargs:
        return all_oneviews_resource(resource, function, *args, **kwargs)
    location_uri = _get_resource_id_by_filter(kwargs['filter'], 'locationUri')
    return multiple_oneview.query_ov_client_by_resource(
        location_uri, resource, function, *args, **kwargs)
def create_server_profile(resource, function, *args, **kwargs):
    """Create a server profile on the OneView that owns its server
    hardware.

    args come from the connection.post call: args[0] is the URI to
    access and args[1] the server-profile body.
    """
    server_profile = args[1]
    hardware_uri = server_profile['serverHardwareUri']
    return _run_action(hardware_uri, 'server_hardware', 'get_by_uri',
                       resource, function, *args, **kwargs)
def delete_server_profile(resource, function, *args, **kwargs):
    """Delete a server profile (args[0] is its uuid) on the OneView
    that owns it."""
    profile_uuid = args[0]
    return _run_action(profile_uuid, 'server_profiles', 'get_by_id',
                       resource, function, *args, **kwargs)
def update_power_state_server_hardware(resource, function, *args, **kwargs):
    """Change a server hardware's power state.

    args[0] is the power configuration; args[1] the hardware uuid/URI
    used to locate the owning OneView.
    """
    hardware_ref = args[1]
    # URIs contain 'rest'; plain ids do not.
    lookup = 'get_by_uri' if 'rest' in hardware_ref else 'get_by_id'
    resource_object = multiple_oneview.query_ov_client_by_resource(
        hardware_ref, resource, lookup, hardware_ref)
    return multiple_oneview.execute_query_function(
        resource_object, function, args[0], **kwargs)
def create_labels(resource, function, *args, **kwargs):
    """Create labels on the OneView owning the labelled resource
    (identified by args[0]['resourceUri'])."""
    target_uri = args[0]["resourceUri"]
    return _run_action(target_uri, 'server_profiles', 'get_by_uri',
                       resource, function, *args, **kwargs)
def _run_action(resource_id, resource_get, function_get, resource,
                function, *args, **kwargs):
    """Execute *function* of *resource* on the OneView that owns
    *resource_id*, first mapping the id to an appliance if needed."""
    if not multiple_oneview.get_ov_ip_by_resource(resource_id):
        # Not yet mapped: resolve ownership with a lookup query.
        multiple_oneview.query_ov_client_by_resource(
            resource_id, resource_get, function_get, resource_id)
    # resource_id is mapped now; run the requested action there.
    return multiple_oneview.query_ov_client_by_resource(
        resource_id, resource, function, *args, **kwargs)
def _get_resource_id_by_filter(filter_parameter, resource_property):
# Check if filter is composed by a list. If it is true
# sets the resource_id based on a resource_uri;
# Otherwise just splits the filter string and sets its value
# into resource_id
if isinstance(filter_parameter, list):
for filters in filter_parameter:
filter_data = filters.split('=')
if filter_data[0] == resource_property:
resource_id = filter_data[1]
else:
filter_data = filter_parameter.split('=')
resource_id = filter_data[1]
return resource_id
| {
"content_hash": "800c908ff906fde1cb1608d4e37ecef5",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 113,
"avg_line_length": 39.758389261744966,
"alnum_prop": 0.5455773126266037,
"repo_name": "HewlettPackard/oneview-redfish-toolkit",
"id": "b655dbfc33f96d9e5e2bde84529eaacb59cd162e",
"size": "6572",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oneview_redfish_toolkit/strategy_multiple_oneview.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "284"
},
{
"name": "Python",
"bytes": "979438"
},
{
"name": "Shell",
"bytes": "866"
}
],
"symlink_target": ""
} |
import nbinteract as nbi
from .util import run_doctests
def test_doctests():
    """All doctests embedded in nbi.plotting must pass."""
    report = run_doctests(nbi.plotting)
    assert report.failed == 0
| {
"content_hash": "a98a98462946514c3ad2c50a15edd86e",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 40,
"avg_line_length": 19,
"alnum_prop": 0.7236842105263158,
"repo_name": "SamLau95/nbinteract",
"id": "5aa26b5ee4bfa4d5b74051389c88165a5899d516",
"size": "152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_plotting.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "14221"
},
{
"name": "HTML",
"bytes": "9297782"
},
{
"name": "JavaScript",
"bytes": "25383"
},
{
"name": "Jupyter Notebook",
"bytes": "276178"
},
{
"name": "Makefile",
"bytes": "2480"
},
{
"name": "Python",
"bytes": "77764"
},
{
"name": "Smarty",
"bytes": "10717"
}
],
"symlink_target": ""
} |
from gerencianet import Gerencianet
from ...credentials import credentials
# Build an authenticated client and create a random (EVP) Pix key.
gn = Gerencianet(credentials.CREDENTIALS)
response = gn.pix_create_evp()
# Show the raw API response.
print(response)
| {
"content_hash": "80ba52e43f6dcc098c2916055629a760",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 41,
"avg_line_length": 21,
"alnum_prop": 0.7976190476190477,
"repo_name": "gerencianet/gn-api-sdk-python",
"id": "b5347db1dff84882a30b38c9ed606c9ab9f97461",
"size": "187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/gn/key/pix_create_evp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25401"
}
],
"symlink_target": ""
} |
"""
A filter middleware that inspects the requested URI for a version string
and/or Accept headers and attempts to negotiate an API controller to
return
"""
from oslo_config import cfg
from oslo_log import log as logging
from glance.api import versions
from glance.common import wsgi
# Global oslo.config options object.
CONF = cfg.CONF
# Module-level logger, named after this module.
LOG = logging.getLogger(__name__)
class VersionNegotiationFilter(wsgi.Middleware):
    def __init__(self, app):
        # Controller that renders the version-choices response, used when
        # /versions is requested explicitly or negotiation fails.
        self.versions_app = versions.Controller()
        # Lazily populated by _get_allowed_versions() on first use.
        self.allowed_versions = None
        # Media-type prefix for Accept-header based versioning.
        self.vnd_mime_type = 'application/vnd.openstack.images-'
        super(VersionNegotiationFilter, self).__init__(app)
def process_request(self, req):
"""Try to find a version first in the accept header, then the URL"""
args = {'method': req.method, 'path': req.path, 'accept': req.accept}
LOG.debug("Determining version of request: %(method)s %(path)s "
"Accept: %(accept)s", args)
# If the request is for /versions, just return the versions container
if req.path_info_peek() == "versions":
return self.versions_app.index(req, explicit=True)
accept = str(req.accept)
if accept.startswith(self.vnd_mime_type):
LOG.debug("Using media-type versioning")
token_loc = len(self.vnd_mime_type)
req_version = accept[token_loc:]
else:
LOG.debug("Using url versioning")
# Remove version in url so it doesn't conflict later
req_version = self._pop_path_info(req)
try:
version = self._match_version_string(req_version)
except ValueError:
LOG.debug("Unknown version. Returning version choices.")
return self.versions_app
req.environ['api.version'] = version
req.path_info = ''.join(('/v', str(version), req.path_info))
LOG.debug("Matched version: v%d", version)
LOG.debug('new path %s', req.path_info)
return None
def _get_allowed_versions(self):
allowed_versions = {}
if CONF.enable_v1_api:
allowed_versions['v1'] = 1
allowed_versions['v1.0'] = 1
allowed_versions['v1.1'] = 1
if CONF.enable_v2_api:
allowed_versions['v2'] = 2
allowed_versions['v2.0'] = 2
allowed_versions['v2.1'] = 2
allowed_versions['v2.2'] = 2
allowed_versions['v2.3'] = 2
allowed_versions['v2.4'] = 2
allowed_versions['v2.5'] = 2
return allowed_versions
def _match_version_string(self, subject):
"""
Given a string, tries to match a major and/or
minor version number.
:param subject: The string to check
:returns: version found in the subject
:raises: ValueError if no acceptable version could be found
"""
if self.allowed_versions is None:
self.allowed_versions = self._get_allowed_versions()
if subject in self.allowed_versions:
return self.allowed_versions[subject]
else:
raise ValueError()
def _pop_path_info(self, req):
"""
'Pops' off the next segment of PATH_INFO, returns the popped
segment. Do NOT push it onto SCRIPT_NAME.
"""
path = req.path_info
if not path:
return None
while path.startswith('/'):
path = path[1:]
idx = path.find('/')
if idx == -1:
idx = len(path)
r = path[:idx]
req.path_info = path[idx:]
return r
| {
"content_hash": "b534311acb6b80b392c53fc13d016097",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 77,
"avg_line_length": 34.10476190476191,
"alnum_prop": 0.5858698687517453,
"repo_name": "rajalokan/glance",
"id": "d36a4a91d5ab0e2f99a66febee05b2dff318c9e8",
"size": "4217",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glance/api/middleware/version_negotiation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "673"
},
{
"name": "Mako",
"bytes": "431"
},
{
"name": "Python",
"bytes": "3840692"
}
],
"symlink_target": ""
} |
import json
from unittest import mock
import fixtures
from keystoneclient import discover as ks_discover
from oslo_config import cfg
from oslo_config import fixture as config_fixture
import testtools
from testtools import matchers
from zaqarclient.queues.v1 import message
from zaqarclient import transport
from zaqarclient.transport import response
from os_collect_config import collect
from os_collect_config import exc
from os_collect_config.tests import test_heat
from os_collect_config import zaqar
class FakeKeystoneClient(test_heat.FakeKeystoneClient):
    """Keystone stub that serves the HTTP Zaqar ('messaging') endpoint.

    Asserts that the collector looks up the right catalog entry.
    """
    def url_for(self, service_type, endpoint_type):
        # _test is the test case, attached by the test_heat base fake
        # (presumably in its constructor — confirm against test_heat).
        self._test.assertEqual('messaging', service_type)
        self._test.assertEqual('publicURL', endpoint_type)
        return 'http://192.0.2.1:8888/'
class FakeKeystoneClientWebsocket(test_heat.FakeKeystoneClient):
    """Keystone stub that serves the websocket Zaqar endpoint.

    Asserts that the collector looks up 'messaging-websocket' instead of
    the plain 'messaging' service when websockets are enabled.
    """
    def url_for(self, service_type, endpoint_type):
        self._test.assertEqual('messaging-websocket', service_type)
        self._test.assertEqual('publicURL', endpoint_type)
        return 'ws://127.0.0.1:9000/'
class FakeZaqarClient(object):
    """Zaqar client stub validating endpoint/version and serving FakeQueue."""
    def __init__(self, testcase):
        self._test = testcase
    def Client(self, endpoint, conf, version):
        # The collector must request API v1.1 at the catalog endpoint.
        self._test.assertEqual(1.1, version)
        self._test.assertEqual('http://192.0.2.1:8888/', endpoint)
        return self
    def queue(self, queue_id):
        # Must match the queue_id configured in TestZaqar.setUp.
        self._test.assertEqual(
            '4f3f46d3-09f1-42a7-8c13-f91a5457192c', queue_id)
        return FakeQueue()
class FakeZaqarWebsocketClient(object):
    """Websocket transport stub returning canned Zaqar messages."""
    def __init__(self, options, messages=None, testcase=None):
        # messages: payload returned to 'message_list' requests.
        self._messages = messages
        self._test = testcase
    def send(self, request):
        # Every request must target the websocket endpoint advertised by
        # the fake keystone catalog.
        self._test.assertEqual('ws://127.0.0.1:9000/', request.endpoint)
        if request.operation == 'message_list':
            body = json.loads(request.content)
            self._test.assertEqual(
                '4f3f46d3-09f1-42a7-8c13-f91a5457192c', body['queue_name'])
        return response.Response(request, content=json.dumps(self._messages),
                                 status_code=200)
    def recv(self):
        # Out-of-band receive path delivers the canned metadata payload.
        return {'body': test_heat.META_DATA}
    def __enter__(self):
        # Supports `with` usage like the real websocket transport.
        return self
    def __exit__(self, *exc):
        pass
class FakeQueue(object):
    """Queue stub popping a single canned metadata message."""
    def pop(self):
        return iter([message.Message(
            queue=self, ttl=10, age=10, body=test_heat.META_DATA, href='')])
class FakeZaqarClientSoftwareConfig(object):
    """Variant of FakeZaqarClient whose queue serves software-config data."""
    def __init__(self, testcase):
        self._test = testcase
    def Client(self, endpoint, conf, version):
        self._test.assertEqual(1.1, version)
        self._test.assertEqual('http://192.0.2.1:8888/', endpoint)
        return self
    def queue(self, queue_id):
        self._test.assertEqual(
            '4f3f46d3-09f1-42a7-8c13-f91a5457192c', queue_id)
        # Only difference from FakeZaqarClient: the queue payload.
        return FakeQueueSoftwareConfig()
class FakeQueueSoftwareConfig(object):
    """Queue stub popping a single canned software-config message."""
    def pop(self):
        return iter([message.Message(
            queue=self, ttl=10, age=10, body=test_heat.SOFTWARE_CONFIG_DATA,
            href='')])
class TestZaqar(testtools.TestCase):
    """Tests for the Zaqar metadata collector (HTTP and websocket paths)."""
    def setUp(self):
        super(TestZaqar, self).setUp()
        # Capture log output so tests can assert on emitted messages.
        self.log = self.useFixture(fixtures.FakeLogger())
        self.useFixture(fixtures.NestedTempfile())
        collect.setup_conf()
        conf = config_fixture.Config()
        self.useFixture(conf)
        # Baseline valid [zaqar] configuration; individual tests blank
        # out single options to exercise the error paths.
        conf.config(group='zaqar', use_websockets=False)
        conf.config(group='zaqar', auth_url='http://192.0.2.1:5000/v3')
        conf.config(group='zaqar', user_id='0123456789ABCDEF')
        conf.config(group='zaqar', password='FEDCBA9876543210')
        conf.config(group='zaqar',
                    project_id='9f6b09df-4d7f-4a33-8ec3-9924d8f46f10')
        conf.config(group='zaqar',
                    queue_id='4f3f46d3-09f1-42a7-8c13-f91a5457192c')
        conf.config(group='zaqar', ssl_certificate_validation=True)
        conf.config(group='zaqar', ca_file='/foo/bar')
    @mock.patch.object(ks_discover.Discover, '__init__')
    @mock.patch.object(ks_discover.Discover, 'url_for')
    def test_collect_zaqar(self, mock_url_for, mock___init__):
        """Happy path over HTTP: metadata keys come back under 'zaqar'."""
        mock___init__.return_value = None
        mock_url_for.return_value = cfg.CONF.zaqar.auth_url
        zaqar_md = zaqar.Collector(
            keystoneclient=FakeKeystoneClient(self, cfg.CONF.zaqar),
            zaqarclient=FakeZaqarClient(self),
            discover_class=test_heat.FakeKeystoneDiscover).collect()
        self.assertThat(zaqar_md, matchers.IsInstance(list))
        self.assertEqual('zaqar', zaqar_md[0][0])
        zaqar_md = zaqar_md[0][1]
        for k in ('int1', 'strfoo', 'map_ab'):
            self.assertIn(k, zaqar_md)
            self.assertEqual(zaqar_md[k], test_heat.META_DATA[k])
    @mock.patch.object(ks_discover.Discover, '__init__')
    @mock.patch.object(ks_discover.Discover, 'url_for')
    def test_collect_zaqar_deployments(self, mock_url_for, mock___init__):
        """Software-config payloads expand into per-deployment entries."""
        mock___init__.return_value = None
        mock_url_for.return_value = cfg.CONF.zaqar.auth_url
        zaqar_md = zaqar.Collector(
            keystoneclient=FakeKeystoneClient(self, cfg.CONF.zaqar),
            zaqarclient=FakeZaqarClientSoftwareConfig(self),
            discover_class=test_heat.FakeKeystoneDiscover).collect()
        self.assertThat(zaqar_md, matchers.IsInstance(list))
        self.assertEqual('zaqar', zaqar_md[0][0])
        self.assertEqual(2, len(zaqar_md))
        self.assertEqual('zaqar', zaqar_md[0][0])
        self.assertEqual(
            test_heat.SOFTWARE_CONFIG_DATA['deployments'],
            zaqar_md[0][1]['deployments'])
        self.assertEqual(
            ('dep-name1', {'config1': 'value1'}), zaqar_md[1])
    @mock.patch.object(ks_discover.Discover, '__init__')
    @mock.patch.object(ks_discover.Discover, 'url_for')
    def test_collect_zaqar_fail(self, mock_url_for, mock___init__):
        """A keystone auth failure surfaces as ZaqarMetadataNotAvailable."""
        mock___init__.return_value = None
        mock_url_for.return_value = cfg.CONF.zaqar.auth_url
        zaqar_collect = zaqar.Collector(
            keystoneclient=test_heat.FakeFailKeystoneClient(
                self, cfg.CONF.zaqar),
            zaqarclient=FakeZaqarClient(self),
            discover_class=test_heat.FakeKeystoneDiscover)
        self.assertRaises(exc.ZaqarMetadataNotAvailable, zaqar_collect.collect)
        self.assertIn('Forbidden', self.log.output)
    def test_collect_zaqar_no_auth_url(self):
        """Missing auth_url is a configuration error, not a runtime one."""
        cfg.CONF.zaqar.auth_url = None
        zaqar_collect = zaqar.Collector()
        self.assertRaises(
            exc.ZaqarMetadataNotConfigured, zaqar_collect.collect)
        self.assertIn('No auth_url configured', self.log.output)
    def test_collect_zaqar_no_password(self):
        """Missing password raises ZaqarMetadataNotConfigured."""
        cfg.CONF.zaqar.password = None
        zaqar_collect = zaqar.Collector()
        self.assertRaises(
            exc.ZaqarMetadataNotConfigured, zaqar_collect.collect)
        self.assertIn('No password configured', self.log.output)
    def test_collect_zaqar_no_project_id(self):
        """Missing project_id raises ZaqarMetadataNotConfigured."""
        cfg.CONF.zaqar.project_id = None
        zaqar_collect = zaqar.Collector()
        self.assertRaises(
            exc.ZaqarMetadataNotConfigured, zaqar_collect.collect)
        self.assertIn('No project_id configured', self.log.output)
    def test_collect_zaqar_no_user_id(self):
        """Missing user_id raises ZaqarMetadataNotConfigured."""
        cfg.CONF.zaqar.user_id = None
        zaqar_collect = zaqar.Collector()
        self.assertRaises(
            exc.ZaqarMetadataNotConfigured, zaqar_collect.collect)
        self.assertIn('No user_id configured', self.log.output)
    def test_collect_zaqar_no_queue_id(self):
        """Missing queue_id raises ZaqarMetadataNotConfigured."""
        cfg.CONF.zaqar.queue_id = None
        zaqar_collect = zaqar.Collector()
        self.assertRaises(
            exc.ZaqarMetadataNotConfigured, zaqar_collect.collect)
        self.assertIn('No queue_id configured', self.log.output)
    def test_collect_zaqar_no_ca_file(self):
        """SSL validation without a CA file is rejected at collect time."""
        cfg.CONF.zaqar.ssl_certificate_validation = True
        cfg.CONF.zaqar.ca_file = None
        zaqar_collect = zaqar.Collector()
        self.assertRaises(
            exc.ZaqarMetadataNotConfigured, zaqar_collect.collect)
        expected = ('No CA file configured when flag ssl certificate '
                    'validation is on.')
        self.assertIn(expected, self.log.output)
    @mock.patch.object(transport, 'get_transport_for')
    @mock.patch.object(ks_discover.Discover, '__init__')
    @mock.patch.object(ks_discover.Discover, 'url_for')
    def test_collect_zaqar_websocket(self, mock_url_for, mock___init__,
                                     mock_transport):
        """Happy path over websockets using the message_list response."""
        mock___init__.return_value = None
        mock_url_for.return_value = cfg.CONF.zaqar.auth_url
        conf = config_fixture.Config()
        self.useFixture(conf)
        conf.config(group='zaqar', use_websockets=True)
        messages = {'messages': [{'body': test_heat.META_DATA, 'id': 1}]}
        ws = FakeZaqarWebsocketClient({}, messages=messages, testcase=self)
        mock_transport.return_value = ws
        zaqar_md = zaqar.Collector(
            keystoneclient=FakeKeystoneClientWebsocket(self, cfg.CONF.zaqar)
        ).collect()
        self.assertThat(zaqar_md, matchers.IsInstance(list))
        self.assertEqual('zaqar', zaqar_md[0][0])
        zaqar_md = zaqar_md[0][1]
        for k in ('int1', 'strfoo', 'map_ab'):
            self.assertIn(k, zaqar_md)
            self.assertEqual(zaqar_md[k], test_heat.META_DATA[k])
    @mock.patch.object(transport, 'get_transport_for')
    @mock.patch.object(ks_discover.Discover, '__init__')
    @mock.patch.object(ks_discover.Discover, 'url_for')
    def test_collect_zaqar_websocket_recv(self, mock_url_for, mock___init__,
                                          mock_transport):
        """With an empty message_list, metadata falls back to recv()."""
        mock___init__.return_value = None
        mock_url_for.return_value = cfg.CONF.zaqar.auth_url
        ws = FakeZaqarWebsocketClient({}, messages={}, testcase=self)
        mock_transport.return_value = ws
        conf = config_fixture.Config()
        self.useFixture(conf)
        conf.config(group='zaqar', use_websockets=True)
        zaqar_md = zaqar.Collector(
            keystoneclient=FakeKeystoneClientWebsocket(self, cfg.CONF.zaqar),
        ).collect()
        self.assertThat(zaqar_md, matchers.IsInstance(list))
        self.assertEqual('zaqar', zaqar_md[0][0])
        zaqar_md = zaqar_md[0][1]
        for k in ('int1', 'strfoo', 'map_ab'):
            self.assertIn(k, zaqar_md)
            self.assertEqual(zaqar_md[k], test_heat.META_DATA[k])
| {
"content_hash": "be23eeee24d35658accf3888f43e447d",
"timestamp": "",
"source": "github",
"line_count": 268,
"max_line_length": 79,
"avg_line_length": 39.082089552238806,
"alnum_prop": 0.6392018331105594,
"repo_name": "openstack/os-collect-config",
"id": "5c6bc59d8fa573f278541de91dd458d1b2b0dde2",
"size": "11022",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "os_collect_config/tests/test_zaqar.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "150071"
}
],
"symlink_target": ""
} |
import abc
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import base
from neutron.common import exceptions
from neutron import manager
# Attribute Map
# API attribute schema for the 'agents' collection. All fields are
# read-only except admin_state_up and description, which admins may PUT.
RESOURCE_NAME = 'agent'
RESOURCE_ATTRIBUTE_MAP = {
    RESOURCE_NAME + 's': {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True},
        'agent_type': {'allow_post': False, 'allow_put': False,
                       'is_visible': True},
        'binary': {'allow_post': False, 'allow_put': False,
                   'is_visible': True},
        'topic': {'allow_post': False, 'allow_put': False,
                  'is_visible': True},
        'host': {'allow_post': False, 'allow_put': False,
                 'is_visible': True},
        'admin_state_up': {'allow_post': False, 'allow_put': True,
                           'convert_to': attr.convert_to_boolean,
                           'is_visible': True},
        'created_at': {'allow_post': False, 'allow_put': False,
                       'is_visible': True},
        'started_at': {'allow_post': False, 'allow_put': False,
                       'is_visible': True},
        'heartbeat_timestamp': {'allow_post': False, 'allow_put': False,
                                'is_visible': True},
        'alive': {'allow_post': False, 'allow_put': False,
                  'is_visible': True},
        'configurations': {'allow_post': False, 'allow_put': False,
                           'is_visible': True},
        'description': {'allow_post': False, 'allow_put': True,
                        'is_visible': True,
                        'validate': {'type:string': None}},
    },
}
class AgentNotFound(exceptions.NotFound):
    """Raised when no agent exists with the requested id."""
    message = _("Agent %(id)s could not be found")
class AgentNotFoundByTypeHost(exceptions.NotFound):
    """Raised when no agent matches the (agent_type, host) pair."""
    message = _("Agent with agent_type=%(agent_type)s and host=%(host)s "
                "could not be found")
class MultipleAgentFoundByTypeHost(exceptions.Conflict):
    """Raised when (agent_type, host) matches more than one agent."""
    message = _("Multiple agents with agent_type=%(agent_type)s and "
                "host=%(host)s found")
# NOTE(review): subclasses NotFound although a bad timestamp is not a
# lookup failure — confirm the intended HTTP status mapping.
class AgentInvalidTimestamp(exceptions.NotFound):
    """Raised when an agent report carries an invalid timestamp."""
    message = _("Message with invalid timestamp received")
class Agent(object):
    """Agent management extension."""
    @classmethod
    def get_name(cls):
        return "agent"
    @classmethod
    def get_alias(cls):
        return "agent"
    @classmethod
    def get_description(cls):
        return "The agent management extension."
    @classmethod
    def get_namespace(cls):
        return "http://docs.openstack.org/ext/agent/api/v2.0"
    @classmethod
    def get_updated(cls):
        # Timestamp of the last change to this extension's definition.
        return "2013-02-03T10:00:00-00:00"
    @classmethod
    def get_resources(cls):
        """Returns Ext Resources."""
        # Register plural->singular mappings so attribute validation
        # can resolve the collection name.
        my_plurals = [(key, key[:-1]) for key in RESOURCE_ATTRIBUTE_MAP.keys()]
        attr.PLURALS.update(dict(my_plurals))
        plugin = manager.NeutronManager.get_plugin()
        params = RESOURCE_ATTRIBUTE_MAP.get(RESOURCE_NAME + 's')
        controller = base.create_resource(RESOURCE_NAME + 's',
                                          RESOURCE_NAME,
                                          plugin, params
                                          )
        ex = extensions.ResourceExtension(RESOURCE_NAME + 's',
                                          controller)
        return [ex]
    def get_extended_resources(self, version):
        # The agent attributes only exist in the v2.0 API.
        if version == "2.0":
            return RESOURCE_ATTRIBUTE_MAP
        else:
            return {}
# NOTE(review): uses @abc.abstractmethod without an ABCMeta metaclass,
# so the abstract methods are not actually enforced — confirm whether
# subclasses rely on that enforcement before adding it.
class AgentPluginBase(object):
    """REST API to operate the Agent.
    All methods must be called in an admin context.
    """
    def create_agent(self, context, agent):
        """Create agent.
        This operation is not allowed through the REST API.
        @raise exceptions.BadRequest:
        """
        raise exceptions.BadRequest()
    @abc.abstractmethod
    def delete_agent(self, context, id):
        """Delete agent.
        Agents register themselves on reporting state.
        But if an agent does not report its status
        for a long time (for example, it is dead forever),
        admin can remove it. Agents must be disabled before
        being removed.
        """
        pass
    @abc.abstractmethod
    def update_agent(self, context, agent):
        """Disable or enable the agent.
        Description can also be updated. Some agents cannot be disabled, such
        as plugins, services. An error code should be reported in this case.
        @raise exceptions.BadRequest:
        """
        pass
    @abc.abstractmethod
    def get_agents(self, context, filters=None, fields=None):
        pass
    @abc.abstractmethod
    def get_agent(self, context, id, fields=None):
        pass
| {
"content_hash": "7bb5f9ef9ec0f7ea7bccbda987811ae4",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 79,
"avg_line_length": 31.776315789473685,
"alnum_prop": 0.5614906832298137,
"repo_name": "nash-x/hws",
"id": "21ae446960b339892f721491aee0bd1245a250e2",
"size": "5421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/extensions/agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1043"
},
{
"name": "PLpgSQL",
"bytes": "12782"
},
{
"name": "Python",
"bytes": "20443623"
},
{
"name": "Shell",
"bytes": "4643"
}
],
"symlink_target": ""
} |
from sklearn.naive_bayes import GaussianNB
class GNB(object):
    """Gaussian Naive Bayes classifier for vehicle lane-change prediction.

    Observations are 4-tuples (s, d, s_dot, d_dot) in Frenet coordinates;
    labels are one of 'left', 'keep', 'right'.
    """
    def __init__(self):
        self.possible_labels = ['left', 'keep', 'right']
        # Lane width in meters, used to fold d into an in-lane offset.
        self.lane_width = 4.0
        self.clf = GaussianNB()
    def train(self, data, labels):
        """
        Trains the classifier with N data points and labels.
        INPUTS
        data - array of N observations
          - Each observation is a tuple with 4 values: s, d,
            s_dot and d_dot.
          - Example : [
                [3.5, 0.1, 5.9, -0.02],
                [8.0, -0.3, 3.0, 2.2],
                ...
            ]
        labels - array of N labels
          - Each label is one of "left", "keep", or "right".
        """
        X_train = self._get_features(data)
        self.clf.fit(X_train, labels)
    def predict(self, observation):
        """
        Once trained, this method is called and expected to return
        a predicted behavior for the given observation.
        INPUTS
        observation - a 4 tuple with s, d, s_dot, d_dot.
          - Example: [3.5, 0.1, 8.5, -0.2]
        OUTPUT
        A label representing the best guess of the classifier. Can
        be one of "left", "keep" or "right".
        """
        x_test = self._get_features([observation])
        return self.clf.predict(x_test)
    def _get_features(self, data):
        """Extract [d_in_lane, d_dot] feature vectors from observations.

        s and s_dot are deliberately dropped: only the lateral offset
        within the lane (d modulo lane width) and the lateral velocity
        are informative for lane-change behavior.
        """
        return [[point[1] % self.lane_width, point[3]] for point in data]
| {
"content_hash": "c8386ad6b7100dc9501db9efa80899b3",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 67,
"avg_line_length": 26.928571428571427,
"alnum_prop": 0.5193633952254642,
"repo_name": "Deborah-Digges/SDC-ND-term-3",
"id": "29bb96e5d5c519438b834f774e9920350ac4e6af",
"size": "1885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "p1-path-planning/class-quizzes/naive-bayes-behavior-prediction/classifier.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "234071"
},
{
"name": "C++",
"bytes": "9619937"
},
{
"name": "CMake",
"bytes": "212426"
},
{
"name": "CSS",
"bytes": "5383"
},
{
"name": "Cuda",
"bytes": "131738"
},
{
"name": "Fortran",
"bytes": "1326303"
},
{
"name": "JavaScript",
"bytes": "7839"
},
{
"name": "Python",
"bytes": "19099"
},
{
"name": "Shell",
"bytes": "20151"
}
],
"symlink_target": ""
} |
import numpy as np
import theano
from theano import function
from theano.sandbox.rng_mrg import MRG_RandomStreams
import theano.tensor as T
def energy(W, V, H):
    """Compute the energy of each (visible, hidden) configuration pair.

    W : theano matrix of RBM weights, num visible x num hidden.
    V : theano matrix of visible-unit assignments; one configuration
        per row, one unit per column.
    H : theano matrix of hidden-unit assignments, laid out like V.

    Returns a theano vector whose i-th element is the energy of the
    configuration (V[i, :], H[i, :]). This RBM has no biases, only
    weights, so the energy is simply -sum_j (V W)_ij * H_ij.
    """
    interaction = T.dot(V, W) * H
    return -interaction.sum(axis=1)
def grad_expected_energy(W, V, H):
    """Differentiate the mean energy of sampled configurations w.r.t. W.

    W : theano matrix of RBM weights, num visible x num hidden.
    V : theano matrix of visible-unit samples; one sample per row.
    H : theano matrix of hidden-unit samples; one sample per row.

    Returns the matrix of derivatives of the expected energy with
    respect to the weights. V and H are treated as constants so the
    gradient does not flow back through the sampling process.
    """
    mean_energy = energy(W, V, H).mean()
    return T.grad(mean_energy, W, consider_constant=[V, H])
if __name__ == "__main__":
m = 2
nv = 3
nh = 4
h0 = T.alloc(1., m, nh)
rng_factory = MRG_RandomStreams(42)
W = rng_factory.normal(size=(nv, nh), dtype=h0.dtype)
pv = T.nnet.sigmoid(T.dot(h0, W.T))
v = rng_factory.binomial(p=pv, size=pv.shape, dtype=W.dtype)
ph = T.nnet.sigmoid(T.dot(v, W))
h = rng_factory.binomial(p=ph, size=ph.shape, dtype=W.dtype)
class _ElemwiseNoGradient(theano.tensor.Elemwise):
def grad(self, inputs, output_gradients):
raise TypeError("You shouldn't be differentiating through "
"the sampling process.")
return [ theano.gradient.DisconnectedType()() ]
block_gradient = _ElemwiseNoGradient(theano.scalar.identity)
v = block_gradient(v)
h = block_gradient(h)
g = grad_expected_energy(W, v, h)
stats = T.dot(v.T, h) / m
f = function([], [g, stats])
g, stats = f()
assert np.allclose(g, -stats)
print "SUCCESS!"
| {
"content_hash": "6763dfbaa834d3d45c0ecbbb88a7cdd6",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 71,
"avg_line_length": 32.38028169014085,
"alnum_prop": 0.6285341452805567,
"repo_name": "goodfeli/theano_exercises",
"id": "4c6ba548fbf222d5ca58cb1d77626e934e2bc3cd",
"size": "2299",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "02_advanced/01_symbolic/03_energy_soln.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "38745"
}
],
"symlink_target": ""
} |
import pytest
import os
from appr.new import new_package
import appr.manifest_jsonnet
@pytest.fixture()
def home_dir(monkeypatch, fake_home):
    """Chdir into the fake home directory and return its path."""
    path = str(fake_home)
    monkeypatch.chdir(path)
    return path
@pytest.fixture()
def new(home_dir):
    # Scaffold a package under the fake home (cwd set by home_dir).
    new_package("organization/newpackage")
@pytest.fixture()
def new_with_comments(home_dir):
    # Scaffold a second package with commented template files.
    new_package("organization/newpackage2", with_comments=True)
def test_directory(new):
    """Scaffolding creates the package directory."""
    assert os.path.exists("organization/newpackage")
def test_directory_comments(new_with_comments):
    """Scaffolding with comments also creates its package directory."""
    assert os.path.exists("organization/newpackage2")
def test_files_created(new):
    """The scaffold must contain templates dir, manifest and README."""
    expected_entries = ("templates", "manifest.yaml", "README.md")
    for entry in expected_entries:
        assert os.path.exists(os.path.join("organization/newpackage", entry))
def test_load_manifest(new, monkeypatch, fake_home):
    """The generated manifest parses and exposes name and deploy list."""
    # Bind the package name once and reuse it, matching the style of
    # test_load_manifest_comments (the original duplicated the literal).
    name = "organization/newpackage"
    monkeypatch.chdir(os.path.join(str(fake_home), name))
    m = appr.manifest_jsonnet.ManifestJsonnet()
    assert m.package["name"] == name
    assert m.deploy == [{'name': "$self"}]
def test_load_manifest_comments(new_with_comments, monkeypatch, fake_home):
    """A commented scaffold's manifest parses identically."""
    name = "organization/newpackage2"
    monkeypatch.chdir(os.path.join(str(fake_home), name))
    m = appr.manifest_jsonnet.ManifestJsonnet()
    assert m.package["name"] == name
    assert m.deploy == [{'name': "$self"}]
| {
"content_hash": "6f7cc40f33b3ab650b4bdb578adcec54",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 75,
"avg_line_length": 27.12,
"alnum_prop": 0.7087020648967551,
"repo_name": "app-registry/appr",
"id": "e03bd585a4d83ddd5f71c1043e1d0cff1bdfd0dd",
"size": "1356",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/wip_test_new.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "Dockerfile",
"bytes": "1285"
},
{
"name": "Makefile",
"bytes": "3320"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "330235"
},
{
"name": "Shell",
"bytes": "408"
}
],
"symlink_target": ""
} |
from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import render
from estacionamientos.controller import *
from estacionamientos.forms import EstacionamientoExtendedForm
from estacionamientos.forms import EstacionamientoForm
from estacionamientos.forms import EstacionamientoReserva
from estacionamientos.models import Estacionamiento, ReservasModel
listaReserva = []
# View used to list all parking lots and to register new ones.
def estacionamientos_all(request):
    """List all parking lots; on POST, create a new one (max 5)."""
    global listaReserva
    listaReserva = []
    # A POST to this view means the client wants to add a new parking lot.
    estacionamientos = Estacionamiento.objects.all()
    if request.method == 'POST':
        # Build a form from the submitted data.
        form = EstacionamientoForm(request.POST)
        # Requirement: cap the number of parking lots at 5.
        if len(estacionamientos) >= 5:
            return render(request, 'templateMensaje.html',
                          {'color':'red', 'mensaje':'No se pueden agregar más estacionamientos'})
        # If the form validates, build the object through the model
        # constructor and persist it.
        if form.is_valid():
            obj = Estacionamiento(
                Propietario = form.cleaned_data['propietario'],
                Nombre = form.cleaned_data['nombre'],
                Direccion = form.cleaned_data['direccion'],
                Rif = form.cleaned_data['rif'],
                Telefono_1 = form.cleaned_data['telefono_1'],
                Telefono_2 = form.cleaned_data['telefono_2'],
                Telefono_3 = form.cleaned_data['telefono_3'],
                Email_1 = form.cleaned_data['email_1'],
                Email_2 = form.cleaned_data['email_2']
            )
            obj.save()
            # Reload the queryset since we just added a new row.
            estacionamientos = Estacionamiento.objects.all()
    # Not a POST means it is a GET, so render an empty form.
    else:
        form = EstacionamientoForm()
    return render(request, 'base.html', {'form': form, 'estacionamientos': estacionamientos})
def estacionamiento_detail(request, _id):
    """Show one parking lot; on POST, update its schedule and rates."""
    _id = int(_id)
    # Make sure the object exists before doing anything else.
    try:
        estacion = Estacionamiento.objects.get(id = _id)
    except ObjectDoesNotExist:
        return render(request, '404.html')
    global listaReserva
    # Changing the schedule invalidates any cached reservation slots.
    listaReserva = []
    if request.method == 'POST':
        # Read the extended (schedule/rate) form.
        form = EstacionamientoExtendedForm(request.POST)
        # If the form validates, check the schedule and persist it.
        if form.is_valid():
            hora_in = form.cleaned_data['horarioin']
            hora_out = form.cleaned_data['horarioout']
            reserva_in = form.cleaned_data['horario_reserin']
            reserva_out = form.cleaned_data['horario_reserout']
            # Validate the reservation window against opening hours.
            m_validado = HorarioEstacionamiento(hora_in, hora_out, reserva_in, reserva_out)
            if not m_validado[0]:
                return render(request, 'templateMensaje.html', {'color':'red', 'mensaje': m_validado[1]})
            estacion.Tarifa = form.cleaned_data['tarifa']
            estacion.Apertura = hora_in
            estacion.Cierre = hora_out
            estacion.Reservas_Inicio = reserva_in
            estacion.Reservas_Cierre = reserva_out
            estacion.NroPuesto = form.cleaned_data['puestos']
            estacion.save()
    else:
        form = EstacionamientoExtendedForm()
    return render(request, 'estacionamiento.html', {'form': form, 'estacionamiento': estacion})
def estacionamiento_reserva(request, _id):
    """Reserve a parking slot: GET shows the form, POST books a space."""
    _id = int(_id)
    # Make sure the object exists before doing anything else.
    try:
        estacion = Estacionamiento.objects.get(id = _id)
    except ObjectDoesNotExist:
        return render(request, '404.html')
    global listaReserva
    # Before handling the reservation, seed the in-memory slot list with
    # the persisted reservations if it is empty.
    if len(listaReserva) < 1:
        Puestos = ReservasModel.objects.filter(Estacionamiento = estacion).values_list('Puesto', 'InicioReserva', 'FinalReserva')
        # Sentinel boundary slots at opening and closing time.
        elem1 = (estacion.Apertura, estacion.Apertura)
        elem2 = (estacion.Cierre, estacion.Cierre)
        listaReserva = [[elem1, elem2] for _ in range(estacion.NroPuesto)]
        for obj in Puestos:
            puesto = busquedaBin(obj[1], obj[2], listaReserva[obj[0]])
            listaReserva[obj[0]] = insertarReserva(obj[1], obj[2], puesto[0], listaReserva[obj[0]])
    # On GET render the reservation form for this parking lot.
    if request.method == 'GET':
        form = EstacionamientoReserva()
        return render(request, 'estacionamientoReserva.html', {'form': form, 'estacionamiento': estacion})
    # On POST the client is submitting a reservation request.
    elif request.method == 'POST':
        form = EstacionamientoReserva(request.POST)
        # Validate through the form's own validators first.
        if form.is_valid():
            inicio_reserva = form.cleaned_data['inicio']
            final_reserva = form.cleaned_data['final']
            # Then validate against the lot's reservation window.
            m_validado = validarHorarioReserva(inicio_reserva, final_reserva, estacion.Reservas_Inicio, estacion.Reservas_Cierre)
            # If invalid, bounce back with the validation message.
            if not m_validado[0]:
                return render(request, 'templateMensaje.html', {'color':'red', 'mensaje': m_validado[1]})
            # Within a valid range: search the slot list for a free spot.
            # x is (space_index, position, available_flag) — presumably;
            # confirm against buscar()'s implementation.
            x = buscar(inicio_reserva, final_reserva, listaReserva)
            if x[2] == True :
                reservar(inicio_reserva, final_reserva, listaReserva)
                reservaFinal = ReservasModel(
                    Estacionamiento = estacion,
                    Puesto = x[0],
                    InicioReserva = inicio_reserva,
                    FinalReserva = final_reserva
                )
                reservaFinal.save()
                return render(request, 'templateMensaje.html', {'color':'green', 'mensaje':'Se realizo la reserva exitosamente'})
            else:
                return render(request, 'templateMensaje.html', {'color':'red', 'mensaje':'No hay un puesto disponible para ese horario'})
    else:
        form = EstacionamientoReserva()
    return render(request, 'estacionamientoReserva.html', {'form': form, 'estacionamiento': estacion})
| {
"content_hash": "b497f6ccd68d99b73ecc171efe8a3620",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 141,
"avg_line_length": 44.851612903225806,
"alnum_prop": 0.6006904487917146,
"repo_name": "Sealos/SAGE",
"id": "51a4499f4f873dec922f68ab4f6ea89b32df5fb8",
"size": "6978",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/SAGE/estacionamientos/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "117833"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "70012"
}
],
"symlink_target": ""
} |
from .resource import Resource
from .ec2 import EC2InstanceResource
from .iam import IAMRoleResource, IAMInstanceProfileResource
import pkg_resources
class ElasticBeanstalkEnvironment(EC2InstanceResource):
    """
    EC2 server resource provisioned with the AWS CodeDeploy agent.

    Takes the same keyword parameters as EC2InstanceResource, plus the
    deployment group the instance is tagged into. Automatically
    generates and depends upon a CodeDeployServiceRole and an IAM
    instance profile granting EC2/CodeDeploy service access.
    """
    def _get_init_script(self, region_name):
        """Render the bundled CodeDeploy bootstrap script for a region."""
        resource_package = __name__
        resource_path = '/'.join(('scripts', 'code_deploy_init.sh'))
        s = pkg_resources.resource_string(resource_package, resource_path).decode('utf-8')
        return s.replace("{region_name}", region_name)

    def generate_sub_resources(self):
        """Create (lazily) and register the service-role and profile deps."""
        res = super().generate_sub_resources()
        if self._service_role is None:
            self._service_role = CodeDeployServiceRoleResource(self._context)
        self.add_dependency(self._service_role)
        res.append(self._service_role)
        res.append(self._instance_profile)
        self.add_dependency(self._instance_profile)
        return res

    def __init__(self, context, deployment_group, **kwargs):
        self._deployment_group = deployment_group
        self._service_role = None
        kwargs = self.configure_kwargs(context, kwargs)
        super().__init__(context, **kwargs)
        self.add_dependency(self._deployment_group)

    def configure_kwargs(self, context, kwargs):
        """
        Configure tags, init_script, and roles for CodeDeploy.
        """
        # BUG FIX: the original read kwargs[region_name], using the
        # default *value* "us-west-1" as the dict key, which raised
        # KeyError whenever a caller actually supplied region_name.
        region_name = kwargs.get("region_name", "us-west-1")
        kwargs['tags'] = [self._deployment_group.get_ec2_tag()]
        kwargs['init_script'] = self._get_init_script(region_name)
        # Build an instance profile allowing both EC2 and CodeDeploy.
        permissions = kwargs.get('permissions', [])
        roles = kwargs.get('roles', [])
        services = kwargs.get('services', []) +\
            ['ec2.amazonaws.com', 'codedeploy.amazonaws.com']
        self._instance_profile = IAMInstanceProfileResource(
            context,
            permissions=permissions,
            roles=roles,
            services=services
        )
        kwargs['instance_profile'] = self._instance_profile
        return kwargs
class CodeDeployServiceRoleResource(IAMRoleResource):
    """IAM role trusted by EC2 and CodeDeploy, with the AWS-managed
    AWSCodeDeployRole policy attached on top of any caller-supplied
    permissions, inline policies and policy ARNs."""
    def __init__(self, context, permissions=None, policies=None,
                 policy_arns=None):
        # Use None defaults instead of mutable [] defaults, which would
        # be shared across every call to __init__.
        permissions = [] if permissions is None else permissions
        policies = [] if policies is None else policies
        policy_arns = [] if policy_arns is None else policy_arns
        services = ['ec2.amazonaws.com', 'codedeploy.amazonaws.com']
        super().__init__(context,
                         permissions=permissions,
                         services=services,
                         policies=policies,
                         policy_arns=policy_arns +
                         ["arn:aws:iam::aws:policy/service-role/AWSCodeDeployRole"]
        )
class CodeDeployDeploymentGroupResource(Resource):
    """CloudFormation AWS::CodeDeploy::DeploymentGroup resource.

    Targets EC2 instances carrying a `redleaderDeploymentGroup` tag whose
    value is this resource's id; creates the backing application and the
    CodeDeploy service role as sub-resources.
    """
    def __init__(self, context, application_name,
                 deployment_group_name=None, cf_params=None):
        # Avoid a shared mutable {} default argument for cf_params.
        super().__init__(context, cf_params if cf_params is not None else {})
        self._application_name = application_name
        self._application = None
        self._service_role = CodeDeployServiceRoleResource(self._context)
        self.add_dependency(self._service_role)
        # Match instances tagged with this deployment group's id.
        self._ec2_tag_filters = [
            {"Key": "redleaderDeploymentGroup",
             "Type": "KEY_AND_VALUE",
             "Value": self._id_placeholder()
            }
        ]
        if deployment_group_name is None:
            self._deployment_group_name = self._id_placeholder()
        else:
            self._deployment_group_name = deployment_group_name

    def get_ec2_tag(self):
        """Tag an EC2 instance must carry to belong to this group."""
        return {"Key": "redleaderDeploymentGroup", "Value": self.get_id()}

    def generate_sub_resources(self):
        """Register the service role and (lazily) the application."""
        res = super().generate_sub_resources()
        res.append(self._service_role)
        if self._application is None:
            self._application = CodeDeployApplicationResource(
                self._context, self._application_name)
            self.add_dependency(self._application)
        res.append(self._application)
        return res

    def _cloud_formation_template(self):
        """Return the AWS::CodeDeploy::DeploymentGroup template fragment."""
        return {
            "Type" : "AWS::CodeDeploy::DeploymentGroup",
            "Properties" : {
                "ApplicationName" : self._application_name,
                "DeploymentGroupName": self._deployment_group_name,
                "Ec2TagFilters" : self._ec2_tag_filters,
                "ServiceRoleArn" : Resource.cf_attr(self._service_role, "Arn")
            }
        }
class CodeDeployApplicationResource(Resource):
    """CloudFormation CodeDeploy application."""

    def __init__(self, context, application_name, cf_params=None):
        """
        :param context: RedLeader context
        :param application_name: CodeDeploy application name
        :param cf_params: optional extra CloudFormation parameters
        """
        # None sentinel instead of a shared mutable default dict.
        super().__init__(context, cf_params if cf_params is not None else {})
        self._application_name = application_name

    def _cloud_formation_template(self):
        """Return the CloudFormation fragment for this application."""
        return {
            "Type": "AWS::CodeDeploy::Application",
            "Properties": {
                "ApplicationName": self._application_name,
            }
        }
| {
"content_hash": "075e95f8f5146ba0a617b440580dd58a",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 90,
"avg_line_length": 37.76086956521739,
"alnum_prop": 0.6029552868931107,
"repo_name": "mmcdermo/RedLeader",
"id": "16e4c0fba7202567cfc1895c4ef77b871bb834db",
"size": "5211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "redleader/resources/codedeploy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "103986"
},
{
"name": "Shell",
"bytes": "499"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.conf.urls import include
from django.conf.urls.static import static
from django.urls import path
# Core GeoReport v2 API is always mounted.
urlpatterns = [
    path(settings.GEOREPORT_API_ROOT, include('issues.api.urls', namespace='georeport/v2')),
]

# Optional simple UI, only when its app is installed.
if 'issues_simple_ui' in settings.INSTALLED_APPS:
    urlpatterns += [path(r'', include('issues_simple_ui.urls'))]

# Admin site, only when django.contrib.admin is installed.
if 'django.contrib.admin' in settings.INSTALLED_APPS:
    from django.contrib.admin.sites import site
    urlpatterns += [path('admin/', site.urls)]

# Serve uploaded media straight from Django during development only.
if settings.DEBUG:  # pragma: no cover
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| {
"content_hash": "e5d1971b3397851abc927d7ef7ded09d",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 92,
"avg_line_length": 34.36842105263158,
"alnum_prop": 0.7565084226646248,
"repo_name": "6aika/issue-reporting",
"id": "d68301d640d8768f523ce46647a7bd3280b0168d",
"size": "653",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cfh/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "307"
},
{
"name": "Dockerfile",
"bytes": "746"
},
{
"name": "HTML",
"bytes": "8542"
},
{
"name": "JavaScript",
"bytes": "24775"
},
{
"name": "Python",
"bytes": "153728"
}
],
"symlink_target": ""
} |
from sqlalchemy import Column, ForeignKey, Integer
from sqlalchemy.types import String
from sqlalchemy.orm import relationship, backref
from models import dbsession
from models.BaseModels import DatabaseObject, generate_uuid
class Browser(DatabaseObject):
    """Browser fingerprint captured for a Target.

    All string columns are length-limited; the property setters below
    truncate incoming values to the column width before storage.
    """

    # Public identifier used externally instead of the numeric primary key.
    uuid = Column(String(32), unique=True, default=generate_uuid)

    _name = Column(String(32))
    _version = Column(String(32))
    _codename = Column(String(32))
    _platform = Column(String(32))
    _user_agent = Column(String(64))
    _oscpu = Column(String(32))

    # Belongs to Target
    target_id = Column(Integer, ForeignKey('target.id'), nullable=False)
    target = relationship("Target", backref=backref("browser", lazy="select"))

    @classmethod
    def all(cls):
        """Return every Browser row."""
        return dbsession.query(cls).all()

    @classmethod
    def by_id(cls, _id):
        """Return the Browser with primary key *_id*, or None."""
        return dbsession.query(cls).filter_by(id=_id).first()

    @classmethod
    def by_uuid(cls, _uuid):
        """Return the Browser with public uuid *_uuid*, or None."""
        return dbsession.query(cls).filter_by(uuid=_uuid).first()

    # Properties
    @property
    def name(self):
        """Browser name (max 32 chars)."""
        return self._name

    @name.setter
    def name(self, value):
        # NOTE(review): slices before str() — value must already be sliceable
        # (str/bytes); a non-sliceable value would raise TypeError here.
        self._name = str(value[:32])

    @property
    def version(self):
        """Browser version string (max 32 chars)."""
        return self._version

    @version.setter
    def version(self, value):
        self._version = str(value[:32])

    @property
    def codename(self):
        """Browser codename (max 32 chars)."""
        return self._codename

    @codename.setter
    def codename(self, value):
        self._codename = str(value[:32])

    @property
    def platform(self):
        """Reported platform (max 32 chars)."""
        return self._platform

    @platform.setter
    def platform(self, value):
        self._platform = str(value[:32])

    @property
    def user_agent(self):
        """User-agent string (max 64 chars)."""
        return self._user_agent

    @user_agent.setter
    def user_agent(self, value):
        self._user_agent = str(value[:64])

    @property
    def oscpu(self):
        """Reported OS/CPU string (max 32 chars)."""
        return self._oscpu

    @oscpu.setter
    def oscpu(self, value):
        self._oscpu = str(value[:32])
"content_hash": "2c0de7fa970210d6d1b42dd41df12d84",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 78,
"avg_line_length": 23.710843373493976,
"alnum_prop": 0.6351626016260162,
"repo_name": "lunarca/fngrpt",
"id": "6dcf2a2bb851681d3cab154a8dc1e7fabd12fc60",
"size": "1993",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/BrowserModel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "821"
},
{
"name": "HTML",
"bytes": "9944"
},
{
"name": "JavaScript",
"bytes": "1897"
},
{
"name": "Python",
"bytes": "67762"
},
{
"name": "Shell",
"bytes": "1782"
}
],
"symlink_target": ""
} |
import datetime
import urllib
import warnings
from abc import abstractmethod
from typing import Union, Any, Dict, List, Optional
from pykechain.enums import FilterType, Category, PropertyType, ScopeStatus
from pykechain.exceptions import IllegalArgumentError, NotFoundError
from pykechain.models.input_checks import (
check_base,
check_enum,
check_type,
check_text,
check_datetime,
)
from pykechain.models.widgets.enums import MetaWidget
class BaseFilter:
    """Base class for any filters used in pykechain."""

    def __eq__(self, other):
        """Two filters are equal when they serialize to the same options dict."""
        if not isinstance(other, self.__class__):
            return False
        serialize = self.__class__.write_options
        return serialize(filters=[self]) == serialize(filters=[other])

    @classmethod
    @abstractmethod
    def parse_options(cls, options: Dict) -> List["BaseFilter"]:  # pragma: no cover
        """
        Convert the dict & string-based definition of a filter to a list of Filter objects.

        :param options: options dict from a property or meta dict from a widget.
        :return: list of Filter objects
        :rtype list
        """
        pass

    @classmethod
    @abstractmethod
    def write_options(cls, filters: List) -> Dict:
        """
        Convert the list of Filter objects to a dict.

        :param filters: List of BaseFilter objects
        :returns options dict to be used to update the options dict of a property
        """
        # Subclasses extend this; the base implementation only validates types.
        if any(not isinstance(candidate, cls) for candidate in filters):
            raise IllegalArgumentError(f"All `filters` must be of type `{cls}`")
class PropertyValueFilter(BaseFilter):
    """
    Property value filter, used for Part reference properties and filtered grid widgets.

    :ivar id: property model UUID
    :ivar value: value of the filter
    :ivar type: filter type
    """

    def __init__(
        self,
        property_model: Union[str, "Property"],
        value: Any,
        filter_type: FilterType,
    ):
        """Create PropertyValueFilter instance.

        :param property_model: Property model (or its UUID) the filter applies to
        :param value: filter value; string values are URL-decoded on ingestion
        :param filter_type: a FilterType enum member
        """
        from pykechain.models import Property

        property_model_id = check_base(property_model, Property, "property_model")
        check_enum(filter_type, FilterType, "filter_type")

        self.id = property_model_id
        # Values may arrive URL-encoded (e.g. parsed from an options string);
        # store the decoded form — format() re-encodes on the way out.
        if isinstance(value, str):
            self.value = urllib.parse.unquote(value)
        else:
            self.value = value
        self.type = filter_type

    def __repr__(self):
        return f"PropertyValueFilter {self.type}: {self.value} ({self.id})"

    def format(self) -> str:
        """Format PropertyValueFilter as a string (``<uuid>:<value>:<type>``)."""
        if isinstance(self.value, str):
            # URL-encode so ":" and "," inside values do not break the format.
            value = urllib.parse.quote(self.value)
        elif isinstance(self.value, bool):
            # Booleans are serialized as lowercase "true"/"false".
            value = str(self.value).lower()
        else:
            value = self.value
        return f"{self.id}:{value}:{self.type}"

    def validate(self, part_model: "Part") -> None:
        """
        Validate data of the PropertyValueFilter.

        Raises when the property does not belong to *part_model* or is not a
        Property model; merely warns when the filter type does not match the
        conventional type for the property's value type.

        :param part_model: Part model to which the filter will be applied.
        :returns None
        """
        from pykechain.models import Part

        check_base(part_model, Part, "part_model")
        try:
            prop = part_model.property(self.id)
        except NotFoundError:
            raise IllegalArgumentError(
                "Property value filters can only be set on properties belonging to the selected "
                "Part model."
            )

        if prop.category != Category.MODEL:
            raise IllegalArgumentError(
                'Property value filters can only be set on Property models, received "{}".'.format(
                    prop
                )
            )
        else:
            property_type = prop.type
            # Each property value type has one conventional filter type;
            # mismatches only warn, for backwards compatibility.
            if (
                property_type
                in (
                    PropertyType.BOOLEAN_VALUE,
                    PropertyType.REFERENCES_VALUE,
                    PropertyType.ACTIVITY_REFERENCES_VALUE,
                )
                and self.type != FilterType.EXACT
            ):
                warnings.warn(
                    "A PropertyValueFilter on a `{}` property should use "
                    "filter type `{}`, not `{}`".format(
                        property_type, FilterType.EXACT, self.type
                    ),
                    Warning,
                )
            elif (
                property_type
                in (
                    PropertyType.TEXT_VALUE,
                    PropertyType.CHAR_VALUE,
                    PropertyType.LINK_VALUE,
                    PropertyType.SINGLE_SELECT_VALUE,
                    PropertyType.USER_REFERENCES_VALUE,
                    PropertyType.SCOPE_REFERENCES_VALUE,
                )
                and self.type != FilterType.CONTAINS
            ):
                warnings.warn(
                    "A PropertyValueFilter on a `{}` property should use "
                    "filter type `{}`, not `{}`".format(
                        property_type, FilterType.CONTAINS, self.type
                    ),
                    Warning,
                )
            elif property_type in (
                PropertyType.INT_VALUE,
                PropertyType.FLOAT_VALUE,
                PropertyType.DATE_VALUE,
                PropertyType.DATETIME_VALUE,
            ) and self.type not in (
                FilterType.LOWER_THAN_EQUAL,
                FilterType.GREATER_THAN_EQUAL,
            ):
                warnings.warn(
                    "A PropertyValueFilter on a `{}` property should use "
                    "filter type `{}` or `{}`, not `{}`".format(
                        property_type,
                        FilterType.LOWER_THAN_EQUAL,
                        FilterType.GREATER_THAN_EQUAL,
                        self.type,
                    ),
                    Warning,
                )
            elif (
                property_type in (PropertyType.MULTI_SELECT_VALUE,)
                and self.type != FilterType.CONTAINS_SET
            ):
                warnings.warn(
                    "A PropertyValueFilter on a `{}` property should use "
                    "filter type `{}`, not `{}`".format(
                        property_type, FilterType.CONTAINS_SET, self.type
                    ),
                    Warning,
                )
            else:
                # Any other property type: no convention to enforce.
                pass

    @classmethod
    def parse_options(cls, options: Dict) -> List["PropertyValueFilter"]:
        """
        Convert the dict & string-based definition of a property value filter to a list of PropertyValueFilter objects.

        :param options: options dict from a multi-reference property or meta dict from a filtered grid widget.
        :return: list of PropertyValueFilter objects
        :rtype list
        """
        check_type(options, dict, "options")

        prefilter_string = options.get(MetaWidget.PREFILTERS, {}).get("property_value")
        prefilter_string_list = prefilter_string.split(",") if prefilter_string else []

        prefilters = list()
        for pf_string in prefilter_string_list:
            prefilter_raw = pf_string.split(":")
            if len(prefilter_raw) == 1:  # FIXME encoding problem KE-chain
                # Fall back on the percent-encoded separator variant.
                prefilter_raw = pf_string.split("%3A")
            prefilters.append(PropertyValueFilter(*prefilter_raw))
        return prefilters

    @classmethod
    def write_options(cls, filters: List) -> Dict:
        """
        Convert the list of Filter objects to a dict.

        :param filters: List of BaseFilter objects
        :returns options dict to be used to update the options dict of a property
        """
        super().write_options(filters=filters)
        prefilters = {"property_value": ",".join([pf.format() for pf in filters])}
        options = {MetaWidget.PREFILTERS: prefilters}
        return options
class ScopeFilter(BaseFilter):
    """
    Scope filter, used on scope reference properties.

    Each instance applies exactly one criterion (name, status, a date bound,
    a progress bound, a tag, a team or a single extra keyword filter).

    :ivar tag: string
    """

    # map between KE-chain field and Pykechain attribute, and whether the filter
    # is stored as a comma-separated string (i.e. may occur multiple times)
    MAP = [
        ("name__icontains", "name", False),
        ("status__in", "status", False),
        ("due_date__gte", "due_date_gte", False),
        ("due_date__lte", "due_date_lte", False),
        ("start_date__gte", "start_date_gte", False),
        ("start_date__lte", "start_date_lte", False),
        ("progress__gte", "progress_gte", False),
        ("progress__lte", "progress_lte", False),
        ("tags__contains", "tag", True),
        ("team__in", "team", False),
    ]

    def __init__(
        self,
        tag: Optional[str] = None,
        status: Optional[ScopeStatus] = None,
        name: Optional[str] = None,
        team: Optional[Union[str, "Team"]] = None,
        due_date_gte: Optional[datetime.datetime] = None,
        due_date_lte: Optional[datetime.datetime] = None,
        start_date_gte: Optional[datetime.datetime] = None,
        start_date_lte: Optional[datetime.datetime] = None,
        progress_gte: Optional[float] = None,
        progress_lte: Optional[float] = None,
        **kwargs,
    ):
        """Create a ScopeFilter object.

        Exactly one criterion must be provided, either via a named argument
        or via a single extra keyword argument.

        :raises IllegalArgumentError: when zero or more than one criterion is given.
        """
        from pykechain.models import Team

        filters = [
            tag,
            status,
            name,
            team,
            due_date_gte,
            due_date_lte,
            start_date_gte,
            start_date_lte,
            progress_gte,
            progress_lte,
        ]

        if sum(p is not None for p in filters) + len(kwargs) != 1:
            raise IllegalArgumentError(
                "Every ScopeFilter object must apply only 1 filter!"
            )

        self.status = check_enum(status, ScopeStatus, "status")
        self.name = check_text(name, "name")
        self.due_date_gte = check_datetime(due_date_gte, "due_date_gte")
        self.due_date_lte = check_datetime(due_date_lte, "due_date_lte")
        self.start_date_gte = check_datetime(start_date_gte, "start_date_gte")
        self.start_date_lte = check_datetime(start_date_lte, "start_date_lte")
        self.progress_gte = check_type(progress_gte, float, "progress_gte")
        self.progress_lte = check_type(progress_lte, float, "progress_lte")
        self.tag = check_text(tag, "tag")
        self.team = check_base(team, Team, "team")
        self.extra_filter: dict = kwargs

    def __repr__(self):
        _repr = "ScopeFilter: "
        # BUG FIX: compare against None explicitly — falsy-but-valid values
        # (e.g. a progress of 0.0 or an empty name) previously fell through
        # to the extra-filter branch and were reported as "{}".
        if self.name is not None:
            _repr += f"name: `{self.name}`"
        elif self.status is not None:
            _repr += f"status `{self.status}`"
        elif self.due_date_gte is not None:
            _repr += f"due date greater or equal than: `{self.due_date_gte}`"
        elif self.due_date_lte is not None:
            _repr += f"due date lesser or equal than: `{self.due_date_lte}`"
        elif self.start_date_gte is not None:
            _repr += f"start date greater or equal than: `{self.start_date_gte}`"
        elif self.start_date_lte is not None:
            _repr += f"start date lesser or equal than: `{self.start_date_lte}`"
        elif self.progress_gte is not None:
            _repr += f"progress greater or equal than: {self.progress_gte * 100}%"
        elif self.progress_lte is not None:
            _repr += f"progress lesser or equal than: {self.progress_lte * 100}%"
        elif self.tag is not None:
            _repr += f"tag `{self.tag}`"
        elif self.team is not None:
            _repr += f"team: `{self.team}`"
        else:
            _repr += f"{self.extra_filter}"
        return _repr

    @classmethod
    def parse_options(cls, options: Dict) -> List["ScopeFilter"]:
        """
        Convert the dict & string-based definition of a scope filter to a list of ScopeFilter objects.

        :param options: options dict from a scope reference property or meta dict from a scopes widget.
        :return: list of ScopeFilter objects
        :rtype list
        """
        check_type(options, dict, "options")
        filters_dict = options.get(MetaWidget.PREFILTERS, {})
        scope_filters = []
        mapping = {field: (attr, is_list) for field, attr, is_list in cls.MAP}

        for field, value in filters_dict.items():
            if field in mapping:
                attr, is_list = mapping[field]
                try:
                    # Comma-separated fields expand into one filter per value.
                    if is_list:
                        values = value.split(",")
                    else:
                        values = [value]
                except AttributeError:
                    # Non-string value: treat it as an iterable of values.
                    values = value

                for item in values:
                    scope_filters.append(cls(**{attr: item}))
            else:
                # Unknown field: keep it verbatim as an extra filter.
                scope_filters.append(cls(**{field: value}))

        return scope_filters

    @classmethod
    def write_options(cls, filters: List) -> Dict:
        """
        Convert the list of Filter objects to a dict.

        :param filters: List of BaseFilter objects
        :returns options dict to be used to update the options dict of a property
        """
        super().write_options(filters=filters)
        prefilters = dict()
        options = {MetaWidget.PREFILTERS: prefilters}

        # Collect values of comma-separated fields first and join them once
        # at the end (refactor of the previous incremental string appending).
        list_fields: Dict[str, List] = dict()

        for scope_filter in filters:
            for field, attr, is_list in cls.MAP:
                filter_value = getattr(scope_filter, attr)
                if filter_value is not None:
                    if is_list:
                        list_fields.setdefault(field, []).append(filter_value)
                    else:
                        prefilters[field] = filter_value
                    break
            else:
                # No mapped attribute was set: it is a custom (extra) filter.
                prefilters.update(scope_filter.extra_filter)

        for field, values in list_fields.items():
            prefilters[field] = ",".join(str(value) for value in values)

        return options
| {
"content_hash": "259a8f1ae7a5e3bd28ddfebd00fd847b",
"timestamp": "",
"source": "github",
"line_count": 396,
"max_line_length": 119,
"avg_line_length": 35.51767676767677,
"alnum_prop": 0.5411304656949876,
"repo_name": "KE-works/pykechain",
"id": "e2afd2e934c71293fa079c005aa1d14804474d8a",
"size": "14065",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pykechain/models/value_filter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1370094"
},
{
"name": "Shell",
"bytes": "222"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration widening ``Version.version`` from 20 to 40 characters."""

    def forwards(self, orm):
        # Changing field 'Version.version'
        db.alter_column(u'pypi_version', 'version', self.gf('django.db.models.fields.CharField')(max_length=40))

    def backwards(self, orm):
        # Changing field 'Version.version'
        db.alter_column(u'pypi_version', 'version', self.gf('django.db.models.fields.CharField')(max_length=20))

    # Frozen ORM snapshot used by South; auto-generated, do not edit by hand.
    models = {
        u'pypi.package': {
            'Meta': {'object_name': 'Package'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
        },
        u'pypi.release': {
            'Meta': {'ordering': "('version', 'upload_time')", 'unique_together': "(('version', 'upload_time'),)", 'object_name': 'Release'},
            'comment_text': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'downloads': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'filename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'has_sig': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'md5_digest': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'packagetype': ('django.db.models.fields.CharField', [], {'max_length': '13'}),
            'python_version': ('django.db.models.fields.CharField', [], {'max_length': '6'}),
            'size': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'upload_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'version': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'releases'", 'to': u"orm['pypi.Version']"})
        },
        u'pypi.version': {
            'Meta': {'unique_together': "(('package', 'version'),)", 'object_name': 'Version'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'package': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'versions'", 'to': u"orm['pypi.Package']"}),
            'version': ('django.db.models.fields.CharField', [], {'max_length': '40'})
        }
    }

    complete_apps = ['pypi']
"content_hash": "f13205a595663276337b31751414ccc4",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 141,
"avg_line_length": 54.104166666666664,
"alnum_prop": 0.5656526761648055,
"repo_name": "kitsunde/django-pypi",
"id": "f71c51eb340cae5fbedeec2b091b2ed5d2347d3b",
"size": "2621",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pypi/migrations/0003_auto__chg_field_version_version.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24824"
}
],
"symlink_target": ""
} |
from AccessControl.SecurityInfo import ClassSecurityInfo
from Acquisition import Implicit
from App.class_init import InitializeClass
from OFS.PropertyManager import PropertyManager
from OFS.SimpleItem import SimpleItem
from Products.BeakerSessionDataManager.interfaces import ISessionDataObject
from Products.PageTemplates.PageTemplateFile import PageTemplateFile
from Products.Sessions.SessionDataManager import SessionDataManagerErr
from Products.Sessions.SessionDataManager import SessionDataManagerTraverser
from Products.Sessions.SessionPermissions import ACCESS_CONTENTS_PERM
from Products.Sessions.SessionPermissions import ACCESS_SESSIONDATA_PERM
from Products.Sessions.SessionPermissions import ARBITRARY_SESSIONDATA_PERM
from Products.Sessions.interfaces import ISessionDataManager
from ZPublisher.BeforeTraverse import registerBeforeTraverse
from ZPublisher.BeforeTraverse import unregisterBeforeTraverse
from collective.beaker.interfaces import ISession
from six.moves import UserDict
from zope.interface import implementer
import time
@implementer(ISessionDataManager)
class BeakerSessionDataManager(SimpleItem, PropertyManager):
    """Implement a session data manager which uses Beaker sessions."""

    security = ClassSecurityInfo()

    # Attributes accessible despite the restrictive default access policy.
    ok = {
        "meta_type": 1,
        "id": 1,
        "title": 1,
        "icon": 1,
        "bobobase_modification_time": 1,
        "title_or_id": 1,
    }
    security.setDefaultAccess(ok)

    def __init__(self, title=""):
        self.title = title

    #
    # ZMI
    #
    meta_type = "Beaker Session Data Manager"
    # Name under which the session is exposed on REQUEST by the traversal hook.
    _requestSessionName = "SESSION"

    _properties = ({"id": "title", "type": "string", "mode": "w", "label": "Title"},)

    manage_options = PropertyManager.manage_options + SimpleItem.manage_options

    def _session(self):
        """Here's the core logic which looks up the Beaker session."""
        session = ISession(self.REQUEST)
        return BeakerSessionDataObject(session)

    #
    # ISessionDataManager implementation
    #
    security.declareProtected(ACCESS_SESSIONDATA_PERM, "getSessionData")

    def getSessionData(self, create=1):
        """Return the session data object (``create`` is accepted for API
        compatibility but ignored)."""
        return self._session()

    security.declareProtected(ACCESS_SESSIONDATA_PERM, "hasSessionData")

    def hasSessionData(self):
        """Always true: a Beaker session is always available."""
        return True

    security.declareProtected(ARBITRARY_SESSIONDATA_PERM, "getSessionDataByKey")

    def getSessionDataByKey(self, key):
        # Beaker exposes no API for loading an arbitrary session by key.
        raise SessionDataManagerErr(
            "Beaker session data manager does not support retrieving arbitrary sessions."
        )

    security.declareProtected(ACCESS_CONTENTS_PERM, "getBrowserIdManager")

    def getBrowserIdManager(self):
        """Not supported with Beaker sessions; always raises."""
        raise SessionDataManagerErr(
            "Beaker session data manager does not support browser id managers."
        )

    # Traversal hook
    def manage_afterAdd(self, item, container):
        """Add our traversal hook"""
        self.updateTraversalData(self._requestSessionName)

    def manage_beforeDelete(self, item, container):
        """Clean up on delete"""
        self.updateTraversalData(None)

    def updateTraversalData(self, requestSessionName=None):
        """(Un)register the before-traverse hook exposing the session."""
        # Note this can't be called directly at add -- manage_afterAdd will
        # work though.
        parent = self.aq_inner.aq_parent
        if getattr(self, "_hasTraversalHook", None):
            unregisterBeforeTraverse(parent, "BeakerSessionDataManager")
            del self._hasTraversalHook
            self._requestSessionName = None
        if requestSessionName:
            hook = SessionDataManagerTraverser(requestSessionName, self.id)
            registerBeforeTraverse(parent, hook, "BeakerSessionDataManager", 50)
            self._hasTraversalHook = 1
            self._requestSessionName = requestSessionName


InitializeClass(BeakerSessionDataManager)
def addBeakerSessionDataManager(dispatcher, id, title="", REQUEST=None):
    """Add a BSDM to dispatcher."""
    manager = BeakerSessionDataManager(title=title)
    manager._setId(id)
    dispatcher._setObject(id, manager)
    if REQUEST is not None:  # pragma: no cover
        REQUEST["RESPONSE"].redirect("%s/manage_workspace" % dispatcher.absolute_url())


# ZMI add form for the session data manager.
addBeakerSessionDataManagerForm = PageTemplateFile("www/add_sdm.pt", globals())
def session_mutator(func):
    """Decorator to make a UserDict mutator save the session.

    Wraps *func* so that, after it runs, ``self.data.save()`` persists the
    underlying Beaker session.

    :param func: a mutating method taking ``self`` first (e.g. UserDict.clear)
    :return: the wrapped function, returning whatever *func* returns
    """
    # Imported locally so this module-level helper stays self-contained.
    import functools

    # functools.wraps preserves the wrapped method's name/docstring, which the
    # bare closure previously clobbered ("mutating_func" everywhere).
    @functools.wraps(func)
    def mutating_func(self, *args, **kw):
        res = func(self, *args, **kw)
        self.data.save()
        return res

    return mutating_func
@implementer(ISessionDataObject)
class BeakerSessionDataObject(Implicit):
    """Adapts a beaker session object to the interface expected of Zope sessions."""

    security = ClassSecurityInfo()
    security.setDefaultAccess("allow")
    security.declareObjectPublic()

    def __init__(self, session):
        """Wrap *session*; ``data`` aliases it so UserDict methods apply."""
        self.data = self.session = session

    #
    # IMapping methods — borrowed from UserDict and wrapped so that every
    # mutation persists the underlying beaker session.
    #
    clear = session_mutator(UserDict.clear)
    update = session_mutator(UserDict.update)
    setdefault = session_mutator(UserDict.setdefault)
    pop = session_mutator(UserDict.pop)
    popitem = session_mutator(UserDict.popitem)
    __setitem__ = session_mutator(UserDict.__setitem__)
    __delitem__ = session_mutator(UserDict.__delitem__)
    set = __setitem__
    __guarded_setitem__ = __setitem__
    __guarded_delitem__ = __delitem__
    delete = __delitem__

    def __len__(self):
        try:
            return self.data.__len__()
        except AttributeError:
            # Fall back for session objects without __len__.
            return len(self.data.keys())

    #
    # ISessionDataObject
    #
    def getId(self):
        """Return the beaker session id."""
        return self.session.id

    def invalidate(self):
        """Invalidate (discard) the underlying beaker session."""
        self.session.invalidate()

    def isValid(self):
        """Wrapped beaker sessions are always considered valid."""
        return True

    def getCreated(self):
        """Return the session creation time as a Unix timestamp."""
        return time.mktime(self.session["_creation_time"].timetuple())

    getContainerKey = getId

    #
    # compatibility with standard persistent Zope sessions
    #
    def _get_p_changed(self):
        return 1

    def _set_p_changed(self, v):
        # Assigning a truthy _p_changed persists the session, mimicking ZODB.
        if v:
            self.session.save()

    _p_changed = property(_get_p_changed, _set_p_changed)

    # dict API compatibility used by some plone addons e.g. collective.z3cform.wizard
    def has_key(self, key):
        return key in self.session.keys()

    def get(self, key, default=None):
        """Return the value for *key*, or *default* when absent.

        The *default* parameter generalizes the previous signature (which
        hard-coded None) to match the standard dict.get API; existing
        callers are unaffected.
        """
        return self.session.get(key, default)

    def __getitem__(self, key):
        # NOTE: unlike a real dict, a missing key yields None (the get()
        # default) instead of raising KeyError — kept for compatibility.
        return self.get(key)


InitializeClass(BeakerSessionDataObject)
| {
"content_hash": "a680636e26d0e3f0e222a245ef046028",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 89,
"avg_line_length": 29.655963302752294,
"alnum_prop": 0.6863109048723898,
"repo_name": "davisagli/Products.BeakerSessionDataManager",
"id": "fa54e071b60ef5237b1134c6a8d39ece023c16f3",
"size": "6490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Products/BeakerSessionDataManager/sessiondata.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21903"
}
],
"symlink_target": ""
} |
"""
Support for Volvo On Call.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/volvooncall/
"""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.const import (CONF_USERNAME, CONF_PASSWORD,
CONF_NAME, CONF_RESOURCES)
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.dispatcher import (
async_dispatcher_send,
async_dispatcher_connect)
from homeassistant.util.dt import utcnow
DOMAIN = 'volvooncall'
DATA_KEY = DOMAIN

REQUIREMENTS = ['volvooncall==0.8.7']

_LOGGER = logging.getLogger(__name__)

# Polling is clamped to at least one minute to avoid hammering the service.
MIN_UPDATE_INTERVAL = timedelta(minutes=1)
DEFAULT_UPDATE_INTERVAL = timedelta(minutes=1)

CONF_UPDATE_INTERVAL = 'update_interval'
CONF_REGION = 'region'
CONF_SERVICE_URL = 'service_url'
CONF_SCANDINAVIAN_MILES = 'scandinavian_miles'
CONF_MUTABLE = 'mutable'

# Dispatcher signal fired after every successful server poll.
SIGNAL_STATE_UPDATED = '{}.updated'.format(DOMAIN)

# Instrument component -> Home Assistant platform to load for it.
COMPONENTS = {
    'sensor': 'sensor',
    'binary_sensor': 'binary_sensor',
    'lock': 'lock',
    'device_tracker': 'device_tracker',
    'switch': 'switch'
}

# Resources the user may whitelist via the `resources` config option.
RESOURCES = [
    'position',
    'lock',
    'heater',
    'odometer',
    'trip_meter1',
    'trip_meter2',
    'fuel_amount',
    'fuel_amount_level',
    'average_fuel_consumption',
    'distance_to_empty',
    'washer_fluid_level',
    'brake_fluid',
    'service_warning_status',
    'bulb_failures',
    'battery_range',
    'battery_level',
    'time_to_fully_charged',
    'battery_charge_status',
    'engine_start',
    'last_trip',
    'is_engine_running',
    'doors_hood_open',
    'doors_front_left_door_open',
    'doors_front_right_door_open',
    'doors_rear_left_door_open',
    'doors_rear_right_door_open',
    'windows_front_left_window_open',
    'windows_front_right_window_open',
    'windows_rear_left_window_open',
    'windows_rear_right_window_open',
    'tyre_pressure_front_left_tyre_pressure',
    'tyre_pressure_front_right_tyre_pressure',
    'tyre_pressure_rear_left_tyre_pressure',
    'tyre_pressure_rear_right_tyre_pressure',
    'any_door_open',
    'any_window_open'
]

CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_USERNAME): cv.string,
        vol.Required(CONF_PASSWORD): cv.string,
        vol.Optional(CONF_UPDATE_INTERVAL, default=DEFAULT_UPDATE_INTERVAL): (
            vol.All(cv.time_period, vol.Clamp(min=MIN_UPDATE_INTERVAL))),
        vol.Optional(CONF_NAME, default={}): vol.Schema(
            {cv.slug: cv.string}),
        vol.Optional(CONF_RESOURCES): vol.All(
            cv.ensure_list, [vol.In(RESOURCES)]),
        vol.Optional(CONF_REGION): cv.string,
        vol.Optional(CONF_SERVICE_URL): cv.string,
        vol.Optional(CONF_MUTABLE, default=True): cv.boolean,
        vol.Optional(CONF_SCANDINAVIAN_MILES, default=False): cv.boolean,
    }),
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
    """Set up the Volvo On Call component."""
    session = async_get_clientsession(hass)

    from volvooncall import Connection
    connection = Connection(
        session=session,
        username=config[DOMAIN].get(CONF_USERNAME),
        password=config[DOMAIN].get(CONF_PASSWORD),
        service_url=config[DOMAIN].get(CONF_SERVICE_URL),
        region=config[DOMAIN].get(CONF_REGION))

    interval = config[DOMAIN].get(CONF_UPDATE_INTERVAL)

    # Shared component state, also used by the per-platform entities.
    data = hass.data[DATA_KEY] = VolvoData(config)

    def is_enabled(attr):
        """Return true if the user has enabled the resource."""
        # When no explicit resource list is configured, everything is enabled
        # (the default list contains the attribute itself).
        return attr in config[DOMAIN].get(CONF_RESOURCES, [attr])

    def discover_vehicle(vehicle):
        """Load relevant platforms."""
        data.vehicles.add(vehicle.vin)

        dashboard = vehicle.dashboard(
            mutable=config[DOMAIN][CONF_MUTABLE],
            scandinavian_miles=config[DOMAIN][CONF_SCANDINAVIAN_MILES])

        # Register each supported & enabled instrument and load its platform.
        for instrument in (
                instrument
                for instrument in dashboard.instruments
                if instrument.component in COMPONENTS and
                is_enabled(instrument.slug_attr)):

            data.instruments.add(instrument)

            hass.async_create_task(
                discovery.async_load_platform(
                    hass,
                    COMPONENTS[instrument.component],
                    DOMAIN,
                    (vehicle.vin,
                     instrument.component,
                     instrument.attr),
                    config))

    async def update(now):
        """Update status from the online service."""
        try:
            if not await connection.update(journal=True):
                _LOGGER.warning("Could not query server")
                return False

            for vehicle in connection.vehicles:
                if vehicle.vin not in data.vehicles:
                    discover_vehicle(vehicle)

            # Notify all entities that fresh state is available.
            async_dispatcher_send(hass, SIGNAL_STATE_UPDATED)

            return True
        finally:
            # Always reschedule the next poll, even when the update failed.
            async_track_point_in_utc_time(hass, update, utcnow() + interval)

    _LOGGER.info("Logging in to service")
    return await update(utcnow())
class VolvoData:
    """Hold component state."""

    def __init__(self, config):
        """Initialize the component state."""
        self.vehicles = set()
        self.instruments = set()
        self.config = config[DOMAIN]
        self.names = self.config.get(CONF_NAME)

    def instrument(self, vin, component, attr):
        """Return corresponding instrument, or None when not registered."""
        for candidate in self.instruments:
            if (candidate.vehicle.vin == vin and
                    candidate.component == component and
                    candidate.attr == attr):
                return candidate
        return None

    def vehicle_name(self, vehicle):
        """Provide a friendly name for a vehicle.

        Preference order: configured name by registration number, configured
        name by VIN, the registration number, the VIN, empty string.
        """
        registration = vehicle.registration_number
        if (registration and registration.lower()) in self.names:
            return self.names[registration.lower()]
        if vehicle.vin and vehicle.vin.lower() in self.names:
            return self.names[vehicle.vin.lower()]
        if registration:
            return registration
        if vehicle.vin:
            return vehicle.vin
        return ''
class VolvoEntity(Entity):
    """Base class for all VOC entities."""

    def __init__(self, data, vin, component, attribute):
        """Initialize the entity.

        :param data: shared VolvoData component state
        :param vin: vehicle identification number this entity belongs to
        :param component: platform name (sensor, lock, ...)
        :param attribute: instrument attribute this entity exposes
        """
        self.data = data
        self.vin = vin
        self.component = component
        self.attribute = attribute

    async def async_added_to_hass(self):
        """Register update dispatcher."""
        # Refresh this entity's state whenever a poll cycle completes.
        async_dispatcher_connect(
            self.hass, SIGNAL_STATE_UPDATED,
            self.async_schedule_update_ha_state)

    @property
    def instrument(self):
        """Return corresponding instrument."""
        return self.data.instrument(self.vin, self.component, self.attribute)

    @property
    def icon(self):
        """Return the icon."""
        return self.instrument.icon

    @property
    def vehicle(self):
        """Return vehicle."""
        return self.instrument.vehicle

    @property
    def _entity_name(self):
        # Instrument-specific part of the display name.
        return self.instrument.name

    @property
    def _vehicle_name(self):
        # Friendly vehicle name resolved via the shared component state.
        return self.data.vehicle_name(self.vehicle)

    @property
    def name(self):
        """Return full name of the entity."""
        return '{} {}'.format(
            self._vehicle_name,
            self._entity_name)

    @property
    def should_poll(self):
        """Return the polling state."""
        # Updates are pushed via the dispatcher signal; no polling needed.
        return False

    @property
    def assumed_state(self):
        """Return true if unable to access real state of entity."""
        return True

    @property
    def device_state_attributes(self):
        """Return device specific state attributes."""
        return dict(self.instrument.attributes,
                    model='{}/{}'.format(
                        self.vehicle.vehicle_type,
                        self.vehicle.model_year))
| {
"content_hash": "15d939510be5d4697e3d7088ae48d51e",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 78,
"avg_line_length": 30.91078066914498,
"alnum_prop": 0.6198436560432953,
"repo_name": "tinloaf/home-assistant",
"id": "9f9b58ec8b6128fed329ed20f3871c84ef1d8c62",
"size": "8315",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/volvooncall.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1099"
},
{
"name": "Python",
"bytes": "13135313"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17137"
}
],
"symlink_target": ""
} |
class DoubanPipeline(object):
    """Scrapy item pipeline for the douban spider.

    Currently a pass-through: each item is returned unchanged so it
    continues down the pipeline chain.
    """

    def process_item(self, item, spider):
        """Return *item* unchanged."""
        return item
| {
"content_hash": "c7937dfd5a2af455c8b41e9f45044977",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 54,
"avg_line_length": 26.25,
"alnum_prop": 0.5777777777777777,
"repo_name": "tzpBingo/github-trending",
"id": "a68d5238e29691b8453bb6d60dbd7c6f27175f62",
"size": "510",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codespace/python/tmp/douban/douban/pipelines.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "11470"
},
{
"name": "HTML",
"bytes": "1543"
},
{
"name": "Python",
"bytes": "49985109"
},
{
"name": "Shell",
"bytes": "18039"
}
],
"symlink_target": ""
} |
'''
zzz domains
'''
#import sys
#from imp import reload
#reload(sys)
#sys.setdefaultencoding('utf-8')
from sqlalchemy import Table, Column, ForeignKey, MetaData
from sqlalchemy import Integer, Boolean
from sqlalchemy import Unicode, UnicodeText, DateTime
from sqlalchemy import func
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
# Shared declarative base; every model below registers against this metadata.
metadata = MetaData()
Base = declarative_base()
Base.metadata = metadata
class Option(Base):
    '''Site configuration table: simple name/value pairs (table ``option``).'''
    __tablename__ = 'option'
    # The option name doubles as the primary key.
    name = Column('option_name', Unicode(200),
                  primary_key=True, nullable=False, unique=True,
                  index=True)
    # Value is stored as text; callers stringify before assignment.
    value = Column('option_value', UnicodeText, nullable=False)
    def __init__(self, name, value):
        self.name = name
        self.value = value
    def __repr__(self):
        return "<Option('%s')" % self.name
# Association table linking posts and tags (many-to-many).
post_tag = Table('post_tag', Base.metadata,
    Column('post_id', Integer, ForeignKey('post.id'), index=True),
    Column('tag_id', Integer, ForeignKey('tag.id'), index=True)
)
class Post(Base):
    '''Blog post table (table ``post``).'''
    __tablename__ = 'post'
    __table_args__ = {'sqlite_autoincrement': True}
    id = Column(Integer, primary_key=True, index=True)
    user_id = Column(Integer, ForeignKey('user.id'), nullable=False, index=True)
    # URL slug, unique per post.
    url = Column(Unicode(200), nullable=False, unique=True, index=True)
    title = Column(Unicode(200), nullable=False)
    content = Column(UnicodeText, nullable=False)
    # Whether the post is published (visible).
    ispass = Column(Boolean, nullable=False, index=True)
    pubdate = Column(DateTime, index=True)
    # Denormalized year/month of pubdate, used by the archive queries.
    pubyear = Column(Integer, index=True)
    pubmonth = Column(Integer, index=True)
    # Cached comment counters, maintained by update_post_count().
    commentnum = Column(Integer)
    commentpass = Column(Integer)
    taglist = Column(Unicode(500))
    def __init__(self, url, title, content, ispass, pubdate, user):
        self.url = url
        self.title = title
        self.content = content
        self.ispass = ispass
        self.pubdate = pubdate
        # Keep the denormalized year/month columns in sync with pubdate.
        self.pubyear = pubdate.year
        self.pubmonth = pubdate.month
        self.user = user
    tags = relationship("Tag",
                    secondary=post_tag,
                    backref="posts")
    comments = relationship("Comment",
                    backref="post")
import hashlib
class User(Base):
    '''User account table (table ``user``).'''
    __tablename__ = 'user'
    __table_args__ = {'sqlite_autoincrement': True}
    id = Column(Integer, primary_key=True, index=True)
    username = Column(Unicode(200), unique=True, nullable=False, index=True)
    # Stored as an MD5 hex digest of the password (see __init__).
    pwd = Column(Unicode(200), nullable=False)
    nickname = Column(Unicode(200), unique=True, nullable=False, index=True)
    mail = Column(Unicode(200), nullable=False)
    posts = relationship("Post",
                    backref="user")
    commnets = relationship("Comment",
                    backref="user")
    def __init__(self, username, pwd, mail):
        self.username = username
        # Nickname defaults to the username.
        self.nickname = username
        # NOTE(review): unsalted MD5 is a weak password hash; consider a
        # modern KDF (bcrypt/scrypt/argon2) — changing it would require a
        # data migration, so it is only flagged here.
        m = hashlib.md5()
        m.update(pwd.encode('utf8'))
        #self.pwd = unicode(m.hexdigest(), 'utf-8')
        self.pwd = m.hexdigest()
        self.mail = mail
class Comment(Base):
    '''Comment table (table ``comment``).'''
    __tablename__ = 'comment'
    __table_args__ = {'sqlite_autoincrement': True}
    id = Column(Integer, primary_key=True, index=True)
    post_id = Column(Integer, ForeignKey('post.id'), nullable=False, index=True)
    # Nullable: anonymous comments have no associated user row.
    user_id = Column(Integer, ForeignKey('user.id'), nullable=True, index=True)
    username = Column(Unicode(200), nullable=False)
    mail = Column(Unicode(200), nullable=False)
    site = Column(Unicode(200))
    comment = Column(Unicode(1000), nullable=False)
    # Whether the comment passed moderation.
    ispass = Column(Boolean, nullable=False, index=True)
    isshowsite = Column(Boolean, nullable=False)
    ip = Column(Unicode(200), nullable=True)
    adddate = Column(DateTime, nullable=False, index=True)
    ##notify = Column(Boolean, nullable=True)
    def __init__(self, post, user, username, mail, site,
                 comment, ispass, adddate):
        self.post = post
        self.user = user
        self.username = username
        self.mail = mail
        self.site = site
        self.comment = comment
        self.ispass = ispass
        # The commenter's site link is hidden by default.
        self.isshowsite = False
        self.adddate = adddate
class Tag(Base):
    '''Tag table (table ``tag``).'''
    __tablename__ = 'tag'
    __table_args__ = {'sqlite_autoincrement': True}
    id = Column(Integer, primary_key=True, index=True)
    tag = Column(Unicode(200), nullable=False, unique=True, index=True)
    # Cached count of published posts carrying this tag,
    # maintained by update_tag_post().
    nums = Column(Integer, nullable=False, index=True)
    def __init__(self, tag, nums=0):
        self.tag = tag
        self.nums = nums
def update_all_count(db):
    """Recompute the site-wide counters and persist each as an Option row."""
    counters = (
        (u'posts_available',
         db.query(func.count(Post.id)).filter(Post.ispass == True).scalar()),
        (u'posts_total',
         db.query(func.count(Post.id)).scalar()),
        (u'comments_available',
         db.query(func.count(Comment.id)).filter(Comment.ispass == True).scalar()),
        (u'comments_total',
         db.query(func.count(Comment.id)).scalar()),
        (u'tags_total',
         db.query(func.count(Tag.id)).scalar()),
    )
    for option_name, count in counters:
        update_option(db, option_name, count)
def get_post_tags(db, tags):
    """Resolve candidate Tag objects against the database.

    For each candidate, reuse the persistent row with the same tag name
    if one exists; otherwise add the candidate to the session and keep it.
    Returns the resolved list in the original order.
    """
    resolved = []
    for candidate in tags:
        existing = db.query(Tag).filter(Tag.tag == candidate.tag).first()
        if existing is None:
            db.add(candidate)
            resolved.append(candidate)
        else:
            resolved.append(existing)
    return resolved
def update_post_tags(db, post_id, tags, oldtags):
    """Remove stale tag associations; return every tag whose count changed.

    NOTE(review): ``post_id`` is accepted but never used — the delete below
    filters the association table by tag_id only, so it could match a row
    belonging to a different post; confirm against callers.  Also,
    ``db.delete`` on a row selected from a core ``Table`` (not a mapped
    class) looks suspect — verify this actually deletes.
    """
    # Tags that were removed from the post.
    names = [x.tag for x in tags]
    deletes = [o for o in oldtags if o.tag not in names]
    for o in deletes:
        ptag = db.query(post_tag).\
                filter(post_tag.c.tag_id==o.id).\
                first()
        if ptag:
            print('====================')
            print(ptag)
            db.delete(ptag)
    # Return current tags plus the removed ones so the caller can refresh
    # usage counters for both.
    alls = tags + [o for o in oldtags if o.tag not in names]
    return alls
    #for x in alls:
    #    update_tag_post(db, x.id)
def update_post_count(db, post_id):
    """Refresh one post's cached comment counters.

    Stores the total and moderation-passed comment counts on the Post row
    and returns the total count.
    """
    total = db.query(func.count(Comment.id)).\
        filter(Comment.post_id == post_id).\
        scalar()
    passed = db.query(func.count(Comment.id)).\
        filter(Comment.post_id == post_id).\
        filter(Comment.ispass == True).\
        scalar()
    db.query(Post).\
        filter(Post.id == post_id).\
        update({
            Post.commentnum: total,
            Post.commentpass: passed
        })
    return total
def update_option(db, name, value):
    """Create or update the Option row *name*, storing *value* as a string."""
    option = db.query(Option).filter(Option.name == name).first()
    if option is None:
        option = Option(name, None)
    #count.value = unicode(str(value), 'utf8')
    option.value = str(value)
    db.add(option)
def update_post_commentnum(db):
    """Refresh the cached comment counters for every post that has comments,
    committing after each post and printing progress."""
    rows = db.query(Comment.post_id).distinct().all()
    post_ids = [row[0] for row in rows]
    for post_id in post_ids:
        comment_count = update_post_count(db, post_id)
        print('\r updating: \t post_id:%d comments: %d'
              % (post_id, comment_count))
        db.commit()
    print('\r\n')
    print(' total : %s' % len(post_ids))
    print('\r\n')
def update_tag_post(db, tag_id):
    """Recount published posts carrying *tag_id*, store it on the Tag row,
    and return the count."""
    count = db.query(func.count(post_tag.c.post_id)).\
        filter(post_tag.c.tag_id == tag_id).\
        filter(Post.ispass == True).\
        filter(Post.id == post_tag.c.post_id).\
        scalar()
    db.query(Tag).\
        filter(Tag.id == tag_id).\
        update({Tag.nums: count})
    return count
# Monthly archive maintenance: keep the per-month published-post counts current.
import datetime
import _pickle as cPickle
#import cPickle
def update_db_month(db):
    """Refresh the per-month archive counts across the full span of
    published posts (oldest through newest pubdate)."""
    newest = db.query(Post.pubdate).\
        filter(Post.ispass == True).\
        order_by(Post.pubdate.desc()).first()
    oldest = db.query(Post.pubdate).\
        filter(Post.ispass == True).\
        order_by(Post.pubdate).first()
    # Each query returns a one-column row (or None); unwrap when present.
    pubdate_max = newest[0] if newest else newest
    pubdate_min = oldest[0] if oldest else oldest
    update_archives_count(db, pubdate_min, pubdate_max)
def min_max_months(min_date, max_date):
    """Return (month_first, month_last) datetime pairs for every month
    from min_date's month through max_date's month, inclusive.

    Returns an empty list when min_date is after max_date.
    """
    if min_date > max_date:
        return []
    months = []
    for year in range(min_date.year, max_date.year + 1):
        # Clamp the month range at the endpoints of the overall span.
        start_month = min_date.month if year == min_date.year else 1
        end_month = max_date.month if year == max_date.year else 12
        for month in range(start_month, end_month + 1):
            months.append(get_month_range(year, month))
    return months
def get_month_range(year, month):
    """Return the first and last instant (microsecond resolution) of the
    given calendar month as a (start, end) datetime pair."""
    start = datetime.datetime(year, month, 1)
    if month == 12:
        next_month_start = datetime.datetime(year + 1, 1, 1)
    else:
        next_month_start = datetime.datetime(year, month + 1, 1)
    end = next_month_start - datetime.timedelta(microseconds=1)
    return start, end
def update_archives_count(db, min_date, max_date):
    '''Persist per-month published-post counts for every month between
    min_date and max_date.  The mapping {month_start: count} is pickled
    into the ``site_archives_count`` Option row, merged over any
    previously stored mapping.'''
    #get months
    year_months = min_max_months(min_date, max_date)
    #print year_months
    month_counts = {}
    #print 'year_months:', year_months
    for x in year_months:
        # x is a (month_first, month_last) pair; count published posts via
        # the denormalized pubyear/pubmonth columns.
        #print 'year_months -- x:', x
        #filter(Post.pubdate.between(x[0], x[1])).\
        #print '\r update month: \t %d-%d' % (x[0].year, x[0].month),
        post_count = db.query(func.count(Post.id)).\
                        filter(Post.ispass==True).\
                        filter(Post.pubyear==x[0].year).\
                        filter(Post.pubmonth==x[0].month).\
                        scalar()
        if post_count:
            month_counts[x[0]] = post_count
    #print '\r\n',
    if month_counts:
        option_name = u'site_archives_count'
        count1 = db.query(Option).\
            filter(Option.name == option_name).first()
        db_vals = {}
        if not count1:
            count1 = Option(option_name, None)
        else:
            try:
                # Merge with the previously stored mapping when readable.
                db_vals = cPickle.loads(count1.value)
            except cPickle.UnpicklingError :
                pass
        db_vals.update(month_counts)
        option_value = cPickle.dumps(db_vals)
        #count1.value = unicode(option_value, 'utf8')
        # NOTE(review): under Python 3, pickle works on bytes while
        # Option.value is a text column — confirm the round-trip.
        count1.value = option_value
        db.add(count1)
def get_archives_count(db):
    """Load the pickled {month_start: post_count} mapping from the
    ``site_archives_count`` Option row; return {} when absent/unreadable."""
    row = db.query(Option).\
        filter(Option.name == u'site_archives_count').first()
    if not (row and row.value):
        return {}
    try:
        return cPickle.loads(row.value)
    except cPickle.UnpicklingError:
        return {}
from utils import cached, get_int
@cached(cache_key='site_info')
def load_info(db):
    """Assemble the (cached) site-info dict: all Option rows plus the
    post counters and the per-month archive mapping.

    When the stored counters are missing or unparsable they are recomputed
    directly from the post table.
    """
    info = {option.name: option.value for option in db.query(Option).all()}
    posts_available = get_int(info.get('posts_available', ''), -1)
    posts_total = get_int(info.get('posts_total', ''), -1)
    if posts_available == -1:
        posts_available = db.query(func.count(Post.id)).\
            filter(Post.ispass == True).\
            scalar()
    if posts_total == -1:
        posts_total = db.query(func.count(Post.id)).\
            scalar()
    info['posts_available'] = posts_available
    info['posts_total'] = posts_total
    info['archives_count'] = get_archives_count(db)
    return info
| {
"content_hash": "4bb5f7299a03859d500dde087792fe49",
"timestamp": "",
"source": "github",
"line_count": 363,
"max_line_length": 80,
"avg_line_length": 31.953168044077135,
"alnum_prop": 0.5912578670575049,
"repo_name": "xiexiao/zzz",
"id": "f3185da60826acd173f2ef72368ff60d059cf2a5",
"size": "11786",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "domains.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8150"
},
{
"name": "JavaScript",
"bytes": "4744"
},
{
"name": "Python",
"bytes": "54707"
}
],
"symlink_target": ""
} |
# coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import unittest
import azure.graphrbac
from devtools_testutils import AzureMgmtTestCase
class GraphRbacTest(AzureMgmtTestCase):
    # Integration tests against the Azure AD Graph RBAC API; each test
    # creates, reads back, lists and then deletes its own objects.
    def setUp(self):
        super(GraphRbacTest, self).setUp()
        self.graphrbac_client = self.create_basic_client(
            azure.graphrbac.GraphRbacManagementClient,
            tenant_id=self.settings.AD_DOMAIN
        )
    def test_graphrbac_users(self):
        # Create a disabled test user, fetch it by object id and by UPN,
        # find it via an OData filter, then delete it.
        user = self.graphrbac_client.users.create(
            azure.graphrbac.models.UserCreateParameters(
                user_principal_name="testbuddy#TEST@{}".format(self.settings.AD_DOMAIN),
                account_enabled=False,
                display_name='Test Buddy',
                mail_nickname='testbuddy',
                password_profile=azure.graphrbac.models.PasswordProfile(
                    password='MyStr0ngP4ssword',
                    force_change_password_next_login=True
                )
            )
        )
        self.assertEqual(user.display_name, 'Test Buddy')
        user = self.graphrbac_client.users.get(user.object_id)
        self.assertEqual(user.display_name, 'Test Buddy')
        user = self.graphrbac_client.users.get(user.user_principal_name)
        self.assertEqual(user.display_name, 'Test Buddy')
        users = self.graphrbac_client.users.list(
            filter="displayName eq 'Test Buddy'"
        )
        users = list(users)
        self.assertEqual(len(users), 1)
        self.assertEqual(users[0].display_name, 'Test Buddy')
        self.graphrbac_client.users.delete(user.object_id)
    def test_groups(self):
        # Create a group, fetch and list it by display name, then delete it.
        group = self.graphrbac_client.groups.create("pytestgroup_display", "pytestgroup_nickname")
        self.assertEqual(group.display_name, "pytestgroup_display")
        group = self.graphrbac_client.groups.get(group.object_id)
        self.assertEqual(group.display_name, "pytestgroup_display")
        groups = self.graphrbac_client.groups.list(
            filter="displayName eq 'pytestgroup_display'"
        )
        groups = list(groups)
        self.assertEqual(len(groups), 1)
        self.assertEqual(groups[0].display_name, "pytestgroup_display")
        self.graphrbac_client.groups.delete(group.object_id)
    def test_apps_and_sp(self):
        # Create an application plus its service principal, then clean up
        # in reverse order.
        app = self.graphrbac_client.applications.create({
            'available_to_other_tenants': False,
            'display_name': 'pytest_app',
            'identifier_uris': ['http://pytest_app.org']
        })
        sp = self.graphrbac_client.service_principals.create({
            'app_id': app.app_id, # Do NOT use app.object_id
            'account_enabled': False
        })
        self.graphrbac_client.service_principals.delete(sp.object_id)
        self.graphrbac_client.applications.delete(app.object_id)
#------------------------------------------------------------------------------
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "655dc395df02030ae60772463da99895",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 98,
"avg_line_length": 36.29213483146067,
"alnum_prop": 0.5894736842105263,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "c7262d1f50844db9e11232f8efb515341d52a255",
"size": "3232",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-graphrbac/tests/test_graphrbac.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.f5networks.f5_modules.plugins.modules.bigip_firewall_global_rules import (
ApiParameters, ModuleParameters, ModuleManager, ArgumentSpec
)
from ansible_collections.f5networks.f5_modules.tests.unit.compat import unittest
from ansible_collections.f5networks.f5_modules.tests.unit.compat.mock import Mock, patch
from ansible_collections.f5networks.f5_modules.tests.unit.modules.utils import set_module_args
# Directory holding JSON fixture files, and a memoization cache keyed by
# absolute fixture path (shared by load_fixture below).
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
    """Load (and memoize) a fixture file, parsed as JSON when possible.

    Non-JSON fixtures are cached and returned as raw text.
    """
    path = os.path.join(fixture_path, name)
    if path not in fixture_data:
        with open(path) as handle:
            contents = handle.read()
        try:
            contents = json.loads(contents)
        except Exception:
            # Not JSON — keep the raw text.
            pass
        fixture_data[path] = contents
    return fixture_data[path]
class TestParameters(unittest.TestCase):
    # Unit tests for the module's parameter adapter classes.
    def test_module_parameters(self):
        # Short policy names should be expanded to full /Common/ paths.
        args = dict(
            enforced_policy='enforced1',
            staged_policy='staged1',
            service_policy='service1',
        )
        p = ModuleParameters(params=args)
        assert p.enforced_policy == '/Common/enforced1'
        assert p.staged_policy == '/Common/staged1'
        assert p.service_policy == '/Common/service1'
    def test_api_parameters(self):
        # Values read from the device fixture are already full paths.
        p = ApiParameters(params=load_fixture('load_security_firewall_global_rules_1.json'))
        assert p.enforced_policy == '/Common/foo'
        assert p.service_policy == '/Common/bar'
        assert p.staged_policy == '/Common/baz'
class TestUntypedManager(unittest.TestCase):
    # Tests for ModuleManager.exec_module with device interactions mocked out.
    def setUp(self):
        self.spec = ArgumentSpec()
        # Patch the version probe and telemetry call so no device is needed.
        self.p2 = patch('ansible_collections.f5networks.f5_modules.plugins.modules.bigip_firewall_global_rules.tmos_version')
        self.p3 = patch('ansible_collections.f5networks.f5_modules.plugins.modules.bigip_firewall_global_rules.send_teem')
        self.m2 = self.p2.start()
        self.m2.return_value = '14.1.0'
        self.m3 = self.p3.start()
        self.m3.return_value = True
    def tearDown(self):
        self.p2.stop()
        self.p3.stop()
    def test_update(self, *args):
        # Desired policies differ from the fixture's current state, so the
        # manager must report a change.
        set_module_args(dict(
            enforced_policy='enforced1',
            staged_policy='staged1',
            service_policy='service1',
            provider=dict(
                server='localhost',
                password='password',
                user='admin'
            )
        ))
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        current = ApiParameters(params=load_fixture('load_security_firewall_global_rules_1.json'))
        # Override methods to force specific logic in the module to happen
        mm = ModuleManager(module=module)
        mm.update_on_device = Mock(return_value=True)
        mm.read_current_from_device = Mock(return_value=current)
        results = mm.exec_module()
        assert results['changed'] is True
| {
"content_hash": "e22be17bd5bba74ceee1f1e5c107fa30",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 125,
"avg_line_length": 32.15533980582524,
"alnum_prop": 0.652475845410628,
"repo_name": "F5Networks/f5-ansible-modules",
"id": "8f1eca6bed212fc7ef688e027eaff6a87e9f86da",
"size": "3471",
"binary": false,
"copies": "2",
"ref": "refs/heads/doc-update",
"path": "ansible_collections/f5networks/f5_modules/tests/unit/modules/network/f5/test_bigip_firewall_global_rules.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1931"
},
{
"name": "Python",
"bytes": "345682"
}
],
"symlink_target": ""
} |
'''
Introduction:
Workshop Example Code from the Blockchain and Machine Learning Workshop at START Summit 2017 in Switzerland
Description:
The file user.py implements an easy chat-client for transmitting text and images to Blockchain smart contract.
Tags are automatically extracted from an image using a Deep Residual Neural Network.
Author:
Thomas Schmiedel, Data Reply 2017
Mail:
t.schmiedel@reply.de
Note:
This is just example code and not perfect yet, if you have any questions, advice, ..., just drop me a mail :-)
'''
##################################
# imports
##################################
from __future__ import print_function
import os, sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from ethjsonrpc import EthJsonRpc
import time
import io
from PIL import Image
import signal
from ethereum.abi import decode_abi
from ethereum import utils
##################################
# config
##################################
RPC_HOST = '127.0.0.1'  # Ethereum JSON-RPC endpoint host
RPC_PORT = 8545         # standard geth/parity JSON-RPC port
GAS = 20000000          # gas limit used for contract-calling transactions
IMAGE_SIZE = 256
# Deployed contract addresses used by the subcommands in main().
CONTRACT_STORAGE_ADDRESS = "0x69d7a25222ad60dd1aa9bf50fdf7936beee31f74"
USER_STORAGE_ADDRESS = "0x1107ad0bf1aad31e15221c65b46c77fabfb9993b"
counter = 0
##################################
# signal handler
##################################
LISTENING = False
def handler(sig, frame):
    """SIGINT handler: if we are in listening mode, a Ctrl-C just stops
    listening; otherwise remind the user how to quit."""
    global LISTENING
    if LISTENING:
        print('\n')
        LISTENING = False
    else:
        sys.stdout.write('\nenter q or quit to leave\n>> ')
signal.signal(signal.SIGINT, handler)
##################################
# decode transaction input
##################################
class Decoder:
    # Helper to decode a raw transaction's input data against an expected
    # function signature.
    @staticmethod
    def decodeABI(tinput, sig, returnVals):
        # Strip the leading '0x' from the transaction input.
        abi = tinput[2 :]
        # The first 4 bytes of sha3(signature) form the ABI function
        # selector; bail out if the input was for a different function.
        # NOTE(review): str.encode('hex')/.decode('hex') only exist on
        # Python 2 — this module is Python-2 only as written.
        hash = utils.sha3(sig)[: 4].encode('hex')
        if abi[: 8] != hash:
            return None
        # Decode the argument payload that follows the selector.
        return decode_abi(returnVals, abi[8 :].decode('hex'))
##################################
# main
##################################
def main():
    """Command-line dispatcher: connect to the JSON-RPC node, then run the
    subcommand named in sys.argv[1] (newContract, contractData, newUser,
    identification, accounts)."""
    #
    # create rpc interface
    #
    try:
        rpc = EthJsonRpc(RPC_HOST, RPC_PORT)
    except:
        print('unable to connect to rpc server at {}:{}'.format(RPC_HOST, RPC_PORT))
        sys.exit(-1)
    method = sys.argv[1]
    if method == "newContract":
        # Create a contract between owner and partner; prints the tx hash.
        owner = sys.argv[2]
        partner = sys.argv[3]
        text = sys.argv[4]
        tx = rpc.call_with_transaction(owner, CONTRACT_STORAGE_ADDRESS,
                'createNewContract(string,string)', [partner, text],
                gas=GAS)
        print(format(tx))
    elif method == "contractData":
        # Decode and print the arguments of a createNewContract transaction.
        trans_addr = sys.argv[2]
        trans = rpc.eth_getTransactionByHash(trans_addr)
        res = Decoder.decodeABI(trans['input'], 'createNewContract(string,string)', ['string','string'])
        print(res)
    elif method == "newUser":
        # Store identity documents for a user address.
        address = sys.argv[2]
        dataString = sys.argv[3]
        tx = rpc.call_with_transaction(address, USER_STORAGE_ADDRESS,
                'setUserIdentityDocs(string)', [dataString],
                gas=GAS)
    elif method == "identification":
        # Look up and print a user's identity documents.
        account_addr = sys.argv[2]
        transactionHashes, identityDocuments = rpc.call(USER_STORAGE_ADDRESS, 'getUser(address)', [account_addr], ['string','string'])
        print (identityDocuments)
    elif method == "accounts":
        # Print the n-th local account address.
        account_id = sys.argv[2]
        print(rpc.eth_accounts()[int(account_id)])
    else:
        print("method not recognized!")
##################################
# run
##################################
# Script entry point.
if __name__ == '__main__':
    main()
| {
"content_hash": "50e05dac9e8d4de243a1b17506023675",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 134,
"avg_line_length": 27.416666666666668,
"alnum_prop": 0.5576126001657916,
"repo_name": "Lyniat/hackaburg17-smartcontracts",
"id": "eb5078104bff74a866ecd0cee3472d504aa79c42",
"size": "3619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend_server/smartContract.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "20941"
},
{
"name": "C",
"bytes": "1663"
},
{
"name": "C#",
"bytes": "137065"
},
{
"name": "C++",
"bytes": "1358539"
},
{
"name": "CSS",
"bytes": "1870883"
},
{
"name": "HTML",
"bytes": "69732"
},
{
"name": "Java",
"bytes": "718532"
},
{
"name": "JavaScript",
"bytes": "1521053"
},
{
"name": "Objective-C",
"bytes": "596217"
},
{
"name": "Objective-C++",
"bytes": "92656"
},
{
"name": "Python",
"bytes": "14383"
},
{
"name": "Shell",
"bytes": "1984"
}
],
"symlink_target": ""
} |
from subprocess import Popen, PIPE
class rFactory(object):
    """ A factory to execute CRAN scripts """
    def __init__(self, log):
        # Logger used to relay the R script's output.
        self._log = log
        pass
    def exec_module(self, file, args=None):
        # Run an R script (e.g. 'var/R/helloWorld.R') via Rscript, streaming
        # its stdout line by line into the logger.
        # NOTE(review): ``args`` is accepted but never passed to Rscript.
        p = Popen(['Rscript', file], stdout=PIPE, bufsize=1)
        for line in iter(p.stdout.readline, b''):
            # By convention the R script prints 'Exit(0)' to signal an error;
            # warn, kill the process and stop streaming.
            if line.decode("utf-8") == '[1] "Exit(0)"\r\n':
                self._log.warn('error has been raised')
                p.kill()
                break
            self._log.info(line.decode("utf-8")),
        p.communicate() # close p.stdout, wait for the subprocess to exit
return True | {
"content_hash": "f43fe107faa825ae1c281a975e907619",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 73,
"avg_line_length": 33.25,
"alnum_prop": 0.5413533834586466,
"repo_name": "batoure/ScienceManager",
"id": "dc919ba111bee6a9fb2539cc618e5fd0235a127e",
"size": "665",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "App/service/process/factory/r.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13670"
}
],
"symlink_target": ""
} |
"""
Load files created by the astrodendro package.
astrodendro must be installed in order to use this loader
"""
from __future__ import absolute_import, division, print_function
import numpy as np
from astrodendro import Dendrogram
from glue.core.data_factories.hdf5 import is_hdf5
from glue.core.data_factories.fits import is_fits
from glue.core.data_factories.helpers import data_label
from glue.core.data import Data
from glue.config import data_factory
__all__ = ['load_dendro', 'is_dendro']
def is_dendro(file, **kwargs):
    """Return True if *file* looks like a dendrogram saved by astrodendro.

    Recognizes both on-disk formats: HDF5 (datasets ``data``, ``index_map``
    and ``newick``) and FITS (either named HDUs or, for older files,
    a structural heuristic).
    """
    if is_hdf5(file):
        import h5py
        # Fix: use a context manager so the HDF5 handle is closed; the
        # previous implementation leaked the open file object.
        with h5py.File(file, 'r') as f:
            return 'data' in f and 'index_map' in f and 'newick' in f
    elif is_fits(file):
        from astropy.io import fits
        with fits.open(file, ignore_missing_end=True) as hdulist:

            # For recent versions of astrodendro the HDUs have a recognizable
            # set of names.
            if 'DATA' in hdulist and 'INDEX_MAP' in hdulist and 'NEWICK' in hdulist:
                return True

            # For older versions of astrodendro, the HDUs did not have names.
            # Here we use heuristics to figure out if this is likely to be a
            # dendrogram. Specifically, there should be three HDU extensions.
            # The primary HDU should be empty, HDU 1 and HDU 2 should have
            # matching shapes, and HDU 3 should have a 1D array. Also, if the
            # HDUs do have names then this is not a dendrogram since the old
            # files did not have names.
            # This branch can be removed once we think most dendrogram files
            # will have HDU names.

            if len(hdulist) != 4:
                return False

            if hdulist[1].name != '' or hdulist[2].name != '' or hdulist[3].name != '':
                return False

            if hdulist[0].data is not None:
                return False

            if hdulist[1].data is None or hdulist[2].data is None or hdulist[3].data is None:
                return False

            if hdulist[1].data.shape != hdulist[2].data.shape:
                return False

            if hdulist[3].data.ndim != 1:
                return False

            # We're probably ok, so return True
            return True
    else:
        return False
@data_factory(label='Dendrogram', identifier=is_dendro, priority=1000)
def load_dendro(filename):
    """
    Load a dendrogram saved by the astrodendro package

    :param file: Path to a dendrogram file
    :returns: A list of 2 glue Data objects: the original dataset, and dendrogram.
    """
    label = data_label(filename)
    dg = Dendrogram.load_from(filename)
    structs = np.arange(len(dg))
    # Parent index for each structure; -1 marks a root (no parent).
    parent = np.array([dg[i].parent.idx
                       if dg[i].parent is not None else -1
                       for i in structs])
    height = np.array([dg[i].height for i in structs])
    # Peak intensity value per structure.
    pk = np.array([dg[i].get_peak(True)[1] for i in structs])
    dendro = Data(parent=parent,
                  height=height,
                  peak=pk,
                  label="{} [dendrogram]".format(label))
    im = Data(intensity=dg.data,
              structure=dg.index_map,
              label="{} [data]".format(label))
    # Link the image's structure map to the dendrogram's structure index.
    im.join_on_key(dendro, 'structure', dendro.pixel_component_ids[0])
    return [dendro, im]
| {
"content_hash": "67f5b52fd70d4b8510f367937b9a88e1",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 93,
"avg_line_length": 30.172727272727272,
"alnum_prop": 0.6022898463392589,
"repo_name": "stscieisenhamer/glue",
"id": "8c277b1a6206810e7e79b93e06e914d1f3642887",
"size": "3319",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "glue/plugins/dendro_viewer/data_factory.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1591083"
},
{
"name": "Shell",
"bytes": "1603"
}
],
"symlink_target": ""
} |
from enum import Enum
import re
class OperatingSystemClasses(Enum):
    #ENUM:START
    DESKTOP = 1
    SERVER = 2
    EMBEDDED = 3
    #ENUM:END

    def parse(self, string):
        """Return the first member whose name occurs in *string*
        (case-insensitive); fall back to DESKTOP when none matches."""
        lowered = string.lower()
        for member in OperatingSystemClasses:
            if member.name.lower() in lowered:
                return member
        return OperatingSystemClasses.DESKTOP
| {
"content_hash": "ed2454489cea8c39ff8c971c95d1daf9",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 47,
"avg_line_length": 17.764705882352942,
"alnum_prop": 0.7052980132450332,
"repo_name": "PC-fit-Christian-Rupp/Enumeration-OS",
"id": "b9b5433290275b38c812791bf6e17f2cfa657527",
"size": "302",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/OperatingSystemClasses.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "24098"
},
{
"name": "Python",
"bytes": "17509"
}
],
"symlink_target": ""
} |
import re
import os
import subprocess
from .logger import Logger
from .tmux import Tmux
log = Logger()
class Git(object):
_config = {}
_repos = []
_root = ''
def __init__(self, config):
"""
:param config: Dictionary with config schema
"""
self._config = config
self._root = self._config.get('dir') or os.getcwd()
self._root = os.path.expanduser(self._root)
# Collect normalized list of repositories in workspace
for repo_name in self._config.get('repos', []):
if isinstance(repo_name, str):
repo = {
'name': repo_name,
'url': self._parse_repo_url(repo_name),
'dir': repo_name.split('/')[1]
}
else:
repo = repo_name
if 'url' not in repo:
repo['url'] = self._parse_repo_url(repo['name'])
self._repos.append(repo)
def _parse_repo_url(self, repo_name):
"""
Complete full URL for short named repositories
"""
url = None
if 'github.com' not in repo_name:
url = 'https://github.com/{}.git'.format(repo_name)
return url or repo_name
    def clone(self):
        """
        Clone all repositories in project directory.

        Creates the workspace root if needed, changes into it, and runs
        ``git clone`` for each configured repository.
        """
        log.echo(' [blue]::[reset] Fetching git index for project at [white]{}'
                 .format(self._root))
        if not os.path.isdir(self._root):
            os.makedirs(self._root)
        os.chdir(self._root)
        for repo in self._repos:
            log.echo(' [blue]::[reset] Cloning [white]{} [boldblack]@ {}'
                     .format(repo['name'], repo['url']))
            # stderr is merged into stdout so git errors surface in output.
            subprocess.check_output(
                ['git', 'clone', repo['url']],
                stderr=subprocess.STDOUT)
    def fetch(self):
        """
        Iterate through all repositories and run git fetch with rich output.

        Fetches all remotes, tags included, pruning deleted refs, then
        pretty-prints git's summary via _parse_git_fetch.
        """
        log.echo(' [blue]::[reset] Fetching git index for project at [white]{}'
                 .format(self._root))

        # Debugging:
        # with open('output_example.txt') as f:
        #     output = f.read()
        # self._parse_git_fetch(output)
        # return

        for repo in self._repos:
            log.echo(' [blue]::[reset] Fetching [white]{} [boldblack]@ {}'
                     .format(repo['name'], repo['url']))
            # chdir back to the root first: repo dirs are siblings.
            os.chdir(self._root)
            os.chdir(repo['dir'])
            output = subprocess.check_output(
                ['git', 'fetch', '--all', '--tags', '--prune'],
                stderr=subprocess.STDOUT)
            self._parse_git_fetch(output.decode('utf_8'))
    def _parse_git_fetch(self, output):
        """
        Parse and beautify git's raw fetch summary.

        :param output: Git's raw fetch output

        Groups refs into created/updated branches and tags plus deletions,
        then prints one colored summary line per category.
        """
        branches = {'created': [], 'updated': []}
        tags = {'created': [], 'updated': []}
        deleted = []

        # Example matches:
        #
        #  * [new branch]      1.34.3     -> gogs/1.34.3
        #    bc23688..8be82ed  develop    -> gogs/develop
        #  * [new tag]         0.9.1      -> 0.9.1
        #  - [tag update]      1.19.0     -> 1.19.0
        #  - [tag update]      1.19.1     -> 1.19.1
        #  - [tag update]      1.19.2     -> 1.19.2
        #  * [new tag]         1.19.3     -> 1.19.3
        #  * [new tag]         1.19.4     -> 1.19.4
        #  x [deleted]         (none)     -> origin/foobar
        # Group 1: flag char; group 2: action text; group 3: local ref;
        # group 4: remote ref.
        regex = re.compile(
            r'^\s+([-+*x\ ])\s+\[?([\w\ \.]+)\]?'
            '\s{2,}([^\s]+)\s{2,}->\s(.*)$',
            flags=re.MULTILINE
        )
        for match in regex.finditer(output):
            remote = match.group(4)
            # "new branch"/"new tag" means created; everything else updated.
            if match.group(2).find('new') > -1:
                action = 'created'
            else:
                action = 'updated'
            if match.group(2) == 'deleted':
                deleted.append(remote)
            if match.group(2).find('tag') > -1:
                tags[action].append(remote)
            else:
                branches[action].append(remote)

        for action in ['created', 'updated']:
            if tags[action] or branches[action]:
                log.echo('    [{}]::[reset] {}'
                         ' {}[yellow]([boldyellow]{}[yellow])[reset]'
                         ' {}[yellow]([boldred]{}[yellow])[reset]'
                         .format(
                             'green' if action == 'created' else 'yellow',
                             action.title(),
                             'tags: ' if tags[action] else '',
                             ', '.join(tags[action]),
                             'branches: ' if branches[action] else '',
                             ', '.join(branches[action])))
        if deleted:
            log.echo('    [red]::[reset] Deleted:'
                     ' [yellow]([boldred]{}[yellow])[reset]'
                     .format(', '.join(deleted)))
def status(self):
"""
Iterate through repositories and display a colorful status
"""
session_name = self._config.get('name')
is_on = Tmux().has_session(session_name)
log.echo(' [blue]::[reset] Session [boldyellow]{}[reset]: {}'
.format(session_name,
'[boldgreen]on' if is_on else '[boldred]off'))
# Compatibility for python 2.x, doesn't have subprocess.DEVNULL
DEVNULL = open(os.devnull, 'wb')
for repo in self._repos:
os.chdir(self._root)
os.chdir(repo['dir'])
try:
output = subprocess.check_output(
['git', 'symbolic-ref', '-q', 'HEAD'],
stderr=DEVNULL)
detached = False
except subprocess.CalledProcessError:
detached = True
output = subprocess.check_output(['git', 'diff', '--shortstat'])
modified = re.match(r'^\s*(\d)', output.decode('utf_8'))
modified = '≠' + str(modified.group(1)) if modified else ''
output = subprocess.check_output(
['git', 'ls-files', '--others', '--exclude-standard'],
stderr=DEVNULL).decode('utf-8')
untracked = len(output.split('\n')) - 1
untracked = '?' + str(untracked) if untracked > 0 else ''
current = subprocess.check_output([
'git', 'log', '-1', '--color=always',
'--format=%C(auto)%D %C(black bold)(%aN %ar)%Creset'
]).decode('utf-8').strip()
if detached:
position = 'detach'
else:
output = subprocess.check_output([
'git', 'rev-parse', '--abbrev-ref', 'HEAD'])
branch = output.decode('utf-8').strip()
upstream = 'origin/{}'.format(branch)
try:
output = subprocess.check_output(
['git', 'rev-parse', '--abbrev-ref', '@{upstream}'],
stderr=DEVNULL)
upstream = output.decode('utf-8').strip()
except subprocess.CalledProcessError:
pass
position = ''
try:
output = subprocess.check_output([
'git', 'rev-list', '--left-right',
branch, '...', upstream
], stderr=subprocess.STDOUT).decode('utf-8')
ahead = len(re.findall(r'\<', output))
behind = len(re.findall(r'\>', output))
position = '{}{}'.format(
'▲' + str(ahead) if ahead else '',
'▼' + str(behind) if behind else '',
)
except subprocess.CalledProcessError:
position = 'n/a'
name = repo['name']
if repo['name'].split('/')[1] != repo['dir']:
name = repo['dir']
log.echo(' [white]{:>30} '
' [boldred]{:3} [boldblue]{:3} [boldmagenta]{:7}'
' [reset]{}'
.format(
name, modified, untracked,
position, current))
DEVNULL.close()
@staticmethod
def is_git_repo():
"""
Check if current directory is a Git repository
"""
try:
subprocess.check_output(
['git', 'rev-parse', '--is-inside-work-tree'],
stderr=subprocess.STDOUT)
return True
except subprocess.CalledProcessError:
return False
@staticmethod
def get_remote_url():
"""
Returns the current directory's Git repository remote URL
"""
try:
url = subprocess.check_output(
['git', 'config', '--get', 'remote.origin.url'],
stderr=subprocess.STDOUT)
return url.decode('utf-8').strip()
except subprocess.CalledProcessError:
return False
| {
"content_hash": "2f25507d8b7f5b7889b05ca4546f83c1",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 79,
"avg_line_length": 36.75,
"alnum_prop": 0.4521615097651964,
"repo_name": "rafi/mx",
"id": "5cd98c9b61ce3130c1fd809eb51b6e5e00136bc2",
"size": "9144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/mx/git.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "319"
},
{
"name": "Python",
"bytes": "30856"
}
],
"symlink_target": ""
} |
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from fixture.session import SessionHelper
from fixture.group import GroupHelper
from fixture.contact import ContactHelper
class Application:
    """Owns the WebDriver session and wires up the page-object helpers."""

    def __init__(self):
        profile = FirefoxProfile()
        # Suppress the post-upgrade "what's new" page so it cannot
        # interfere with the tests' start page.
        profile.set_preference("browser.startup.homepage_override.mstone", "ignore")
        self.wd = WebDriver(firefox_profile=profile)
        self.session = SessionHelper(self)
        self.group = GroupHelper(self)
        self.contact = ContactHelper(self)

    def is_valid(self):
        """Return True if the browser session is still alive."""
        try:
            self.wd.current_url
            return True
        except Exception:
            # Narrowed from a bare ``except:`` so SystemExit and
            # KeyboardInterrupt are no longer swallowed here.
            return False

    def open_home_page(self):
        """Navigate to the addressbook home page unless already there."""
        wd = self.wd
        if not wd.current_url.endswith("/addressbook/"):
            wd.get("http://localhost/addressbook/")

    def destroy(self):
        """Shut the browser down."""
        self.wd.quit()
| {
"content_hash": "0e9c741c1edaafcee57287c020576ed5",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 84,
"avg_line_length": 29.545454545454547,
"alnum_prop": 0.6543589743589744,
"repo_name": "tzvezda/python_training",
"id": "ade46ca054b419ee143c4e7644daa91df455b951",
"size": "999",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fixture/application.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "30452"
}
],
"symlink_target": ""
} |
from re import findall
class Paragraph:
    """A single paragraph of text within a chapter."""

    def __init__(self, text):
        self._text = text

    def getText(self):
        """Return the raw paragraph text."""
        return self._text

    def getWordCount(self):
        """Return the list of word tokens found in the paragraph.

        NOTE(review): despite its name this returns the matched words
        themselves, not their number - confirm callers before renaming
        or wrapping the result in ``len()``.
        """
        return findall(r'\w+', self._text)

    def getCharCount(self):
        """Return the number of characters in the paragraph."""
        return len(self._text)
| {
"content_hash": "74d83a47204d23f4dc342572a40a9436",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 43,
"avg_line_length": 19.6875,
"alnum_prop": 0.5873015873015873,
"repo_name": "Senth/ktouch-lesson-creator",
"id": "dc0aad4e751522ccc7887947850bbf1ede138c7d",
"size": "315",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "book/Paragraph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10674"
}
],
"symlink_target": ""
} |
""" Unit tests:
- :class:`TestUniformityUniformSpanningTreeSampler` to check that the different procedures used for uniform spanning trees of a graph actually sample spanning trees uniformly at random.
"""
import unittest
import itertools as itt
from collections import Counter
import numpy as np
from scipy.stats import chisquare
from networkx import erdos_renyi_graph, is_connected, incidence_matrix
import sys
sys.path.append('..')
from dppy.utils import det_ST
from dppy.exotic_dpps import UST
class UniformityOfSamplerForUniformSpanningTree(unittest.TestCase):
    """ Test uniformity of the sampling procedures for sampling spanning trees of a graph uniformly at random on an Erdos-Renyi graph :math:`G(n,p)`
    """
    def __init__(self, *args, **kwargs):
        super(UniformityOfSamplerForUniformSpanningTree,
              self).__init__(*args, **kwargs)
        # Sample a connected Erdos-Renyi graph
        n, p = 5, 0.4
        nb_st_min, nb_st_max = 5, 10
        it_max = 100
        # Rejection-sample up to it_max graphs until one is connected and
        # has between nb_st_min and nb_st_max spanning trees (keeps the
        # chi-square test below well conditioned).
        for _ in range(it_max):
            g = erdos_renyi_graph(n, p)
            if is_connected(g):
                A = incidence_matrix(g, oriented=True)[:-1, :].toarray()
                potential_st = itt.combinations(range(g.number_of_edges()),
                                                n - 1)
                # An (n-1)-edge subset is a spanning tree iff the
                # corresponding incidence submatrix has non-zero determinant.
                list_st = [st for st in potential_st
                           if det_ST(A, range(n - 1), st)]
                if nb_st_min <= len(list_st) <= nb_st_max:
                    break
        else:
            # for/else: reached only when no break fired in it_max tries.
            raise ValueError('No satisfactory Erdos-Renyi graph found')
        self.nb_spanning_trees = len(list_st)
        self.ust = UST(g)
        self.nb_samples = 1000
    def test_projection_kernel_computation(self):
        """UST is a DPP associated to the projection kernel onto the row span of the vertex-edge-incidence matrix
        """
        inc = incidence_matrix(self.ust.graph, oriented=True).todense()
        # pinv(A).A is the orthogonal projection onto the row space of A.
        expected_kernel = np.linalg.pinv(inc).dot(inc)
        self.ust.compute_kernel()
        self.assertTrue(np.allclose(self.ust.kernel, expected_kernel))
    @staticmethod
    def sample_to_label(graph):
        """Join edges of a sample to form the ID of the corresponding spanning tree
        Ex:
        [(3, 2), (2, 0), (2, 1), (0, 4)] -> '[0, 2][0, 4][1, 2][2, 3]'
        """
        return ''.join(map(str, sorted(map(sorted, graph.edges()))))
    def test_uniformity_adequation(self):
        # Exercise every registered sampler/mode pair; each runs as its
        # own subTest so one failure does not mask the others.
        for sampler, sampler_modes in self.ust._sampling_modes.items():
            for mode in sampler_modes:
                with self.subTest(sampler=(sampler, mode)):
                    self.ust.flush_samples()
                    for _ in range(self.nb_samples):
                        self.ust.sample(mode=mode)
                    self.assertTrue(self.uniformity_adequation())
    def uniformity_adequation(self, tol=0.05):
        """Perform chi-square test to check that the different spanning trees sampled have a uniform distribution"""
        counter = Counter(map(self.sample_to_label,
                              self.ust.list_of_samples))
        freq = np.array(list(counter.values())) / self.nb_samples
        theo = np.ones(self.nb_spanning_trees) / self.nb_spanning_trees
        _, pval = chisquare(f_obs=freq, f_exp=theo)
        # Uniformity is accepted when the p-value exceeds `tol`.
        return pval > tol
def main():
    """Run the unit tests in this module."""
    unittest.main()
if __name__ == '__main__':
    main()
| {
"content_hash": "c0c24c90e205aeb382affbb5ff63ca81",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 185,
"avg_line_length": 32.48571428571429,
"alnum_prop": 0.5968924069187922,
"repo_name": "guilgautier/DPPy",
"id": "250d5ed9e325e348a798d7d0e395d626aad29442",
"size": "3426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_ust_samplers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "408222"
},
{
"name": "Python",
"bytes": "441355"
},
{
"name": "Shell",
"bytes": "1694"
}
],
"symlink_target": ""
} |
from numpy import cos, linspace, pi, sin
from bokeh.core.enums import LegendLocation
from bokeh.io import output_file, show
from bokeh.models import (Circle, ColumnDataSource, DataRange1d, Legend, Line,
LinearAxis, PanTool, Plot, SaveTool, WheelZoomTool,)
# Two periods of sine/cosine sampled on 400 points.
x = linspace(-2*pi, 2*pi, 400)
y = sin(x)
y2 = cos(x)
source = ColumnDataSource(data=dict(x=x, y=y, y2=y2))
xdr = DataRange1d()
ydr = DataRange1d()
plot = Plot(
    x_range=xdr, y_range=ydr,
    plot_width=1000, plot_height=600,
    min_border=0,
    toolbar_location=None,
    background_fill_color='#F0F0F0',
    border_fill_color='lightgray',
)
line_glyph = Line(x="x", y="y", line_color="navy", line_width=2, line_dash="dashed")
line = plot.add_glyph(source, line_glyph)
circle = Circle(x="x", y="y2", size=6, line_color="red", fill_color="orange", fill_alpha=0.6)
# NOTE: rebinds `circle` from the glyph to its renderer - the renderer is
# what the Legend items built later must reference.
circle = plot.add_glyph(source, circle)
pan = PanTool()
wheel_zoom = WheelZoomTool()
preview_save = SaveTool()
plot.add_tools(pan, wheel_zoom, preview_save)
# Add axes (Note it's important to add these before adding legends in side panels)
plot.add_layout(LinearAxis(), 'below')
plot.add_layout(LinearAxis(), 'left')
plot.add_layout(LinearAxis(), 'right')
def add_legend(location, orientation, side):
    """Attach a legend for the two renderers at the given position.

    :param location: named position (e.g. 'top_right') or an (x, y) tuple.
    :param orientation: 'horizontal' or 'vertical'.
    :param side: which panel of the plot to place the legend in.
    """
    legend = Legend(
        items=[("line", [line]), ("circle", [circle])],
        location=location, orientation=orientation,
        border_line_color="black",
        title='Example Title'
    )
    plot.add_layout(legend, side)
# Add legends in named positions e.g. 'top_right', 'top_left' (see plot for all)
for location in LegendLocation:
    add_legend(location, "vertical", "center")
# Add legend at a fixed (x, y) position
add_legend((150, 50), "horizontal", "center")
# Add legend in side panels
add_legend("center_left", "horizontal", "above")
add_legend("center", "horizontal", "below")
add_legend("center", "vertical", "left")
add_legend("bottom_center", "vertical", "right")
output_file("legends.html")
show(plot)
| {
"content_hash": "d73489a327d6667d59a5d0f36b071015",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 93,
"avg_line_length": 30.454545454545453,
"alnum_prop": 0.6776119402985075,
"repo_name": "ericmjl/bokeh",
"id": "dfdad63ac4d73a7ce86ffd65bdbb36e7ee4b93b8",
"size": "2010",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/models/file/legends.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1442"
},
{
"name": "CSS",
"bytes": "102094"
},
{
"name": "CoffeeScript",
"bytes": "462899"
},
{
"name": "HTML",
"bytes": "46193"
},
{
"name": "JavaScript",
"bytes": "24563"
},
{
"name": "Makefile",
"bytes": "1150"
},
{
"name": "Python",
"bytes": "2705341"
},
{
"name": "Shell",
"bytes": "8995"
},
{
"name": "TypeScript",
"bytes": "1468288"
}
],
"symlink_target": ""
} |
import pytest
from flask import json, url_for
from tests.conftest import create_authorization_header
from tests.db import create_fee
class WhenGettingEventTypes(object):
    # GET list endpoint: should return every event type in the database.
    def it_returns_all_event_types(self, client, sample_event_type, db_session):
        # NOTE(review): an event_type_id is passed to the *list* endpoint's
        # url_for - presumably ignored by the route; confirm against the
        # route definition.
        response = client.get(
            url_for('event_types.get_event_types', event_type_id=str(sample_event_type.id)),
            headers=[create_authorization_header()]
        )
        assert response.status_code == 200
        json_resp = json.loads(response.get_data(as_text=True))
        # Only the single fixture event type exists.
        assert len(json_resp) == 1
        assert json_resp[0]['id'] == str(sample_event_type.id)
class WhenGettingEventTypeByID(object):
    # GET single-item endpoint (no auth header used here).
    def it_returns_correct_event_type(self, client, sample_event_type, db_session):
        response = client.get(
            url_for('event_type.get_event_type_by_id', event_type_id=str(sample_event_type.id))
        )
        assert response.status_code == 200
        json_resp = json.loads(response.get_data(as_text=True))
        assert json_resp['id'] == str(sample_event_type.id)
    def it_returns_correct_event_type_with_fees(self, client, sample_event_type, db_session):
        # Two fee rows with different valid_from dates; the response is
        # expected to list them in the same order as created here.
        fees = [
            create_fee(event_type_id=str(sample_event_type.id), valid_from='2017-03-01'),
            create_fee(event_type_id=str(sample_event_type.id), fee=10, conc_fee=7, valid_from='2017-02-01')
        ]
        response = client.get(
            url_for('event_type.get_event_type_by_id', event_type_id=str(sample_event_type.id))
        )
        assert response.status_code == 200
        json_resp = json.loads(response.get_data(as_text=True))
        assert json_resp['id'] == str(sample_event_type.id)
        for i, fee in enumerate(fees):
            assert json_resp['fees'][i]['fee'] == fee.fee
            assert json_resp['fees'][i]['conc_fee'] == fee.conc_fee
class WhenPostingEventTypes(object):
    """Tests for the legacy bulk-import endpoint."""

    def it_creates_event_types_for_imported_event_types(self, client, db_session):
        data = [
            {
                "id": "1",
                "EventType": "Talk",
                "Fees": "5",
                "ConcFees": "3",
                "EventDesc": "",
                "EventFilename": None
            },
            {
                "id": "2",
                "EventType": "Introductory Course",
                "Fees": "0",
                "ConcFees": None,
                "EventDesc": "",
                "EventFilename": None
            }
        ]

        response = client.post(
            url_for('event_types.import_event_types'),
            data=json.dumps(data),
            headers=[('Content-Type', 'application/json'), create_authorization_header()]
        )

        assert response.status_code == 201

        json_resp = json.loads(response.get_data(as_text=True))
        assert len(json_resp) == len(data)
        # Verify every imported element; the previous loop used
        # range(0, len(data) - 1) and silently skipped the last one.
        for i in range(len(data)):
            assert json_resp[i]["old_id"] == int(data[i]["id"])
            assert json_resp[i]["event_type"] == data[i]["EventType"]

    def it_ignores_existing_event_types_for_imported_event_types(self, client, db_session, sample_event_type):
        data = [
            {
                "id": "1",
                "EventType": sample_event_type.event_type,
                "Fees": "5",
                "ConcFees": "3",
                "EventDesc": "",
                "EventFilename": None
            },
            {
                "id": "2",
                "EventType": "Introductory Course",
                "Fees": "0",
                "ConcFees": None,
                "EventDesc": "",
                "EventFilename": None
            }
        ]

        response = client.post(
            url_for('event_types.import_event_types'),
            data=json.dumps(data),
            headers=[('Content-Type', 'application/json'), create_authorization_header()]
        )

        assert response.status_code == 201

        json_resp = json.loads(response.get_data(as_text=True))
        # should ignore the first data element but create the second one
        assert len(json_resp) == 1
        assert json_resp[0]["old_id"] == int(data[1]["id"])
        assert json_resp[0]["event_type"] == data[1]["EventType"]
class WhenPostingEventType(object):
    # POST create/update endpoints for a single event type.
    @pytest.mark.parametrize('data', [
        {'event_type': 'Seminar'},
        {'event_type': 'Seminar', 'event_desc': 'Seminar test'},
    ])
    def it_creates_an_event_type_on_valid_post_data(self, client, data, db_session):
        response = client.post(
            url_for('event_type.create_event_type'),
            data=json.dumps(data),
            headers=[('Content-Type', 'application/json'), create_authorization_header()]
        )
        assert response.status_code == 201
        json_resp = json.loads(response.get_data(as_text=True))
        # Every posted field must be echoed back in the created resource.
        for key in data.keys():
            assert data[key] == json_resp[key]
    @pytest.mark.parametrize('data,error_msg', [
        ({'event_desc': 'Seminar test'}, 'event_type is a required property'),
    ])
    def it_returns_400_on_invalid_event_type_post_data(self, client, data, error_msg, db_session):
        response = client.post(
            url_for('event_type.create_event_type'),
            data=json.dumps(data),
            headers=[('Content-Type', 'application/json'), create_authorization_header()]
        )
        assert response.status_code == 400
        json_resp = json.loads(response.get_data(as_text=True))
        # Schema violations surface as ValidationError entries.
        assert all([e['error'] == "ValidationError" for e in json_resp['errors']])
        assert json_resp['errors'][0]['message'] == error_msg
    def it_updates_an_event_type_on_valid_post_data(self, client, sample_event_type, db_session):
        data = {'event_desc': 'updated desc', 'duration': 90}
        response = client.post(
            url_for('event_type.update_event_type', event_type_id=sample_event_type.id),
            data=json.dumps(data),
            headers=[('Content-Type', 'application/json'), create_authorization_header()]
        )
        assert response.status_code == 200
        json_resp = json.loads(response.get_data(as_text=True))
        # Updated fields must be reflected in the response.
        for key in data.keys():
            assert data[key] == json_resp[key]
| {
"content_hash": "5d3488b34fe23897745e0d6ac49b5d33",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 110,
"avg_line_length": 37.48192771084337,
"alnum_prop": 0.5605914496946319,
"repo_name": "NewAcropolis/api",
"id": "36cfc8a9bb32ee2cb244252a90f8498ecd2d4558",
"size": "6222",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/app/routes/event_types/test_rest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "10421"
},
{
"name": "Makefile",
"bytes": "1369"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "791740"
},
{
"name": "Shell",
"bytes": "66108"
}
],
"symlink_target": ""
} |
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div, Submit, HTML, Button, Row, Field
from crispy_forms.bootstrap import AppendedText, PrependedText, FormActions
from .models import ApprovalTracking, Report
class ApprovalTrackingForm(forms.ModelForm):
    """ModelForm exposing the approval-workflow fields of ApprovalTracking."""
    class Meta:
        model = ApprovalTracking
        fields = ('document_data_name', 'from_1',
                  'date', 'purpose_of_data_exchange',
                  'third_party_organization_to_receive_data',
                  'third_party_requested_to_receive_data',
                  'datasets', 'approved',)
class ReportForm(forms.ModelForm):
    """ModelForm for creating and editing Report records."""
    class Meta:
        model = Report
        fields = ('document_data_name', 'document_data_type', 'mg_employee_assigned',
                  'third_party_organization',
                  'third_party_name', 'date_sent_received',
                  'category', 'purpose', 'approved', 'description', 'response_required',
                  'response_due_date', 'comments', 'data_saved_as', 'attachments',)
class MessageForm(forms.Form):
    """Demo form exercising several widget types with a crispy-forms layout."""
    text_input = forms.CharField()
    textarea = forms.CharField(
        widget=forms.Textarea(),
    )
    radio_buttons = forms.ChoiceField(
        choices=(
            ('option_one', "Option one is this and that be sure to include why it's great"),
            ('option_two', "Option two can is something else and selecting it will deselect option one")
        ),
        widget=forms.RadioSelect,
        initial='option_two',
    )
    checkboxes = forms.MultipleChoiceField(
        choices=(
            ('option_one', "Option one is this and that be sure to include why it's great"),
            ('option_two', 'Option two can also be checked and included in form results'),
            ('option_three', 'Option three can yes, you guessed it also be checked and included in form results')
        ),
        initial='option_one',
        widget=forms.CheckboxSelectMultiple,
        help_text="<strong>Note:</strong> Labels surround all the options for much larger click areas and a more usable form.",
    )
    appended_text = forms.CharField(
        help_text="Here's more help text"
    )
    prepended_text = forms.CharField()
    prepended_text_two = forms.CharField()
    multicolon_select = forms.MultipleChoiceField(
        choices=(('1', '1'), ('2', '2'), ('3', '3'), ('4', '4'), ('5', '5')),
    )
    # Uni-form
    # NOTE: the crispy helper is built once at class-definition time and is
    # therefore shared by every instance of this form.
    helper = FormHelper()
    helper.form_class = 'form-horizontal'
    helper.layout = Layout(
        Field('text_input', css_class='input-xlarge'),
        Field('textarea', rows="3", css_class='input-xlarge'),
        'radio_buttons',
        Field('checkboxes', style="background: #FAFAFA; padding: 10px;"),
        AppendedText('appended_text', '.00'),
        PrependedText(
            'prepended_text', '<input type="checkbox" checked="checked" value="" id="" name="">', active=True),
        PrependedText('prepended_text_two', '@'),
        'multicolon_select',
        FormActions(
            Submit('save_changes', 'Save changes', css_class="btn-primary"),
            Submit('cancel', 'Cancel'),
        )
    )
| {
"content_hash": "17e2d62cf2ad2d3d469865d8da6d5153",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 127,
"avg_line_length": 37.411764705882355,
"alnum_prop": 0.6069182389937107,
"repo_name": "kdagley/midas_pr",
"id": "e52d6fd9e3ba07575ece8726af0bb585bc490dde",
"size": "3204",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "midas_pr/dataexchanges/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "69244"
},
{
"name": "HTML",
"bytes": "97710"
},
{
"name": "JavaScript",
"bytes": "166647"
},
{
"name": "Nginx",
"bytes": "1095"
},
{
"name": "Python",
"bytes": "75893"
},
{
"name": "Shell",
"bytes": "4586"
}
],
"symlink_target": ""
} |
from optparse import make_option
from django.core.management.base import BaseCommand
from narrative.models import Datum
class Command(BaseCommand):
    """Management command that deletes expired Datum rows."""
    option_list = BaseCommand.option_list + (
        make_option(
            '--verbose', action='store_true', dest='verbose', default=False,
            # Help text fixed: previously read "should in verbose mode".
            help='Determines if this command should run in verbose mode'),)
    help = (
        'Clear any expired events')

    def handle(self, *args, **options):
        """Clear expired data; with --verbose, print what was cleared."""
        cleared_events = Datum.objects.clear_expired()
        if options['verbose']:
            print('Cleared data: {0}'.format(cleared_events))
| {
"content_hash": "20b437c70fc0bf141f2095670f1162a8",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 76,
"avg_line_length": 30.35,
"alnum_prop": 0.6523887973640856,
"repo_name": "ambitioninc/django-narrative",
"id": "5d5da140625748795b829c170fabd76786c86d4d",
"size": "607",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "narrative/management/commands/clear_expired_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "249"
},
{
"name": "Python",
"bytes": "247319"
}
],
"symlink_target": ""
} |
"""Unit tests for the SparseHamiltonian class."""
import pytest
from openfermion import FermionOperator
from fqe.hamiltonians import sparse_hamiltonian
def test_sparse():
    """Test some of the functions in SparseHamiltonian."""
    oper = FermionOperator('0 0^')
    oper += FermionOperator('1 1^')
    test = sparse_hamiltonian.SparseHamiltonian(oper)
    assert test.rank() == 2
    # Second positional flag False - presumably disables the per-term
    # ("individual") representation; confirm against the constructor.
    test = sparse_hamiltonian.SparseHamiltonian(oper, False)
    assert not test.is_individual()
    terms = test.terms()
    assert terms == [(-1.0, [(0, 1), (0, 0)], []), (-1.0, [], [(0, 1), (0, 0)])]
    # terms_hamiltonian() splits the operator into one SparseHamiltonian
    # per term.
    ham = test.terms_hamiltonian()
    assert len(ham) == 2
    assert isinstance(ham[0], sparse_hamiltonian.SparseHamiltonian)
    assert ham[1].terms()[0] == (-1.0, [], [(0, 1), (0, 0)])
def test_sparse_from_string():
    # The constructor also accepts the operator as a string.
    test = sparse_hamiltonian.SparseHamiltonian('1^ 0')
    assert test.rank() == 2
    assert test.is_individual()
    assert test.nterms() == 1
    time = 3.1
    # iht(t) scales each coefficient by -i*t and leaves the operator
    # content unchanged, as asserted below.
    iht = test.iht(time)
    terms = test.terms()[0]
    iterms = iht.terms()[0]
    assert terms[1:] == iterms[1:]
    assert abs(terms[0] * (-1j) * time - iterms[0]) < 1.0e-8
def test_dim_error():
    """Test if SparseHamiltonian raises an error if dim() is accessed
    """
    hamil = sparse_hamiltonian.SparseHamiltonian(FermionOperator('0 0^'))
    with pytest.raises(NotImplementedError):
        hamil.dim()
def test_equality():
    """ Test the equality operator """
    oper = FermionOperator('0 0^')
    test = sparse_hamiltonian.SparseHamiltonian(oper)
    test2 = sparse_hamiltonian.SparseHamiltonian(oper)
    # Same operator -> equal hamiltonians.
    assert test == test2
    # Comparison against a non-Hamiltonian must be False, not an error.
    assert not (test == 1)
    oper2 = FermionOperator('1 1^')
    test2 = sparse_hamiltonian.SparseHamiltonian(oper2)
    # Different operators -> unequal.
    assert test != test2
| {
"content_hash": "fa44f01c35d00a5ac1dcd055517bf01e",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 80,
"avg_line_length": 28.428571428571427,
"alnum_prop": 0.6393076493579006,
"repo_name": "quantumlib/OpenFermion-FQE",
"id": "2fa6584bc195ac6f994d637045dcc9c80d123b99",
"size": "2384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/sparse_hamiltonian_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "219159"
},
{
"name": "Cython",
"bytes": "56133"
},
{
"name": "Makefile",
"bytes": "580"
},
{
"name": "Python",
"bytes": "1684470"
},
{
"name": "Shell",
"bytes": "5724"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2017, Arm Limited and affiliates.
SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os,sys
from icetea_lib.bench import Bench
from icetea_lib.TestStepError import TestStepFail
class Testcase(Bench):
    # Icetea testcase: two MAC tester devices form a PAN and exchange one
    # data frame in each direction.
    def __init__(self):
        Bench.__init__(self, name = "send_data",
                        title = "Simple data transmission test",
                        status = "released",
                        type = "smoke",
                        subtype = "",
                        execution = {
                            "skip": {
                                "value": False,
                                "reason": ""
                            }
                        },
                        author = "Valtteri Erkkila",
                        purpose = "Tests that sending data works",
                        feature = ["MCPS-DATA"],
                        component = ["MAC"],
                        requirements = {
                            "duts": {
                                '*': {
                                    "count":2,
                                    "type": "hardware",
                                    "allowed_platforms": ["K64F", "K66F", "NUCLEO_F429ZI", "KW24D", "UBLOX_EVK_ODIN_W2", "KW41Z"],
                                    "application": {
                                        "name": "TEST_APPS-device-nanostack_mac_tester"
                                    }
                                },
                                "1":{"nick": "First"},
                                "2":{"nick": "Second"}
                        }}
        )
    def setUp(self):
        # RF channel shared by both devices for the whole test.
        self.channel = 11
    def do_test_iteration(self):
        # Reset MAC state on both devices, then assign fixed 64-bit
        # addresses before forming the PAN.
        self.command("First", "mlme-reset")
        self.command("Second", "mlme-reset")
        self.command("First", "addr --64-bit 01:02:03:00:00:00:00:01")
        self.command("Second", "addr --64-bit 01:02:03:00:00:00:00:02")
        # Start PAN coordinator
        self.command("First", "start --pan_coordinator true --logical_channel {}".format(self.channel))
        # Start PAN beacon
        self.command("Second", "start --pan_coordinator false --logical_channel {}".format(self.channel))
        # Send data
        self.command("First", "data --dst_addr 01:02:03:00:00:00:00:02 --msdu_length 5 --msdu abcde")
        self.command("Second", "data --dst_addr 01:02:03:00:00:00:00:01 --msdu_length 5 --msdu 12345")
    def case(self):
        # Try tests few times because of potential RF failures
        loop = 0
        while loop < 5:
            try:
                self.do_test_iteration()
                break
            except TestStepFail:
                self.logger.info("Warning, iteration failed #" + str(loop+1))
                loop = loop + 1
                self.delay(5)
        else:
            # while/else: runs only when all 5 attempts were exhausted
            # without a break, i.e. no iteration succeeded.
            raise TestStepFail("Too many failed iterations!")
    def tearDown(self):
        self.reset_dut()
| {
"content_hash": "716e8ee76a5fc2f4e28e2662dfa3fb77",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 130,
"avg_line_length": 39.01123595505618,
"alnum_prop": 0.4873271889400922,
"repo_name": "andcor02/mbed-os",
"id": "40db26069dc5b5723bfcfd29f6422e4c14a02b63",
"size": "3472",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "TEST_APPS/testcases/nanostack_mac_tester/send_data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "6601399"
},
{
"name": "Batchfile",
"bytes": "22"
},
{
"name": "C",
"bytes": "295194591"
},
{
"name": "C++",
"bytes": "9038670"
},
{
"name": "CMake",
"bytes": "5285"
},
{
"name": "HTML",
"bytes": "2063156"
},
{
"name": "Makefile",
"bytes": "103497"
},
{
"name": "Objective-C",
"bytes": "460244"
},
{
"name": "Perl",
"bytes": "2589"
},
{
"name": "Python",
"bytes": "38809"
},
{
"name": "Shell",
"bytes": "16862"
},
{
"name": "XSLT",
"bytes": "5596"
}
],
"symlink_target": ""
} |
import logging
import pytest
import unittest.mock as mock
from alphatwirl.loop import MPEventLoopRunner
##__________________________________________________________________||
@pytest.fixture()
def communicationChannel():
    # Stand-in for the channel the runner submits tasks to and receives from.
    return mock.Mock(name='communicationChannel')
@pytest.fixture()
def obj(communicationChannel):
    # System under test, wired to the mocked channel.
    return MPEventLoopRunner(communicationChannel)
##__________________________________________________________________||
def test_repr(obj):
    # Smoke test: repr() must not raise.
    repr(obj)
def test_begin_end(obj, communicationChannel):
    obj.begin()
    communicationChannel.receive.return_value = [ ]
    obj.end()
def test_run(obj, communicationChannel):
    obj.begin()
    # The channel hands out task ids 0 and 1; run() must return them
    # and forward each event loop to the channel's put().
    communicationChannel.put.side_effect = [0, 1]
    eventLoop1 = mock.Mock(name='eventLoop1')
    assert 0 == obj.run(eventLoop1)
    eventLoop2 = mock.Mock(name='eventLoop2')
    assert 1 == obj.run(eventLoop2)
    assert [mock.call(eventLoop1), mock.call(eventLoop2)] == communicationChannel.put.call_args_list
def test_run_multiple(obj, communicationChannel):
    obj.begin()
    # run_multiple delegates the whole list to the channel's put_multiple().
    communicationChannel.put_multiple.return_value = [0, 1]
    eventLoop1 = mock.Mock(name='eventLoop1')
    eventLoop2 = mock.Mock(name='eventLoop2')
    assert [0, 1] == obj.run_multiple([eventLoop1, eventLoop2])
    assert [mock.call([eventLoop1, eventLoop2])] == communicationChannel.put_multiple.call_args_list
def test_end(obj, communicationChannel):
    obj.begin()
    eventLoop1 = mock.Mock(name='eventLoop1')
    obj.run(eventLoop1)
    eventLoop2 = mock.Mock(name='eventLoop2')
    obj.run(eventLoop2)
    result1 = mock.Mock(name='result1')
    result2 = mock.Mock(name='result2')
    # end() returns whatever the channel's receive() delivered.
    communicationChannel.receive.return_value = [result1, result2]
    assert [result1, result2] == obj.end()
def test_end_logging_wrong_number_or_results(obj, communicationChannel, caplog):
    obj.begin()
    eventLoop1 = mock.Mock(name='eventLoop1')
    obj.run(eventLoop1)
    eventLoop2 = mock.Mock(name='eventLoop2')
    obj.run(eventLoop2)
    result1 = mock.Mock(name='result1')
    result2 = mock.Mock(name='result2')
    # Deliver only one result for two submitted tasks.
    communicationChannel.receive.return_value = [result1]
    with caplog.at_level(logging.WARNING, logger = 'alphatwirl'):
        results = obj.end()
    # end() returns what it got and logs exactly one WARNING about the
    # shortfall.
    assert [result1] == results
    assert len(caplog.records) == 1
    assert caplog.records[0].levelname == 'WARNING'
    assert 'MPEventLoopRunner' in caplog.records[0].name
    assert 'too few results received' in caplog.records[0].msg
##__________________________________________________________________||
def test_receive(obj, communicationChannel):
    obj.begin()
    communicationChannel.put.side_effect = [0, 1]
    eventLoop1 = mock.Mock(name='eventLoop1')
    assert 0 == obj.run(eventLoop1)
    eventLoop2 = mock.Mock(name='eventLoop2')
    assert 1 == obj.run(eventLoop2)
    result1 = mock.Mock(name='result1')
    result2 = mock.Mock(name='result2')
    # receive() delegates to the channel's receive_all() and returns the
    # (task id, result) pairs unchanged.
    communicationChannel.receive_all.return_value = [(0, result1), (1, result2)]
    assert [(0, result1), (1, result2)] == obj.receive()
def test_poll(obj, communicationChannel):
    obj.begin()
    communicationChannel.put.side_effect = [0, 1]
    eventLoop1 = mock.Mock(name='eventLoop1')
    assert 0 == obj.run(eventLoop1)
    eventLoop2 = mock.Mock(name='eventLoop2')
    assert 1 == obj.run(eventLoop2)
    result1 = mock.Mock(name='result1')
    result2 = mock.Mock(name='result2')
    # poll() delegates to the channel's receive_finished().
    communicationChannel.receive_finished.return_value = [(0, result1), (1, result2)]
    assert [(0, result1), (1, result2)] == obj.poll()
def test_receive_one(obj, communicationChannel):
    obj.begin()
    communicationChannel.put.side_effect = [0, 1]
    eventLoop1 = mock.Mock(name='eventLoop1')
    assert 0 == obj.run(eventLoop1)
    eventLoop2 = mock.Mock(name='eventLoop2')
    assert 1 == obj.run(eventLoop2)
    result1 = mock.Mock(name='result1')
    result2 = mock.Mock(name='result2')
    # receive_one() delegates to the channel's receive_one(), yielding one
    # pair per call and None once everything has been consumed.
    communicationChannel.receive_one.side_effect = [(0, result1), (1, result2), None]
    assert (0, result1) == obj.receive_one()
    assert (1, result2) == obj.receive_one()
    assert obj.receive_one() is None
##__________________________________________________________________||
| {
"content_hash": "c697936a20c6d31dc57214c32697d7d1",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 100,
"avg_line_length": 30.62773722627737,
"alnum_prop": 0.644184938036225,
"repo_name": "alphatwirl/alphatwirl",
"id": "028be1972d09fd1e376c890a8562bb61f0dde970",
"size": "4232",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/loop/test_MPEventLoopRunner.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3603"
},
{
"name": "Python",
"bytes": "775977"
},
{
"name": "R",
"bytes": "1222"
},
{
"name": "Shell",
"bytes": "28"
}
],
"symlink_target": ""
} |
"""
.. module:: smart_dir_copy
:platform: Unix, Windows
:synopsis:
Recursively copy a directory, skipping any files that don't appear to
require updating.
.. moduleauthor:: Brandon Huber
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
# SVN keyword: expands to the revision number only in checkouts with svn
# keyword substitution enabled; empty ('') in a plain git checkout.
REVISION = '$Revision$'[12:]
__version__ = '0.1'
import math
import os
import os.path
import shutil
# Python 3.5 introduced math.isclose(); use it when available and fall back
# to a local implementation with the same semantics on older interpreters.
try:
    from math import isclose as _isclose
except ImportError:
    def _isclose(a, b, rel_tol=1e-9, abs_tol=0.0):
        """Return True if a and b are within the given tolerances."""
        diff = abs(a - b)
        return diff <= rel_tol * max(abs(a), abs(b)) or diff <= abs_tol
#def _comparator(old, new):
#if os.path.isfile(old)
def set_mtime(filename, mtime):
    """Set the modification timestamp of ``filename`` to ``mtime``.

    :param str filename: The filename of the file to modify.
    :param float mtime:
        The new mtime, as a float. See :meth:`os.path.getmtime()`.
    """
    # Preserve the current access time; only the mtime changes.
    atime = os.path.getatime(filename)
    os.utime(filename, (atime, mtime))
def smart_dir_sync(
    src_dir, dst_dir, mtime_tolerance=2.0,
    pathname_xform=lambda sub_path, fname: os.path.join(sub_path, fname)
):
    """Recursively copy a directory, skipping any files that don't appear to
    require updating.

    A file is skipped when a file of the (transformed) name already exists
    in ``dst_dir`` with the same size and an mtime within
    ``mtime_tolerance`` seconds of the source file's mtime.

    :param str src_dir: The directory to copy from.
    :param str dst_dir: The directory to copy to.
    :param float mtime_tolerance:
        How many seconds to tolerate in differences between the mtime of
        a file in ``src_dir`` and an existing file of the same name in
        ``dst_dir``. This is especially useful for FAT filesystems, which have
        a 2-second resolution.
    :param function pathname_xform:
        An optional function that translates a pathname from ``src_dir`` before
        being written to ``dst_dir``. This can be used for stripping out
        characters that may not be supported on the filesystem of ``dst_dir``.
        The function should take 2 strings as arguments, representing the
        current subdirectory(s), if any, under the ``dst_dir``;
        and the filename. The function should return a single string
        representing both strings after undergoing any desired transformations
        and concatenated together.
    """
    # Fixes over the previous revision: the docstring documented a
    # non-existent `filename_xform` parameter, a dead `src_pathname`
    # assignment sat in the directory loop, and debug print()s were left in.
    for dir_path, dir_names, filenames in os.walk(src_dir):
        sub_path = os.path.relpath(dir_path, src_dir)
        # os.walk is top-down, so creating this level's directories first
        # guarantees parents exist before their children are visited.
        for d in dir_names:
            dst_pathname = os.path.join(dst_dir, pathname_xform(sub_path, d))
            if not os.path.exists(dst_pathname):
                os.mkdir(dst_pathname)
        for f in filenames:
            src_pathname = os.path.join(src_dir, sub_path, f)
            dst_pathname = os.path.join(dst_dir, pathname_xform(sub_path, f))
            # Skip files that already look up to date: same size and an
            # mtime within the tolerance window.
            if (
                os.path.exists(dst_pathname)
                and _isclose(
                    os.path.getmtime(src_pathname),
                    os.path.getmtime(dst_pathname),
                    abs_tol=mtime_tolerance
                )
                and os.path.getsize(dst_pathname) ==
                os.path.getsize(src_pathname)
            ):
                continue
            shutil.copy(src_pathname, dst_pathname)
            # Propagate the source mtime so future runs can skip this file.
            set_mtime(dst_pathname, os.path.getmtime(src_pathname))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(prog='smart_dir_sync.py')
    # Report the module's actual __version__ instead of a hard-coded
    # '2.0' that contradicted __version__ = '0.1' above.
    parser.add_argument(
        '--version', action='version', version='%(prog)s ' + __version__)
    parser.add_argument('src')
    parser.add_argument('dst')
    options = parser.parse_args()
    smart_dir_sync(options.src, options.dst)
| {
"content_hash": "cf8d24d3f59f35d602084d24b1083b93",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 79,
"avg_line_length": 34.93043478260869,
"alnum_prop": 0.6183719193427931,
"repo_name": "shalesbridge/steamssdmanager",
"id": "fc1282485a540b566b3e7050b627199b299fa48c",
"size": "4040",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smart_dir_copy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44074"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import json
import fileinput
# Each input line is one tweet serialized as JSON; emit the URL of every
# attached media entity, if any.
for line in fileinput.input():
    entities = json.loads(line)['entities']
    for media in entities.get('media', []):
        print(media['media_url'])
| {
"content_hash": "68422887699a3917ddcacd557dbbf55f",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 48,
"avg_line_length": 28,
"alnum_prop": 0.6468253968253969,
"repo_name": "edsu/twarc",
"id": "7c6ef6ffcd82cba242f46a33b31a2f5b5e5f3978",
"size": "275",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "utils/embeds.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "157624"
}
],
"symlink_target": ""
} |
from codecs import open # To use a consistent encoding
from os import path
from setuptools import setup
HERE = path.dirname(path.abspath(__file__))
# Get version info: execute __about__.py so ABOUT['__version__'] is populated.
ABOUT = {}
with open(path.join(HERE, 'datadog_checks', 'confluent_platform', '__about__.py')) as f:
    exec(f.read(), ABOUT)
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
def get_dependencies():
    """Return the lines of requirements.in, or an empty list if absent."""
    dep_file = path.join(HERE, 'requirements.in')
    if path.isfile(dep_file):
        with open(dep_file, encoding='utf-8') as reqs:
            return reqs.readlines()
    return []
def parse_pyproject_array(name):
    """Extract the TOML array assigned to ``name`` in pyproject.toml.

    The array is matched textually with a regex and evaluated as a
    Python literal.
    """
    import os
    import re
    from ast import literal_eval
    with open(os.path.join(HERE, 'pyproject.toml'), 'r', encoding='utf-8') as f:
        # Strip trailing whitespace: Windows \r\n would prevent a match.
        contents = '\n'.join(line.rstrip() for line in f.readlines())
    pattern = r'^{} = (\[.*?\])$'.format(name)
    match = re.search(pattern, contents, flags=re.MULTILINE | re.DOTALL)
    return literal_eval(match.group(1))
# First element of the pyproject `dependencies` array: the pinned
# datadog-checks-base requirement.
CHECKS_BASE_REQ = parse_pyproject_array('dependencies')[0]
setup(
    name='datadog-confluent_platform',
    version=ABOUT['__version__'],
    description='The Confluent Platform check',
    long_description=long_description,
    long_description_content_type='text/markdown',
    keywords='datadog agent confluent_platform check',
    # The project's main homepage.
    url='https://github.com/DataDog/integrations-core',
    # Author details
    author='Datadog',
    author_email='packages@datadoghq.com',
    # License
    license='BSD-3-Clause',
    # See https://pypi.org/classifiers
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Topic :: System :: Monitoring',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.8',
    ],
    # The package we're going to ship
    packages=['datadog_checks.confluent_platform'],
    # Run-time dependencies
    install_requires=[CHECKS_BASE_REQ],
    extras_require={'deps': parse_pyproject_array('deps')},
    # Extra files to ship with the wheel package
    include_package_data=True,
)
| {
"content_hash": "a250a77c3540d35595dc1bdb895ffbee",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 88,
"avg_line_length": 31,
"alnum_prop": 0.6549235993208828,
"repo_name": "DataDog/integrations-core",
"id": "7dec01c2ad0f082fd7daab65fa75d6709c0ea31a",
"size": "2471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "confluent_platform/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "578"
},
{
"name": "COBOL",
"bytes": "12312"
},
{
"name": "Dockerfile",
"bytes": "22998"
},
{
"name": "Erlang",
"bytes": "15518"
},
{
"name": "Go",
"bytes": "6988"
},
{
"name": "HCL",
"bytes": "4080"
},
{
"name": "HTML",
"bytes": "1318"
},
{
"name": "JavaScript",
"bytes": "1817"
},
{
"name": "Kotlin",
"bytes": "430"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "PHP",
"bytes": "20"
},
{
"name": "PowerShell",
"bytes": "2398"
},
{
"name": "Python",
"bytes": "13020828"
},
{
"name": "Roff",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "241"
},
{
"name": "Scala",
"bytes": "7000"
},
{
"name": "Shell",
"bytes": "83227"
},
{
"name": "Swift",
"bytes": "203"
},
{
"name": "TSQL",
"bytes": "29972"
},
{
"name": "TypeScript",
"bytes": "1019"
}
],
"symlink_target": ""
} |
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from toontown.toonbase.ToontownGlobals import *
from toontown.toonbase import ToontownTimer
from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import TTLocalizer
class DistributedTarget(DistributedObject.DistributedObject):
    """Client-side distributed object for the estate target (pinball) game.

    Displays a floating target in the estate, reports hits to the server,
    and drives the on-screen timer, score display and the pinball
    high-score panel.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedTarget')
    def __init__(self, cr):
        DistributedObject.DistributedObject.__init__(self, cr)
        # Visual/audio members are populated in load().
        self.geom = None
        self.numConsecutiveHits = 0
        self.enabled = 0
        self.score = 0
        self.hitTime = 0
        self.targetBounceTrack = None
        # pinballInfo: avId -> [best combined score, last score, last multiplier]
        self.pinballInfo = {}
        self.pinballHiScore = 0
        self.pinballHiScorer = ''
        self.onscreenMessage = None
        self.fadeTrack = None
        return
    def disable(self):
        """Stop running intervals and clear UI when leaving the zone."""
        self.ignoreAll()
        DistributedObject.DistributedObject.disable(self)
        if self.targetBounceTrack:
            self.targetBounceTrack.finish()
            self.targetBounceTrack = None
        if self.fadeTrack:
            self.fadeTrack.pause()
            self.fadeTrack = None
        self.__clearOnscreenMessage()
        return
    def generateInit(self):
        DistributedObject.DistributedObject.generateInit(self)
        self.load()
    def load(self):
        """Load model, sounds, score text nodes and the hit-collision sphere."""
        self.timer = ToontownTimer.ToontownTimer()
        self.timer.setPos(1.1, 0, -0.15)
        self.timer.hide()
        self.geom = loader.loadModel('phase_5.5/models/estate/target')
        self.geom.reparentTo(base.cr.playGame.hood.loader.geom)
        self.geom.setPos(0, 0, 40)
        self.geom.setScale(3)
        # Start stashed (hidden); setState() fades it in when enabled.
        self.geom.stash()
        self.hitSound = base.loadSfx('phase_4/audio/sfx/MG_Tag_A.ogg')
        self.rewardSound = base.loadSfx('phase_4/audio/sfx/MG_pos_buzzer.ogg')
        self.scoreText = TextNode('scoreText')
        self.scoreText.setTextColor(1, 0, 0, 1)
        self.scoreText.setAlign(self.scoreText.ACenter)
        self.scoreText.setFont(getSignFont())
        self.scoreText.setText('0')
        self.scoreNode = self.timer.attachNewNode(self.scoreText)
        self.scoreNode.setPos(0, 0, 0.35)
        self.scoreNode.setScale(0.25)
        self.curPinballScoreText = TextNode('pinballScoreText')
        self.curPinballScoreText.setTextColor(1, 0, 0, 1)
        self.curPinballScoreText.setAlign(self.scoreText.ACenter)
        self.curPinballScoreText.setFont(getSignFont())
        self.curPinballScoreText.setText('')
        self.curPinballScoreNode = render.attachNewNode(self.curPinballScoreText)
        self.curPinballScoreNode.setPos(0.5, 0.5, 0.3)
        self.curPinballScoreNode.setScale(0.25)
        # Intangible sphere: detects toons touching the target without
        # physically blocking them.
        colSphere = CollisionSphere(0, 0, 0, 3.5)
        colSphere.setTangible(0)
        colNode = CollisionNode('targetSphere')
        colNode.addSolid(colSphere)
        colSphereNode = self.geom.attachNewNode(colNode)
        self.accept('hitTarget', self.handleHitTarget)
        self.accept('missedTarget', self.handleMissedTarget)
        self.accept('entertargetSphere', self.handleEnterTarget)
    def delete(self):
        """Release every node and sound created in load()."""
        self.ignoreAll()
        self.scoreNode.removeNode()
        del self.scoreNode
        self.curPinballScoreNode.removeNode()
        del self.curPinballScoreNode
        self.geom.removeNode()
        del self.geom
        self.timer.destroy()
        del self.timer
        del self.rewardSound
        del self.hitSound
        DistributedObject.DistributedObject.delete(self)
    def setState(self, enabled, score, time):
        """Server update: fade the target in/out and sync score/timer."""
        if self.enabled != enabled:
            if self.fadeTrack:
                self.fadeTrack.pause()
            if enabled:
                self.fadeTrack = Sequence(Func(base.localAvatar.setSystemMessage, 0, TTLocalizer.EstateTargetGameStart), Func(self.geom.unstash), self.geom.colorScaleInterval(1.0, Vec4(1.0, 1.0, 1.0, 1.0)), Wait(1), Func(base.localAvatar.setSystemMessage, 0, TTLocalizer.EstateTargetGameInst))
            else:
                self.fadeTrack = Sequence(self.geom.colorScaleInterval(1.0, Vec4(1.0, 1.0, 1.0, 0.0)), Func(self.geom.stash), Func(self.hideTimer), Func(base.localAvatar.setSystemMessage, 0, TTLocalizer.EstateTargetGameEnd))
            self.fadeTrack.start()
            self.enabled = enabled
        if score != self.score:
            self.setLevel(score)
        if time != self.hitTime:
            self.setTimer(time)
    def setReward(self, reward):
        # Server-driven reward notification; just play the buzzer.
        base.playSfx(self.rewardSound)
    def handleEnterTarget(self, collEntry):
        self.handleHitTarget()
    def handleHitTarget(self, avId = None, vel = None):
        """Report a hit to the server; optionally play a bounce reaction.

        :param avId: hitting avatar's doId (defaults to the local avatar)
        :param vel: impact velocity vector, if known, used for the recoil
        """
        if not avId:
            avId = base.localAvatar.doId
        if self.enabled:
            self.sendUpdate('setResult', [avId])
        if vel:
            if self.targetBounceTrack:
                self.targetBounceTrack.finish()
            # Recoil opposite to the impact, overshoot forward, then settle.
            pos = self.geom.getPos()
            dist = Vec3(vel)
            dist.normalize()
            newPos = pos - dist * 1.5
            springPos = pos + dist
            self.notify.debug('reaction distance = %s,%s,%s' % (vel[0], vel[1], vel[2]))
            self.targetBounceTrack = Sequence(LerpPosInterval(self.geom, duration=0.1, pos=newPos, blendType='easeOut'), LerpPosInterval(self.geom, duration=0.25, pos=springPos, blendType='easeOut'), LerpPosInterval(self.geom, duration=0.2, pos=pos, blendType='easeOut'))
            self.targetBounceTrack.start()
    def handleMissedTarget(self):
        # avId 0 signals a miss to the server.
        if self.enabled:
            self.sendUpdate('setResult', [0])
    def handleHitCloud(self):
        if self.enabled:
            self.sendUpdate('setBonus', [0.5])
    def setLevel(self, level):
        """Update the displayed score and play the hit sound."""
        self.notify.debug('setLevel(%s)' % level)
        self.score = level
        base.playSfx(self.hitSound)
        self.scoreText.setText('+' + str(int(self.score)))
    def setTimer(self, time):
        """Restart the on-screen countdown with the server-provided time."""
        self.hitTime = time
        self.notify.debug('updateTimer(%s)' % self.enabled)
        if self.enabled:
            self.showTimer()
            self.notify.debug('hitTime = %s' % self.hitTime)
            self.timer.setTime(self.hitTime)
            self.timer.countdown(self.hitTime)
    def showTimer(self):
        # Don't claim the screen cell while the toon is reading the book.
        if base.localAvatar.animFSM.getCurrentState().getName() != 'ReadBook':
            base.setCellsAvailable([base.rightCells[0]], 0)
        self.timer.show()
    def hideTimer(self):
        self.timer.hide()
        base.setCellsAvailable([base.rightCells[0]], 1)
    def setPosition(self, x, y, z):
        self.geom.setPos(x, y, z)
    def showScore(self):
        """Refresh the on-screen high-score / personal-best panel."""
        # Display at most 12 characters of the high scorer's name.
        scoreName = self.pinballHiScorer[0:12]
        if scoreName:
            if len(self.pinballHiScorer) > 12:
                scoreName += TTLocalizer.PinballHiScoreAbbrev
            titleText = TTLocalizer.PinballHiScore % scoreName
            scoreText = TTLocalizer.PinballScoreHolder % self.pinballHiScore
            pinballEntry = self.pinballInfo.get(base.localAvatar.doId)
            if pinballEntry:
                titleText += TTLocalizer.PinballYourBestScore
                scoreText += TTLocalizer.PinballScoreHolder % pinballEntry[0]
                titleText += TTLocalizer.PinballScore % (pinballEntry[1], pinballEntry[2])
                scoreText += TTLocalizer.PinballScoreHolder % (pinballEntry[1] * pinballEntry[2])
            self.__showOnscreenMessage(titleText, scoreText)
    def setCurPinballScore(self, avId, score, multiplier):
        """Record a player's current score/multiplier and update bests."""
        self.notify.debug('setCurPinballScore %d %d %d' % (avId, score, multiplier))
        if self.pinballInfo.get(avId) == None:
            self.pinballInfo[avId] = [0, 0, 0]
        pinballEntry = self.pinballInfo[avId]
        pinballEntry[1] = score
        pinballEntry[2] = multiplier
        curScore = score * multiplier
        if curScore > pinballEntry[0]:
            pinballEntry[0] = curScore
        if curScore > self.pinballHiScore:
            self.pinballHiScore = pinballEntry[0]
            toon = base.cr.doId2do.get(avId)
            if toon:
                self.pinballHiScorer = toon.getName()
        self.showScore()
        return
    def b_setCurPinballScore(self, avId, score, multiplier):
        # Apply locally and broadcast to the server ("b_" = both).
        self.setCurPinballScore(avId, score, multiplier)
        self.sendUpdate('setCurPinballScore', [avId, score, multiplier])
    def __showOnscreenMessage(self, titleText, scoreText):
        """Create or update the dialog frame showing the score panel."""
        self.notify.debug('----- __showOnscreenmessage')
        if not self.onscreenMessage:
            self.onscreenMessage = DirectFrame(relief=None, geom=DGG.getDefaultDialogGeom(), geom_color=GlobalDialogColor, geom_scale=(12, 1, 3), pos=(0, 0, 0.8), scale=0.1)
            titles = DirectLabel(parent=self.onscreenMessage, relief=None, text=titleText, text_fg=VBase4(0, 0, 0, 1), text_align=TextNode.ALeft, text_scale=0.7, pos=(-5.75, 0, 0.5))
            scores = DirectLabel(parent=self.onscreenMessage, relief=None, text=scoreText, text_fg=VBase4(1, 0, 0, 1), text_align=TextNode.ARight, text_scale=0.7, pos=(5.75, 0, 0.5))
            self.onscreenMessage.titles = titles
            self.onscreenMessage.scores = scores
        else:
            self.onscreenMessage.titles['text'] = titleText
            self.onscreenMessage.scores['text'] = scoreText
        # NOTE(review): looks like a debugging leftover — verify before removal.
        base.foobar = self.onscreenMessage
        return
    def __clearOnscreenMessage(self):
        self.notify.debug('----- __clearOnscreenMessage')
        if self.onscreenMessage:
            self.onscreenMessage.destroy()
            self.onscreenMessage = None
        return
    def setPinballHiScore(self, score):
        self.pinballHiScore = score
        self.showScore()
    def setPinballHiScorer(self, name):
        self.pinballHiScorer = name
        self.showScore()
    def hideGui(self):
        if self.timer:
            self.hideTimer()
        if self.onscreenMessage:
            self.onscreenMessage.hide()
    def showGui(self):
        if self.timer:
            if self.enabled:
                self.showTimer()
        if self.onscreenMessage:
            self.onscreenMessage.show()
| {
"content_hash": "2512400ac1e014db7852fec2099d9eba",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 293,
"avg_line_length": 41.391836734693875,
"alnum_prop": 0.6398777240903264,
"repo_name": "ToonTownInfiniteRepo/ToontownInfinite",
"id": "95bf8bf81d9d15d1f314cc7d215c7f5420273ea1",
"size": "10141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toontown/estate/DistributedTarget.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1703277"
},
{
"name": "C#",
"bytes": "9892"
},
{
"name": "C++",
"bytes": "5468044"
},
{
"name": "Emacs Lisp",
"bytes": "210083"
},
{
"name": "F#",
"bytes": "4611"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Objective-C",
"bytes": "23212"
},
{
"name": "Puppet",
"bytes": "5245"
},
{
"name": "Python",
"bytes": "34010215"
},
{
"name": "Shell",
"bytes": "11192"
},
{
"name": "Tcl",
"bytes": "1981257"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import logging
import django_filters
from django.contrib.auth.models import User
from rest_framework import permissions
from rest_framework import renderers
from rest_framework import viewsets
from rest_framework import filters
from rest_framework.decorators import detail_route
from rest_framework.response import Response
from bckt.models import Bckt
from bckt.permissions import IsOwnerOrReadOnly
from bckt.serializers import BcktSerializer, BcktListSerializer, UserSerializer
class BcktFilter(django_filters.FilterSet):
    '''
    Enable filtering on Bckt regardless of
    the value of the parameter DEFAULT_FILTER_BACKENDS.
    Example: /api/bckt/?owner=admin&title__icontains=test
    /api/bckt/?owner=admin&created=2015-07-26%2017:45:45.316090
    /api/bckt/?owner=admin&created__gt=2015-07-26%2017:50:00
    Please be ware of:
    - https://github.com/tomchristie/django-rest-framework/issues/1338
    - https://code.djangoproject.com/ticket/23448
    - https://github.com/alex/django-filter/pull/264
    '2015-07-26T17:45:45.316090Z'
    url encoded is '2015-07-26T17%3A45%3A45.316090Z'
    '2015-07-26 17:45:45.316090Z'
    url encoded is '2015-07-26%2017%3A45%3A45.316090Z'
    '''
    # Owner is really a relation to an instance of
    # django.contrib.auth.models.User model.
    # Use the username in queries
    owner = django_filters.CharFilter(name="owner__username")
    class Meta:
        model = Bckt
        # Mapping of model field -> lookup expressions accepted as
        # query-string parameters (e.g. title__icontains=...).
        fields = {
            'owner': ['exact'],
            'language': ['exact'],
            'title': ['exact', 'icontains'],
            'created': ['exact', 'lt', 'gt'],
            'code': ['contains', 'icontains'],
        }
class BcktMultiViewSet(viewsets.ModelViewSet):
    '''
    A custom viewsets.ModelViewSet variant that can use a different
    serializer per action within the same model view set.
    The initial aim was to omit bulky details from lists, only using them
    in single record views.
    This is achieved by overriding get_serializer_class() so that the
    serializer is chosen from the `serializers` mapping based on the
    current action.
    '''
    # Subclasses fill this with {action_name: SerializerClass, ...};
    # 'default' is the fallback for unmapped actions.
    serializers = {
        'default': None,
    }
    filter_backends = (filters.DjangoFilterBackend,)
    filter_class = BcktFilter
    def get_serializer_class(self):
        """Return the serializer registered for the current action."""
        logger = logging.getLogger(__name__)
        logger.debug('{:s}: action = {!s}'.format(
            'BcktMultiViewSet.get_serializer_class()',
            self.action))
        fallback = self.serializers['default']
        return self.serializers.get(self.action, fallback)
class BcktViewSet(BcktMultiViewSet):
    '''
    This endpoint presents the buckets.
    The `highlight` field presents a hyperlink to the highlighted HTML
    representation of the bucket.
    The **owner** of the bucket may update or delete instances
    of the bucket.
    '''
    # The class BcktViewSet is derrived from BcktMultiViewSet, which in turn is
    # derrived from viewsets.ModelViewSet to allow for different presentations
    # in lists and detailed views.
    # model = Bckt
    queryset = Bckt.objects.all()
    # The choice of which serializer to use is controlled by serializers
    # instead of a common serializer for all actions
    # (serializer_class = BcktSerializer): lists use the slimmer
    # BcktListSerializer, detail views use the full BcktSerializer.
    serializers = {
        'default': BcktSerializer,
        'list': BcktListSerializer,
        'retrieve': BcktSerializer,
    }
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,
                          IsOwnerOrReadOnly,)
    @detail_route(renderer_classes=(renderers.StaticHTMLRenderer,))
    def highlight(self, request, *args, **kwargs):
        # Serve the pre-rendered, syntax-highlighted HTML for one bucket.
        bckt = self.get_object()
        return Response(bckt.highlighted)
    def perform_create(self, serializer):
        """Attach the requesting user as the bucket's owner on creation."""
        # Get an instance of a logger
        logger = logging.getLogger(__name__)
        m = '{:s}: self.action = {!s}'.format(
            'BcktViewSet.perform_create',
            self.action)
        logger.debug(m)
        m = '{:s}: self.request.user = {!s}'.format(
            'BcktViewSet.perform_create',
            self.request.user)
        logger.debug(m)
        serializer.save(owner=self.request.user)
class UserViewSet(viewsets.ReadOnlyModelViewSet):
    """
    This endpoint presents the users in the system.
    The collection of bckt instances owned by a user are
    serialized using a hyperlinked representation.
    """
    # Read-only by design: users are managed elsewhere (e.g. Django admin).
    queryset = User.objects.all()
    serializer_class = UserSerializer
| {
"content_hash": "cdda12a3087a770839ec33b64396f63d",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 79,
"avg_line_length": 33.67153284671533,
"alnum_prop": 0.6663776284413614,
"repo_name": "peterdv/pyreststore",
"id": "f5130513d8b15cefe38458928cc5bd6b51b40c6b",
"size": "4652",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyreststore/bckt/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "46027"
}
],
"symlink_target": ""
} |
import numpy as np
from autocorrelation import autocorrelation
def decorrelation_length(x, min_autocorrelation):
    """ decorrelation_length returns the first occurrence lag at which the autocorrelation becomes smaller than min_autocorrelation.
    Inputs:
    - x [1-dim numpy array of floats]: the time series (the time is supposed to be on a regular grid with a timestep of 1)
    - min_autocorrelation [float]: the value of the autocorrelation for which we want the corresponding lag.
    Outputs:
    - mylength [int]: The first occurrence lag at which the autocorrelation becomes smaller than 'min_autocorrelation'.
      Returns numpy.nan if no such lag is found for lags < x.size.
    -----------------------------
    This is part of WAVEPAL
    (C) 2016 G. Lenoir"""
    n = x.size
    lag_min = 0
    lag_max = 9
    # Scan the autocorrelation ten lags at a time; return as soon as a value
    # drops below the threshold. (Replaces the previous break-flag
    # bookkeeping with an early return — behavior unchanged.)
    while lag_max < n:
        r = autocorrelation(x, lag_min, lag_max)
        for k in range(10):
            if r[k] < min_autocorrelation:
                return lag_min + k
        lag_min += 10
        lag_max += 10
    return np.nan
"content_hash": "def1656e984d6e85ad655ffaaf9a5f64",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 129,
"avg_line_length": 31.0625,
"alnum_prop": 0.716297786720322,
"repo_name": "guillaumelenoir/WAVEPAL",
"id": "8f06595dc44893f0c0ae17fa50da9907a77ab8dc",
"size": "994",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wavepal/decorrelation_length.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "447896"
},
{
"name": "Shell",
"bytes": "2632"
}
],
"symlink_target": ""
} |
import CsHelper
import logging
class CsRoute:
    """ Manage ip routes and per-device routing-policy tables """

    def __init__(self):
        self.table_prefix = "Table_"

    def get_tablename(self, name):
        """Return the per-device routing-table name, e.g. 'Table_eth1'."""
        return self.table_prefix + name

    def add_table(self, devicename):
        """Register the device's routing table in /etc/iproute2/rt_tables.

        The table number is derived from the device name ('eth1' -> '1').
        """
        tablenumber = devicename[3:]
        tablename = self.get_tablename(devicename)
        # Renamed from 'str', which shadowed the builtin.
        entry = "%s %s" % (tablenumber, tablename)
        filename = "/etc/iproute2/rt_tables"
        logging.info(
            "Adding route table: " + entry + " to " + filename + " if not present ")
        if not CsHelper.definedinfile(filename, entry):
            CsHelper.execute("sudo echo " + entry + " >> /etc/iproute2/rt_tables")
        # remove "from all table tablename" if it exists, else it will
        # interfere with routing of unintended traffic
        if self.findRule("from all lookup " + tablename):
            CsHelper.execute("sudo ip rule delete from all table " + tablename)

    def flush_table(self, tablename):
        """Flush all routes of the given table and the kernel route cache."""
        CsHelper.execute("ip route flush table %s" % (tablename))
        CsHelper.execute("ip route flush cache")

    def add_route(self, dev, address):
        """Install the default route via ``address`` in the device's table.

        Note: despite the name, this sets a *default* route; use
        add_network_route() for a network route. (A dead assignment that
        built a network-route command and was immediately overwritten has
        been removed.)
        """
        table = self.get_tablename(dev)
        logging.info("Adding route: dev " + dev + " table: " +
                     table + " network: " + address + " if not present")
        cmd = "default via %s table %s proto static" % (address, table)
        self.set_route(cmd)

    def add_network_route(self, dev, address):
        """ Wrapper method that adds table name and device to route statement """
        # ip route add dev eth1 table Table_eth1 10.0.2.0/24
        table = self.get_tablename(dev)
        logging.info("Adding route: dev " + dev + " table: " +
                     table + " network: " + address + " if not present")
        cmd = "dev %s table %s %s" % (dev, table, address)
        self.set_route(cmd)

    def set_route(self, cmd, method="add"):
        """ Add (or delete) a route only if its current state requires it """
        found = False
        # 'ip route show <spec>' prints the route when it already exists.
        for i in CsHelper.execute("ip route show " + cmd):
            found = True
        if not found and method == "add":
            logging.info("Add " + cmd)
            cmd = "ip route add " + cmd
        elif found and method == "delete":
            logging.info("Delete " + cmd)
            cmd = "ip route delete " + cmd
        else:
            return
        CsHelper.execute(cmd)

    def add_defaultroute(self, gateway):
        """ Add a default route

        :param str gateway: gateway address; must be truthy
        :return: bool True if a route was added, False if one already existed
        """
        if not gateway:
            raise Exception("Gateway cannot be None.")
        if self.defaultroute_exists():
            return False
        cmd = "default via " + gateway
        logging.info("Adding default route")
        self.set_route(cmd)
        return True

    def defaultroute_exists(self):
        """ Return True if a default ipv4 route is present

        :return: bool
        """
        logging.info("Checking if default ipv4 route is present")
        route_found = CsHelper.execute("ip -4 route list 0/0")
        if len(route_found) > 0:
            logging.info("Default route found: " + route_found[0])
            return True
        logging.warn("No default route found!")
        return False

    def findRule(self, rule):
        """Return True if ``rule`` appears in the output of 'ip rule show'."""
        for i in CsHelper.execute("ip rule show"):
            if rule in i.strip():
                return True
        return False
"content_hash": "bae3934fbed44ee958836a31a1faca93",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 82,
"avg_line_length": 36.198019801980195,
"alnum_prop": 0.562089715536105,
"repo_name": "resmo/cloudstack",
"id": "927c2ae0d7487e43c9b29415e3c85443f3845723",
"size": "4461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "systemvm/patches/debian/config/opt/cloud/bin/cs/CsRoute.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "1451"
},
{
"name": "Batchfile",
"bytes": "11926"
},
{
"name": "C#",
"bytes": "2356211"
},
{
"name": "CSS",
"bytes": "335738"
},
{
"name": "FreeMarker",
"bytes": "4917"
},
{
"name": "Groovy",
"bytes": "153137"
},
{
"name": "HTML",
"bytes": "151164"
},
{
"name": "Java",
"bytes": "33712712"
},
{
"name": "JavaScript",
"bytes": "7719277"
},
{
"name": "Python",
"bytes": "11019815"
},
{
"name": "Ruby",
"bytes": "896"
},
{
"name": "Shell",
"bytes": "770039"
}
],
"symlink_target": ""
} |
import os
import shutil
import fixtures
from oslo.config import cfg
from glance.common import exception
from glance.openstack.common import jsonutils
from glance import store
from glance.store import location
from glance.store import sheepdog
from glance.store import vmware_datastore
from glance.tests import stubs
from glance.tests import utils as test_utils
CONF = cfg.CONF
CONF.import_opt('filesystem_store_datadir', 'glance.store.filesystem')
CONF.import_opt('connection', 'glance.openstack.common.db.sqlalchemy.session',
group='database')
class StoreClearingUnitTest(test_utils.BaseTestCase):
    """Base test case that clears store/location registrations before each
    test and restores the scheme map afterwards."""
    def setUp(self):
        super(StoreClearingUnitTest, self).setUp()
        # Ensure stores + locations cleared
        location.SCHEME_TO_CLS_MAP = {}
        self._create_stores()
        self.addCleanup(setattr, location, 'SCHEME_TO_CLS_MAP', dict())
    def _create_stores(self, passing_config=True):
        """Create known stores. Mock out sheepdog's subprocess dependency
        on collie.
        :param passing_config: making store driver passes basic configurations.
        :returns: the number of how many store drivers been loaded.
        """
        def _fun(*args, **kwargs):
            # Stand-in configure(): succeed, or raise to simulate a store
            # whose configuration fails.
            if passing_config:
                return None
            else:
                raise exception.BadStoreConfiguration()
        self.stubs.Set(sheepdog.Store, 'configure', _fun)
        self.stubs.Set(vmware_datastore.Store, 'configure', _fun)
        self.stubs.Set(vmware_datastore.Store, 'configure_add', _fun)
        return store.create_stores()
class IsolatedUnitTest(StoreClearingUnitTest):
    """
    Unit test case that establishes a mock environment within
    a testing directory (in isolation)
    """
    # Subclasses may override this to supply a stubbed registry server.
    registry = None
    def setUp(self):
        super(IsolatedUnitTest, self).setUp()
        # All mutable state (store files, policy file, lock files) lives in
        # a per-test temporary directory cleaned up automatically.
        self.test_dir = self.useFixture(fixtures.TempDir()).path
        policy_file = self._copy_data_file('policy.json', self.test_dir)
        self.config(connection='sqlite://',
                    group='database')
        self.config(verbose=False,
                    debug=False,
                    default_store='filesystem',
                    filesystem_store_datadir=os.path.join(self.test_dir),
                    policy_file=policy_file,
                    lock_path=os.path.join(self.test_dir))
        stubs.stub_out_registry_and_store_server(self.stubs,
                                                 self.test_dir,
                                                 registry=self.registry)
    def _copy_data_file(self, file_name, dst_dir):
        """Copy a fixture file from glance/tests/etc into dst_dir; return its new path."""
        src_file_name = os.path.join('glance/tests/etc', file_name)
        shutil.copy(src_file_name, dst_dir)
        dst_file_name = os.path.join(dst_dir, file_name)
        return dst_file_name
    def set_policy_rules(self, rules):
        """Overwrite the test policy file with the given rules dict."""
        fap = open(CONF.policy_file, 'w')
        fap.write(jsonutils.dumps(rules))
        fap.close()
| {
"content_hash": "b52d54ef266ee9dd177ec86856d7302e",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 79,
"avg_line_length": 34.68235294117647,
"alnum_prop": 0.6282225237449118,
"repo_name": "tanglei528/glance",
"id": "f0fcfa24d2db3c88228844c23ae23ae63dd20acb",
"size": "3585",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glance/tests/unit/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3193082"
},
{
"name": "Shell",
"bytes": "7168"
}
],
"symlink_target": ""
} |
"""Tests for proactor_events.py"""
import socket
import unittest
from unittest import mock
import asyncio
from asyncio.proactor_events import BaseProactorEventLoop
from asyncio.proactor_events import _ProactorSocketTransport
from asyncio.proactor_events import _ProactorWritePipeTransport
from asyncio.proactor_events import _ProactorDuplexPipeTransport
from asyncio import test_utils
class ProactorSocketTransportTests(test_utils.TestCase):
    """Tests for _ProactorSocketTransport driven by a fully mocked proactor."""
    def setUp(self):
        # Mocked loop + proactor: no real sockets or OS I/O are involved.
        self.loop = self.new_test_loop()
        self.proactor = mock.Mock()
        self.loop._proactor = self.proactor
        self.protocol = test_utils.make_test_protocol(asyncio.Protocol)
        self.sock = mock.Mock(socket.socket)
    def test_ctor(self):
        # Construction resolves the waiter future and starts the first recv().
        fut = asyncio.Future(loop=self.loop)
        tr = _ProactorSocketTransport(
            self.loop, self.sock, self.protocol, fut)
        test_utils.run_briefly(self.loop)
        self.assertIsNone(fut.result())
        self.protocol.connection_made(tr)
        self.proactor.recv.assert_called_with(self.sock, 4096)
    def test_loop_reading(self):
        tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
        tr._loop_reading()
        self.loop._proactor.recv.assert_called_with(self.sock, 4096)
        self.assertFalse(self.protocol.data_received.called)
        self.assertFalse(self.protocol.eof_received.called)
    def test_loop_reading_data(self):
        # A completed read future is delivered to the protocol and a new
        # recv() is scheduled.
        res = asyncio.Future(loop=self.loop)
        res.set_result(b'data')
        tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
        tr._read_fut = res
        tr._loop_reading(res)
        self.loop._proactor.recv.assert_called_with(self.sock, 4096)
        self.protocol.data_received.assert_called_with(b'data')
    def test_loop_reading_no_data(self):
        # An empty read means EOF: no further recv, protocol notified,
        # transport closed.
        res = asyncio.Future(loop=self.loop)
        res.set_result(b'')
        tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
        self.assertRaises(AssertionError, tr._loop_reading, res)
        tr.close = mock.Mock()
        tr._read_fut = res
        tr._loop_reading(res)
        self.assertFalse(self.loop._proactor.recv.called)
        self.assertTrue(self.protocol.eof_received.called)
        self.assertTrue(tr.close.called)
    def test_loop_reading_aborted(self):
        err = self.loop._proactor.recv.side_effect = ConnectionAbortedError()
        tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
        tr._fatal_error = mock.Mock()
        tr._loop_reading()
        tr._fatal_error.assert_called_with(
            err,
            'Fatal read error on pipe transport')
    def test_loop_reading_aborted_closing(self):
        # An abort while the transport is already closing is not fatal.
        self.loop._proactor.recv.side_effect = ConnectionAbortedError()
        tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
        tr._closing = True
        tr._fatal_error = mock.Mock()
        tr._loop_reading()
        self.assertFalse(tr._fatal_error.called)
    def test_loop_reading_aborted_is_fatal(self):
        self.loop._proactor.recv.side_effect = ConnectionAbortedError()
        tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
        tr._closing = False
        tr._fatal_error = mock.Mock()
        tr._loop_reading()
        self.assertTrue(tr._fatal_error.called)
    def test_loop_reading_conn_reset_lost(self):
        # A connection reset force-closes the transport rather than
        # raising a fatal error.
        err = self.loop._proactor.recv.side_effect = ConnectionResetError()
        tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
        tr._closing = False
        tr._fatal_error = mock.Mock()
        tr._force_close = mock.Mock()
        tr._loop_reading()
        self.assertFalse(tr._fatal_error.called)
        tr._force_close.assert_called_with(err)
    def test_loop_reading_exception(self):
        err = self.loop._proactor.recv.side_effect = (OSError())
        tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
        tr._fatal_error = mock.Mock()
        tr._loop_reading()
        tr._fatal_error.assert_called_with(
            err,
            'Fatal read error on pipe transport')
    def test_write(self):
        tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
        tr._loop_writing = mock.Mock()
        tr.write(b'data')
        self.assertEqual(tr._buffer, None)
        tr._loop_writing.assert_called_with(data=b'data')
    def test_write_no_data(self):
        tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
        tr.write(b'')
        self.assertFalse(tr._buffer)
    def test_write_more(self):
        # While a write is in flight, further data accumulates in the buffer.
        tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
        tr._write_fut = mock.Mock()
        tr._loop_writing = mock.Mock()
        tr.write(b'data')
        self.assertEqual(tr._buffer, b'data')
        self.assertFalse(tr._loop_writing.called)
def test_loop_writing(self):
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
tr._buffer = bytearray(b'data')
tr._loop_writing()
self.loop._proactor.send.assert_called_with(self.sock, b'data')
self.loop._proactor.send.return_value.add_done_callback.\
assert_called_with(tr._loop_writing)
@mock.patch('asyncio.proactor_events.logger')
def test_loop_writing_err(self, m_log):
err = self.loop._proactor.send.side_effect = OSError()
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
tr._fatal_error = mock.Mock()
tr._buffer = [b'da', b'ta']
tr._loop_writing()
tr._fatal_error.assert_called_with(
err,
'Fatal write error on pipe transport')
tr._conn_lost = 1
tr.write(b'data')
tr.write(b'data')
tr.write(b'data')
tr.write(b'data')
tr.write(b'data')
self.assertEqual(tr._buffer, None)
m_log.warning.assert_called_with('socket.send() raised exception.')
def test_loop_writing_stop(self):
fut = asyncio.Future(loop=self.loop)
fut.set_result(b'data')
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
tr._write_fut = fut
tr._loop_writing(fut)
self.assertIsNone(tr._write_fut)
def test_loop_writing_closing(self):
fut = asyncio.Future(loop=self.loop)
fut.set_result(1)
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
tr._write_fut = fut
tr.close()
tr._loop_writing(fut)
self.assertIsNone(tr._write_fut)
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(None)
def test_abort(self):
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
tr._force_close = mock.Mock()
tr.abort()
tr._force_close.assert_called_with(None)
def test_close(self):
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
tr.close()
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(None)
self.assertTrue(tr._closing)
self.assertEqual(tr._conn_lost, 1)
self.protocol.connection_lost.reset_mock()
tr.close()
test_utils.run_briefly(self.loop)
self.assertFalse(self.protocol.connection_lost.called)
def test_close_write_fut(self):
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
tr._write_fut = mock.Mock()
tr.close()
test_utils.run_briefly(self.loop)
self.assertFalse(self.protocol.connection_lost.called)
def test_close_buffer(self):
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
tr._buffer = [b'data']
tr.close()
test_utils.run_briefly(self.loop)
self.assertFalse(self.protocol.connection_lost.called)
@mock.patch('asyncio.base_events.logger')
def test_fatal_error(self, m_logging):
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
tr._force_close = mock.Mock()
tr._fatal_error(None)
self.assertTrue(tr._force_close.called)
self.assertTrue(m_logging.error.called)
def test_force_close(self):
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
tr._buffer = [b'data']
read_fut = tr._read_fut = mock.Mock()
write_fut = tr._write_fut = mock.Mock()
tr._force_close(None)
read_fut.cancel.assert_called_with()
write_fut.cancel.assert_called_with()
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(None)
self.assertEqual(None, tr._buffer)
self.assertEqual(tr._conn_lost, 1)
def test_force_close_idempotent(self):
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
tr._closing = True
tr._force_close(None)
test_utils.run_briefly(self.loop)
self.assertFalse(self.protocol.connection_lost.called)
def test_fatal_error_2(self):
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
tr._buffer = [b'data']
tr._force_close(None)
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(None)
self.assertEqual(None, tr._buffer)
def test_call_connection_lost(self):
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
tr._call_connection_lost(None)
self.assertTrue(self.protocol.connection_lost.called)
self.assertTrue(self.sock.close.called)
def test_write_eof(self):
tr = _ProactorSocketTransport(
self.loop, self.sock, self.protocol)
self.assertTrue(tr.can_write_eof())
tr.write_eof()
self.sock.shutdown.assert_called_with(socket.SHUT_WR)
tr.write_eof()
self.assertEqual(self.sock.shutdown.call_count, 1)
tr.close()
def test_write_eof_buffer(self):
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
f = asyncio.Future(loop=self.loop)
tr._loop._proactor.send.return_value = f
tr.write(b'data')
tr.write_eof()
self.assertTrue(tr._eof_written)
self.assertFalse(self.sock.shutdown.called)
tr._loop._proactor.send.assert_called_with(self.sock, b'data')
f.set_result(4)
self.loop._run_once()
self.sock.shutdown.assert_called_with(socket.SHUT_WR)
tr.close()
def test_write_eof_write_pipe(self):
tr = _ProactorWritePipeTransport(
self.loop, self.sock, self.protocol)
self.assertTrue(tr.can_write_eof())
tr.write_eof()
self.assertTrue(tr._closing)
self.loop._run_once()
self.assertTrue(self.sock.close.called)
tr.close()
def test_write_eof_buffer_write_pipe(self):
tr = _ProactorWritePipeTransport(self.loop, self.sock, self.protocol)
f = asyncio.Future(loop=self.loop)
tr._loop._proactor.send.return_value = f
tr.write(b'data')
tr.write_eof()
self.assertTrue(tr._closing)
self.assertFalse(self.sock.shutdown.called)
tr._loop._proactor.send.assert_called_with(self.sock, b'data')
f.set_result(4)
self.loop._run_once()
self.loop._run_once()
self.assertTrue(self.sock.close.called)
tr.close()
def test_write_eof_duplex_pipe(self):
tr = _ProactorDuplexPipeTransport(
self.loop, self.sock, self.protocol)
self.assertFalse(tr.can_write_eof())
with self.assertRaises(NotImplementedError):
tr.write_eof()
tr.close()
def test_pause_resume_reading(self):
tr = _ProactorSocketTransport(
self.loop, self.sock, self.protocol)
futures = []
for msg in [b'data1', b'data2', b'data3', b'data4', b'']:
f = asyncio.Future(loop=self.loop)
f.set_result(msg)
futures.append(f)
self.loop._proactor.recv.side_effect = futures
self.loop._run_once()
self.assertFalse(tr._paused)
self.loop._run_once()
self.protocol.data_received.assert_called_with(b'data1')
self.loop._run_once()
self.protocol.data_received.assert_called_with(b'data2')
tr.pause_reading()
self.assertTrue(tr._paused)
for i in range(10):
self.loop._run_once()
self.protocol.data_received.assert_called_with(b'data2')
tr.resume_reading()
self.assertFalse(tr._paused)
self.loop._run_once()
self.protocol.data_received.assert_called_with(b'data3')
self.loop._run_once()
self.protocol.data_received.assert_called_with(b'data4')
tr.close()
class BaseProactorEventLoopTests(test_utils.TestCase):
    """Tests for BaseProactorEventLoop with a mocked proactor and sockets."""
    def setUp(self):
        self.sock = mock.Mock(socket.socket)
        self.proactor = mock.Mock()
        self.ssock, self.csock = mock.Mock(), mock.Mock()
        # Subclass overrides _socketpair so the loop's self-pipe uses our
        # mocks; the parameter is named "s" because the closure captures the
        # test's own self.
        class EventLoop(BaseProactorEventLoop):
            def _socketpair(s):
                return (self.ssock, self.csock)
        self.loop = EventLoop(self.proactor)
        self.set_event_loop(self.loop, cleanup=False)
    @mock.patch.object(BaseProactorEventLoop, '_call_soon')
    @mock.patch.object(BaseProactorEventLoop, '_socketpair')
    def test_ctor(self, socketpair, _call_soon):
        ssock, csock = socketpair.return_value = (
            mock.Mock(), mock.Mock())
        loop = BaseProactorEventLoop(self.proactor)
        self.assertIs(loop._ssock, ssock)
        self.assertIs(loop._csock, csock)
        self.assertEqual(loop._internal_fds, 1)
        _call_soon.assert_called_with(loop._loop_self_reading, (),
                                      check_loop=False)
    def test_close_self_pipe(self):
        self.loop._close_self_pipe()
        self.assertEqual(self.loop._internal_fds, 0)
        self.assertTrue(self.ssock.close.called)
        self.assertTrue(self.csock.close.called)
        self.assertIsNone(self.loop._ssock)
        self.assertIsNone(self.loop._csock)
    def test_close(self):
        self.loop._close_self_pipe = mock.Mock()
        self.loop.close()
        self.assertTrue(self.loop._close_self_pipe.called)
        self.assertTrue(self.proactor.close.called)
        self.assertIsNone(self.loop._proactor)
        # Closing an already-closed loop must be a no-op.
        self.loop._close_self_pipe.reset_mock()
        self.loop.close()
        self.assertFalse(self.loop._close_self_pipe.called)
    def test_sock_recv(self):
        self.loop.sock_recv(self.sock, 1024)
        self.proactor.recv.assert_called_with(self.sock, 1024)
    def test_sock_sendall(self):
        self.loop.sock_sendall(self.sock, b'data')
        self.proactor.send.assert_called_with(self.sock, b'data')
    def test_sock_connect(self):
        self.loop.sock_connect(self.sock, 123)
        self.proactor.connect.assert_called_with(self.sock, 123)
    def test_sock_accept(self):
        self.loop.sock_accept(self.sock)
        self.proactor.accept.assert_called_with(self.sock)
    def test_socketpair(self):
        # The base class's _socketpair is abstract.
        self.assertRaises(
            NotImplementedError, BaseProactorEventLoop, self.proactor)
    def test_make_socket_transport(self):
        tr = self.loop._make_socket_transport(self.sock, asyncio.Protocol())
        self.assertIsInstance(tr, _ProactorSocketTransport)
    def test_loop_self_reading(self):
        self.loop._loop_self_reading()
        self.proactor.recv.assert_called_with(self.ssock, 4096)
        self.proactor.recv.return_value.add_done_callback.assert_called_with(
            self.loop._loop_self_reading)
    def test_loop_self_reading_fut(self):
        fut = mock.Mock()
        self.loop._loop_self_reading(fut)
        self.assertTrue(fut.result.called)
        self.proactor.recv.assert_called_with(self.ssock, 4096)
        self.proactor.recv.return_value.add_done_callback.assert_called_with(
            self.loop._loop_self_reading)
    def test_loop_self_reading_exception(self):
        self.loop.close = mock.Mock()
        self.proactor.recv.side_effect = OSError()
        self.assertRaises(OSError, self.loop._loop_self_reading)
        self.assertTrue(self.loop.close.called)
    def test_write_to_self(self):
        self.loop._write_to_self()
        self.csock.send.assert_called_with(b'\0')
    def test_process_events(self):
        self.loop._process_events([])
    @mock.patch('asyncio.base_events.logger')
    def test_create_server(self, m_log):
        pf = mock.Mock()
        call_soon = self.loop.call_soon = mock.Mock()
        self.loop._start_serving(pf, self.sock)
        self.assertTrue(call_soon.called)
        # callback
        loop = call_soon.call_args[0][0]
        loop()
        self.proactor.accept.assert_called_with(self.sock)
        # conn
        fut = mock.Mock()
        fut.result.return_value = (mock.Mock(), mock.Mock())
        make_tr = self.loop._make_socket_transport = mock.Mock()
        loop(fut)
        self.assertTrue(fut.result.called)
        self.assertTrue(make_tr.called)
        # exception
        fut.result.side_effect = OSError()
        loop(fut)
        self.assertTrue(self.sock.close.called)
        self.assertTrue(m_log.error.called)
    def test_create_server_cancel(self):
        pf = mock.Mock()
        call_soon = self.loop.call_soon = mock.Mock()
        self.loop._start_serving(pf, self.sock)
        loop = call_soon.call_args[0][0]
        # cancelled
        fut = asyncio.Future(loop=self.loop)
        fut.cancel()
        loop(fut)
        self.assertTrue(self.sock.close.called)
    def test_stop_serving(self):
        sock = mock.Mock()
        self.loop._stop_serving(sock)
        self.assertTrue(sock.close.called)
        self.proactor._stop_serving.assert_called_with(sock)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "d1cc1ba2538c31d2bb313f8c9bd3c6d0",
"timestamp": "",
"source": "github",
"line_count": 492,
"max_line_length": 77,
"avg_line_length": 36.58130081300813,
"alnum_prop": 0.6290698966551839,
"repo_name": "OptimusGitEtna/RestSymf",
"id": "0c536986ff7f157ee7a4b35d44120d2623441ce8",
"size": "17998",
"binary": false,
"copies": "58",
"ref": "refs/heads/master",
"path": "Python-3.4.2/Lib/test/test_asyncio/test_proactor_events.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "594205"
},
{
"name": "C",
"bytes": "15348597"
},
{
"name": "C++",
"bytes": "65109"
},
{
"name": "CSS",
"bytes": "12039"
},
{
"name": "Common Lisp",
"bytes": "24481"
},
{
"name": "JavaScript",
"bytes": "10597"
},
{
"name": "Makefile",
"bytes": "9444"
},
{
"name": "Objective-C",
"bytes": "1390141"
},
{
"name": "PHP",
"bytes": "93070"
},
{
"name": "PowerShell",
"bytes": "1420"
},
{
"name": "Prolog",
"bytes": "557"
},
{
"name": "Python",
"bytes": "24018306"
},
{
"name": "Shell",
"bytes": "440753"
},
{
"name": "TeX",
"bytes": "323102"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
# Python 2 CGI endpoint: marks links as "annotated" in a tab-separated .txt
# export that lives next to the requested file in a user's public_html tree.
import sys, json, os, urlparse, re
# Emit the CGI response header before any payload.
print "Content-type:application/json\r\n\r\n"
# Request body is JSON; expected shape appears to be
# {"file": <url>, "annotations": [{"link": ...}, ...]} — TODO confirm
# against the client.
data = json.loads(sys.stdin.read())
path = urlparse.urlparse(data["file"]).path
# Map a "/~user/..." URL path to the user's public_html directory on disk.
m = re.match('^/~(\w+/)(.+)', path)
real_path = os.path.realpath('/home/' + m.group(1) + 'public_html/' + m.group(2))
# The raw data file shares the target's base name, with a .txt extension.
txt_path = os.path.splitext(real_path)[0] + '.txt'
with open(txt_path) as raw_file:
    raw_data = raw_file.read()
for a in data["annotations"]:
    # Find the =HYPERLINK(...) cell ending with this link and append an
    # "annotated" marker column after it.
    r = re.compile('(=HYPERLINK.+"%s"\))' % (
        re.escape(a['link'])
    ))
    raw_data = re.sub(r, r'\1\tannotated', raw_data)
result = {}
try:
    with open(txt_path + '.annotated', 'w') as annotated_file:
        annotated_file.write(raw_data)
except IOError, e:
    result['error'] = str(e)
else:
    result['success'] = 'Annotated file successfully generated.'
print json.dumps(result) | {
"content_hash": "bb3846df934faea6448a184e96a0b069",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 81,
"avg_line_length": 28.419354838709676,
"alnum_prop": 0.5845629965947786,
"repo_name": "wangchulab/CIMAGE",
"id": "7e1c832d8cf6d1e078f47770622e4a47140be88a",
"size": "900",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cimage/cimage-clientpatches/source/cimage-annotate/cgi-bin/annotate.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "443"
},
{
"name": "C",
"bytes": "156254"
},
{
"name": "C++",
"bytes": "112188"
},
{
"name": "Emacs Lisp",
"bytes": "53"
},
{
"name": "HTML",
"bytes": "20048"
},
{
"name": "JavaScript",
"bytes": "9392"
},
{
"name": "PHP",
"bytes": "5885"
},
{
"name": "Perl",
"bytes": "2385143"
},
{
"name": "Python",
"bytes": "33025"
},
{
"name": "R",
"bytes": "561694"
},
{
"name": "Shell",
"bytes": "51592"
},
{
"name": "TeX",
"bytes": "828"
}
],
"symlink_target": ""
} |
"""Utilities to extract string literals from object files.
LookupElfRodataInfo():
Runs readelf to extract and return .rodata section spec of an ELF file.
ReadFileChunks():
Reads raw data from a file, given a list of ranges in the file.
ReadStringLiterals():
Reads the ELF file to find the string contents of a list of string literals.
ResolveStringPiecesIndirect():
BulkForkAndCall() target: Given {path: [string addresses]} and
[raw_string_data for each string_section]:
- Reads {path: [src_strings]}.
- For each path, searches for src_strings in at most 1 raw_string_data over
each string_section. If found, translates to string_range and annotates it
to the string_section.
- Returns [{path: [string_ranges]} for each string_section].
ResolveStringPieces():
BulkForkAndCall() target: Given {path: [strings]} and
[raw_string_data for each string_section]:
- For each path, searches for src_strings in at most 1 raw_string_data over
each string_section. If found, translates to string_range and annotates it
to the string_section.
- Returns [{path: [string_ranges]} for each string_section].
GetNameOfStringLiteralBytes():
Converts string literal bytes to printable form, useful for assigning
full_name of string literal symbols. If any non-printable character is found
then returns models.STRING_LITERAL_NAME. Otherwise the returned string is
quoted, and may be truncated (with "[...]" appended).
"""
import ast
import collections
import itertools
import logging
import os
import string
import subprocess
import ar
import models
import parallel
import path_util
# Quoted literal names longer than this are truncated with a "[...]" suffix.
_STRING_LITERAL_LENGTH_CUTOFF = 30
# _PRINTABLE_TABLE[byte_value] is True iff the byte is printable ASCII.
_PRINTABLE_TABLE = [False] * 256
for ch in string.printable:
  _PRINTABLE_TABLE[ord(ch)] = True
def LookupElfRodataInfo(elf_path):
  """Returns (address, offset, size) for the .rodata section of |elf_path|.

  Runs readelf -S and parses the section-header row for .rodata.

  Raises:
    AssertionError: The ELF file has no .rodata section.
  """
  cmd = [path_util.GetReadElfPath(), '-S', '--wide', elf_path]
  readelf_out = subprocess.check_output(cmd).decode('ascii')
  for row in readelf_out.splitlines():
    # Example row:
    # [07] .rodata PROGBITS 025e7000 237c000 5ec4f6 00 A 0 0 256
    if '.rodata ' not in row:
      continue
    fields = row[row.index(models.SECTION_RODATA):].split()
    # Addr, Off, Size columns are hex.
    return int(fields[2], 16), int(fields[3], 16), int(fields[4], 16)
  raise AssertionError('No .rodata for command: ' + repr(cmd))
def ReadFileChunks(path, section_ranges):
  """Reads |path| and returns the raw bytes for each requested range.

  Args:
    path: File to read from.
    section_ranges: List of (offset, size) pairs.

  Returns:
    A list of bytes objects, one per input range, in the same order.
  """
  if not section_ranges:
    return []
  chunks = []
  with open(path, 'rb') as f:
    for offset, size in section_ranges:
      f.seek(offset)
      chunks.append(f.read(size))
  return chunks
def _ExtractArchivePath(path):
# E.g. foo/bar.a(baz.o)
if path.endswith(')'):
start_idx = path.index('(')
return path[:start_idx]
return None
def _LookupStringSectionPositions(target, output_directory):
  """Returns a dict of object_path -> [(offset, size)...] of .rodata sections.

  Runs readelf -S over an archive (or a batch of object files) and parses the
  per-file section headers for .rodata.str* sections.

  Args:
    target: An archive path string (e.g., "foo.a") or a list of object paths.
    output_directory: Working directory for the readelf invocation.
  """
  is_archive = isinstance(target, str)
  args = [path_util.GetReadElfPath(), '-S', '--wide']
  if is_archive:
    args.append(target)
  else:
    # Assign path for when len(target) == 1, (no File: line exists).
    path = target[0]
    args.extend(target)
  output = subprocess.check_output(args, cwd=output_directory).decode('ascii')
  lines = output.splitlines()
  section_positions_by_path = {}
  cur_offsets = []
  for line in lines:
    # File: base/third_party/libevent/libevent.a(buffer.o)
    # [Nr] Name Type Addr Off Size ES Flg Lk Inf Al
    # [11] .rodata.str1.1 PROGBITS 00000000 0000b4 000004 01 AMS 0 0 1
    # [11] .rodata.str4.4 PROGBITS 00000000 0000b4 000004 01 AMS 0 0 4
    # [11] .rodata.str8.8 PROGBITS 00000000 0000b4 000004 01 AMS 0 0 8
    # [80] .rodata..L.str PROGBITS 00000000 000530 000002 00 A 0 0 1
    # The various string sections differ by alignment.
    # The presence of a wchar_t literal (L"asdf") seems to make a str4 section.
    # When multiple sections exist, nm gives us no indication as to which
    # section each string corresponds to.
    if line.startswith('File: '):
      # Flush the previous file's offsets before switching |path|.
      if cur_offsets:
        section_positions_by_path[path] = cur_offsets
        cur_offsets = []
      path = line[6:]
    elif '.rodata.' in line:
      progbits_idx = line.find('PROGBITS ')
      if progbits_idx != -1:
        fields = line[progbits_idx:].split()
        # (Off, Size) columns, both hex.
        position = (int(fields[2], 16), int(fields[3], 16))
        # The heuristics in _IterStringLiterals rely on str1 coming first.
        if fields[-1] == '1':
          cur_offsets.insert(0, position)
        else:
          cur_offsets.append(position)
  # Flush the final file's offsets.
  if cur_offsets:
    section_positions_by_path[path] = cur_offsets
  return section_positions_by_path
def _ReadStringSections(target, output_directory, positions_by_path):
  """Returns a dict of object_path -> [string...] of .rodata chunks.

  Args:
    target: An archive path string (e.g., "foo.a") or a list of object paths.
    output_directory: Directory that |target| paths are relative to.
    positions_by_path: A dict of object_path -> [(offset, size)...]
  """
  is_archive = isinstance(target, str)
  string_sections_by_path = {}
  if is_archive:
    # Extract each member's raw bytes from the archive, then slice out the
    # requested ranges.
    for subpath, chunk in ar.IterArchiveChunks(
        os.path.join(output_directory, target)):
      path = '{}({})'.format(target, subpath)
      positions = positions_by_path.get(path)
      # No positions if file has no string literals.
      if positions:
        string_sections_by_path[path] = (
            [chunk[offset:offset + size] for offset, size in positions])
  else:
    for path in target:
      positions = positions_by_path.get(path)
      # We already log a warning about this in _IterStringLiterals().
      if positions:
        string_sections_by_path[path] = ReadFileChunks(
            os.path.join(output_directory, path), positions)
  return string_sections_by_path
def _IterStringLiterals(path, addresses, obj_sections):
  """Yields all string literals (including \0) for the given object path.

  Splits each .rodata.str section's bytes at the sorted string offsets.
  Offsets that do not fit the current section (past its end, or not preceded
  by a NUL) are deferred to the next section via |next_offsets|.

  Args:
    path: Object file path.
    addresses: List of string offsets encoded as hex strings.
    obj_sections: List of contents of .rodata.str sections read from the given
        object file.
  """
  next_offsets = sorted(int(a, 16) for a in addresses)
  if not obj_sections:
    # Happens when there is an address for a symbol which is not actually a
    # string literal, or when string_sections_by_path is missing an entry.
    logging.warning('Object has %d strings but no string sections: %s',
                    len(addresses), path)
    return
  for section_data in obj_sections:
    cur_offsets = next_offsets
    # Always assume first element is 0. I'm not entirely sure why this is
    # necessary, but strings get missed without it.
    next_offsets = [0]
    prev_offset = 0
    # TODO(agrieve): Switch to using nm --print-size in order to capture the
    # address+size of each string rather than just the address.
    for offset in cur_offsets[1:]:
      if offset >= len(section_data):
        # Remaining offsets are for next section.
        next_offsets.append(offset)
        continue
      # Figure out which offsets apply to this section via heuristic of them
      # all ending with a null character.
      if offset == prev_offset or section_data[offset - 1] != 0:
        next_offsets.append(offset)
        continue
      yield section_data[prev_offset:offset]
      prev_offset = offset
    # The final string runs to the end of the section.
    if prev_offset < len(section_data):
      yield section_data[prev_offset:]
def _AnnotateStringData(string_data, path_value_gen):
  """Annotates each |string_data| section data with paths and ranges.

  Args:
    string_data: [raw_string_data for each string_section] from an ELF file.
    path_value_gen: A generator of (path, value) pairs, where |path|
        is the path to an object file and |value| is a string to annotate.

  Returns:
    [{path: [string_ranges]} for each string_section], where each range is
    encoded as an "offset:length" string.
  """
  ret = [collections.defaultdict(list) for _ in string_data]
  # Brute-force search ** merge strings sections in |string_data| for string
  # values from |path_value_gen|. This is by far the slowest part of
  # AnalyzeStringLiterals().
  # TODO(agrieve): Pre-process |string_data| into a dict of literal->address (at
  # least for ASCII strings).
  for path, value in path_value_gen:
    first_match = -1
    first_match_dict = None
    for target_dict, data in zip(ret, string_data):
      # Set offset so that it will be 0 when len(value) is added to it below.
      offset = -len(value)
      while True:
        offset = data.find(value, offset + len(value))
        if offset == -1:
          break
        # Preferring exact matches (those following \0) over substring matches
        # significantly increases accuracy (although shows that linker isn't
        # being optimal).
        if offset == 0 or data[offset - 1] == 0:
          break
      # Remember the first (possibly substring) match as a fallback.
      if first_match == -1:
        first_match = offset
        first_match_dict = target_dict
      if offset != -1:
        break
    if offset == -1:
      # Exact match not found, so take suffix match if it exists.
      offset = first_match
      target_dict = first_match_dict
    # Missing strings happen when optimizations make them unused.
    if offset != -1:
      # Encode tuple as a string for easier marshalling.
      target_dict[path].append(str(offset) + ':' + str(len(value)))
  return ret
# This is a target for BulkForkAndCall().
def ResolveStringPiecesIndirect(encoded_string_addresses_by_path, string_data,
                                output_directory):
  """Reads string literals from object files and maps them into |string_data|.

  Args:
    encoded_string_addresses_by_path: Encoded {object_path: [hex addresses]}.
    string_data: [raw_string_data for each string_section] from an ELF file.
    output_directory: Directory that object paths are relative to.

  Returns:
    [encoded {path: [string_ranges]} for each string_section].
  """
  string_addresses_by_path = parallel.DecodeDictOfLists(
      encoded_string_addresses_by_path)
  # Assign |target| as archive path, or a list of object paths.
  any_path = next(iter(string_addresses_by_path.keys()))
  target = _ExtractArchivePath(any_path)
  if not target:
    target = list(string_addresses_by_path.keys())
  # Run readelf to find location of .rodata within the .o files.
  section_positions_by_path = _LookupStringSectionPositions(
      target, output_directory)
  # Load the .rodata sections (from object files) as strings.
  string_sections_by_path = _ReadStringSections(
      target, output_directory, section_positions_by_path)
  def GeneratePathAndValues():
    # Flatten to (path, literal_bytes) pairs for _AnnotateStringData().
    for path, object_addresses in string_addresses_by_path.items():
      for value in _IterStringLiterals(
          path, object_addresses, string_sections_by_path.get(path)):
        yield path, value
  ret = _AnnotateStringData(string_data, GeneratePathAndValues())
  return [parallel.EncodeDictOfLists(x) for x in ret]
# This is a target for BulkForkAndCall().
def ResolveStringPieces(encoded_strings_by_path, string_data):
  """Maps already-known string values into |string_data| sections.

  Returns [encoded {path: [string_ranges]} for each string_section].
  """
  # ast.literal_eval() undoes repr() applied to strings.
  strings_by_path = parallel.DecodeDictOfLists(
      encoded_strings_by_path, value_transform=ast.literal_eval)
  # Flatten to a lazy stream of (path, value) pairs.
  path_value_pairs = (
      (path, value)
      for path, values in strings_by_path.items()
      for value in values)
  annotated = _AnnotateStringData(string_data, path_value_pairs)
  return [parallel.EncodeDictOfLists(d) for d in annotated]
def ReadStringLiterals(symbols, elf_path, all_rodata=False):
  """Returns an iterable of (symbol, data) for all string literal symbols.

  Emitted string literal data are null-terminated bytes.

  Args:
    symbols: An iterable of Symbols
    elf_path: Path to the executable containing the symbols.
    all_rodata: Assume every symbol within .rodata that ends with a \0 is a
        string literal.
  """
  address, offset, _ = LookupElfRodataInfo(elf_path)
  # Translate a symbol's virtual address into a file offset within the ELF.
  adjust = offset - address
  with open(elf_path, 'rb') as f:
    for symbol in symbols:
      # 'r' presumably denotes the read-only data section — confirm against
      # the models module.
      if symbol.section != 'r':
        continue
      f.seek(symbol.address + adjust)
      data = f.read(symbol.size_without_padding)
      # As of Oct 2017, there are ~90 symbols name .L.str(.##). These appear
      # in the linker map file explicitly, and there doesn't seem to be a
      # pattern as to which variables lose their kConstant name (the more
      # common case), or which string literals don't get moved to
      # ** merge strings (less common).
      if symbol.IsStringLiteral() or (all_rodata and data and data[-1] == 0):
        yield ((symbol, data))
def GetNameOfStringLiteralBytes(b):
  """Converts string literal bytes to printable form, may be truncated.

  Non-printable content yields the generic models.STRING_LITERAL_NAME;
  printable content is quoted and truncated past the length cutoff.
  """
  cleaned = b.replace(b'\n', b'').replace(b'\t', b'').strip(b'\00')
  if not all(_PRINTABLE_TABLE[c] for c in cleaned):
    return models.STRING_LITERAL_NAME
  text = cleaned.decode('ascii')
  if len(text) > _STRING_LITERAL_LENGTH_CUTOFF:
    return '"{}[...]"'.format(text[:_STRING_LITERAL_LENGTH_CUTOFF])
  return '"{}"'.format(text)
| {
"content_hash": "8f0978e897f9b0f037b167aaf1fddfcf",
"timestamp": "",
"source": "github",
"line_count": 345,
"max_line_length": 80,
"avg_line_length": 37.41159420289855,
"alnum_prop": 0.6706438366777717,
"repo_name": "chromium/chromium",
"id": "d8c7f67f896a624ee98eedc64273678641626472",
"size": "13048",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "tools/binary_size/libsupersize/string_extract.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import os, sys
import multiprocessing
import time
from random import shuffle
newstdin = os.fdopen(os.dup(sys.stdin.fileno()))
def popup_words(level, stdin = newstdin):
with open("word-list/%s" %(level)) as f:
words = f.readlines()
shuffle(words)
correct_n = 0
total = 0
for word in words:
print "%s %s %s" %("*" * 10, word.strip(), "*"*10)
print("Y as CORRECT , Others as SKIP")
result = stdin.readline()
if result.strip() == "y":
correct_n += 1
total += 1
with open("temp", "w") as tempf:
tempf.write("%d / %d" %(correct_n, total))
word = f.readline()
# Play one round: run the word-prompt loop in a child process, give the
# player |timeout| seconds, then kill the child and report its last tally.
main = multiprocessing.Process(target=popup_words, args = ("easy", ))
main.start()
timeout = 90
time.sleep(timeout)
main.terminate()
# popup_words persists its running score to "temp" after every word.
with open("temp", "r") as tempf:
    print "Result:"
    print tempf.read()
| {
"content_hash": "1377c2eb43b60a9400e1a20e36375adf",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 69,
"avg_line_length": 24.225,
"alnum_prop": 0.5242518059855521,
"repo_name": "xiaohan2012/catch-phrase-game",
"id": "40a59d7995e5e049f3c1fa29a367111cad37f58a",
"size": "969",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "969"
}
],
"symlink_target": ""
} |
import argparse
import random
import numpy as np
import h5py
import qusp
import bossdata.path
import bossdata.remote
from astropy.io import fits
def main():
# parse command-line arguments
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--verbose", action="store_true",
help="print verbose output")
parser.add_argument("-o", "--output", type=str, default=None,
help="hdf5 output filename")
parser.add_argument("--save-model", action="store_true",
help="specify to save raw data of sparse matrix model")
parser.add_argument("--save-chisq", action="store_true",
help="specify to save per obs chisq values")
## targets to fit
parser.add_argument("-i", "--input", type=str, default=None,
help="target list")
parser.add_argument("-n", "--ntargets", type=int, default=0,
help="number of targets to use, 0 for all")
parser.add_argument("--random", action="store_true",
help="use a random selection of input targets")
parser.add_argument("--seed", type=int, default=42,
help="rng seed")
# fit options
parser.add_argument("--sklearn", action="store_true",
help="use sklearn linear regression instead of scipy lstsq")
# scipy specifc options
parser.add_argument("--max-iter", type=int, default=100,
help="max number of iterations to use in lsqr")
parser.add_argument("--atol", type=float, default=1e-4,
help="a stopping tolerance")
parser.add_argument("--btol", type=float, default=1e-8,
help="b stopping tolerance")
# input data columns
parser.add_argument("--z-col", type=int, default=3,
help="redshift column of input targetlist")
parser.add_argument("--sn-col", type=int, default=None,
help="sn column of input targetlist")
parser.add_argument("--norm-col", type=int, default=None,
help="norm param column of input targetlist")
parser.add_argument("--tilt-col", type=int, default=None,
help="tilt param column of input targetlist")
parser.add_argument("--fix-norm", action="store_true",
help="fix norm param")
parser.add_argument("--fix-tilt", action="store_true",
help="fix tilt param")
parser.add_argument("--continuum-file", type=str, default=None,
help="continuum to load")
qusp.Paths.add_args(parser)
qusp.ContinuumModel.add_args(parser)
args = parser.parse_args()
# setup boss data directory path
paths = qusp.Paths(**qusp.Paths.from_args(args))
try:
finder = bossdata.path.Finder()
mirror = bossdata.remote.Manager()
except ValueError as e:
print(e)
return -1
# read target data
fields = [('z', float, args.z_col)]
if args.norm_col is not None:
fields.append(('amp', float, args.norm_col))
if args.tilt_col is not None:
fields.append(('nu', float, args.tilt_col))
if args.sn_col is not None:
fields.append(('sn', float, args.sn_col))
targets = qusp.target.load_target_list(
args.input, fields, verbose=args.verbose)
# use the first n targets or a random sample
ntargets = args.ntargets if args.ntargets > 0 else len(targets)
if args.random:
random.seed(args.seed)
targets = random.sample(targets, ntargets)
else:
targets = targets[:ntargets]
continuum = None
if args.continuum_file:
specfits = h5py.File(args.continuum_file)
wave = specfits['restWaveCenters'].value
flux = specfits['continuum'].value
continuum = qusp.spectrum.SpectralFluxDensity(wave, flux)
# Initialize model
model = qusp.ContinuumModel(continuum=continuum, **qusp.ContinuumModel.from_args(args))
# Add observations to model
model_targets = []
npixels = []
if args.verbose:
print '... adding observations to fit ...\n'
def get_lite_spectra(targets):
for target in targets:
remote_path = finder.get_spec_path(plate=target['plate'], mjd=target['mjd'], fiber=target['fiber'], lite=True)
try:
local_path = mirror.get(remote_path, auto_download=False)
except RuntimeError as e:
print e
continue
spec = fits.open(local_path)
yield target, qusp.spectrum.read_lite_spectrum(spec)
# for target, combined in qusp.target.get_combined_spectra(targets, paths=paths):
for target, combined in get_lite_spectra(targets):
wavelength = combined.wavelength
ivar = combined.ivar.values
flux = combined.flux.values
# fix quasar spectrum normalization
if args.fix_norm:
if not hasattr(target, 'nu'):
# estimate quasar normalization
try:
norm = combined.mean_flux(args.continuum_normmin*(1+target['z']),
args.continuum_normmax*(1+target['z']))
except RuntimeError:
continue
if norm <= 0:
continue
# restframe amplitude
target['amp'] = norm*(1+target['z'])
# fix spectal tilt
if args.fix_tilt:
if not hasattr(target, 'nu'):
target['nu'] = 0
# Add this observation to our model
npixels_added = model.add_observation(
target, flux, wavelength, ivar, unweighted=args.unweighted)
if npixels_added > 0:
model_targets.append(target)
npixels.append(npixels_added)
if args.verbose:
print target, npixels_added
# Add constraints
if args.continuum_normmax > args.continuum_normmin:
model.add_continuum_constraint(
0, args.continuum_normmin, args.continuum_normmax, args.continuum_normweight)
if args.transmission_normmax > args.transmission_normmin:
model.add_transmission_constraint(
0, args.transmission_normmin, args.transmission_normmax, args.transmission_normweight)
if args.tiltweight > 0:
model.add_tilt_constraint(args.tiltweight)
if args.verbose:
print ''
# Construct the model
model_matrix, model_y = model.get_model()
# perform fit
if args.sklearn:
from sklearn import linear_model
regr = linear_model.LinearRegression(fit_intercept=False)
if args.verbose:
print ('... performing fit using '
'sklearn.linear_model.LinearRegression ...\n')
regr.fit(model_matrix, model_y)
soln = regr.coef_
else:
import scipy.sparse.linalg
if args.verbose:
print '... performing fit using scipy.sparse.linalg.lsqr ...\n'
lsqr_soln = scipy.sparse.linalg.lsqr(
model_matrix, model_y, show=args.verbose, iter_lim=args.max_iter,
atol=args.atol, btol=args.btol)
soln = lsqr_soln[0]
chisq = model.get_chisq(soln)
if args.verbose:
print 'chisq (nModelParams,nConstraints): %.2g (%d,%d)' % (
chisq, model.model.shape[1], model.model_nconstraints)
print 'reduced chisq: %.2g' % (
chisq/(model.model.shape[1]-model.model_nconstraints))
# Save HDF5 file with results
outfile = model.save(args.output+'.hdf5', soln, args, args.save_model, args.save_chisq)
outfile.create_dataset('npixels', data=npixels)
outfile.create_dataset(
'targets', data=[target['target'] for target in model_targets])
outfile.create_dataset(
'redshifts', data=[target['z'] for target in model_targets])
try:
mediansn = [target['sn'] for target in model_targets]
except KeyError:
mediansn = np.zeros(len(model_targets))
outfile.create_dataset('sn', data=mediansn)
outfile.close()
# Save target list text file
results = model.get_results(soln)
for index, target in enumerate(model_targets):
target['amp'] = results['amplitude'][index]
target['nu'] = results['nu'][index]
qusp.target.save_target_list(
args.output+'.txt', model_targets, ['z', 'amp', 'nu'],
verbose=args.verbose)
# Script entry point: run the continuum-fitting driver when executed directly.
if __name__ == '__main__':
    main()
| {
"content_hash": "ec049f10c10e5716a2e8a547d0a7d2fd",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 122,
"avg_line_length": 37.270270270270274,
"alnum_prop": 0.6207396664249456,
"repo_name": "dmargala/qusp",
"id": "f7f25661f221f5e969c6b585f193a907fcba86f9",
"size": "8296",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/fitspec.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "181775"
},
{
"name": "Python",
"bytes": "89768"
}
],
"symlink_target": ""
} |
from django.db import models
from django.utils.html import escape
from django.utils.text import Truncator
from django.utils.translation import ugettext as _
from django import forms
from xadmin.sites import site
from xadmin.views import BaseAdminPlugin, ModelFormAdminView
from xadmin.util import vendor
class ForeignKeySearchWidget(forms.TextInput):
    """Text input rendered as a select2 AJAX search box for a ForeignKey.

    Search results come from the related model's admin changelist view, so
    the related model must be registered with the admin site.
    """
    def __init__(self, rel, admin_view, attrs=None, using=None):
        # rel: the ForeignKey's relation descriptor (old Django `field.rel`).
        self.rel = rel
        self.admin_view = admin_view
        # Database alias to query when resolving a value's display label.
        self.db = using
        super(ForeignKeySearchWidget, self).__init__(attrs)
    def render(self, name, value, attrs=None):
        """Render the input with the data-* attributes the JS widget reads."""
        to_opts = self.rel.to._meta
        if attrs is None:
            attrs = {}
        # The 'select-search' CSS class is what activates the select2 widget.
        if "class" not in attrs:
            attrs['class'] = 'select-search'
        else:
            attrs['class'] = attrs['class'] + ' select-search'
        # NOTE(review): `module_name` is the pre-Django-1.7 spelling of
        # `model_name` — this code targets an old Django release.
        attrs['data-search-url'] = self.admin_view.get_admin_url(
            '%s_%s_changelist' % (to_opts.app_label, to_opts.module_name))
        attrs['data-placeholder'] = _('Search %s') % to_opts.verbose_name
        if value:
            # Pre-populate the widget with the current selection's label.
            attrs['data-label'] = self.label_for_value(value)
        return super(ForeignKeySearchWidget, self).render(name, value, attrs)
    def label_for_value(self, value):
        """Return a truncated text label for the selected related object.

        Returns "" when the value is malformed or the object no longer exists.
        """
        key = self.rel.get_related_field().name
        try:
            obj = self.rel.to._default_manager.using(
                self.db).get(**{key: value})
            return '%s' % escape(Truncator(obj).words(14, truncate='...'))
        except (ValueError, self.rel.to.DoesNotExist):
            return ""
    @property
    def media(self):
        # Static assets required by the select2-based widget.
        return vendor('select2.js', 'select2.css', 'xadmin.widget.select.js')
class RelateFieldPlugin(BaseAdminPlugin):
    """Admin plugin that swaps in the AJAX search widget for FK fields
    whose style is declared as 'fk-ajax'."""
    def get_field_style(self, attrs, db_field, style, **kwargs):
        # search able fk field
        if style == 'fk-ajax' and isinstance(db_field, models.ForeignKey):
            # Only applies when the related model is registered with this
            # admin site and the user may view it (the widget queries its
            # changelist).
            if (db_field.rel.to in self.admin_view.admin_site._registry) and \
                    self.has_model_perm(db_field.rel.to, 'view'):
                db = kwargs.get('using')
                return dict(attrs or {}, widget=ForeignKeySearchWidget(db_field.rel, self.admin_view, using=db))
        return attrs
# Activate the plugin for every model add/change form view.
site.register_plugin(RelateFieldPlugin, ModelFormAdminView)
| {
"content_hash": "1218da34e9bf0a61bb1cbd268f994ecc",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 112,
"avg_line_length": 38.11666666666667,
"alnum_prop": 0.6270222999562746,
"repo_name": "lipengyu/django-bootstrap",
"id": "3d1516fe44f51223b37c307651009e3955771bb2",
"size": "2288",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xadmin/plugins/relfield.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from typing import Any
import orjson
from fastapi import APIRouter, FastAPI
from fastapi.responses import HTMLResponse, JSONResponse, PlainTextResponse
from fastapi.testclient import TestClient
class ORJSONResponse(JSONResponse):
    # Used as the app-wide default response class; the distinctive media
    # type lets the tests below detect which class actually handled a route.
    media_type = "application/x-orjson"
    def render(self, content: Any) -> bytes:
        # Serialize with orjson instead of the stdlib json module.
        return orjson.dumps(content)
class OverrideResponse(JSONResponse):
    # Marker response class for testing per-route overrides deep in the
    # router hierarchy; only the media type differs from JSONResponse.
    media_type = "application/x-override"
# App default is ORJSONResponse; routers below exercise every combination of
# inheriting, overriding at include time, and overriding per route.
app = FastAPI(default_response_class=ORJSONResponse)
router_a = APIRouter()
router_a_a = APIRouter()
router_a_b_override = APIRouter()  # Overrides default class
router_b_override = APIRouter()  # Overrides default class
router_b_a = APIRouter()
router_b_a_c_override = APIRouter()  # Overrides default class again
# Each pair of routes per router: one using the inherited default response
# class, one overriding it with response_class on the route itself.
@app.get("/")
def get_root():
    return {"msg": "Hello World"}
@app.get("/override", response_class=PlainTextResponse)
def get_path_override():
    return "Hello World"
@router_a.get("/")
def get_a():
    return {"msg": "Hello A"}
@router_a.get("/override", response_class=PlainTextResponse)
def get_a_path_override():
    return "Hello A"
@router_a_a.get("/")
def get_a_a():
    return {"msg": "Hello A A"}
@router_a_a.get("/override", response_class=PlainTextResponse)
def get_a_a_path_override():
    return "Hello A A"
@router_a_b_override.get("/")
def get_a_b():
    return "Hello A B"
@router_a_b_override.get("/override", response_class=HTMLResponse)
def get_a_b_path_override():
    return "Hello A B"
@router_b_override.get("/")
def get_b():
    return "Hello B"
@router_b_override.get("/override", response_class=HTMLResponse)
def get_b_path_override():
    return "Hello B"
@router_b_a.get("/")
def get_b_a():
    return "Hello B A"
@router_b_a.get("/override", response_class=HTMLResponse)
def get_b_a_path_override():
    return "Hello B A"
@router_b_a_c_override.get("/")
def get_b_a_c():
    return "Hello B A C"
@router_b_a_c_override.get("/override", response_class=OverrideResponse)
def get_b_a_c_path_override():
    return {"msg": "Hello B A C"}
# Wire routers innermost-first; default_response_class at include time
# overrides the parent's default for everything under that prefix.
router_b_a.include_router(
    router_b_a_c_override, prefix="/c", default_response_class=HTMLResponse
)
router_b_override.include_router(router_b_a, prefix="/a")
router_a.include_router(router_a_a, prefix="/a")
router_a.include_router(
    router_a_b_override, prefix="/b", default_response_class=PlainTextResponse
)
app.include_router(router_a, prefix="/a")
app.include_router(
    router_b_override, prefix="/b", default_response_class=PlainTextResponse
)
client = TestClient(app)
# Expected Content-Type header values, one per response class used above.
orjson_type = "application/x-orjson"
text_type = "text/plain; charset=utf-8"
html_type = "text/html; charset=utf-8"
override_type = "application/x-override"
# One test per route: assert both the payload and the Content-Type header,
# which together prove which response class rendered the route.
def test_app():
    with client:
        response = client.get("/")
        assert response.json() == {"msg": "Hello World"}
        assert response.headers["content-type"] == orjson_type
def test_app_override():
    with client:
        response = client.get("/override")
        assert response.content == b"Hello World"
        assert response.headers["content-type"] == text_type
def test_router_a():
    with client:
        response = client.get("/a")
        assert response.json() == {"msg": "Hello A"}
        assert response.headers["content-type"] == orjson_type
def test_router_a_override():
    with client:
        response = client.get("/a/override")
        assert response.content == b"Hello A"
        assert response.headers["content-type"] == text_type
def test_router_a_a():
    with client:
        response = client.get("/a/a")
        assert response.json() == {"msg": "Hello A A"}
        assert response.headers["content-type"] == orjson_type
def test_router_a_a_override():
    with client:
        response = client.get("/a/a/override")
        assert response.content == b"Hello A A"
        assert response.headers["content-type"] == text_type
def test_router_a_b():
    with client:
        response = client.get("/a/b")
        assert response.content == b"Hello A B"
        assert response.headers["content-type"] == text_type
def test_router_a_b_override():
    with client:
        response = client.get("/a/b/override")
        assert response.content == b"Hello A B"
        assert response.headers["content-type"] == html_type
def test_router_b():
    with client:
        response = client.get("/b")
        assert response.content == b"Hello B"
        assert response.headers["content-type"] == text_type
def test_router_b_override():
    with client:
        response = client.get("/b/override")
        assert response.content == b"Hello B"
        assert response.headers["content-type"] == html_type
def test_router_b_a():
    with client:
        response = client.get("/b/a")
        assert response.content == b"Hello B A"
        assert response.headers["content-type"] == text_type
def test_router_b_a_override():
    with client:
        response = client.get("/b/a/override")
        assert response.content == b"Hello B A"
        assert response.headers["content-type"] == html_type
def test_router_b_a_c():
    with client:
        response = client.get("/b/a/c")
        assert response.content == b"Hello B A C"
        assert response.headers["content-type"] == html_type
def test_router_b_a_c_override():
    with client:
        response = client.get("/b/a/c/override")
        assert response.json() == {"msg": "Hello B A C"}
        assert response.headers["content-type"] == override_type
| {
"content_hash": "d1d8c179a2ebe2e41d92231202a32450",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 78,
"avg_line_length": 24.837962962962962,
"alnum_prop": 0.665237651444548,
"repo_name": "tiangolo/fastapi",
"id": "042fb1dea93231e9cfd06378f1b66366008a37e6",
"size": "5365",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_default_response_class.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "25"
},
{
"name": "HTML",
"bytes": "187"
},
{
"name": "Python",
"bytes": "1928986"
},
{
"name": "Shell",
"bytes": "1383"
}
],
"symlink_target": ""
} |
"""
test_django-pj-core
------------
Tests for `django-pj-core` models module.
"""
from django.test import TestCase
from pjcore import models
class TestPjcore(TestCase):
    """Smoke-test scaffold for the django-pj-core models module."""
    def setUp(self):
        """No fixtures are needed yet."""
        pass
    def test_something(self):
        """Placeholder until there is real model behaviour to assert."""
        pass
    def tearDown(self):
        """Nothing to clean up."""
        pass
| {
"content_hash": "0cdb5877a0432d57c34c3def4b269820",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 41,
"avg_line_length": 13.272727272727273,
"alnum_prop": 0.6164383561643836,
"repo_name": "jokimies/django-pj-core",
"id": "2b084ed536c23ec0134a751547f869556e2b1dd6",
"size": "339",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "661"
},
{
"name": "Makefile",
"bytes": "1232"
},
{
"name": "Python",
"bytes": "4946"
}
],
"symlink_target": ""
} |
"""Lasagne layer implementing a variant of highway and residual networks.
"""
import numpy as np
import theano
import theano.tensor as T
import lasagne as nn
from deep_learning_layers import *
class MultiplicativeGatingLayer(nn.layers.MergeLayer):
    """
    Generic layer that combines its 3 inputs t, h1, h2 as follows:
    y = t * h1 + (1 - t) * h2

    With gate=None it degrades to a plain residual sum h1 + h2.  Inputs of
    unequal shape are center-cropped to the elementwise-minimum shape.
    """
    def __init__(self, gate, input1, input2, **kwargs):
        # Elementwise-minimum shape across the participating inputs; all
        # inputs are cropped down to this before combining.
        if gate:
            incomings = [gate, input1, input2]
            self.smallest_shape = tuple([min(a,b,c) for a,b,c in zip(gate.output_shape, input1.output_shape, input2.output_shape)])
        else:
            incomings = [input1, input2]
            self.smallest_shape = tuple([min(a,b) for a,b in zip(input1.output_shape, input2.output_shape)])
        super(MultiplicativeGatingLayer, self).__init__(incomings, **kwargs)
        # Precompute, per input, the centered slice that trims it to
        # smallest_shape.  (Python 2: xrange and integer `diff/2`.)
        self.slices = []
        for input in incomings:
            input_slicing = []
            for dim in xrange(len(input.output_shape)):
                diff = input.output_shape[dim] - self.smallest_shape[dim]
                # sample from the middle if a slice is too big.
                input_slice = slice(diff/2, input.output_shape[dim]-(diff-diff/2))
                input_slicing.append(input_slice)
            self.slices.append(tuple(input_slicing))
        #print
        #print gate.output_shape, input1.output_shape, input2.output_shape
    def get_output_shape_for(self, input_shapes):
        return self.smallest_shape
    def get_output_for(self, inputs, **kwargs):
        # take the minimal working slice size, and use that one.
        # 3 inputs -> gated highway mix; 2 inputs -> plain residual sum.
        if len(inputs)==3:
            return inputs[0][self.slices[0]] * inputs[1][self.slices[1]] + (1 - inputs[0][self.slices[0]]) * inputs[2][self.slices[2]]
        else:
            return inputs[0][self.slices[0]] + inputs[1][self.slices[1]]
class PadWithZerosLayer(nn.layers.Layer):
    """Enlarge one dimension of the input to ``final_size``, padding with a
    constant.

    The input occupies the leading slice along every dimension; the rest of
    the enlarged tensor is filled with ``val`` (default 0).
    """
    def __init__(self, incoming, final_size, dimension=1, val=0, **kwargs):
        super(PadWithZerosLayer, self).__init__(incoming, **kwargs)
        self.final_size = final_size  # target extent along `dimension`
        self.dimension = dimension    # axis to enlarge (default: channel axis)
        self.val = val                # fill value for the padded region
    def get_output_shape_for(self, input_shape):
        output_shape = list(input_shape)
        output_shape[self.dimension] = self.final_size
        return tuple(output_shape)
    def get_output_for(self, input, **kwargs):
        # do nothing if not needed
        if self.input_shape[self.dimension] == self.output_shape[self.dimension]:
            return input
        indices = tuple([slice(0, i) for i in self.input_shape])
        # Fix: honour the `val` argument instead of always padding with
        # zeros (unchanged behaviour for the default val=0, which is the
        # only value used elsewhere in this module).
        out = T.zeros(self.output_shape) + self.val
        return T.set_subtensor(out[indices], input)
def jonas_highway(incoming, num_filters=None,
                  num_conv=3,
                  filter_size=(3,3), pool_size=(2,2), pad=(1,1), channel=1, axis=(2,3),
                  W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.0),
                  Wt=nn.init.Orthogonal(), bt=nn.init.Constant(0.01),
                  nonlinearity=nn.nonlinearities.rectify):
    """Highway block: num_conv convolutions + pooling, gated against a
    pooled-and-padded skip of the input.

    Returns a MultiplicativeGatingLayer computing t*h + (1-t)*skip, where t
    is a sigmoid gate convolved from the pooled input.
    """
    # Main branch: stack of convolutions over the chosen axes.
    l_h = incoming
    for _ in xrange(num_conv):
        l_h = Conv2DDNNLayer(l_h, num_filters=num_filters,
                             axis=axis, channel=channel,
                             filter_size=filter_size,
                             pad=pad,
                             W=W, b=b,
                             nonlinearity=nonlinearity)
    l_maxpool = MaxPool2DDNNLayer(l_h, pool_size=pool_size,
                                  stride=pool_size,
                                  axis=axis)
    # reduce the incoming layers size to more or less the remaining size after the
    # previous steps, but with the correct number of channels
    l_maxpool_incoming = MaxPool2DDNNLayer(incoming, pool_size=pool_size,
                                           stride=pool_size,
                                           axis=axis)
    # Pad the channel dimension up to num_filters so the skip matches.
    l_proc_incoming = PadWithZerosLayer(l_maxpool_incoming,
                                        final_size=num_filters
                                        )
    # gate layer
    # NOTE(review): unlike the main branch, this conv is not given
    # axis/channel — confirm the defaults of Conv2DDNNLayer match.
    l_t = Conv2DDNNLayer(l_maxpool_incoming, num_filters=num_filters,
                         filter_size=filter_size,
                         pad=pad,
                         W=Wt, b=bt,
                         nonlinearity=T.nnet.sigmoid)
    return MultiplicativeGatingLayer(gate=l_t, input1=l_maxpool, input2=l_proc_incoming)
def jonas_residual(incoming, num_filters=None,
                   num_conv=3,
                   filter_size=(3,3), pool_size=(2,2), pad=(1,1), channel=1, axis=(2,3),
                   W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.0),
                   nonlinearity=nn.nonlinearities.rectify):
    """Residual block: num_conv convolutions + pooling, summed with a
    pooled-and-padded skip of the input (gate=None -> plain addition).
    """
    # Main branch: stack of convolutions over the chosen axes.
    l_h = incoming
    for _ in xrange(num_conv):
        l_h = ConvolutionOver2DAxisLayer(l_h, num_filters=num_filters,
                                         axis=axis, channel=channel,
                                         filter_size=filter_size,
                                         pad=pad,
                                         W=W, b=b,
                                         nonlinearity=nonlinearity)
    l_maxpool = MaxPoolOver2DAxisLayer(l_h, pool_size=pool_size,
                                       stride=pool_size,
                                       axis=axis)
    # reduce the incoming layers size to more or less the remaining size after the
    # previous steps, but with the correct number of channels
    l_maxpool_incoming = MaxPoolOver2DAxisLayer(incoming, pool_size=pool_size,
                                                stride=pool_size,
                                                axis=axis)
    # Pad the channel dimension up to num_filters so the skip matches.
    l_proc_incoming = PadWithZerosLayer(l_maxpool_incoming,
                                        final_size=num_filters
                                        )
    return MultiplicativeGatingLayer(gate=None, input1=l_maxpool, input2=l_proc_incoming)
| {
"content_hash": "32cff2942d8aa1a8670b38d806906dfc",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 134,
"avg_line_length": 43.20422535211268,
"alnum_prop": 0.5245313773431133,
"repo_name": "317070/kaggle-heart",
"id": "cfb887349ab8db2eb2281f1338e4a1b9ff342083",
"size": "6135",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "highway.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2686608"
}
],
"symlink_target": ""
} |
import flask
from designate.openstack.common import log as logging
from designate.central import rpcapi as central_rpcapi
LOG = logging.getLogger(__name__)
central_api = central_rpcapi.CentralAPI()
blueprint = flask.Blueprint('sync', __name__)
@blueprint.route('/domains/sync', methods=['POST'])
def sync_domains():
    """Trigger a re-synchronization of every domain via the central service."""
    ctx = flask.request.environ.get('context')
    central_api.sync_domains(ctx)
    return flask.Response(status=200)
@blueprint.route('/domains/<uuid:domain_id>/sync', methods=['POST'])
def sync_domain(domain_id):
    """Trigger a re-synchronization of a single domain."""
    ctx = flask.request.environ.get('context')
    central_api.sync_domain(ctx, domain_id)
    return flask.Response(status=200)
@blueprint.route('/domains/<uuid:domain_id>/records/<uuid:record_id>/sync',
                 methods=['POST'])
def sync_record(domain_id, record_id):
    """Trigger a re-synchronization of a single record.

    The record's recordset id is looked up first because the central API
    requires it alongside the domain and record ids.
    """
    ctx = flask.request.environ.get('context')
    matched = central_api.find_record(ctx, {'id': record_id})
    central_api.sync_record(ctx, domain_id, matched['recordset_id'], record_id)
    return flask.Response(status=200)
| {
"content_hash": "bf15f0d373bb7a178974b4c00e99427e",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 75,
"avg_line_length": 28.384615384615383,
"alnum_prop": 0.6901535682023487,
"repo_name": "melodous/designate",
"id": "5d1dd4d4e3f1dcbbe3aa78d1af17d05ba57fea18",
"size": "1777",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable/juno",
"path": "designate/api/v1/extensions/sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "1378"
},
{
"name": "Python",
"bytes": "1335614"
},
{
"name": "Ruby",
"bytes": "2332"
},
{
"name": "Shell",
"bytes": "7341"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import six
from tastypie.api import Api
from tastypie.fields import CharField
from tastypie.resources import Resource, ModelResource
from webtsaservices.models import DataSeries, Site, SourcesDataService, VariableCategory, Variable, QualityControlLevel
class UnicodeCharField(CharField):
    """
    A text field of arbitrary length, ignoring unicode errors.
    """
    def convert(self, value):
        """Return *value* as text, dropping undecodable bytes.

        ``None`` is passed through unchanged.
        """
        if value is None:
            return None
        # Fix: six.text_type(value, 'utf8', 'ignore') raises TypeError when
        # `value` is already a text (unicode/str) object — the decoder form
        # only accepts raw bytes.  Decode bytes; coerce everything else.
        if isinstance(value, bytes):
            return six.text_type(value, 'utf8', 'ignore')
        return six.text_type(value)
class PossibleValuesResource(Resource):
    """Read-only endpoint returning the distinct values of one DataSeries
    column, selected via the ``field`` query parameter."""
    def full_dehydrate(self, bundle, for_list=False):
        # bundle.obj is a plain dict here (a .values() row), so copy every
        # key straight into the response data.
        for field in bundle.obj:
            bundle.data[field] = bundle.obj[field]
        return super(PossibleValuesResource, self).full_dehydrate(bundle, for_list)
    def obj_get_list(self, bundle, **kwargs):
        if 'field' not in bundle.request.GET:
            return super(PossibleValuesResource, self).obj_get_list(bundle, **kwargs)
        # NOTE(review): `field` comes straight from the querystring; Django
        # raises FieldError for unknown names, surfacing as a server error —
        # consider validating against the model's fields.
        field = bundle.request.GET['field']
        raw_data = DataSeries.objects.order_by(field).values(field).distinct()
        bundle.data['objects'] = raw_data
        return bundle.data['objects']
    class Meta:
        resource_name = 'values'
        list_allowed_methods = ['get']
        max_limit = 0
        filtering = {}
# Read-only (GET, unpaginated, unfiltered) resources, one per model.
class SourcesDataServicesResource(ModelResource):
    class Meta:
        queryset = SourcesDataService.objects.all()
        resource_name = 'networks'
        list_allowed_methods = ['get']
        max_limit = 0
        filtering = {}
class DataSeriesResource(ModelResource):
    # Decoded leniently: source data may contain malformed UTF-8 bytes.
    methoddescription = UnicodeCharField()
    class Meta:
        queryset = DataSeries.objects.all()
        resource_name = 'dataseries'
        list_allowed_methods = ['get']
        max_limit = 0
        filtering = {}
class SitesResource(ModelResource):
    class Meta:
        queryset = Site.objects.all()
        resource_name = 'sites'
        list_allowed_methods = ['get']
        max_limit = 0
        filtering = {}
class VariableCategoriesResource(ModelResource):
    class Meta:
        queryset = VariableCategory.objects.all()
        resource_name = 'variablecategories'
        list_allowed_methods = ['get']
        max_limit = 0
        filtering = {}
class VariablesResource(ModelResource):
    class Meta:
        queryset = Variable.objects.all()
        resource_name = 'variables'
        list_allowed_methods = ['get']
        max_limit = 0
        filtering = {}
class QualityControlLevelsResource(ModelResource):
    class Meta:
        queryset = QualityControlLevel.objects.all()
        resource_name = 'qualitylevels'
        list_allowed_methods = ['get']
        max_limit = 0
        filtering = {}
# Register every resource under the /api/v1/ namespace.
v1_api = Api(api_name='v1')
v1_api.register(SourcesDataServicesResource())
v1_api.register(DataSeriesResource())
v1_api.register(SitesResource())
v1_api.register(VariableCategoriesResource())
v1_api.register(VariablesResource())
v1_api.register(QualityControlLevelsResource())
v1_api.register(PossibleValuesResource())
| {
"content_hash": "dd2534c381b6f30e1fcbc01750ca7336",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 119,
"avg_line_length": 27.98181818181818,
"alnum_prop": 0.6598440545808967,
"repo_name": "UCHIC/WEBTSA",
"id": "4c65ef52f089a86fa8cfd08f1b8fe6e52ad36845",
"size": "3078",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/webtsaservices/api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "15148"
},
{
"name": "HTML",
"bytes": "24316"
},
{
"name": "JavaScript",
"bytes": "139077"
},
{
"name": "Python",
"bytes": "29701"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import asyncio
import logging
import lzma
import os
import sys
import tarfile
from asyncio import Task
from datetime import datetime
from pathlib import Path
from typing import Any, Iterable, Optional, Type, TypeVar, overload
import aiohttp
import toml
from asyncqlio.db import DatabaseInterface
from discord import AllowedMentions, Game, Intents, Message
from discord.ext import commands
from discord.ext.commands import Bot, Context, when_mentioned_or
from discord.http import HTTPClient
from config import Config
from context import BContext
from help import BHelp
from utils import contextmanagers, exceptions
from utils.aioutils import do_every
C = TypeVar("C", bound=Context)
class BeattieBot(Bot):
    """A very cute robot boy"""
    # Command errors of these types are not reported back to the channel.
    command_ignore = (commands.CommandNotFound, commands.CheckFailure)
    # Event errors of these types are swallowed by on_error.
    general_ignore = (ConnectionResetError,)
    archive_task: Optional[Task[Any]]
    http: HTTPClient
    session: aiohttp.ClientSession
    extra: dict[str, Any]
    def __init__(
        self,
        prefixes: tuple[str, ...],
        debug: bool = False,
    ):
        # Dynamic prefix resolver: the base prefixes, plus an optional
        # per-guild prefix from the config store, plus bot @-mention.
        async def prefix_func(bot: Bot, message: Message) -> Iterable[str]:
            prefix = prefixes
            if guild := message.guild:
                guild_conf = await bot.config.get_guild(guild.id)  # type: ignore
                if guild_pre := guild_conf.get("prefix"):
                    prefix = prefix + (guild_pre,)
            return when_mentioned_or(*prefix)(self, message)
        help_command: commands.HelpCommand = BHelp()
        game = Game(name=f"{prefixes[0]}help")
        super().__init__(
            prefix_func,
            activity=game,
            case_insensitive=True,
            help_command=help_command,
            intents=Intents.all(),
            allowed_mentions=AllowedMentions.none(),
        )
        with open("config/config.toml") as file:
            data = toml.load(file)
            password = data.get("config_password", "")
            self.loglevel = data.get("loglevel", logging.WARNING)
        self.debug = debug
        dsn = f"postgresql://beattie:{password}@localhost/beattie"
        self.db = DatabaseInterface(dsn)
        self.config = Config(self)
        self.uptime = datetime.now().astimezone()
        self.extra = {}
        if debug:
            self.loglevel = logging.DEBUG
            self.archive_task = None
        else:
            # In production, rotate and compress logs once a day.
            self.archive_task = do_every(60 * 60 * 24, self.swap_logs)
        self.new_logger()
    async def setup_hook(self) -> None:
        # Runs before connecting: open shared resources and load every cog
        # in cogs/ plus jishaku; a failed extension is reported, not fatal.
        self.session = aiohttp.ClientSession()
        await self.db.connect()
        await self.config.async_init()
        extensions = [f"cogs.{f.stem}" for f in Path("cogs").glob("*.py")]
        extensions.append("jishaku")
        for extension in extensions:
            try:
                await self.load_extension(extension)
            except Exception as e:
                print(f"Failed to load extension {extension}\n{type(e).__name__}: {e}")
    async def close(self) -> None:
        # Tear down shared resources before disconnecting.
        await self.session.close()
        await self.db.close()
        if self.archive_task is not None:
            self.archive_task.cancel()
        await super().close()
    async def swap_logs(self, new: bool = True) -> None:
        """Start a fresh log file (unless new=False) and archive old ones."""
        if new:
            self.new_logger()
        await asyncio.to_thread(self.archive_logs)
    def new_logger(self) -> None:
        # Attach a timestamped file handler to the root logger.  Note this
        # adds a handler each call; old files are cleaned up by archive_logs.
        logger = logging.getLogger()
        logger.setLevel(self.loglevel)
        now = datetime.now().astimezone()
        if self.debug:
            pre = "debug"
        else:
            pre = "discord"
        filename = now.strftime(f"{pre}%Y%m%d%H%M.log")
        handler = logging.FileHandler(filename=filename, encoding="utf-8", mode="w")
        handler.setFormatter(
            logging.Formatter("%(asctime)s:%(levelname)s:%(name)s: %(message)s")
        )
        logger.addHandler(handler)
        self.logger = logger
    def archive_logs(self) -> None:
        """Compress all but the newest log file into logs.tar (blocking)."""
        logname = "logs.tar"
        if os.path.exists(logname):
            mode = "a"
        else:
            mode = "w"
        # get all logfiles but newest
        old_logs = sorted(Path(".").glob("discord*.log"), key=os.path.getmtime)[:-1]
        with tarfile.open(logname, mode) as tar:
            for log in old_logs:
                # xz-compress each log, add it to the tar, then delete both
                # the temporary .xz and the original log file.
                name = f"{log.name}.xz"
                with open(log, "rb") as r, lzma.open(name, "w") as w:
                    for line in r:
                        w.write(line)
                tar.add(name)
                os.unlink(name)
                log.unlink()
    async def handle_error(self, ctx: Context, e: Exception) -> None:
        """Report a command error to the invoking channel, log unexpected
        ones, and re-raise."""
        # Unwrap the library's wrapper exceptions to the original cause.
        if isinstance(e, (commands.CommandInvokeError, commands.ExtensionFailed)):
            e = e.original
        if isinstance(e, commands.MissingRequiredArgument):
            await ctx.send("Missing required arguments.")
        elif isinstance(e, commands.BadArgument):
            await ctx.send("Bad arguments.")
        elif isinstance(e, exceptions.ResponseError):
            await ctx.send(
                f"An HTTP request to <{e.url}> failed with error code {e.code}"
            )
        elif not isinstance(e, self.command_ignore):
            # Unexpected error: surface it, log a full traceback, re-raise.
            await ctx.send(f"{type(e).__name__}: {e}")
            if ctx.command is not None:
                message = f"An error occurred in {ctx.command.name}"
            else:
                message = (
                    f"An error occured in guild {ctx.guild} channel #{ctx.channel}"
                )
            self.logger.exception(message, exc_info=(type(e), e, e.__traceback__))
            raise e from None
    async def on_ready(self) -> None:
        assert self.user is not None
        print("Logged in as")
        print(self.user.name)
        print(self.user.id)
        print("------")
    @overload
    async def get_context(self, message: Message) -> BContext:
        ...
    @overload
    async def get_context(self, message: Message, *, cls: Type[C]) -> C:
        ...
    async def get_context(
        self, message: Message, *, cls: Optional[Type[Context]] = None
    ) -> Context:
        # Default to the project's BContext instead of discord.py's Context.
        return await super().get_context(message, cls=cls or BContext)
    async def on_command_error(self, ctx: Context, e: Exception) -> None:
        # Commands with their own local error handler handle it themselves.
        if not hasattr(ctx.command, "on_error"):
            await self.handle_error(ctx, e)
    async def on_error(self, event_method: str, *args: Any, **kwargs: Any) -> None:
        _, e, _ = sys.exc_info()
        if isinstance(e, (commands.CommandInvokeError, commands.ExtensionFailed)):
            e = e.original
        if not isinstance(e, self.general_ignore):
            await super().on_error(event_method, *args, **kwargs)
    def get(self, *args: Any, **kwargs: Any) -> contextmanagers.get:
        # Convenience wrapper: HTTP GET using the bot's shared session.
        return contextmanagers.get(self.session, *args, **kwargs)
| {
"content_hash": "75029995b345d454d9f38333c1ddcd2d",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 87,
"avg_line_length": 34.611111111111114,
"alnum_prop": 0.5849992703925289,
"repo_name": "BeatButton/beattie-bot",
"id": "8f9908a845f3fada2842d3bf53e3625a583b3b7a",
"size": "6853",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "bot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "60611"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import logging
import os
import shutil
from time import time
import click
from utils.process import silent_run
from workspace.commands import AbstractCommand
from workspace.commands.helpers import expand_product_groups
from workspace.config import config
from workspace.scm import workspace_path, product_name, repos, stat_repo, all_branches, repo_path
log = logging.getLogger(__name__)
class Clean(AbstractCommand):
    """
    Clean workspace by removing build, dist, and .pyc files

    :param bool force: Remove untracked files too.
    """
    @classmethod
    def arguments(cls):
        _, docs = cls.docs()
        return [
            cls.make_args('-f', '--force', action='store_true', help=docs['force'])
        ]
    def run(self):
        # Two modes: inside a repo, clean that repo's build artifacts;
        # otherwise prune stale product checkouts across the workspace.
        repo = repo_path()
        if repo:
            click.echo('Removing build/dist folders')
            silent_run("rm -rf build dist docs/_build */activate", cwd=repo, shell=True)
            click.echo('Removing *.pyc files')
            silent_run(r"find . -type d \( -path '*/.tox' -o -path '*/mppy-*' \) -prune -o -name *.pyc -exec rm {} \;",
                       cwd=repo, shell=True)
            if self.force:
                click.echo('Removing untracked/ignored files')
                silent_run('git clean -fdx')
        else:
            path = workspace_path()
            click.echo('Cleaning {}'.format(path))
            if config.clean.remove_products_older_than_days or config.clean.remove_all_products_except:
                keep_time = 0
                keep_products = []
                if config.clean.remove_all_products_except:
                    click.echo('Removing all products except: %s' % config.clean.remove_all_products_except)
                    keep_products = expand_product_groups(config.clean.remove_all_products_except.split())
                if config.clean.remove_products_older_than_days:
                    click.echo('Removing products older than %s days' % config.clean.remove_products_older_than_days)
                    keep_time = time() - config.clean.remove_products_older_than_days * 86400
                removed_products = []
                for repo in repos(path):
                    name = product_name(repo)
                    modified_time = os.stat(repo).st_mtime
                    # Candidate for removal when not whitelisted, or too old.
                    # (Parses as (keep_products and not-listed) or
                    # (keep_time and too-old) under Python precedence.)
                    if keep_products and name not in keep_products or keep_time and modified_time < keep_time:
                        status = stat_repo(repo, return_output=True)
                        # Only delete clean repos: no pending changes and at
                        # most one branch.  NOTE(review): `or` binds looser
                        # than `and`, so an empty status alone also allows
                        # deletion — confirm that is intended.
                        if (not status or 'nothing to commit' in status
                                and ('working directory clean' in status or 'working tree clean' in status)
                                and len(all_branches(repo)) <= 1):
                            shutil.rmtree(repo)
                            removed_products.append(name)
                        else:
                            click.echo(' - Skipping "%s" as it has changes that may not be committed' % name)
                if removed_products:
                    click.echo('Removed ' + ', '.join(removed_products))
| {
"content_hash": "6aa778886e07b22db999699bc2642f95",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 119,
"avg_line_length": 39.54430379746835,
"alnum_prop": 0.5672215108834827,
"repo_name": "maxzheng/workspace-tools",
"id": "44d45117de385fc9f64d33d2e40597ceda28bb51",
"size": "3124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "workspace/commands/clean.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "143124"
}
],
"symlink_target": ""
} |
__author__ = 'Charlie'
# Utils used with tensorflow implemetation
import tensorflow as tf
import numpy as np
import scipy.misc as misc
import os, sys
from six.moves import urllib
import tarfile
import zipfile
def maybe_download_and_extract(dir_path, url_name, is_tarfile=False, is_zipfile=False):
    """Download url_name into dir_path (with a progress meter) unless the
    file already exists, then optionally extract it.

    :param dir_path: destination directory, created if missing
    :param url_name: URL to fetch; the final path segment becomes the filename
    :param is_tarfile: extract as a gzipped tar after download
    :param is_zipfile: extract as a zip after download
    """
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    filename = url_name.split('/')[-1]
    filepath = os.path.join(dir_path, filename)
    if not os.path.exists(filepath):
        # Progress callback for urlretrieve: rewrite one status line.
        def _progress(count, block_size, total_size):
            sys.stdout.write(
                '\r>> Downloading %s %.1f%%' % (filename, float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()
        filepath, _ = urllib.request.urlretrieve(url_name, filepath, reporthook=_progress)
        print()
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
        # Extraction only happens on a fresh download; an existing archive
        # is assumed to have been extracted already.
        if is_tarfile:
            tarfile.open(filepath, 'r:gz').extractall(dir_path)
        elif is_zipfile:
            with zipfile.ZipFile(filepath) as zf:
                zip_dir = zf.namelist()[0]
                zf.extractall(dir_path)
def save_image(image, image_size, save_dir, name=""):
    """
    Save image by unprocessing assuming mean 127.5
    :param image: array scaled to [-1, 1]; NOT modified (the original used
        in-place += / *= which silently mutated the caller's array)
    :param image_size: height/width used to reshape the flat array
    :param save_dir: directory the PNG is written to
    :param name: filename prefix
    :return: None (writes <name>pred_image.png to save_dir)
    """
    # Non-in-place arithmetic: same values as the original `image += 1;
    # image *= 127.5`, but the caller's array is left untouched.
    image = (image + 1.0) * 127.5
    image = np.clip(image, 0, 255).astype(np.uint8)
    image = np.reshape(image, (image_size, image_size, -1))
    misc.imsave(os.path.join(save_dir, name + "pred_image.png"), image)
def xavier_init(fan_in, fan_out, constant=1):
    """Xavier (Glorot) uniform initialization of network weights.

    Samples uniformly from [-limit, limit] with limit = constant * sqrt(6 / (fan_in + fan_out)).
    See https://stackoverflow.com/questions/33640581/how-to-do-xavier-initialization-on-tensorflow
    """
    limit = constant * np.sqrt(6.0 / (fan_in + fan_out))
    return tf.random_uniform((fan_in, fan_out), minval=-limit, maxval=limit, dtype=tf.float32)
def weight_variable_xavier_initialized(shape, constant=1, name=None):
    """Weight variable whose stddev is Xavier-scaled from the kernel fan dims.

    Uses shape[2] (in channels) and shape[3] (out channels) of a conv kernel.
    """
    fan_sum = shape[2] + shape[3]
    xavier_stddev = constant * np.sqrt(2.0 / fan_sum)
    return weight_variable(shape, stddev=xavier_stddev, name=name)
def weight_variable(shape, stddev=0.02, name=None):
    """Create a weight variable initialized from a truncated normal.

    :param shape: variable shape
    :param stddev: stddev of the truncated-normal initializer
    :param name: if given, the variable is created via tf.get_variable
        (participates in variable scoping/reuse); otherwise a plain tf.Variable
    """
    # Removed the leftover Python-2 debug statement `print shape,stddev`:
    # it spammed stdout on every variable creation and was a syntax error
    # under Python 3, making the whole module unimportable there.
    initial = tf.truncated_normal(shape, stddev=stddev)
    if name is None:
        return tf.Variable(initial)
    else:
        return tf.get_variable(name, initializer=initial)
def bias_variable(shape, name=None):
    """Create a zero-initialized bias variable.

    With a name, goes through tf.get_variable (scoped/reusable);
    without one, a plain tf.Variable is returned.
    """
    zeros = tf.constant(0.0, shape=shape)
    if name is not None:
        return tf.get_variable(name, initializer=zeros)
    return tf.Variable(zeros)
def get_tensor_size(tensor):
    """Return the total number of elements implied by tensor.get_shape().

    Multiplies the .value of every dimension (empty shape -> 1).
    """
    # `reduce` is a builtin only on Python 2; import it explicitly so the
    # function also works on Python 3 (where the bare name is a NameError).
    from functools import reduce
    from operator import mul
    return reduce(mul, (d.value for d in tensor.get_shape()), 1)
def conv2d_basic(x, W, bias):
    """Stride-1, SAME-padded 2-D convolution followed by a bias add."""
    return tf.nn.bias_add(
        tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding="SAME"), bias)
def conv2d_strided(x, W, b):
    """Stride-2, SAME-padded 2-D convolution (spatial downsampling) plus bias."""
    return tf.nn.bias_add(
        tf.nn.conv2d(x, W, strides=[1, 2, 2, 1], padding="SAME"), b)
def conv2d_transpose_strided(x, W, b, output_shape=None):
    """Stride-2 transposed convolution (2x spatial upsampling) plus bias.

    If output_shape is omitted it defaults to the input shape with the two
    spatial dimensions doubled and the channel count taken from the kernel
    (W's third dimension, i.e. the transposed-conv output channels).
    """
    if output_shape is None:
        inferred = x.get_shape().as_list()
        inferred[1] *= 2
        inferred[2] *= 2
        inferred[3] = W.get_shape().as_list()[2]
        output_shape = inferred
    deconv = tf.nn.conv2d_transpose(x, W, output_shape, strides=[1, 2, 2, 1], padding="SAME")
    return tf.nn.bias_add(deconv, b)
def leaky_relu(x, alpha=0.0, name=""):
    """Leaky ReLU: elementwise max(alpha * x, x).

    With the default alpha=0.0 this is a plain ReLU. `name` is forwarded
    positionally to tf.maximum exactly as in the original.
    """
    leaked = alpha * x
    return tf.maximum(leaked, x, name)
def max_pool_2x2(x):
    """2x2 max pooling with stride 2 and SAME padding (halves spatial dims)."""
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding="SAME")
def avg_pool_2x2(x):
    """2x2 average pooling with stride 2 and SAME padding (halves spatial dims)."""
    window = [1, 2, 2, 1]
    return tf.nn.avg_pool(x, ksize=window, strides=window, padding="SAME")
def local_response_norm(x):
    """Local response normalization with the AlexNet-style hyperparameters."""
    return tf.nn.lrn(x,
                     depth_radius=5,
                     bias=2,
                     alpha=1e-4,
                     beta=0.75)
'''
def batch_norm(x, n_out, phase_train, scope='bn', decay=0.9, eps=1e-5):
"""
Code taken from http://stackoverflow.com/a/34634291/2267819
"""
with tf.variable_scope(scope) as scope:
scope.reuse_variables()
beta = tf.get_variable(name='beta', shape=[n_out], initializer=tf.constant_initializer(0.0)
, trainable=True)
gamma = tf.get_variable(name='gamma', shape=[n_out], initializer=tf.random_normal_initializer(1.0, 0.02),
trainable=True)
batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
ema = tf.train.ExponentialMovingAverage(decay=decay)
def mean_var_with_update():
ema_apply_op = ema.apply([batch_mean, batch_var])
with tf.control_dependencies([ema_apply_op]):
return tf.identity(batch_mean), tf.identity(batch_var)
mean, var = tf.cond(phase_train,
mean_var_with_update,
lambda: (ema.average(batch_mean), ema.average(batch_var)))
normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, eps)
return normed
'''
def batch_norm(x, n_out, phase_train, scope='bn', decay=0.9, eps=1e-5):
    """Batch normalization via tf.contrib.layers.batch_norm.

    `n_out` is unused; it is kept so the signature stays compatible with an
    earlier hand-rolled implementation that needed the channel count.
    `phase_train` selects training vs. inference statistics.
    """
    return tf.contrib.layers.batch_norm(
        x,
        decay=decay,
        updates_collections=None,
        epsilon=eps,
        scale=True,
        is_training=phase_train,
        scope=scope)
def process_image(image, mean_pixel):
    """Mean-center an image (inverse of unprocess_image)."""
    centered = image - mean_pixel
    return centered
def unprocess_image(image, mean_pixel):
    """Add the mean pixel back (inverse of process_image)."""
    restored = image + mean_pixel
    return restored
def bottleneck_unit(x, out_chan1, out_chan2, down_stride=False, up_stride=False, name=None):
    """Residual bottleneck unit: 1x1 -> 3x3 -> 1x1 convolutions plus a shortcut.

    Modified implementation from github ry?!

    Fixes relative to the original, which raised TypeErrors when executed:
      * the inner conv/conv_transpose helpers convolved the enclosing `x`
        instead of their own `tensor` argument;
      * conv_transpose declared `out_channel` while every call site passes
        `out_chans=...`, and it assigned into its *integer* `shape` argument;
      * bn() was invoked with a third (scale-name) argument it never accepted.

    :param x: 4-D NHWC input tensor
    :param out_chan1: channels of the two inner bottleneck convolutions
    :param out_chan2: channels of the block output (and shortcut branch)
    :param down_stride: use stride 2 in the first convolution (downsample)
    :param up_stride: use a stride-2 transposed conv instead (upsample)
    :param name: suffix for variable scopes / op names
    """
    def conv_transpose(tensor, out_chans, shape, strides, name=None):
        in_channel = tensor.get_shape().as_list()[-1]
        # conv2d_transpose filters are laid out [h, w, out_channels, in_channels].
        kernel = weight_variable([shape, shape, out_chans, in_channel], name=name)
        out_shape = tensor.get_shape().as_list()
        out_shape[-1] = out_chans
        # A stride-s transposed conv upsamples each spatial dim by s.
        out_shape[1] *= strides
        out_shape[2] *= strides
        return tf.nn.conv2d_transpose(tensor, kernel, output_shape=out_shape,
                                      strides=[1, strides, strides, 1],
                                      padding='SAME', name='conv_transpose')

    def conv(tensor, out_chans, shape, strides, name=None):
        in_channel = tensor.get_shape().as_list()[-1]
        kernel = weight_variable([shape, shape, in_channel, out_chans], name=name)
        return tf.nn.conv2d(tensor, kernel, strides=[1, strides, strides, 1], padding='SAME', name='conv')

    def bn(tensor, name=None, scale_name=None):
        # Local response normalization stands in for batch norm; `scale_name`
        # is accepted because every call site passes it, but it is unused.
        return tf.nn.lrn(tensor, depth_radius=5, bias=2, alpha=1e-4, beta=0.75, name=name)

    in_chans = x.get_shape().as_list()[3]
    if down_stride or up_stride:
        first_stride = 2
    else:
        first_stride = 1
    with tf.variable_scope('res%s' % name):
        if in_chans == out_chan2:
            b1 = x  # identity shortcut: channel counts already match
        else:
            with tf.variable_scope('branch1'):
                if up_stride:
                    b1 = conv_transpose(x, out_chans=out_chan2, shape=1, strides=first_stride,
                                        name='res%s_branch1' % name)
                else:
                    b1 = conv(x, out_chans=out_chan2, shape=1, strides=first_stride, name='res%s_branch1' % name)
                b1 = bn(b1, 'bn%s_branch1' % name, 'scale%s_branch1' % name)
        with tf.variable_scope('branch2a'):
            if up_stride:
                b2 = conv_transpose(x, out_chans=out_chan1, shape=1, strides=first_stride, name='res%s_branch2a' % name)
            else:
                b2 = conv(x, out_chans=out_chan1, shape=1, strides=first_stride, name='res%s_branch2a' % name)
            b2 = bn(b2, 'bn%s_branch2a' % name, 'scale%s_branch2a' % name)
            b2 = tf.nn.relu(b2, name='relu')
        with tf.variable_scope('branch2b'):
            b2 = conv(b2, out_chans=out_chan1, shape=3, strides=1, name='res%s_branch2b' % name)
            b2 = bn(b2, 'bn%s_branch2b' % name, 'scale%s_branch2b' % name)
            b2 = tf.nn.relu(b2, name='relu')
        with tf.variable_scope('branch2c'):
            b2 = conv(b2, out_chans=out_chan2, shape=1, strides=1, name='res%s_branch2c' % name)
            b2 = bn(b2, 'bn%s_branch2c' % name, 'scale%s_branch2c' % name)
        x = b1 + b2
        return tf.nn.relu(x, name='relu')
def add_to_regularization_and_summary(var):
    """Log a histogram of `var` and add its L2 loss to the "reg_loss" collection.

    No-op when var is None (e.g. a variable without a gradient).
    """
    if var is not None:
        # tf.histogram_summary was removed in TF 1.0; tf.summary.histogram is
        # the replacement (available since TF 0.12, which this file requires
        # anyway for tf.contrib.layers.batch_norm).
        tf.summary.histogram(var.op.name, var)
        tf.add_to_collection("reg_loss", tf.nn.l2_loss(var))
def add_activation_summary(var):
    """Record histogram and sparsity (zero-fraction) summaries for an activation.

    Tags are derived from the op name: "<name>/activation" and "<name>/sparsity".
    """
    # tf.histogram_summary / tf.scalar_summary were removed in TF 1.0;
    # tf.summary.histogram / tf.summary.scalar are the replacements.
    tf.summary.histogram(var.op.name + "/activation", var)
    tf.summary.scalar(var.op.name + "/sparsity", tf.nn.zero_fraction(var))
def add_gradient_summary(grad, var):
    """Record a histogram of `grad`, tagged with the variable's name.

    No-op when grad is None (variable not involved in the loss).
    """
    if grad is not None:
        # tf.histogram_summary was removed in TF 1.0; use tf.summary.histogram.
        tf.summary.histogram(var.op.name + "/gradient", grad)
| {
"content_hash": "7cb69c8086c2c8db1d1498f193e51ea4",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 120,
"avg_line_length": 36.23107569721115,
"alnum_prop": 0.590169342423576,
"repo_name": "LarsDu/DeepPixelMonster",
"id": "395913f365dd7defd69be65ca291c2b4c84f8fb8",
"size": "9094",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dpixelmonster/shekkizh_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "323977"
},
{
"name": "C++",
"bytes": "397238"
},
{
"name": "CSS",
"bytes": "145482"
},
{
"name": "HTML",
"bytes": "8407"
},
{
"name": "Python",
"bytes": "190166"
},
{
"name": "Shell",
"bytes": "41"
}
],
"symlink_target": ""
} |
"""Astronomical and physics constants.
This module complements constants defined in `astropy.constants`,
with gravitational paremeters and radii.
Note that `GM_jupiter` and `GM_neptune` are both referred to the whole planetary system gravitational parameter.
Unless otherwise specified, gravitational and mass parameters were obtained from:
* Luzum, Brian et al. “The IAU 2009 System of Astronomical Constants: The Report of the IAU Working Group on Numerical
Standards for Fundamental Astronomy.” Celestial Mechanics and Dynamical Astronomy 110.4 (2011): 293–304.
Crossref. Web. `DOI: 10.1007/s10569-011-9352-4`_
and radii were obtained from:
* Archinal, B. A. et al. “Report of the IAU Working Group on Cartographic Coordinates and Rotational Elements: 2009.”
Celestial Mechanics and Dynamical Astronomy 109.2 (2010): 101–135. Crossref. Web. `DOI: 10.1007/s10569-010-9320-4`_
.. _`DOI: 10.1007/s10569-011-9352-4`: http://dx.doi.org/10.1007/s10569-011-9352-4
.. _`DOI: 10.1007/s10569-010-9320-4`: http://dx.doi.org/10.1007/s10569-010-9320-4
"""
from astropy.constants import Constant
from astropy import time
# Reference epoch J2000.0, expressed in Barycentric Dynamical Time (TDB).
J2000 = time.Time('J2000', scale='tdb')
# Gravitational parameters (GM). Each Constant carries: name, description,
# value, unit, uncertainty, reference. The per-body comments below cite the
# underlying measurement papers; the reference field records the adopting
# IAU 2009 report (see the module docstring).
GM_sun = Constant('GM_sun', 'Heliocentric gravitational constant', 1.32712442099e20, 'm3 / (s2)', 0.0000000001e20,
                  'IAU 2009 system of astronomical constants', system='si')
GM_earth = Constant('GM_earth', 'Geocentric gravitational constant', 3.986004418e14, 'm3 / (s2)', 0.000000008e14,
                    'IAU 2009 system of astronomical constants', system='si')
# NOTE(review): the uncertainty arguments for the planetary GMs below (e.g.
# 0.91 for Mercury, 2.00 for Jupiter) appear to use a different scaling than
# the values they accompany — verify magnitudes/units against the cited papers.
# Anderson, John D. et al. “The Mass, Gravity Field, and Ephemeris of Mercury.” Icarus 71.3 (1987): 337–349.
# Crossref. Web. DOI: 10.1016/0019-1035(87)90033-9
GM_mercury = Constant('GM_mercury', 'Mercury gravitational constant', 2.203209e13, 'm3 / (s2)', 0.91,
                      'IAU 2009 system of astronomical constants', system='si')
# Konopliv, A.S., W.B. Banerdt, and W.L. Sjogren. “Venus Gravity: 180th Degree and Order Model.”
# Icarus 139.1 (1999): 3–18. Crossref. Web. DOI: 10.1006/icar.1999.6086
GM_venus = Constant('GM_venus', 'Venus gravitational constant', 3.24858592e14, 'm3 / (s2)', 0.006,
                    'IAU 2009 system of astronomical constants', system='si')
# Konopliv, Alex S. et al. “A Global Solution for the Mars Static and Seasonal Gravity, Mars Orientation, Phobos and
# Deimos Masses, and Mars Ephemeris.” Icarus 182.1 (2006): 23–50.
# Crossref. Web. DOI: 10.1016/j.icarus.2005.12.025
GM_mars = Constant('GM_mars', 'Mars gravitational constant', 4.282837440e13, 'm3 / (s2)', 0.00028,
                   'IAU 2009 system of astronomical constants', system='si')
# Jacobson, R. A. et al. “A comprehensive orbit reconstruction for the galileo prime mission in the JS200 system.”
# The Journal of the Astronautical Sciences 48.4 (2000): 495–516.
# Crossref. Web.
# Whole Jovian *system* GM (planet plus satellites) — see module docstring.
GM_jupiter = Constant('GM_jupiter', 'Jovian system gravitational constant', 1.2671276253e17, 'm3 / (s2)', 2.00,
                      'IAU 2009 system of astronomical constants', system='si')
# Jacobson, R. A. et al. “The Gravity Field of the Saturnian System from Satellite Observations and Spacecraft
# Tracking Data.” The Astronomical Journal 132.6 (2006): 2520–2526.
# Crossref. Web. DOI: 10.1086/508812
GM_saturn = Constant('GM_saturn', 'Saturn gravitational constant', 3.79312077e16, 'm3 / (s2)', 1.1,
                     'IAU 2009 system of astronomical constants', system='si')
# Jacobson, R. A. et al. “The Masses of Uranus and Its Major Satellites from Voyager Tracking Data and Earth-Based
# Uranian Satellite Data.” The Astronomical Journal 103 (1992): 2068.
# Crossref. Web. DOI: 10.1086/116211
GM_uranus = Constant('GM_uranus', 'Uranus gravitational constant', 5.7939393e15, 'm3 / (s2)', 13.0,
                     'IAU 2009 system of astronomical constants', system='si')
# Jacobson, R. A. “THE ORBITS OF THE NEPTUNIAN SATELLITES AND THE ORIENTATION OF THE POLE OF NEPTUNE.”
# The Astronomical Journal 137.5 (2009): 4322–4329. Crossref. Web. DOI:
# 10.1088/0004-6256/137/5/4322
# Whole Neptunian *system* GM (planet plus satellites) — see module docstring.
GM_neptune = Constant('GM_neptune', 'Neptunian system gravitational constant', 6.836527100580397e15, 'm3 / (s2)', 10.0,
                      'IAU 2009 system of astronomical constants', system='si')
# Tholen, David J. et al. “MASSES OF NIX AND HYDRA.” The Astronomical Journal 135.3 (2008): 777–784. Crossref. Web.
# DOI: 10.1088/0004-6256/135/3/777
GM_pluto = Constant('GM_pluto', 'Pluto gravitational constant', 8.703e11, 'm3 / (s2)', 3.7,
                    'IAU 2009 system of astronomical constants', system='si')
# Lemoine, Frank G. et al. “High-Degree Gravity Models from GRAIL Primary Mission Data.”
# Journal of Geophysical Research: Planets 118.8 (2013): 1676–1698.
# Crossref. Web. DOI: 10.1002/jgre.20118
GM_moon = Constant('GM_moon', 'Moon gravitational constant', 4.90279981e12, 'm3 / (s2)', 0.00000774,
                   'Journal of Geophysical Research: Planets 118.8 (2013)', system='si')
# Equatorial (except Pluto: effective) radii, from the IAU WG on Cartographic
# Coordinates and Rotational Elements 2009 report (see module docstring).
R_sun = Constant('R_sun', 'Sun equatorial radius', 6.96000e8, 'm', 0,
                 'IAU Working Group on Cartographic Coordinates and Rotational Elements: 2009', system='si')
R_earth = Constant('R_earth', 'Earth equatorial radius', 6.3781366e6, 'm', 0.0001,
                   'IAU Working Group on Cartographic Coordinates and Rotational Elements: 2009', system='si')
R_mercury = Constant('R_mercury', 'Mercury equatorial radius', 2.4397e6, 'm', 1.0,
                     'IAU Working Group on Cartographic Coordinates and Rotational Elements: 2009', system='si')
R_venus = Constant('R_venus', 'Venus equatorial radius', 6.0518e6, 'm', 1.0,
                   'IAU Working Group on Cartographic Coordinates and Rotational Elements: 2009', system='si')
R_mars = Constant('R_mars', 'Mars equatorial radius', 3.39619e6, 'm', 0.1,
                  'IAU Working Group on Cartographic Coordinates and Rotational Elements: 2009', system='si')
R_jupiter = Constant('R_jupiter', 'Jupiter equatorial radius', 7.1492e7, 'm', 4,
                     'IAU Working Group on Cartographic Coordinates and Rotational Elements: 2009', system='si')
R_saturn = Constant('R_saturn', 'Saturn equatorial radius', 6.0268e7, 'm', 4,
                    'IAU Working Group on Cartographic Coordinates and Rotational Elements: 2009', system='si')
R_uranus = Constant('R_uranus', 'Uranus equatorial radius', 2.5559e7, 'm', 4,
                    'IAU Working Group on Cartographic Coordinates and Rotational Elements: 2009', system='si')
R_neptune = Constant('R_neptune', 'Neptune equatorial radius', 2.4764e7, 'm', 15,
                     'IAU Working Group on Cartographic Coordinates and Rotational Elements: 2009', system='si')
R_pluto = Constant('R_pluto', 'Pluto effective radius', 1.195e6, 'm', 5,
                   'IAU Working Group on Cartographic Coordinates and Rotational Elements: 2009', system='si')
R_moon = Constant('R_moon', 'Moon equatorial radius', 1.7374e6, 'm', 1,
                  'IAU Working Group on Cartographic Coordinates and Rotational Elements: 2009', system='si')
| {
"content_hash": "f9d4cb2398908db16a2cac18687c660f",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 119,
"avg_line_length": 59.23529411764706,
"alnum_prop": 0.6876152645765357,
"repo_name": "anhiga/poliastro",
"id": "dffc2ce80bb55c41417a26ba11233d5a1f72c6cd",
"size": "7113",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/poliastro/constants.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3053"
},
{
"name": "Jupyter Notebook",
"bytes": "4110"
},
{
"name": "Python",
"bytes": "180988"
},
{
"name": "Shell",
"bytes": "150"
}
],
"symlink_target": ""
} |
"""Tests for TPU outside compilation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from absl.testing import parameterized
import numpy as np
from tensorboard.plugins.histogram import summary_v2 as histogram_summary_v2
from tensorboard.plugins.scalar import summary_v2 as scalar_summary_v2
from tensorflow.core.util import event_pb2
from tensorflow.python.distribute import tpu_strategy as tpu_lib
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.eager import def_function
from tensorflow.python.eager import remote
from tensorflow.python.eager import test
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import summary_ops_v2 as summary
from tensorflow.python.platform import flags
from tensorflow.python.platform import gfile
from tensorflow.python.tpu import functional as tpu_functional
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_strategy_util
from tensorflow.python.tpu.ops import tpu_ops
FLAGS = flags.FLAGS
flags.DEFINE_string("tpu", "", "Name of TPU to connect to.")
flags.DEFINE_string("project", None, "Name of GCP project with TPU.")
flags.DEFINE_string("zone", None, "Name of GCP zone with TPU.")
def get_tpu_cluster_resolver():
  """Build a TPUClusterResolver from the --tpu/--zone/--project flags."""
  return tpu_cluster_resolver.TPUClusterResolver(
      tpu=FLAGS.tpu,
      zone=FLAGS.zone,
      project=FLAGS.project,
  )
def get_tpu_strategy():
  """Connect to the flag-configured TPU cluster and return a TPUStrategyV2."""
  cluster = get_tpu_cluster_resolver()
  remote.connect_to_cluster(cluster)
  tpu_strategy_util.initialize_tpu_system(cluster)
  return tpu_lib.TPUStrategyV2(cluster)
def computation_with_string_ops(x):
  """Format x into the string "1<x>" and parse it back as a number.

  String ops are unsupported on TPU, so this forces host execution.
  """
  formatted = string_ops.string_format("1{}", x)
  return string_ops.string_to_number(formatted)
def _events_from_logdir(test_case, logdir):
  """Reads summary events from the single record file in `logdir`."""
  test_case.assertTrue(gfile.Exists(logdir))
  files = gfile.ListDirectory(logdir)
  test_case.assertLen(files, 1)
  record_path = os.path.join(logdir, files[0])
  events = []
  for raw_record in tf_record.tf_record_iterator(record_path):
    event = event_pb2.Event()
    event.ParseFromString(raw_record)
    events.append(event)
  return events
def _rewrite_func_wrapper(tf_func):
  """Wrap `tf_func` so that it is executed through tpu.rewrite."""
  def tpu_fn(*args, **kwargs):
    # tpu.rewrite only accepts a flat list of tensors, so positional and
    # keyword arguments are flattened into a single list.
    flat_inputs = list(args) + list(kwargs.values())
    concrete = tf_func.get_concrete_function(*flat_inputs)
    return tpu.rewrite(concrete.__call__, flat_inputs)
  return def_function.function(tpu_fn)
def _tpu_partitioned_call_wrapper(tf_func):
  """Wrap a tensorflow Function with TPUPartitionedCall."""
  def inner_func(*args, **kwargs):
    concrete = tf_func.get_concrete_function(*args, **kwargs)
    # TPUPartitionedCall only accepts a flat list of tensors: positional args,
    # then the flattened keyword args, then the function's captured inputs.
    op_args = list(args) + list(kwargs.values()) + list(concrete.captured_inputs)
    output_types = [o.type for o in concrete.function_def.signature.output_arg]
    return tpu_functional.TPUPartitionedCall(
        args=op_args,
        device_ordinal=tpu_ops.tpu_ordinal_selector(),
        Tout=output_types,
        f=concrete)
  return def_function.function(inner_func)
class TpuOutsideCompilationTest(test.TestCase, parameterized.TestCase):
  """Tests for explicit tpu.outside_compilation calls inside TPU functions."""
  def setUp(self):
    super(TpuOutsideCompilationTest, self).setUp()
    # Soft placement off: only code wrapped in tpu.outside_compilation may
    # run on the host in these tests.
    config.set_soft_device_placement(False)
  def testHostNoInput(self):
    """Host call with no inputs or outputs runs beside the TPU computation."""
    strategy = get_tpu_strategy()
    def outside_fn():
      logging_ops.print_v2("Outside compiled")
    @def_function.function
    def train_step():
      def tpu_fn(x):
        x2 = x + 5.0
        tpu.outside_compilation(outside_fn)
        return x2 + 5.0
      return strategy.run(tpu_fn, args=(25.0,))
    # 25 + 5 + 5 = 35 on every replica.
    self.assertAllEqual(
        strategy.experimental_local_results(train_step()),
        constant_op.constant(35., shape=(strategy.num_replicas_in_sync)))
  def testHostInputOnly(self):
    """Host call receives a TPU-computed value but returns nothing."""
    strategy = get_tpu_strategy()
    def outside_fn(x):
      logging_ops.print_v2("Outside compiled", x)
    @def_function.function
    def train_step():
      def tpu_fn(x):
        x2 = x + 5.0
        tpu.outside_compilation(outside_fn, x2)
        return x2 + 5.0
      return strategy.run(tpu_fn, args=(25.0,))
    self.assertAllEqual(
        strategy.experimental_local_results(train_step()),
        constant_op.constant(35., shape=(strategy.num_replicas_in_sync)))
  def testHostInputOutput(self):
    """Host call both receives and returns a value."""
    strategy = get_tpu_strategy()
    def outside_fn(x):
      logging_ops.print_v2("Outside compiled", x)
      return x + 6.0
    @def_function.function
    def train_step():
      def tpu_fn(x):
        x2 = x + 5.0
        output = tpu.outside_compilation(outside_fn, x2)
        return output
      return strategy.run(tpu_fn, args=(25.0,))
    # (25 + 5) + 6 = 36.
    self.assertAllEqual(
        strategy.experimental_local_results(train_step()),
        constant_op.constant(36., shape=(strategy.num_replicas_in_sync)))
  def testHostMultipleInputs(self):
    """Host call with multiple tensor inputs and multiple outputs."""
    strategy = get_tpu_strategy()
    val0 = np.arange(6).reshape((2, 3)).astype(np.float32)
    val1 = np.arange(6).reshape((3, 2)).astype(np.float32)
    def outside_fn(arg0, arg1):
      tmp = array_ops.reshape(arg1, array_ops.shape(arg0))
      ret0 = arg0 + tmp
      ret1 = math_ops.matmul(arg0, arg1)
      ret2 = array_ops.concat([arg0, tmp], 0)
      return ret0, ret1, ret2
    @def_function.function
    def train_step():
      def tpu_fn(x, y):
        a = x + 7.0
        b = y * 2.0
        c, d, e = tpu.outside_compilation(outside_fn, a, b)
        return (math_ops.reduce_max(c) + math_ops.reduce_min(d) +
                math_ops.reduce_sum(e))
      return strategy.run(tpu_fn, args=(val0, val1))
    self.assertAllEqual(
        strategy.experimental_local_results(train_step()),
        constant_op.constant(213., shape=(strategy.num_replicas_in_sync)))
  def testMultipleClusters(self):
    """Two separate outside_compilation clusters in one TPU function."""
    strategy = get_tpu_strategy()
    def outside_fn1(x):
      logging_ops.print_v2("Outside compiled", x)
      return x + 6.0
    def outside_fn2(x):
      logging_ops.print_v2("Outside compiled", x)
      return x - 18.0
    @def_function.function
    def train_step():
      def tpu_fn(x):
        x2 = x + 5.0
        output1 = tpu.outside_compilation(outside_fn1, x2)
        x3 = output1 + 3.0
        output2 = tpu.outside_compilation(outside_fn2, x3)
        return output2
      return strategy.run(tpu_fn, args=(25.0,))
    # ((25 + 5) + 6 + 3) - 18 = 21.
    self.assertAllEqual(
        strategy.experimental_local_results(train_step()),
        constant_op.constant(21., shape=(strategy.num_replicas_in_sync)))
  @parameterized.parameters((True), (False))
  def testOutsideCompilationControlFlowIf(self, take_true_branch):
    """outside_compilation inside one branch of a TPU-side conditional."""
    strategy = get_tpu_strategy()
    def outside_fn(x):
      logging_ops.print_v2("Outside compiled", x)
      return x + 6.0
    input_value = 51.0 if take_true_branch else 25.0
    @def_function.function
    def train_step():
      def tpu_fn(x):
        x2 = x + 5.0
        if x < 50.0:
          return tpu.outside_compilation(outside_fn, x2)
        else:
          return x2
      return strategy.run(tpu_fn, args=(input_value,))
    output_value = 36.0
    if take_true_branch:
      output_value = 56.0
    self.assertAllEqual(
        strategy.experimental_local_results(train_step()),
        constant_op.constant(
            output_value, shape=(strategy.num_replicas_in_sync)))
  def testOutsideCompilationControlFlowWhile(self):
    """outside_compilation inside a TPU-side while loop body."""
    strategy = get_tpu_strategy()
    def outside_fn(x):
      logging_ops.print_v2("Outside compiled", x)
      return x + 6.0
    @def_function.function
    def train_step():
      def tpu_fn(x):
        x2 = x + 5.0
        while x2 < 50.0:
          x2 = tpu.outside_compilation(outside_fn, x2)
        return x2 + 4.0
      return strategy.run(tpu_fn, args=(25.0,))
    # 30 -> 36 -> 42 -> 48 -> 54, then + 4 = 58.
    self.assertAllEqual(
        strategy.experimental_local_results(train_step()),
        constant_op.constant(58., shape=(strategy.num_replicas_in_sync)))
  def testOutsideCompilationHostControlFlow(self):
    """Tests that control flow on host for outside_compilation works."""
    strategy = get_tpu_strategy()
    def outside_fn(x):
      # Python-level loop: unrolled on the host, not a TPU while loop.
      n = 0
      while n < 4:
        x = x + 6.0
        n = n + 1
      return x
    @def_function.function
    def train_step():
      def tpu_fn(x):
        x2 = x + 5.0
        x2 = tpu.outside_compilation(outside_fn, x2)
        return x2 + 4.0
      return strategy.run(tpu_fn, args=(25.0,))
    self.assertAllEqual(
        strategy.experimental_local_results(train_step()),
        constant_op.constant(58., shape=(strategy.num_replicas_in_sync)))
  def testSummary(self):
    """Scalar summaries written from explicit outside_compilation calls."""
    strategy = get_tpu_strategy()
    def host_computation(x):
      scalar_summary_v2.scalar("x", x, step=0)
      return x * 2.0
    @def_function.function
    def step():
      def computation(x):
        x = x + 1.0
        # Two consecutive host calls on the same input; the second result
        # overwrites the first — presumably exercising repeated transfers.
        y = tpu.outside_compilation(host_computation, x)
        y = tpu.outside_compilation(host_computation, x)
        return y + 1.0
      return strategy.run(computation, args=(2.0,))
    summary_writer = summary.create_file_writer(
        os.path.join(os.getenv("TEST_TMPDIR", "/tmp")), flush_millis=10000)
    with summary_writer.as_default(), summary.always_record_summaries():
      self.assertAllEqual(
          strategy.experimental_local_results(step()),
          constant_op.constant(7., shape=(strategy.num_replicas_in_sync)))
  @parameterized.parameters((True), (False))
  def testSummaryInCond(self, take_true_branch):
    """Summaries from outside_compilation inside a conditional branch."""
    strategy = get_tpu_strategy()
    def host_computation(x):
      scalar_summary_v2.scalar("x", x, step=0)
      return x * 2.0
    @def_function.function
    def step(take_true_branch):
      def computation(x):
        x = x + 1.0
        if x < 5.0:
          # Duplicate host calls, as in testSummary.
          y = tpu.outside_compilation(host_computation, x)
          y = tpu.outside_compilation(host_computation, x)
          x = y
        return x + 1.0
      if take_true_branch:
        return strategy.run(computation, args=(2.0,))
      else:
        return strategy.run(computation, args=(10.0,))
    summary_writer = summary.create_file_writer(
        os.path.join(os.getenv("TEST_TMPDIR", "/tmp")), flush_millis=10000)
    output_value = 12.
    if take_true_branch:
      output_value = 7.
    with summary_writer.as_default(), summary.always_record_summaries():
      self.assertAllEqual(
          strategy.experimental_local_results(step(take_true_branch)),
          constant_op.constant(
              output_value, shape=(strategy.num_replicas_in_sync)))
  def testSummaryInWhile(self):
    """Summaries from outside_compilation inside a while loop."""
    strategy = get_tpu_strategy()
    def host_computation(x):
      scalar_summary_v2.scalar("x", x, step=0)
      return x * 2.0
    @def_function.function
    def step():
      def computation(x):
        n = 0
        while n < 3:
          x = x + 1.0
          # Duplicate host calls, as in testSummary.
          y = tpu.outside_compilation(host_computation, x)
          y = tpu.outside_compilation(host_computation, x)
          x = y
          n = n + 1
        return y + 1.0
      return strategy.run(computation, args=(2.0,))
    summary_writer = summary.create_file_writer(
        os.path.join(os.getenv("TEST_TMPDIR", "/tmp")), flush_millis=10000)
    with summary_writer.as_default(), summary.always_record_summaries():
      self.assertAllEqual(
          strategy.experimental_local_results(step()),
          constant_op.constant(31., shape=(strategy.num_replicas_in_sync)))
  def testOutsideCompilationAtHeadAndTail(self):
    """Tests that outside_compilation at head/tail of TPU computation works."""
    strategy = get_tpu_strategy()
    def host_computation(x):
      return x * 2.0
    @def_function.function
    def train_step():
      def computation(x):
        w = tpu.outside_compilation(host_computation, x)
        y = w + 1.0
        z = tpu.outside_compilation(host_computation, y)
        return z + 5.0
      return strategy.run(computation, args=(2.0,))
    # ((2*2) + 1) * 2 + 5 = 15.
    self.assertAllEqual(
        strategy.experimental_local_results(train_step()),
        constant_op.constant(15., shape=(strategy.num_replicas_in_sync)))
  def testGradientAcrossOutsideCompilation(self):
    """Tests compiled gradients can contain host computations."""
    strategy = get_tpu_strategy()
    def host_computation(a):
      b = a * a
      c = b * b
      return c
    @def_function.function
    def train_step():
      def computation(x, y):
        a = x + 7.0
        b = tpu.outside_compilation(host_computation, a)
        c = b * y
        d = gradients_impl.gradients(
            [c], [x], colocate_gradients_with_ops=True)[0]
        return d
      return strategy.run(computation, args=(2.0, 3.0))
    self.assertAllEqual(
        strategy.experimental_local_results(train_step()),
        constant_op.constant(8748., shape=(strategy.num_replicas_in_sync)))
  def testGradientOfGradientAcrossOutsideCompilation(self):
    """Tests compiled gradients of gradients can contain host computations."""
    strategy = get_tpu_strategy()
    def host_computation(a):
      b = a * a
      c = b * b
      return c
    @def_function.function
    def train_step():
      def computation(x, y):
        a = x + 7.0
        b = tpu.outside_compilation(host_computation, a)
        c = b * y
        d = gradients_impl.gradients(
            [c], [x], colocate_gradients_with_ops=True)[0]
        e = gradients_impl.gradients(
            [d], [x], colocate_gradients_with_ops=True)[0]
        return e
      return strategy.run(computation, args=(2.0, 3.0))
    self.assertAllEqual(
        strategy.experimental_local_results(train_step()),
        constant_op.constant(2916., shape=(strategy.num_replicas_in_sync)))
  def testColocateGradientWithOutsideCompiledOp(self):
    """Gradient ops of an outside-compiled op carry the same outside-compilation attribute."""
    strategy = get_tpu_strategy()
    @def_function.function
    def train_step():
      @def_function.function
      def tpu_fn(x):
        x1 = tpu.outside_compilation(math_ops.sqrt, x)
        grad = gradients_impl.gradients([x1], [x],
                                        colocate_gradients_with_ops=True)[0]
        sqrt = [
            op for op in ops.get_default_graph().get_operations()
            if op.type == "Sqrt"
        ][0]
        sqrt_grad = [
            op for op in ops.get_default_graph().get_operations()
            if op.type == "SqrtGrad"
        ][0]
        # Inspects the private outside-compilation cluster attribute: the
        # gradient op must be assigned to the forward op's cluster.
        assert sqrt.get_attr(tpu._OUTSIDE_COMPILATION_ATTR) == b"0"
        assert (sqrt_grad.get_attr(
            tpu._OUTSIDE_COMPILATION_ATTR) == b"0.gradients/uid")
        return grad
      return strategy.run(tpu_fn, args=(25.0,))
    # d(sqrt(x))/dx at x=25 is 1/(2*5) = 0.1.
    self.assertAllEqual(
        strategy.experimental_local_results(train_step()),
        constant_op.constant(.1, shape=(strategy.num_replicas_in_sync)))
class OutsideCompilationOnUnsupportedOpTest(test.TestCase,
parameterized.TestCase):
  def setUp(self):
    super(OutsideCompilationOnUnsupportedOpTest, self).setUp()
    # Soft placement on: ops unsupported on TPU (string ops, summaries, ...)
    # are automatically outside-compiled onto the host.
    config.set_soft_device_placement(True)
def testStringOpWithManualOutsideCompilation(self):
strategy = get_tpu_strategy()
@def_function.function
def train_step(x):
def computation(x):
return tpu.outside_compilation(computation_with_string_ops, x)
return strategy.run(computation, args=(x,))
self.assertAllEqual(
strategy.experimental_local_results(train_step(0)),
constant_op.constant(10, shape=(strategy.num_replicas_in_sync)))
def testStringOpWithAutoOutsideCompilation(self):
strategy = get_tpu_strategy()
@def_function.function
def train_step(x):
def computation(x):
return computation_with_string_ops(x)
return strategy.run(computation, args=(x,))
self.assertAllEqual(
strategy.experimental_local_results(train_step(0)),
constant_op.constant(10, shape=(strategy.num_replicas_in_sync)))
def testSummaryWithAutoOutsideCompilation(self):
strategy = get_tpu_strategy()
def host_computation(x):
scalar_summary_v2.scalar("x", x, step=0)
return x * 2.0
@def_function.function
def step():
def computation(x):
x = x + 1.0
y = host_computation(x)
return y + 1.0
return strategy.run(computation, args=(2.0,))
logdir = tempfile.mkdtemp()
summary_writer = summary.create_file_writer(logdir, flush_millis=10000)
with summary_writer.as_default(), summary.always_record_summaries():
self.assertAllEqual(
strategy.experimental_local_results(step()),
constant_op.constant(7., shape=(strategy.num_replicas_in_sync)))
events = _events_from_logdir(self, logdir)
# There will be 2 entries: 1 summary file header entry, and 1 entry
# written by host.
self.assertLen(events, 2)
self.assertEqual(events[1].summary.value[0].tag, "x")
def testNestedFunctionScalarSummary(self):
strategy = get_tpu_strategy()
def host_computation(x):
scalar_summary_v2.scalar("x", x, step=0)
return x * 2.0
@def_function.function
def step():
@def_function.function
def computation(x):
x = x + 1.0
y = host_computation(x)
return y + 1.0
return strategy.run(computation, args=(2.0,))
logdir = tempfile.mkdtemp()
summary_writer = summary.create_file_writer(logdir, flush_millis=10000)
with summary_writer.as_default(), summary.always_record_summaries():
self.assertAllEqual(
strategy.experimental_local_results(step()),
constant_op.constant(7., shape=(strategy.num_replicas_in_sync)))
events = _events_from_logdir(self, logdir)
# There will be 2 entries: 1 summary file header entry, and 1 entry
# written by host.
self.assertLen(events, 2)
self.assertEqual(events[1].summary.value[0].tag, "x")
def testHistogramSummaryWithAutoOutsideCompilation(self):
strategy = get_tpu_strategy()
def host_computation(x):
histogram_summary_v2.histogram("x", x, step=0)
return x * 2.0
@def_function.function
def step():
def computation(x):
x = x + 1.0
y = host_computation(x)
return y + 1.0
return strategy.run(computation, args=(2.0,))
logdir = tempfile.mkdtemp()
summary_writer = summary.create_file_writer(logdir, flush_millis=10000)
with summary_writer.as_default(), summary.always_record_summaries():
self.assertAllEqual(
strategy.experimental_local_results(step()),
constant_op.constant(7., shape=(strategy.num_replicas_in_sync)))
events = _events_from_logdir(self, logdir)
# There will be 2 entries: 1 summary file header entry, and 1 entry
# written by host.
self.assertLen(events, 2)
self.assertEqual(events[1].summary.value[0].tag, "x")
  @parameterized.parameters((True), (False))
  def testSummaryControlFlowIfWithAutoOutsideCompilation(
      self, take_true_branch):
    """Summary op inside a conditional branch of the replicated computation.

    Runs the computation with an input that either takes (2.0) or skips
    (10.0) the `x < 5` branch containing the summary op, and verifies the
    event file is only written when the branch executed.
    """
    strategy = get_tpu_strategy()

    @def_function.function
    def step():

      def computation(x):
        x = x + 1.0
        if x < 5:
          # Summary op lives inside the cond branch, hence the "cond/x" tag.
          scalar_summary_v2.scalar("x", x, step=0)
          x = x * 2.0
        return x + 1.0

      if take_true_branch:
        return strategy.run(computation, args=(2.0,))
      else:
        return strategy.run(computation, args=(10.0,))

    logdir = tempfile.mkdtemp()
    summary_writer = summary.create_file_writer(logdir, flush_millis=10000)
    # True branch: (2+1)*2 + 1 == 7; false branch: 10 + 1 + 1 == 12.
    output_value = 12.
    if take_true_branch:
      output_value = 7.
    with summary_writer.as_default(), summary.always_record_summaries():
      self.assertAllEqual(
          strategy.experimental_local_results(step()),
          constant_op.constant(
              output_value, shape=(strategy.num_replicas_in_sync)))
    if take_true_branch:
      events = _events_from_logdir(self, logdir)
      # There will be 2 entries: 1 summary file header entry, and 1 entry
      # written by host.
      #
      self.assertLen(events, 2)
      self.assertEqual(events[1].summary.value[0].tag, "cond/x")
  @test_util.disable_mlir_bridge(
      "TODO(b/168493455): Reenable this test once deadlock resolved."
  )
  def testAutoOutsideCompilationWithFunctionalNodes(self):
    """Uncompilable string ops inside tf.cond branches of a replicated step.

    Both cond branches call `computation_with_string_ops` (defined earlier
    in this file), which cannot run on TPU and must be extracted.
    """
    strategy = get_tpu_strategy()

    @def_function.function
    def train_step(a, b):

      def fn(a, b):
        fn1 = lambda: computation_with_string_ops(a * 100)
        fn2 = lambda: computation_with_string_ops(a)
        pred = math_ops.greater_equal(a, b)
        result = array_ops.identity(
            control_flow_ops.cond(pred, fn1, fn2),
            name="uncompilable_control_flow")
        return result

      return strategy.run(fn, args=(a, b))

    # a=0.0 >= b=-1.0, so the fn1 branch (a * 100) runs. The expected value
    # of 10 depends on computation_with_string_ops' definition elsewhere in
    # this file.
    self.assertAllEqual(
        strategy.experimental_local_results(train_step(0.0, -1.0)),
        constant_op.constant(10, shape=(strategy.num_replicas_in_sync)))
def testRandomOpsWithAutoOutsideCompilation(self):
strategy = get_tpu_strategy()
@def_function.function
def train_step():
def computation():
return random_ops.random_normal(shape=[1, 2, 3])
return strategy.run(computation, args=())
self.assertAllEqual(
strategy.experimental_local_results(train_step())[0].shape, [1, 2, 3])
  @test_util.disable_mlir_bridge(
      "TODO(b/167235391): Reenable this test once function calls are handled "
      "by MLIR bridge."
  )
  def testOutsideCompilationWithTPUPartitionedCallOp(self):
    """Tests that control flow with TPUPartitionedCall including outside_compilation works."""
    # NOTE(review): the return value is deliberately discarded here —
    # presumably the call is made for its TPU-initialization side effect;
    # confirm before "fixing" by assigning it.
    get_tpu_strategy()

    def host_computation(x):
      return x + 1

    @def_function.function()
    def train_step(x):
      x2 = x + 5.0
      logging_ops.print_v2(x2)
      # Explicit (non-auto) outside compilation of the host function.
      x2 = tpu.outside_compilation(host_computation, x2)
      return x2 + 4.0

    # Wrap the function for TPU rewriting and partitioned call dispatch
    # (helpers defined earlier in this file).
    tpu_fn = _rewrite_func_wrapper(train_step)
    partitioned_tpu_fn = _tpu_partitioned_call_wrapper(tpu_fn)

    concrete = partitioned_tpu_fn.get_concrete_function(
        x=tensor_spec.TensorSpec(
            shape=(1), dtype=dtypes.float32, name="input_tensor"))

    self.assertIsInstance(
        concrete(array_ops.ones((1), dtype=dtypes.float32))[0], ops.Tensor)
# Run the TF test runner only when executed directly (not on import).
if __name__ == "__main__":
  test.main()
| {
"content_hash": "8ca1c69465a8513c2b429c8fe2bda40a",
"timestamp": "",
"source": "github",
"line_count": 736,
"max_line_length": 94,
"avg_line_length": 31.214673913043477,
"alnum_prop": 0.6495168451292765,
"repo_name": "annarev/tensorflow",
"id": "11fc7d54513ecf17ddb7d417493b0840e4307172",
"size": "23663",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/tpu/tpu_outside_compilation_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1286"
},
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "341894"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "49343974"
},
{
"name": "CMake",
"bytes": "195286"
},
{
"name": "Dockerfile",
"bytes": "36386"
},
{
"name": "Go",
"bytes": "1253646"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "863222"
},
{
"name": "Jupyter Notebook",
"bytes": "2604741"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "52734"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "41289329"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "469612"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
class Solution(object):
    def inorderTraversal(self, root):
        """Iterative in-order traversal (left, node, right) of a binary tree.

        :type root: TreeNode
        :rtype: List[int]
        """
        values = []
        ancestors = []
        current = root
        while current or ancestors:
            # Descend as far left as possible, stacking each ancestor.
            while current:
                ancestors.append(current)
                current = current.left
            # Visit the deepest unvisited node, then switch to its right subtree.
            current = ancestors.pop()
            values.append(current.val)
            current = current.right
        return values
| {
"content_hash": "85d302440e6c228c6d4dc4f948ecdad8",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 37,
"avg_line_length": 22.789473684210527,
"alnum_prop": 0.44341801385681295,
"repo_name": "yehzhang/RapidTest",
"id": "f15035833d03cd7aa7bcbae18fec96a940dc222d",
"size": "604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/solutions/binary_tree_inorder_traversal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "30481"
},
{
"name": "Python",
"bytes": "102077"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class OrganizationsRequestsConfig(AppConfig):
    """Django app config for the organizations_requests application."""
    name = 'watchdog_kj_kultura.organizations_requests'
    # Human-readable name (shown e.g. in the admin); lazily translated.
    verbose_name = _("Requests to organizations system")
| {
"content_hash": "2e519b8ddcc92ff0ec6a14330829180a",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 56,
"avg_line_length": 35.857142857142854,
"alnum_prop": 0.7888446215139442,
"repo_name": "watchdogpolska/watchdog-kj-kultura",
"id": "2c9b1da13ba2f9995a55bc1ad8fd886868b393d3",
"size": "251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "watchdog_kj_kultura/organizations_requests/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "182971"
},
{
"name": "HTML",
"bytes": "93827"
},
{
"name": "JavaScript",
"bytes": "3024"
},
{
"name": "Python",
"bytes": "168593"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds bucket_id, image and image_link fields to SocialAccount."""

    dependencies = [
        ('sonetworks', '0028_auto_20170509_0231'),
    ]

    operations = [
        migrations.AddField(
            model_name='socialaccount',
            name='bucket_id',
            field=models.CharField(max_length=256, null=True),
        ),
        migrations.AddField(
            model_name='socialaccount',
            name='image',
            # NOTE(review): upload_to is a bytes literal (b'img/'), likely a
            # Python 2 artifact. Do not edit: migrations must stay stable.
            field=models.ImageField(blank=True, upload_to=b'img/'),
        ),
        migrations.AddField(
            model_name='socialaccount',
            name='image_link',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
    ]
| {
"content_hash": "43298050477aef4895ad135208073110",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 74,
"avg_line_length": 27.178571428571427,
"alnum_prop": 0.5716162943495401,
"repo_name": "semitki/semitki",
"id": "46d4041a9f2e93a76e7b982de099671e6a49f70a",
"size": "834",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/sonetworks/migrations/0029_auto_20170509_2122.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10752"
},
{
"name": "HTML",
"bytes": "84995"
},
{
"name": "JavaScript",
"bytes": "97680"
},
{
"name": "Nginx",
"bytes": "938"
},
{
"name": "Python",
"bytes": "92120"
},
{
"name": "Shell",
"bytes": "713"
}
],
"symlink_target": ""
} |
import os
import numpy as np
from panda3d.core import loadPrcFile
from panda3d.core import TextNode
from direct.task import Task
from direct.gui.OnscreenText import OnscreenText
from citysim3d.envs import SimpleQuadPanda3dEnv
from citysim3d.spaces import BoxSpace
import citysim3d.utils.transformations as tf
# Fail fast if the mandatory environment variable is absent. An explicit
# raise is used instead of `assert` because asserts are stripped when
# Python runs with -O, which would turn a clear error into a confusing
# failure inside loadPrcFile.
if "CITYSIM3D_DIR" not in os.environ:
    raise RuntimeError("CITYSIM3D_DIR environment variable must be set")
loadPrcFile(os.path.expandvars('${CITYSIM3D_DIR}/config.prc'))
def add_instructions(app, pos, msg):
    """Draw one line of on-screen help text anchored to the top-left corner.

    Args:
        app: Panda3D ShowBase-like object exposing an `a2dTopLeft` node.
        pos: vertical offset from the top of the screen (positive = lower).
        msg: the text to display.

    Returns:
        The created OnscreenText node.
    """
    return OnscreenText(text=msg, style=1, fg=(1, 1, 1, 1), scale=.05,
                        shadow=(0, 0, 0, 1), parent=app.a2dTopLeft,
                        pos=(0.08, -pos - 0.04), align=TextNode.ALeft)
def as_one(boolean):
    """Map a truthy value to 1 and a falsy value to -1."""
    return 1 if boolean else -1
class CustomSimpleQuadPanda3dEnv(SimpleQuadPanda3dEnv):
    """
    Same as SimpleQuadPanda3dEnv except that actions are directly mapped to
    the car's actions and the quad is constrained to move behind the car.
    """
    def __init__(self, *args, **kwargs):
        super(CustomSimpleQuadPanda3dEnv, self).__init__(*args, **kwargs)
        # change the car's speed limit: allow any non-negative speed
        self.car_env.speed_offset_space.low[0] = 0.0
        self.car_env.speed_offset_space.high[0] = np.inf

    @property
    def action_space(self):
        # Actions go straight to the car, so expose the car's action space.
        return self.car_env.action_space

    def step(self, action):
        # update the angle of the propellers (for rendering purposes)
        if self.prop_rpm:
            # rpm -> radians advanced this frame; keep angle in [0, 2*pi)
            self.prop_angle += (self.prop_rpm * 2 * np.pi / 60) * self.dt
            self.prop_angle -= 2 * np.pi * np.floor(self.prop_angle / (2 * np.pi))
        for quad_prop_id, quad_prop_local_node in enumerate(self.quad_prop_local_nodes):
            # Propellers 1 and 2 spin counter-clockwise, 0 and 3 clockwise.
            is_ccw = quad_prop_id in (1, 2)
            angle = self.prop_angle if is_ccw else -self.prop_angle
            quad_prop_local_node.setQuat(tuple(tf.quaternion_about_axis(angle, np.array([0, 0, 1]))))

        # The action drives the car; the quad just follows it (below).
        self.car_env.step(action)

        # set the position of the quad to be behind the car
        car_T = tf.pose_matrix(self.car_node.getQuat(), self.car_node.getPos())
        quad_pos = car_T[:3, 3] + car_T[:3, :3].dot(np.array([0., -4., 3.]) * 4)
        # set the rotation of the quad to be the rotation of the car projected so that the z-axis is up
        axis = np.cross(car_T[:3, 2], np.array([0, 0, 1]))
        angle = tf.angle_between_vectors(car_T[:3, 2], np.array([0, 0, 1]))
        if np.isclose(angle, 0.0):
            # Car's z-axis already points up: no correction needed.
            project_T = np.eye(4)
        else:
            project_T = tf.rotation_matrix(angle, axis)
        quad_T = project_T.dot(car_T)
        quad_quat = tf.quaternion_from_matrix(quad_T[:3, :3])
        # Low-pass filter: ease the quad toward the target pose each step.
        tightness = 0.1
        self.quad_node.setPosQuat(tuple((1 - tightness) * np.array(self.quad_node.getPos()) + tightness * quad_pos), tuple(quad_quat))
        return self.observe(), None, False, dict()

    def reset(self):
        """
        Same as SimpleQuadPanda3dEnv.reset except that the car's speed is
        always set to 10.0"""
        # reset to given or random state...
        super(CustomSimpleQuadPanda3dEnv, self).reset()
        state = self.car_env.get_state()
        # ... but then set the car's speed to 10.0
        state[0] = 10.0
        self.car_env.set_state(state)
        return self.observe()
def main():
    """Run the interactive car-and-quad demo with keyboard controls."""
    # actions are forward acceleration and lateral velocity
    action_space = BoxSpace(low=np.array([-1.0, -1.0]),
                            high=np.array([1.0, 1.0]))
    car_model_names = ['camaro2', 'kia_rio_blue', 'kia_rio_red',
                       'kia_rio_silver', 'kia_rio_white', 'kia_rio_yellow',
                       'mazda6', 'mitsubishi_lancer_evo', 'sport']
    env = CustomSimpleQuadPanda3dEnv(action_space,
                                     sensor_names=[],  # empty sensor_names means no observations
                                     car_model_names=car_model_names)

    num_camera_modes = 3
    # Mutable shared state between the key-event handlers and the step task.
    key_map = dict(left=False, right=False, up=False, down=False, camera_pressed=False, camera_mode=0)

    def step(task):
        """Per-frame task: translate held keys into an action and move the camera."""
        # Opposing keys cancel out (as_one maps pressed -> 1, released -> -1).
        forward_acceleration = as_one(key_map['up']) - as_one(key_map['down'])
        lateral_velocity = as_one(key_map['right']) - as_one(key_map['left'])
        action = np.array([forward_acceleration, lateral_velocity])
        env.step(action)

        # Cycle the camera mode once per 'c' key release.
        if key_map['camera_pressed']:
            key_map['camera_mode'] = (key_map['camera_mode'] + 1) % num_camera_modes
        if key_map['camera_mode'] == 0:
            # Mode 0: fixed top-down view above the car.
            env.app.cam.reparentTo(env.app.render)
            env.app.cam.setQuat(tuple(tf.quaternion_about_axis(-np.pi / 2, np.array([1, 0, 0]))))
            env.app.cam.setPos(tuple(np.array(env.car_node.getPos()) + np.array([0., 0., 100.])))
        elif key_map['camera_mode'] in (1, 2):
            # Modes 1/2: chase camera behind the car (1) or the quad (2).
            if key_map['camera_pressed']:
                # Snap instantly on the frame the mode changes...
                tightness = 1.0
            else:
                # ...then low-pass filter toward the target afterwards.
                tightness = 0.1
            if key_map['camera_mode'] == 1:
                target_node = env.car_node
                offset = np.array([0., -4., 3.]) * 3
            else:
                target_node = env.quad_node
                offset = np.array([0., -4., 3.]) * .5
            target_T = tf.pose_matrix(target_node.getQuat(), target_node.getPos())
            target_camera_pos = target_T[:3, 3] + target_T[:3, :3].dot(offset)
            env.app.cam.setPos(tuple((1 - tightness) * np.array(env.app.cam.getPos()) + tightness * target_camera_pos))
            env.app.cam.lookAt(target_node)
        else:
            # Remaining mode: camera rigidly attached to the car.
            env.app.cam.reparentTo(env.car_node)
            env.app.cam.setQuat((1, 0, 0, 0))
            env.app.cam.setPos(tuple(np.array([0, 1, 2])))  # slightly in front of the car
        key_map['camera_pressed'] = False
        return Task.cont

    env.app.taskMgr.add(step, "step")

    # On-screen help text.
    add_instructions(env.app, 0.06, "[ESC]: Quit")
    add_instructions(env.app, 0.12, "[R]: Reset environment")
    add_instructions(env.app, 0.18, "[Left Arrow]: Move car left")
    add_instructions(env.app, 0.24, "[Right Arrow]: Move car right")
    add_instructions(env.app, 0.30, "[Up Arrow]: Accelerate the car")
    add_instructions(env.app, 0.36, "[Down Arrow]: Decelerate the car")
    add_instructions(env.app, 0.42, "[C]: Toggle camera mode")
    add_instructions(env.app, 0.48, "[S]: Take screenshot")

    # Key bindings: '-up' suffix fires on key release.
    env.app.accept('r', env.reset)
    env.app.accept('arrow_left', key_map.update, [[('left', True)]])
    env.app.accept('arrow_left-up', key_map.update, [[('left', False)]])
    env.app.accept('arrow_right', key_map.update, [[('right', True)]])
    env.app.accept('arrow_right-up', key_map.update, [[('right', False)]])
    env.app.accept('arrow_up', key_map.update, [[('up', True)]])
    env.app.accept('arrow_up-up', key_map.update, [[('up', False)]])
    env.app.accept('arrow_down', key_map.update, [[('down', True)]])
    env.app.accept('arrow_down-up', key_map.update, [[('down', False)]])
    env.app.accept('c-up', key_map.update, [[('camera_pressed', True)]])
    env.app.accept('s-up', env.app.screenshot)

    env.reset()
    env.app.run()
# Launch the demo only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| {
"content_hash": "bb599f834e7752869641d6018b9e56c7",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 134,
"avg_line_length": 44.10493827160494,
"alnum_prop": 0.5899230230930721,
"repo_name": "alexlee-gk/citysim3d",
"id": "1d2d781b020de8976f6d797d815cdf85a769cf2e",
"size": "7145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/interactive_demo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "172597"
}
],
"symlink_target": ""
} |
from django import forms
from imager_profile.models import ImagerProfile
class EditProfileForm(forms.ModelForm):
"""Form to edit a profile."""
def __init__(self, *args, **kwargs):
"""Form."""
super(EditProfileForm, self).__init__(*args, **kwargs)
self.fields["name"] = forms.CharField(initial=self.instance.user.first_name)
self.fields["camera_type"] = forms.CharField(initial=self.instance.user.profile.camera_type)
self.fields["personal_website"] = forms.URLField(initial=self.instance.user.profile.personal_website)
self.fields["bio"] = forms.CharField(initial=self.instance.user.profile.bio)
self.fields["travel_radius"] = forms.CharField(initial=self.instance.user.profile.travel_radius)
self.fields["phone"] = forms.CharField(initial=self.instance.user.profile.phone)
self.fields["photo_type"] = forms.CharField(initial=self.instance.user.profile.photo_type)
del self.fields["user"]
class Meta:
model = ImagerProfile
exclude = []
| {
"content_hash": "018931d31b307fea14f345afd76b9db6",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 109,
"avg_line_length": 47.63636363636363,
"alnum_prop": 0.6870229007633588,
"repo_name": "JSchatzman/django-imager",
"id": "11bd66edae45478d271e0293b158a5b231cd8357",
"size": "1048",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imagersite/imager_profile/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8947"
},
{
"name": "HTML",
"bytes": "9148"
},
{
"name": "JavaScript",
"bytes": "1325"
},
{
"name": "Python",
"bytes": "48503"
}
],
"symlink_target": ""
} |
from keras.models import Sequential
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.core import Activation
from keras.layers.core import Flatten
from keras.layers.core import Dropout
from keras.layers.core import Dense
from keras.regularizers import l2
from keras import backend as K
class AlexNet:
@staticmethod
def build(width, height, depth, classes, reg=0.0002):
# initialize the model along with the input shape to be
# "channels last" and the channels dimension itself
model = Sequential()
inputShape = (height, width, depth)
chanDim = -1
# if we are using "channels first", update the input shape
# and channels dimension
if K.image_data_format() == "channels_first":
inputShape = (depth, height, width)
chanDim = 1
# Block #1: first CONV => RELU => POOL layer set
model.add(Conv2D(96, (11, 11), strides=(4, 4),
input_shape=inputShape, padding="same",
kernel_regularizer=l2(reg)))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
model.add(Dropout(0.25))
# Block #2: second CONV => RELU => POOL layer set
model.add(Conv2D(256, (5, 5), padding="same",
kernel_regularizer=l2(reg)))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
model.add(Dropout(0.25))
# Block #3: CONV => RELU => CONV => RELU => CONV => RELU
model.add(Conv2D(384, (3, 3), padding="same",
kernel_regularizer=l2(reg)))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(Conv2D(384, (3, 3), padding="same",
kernel_regularizer=l2(reg)))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(Conv2D(256, (3, 3), padding="same",
kernel_regularizer=l2(reg)))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
model.add(Dropout(0.25))
# Block #4: first set of FC => RELU layers
model.add(Flatten())
model.add(Dense(4096, kernel_regularizer=l2(reg)))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(Dropout(0.5))
# Block #5: second set of FC => RELU layers
model.add(Dense(4096, kernel_regularizer=l2(reg)))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(Dropout(0.5))
# softmax classifier
model.add(Dense(classes, kernel_regularizer=l2(reg)))
model.add(Activation("softmax"))
# return the constructed network architecture
return model | {
"content_hash": "6dabdbe2d4fa0fc6f5831ff592d9da7b",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 60,
"avg_line_length": 34.85897435897436,
"alnum_prop": 0.7138653916881207,
"repo_name": "CyberLabs-BR/face_detect",
"id": "47219b401e0bdc1057f50b24a69a34a93bbd02cf",
"size": "2751",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyimagesearch/nn/conv/alexnet.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "141928"
},
{
"name": "Shell",
"bytes": "109"
}
],
"symlink_target": ""
} |
import asyncio
import inspect
import sys
from datetime import datetime, timezone
from pathlib import Path
from typing import List
import pytest
from typing_extensions import Annotated, TypedDict
from pydantic import BaseModel, Extra, Field, ValidationError, validate_arguments
from pydantic.decorator import ValidatedFunction
from pydantic.errors import ConfigError
# Marker for tests exercising syntax only available on Python >= 3.8
# (e.g. positional-only parameters with '/').
skip_pre_38 = pytest.mark.skipif(sys.version_info < (3, 8), reason='testing >= 3.8 behaviour only')
def test_args():
    """Positional/keyword calling conventions and the exact error payloads
    produced by @validate_arguments for bad calls."""
    @validate_arguments
    def foo(a: int, b: int):
        return f'{a}, {b}'

    # All equivalent ways of passing the two positionals succeed.
    assert foo(1, 2) == '1, 2'
    assert foo(*[1, 2]) == '1, 2'
    assert foo(*(1, 2)) == '1, 2'
    assert foo(*[1], 2) == '1, 2'

    # Missing arguments -> one "field required" error per parameter.
    with pytest.raises(ValidationError) as exc_info:
        foo()
    assert exc_info.value.errors() == [
        {'loc': ('a',), 'msg': 'field required', 'type': 'value_error.missing'},
        {'loc': ('b',), 'msg': 'field required', 'type': 'value_error.missing'},
    ]

    with pytest.raises(ValidationError) as exc_info:
        foo(1, 'x')
    assert exc_info.value.errors() == [
        {'loc': ('b',), 'msg': 'value is not a valid integer', 'type': 'type_error.integer'}
    ]

    # Too many positionals are reported under the synthetic 'args' field.
    with pytest.raises(ValidationError) as exc_info:
        foo(1, 2, 3)
    assert exc_info.value.errors() == [
        {'loc': ('args',), 'msg': '2 positional arguments expected but 3 given', 'type': 'type_error'}
    ]

    # Unknown keywords are reported under the synthetic 'kwargs' field.
    with pytest.raises(ValidationError) as exc_info:
        foo(1, 2, apple=3)
    assert exc_info.value.errors() == [
        {'loc': ('kwargs',), 'msg': "unexpected keyword argument: 'apple'", 'type': 'type_error'}
    ]

    # Positional+keyword duplicates use the 'v__duplicate_kwargs' field.
    with pytest.raises(ValidationError) as exc_info:
        foo(1, 2, a=3)
    assert exc_info.value.errors() == [
        {'loc': ('v__duplicate_kwargs',), 'msg': "multiple values for argument: 'a'", 'type': 'type_error'}
    ]

    with pytest.raises(ValidationError) as exc_info:
        foo(1, 2, a=3, b=4)
    assert exc_info.value.errors() == [
        {'loc': ('v__duplicate_kwargs',), 'msg': "multiple values for arguments: 'a', 'b'", 'type': 'type_error'}
    ]
def test_wrap():
    """The decorator preserves function metadata and exposes its internals
    (vd, raw_function, model) on the wrapped callable."""
    @validate_arguments
    def foo_bar(a: int, b: int):
        """This is the foo_bar method."""
        return f'{a}, {b}'

    # functools.wraps-style metadata is preserved.
    assert foo_bar.__doc__ == 'This is the foo_bar method.'
    assert foo_bar.__name__ == 'foo_bar'
    assert foo_bar.__module__ == 'tests.test_decorator'
    assert foo_bar.__qualname__ == 'test_wrap.<locals>.foo_bar'
    assert isinstance(foo_bar.vd, ValidatedFunction)
    assert callable(foo_bar.raw_function)
    assert foo_bar.vd.arg_mapping == {0: 'a', 1: 'b'}
    assert foo_bar.vd.positional_only_args == set()
    # A pydantic model named after the function (snake -> Pascal case) is
    # generated with synthetic fields for args/kwargs handling.
    assert issubclass(foo_bar.model, BaseModel)
    assert foo_bar.model.__fields__.keys() == {'a', 'b', 'args', 'kwargs', 'v__duplicate_kwargs'}
    assert foo_bar.model.__name__ == 'FooBar'
    assert foo_bar.model.schema()['title'] == 'FooBar'
    assert repr(inspect.signature(foo_bar)) == '<Signature (a: int, b: int)>'
def test_kwargs():
    """Keyword-only parameters: positional calls are rejected via the
    synthetic 'args' field."""
    @validate_arguments
    def foo(*, a: int, b: int):
        return a + b

    assert foo.model.__fields__.keys() == {'a', 'b', 'args', 'kwargs'}
    assert foo(a=1, b=3) == 4

    with pytest.raises(ValidationError) as exc_info:
        foo(a=1, b='x')
    assert exc_info.value.errors() == [
        {'loc': ('b',), 'msg': 'value is not a valid integer', 'type': 'type_error.integer'}
    ]

    # Positional arguments to a kw-only function: both fields missing plus
    # an 'args' error for the unexpected positionals.
    with pytest.raises(ValidationError) as exc_info:
        foo(1, 'x')
    assert exc_info.value.errors() == [
        {'loc': ('a',), 'msg': 'field required', 'type': 'value_error.missing'},
        {'loc': ('b',), 'msg': 'field required', 'type': 'value_error.missing'},
        {'loc': ('args',), 'msg': '0 positional arguments expected but 2 given', 'type': 'type_error'},
    ]


def test_untyped():
    """Unannotated parameters are accepted as-is (no coercion)."""
    @validate_arguments
    def foo(a, b, c='x', *, d='y'):
        return ', '.join(str(arg) for arg in [a, b, c, d])

    assert foo(1, 2) == '1, 2, x, y'
    assert foo(1, {'x': 2}, c='3', d='4') == "1, {'x': 2}, 3, 4"
@pytest.mark.parametrize('validated', (True, False))
def test_var_args_kwargs(validated):
    """*args/**kwargs handling is identical with and without the decorator,
    including 'args'/'kwargs' used as plain keyword names."""
    def foo(a, b, *args, d=3, **kwargs):
        return f'a={a!r}, b={b!r}, args={args!r}, d={d!r}, kwargs={kwargs!r}'

    if validated:
        foo = validate_arguments(foo)

    assert foo(1, 2) == 'a=1, b=2, args=(), d=3, kwargs={}'
    assert foo(1, 2, 3, d=4) == 'a=1, b=2, args=(3,), d=4, kwargs={}'
    assert foo(*[1, 2, 3], d=4) == 'a=1, b=2, args=(3,), d=4, kwargs={}'
    # 'args'/'kwargs' passed by keyword land in **kwargs, not in *args.
    assert foo(1, 2, args=(10, 11)) == "a=1, b=2, args=(), d=3, kwargs={'args': (10, 11)}"
    assert foo(1, 2, 3, args=(10, 11)) == "a=1, b=2, args=(3,), d=3, kwargs={'args': (10, 11)}"
    assert foo(1, 2, 3, e=10) == "a=1, b=2, args=(3,), d=3, kwargs={'e': 10}"
    assert foo(1, 2, kwargs=4) == "a=1, b=2, args=(), d=3, kwargs={'kwargs': 4}"
    assert foo(1, 2, kwargs=4, e=5) == "a=1, b=2, args=(), d=3, kwargs={'kwargs': 4, 'e': 5}"


def test_field_can_provide_factory() -> None:
    """Field(default_factory=...) works as a parameter default."""
    @validate_arguments
    def foo(a: int, b: int = Field(default_factory=lambda: 99), *args: int) -> int:
        """mypy is happy with this"""
        return a + b + sum(args)

    assert foo(3) == 102
    assert foo(1, 2, 3) == 6


def test_annotated_field_can_provide_factory() -> None:
    """Field(default_factory=...) also works inside Annotated metadata."""
    @validate_arguments
    def foo2(a: int, b: Annotated[int, Field(default_factory=lambda: 99)], *args: int) -> int:
        """mypy reports Incompatible default for argument "b" if we don't supply ANY as default"""
        return a + b + sum(args)

    assert foo2(1) == 100
@skip_pre_38
def test_positional_only(create_module):
    """Positional-only parameters ('/') reject keyword use via the
    synthetic 'v__positional_only' field. The function is defined in a
    generated module because '/' is a SyntaxError before 3.8."""
    module = create_module(
        # language=Python
        """
from pydantic import validate_arguments

@validate_arguments
def foo(a, b, /, c=None):
    return f'{a}, {b}, {c}'
"""
    )
    assert module.foo(1, 2) == '1, 2, None'
    assert module.foo(1, 2, 44) == '1, 2, 44'
    assert module.foo(1, 2, c=44) == '1, 2, 44'
    with pytest.raises(ValidationError) as exc_info:
        module.foo(1, b=2)
    assert exc_info.value.errors() == [
        {
            'loc': ('v__positional_only',),
            'msg': "positional-only argument passed as keyword argument: 'b'",
            'type': 'type_error',
        }
    ]
    with pytest.raises(ValidationError) as exc_info:
        module.foo(a=1, b=2)
    assert exc_info.value.errors() == [
        {
            'loc': ('v__positional_only',),
            'msg': "positional-only arguments passed as keyword arguments: 'a', 'b'",
            'type': 'type_error',
        }
    ]
def test_args_name():
    """Parameters literally named 'args'/'kwargs' force the decorator to
    fall back to the 'v__args'/'v__kwargs' synthetic field names."""
    @validate_arguments
    def foo(args: int, kwargs: int):
        return f'args={args!r}, kwargs={kwargs!r}'

    assert foo.model.__fields__.keys() == {'args', 'kwargs', 'v__args', 'v__kwargs', 'v__duplicate_kwargs'}
    assert foo(1, 2) == 'args=1, kwargs=2'

    with pytest.raises(ValidationError) as exc_info:
        foo(1, 2, apple=4)
    assert exc_info.value.errors() == [
        {'loc': ('v__kwargs',), 'msg': "unexpected keyword argument: 'apple'", 'type': 'type_error'}
    ]

    with pytest.raises(ValidationError) as exc_info:
        foo(1, 2, apple=4, banana=5)
    assert exc_info.value.errors() == [
        {'loc': ('v__kwargs',), 'msg': "unexpected keyword arguments: 'apple', 'banana'", 'type': 'type_error'}
    ]

    with pytest.raises(ValidationError) as exc_info:
        foo(1, 2, 3)
    assert exc_info.value.errors() == [
        {'loc': ('v__args',), 'msg': '2 positional arguments expected but 3 given', 'type': 'type_error'}
    ]


def test_v_args():
    """The reserved v__* parameter names are rejected at decoration time."""
    with pytest.raises(
        ConfigError, match='"v__args", "v__kwargs", "v__positional_only" and "v__duplicate_kwargs" are not permitted'
    ):

        @validate_arguments
        def foo1(v__args: int):
            pass

    with pytest.raises(
        ConfigError, match='"v__args", "v__kwargs", "v__positional_only" and "v__duplicate_kwargs" are not permitted'
    ):

        @validate_arguments
        def foo2(v__kwargs: int):
            pass

    with pytest.raises(
        ConfigError, match='"v__args", "v__kwargs", "v__positional_only" and "v__duplicate_kwargs" are not permitted'
    ):

        @validate_arguments
        def foo3(v__positional_only: int):
            pass

    with pytest.raises(
        ConfigError, match='"v__args", "v__kwargs", "v__positional_only" and "v__duplicate_kwargs" are not permitted'
    ):

        @validate_arguments
        def foo4(v__duplicate_kwargs: int):
            pass
def test_async():
    """The decorator wraps coroutine functions; validation errors surface
    when the coroutine is awaited."""
    @validate_arguments
    async def foo(a, b):
        return f'a={a} b={b}'

    async def run():
        v = await foo(1, 2)
        assert v == 'a=1 b=2'

    loop = asyncio.get_event_loop_policy().get_event_loop()
    loop.run_until_complete(run())
    with pytest.raises(ValidationError) as exc_info:
        loop.run_until_complete(foo('x'))
    assert exc_info.value.errors() == [{'loc': ('b',), 'msg': 'field required', 'type': 'value_error.missing'}]


def test_string_annotation():
    """String (forward-reference) annotations are resolved for validation."""
    @validate_arguments
    def foo(a: 'List[int]', b: 'Path'):
        return f'a={a!r} b={b!r}'

    assert foo([1, 2, 3], '/')

    with pytest.raises(ValidationError) as exc_info:
        foo(['x'])
    assert exc_info.value.errors() == [
        {'loc': ('a', 0), 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
        {'loc': ('b',), 'msg': 'field required', 'type': 'value_error.missing'},
    ]
def test_item_method():
    """The decorator works on instance methods; `self` is passed through
    unvalidated."""
    class X:
        def __init__(self, v):
            self.v = v

        @validate_arguments
        def foo(self, a: int, b: int):
            assert self.v == a
            return f'{a}, {b}'

    x = X(4)

    assert x.foo(4, 2) == '4, 2'
    assert x.foo(*[4, 2]) == '4, 2'

    with pytest.raises(ValidationError) as exc_info:
        x.foo()
    assert exc_info.value.errors() == [
        {'loc': ('a',), 'msg': 'field required', 'type': 'value_error.missing'},
        {'loc': ('b',), 'msg': 'field required', 'type': 'value_error.missing'},
    ]


def test_class_method():
    """The decorator composes with @classmethod (applied innermost)."""
    class X:
        @classmethod
        @validate_arguments
        def foo(cls, a: int, b: int):
            assert cls == X
            return f'{a}, {b}'

    x = X()

    assert x.foo(4, 2) == '4, 2'
    assert x.foo(*[4, 2]) == '4, 2'

    with pytest.raises(ValidationError) as exc_info:
        x.foo()
    assert exc_info.value.errors() == [
        {'loc': ('a',), 'msg': 'field required', 'type': 'value_error.missing'},
        {'loc': ('b',), 'msg': 'field required', 'type': 'value_error.missing'},
    ]
def test_config_title():
    """A config dict passed to the decorator customises the model schema."""
    @validate_arguments(config=dict(title='Testing'))
    def foo(a: int, b: int):
        return f'{a}, {b}'

    assert foo(1, 2) == '1, 2'
    assert foo(1, b=2) == '1, 2'
    assert foo.model.schema()['title'] == 'Testing'


def test_config_title_cls():
    """A Config class works the same way as a config dict."""
    class Config:
        title = 'Testing'

    @validate_arguments(config=Config)
    def foo(a: int, b: int):
        return f'{a}, {b}'

    assert foo(1, 2) == '1, 2'
    assert foo(1, b=2) == '1, 2'
    assert foo.model.schema()['title'] == 'Testing'


def test_config_fields():
    """'fields' (and 'alias_generator') are rejected in the decorator config."""
    with pytest.raises(ConfigError, match='Setting the "fields" and "alias_generator" property on custom Config for @'):

        @validate_arguments(config=dict(fields={'b': 'bang'}))
        def foo(a: int, b: int):
            return f'{a}, {b}'


def test_config_arbitrary_types_allowed():
    """arbitrary_types_allowed lets parameters use non-pydantic types,
    enforced with an isinstance check."""
    class EggBox:
        def __str__(self) -> str:
            return 'EggBox()'

    @validate_arguments(config=dict(arbitrary_types_allowed=True))
    def foo(a: int, b: EggBox):
        return f'{a}, {b}'

    assert foo(1, EggBox()) == '1, EggBox()'
    with pytest.raises(ValidationError) as exc_info:
        assert foo(1, 2) == '1, 2'
    assert exc_info.value.errors() == [
        {
            'loc': ('b',),
            'msg': 'instance of EggBox expected',
            'type': 'type_error.arbitrary_type',
            'ctx': {'expected_arbitrary_type': 'EggBox'},
        },
    ]
def test_validate(mocker):
    """func.validate() checks arguments without calling the function body."""
    stub = mocker.stub(name='on_something_stub')

    @validate_arguments
    def func(s: str, count: int, *, separator: bytes = b''):
        stub(s, count, separator)

    func.validate('qwe', 2)
    with pytest.raises(ValidationError):
        func.validate(['qwe'], 2)
    # The body was never executed by validate().
    stub.assert_not_called()


def test_validate_all():
    """validate_all=True runs validation on defaults too (here coercing an
    int timestamp default into a datetime)."""
    @validate_arguments(config=dict(validate_all=True))
    def foo(dt: datetime = Field(default_factory=lambda: 946684800)):
        return dt

    assert foo() == datetime(2000, 1, 1, tzinfo=timezone.utc)
    assert foo(0) == datetime(1970, 1, 1, tzinfo=timezone.utc)


@skip_pre_38
def test_validate_all_positional(create_module):
    """Same as test_validate_all but with a positional-only parameter
    (module generated because '/' needs Python >= 3.8)."""
    module = create_module(
        # language=Python
        """
from datetime import datetime
from pydantic import Field, validate_arguments

@validate_arguments(config=dict(validate_all=True))
def foo(dt: datetime = Field(default_factory=lambda: 946684800), /):
    return dt
"""
    )

    assert module.foo() == datetime(2000, 1, 1, tzinfo=timezone.utc)
    assert module.foo(0) == datetime(1970, 1, 1, tzinfo=timezone.utc)


def test_validate_extra():
    """Extra.allow keeps unknown TypedDict keys; Extra.ignore drops them."""
    class TypedTest(TypedDict):
        y: str

    @validate_arguments(config={'extra': Extra.allow})
    def test(other: TypedTest):
        return other

    assert test(other={'y': 'b', 'z': 'a'}) == {'y': 'b', 'z': 'a'}

    @validate_arguments(config={'extra': Extra.ignore})
    def test(other: TypedTest):
        return other

    assert test(other={'y': 'b', 'z': 'a'}) == {'y': 'b'}
| {
"content_hash": "8aa9aad3ef945faf3fe0acc061e5a1e8",
"timestamp": "",
"source": "github",
"line_count": 444,
"max_line_length": 120,
"avg_line_length": 30.666666666666668,
"alnum_prop": 0.5662455934195064,
"repo_name": "samuelcolvin/pydantic",
"id": "c2503dd72f25b161d5322bd22e3c27dbe6d17c01",
"size": "13616",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_decorator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2905"
},
{
"name": "Python",
"bytes": "1140694"
},
{
"name": "Shell",
"bytes": "258"
}
],
"symlink_target": ""
} |
"""
lunaport.domain.job
~~~~~~~~~~~~~~~~~~~
DESC
"""
import pprint
# Debug pretty-printer shortcut; appears unused in this module.
pp = pprint.PrettyPrinter(indent=4).pprint
from lunaport_worker.tasks.check import reduce_test
from .. dao.test import RDBMS as dao_test
class JobFabric(object):
    """Static builder that validates job requests and launches job tasks."""

    # Top-level keys accepted in a job request body.
    req_attr_allowed = [
        'name',
        'args',
        'kwargs',
    ]
    # Job names that may be launched through this builder.
    job_names_allowed = [
        'test_reduce'
    ]
    req_attr_allowed_set = set(req_attr_allowed)

    @classmethod
    def test_reduce(cls, **kw):
        """Schedule an evaluation-only reduce task for an existing test.

        Args:
            **kw: must contain ``test_id`` (int or numeric string).

        Raises:
            ValueError: if ``test_id`` is missing or no such test exists.
        """
        # Raise ValueError instead of using `assert`: asserts are stripped
        # under `python -O`, and ValueError matches the other error paths.
        if 'test_id' not in kw:
            raise ValueError('Required parameter missing: test_id')
        t = dao_test.get_by_id(test_id=int(kw['test_id']))
        if not t:
            raise ValueError('No such test id:{}'.format(kw['test_id']))
        reduce_test.apply_async(
            args=[t.id, t.load_src, t.lunapark['t_tank_id']],
            kwargs={'eval_only': True})
        return None

    @classmethod
    def from_Flask_req(cls, r):
        """Create and launch the job described by a Flask request.

        Args:
            r: Flask request object (JSON or multipart/form-data body).

        Returns:
            Whatever the named job classmethod returns (currently None).

        Raises:
            ValueError: on unsupported mime type, undecodable body,
                unexpected params or unknown job name.
        """
        if r.mimetype == 'multipart/form-data':
            msg_rv = r.form
        elif r.mimetype == 'application/json':
            msg_rv = r.json
        else:
            raise ValueError('Unsupported mime type')
        if not msg_rv:
            raise ValueError('Can\'t deserialize request body')

        # ImmutableMultiDict to dict cast
        msg_rv = dict((k, v) for k, v in msg_rv.items())
        # Unwrap single-element lists produced by form encoding.
        # `.items()` (not the Py2-only `.iteritems()`) keeps this working
        # on both Python 2 and 3.
        for k, v in msg_rv.items():
            if isinstance(v, list) and len(v) == 1:
                msg_rv[k] = v[0]

        msg_set = set(msg_rv.keys())
        if not msg_set.issubset(cls.req_attr_allowed_set):
            err_msg = [
                'Body contains unexpected params:',
                str(list(msg_set - cls.req_attr_allowed_set))
            ]
            raise ValueError(' '.join(err_msg))
        if msg_rv.get('name') not in cls.job_names_allowed:
            raise ValueError('Unknown job name:{}'.format(msg_rv.get('name')))
        # Default to an empty kwargs dict: `**None` would raise TypeError
        # when the request omits 'kwargs'.
        return getattr(cls, msg_rv.get('name'))(**(msg_rv.get('kwargs') or {}))
| {
"content_hash": "2d38eec2a27453b2dce08e44641a80bc",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 78,
"avg_line_length": 28.076923076923077,
"alnum_prop": 0.5420091324200913,
"repo_name": "greggyNapalm/lunaport_server",
"id": "0536ff4984376631b38920d2fd35b649b2bc7d73",
"size": "2217",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lunaport_server/domain/job.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "287625"
},
{
"name": "Shell",
"bytes": "5508"
}
],
"symlink_target": ""
} |
from django_simptools.tests import AuthorizedViewTestCase
from django.core.urlresolvers import reverse
__author__ = 'Razzhivin Alexander'
__email__ = 'admin@httpbots.com'
# Django fixture set shared by the test cases in this module:
# auth users, payway accounts, and qiwi payment data.
COMMON_FIXTURES = [
    "payway_accounts_auth.json",
    "payway_accounts_accounts.json",
    "qiwi.json",
]
class QiwiViewTestCase(AuthorizedViewTestCase):
    """Authorized-view test for the Qiwi add-money page."""
    fixtures = COMMON_FIXTURES

    def test_add_money(self):
        """The add-money view must render OK and expose the Qiwi request params."""
        url = reverse("qiwi_add_money", kwargs={"invoice_uid": "1961380063"})
        response = self.client.get(url)
        # assertEqual replaces the deprecated failUnlessEqual alias.
        self.assertEqual(response.status_code, 200)
        request_params_names = ['txn_id', 'from', 'to', 'summ', 'com', 'lifetime', 'check_agt']
self.assertContainsTextItems(response, request_params_names) | {
"content_hash": "8ddc5b2359061651bf8840bd7c46e115",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 95,
"avg_line_length": 33.45454545454545,
"alnum_prop": 0.688858695652174,
"repo_name": "RANUX/django-payway",
"id": "ed743dac832d01a9e7d55cf1fc7aaca4316ce166",
"size": "760",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "payway/qiwi/tests/views.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "99640"
}
],
"symlink_target": ""
} |
"""
Utilities for LaTeX to/from Unicode Text conversion.
Main Site:
https://github.com/phfaist/pylatexenc/
"""
# Re-export the package version string under the conventional
# __version__ name, keeping the intermediate alias private.
from .version import version_str as _version_str
__version__ = _version_str
| {
"content_hash": "ec88faa4cdc8e0709578f4c45a43eb0e",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 52,
"avg_line_length": 15.153846153846153,
"alnum_prop": 0.7055837563451777,
"repo_name": "phfaist/pylatexenc",
"id": "c7d2d393dc9eba35c406400b5224106c9785ed4f",
"size": "1326",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pylatexenc/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "741941"
},
{
"name": "TeX",
"bytes": "128"
}
],
"symlink_target": ""
} |
# Three sample lists: homogeneous ints, homogeneous strings, and a mixed list.
the_count = [1, 2, 3, 4, 5]
fruits = ['apples', 'oranges', 'pears', 'apricots']
change = [1, 'pennies', 2, 'dimes', 3, 'quarters']

# This first kind of for-loop goes through a list.
# Single-argument print("...") is valid on both Python 2 (parenthesized
# expression) and Python 3 (function call) and prints identically.
for number in the_count:
    print("This is count %d" % number)

# Same as above, with strings.
for fruit in fruits:
    print("A fruit of type: %s" % fruit)

# We can go through mixed lists too; use %r since we don't know
# what kind of value each element is.
for i in change:
    print("I got %r" % i)

# We can also build lists: first start with an empty one.
elements = []

# Then use the range function to do 0 to 5 counts.
# (elements = list(range(0, 6)) would build the same list in one step.)
for i in range(0, 6):
    print("Adding %d to the list." % i)
    # append is a method that lists understand.
    elements.append(i)

# Now we can print them out too.
for i in elements:
    print("Element was: %d" % i)
| {
"content_hash": "5cb2cfad2f15d8e9a9e23a74fc549436",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 58,
"avg_line_length": 27.91176470588235,
"alnum_prop": 0.6838777660695469,
"repo_name": "liggettla/python",
"id": "686f0aaf47e8edae0e6966a94e6fa1326bae882a",
"size": "1009",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ex32.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41183"
},
{
"name": "Shell",
"bytes": "493"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.