| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, ⌀ = null) |
|---|---|---|---|---|
christiangalsterer/execbeat
|
refs/heads/master
|
vendor/github.com/elastic/beats/filebeat/tests/system/test_harvester.py
|
2
|
# coding=utf-8
from filebeat import BaseTest
import os
import codecs
import time
"""
Test Harvesters
"""
class Test(BaseTest):
def test_close_renamed(self):
"""
        Checks that a file is closed when it is renamed / rotated
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/test.log",
close_renamed="true",
scan_frequency="0.1s"
)
os.mkdir(self.working_dir + "/log/")
testfile1 = self.working_dir + "/log/test.log"
testfile2 = self.working_dir + "/log/test.log.rotated"
file = open(testfile1, 'w')
iterations1 = 5
for n in range(0, iterations1):
file.write("rotation file")
file.write("\n")
file.close()
filebeat = self.start_beat()
# Let it read the file
self.wait_until(
lambda: self.output_has(lines=iterations1), max_timeout=10)
os.rename(testfile1, testfile2)
file = open(testfile1, 'w', 0)
file.write("Hello World\n")
file.close()
        # Wait until the close_renamed message shows up
self.wait_until(
lambda: self.log_contains(
"Closing because close_renamed is enabled"),
max_timeout=15)
# Let it read the file
self.wait_until(
lambda: self.output_has(lines=iterations1 + 1), max_timeout=10)
filebeat.check_kill_and_wait()
data = self.get_registry()
# Make sure new file was picked up. As it has the same file name,
# one entry for the new and one for the old should exist
assert len(data) == 2
def test_close_removed(self):
"""
Checks that a file is closed if removed
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/test.log",
close_removed="true",
clean_removed="false",
scan_frequency="0.1s"
)
os.mkdir(self.working_dir + "/log/")
testfile1 = self.working_dir + "/log/test.log"
file = open(testfile1, 'w')
iterations1 = 5
for n in range(0, iterations1):
file.write("rotation file")
file.write("\n")
file.close()
filebeat = self.start_beat()
# Let it read the file
self.wait_until(
lambda: self.output_has(lines=iterations1), max_timeout=10)
os.remove(testfile1)
# Make sure state is written
self.wait_until(
lambda: self.log_contains_count(
"Write registry file") > 1,
max_timeout=10)
        # Wait until the close message shows up (important on Windows)
self.wait_until(
lambda: self.log_contains(
"Closing because close_removed is enabled"),
max_timeout=10)
filebeat.check_kill_and_wait()
data = self.get_registry()
# Make sure the state for the file was persisted
assert len(data) == 1
def test_close_eof(self):
"""
Checks that a file is closed if eof is reached
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/test.log",
close_eof="true",
scan_frequency="0.1s"
)
os.mkdir(self.working_dir + "/log/")
testfile1 = self.working_dir + "/log/test.log"
file = open(testfile1, 'w')
iterations1 = 5
for n in range(0, iterations1):
file.write("rotation file")
file.write("\n")
file.close()
filebeat = self.start_beat()
# Let it read the file
self.wait_until(
lambda: self.output_has(lines=iterations1), max_timeout=10)
        # Wait until the close message shows up (important on Windows)
self.wait_until(
lambda: self.log_contains(
"Closing because close_eof is enabled"),
max_timeout=15)
filebeat.check_kill_and_wait()
data = self.get_registry()
# Make sure the state for the file was persisted
assert len(data) == 1
def test_empty_line(self):
"""
Checks that no empty events are sent for an empty line but state is still updated
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/test.log",
)
os.mkdir(self.working_dir + "/log/")
logfile = self.working_dir + "/log/test.log"
filebeat = self.start_beat()
with open(logfile, 'w') as f:
f.write("Hello world\n")
# Let it read the file
self.wait_until(
lambda: self.output_has(lines=1), max_timeout=10)
with open(logfile, 'a') as f:
f.write("\n")
expectedOffset = 13
if os.name == "nt":
# Two additional newline chars
expectedOffset += 2
# Wait until offset for new line is updated
self.wait_until(
lambda: self.log_contains(
"offset: " + str(expectedOffset)),
max_timeout=15)
with open(logfile, 'a') as f:
f.write("Third line\n")
# Make sure only 2 events are written
self.wait_until(
lambda: self.output_has(lines=2), max_timeout=10)
filebeat.check_kill_and_wait()
data = self.get_registry()
# Make sure the state for the file was persisted
assert len(data) == 1
def test_empty_lines_only(self):
"""
Checks that no empty events are sent for a file with only empty lines
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/test.log",
)
os.mkdir(self.working_dir + "/log/")
logfile = self.working_dir + "/log/test.log"
filebeat = self.start_beat()
with open(logfile, 'w') as f:
f.write("\n")
f.write("\n")
f.write("\n")
expectedOffset = 3
if os.name == "nt":
            # Three additional newline chars on Windows (one \r per line)
expectedOffset += 3
# Wait until offset for new line is updated
self.wait_until(
lambda: self.log_contains(
"offset: " + str(expectedOffset)),
max_timeout=15)
        assert not os.path.isfile(self.working_dir + "/output/filebeat")
filebeat.check_kill_and_wait()
data = self.get_registry()
# Make sure the state for the file was persisted
assert len(data) == 1
def test_exceed_buffer(self):
"""
        Checks that the full line is sent even if it exceeds the harvester buffer
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/test.log",
harvester_buffer_size=10,
)
os.mkdir(self.working_dir + "/log/")
logfile = self.working_dir + "/log/test.log"
filebeat = self.start_beat()
message = "This exceeds the buffer"
with open(logfile, 'w') as f:
f.write(message + "\n")
# Wait until state is written
self.wait_until(
lambda: self.log_contains(
"Registrar states cleaned up"),
max_timeout=15)
filebeat.check_kill_and_wait()
data = self.get_registry()
assert len(data) == 1
output = self.read_output_json()
assert message == output[0]["message"]
def test_truncated_file_open(self):
"""
Checks if it is correctly detected if an open file is truncated
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/test.log",
)
os.mkdir(self.working_dir + "/log/")
logfile = self.working_dir + "/log/test.log"
message = "Hello World"
filebeat = self.start_beat()
# Write 3 lines
with open(logfile, 'w') as f:
f.write(message + "\n")
f.write(message + "\n")
f.write(message + "\n")
        # Wait until the 3 lines are read
self.wait_until(
lambda: self.output_has(lines=3),
max_timeout=10)
# Write 1 line -> truncation
with open(logfile, 'w') as f:
f.write(message + "\n")
        # Wait until the new line is read
self.wait_until(
lambda: self.output_has(lines=4),
max_timeout=10)
# Test if truncation was reported properly
self.wait_until(
lambda: self.log_contains(
"File was truncated as offset"),
max_timeout=15)
self.wait_until(
lambda: self.log_contains(
"File was truncated. Begin reading file from offset 0"),
max_timeout=15)
filebeat.check_kill_and_wait()
def test_truncated_file_closed(self):
"""
Checks if it is correctly detected if a closed file is truncated
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/test.log",
close_inactive="1s",
)
os.mkdir(self.working_dir + "/log/")
logfile = self.working_dir + "/log/test.log"
message = "Hello World"
filebeat = self.start_beat()
# Write 3 lines
with open(logfile, 'w') as f:
f.write(message + "\n")
f.write(message + "\n")
f.write(message + "\n")
        # Wait until the 3 lines are read
self.wait_until(
lambda: self.output_has(lines=3),
max_timeout=10)
# Wait until harvester is closed
self.wait_until(
lambda: self.log_contains(
"Stopping harvester for file"),
max_timeout=15)
# Write 1 line -> truncation
with open(logfile, 'w') as f:
f.write(message + "\n")
        # Wait until the new line is read
self.wait_until(
lambda: self.output_has(lines=4),
max_timeout=10)
# Test if truncation was reported properly
self.wait_until(
lambda: self.log_contains(
"Old file was truncated. Starting from the beginning"),
max_timeout=15)
filebeat.check_kill_and_wait()
def test_close_timeout(self):
"""
Checks that a file is closed after close_timeout
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/test.log",
close_timeout="1s",
scan_frequency="1s"
)
os.mkdir(self.working_dir + "/log/")
filebeat = self.start_beat()
testfile1 = self.working_dir + "/log/test.log"
file = open(testfile1, 'w')
        # Write 1000 lines with a sleep between each line to make sure it takes more than 1s to complete
iterations1 = 1000
for n in range(0, iterations1):
file.write("example data")
file.write("\n")
time.sleep(0.001)
file.close()
# Wait until harvester is closed because of ttl
self.wait_until(
lambda: self.log_contains(
"Closing harvester because close_timeout was reached"),
max_timeout=15)
filebeat.check_kill_and_wait()
data = self.get_registry()
assert len(data) == 1
# Check that not all but some lines were read. It can happen sometimes that filebeat finishes reading ...
assert self.output_lines() <= 1000
assert self.output_lines() > 0
def test_bom_utf8(self):
"""
Test utf8 log file with bom
        An additional test to guard against the BOM generation in Python being incorrect
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
)
os.mkdir(self.working_dir + "/log/")
self.copy_files(["logs/bom8.log"],
source_dir="../files",
target_dir="log")
filebeat = self.start_beat()
self.wait_until(
lambda: self.output_has(lines=7),
max_timeout=10)
        # Check that the output does not contain the BOM
output = self.read_output_json()
assert output[0]["message"] == "#Software: Microsoft Exchange Server"
filebeat.check_kill_and_wait()
def test_boms(self):
"""
Test bom log files if bom is removed properly
"""
os.mkdir(self.working_dir + "/log/")
os.mkdir(self.working_dir + "/output/")
message = "Hello World"
# Config array contains:
# filebeat encoding, python encoding name, bom
configs = [
("utf-8", "utf-8", codecs.BOM_UTF8),
("utf-16be-bom", "utf-16-be", codecs.BOM_UTF16_BE),
("utf-16le-bom", "utf-16-le", codecs.BOM_UTF16_LE),
]
for config in configs:
# Render config with specific encoding
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
encoding=config[0],
output_file_filename=config[0],
)
logfile = self.working_dir + "/log/" + config[0] + "test.log"
# Write bom to file
with codecs.open(logfile, 'wb') as file:
file.write(config[2])
# Write hello world to file
with codecs.open(logfile, 'a', config[1]) as file:
content = message + '\n'
file.write(content)
filebeat = self.start_beat(output=config[0] + ".log")
self.wait_until(
lambda: self.output_has(lines=1, output_file="output/" + config[0]),
max_timeout=10)
# Verify that output does not contain bom
output = self.read_output_json(output_file="output/" + config[0])
assert output[0]["message"] == message
filebeat.kill_and_wait()
def test_ignore_symlink(self):
"""
Test that symlinks are ignored
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/symlink.log",
)
os.mkdir(self.working_dir + "/log/")
logfile = self.working_dir + "/log/test.log"
symlink = self.working_dir + "/log/symlink.log"
if os.name == "nt":
import win32file
win32file.CreateSymbolicLink(symlink, logfile, 0)
else:
os.symlink(logfile, symlink)
with open(logfile, 'a') as file:
file.write("Hello World\n")
filebeat = self.start_beat()
# Make sure symlink is skipped
self.wait_until(
lambda: self.log_contains(
"skipped as it is a symlink"),
max_timeout=15)
filebeat.check_kill_and_wait()
def test_symlinks_enabled(self):
"""
Test if symlinks are harvested
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/symlink.log",
symlinks="true",
)
os.mkdir(self.working_dir + "/log/")
logfile = self.working_dir + "/log/test.log"
symlink = self.working_dir + "/log/symlink.log"
if os.name == "nt":
import win32file
win32file.CreateSymbolicLink(symlink, logfile, 0)
else:
os.symlink(logfile, symlink)
with open(logfile, 'a') as file:
file.write("Hello World\n")
filebeat = self.start_beat()
# Make sure content in symlink file is read
self.wait_until(
lambda: self.output_has(lines=1),
max_timeout=10)
filebeat.check_kill_and_wait()
def test_symlink_rotated(self):
"""
        Test what happens if a symlink is removed and recreated pointing to a new file
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/symlink.log",
symlinks="true",
)
os.mkdir(self.working_dir + "/log/")
logfile1 = self.working_dir + "/log/test1.log"
logfile2 = self.working_dir + "/log/test2.log"
symlink = self.working_dir + "/log/symlink.log"
if os.name == "nt":
import win32file
win32file.CreateSymbolicLink(symlink, logfile1, 0)
else:
os.symlink(logfile1, symlink)
with open(logfile1, 'a') as file:
file.write("Hello World1\n")
with open(logfile2, 'a') as file:
file.write("Hello World2\n")
filebeat = self.start_beat()
# Make sure state is written
self.wait_until(
lambda: self.log_contains_count(
"Write registry file") > 1,
max_timeout=10)
        # Make sure the symlinked file was read
self.wait_until(
lambda: self.output_has(lines=1),
max_timeout=10)
os.remove(symlink)
if os.name == "nt":
import win32file
win32file.CreateSymbolicLink(symlink, logfile2, 0)
else:
os.symlink(logfile2, symlink)
with open(logfile1, 'a') as file:
file.write("Hello World3\n")
file.write("Hello World4\n")
# Make sure new file and addition to old file were read
self.wait_until(
lambda: self.output_has(lines=4),
max_timeout=10)
filebeat.check_kill_and_wait()
# Check if two different files are in registry
data = self.get_registry()
assert len(data) == 2
def test_symlink_removed(self):
"""
        Tests that after a symlink to a file is removed, data appended to the original file is still read
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/symlink.log",
symlinks="true",
clean_removed="false"
)
os.mkdir(self.working_dir + "/log/")
logfile = self.working_dir + "/log/test.log"
symlink = self.working_dir + "/log/symlink.log"
if os.name == "nt":
import win32file
win32file.CreateSymbolicLink(symlink, logfile, 0)
else:
os.symlink(logfile, symlink)
with open(logfile, 'a') as file:
file.write("Hello World1\n")
filebeat = self.start_beat()
        # Make sure the symlinked file was read
self.wait_until(
lambda: self.output_has(lines=1),
max_timeout=10)
os.remove(symlink)
with open(logfile, 'a') as file:
file.write("Hello World2\n")
        # Give filebeat a scan cycle before checking for the new line
time.sleep(1)
        # Make sure the new line was read
self.wait_until(
lambda: self.output_has(lines=2),
max_timeout=10)
filebeat.check_kill_and_wait()
        # Check that only one file is in the registry
data = self.get_registry()
assert len(data) == 1
def test_symlink_and_file(self):
"""
        Tests that when both a symlink and its original file are harvested, events are only read once
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
symlinks="true",
)
os.mkdir(self.working_dir + "/log/")
logfile = self.working_dir + "/log/test.log"
symlink = self.working_dir + "/log/symlink.log"
if os.name == "nt":
import win32file
win32file.CreateSymbolicLink(symlink, logfile, 0)
else:
os.symlink(logfile, symlink)
with open(logfile, 'a') as file:
file.write("Hello World1\n")
filebeat = self.start_beat()
        # Make sure the line is read only once
self.wait_until(
lambda: self.output_has(lines=1),
max_timeout=10)
filebeat.check_kill_and_wait()
        # Check that only one file is in the registry
data = self.get_registry()
assert len(data) == 1
def test_truncate(self):
"""
Tests what happens if file is truncated and symlink recreated
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
symlinks="true",
)
os.mkdir(self.working_dir + "/log/")
logfile = self.working_dir + "/log/test.log"
symlink = self.working_dir + "/log/symlink.log"
if os.name == "nt":
import win32file
win32file.CreateSymbolicLink(symlink, logfile, 0)
else:
os.symlink(logfile, symlink)
with open(logfile, 'w') as file:
file.write("Hello World1\n")
file.write("Hello World2\n")
file.write("Hello World3\n")
file.write("Hello World4\n")
filebeat = self.start_beat()
        # Make sure all 4 lines were read
self.wait_until(
lambda: self.output_has(lines=4),
max_timeout=10)
os.remove(symlink)
with open(logfile, 'w') as file:
file.truncate()
file.seek(0)
if os.name == "nt":
import win32file
win32file.CreateSymbolicLink(symlink, logfile, 0)
else:
os.symlink(logfile, symlink)
        # Write new content shorter than the old one
with open(logfile, 'a') as file:
file.write("Hello World5\n")
file.write("Hello World6\n")
file.write("Hello World7\n")
        # Make sure the new lines were read
self.wait_until(
lambda: self.output_has(lines=7),
max_timeout=10)
filebeat.check_kill_and_wait()
        # Check that there is only one registry entry, as the original file was only truncated
data = self.get_registry()
assert len(data) == 1
def test_decode_error(self):
"""
Tests that in case of a decoding error it is handled gracefully
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
encoding="GBK", # Set invalid encoding for entry below which is actually uft-8
)
os.mkdir(self.working_dir + "/log/")
logfile = self.working_dir + "/log/test.log"
with open(logfile, 'w') as file:
file.write("hello world1" + "\n")
file.write('<meta content="瞭解「Google 商業解決方案」提供的各類服務軟件如何助您分析資料、刊登廣告、提升網站成效等。" name="description">' + '\n')
file.write("hello world2" + "\n")
filebeat = self.start_beat()
        # Make sure all 3 lines were read
self.wait_until(
lambda: self.output_has(lines=3),
max_timeout=10)
# Wait until error shows up
self.wait_until(
lambda: self.log_contains("Error decoding line: simplifiedchinese: invalid GBK encoding"),
max_timeout=5)
filebeat.check_kill_and_wait()
        # Check that there is only one registry entry
data = self.get_registry()
assert len(data) == 1
output = self.read_output_json()
assert output[2]["message"] == "hello world2"
|
a-buck/airmozilla
|
refs/heads/master
|
airmozilla/manage/tests/test_pestering.py
|
3
|
import datetime
from django.test import TestCase
from django.core import mail
from django.contrib.auth.models import Group, User
from django.conf import settings
from django.contrib.sites.models import Site
from django.utils import timezone
from funfactory.urlresolvers import reverse
from nose.tools import eq_, ok_
from airmozilla.manage.pestering import pester
from airmozilla.main.models import (
Approval,
Event
)
class PesteringTestCase(TestCase):
fixtures = ['airmozilla/manage/tests/main_testdata.json']
def _age_event_created(self, event, save=True):
extra_seconds = settings.PESTER_INTERVAL_DAYS * 24 * 60 * 60 + 1
now = timezone.now()
event.created = now - datetime.timedelta(seconds=extra_seconds)
save and event.save()
def test_nothing_happens(self):
result = pester()
ok_(not result)
def test_sending(self):
group = Group.objects.create(name='PR Group')
# we need some people to belong to the group
bob = User.objects.create(
username='bob',
email='bob@example.com',
is_staff=True
)
bob.groups.add(group)
mr_inactive = User.objects.create(
username='mr_inactive',
email='long@gone.com',
is_staff=True,
is_active=False,
)
mr_inactive.groups.add(group)
event = Event.objects.get(title='Test event')
# first pretend that the event was created now
now = timezone.now()
event.created = now
event.save()
approval = Approval.objects.create(
event=event,
group=group,
)
site = Site.objects.get_current()
result = pester(dry_run=True)
eq_(len(mail.outbox), 0)
eq_(len(result), 0)
# nothing because the event is too new
# let's pretend it's older
self._age_event_created(event)
result = pester(dry_run=True)
eq_(len(mail.outbox), 0)
eq_(len(result), 1)
email, subject, message = result[0]
eq_(email, bob.email)
ok_('[Air Mozilla]' in subject)
ok_('1 event' in subject)
ok_('://%s' % site.domain in message)
ok_(group.name in message)
ok_(event.title in message)
ok_(event.description in message)
ok_(event.location.name in message)
ok_(event.location.timezone in message)
approve_url = reverse('manage:approval_review', args=(approval.pk,))
ok_(approve_url in message)
manage_url = reverse('manage:approvals')
ok_(manage_url in message)
now = timezone.now()
assert event.start_time < now
ok_('Time left: overdue!' in message)
result = pester()
# check that 1 email was sent
eq_(len(mail.outbox), 1)
email_sent = mail.outbox[-1]
eq_(email_sent.subject, subject)
ok_(message in email_sent.body)
eq_([bob.email], email_sent.recipients())
# try to send it again and nothing should happen
result = pester()
ok_(not result)
# or force past the caching
result = pester(force_run=True)
ok_(result)
eq_(len(mail.outbox), 2)
def test_sending_future_event_to_multiple_people(self):
group = Group.objects.create(name='PR Group')
group2 = Group.objects.create(name='Hippies')
# we need some people to belong to the group
bob = User.objects.create(
username='bob',
email='bob@example.com',
is_staff=True
)
bob.groups.add(group)
steve = User.objects.create(
username='steve',
email='steve@example.com',
is_staff=True
)
steve.groups.add(group)
steve.groups.add(group2)
now = timezone.now()
event = Event.objects.get(title='Test event')
event.start_time = now + datetime.timedelta(hours=1, minutes=1)
event.save()
# create a second event
event2 = Event.objects.create(
title='Second Title',
slug='second-title',
description='Second Event Description',
start_time=now + datetime.timedelta(days=1, minutes=1),
status=event.status,
location=event.location,
creator=event.creator
)
# let's pretend it's older
self._age_event_created(event)
self._age_event_created(event2)
Approval.objects.create(
event=event,
group=group,
)
Approval.objects.create(
event=event2,
group=group2,
)
result = pester()
eq_(len(result), 2)
eq_(len(mail.outbox), 2)
for email, subject, message in result:
ok_('Time left: overdue!' not in message)
if email == bob.email:
ok_('1 event to approve' in subject)
ok_(event.title in message)
ok_(event2.title not in message)
ok_(u'Time left: 1\xa0hour' in message)
elif email == steve.email:
ok_('2 events to approve' in subject)
ok_(event.title in message)
ok_(event2.title in message)
ok_(u'Time left: 1\xa0day' in message)
else:
raise AssertionError(email)
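# Context for the assertions above: pester() emails each active member of
# an approval group about events still awaiting approval, but only once an
# event's 'created' timestamp is older than settings.PESTER_INTERVAL_DAYS;
# _age_event_created fakes that age by rewinding event.created by that many
# days' worth of seconds plus one. dry_run=True builds the (email, subject,
# message) tuples without sending, and force_run=True bypasses the
# don't-email-twice caching.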
|
40223208/2015cdb_g4
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/unittest/test/test_break.py
|
785
|
import gc
import io
import os
import sys
import signal
import weakref
import unittest
@unittest.skipUnless(hasattr(os, 'kill'), "Test requires os.kill")
@unittest.skipIf(sys.platform =="win32", "Test cannot run on Windows")
@unittest.skipIf(sys.platform == 'freebsd6', "Test kills regrtest on freebsd6 "
"if threads have been used")
class TestBreak(unittest.TestCase):
def setUp(self):
self._default_handler = signal.getsignal(signal.SIGINT)
def tearDown(self):
signal.signal(signal.SIGINT, self._default_handler)
unittest.signals._results = weakref.WeakKeyDictionary()
unittest.signals._interrupt_handler = None
def testInstallHandler(self):
default_handler = signal.getsignal(signal.SIGINT)
unittest.installHandler()
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
try:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
except KeyboardInterrupt:
self.fail("KeyboardInterrupt not handled")
self.assertTrue(unittest.signals._interrupt_handler.called)
def testRegisterResult(self):
result = unittest.TestResult()
unittest.registerResult(result)
for ref in unittest.signals._results:
if ref is result:
break
elif ref is not result:
self.fail("odd object in result set")
else:
self.fail("result not found")
def testInterruptCaught(self):
default_handler = signal.getsignal(signal.SIGINT)
result = unittest.TestResult()
unittest.installHandler()
unittest.registerResult(result)
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
def test(result):
pid = os.getpid()
os.kill(pid, signal.SIGINT)
result.breakCaught = True
self.assertTrue(result.shouldStop)
try:
test(result)
except KeyboardInterrupt:
self.fail("KeyboardInterrupt not handled")
self.assertTrue(result.breakCaught)
def testSecondInterrupt(self):
result = unittest.TestResult()
unittest.installHandler()
unittest.registerResult(result)
def test(result):
pid = os.getpid()
os.kill(pid, signal.SIGINT)
result.breakCaught = True
self.assertTrue(result.shouldStop)
os.kill(pid, signal.SIGINT)
self.fail("Second KeyboardInterrupt not raised")
try:
test(result)
except KeyboardInterrupt:
pass
else:
self.fail("Second KeyboardInterrupt not raised")
self.assertTrue(result.breakCaught)
def testTwoResults(self):
unittest.installHandler()
result = unittest.TestResult()
unittest.registerResult(result)
new_handler = signal.getsignal(signal.SIGINT)
result2 = unittest.TestResult()
unittest.registerResult(result2)
self.assertEqual(signal.getsignal(signal.SIGINT), new_handler)
result3 = unittest.TestResult()
def test(result):
pid = os.getpid()
os.kill(pid, signal.SIGINT)
try:
test(result)
except KeyboardInterrupt:
self.fail("KeyboardInterrupt not handled")
self.assertTrue(result.shouldStop)
self.assertTrue(result2.shouldStop)
self.assertFalse(result3.shouldStop)
def testHandlerReplacedButCalled(self):
# If our handler has been replaced (is no longer installed) but is
# called by the *new* handler, then it isn't safe to delay the
# SIGINT and we should immediately delegate to the default handler
unittest.installHandler()
handler = signal.getsignal(signal.SIGINT)
def new_handler(frame, signum):
handler(frame, signum)
signal.signal(signal.SIGINT, new_handler)
try:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
except KeyboardInterrupt:
pass
else:
self.fail("replaced but delegated handler doesn't raise interrupt")
def testRunner(self):
# Creating a TextTestRunner with the appropriate argument should
# register the TextTestResult it creates
runner = unittest.TextTestRunner(stream=io.StringIO())
result = runner.run(unittest.TestSuite())
self.assertIn(result, unittest.signals._results)
def testWeakReferences(self):
# Calling registerResult on a result should not keep it alive
result = unittest.TestResult()
unittest.registerResult(result)
ref = weakref.ref(result)
del result
# For non-reference counting implementations
        gc.collect()
        gc.collect()
self.assertIsNone(ref())
def testRemoveResult(self):
result = unittest.TestResult()
unittest.registerResult(result)
unittest.installHandler()
self.assertTrue(unittest.removeResult(result))
# Should this raise an error instead?
self.assertFalse(unittest.removeResult(unittest.TestResult()))
try:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
except KeyboardInterrupt:
pass
self.assertFalse(result.shouldStop)
def testMainInstallsHandler(self):
failfast = object()
test = object()
verbosity = object()
result = object()
default_handler = signal.getsignal(signal.SIGINT)
class FakeRunner(object):
initArgs = []
runArgs = []
def __init__(self, *args, **kwargs):
self.initArgs.append((args, kwargs))
def run(self, test):
self.runArgs.append(test)
return result
class Program(unittest.TestProgram):
def __init__(self, catchbreak):
self.exit = False
self.verbosity = verbosity
self.failfast = failfast
self.catchbreak = catchbreak
self.testRunner = FakeRunner
self.test = test
self.result = None
p = Program(False)
p.runTests()
self.assertEqual(FakeRunner.initArgs, [((), {'buffer': None,
'verbosity': verbosity,
'failfast': failfast,
'warnings': None})])
self.assertEqual(FakeRunner.runArgs, [test])
self.assertEqual(p.result, result)
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
FakeRunner.initArgs = []
FakeRunner.runArgs = []
p = Program(True)
p.runTests()
self.assertEqual(FakeRunner.initArgs, [((), {'buffer': None,
'verbosity': verbosity,
'failfast': failfast,
'warnings': None})])
self.assertEqual(FakeRunner.runArgs, [test])
self.assertEqual(p.result, result)
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
def testRemoveHandler(self):
default_handler = signal.getsignal(signal.SIGINT)
unittest.installHandler()
unittest.removeHandler()
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
# check that calling removeHandler multiple times has no ill-effect
unittest.removeHandler()
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
def testRemoveHandlerAsDecorator(self):
default_handler = signal.getsignal(signal.SIGINT)
unittest.installHandler()
@unittest.removeHandler
def test():
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
test()
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
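# Summary of what this suite exercises: unittest's ctrl-c handling.
# installHandler() swaps in a SIGINT handler so a first interrupt sets
# shouldStop on every registered (weakly referenced) TestResult and lets
# the current test finish cleanly, while a second interrupt raises
# KeyboardInterrupt as usual. The same machinery backs the catchbreak
# option, i.e. 'python -m unittest -c'.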
|
livepy/portia
|
refs/heads/master
|
slybot/slybot/spider.py
|
12
|
from __future__ import absolute_import
import json
import re
from operator import itemgetter
from copy import deepcopy
import itertools
from six.moves.urllib_parse import urlparse
from w3lib.http import basic_auth_header
from scrapy.http import Request, HtmlResponse, FormRequest
import six
try:
from scrapy.spiders import Spider
except ImportError:
# BaseSpider class was deprecated in Scrapy 0.21
from scrapy.spider import BaseSpider as Spider
from loginform import fill_login_form
from slybot.utils import (iter_unique_scheme_hostname, load_plugins,
load_plugin_names, IndexedDict)
from slybot.linkextractor import create_linkextractor_from_specs
from slybot.generic_form import GenericForm
STRING_KEYS = ['start_urls', 'exclude_patterns', 'follow_patterns',
'allowed_domains', 'js_enabled', 'js_enable_patterns',
'js_disable_patterns']
class IblSpider(Spider):
def __init__(self, name, spec, item_schemas, all_extractors, settings=None,
**kw):
super(IblSpider, self).__init__(name, **kw)
self._job_id = settings.get('JOB', '')
spec = deepcopy(spec)
for key, val in kw.items():
if isinstance(val, six.string_types) and key in STRING_KEYS:
val = val.splitlines()
spec[key] = val
self._item_template_pages = sorted(
((t['scrapes'], t) for t in spec['templates']
if t.get('page_type', 'item') == 'item'), key=itemgetter(0))
self._templates = [templ for _, templ in self._item_template_pages]
self.plugins = IndexedDict()
for plugin_class, plugin_name in zip(load_plugins(settings),
load_plugin_names(settings)):
instance = plugin_class()
instance.setup_bot(settings, spec, item_schemas, all_extractors)
self.plugins[plugin_name] = instance
self.js_enabled = False
self.SPLASH_HOST = None
if settings.get('SPLASH_URL'):
self.SPLASH_HOST = urlparse(settings.get('SPLASH_URL')).hostname
self.js_enabled = spec.get('js_enabled', False)
if self.js_enabled and (settings.get('SPLASH_PASS') is not None or
settings.get('SPLASH_USER') is not None):
self.splash_auth = basic_auth_header(
settings.get('SPLASH_USER', ''),
settings.get('SPLASH_PASS', ''))
self._filter_js_urls = self._build_js_url_filter(spec)
self.login_requests = []
self.form_requests = []
self._start_requests = []
self.generic_form = GenericForm(**kw)
self._create_init_requests(spec.get("init_requests", []))
self._process_start_urls(spec)
self.allowed_domains = spec.get(
'allowed_domains',
self._get_allowed_domains(self._templates)
)
if not self.allowed_domains:
self.allowed_domains = None
def _process_start_urls(self, spec):
self.start_urls = spec.get('start_urls')
for url in self.start_urls:
request = Request(url, callback=self.parse, dont_filter=True)
self._add_splash_meta(request)
self._start_requests.append(request)
def _create_init_requests(self, spec):
for rdata in spec:
if rdata["type"] == "login":
request = Request(url=rdata.pop("loginurl"), meta=rdata,
callback=self.parse_login_page,
dont_filter=True)
self._add_splash_meta(request)
self.login_requests.append(request)
elif rdata["type"] == "form":
self.form_requests.append(
self.get_generic_form_start_request(rdata)
)
elif rdata["type"] == "start":
self._start_requests.append(
self._create_start_request_from_specs(rdata)
)
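    # Illustrative "init_requests" spec shape (hypothetical values):
    #   [{"type": "login", "loginurl": "http://example.com/login",
    #     "username": "user", "password": "secret"},
    #    {"type": "form", "form_url": "http://example.com/search"},
    #    {"type": "start", "url": "http://example.com/products"}]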
def parse_login_page(self, response):
username = response.request.meta["username"]
password = response.request.meta["password"]
args, url, method = fill_login_form(response.url, response.body,
username, password)
return FormRequest(url, method=method, formdata=args,
callback=self.after_login, dont_filter=True)
def after_login(self, response):
for result in self.parse(response):
yield result
for req in self._start_requests:
yield req
def get_generic_form_start_request(self, form_descriptor):
file_fields = list(self.generic_form.get_url_field(form_descriptor))
if file_fields:
(field_index, field_descriptor) = file_fields.pop(0)
form_descriptor['field_index'] = field_index
return FormRequest(self.generic_form.get_value(field_descriptor),
meta=form_descriptor,
callback=self.parse_field_url_page,
dont_filter=True)
else:
return Request(url=form_descriptor.pop("form_url"),
meta=form_descriptor, callback=self.parse_form_page,
dont_filter=True)
def parse_field_url_page(self, response):
form_descriptor = response.request.meta
field_index = form_descriptor['field_index']
field_descriptor = form_descriptor['fields'][field_index]
self.generic_form.set_values_url_field(field_descriptor, response.body)
yield self.get_generic_form_start_request(form_descriptor)
def parse_form_page(self, response):
fill_form = self.generic_form.fill_generic_form
try:
for (args, url, method) in fill_form(response.url, response.body,
response.request.meta):
yield FormRequest(url, method=method, formdata=args,
callback=self.after_form_page,
dont_filter=True)
except Exception as e:
self.logger.warning(str(e))
for req in self._start_requests:
yield req
def after_form_page(self, response):
for result in self.parse(response):
yield result
def _get_allowed_domains(self, templates):
urls = [x['url'] for x in templates]
urls += [x.url for x in self._start_requests]
return [x[1] for x in iter_unique_scheme_hostname(urls)]
def start_requests(self):
start_requests = []
if self.login_requests:
start_requests = self.login_requests
elif self.form_requests:
start_requests = self.form_requests
else:
start_requests = self._start_requests
for req in start_requests:
yield req
def _create_start_request_from_specs(self, info):
url = info["url"]
lspecs = info.get("link_extractor")
if lspecs:
linkextractor = create_linkextractor_from_specs(lspecs)
def _callback(spider, response):
for link in linkextractor.links_to_follow(response):
request = Request(url=link.url, callback=spider.parse)
yield self._add_splash_meta(request)
request = Request(url=url, callback=_callback)
return self._add_splash_meta(request)
request = Request(url=url, callback=self.parse)
return self._add_splash_meta(request)
def parse(self, response):
"""Main handler for all downloaded responses"""
request = response.request
if (request and request.method == 'POST' and
urlparse(request.url).hostname == self.SPLASH_HOST):
url = (json.loads(request.body).get('url'))
if url:
response._url = url
content_type = response.headers.get('Content-Type', '')
if isinstance(response, HtmlResponse):
return self.handle_html(response)
elif "application/rss+xml" in content_type:
return self.handle_rss(response)
else:
self.logger.debug(
"Ignoring page with content-type=%r: %s" % (content_type,
response.url)
)
return []
def _plugin_hook(self, name, *args):
results = []
for plugin in self.plugins.values():
if hasattr(plugin, name):
results.append(getattr(plugin, name)(*args))
return results
    def _handle(self, hook, response, *extra_args):
        generators = self._plugin_hook(hook, response, *extra_args)
for item_or_request in itertools.chain(*generators):
if isinstance(item_or_request, Request):
self._plugin_hook('process_request', item_or_request, response)
else:
self._plugin_hook('process_item', item_or_request, response)
if isinstance(item_or_request, Request):
item_or_request = self._add_splash_meta(item_or_request)
yield item_or_request
def handle_rss(self, response):
return self._handle('handle_rss', response, set([]))
def handle_html(self, response):
return self._handle('handle_html', response)
def _build_js_url_filter(self, spec):
if not self.js_enabled:
return lambda x: None
enable_patterns = spec.get('js_enable_patterns')
disable_patterns = spec.get('js_disable_patterns')
filterf = None
enablef = None
if enable_patterns:
pattern = enable_patterns[0] if len(enable_patterns) == 1 else \
"(?:%s)" % '|'.join(enable_patterns)
enablef = re.compile(pattern).search
filterf = enablef
if disable_patterns:
pattern = disable_patterns[0] if len(disable_patterns) == 1 else \
"(?:%s)" % '|'.join(disable_patterns)
disablef = re.compile(pattern).search
if not enablef:
filterf = lambda x: not disablef(x)
else:
filterf = lambda x: enablef(x) and not disablef(x)
return filterf if filterf else lambda x: x
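    # Illustrative behaviour (hypothetical patterns): with
    # js_enable_patterns=["/product/"] and js_disable_patterns=["\\.pdf$"],
    # the returned callable is truthy for "http://x/product/1" but falsy
    # for "http://x/product/manual.pdf", so only the former is routed
    # through Splash by _add_splash_meta below.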
def _add_splash_meta(self, request):
if self.js_enabled and self._filter_js_urls(request.url):
cleaned_url = urlparse(request.url)._replace(params='', query='',
fragment='').geturl()
request.meta['splash'] = {
'endpoint': 'render.html?job_id=%s' % self._job_id,
'args': {
'wait': 5,
'images': 0,
'url': request.url,
'baseurl': cleaned_url
}
}
return request
|
katrid/django
|
refs/heads/master
|
tests/forms_tests/urls.py
|
452
|
from django.conf.urls import url
from .views import ArticleFormView
urlpatterns = [
url(r'^model_form/(?P<pk>[0-9]+)/$', ArticleFormView.as_view(), name="article_form"),
]
|
aerickson/ansible
|
refs/heads/devel
|
lib/ansible/plugins/callback/json.py
|
118
|
# (c) 2016, Matt Martz <matt@sivel.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.plugins.callback import CallbackBase
class CallbackModule(CallbackBase):
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'json'
def __init__(self, display=None):
super(CallbackModule, self).__init__(display)
self.results = []
def _new_play(self, play):
return {
'play': {
'name': play.name,
'id': str(play._uuid)
},
'tasks': []
}
def _new_task(self, task):
return {
'task': {
'name': task.name,
'id': str(task._uuid)
},
'hosts': {}
}
def v2_playbook_on_play_start(self, play):
self.results.append(self._new_play(play))
def v2_playbook_on_task_start(self, task, is_conditional):
self.results[-1]['tasks'].append(self._new_task(task))
def v2_runner_on_ok(self, result, **kwargs):
host = result._host
self.results[-1]['tasks'][-1]['hosts'][host.name] = result._result
def v2_playbook_on_stats(self, stats):
"""Display info about playbook statistics"""
hosts = sorted(stats.processed.keys())
summary = {}
for h in hosts:
s = stats.summarize(h)
summary[h] = s
output = {
'plays': self.results,
'stats': summary
}
self._display.display(json.dumps(output, indent=4, sort_keys=True))
v2_runner_on_failed = v2_runner_on_ok
v2_runner_on_unreachable = v2_runner_on_ok
v2_runner_on_skipped = v2_runner_on_ok
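# Usage note: stdout callbacks are selected via Ansible configuration,
# e.g. 'stdout_callback = json' in the [defaults] section of ansible.cfg,
# or per run via the environment (playbook name illustrative):
#
#   ANSIBLE_STDOUT_CALLBACK=json ansible-playbook site.yml
#
# The run then prints a single JSON document with 'plays' and 'stats' keys,
# as assembled in v2_playbook_on_stats above.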
|
asm0dey/Flexget
|
refs/heads/master
|
flexget/utils/sqlalchemy_utils.py
|
6
|
"""
Miscellaneous SQLAlchemy helpers.
"""
from __future__ import unicode_literals, division, absolute_import
import logging
from sqlalchemy import ColumnDefault, Sequence, Index
from sqlalchemy.types import TypeEngine
from sqlalchemy.schema import Table, MetaData
from sqlalchemy.exc import NoSuchTableError, OperationalError
log = logging.getLogger('sql_utils')
def table_exists(name, session):
"""
Use SQLAlchemy reflect to check table existences.
:param string name: Table name to check
:param Session session: Session to use
:return: True if table exists, False otherwise
:rtype: bool
"""
try:
table_schema(name, session)
except NoSuchTableError:
return False
return True
def table_schema(name, session):
"""
:returns: Table schema using SQLAlchemy reflect as it currently exists in the db
:rtype: Table
"""
return Table(name, MetaData(bind=session.bind), autoload=True)
def table_columns(table, session):
"""
:param string table: Name of table or table schema
:param Session session: SQLAlchemy Session
:returns: List of column names in the table or empty list
"""
res = []
if isinstance(table, basestring):
table = table_schema(table, session)
for column in table.columns:
res.append(column.name)
return res
def table_add_column(table, name, col_type, session, default=None):
"""Adds a column to a table
.. warning:: Uses raw statements, probably needs to be changed in
order to work on other databases besides SQLite
:param string table: Table to add column to (can be name or schema)
:param string name: Name of new column to add
:param col_type: The sqlalchemy column type to add
:param Session session: SQLAlchemy Session to do the alteration
:param default: Default value for the created column (optional)
"""
if isinstance(table, basestring):
table = table_schema(table, session)
if name in table_columns(table, session):
# If the column already exists, we don't have to do anything.
return
# Add the column to the table
if not isinstance(col_type, TypeEngine):
# If we got a type class instead of an instance of one, instantiate it
col_type = col_type()
type_string = session.bind.engine.dialect.type_compiler.process(col_type)
statement = 'ALTER TABLE %s ADD %s %s' % (table.name, name, type_string)
session.execute(statement)
# Update the table with the default value if given
if default is not None:
# Get the new schema with added column
table = table_schema(table.name, session)
if not isinstance(default, (ColumnDefault, Sequence)):
default = ColumnDefault(default)
default._set_parent(getattr(table.c, name))
statement = table.update().values({name: default.execute(bind=session.bind)})
session.execute(statement)
def drop_tables(names, session):
"""Takes a list of table names and drops them from the database if they exist."""
metadata = MetaData()
metadata.reflect(bind=session.bind)
for table in metadata.sorted_tables:
if table.name in names:
table.drop()
def get_index_by_name(table, name):
"""
Find declaratively defined index from table by name
:param table: Table object
:param string name: Name of the index to get
:return: Index object
"""
for index in table.indexes:
if index.name == name:
return index
def create_index(table_name, session, *column_names):
"""
    Creates an index on the specified `column_names` in `table_name`
:param table_name: Name of table to create the index on.
:param session: Session object which should be used
:param column_names: The names of the columns that should belong to this index.
"""
index_name = '_'.join(['ix', table_name] + list(column_names))
table = table_schema(table_name, session)
columns = [getattr(table.c, column) for column in column_names]
try:
Index(index_name, *columns).create(bind=session.bind)
except OperationalError:
log.debug('Error creating index.', exc_info=True)
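# A minimal, hedged usage sketch of the helpers above; the 'series' table
# and 'rating' column are illustrative assumptions, not part of this module.
def _usage_sketch(session):
    """Add and index a hypothetical 'rating' column via the helpers above."""
    from sqlalchemy import Integer
    if table_exists('series', session):
        # No-op if the column already exists; otherwise emits ALTER TABLE
        # and backfills the default.
        table_add_column('series', 'rating', Integer, session, default=0)
        # Creates an index named 'ix_series_rating' on the reflected table.
        create_index('series', session, 'rating')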
|
sopier/django
|
refs/heads/master
|
django/contrib/humanize/templatetags/__init__.py
|
12133432
| |
KimGlazebrook/wagtail-experiment
|
refs/heads/master
|
wagtail/wagtailembeds/templatetags/__init__.py
|
12133432
| |
hdinsight/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/tests/cache/__init__.py
|
12133432
| |
WebMonitor/ZhihuMonitor
|
refs/heads/master
|
zhihu_fetch_info/Redis/redis_manage.py
|
1
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import redis
redis_conn = redis.Redis(host='123.207.100.124', port=9999, db=0, password='54A4D5E5131BB76124D661335EE6E6FE')
|
keedio/hue
|
refs/heads/master
|
desktop/core/ext-py/Paste-2.0.1/paste/progress.py
|
78
|
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# (c) 2005 Clark C. Evans
# This module is part of the Python Paste Project and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
# This code was written with funding by http://prometheusresearch.com
"""
Upload Progress Monitor
This is a WSGI middleware component which monitors the status of files
being uploaded. It includes a small query application which will return
a list of all files being uploaded by a particular session/user.
>>> from paste.httpserver import serve
>>> from paste.urlmap import URLMap
>>> from paste.auth.basic import AuthBasicHandler
>>> from paste.debug.debugapp import SlowConsumer, SimpleApplication
>>> # from paste.progress import *
>>> realm = 'Test Realm'
>>> def authfunc(username, password):
... return username == password
>>> map = URLMap({})
>>> ups = UploadProgressMonitor(map, threshold=1024)
>>> map['/upload'] = SlowConsumer()
>>> map['/simple'] = SimpleApplication()
>>> map['/report'] = UploadProgressReporter(ups)
>>> serve(AuthBasicHandler(ups, realm, authfunc))
serving on...
.. note::
This is experimental, and will change in the future.
"""
import time
from paste.wsgilib import catch_errors
DEFAULT_THRESHOLD = 1024 * 1024 # one megabyte
DEFAULT_TIMEOUT = 60*5 # five minutes
ENVIRON_RECEIVED = 'paste.bytes_received'
REQUEST_STARTED = 'paste.request_started'
REQUEST_FINISHED = 'paste.request_finished'
class _ProgressFile(object):
"""
This is the input-file wrapper used to record the number of
``paste.bytes_received`` for the given request.
"""
def __init__(self, environ, rfile):
self._ProgressFile_environ = environ
self._ProgressFile_rfile = rfile
self.flush = rfile.flush
self.write = rfile.write
self.writelines = rfile.writelines
def __iter__(self):
environ = self._ProgressFile_environ
riter = iter(self._ProgressFile_rfile)
def iterwrap():
for chunk in riter:
environ[ENVIRON_RECEIVED] += len(chunk)
yield chunk
        return iterwrap()
def read(self, size=-1):
chunk = self._ProgressFile_rfile.read(size)
self._ProgressFile_environ[ENVIRON_RECEIVED] += len(chunk)
return chunk
def readline(self):
chunk = self._ProgressFile_rfile.readline()
self._ProgressFile_environ[ENVIRON_RECEIVED] += len(chunk)
return chunk
    def readlines(self, hint=None):
        lines = self._ProgressFile_rfile.readlines(hint)
        # Count the bytes received, not the number of lines in the list.
        self._ProgressFile_environ[ENVIRON_RECEIVED] += sum(len(line) for line in lines)
        return lines
class UploadProgressMonitor(object):
"""
monitors and reports on the status of uploads in progress
Parameters:
``application``
This is the next application in the WSGI stack.
``threshold``
This is the size in bytes that is needed for the
upload to be included in the monitor.
``timeout``
This is the amount of time (in seconds) that a upload
remains in the monitor after it has finished.
Methods:
``uploads()``
This returns a list of ``environ`` dict objects for each
upload being currently monitored, or finished but whose time
has not yet expired.
For each request ``environ`` that is monitored, there are several
variables that are stored:
``paste.bytes_received``
This is the total number of bytes received for the given
request; it can be compared with ``CONTENT_LENGTH`` to
build a percentage complete. This is an integer value.
``paste.request_started``
This is the time (in seconds) when the request was started
as obtained from ``time.time()``. One would want to format
this for presentation to the user, if necessary.
``paste.request_finished``
This is the time (in seconds) when the request was finished,
canceled, or otherwise disconnected. This is None while
the given upload is still in-progress.
TODO: turn monitor into a queue and purge queue of finished
requests that have passed the timeout period.
"""
def __init__(self, application, threshold=None, timeout=None):
self.application = application
self.threshold = threshold or DEFAULT_THRESHOLD
self.timeout = timeout or DEFAULT_TIMEOUT
self.monitor = []
def __call__(self, environ, start_response):
length = environ.get('CONTENT_LENGTH', 0)
if length and int(length) > self.threshold:
# replace input file object
self.monitor.append(environ)
environ[ENVIRON_RECEIVED] = 0
environ[REQUEST_STARTED] = time.time()
environ[REQUEST_FINISHED] = None
environ['wsgi.input'] = \
_ProgressFile(environ, environ['wsgi.input'])
def finalizer(exc_info=None):
environ[REQUEST_FINISHED] = time.time()
return catch_errors(self.application, environ,
start_response, finalizer, finalizer)
return self.application(environ, start_response)
def uploads(self):
return self.monitor
class UploadProgressReporter(object):
"""
reports on the progress of uploads for a given user
This reporter returns a JSON file (for use in AJAX) listing the
uploads in progress for the given user. By default, this reporter
uses the ``REMOTE_USER`` environment to compare between the current
request and uploads in-progress. If they match, then a response
record is formed.
``match()``
        This member function can be overridden to provide alternative
matching criteria. It takes two environments, the first
is the current request, the second is a current upload.
``report()``
This member function takes an environment and builds a
``dict`` that will be used to create a JSON mapping for
the given upload. By default, this just includes the
percent complete and the request url.
"""
def __init__(self, monitor):
self.monitor = monitor
def match(self, search_environ, upload_environ):
if search_environ.get('REMOTE_USER', None) == \
upload_environ.get('REMOTE_USER', 0):
return True
return False
def report(self, environ):
retval = { 'started': time.strftime("%Y-%m-%d %H:%M:%S",
time.gmtime(environ[REQUEST_STARTED])),
'finished': '',
'content_length': environ.get('CONTENT_LENGTH'),
'bytes_received': environ[ENVIRON_RECEIVED],
'path_info': environ.get('PATH_INFO',''),
'query_string': environ.get('QUERY_STRING','')}
finished = environ[REQUEST_FINISHED]
if finished:
retval['finished'] = time.strftime("%Y:%m:%d %H:%M:%S",
time.gmtime(finished))
return retval
def __call__(self, environ, start_response):
body = []
for map in [self.report(env) for env in self.monitor.uploads()
if self.match(environ, env)]:
parts = []
for k, v in map.items():
v = str(v).replace("\\", "\\\\").replace('"', '\\"')
parts.append('%s: "%s"' % (k, v))
body.append("{ %s }" % ", ".join(parts))
body = "[ %s ]" % ", ".join(body)
start_response("200 OK", [('Content-Type', 'text/plain'),
('Content-Length', len(body))])
return [body]
__all__ = ['UploadProgressMonitor', 'UploadProgressReporter']
if "__main__" == __name__:
import doctest
doctest.testmod(optionflags=doctest.ELLIPSIS)
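# A hedged usage sketch (helper name illustrative): turning a monitored
# environ into a percent-complete figure, per the variables documented on
# UploadProgressMonitor above.
def _percent_complete(environ):
    """Percent of an upload received; 0.0 while CONTENT_LENGTH is unknown."""
    received = environ.get(ENVIRON_RECEIVED, 0)
    total = int(environ.get('CONTENT_LENGTH') or 0)
    return 100.0 * received / total if total else 0.0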
|
kissf-lu/jupyter_app
|
refs/heads/master
|
ipython/py36_simpy_sim/tool/items.py
|
1
|
# -*- coding: utf-8 -*-
"""
作者:Ted
日期:2017-07-13
说明:
包裹 class
货物 class
Uld class
"""
import simpy
import pandas as pd
__all__ = ["Package", "Truck", "Uld", "SmallBag", "SmallPackage", "Pipeline"]
class Package:
"""包裹"""
def __init__(self,
env: simpy.Environment,
attr: pd.Series,
item_id : str,
path: tuple, ):
        # All of the package's information is kept in attr
self.attr = attr
# id
self.item_id = item_id
# env
self.env = env
# for record
self.plan_path = path
# for popping
self.path = list(path)
# for time
self.time_records = []
# next pipeline_id
self.next_pipeline = ()
        # record of the package entering a machine
self.package_record = dict(package_id=item_id)
def add_machine_id(self, machine_id):
self.package_record["machine_id"] = machine_id
def start_wait(self):
self.package_record["start_wait"] = self.env.now
def start_serve(self):
self.package_record["start_serve"] = self.env.now
def end_serve(self):
self.package_record["end_serve"] = self.env.now
def pop_mark(self):
"""返回下一个pipeline id: (now_loc, next_loc), 删去第一个节点,记录当前的时间点"""
if len(self.path) >= 2:
now_loc, next_loc = self.path[0: 2]
        # When the package reaches reload (final sorting), the final-sorting queue id has only one value
elif len(self.path) == 1:
now_loc, next_loc = self.path[-1], None
else:
raise ValueError('The path have been empty!')
# remove the now_loc
pop_loc = self.path.pop(0)
self.time_records.append((pop_loc, self.env.now))
        # Update the next pipeline id
self.next_pipeline = now_loc, next_loc
def __str__(self):
display_dct = dict(self.attr)
return f"<package attr:{dict(display_dct)}, path: {self.plan_path}>"
class SmallPackage(Package):
"""小件包裹"""
def __str__(self):
display_dct = dict(self.attr)
return f"<SmallBag attr:{dict(display_dct)}, path: {self.plan_path}>"
class SmallBag(Package):
"""小件包"""
# todo
def __init__(self, env: simpy.Environment,
attr: pd.Series,
item_id : str,
path: tuple,
small_packages: pd.DataFrame):
super(SmallBag, self).__init__(env, attr, item_id, path)
self.store = small_packages
self.store_size = len(self.store)
def __str__(self):
display_dct = dict(self.attr)
return f"<SmallBag attr:{dict(display_dct)}, path: {self.plan_path}, store_size:{store_size}>"
class Truck:
"""货车"""
def __init__(self, env: simpy.Environment, item_id: str, come_time: int, truck_type: str, packages:pd.DataFrame):
"""
:param truck_id: self explain
:param come_time: self explain
:param packages: a dataframe contain all packages
"""
self.item_id = item_id
self.come_time = come_time
self.store = packages
self.store_size = len(self.store)
self.truck_type = truck_type
self.env = env
def __str__(self):
return f"<truck_id: {self.item_id}, come_time: {self.come_time}, store_size:{self.store_size}>"
class Uld(Truck):
"""航空箱"""
pass
class Pipeline:
"""传送带"""
def __init__(self,
env: simpy.Environment,
delay_time: float=0,
pipeline_id: tuple=None,
queue_id: str=None,
machine_type: str=None,
):
self.env = env
self.delay = delay_time
self.queue = simpy.Store(env)
self.pipeline_id = pipeline_id
self.queue_id = queue_id
self.machine_type = machine_type
        # Count of items currently on the conveyor
self.latency_counts = 0
self.latency_counts_time = []
        # Machine waiting area: queue-length samples
self.machine_waiting_counts_time = []
        # Start the counter process
self.env.process(self.get_counts())
def get_counts(self):
"""计数器"""
while True:
latency_dict = dict(pipeline_id=self.pipeline_id,
timestamp=self.env.now,
counts=self.latency_counts)
wait_dict = dict(pipeline_id=self.pipeline_id,
timestamp=self.env.now,
counts=len(self.queue.items))
self.latency_counts_time.append(latency_dict)
self.machine_waiting_counts_time.append(wait_dict)
yield self.env.timeout(1)
def latency(self, item: Package):
"""模拟传送时间"""
self.latency_counts += 1
yield self.env.timeout(self.delay)
        # Record the data point
item.pop_mark()
item.add_machine_id(machine_id=self.pipeline_id)
self.queue.put(item)
self.latency_counts -= 1
def put(self, item: Package):
        item.start_wait()  # record when the item starts waiting
self.env.process(self.latency(item))
def get(self):
return self.queue.get()
def __str__(self):
return f"<Pipeline: {self.pipeline_id}, delay: {self.delay}, package_counts: {self.latency_counts}>"
if __name__ == '__main__':
pass
|
yfried/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/google/gcp_compute_https_health_check_facts.py
|
8
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ["preview"],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_https_health_check_facts
description:
- Gather facts for GCP HttpsHealthCheck
short_description: Gather facts for GCP HttpsHealthCheck
version_added: 2.7
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
filters:
description:
A list of filter value pairs. Available filters are listed here
U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
      Each additional filter in the list will be added as an AND condition
(filter1 and filter2)
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: a https health check facts
gcp_compute_https_health_check_facts:
filters:
- name = test_object
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
items:
description: List of items
returned: always
type: complex
contains:
checkIntervalSec:
description:
- How often (in seconds) to send a health check. The default value is 5 seconds.
returned: success
type: int
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
returned: success
type: str
healthyThreshold:
description:
- A so-far unhealthy instance will be marked healthy after this many consecutive successes.
The default value is 2.
returned: success
type: int
host:
description:
- The value of the host header in the HTTPS health check request. If left empty (default
value), the public IP on behalf of which this health check is performed will be
used.
returned: success
type: str
id:
description:
- The unique identifier for the resource. This identifier is defined by the server.
returned: success
type: int
name:
description:
- Name of the resource. Provided by the client when the resource is created. The name
must be 1-63 characters long, and comply with RFC1035. Specifically, the name must
be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following characters
must be a dash, lowercase letter, or digit, except the last character, which cannot
be a dash.
returned: success
type: str
port:
description:
- The TCP port number for the HTTPS health check request.
- The default value is 443.
returned: success
type: int
requestPath:
description:
- The request path of the HTTPS health check request.
- The default value is /.
returned: success
type: str
timeoutSec:
description:
- How long (in seconds) to wait before claiming failure.
- The default value is 5 seconds. It is invalid for timeoutSec to have greater value
than checkIntervalSec.
returned: success
type: int
unhealthyThreshold:
description:
- A so-far healthy instance will be marked unhealthy after this many consecutive failures.
The default value is 2.
returned: success
type: int
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
module = GcpModule(
argument_spec=dict(
filters=dict(type='list', elements='str')
)
)
if 'scopes' not in module.params:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
items = fetch_list(module, collection(module), query_options(module.params['filters']))
if items.get('items'):
items = items.get('items')
else:
items = []
return_value = {
'items': items
}
module.exit_json(**return_value)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/global/httpsHealthChecks".format(**module.params)
def fetch_list(module, link, query):
auth = GcpSession(module, 'compute')
response = auth.get(link, params={'filter': query})
return return_if_object(module, response)
def query_options(filters):
if not filters:
return ''
if len(filters) == 1:
return filters[0]
else:
queries = []
for f in filters:
# For multiple queries, every query should be wrapped in ()
if f[0] != '(' or f[-1] != ')':
    queries.append("(%s)" % f)
else:
queries.append(f)
return ' '.join(queries)
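# For example (a sketch of the behaviour above):
#   query_options(['name = instance'])       -> 'name = instance'
#   query_options(['name = a', 'zone = b'])  -> '(name = a) (zone = b)'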
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main()
|
40423147/2017springcd_hw
|
refs/heads/gh-pages
|
plugin/tipue_search/__init__.py
|
371
|
from .tipue_search import *
|
citrix-openstack-build/horizon
|
refs/heads/master
|
openstack_dashboard/dashboards/settings/dashboard.py
|
10
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _ # noqa
import horizon
class Settings(horizon.Dashboard):
name = _("Settings")
slug = "settings"
panels = ('user', 'password', )
default_panel = 'user'
nav = False
horizon.register(Settings)
|
provaleks/o8
|
refs/heads/8.0
|
addons/mass_mailing/models/res_config.py
|
385
|
# -*- coding: utf-8 -*-
from openerp.osv import fields, osv
class MassMailingConfiguration(osv.TransientModel):
_name = 'marketing.config.settings'
_inherit = 'marketing.config.settings'
_columns = {
'group_mass_mailing_campaign': fields.boolean(
'Manage Mass Mailing using Campaign',
implied_group='mass_mailing.group_mass_mailing_campaign',
help="""Manage mass mailign using Campaigns"""),
}
|
Beramos/QuestionHistCrit
|
refs/heads/master
|
questenv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/misc.py
|
1428
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Backports for individual classes and functions."""
import os
import sys
__all__ = ['cache_from_source', 'callable', 'fsencode']
try:
from imp import cache_from_source
except ImportError:
def cache_from_source(py_file, debug=__debug__):
ext = debug and 'c' or 'o'
return py_file + ext
try:
callable = callable
except NameError:
from collections import Callable
def callable(obj):
return isinstance(obj, Callable)
try:
fsencode = os.fsencode
except AttributeError:
def fsencode(filename):
if isinstance(filename, bytes):
return filename
elif isinstance(filename, str):
return filename.encode(sys.getfilesystemencoding())
else:
raise TypeError("expect bytes or str, not %s" %
type(filename).__name__)
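# A minimal usage sketch of these backports (values are illustrative):
#   cache_from_source('mod.py')  # -> 'mod.pyc' when __debug__ is true
#   callable(len)                # -> True
#   fsencode(b'data')            # bytes come back unchanged; text strings
#                                # are encoded with the filesystem encoding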
|
dwoods/gn-maps
|
refs/heads/master
|
geonode/layers/management/commands/updatelayers.py
|
6
|
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.core.management.base import BaseCommand
from optparse import make_option
from geonode.people.utils import get_valid_user
from geonode.geoserver.helpers import gs_slurp
import traceback
import datetime
import sys
class Command(BaseCommand):
help = 'Update the GeoNode application with data from GeoServer'
option_list = BaseCommand.option_list + (
make_option('-i', '--ignore-errors',
action='store_true',
dest='ignore_errors',
default=False,
help='Ignore errors and continue processing layers.'),
make_option('--skip-unadvertised',
action='store_true',
dest='skip_unadvertised',
default=False,
help='Skip processing unadvertised layers from GeoServer.'),
make_option('--remove-deleted',
action='store_true',
dest='remove_deleted',
default=False,
help='Remove GeoNode layers that have been deleted from GeoServer.'),
make_option('-u', '--user', dest="user", default=None,
help="Name of the user account which should own the imported layers"),
make_option('-f', '--filter', dest="filter", default=None,
help="Only update data the layers that match the given filter"),
make_option('-s', '--store', dest="store", default=None,
help="Only update data the layers for the given geoserver store name"),
make_option('-w', '--workspace', dest="workspace", default=None,
help="Only update data on specified workspace")
)
def handle(self, **options):
ignore_errors = options.get('ignore_errors')
skip_unadvertised = options.get('skip_unadvertised')
remove_deleted = options.get('remove_deleted')
verbosity = int(options.get('verbosity'))
user = options.get('user')
owner = get_valid_user(user)
workspace = options.get('workspace')
filter = options.get('filter')
store = options.get('store')
if verbosity > 0:
console = sys.stdout
else:
console = None
output = gs_slurp(ignore_errors, verbosity=verbosity,
owner=owner, console=console, workspace=workspace, store=store, filter=filter, skip_unadvertised=skip_unadvertised, remove_deleted=remove_deleted)
if verbosity > 1:
print "\nDetailed report of failures:"
for dict_ in output['layers']:
if dict_['status'] == 'failed':
print "\n\n", dict_['name'], "\n================"
traceback.print_exception(dict_['exception_type'],
dict_['error'],
dict_['traceback'])
if remove_deleted:
print "Detailed report of layers to be deleted from GeoNode that failed:"
for dict_ in output['deleted_layers']:
if dict_['status'] == 'delete_failed':
print "\n\n", dict_['name'], "\n================"
traceback.print_exception(dict_['exception_type'],
dict_['error'],
dict_['traceback'])
if verbosity > 0:
print "\n\nFinished processing %d layers in %s seconds.\n" % (
len(output['layers']), round(output['stats']['duration_sec'],2))
print "%d Created layers" % output['stats']['created']
print "%d Updated layers" % output['stats']['updated']
print "%d Failed layers" % output['stats']['failed']
try:
duration_layer = round(output['stats']['duration_sec'] * 1.0 / len(output['layers']),2)
except ZeroDivisionError:
duration_layer = 0
if len(output['layers']) > 0:
print "%f seconds per layer" % duration_layer
if remove_deleted: print "\n%d Deleted layers" % output['stats']['deleted']
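# Example invocations (a sketch; the command runs through Django's manage.py):
#   python manage.py updatelayers --ignore-errors -u admin
#   python manage.py updatelayers -w geonode -f my_layer --remove-deleted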
|
k0ste/ansible
|
refs/heads/devel
|
lib/ansible/parsing/yaml/loader.py
|
191
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
try:
from _yaml import CParser, CEmitter
HAVE_PYYAML_C = True
except ImportError:
HAVE_PYYAML_C = False
from yaml.resolver import Resolver
from ansible.parsing.yaml.constructor import AnsibleConstructor
if HAVE_PYYAML_C:
class AnsibleLoader(CParser, AnsibleConstructor, Resolver):
def __init__(self, stream, file_name=None, vault_secrets=None):
CParser.__init__(self, stream)
AnsibleConstructor.__init__(self, file_name=file_name, vault_secrets=vault_secrets)
Resolver.__init__(self)
else:
from yaml.composer import Composer
from yaml.reader import Reader
from yaml.scanner import Scanner
from yaml.parser import Parser
class AnsibleLoader(Reader, Scanner, Parser, Composer, AnsibleConstructor, Resolver):
def __init__(self, stream, file_name=None, vault_secrets=None):
Reader.__init__(self, stream)
Scanner.__init__(self)
Parser.__init__(self)
Composer.__init__(self)
AnsibleConstructor.__init__(self, file_name=file_name, vault_secrets=vault_secrets)
Resolver.__init__(self)
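# A minimal usage sketch (hypothetical file name; get_single_data comes from
# the PyYAML constructor base class):
#   with open('playbook.yml') as f:
#       data = AnsibleLoader(f, file_name='playbook.yml').get_single_data()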
|
ammarkhann/FinalSeniorCode
|
refs/heads/master
|
lib/python2.7/site-packages/django/contrib/gis/gdal/feature.py
|
439
|
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import GDALException, OGRIndexError
from django.contrib.gis.gdal.field import Field
from django.contrib.gis.gdal.geometries import OGRGeometry, OGRGeomType
from django.contrib.gis.gdal.prototypes import ds as capi, geom as geom_api
from django.utils import six
from django.utils.encoding import force_bytes, force_text
from django.utils.six.moves import range
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_F_* routines are relevant here.
class Feature(GDALBase):
"""
This class wraps an OGR Feature; it needs to be instantiated
from a Layer object.
"""
def __init__(self, feat, layer):
"""
Initializes Feature from a pointer and its Layer object.
"""
if not feat:
raise GDALException('Cannot create OGR Feature, invalid pointer given.')
self.ptr = feat
self._layer = layer
def __del__(self):
"Releases a reference to this object."
if self._ptr and capi:
capi.destroy_feature(self._ptr)
def __getitem__(self, index):
"""
Gets the Field object at the specified index, which may be either
an integer or the Field's string label. Note that the Field object
is not the field's _value_ -- use the `get` method instead to
retrieve the value (e.g. an integer) instead of a Field instance.
"""
if isinstance(index, six.string_types):
i = self.index(index)
else:
if index < 0 or index > self.num_fields:
raise OGRIndexError('index out of range')
i = index
return Field(self, i)
def __iter__(self):
"Iterates over each field in the Feature."
for i in range(self.num_fields):
yield self[i]
def __len__(self):
"Returns the count of fields in this feature."
return self.num_fields
def __str__(self):
"The string name of the feature."
return 'Feature FID %d in Layer<%s>' % (self.fid, self.layer_name)
def __eq__(self, other):
"Does equivalence testing on the features."
return bool(capi.feature_equal(self.ptr, other._ptr))
# #### Feature Properties ####
@property
def encoding(self):
return self._layer._ds.encoding
@property
def fid(self):
"Returns the feature identifier."
return capi.get_fid(self.ptr)
@property
def layer_name(self):
"Returns the name of the layer for the feature."
name = capi.get_feat_name(self._layer._ldefn)
return force_text(name, self.encoding, strings_only=True)
@property
def num_fields(self):
"Returns the number of fields in the Feature."
return capi.get_feat_field_count(self.ptr)
@property
def fields(self):
"Returns a list of fields in the Feature."
return [capi.get_field_name(capi.get_field_defn(self._layer._ldefn, i))
for i in range(self.num_fields)]
@property
def geom(self):
"Returns the OGR Geometry for this Feature."
# Retrieving the geometry pointer for the feature.
geom_ptr = capi.get_feat_geom_ref(self.ptr)
return OGRGeometry(geom_api.clone_geom(geom_ptr))
@property
def geom_type(self):
"Returns the OGR Geometry Type for this Feture."
return OGRGeomType(capi.get_fd_geom_type(self._layer._ldefn))
# #### Feature Methods ####
def get(self, field):
"""
Returns the value of the field, instead of an instance of the Field
object. May take a string of the field name or a Field object as
parameters.
"""
field_name = getattr(field, 'name', field)
return self[field_name].value
def index(self, field_name):
"Returns the index of the given field name."
i = capi.get_field_index(self.ptr, force_bytes(field_name))
if i < 0:
raise OGRIndexError('invalid OFT field name given: "%s"' % field_name)
return i
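# A minimal usage sketch (hypothetical shapefile and field name):
#   from django.contrib.gis.gdal import DataSource
#   layer = DataSource('cities.shp')[0]  # a Layer yields Feature objects
#   for feat in layer:
#       print(feat.fid, feat.get('NAME'), feat.geom.wkt)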
|
wiki05/SublimeJEDI
|
refs/heads/master
|
jedi/__init__.py
|
4
|
"""
Jedi is an autocompletion tool for Python that can be used in IDEs/editors.
Jedi works. Jedi is fast. It understands all of the basic Python syntax
elements including many builtin functions.
Additionally, Jedi supports two different goto functions and has support for
renaming as well as Pydoc support and some other IDE features.
Jedi uses a very simple API to connect with IDEs. There's a reference
implementation as a `VIM-Plugin <http://github.com/davidhalter/jedi-vim>`_,
which uses Jedi's autocompletion. I encourage you to use Jedi in your IDEs.
It's really easy. If there are any problems (also with licensing), just contact
me.
To give you a simple example how you can use the Jedi library, here is an
example for the autocompletion feature:
>>> import jedi
>>> source = '''
... import datetime
... datetime.da'''
>>> script = jedi.Script(source, 3, len('datetime.da'), 'example.py')
>>> script
<Script: 'example.py'>
>>> completions = script.completions()
>>> completions #doctest: +ELLIPSIS
[<Completion: date>, <Completion: datetime>, ...]
>>> print(completions[0].complete)
te
>>> print(completions[0].name)
date
As you can see, Jedi is pretty simple and allows you to concentrate on writing a
good text editor, while still having very good IDE features for Python.
"""
__version__ = '0.9.0'
from jedi.api import Script, Interpreter, NotFoundError, set_debug_function
from jedi.api import preload_module, defined_names, names
from jedi import settings
|
duducosmos/pgs4a
|
refs/heads/master
|
python-install/lib/python2.7/tabnanny.py
|
394
|
#! /usr/bin/env python
"""The Tab Nanny despises ambiguous indentation. She knows no mercy.
tabnanny -- Detection of ambiguous indentation
For the time being this module is intended to be called as a script.
However it is possible to import it into an IDE and use the function
check() described below.
Warning: The API provided by this module is likely to change in future
releases; such changes may not be backward compatible.
"""
# Released to the public domain, by Tim Peters, 15 April 1998.
# XXX Note: this is now a standard library module.
# XXX The API needs to undergo changes however; the current code is too
# XXX script-like. This will be addressed later.
__version__ = "6"
import os
import sys
import getopt
import tokenize
if not hasattr(tokenize, 'NL'):
raise ValueError("tokenize.NL doesn't exist -- tokenize module too old")
__all__ = ["check", "NannyNag", "process_tokens"]
verbose = 0
filename_only = 0
def errprint(*args):
sep = ""
for arg in args:
sys.stderr.write(sep + str(arg))
sep = " "
sys.stderr.write("\n")
def main():
global verbose, filename_only
try:
opts, args = getopt.getopt(sys.argv[1:], "qv")
except getopt.error, msg:
errprint(msg)
return
for o, a in opts:
if o == '-q':
filename_only = filename_only + 1
if o == '-v':
verbose = verbose + 1
if not args:
errprint("Usage:", sys.argv[0], "[-v] file_or_directory ...")
return
for arg in args:
check(arg)
class NannyNag(Exception):
"""
Raised by tokeneater() if detecting an ambiguous indent.
Captured and handled in check().
"""
def __init__(self, lineno, msg, line):
self.lineno, self.msg, self.line = lineno, msg, line
def get_lineno(self):
return self.lineno
def get_msg(self):
return self.msg
def get_line(self):
return self.line
def check(file):
"""check(file_or_dir)
If file_or_dir is a directory and not a symbolic link, then recursively
descend the directory tree named by file_or_dir, checking all .py files
along the way. If file_or_dir is an ordinary Python source file, it is
checked for whitespace related problems. The diagnostic messages are
written to standard output using the print statement.
"""
if os.path.isdir(file) and not os.path.islink(file):
if verbose:
print "%r: listing directory" % (file,)
names = os.listdir(file)
for name in names:
fullname = os.path.join(file, name)
if (os.path.isdir(fullname) and
not os.path.islink(fullname) or
os.path.normcase(name[-3:]) == ".py"):
check(fullname)
return
try:
f = open(file)
except IOError, msg:
errprint("%r: I/O Error: %s" % (file, msg))
return
if verbose > 1:
print "checking %r ..." % file
try:
process_tokens(tokenize.generate_tokens(f.readline))
except tokenize.TokenError, msg:
errprint("%r: Token Error: %s" % (file, msg))
return
except IndentationError, msg:
errprint("%r: Indentation Error: %s" % (file, msg))
return
except NannyNag, nag:
badline = nag.get_lineno()
line = nag.get_line()
if verbose:
print "%r: *** Line %d: trouble in tab city! ***" % (file, badline)
print "offending line: %r" % (line,)
print nag.get_msg()
else:
if ' ' in file: file = '"' + file + '"'
if filename_only: print file
else: print file, badline, repr(line)
return
if verbose:
print "%r: Clean bill of health." % (file,)
class Whitespace:
# the characters used for space and tab
S, T = ' \t'
# members:
# raw
# the original string
# n
# the number of leading whitespace characters in raw
# nt
# the number of tabs in raw[:n]
# norm
# the normal form as a pair (count, trailing), where:
# count
# a tuple such that raw[:n] contains count[i]
# instances of S * i + T
# trailing
# the number of trailing spaces in raw[:n]
# It's A Theorem that m.indent_level(t) ==
# n.indent_level(t) for all t >= 1 iff m.norm == n.norm.
# is_simple
# true iff raw[:n] is of the form (T*)(S*)
def __init__(self, ws):
self.raw = ws
S, T = Whitespace.S, Whitespace.T
count = []
b = n = nt = 0
for ch in self.raw:
if ch == S:
n = n + 1
b = b + 1
elif ch == T:
n = n + 1
nt = nt + 1
if b >= len(count):
count = count + [0] * (b - len(count) + 1)
count[b] = count[b] + 1
b = 0
else:
break
self.n = n
self.nt = nt
self.norm = tuple(count), b
self.is_simple = len(count) <= 1
# return length of longest contiguous run of spaces (whether or not
# preceding a tab)
def longest_run_of_spaces(self):
count, trailing = self.norm
return max(len(count)-1, trailing)
def indent_level(self, tabsize):
# count, il = self.norm
# for i in range(len(count)):
# if count[i]:
# il = il + (i/tabsize + 1)*tabsize * count[i]
# return il
# quicker:
# il = trailing + sum (i/ts + 1)*ts*count[i] =
# trailing + ts * sum (i/ts + 1)*count[i] =
# trailing + ts * sum i/ts*count[i] + count[i] =
# trailing + ts * [(sum i/ts*count[i]) + (sum count[i])] =
# trailing + ts * [(sum i/ts*count[i]) + num_tabs]
# and note that i/ts*count[i] is 0 when i < ts
count, trailing = self.norm
il = 0
for i in range(tabsize, len(count)):
il = il + i/tabsize * count[i]
return trailing + tabsize * (il + self.nt)
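# Worked example: Whitespace("  \t") and Whitespace("\t") both give
# indent_level(8) == 8, but indent_level(1) gives 3 vs. 1 -- the two
# prefixes agree at tab size 8 yet differ at tab size 1, i.e. they are
# ambiguous and not equal() for all tab sizes.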
# return true iff self.indent_level(t) == other.indent_level(t)
# for all t >= 1
def equal(self, other):
return self.norm == other.norm
# return a list of tuples (ts, i1, i2) such that
# i1 == self.indent_level(ts) != other.indent_level(ts) == i2.
# Intended to be used after not self.equal(other) is known, in which
# case it will return at least one witnessing tab size.
def not_equal_witness(self, other):
n = max(self.longest_run_of_spaces(),
other.longest_run_of_spaces()) + 1
a = []
for ts in range(1, n+1):
if self.indent_level(ts) != other.indent_level(ts):
a.append( (ts,
self.indent_level(ts),
other.indent_level(ts)) )
return a
# Return True iff self.indent_level(t) < other.indent_level(t)
# for all t >= 1.
# The algorithm is due to Vincent Broman.
# Easy to prove it's correct.
# XXXpost that.
# Trivial to prove n is sharp (consider T vs ST).
# Unknown whether there's a faster general way. I suspected so at
# first, but no longer.
# For the special (but common!) case where M and N are both of the
# form (T*)(S*), M.less(N) iff M.len() < N.len() and
# M.num_tabs() <= N.num_tabs(). Proof is easy but kinda long-winded.
# XXXwrite that up.
# Note that M is of the form (T*)(S*) iff len(M.norm[0]) <= 1.
def less(self, other):
if self.n >= other.n:
return False
if self.is_simple and other.is_simple:
return self.nt <= other.nt
n = max(self.longest_run_of_spaces(),
other.longest_run_of_spaces()) + 1
# the self.n >= other.n test already did it for ts=1
for ts in range(2, n+1):
if self.indent_level(ts) >= other.indent_level(ts):
return False
return True
# return a list of tuples (ts, i1, i2) such that
# i1 == self.indent_level(ts) >= other.indent_level(ts) == i2.
# Intended to be used after not self.less(other) is known, in which
# case it will return at least one witnessing tab size.
def not_less_witness(self, other):
n = max(self.longest_run_of_spaces(),
other.longest_run_of_spaces()) + 1
a = []
for ts in range(1, n+1):
if self.indent_level(ts) >= other.indent_level(ts):
a.append( (ts,
self.indent_level(ts),
other.indent_level(ts)) )
return a
def format_witnesses(w):
firsts = map(lambda tup: str(tup[0]), w)
prefix = "at tab size"
if len(w) > 1:
prefix = prefix + "s"
return prefix + " " + ', '.join(firsts)
def process_tokens(tokens):
INDENT = tokenize.INDENT
DEDENT = tokenize.DEDENT
NEWLINE = tokenize.NEWLINE
JUNK = tokenize.COMMENT, tokenize.NL
indents = [Whitespace("")]
check_equal = 0
for (type, token, start, end, line) in tokens:
if type == NEWLINE:
# a program statement, or ENDMARKER, will eventually follow,
# after some (possibly empty) run of tokens of the form
# (NL | COMMENT)* (INDENT | DEDENT+)?
# If an INDENT appears, setting check_equal is wrong, and will
# be undone when we see the INDENT.
check_equal = 1
elif type == INDENT:
check_equal = 0
thisguy = Whitespace(token)
if not indents[-1].less(thisguy):
witness = indents[-1].not_less_witness(thisguy)
msg = "indent not greater e.g. " + format_witnesses(witness)
raise NannyNag(start[0], msg, line)
indents.append(thisguy)
elif type == DEDENT:
# there's nothing we need to check here! what's important is
# that when the run of DEDENTs ends, the indentation of the
# program statement (or ENDMARKER) that triggered the run is
# equal to what's left at the top of the indents stack
# Ouch! This assert triggers if the last line of the source
# is indented *and* lacks a newline -- then DEDENTs pop out
# of thin air.
# assert check_equal # else no earlier NEWLINE, or an earlier INDENT
check_equal = 1
del indents[-1]
elif check_equal and type not in JUNK:
# this is the first "real token" following a NEWLINE, so it
# must be the first token of the next program statement, or an
# ENDMARKER; the "line" argument exposes the leading whitespace
# for this statement; in the case of ENDMARKER, line is an empty
# string, so will properly match the empty string with which the
# "indents" stack was seeded
check_equal = 0
thisguy = Whitespace(line)
if not indents[-1].equal(thisguy):
witness = indents[-1].not_equal_witness(thisguy)
msg = "indent not equal e.g. " + format_witnesses(witness)
raise NannyNag(start[0], msg, line)
if __name__ == '__main__':
main()
|
ryfeus/lambda-packs
|
refs/heads/master
|
Tensorflow_LightGBM_Scipy_nightly/source/setuptools/command/install.py
|
529
|
from distutils.errors import DistutilsArgError
import inspect
import glob
import warnings
import platform
import distutils.command.install as orig
import setuptools
# Prior to numpy 1.9, NumPy relied on the '_install' name, so provide it for
# now. See https://github.com/pypa/setuptools/issues/199/
_install = orig.install
class install(orig.install):
"""Use easy_install to install the package, w/dependencies"""
user_options = orig.install.user_options + [
('old-and-unmanageable', None, "Try not to use this!"),
('single-version-externally-managed', None,
"used by system package builders to create 'flat' eggs"),
]
boolean_options = orig.install.boolean_options + [
'old-and-unmanageable', 'single-version-externally-managed',
]
new_commands = [
('install_egg_info', lambda self: True),
('install_scripts', lambda self: True),
]
_nc = dict(new_commands)
def initialize_options(self):
orig.install.initialize_options(self)
self.old_and_unmanageable = None
self.single_version_externally_managed = None
def finalize_options(self):
orig.install.finalize_options(self)
if self.root:
self.single_version_externally_managed = True
elif self.single_version_externally_managed:
if not self.root and not self.record:
raise DistutilsArgError(
"You must specify --record or --root when building system"
" packages"
)
def handle_extra_path(self):
if self.root or self.single_version_externally_managed:
# explicit backward-compatibility mode, allow extra_path to work
return orig.install.handle_extra_path(self)
# Ignore extra_path when installing an egg (or being run by another
# command without --root or --single-version-externally-managed).
self.path_file = None
self.extra_dirs = ''
def run(self):
# Explicit request for old-style install? Just do it
if self.old_and_unmanageable or self.single_version_externally_managed:
return orig.install.run(self)
if not self._called_from_setup(inspect.currentframe()):
# Run in backward-compatibility mode to support bdist_* commands.
orig.install.run(self)
else:
self.do_egg_install()
@staticmethod
def _called_from_setup(run_frame):
"""
Attempt to detect whether run() was called from setup() or by another
command. If called by setup(), the parent caller will be the
'run_command' method in 'distutils.dist', and *its* caller will be
the 'run_commands' method. If called any other way, the
immediate caller *might* be 'run_command', but it won't have been
called by 'run_commands'. Return True in that case or if a call stack
is unavailable. Return False otherwise.
"""
if run_frame is None:
msg = "Call stack not available. bdist_* commands may fail."
warnings.warn(msg)
if platform.python_implementation() == 'IronPython':
msg = "For best results, pass -X:Frames to enable call stack."
warnings.warn(msg)
return True
res = inspect.getouterframes(run_frame)[2]
caller, = res[:1]
info = inspect.getframeinfo(caller)
caller_module = caller.f_globals.get('__name__', '')
return (
caller_module == 'distutils.dist'
and info.function == 'run_commands'
)
def do_egg_install(self):
easy_install = self.distribution.get_command_class('easy_install')
cmd = easy_install(
self.distribution, args="x", root=self.root, record=self.record,
)
cmd.ensure_finalized() # finalize before bdist_egg munges install cmd
cmd.always_copy_from = '.' # make sure local-dir eggs get installed
# pick up setup-dir .egg files only: no .egg-info
cmd.package_index.scan(glob.glob('*.egg'))
self.run_command('bdist_egg')
args = [self.distribution.get_command_obj('bdist_egg').egg_output]
if setuptools.bootstrap_install_from:
# Bootstrap self-installation of setuptools
args.insert(0, setuptools.bootstrap_install_from)
cmd.args = args
cmd.run()
setuptools.bootstrap_install_from = None
# XXX Python 3.1 doesn't see _nc if this is inside the class
install.sub_commands = (
[cmd for cmd in orig.install.sub_commands if cmd[0] not in install._nc] +
install.new_commands
)
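# A sketch of the resulting flow (names as defined above): `python setup.py
# install` invokes install.run(), which defers to do_egg_install() ->
# bdist_egg -> easy_install, unless the old-style options or a non-setup()
# caller force the plain distutils install.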
|
bbsan2k/nzbToMedia
|
refs/heads/nightly
|
core/transmissionrpc/constants.py
|
4
|
# -*- coding: utf-8 -*-
# Copyright (c) 2008-2013 Erik Svensson <erik.public@gmail.com>
# Licensed under the MIT license.
import logging
from core.transmissionrpc.six import iteritems
LOGGER = logging.getLogger('transmissionrpc')
LOGGER.setLevel(logging.ERROR)
def mirror_dict(source):
"""
Creates a dictionary with all values as keys and all keys as values.
"""
source.update(dict((value, key) for key, value in iteritems(source)))
return source
DEFAULT_PORT = 9091
DEFAULT_TIMEOUT = 30.0
TR_PRI_LOW = -1
TR_PRI_NORMAL = 0
TR_PRI_HIGH = 1
PRIORITY = mirror_dict({
'low': TR_PRI_LOW,
'normal': TR_PRI_NORMAL,
'high': TR_PRI_HIGH
})
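# Thanks to mirror_dict, these maps work in both directions, e.g.
# PRIORITY['low'] == TR_PRI_LOW and PRIORITY[TR_PRI_LOW] == 'low'.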
TR_RATIOLIMIT_GLOBAL = 0 # follow the global settings
TR_RATIOLIMIT_SINGLE = 1 # override the global settings, seeding until a certain ratio
TR_RATIOLIMIT_UNLIMITED = 2 # override the global settings, seeding regardless of ratio
RATIO_LIMIT = mirror_dict({
'global': TR_RATIOLIMIT_GLOBAL,
'single': TR_RATIOLIMIT_SINGLE,
'unlimited': TR_RATIOLIMIT_UNLIMITED
})
TR_IDLELIMIT_GLOBAL = 0 # follow the global settings
TR_IDLELIMIT_SINGLE = 1 # override the global settings, seeding until a certain idle time
TR_IDLELIMIT_UNLIMITED = 2 # override the global settings, seeding regardless of activity
IDLE_LIMIT = mirror_dict({
    'global': TR_IDLELIMIT_GLOBAL,
    'single': TR_IDLELIMIT_SINGLE,
    'unlimited': TR_IDLELIMIT_UNLIMITED
})
# A note on argument maps
# These maps are used to verify *-set methods. The information is structured in
# a tree.
# set +- <argument1> - [<type>, <added version>, <removed version>, <previous argument name>, <next argument name>, <description>]
# | +- <argument2> - [<type>, <added version>, <removed version>, <previous argument name>, <next argument name>, <description>]
# |
# get +- <argument1> - [<type>, <added version>, <removed version>, <previous argument name>, <next argument name>, <description>]
# +- <argument2> - [<type>, <added version>, <removed version>, <previous argument name>, <next argument name>, <description>]
# Arguments for torrent methods
TORRENT_ARGS = {
'get': {
'activityDate': ('number', 1, None, None, None, 'Last time of upload or download activity.'),
'addedDate': ('number', 1, None, None, None, 'The date when this torrent was first added.'),
'announceResponse': ('string', 1, 7, None, None, 'The announce message from the tracker.'),
'announceURL': ('string', 1, 7, None, None, 'Current announce URL.'),
'bandwidthPriority': ('number', 5, None, None, None, 'Bandwidth priority. Low (-1), Normal (0) or High (1).'),
'comment': ('string', 1, None, None, None, 'Torrent comment.'),
'corruptEver': ('number', 1, None, None, None, 'Number of bytes of corrupt data downloaded.'),
'creator': ('string', 1, None, None, None, 'Torrent creator.'),
'dateCreated': ('number', 1, None, None, None, 'Torrent creation date.'),
'desiredAvailable': ('number', 1, None, None, None, 'Number of bytes available and left to be downloaded.'),
'doneDate': ('number', 1, None, None, None, 'The date when the torrent finished downloading.'),
'downloadDir': ('string', 4, None, None, None, 'The directory path where the torrent is downloaded to.'),
'downloadedEver': ('number', 1, None, None, None, 'Number of bytes of good data downloaded.'),
'downloaders': ('number', 4, 7, None, None, 'Number of downloaders.'),
'downloadLimit': ('number', 1, None, None, None, 'Download limit in Kbps.'),
'downloadLimited': ('boolean', 5, None, None, None, 'Download limit is enabled'),
'downloadLimitMode': (
'number', 1, 5, None, None, 'Download limit mode. 0 means global, 1 means single, 2 unlimited.'),
'error': ('number', 1, None, None, None,
'Kind of error. 0 means OK, 1 means tracker warning, 2 means tracker error, 3 means local error.'),
'errorString': ('number', 1, None, None, None, 'Error message.'),
'eta': ('number', 1, None, None, None,
'Estimated number of seconds left when downloading or seeding. -1 means not available and -2 means unknown.'),
'etaIdle': ('number', 15, None, None, None,
'Estimated number of seconds left until the idle time limit is reached. -1 means not available and -2 means unknown.'),
'files': (
'array', 1, None, None, None, 'Array of file object containing key, bytesCompleted, length and name.'),
'fileStats': (
'array', 5, None, None, None, 'Array of file statistics containing bytesCompleted, wanted and priority.'),
'hashString': ('string', 1, None, None, None, 'Hashstring unique for the torrent even between sessions.'),
'haveUnchecked': ('number', 1, None, None, None, 'Number of bytes of partial pieces.'),
'haveValid': ('number', 1, None, None, None, 'Number of bytes of checksum verified data.'),
'honorsSessionLimits': ('boolean', 5, None, None, None, 'True if session upload limits are honored'),
'id': ('number', 1, None, None, None, 'Session unique torrent id.'),
'isFinished': ('boolean', 9, None, None, None, 'True if the torrent is finished. Downloaded and seeded.'),
'isPrivate': ('boolean', 1, None, None, None, 'True if the torrent is private.'),
'isStalled': ('boolean', 14, None, None, None, 'True if the torrent has stalled (been idle for a long time).'),
'lastAnnounceTime': ('number', 1, 7, None, None, 'The time of the last announcement.'),
'lastScrapeTime': ('number', 1, 7, None, None, 'The time of the last successful scrape.'),
'leechers': ('number', 1, 7, None, None, 'Number of leechers.'),
'leftUntilDone': ('number', 1, None, None, None, 'Number of bytes left until the download is done.'),
'magnetLink': ('string', 7, None, None, None, 'The magnet link for this torrent.'),
'manualAnnounceTime': ('number', 1, None, None, None, 'The time until you manually ask for more peers.'),
'maxConnectedPeers': ('number', 1, None, None, None, 'Maximum of connected peers.'),
'metadataPercentComplete': ('number', 7, None, None, None, 'Download progress of metadata. 0.0 to 1.0.'),
'name': ('string', 1, None, None, None, 'Torrent name.'),
'nextAnnounceTime': ('number', 1, 7, None, None, 'Next announce time.'),
'nextScrapeTime': ('number', 1, 7, None, None, 'Next scrape time.'),
'peer-limit': ('number', 5, None, None, None, 'Maximum number of peers.'),
'peers': ('array', 2, None, None, None, 'Array of peer objects.'),
'peersConnected': ('number', 1, None, None, None, 'Number of peers we are connected to.'),
'peersFrom': (
'object', 1, None, None, None, 'Object containing download peers counts for different peer types.'),
'peersGettingFromUs': ('number', 1, None, None, None, 'Number of peers we are sending data to.'),
'peersKnown': ('number', 1, 13, None, None, 'Number of peers that the tracker knows.'),
'peersSendingToUs': ('number', 1, None, None, None, 'Number of peers sending to us'),
'percentDone': ('double', 5, None, None, None, 'Download progress of selected files. 0.0 to 1.0.'),
'pieces': ('string', 5, None, None, None, 'String with base64 encoded bitfield indicating finished pieces.'),
'pieceCount': ('number', 1, None, None, None, 'Number of pieces.'),
'pieceSize': ('number', 1, None, None, None, 'Number of bytes in a piece.'),
'priorities': ('array', 1, None, None, None, 'Array of file priorities.'),
'queuePosition': ('number', 14, None, None, None, 'The queue position.'),
'rateDownload': ('number', 1, None, None, None, 'Download rate in bps.'),
'rateUpload': ('number', 1, None, None, None, 'Upload rate in bps.'),
'recheckProgress': ('double', 1, None, None, None, 'Progress of recheck. 0.0 to 1.0.'),
'secondsDownloading': ('number', 15, None, None, None, ''),
'secondsSeeding': ('number', 15, None, None, None, ''),
'scrapeResponse': ('string', 1, 7, None, None, 'Scrape response message.'),
'scrapeURL': ('string', 1, 7, None, None, 'Current scrape URL'),
'seeders': ('number', 1, 7, None, None, 'Number of seeders reported by the tracker.'),
'seedIdleLimit': ('number', 10, None, None, None, 'Idle limit in minutes.'),
'seedIdleMode': ('number', 10, None, None, None, 'Use global (0), torrent (1), or unlimited (2) limit.'),
'seedRatioLimit': ('double', 5, None, None, None, 'Seed ratio limit.'),
'seedRatioMode': ('number', 5, None, None, None, 'Use global (0), torrent (1), or unlimited (2) limit.'),
'sizeWhenDone': ('number', 1, None, None, None, 'Size of the torrent download in bytes.'),
'startDate': ('number', 1, None, None, None, 'The date when the torrent was last started.'),
'status': ('number', 1, None, None, None, 'Current status, see source'),
'swarmSpeed': ('number', 1, 7, None, None, 'Estimated speed in Kbps in the swarm.'),
'timesCompleted': ('number', 1, 7, None, None, 'Number of successful downloads reported by the tracker.'),
'trackers': ('array', 1, None, None, None, 'Array of tracker objects.'),
'trackerStats': ('object', 7, None, None, None, 'Array of object containing tracker statistics.'),
'totalSize': ('number', 1, None, None, None, 'Total size of the torrent in bytes'),
'torrentFile': ('string', 5, None, None, None, 'Path to .torrent file.'),
'uploadedEver': ('number', 1, None, None, None, 'Number of bytes uploaded, ever.'),
'uploadLimit': ('number', 1, None, None, None, 'Upload limit in Kbps'),
'uploadLimitMode': (
'number', 1, 5, None, None, 'Upload limit mode. 0 means global, 1 means single, 2 unlimited.'),
'uploadLimited': ('boolean', 5, None, None, None, 'Upload limit enabled.'),
'uploadRatio': ('double', 1, None, None, None, 'Seed ratio.'),
'wanted': ('array', 1, None, None, None, 'Array of booleans indicated wanted files.'),
'webseeds': ('array', 1, None, None, None, 'Array of webseeds objects'),
'webseedsSendingToUs': ('number', 1, None, None, None, 'Number of webseeds seeding to us.'),
},
'set': {
'bandwidthPriority': ('number', 5, None, None, None, 'Priority for this transfer.'),
'downloadLimit': ('number', 5, None, 'speed-limit-down', None, 'Set the speed limit for download in Kib/s.'),
'downloadLimited': ('boolean', 5, None, 'speed-limit-down-enabled', None, 'Enable download speed limiter.'),
'files-wanted': ('array', 1, None, None, None, "A list of file id's that should be downloaded."),
'files-unwanted': ('array', 1, None, None, None, "A list of file id's that shouldn't be downloaded."),
'honorsSessionLimits': ('boolean', 5, None, None, None,
"Enables or disables the transfer to honour the upload limit set in the session."),
'location': ('array', 1, None, None, None, 'Local download location.'),
'peer-limit': ('number', 1, None, None, None, 'The peer limit for the torrents.'),
'priority-high': ('array', 1, None, None, None, "A list of file id's that should have high priority."),
'priority-low': ('array', 1, None, None, None, "A list of file id's that should have low priority."),
'priority-normal': ('array', 1, None, None, None, "A list of file id's that should have normal priority."),
'queuePosition': ('number', 14, None, None, None, 'Position of this transfer in its queue.'),
'seedIdleLimit': ('number', 10, None, None, None, 'Seed inactivity limit in minutes.'),
'seedIdleMode': ('number', 10, None, None, None,
'Seed inactivity mode. 0 = Use session limit, 1 = Use transfer limit, 2 = Disable limit.'),
'seedRatioLimit': ('double', 5, None, None, None, 'Seeding ratio.'),
'seedRatioMode': ('number', 5, None, None, None,
'Which ratio to use. 0 = Use session limit, 1 = Use transfer limit, 2 = Disable limit.'),
'speed-limit-down': ('number', 1, 5, None, 'downloadLimit', 'Set the speed limit for download in Kib/s.'),
'speed-limit-down-enabled': ('boolean', 1, 5, None, 'downloadLimited', 'Enable download speed limiter.'),
'speed-limit-up': ('number', 1, 5, None, 'uploadLimit', 'Set the speed limit for upload in Kib/s.'),
'speed-limit-up-enabled': ('boolean', 1, 5, None, 'uploadLimited', 'Enable upload speed limiter.'),
'trackerAdd': ('array', 10, None, None, None, 'Array of string with announce URLs to add.'),
'trackerRemove': ('array', 10, None, None, None, 'Array of ids of trackers to remove.'),
'trackerReplace': (
'array', 10, None, None, None, 'Array of (id, url) tuples where the announce URL should be replaced.'),
'uploadLimit': ('number', 5, None, 'speed-limit-up', None, 'Set the speed limit for upload in Kib/s.'),
'uploadLimited': ('boolean', 5, None, 'speed-limit-up-enabled', None, 'Enable upload speed limiter.'),
},
'add': {
'bandwidthPriority': ('number', 8, None, None, None, 'Priority for this transfer.'),
'download-dir': (
'string', 1, None, None, None, 'The directory where the downloaded contents will be saved in.'),
'cookies': ('string', 13, None, None, None, 'One or more HTTP cookie(s).'),
'filename': ('string', 1, None, None, None, "A file path or URL to a torrent file or a magnet link."),
'files-wanted': ('array', 1, None, None, None, "A list of file id's that should be downloaded."),
'files-unwanted': ('array', 1, None, None, None, "A list of file id's that shouldn't be downloaded."),
'metainfo': ('string', 1, None, None, None, 'The content of a torrent file, base64 encoded.'),
'paused': ('boolean', 1, None, None, None, 'If True, does not start the transfer when added.'),
'peer-limit': ('number', 1, None, None, None, 'Maximum number of peers allowed.'),
'priority-high': ('array', 1, None, None, None, "A list of file id's that should have high priority."),
'priority-low': ('array', 1, None, None, None, "A list of file id's that should have low priority."),
'priority-normal': ('array', 1, None, None, None, "A list of file id's that should have normal priority."),
}
}
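# Reading one entry of the maps above (a sketch):
#   arg_type, added, removed, prev_arg, next_arg, doc = TORRENT_ARGS['get']['id']
#   # -> ('number', 1, None, None, None, 'Session unique torrent id.')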
# Arguments for session methods
SESSION_ARGS = {
'get': {
"alt-speed-down": ('number', 5, None, None, None, 'Alternate session download speed limit (in Kib/s).'),
"alt-speed-enabled": (
'boolean', 5, None, None, None, 'True if alternate global download speed limiter is enabled.'),
"alt-speed-time-begin": (
'number', 5, None, None, None, 'Time when alternate speeds should be enabled. Minutes after midnight.'),
"alt-speed-time-enabled": ('boolean', 5, None, None, None, 'True if alternate speeds scheduling is enabled.'),
"alt-speed-time-end": (
'number', 5, None, None, None, 'Time when alternate speeds should be disabled. Minutes after midnight.'),
"alt-speed-time-day": ('number', 5, None, None, None, 'Days alternate speeds scheduling is enabled.'),
"alt-speed-up": ('number', 5, None, None, None, 'Alternate session upload speed limit (in Kib/s)'),
"blocklist-enabled": ('boolean', 5, None, None, None, 'True when blocklist is enabled.'),
"blocklist-size": ('number', 5, None, None, None, 'Number of rules in the blocklist'),
"blocklist-url": ('string', 11, None, None, None, 'Location of the block list. Updated with blocklist-update.'),
"cache-size-mb": ('number', 10, None, None, None, 'The maximum size of the disk cache in MB'),
"config-dir": ('string', 8, None, None, None, 'location of transmissions configuration directory'),
"dht-enabled": ('boolean', 6, None, None, None, 'True if DHT enabled.'),
"download-dir": ('string', 1, None, None, None, 'The download directory.'),
"download-dir-free-space": ('number', 12, None, None, None, 'Free space in the download directory, in bytes'),
"download-queue-size": ('number', 14, None, None, None, 'Number of slots in the download queue.'),
"download-queue-enabled": ('boolean', 14, None, None, None, 'True if the download queue is enabled.'),
"encryption": (
'string', 1, None, None, None, 'Encryption mode, one of ``required``, ``preferred`` or ``tolerated``.'),
"idle-seeding-limit": ('number', 10, None, None, None, 'Seed inactivity limit in minutes.'),
"idle-seeding-limit-enabled": ('boolean', 10, None, None, None, 'True if the seed activity limit is enabled.'),
"incomplete-dir": (
'string', 7, None, None, None, 'The path to the directory for incomplete torrent transfer data.'),
"incomplete-dir-enabled": ('boolean', 7, None, None, None, 'True if the incomplete dir is enabled.'),
"lpd-enabled": ('boolean', 9, None, None, None, 'True if local peer discovery is enabled.'),
"peer-limit": ('number', 1, 5, None, 'peer-limit-global', 'Maximum number of peers.'),
"peer-limit-global": ('number', 5, None, 'peer-limit', None, 'Maximum number of peers.'),
"peer-limit-per-torrent": ('number', 5, None, None, None, 'Maximum number of peers per transfer.'),
"pex-allowed": ('boolean', 1, 5, None, 'pex-enabled', 'True if PEX is allowed.'),
"pex-enabled": ('boolean', 5, None, 'pex-allowed', None, 'True if PEX is enabled.'),
"port": ('number', 1, 5, None, 'peer-port', 'Peer port.'),
"peer-port": ('number', 5, None, 'port', None, 'Peer port.'),
"peer-port-random-on-start": (
'boolean', 5, None, None, None, 'Enables randomized peer port on start of Transmission.'),
"port-forwarding-enabled": ('boolean', 1, None, None, None, 'True if port forwarding is enabled.'),
"queue-stalled-minutes": (
'number', 14, None, None, None, 'Number of minutes of idle that marks a transfer as stalled.'),
"queue-stalled-enabled": ('boolean', 14, None, None, None, 'True if stalled tracking of transfers is enabled.'),
"rename-partial-files": ('boolean', 8, None, None, None, 'True if ".part" is appended to incomplete files'),
"rpc-version": ('number', 4, None, None, None, 'Transmission RPC API Version.'),
"rpc-version-minimum": ('number', 4, None, None, None, 'Minimum accepted RPC API Version.'),
"script-torrent-done-enabled": ('boolean', 9, None, None, None, 'True if the done script is enabled.'),
"script-torrent-done-filename": (
'string', 9, None, None, None, 'Filename of the script to run when the transfer is done.'),
"seedRatioLimit": ('double', 5, None, None, None, 'Seed ratio limit. 1.0 means 1:1 download and upload ratio.'),
"seedRatioLimited": ('boolean', 5, None, None, None, 'True if seed ration limit is enabled.'),
"seed-queue-size": ('number', 14, None, None, None, 'Number of slots in the upload queue.'),
"seed-queue-enabled": ('boolean', 14, None, None, None, 'True if upload queue is enabled.'),
"speed-limit-down": ('number', 1, None, None, None, 'Download speed limit (in Kib/s).'),
"speed-limit-down-enabled": ('boolean', 1, None, None, None, 'True if the download speed is limited.'),
"speed-limit-up": ('number', 1, None, None, None, 'Upload speed limit (in Kib/s).'),
"speed-limit-up-enabled": ('boolean', 1, None, None, None, 'True if the upload speed is limited.'),
"start-added-torrents": ('boolean', 9, None, None, None, 'When true uploaded torrents will start right away.'),
"trash-original-torrent-files": (
'boolean', 9, None, None, None, 'When true added .torrent files will be deleted.'),
'units': ('object', 10, None, None, None, 'An object containing units for size and speed.'),
'utp-enabled': ('boolean', 13, None, None, None, 'True if Micro Transport Protocol (UTP) is enabled.'),
"version": ('string', 3, None, None, None, 'Transmission version.'),
},
'set': {
"alt-speed-down": ('number', 5, None, None, None, 'Alternate session download speed limit (in Kib/s).'),
"alt-speed-enabled": ('boolean', 5, None, None, None, 'Enables alternate global download speed limiter.'),
"alt-speed-time-begin": (
'number', 5, None, None, None, 'Time when alternate speeds should be enabled. Minutes after midnight.'),
"alt-speed-time-enabled": ('boolean', 5, None, None, None, 'Enables alternate speeds scheduling.'),
"alt-speed-time-end": (
'number', 5, None, None, None, 'Time when alternate speeds should be disabled. Minutes after midnight.'),
"alt-speed-time-day": ('number', 5, None, None, None, 'Enables alternate speeds scheduling these days.'),
"alt-speed-up": ('number', 5, None, None, None, 'Alternate session upload speed limit (in Kib/s).'),
"blocklist-enabled": ('boolean', 5, None, None, None, 'Enables the block list'),
"blocklist-url": ('string', 11, None, None, None, 'Location of the block list. Updated with blocklist-update.'),
"cache-size-mb": ('number', 10, None, None, None, 'The maximum size of the disk cache in MB'),
"dht-enabled": ('boolean', 6, None, None, None, 'Enables DHT.'),
"download-dir": ('string', 1, None, None, None, 'Set the session download directory.'),
"download-queue-size": ('number', 14, None, None, None, 'Number of slots in the download queue.'),
"download-queue-enabled": ('boolean', 14, None, None, None, 'Enables download queue.'),
"encryption": ('string', 1, None, None, None,
'Set the session encryption mode, one of ``required``, ``preferred`` or ``tolerated``.'),
"idle-seeding-limit": ('number', 10, None, None, None, 'The default seed inactivity limit in minutes.'),
"idle-seeding-limit-enabled": ('boolean', 10, None, None, None, 'Enables the default seed inactivity limit'),
"incomplete-dir": ('string', 7, None, None, None, 'The path to the directory of incomplete transfer data.'),
"incomplete-dir-enabled": ('boolean', 7, None, None, None,
'Enables the incomplete transfer data directory. Otherwise data for incomplete transfers is stored in the download target.'),
"lpd-enabled": ('boolean', 9, None, None, None, 'Enables local peer discovery for public torrents.'),
"peer-limit": ('number', 1, 5, None, 'peer-limit-global', 'Maximum number of peers.'),
"peer-limit-global": ('number', 5, None, 'peer-limit', None, 'Maximum number of peers.'),
"peer-limit-per-torrent": ('number', 5, None, None, None, 'Maximum number of peers per transfer.'),
"pex-allowed": ('boolean', 1, 5, None, 'pex-enabled', 'Allowing PEX in public torrents.'),
"pex-enabled": ('boolean', 5, None, 'pex-allowed', None, 'Allowing PEX in public torrents.'),
"port": ('number', 1, 5, None, 'peer-port', 'Peer port.'),
"peer-port": ('number', 5, None, 'port', None, 'Peer port.'),
"peer-port-random-on-start": (
'boolean', 5, None, None, None, 'Enables randomized peer port on start of Transmission.'),
"port-forwarding-enabled": ('boolean', 1, None, None, None, 'Enables port forwarding.'),
"rename-partial-files": ('boolean', 8, None, None, None, 'Appends ".part" to incomplete files'),
"queue-stalled-minutes": (
'number', 14, None, None, None, 'Number of minutes of idle that marks a transfer as stalled.'),
"queue-stalled-enabled": ('boolean', 14, None, None, None, 'Enable tracking of stalled transfers.'),
"script-torrent-done-enabled": ('boolean', 9, None, None, None, 'Whether or not to call the "done" script.'),
"script-torrent-done-filename": (
'string', 9, None, None, None, 'Filename of the script to run when the transfer is done.'),
"seed-queue-size": ('number', 14, None, None, None, 'Number of slots in the upload queue.'),
"seed-queue-enabled": ('boolean', 14, None, None, None, 'Enables upload queue.'),
"seedRatioLimit": ('double', 5, None, None, None, 'Seed ratio limit. 1.0 means 1:1 download and upload ratio.'),
"seedRatioLimited": ('boolean', 5, None, None, None, 'Enables seed ration limit.'),
"speed-limit-down": ('number', 1, None, None, None, 'Download speed limit (in Kib/s).'),
"speed-limit-down-enabled": ('boolean', 1, None, None, None, 'Enables download speed limiting.'),
"speed-limit-up": ('number', 1, None, None, None, 'Upload speed limit (in Kib/s).'),
"speed-limit-up-enabled": ('boolean', 1, None, None, None, 'Enables upload speed limiting.'),
"start-added-torrents": ('boolean', 9, None, None, None, 'Added torrents will be started right away.'),
"trash-original-torrent-files": (
'boolean', 9, None, None, None, 'The .torrent file of added torrents will be deleted.'),
'utp-enabled': ('boolean', 13, None, None, None, 'Enables Micro Transport Protocol (UTP).'),
},
}
|
ice9js/servo
|
refs/heads/master
|
tests/wpt/css-tests/tools/html5lib/doc/conf.py
|
436
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# html5lib documentation build configuration file, created by
# sphinx-quickstart on Wed May 8 00:04:49 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'html5lib'
copyright = '2006 - 2013, James Graham, Geoffrey Sneddon, and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
sys.path.append(os.path.abspath('..'))
from html5lib import __version__
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'theme']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'html5libdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'html5lib.tex', 'html5lib Documentation',
'James Graham, Geoffrey Sneddon, and contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'html5lib', 'html5lib Documentation',
['James Graham, Geoffrey Sneddon, and contributors'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'html5lib', 'html5lib Documentation',
'James Graham, Geoffrey Sneddon, and contributors', 'html5lib', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
class CExtMock(object):
"""Required for autodoc on readthedocs.org where you cannot build C extensions."""
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return CExtMock()
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
else:
return CExtMock()
try:
import lxml # flake8: noqa
except ImportError:
sys.modules['lxml'] = CExtMock()
sys.modules['lxml.etree'] = CExtMock()
print("warning: lxml modules mocked.")
try:
import genshi # flake8: noqa
except ImportError:
sys.modules['genshi'] = CExtMock()
sys.modules['genshi.core'] = CExtMock()
print("warning: genshi modules mocked.")
|
xuleiboy1234/autoTitle
|
refs/heads/master
|
tensorflow/tensorflow/contrib/distributions/python/ops/mvn_diag.py
|
60
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multivariate Normal distribution classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import mvn_linear_operator as mvn_linop
from tensorflow.python.framework import ops
from tensorflow.python.ops import nn
__all__ = [
"MultivariateNormalDiag",
"MultivariateNormalDiagWithSoftplusScale",
]
class MultivariateNormalDiag(
mvn_linop.MultivariateNormalLinearOperator):
"""The multivariate normal distribution on `R^k`.
The Multivariate Normal distribution is defined over `R^k` and parameterized
by a (batch of) length-`k` `loc` vector (aka "mu") and a (batch of) `k x k`
`scale` matrix; `covariance = scale @ scale.T` where `@` denotes
matrix-multiplication.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; loc, scale) = exp(-0.5 ||y||**2) / Z,
y = inv(scale) @ (x - loc),
Z = (2 pi)**(0.5 k) |det(scale)|,
```
where:
* `loc` is a vector in `R^k`,
* `scale` is a linear operator in `R^{k x k}`, `cov = scale @ scale.T`,
* `Z` denotes the normalization constant, and,
* `||y||**2` denotes the squared Euclidean norm of `y`.
A (non-batch) `scale` matrix is:
```none
scale = diag(scale_diag + scale_identity_multiplier * ones(k))
```
where:
* `scale_diag.shape = [k]`, and,
* `scale_identity_multiplier.shape = []`.
Additional leading dimensions (if any) will index batches.
If both `scale_diag` and `scale_identity_multiplier` are `None`, then
`scale` is the Identity matrix.
The MultivariateNormal distribution is a member of the [location-scale
family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ MultivariateNormal(loc=0, scale=1) # Identity scale, zero shift.
Y = scale @ X + loc
```
#### Examples
```python
ds = tf.contrib.distributions
# Initialize a single 2-variate Gaussian.
mvn = ds.MultivariateNormalDiag(
loc=[1., -1],
scale_diag=[1, 2.])
mvn.mean().eval()
# ==> [1., -1]
mvn.stddev().eval()
# ==> [1., 2]
# Evaluate this on an observation in `R^2`, returning a scalar.
mvn.prob([-1., 0]).eval() # shape: []
# Initialize a 3-batch, 2-variate scaled-identity Gaussian.
mvn = ds.MultivariateNormalDiag(
loc=[1., -1],
scale_identity_multiplier=[1, 2., 3])
mvn.mean().eval() # shape: [3, 2]
# ==> [[1., -1]
# [1, -1],
# [1, -1]]
mvn.stddev().eval() # shape: [3, 2]
# ==> [[1., 1],
# [2, 2],
# [3, 3]]
# Evaluate this on an observation in `R^2`, returning a length-3 vector.
mvn.prob([-1., 0]).eval() # shape: [3]
# Initialize a 2-batch of 3-variate Gaussians.
mvn = ds.MultivariateNormalDiag(
loc=[[1., 2, 3],
           [11, 22, 33]],        # shape: [2, 3]
scale_diag=[[1., 2, 3],
[0.5, 1, 1.5]]) # shape: [2, 3]
  # Evaluate this on two observations, each in `R^3`, returning a length-2
# vector.
x = [[-1., 0, 1],
[-11, 0, 11.]] # shape: [2, 3].
mvn.prob(x).eval() # shape: [2]
```
"""
def __init__(self,
loc=None,
scale_diag=None,
scale_identity_multiplier=None,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalDiag"):
"""Construct Multivariate Normal distribution on `R^k`.
The `batch_shape` is the broadcast shape between `loc` and `scale`
arguments.
The `event_shape` is given by last dimension of the matrix implied by
`scale`. The last dimension of `loc` (if provided) must broadcast with this.
Recall that `covariance = scale @ scale.T`. A (non-batch) `scale` matrix is:
```none
scale = diag(scale_diag + scale_identity_multiplier * ones(k))
```
where:
* `scale_diag.shape = [k]`, and,
* `scale_identity_multiplier.shape = []`.
Additional leading dimensions (if any) will index batches.
If both `scale_diag` and `scale_identity_multiplier` are `None`, then
`scale` is the Identity matrix.
Args:
loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
`b >= 0` and `k` is the event size.
scale_diag: Non-zero, floating-point `Tensor` representing a diagonal
matrix added to `scale`. May have shape `[B1, ..., Bb, k]`, `b >= 0`,
and characterizes `b`-batches of `k x k` diagonal matrices added to
`scale`. When both `scale_identity_multiplier` and `scale_diag` are
`None` then `scale` is the `Identity`.
scale_identity_multiplier: Non-zero, floating-point `Tensor` representing
a scaled-identity-matrix added to `scale`. May have shape
`[B1, ..., Bb]`, `b >= 0`, and characterizes `b`-batches of scaled
`k x k` identity matrices added to `scale`. When both
`scale_identity_multiplier` and `scale_diag` are `None` then `scale` is
the `Identity`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
      ValueError: if at most `scale_identity_multiplier` is specified (i.e.,
        both `loc` and `scale_diag` are `None`), since the event shape
        cannot be inferred.
"""
parameters = locals()
with ops.name_scope(name):
with ops.name_scope("init", values=[
loc, scale_diag, scale_identity_multiplier]):
# No need to validate_args while making diag_scale. The returned
# LinearOperatorDiag has an assert_non_singular method that is called by
# the Bijector.
scale = distribution_util.make_diag_scale(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
validate_args=False,
assert_positive=False)
super(MultivariateNormalDiag, self).__init__(
loc=loc,
scale=scale,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
class MultivariateNormalDiagWithSoftplusScale(MultivariateNormalDiag):
"""MultivariateNormalDiag with `diag_stddev = softplus(diag_stddev)`."""
def __init__(self,
loc,
scale_diag,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalDiagWithSoftplusScale"):
parameters = locals()
with ops.name_scope(name, values=[scale_diag]):
super(MultivariateNormalDiagWithSoftplusScale, self).__init__(
loc=loc,
scale_diag=nn.softplus(scale_diag),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
|
PolymorhicCode/Veil-Evasion
|
refs/heads/master
|
tools/__init__.py
|
12133432
| |
agiliq/django
|
refs/heads/master
|
tests/m2m_signals/__init__.py
|
12133432
| |
eugena/django
|
refs/heads/master
|
tests/expressions_case/__init__.py
|
12133432
| |
hvy/chainer
|
refs/heads/master
|
chainer/functions/pooling/average_pooling_nd.py
|
5
|
import functools
import operator
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import function_node
from chainer.functions.pooling import average_pooling_nd_kernel
from chainer.functions.pooling import pooling_nd
from chainer.utils import conv
from chainer.utils import conv_nd
import chainerx
def _get_conv_slices(
size, k, s, p, cover_all=False, d=1, include_pad=True, dtype='l'):
"""Returns the patch slices.
Returns:
        A tuple of two 1-D :class:`numpy.ndarray`\\ s.
Each represents starting and ending indices of the patches.
"""
n = conv.get_conv_outsize(size, k, s, p, cover_all, d)
starts = -p + numpy.arange(n, dtype=dtype) * s
ends = starts + k
if not include_pad:
starts = numpy.maximum(starts, 0)
ends = numpy.minimum(ends, size)
return starts, ends
class AveragePoolingND(pooling_nd._PoolingND):
"""Average pooling over a set of N-dimensional planes.
.. warning::
This feature is experimental. The interface can change in the future.
"""
def __init__(
self, ndim, ksize, stride=None, pad=0, cover_all=False,
pad_value=0):
if not (pad_value is None or pad_value == 0):
raise ValueError(
'pad_value must be either 0 or None, not {}.'.format(
pad_value))
# TODO(takagi) Support cover_all mode.
if cover_all is True:
raise ValueError('`cover_all` mode is not supported yet.')
super(AveragePoolingND, self).__init__(
ndim, ksize, stride=stride, pad=pad, cover_all=cover_all)
self.pad_value = pad_value
def _get_pooling_width(self, xp, dims, dtype):
width = None
for d, k, s, p in six.moves.zip(
dims, self.ksize, self.stride, self.pad):
starts, ends = _get_conv_slices(
d, k, s, p, cover_all=self.cover_all, include_pad=False,
dtype=dtype)
w = ends - starts
if width is None:
width = w
else:
width = numpy.tensordot(width[..., None], w[None, ...], axes=1)
if xp is cuda.cupy:
width = cuda.cupy.array(width)
return width
def forward_chainerx(self, inputs):
ndim = self.ndim
ksize = self.ksize
stride = self.stride
pad = self.pad
pad_value = self.pad_value
x, = inputs
if x.device.backend.name == 'cuda' and ndim not in (2, 3):
return chainer.Fallback
if pad_value == 0:
pad_mode = 'zero'
elif pad_value is None:
pad_mode = 'ignore'
else:
assert False
y = chainerx.average_pool(x, ksize, stride, pad, pad_mode)
return y,
def forward_cpu(self, inputs):
ksize = self.ksize
stride = self.stride
pad = self.pad
pad_value = self.pad_value
cover_all = self.cover_all
x, = inputs
in_shape = x.shape
in_dtype = x.dtype
col = conv_nd.im2col_nd_cpu(x, ksize, stride, pad, cover_all=cover_all)
# mean along (_, _, k_1, k_2, ..., k_N, _, ..., _)
y_axis = tuple(six.moves.range(2, 2 + len(ksize)))
if pad_value is None:
dims = x.shape[2:]
width = self._get_pooling_width(numpy, dims, x.dtype)
y = col.sum(axis=y_axis) / width
else:
assert pad_value == 0
y = col.mean(axis=y_axis)
width = None
self.width = width
self._in_shape = in_shape
self._in_dtype = in_dtype
return y,
def forward_gpu(self, inputs):
if chainer.should_use_cudnn('>=auto') and 2 <= self.ndim <= 3:
# With cuDNN v3 or greater, use cuDNN implementation for inputs
# with spatial dimensions of two or more.
return self.forward_cudnn(inputs)
ndim = self.ndim
ksize = self.ksize
stride = self.stride
pad = self.pad
pad_value = self.pad_value
cover_all = self.cover_all
x, = inputs
in_shape = x.shape
in_dtype = x.dtype
n, c = in_shape[:2]
idims = in_shape[2:]
odims = tuple(
conv.get_conv_outsize(d, k, s, p, cover_all=cover_all)
for (d, k, s, p) in six.moves.zip(idims, ksize, stride, pad))
# (n, c, y_1, y_2, ..., y_N)
y_shape = (n, c) + odims
y = cuda.cupy.empty(y_shape, dtype=x.dtype)
if pad_value is None:
coeff = self._get_pooling_width(cuda.cupy, idims, x.dtype)
coeff = cuda.cupy.reciprocal(coeff, out=coeff)
else:
assert pad_value == 0
coeff = 1. / functools.reduce(operator.mul, ksize)
in_params, out_params, operation, name = \
average_pooling_nd_kernel.AveragePoolingNDKernelForward.generate(
ndim)
cuda.elementwise(in_params, out_params, operation, name)(
x.reduced_view(),
*(idims + odims + ksize + stride + pad + (coeff, y)))
self.coeff = coeff
self._in_shape = in_shape
self._in_dtype = in_dtype
return y,
def backward(self, indexes, gy):
return AveragePoolingNDGrad(self).apply(gy)
def get_cudnn_pool_mode(self):
if self.pad_value is None:
return cuda.cuda.cudnn.CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING
else:
assert self.pad_value == 0
return cuda.cuda.cudnn.CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING
class AveragePoolingNDGrad(function_node.FunctionNode):
def __init__(self, apoolnd):
self.func = apoolnd
def forward_cpu(self, gys):
func = self.func
pad_value = func.pad_value
ksize = func.ksize
stride = func.stride
pad = func.pad
in_shape = func._in_shape
gy, = gys
idims = in_shape[2:]
odims = gy.shape[2:]
colon = slice(None, None, None)
is_pad_value_none = pad_value is None
if is_pad_value_none:
numpy.divide(gy, func.width, out=gy)
gy_index = (colon, colon) + (None,) * len(idims)
gcol_reps = (1, 1) + ksize + (1,) * len(odims)
gcol = numpy.tile(gy[gy_index], gcol_reps)
gx = conv_nd.col2im_nd_cpu(gcol, stride, pad, idims)
if not is_pad_value_none:
gx /= functools.reduce(operator.mul, ksize)
return gx,
def forward_gpu(self, gys):
func = self.func
if func.is_cudnn_used:
return func.backward_cudnn(gys)
ndim = func.ndim
pad_value = func.pad_value
ksize = func.ksize
stride = func.stride
pad = func.pad
in_shape = func._in_shape
in_dtype = func._in_dtype
is_pad_value_none = pad_value is None
gy, = gys
n, c = in_shape[:2]
idims = in_shape[2:]
odims = gy.shape[2:]
if is_pad_value_none:
# This conversion from chainerx to cupy exists here for
# double backward of chainerx on cuda.
coeff = backend.from_chx(func.coeff)
gy *= coeff
gx = cuda.cupy.empty(in_shape, in_dtype)
in_params, out_params, operation, name = \
average_pooling_nd_kernel.AveragePoolingNDKernelBackward.generate(
ndim)
cuda.elementwise(in_params, out_params, operation, name)(
gy.reduced_view(),
*(idims + odims + ksize + stride + pad + (gx,)))
if not is_pad_value_none:
gx /= functools.reduce(operator.mul, ksize)
return gx,
def backward(self, indexes, grad_outputs):
func = self.func
ndim = func.ndim
pad_value = func.pad_value
ksize = func.ksize
stride = func.stride
pad = func.pad
return AveragePoolingND(
ndim, ksize, stride, pad, cover_all=False, pad_value=pad_value
).apply(grad_outputs)
def average_pooling_nd(x, ksize, stride=None, pad=0, pad_value=0):
"""N-dimensionally spatial average pooling function.
.. warning::
This feature is experimental. The interface can change in the future.
This function provides a N-dimensionally generalized version of
:func:`~chainer.functions.average_pooling_2d`. This acts similarly to
:func:`~chainer.functions.convolution_nd`, but it computes the average of
input spatial patch for each channel without any parameter instead of
computing the inner products.
Args:
x(~chainer.Variable): Input variable.
ksize (int or tuple of ints): Size of pooling window. ``ksize=k`` and
``ksize=(k, k, ..., k)`` are equivalent.
stride (int or tuple of ints or None): Stride of pooling applications.
``stride=s`` and ``stride=(s, s, ..., s)`` are equivalent. If
``None`` is specified, then it uses same stride as the pooling
window size.
pad (int or tuple of ints): Spatial padding width for the input array.
``pad=p`` and ``pad=(p, p, ..., p)`` are equivalent.
pad_value (0 or None):
Value to fill the padded region when calculating average.
If ``None`` is specified, such region is ignored.
The default value is ``0``, therefore the averages are biased
towards zero.
Returns:
~chainer.Variable: Output variable.
.. note::
        This function currently does not support ``cover_all`` mode, unlike
        :func:`max_pooling_nd`. Average pooling runs in non-cover-all mode.
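    .. admonition:: Example

        An illustrative sketch (added for clarity; assumes ``numpy`` is
        importable in the doctest namespace, and output formatting may
        vary slightly):

        >>> x = numpy.arange(16, dtype=numpy.float32).reshape(1, 1, 4, 4)
        >>> y = average_pooling_nd(x, 2, stride=2)
        >>> y.shape
        (1, 1, 2, 2)
        >>> y.array
        array([[[[ 2.5,  4.5],
                 [10.5, 12.5]]]], dtype=float32)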
"""
ndim = len(x.shape[2:])
return AveragePoolingND(
ndim, ksize, stride=stride, pad=pad, pad_value=pad_value
).apply((x,))[0]
def average_pooling_1d(x, ksize, stride=None, pad=0, pad_value=0):
"""1-dimensional spatial average pooling function.
.. warning::
This feature is experimental. The interface can change in the future.
.. note::
This function calls :func:`~chainer.functions.average_pooling_nd`
internally, so see the details of the behavior in
the documentation of :func:`~chainer.functions.average_pooling_nd`.
"""
if len(x.shape[2:]) != 1:
raise ValueError(
'The number of dimensions under channel dimension of the input '
'\'x\' should be 1. But the actual ndim was {}.'.format(
len(x.shape[2:])))
return average_pooling_nd(x, ksize, stride, pad, pad_value)
def average_pooling_3d(x, ksize, stride=None, pad=0, pad_value=0):
"""3-dimensional spatial average pooling function.
.. warning::
This feature is experimental. The interface can change in the future.
.. note::
This function calls :func:`~chainer.functions.average_pooling_nd`
internally, so see the details of the behavior in
the documentation of :func:`~chainer.functions.average_pooling_nd`.
"""
if len(x.shape[2:]) != 3:
raise ValueError(
'The number of dimensions under channel dimension of the input '
'\'x\' should be 3. But the actual ndim was {}.'.format(
len(x.shape[2:])))
return average_pooling_nd(x, ksize, stride, pad, pad_value)
|
persiaAziz/trafficserver
|
refs/heads/master
|
tests/gold_tests/headers/normalize_ae_observer.py
|
8
|
'''
Extract the Accept-Encoding header values from incoming requests and store them in a log file for later verification.
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
log = open('normalize_ae.log', 'w')
def observe(headers):
seen = False
for h in headers.items():
if h[0] == "X-Au-Test":
log.write("X-Au-Test: {}\n".format(h[1]))
if h[0] == "Accept-Encoding":
log.write("{}\n".format(h[1]))
seen = True
if not seen:
log.write("ACCEPT-ENCODING MISSING\n")
log.write("-\n")
log.flush()
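# Illustrative sketch of the resulting log for two requests, the first
# sending "Accept-Encoding: gzip" and the second sending no Accept-Encoding
# header at all:
#
#   gzip
#   -
#   ACCEPT-ENCODING MISSING
#   -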
Hooks.register(Hooks.ReadRequestHook, observe)
|
garg10may/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/ccc.py
|
107
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
qualities,
unified_strdate,
)
class CCCIE(InfoExtractor):
IE_NAME = 'media.ccc.de'
_VALID_URL = r'https?://(?:www\.)?media\.ccc\.de/[^?#]+/[^?#/]*?_(?P<id>[0-9]{8,})._[^?#/]*\.html'
_TEST = {
'url': 'http://media.ccc.de/browse/congress/2013/30C3_-_5443_-_en_-_saal_g_-_201312281830_-_introduction_to_processor_design_-_byterazor.html#video',
'md5': '3a1eda8f3a29515d27f5adb967d7e740',
'info_dict': {
'id': '20131228183',
'ext': 'mp4',
'title': 'Introduction to Processor Design',
'description': 'md5:5ddbf8c734800267f2cee4eab187bc1b',
'thumbnail': 're:^https?://.*\.jpg$',
'view_count': int,
'upload_date': '20131229',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
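        # qualities() (from ..utils) builds a ranking function: format ids
        # appearing later in the list are preferred, unknown ids rank lowest.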
if self._downloader.params.get('prefer_free_formats'):
preference = qualities(['mp3', 'opus', 'mp4-lq', 'webm-lq', 'h264-sd', 'mp4-sd', 'webm-sd', 'mp4', 'webm', 'mp4-hd', 'h264-hd', 'webm-hd'])
else:
preference = qualities(['opus', 'mp3', 'webm-lq', 'mp4-lq', 'webm-sd', 'h264-sd', 'mp4-sd', 'webm', 'mp4', 'webm-hd', 'mp4-hd', 'h264-hd'])
title = self._html_search_regex(
r'(?s)<h1>(.*?)</h1>', webpage, 'title')
description = self._html_search_regex(
r"(?s)<p class='description'>(.*?)</p>",
webpage, 'description', fatal=False)
upload_date = unified_strdate(self._html_search_regex(
r"(?s)<span class='[^']*fa-calendar-o'></span>(.*?)</li>",
webpage, 'upload date', fatal=False))
view_count = int_or_none(self._html_search_regex(
r"(?s)<span class='[^']*fa-eye'></span>(.*?)</li>",
webpage, 'view count', fatal=False))
matches = re.finditer(r'''(?xs)
<(?:span|div)\s+class='label\s+filetype'>(?P<format>.*?)</(?:span|div)>\s*
<a\s+download\s+href='(?P<http_url>[^']+)'>\s*
(?:
.*?
<a\s+href='(?P<torrent_url>[^']+\.torrent)'
)?''', webpage)
formats = []
for m in matches:
format = m.group('format')
format_id = self._search_regex(
r'.*/([a-z0-9_-]+)/[^/]*$',
m.group('http_url'), 'format id', default=None)
            vcodec = 'h264' if format_id and 'h264' in format_id else (
                'none' if format_id in ('mp3', 'opus') else None
            )
formats.append({
'format_id': format_id,
'format': format,
'url': m.group('http_url'),
'vcodec': vcodec,
'preference': preference(format_id),
})
if m.group('torrent_url'):
formats.append({
'format_id': 'torrent-%s' % (format if format_id is None else format_id),
'format': '%s (torrent)' % format,
'proto': 'torrent',
'format_note': '(unsupported; will just download the .torrent file)',
'vcodec': vcodec,
'preference': -100 + preference(format_id),
'url': m.group('torrent_url'),
})
self._sort_formats(formats)
thumbnail = self._html_search_regex(
r"<video.*?poster='([^']+)'", webpage, 'thumbnail', fatal=False)
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'view_count': view_count,
'upload_date': upload_date,
'formats': formats,
}
|
shenyy/lily2-gem5
|
refs/heads/master
|
src/arch/x86/isa/insts/general_purpose/data_transfer/xchg.py
|
89
|
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
# All the memory versions need to use LOCK, regardless of whether it was set
def macroop XCHG_R_R
{
# Use the xor trick instead of moves to reduce register pressure.
# This probably doesn't make much of a difference, but it's easy.
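    # Worked example (illustrative): with reg=A and regm=B, the three xors
    # compute reg=A^B, then regm=(A^B)^B=A, then reg=(A^B)^A=B.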
xor reg, reg, regm
xor regm, regm, reg
xor reg, reg, regm
};
def macroop XCHG_R_M
{
mfence
ldstl t1, seg, sib, disp
stul reg, seg, sib, disp
mfence
mov reg, reg, t1
};
def macroop XCHG_R_P
{
rdip t7
mfence
ldstl t1, seg, riprel, disp
stul reg, seg, riprel, disp
mfence
mov reg, reg, t1
};
def macroop XCHG_M_R
{
mfence
ldstl t1, seg, sib, disp
stul reg, seg, sib, disp
mfence
mov reg, reg, t1
};
def macroop XCHG_P_R
{
rdip t7
mfence
ldstl t1, seg, riprel, disp
stul reg, seg, riprel, disp
mfence
mov reg, reg, t1
};
def macroop XCHG_LOCKED_M_R
{
mfence
ldstl t1, seg, sib, disp
stul reg, seg, sib, disp
mfence
mov reg, reg, t1
};
def macroop XCHG_LOCKED_P_R
{
rdip t7
mfence
ldstl t1, seg, riprel, disp
stul reg, seg, riprel, disp
mfence
mov reg, reg, t1
};
'''
|
40223151/2014c2g9
|
refs/heads/master
|
wsgi/static/reeborg/src/libraries/brython/Lib/platform.py
|
11
|
#! /usr/bin/python3.3
""" This module tries to retrieve as much platform-identifying data as
possible. It makes this information available via function APIs.
If called from the command line, it prints the platform
    information concatenated as a single string to stdout. The output
    format is usable as part of a filename.
"""
# This module is maintained by Marc-Andre Lemburg <mal@egenix.com>.
# If you find problems, please submit bug reports/patches via the
# Python bug tracker (http://bugs.python.org) and assign them to "lemburg".
#
# Still needed:
# * more support for WinCE
# * support for MS-DOS (PythonDX ?)
# * support for Amiga and other still unsupported platforms running Python
# * support for additional Linux distributions
#
# Many thanks to all those who helped adding platform-specific
# checks (in no particular order):
#
# Charles G Waldman, David Arnold, Gordon McMillan, Ben Darnell,
# Jeff Bauer, Cliff Crawford, Ivan Van Laningham, Josef
# Betancourt, Randall Hopper, Karl Putland, John Farrell, Greg
# Andruk, Just van Rossum, Thomas Heller, Mark R. Levinson, Mark
# Hammond, Bill Tutt, Hans Nowak, Uwe Zessin (OpenVMS support),
# Colin Kong, Trent Mick, Guido van Rossum, Anthony Baxter
#
# History:
#
# <see CVS and SVN checkin messages for history>
#
# 1.0.7 - added DEV_NULL
# 1.0.6 - added linux_distribution()
# 1.0.5 - fixed Java support to allow running the module on Jython
# 1.0.4 - added IronPython support
# 1.0.3 - added normalization of Windows system name
# 1.0.2 - added more Windows support
# 1.0.1 - reformatted to make doc.py happy
# 1.0.0 - reformatted a bit and checked into Python CVS
# 0.8.0 - added sys.version parser and various new access
# APIs (python_version(), python_compiler(), etc.)
# 0.7.2 - fixed architecture() to use sizeof(pointer) where available
# 0.7.1 - added support for Caldera OpenLinux
# 0.7.0 - some fixes for WinCE; untabified the source file
# 0.6.2 - support for OpenVMS - requires version 1.5.2-V006 or higher and
# vms_lib.getsyi() configured
# 0.6.1 - added code to prevent 'uname -p' on platforms which are
# known not to support it
# 0.6.0 - fixed win32_ver() to hopefully work on Win95,98,NT and Win2k;
# did some cleanup of the interfaces - some APIs have changed
# 0.5.5 - fixed another typo in the MacOS code... should have
# used more coffee today ;-)
# 0.5.4 - fixed a few typos in the MacOS code
# 0.5.3 - added experimental MacOS support; added better popen()
# workarounds in _syscmd_ver() -- still not 100% elegant
# though
# 0.5.2 - fixed uname() to return '' instead of 'unknown' in all
# return values (the system uname command tends to return
#         'unknown' instead of just leaving the field empty)
# 0.5.1 - included code for slackware dist; added exception handlers
# to cover up situations where platforms don't have os.popen
# (e.g. Mac) or fail on socket.gethostname(); fixed libc
# detection RE
# 0.5.0 - changed the API names referring to system commands to *syscmd*;
# added java_ver(); made syscmd_ver() a private
# API (was system_ver() in previous versions) -- use uname()
# instead; extended the win32_ver() to also return processor
# type information
# 0.4.0 - added win32_ver() and modified the platform() output for WinXX
# 0.3.4 - fixed a bug in _follow_symlinks()
# 0.3.3 - fixed popen() and "file" command invocation bugs
# 0.3.2 - added architecture() API and support for it in platform()
# 0.3.1 - fixed syscmd_ver() RE to support Windows NT
# 0.3.0 - added system alias support
# 0.2.3 - removed 'wince' again... oh well.
# 0.2.2 - added 'wince' to syscmd_ver() supported platforms
# 0.2.1 - added cache logic and changed the platform string format
# 0.2.0 - changed the API to use functions instead of module globals
# since some action take too long to be run on module import
# 0.1.0 - first release
#
# You can always get the latest version of this module at:
#
# http://www.egenix.com/files/python/platform.py
#
# If that URL should fail, try contacting the author.
__copyright__ = """
Copyright (c) 1999-2000, Marc-Andre Lemburg; mailto:mal@lemburg.com
Copyright (c) 2000-2010, eGenix.com Software GmbH; mailto:info@egenix.com
Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee or royalty is hereby granted,
provided that the above copyright notice appear in all copies and that
both that copyright notice and this permission notice appear in
supporting documentation or portions thereof, including modifications,
that you make.
EGENIX.COM SOFTWARE GMBH DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
"""
__version__ = '1.0.7'
import collections
import sys, os, re, subprocess
### Globals & Constants
# Determine the platform's /dev/null device
try:
DEV_NULL = os.devnull
except AttributeError:
# os.devnull was added in Python 2.4, so emulate it for earlier
# Python versions
if sys.platform in ('dos','win32','win16','os2'):
# Use the old CP/M NUL as device name
DEV_NULL = 'NUL'
else:
# Standard Unix uses /dev/null
DEV_NULL = '/dev/null'
### Platform specific APIs
_libc_search = re.compile(b'(__libc_init)'
b'|'
b'(GLIBC_([0-9.]+))'
b'|'
br'(libc(_\w+)?\.so(?:\.(\d[0-9.]*))?)', re.ASCII)
def libc_ver(executable=sys.executable,lib='',version='',
chunksize=16384):
""" Tries to determine the libc version that the file executable
(which defaults to the Python interpreter) is linked against.
Returns a tuple of strings (lib,version) which default to the
given parameters in case the lookup fails.
Note that the function has intimate knowledge of how different
libc versions add symbols to the executable and thus is probably
        only usable for executables compiled using gcc.
The file is read and scanned in chunks of chunksize bytes.
"""
if hasattr(os.path, 'realpath'):
# Python 2.2 introduced os.path.realpath(); it is used
# here to work around problems with Cygwin not being
# able to open symlinks for reading
executable = os.path.realpath(executable)
f = open(executable,'rb')
binary = f.read(chunksize)
pos = 0
while 1:
if b'libc' in binary or b'GLIBC' in binary:
m = _libc_search.search(binary,pos)
else:
m = None
if not m:
binary = f.read(chunksize)
if not binary:
break
pos = 0
continue
libcinit,glibc,glibcversion,so,threads,soversion = [
s.decode('latin1') if s is not None else s
for s in m.groups()]
if libcinit and not lib:
lib = 'libc'
elif glibc:
if lib != 'glibc':
lib = 'glibc'
version = glibcversion
elif glibcversion > version:
version = glibcversion
elif so:
if lib != 'glibc':
lib = 'libc'
if soversion and soversion > version:
version = soversion
if threads and version[-len(threads):] != threads:
version = version + threads
pos = m.end()
f.close()
return lib,version
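# Usage sketch (Unix-only; actual values depend on the system):
#     >>> libc_ver()    # e.g. ('glibc', '2.17')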
def _dist_try_harder(distname,version,id):
""" Tries some special tricks to get the distribution
information in case the default method fails.
Currently supports older SuSE Linux, Caldera OpenLinux and
Slackware Linux distributions.
"""
if os.path.exists('/var/adm/inst-log/info'):
# SuSE Linux stores distribution information in that file
distname = 'SuSE'
for line in open('/var/adm/inst-log/info'):
tv = line.split()
if len(tv) == 2:
tag,value = tv
else:
continue
if tag == 'MIN_DIST_VERSION':
version = value.strip()
elif tag == 'DIST_IDENT':
values = value.split('-')
id = values[2]
return distname,version,id
if os.path.exists('/etc/.installed'):
# Caldera OpenLinux has some infos in that file (thanks to Colin Kong)
for line in open('/etc/.installed'):
pkg = line.split('-')
if len(pkg) >= 2 and pkg[0] == 'OpenLinux':
# XXX does Caldera support non Intel platforms ? If yes,
# where can we find the needed id ?
return 'OpenLinux',pkg[1],id
if os.path.isdir('/usr/lib/setup'):
# Check for slackware verson tag file (thanks to Greg Andruk)
verfiles = os.listdir('/usr/lib/setup')
for n in range(len(verfiles)-1, -1, -1):
if verfiles[n][:14] != 'slack-version-':
del verfiles[n]
if verfiles:
verfiles.sort()
distname = 'slackware'
version = verfiles[-1][14:]
return distname,version,id
return distname,version,id
_release_filename = re.compile(r'(\w+)[-_](release|version)', re.ASCII)
_lsb_release_version = re.compile(r'(.+)'
                                  r' release '
                                  r'([\d.]+)'
                                  r'[^(]*(?:\((.+)\))?', re.ASCII)
_release_version = re.compile(r'([^0-9]+)'
                              r'(?: release )?'
                              r'([\d.]+)'
                              r'[^(]*(?:\((.+)\))?', re.ASCII)
# See also http://www.novell.com/coolsolutions/feature/11251.html
# and http://linuxmafia.com/faq/Admin/release-files.html
# and http://data.linux-ntfs.org/rpm/whichrpm
# and http://www.die.net/doc/linux/man/man1/lsb_release.1.html
_supported_dists = (
'SuSE', 'debian', 'fedora', 'redhat', 'centos',
'mandrake', 'mandriva', 'rocks', 'slackware', 'yellowdog', 'gentoo',
'UnitedLinux', 'turbolinux', 'arch', 'mageia', 'Ubuntu')
def _parse_release_file(firstline):
# Default to empty 'version' and 'id' strings. Both defaults are used
# when 'firstline' is empty. 'id' defaults to empty when an id can not
# be deduced.
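    # Brython note: the CPython parsing logic is stubbed out here, so the
    # empty defaults are always returned.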
return '', '', ''
_distributor_id_file_re = re.compile(r"(?:DISTRIB_ID\s*=)\s*(.*)", re.I)
_release_file_re = re.compile(r"(?:DISTRIB_RELEASE\s*=)\s*(.*)", re.I)
_codename_file_re = re.compile(r"(?:DISTRIB_CODENAME\s*=)\s*(.*)", re.I)
def linux_distribution(distname='', version='', id='',
supported_dists=_supported_dists,
full_distribution_name=1):
""" Tries to determine the name of the Linux OS distribution name.
The function first looks for a distribution release file in
/etc and then reverts to _dist_try_harder() in case no
suitable files are found.
supported_dists may be given to define the set of Linux
distributions to look for. It defaults to a list of currently
supported Linux distributions identified by their release file
name.
If full_distribution_name is true (default), the full
distribution read from the OS is returned. Otherwise the short
name taken from supported_dists is used.
Returns a tuple (distname,version,id) which default to the
args given as parameters.
"""
# check for the Debian/Ubuntu /etc/lsb-release file first, needed so
# that the distribution doesn't get identified as Debian.
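    # Brython note: the detection logic is stubbed out in this port, so the
    # defaults passed in are returned unchanged.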
return distname, version, id
# To maintain backwards compatibility:
def dist(distname='',version='',id='',
supported_dists=_supported_dists):
""" Tries to determine the name of the Linux OS distribution name.
The function first looks for a distribution release file in
/etc and then reverts to _dist_try_harder() in case no
suitable files are found.
Returns a tuple (distname,version,id) which default to the
args given as parameters.
"""
return linux_distribution(distname, version, id,
supported_dists=supported_dists,
full_distribution_name=0)
def popen(cmd, mode='r', bufsize=-1):
""" Portable popen() interface.
"""
import warnings
warnings.warn('use os.popen instead', DeprecationWarning, stacklevel=2)
return os.popen(cmd, mode, bufsize)
def _norm_version(version, build=''):
""" Normalize the version and build strings and return a single
version string using the format major.minor.build (or patchlevel).
"""
    # Brython keeps a simplified stub: return the interpreter's major.minor
    # with a zero build component (sys.version_info avoids indexing into the
    # sys.version string, which would yield '3' and '.').
    return '%s.%s.0' % (sys.version_info[0], sys.version_info[1])
_ver_output = re.compile(r'(?:([\w ]+) ([\w.]+) '
                         r'.*'
                         r'\[.* ([\d.]+)\])')
# Examples of VER command output:
#
# Windows 2000: Microsoft Windows 2000 [Version 5.00.2195]
# Windows XP: Microsoft Windows XP [Version 5.1.2600]
# Windows Vista: Microsoft Windows [Version 6.0.6002]
#
# Note that the "Version" string gets localized on different
# Windows versions.
def _syscmd_ver(system='', release='', version='',
supported_platforms=('win32','win16','dos','os2')):
""" Tries to figure out the OS version used and returns
a tuple (system,release,version).
It uses the "ver" shell command for this which is known
        to exist on Windows, DOS and OS/2. XXX Others too ?
In case this fails, the given parameters are used as
defaults.
"""
if sys.platform not in supported_platforms:
return system,release,version
# Try some common cmd strings
for cmd in ('ver','command /c ver','cmd /c ver'):
try:
pipe = popen(cmd)
info = pipe.read()
if pipe.close():
raise os.error('command failed')
# XXX How can I suppress shell errors from being written
# to stderr ?
except os.error as why:
#print 'Command %s failed: %s' % (cmd,why)
continue
except IOError as why:
#print 'Command %s failed: %s' % (cmd,why)
continue
else:
break
else:
return system,release,version
# Parse the output
info = info.strip()
m = _ver_output.match(info)
if m is not None:
system,release,version = m.groups()
# Strip trailing dots from version and release
if release[-1] == '.':
release = release[:-1]
if version[-1] == '.':
version = version[:-1]
# Normalize the version and build strings (eliminating additional
# zeros)
version = _norm_version(version)
return system,release,version
def _win32_getvalue(key,name,default=''):
""" Read a value for name from the registry key.
In case this fails, default is returned.
"""
try:
# Use win32api if available
from win32api import RegQueryValueEx
except ImportError:
# On Python 2.0 and later, emulate using winreg
import winreg
RegQueryValueEx = winreg.QueryValueEx
try:
return RegQueryValueEx(key,name)
except:
return default
def win32_ver(release='',version='',csd='',ptype=''):
""" Get additional version information from the Windows Registry
and return a tuple (version,csd,ptype) referring to version
number, CSD level (service pack), and OS type (multi/single
processor).
As a hint: ptype returns 'Uniprocessor Free' on single
processor NT machines and 'Multiprocessor Free' on multi
processor machines. The 'Free' refers to the OS version being
free of debugging code. It could also state 'Checked' which
means the OS version uses debugging code, i.e. code that
checks arguments, ranges, etc. (Thomas Heller).
Note: this function works best with Mark Hammond's win32
package installed, but also on Python 2.3 and later. It
obviously only runs on Win32 compatible platforms.
"""
# XXX Is there any way to find out the processor type on WinXX ?
# XXX Is win32 available on Windows CE ?
#
# Adapted from code posted by Karl Putland to comp.lang.python.
#
# The mappings between reg. values and release names can be found
# here: http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfo_str.asp
# Import the needed APIs
try:
import win32api
from win32api import RegQueryValueEx, RegOpenKeyEx, \
RegCloseKey, GetVersionEx
from win32con import HKEY_LOCAL_MACHINE, VER_PLATFORM_WIN32_NT, \
VER_PLATFORM_WIN32_WINDOWS, VER_NT_WORKSTATION
except ImportError:
# Emulate the win32api module using Python APIs
try:
sys.getwindowsversion
except AttributeError:
# No emulation possible, so return the defaults...
return release,version,csd,ptype
else:
# Emulation using winreg (added in Python 2.0) and
# sys.getwindowsversion() (added in Python 2.3)
import winreg
GetVersionEx = sys.getwindowsversion
RegQueryValueEx = winreg.QueryValueEx
RegOpenKeyEx = winreg.OpenKeyEx
RegCloseKey = winreg.CloseKey
HKEY_LOCAL_MACHINE = winreg.HKEY_LOCAL_MACHINE
VER_PLATFORM_WIN32_WINDOWS = 1
VER_PLATFORM_WIN32_NT = 2
VER_NT_WORKSTATION = 1
VER_NT_SERVER = 3
REG_SZ = 1
# Find out the registry key and some general version infos
winver = GetVersionEx()
maj,min,buildno,plat,csd = winver
version = '%i.%i.%i' % (maj,min,buildno & 0xFFFF)
if hasattr(winver, "service_pack"):
if winver.service_pack != "":
csd = 'SP%s' % winver.service_pack_major
else:
if csd[:13] == 'Service Pack ':
csd = 'SP' + csd[13:]
if plat == VER_PLATFORM_WIN32_WINDOWS:
regkey = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion'
# Try to guess the release name
if maj == 4:
if min == 0:
release = '95'
elif min == 10:
release = '98'
elif min == 90:
release = 'Me'
else:
release = 'postMe'
elif maj == 5:
release = '2000'
elif plat == VER_PLATFORM_WIN32_NT:
regkey = 'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion'
if maj <= 4:
release = 'NT'
elif maj == 5:
if min == 0:
release = '2000'
elif min == 1:
release = 'XP'
elif min == 2:
release = '2003Server'
else:
release = 'post2003'
elif maj == 6:
if hasattr(winver, "product_type"):
product_type = winver.product_type
else:
product_type = VER_NT_WORKSTATION
# Without an OSVERSIONINFOEX capable sys.getwindowsversion(),
# or help from the registry, we cannot properly identify
# non-workstation versions.
try:
key = RegOpenKeyEx(HKEY_LOCAL_MACHINE, regkey)
name, type = RegQueryValueEx(key, "ProductName")
# Discard any type that isn't REG_SZ
if type == REG_SZ and name.find("Server") != -1:
product_type = VER_NT_SERVER
except WindowsError:
# Use default of VER_NT_WORKSTATION
pass
if min == 0:
if product_type == VER_NT_WORKSTATION:
release = 'Vista'
else:
release = '2008Server'
elif min == 1:
if product_type == VER_NT_WORKSTATION:
release = '7'
else:
release = '2008ServerR2'
elif min == 2:
if product_type == VER_NT_WORKSTATION:
release = '8'
else:
release = '2012Server'
else:
release = 'post2012Server'
else:
if not release:
# E.g. Win3.1 with win32s
release = '%i.%i' % (maj,min)
return release,version,csd,ptype
# Open the registry key
try:
keyCurVer = RegOpenKeyEx(HKEY_LOCAL_MACHINE, regkey)
# Get a value to make sure the key exists...
RegQueryValueEx(keyCurVer, 'SystemRoot')
except:
return release,version,csd,ptype
# Parse values
#subversion = _win32_getvalue(keyCurVer,
# 'SubVersionNumber',
# ('',1))[0]
#if subversion:
# release = release + subversion # 95a, 95b, etc.
build = _win32_getvalue(keyCurVer,
'CurrentBuildNumber',
('',1))[0]
ptype = _win32_getvalue(keyCurVer,
'CurrentType',
(ptype,1))[0]
# Normalize version
version = _norm_version(version,build)
# Close key
RegCloseKey(keyCurVer)
return release,version,csd,ptype
def _mac_ver_lookup(selectors,default=None):
from _gestalt import gestalt
l = []
append = l.append
for selector in selectors:
try:
append(gestalt(selector))
except (RuntimeError, OSError):
append(default)
return l
def _bcd2str(bcd):
return hex(bcd)[2:]
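# e.g. _bcd2str(0x92) == '92'; gestalt 'sysv' values are BCD-encoded, so
# 0x0920 decodes to release '9.2.0' in _mac_ver_gestalt() below.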
def _mac_ver_gestalt():
"""
Thanks to Mark R. Levinson for mailing documentation links and
code examples for this function. Documentation for the
gestalt() API is available online at:
http://www.rgaros.nl/gestalt/
"""
# Check whether the version info module is available
try:
import _gestalt
except ImportError:
return None
# Get the infos
sysv, sysa = _mac_ver_lookup(('sysv','sysa'))
# Decode the infos
if sysv:
major = (sysv & 0xFF00) >> 8
minor = (sysv & 0x00F0) >> 4
patch = (sysv & 0x000F)
if (major, minor) >= (10, 4):
            # the 'sysv' gestalt cannot return patchlevels
# higher than 9. Apple introduced 3 new
# gestalt codes in 10.4 to deal with this
# issue (needed because patch levels can
# run higher than 9, such as 10.4.11)
major,minor,patch = _mac_ver_lookup(('sys1','sys2','sys3'))
release = '%i.%i.%i' %(major, minor, patch)
else:
release = '%s.%i.%i' % (_bcd2str(major),minor,patch)
if sysa:
machine = {0x1: '68k',
0x2: 'PowerPC',
0xa: 'i386'}.get(sysa,'')
versioninfo=('', '', '')
return release,versioninfo,machine
def _mac_ver_xml():
fn = '/System/Library/CoreServices/SystemVersion.plist'
if not os.path.exists(fn):
return None
try:
import plistlib
except ImportError:
return None
pl = plistlib.readPlist(fn)
release = pl['ProductVersion']
versioninfo=('', '', '')
machine = os.uname().machine
if machine in ('ppc', 'Power Macintosh'):
# for compatibility with the gestalt based code
machine = 'PowerPC'
return release,versioninfo,machine
def mac_ver(release='',versioninfo=('','',''),machine=''):
""" Get MacOS version information and return it as tuple (release,
versioninfo, machine) with versioninfo being a tuple (version,
dev_stage, non_release_version).
        Entries which cannot be determined are set to the parameter values
which default to ''. All tuple entries are strings.
"""
# First try reading the information from an XML file which should
# always be present
info = _mac_ver_xml()
if info is not None:
return info
# If that doesn't work for some reason fall back to reading the
# information using gestalt calls.
info = _mac_ver_gestalt()
if info is not None:
return info
# If that also doesn't work return the default values
return release,versioninfo,machine
def _java_getprop(name,default):
from java.lang import System
try:
value = System.getProperty(name)
if value is None:
return default
return value
except AttributeError:
return default
def java_ver(release='',vendor='',vminfo=('','',''),osinfo=('','','')):
""" Version interface for Jython.
Returns a tuple (release,vendor,vminfo,osinfo) with vminfo being
a tuple (vm_name,vm_release,vm_vendor) and osinfo being a
tuple (os_name,os_version,os_arch).
Values which cannot be determined are set to the defaults
given as parameters (which all default to '').
"""
# Import the needed APIs
try:
import java.lang
except ImportError:
return release,vendor,vminfo,osinfo
vendor = _java_getprop('java.vendor', vendor)
release = _java_getprop('java.version', release)
vm_name, vm_release, vm_vendor = vminfo
vm_name = _java_getprop('java.vm.name', vm_name)
vm_vendor = _java_getprop('java.vm.vendor', vm_vendor)
vm_release = _java_getprop('java.vm.version', vm_release)
vminfo = vm_name, vm_release, vm_vendor
os_name, os_version, os_arch = osinfo
os_arch = _java_getprop('java.os.arch', os_arch)
os_name = _java_getprop('java.os.name', os_name)
os_version = _java_getprop('java.os.version', os_version)
osinfo = os_name, os_version, os_arch
return release, vendor, vminfo, osinfo
### System name aliasing
def system_alias(system,release,version):
""" Returns (system,release,version) aliased to common
marketing names used for some systems.
It also does some reordering of the information in some cases
where it would otherwise cause confusion.
"""
if system == 'Rhapsody':
# Apple's BSD derivative
# XXX How can we determine the marketing release number ?
return 'MacOS X Server',system+release,version
elif system == 'SunOS':
# Sun's OS
if release < '5':
# These releases use the old name SunOS
return system,release,version
# Modify release (marketing release = SunOS release - 3)
l = release.split('.')
if l:
try:
major = int(l[0])
except ValueError:
pass
else:
major = major - 3
l[0] = str(major)
release = '.'.join(l)
if release < '6':
system = 'Solaris'
else:
# XXX Whatever the new SunOS marketing name is...
system = 'Solaris'
elif system == 'IRIX64':
# IRIX reports IRIX64 on platforms with 64-bit support; yet it
# is really a version and not a different platform, since 32-bit
# apps are also supported..
system = 'IRIX'
if version:
version = version + ' (64bit)'
else:
version = '64bit'
elif system in ('win32','win16'):
# In case one of the other tricks
system = 'Windows'
return system,release,version
### Various internal helpers
def _platform(*args):
""" Helper to format the platform string in a filename
compatible format e.g. "system-version-machine".
"""
# Format the platform string
platform = '-'.join(x.strip() for x in filter(len, args))
# Cleanup some possible filename obstacles...
platform = platform.replace(' ','_')
platform = platform.replace('/','-')
platform = platform.replace('\\','-')
platform = platform.replace(':','-')
platform = platform.replace(';','-')
platform = platform.replace('"','-')
platform = platform.replace('(','-')
platform = platform.replace(')','-')
# No need to report 'unknown' information...
platform = platform.replace('unknown','')
# Fold '--'s and remove trailing '-'
while 1:
cleaned = platform.replace('--','-')
if cleaned == platform:
break
platform = cleaned
while platform[-1] == '-':
platform = platform[:-1]
return platform
def _node(default=''):
""" Helper to determine the node name of this machine.
"""
try:
import socket
except ImportError:
# No sockets...
return default
try:
return socket.gethostname()
except socket.error:
# Still not working...
return default
def _follow_symlinks(filepath):
""" In case filepath is a symlink, follow it until a
real file is reached.
"""
filepath = os.path.abspath(filepath)
while os.path.islink(filepath):
filepath = os.path.normpath(
os.path.join(os.path.dirname(filepath),os.readlink(filepath)))
return filepath
def _syscmd_uname(option,default=''):
""" Interface to the system's uname command.
"""
if sys.platform in ('dos','win32','win16','os2'):
# XXX Others too ?
return default
try:
f = os.popen('uname %s 2> %s' % (option, DEV_NULL))
except (AttributeError,os.error):
return default
output = f.read().strip()
rc = f.close()
if not output or rc:
return default
else:
return output
def _syscmd_file(target,default=''):
""" Interface to the system's file command.
The function uses the -b option of the file command to have it
omit the filename in its output. Follow the symlinks. It returns
default in case the command should fail.
"""
if sys.platform in ('dos','win32','win16','os2'):
# XXX Others too ?
return default
target = _follow_symlinks(target)
try:
proc = subprocess.Popen(['file', target],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
except (AttributeError,os.error):
return default
output = proc.communicate()[0].decode('latin-1')
rc = proc.wait()
if not output or rc:
return default
else:
return output
### Information about the used architecture
# Default values for architecture; non-empty strings override the
# defaults given as parameters
_default_architecture = {
'win32': ('','WindowsPE'),
'win16': ('','Windows'),
'dos': ('','MSDOS'),
}
def architecture(executable=sys.executable,bits='',linkage=''):
""" Queries the given executable (defaults to the Python interpreter
binary) for various architecture information.
Returns a tuple (bits,linkage) which contains information about
the bit architecture and the linkage format used for the
executable. Both values are returned as strings.
Values that cannot be determined are returned as given by the
parameter presets. If bits is given as '', the sizeof(pointer)
(or sizeof(long) on Python version < 1.5.2) is used as
indicator for the supported pointer size.
The function relies on the system's "file" command to do the
actual work. This is available on most if not all Unix
platforms. On some non-Unix platforms where the "file" command
does not exist and the executable is set to the Python interpreter
binary defaults from _default_architecture are used.
"""
# Use the sizeof(pointer) as default number of bits if nothing
# else is given as default.
if not bits:
import struct
try:
size = struct.calcsize('P')
except struct.error:
# Older installations can only query longs
size = struct.calcsize('l')
bits = str(size*8) + 'bit'
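        # e.g. a 64-bit build has struct.calcsize('P') == 8, giving '64bit'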
# Get data from the 'file' system command
if executable:
fileout = _syscmd_file(executable, '')
else:
fileout = ''
if not fileout and \
executable == sys.executable:
# "file" command did not return anything; we'll try to provide
# some sensible defaults then...
if sys.platform in _default_architecture:
b,l = _default_architecture[sys.platform]
if b:
bits = b
if l:
linkage = l
return bits,linkage
if 'executable' not in fileout:
# Format not supported
return bits,linkage
# Bits
if '32-bit' in fileout:
bits = '32bit'
elif 'N32' in fileout:
# On Irix only
bits = 'n32bit'
elif '64-bit' in fileout:
bits = '64bit'
# Linkage
if 'ELF' in fileout:
linkage = 'ELF'
elif 'PE' in fileout:
# E.g. Windows uses this format
if 'Windows' in fileout:
linkage = 'WindowsPE'
else:
linkage = 'PE'
elif 'COFF' in fileout:
linkage = 'COFF'
elif 'MS-DOS' in fileout:
linkage = 'MSDOS'
else:
# XXX the A.OUT format also falls under this class...
pass
return bits,linkage
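# Usage sketch (return values vary by platform; the results shown are
# illustrative assumptions, not guaranteed):
#
#   >>> architecture()
#   ('64bit', 'ELF')            # typical for CPython on 64-bit Linux
#   >>> architecture(bits='32bit')
#   ('32bit', 'ELF')            # a preset is kept unless 'file' reports otherwise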
### Portable uname() interface
uname_result = collections.namedtuple("uname_result",
"system node release version machine processor")
_uname_cache = None
def uname():
""" Fairly portable uname interface. Returns a tuple
of strings (system,node,release,version,machine,processor)
identifying the underlying platform.
Note that unlike the os.uname function this also returns
possible processor information as an additional tuple entry.
Entries which cannot be determined are set to ''.
"""
global _uname_cache
no_os_uname = 0
if _uname_cache is not None:
return _uname_cache
processor = ''
# Get some infos from the builtin os.uname API...
try:
system,node,release,version,machine = os.uname()
except AttributeError:
no_os_uname = 1
if no_os_uname or not list(filter(None, (system, node, release, version, machine))):
# Hmm, there is either no uname or uname has returned
# 'unknowns'... we'll have to poke around the system then.
if no_os_uname:
system = sys.platform
release = ''
version = ''
node = _node()
machine = ''
use_syscmd_ver = 1
# Try win32_ver() on win32 platforms
if system == 'win32':
release,version,csd,ptype = win32_ver()
if release and version:
use_syscmd_ver = 0
# Try to use the PROCESSOR_* environment variables
# available on Win XP and later; see
# http://support.microsoft.com/kb/888731 and
# http://www.geocities.com/rick_lively/MANUALS/ENV/MSWIN/PROCESSI.HTM
if not machine:
# WOW64 processes mask the native architecture
if "PROCESSOR_ARCHITEW6432" in os.environ:
machine = os.environ.get("PROCESSOR_ARCHITEW6432", '')
else:
machine = os.environ.get('PROCESSOR_ARCHITECTURE', '')
if not processor:
processor = os.environ.get('PROCESSOR_IDENTIFIER', machine)
# Try the 'ver' system command available on some
# platforms
if use_syscmd_ver:
system,release,version = _syscmd_ver(system)
# Normalize system to what win32_ver() normally returns
# (_syscmd_ver() tends to return the vendor name as well)
if system == 'Microsoft Windows':
system = 'Windows'
elif system == 'Microsoft' and release == 'Windows':
# Under Windows Vista and Windows Server 2008,
# Microsoft changed the output of the ver command. The
# release is no longer printed. This causes the
# system and release to be misidentified.
system = 'Windows'
if '6.0' == version[:3]:
release = 'Vista'
else:
release = ''
# In case we still don't know anything useful, we'll try to
# help ourselves
if system in ('win32','win16'):
if not version:
if system == 'win32':
version = '32bit'
else:
version = '16bit'
system = 'Windows'
elif system[:4] == 'java':
release,vendor,vminfo,osinfo = java_ver()
system = 'Java'
version = ', '.join(vminfo)
if not version:
version = vendor
# System specific extensions
if system == 'OpenVMS':
# OpenVMS seems to have release and version mixed up
if not release or release == '0':
release = version
version = ''
# Get processor information
try:
import vms_lib
except ImportError:
pass
else:
csid, cpu_number = vms_lib.getsyi('SYI$_CPU',0)
if (cpu_number >= 128):
processor = 'Alpha'
else:
processor = 'VAX'
if not processor:
# Get processor information from the uname system command
processor = _syscmd_uname('-p','')
# If any unknowns still exist, replace them with ''s, which are more portable
if system == 'unknown':
system = ''
if node == 'unknown':
node = ''
if release == 'unknown':
release = ''
if version == 'unknown':
version = ''
if machine == 'unknown':
machine = ''
if processor == 'unknown':
processor = ''
# normalize name
if system == 'Microsoft' and release == 'Windows':
system = 'Windows'
release = 'Vista'
_uname_cache = uname_result(system,node,release,version,machine,processor)
return _uname_cache
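# Usage sketch (field values are illustrative assumptions):
#
#   >>> u = uname()
#   >>> u.system, u.release, u.machine
#   ('Linux', '4.15.0', 'x86_64')
#   >>> uname() is u            # later calls are served from _uname_cache
#   True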
### Direct interfaces to some of the uname() return values
def system():
""" Returns the system/OS name, e.g. 'Linux', 'Windows' or 'Java'.
An empty string is returned if the value cannot be determined.
"""
return uname().system
def node():
""" Returns the computer's network name (which may not be fully
qualified)
An empty string is returned if the value cannot be determined.
"""
return uname().node
def release():
""" Returns the system's release, e.g. '2.2.0' or 'NT'
An empty string is returned if the value cannot be determined.
"""
return uname().release
def version():
""" Returns the system's release version, e.g. '#3 on degas'
An empty string is returned if the value cannot be determined.
"""
return uname().version
def machine():
""" Returns the machine type, e.g. 'i386'
An empty string is returned if the value cannot be determined.
"""
return uname().machine
def processor():
""" Returns the (true) processor name, e.g. 'amdk6'
An empty string is returned if the value cannot be
determined. Note that many platforms do not provide this
information or simply return the same value as for machine(),
e.g. NetBSD does this.
"""
return uname().processor
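# These direct interfaces are thin wrappers over the cached uname()
# result, e.g. (illustrative values):
#
#   >>> system(), release(), machine()
#   ('Linux', '4.15.0', 'x86_64')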
### Various APIs for extracting information from sys.version
_sys_version_parser = re.compile(
r'([\w.+]+)\s*'
r'\(#?([^,]+),\s*([\w ]+),\s*([\w :]+)\)\s*'
r'\[([^\]]+)\]?', re.ASCII)
_ironpython_sys_version_parser = re.compile(
r'IronPython\s*'
r'([\d\.]+)'
r'(?: \(([\d\.]+)\))?'
r' on (.NET [\d\.]+)', re.ASCII)
_pypy_sys_version_parser = re.compile(
r'([\w.+]+)\s*'
r'\(#?([^,]+),\s*([\w ]+),\s*([\w :]+)\)\s*'
r'\[PyPy [^\]]+\]?')
_sys_version_cache = {}
def _sys_version(sys_version=None):
""" Returns a parsed version of Python's sys.version as tuple
(name, version, branch, revision, buildno, builddate, compiler)
referring to the Python implementation name, version, branch,
revision, build number, build date/time as string and the compiler
identification string.
Note that unlike the Python sys.version, the returned value
for the Python version will always include the patchlevel (it
defaults to '.0').
The function returns empty strings for tuple entries that
cannot be determined.
sys_version may be given to parse an alternative version
string, e.g. if the version was read from a different Python
interpreter.
"""
# Get the Python version
if sys_version is None:
sys_version = sys.version
_builddate = sys_version[2][:8]
_version = '%s.%s' % (sys_version[0], sys_version[1])
return ("Brython", _version, '', '', 'default', _builddate, '')
# Build and cache the result
#result = (name, version, branch, revision, buildno, builddate, compiler)
#_sys_version_cache[sys_version] = result
#return result
def python_implementation():
""" Returns a string identifying the Python implementation.
Currently, the following implementations are identified:
'CPython' (C implementation of Python),
'IronPython' (.NET implementation of Python),
'Jython' (Java implementation of Python),
'PyPy' (Python implementation of Python).
"""
return "Brython" #_sys_version()[0]
def python_version():
""" Returns the Python version as string 'major.minor.patchlevel'
Note that unlike the Python sys.version, the returned value
will always include the patchlevel (it defaults to 0).
"""
#return _sys_version()[1]
return '%s.%s' % (sys.version[0], sys.version[1])
def python_version_tuple():
""" Returns the Python version as tuple (major, minor, patchlevel)
of strings.
Note that unlike the Python sys.version, the returned value
will always include the patchlevel (it defaults to 0).
"""
#return tuple(_sys_version()[1].split('.'))
return (sys.version[0], sys.version[1], '0')
def python_branch():
""" Returns a string identifying the Python implementation
branch.
For CPython this is the Subversion branch from which the
Python binary was built.
If not available, an empty string is returned.
"""
#return _sys_version()[2]
return ''
def python_revision():
""" Returns a string identifying the Python implementation
revision.
For CPython this is the Subversion revision from which the
Python binary was built.
If not available, an empty string is returned.
"""
#return _sys_version()[3]
return ''
def python_build():
""" Returns a tuple (buildno, builddate) stating the Python
build number and date as strings.
"""
#return _sys_version()[4:6]
return ('', '')
def python_compiler():
""" Returns a string identifying the compiler used for compiling
Python.
"""
#return _sys_version()[6]
return ''
### The Opus Magnum of platform strings :-)
_platform_cache = {}
def platform(aliased=0, terse=0):
""" Returns a single string identifying the underlying platform
with as much useful information as possible (but no more :).
The output is intended to be human readable rather than
machine parseable. It may look different on different
platforms and this is intended.
If "aliased" is true, the function will use aliases for
various platforms that report system names which differ from
their common names, e.g. SunOS will be reported as
Solaris. The system_alias() function is used to implement
this.
Setting terse to true causes the function to return only the
absolute minimum information needed to identify the platform.
"""
return "Browser"
### Command line interface
if __name__ == '__main__':
# Default is to print the aliased verbose platform string
terse = ('terse' in sys.argv or '--terse' in sys.argv)
aliased = (not 'nonaliased' in sys.argv and not '--nonaliased' in sys.argv)
print(platform(aliased,terse))
sys.exit(0)
|
mcuringa/vid-analyzer
|
refs/heads/master
|
web/vidan/models.py
|
1
|
from django.db import models
from django import forms
from django.forms import ModelForm
from django.contrib.auth.models import User
class DataSet(models.Model):
"""The dataset holds the collection of videos that will
be analyzed as a group."""
name = models.CharField(max_length=120)
creator = models.ForeignKey(User)
class DataSetForm(ModelForm):
class Meta:
model = DataSet
exclude = ["creator"]
class Video(models.Model):
"""The full video that is uploaded to Kaltura"""
filename = models.CharField(max_length=500)
k_video_entry = models.CharField(max_length=40)
dataset = models.ForeignKey(DataSet)
class Query(models.Model):
"""A query term made against the clip database"""
term = models.CharField(max_length=120)
dataset = models.ForeignKey(DataSet)
class QueryForm(ModelForm):
class Meta:
model = Query
exclude = ["dataset"]
class QueryResult(models.Model):
"""All of the video hit for a query"""
k_video_id = models.IntegerField()
# the seconds from start where the term was found
hit_time = models.IntegerField()
query = models.ForeignKey(Query)
class Collection(models.Model):
"""A user curated collection of videos, from a given query"""
name = models.CharField(max_length=120)
dataset = models.ForeignKey(DataSet)
class Clip(models.Model):
start = models.IntegerField()
end = models.IntegerField()
video = models.ForeignKey(Video)
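# Usage sketch (hypothetical view code, assuming an authenticated request):
#
#   form = DataSetForm(request.POST)
#   if form.is_valid():
#       dataset = form.save(commit=False)   # 'creator' is excluded above
#       dataset.creator = request.user
#       dataset.save()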
|
shiblon/pytour
|
refs/heads/master
|
static/js/pypyjs/pypy-nojit.js-0.3.1/lib/modules/test/test_email.py
|
91
|
# Copyright (C) 2001,2002 Python Software Foundation
# email package unit tests
# The specific tests now live in Lib/email/test
from email.test.test_email import suite
from email.test.test_email_renamed import suite as suite2
from test import test_support
def test_main():
test_support.run_unittest(suite())
test_support.run_unittest(suite2())
if __name__ == '__main__':
test_main()
|
blacktop/docker-elk
|
refs/heads/master
|
5.5/config/misc/test_index.py
|
56
|
from datetime import datetime
from elasticsearch import Elasticsearch
es = Elasticsearch()
for i in range(10000):
doc = {'author': 'kimchy', 'text': 'Elasticsearch: cool. bonsai cool.', 'timestamp': datetime.now()}
res = es.index(index="test-index", doc_type='tweet', id=i, body=doc)
# print(res['created'])
res = es.get(index="test-index", doc_type='tweet', id=1)
print(res['_source'])
es.indices.refresh(index="test-index")
res = es.search(index="test-index", body={"query": {"match_all": {}}})
print("Got %d Hits:" % res['hits']['total'])
for hit in res['hits']['hits']:
print("%(timestamp)s %(author)s: %(text)s" % hit["_source"])
|
ta2-1/pootle
|
refs/heads/master
|
pootle/apps/pootle_store/migrations/0003_remove_unit_ordering.py
|
10
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('pootle_store', '0002_make_suggestion_user_not_null'),
]
operations = [
migrations.AlterModelOptions(
name='unit',
options={'get_latest_by': 'mtime'},
),
]
|
Sparky88/IA-B1-ProcesareaImaginilor
|
refs/heads/master
|
Retele neuronale/Iordache Iustin/Clasifier/libsvm.py
|
2
|
#!/usr/bin/env python
"""
Taken and modified from easy.py from the libsvm package,
which is under following copyright:
Copyright (c) 2000-2012 Chih-Chung Chang and Chih-Jen Lin
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither name of copyright holders nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys
import os
from subprocess import *
import numpy
def cmd_output(args, **kwds):
# get the output of the subprocess
kwds.setdefault("stdout", subprocess.PIPE)
kwds.setdefault("stderr", subprocess.STDOUT)
p = subprocess.Popen(args, **kwds)
return p.communicate()[0]
def test(test_pathname, model_file):
is_win32 = (sys.platform == 'win32')
if not is_win32:
svmscale_exe = "libsvm/svm-scale"
svmtrain_exe = "libsvm/svm-train"
svmpredict_exe = "libsvm/svm-predict"
grid_py = "./grid.py"
gnuplot_exe = "/usr/bin/gnuplot"
else:
# example for windows
svmscale_exe = r"..\windows\svm-scale.exe"
svmtrain_exe = r"..\windows\svm-train.exe"
svmpredict_exe = r"..\windows\svm-predict.exe"
gnuplot_exe = r"c:\tmp\gnuplot\binary\pgnuplot.exe"
grid_py = r".\grid.py"
assert os.path.exists(svmscale_exe),"svm-scale executable not found"
assert os.path.exists(svmtrain_exe),"svm-train executable not found"
assert os.path.exists(svmpredict_exe),"svm-predict executable not found"
assert os.path.exists(grid_py),"grid.py not found"
assert os.path.exists(test_pathname),"training file not found"
trunc_filename = os.path.splitext(model_file)[0]
scaled_test_file = trunc_filename + ".scale"
range_file = trunc_filename + ".range"
predict_test_file = trunc_filename + ".prediction"
cmd = '{0} -r "{1}" "{2}" > "{3}"'.format(svmscale_exe, range_file, test_pathname, scaled_test_file)
print('Scaling testing data...')
Popen(cmd, shell = True, stdout = PIPE).communicate()
cmd = '{0} "{1}" "{2}" "{3}"'.format(svmpredict_exe, scaled_test_file, model_file, predict_test_file)
print('Testing...')
print('Output prediction: {0}'.format(predict_test_file))
result = Popen(cmd, shell = True, stdout = PIPE).communicate()
pred_class = []
with open(predict_test_file,'r') as f:
for line in f:
pred_class.append(int(line))
#print result
pred_class = numpy.asarray(pred_class)
result = result[0]
pivot = result.find(' = ')
result = result[pivot+3:]
pivot = result.find('%')
result = result[:pivot]
accuracy = float(result)
return pred_class
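# Usage sketch (hypothetical file paths; expects the .range file produced
# alongside the model by grid() below):
#
#   predictions = test("data/heart_scale.t", "data/heart_scale.model")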
def grid(train_pathname,test_pathname=None, png_filename=None):
is_win32 = (sys.platform == 'win32')
if not is_win32:
svmscale_exe = "libsvm/svm-scale"
svmtrain_exe = "libsvm/svm-train"
svmpredict_exe = "libsvm/svm-predict"
grid_py = "./grid.py"
gnuplot_exe = "/usr/bin/gnuplot"
else:
# example for windows
svmscale_exe = r"..\windows\svm-scale.exe"
svmtrain_exe = r"..\windows\svm-train.exe"
svmpredict_exe = r"..\windows\svm-predict.exe"
gnuplot_exe = r"c:\tmp\gnuplot\binary\pgnuplot.exe"
grid_py = r".\grid.py"
assert os.path.exists(svmscale_exe),"svm-scale executable not found"
assert os.path.exists(svmtrain_exe),"svm-train executable not found"
assert os.path.exists(svmpredict_exe),"svm-predict executable not found"
assert os.path.exists(grid_py),"grid.py not found"
assert os.path.exists(train_pathname),"training file not found"
scaled_file = train_pathname + ".scale"
model_file = train_pathname + ".model"
range_file = train_pathname + ".range"
if test_pathname != None:
assert os.path.exists(test_pathname),"testing file not found"
scaled_test_file = test_pathname + ".scale"
predict_test_file = test_pathname + ".predict"
if png_filename != None:
png_filename = '-png {0}'.format(png_filename)
else:
png_filename = ''
cmd = '{0} -s "{1}" "{2}" > "{3}"'.format(svmscale_exe, range_file, train_pathname, scaled_file)
print('Scaling training data...')
Popen(cmd, shell = True, stdout = PIPE).communicate()
cmd = 'python {0} -svmtrain "{1}" -gnuplot "{2}" {3} "{4}"'.format(grid_py, svmtrain_exe, gnuplot_exe, png_filename, scaled_file,)
print "------------------------------"
print cmd
print('Cross validation...')
f = Popen(cmd, shell = True, stdout = PIPE).stdout
line = ''
while True:
last_line = line
line = f.readline()
if not line: break
c,g,rate = map(float,last_line.split())
print('Best c={0}, g={1} CV rate={2}'.format(c,g,rate))
cmd = '{0} -c {1} -g {2} "{3}" "{4}"'.format(svmtrain_exe,c,g,scaled_file,model_file)
print('Training...')
Popen(cmd, shell = True, stdout = PIPE).communicate()
print('Output model: {0}'.format(model_file))
if test_pathname != None:
cmd = '{0} -r "{1}" "{2}" > "{3}"'.format(svmscale_exe, range_file, test_pathname, scaled_test_file)
print('Scaling testing data...')
Popen(cmd, shell = True, stdout = PIPE).communicate()
cmd = '{0} "{1}" "{2}" "{3}"'.format(svmpredict_exe, scaled_test_file, model_file, predict_test_file)
print('Testing...')
print('Output prediction: {0}'.format(predict_test_file))
result = Popen(cmd, shell = True, stdout = PIPE).communicate()
pred_class = []
with open(predict_test_file,'r') as f:
for line in f:
pred_class.append(int(line))
print(result)
pred_class = numpy.asarray(pred_class)
result = result[0]
pivot = result.find(' = ')
result = result[pivot+3:]
pivot = result.find('%')
result = result[:pivot]
accuracy = float(result)
return accuracy, pred_class, c, g, rate
else:
return c, g, rate, model_file
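# Usage sketch (hypothetical libsvm-format data files):
#
#   acc, preds, c, g, rate = grid("data/heart_scale", "data/heart_scale.t")
#   c, g, rate, model = grid("data/heart_scale")    # train + CV only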
|
blueboxgroup/neutron
|
refs/heads/master
|
neutron/tests/functional/api/test_v2_plugin.py
|
5
|
# Copyright 2014, Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This module implements BaseNeutronClient for the programmatic plugin
api and configures the api tests with scenarios targeting individual
plugins.
"""
import testscenarios
from neutron.common import exceptions as q_exc
from neutron import context
from neutron import manager
from neutron.tests.api import base_v2
from neutron.tests.unit.ml2 import test_ml2_plugin
from neutron.tests.unit import testlib_api
from neutron.tests.unit import testlib_plugin
# Each plugin must add a class to plugin_configurations that can configure the
# plugin for use with PluginClient. For a given plugin, the setup
# used for NeutronDbPluginV2TestCase can usually be reused. See the
# configuration classes listed below for examples of this reuse.
#TODO(marun) Discover plugin conf via a metaclass
plugin_configurations = [
test_ml2_plugin.Ml2PluginConf,
]
# Required to generate tests from scenarios. Not compatible with nose.
load_tests = testscenarios.load_tests_apply_scenarios
class PluginClient(base_v2.BaseNeutronClient):
@property
def ctx(self):
if not hasattr(self, '_ctx'):
self._ctx = context.Context('', 'test-tenant')
return self._ctx
@property
def plugin(self):
return manager.NeutronManager.get_plugin()
@property
def NotFound(self):
return q_exc.NetworkNotFound
def create_network(self, **kwargs):
# Supply defaults that are expected to be set by the api
# framework
kwargs.setdefault('admin_state_up', True)
kwargs.setdefault('shared', False)
data = dict(network=kwargs)
result = self.plugin.create_network(self.ctx, data)
return base_v2.AttributeDict(result)
def update_network(self, id_, **kwargs):
data = dict(network=kwargs)
result = self.plugin.update_network(self.ctx, id_, data)
return base_v2.AttributeDict(result)
def get_network(self, *args, **kwargs):
result = self.plugin.get_network(self.ctx, *args, **kwargs)
return base_v2.AttributeDict(result)
def get_networks(self, *args, **kwargs):
result = self.plugin.get_networks(self.ctx, *args, **kwargs)
return [base_v2.AttributeDict(x) for x in result]
def delete_network(self, id_):
self.plugin.delete_network(self.ctx, id_)
def get_scenarios():
scenarios = []
client = PluginClient()
for conf in plugin_configurations:
name = conf.plugin_name
class_name = name[name.rfind('.') + 1:]
scenarios.append((class_name, {'client': client, 'plugin_conf': conf}))
return scenarios
class TestPluginApi(base_v2.BaseTestApi,
testlib_api.SqlTestCase,
testlib_plugin.PluginSetupHelper):
scenarios = get_scenarios()
def setUp(self):
# BaseTestApi is not based on BaseTestCase to avoid import
# errors when importing Tempest. When targeting the plugin
# api, it is necessary to avoid calling BaseTestApi's parent
# setUp, since that setup will be called by SqlTestCase.setUp.
super(TestPluginApi, self).setUp(setup_parent=False)
testlib_api.SqlTestCase.setUp(self)
self.setup_coreplugin(self.plugin_conf.plugin_name)
self.plugin_conf.setUp(self)
|
WillisXChen/django-oscar
|
refs/heads/master
|
oscar/lib/python2.7/site-packages/yaml/serializer.py
|
561
|
__all__ = ['Serializer', 'SerializerError']
from error import YAMLError
from events import *
from nodes import *
class SerializerError(YAMLError):
pass
class Serializer(object):
ANCHOR_TEMPLATE = u'id%03d'
def __init__(self, encoding=None,
explicit_start=None, explicit_end=None, version=None, tags=None):
self.use_encoding = encoding
self.use_explicit_start = explicit_start
self.use_explicit_end = explicit_end
self.use_version = version
self.use_tags = tags
self.serialized_nodes = {}
self.anchors = {}
self.last_anchor_id = 0
self.closed = None
def open(self):
if self.closed is None:
self.emit(StreamStartEvent(encoding=self.use_encoding))
self.closed = False
elif self.closed:
raise SerializerError("serializer is closed")
else:
raise SerializerError("serializer is already opened")
def close(self):
if self.closed is None:
raise SerializerError("serializer is not opened")
elif not self.closed:
self.emit(StreamEndEvent())
self.closed = True
#def __del__(self):
# self.close()
def serialize(self, node):
if self.closed is None:
raise SerializerError("serializer is not opened")
elif self.closed:
raise SerializerError("serializer is closed")
self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
version=self.use_version, tags=self.use_tags))
self.anchor_node(node)
self.serialize_node(node, None, None)
self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
self.serialized_nodes = {}
self.anchors = {}
self.last_anchor_id = 0
def anchor_node(self, node):
if node in self.anchors:
if self.anchors[node] is None:
self.anchors[node] = self.generate_anchor(node)
else:
self.anchors[node] = None
if isinstance(node, SequenceNode):
for item in node.value:
self.anchor_node(item)
elif isinstance(node, MappingNode):
for key, value in node.value:
self.anchor_node(key)
self.anchor_node(value)
def generate_anchor(self, node):
self.last_anchor_id += 1
return self.ANCHOR_TEMPLATE % self.last_anchor_id
def serialize_node(self, node, parent, index):
alias = self.anchors[node]
if node in self.serialized_nodes:
self.emit(AliasEvent(alias))
else:
self.serialized_nodes[node] = True
self.descend_resolver(parent, index)
if isinstance(node, ScalarNode):
detected_tag = self.resolve(ScalarNode, node.value, (True, False))
default_tag = self.resolve(ScalarNode, node.value, (False, True))
implicit = (node.tag == detected_tag), (node.tag == default_tag)
self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
style=node.style))
elif isinstance(node, SequenceNode):
implicit = (node.tag
== self.resolve(SequenceNode, node.value, True))
self.emit(SequenceStartEvent(alias, node.tag, implicit,
flow_style=node.flow_style))
index = 0
for item in node.value:
self.serialize_node(item, node, index)
index += 1
self.emit(SequenceEndEvent())
elif isinstance(node, MappingNode):
implicit = (node.tag
== self.resolve(MappingNode, node.value, True))
self.emit(MappingStartEvent(alias, node.tag, implicit,
flow_style=node.flow_style))
for key, value in node.value:
self.serialize_node(key, node, None)
self.serialize_node(value, node, key)
self.emit(MappingEndEvent())
self.ascend_resolver()
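# Usage sketch: Serializer is a mixin; in PyYAML it is composed into a
# Dumper together with Emitter, Representer and Resolver, so it is driven
# indirectly, e.g.:
#
#   import yaml
#   yaml.dump({'a': [1, 2]})    # calls open()/serialize()/close() internally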
|
isNeilLin/matplotlib-study
|
refs/heads/master
|
事故处理统计/transform.py
|
1
|
def transform(name):
data = {
'京':'北京市','津':'天津市','冀':'河北省','晋':'山西省','蒙':'内蒙古自治区','辽':'辽宁省','吉':'吉林省','黑':'黑龙江省','沪':'上海市','苏':'江苏省','浙':'浙江省','皖':'安徽省','闽':'福建省','赣':'江西省','鲁':'山东省','豫':'河南省','鄂':'湖北省','湘':'湖南省','粤':'广东省','桂':'广西壮族自治区','琼':'海南省','川':'四川省','贵':'贵州省','云':'云南省','渝':'重庆市','藏':'西藏自治区','陕':'陕西省','甘':'甘肃省','青':'青海省','宁':'宁夏回族自治区','新':'新疆维吾尔自治区'
}
if data.get(name):
return data[name]
else:
return None
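# Usage sketch: maps a license-plate province abbreviation to its full
# name, e.g. transform('京') returns '北京市'; unknown keys return None.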
|
hortonworks/hortonworks-sandbox
|
refs/heads/master
|
desktop/core/ext-py/Paste-1.7.2/paste/url.py
|
28
|
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
This module implements a class for handling URLs.
"""
import urllib
import cgi
from paste import request
# Imported lazily from FormEncode:
variabledecode = None
__all__ = ["URL", "Image"]
def html_quote(v):
if v is None:
return ''
return cgi.escape(str(v), 1)
def url_quote(v):
if v is None:
return ''
return urllib.quote(str(v))
url_unquote = urllib.unquote
def js_repr(v):
if v is None:
return 'null'
elif v is False:
return 'false'
elif v is True:
return 'true'
elif isinstance(v, list):
return '[%s]' % ', '.join(map(js_repr, v))
elif isinstance(v, dict):
return '{%s}' % ', '.join(
['%s: %s' % (js_repr(key), js_repr(value))
for key, value in v])
elif isinstance(v, str):
return repr(v)
elif isinstance(v, unicode):
# @@: how do you do Unicode literals in Javascript?
return repr(v.encode('UTF-8'))
elif isinstance(v, (float, int)):
return repr(v)
elif isinstance(v, long):
return repr(v).lstrip('L')
elif hasattr(v, '__js_repr__'):
return v.__js_repr__()
else:
raise ValueError(
"I don't know how to turn %r into a Javascript representation"
% v)
class URLResource(object):
"""
This is an abstract superclass for different kinds of URLs
"""
default_params = {}
def __init__(self, url, vars=None, attrs=None,
params=None):
self.url = url or '/'
self.vars = vars or []
self.attrs = attrs or {}
self.params = self.default_params.copy()
self.original_params = params or {}
if params:
self.params.update(params)
#@classmethod
def from_environ(cls, environ, with_query_string=True,
with_path_info=True, script_name=None,
path_info=None, querystring=None):
url = request.construct_url(
environ, with_query_string=False,
with_path_info=with_path_info, script_name=script_name,
path_info=path_info)
if with_query_string:
if querystring is None:
vars = request.parse_querystring(environ)
else:
vars = cgi.parse_qsl(
querystring,
keep_blank_values=True,
strict_parsing=False)
else:
vars = None
v = cls(url, vars=vars)
return v
from_environ = classmethod(from_environ)
def __call__(self, *args, **kw):
res = self._add_positional(args)
res = res._add_vars(kw)
return res
def __getitem__(self, item):
if '=' in item:
name, value = item.split('=', 1)
return self._add_vars({url_unquote(name): url_unquote(value)})
return self._add_positional((item,))
def attr(self, **kw):
for key in kw.keys():
if key.endswith('_'):
kw[key[:-1]] = kw[key]
del kw[key]
new_attrs = self.attrs.copy()
new_attrs.update(kw)
return self.__class__(self.url, vars=self.vars,
attrs=new_attrs,
params=self.original_params)
def param(self, **kw):
new_params = self.original_params.copy()
new_params.update(kw)
return self.__class__(self.url, vars=self.vars,
attrs=self.attrs,
params=new_params)
def coerce_vars(self, vars):
global variabledecode
need_variable_encode = False
for key, value in vars.items():
if isinstance(value, dict):
need_variable_encode = True
if key.endswith('_'):
vars[key[:-1]] = vars[key]
del vars[key]
if need_variable_encode:
if variabledecode is None:
from formencode import variabledecode
vars = variabledecode.variable_encode(vars)
return vars
def var(self, **kw):
kw = self.coerce_vars(kw)
new_vars = self.vars + kw.items()
return self.__class__(self.url, vars=new_vars,
attrs=self.attrs,
params=self.original_params)
def setvar(self, **kw):
"""
Like ``.var(...)``, except it overwrites keys where .var simply
extends them. Setting a variable to None here will
effectively delete it.
"""
kw = self.coerce_vars(kw)
new_vars = []
for name, values in self.vars:
if name in kw:
continue
new_vars.append((name, values))
new_vars.extend(kw.items())
return self.__class__(self.url, vars=new_vars,
attrs=self.attrs,
params=self.original_params)
def setvars(self, **kw):
"""
Creates a copy of this URL, but with all the variables set/reset
(like .setvar(), except clears past variables at the same time)
"""
return self.__class__(self.url, vars=kw.items(),
attrs=self.attrs,
params=self.original_params)
def addpath(self, *paths):
u = self
for path in paths:
path = str(path).lstrip('/')
new_url = u.url
if not new_url.endswith('/'):
new_url += '/'
u = u.__class__(new_url+path, vars=u.vars,
attrs=u.attrs,
params=u.original_params)
return u
__div__ = addpath
def become(self, OtherClass):
return OtherClass(self.url, vars=self.vars,
attrs=self.attrs,
params=self.original_params)
def href__get(self):
s = self.url
if self.vars:
s += '?'
vars = []
for name, val in self.vars:
if isinstance(val, (list, tuple)):
val = [v for v in val if v is not None]
elif val is None:
continue
vars.append((name, val))
s += urllib.urlencode(vars, True)
return s
href = property(href__get)
def __repr__(self):
base = '<%s %s' % (self.__class__.__name__,
self.href or "''")
if self.attrs:
base += ' attrs(%s)' % (
' '.join(['%s="%s"' % (html_quote(n), html_quote(v))
for n, v in self.attrs.items()]))
if self.original_params:
base += ' params(%s)' % (
', '.join(['%s=%r' % (n, v)
for n, v in self.attrs.items()]))
return base + '>'
def html__get(self):
if not self.params.get('tag'):
raise ValueError(
"You cannot get the HTML of %r until you set the "
"'tag' param'" % self)
content = self._get_content()
tag = '<%s' % self.params.get('tag')
attrs = ' '.join([
'%s="%s"' % (html_quote(n), html_quote(v))
for n, v in self._html_attrs()])
if attrs:
tag += ' ' + attrs
tag += self._html_extra()
if content is None:
return tag + ' />'
else:
return '%s>%s</%s>' % (tag, content, self.params.get('tag'))
html = property(html__get)
def _html_attrs(self):
return self.attrs.items()
def _html_extra(self):
return ''
def _get_content(self):
"""
Return the content for a tag (for self.html); return None
for an empty tag (like ``<img />``)
"""
raise NotImplementedError
def _add_vars(self, vars):
raise NotImplementedError
def _add_positional(self, args):
raise NotImplementedError
class URL(URLResource):
r"""
>>> u = URL('http://localhost')
>>> u
<URL http://localhost>
>>> u = u['view']
>>> str(u)
'http://localhost/view'
>>> u['//foo'].param(content='view').html
'<a href="http://localhost/view/foo">view</a>'
>>> u.param(confirm='Really?', content='goto').html
'<a href="http://localhost/view" onclick="return confirm(\'Really?\')">goto</a>'
>>> u(title='See "it"', content='goto').html
'<a href="http://localhost/view?title=See+%22it%22">goto</a>'
>>> u('another', var='fuggetaboutit', content='goto').html
'<a href="http://localhost/view/another?var=fuggetaboutit">goto</a>'
>>> u.attr(content='goto').html
Traceback (most recent call last):
....
ValueError: You must give a content param to <URL http://localhost/view attrs(content="goto")> generate anchor tags
>>> str(u['foo=bar%20stuff'])
'http://localhost/view?foo=bar+stuff'
"""
default_params = {'tag': 'a'}
def __str__(self):
return self.href
def _get_content(self):
if not self.params.get('content'):
raise ValueError(
"You must give a content param to %r generate anchor tags"
% self)
return self.params['content']
def _add_vars(self, vars):
url = self
for name in ('confirm', 'content'):
if name in vars:
url = url.param(**{name: vars.pop(name)})
if 'target' in vars:
url = url.attr(target=vars.pop('target'))
return url.var(**vars)
def _add_positional(self, args):
return self.addpath(*args)
def _html_attrs(self):
attrs = self.attrs.items()
attrs.insert(0, ('href', self.href))
if self.params.get('confirm'):
attrs.append(('onclick', 'return confirm(%s)'
% js_repr(self.params['confirm'])))
return attrs
def onclick_goto__get(self):
return 'location.href=%s; return false' % js_repr(self.href)
onclick_goto = property(onclick_goto__get)
def button__get(self):
return self.become(Button)
button = property(button__get)
def js_popup__get(self):
return self.become(JSPopup)
js_popup = property(js_popup__get)
class Image(URLResource):
r"""
>>> i = Image('/images')
>>> i = i / '/foo.png'
>>> i.html
'<img src="/images/foo.png" />'
>>> str(i['alt=foo'])
'<img src="/images/foo.png" alt="foo" />'
>>> i.href
'/images/foo.png'
"""
default_params = {'tag': 'img'}
def __str__(self):
return self.html
def _get_content(self):
return None
def _add_vars(self, vars):
return self.attr(**vars)
def _add_positional(self, args):
return self.addpath(*args)
def _html_attrs(self):
attrs = self.attrs.items()
attrs.insert(0, ('src', self.href))
return attrs
class Button(URLResource):
r"""
>>> u = URL('/')
>>> u = u / 'delete'
>>> b = u.button['confirm=Sure?'](id=5, content='del')
>>> str(b)
'<button onclick="if (confirm(\'Sure?\')) {location.href=\'/delete?id=5\'}; return false">del</button>'
"""
default_params = {'tag': 'button'}
def __str__(self):
return self.html
def _get_content(self):
if self.params.get('content'):
return self.params['content']
if self.attrs.get('value'):
return self.attrs['value']
# @@: Error?
return None
def _add_vars(self, vars):
button = self
if 'confirm' in vars:
button = button.param(confirm=vars.pop('confirm'))
if 'content' in vars:
button = button.param(content=vars.pop('content'))
return button.var(**vars)
def _add_positional(self, args):
return self.addpath(*args)
def _html_attrs(self):
attrs = self.attrs.items()
onclick = 'location.href=%s' % js_repr(self.href)
if self.params.get('confirm'):
onclick = 'if (confirm(%s)) {%s}' % (
js_repr(self.params['confirm']), onclick)
onclick += '; return false'
attrs.insert(0, ('onclick', onclick))
return attrs
class JSPopup(URLResource):
r"""
>>> u = URL('/')
>>> u = u / 'view'
>>> j = u.js_popup(content='view')
>>> j.html
'<a href="/view" onclick="window.open(\'/view\', \'_blank\'); return false" target="_blank">view</a>'
"""
default_params = {'tag': 'a', 'target': '_blank'}
def _add_vars(self, vars):
button = self
for var in ('width', 'height', 'stripped', 'content'):
if var in vars:
button = button.param(**{var: vars.pop(var)})
return button.var(**vars)
def _window_args(self):
p = self.params
features = []
if p.get('stripped'):
p['location'] = p['status'] = p['toolbar'] = '0'
for param in 'channelmode directories fullscreen location menubar resizable scrollbars status titlebar'.split():
if param not in p:
continue
v = p[param]
if v not in ('yes', 'no', '1', '0'):
if v:
v = '1'
else:
v = '0'
features.append('%s=%s' % (param, v))
for param in 'height left top width':
if not p.get(param):
continue
features.append('%s=%s' % (param, p[param]))
args = [self.href, p['target']]
if features:
args.append(','.join(features))
return ', '.join(map(js_repr, args))
def _html_attrs(self):
attrs = self.attrs.items()
onclick = ('window.open(%s); return false'
% self._window_args())
attrs.insert(0, ('target', self.params['target']))
attrs.insert(0, ('onclick', onclick))
attrs.insert(0, ('href', self.href))
return attrs
def _get_content(self):
if not self.params.get('content'):
raise ValueError(
"You must give a content param to %r generate anchor tags"
% self)
return self.params['content']
def _add_positional(self, args):
return self.addpath(*args)
if __name__ == '__main__':
import doctest
doctest.testmod()
|
smashwilson/ansible-modules-core
|
refs/heads/devel
|
cloud/rackspace/rax_network.py
|
43
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module; it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_network
short_description: create / delete an isolated network in Rackspace Public Cloud
description:
- creates / deletes a Rackspace Public Cloud isolated network.
version_added: "1.4"
options:
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
label:
description:
- Label (name) to give the network
default: null
cidr:
description:
- cidr of the network being created
default: null
author: Christopher H. Laco, Jesse Keating
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Build an Isolated Network
gather_facts: False
tasks:
- name: Network create request
local_action:
module: rax_network
credentials: ~/.raxpub
label: my-net
cidr: 192.168.3.0/24
state: present
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def cloud_network(module, state, label, cidr):
changed = False
network = None
networks = []
if not pyrax.cloud_networks:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
if state == 'present':
if not cidr:
module.fail_json(msg='missing required arguments: cidr')
try:
network = pyrax.cloud_networks.find_network_by_label(label)
except pyrax.exceptions.NetworkNotFound:
try:
network = pyrax.cloud_networks.create(label, cidr=cidr)
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
except Exception, e:
module.fail_json(msg='%s' % e.message)
elif state == 'absent':
try:
network = pyrax.cloud_networks.find_network_by_label(label)
network.delete()
changed = True
except pyrax.exceptions.NetworkNotFound:
pass
except Exception, e:
module.fail_json(msg='%s' % e.message)
if network:
instance = dict(id=network.id,
label=network.label,
cidr=network.cidr)
networks.append(instance)
module.exit_json(changed=changed, networks=networks)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
state=dict(default='present',
choices=['present', 'absent']),
label=dict(required=True),
cidr=dict()
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together(),
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
state = module.params.get('state')
label = module.params.get('label')
cidr = module.params.get('cidr')
setup_rax_module(module, pyrax)
cloud_network(module, state, label, cidr)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
main()
|
huqa/pyfibot_modules
|
refs/heads/master
|
module_aija.py
|
1
|
# -*- coding: utf-8 -*-
'''
Created on Feb 4, 2013
@author: huqa
'''
from random import randint
aija_mestat = ["thaikuis",
"briteis",
"jenkeis",
"indois",
"baaris",
"balil",
"japanis",
"malil",
"mäkis",
"pohjoisnaval",
"turkis",
"olympialaisis",
"ausseis",
"brasseis",
"meksikos",
"kanadas",
"gobin aavikol",
"kapkaupungis",
"lontoos",
"intias",
"asuntomessuil",
"pakistanis",
"etelä-naval",
"tiibetis",
"kiinas",
"siperias",
"x-gamesis",
"ymca:s",
"tongal",
"tulivuores",
"lontoos",
"muukalaislegioonas",
"vietnamis",
"etelä-koreas",
"luolas",
"vankilassa",
"fudiksen mm-finaalis",
"pohjois-koreas",
"viidakos",
"hervannas",
"superbowlissa",
"hesburgeris",
"lastentarhassa"]
aija_teot = ["surffaa",
"skeittaa",
"reilaa",
"roadtripil",
"daivaa",
"suunnistaa",
"kiipeilee",
"ryyppää",
"parkouraa",
"seilaa",
"wakeboardaa",
"työharjottelus",
"kokkaa",
"metsästää",
"ampumas",
"juoksee",
"bodaamas",
"deejiinä",
"ratsastaa",
"pyöräilee",
"töis",
"travellaa",
"reissaa",
"räppää",
"tappelemas",
"kouluttaa",
"suihkussa",
"punnertaa",
"snowboardaa",
"maratoonis",
"piirtää",
"maalaan",
"paskal",
"kusel",
"nyrkkeilee",
"meditoimas"]
aija_tanaa = ["tänää meikä kokkaa",
"tänää meikäijä kokkaa",
"tänää mä väsään",
"tänään meikä tekee",
"tänään meitsi väsää dinneriks",
"tänään mä duunaan",
"pistän koht tost snadit väännöt"]
aija_ruoat = ["äijäwokkii",
"viikon marinoitunutta kengurufilettä",
"täytettyjä crepejä",
"äijäpihvii",
"paahdettuu lammast",
"pakurikääpää",
"kanttarellej",
"virtahepoo",
"koiraa",
"aasinpotkaa",
"kaviaarii",
"miekkakalaa",
"torvisienii",
"jättiläismustekalaa",
"hanhenmaksaa",
"kobe-pihvii",
"kateenkorvaa",
"porsaankylkee",
"äijäsalaattii",
"hampurilaisii",
"kebabbii",
"kissaa",
"banaaneita",
"falafelii",
"kanansiipii",
"valaanlihaa",
"kenguruu",
"sammalta",
"pizzaa",
"perunoit",
"gorillaa",
"vyötiäistä",
"hamstereit",
"nokkosii",
"apinanaivoja",
"pässin kiveksii",
"merihevost",
"etanoit",
"merimakkaraa",
"muurahaiskarhuu",
"haggista",
"karitsaa",
"käärmettä"]
aija_lisukkeet = ["wasabiemulsiol",
"ranskalaisil",
"pastal",
"korianteril",
"hummeril",
"mädätettynä",
"kanansiivil",
"riisillä",
"ruisleiväl",
"keitettynä",
"sushil",
"käristettynä",
"couscousil",
"sokerikuorrutuksel",
"juustol",
"virtahevon suolessa",
"kermaviilil",
"yrttiöljyl",
"maustekurkumal",
"katkaravuil",
"friteerattuna",
"keittona",
"kaviaaril",
"höyrytettynä",
"muurahaisilla",
"paistettuna",
"liekitettynä",
"fazerin sinisellä",
"makkaral",
"silvottuna",
"jugurtil",
"vetisenä"]
aija_yrtit = ["tashimoto-heinää jonka poimin shiribetsu-joen rannalt kun olin reilaa japanis",
"abessiinialaist kurttuviikunaa jota saan paikalliselt tarhurilt etiopiast",
"mökin takapihalt poimittuu pitkälehtikihokkii",
"sichuanin maakunnast poimittuu sareptaninsinappii",
"tämmösii tyrnimustikka-risteytysmarjoi joita sain turun yliopiston genetiikan laitoksen äijilt",
"perus suomalaist japaninpersiljaa jota ny löytyy kaikkien pihast",
"neidonhiuspuu-uutet",
"mustanmeren merilevää",
"jauhettuu ruusunjuurta",
"dodon höyhenii",
"omakasvattamaa ananast",
"jauhettuu kääpiöponinkavioo",
"mustanmerenruusua jotka poimin georgian haikil",
"kuopas paahdettui maakastanjoit",
"frendin luomutilal kasvattamaa mukulakirvelii",
"makeen kirpeit ananaskirsikoit",
"saframii",
"tasmanian tuholaisen karvoi",
"basilikaa",
"sitruunamehuu",
"jättiläispunapuun ydintä",
"jakinmaitorahkaa",
"valaanrasvaa",
"vaimon kasvattamaa minttuu",
"jauhettuu ykssarvisen sarvee",
"viimesen dinosauruksen suomuja",
"murkkujen kusta",
"koivun kaarnaa",
"mes-juustoo pari siivuu"]
aija_dressing = ["vatikaanist saatuu balsamicoo, terveisii vaa konklaavin äijille :D",
"maapähkinä-vinegrettee",
"timjamis liuotettuu inkiväärii",
"tämmöst viskisiirappii",
"oliiviöljyä",
"sivetindroppingei",
"orpolapsien kyynelii",
"savulohismetanaa",
"tummaa rommii",
"kolaa",
"vladimirin pirtuu",
"kossuu",
"hp-kastiket",
"ketsuppii",
"poron verta",
"meduusan limaa",
"sinivalaan verta"]
aija_toimenpiteet = ["pyöräytä valkokastikkees",
"glaseerataan nopee",
"pyöräytetää pannul limen kaa",
"flambeerataa punkul",
"paistetaan neljä tuntii",
"keitetään etikassa",
"suurustetaan",
"kuivatetaan"]
aija_loppuun = ["loppuun viel pikku suola",
"lopuks viel silaus curacaoo",
"lopuks viel pikku pippurit",
"lopuks heitetään koko paska roskiin",
"lopuks viel pienet öljyt",
"lopuks viel annetaan paahtua pari tuntii",
"lopuks viel pikku limet",
"lopuks viel pikku chilit",
"lopuks viel pienet pyöräytykset",
"lopuks annetaan jäähtyy pari päivää",
"mut alkuun pienet äijätumut",
"mut alkuun otetaa pienet paukut",
"lopuks annetaan hautuu pari minsaa"]
aija_tuo = ["tuomaan",
"antaan",
"lisään"]
aija_mitatuo = ["semmost syvyyt siihe",
"vähä semmost itämaist twistii siihe",
"terävyyttä tähä",
"pehmeyttä reunoihi",
"vähä siihe semmost twistii",
"vähä semmost äijämäisyyt sekaa",
"makuhermoil vähä lomafiilist",
"vähä semmost bläästii siihe",
"tulista twistii siihe"]
aija_siistii = ["siistii",
"hyvä",
"helmee",
"äijää",
"siistii",
"asiallist",
"kuulii"]
aija_aijat = ["äijät",
"leidit",
"frendit",
"äijä",
"vaimo",
"kundi",
"jätkät",
"homiet",
"homot",
"pellet",
"dudet",
"jäbä",
"spede",
"dude"]
def aija_story():
aijat = aija_aijat[randint(0,len(aija_aijat)-1)]
siistii = aija_siistii[randint(0,len(aija_siistii)-1)]
mestat = aija_mestat[randint(0,len(aija_mestat)-1)]
teot = aija_teot[randint(0,len(aija_teot)-1)]
tanaa = aija_tanaa[randint(0,len(aija_tanaa)-1)]
ruoka = aija_ruoat[randint(0,len(aija_ruoat)-1)]
lisuke = aija_lisukkeet[randint(0,len(aija_lisukkeet)-1)]
yrtit = aija_yrtit[randint(0,len(aija_yrtit)-1)]
tuo = aija_tuo[randint(0,len(aija_tuo)-1)]
mita = aija_mitatuo[randint(0,len(aija_mitatuo)-1)]
#moro x
output = "moro %s :D mitä %s." % (aijat, aijat)
lots = aijat[-1] == 't'
#siisti nähä teit
if lots:
output = output + " %s nähä teit :D " % (siistii,)
else:
output = output + " %s nähä sua :D " % (siistii,)
#tänää mä väsään
if lots:
output = output + "%s teil " % (tanaa,)
else:
output = output + "%s sulle " % (tanaa,)
#ruoaks
output = output + ruoka + " %s." % (lisuke)
#resepti
output = output + " tän reseptin opin kun olin %s %s :D" % (mestat, teot)
#sekaan
output = output + " pistetää sekaa vähä %s %s %s :D" % (yrtit, tuo, mita)
if randint(1,100) > 50:
dressing = aija_dressing[randint(0,len(aija_dressing)-1)]
output = output + " dressingiks %s." % (dressing,)
else:
toimenpide = aija_toimenpiteet[randint(0,len(aija_toimenpiteet)-1)]
output = output + " ja sit viel %s :D" % (toimenpide,)
if randint(1,100) > 50:
lopuks = aija_loppuun[randint(0,len(aija_loppuun)-1)]
output = output + " %s." % (lopuks,)
output = output + " nonii toivottavasti maistuu. "
if lots:
output = output + "mä rakastan teit %s :D" % (aijat,)
else:
output = output + "mä rakastan sua %s :D" % (aijat,)
return output
def command_aija(bot, user, channel, args):
output = aija_story()
return bot.say(channel, output)
def command_spurdoaija(bot, user, channel, args):
output = aija_story()
output = output.replace("t","d").replace("c","g").replace("k", "g").replace("p", "b").replace("x","gs").replace("z","ds")
return bot.say(channel, output)
|
omtinez/micropython
|
refs/heads/master
|
tests/basics/string_replace.py
|
96
|
print("".replace("a", "b"))
print("aaa".replace("b", "c"))
print("aaa".replace("a", "b", 0))
print("aaa".replace("a", "b", -5))
print("asdfasdf".replace("a", "b"))
print("aabbaabbaabbaa".replace("aa", "cc", 3))
print("a".replace("aa", "bb"))
print("testingtesting".replace("ing", ""))
print("testINGtesting".replace("ing", "ING!"))
print("".replace("", "1"))
print("A".replace("", "1"))
print("AB".replace("", "1"))
print("AB".replace("", "12"))
try:
'abc'.replace(1, 2)
except TypeError:
print('TypeError')
try:
'abc'.replace('1', 2)
except TypeError:
print('TypeError')
|
prakritish/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/known_hosts.py
|
46
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import hmac
import re
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
try:
from hashlib import sha1
except ImportError:
import sha as sha1
HASHED_KEY_MAGIC = "|1|"
def add_git_host_key(module, url, accept_hostkey=True, create_dir=True):
""" idempotently add a git url hostkey """
if is_ssh_url(url):
fqdn, port = get_fqdn_and_port(url)
if fqdn:
known_host = check_hostkey(module, fqdn)
if not known_host:
if accept_hostkey:
rc, out, err = add_host_key(module, fqdn, port=port, create_dir=create_dir)
if rc != 0:
module.fail_json(msg="failed to add %s hostkey: %s" % (fqdn, out + err))
else:
module.fail_json(msg="%s has an unknown hostkey. Set accept_hostkey to True "
"or manually add the hostkey prior to running the git module" % fqdn)
def is_ssh_url(url):
""" check if url is ssh """
if "@" in url and "://" not in url:
return True
for scheme in "ssh://", "git+ssh://", "ssh+git://":
if url.startswith(scheme):
return True
return False
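# e.g. is_ssh_url('git@github.com:user/repo.git') -> True,
#      is_ssh_url('https://github.com/user/repo.git') -> False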
def get_fqdn_and_port(repo_url):
""" chop the hostname and port out of a url """
fqdn = None
port = None
ipv6_re = re.compile(r'(\[[^]]*\])(?::([0-9]+))?')
if "@" in repo_url and "://" not in repo_url:
# most likely a user@host:path or user@host/path type URL
repo_url = repo_url.split("@", 1)[1]
match = ipv6_re.match(repo_url)
# For this type of URL, colon specifies the path, not the port
if match:
fqdn, path = match.groups()
elif ":" in repo_url:
fqdn = repo_url.split(":")[0]
elif "/" in repo_url:
fqdn = repo_url.split("/")[0]
elif "://" in repo_url:
# this should be something we can parse with urlparse
parts = urlparse.urlparse(repo_url)
# parts[1] will be empty on python2.4 on ssh:// or git:// urls, so
# ensure we actually have a parts[1] before continuing.
if parts[1] != '':
fqdn = parts[1]
if "@" in fqdn:
fqdn = fqdn.split("@", 1)[1]
match = ipv6_re.match(fqdn)
if match:
fqdn, port = match.groups()
elif ":" in fqdn:
fqdn, port = fqdn.split(":")[0:2]
return fqdn, port
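# Usage sketch:
#
#   get_fqdn_and_port('git@github.com:user/repo.git')    # ('github.com', None)
#   get_fqdn_and_port('ssh://git@host.example:2222/r')   # ('host.example', '2222')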
def check_hostkey(module, fqdn):
return not not_in_host_file(module, fqdn)
# this is a variant of code found in connection_plugins/paramiko.py and we should modify
# the paramiko code to import and use this.
def not_in_host_file(self, host):
if 'USER' in os.environ:
user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
else:
user_host_file = "~/.ssh/known_hosts"
user_host_file = os.path.expanduser(user_host_file)
host_file_list = []
host_file_list.append(user_host_file)
host_file_list.append("/etc/ssh/ssh_known_hosts")
host_file_list.append("/etc/ssh/ssh_known_hosts2")
host_file_list.append("/etc/openssh/ssh_known_hosts")
hfiles_not_found = 0
for hf in host_file_list:
if not os.path.exists(hf):
hfiles_not_found += 1
continue
try:
host_fh = open(hf)
except IOError:
hfiles_not_found += 1
continue
else:
data = host_fh.read()
host_fh.close()
for line in data.split("\n"):
if line is None or " " not in line:
continue
tokens = line.split()
if tokens[0].find(HASHED_KEY_MAGIC) == 0:
# this is a hashed known host entry
try:
(kn_salt,kn_host) = tokens[0][len(HASHED_KEY_MAGIC):].split("|",2)
hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1)
hash.update(host)
if hash.digest() == kn_host.decode('base64'):
return False
except:
# invalid hashed host key, skip it
continue
else:
# standard host file entry
if host in tokens[0]:
return False
return True
def add_host_key(module, fqdn, port=22, key_type="rsa", create_dir=False):
""" use ssh-keyscan to add the hostkey """
keyscan_cmd = module.get_bin_path('ssh-keyscan', True)
if 'USER' in os.environ:
user_ssh_dir = os.path.expandvars("~${USER}/.ssh/")
user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
else:
user_ssh_dir = "~/.ssh/"
user_host_file = "~/.ssh/known_hosts"
user_ssh_dir = os.path.expanduser(user_ssh_dir)
if not os.path.exists(user_ssh_dir):
if create_dir:
try:
os.makedirs(user_ssh_dir, int('700', 8))
except:
module.fail_json(msg="failed to create host key directory: %s" % user_ssh_dir)
else:
module.fail_json(msg="%s does not exist" % user_ssh_dir)
elif not os.path.isdir(user_ssh_dir):
module.fail_json(msg="%s is not a directory" % user_ssh_dir)
if port:
this_cmd = "%s -t %s -p %s %s" % (keyscan_cmd, key_type, port, fqdn)
else:
this_cmd = "%s -t %s %s" % (keyscan_cmd, key_type, fqdn)
rc, out, err = module.run_command(this_cmd)
# ssh-keyscan gives a 0 exit code and prints nothing on timeout
if rc != 0 or not out:
module.fail_json(msg='failed to get the hostkey for %s' % fqdn)
module.append_to_file(user_host_file, out)
return rc, out, err
|
chetan/ansible
|
refs/heads/master
|
test/units/TestVaultEditor.py
|
28
|
#!/usr/bin/env python
from unittest import TestCase
import getpass
import os
import shutil
import time
import tempfile
from binascii import unhexlify
from binascii import hexlify
from nose.plugins.skip import SkipTest
from ansible import errors
from ansible.utils.vault import VaultLib
from ansible.utils.vault import VaultEditor
# Counter import fails for 2.0.1, requires >= 2.6.1 from pip
try:
from Crypto.Util import Counter
HAS_COUNTER = True
except ImportError:
HAS_COUNTER = False
# KDF import fails for 2.0.1, requires >= 2.6.1 from pip
try:
from Crypto.Protocol.KDF import PBKDF2
HAS_PBKDF2 = True
except ImportError:
HAS_PBKDF2 = False
# AES IMPORTS
try:
from Crypto.Cipher import AES as AES
HAS_AES = True
except ImportError:
HAS_AES = False
class TestVaultEditor(TestCase):
def test_methods_exist(self):
v = VaultEditor(None, None, None)
slots = ['create_file',
'decrypt_file',
'edit_file',
'encrypt_file',
'rekey_file',
'read_data',
'write_data',
'shuffle_files']
for slot in slots:
assert hasattr(v, slot), "VaultLib is missing the %s method" % slot
def test_decrypt_1_0(self):
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
dirpath = tempfile.mkdtemp()
filename = os.path.join(dirpath, "foo-ansible-1.0.yml")
shutil.rmtree(dirpath)
shutil.copytree("vault_test_data", dirpath)
ve = VaultEditor(None, "ansible", filename)
# make sure the password functions for the cipher
error_hit = False
try:
ve.decrypt_file()
except errors.AnsibleError, e:
error_hit = True
# verify decrypted content
f = open(filename, "rb")
fdata = f.read()
f.close()
shutil.rmtree(dirpath)
assert error_hit == False, "error decrypting 1.0 file"
assert fdata.strip() == "foo", "incorrect decryption of 1.0 file: %s" % fdata.strip()
def test_decrypt_1_0_newline(self):
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
dirpath = tempfile.mkdtemp()
filename = os.path.join(dirpath, "foo-ansible-1.0-ansible-newline-ansible.yml")
shutil.rmtree(dirpath)
shutil.copytree("vault_test_data", dirpath)
ve = VaultEditor(None, "ansible\nansible\n", filename)
# make sure the password functions for the cipher
error_hit = False
try:
ve.decrypt_file()
except errors.AnsibleError, e:
error_hit = True
# verify decrypted content
f = open(filename, "rb")
fdata = f.read()
f.close()
shutil.rmtree(dirpath)
assert error_hit == False, "error decrypting 1.0 file with newline in password"
#assert fdata.strip() == "foo", "incorrect decryption of 1.0 file: %s" % fdata.strip()
def test_decrypt_1_1(self):
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
dirpath = tempfile.mkdtemp()
filename = os.path.join(dirpath, "foo-ansible-1.1.yml")
shutil.rmtree(dirpath)
shutil.copytree("vault_test_data", dirpath)
ve = VaultEditor(None, "ansible", filename)
# make sure the password functions for the cipher
error_hit = False
try:
ve.decrypt_file()
except errors.AnsibleError, e:
error_hit = True
# verify decrypted content
f = open(filename, "rb")
fdata = f.read()
f.close()
shutil.rmtree(dirpath)
        assert error_hit == False, "error decrypting 1.1 file"
        assert fdata.strip() == "foo", "incorrect decryption of 1.1 file: %s" % fdata.strip()
def test_rekey_migration(self):
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
dirpath = tempfile.mkdtemp()
filename = os.path.join(dirpath, "foo-ansible-1.0.yml")
shutil.rmtree(dirpath)
shutil.copytree("vault_test_data", dirpath)
ve = VaultEditor(None, "ansible", filename)
# make sure the password functions for the cipher
error_hit = False
try:
ve.rekey_file('ansible2')
except errors.AnsibleError, e:
error_hit = True
# verify decrypted content
f = open(filename, "rb")
fdata = f.read()
f.close()
shutil.rmtree(dirpath)
assert error_hit == False, "error rekeying 1.0 file to 1.1"
# ensure filedata can be decrypted, is 1.1 and is AES256
vl = VaultLib("ansible2")
dec_data = None
error_hit = False
try:
dec_data = vl.decrypt(fdata)
except errors.AnsibleError, e:
error_hit = True
assert vl.cipher_name == "AES256", "wrong cipher name set after rekey: %s" % vl.cipher_name
assert error_hit == False, "error decrypting migrated 1.0 file"
assert dec_data.strip() == "foo", "incorrect decryption of rekeyed/migrated file: %s" % dec_data
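# Illustrative round trip (a sketch, not part of the test suite) with the
# same VaultLib API exercised above; on some 1.x versions cipher_name must
# be set before encrypting:
#
#   vl = VaultLib("ansible")
#   vl.cipher_name = "AES256"
#   blob = vl.encrypt("foo")
#   assert vl.decrypt(blob).strip() == "foo"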
|
ygol/odoo
|
refs/heads/8.0
|
addons/hr_timesheet/wizard/hr_timesheet_sign_in_out.py
|
340
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
class hr_so_project(osv.osv_memory):
_name = 'hr.sign.out.project'
_description = 'Sign Out By Project'
_columns = {
'account_id': fields.many2one('account.analytic.account', 'Project / Analytic Account', domain=[('type','=','normal')]),
'info': fields.char('Work Description', required=True),
'date_start': fields.datetime('Starting Date', readonly=True),
'date': fields.datetime('Closing Date'),
'analytic_amount': fields.float('Minimum Analytic Amount'),
'name': fields.char('Employee\'s Name', required=True, readonly=True),
'state': fields.related('emp_id', 'state', string='Current Status', type='selection', selection=[('present', 'Present'), ('absent', 'Absent')], required=True, readonly=True),
'server_date': fields.datetime('Current Date', required=True, readonly=True),
'emp_id': fields.many2one('hr.employee', 'Employee ID')
}
def _get_empid(self, cr, uid, context=None):
emp_obj = self.pool.get('hr.employee')
emp_ids = emp_obj.search(cr, uid, [('user_id', '=', uid)], context=context)
if emp_ids:
for employee in emp_obj.browse(cr, uid, emp_ids, context=context):
return {'name': employee.name, 'state': employee.state, 'emp_id': emp_ids[0], 'server_date':time.strftime('%Y-%m-%d %H:%M:%S')}
def _get_empid2(self, cr, uid, context=None):
res = self._get_empid(cr, uid, context=context)
cr.execute('select name,action from hr_attendance where employee_id=%s order by name desc limit 1', (res['emp_id'],))
res['server_date'] = time.strftime('%Y-%m-%d %H:%M:%S')
date_start = cr.fetchone()
if date_start:
res['date_start'] = date_start[0]
return res
def default_get(self, cr, uid, fields_list, context=None):
res = super(hr_so_project, self).default_get(cr, uid, fields_list, context=context)
res.update(self._get_empid2(cr, uid, context=context))
return res
def _write(self, cr, uid, data, emp_id, context=None):
timesheet_obj = self.pool.get('hr.analytic.timesheet')
emp_obj = self.pool.get('hr.employee')
if context is None:
context = {}
hour = (time.mktime(time.strptime(data['date'] or time.strftime('%Y-%m-%d %H:%M:%S'), '%Y-%m-%d %H:%M:%S')) -
time.mktime(time.strptime(data['date_start'], '%Y-%m-%d %H:%M:%S'))) / 3600.0
minimum = data['analytic_amount']
if minimum:
hour = round(round((hour + minimum / 2) / minimum) * minimum, 2)
res = timesheet_obj.default_get(cr, uid, ['product_id','product_uom_id'], context=context)
if not res['product_uom_id']:
raise osv.except_osv(_('User Error!'), _('Please define cost unit for this employee.'))
        up = timesheet_obj.on_change_unit_amount(cr, uid, False, res['product_id'], hour, False, res['product_uom_id'])['value']
res['name'] = data['info']
res['account_id'] = data['account_id'].id
res['unit_amount'] = hour
emp_journal = emp_obj.browse(cr, uid, emp_id, context=context).journal_id
res['journal_id'] = emp_journal and emp_journal.id or False
res.update(up)
up = timesheet_obj.on_change_account_id(cr, uid, [], res['account_id']).get('value', {})
res.update(up)
return timesheet_obj.create(cr, uid, res, context=context)
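    # Worked example of the rounding above (illustrative numbers): for a raw
    # interval of 1.7 hours with a minimum analytic amount of 0.5, hour
    # becomes round(round((1.7 + 0.25) / 0.5) * 0.5, 2)
    # = round(4 * 0.5, 2) = 2.0, so durations are billed in increments of
    # the minimum amount.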
def sign_out_result_end(self, cr, uid, ids, context=None):
emp_obj = self.pool.get('hr.employee')
for data in self.browse(cr, uid, ids, context=context):
emp_id = data.emp_id.id
emp_obj.attendance_action_change(cr, uid, [emp_id], {'action':'sign_out', 'action_date':data.date})
self._write(cr, uid, data, emp_id, context=context)
return {'type': 'ir.actions.act_window_close'}
def sign_out_result(self, cr, uid, ids, context=None):
emp_obj = self.pool.get('hr.employee')
for data in self.browse(cr, uid, ids, context=context):
emp_id = data.emp_id.id
emp_obj.attendance_action_change(cr, uid, [emp_id], {'action':'action', 'action_date':data.date})
self._write(cr, uid, data, emp_id, context=context)
return {'type': 'ir.actions.act_window_close'}
class hr_si_project(osv.osv_memory):
_name = 'hr.sign.in.project'
_description = 'Sign In By Project'
_columns = {
'name': fields.char('Employee\'s Name', readonly=True),
'state': fields.related('emp_id', 'state', string='Current Status', type='selection', selection=[('present', 'Present'), ('absent', 'Absent')], required=True, readonly=True),
'date': fields.datetime('Starting Date'),
'server_date': fields.datetime('Current Date', readonly=True),
'emp_id': fields.many2one('hr.employee', 'Employee ID')
}
def view_init(self, cr, uid, fields, context=None):
"""
This function checks for precondition before wizard executes
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param fields: List of fields for default value
@param context: A standard dictionary for contextual values
"""
emp_obj = self.pool.get('hr.employee')
emp_id = emp_obj.search(cr, uid, [('user_id', '=', uid)], context=context)
if not emp_id:
raise osv.except_osv(_('User Error!'), _('Please define employee for your user.'))
return False
def check_state(self, cr, uid, ids, context=None):
obj_model = self.pool.get('ir.model.data')
emp_id = self.default_get(cr, uid, ['emp_id'], context)['emp_id']
# get the latest action (sign_in or out) for this employee
cr.execute('select action from hr_attendance where employee_id=%s and action in (\'sign_in\',\'sign_out\') order by name desc limit 1', (emp_id,))
res = (cr.fetchone() or ('sign_out',))[0]
in_out = (res == 'sign_out') and 'in' or 'out'
        # TODO: invert sign_in and sign_out
model_data_ids = obj_model.search(cr,uid,[('model','=','ir.ui.view'),('name','=','view_hr_timesheet_sign_%s' % in_out)], context=context)
resource_id = obj_model.read(cr, uid, model_data_ids, fields=['res_id'], context=context)[0]['res_id']
return {
'name': _('Sign in / Sign out'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'hr.sign.%s.project' % in_out,
'views': [(resource_id,'form')],
'type': 'ir.actions.act_window',
'target': 'new'
}
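    # Example (illustrative): if the employee's latest attendance row is a
    # 'sign_in', in_out is 'out' and the hr.sign.out.project form opens; with
    # no attendance rows at all, the fallback ('sign_out',) selects the
    # sign-in form.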
def sign_in_result(self, cr, uid, ids, context=None):
emp_obj = self.pool.get('hr.employee')
for data in self.browse(cr, uid, ids, context=context):
emp_id = data.emp_id.id
emp_obj.attendance_action_change(cr, uid, [emp_id], {'action':'sign_in', 'action_date':data.date})
return {'type': 'ir.actions.act_window_close'}
def default_get(self, cr, uid, fields_list, context=None):
res = super(hr_si_project, self).default_get(cr, uid, fields_list, context=context)
emp_obj = self.pool.get('hr.employee')
emp_id = emp_obj.search(cr, uid, [('user_id', '=', uid)], context=context)
if emp_id:
for employee in emp_obj.browse(cr, uid, emp_id, context=context):
res.update({'name': employee.name, 'state': employee.state, 'emp_id': emp_id[0], 'server_date':time.strftime('%Y-%m-%d %H:%M:%S')})
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
charles1018/The-f2fs-filesystem
|
refs/heads/master
|
tools/perf/python/twatch.py
|
219
|
#! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(type = perf.TYPE_SOFTWARE,
config = perf.COUNT_SW_DUMMY,
task = 1, comm = 1, mmap = 0, freq = 0,
wakeup_events = 1, watermark = 1,
sample_id_all = 1,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
"""What we want are just the PERF_RECORD_ lifetime events for threads,
using the default, PERF_TYPE_HARDWARE + PERF_COUNT_HW_CYCLES & freq=1
(the default), makes perf reenable irq_vectors:local_timer_entry, when
disabling nohz, not good for some use cases where all we want is to get
threads comes and goes... So use (perf.TYPE_SOFTWARE, perf_COUNT_SW_DUMMY,
freq=0) instead."""
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
|
javierblasco/tensorflow
|
refs/heads/master
|
tensorflow/python/framework/ops_test.py
|
5
|
"""Tests for tensorflow.python.framework.ops."""
import tensorflow.python.platform
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_kernel_label_op
from tensorflow.python.framework import test_util
from tensorflow.python.framework import types
from tensorflow.python.ops import common_shapes
from tensorflow.python.platform import googletest
class TensorTest(test_util.TensorFlowTestCase):
def testShape(self):
op = ops.Operation(ops._NodeDef("noop", "myop"), ops.Graph(),
[], [types.float32])
t = op.outputs[0]
self.assertEquals(tensor_shape.unknown_shape(), t.get_shape())
t.set_shape([1, 2, 3])
self.assertEquals([1, 2, 3], t.get_shape())
class NodeDefConstructorTest(test_util.TensorFlowTestCase):
def testNoArgs(self):
nodedef = ops._NodeDef("noop", "bar")
self.assertProtoEquals("op: 'noop' name: 'bar'", nodedef)
def testArgs(self):
nodedef = ops._NodeDef("foo", "bar", device="/device:baz:*")
self.assertProtoEquals("op:'foo' name:'bar' device:'/device:baz:*'",
nodedef)
nodedef = ops._NodeDef("foo", "bar", device=pydev.Device(job="j"))
self.assertProtoEquals("op:'foo' name:'bar' device:'/job:j'", nodedef)
# NOTE(mrry): Dummy shape registrations for ops used in the tests.
ops.RegisterShape("a")(None)
ops.RegisterShape("b")(None)
ops.RegisterShape("c")(None)
ops.RegisterShape("add")(None)
ops.RegisterShape("an_op")(None)
ops.RegisterShape("const")(None)
ops.RegisterShape("copy")(None)
ops.RegisterShape("foo")(None)
ops.RegisterShape("identity")(None)
ops.RegisterShape("mul")(None)
ops.RegisterShape("nonrefop")(None)
ops.RegisterShape("noop")(None)
ops.RegisterShape("refop")(None)
def _apply_op(g, *args, **kwargs):
op = g.create_op(*args, **kwargs)
if len(op.outputs) == 1:
return op.outputs[0]
else:
return op.outputs
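# Example (illustrative): for a single-output op, _apply_op returns the
# tensor itself, e.g. t = _apply_op(g, "const", [], [types.float32]);
# ops with several outputs return the list of output tensors instead.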
class OperationTest(test_util.TensorFlowTestCase):
def testNoInputs(self):
op = ops.Operation(ops._NodeDef("noop", "myop"), ops.Graph(),
[],
[types.float32, types.string])
self.assertEquals(2, len(op.values()))
self.assertEquals(0, len(op.inputs))
self.assertEquals("myop", op.name)
float_t, label_str_t = op.values()
self.assertEquals(types.float32, float_t.dtype)
self.assertEquals(op, float_t.op)
self.assertEquals(0, float_t._value_index)
self.assertEquals(0, len(float_t._consumers))
self.assertEquals("myop", float_t._as_node_def_input())
self.assertEquals(types.string, label_str_t.dtype)
self.assertEquals(op, label_str_t.op)
self.assertEquals(1, label_str_t._value_index)
self.assertEquals(0, len(label_str_t._consumers))
self.assertEquals("myop:1", label_str_t._as_node_def_input())
self.assertProtoEquals("op:'noop' name:'myop'", op.node_def)
def testNoOutputs(self):
g = ops.Graph()
op1 = ops.Operation(
ops._NodeDef("noop", "myop1"), g, [], [types.float32])
float_t, = op1.values()
op2 = ops.Operation(ops._NodeDef("reop", "myop2"), g, [float_t], [])
self.assertEquals(0, len(op2.values()))
self.assertEquals(1, len(op2.inputs))
self.assertIs(float_t, op2.inputs[0])
self.assertEquals(1, len(float_t._consumers))
self.assertEquals(op2, float_t._consumers[0])
self.assertProtoEquals("op:'noop' name:'myop1'", op1.node_def)
self.assertProtoEquals("op:'reop' name:'myop2' input:'myop1'",
op2.node_def)
def testInputsAndOutputs(self):
g = ops.Graph()
op1 = ops.Operation(
ops._NodeDef("noop", "myop1"), g, [], [types.float32])
self.assertEquals(1, len(op1.values()))
float1_t, = op1.values()
op2 = ops.Operation(ops._NodeDef("reop", "myop2"), g,
[], [types.float32, types.string])
self.assertEquals(2, len(op2.values()))
float2_t, label2_str_t = op2.values()
# Note that we consume label2_str_t twice here.
op3 = ops.Operation(ops._NodeDef("add", "myop3"), g,
[float1_t, label2_str_t, label2_str_t],
[types.float32, types.int32])
self.assertEquals(2, len(op3.values()))
self.assertEquals(1, len(float1_t._consumers))
self.assertEquals(op3, float1_t._consumers[0])
self.assertEquals(0, len(float2_t._consumers))
self.assertEquals(2, len(label2_str_t._consumers))
self.assertEquals(op3, label2_str_t._consumers[0])
self.assertEquals(op3, label2_str_t._consumers[1])
self.assertProtoEquals("""
op:'add' name:'myop3'
input:'myop1' input:'myop2:1' input:'myop2:1'
""", op3.node_def)
def testDeviceObject(self):
op = ops.Operation(ops._NodeDef("noop", "myop"), ops.Graph(), [], [])
op._set_device("/job:goo/device:GPU:0")
self.assertProtoEquals(
"op:'noop' name:'myop' device:'/job:goo/device:GPU:0' ",
op.node_def)
op = ops.Operation(ops._NodeDef("noop", "op2"), ops.Graph(), [], [])
op._set_device(pydev.Device(job="muu", device_type="CPU", device_index=0))
self.assertProtoEquals(
"op:'noop' name:'op2' device:'/job:muu/device:CPU:0'",
op.node_def)
def testReferenceInput(self):
g = ops.Graph()
op1 = ops.Operation(ops._NodeDef("noop", "op1"), g, [],
[types.float32_ref, types.float32])
self.assertProtoEquals("op:'noop' name:'op1'",
op1.node_def)
ref_t, nonref_t = op1.values()
# NOTE(mrry): Must specify input_types to preserve ref-typed input.
op2 = ops.Operation(
ops._NodeDef("refop", "op2"), g, [ref_t, nonref_t], [],
input_types=[types.float32_ref, types.float32])
self.assertProtoEquals("op:'refop' name:'op2' input:'op1' input:'op1:1'",
op2.node_def)
op3 = ops.Operation(
ops._NodeDef("nonrefop", "op3"), g, [ref_t, nonref_t], [])
self.assertProtoEquals("op:'nonrefop' name:'op3' input:'op1' input:'op1:1'",
op3.node_def)
def testInvalidNames(self):
g = ops.Graph()
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", ""), g)
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", "_invalid"), g)
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", "-invalid"), g)
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", "/invalid"), g)
def testShapeFunctionAbsence(self):
def _test():
pass
g = ops.Graph()
with self.assertRaises(RuntimeError):
g.create_op("shapeless_op", [], [types.float32])
def testNoShapeFunction(self):
g = ops.Graph()
op = ops.Operation(ops._NodeDef("op", "an_op"), g,
output_types = [types.float32])
self.assertEquals(tensor_shape.unknown_shape(),
_apply_op(g, "an_op", [], [types.float32]).get_shape())
class CreateOpTest(test_util.TensorFlowTestCase):
def testNodeDefArgs(self):
g = ops.Graph()
op1 = g.create_op("const", [], [types.float32], None, name="myop1")
with g.device("/device:GPU"):
op2 = g.create_op("add",
[],
[types.float32, types.string], None,
name="myop2")
op3 = g.create_op(
"foo",
[op1.values()[0], op2.values()[1], op2.values()[0]],
[types.float32, types.int32], None,
name="myop3")
self.assertEquals(None, op1.device)
self.assertEquals("/device:GPU", op2.device)
self.assertEquals(None, op3.device)
self.assertProtoEquals("name:'myop1' op:'const'", op1.node_def)
self.assertProtoEquals("name:'myop2' op:'add' device:'/device:GPU'",
op2.node_def)
self.assertProtoEquals(
"name:'myop3' input:'myop1' input:'myop2:1' input:'myop2' op:'foo'",
op3.node_def)
def testReferenceInput(self):
g = ops.Graph()
op1 = g.create_op("noop", [],
[types.float32_ref, types.float32], name="op1")
self.assertProtoEquals("op:'noop' name:'op1'", op1.node_def)
ref_t, nonref_t = op1.values()
# NOTE(mrry): Must specify input_types to preserve ref-typed input.
op2 = g.create_op("refop", [ref_t, nonref_t], [],
input_types=[types.float32_ref, types.float32],
name="op2")
self.assertProtoEquals("op:'refop' name:'op2' input:'op1' input:'op1:1'",
op2.node_def)
op3 = g.create_op("nonrefop", [ref_t, nonref_t], [], name="op3")
self.assertProtoEquals("op:'nonrefop' name:'op3' input:'op1' input:'op1:1'",
op3.node_def)
def testFinalized(self):
g = ops.Graph()
g.finalize()
with self.assertRaises(RuntimeError):
g.create_op("const", [], [types.float32], None, name="myop1")
class ApplyOpTest(test_util.TensorFlowTestCase):
def testNodeDefArgs(self):
g = ops.Graph()
t1 = _apply_op(g, "const", [], [types.float32], name="myop1")
with g.device("/device:GPU"):
t2 = _apply_op(g, "add",
[],
[types.float32, types.string],
name="myop2")
t3 = _apply_op(g, "foo", [t1, t2[1], t2[0]],
[types.float32, types.int32], name="myop3")
self.assertTrue(isinstance(t1, ops.Tensor))
self.assertTrue(isinstance(t2, list))
self.assertTrue(isinstance(t3, list))
self.assertTrue(isinstance(t3[0], ops.Tensor))
self.assertEquals("myop1", t1._as_node_def_input())
self.assertEquals("myop2", t2[0]._as_node_def_input())
self.assertEquals("myop2:1", t2[1]._as_node_def_input())
self.assertEquals("myop3", t3[0]._as_node_def_input())
# Validate that we got the right ops as well
self.assertProtoEquals("name:'myop1' op:'const'", t1.op.node_def)
self.assertProtoEquals("name:'myop2' op:'add' device:'/device:GPU'",
t2[0].op.node_def)
self.assertProtoEquals(
"name:'myop3' input:'myop1' input:'myop2:1' input:'myop2' op:'foo'",
t3[0].op.node_def)
def testReferenceInput(self):
g = ops.Graph()
ref_t, nonref_t = _apply_op(
g, "noop", [], [types.float32_ref, types.float32], name="op1")
self.assertProtoEquals("op:'noop' name:'op1'", ref_t.op.node_def)
# NOTE(mrry): Must specify input_types to preserve ref-typed input.
out_2 = _apply_op(g, "refop", [ref_t, nonref_t], [types.int32],
input_types=[types.float32_ref, types.float32],
name="op2")
self.assertProtoEquals("op:'refop' name:'op2' input:'op1' input:'op1:1'",
out_2.op.node_def)
out_3 = _apply_op(g, "nonrefop", [ref_t, nonref_t], [types.int32],
name="op3")
self.assertProtoEquals("op:'nonrefop' name:'op3' input:'op1' input:'op1:1'",
out_3.op.node_def)
class NameStackTest(test_util.TensorFlowTestCase):
def testBasics(self):
g = ops.Graph()
self.assertEquals("foo", g.unique_name("foo"))
self.assertEquals("foo_1", g.unique_name("foo"))
self.assertEquals("foo_2", g.unique_name("foo"))
self.assertEquals("foo_1_1", g.unique_name("foo_1"))
self.assertEquals("foo_1_2", g.unique_name("foo_1"))
self.assertEquals("foo_1_2_1", g.unique_name("foo_1_2"))
with g.name_scope("bar"):
self.assertEquals("bar/foo", g.unique_name("foo"))
self.assertEquals("bar/foo_1", g.unique_name("foo"))
with g.name_scope(None):
self.assertEquals("foo_3", g.unique_name("foo"))
with g.name_scope("baz"):
self.assertEquals("bar/baz/foo", g.unique_name("foo"))
self.assertEquals("bar/baz/foo_1", g.unique_name("foo"))
with g.name_scope("baz"):
self.assertEquals("bar/baz_1/foo", g.unique_name("foo"))
self.assertEquals("bar/baz_1/foo_1", g.unique_name("foo"))
with g.name_scope("quux"):
self.assertEquals("quux/foo", g.unique_name("foo"))
with g.name_scope("bar"):
with g.name_scope("baz"):
self.assertEquals("bar_1/baz/foo", g.unique_name("foo"))
self.assertEquals("foo_4", g.unique_name("foo"))
self.assertEquals("bar_2", g.unique_name("bar"))
def testOutOfOrderUniqueName(self):
g = ops.Graph()
self.assertEquals("foo_2", g.unique_name("foo_2"))
self.assertEquals("foo", g.unique_name("foo"))
self.assertEquals("foo_1", g.unique_name("foo"))
self.assertEquals("foo_3", g.unique_name("foo"))
class NameTest(test_util.TensorFlowTestCase):
def testGenerateName(self):
g = ops.Graph()
op0 = g.create_op("const", [], [types.float32, types.float32])
self.assertEquals("const", op0.name)
self.assertEquals("const:0", op0.outputs[0].name)
self.assertEquals("const:1", op0.outputs[1].name)
op1 = g.create_op("const", [], [types.float32])
self.assertEquals("const_1", op1.name)
self.assertEquals("const_1:0", op1.outputs[0].name)
op2 = g.create_op("const", [], [types.float32], name="my_op")
self.assertEquals("my_op", op2.name)
self.assertEquals("my_op:0", op2.outputs[0].name)
def testname_scope(self):
g = ops.Graph()
with g.name_scope("foo") as foo:
self.assertEquals(foo, "foo/")
with g.name_scope("foo2") as foo2:
self.assertEquals(foo2, "foo/foo2/")
with g.name_scope(None) as empty1:
self.assertEquals(empty1, "")
with g.name_scope("foo3") as foo3:
self.assertEquals(foo3, "foo3/")
with g.name_scope("") as empty2:
self.assertEquals(empty2, "")
self.assertEquals("const",
g.create_op("const", [], [types.float32]).name)
with g.name_scope("bar") as scope:
self.assertEquals("bar/const",
g.create_op("const", [], [types.float32]).name)
self.assertEquals("bar/const_1",
g.create_op("const", [], [types.float32]).name)
      # If you use the value from "with .. as", that value is used as-is.
self.assertEquals(
"bar",
g.create_op("const", [], [types.float32], name=scope).name)
with g.name_scope("baz") as scope:
with g.name_scope("quux"):
self.assertEquals("baz/quux/const",
g.create_op("const", [], [types.float32]).name)
# If you use the value from the enclosing "with .. as", nothing is pushed.
with g.name_scope(scope):
self.assertEquals("baz/const",
g.create_op("const", [], [types.float32]).name)
self.assertEquals("baz",
g.create_op("const", [], [types.float32],
name=scope).name)
self.assertEquals("trailing",
g.create_op("const", [], [types.float32],
name="trailing/").name)
with g.name_scope("bar"):
self.assertEquals("bar_1/const",
g.create_op("const", [], [types.float32]).name)
with g.name_scope("bar/"):
self.assertEquals("bar/const_2",
g.create_op("const", [], [types.float32]).name)
class DeviceTest(test_util.TensorFlowTestCase):
def testNoDevice(self):
g = ops.Graph()
op = g.create_op("an_op", [], [types.float32])
self.assertEqual(None, op.device)
gd = g.as_graph_def()
self.assertProtoEquals("""
node { name: "an_op" op: "an_op" }
""", gd)
def testDevicePartialString(self):
g = ops.Graph()
with g.device("/job:worker/replica:2"):
g.create_op("an_op", [], [types.float32])
gd = g.as_graph_def()
self.assertProtoEquals("""
node { name: "an_op" op: "an_op" device: "/job:worker/replica:2" }
""", gd)
def testDeviceFull(self):
g = ops.Graph()
with g.device(pydev.Device(job="worker", replica=2, task=0,
device_type="CPU",
device_index=3)):
g.create_op("an_op", [], [types.float32])
gd = g.as_graph_def()
self.assertProtoEquals("""
node { name: "an_op" op: "an_op"
device: "/job:worker/replica:2/task:0/device:CPU:3" }
""", gd)
def testNesting(self):
g = ops.Graph()
with g.device("/job:worker/replica:2"):
g.create_op("an_op", [], [types.float32])
with g.device("/job:worker/replica:3/task:0"):
g.create_op("an_op", [], [types.float32])
g.create_op("an_op", [], [types.float32])
gd = g.as_graph_def()
self.assertProtoEquals("""
node { name: "an_op" op: "an_op"
device: "/job:worker/replica:2" }
node { name: "an_op_1" op: "an_op"
device: "/job:worker/replica:3/task:0" }
node { name: "an_op_2" op: "an_op"
device: "/job:worker/replica:2" }
""", gd)
def testNestingString(self):
g = ops.Graph()
with g.device("/job:worker/replica:2"):
g.create_op("an_op", [], [types.float32])
with g.device("/job:worker/replica:3/task:0"):
g.create_op("an_op", [], [types.float32])
g.create_op("an_op", [], [types.float32])
gd = g.as_graph_def()
self.assertProtoEquals("""
node { name: "an_op" op: "an_op"
device: "/job:worker/replica:2" }
node { name: "an_op_1" op: "an_op"
device: "/job:worker/replica:3/task:0" }
node { name: "an_op_2" op: "an_op"
device: "/job:worker/replica:2" }
""", gd)
def testNestingOverrideGpuCpu(self):
g = ops.Graph()
with g.device("/job:worker/replica:2/device:CPU:1"):
g.create_op("an_op", [], [types.float32])
with g.device("/job:worker/replica:2/device:GPU:2"):
g.create_op("an_op", [], [types.float32])
g.create_op("an_op", [], [types.float32])
gd = g.as_graph_def()
self.assertProtoEquals("""
node { name: "an_op" op: "an_op"
device: "/job:worker/replica:2/device:CPU:1" }
node { name: "an_op_1" op: "an_op"
device: "/job:worker/replica:2/device:GPU:2" }
node { name: "an_op_2" op: "an_op"
device: "/job:worker/replica:2/device:CPU:1" }
""", gd)
def testNestingWithMergeDeviceFunction(self):
g = ops.Graph()
with g.device(pydev.merge_device("/device:GPU:0")):
g.create_op("an_op", [], [types.float32])
with g.device(pydev.merge_device("/job:worker")):
g.create_op("an_op", [], [types.float32])
with g.device(pydev.merge_device("/device:CPU:0")):
g.create_op("an_op", [], [types.float32])
with g.device(pydev.merge_device("/job:ps")):
g.create_op("an_op", [], [types.float32])
with g.device(pydev.merge_device(None)):
g.create_op("an_op", [], [types.float32])
gd = g.as_graph_def()
self.assertProtoEquals("""
node { name: "an_op" op: "an_op"
device: "/device:GPU:0" }
node { name: "an_op_1" op: "an_op"
device: "/job:worker/device:GPU:0" }
node { name: "an_op_2" op: "an_op"
device: "/job:worker/device:CPU:0" }
node { name: "an_op_3" op: "an_op"
device: "/job:ps/device:CPU:0" }
node { name: "an_op_4" op: "an_op"
device: "/job:ps/device:CPU:0" }
""", gd)
def testNoneClearsDefault(self):
g = ops.Graph()
with g.device("/job:worker/replica:2/device:CPU:1"):
g.create_op("an_op", [], [types.float32])
with g.device(None):
g.create_op("an_op", [], [types.float32])
g.create_op("an_op", [], [types.float32])
gd = g.as_graph_def()
self.assertProtoEquals("""
node { name: "an_op" op: "an_op"
device: "/job:worker/replica:2/device:CPU:1" }
node { name: "an_op_1" op: "an_op" }
node { name: "an_op_2" op: "an_op"
device: "/job:worker/replica:2/device:CPU:1" }
""", gd)
class ObjectWithName(object):
def __init__(self, name):
self._name = name
@property
def name(self):
return self._name
class CollectionTest(test_util.TensorFlowTestCase):
def testadd_to_collection(self):
g = ops.Graph()
g.add_to_collection("key", 12)
g.add_to_collection("other", "foo")
g.add_to_collection("key", 34)
# Note that only blank1 is returned.
g.add_to_collection("blah", 27)
blank1 = ObjectWithName("prefix/foo")
g.add_to_collection("blah", blank1)
blank2 = ObjectWithName("junk/foo")
g.add_to_collection("blah", blank2)
self.assertEquals(["foo"], g.get_collection("other"))
self.assertEquals([12, 34], g.get_collection("key"))
self.assertEquals([], g.get_collection("nothing"))
self.assertEquals([27, blank1, blank2], g.get_collection("blah"))
self.assertEquals([blank1], g.get_collection("blah", "prefix"))
  def testDefaultGraph(self):
with ops.Graph().as_default():
ops.add_to_collection("key", 90)
ops.add_to_collection("key", 100)
# Collections are ordered.
self.assertEquals([90, 100], ops.get_collection("key"))
def an_op(g):
return _apply_op(g, "an_op", [], [types.float32])
ops.NoGradient("an_op")
def copy_op(x):
return _apply_op(x.graph, "copy", [x], [x.dtype])
@ops.RegisterGradient("copy")
def _CopyGrad(op, x_grad):
_ = op
return x_grad
@ops.RegisterGradient("copy_override")
def _CopyOverrideGrad(op, x_grad):
_ = op
return x_grad
class RegistrationTest(test_util.TensorFlowTestCase):
def testRegisterGradients(self):
g = ops.Graph()
x = an_op(g)
y = copy_op(x)
fn = ops.get_gradient_function(y.op)
self.assertEquals(_CopyGrad, fn)
def testOverrideGradients(self):
g = ops.Graph()
x = an_op(g)
with g.gradient_override_map({"copy": "copy_override"}):
y = copy_op(x)
fn = ops.get_gradient_function(y.op)
self.assertEquals(_CopyOverrideGrad, fn)
def testNonExistentOverride(self):
g = ops.Graph()
x = an_op(g)
with g.gradient_override_map({"copy": "unknown_override"}):
y = copy_op(x)
with self.assertRaisesRegexp(LookupError, "unknown_override"):
fn = ops.get_gradient_function(y.op)
class ComparisonTest(test_util.TensorFlowTestCase):
def testMembershipAllowed(self):
g = ops.Graph()
t1 = _apply_op(g, "const", [], [types.float32], name="myop1")
t2 = _apply_op(g, "const", [], [types.float32], name="myop2")
self.assertTrue(isinstance(t1, ops.Tensor))
self.assertTrue(isinstance(t2, ops.Tensor))
self.assertTrue(t1 in [t1])
self.assertTrue(t1 not in [t2])
class ControlDependenciesTest(test_util.TensorFlowTestCase):
def testBasic(self):
g = ops.Graph()
a = _apply_op(g, "const", [], [types.float32])
b = _apply_op(g, "const", [], [types.float32])
with g.control_dependencies([a]):
c = _apply_op(g, "const", [], [types.float32])
d = _apply_op(g, "identity", [b], [types.float32])
e = _apply_op(g, "identity", [c], [types.float32])
self.assertEqual(c.op.control_inputs, [a.op])
self.assertEqual(d.op.control_inputs, [a.op])
# e should be dominated by c.
self.assertEqual(e.op.control_inputs, [])
def testNested(self):
g = ops.Graph()
a_1 = _apply_op(g, "const", [], [types.float32])
a_2 = _apply_op(g, "const", [], [types.float32])
a_3 = _apply_op(g, "const", [], [types.float32])
a_4 = _apply_op(g, "const", [], [types.float32])
with g.control_dependencies([a_1, a_2, a_3, a_4]):
b_1 = _apply_op(g, "const", [], [types.float32])
with g.control_dependencies([a_1]):
with g.control_dependencies([a_2]):
with g.control_dependencies([a_3]):
with g.control_dependencies([a_4]):
b_2 = _apply_op(g, "const", [], [types.float32])
self.assertItemsEqual(
[a_1.op, a_2.op, a_3.op, a_4.op], b_1.op.control_inputs)
self.assertItemsEqual(b_1.op.control_inputs, b_2.op.control_inputs)
def testComplex(self):
g = ops.Graph()
# Usage pattern:
# * Nodes a_i are constants defined at the outermost scope, and are used
# as control inputs for the ith nested scope.
# * Nodes b_i are defined as Mul(a_3, a_4) at each scope.
# * Nodes c_i are defined as Mul(a_1, b_1) at each scope.
# * Nodes d_i are defined as Mul(b_i, c_i) at each scope.
# * Nodes e_i are defined as Mul(e_i-1, e_i-1) at each scope i > 1.
a_1 = _apply_op(g, "const", [], [types.float32])
a_2 = _apply_op(g, "const", [], [types.float32])
a_3 = _apply_op(g, "const", [], [types.float32])
a_4 = _apply_op(g, "const", [], [types.float32])
with g.control_dependencies([a_1]):
b_1 = _apply_op(g, "mul", [a_3, a_4], [types.float32])
c_1 = _apply_op(g, "mul", [a_1, b_1], [types.float32])
d_1 = _apply_op(g, "mul", [b_1, c_1], [types.float32])
e_1 = _apply_op(g, "const", [], [types.float32])
with g.control_dependencies([a_2]):
b_2 = _apply_op(g, "mul", [a_3, a_4], [types.float32])
c_2 = _apply_op(g, "mul", [a_1, b_1], [types.float32])
d_2 = _apply_op(g, "mul", [b_2, c_2], [types.float32])
e_2 = _apply_op(g, "mul", [e_1, e_1], [types.float32])
with g.control_dependencies([a_3]):
b_3 = _apply_op(g, "mul", [a_3, a_4], [types.float32])
c_3 = _apply_op(g, "mul", [a_1, b_1], [types.float32])
d_3 = _apply_op(g, "mul", [b_3, c_3], [types.float32])
e_3 = _apply_op(g, "mul", [e_2, e_2], [types.float32])
with g.control_dependencies([a_4]):
b_4 = _apply_op(g, "mul", [a_3, a_4], [types.float32])
c_4 = _apply_op(g, "mul", [a_1, b_1], [types.float32])
d_4 = _apply_op(g, "mul", [b_4, c_4], [types.float32])
e_4 = _apply_op(g, "mul", [e_3, e_3], [types.float32])
self.assertItemsEqual([a_1.op], b_1.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_2.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_3.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_4.op.control_inputs)
self.assertItemsEqual([], c_1.op.control_inputs)
self.assertItemsEqual([a_2.op], c_2.op.control_inputs)
self.assertItemsEqual([a_2.op, a_3.op], c_3.op.control_inputs)
self.assertItemsEqual([a_2.op, a_3.op, a_4.op], c_4.op.control_inputs)
self.assertItemsEqual([], d_1.op.control_inputs)
self.assertItemsEqual([], d_2.op.control_inputs)
self.assertItemsEqual([], d_3.op.control_inputs)
self.assertItemsEqual([], d_4.op.control_inputs)
self.assertItemsEqual([a_1.op], e_1.op.control_inputs)
self.assertItemsEqual([a_2.op], e_2.op.control_inputs)
self.assertItemsEqual([a_3.op], e_3.op.control_inputs)
self.assertItemsEqual([a_4.op], e_4.op.control_inputs)
def testRepeatedDependency(self):
g = ops.Graph()
a = g.create_op("foo", [], [types.float32, types.float32])
a_0, a_1 = a.outputs
with g.control_dependencies([a_0]):
b = _apply_op(g, "const", [], [types.float32])
with g.control_dependencies([a_1]):
c = _apply_op(g, "const", [], [types.float32])
self.assertEqual(b.op.control_inputs, [a])
self.assertEqual(c.op.control_inputs, [a])
def testNoControlDependencyWithDataDependency(self):
g = ops.Graph()
a = _apply_op(g, "const", [], [types.float32])
with g.control_dependencies([a]):
b = _apply_op(g, "identity", [a], [types.float32])
self.assertEqual(b.op.control_inputs, [])
class GraphTest(test_util.TensorFlowTestCase):
def setUp(self):
ops.reset_default_graph()
def _AssertDefault(self, expected):
self.assertIs(expected, ops.get_default_graph())
def testGraphContextManager(self):
g0 = ops.Graph()
with g0.as_default() as g1:
self.assertIs(g0, g1)
def testDefaultGraph(self):
orig = ops.get_default_graph()
self._AssertDefault(orig)
g0 = ops.Graph()
self._AssertDefault(orig)
context_manager_0 = g0.as_default()
self._AssertDefault(orig)
with context_manager_0 as g0:
self._AssertDefault(g0)
with ops.Graph().as_default() as g1:
self._AssertDefault(g1)
self._AssertDefault(g0)
self._AssertDefault(orig)
def testAsGraphElementConversions(self):
class ConvertibleObj(object):
def _as_graph_element(self):
return "const:0"
class NonConvertibleObj(object):
pass
g = ops.Graph()
a = _apply_op(g, "const", [], [types.float32])
self.assertEqual(a, g.as_graph_element(ConvertibleObj()))
with self.assertRaises(TypeError):
g.as_graph_element(NonConvertibleObj())
def testAssertSameGraph(self):
g0 = ops.Graph()
a = g0.create_op("a", [], [types.float32])
b = g0.create_op("b", [], [types.float32])
ops.assert_same_graph([a, b])
ops.assert_same_graph([a, b], g0)
g1 = ops.Graph()
c = g1.create_op("c", [], [types.float32])
self.assertRaises(ValueError, ops.assert_same_graph, [a, b, c])
self.assertRaises(ValueError, ops.assert_same_graph, [c], g0)
self.assertRaises(ValueError, ops.assert_same_graph, [a], g1)
sparse = ops.SparseTensor(
_apply_op(g0, "const", [], [types.int64]),
_apply_op(g0, "const", [], [types.float32]),
_apply_op(g0, "const", [], [types.int64]))
ops.assert_same_graph([sparse, a, b])
ops.assert_same_graph([sparse, a, b], g0)
self.assertRaises(ValueError, ops.assert_same_graph, [sparse, a, c])
self.assertRaises(ValueError, ops.assert_same_graph, [sparse, a, c], g1)
ops.RegisterShape("KernelLabel")(common_shapes.scalar_shape)
class KernelLabelTest(test_util.TensorFlowTestCase):
def testNoLabel(self):
with self.test_session():
self.assertAllEqual("My label is: default",
test_kernel_label_op.kernel_label().eval())
def testLabelMap(self):
with self.test_session() as sess:
default_1 = test_kernel_label_op.kernel_label()
# pylint: disable=protected-access
with sess.graph._kernel_label_map({"KernelLabel": "overload_1"}):
overload_1_1 = test_kernel_label_op.kernel_label()
with sess.graph._kernel_label_map({"KernelLabel": "overload_2"}):
overload_2 = test_kernel_label_op.kernel_label()
with sess.graph._kernel_label_map({"KernelLabel": ""}):
default_2 = test_kernel_label_op.kernel_label()
overload_1_2 = test_kernel_label_op.kernel_label()
# pylint: enable=protected-access
default_3 = test_kernel_label_op.kernel_label()
self.assertAllEqual("My label is: default", default_1.eval())
self.assertAllEqual("My label is: default", default_2.eval())
self.assertAllEqual("My label is: default", default_3.eval())
self.assertAllEqual("My label is: overload_1", overload_1_1.eval())
self.assertAllEqual("My label is: overload_1", overload_1_2.eval())
self.assertAllEqual("My label is: overload_2", overload_2.eval())
if __name__ == "__main__":
googletest.main()
|
bbondy/brianbondy.gae
|
refs/heads/master
|
libs/flask/module.py
|
850
|
# -*- coding: utf-8 -*-
"""
flask.module
~~~~~~~~~~~~
Implements a class that represents module blueprints.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
from .blueprints import Blueprint
def blueprint_is_module(bp):
"""Used to figure out if something is actually a module"""
return isinstance(bp, Module)
class Module(Blueprint):
"""Deprecated module support. Until Flask 0.6 modules were a different
name of the concept now available as blueprints in Flask. They are
essentially doing the same but have some bad semantics for templates and
static files that were fixed with blueprints.
.. versionchanged:: 0.7
Modules were deprecated in favor for blueprints.
"""
def __init__(self, import_name, name=None, url_prefix=None,
static_path=None, subdomain=None):
if name is None:
assert '.' in import_name, 'name required if package name ' \
'does not point to a submodule'
name = import_name.rsplit('.', 1)[1]
Blueprint.__init__(self, name, import_name, url_prefix=url_prefix,
subdomain=subdomain, template_folder='templates')
if os.path.isdir(os.path.join(self.root_path, 'static')):
self._static_folder = 'static'
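# Illustrative migration sketch (hypothetical names): the Blueprint
# equivalent of a legacy Module declaration.
#
#   from flask import Blueprint
#   admin = Blueprint('admin', __name__, url_prefix='/admin',
#                     template_folder='templates')
#
# Blueprint is the supported API; Module survives only as this thin shim.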
|
abn/yabgp
|
refs/heads/master
|
yabgp/common/constants.py
|
2
|
# Copyright 2015 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" All BGP constant values """
# some handy things to know
BGP_MAX_PACKET_SIZE = 4096
BGP_MARKER_SIZE = 16 # size of BGP marker
BGP_HEADER_SIZE = 19 # size of BGP header, including marker
BGP_MIN_OPEN_MSG_SIZE = 29
BGP_MIN_UPDATE_MSG_SIZE = 23
BGP_MIN_NOTIFICATION_MSG_SIZE = 21
BGP_MIN_KEEPALVE_MSG_SIZE = BGP_HEADER_SIZE
BGP_TCP_PORT = 179
BGP_ROUTE_DISTINGUISHER_SIZE = 8
# BGP message types
BGP_OPEN = 1
BGP_UPDATE = 2
BGP_NOTIFICATION = 3
BGP_KEEPALIVE = 4
BGP_ROUTE_REFRESH = 5
BGP_CAPABILITY = 6
BGP_ROUTE_REFRESH_CISCO = 0x80
BGP_SIZE_OF_PATH_ATTRIBUTE = 2
# attribute flags, from RFC1771
BGP_ATTR_FLAG_OPTIONAL = 0x80
BGP_ATTR_FLAG_TRANSITIVE = 0x40
BGP_ATTR_FLAG_PARTIAL = 0x20
BGP_ATTR_FLAG_EXTENDED_LENGTH = 0x10
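# Example (illustrative): a well-known mandatory attribute such as ORIGIN
# carries only BGP_ATTR_FLAG_TRANSITIVE (0x40), while an optional transitive
# attribute such as COMMUNITIES carries
# BGP_ATTR_FLAG_OPTIONAL | BGP_ATTR_FLAG_TRANSITIVE == 0xc0.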
# SSA flags
BGP_SSA_TRANSITIVE = 0x8000
BGP_SSA_TYPE = 0x7FFF
# SSA Types
BGP_SSA_L2TPv3 = 1
BGP_SSA_mGRE = 2
BGP_SSA_IPSec = 3
BGP_SSA_MPLS = 4
BGP_SSA_L2TPv3_IN_IPSec = 5
BGP_SSA_mGRE_IN_IPSec = 6
# AS_PATH segment types
AS_SET = 1 # RFC1771
AS_SEQUENCE = 2 # RFC1771
AS_CONFED_SET = 4 # RFC1965 has the wrong values, corrected in
AS_CONFED_SEQUENCE = 3 # draft-ietf-idr-bgp-confed-rfc1965bis-01.txt
# OPEN message Optional Parameter types
BGP_OPTION_AUTHENTICATION = 1 # RFC1771
BGP_OPTION_CAPABILITY = 2 # RFC2842
# attribute types
BGPTYPE_ORIGIN = 1 # RFC1771
BGPTYPE_AS_PATH = 2 # RFC1771
BGPTYPE_NEXT_HOP = 3 # RFC1771
BGPTYPE_MULTI_EXIT_DISC = 4 # RFC1771
BGPTYPE_LOCAL_PREF = 5 # RFC1771
BGPTYPE_ATOMIC_AGGREGATE = 6 # RFC1771
BGPTYPE_AGGREGATOR = 7 # RFC1771
BGPTYPE_COMMUNITIES = 8 # RFC1997
BGPTYPE_ORIGINATOR_ID = 9 # RFC2796
BGPTYPE_CLUSTER_LIST = 10 # RFC2796
BGPTYPE_DPA = 11 # work in progress
BGPTYPE_ADVERTISER = 12 # RFC1863
BGPTYPE_RCID_PATH = 13 # RFC1863
BGPTYPE_MP_REACH_NLRI = 14 # RFC2858
BGPTYPE_MP_UNREACH_NLRI = 15 # RFC2858
BGPTYPE_EXTENDED_COMMUNITY = 16 # Draft Ramachandra
BGPTYPE_NEW_AS_PATH = 17 # draft-ietf-idr-as4bytes
BGPTYPE_NEW_AGGREGATOR = 18 # draft-ietf-idr-as4bytes
BGPTYPE_SAFI_SPECIFIC_ATTR = 19 # draft-kapoor-nalawade-idr-bgp-ssa-00.txt
BGPTYPE_TUNNEL_ENCAPS_ATTR = 23 # RFC5512
BGPTYPE_LINK_STATE = 99
BGPTYPE_ATTRIBUTE_SET = 128
# VPN Route Target #
BGP_EXT_COM_RT_0 = 0x0002 # Route Target,Format AS(2bytes):AN(4bytes)
BGP_EXT_COM_RT_1 = 0x0102 # Route Target,Format IPv4 address(4bytes):AN(2bytes)
BGP_EXT_COM_RT_2 = 0x0202 # Route Target,Format AS(4bytes):AN(2bytes)
# Route Origin (SOO site of Origin)
BGP_EXT_COM_RO_0 = 0x0003 # Route Origin,Format AS(2bytes):AN(4bytes)
BGP_EXT_COM_RO_1 = 0x0103 # Route Origin,Format IP address:AN(2bytes)
BGP_EXT_COM_RO_2 = 0x0203 # Route Origin,Format AS(2bytes):AN(4bytes)
# BGP Flow Spec
BGP_EXT_TRA_RATE = 0x8006 # traffic-rate 2-byte as#, 4-byte float
BGP_EXT_TRA_ACTION = 0x8007 # traffic-action bitmask
BGP_EXT_REDIRECT = 0x8008 # redirect 6-byte Route Target
BGP_EXT_TRA_MARK = 0x8009 # traffic-marking DSCP value
# BGP cost community
BGP_EXT_COM_COST = 0x4301
# BGP link bandwidth
BGP_EXT_COM_LINK_BW = 0x4004
# Unknown
BGP_EXT_COM_UNKNOW = 0x0000
# route distinguisher type
BGP_ROUTE_DISTINGUISHER_TYPE_0 = 0x0000
BGP_ROUTE_DISTINGUISHER_TYPE_1 = 0x0001
BGP_ROUTE_DISTINGUISHER_TYPE_2 = 0x0002
# NLRI type as define in BGP flow spec RFC
BGPNLRI_FSPEC_DST_PFIX = 1 # RFC 5575
BGPNLRI_FSPEC_SRC_PFIX = 2 # RFC 5575
BGPNLRI_FSPEC_IP_PROTO = 3 # RFC 5575
BGPNLRI_FSPEC_PORT = 4 # RFC 5575
BGPNLRI_FSPEC_DST_PORT = 5 # RFC 5575
BGPNLRI_FSPEC_SRC_PORT = 6 # RFC 5575
BGPNLRI_FSPEC_ICMP_TP = 7 # RFC 5575
BGPNLRI_FSPEC_ICMP_CD = 8 # RFC 5575
BGPNLRI_FSPEC_TCP_FLAGS = 9 # RFC 5575
BGPNLRI_FSPEC_PCK_LEN = 10 # RFC 5575
BGPNLRI_FSPEC_DSCP = 11 # RFC 5575
BGPNLRI_FSPEC_FRAGMENT = 12 # RFC 5575
# BGP message Constants
VERSION = 4
PORT = 179
HDR_LEN = 19
MAX_LEN = 4096
# BGP messages type
MSG_OPEN = 1
MSG_UPDATE = 2
MSG_NOTIFICATION = 3
MSG_KEEPALIVE = 4
MSG_ROUTEREFRESH = 5
MSG_CISCOROUTEREFRESH = 128
# BGP Capabilities Support
SUPPORT_4AS = False
CISCO_ROUTE_REFRESH = False
NEW_ROUTE_REFRESH = False
GRACEFUL_RESTART = False
# AFI_SAFI mapping
AFI_SAFI_DICT = {
(1, 1): 'ipv4',
(2, 1): 'ipv6',
(1, 133): 'flowspec'
}
AFI_SAFI_STR_DICT = {
'ipv6': (2, 1),
'ipv4': (1, 1),
'flowspec': (1, 133)
}
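# Example (illustrative): AFI_SAFI_DICT[(1, 133)] == 'flowspec' maps the
# (AFI, SAFI) pair carried in an MP_REACH_NLRI attribute to a family name,
# and AFI_SAFI_STR_DICT inverts that lookup for configuration handling.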
# BGP FSM State
ST_IDLE = 1
ST_CONNECT = 2
ST_ACTIVE = 3
ST_OPENSENT = 4
ST_OPENCONFIRM = 5
ST_ESTABLISHED = 6
# BGP Timer (seconds)
DELAY_OPEN_TIME = 10
ROUTE_REFRESH_TIME = 10
LARGER_HOLD_TIME = 4 * 60
CONNECT_RETRY_TIME = 30
IDLEHOLD_TIME = 30
HOLD_TIME = 120
stateDescr = {
ST_IDLE: "IDLE",
ST_CONNECT: "CONNECT",
ST_ACTIVE: "ACTIVE",
ST_OPENSENT: "OPENSENT",
ST_OPENCONFIRM: "OPENCONFIRM",
ST_ESTABLISHED: "ESTABLISHED"
}
# Notification error codes
ERR_MSG_HDR = 1
ERR_MSG_OPEN = 2
ERR_MSG_UPDATE = 3
ERR_HOLD_TIMER_EXPIRED = 4
ERR_FSM = 5
ERR_CEASE = 6
# Notification suberror codes
ERR_MSG_HDR_CONN_NOT_SYNC = 1
ERR_MSG_HDR_BAD_MSG_LEN = 2
ERR_MSG_HDR_BAD_MSG_TYPE = 3
ERR_MSG_OPEN_UNSUP_VERSION = 1
ERR_MSG_OPEN_BAD_PEER_AS = 2
ERR_MSG_OPEN_BAD_BGP_ID = 3
ERR_MSG_OPEN_UNSUP_OPT_PARAM = 4
ERR_MSG_OPEN_UNACCPT_HOLD_TIME = 6
ERR_MSG_OPEN_UNSUP_CAPA = 7 # RFC 5492
ERR_MSG_OPEN_UNKNO = 8
ERR_MSG_UPDATE_MALFORMED_ATTR_LIST = 1
ERR_MSG_UPDATE_UNRECOGNIZED_WELLKNOWN_ATTR = 2
ERR_MSG_UPDATE_MISSING_WELLKNOWN_ATTR = 3
ERR_MSG_UPDATE_ATTR_FLAGS = 4
ERR_MSG_UPDATE_ATTR_LEN = 5
ERR_MSG_UPDATE_INVALID_ORIGIN = 6
ERR_MSG_UPDATE_INVALID_NEXTHOP = 8
ERR_MSG_UPDATE_OPTIONAL_ATTR = 9
ERR_MSG_UPDATE_INVALID_NETWORK_FIELD = 10
ERR_MSG_UPDATE_MALFORMED_ASPATH = 11
ERR_MSG_UPDATE_UNKOWN_ATTR = 12
ATTRIBUTE_ID_2_STR = {
1: 'ORIGIN',
2: 'AS_PATH',
3: 'NEXT_HOP',
4: 'MULTI_EXIT_DISC',
5: 'LOCAL_PREF',
6: 'ATOMIC_AGGREGATE',
7: 'AGGREGATOR',
8: 'COMMUNITY',
9: 'ORIGINATOR_ID',
10: 'CLUSTER_LIST',
14: 'MP_REACH_NLRI',
15: 'MP_UNREACH_NLRI',
16: 'EXTENDED_COMMUNITY',
17: 'AS4_PATH',
18: 'AS4_AGGREGATOR'
}
ATTRIBUTE_STR_2_ID = dict([(v, k) for (k, v) in ATTRIBUTE_ID_2_STR.items()])
WELL_KNOW_COMMUNITY_INT_2_STR = {
0xFFFF0000: 'PLANNED_SHUT',
0xFFFF0001: 'ACCEPT_OWN',
0xFFFF0002: 'ROUTE_FILTER_TRANSLATED_v4',
0xFFFF0003: 'ROUTE_FILTER_v4',
0xFFFF0004: 'ROUTE_FILTER_TRANSLATED_v6',
0xFFFF0005: 'ROUTE_FILTER_v6',
0xFFFFFF01: 'NO_EXPORT',
0xFFFFFF02: 'NO_ADVERTISE',
0xFFFFFF03: 'NO_EXPORT_SUBCONFED',
0xFFFFFF04: 'NOPEER'
}
WELL_KNOW_COMMUNITY_STR_2_INT = dict(
[(r, l) for (l, r) in WELL_KNOW_COMMUNITY_INT_2_STR.items()])
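# Example (illustrative): WELL_KNOW_COMMUNITY_STR_2_INT['NO_EXPORT']
# == 0xFFFFFF01, the inverse of WELL_KNOW_COMMUNITY_INT_2_STR above.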
TCP_MD5SIG_MAXKEYLEN = 80
SS_PADSIZE_IPV4 = 120
TCP_MD5SIG = 14
SS_PADSIZE_IPV6 = 100
SIN6_FLOWINFO = 0
SIN6_SCOPE_ID = 0
COMMUNITY_DICT = False
|
shoopio/shoop
|
refs/heads/master
|
shuup/xtheme/plugins/category_links.py
|
2
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2019, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from django import forms
from django.utils.translation import ugettext_lazy as _
from shuup.core.models import Category
from shuup.xtheme import TemplatedPlugin
from shuup.xtheme.plugins.forms import GenericPluginForm, TranslatableField
from shuup.xtheme.plugins.widgets import XThemeSelect2ModelMultipleChoiceField
class CategoryLinksConfigForm(GenericPluginForm):
"""
A configuration form for the CategoryLinksPlugin
"""
def populate(self):
"""
A custom populate method to display category choices
"""
for field in self.plugin.fields:
if isinstance(field, tuple):
name, value = field
value.initial = self.plugin.config.get(name, value.initial)
self.fields[name] = value
self.fields["categories"] = XThemeSelect2ModelMultipleChoiceField(
model="shuup.category",
required=False,
label=_("Categories"),
initial=self.plugin.config.get("categories"),
extra_widget_attrs={
"data-search-mode": "visible"
}
)
class CategoryLinksPlugin(TemplatedPlugin):
"""
A plugin for displaying links to visible categories on the shop front
"""
identifier = "category_links"
name = _("Category Links")
template_name = "shuup/xtheme/plugins/category_links.jinja"
editor_form_class = CategoryLinksConfigForm
fields = [
("title", TranslatableField(label=_("Title"), required=False, initial="")),
("show_all_categories", forms.BooleanField(
label=_("Show all categories"),
required=False,
initial=True,
help_text=_("All categories are shown, even if not selected"),
)),
"categories",
]
def get_context_data(self, context):
"""
A custom get_context_data method to return only visible categories
for request customer.
"""
selected_categories = self.config.get("categories", [])
show_all_categories = self.config.get("show_all_categories", True)
request = context.get("request")
categories = Category.objects.all_visible(
customer=getattr(request, "customer"),
shop=getattr(request, "shop")
).prefetch_related("translations")
if not show_all_categories:
categories = categories.filter(id__in=selected_categories)
return {
"title": self.get_translated_value("title"),
"categories": categories,
}
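# Illustrative plugin config (hypothetical values) as produced by the editor
# form above, rendering only two hand-picked categories:
#
#   plugin.config = {
#       "title": {"en": "Browse"},
#       "show_all_categories": False,
#       "categories": [3, 7],
#   }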
|
cgroza/gEcrit
|
refs/heads/master
|
data/plugins/ClipboardViewer.py
|
1
|
# Copyright (C) 2011 Groza Cristian
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#!/usr/bin/python
# -*- coding: utf-8 -*-
from data.plugins.categories import Passive
import yapsy.IPlugin
import wx
class ClipboardViewer(wx.Frame, Passive, yapsy.IPlugin.IPlugin):
def __init__(self):
self.name = "Clipboard Viewer"
def Init(self, parent):
self.parent = parent
wx.Frame.__init__(self, self.parent)
self.main_panel = wx.Panel(self)
self.main_sizer = wx.BoxSizer(wx.VERTICAL)
self.button_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.clip_view_descr = wx.StaticText(self.main_panel, -1,
"Clipboard Contents:", size = (-1, -1))
self.clip_view = wx.TextCtrl(self.main_panel, -1,
style = wx.TE_MULTILINE)
self.update_clp = wx.Button(self.main_panel, -1, "Update Clipboard")
self.refresh_view = wx.Button(self.main_panel, -1, "Refresh")
self.update_clp.Bind(wx.EVT_BUTTON, self.OnUpdate)
self.refresh_view.Bind(wx.EVT_BUTTON, self.OnRefresh)
self.plugins_menu = wx.Menu()
show_entry = self.plugins_menu.Append(-1,"Show Clipboard")
self.menu_item = self.parent.AddToMenuBar("Clipboard Viewer",
self.plugins_menu)
self.parent.BindMenubarEvent(show_entry, self.ShowMe)
self.button_sizer.Add(self.update_clp)
self.button_sizer.AddSpacer(5)
self.button_sizer.Add(self.refresh_view)
self.main_sizer.Add(self.clip_view_descr)
self.main_sizer.AddSpacer(10)
self.main_sizer.Add(self.clip_view, 1, wx.EXPAND)
self.main_sizer.Add(self.button_sizer)
self.main_panel.SetSizerAndFit(self.main_sizer)
self.Bind(wx.EVT_CLOSE, self.HideMe)
self.Hide()
    def ReadClipboard(self):
        # opening the clipboard
        if not wx.TheClipboard.IsOpened():
            wx.TheClipboard.Open()
        # reading the clipboard
        txt = wx.TextDataObject()
        success = wx.TheClipboard.GetData(txt)
        # closing again so other applications are not locked out
        wx.TheClipboard.Close()
        # loading the text into the clip_view
        if success:
            self.clip_view.SetValue(txt.GetText())
def OnRefresh(self, event):
self.ReadClipboard()
def OnUpdate(self, event):
        # creating the data object
        data = wx.TextDataObject()
        # setting the data object value
        data.SetText(self.clip_view.GetValue())
        # writing the data object to the clipboard
if not wx.TheClipboard.IsOpened():
wx.TheClipboard.Open()
wx.TheClipboard.SetData(data)
wx.TheClipboard.Close()
def HideMe(self, event):
self.Hide()
def ShowMe(self, event):
self.ReadClipboard()
self.Show()
|
gwpy/gwpy.github.io
|
refs/heads/master
|
docs/2.0.1/examples/signal/qscan-1.py
|
6
|
from gwosc import datasets
from gwpy.timeseries import TimeSeries
gps = datasets.event_gps('GW170817')
data = TimeSeries.fetch_open_data('L1', gps-34, gps+34, tag='C00')
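# A typical next step for a qscan (illustrative, not part of this file):
#
#   qspecgram = data.q_transform(outseg=(gps - 0.3, gps + 0.1))
#   plot = qspecgram.plot()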
|
paweljasinski/ironpython3
|
refs/heads/master
|
Src/StdLib/Lib/test/test_filecmp.py
|
5
|
import os, filecmp, shutil, tempfile
import unittest
from test import support
class FileCompareTestCase(unittest.TestCase):
def setUp(self):
self.name = support.TESTFN
self.name_same = support.TESTFN + '-same'
self.name_diff = support.TESTFN + '-diff'
data = 'Contents of file go here.\n'
for name in [self.name, self.name_same, self.name_diff]:
output = open(name, 'w')
output.write(data)
output.close()
output = open(self.name_diff, 'a+')
output.write('An extra line.\n')
output.close()
self.dir = tempfile.gettempdir()
def tearDown(self):
os.unlink(self.name)
os.unlink(self.name_same)
os.unlink(self.name_diff)
def test_matching(self):
self.assertTrue(filecmp.cmp(self.name, self.name_same),
"Comparing file to itself fails")
self.assertTrue(filecmp.cmp(self.name, self.name_same, shallow=False),
"Comparing file to itself fails")
self.assertTrue(filecmp.cmp(self.name, self.name, shallow=False),
"Comparing file to identical file fails")
self.assertTrue(filecmp.cmp(self.name, self.name),
"Comparing file to identical file fails")
def test_different(self):
self.assertFalse(filecmp.cmp(self.name, self.name_diff),
"Mismatched files compare as equal")
self.assertFalse(filecmp.cmp(self.name, self.dir),
"File and directory compare as equal")
def test_cache_clear(self):
first_compare = filecmp.cmp(self.name, self.name_same, shallow=False)
second_compare = filecmp.cmp(self.name, self.name_diff, shallow=False)
filecmp.clear_cache()
self.assertTrue(len(filecmp._cache) == 0,
"Cache not cleared after calling clear_cache")
class DirCompareTestCase(unittest.TestCase):
def setUp(self):
tmpdir = tempfile.gettempdir()
self.dir = os.path.join(tmpdir, 'dir')
self.dir_same = os.path.join(tmpdir, 'dir-same')
self.dir_diff = os.path.join(tmpdir, 'dir-diff')
# Another dir is created under dir_same, but it has a name from the
# ignored list so it should not affect testing results.
self.dir_ignored = os.path.join(self.dir_same, '.hg')
self.caseinsensitive = os.path.normcase('A') == os.path.normcase('a')
data = 'Contents of file go here.\n'
for dir in (self.dir, self.dir_same, self.dir_diff, self.dir_ignored):
shutil.rmtree(dir, True)
os.mkdir(dir)
if self.caseinsensitive and dir is self.dir_same:
fn = 'FiLe' # Verify case-insensitive comparison
else:
fn = 'file'
output = open(os.path.join(dir, fn), 'w')
output.write(data)
output.close()
output = open(os.path.join(self.dir_diff, 'file2'), 'w')
output.write('An extra file.\n')
output.close()
def tearDown(self):
for dir in (self.dir, self.dir_same, self.dir_diff):
shutil.rmtree(dir)
def test_default_ignores(self):
self.assertIn('.hg', filecmp.DEFAULT_IGNORES)
def test_cmpfiles(self):
self.assertTrue(filecmp.cmpfiles(self.dir, self.dir, ['file']) ==
(['file'], [], []),
"Comparing directory to itself fails")
self.assertTrue(filecmp.cmpfiles(self.dir, self.dir_same, ['file']) ==
(['file'], [], []),
"Comparing directory to same fails")
# Try it with shallow=False
self.assertTrue(filecmp.cmpfiles(self.dir, self.dir, ['file'],
shallow=False) ==
(['file'], [], []),
"Comparing directory to itself fails")
self.assertTrue(filecmp.cmpfiles(self.dir, self.dir_same, ['file'],
shallow=False),
"Comparing directory to same fails")
# Add different file2
output = open(os.path.join(self.dir, 'file2'), 'w')
output.write('Different contents.\n')
output.close()
self.assertFalse(filecmp.cmpfiles(self.dir, self.dir_same,
['file', 'file2']) ==
(['file'], ['file2'], []),
"Comparing mismatched directories fails")
def test_dircmp(self):
# Check attributes for comparison of two identical directories
left_dir, right_dir = self.dir, self.dir_same
d = filecmp.dircmp(left_dir, right_dir)
self.assertEqual(d.left, left_dir)
self.assertEqual(d.right, right_dir)
if self.caseinsensitive:
self.assertEqual([d.left_list, d.right_list],[['file'], ['FiLe']])
else:
self.assertEqual([d.left_list, d.right_list],[['file'], ['file']])
self.assertEqual(d.common, ['file'])
self.assertTrue(d.left_only == d.right_only == [])
self.assertEqual(d.same_files, ['file'])
self.assertEqual(d.diff_files, [])
# Check attributes for comparison of two different directories
left_dir, right_dir = self.dir, self.dir_diff
d = filecmp.dircmp(left_dir, right_dir)
self.assertEqual(d.left, left_dir)
self.assertEqual(d.right, right_dir)
self.assertEqual(d.left_list, ['file'])
self.assertTrue(d.right_list == ['file', 'file2'])
self.assertEqual(d.common, ['file'])
self.assertEqual(d.left_only, [])
self.assertEqual(d.right_only, ['file2'])
self.assertEqual(d.same_files, ['file'])
self.assertEqual(d.diff_files, [])
# Add different file2
output = open(os.path.join(self.dir, 'file2'), 'w')
output.write('Different contents.\n')
output.close()
d = filecmp.dircmp(self.dir, self.dir_diff)
self.assertEqual(d.same_files, ['file'])
self.assertEqual(d.diff_files, ['file2'])
def test_main():
support.run_unittest(FileCompareTestCase, DirCompareTestCase)
if __name__ == "__main__":
test_main()
|
brandond/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/radware/vdirect_commit.py
|
14
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2017 Radware LTD.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
module: vdirect_commit
author: Evgeny Fedoruk @ Radware LTD (@evgenyfedoruk)
short_description: Commits pending configuration changes on Radware devices
description:
- Commits pending configuration changes on one or more Radware devices via vDirect server.
- For Alteon ADC device, apply, sync and save actions will be performed by default.
      Skipping an action is possible by explicitly specifying the relevant parameter.
- For Alteon VX Container device, no sync operation will be performed
since sync action is only relevant for Alteon ADC devices.
- For DefensePro and AppWall devices, a bulk commit action will be performed.
      Explicitly specifying apply, sync and save actions is not relevant.
notes:
- Requires the Radware vdirect-client Python package on the host. This is as easy as
C(pip install vdirect-client)
version_added: "2.5"
options:
vdirect_ip:
description:
- Primary vDirect server IP address, may be set as C(VDIRECT_IP) environment variable.
required: true
vdirect_user:
description:
- vDirect server username, may be set as C(VDIRECT_USER) environment variable.
required: true
vdirect_password:
description:
- vDirect server password, may be set as C(VDIRECT_PASSWORD) environment variable.
required: true
vdirect_secondary_ip:
description:
- Secondary vDirect server IP address, may be set as C(VDIRECT_SECONDARY_IP) environment variable.
vdirect_wait:
description:
- Wait for async operation to complete, may be set as C(VDIRECT_WAIT) environment variable.
type: bool
default: 'yes'
vdirect_https_port:
description:
- vDirect server HTTPS port number, may be set as C(VDIRECT_HTTPS_PORT) environment variable.
default: 2189
vdirect_http_port:
description:
- vDirect server HTTP port number, may be set as C(VDIRECT_HTTP_PORT) environment variable.
default: 2188
vdirect_timeout:
description:
- Amount of time to wait for async operation completion [seconds],
- may be set as C(VDIRECT_TIMEOUT) environment variable.
default: 60
vdirect_use_ssl:
description:
- If C(no), an HTTP connection will be used instead of the default HTTPS connection,
- may be set as C(VDIRECT_HTTPS) or C(VDIRECT_USE_SSL) environment variable.
type: bool
default: 'yes'
vdirect_validate_certs:
description:
- If C(no), SSL certificates will not be validated,
- may be set as C(VDIRECT_VALIDATE_CERTS) or C(VDIRECT_VERIFY) environment variable.
      - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
type: bool
default: 'yes'
devices:
description:
- List of Radware Alteon device names for commit operations.
required: true
apply:
description:
- If C(no), apply action will not be performed. Relevant for ADC devices only.
type: bool
default: 'yes'
save:
description:
- If C(no), save action will not be performed. Relevant for ADC devices only.
type: bool
default: 'yes'
sync:
description:
- If C(no), sync action will not be performed. Relevant for ADC devices only.
type: bool
default: 'yes'
requirements:
- "vdirect-client >= 4.1.1"
'''
EXAMPLES = '''
- name: vdirect_commit
vdirect_commit:
vdirect_ip: 10.10.10.10
vdirect_user: vDirect
vdirect_password: radware
devices: ['dev1', 'dev2']
sync: no
'''
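# An additional illustrative invocation (hypothetical device name 'dp1'): per
# the description above, DefensePro and AppWall devices get a single bulk
# commit and the apply/sync/save flags are ignored, so only connection
# details are needed:
#
#   - name: vdirect_commit
#     vdirect_commit:
#       vdirect_ip: 10.10.10.10
#       vdirect_user: vDirect
#       vdirect_password: radware
#       devices: ['dp1']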
RETURN = '''
result:
description: Message detailing actions result
returned: success
type: str
sample: "Requested actions were successfully performed on all devices."
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from vdirect_client import rest_client
HAS_REST_CLIENT = True
except ImportError:
HAS_REST_CLIENT = False
SUCCESS = 'Requested actions were successfully performed on all devices.'
FAILURE = 'Failure occurred while performing requested actions on devices. See details'
ADC_DEVICE_TYPE = 'Adc'
CONTAINER_DEVICE_TYPE = 'Container'
PARTITIONED_CONTAINER_DEVICE_TYPE = 'AlteonPartitioned'
APPWALL_DEVICE_TYPE = 'AppWall'
DP_DEVICE_TYPE = 'DefensePro'
SUCCEEDED = 'succeeded'
FAILED = 'failed'
NOT_PERFORMED = 'not performed'
meta_args = dict(
vdirect_ip=dict(required=True, fallback=(env_fallback, ['VDIRECT_IP'])),
vdirect_user=dict(required=True, fallback=(env_fallback, ['VDIRECT_USER'])),
vdirect_password=dict(
required=True, fallback=(env_fallback, ['VDIRECT_PASSWORD']),
no_log=True, type='str'),
vdirect_secondary_ip=dict(
required=False, fallback=(env_fallback, ['VDIRECT_SECONDARY_IP']),
default=None),
vdirect_use_ssl=dict(
required=False, fallback=(env_fallback, ['VDIRECT_HTTPS', 'VDIRECT_USE_SSL']),
default=True, type='bool'),
vdirect_wait=dict(
required=False, fallback=(env_fallback, ['VDIRECT_WAIT']),
default=True, type='bool'),
vdirect_timeout=dict(
required=False, fallback=(env_fallback, ['VDIRECT_TIMEOUT']),
default=60, type='int'),
vdirect_validate_certs=dict(
required=False, fallback=(env_fallback, ['VDIRECT_VERIFY', 'VDIRECT_VALIDATE_CERTS']),
default=True, type='bool'),
vdirect_https_port=dict(
required=False, fallback=(env_fallback, ['VDIRECT_HTTPS_PORT']),
default=2189, type='int'),
vdirect_http_port=dict(
required=False, fallback=(env_fallback, ['VDIRECT_HTTP_PORT']),
default=2188, type='int'),
devices=dict(
required=True, type='list'),
apply=dict(
required=False, default=True, type='bool'),
save=dict(
required=False, default=True, type='bool'),
sync=dict(
required=False, default=True, type='bool'),
)
class CommitException(Exception):
def __init__(self, reason, details):
self.reason = reason
self.details = details
def __str__(self):
return 'Reason: {0}. Details:{1}.'.format(self.reason, self.details)
class MissingDeviceException(CommitException):
def __init__(self, device_name):
super(MissingDeviceException, self).__init__(
'Device missing',
'Device ' + repr(device_name) + ' does not exist')
class VdirectCommit(object):
def __init__(self, params):
self.client = rest_client.RestClient(params['vdirect_ip'],
params['vdirect_user'],
params['vdirect_password'],
wait=params['vdirect_wait'],
secondary_vdirect_ip=params['vdirect_secondary_ip'],
https_port=params['vdirect_https_port'],
http_port=params['vdirect_http_port'],
timeout=params['vdirect_timeout'],
https=params['vdirect_use_ssl'],
verify=params['vdirect_validate_certs'])
self.devices = params['devices']
self.apply = params['apply']
self.save = params['save']
self.sync = params['sync']
self.devicesMap = {}
def _validate_devices(self):
for device in self.devices:
try:
res = self.client.adc.get(device)
if res[rest_client.RESP_STATUS] == 200:
self.devicesMap.update({device: ADC_DEVICE_TYPE})
continue
res = self.client.container.get(device)
if res[rest_client.RESP_STATUS] == 200:
if res[rest_client.RESP_DATA]['type'] == PARTITIONED_CONTAINER_DEVICE_TYPE:
self.devicesMap.update({device: CONTAINER_DEVICE_TYPE})
continue
res = self.client.appWall.get(device)
if res[rest_client.RESP_STATUS] == 200:
self.devicesMap.update({device: APPWALL_DEVICE_TYPE})
continue
res = self.client.defensePro.get(device)
if res[rest_client.RESP_STATUS] == 200:
self.devicesMap.update({device: DP_DEVICE_TYPE})
continue
except Exception as e:
raise CommitException('Failed to communicate with device ' + device, str(e))
raise MissingDeviceException(device)
def _perform_action_and_update_result(self, device, action, perform, failure_occurred, actions_result):
if not perform or failure_occurred:
actions_result[action] = NOT_PERFORMED
return True
try:
if self.devicesMap[device] == ADC_DEVICE_TYPE:
res = self.client.adc.control_device(device, action)
elif self.devicesMap[device] == CONTAINER_DEVICE_TYPE:
res = self.client.container.control(device, action)
elif self.devicesMap[device] == APPWALL_DEVICE_TYPE:
res = self.client.appWall.control_device(device, action)
elif self.devicesMap[device] == DP_DEVICE_TYPE:
res = self.client.defensePro.control_device(device, action)
if res[rest_client.RESP_STATUS] in [200, 204]:
actions_result[action] = SUCCEEDED
else:
actions_result[action] = FAILED
actions_result['failure_description'] = res[rest_client.RESP_STR]
return False
except Exception as e:
actions_result[action] = FAILED
actions_result['failure_description'] = 'Exception occurred while performing '\
+ action + ' action. Exception: ' + str(e)
return False
return True
def commit(self):
self._validate_devices()
result_to_return = dict()
result_to_return['details'] = list()
for device in self.devices:
failure_occurred = False
device_type = self.devicesMap[device]
actions_result = dict()
actions_result['device_name'] = device
actions_result['device_type'] = device_type
if device_type in [DP_DEVICE_TYPE, APPWALL_DEVICE_TYPE]:
failure_occurred = not self._perform_action_and_update_result(
device, 'commit', True, failure_occurred, actions_result)\
or failure_occurred
else:
failure_occurred = not self._perform_action_and_update_result(
device, 'apply', self.apply, failure_occurred, actions_result)\
or failure_occurred
if device_type != CONTAINER_DEVICE_TYPE:
failure_occurred = not self._perform_action_and_update_result(
device, 'sync', self.sync, failure_occurred, actions_result)\
or failure_occurred
failure_occurred = not self._perform_action_and_update_result(
device, 'save', self.save, failure_occurred, actions_result)\
or failure_occurred
result_to_return['details'].extend([actions_result])
if failure_occurred:
result_to_return['msg'] = FAILURE
if 'msg' not in result_to_return:
result_to_return['msg'] = SUCCESS
return result_to_return
def main():
module = AnsibleModule(argument_spec=meta_args)
if not HAS_REST_CLIENT:
module.fail_json(msg="The python vdirect-client module is required")
try:
vdirect_commit = VdirectCommit(module.params)
result = vdirect_commit.commit()
result = dict(result=result)
module.exit_json(**result)
except Exception as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
|
blooparksystems/sale_commission
|
refs/heads/master
|
__unported__/stock_block_prodlots/product.py
|
5
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2011 Pexego (<www.pexego.es>). All Rights Reserved
# $Omar Castiñeira Saavedra$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""adds functionally on product object from block prodlots"""
from osv import fields, osv
class product_template(osv.osv):
"""adds functionally on product object from block prodlots"""
_inherit = "product.template"
_columns = {
'property_waste': fields.property(
'stock.location',
type='many2one',
relation='stock.location',
string="Waste Location",
method=True,
view_load=True,
help="For the current product (template), this stock location will be used, instead of the default one, as a virtual location where the products go when remove"),
}
product_template()
|
jiwang576/incubator-airflow
|
refs/heads/master
|
airflow/utils/state.py
|
55
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import unicode_literals
from builtins import object
class State(object):
"""
    Static class with task instance state constants and a color method to
    avoid hardcoding.
"""
# scheduler
NONE = None
REMOVED = "removed"
SCHEDULED = "scheduled"
# set by the executor (t.b.d.)
# LAUNCHED = "launched"
# set by a task
QUEUED = "queued"
RUNNING = "running"
SUCCESS = "success"
SHUTDOWN = "shutdown" # External request to shut down
FAILED = "failed"
UP_FOR_RETRY = "up_for_retry"
UPSTREAM_FAILED = "upstream_failed"
SKIPPED = "skipped"
task_states = (
SUCCESS,
RUNNING,
FAILED,
UPSTREAM_FAILED,
UP_FOR_RETRY,
QUEUED,
)
dag_states = (
SUCCESS,
RUNNING,
FAILED,
)
state_color = {
QUEUED: 'gray',
RUNNING: 'lime',
SUCCESS: 'green',
SHUTDOWN: 'blue',
FAILED: 'red',
UP_FOR_RETRY: 'gold',
UPSTREAM_FAILED: 'orange',
SKIPPED: 'pink',
REMOVED: 'lightgrey',
SCHEDULED: 'white',
}
@classmethod
def color(cls, state):
if state in cls.state_color:
return cls.state_color[state]
else:
return 'white'
@classmethod
def color_fg(cls, state):
color = cls.color(state)
if color in ['green', 'red']:
return 'white'
else:
return 'black'
@classmethod
def finished(cls):
"""
A list of states indicating that a task started and completed a
run attempt. Note that the attempt could have resulted in failure or
have been interrupted; in any case, it is no longer running.
"""
return [
cls.SUCCESS,
cls.SHUTDOWN,
cls.FAILED,
cls.SKIPPED,
]
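    # Illustrative invariant (an assumption for demonstration, not from the
    # original module): finished() and unfinished() never overlap, e.g.
    #   set(State.finished()).isdisjoint(State.unfinished())  # -> True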
@classmethod
def unfinished(cls):
"""
A list of states indicating that a task either has not completed
a run or has not even started.
"""
return [
cls.NONE,
cls.SCHEDULED,
cls.QUEUED,
cls.RUNNING,
cls.UP_FOR_RETRY
]
|
mysociety/pombola
|
refs/heads/master
|
pombola/za_hansard/management/commands/za_hansard_load_za_akomantoso.py
|
1
|
from speeches.management.import_commands import ImportCommand
from pombola.za_hansard.importers.import_za_akomantoso import ImportZAAkomaNtoso
class Command(ImportCommand):
importer_class = ImportZAAkomaNtoso
document_extension = 'xml'
|
agconti/njode
|
refs/heads/master
|
env/lib/python2.7/site-packages/pygments/util.py
|
29
|
# -*- coding: utf-8 -*-
"""
pygments.util
~~~~~~~~~~~~~
Utility functions.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import sys
split_path_re = re.compile(r'[/\\ ]')
doctype_lookup_re = re.compile(r'''(?smx)
(<\?.*?\?>)?\s*
<!DOCTYPE\s+(
[a-zA-Z_][a-zA-Z0-9]*
(?: \s+ # optional in HTML5
[a-zA-Z_][a-zA-Z0-9]*\s+
"[^"]*")?
)
[^>]*>
''')
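# Illustration (not part of the original source): for the input
#   '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN">'
# doctype_lookup_re captures 'html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"'
# as group 2, which doctype_matches() further below tests against.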
tag_re = re.compile(r'<(.+?)(\s.*?)?>.*?</.+?>',
                    re.UNICODE | re.IGNORECASE | re.DOTALL | re.MULTILINE)
xml_decl_re = re.compile(r'\s*<\?xml[^>]*\?>', re.I)
class ClassNotFound(ValueError):
"""Raised if one of the lookup functions didn't find a matching class."""
class OptionError(Exception):
pass
def get_choice_opt(options, optname, allowed, default=None, normcase=False):
string = options.get(optname, default)
if normcase:
string = string.lower()
if string not in allowed:
raise OptionError('Value for option %s must be one of %s' %
(optname, ', '.join(map(str, allowed))))
return string
def get_bool_opt(options, optname, default=None):
string = options.get(optname, default)
if isinstance(string, bool):
return string
elif isinstance(string, int):
return bool(string)
elif not isinstance(string, string_types):
raise OptionError('Invalid type %r for option %s; use '
'1/0, yes/no, true/false, on/off' % (
string, optname))
elif string.lower() in ('1', 'yes', 'true', 'on'):
return True
elif string.lower() in ('0', 'no', 'false', 'off'):
return False
else:
raise OptionError('Invalid value %r for option %s; use '
'1/0, yes/no, true/false, on/off' % (
string, optname))
def get_int_opt(options, optname, default=None):
string = options.get(optname, default)
try:
return int(string)
except TypeError:
raise OptionError('Invalid type %r for option %s; you '
'must give an integer value' % (
string, optname))
except ValueError:
raise OptionError('Invalid value %r for option %s; you '
'must give an integer value' % (
string, optname))
def get_list_opt(options, optname, default=None):
val = options.get(optname, default)
if isinstance(val, string_types):
return val.split()
elif isinstance(val, (list, tuple)):
return list(val)
else:
raise OptionError('Invalid type %r for option %s; you '
'must give a list value' % (
val, optname))
def docstring_headline(obj):
if not obj.__doc__:
return ''
res = []
for line in obj.__doc__.strip().splitlines():
if line.strip():
res.append(" " + line.strip())
else:
break
return ''.join(res).lstrip()
def make_analysator(f):
"""Return a static text analyser function that returns float values."""
def text_analyse(text):
try:
rv = f(text)
except Exception:
return 0.0
if not rv:
return 0.0
try:
return min(1.0, max(0.0, float(rv)))
except (ValueError, TypeError):
return 0.0
text_analyse.__doc__ = f.__doc__
return staticmethod(text_analyse)
def shebang_matches(text, regex):
"""Check if the given regular expression matches the last part of the
shebang if one exists.
>>> from pygments.util import shebang_matches
>>> shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?')
True
>>> shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?')
True
>>> shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?')
False
>>> shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?')
False
>>> shebang_matches('#!/usr/bin/startsomethingwith python',
... r'python(2\.\d)?')
True
It also checks for common windows executable file extensions::
>>> shebang_matches('#!C:\\Python2.4\\Python.exe', r'python(2\.\d)?')
True
Parameters (``'-f'`` or ``'--foo'`` are ignored so ``'perl'`` does
the same as ``'perl -e'``)
Note that this method automatically searches the whole string (eg:
the regular expression is wrapped in ``'^$'``)
"""
index = text.find('\n')
if index >= 0:
first_line = text[:index].lower()
else:
first_line = text.lower()
if first_line.startswith('#!'):
try:
found = [x for x in split_path_re.split(first_line[2:].strip())
if x and not x.startswith('-')][-1]
except IndexError:
return False
regex = re.compile('^%s(\.(exe|cmd|bat|bin))?$' % regex, re.IGNORECASE)
if regex.search(found) is not None:
return True
return False
def doctype_matches(text, regex):
"""Check if the doctype matches a regular expression (if present).
Note that this method only checks the first part of a DOCTYPE.
eg: 'html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"'
"""
m = doctype_lookup_re.match(text)
if m is None:
return False
doctype = m.group(2)
return re.compile(regex, re.I).match(doctype.strip()) is not None
def html_doctype_matches(text):
"""Check if the file looks like it has a html doctype."""
return doctype_matches(text, r'html')
_looks_like_xml_cache = {}
def looks_like_xml(text):
"""Check if a doctype exists or if we have some tags."""
if xml_decl_re.match(text):
return True
key = hash(text)
try:
return _looks_like_xml_cache[key]
except KeyError:
m = doctype_lookup_re.match(text)
if m is not None:
return True
rv = tag_re.search(text[:1000]) is not None
_looks_like_xml_cache[key] = rv
return rv
# Python narrow build compatibility
def _surrogatepair(c):
# Given a unicode character code
# with length greater than 16 bits,
# return the two 16 bit surrogate pair.
# From example D28 of:
# http://www.unicode.org/book/ch03.pdf
return (0xd7c0 + (c >> 10), (0xdc00 + (c & 0x3ff)))
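# Quick sanity check of the arithmetic above (illustrative, not from the
# original module):
#   _surrogatepair(0x10000)  == (0xd800, 0xdc00)   # first non-BMP code point
#   _surrogatepair(0x10FFFF) == (0xdbff, 0xdfff)   # last valid code point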
def unirange(a, b):
"""Returns a regular expression string to match the given non-BMP range."""
if b < a:
raise ValueError("Bad character range")
if a < 0x10000 or b < 0x10000:
raise ValueError("unirange is only defined for non-BMP ranges")
if sys.maxunicode > 0xffff:
# wide build
return u'[%s-%s]' % (unichr(a), unichr(b))
else:
# narrow build stores surrogates, and the 're' module handles them
# (incorrectly) as characters. Since there is still ordering among
# these characters, expand the range to one that it understands. Some
# background in http://bugs.python.org/issue3665 and
# http://bugs.python.org/issue12749
#
# Additionally, the lower constants are using unichr rather than
# literals because jython [which uses the wide path] can't load this
# file if they are literals.
ah, al = _surrogatepair(a)
bh, bl = _surrogatepair(b)
if ah == bh:
return u'(?:%s[%s-%s])' % (unichr(ah), unichr(al), unichr(bl))
else:
buf = []
buf.append(u'%s[%s-%s]' %
(unichr(ah), unichr(al),
ah == bh and unichr(bl) or unichr(0xdfff)))
            if bh - ah > 1:
                buf.append(u'[%s-%s][%s-%s]' %
                           (unichr(ah+1), unichr(bh-1),
                            unichr(0xdc00), unichr(0xdfff)))
if ah != bh:
buf.append(u'%s[%s-%s]' %
(unichr(bh), unichr(0xdc00), unichr(bl)))
return u'(?:' + u'|'.join(buf) + u')'
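# Illustration (an assumption for demonstration, not from the original
# source): on a narrow build, unirange(0x10000, 0x103FF) collapses to the
# single surrogate-pair class u'(?:\ud800[\udc00-\udfff])'; a wide build
# simply returns u'[\U00010000-\U000103FF]'.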
def format_lines(var_name, seq, raw=False, indent_level=0):
"""Formats a sequence of strings for output."""
lines = []
base_indent = ' ' * indent_level * 4
inner_indent = ' ' * (indent_level + 1) * 4
lines.append(base_indent + var_name + ' = (')
if raw:
# These should be preformatted reprs of, say, tuples.
for i in seq:
lines.append(inner_indent + i + ',')
else:
for i in seq:
# Force use of single quotes
r = repr(i + '"')
lines.append(inner_indent + r[:-2] + r[-1] + ',')
lines.append(base_indent + ')')
return '\n'.join(lines)
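# Example output (illustrative): format_lines('tokens', ["'a'", "'b'"],
# raw=True) produces:
#   tokens = (
#       'a',
#       'b',
#   )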
def duplicates_removed(it, already_seen=()):
"""
Returns a list with duplicates removed from the iterable `it`.
Order is preserved.
"""
lst = []
seen = set()
for i in it:
if i in seen or i in already_seen:
continue
lst.append(i)
seen.add(i)
return lst
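# e.g. (illustrative) duplicates_removed([1, 2, 1, 3], already_seen=(3,))
# returns [1, 2]: the repeated 1 and the pre-seen 3 are both dropped.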
class Future(object):
"""Generic class to defer some work.
Handled specially in RegexLexerMeta, to support regex string construction at
first use.
"""
def get(self):
raise NotImplementedError
def guess_decode(text):
"""Decode *text* with guessed encoding.
First try UTF-8; this should fail for non-UTF-8 encodings.
Then try the preferred locale encoding.
Fall back to latin-1, which always works.
"""
try:
text = text.decode('utf-8')
return text, 'utf-8'
except UnicodeDecodeError:
try:
import locale
prefencoding = locale.getpreferredencoding()
            text = text.decode(prefencoding)
return text, prefencoding
except (UnicodeDecodeError, LookupError):
text = text.decode('latin1')
return text, 'latin1'
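# Illustration (not part of the original module):
#   guess_decode(b'caf\xc3\xa9')  ->  (u'caf\xe9', 'utf-8')
# Input that is not valid UTF-8 falls through to the preferred locale
# encoding and finally to latin-1, which never fails.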
def guess_decode_from_terminal(text, term):
"""Decode *text* coming from terminal *term*.
First try the terminal encoding, if given.
Then try UTF-8. Then try the preferred locale encoding.
Fall back to latin-1, which always works.
"""
if getattr(term, 'encoding', None):
try:
text = text.decode(term.encoding)
except UnicodeDecodeError:
pass
else:
return text, term.encoding
return guess_decode(text)
def terminal_encoding(term):
"""Return our best guess of encoding for the given *term*."""
if getattr(term, 'encoding', None):
return term.encoding
import locale
return locale.getpreferredencoding()
# Python 2/3 compatibility
if sys.version_info < (3, 0):
unichr = unichr
xrange = xrange
string_types = (str, unicode)
text_type = unicode
u_prefix = 'u'
iteritems = dict.iteritems
itervalues = dict.itervalues
import StringIO, cStringIO
# unfortunately, io.StringIO in Python 2 doesn't accept str at all
StringIO = StringIO.StringIO
BytesIO = cStringIO.StringIO
else:
unichr = chr
xrange = range
string_types = (str,)
text_type = str
u_prefix = ''
iteritems = dict.items
itervalues = dict.values
from io import StringIO, BytesIO
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
for slots_var in orig_vars.get('__slots__', ()):
orig_vars.pop(slots_var)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
|
xiebinhqy/Dynamomysql
|
refs/heads/master
|
nodes/0.7.x/python/View.SetSolarStudyActiveFrameNumber.py
|
16
|
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
from RevitServices.Transactions import TransactionManager
doc = DocumentManager.Instance.CurrentDBDocument
view = UnwrapElement(IN[0])
frame = IN[1]
TransactionManager.Instance.EnsureInTransaction(doc)
try:
view.SunAndShadowSettings.ActiveFrame = frame
success = True
except:
success = False
TransactionManager.Instance.TransactionTaskDone()
OUT = (view,success)
|
paweljasinski/ironpython3
|
refs/heads/master
|
Tests/modules/misc/operator_test.py
|
3
|
#####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
from iptest.assert_util import *
if sys.platform!="win32":
load_iron_python_test()
from IronPythonTest import *
import clr
if not is_silverlight:
clr.AddReferenceByPartialName("System.Drawing")
@skip("win32 silverlight")
def test_sys_drawing():
from System.Drawing import Point, Size, PointF, SizeF, Rectangle, RectangleF
x = Point()
Assert(x == Point(0,0))
x = Size()
Assert(x == Size(0,0))
x = PointF()
Assert(x == PointF(0,0))
x = SizeF()
Assert(x == SizeF(0,0))
x = Rectangle()
Assert(x == Rectangle(0,0,0,0))
x = RectangleF()
Assert(x == RectangleF(0,0,0,0))
p = Point(3,4)
s = Size(2,9)
q = p + s
Assert(q == Point(5,13))
Assert(q != Point(13,5))
q = p - s
Assert(q == Point(1,-5))
Assert(q != Point(0,4))
q += s
Assert(q == Point(3,4))
Assert(q != Point(2,4))
q -= Size(1,2)
Assert(q == Point(2,2))
Assert(q != Point(1))
t = s
Assert(t == s)
Assert(t != s - Size(1,0))
t += Size(3,1)
Assert(t == Size(5,10))
Assert(t != Size(5,0))
t -= Size(2,8)
Assert(t == Size(3,2))
Assert(t != Size(0,2))
t = s + Size(-1,-2)
Assert(t == Size(1,7))
Assert(t != Size(1,5))
t = s - Size(1,2)
Assert(t == Size(1,7))
Assert(t != Size(1,3))
def weekdays(enum):
return enum.Mon|enum.Tue|enum.Wed|enum.Thu|enum.Fri
def weekend(enum):
return enum.Sat|enum.Sun
def enum_helper(enum):
days = [enum.Mon,enum.Tue,enum.Wed,enum.Thu,enum.Fri,enum.Sat,enum.Sun]
x = enum.Mon|enum.Tue|enum.Wed|enum.Thu|enum.Fri|enum.Sat|enum.Sun
y = enum.Mon
for day in days:
y |= day
Assert(x == y)
Assert((x <> y) == False)
if x == y: # EqualRetBool
b = True
else :
b = False
Assert(b)
Assert(x == weekdays(enum)|weekend(enum))
Assert(x == (weekdays(enum)^weekend(enum)))
Assert((weekdays(enum)&weekend(enum)) == enum.None)
Assert(weekdays(enum) == enum.Weekdays)
Assert(weekend(enum) == enum.Weekend)
Assert(weekdays(enum) != enum.Weekend)
Assert(weekdays(enum) != weekend(enum))
for e in [DaysInt, DaysShort, DaysLong, DaysSByte, DaysByte, DaysUShort, DaysUInt, DaysULong]:
enum_helper(e)
for e in [DaysInt, DaysShort, DaysLong, DaysSByte]:
z = operator.inv(e.Mon)
AreEqual(type(z), e)
AreEqual(z.ToString(), "-2")
for (e, v) in [ (DaysByte,254), (DaysUShort,65534), (DaysUInt,4294967294), (DaysULong,18446744073709551614) ]:
z = operator.inv(e.Mon)
AreEqual(type(z), e)
AreEqual(z.ToString(), str(v))
AssertError(ValueError, lambda: DaysInt.Mon & DaysShort.Mon)
AssertError(ValueError, lambda: DaysInt.Mon | DaysShort.Mon)
AssertError(ValueError, lambda: DaysInt.Mon ^ DaysShort.Mon)
AssertError(ValueError, lambda: DaysInt.Mon & 1)
AssertError(ValueError, lambda: DaysInt.Mon | 1)
AssertError(ValueError, lambda: DaysInt.Mon ^ 1)
def f():
if DaysInt.Mon == DaysShort.Mon: return True
return False
AreEqual(f(), False)
Assert(not DaysInt.Mon == None)
Assert(DaysInt.Mon != None)
@skip("win32 silverlight")
def test_cp3982():
from System.Drawing import Color
test_funcs = [ lambda x: x,
lambda x: [x],
lambda x: (x),
lambda x: [[x]],
lambda x: [(x)],
lambda x: ((x)),
lambda x: ([x]),
lambda x: [[[x]]],
lambda x: (((x))),
lambda x: [x, x],
lambda x: (x, x),
lambda x: [(x), [x, x]],
lambda x: ([x, x], (x)),
]
for test_func in test_funcs:
Assert(test_func(Color.Red)==test_func(Color.Red))
Assert(test_func(Color.Red)!=test_func(Color.Green))
Assert(test_func(Color.Green)!=test_func(Color.Red))
Assert( [Color.Green, Color.Red] == [Color.Green, Color.Red])
Assert([(Color.Green, Color.Red)] == [(Color.Green, Color.Red)])
Assert( [Color.Green, Color.Red] != (Color.Green, Color.Red))
Assert( [Color.Green, Color.Red] != [Color.Green, Color.Black])
#------------------------------------------------------------------------------
import operator
def test_operator_module():
x = ['a','b','c','d']
g = operator.itemgetter(2)
AreEqual(g(x), 'c')
class C:
a = 10
g = operator.attrgetter("a")
AreEqual(g(C), 10)
AreEqual(g(C()), 10)
a = { 'k' : 'v' }
g = operator.itemgetter('x')
AssertError(KeyError, g, a)
x = True
AreEqual(x, True)
AreEqual(not x, False)
x = False
AreEqual(x, False)
AreEqual(not x, True)
class C:
def func(self):
pass
a = C.func
b = C.func
AreEqual(a, b)
c = C()
a = c.func
b = c.func
AreEqual(a, b)
# __setitem__
x = {}
operator.__setitem__(x, 'abc', 'def')
AreEqual(x, {'abc':'def'})
# __not__
x = True
AreEqual(operator.__not__(x), False)
########################
# string multiplication
def test_string_mult():
class foo(int): pass
fooInst = foo(3)
AreEqual('aaa', 'a' * 3)
AreEqual('aaa', 'a' * 3L)
AreEqual('aaa', 'a' * fooInst)
AreEqual('', 'a' * False)
AreEqual('a', 'a' * True)
###############################
# (not)equals overloading semantics
def test_eq_ne_overloads():
class CustomEqual:
def __eq__(self, other):
return 7
AreEqual((CustomEqual() == 1), 7)
for base_type in [
dict, list, tuple,
float, long, int, complex,
str, unicode,
object,
]:
class F(base_type):
def __eq__(self, other):
return other == 'abc'
def __ne__(self, other):
return other == 'def'
AreEqual(F() == 'abc', True)
AreEqual(F() != 'def', True)
AreEqual(F() == 'qwe', False)
AreEqual(F() != 'qwe', False)
# Test binary operators for all numeric types and types inherited from them
def test_num_binary_ops():
class myint(int): pass
class mylong(long): pass
class myfloat(float): pass
class mycomplex(complex): pass
l = [2, 10L, (1+2j), 3.4, myint(7), mylong(5), myfloat(2.32), mycomplex(3, 2), True]
if is_cli or is_silverlight:
l.append(System.Int64.Parse("5"))
def add(a, b): return a + b
def sub(a, b): return a - b
def mul(a, b): return a * b
def div(a, b): return a / b
def mod(a, b): return a % b
def truediv(a,b): return a / b
def floordiv(a,b): return a // b
def pow(a,b): return a ** b
op = [
('+', add, True),
('-', sub, True),
('*', mul, True),
('/', div, True),
('%', mod, False),
('//', floordiv, False),
('**', pow, True)
]
for a in l:
for b in l:
for sym, fnc, cmp in op:
if cmp or (not isinstance(a, complex) and not isinstance(b, complex)):
try:
r = fnc(a,b)
except:
(exc_type, exc_value, exc_traceback) = sys.exc_info()
Fail("Binary operator failed: %s, %s: %s %s %s (Message=%s)" % (type(a).__name__, type(b).__name__, str(a), sym, str(b), str(exc_value)))
threes = [ 3, 3L, 3.0 ]
zeroes = [ 0, 0L, 0.0 ]
if is_cli or is_silverlight:
threes.append(System.Int64.Parse("3"))
zeroes.append(System.Int64.Parse("0"))
for i in threes:
for j in zeroes:
for fnc in [div, mod, truediv, floordiv]:
try:
r = fnc(i, j)
except ZeroDivisionError:
pass
else:
(exc_type, exc_value, exc_traceback) = sys.exc_info()
                    Fail("Didn't get ZeroDivisionError %s, %s, %s, %s, %s (Message=%s)" % (str(fnc), type(i).__name__, type(j).__name__, str(i), str(j), str(exc_value)))
#------------------------------------------------------------------------------
def test_unary_ops():
if is_cli or is_silverlight:
unary = UnaryClass(9)
AreEqual(-(unary.value), (-unary).value)
AreEqual(~(unary.value), (~unary).value)
# testing customized unary op
class C1:
def __pos__(self):
return -10
def __neg__(self):
return 10
def __invert__(self):
return 20
def __abs__(self):
return 30
class C2(object):
def __pos__(self):
return -10
def __neg__(self):
return 10
def __invert__(self):
return 20
def __abs__(self):
return 30
for x in C1(), C2():
AreEqual(+x, -10)
AreEqual(-x, 10)
AreEqual(~x, 20)
AreEqual(abs(x), 30)
#------------------------------------------------------------------------------
# testing custom divmod operator
def test_custom_divmod():
class DM:
def __divmod__(self, other):
return "__divmod__"
class NewDM(int): pass
class Callable:
def __call__(self, other):
return "__call__"
class CallDM:
__divmod__ = Callable()
AreEqual(divmod(DM(), DM()), "__divmod__")
AreEqual(divmod(DM(), 10), "__divmod__")
AreEqual(divmod(NewDM(10), NewDM(5)), (2, 0))
AreEqual(divmod(CallDM(), 2), "__call__")
#####################################################################
# object identity of booleans - __ne__ should return "True" or "False", not a new boxed bool
def test_bool_obj_id():
AreEqual(id(complex.__ne__(1+1j, 1+1j)), id(False))
AreEqual(id(complex.__ne__(1+1j, 1+2j)), id(True))
#------------------------------------------------------------------------------
def test_sanity():
'''
Performs a set of simple sanity checks on most operators.
'''
#__abs__
AreEqual(operator.__abs__(0), 0)
AreEqual(operator.__abs__(1), 1)
AreEqual(operator.__abs__(-1), 1)
AreEqual(operator.__abs__(0.0), 0.0)
AreEqual(operator.__abs__(1.1), 1.1)
AreEqual(operator.__abs__(-1.1), 1.1)
AreEqual(operator.__abs__(0L), 0L)
AreEqual(operator.__abs__(1L), 1L)
AreEqual(operator.__abs__(-1L), 1L)
#__neg__
AreEqual(operator.__neg__(0), 0)
AreEqual(operator.__neg__(1), -1)
AreEqual(operator.__neg__(-1), 1)
AreEqual(operator.__neg__(0.0), 0.0)
AreEqual(operator.__neg__(1.1), -1.1)
AreEqual(operator.__neg__(-1.1), 1.1)
AreEqual(operator.__neg__(0L), 0L)
AreEqual(operator.__neg__(1L), -1L)
AreEqual(operator.__neg__(-1L), 1L)
#__pos__
AreEqual(operator.__pos__(0), 0)
AreEqual(operator.__pos__(1), 1)
AreEqual(operator.__pos__(-1), -1)
AreEqual(operator.__pos__(0.0), 0.0)
AreEqual(operator.__pos__(1.1), 1.1)
AreEqual(operator.__pos__(-1.1), -1.1)
AreEqual(operator.__pos__(0L), 0L)
AreEqual(operator.__pos__(1L), 1L)
AreEqual(operator.__pos__(-1L), -1L)
#__add__
AreEqual(operator.__add__(0, 0), 0)
AreEqual(operator.__add__(1, 2), 3)
AreEqual(operator.__add__(-1, 2), 1)
AreEqual(operator.__add__(0.0, 0.0), 0.0)
AreEqual(operator.__add__(1.1, 2.1), 3.2)
AreEqual(operator.__add__(-1.1, 2.1), 1.0)
AreEqual(operator.__add__(0L, 0L), 0L)
AreEqual(operator.__add__(1L, 2L), 3L)
AreEqual(operator.__add__(-1L, 2L), 1L)
#__sub__
AreEqual(operator.__sub__(0, 0), 0)
AreEqual(operator.__sub__(1, 2), -1)
AreEqual(operator.__sub__(-1, 2), -3)
AreEqual(operator.__sub__(0.0, 0.0), 0.0)
AreEqual(operator.__sub__(1.1, 2.1), -1.0)
AreEqual(operator.__sub__(-1.1, 2.1), -3.2)
AreEqual(operator.__sub__(0L, 0L), 0L)
AreEqual(operator.__sub__(1L, 2L), -1L)
AreEqual(operator.__sub__(-1L, 2L), -3L)
#__mul__
AreEqual(operator.__mul__(0, 0), 0)
AreEqual(operator.__mul__(1, 2), 2)
AreEqual(operator.__mul__(-1, 2), -2)
AreEqual(operator.__mul__(0.0, 0.0), 0.0)
AreEqual(operator.__mul__(2.0, 3.0), 6.0)
AreEqual(operator.__mul__(-2.0, 3.0), -6.0)
AreEqual(operator.__mul__(0L, 0L), 0L)
AreEqual(operator.__mul__(1L, 2L), 2L)
AreEqual(operator.__mul__(-1L, 2L), -2L)
#__div__
AreEqual(operator.__div__(0, 1), 0)
AreEqual(operator.__div__(4, 2), 2)
AreEqual(operator.__div__(-1, 2), -1)
AreEqual(operator.__div__(0.0, 1.0), 0.0)
AreEqual(operator.__div__(4.0, 2.0), 2.0)
AreEqual(operator.__div__(-4.0, 2.0), -2.0)
AreEqual(operator.__div__(0L, 1L), 0L)
AreEqual(operator.__div__(4L, 2L), 2L)
AreEqual(operator.__div__(-4L, 2L), -2L)
#__floordiv__
AreEqual(operator.__floordiv__(0, 1), 0)
AreEqual(operator.__floordiv__(4, 2), 2)
AreEqual(operator.__floordiv__(-1, 2), -1)
AreEqual(operator.__floordiv__(0.0, 1.0), 0.0)
AreEqual(operator.__floordiv__(4.0, 2.0), 2.0)
AreEqual(operator.__floordiv__(-4.0, 2.0), -2.0)
AreEqual(operator.__floordiv__(0L, 1L), 0L)
AreEqual(operator.__floordiv__(4L, 2L), 2L)
AreEqual(operator.__floordiv__(-4L, 2L), -2L)
#__truediv__
AreEqual(operator.__truediv__(0, 1), 0)
AreEqual(operator.__truediv__(4, 2), 2)
AreEqual(operator.__truediv__(-1, 2), -0.5)
AreEqual(operator.__truediv__(0.0, 1.0), 0.0)
AreEqual(operator.__truediv__(4.0, 2.0), 2.0)
AreEqual(operator.__truediv__(-1.0, 2.0), -0.5)
AreEqual(operator.__truediv__(0L, 1L), 0L)
AreEqual(operator.__truediv__(4L, 2L), 2L)
AreEqual(operator.__truediv__(-4L, 2L), -2L)
#__mod__
AreEqual(operator.__mod__(0, 1), 0)
AreEqual(operator.__mod__(4, 2), 0)
AreEqual(operator.__mod__(-1, 2), 1)
AreEqual(operator.__mod__(0.0, 1.0), 0.0)
AreEqual(operator.__mod__(4.0, 2.0), 0.0)
AreEqual(operator.__mod__(-1.0, 2.0), 1.0)
AreEqual(operator.__mod__(0L, 1L), 0L)
AreEqual(operator.__mod__(4L, 2L), 0L)
AreEqual(operator.__mod__(-4L, 2L), 0L)
#__inv__
AreEqual(operator.__inv__(0), -1)
AreEqual(operator.__inv__(1), -2)
AreEqual(operator.__inv__(-1), 0)
AreEqual(operator.__inv__(0L), -1L)
AreEqual(operator.__inv__(1L), -2L)
AreEqual(operator.__inv__(-1L), 0L)
#__invert__
AreEqual(operator.__invert__(0), -1)
AreEqual(operator.__invert__(1), -2)
AreEqual(operator.__invert__(-1), 0)
AreEqual(operator.__invert__(0L), -1L)
AreEqual(operator.__invert__(1L), -2L)
AreEqual(operator.__invert__(-1L), 0L)
#__lshift__
AreEqual(operator.__lshift__(0, 1), 0)
AreEqual(operator.__lshift__(1, 1), 2)
AreEqual(operator.__lshift__(-1, 1), -2)
AreEqual(operator.__lshift__(0L, 1), 0L)
AreEqual(operator.__lshift__(1L, 1), 2L)
AreEqual(operator.__lshift__(-1L, 1), -2L)
#__rshift__
AreEqual(operator.__rshift__(1, 1), 0)
AreEqual(operator.__rshift__(2, 1), 1)
AreEqual(operator.__rshift__(-1, 1), -1)
AreEqual(operator.__rshift__(1L, 1), 0L)
AreEqual(operator.__rshift__(2L, 1), 1L)
AreEqual(operator.__rshift__(-1L, 1), -1L)
#__not__
AreEqual(operator.__not__(0), 1)
AreEqual(operator.__not__(1), 0)
AreEqual(operator.__not__(-1), 0)
AreEqual(operator.__not__(0L), 1)
AreEqual(operator.__not__(1L), 0)
AreEqual(operator.__not__(-1L), 0)
#__and__
AreEqual(operator.__and__(0, 0), 0)
AreEqual(operator.__and__(1, 1), 1)
AreEqual(operator.__and__(0, 1), 0)
AreEqual(operator.__and__(1, 0), 0)
#__xor__
AreEqual(operator.__xor__(0, 0), 0)
AreEqual(operator.__xor__(1, 1), 0)
AreEqual(operator.__xor__(0, 1), 1)
AreEqual(operator.__xor__(1, 0), 1)
#__or__
AreEqual(operator.__or__(0, 0), 0)
AreEqual(operator.__or__(1, 1), 1)
AreEqual(operator.__or__(0, 1), 1)
AreEqual(operator.__or__(1, 0), 1)
#__concat__
AreEqual(operator.__concat__([0], [1]), [0,1])
AreEqual(operator.__concat__([2], [1]), [2,1])
AreEqual(operator.__concat__([-1], [1]), [-1,1])
#__contains__
Assert(operator.__contains__("abc", "c"))
Assert(not operator.__contains__("abc", "d"))
Assert(operator.__contains__("abc", ""))
Assert(not operator.__contains__("", "c"))
Assert(operator.__contains__([1,2,3], 1))
Assert(not operator.__contains__([1,2,3], 4))
#__getitem__
AreEqual(operator.__getitem__("abc", 2), "c")
AssertError(IndexError, operator.__getitem__, "abc", 3)
AreEqual(operator.__getitem__([1,2,3], 2), 3)
AssertError(IndexError, operator.__getitem__, [1,2,3], 3)
#__setitem__
AssertError(TypeError, operator.__setitem__, "abc", 2, "d")
t_list = [1,2,3]
operator.__setitem__(t_list, 2, 4)
AreEqual(t_list, [1,2,4])
AssertError(IndexError, operator.__setitem__, [1,2,3], 4, 9)
#__delitem__
#UNIMPLEMENTED
#AssertError(TypeError, operator.__delitem__, "abc", 2)
t_list = [1,2,3]
operator.__delitem__(t_list, 2)
AreEqual(t_list, [1,2])
AssertError(IndexError, operator.__delitem__, [1,2,3], 4)
#__repeat__
AreEqual(operator.__repeat__("abc", 2), "abcabc")
AreEqual(operator.__repeat__("", 2), "")
AreEqual(operator.__repeat__([1,2,3], 2), [1,2,3,1,2,3])
#__getslice__
AreEqual(operator.__getslice__("abc", 1, 2), "b")
AreEqual(operator.__getslice__("abc", 0, 3), "abc")
AreEqual(operator.__getslice__("", 0, 0), "")
AreEqual(operator.__getslice__([1,2,3], 1, 2), [2])
AreEqual(operator.__getslice__([1,2,3], 0, 3), [1,2,3])
AreEqual(operator.__getslice__([], 0, 0), [])
#__delslice__
t_list = [1,2,3]
operator.__delslice__(t_list, 1, 2)
AreEqual(t_list, [1,3])
t_list = [1,2,3]
operator.__delslice__(t_list, 0, 3)
AreEqual(t_list, [])
t_list = [1,2,3]
operator.__delslice__(t_list, 0, 0)
AreEqual(t_list, [1,2,3])
#__setslice__
t_list = [1,2,3]
operator.__setslice__(t_list, 1, 2, [9])
AreEqual(t_list, [1,9,3])
t_list = [1,2,3]
operator.__setslice__(t_list, 0, 3, [9, 8])
AreEqual(t_list, [9, 8])
t_list = [1,2,3]
operator.__setslice__(t_list, 0, 0, [9])
AreEqual(t_list, [9,1, 2,3])
def test_py25_operator():
ops = ['iadd', 'isub', 'idiv', 'ilshift', 'imod', 'imul', 'ior', 'ipow', 'irshift', 'isub', 'itruediv', 'ifloordiv', 'ixor']
class foo(object):
for x in ops:
exec 'def __%s__(self, other): return "%s", other' % (x, x)
for x in ops:
AreEqual(getattr(operator, x)(foo(), 42), (x, 42))
AreEqual(getattr(operator, '__' + x + '__')(foo(), 42), (x, 42))
def test_concat_repeat():
AssertError(TypeError, operator.concat, 2, 3)
AssertError(TypeError, operator.repeat, 2, 3)
def test_addition_error():
AssertErrorWithMessage(TypeError, "unsupported operand type(s) for +: 'int' and 'str'", lambda : 2 + 'abc')
run_test(__name__)
|
yongshengwang/hue
|
refs/heads/master
|
desktop/core/ext-py/Paste-2.0.1/paste/auth/grantip.py
|
50
|
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
Grant roles and logins based on IP address.
"""
import six
from paste.util import ip4
class GrantIPMiddleware(object):
"""
On each request, ``ip_map`` is checked against ``REMOTE_ADDR``
and logins and roles are assigned based on that.
``ip_map`` is a map of {ip_mask: (username, roles)}. Either
``username`` or ``roles`` may be None. Roles may also be prefixed
with ``-``, like ``'-system'`` meaning that role should be
revoked. ``'__remove__'`` for a username will remove the username.
If ``clobber_username`` is true (default) then any user
specification will override the current value of ``REMOTE_USER``.
``'__remove__'`` will always clobber the username.
``ip_mask`` is something that `paste.util.ip4:IP4Range
<class-paste.util.ip4.IP4Range.html>`_ can parse. Simple IP
addresses, IP/mask, ip<->ip ranges, and hostnames are allowed.
"""
def __init__(self, app, ip_map, clobber_username=True):
self.app = app
self.ip_map = []
for key, value in ip_map.items():
self.ip_map.append((ip4.IP4Range(key),
self._convert_user_role(value[0], value[1])))
self.clobber_username = clobber_username
def _convert_user_role(self, username, roles):
if roles and isinstance(roles, six.string_types):
roles = roles.split(',')
return (username, roles)
def __call__(self, environ, start_response):
addr = ip4.ip2int(environ['REMOTE_ADDR'], False)
remove_user = False
add_roles = []
for range, (username, roles) in self.ip_map:
if addr in range:
if roles:
add_roles.extend(roles)
if username == '__remove__':
remove_user = True
elif username:
if (not environ.get('REMOTE_USER')
or self.clobber_username):
environ['REMOTE_USER'] = username
if (remove_user and 'REMOTE_USER' in environ):
del environ['REMOTE_USER']
        if add_roles:
self._set_roles(environ, add_roles)
return self.app(environ, start_response)
def _set_roles(self, environ, roles):
cur_roles = environ.get('REMOTE_USER_TOKENS', '').split(',')
# Get rid of empty roles:
cur_roles = list(filter(None, cur_roles))
remove_roles = []
for role in roles:
if role.startswith('-'):
remove_roles.append(role[1:])
else:
if role not in cur_roles:
cur_roles.append(role)
for role in remove_roles:
if role in cur_roles:
cur_roles.remove(role)
environ['REMOTE_USER_TOKENS'] = ','.join(cur_roles)
def make_grantip(app, global_conf, clobber_username=False, **kw):
"""
Grant roles or usernames based on IP addresses.
Config looks like this::
[filter:grant]
use = egg:Paste#grantip
clobber_username = true
# Give localhost system role (no username):
127.0.0.1 = -:system
# Give everyone in 192.168.0.* editor role:
192.168.0.0/24 = -:editor
# Give one IP the username joe:
192.168.0.7 = joe
    # And one IP should not be logged in:
192.168.0.10 = __remove__:-editor
"""
from paste.deploy.converters import asbool
clobber_username = asbool(clobber_username)
ip_map = {}
for key, value in kw.items():
if ':' in value:
username, role = value.split(':', 1)
else:
username = value
role = ''
if username == '-':
username = ''
if role == '-':
role = ''
        ip_map[key] = (username, role)
return GrantIPMiddleware(app, ip_map, clobber_username)
|
tima/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/digital_ocean/digital_ocean_sshkey.py
|
14
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: digital_ocean_sshkey
short_description: Manage DigitalOcean SSH keys
description:
- Create/delete DigitalOcean SSH keys.
version_added: "2.4"
author: "Patrick Marques (@pmarques)"
options:
state:
description:
- Indicate desired state of the target.
default: present
choices: ['present', 'absent']
fingerprint:
description:
- This is a unique identifier for the SSH key used to delete a key
required: false
default: None
version_added: 2.4
aliases: ['id']
name:
description:
- The name for the SSH key
required: false
default: None
ssh_pub_key:
description:
- The Public SSH key to add.
required: false
default: None
oauth_token:
description:
- DigitalOcean OAuth token.
required: true
version_added: 2.4
notes:
- Version 2 of DigitalOcean API is used.
requirements:
- "python >= 2.6"
'''
EXAMPLES = '''
- name: "Create ssh key"
digital_ocean_sshkey:
oauth_token: "{{ oauth_token }}"
name: "My SSH Public Key"
ssh_pub_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example"
state: present
register: result
- name: "Delete ssh key"
digital_ocean_sshkey:
oauth_token: "{{ oauth_token }}"
state: "absent"
fingerprint: "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa"
'''
RETURN = '''
# Digital Ocean API info https://developers.digitalocean.com/documentation/v2/#list-all-keys
data:
description: This is only present when C(state=present)
returned: when C(state=present)
type: dict
sample: {
"ssh_key": {
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"name": "My SSH Public Key",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example"
}
}
'''
import json
import hashlib
import base64
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
class Response(object):
def __init__(self, resp, info):
self.body = None
if resp:
self.body = resp.read()
self.info = info
@property
def json(self):
if not self.body:
if "body" in self.info:
return json.loads(self.info["body"])
return None
try:
return json.loads(self.body)
except ValueError:
return None
@property
def status_code(self):
return self.info["status"]
class Rest(object):
def __init__(self, module, headers):
self.module = module
self.headers = headers
self.baseurl = 'https://api.digitalocean.com/v2'
def _url_builder(self, path):
if path[0] == '/':
path = path[1:]
return '%s/%s' % (self.baseurl, path)
def send(self, method, path, data=None, headers=None):
url = self._url_builder(path)
data = self.module.jsonify(data)
timeout = self.module.params['timeout']
resp, info = fetch_url(self.module, url, data=data, headers=self.headers, method=method, timeout=timeout)
        # Exceptions in fetch_url may result in a status of -1; this ensures a
        # proper error message is reported to the user in that case.
if info['status'] == -1:
self.module.fail_json(msg=info['msg'])
return Response(resp, info)
def get(self, path, data=None, headers=None):
return self.send('GET', path, data, headers)
def put(self, path, data=None, headers=None):
return self.send('PUT', path, data, headers)
def post(self, path, data=None, headers=None):
return self.send('POST', path, data, headers)
def delete(self, path, data=None, headers=None):
return self.send('DELETE', path, data, headers)
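# Illustrative behaviour (not part of the original module): _url_builder
# strips any leading '/' and prefixes the v2 base URL, so
#   Rest(module, {...}).get('account/keys')
# issues a GET against https://api.digitalocean.com/v2/account/keys.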
def core(module):
api_token = module.params['oauth_token']
state = module.params['state']
fingerprint = module.params['fingerprint']
name = module.params['name']
ssh_pub_key = module.params['ssh_pub_key']
rest = Rest(module, {'Authorization': 'Bearer {0}'.format(api_token),
'Content-type': 'application/json'})
fingerprint = fingerprint or ssh_key_fingerprint(ssh_pub_key)
response = rest.get('account/keys/{0}'.format(fingerprint))
status_code = response.status_code
json = response.json
if status_code not in (200, 404):
module.fail_json(msg='Error getting ssh key [{0}: {1}]'.format(
status_code, response.json['message']), fingerprint=fingerprint)
    if state == 'present':
if status_code == 404:
            # If the key was not found, create it.
if module.check_mode:
module.exit_json(changed=True)
payload = {
'name': name,
'public_key': ssh_pub_key
}
response = rest.post('account/keys', data=payload)
status_code = response.status_code
json = response.json
if status_code == 201:
module.exit_json(changed=True, data=json)
module.fail_json(msg='Error creating ssh key [{0}: {1}]'.format(
status_code, response.json['message']))
elif status_code == 200:
        # If the key was found, check whether its name needs to be updated
if name is None or json['ssh_key']['name'] == name:
module.exit_json(changed=False, data=json)
if module.check_mode:
module.exit_json(changed=True)
payload = {
'name': name,
}
response = rest.put('account/keys/{0}'.format(fingerprint), data=payload)
status_code = response.status_code
json = response.json
if status_code == 200:
module.exit_json(changed=True, data=json)
module.fail_json(msg='Error updating ssh key name [{0}: {1}]'.format(
status_code, response.json['message']), fingerprint=fingerprint)
    elif state == 'absent':
if status_code == 404:
module.exit_json(changed=False)
if module.check_mode:
module.exit_json(changed=True)
response = rest.delete('account/keys/{0}'.format(fingerprint))
status_code = response.status_code
json = response.json
if status_code == 204:
module.exit_json(changed=True)
        module.fail_json(msg='Error deleting ssh key [{0}: {1}]'.format(
            status_code, response.json['message']))
def ssh_key_fingerprint(ssh_pub_key):
key = ssh_pub_key.split(None, 2)[1]
fingerprint = hashlib.md5(base64.decodestring(key)).hexdigest()
return ':'.join(a + b for a, b in zip(fingerprint[::2], fingerprint[1::2]))
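# Illustration, reusing the fingerprint shown in EXAMPLES above: an MD5 hex
# digest '3b16bfe48b008bb8598ca9d3f01945fa' of the base64-decoded key body is
# rendered pairwise as '3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa'.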
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(choices=['present', 'absent'], default='present'),
fingerprint=dict(aliases=['id'], required=False),
name=dict(required=False),
ssh_pub_key=dict(required=False),
oauth_token=dict(
no_log=True,
# Support environment variable for DigitalOcean OAuth Token
fallback=(env_fallback, ['DO_API_TOKEN', 'DO_API_KEY', 'DO_OAUTH_TOKEN']),
required=True,
),
validate_certs=dict(type='bool', default=True),
timeout=dict(type='int', default=30),
),
required_one_of=(
('fingerprint', 'ssh_pub_key'),
),
supports_check_mode=True,
)
core(module)
if __name__ == '__main__':
main()
|
flgiordano/netcash
|
refs/heads/master
|
+/google-cloud-sdk/lib/surface/compute/instances/start.py
|
1
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for starting an instance."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import utils
class Start(base_classes.NoOutputAsyncMutator):
"""Start a stopped virtual machine instance.
*{command}* is used to start a stopped Google Compute Engine virtual machine.
Only a stopped virtual machine can be started.
"""
@staticmethod
def Args(parser):
utils.AddZoneFlag(
parser,
resource_type='instance',
operation_type='start')
parser.add_argument(
'name',
nargs='+',
completion_resource='compute.instances',
help='The names of the instances to start.')
@property
def service(self):
return self.compute.instances
@property
def method(self):
return 'Start'
@property
def resource_type(self):
return 'instances'
def CreateRequests(self, args):
request_list = []
for name in args.name:
instance_ref = self.CreateZonalReference(name, args.zone)
request = self.messages.ComputeInstancesStartRequest(
instance=instance_ref.Name(),
project=self.project,
zone=instance_ref.zone)
request_list.append(request)
return request_list
def Display(self, _, resources):
# There is no need to display anything when starting an
# instance. Instead, we consume the generator returned from Run()
# to invoke the logic that waits for the start to complete.
list(resources)
|
creativcoder/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/css/tools/w3ctestlib/Indexer.py
|
26
|
#!/usr/bin/python
# CSS Test Suite Manipulation Library
# Initial code by fantasai, joint copyright 2010 W3C and Microsoft
# Licensed under BSD 3-Clause: <http://www.w3.org/Consortium/Legal/2008/03-bsd-license>
# Define contains vmethod for Template Toolkit
from template.stash import list_op
@list_op("contains")
def list_contains(l, x):
return x in l
import sys
import re
import os
import codecs
from os.path import join, exists, abspath
from template import Template
import w3ctestlib
from Utils import listfiles, escapeToNamedASCII
from OutputFormats import ExtensionMap
import shutil
class Section:
def __init__(self, uri, title, numstr):
self.uri = uri
self.title = title
self.numstr = numstr
self.tests = []
def __cmp__(self, other):
return cmp(self.natsortkey(), other.natsortkey())
def chapterNum(self):
return self.numstr.partition('.')[0]
def natsortkey(self):
chunks = self.numstr.partition('.#')[0].split('.')
for index in range(len(chunks)):
if chunks[index].isdigit():
# wrap in tuple with '0' to explicitly specify numbers come first
chunks[index] = (0, int(chunks[index]))
else:
chunks[index] = (1, chunks[index])
return (chunks, self.numstr)
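# Illustration (not part of the original source): for numstr '4.10.a',
# Section.natsortkey() returns ([(0, 4), (0, 10), (1, 'a')], '4.10.a'), so
# numeric chunks sort first (and numerically), lettered chunks after.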
class Indexer:
def __init__(self, suite, sections, suites, flags, splitChapter=False, templatePathList=None,
extraData=None, overviewTmplNames=None, overviewCopyExts=('.css', 'htaccess')):
"""Initialize indexer with TestSuite `suite` toc data file
`tocDataPath` and additional template paths in list `templatePathList`.
The toc data file should be list of tab-separated records, one
per line, of each spec section's uri, number/letter, and title.
`splitChapter` selects a single page index if False, chapter
        indices if True.
`extraData` can be a dictionary whose data gets passed to the templates.
`overviewCopyExts` lists file extensions that should be found
and copied from the template path into the main build directory.
        The default value is ('.css', 'htaccess').
        `overviewTmplNames` lists template names that should be
processed from the template path into the main build directory.
The '.tmpl' extension, if any, is stripped from the output filename.
        The default value is ['index.htm.tmpl', 'index.xht.tmpl', 'testinfo.data.tmpl',
        'implementation-report-TEMPLATE.data.tmpl'].
"""
self.suite = suite
self.splitChapter = splitChapter
self.extraData = extraData
self.overviewCopyExtPat = re.compile('.*(%s)$' % '|'.join(overviewCopyExts))
self.overviewTmplNames = overviewTmplNames if overviewTmplNames is not None \
else ['index.htm.tmpl', 'index.xht.tmpl', 'testinfo.data.tmpl',
'implementation-report-TEMPLATE.data.tmpl']
# Initialize template engine
self.templatePath = [join(w3ctestlib.__path__[0], 'templates')]
if templatePathList:
self.templatePath.extend(templatePathList)
self.templatePath = [abspath(path) for path in self.templatePath]
self.tt = Template({
'INCLUDE_PATH': self.templatePath,
'ENCODING' : 'utf-8',
'PRE_CHOMP' : 1,
'POST_CHOMP' : 0,
})
# Load toc data
self.sections = {}
for uri, numstr, title in sections:
uri = intern(uri.encode('ascii'))
uriKey = intern(self._normalizeScheme(uri))
numstr = escapeToNamedASCII(numstr)
title = escapeToNamedASCII(title) if title else None
self.sections[uriKey] = Section(uri, title, numstr)
self.suites = suites
self.flags = flags
# Initialize storage
self.errors = {}
self.contributors = {}
self.alltests = []
def _normalizeScheme(self, uri):
if (uri and uri.startswith('http:')):
return 'https:' + uri[5:]
return uri
def indexGroup(self, group):
for test in group.iterTests():
data = test.getMetadata()
if data: # Shallow copy for template output
data = dict(data)
data['file'] = '/'.join((group.name, test.relpath)) \
if group.name else test.relpath
if (data['scripttest']):
data['flags'].append(intern('script'))
self.alltests.append(data)
for uri in data['links']:
uri = self._normalizeScheme(uri)
uri = uri.replace(self._normalizeScheme(self.suite.draftroot), self._normalizeScheme(self.suite.specroot))
if self.sections.has_key(uri):
                    self.sections[uri].tests.append(data)
for credit in data['credits']:
self.contributors[credit[0]] = credit[1]
else:
self.errors[test.sourcepath] = test.errors
def __writeTemplate(self, template, data, outfile):
o = self.tt.process(template, data)
f = open(outfile, 'w')
f.write(o.encode('utf-8'))
f.close()
def writeOverview(self, destDir, errorOut=sys.stderr, addTests=[]):
"""Write format-agnostic pages such as test suite overview pages,
test data files, and error reports.
Indexed errors are reported to errorOut, which must be either
an output handle such as sys.stderr, a tuple of
(template filename string, output filename string)
or None to suppress error output.
`addTests` is a list of additional test paths, relative to the
        overview root; it is intended for indexing raw tests.
"""
# Set common values
data = self.extraData.copy()
data['suitetitle'] = self.suite.title
data['suite'] = self.suite.name
data['specroot'] = self.suite.specroot
data['draftroot'] = self.suite.draftroot
data['contributors'] = self.contributors
data['tests'] = self.alltests
data['extmap'] = ExtensionMap({'.xht':'', '.html':'', '.htm':'', '.svg':''})
data['formats'] = self.suite.formats
data['addtests'] = addTests
data['suites'] = self.suites
data['flagInfo'] = self.flags
data['formatInfo'] = { 'html4': { 'report': True, 'path': 'html4', 'ext': 'htm', 'filter': 'nonHTML'},
'html5': { 'report': True, 'path': 'html', 'ext': 'htm', 'filter': 'nonHTML' },
'xhtml1': { 'report': True, 'path': 'xhtml1', 'ext': 'xht', 'filter': 'HTMLonly' },
'xhtml1print': { 'report': False, 'path': 'xhtml1print', 'ext': 'xht', 'filter': 'HTMLonly' },
'svg': { 'report': True, 'path': 'svg', 'ext': 'svg', 'filter': 'HTMLonly' }
}
# Copy simple copy files
for tmplDir in reversed(self.templatePath):
files = listfiles(tmplDir)
for file in files:
if self.overviewCopyExtPat.match(file):
shutil.copy(join(tmplDir, file), join(destDir, file))
# Generate indexes
for tmpl in self.overviewTmplNames:
out = tmpl[0:-5] if tmpl.endswith('.tmpl') else tmpl
self.__writeTemplate(tmpl, data, join(destDir, out))
# Report errors
if (self.errors):
        if isinstance(errorOut, tuple):
            data['errors'] = self.errors
self.__writeTemplate(errorOut[0], data, join(destDir, errorOut[1]))
else:
sys.stdout.flush()
for errorLocation in self.errors:
print >> errorOut, "Error in %s: %s" % \
(errorLocation, ' '.join([str(error) for error in self.errors[errorLocation]]))
def writeIndex(self, format):
"""Write indices into test suite build output through format `format`.
"""
# Set common values
data = self.extraData.copy()
data['suitetitle'] = self.suite.title
data['suite'] = self.suite.name
data['specroot'] = self.suite.specroot
data['draftroot'] = self.suite.draftroot
data['indexext'] = format.indexExt
data['isXML'] = format.indexExt.startswith('.x')
data['formatdir'] = format.formatDirName
data['extmap'] = format.extMap
data['tests'] = self.alltests
data['suites'] = self.suites
data['flagInfo'] = self.flags
# Generate indices:
# Reftest indices
self.__writeTemplate('reftest-toc.tmpl', data,
format.dest('reftest-toc%s' % format.indexExt))
self.__writeTemplate('reftest.tmpl', data,
format.dest('reftest.list'))
# Table of Contents
sectionlist = sorted(self.sections.values())
if self.splitChapter:
# Split sectionlist into chapters
chapters = []
lastChapNum = '$' # some nonmatching initial char
chap = None
for section in sectionlist:
if (section.title and (section.chapterNum() != lastChapNum)):
lastChapNum = section.chapterNum()
chap = section
chap.sections = []
chap.testcount = 0
chap.testnames = set()
chapters.append(chap)
chap.testnames.update([test['name'] for test in section.tests])
chap.testcount = len(chap.testnames)
chap.sections.append(section)
# Generate main toc
data['chapters'] = chapters
self.__writeTemplate('chapter-toc.tmpl', data,
format.dest('toc%s' % format.indexExt))
del data['chapters']
# Generate chapter tocs
for chap in chapters:
data['chaptertitle'] = chap.title
data['testcount'] = chap.testcount
data['sections'] = chap.sections
self.__writeTemplate('test-toc.tmpl', data, format.dest('chapter-%s%s' \
% (chap.numstr, format.indexExt)))
else: # not splitChapter
data['chapters'] = sectionlist
self.__writeTemplate('test-toc.tmpl', data,
format.dest('toc%s' % format.indexExt))
del data['chapters']
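# Illustrative note, not part of the original file: with splitChapter=True,
# writeIndex() emits one main toc page via 'chapter-toc.tmpl' plus a
# chapter-<numstr> page per chapter via 'test-toc.tmpl'; with
# splitChapter=False it emits a single toc page built from every section.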
|
yoer/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/tests/i18n/patterns/tests.py
|
57
|
from __future__ import unicode_literals
import os
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse, clear_url_caches
from django.test import TestCase
from django.test.utils import override_settings
from django.template import Template, Context
from django.utils._os import upath
from django.utils import translation
@override_settings(
USE_I18N=True,
LOCALE_PATHS=(
os.path.join(os.path.dirname(upath(__file__)), 'locale'),
),
TEMPLATE_DIRS=(
os.path.join(os.path.dirname(upath(__file__)), 'templates'),
),
LANGUAGE_CODE='en-us',
LANGUAGES=(
('nl', 'Dutch'),
('en', 'English'),
('pt-br', 'Brazilian Portuguese'),
),
MIDDLEWARE_CLASSES=(
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
),
)
class URLTestCaseBase(TestCase):
"""
TestCase base-class for the URL tests.
"""
urls = 'i18n.patterns.urls.default'
def setUp(self):
# Make sure the cache is empty before we are doing our tests.
clear_url_caches()
def tearDown(self):
# Make sure we will leave an empty cache for other testcases.
clear_url_caches()
class URLPrefixTests(URLTestCaseBase):
"""
    Tests whether `i18n_patterns` adds the language prefix correctly.
"""
def test_not_prefixed(self):
with translation.override('en'):
self.assertEqual(reverse('not-prefixed'), '/not-prefixed/')
self.assertEqual(reverse('not-prefixed-included-url'), '/not-prefixed-include/foo/')
with translation.override('nl'):
self.assertEqual(reverse('not-prefixed'), '/not-prefixed/')
self.assertEqual(reverse('not-prefixed-included-url'), '/not-prefixed-include/foo/')
def test_prefixed(self):
with translation.override('en'):
self.assertEqual(reverse('prefixed'), '/en/prefixed/')
with translation.override('nl'):
self.assertEqual(reverse('prefixed'), '/nl/prefixed/')
@override_settings(ROOT_URLCONF='i18n.patterns.urls.wrong')
def test_invalid_prefix_use(self):
self.assertRaises(ImproperlyConfigured, lambda: reverse('account:register'))
class URLDisabledTests(URLTestCaseBase):
urls = 'i18n.patterns.urls.disabled'
@override_settings(USE_I18N=False)
def test_prefixed_i18n_disabled(self):
with translation.override('en'):
self.assertEqual(reverse('prefixed'), '/prefixed/')
with translation.override('nl'):
self.assertEqual(reverse('prefixed'), '/prefixed/')
class PathUnusedTests(URLTestCaseBase):
"""
    Check that if no i18n_patterns is used in the root URLconf, then no
    language activation happens based on the URL prefix.
"""
urls = 'i18n.patterns.urls.path_unused'
def test_no_lang_activate(self):
response = self.client.get('/nl/foo/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['content-language'], 'en')
self.assertEqual(response.context['LANGUAGE_CODE'], 'en')
class URLTranslationTests(URLTestCaseBase):
"""
Tests if the pattern-strings are translated correctly (within the
`i18n_patterns` and the normal `patterns` function).
"""
def test_no_prefix_translated(self):
with translation.override('en'):
self.assertEqual(reverse('no-prefix-translated'), '/translated/')
self.assertEqual(reverse('no-prefix-translated-slug', kwargs={'slug': 'yeah'}), '/translated/yeah/')
with translation.override('nl'):
self.assertEqual(reverse('no-prefix-translated'), '/vertaald/')
self.assertEqual(reverse('no-prefix-translated-slug', kwargs={'slug': 'yeah'}), '/vertaald/yeah/')
with translation.override('pt-br'):
self.assertEqual(reverse('no-prefix-translated'), '/traduzidos/')
self.assertEqual(reverse('no-prefix-translated-slug', kwargs={'slug': 'yeah'}), '/traduzidos/yeah/')
def test_users_url(self):
with translation.override('en'):
self.assertEqual(reverse('users'), '/en/users/')
with translation.override('nl'):
self.assertEqual(reverse('users'), '/nl/gebruikers/')
self.assertEqual(reverse('prefixed_xml'), '/nl/prefixed.xml')
with translation.override('pt-br'):
self.assertEqual(reverse('users'), '/pt-br/usuarios/')
class URLNamespaceTests(URLTestCaseBase):
"""
Tests if the translations are still working within namespaces.
"""
def test_account_register(self):
with translation.override('en'):
self.assertEqual(reverse('account:register'), '/en/account/register/')
with translation.override('nl'):
self.assertEqual(reverse('account:register'), '/nl/profiel/registeren/')
class URLRedirectTests(URLTestCaseBase):
"""
Tests if the user gets redirected to the right URL when there is no
language-prefix in the request URL.
"""
def test_no_prefix_response(self):
response = self.client.get('/not-prefixed/')
self.assertEqual(response.status_code, 200)
def test_en_redirect(self):
response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='en')
self.assertRedirects(response, '/en/account/register/')
response = self.client.get(response['location'])
self.assertEqual(response.status_code, 200)
def test_en_redirect_wrong_url(self):
response = self.client.get('/profiel/registeren/', HTTP_ACCEPT_LANGUAGE='en')
self.assertEqual(response.status_code, 404)
def test_nl_redirect(self):
response = self.client.get('/profiel/registeren/', HTTP_ACCEPT_LANGUAGE='nl')
self.assertRedirects(response, '/nl/profiel/registeren/')
response = self.client.get(response['location'])
self.assertEqual(response.status_code, 200)
def test_nl_redirect_wrong_url(self):
response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='nl')
self.assertEqual(response.status_code, 404)
def test_pt_br_redirect(self):
response = self.client.get('/conta/registre-se/', HTTP_ACCEPT_LANGUAGE='pt-br')
self.assertRedirects(response, '/pt-br/conta/registre-se/')
response = self.client.get(response['location'])
self.assertEqual(response.status_code, 200)
def test_pl_pl_redirect(self):
# language from outside of the supported LANGUAGES list
response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='pl-pl')
self.assertRedirects(response, '/en/account/register/')
response = self.client.get(response['location'])
self.assertEqual(response.status_code, 200)
class URLVaryAcceptLanguageTests(URLTestCaseBase):
"""
Tests that 'Accept-Language' is not added to the Vary header when using
prefixed URLs.
"""
def test_no_prefix_response(self):
response = self.client.get('/not-prefixed/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get('Vary'), 'Accept-Language')
def test_en_redirect(self):
response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='en')
self.assertRedirects(response, '/en/account/register/')
self.assertFalse(response.get('Vary'))
response = self.client.get(response['location'])
self.assertEqual(response.status_code, 200)
self.assertFalse(response.get('Vary'))
class URLRedirectWithoutTrailingSlashTests(URLTestCaseBase):
"""
Tests the redirect when the requested URL doesn't end with a slash
(`settings.APPEND_SLASH=True`).
"""
def test_not_prefixed_redirect(self):
response = self.client.get('/not-prefixed', HTTP_ACCEPT_LANGUAGE='en')
self.assertRedirects(response, '/not-prefixed/', 301)
def test_en_redirect(self):
response = self.client.get('/account/register', HTTP_ACCEPT_LANGUAGE='en', follow=True)
# target status code of 301 because of CommonMiddleware redirecting
self.assertIn(('http://testserver/en/account/register/', 301), response.redirect_chain)
self.assertRedirects(response, '/en/account/register/', 302)
response = self.client.get('/prefixed.xml', HTTP_ACCEPT_LANGUAGE='en', follow=True)
self.assertRedirects(response, '/en/prefixed.xml', 302)
class URLRedirectWithoutTrailingSlashSettingTests(URLTestCaseBase):
"""
Tests the redirect when the requested URL doesn't end with a slash
(`settings.APPEND_SLASH=False`).
"""
@override_settings(APPEND_SLASH=False)
def test_not_prefixed_redirect(self):
response = self.client.get('/not-prefixed', HTTP_ACCEPT_LANGUAGE='en')
self.assertEqual(response.status_code, 404)
@override_settings(APPEND_SLASH=False)
def test_en_redirect(self):
response = self.client.get('/account/register-without-slash', HTTP_ACCEPT_LANGUAGE='en')
self.assertRedirects(response, '/en/account/register-without-slash', 302)
response = self.client.get(response['location'])
self.assertEqual(response.status_code, 200)
class URLResponseTests(URLTestCaseBase):
"""
Tests if the response has the right language-code.
"""
def test_not_prefixed_with_prefix(self):
response = self.client.get('/en/not-prefixed/')
self.assertEqual(response.status_code, 404)
def test_en_url(self):
response = self.client.get('/en/account/register/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['content-language'], 'en')
self.assertEqual(response.context['LANGUAGE_CODE'], 'en')
def test_nl_url(self):
response = self.client.get('/nl/profiel/registeren/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['content-language'], 'nl')
self.assertEqual(response.context['LANGUAGE_CODE'], 'nl')
def test_wrong_en_prefix(self):
response = self.client.get('/en/profiel/registeren/')
self.assertEqual(response.status_code, 404)
def test_wrong_nl_prefix(self):
response = self.client.get('/nl/account/register/')
self.assertEqual(response.status_code, 404)
def test_pt_br_url(self):
response = self.client.get('/pt-br/conta/registre-se/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['content-language'], 'pt-br')
self.assertEqual(response.context['LANGUAGE_CODE'], 'pt-br')
class URLTagTests(URLTestCaseBase):
"""
Test if the language tag works.
"""
def test_strings_only(self):
t = Template("""{% load i18n %}
{% language 'nl' %}{% url 'no-prefix-translated' %}{% endlanguage %}
{% language 'pt-br' %}{% url 'no-prefix-translated' %}{% endlanguage %}""")
self.assertEqual(t.render(Context({})).strip().split(),
['/vertaald/', '/traduzidos/'])
def test_context(self):
ctx = Context({'lang1':'nl', 'lang2':'pt-br'})
tpl = Template("""{% load i18n %}
{% language lang1 %}{% url 'no-prefix-translated' %}{% endlanguage %}
{% language lang2 %}{% url 'no-prefix-translated' %}{% endlanguage %}""")
self.assertEqual(tpl.render(ctx).strip().split(),
['/vertaald/', '/traduzidos/'])
def test_args(self):
tpl = Template("""{% load i18n %}
{% language 'nl' %}{% url 'no-prefix-translated-slug' 'apo' %}{% endlanguage %}
{% language 'pt-br' %}{% url 'no-prefix-translated-slug' 'apo' %}{% endlanguage %}""")
self.assertEqual(tpl.render(Context({})).strip().split(),
['/vertaald/apo/', '/traduzidos/apo/'])
def test_kwargs(self):
tpl = Template("""{% load i18n %}
{% language 'nl' %}{% url 'no-prefix-translated-slug' slug='apo' %}{% endlanguage %}
{% language 'pt-br' %}{% url 'no-prefix-translated-slug' slug='apo' %}{% endlanguage %}""")
self.assertEqual(tpl.render(Context({})).strip().split(),
['/vertaald/apo/', '/traduzidos/apo/'])
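# Illustrative urlconf sketch, not part of this file: the patterns these
# tests exercise live in i18n.patterns.urls.default (not shown here). In
# the Django 1.6 style, a minimal equivalent might look like:
#
#     from django.conf.urls import url
#     from django.conf.urls.i18n import i18n_patterns
#     from django.utils.translation import ugettext_lazy as _
#
#     urlpatterns = i18n_patterns('',
#         url(_(r'^account/register/$'), view, name='register'),
#     )
#
# which is what makes reversing the name yield '/en/account/register/'
# under translation.override('en').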
|
gramps-project/gramps
|
refs/heads/master
|
gramps/gen/datehandler/_date_hu.py
|
5
|
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2004-2006 Donald N. Allingham
# Copyright (C) 2015 Lajos Nemeséri <nemeseril@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Hungarian-specific classes for parsing and displaying dates.
"""
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
import re
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from ..lib.date import Date
from ._dateparser import DateParser
from ._datedisplay import DateDisplay
from ._datehandler import register_datehandler
#-------------------------------------------------------------------------
#
# Hungarian parser
#
#
#-------------------------------------------------------------------------
class DateParserHU(DateParser):
month_to_int = DateParser.month_to_int
month_to_int["-"]=0 #to make the Zero month to work
month_to_int["január"] = 1
month_to_int["jan"] = 1
month_to_int["jan."] = 1
month_to_int["I"] = 1
# month_to_int["i"] = 1
month_to_int["február"] = 2
month_to_int["feb"] = 2
month_to_int["feb."] = 2
month_to_int["II"] = 2
# month_to_int["ii"] = 2
month_to_int["március"] = 3
month_to_int["márc"] = 3
month_to_int["márc."] = 3
month_to_int["III"] = 3
# month_to_int["iii"] = 3
month_to_int["április"] = 4
month_to_int["ápr"] = 4
month_to_int["ápr."] = 4
month_to_int["IV"] = 4
# month_to_int["iv"] = 4
month_to_int["május"] = 5
month_to_int["máj"] = 5
month_to_int["máj."] = 5
month_to_int["V"] = 5
# month_to_int["v"] = 5
month_to_int["június"] = 6
month_to_int["jún"] = 6
month_to_int["jún."] = 6
month_to_int["VI"] = 6
# month_to_int["vi"] = 6
month_to_int["július"] = 7
month_to_int["júl"] = 7
month_to_int["júl."] = 7
month_to_int["VII"] = 7
# month_to_int["vii"] = 7
month_to_int["augusztus"] = 8
month_to_int["aug"] = 8
month_to_int["aug."] = 8
month_to_int["VIII"] = 8
# month_to_int["viii"] = 8
month_to_int["szeptember"] = 9
month_to_int["szept"] = 9
month_to_int["szept."] = 9
month_to_int["IX"] = 9
# month_to_int["ix"] = 9
month_to_int["október"] = 10
month_to_int["okt"] = 10
month_to_int["okt."] = 10
month_to_int["X"] = 10
# month_to_int["x"] = 10
month_to_int["november"] = 11
month_to_int["nov"] = 11
month_to_int["nov."] = 11
month_to_int["XI"] = 11
# month_to_int["xi"] = 11
month_to_int["december"] = 12
month_to_int["dec"] = 12
month_to_int["dec."] = 12
month_to_int["XII"] = 12
# month_to_int["xii"] = 12
#-----------------------------------------------------------------------
#
# Alternative and latin names - not verified
#
#-----------------------------------------------------------------------
# Other common latin names
# month_to_int["januaris"] = 01
# month_to_int["januarii"] = 01
# month_to_int["januarius"] = 01
# month_to_int["februaris"] = 02
# month_to_int["februarii"] = 02
# month_to_int["februarius"] = 02
# month_to_int["martii"] = 03
# month_to_int["martius"] = 03
# month_to_int["aprilis"] = 04
# month_to_int["maius"] = 05
# month_to_int["maii"] = 05
# month_to_int["junius"] = 06
# month_to_int["junii"] = 06
# month_to_int["julius"] = 07
# month_to_int["julii"] = 07
# month_to_int["augustus"] = 08
# month_to_int["augusti"] = 08
# month_to_int["septembris"] = 09
# month_to_int["7bris"] = 09
# month_to_int["september"] = 09
# month_to_int["october"] = 10
# month_to_int["octobris"] = 10
# month_to_int["8bris"] = 10
# month_to_int["novembris"] = 11
# month_to_int["9bris"] = 11
# month_to_int["november"] = 11
# month_to_int["decembris"] = 12
# month_to_int["10bris"] = 12
# month_to_int["xbris"] = 12
# month_to_int["december"] = 12
# old Hungarian names
# month_to_int["Boldogasszony hava"] = 01
# month_to_int["Fergeteg hava"] = 01
# month_to_int["Böjtelő hava"] = 02
# month_to_int["Jégbontó hava"] = 02
# month_to_int["Böjtmás hava"] = 03
# month_to_int["Kikelet hava"] = 03
# month_to_int["Szent György hava"] = 04
# month_to_int["Szelek hava"] = 04
# month_to_int["Pünkösd hava"] = 05
# month_to_int["Ígéret hava"] = 05
# month_to_int["Szent Iván hava"] = 06
# month_to_int["Napisten hava"] = 06
# month_to_int["Szent Jakab hava"] = 07
# month_to_int["Áldás hava"] = 07
# month_to_int["Kisasszony hava"] = 08
# month_to_int["Újkenyér hava"] = 08
# month_to_int["Szent Mihály hava"] = 09
# month_to_int["Földanya hava"] = 09
# month_to_int["Mindszent hava"] = 10
# month_to_int["Magvető hava"] = 10
# month_to_int["Szent András hava"] = 11
# month_to_int["Enyészet hava"] = 11
# month_to_int["Karácsony hava"] = 12
# month_to_int["Álom hava"] = 12
modifier_after_to_int={
'előtt' : Date.MOD_BEFORE,
'körül' : Date.MOD_ABOUT,
'után' : Date.MOD_AFTER,
}
quality_to_int = {
'becsült' : Date.QUAL_ESTIMATED,
'hozzávetőleg' : Date.QUAL_ESTIMATED,
'becs.' : Date.QUAL_ESTIMATED,
'számított' : Date.QUAL_CALCULATED,
'körülbelül' : Date.QUAL_ESTIMATED,
'számolt' : Date.QUAL_CALCULATED,
'szám.' : Date.QUAL_CALCULATED,
}
bce = ["időszámításunk előtt", "időszámítás előtt", "i. e.",
"Krisztus előtt", "Krisztus előtti", "Kr. e."] + DateParser.bce
calendar_to_int = {
'Gergely' : Date.CAL_GREGORIAN,
'Julián' : Date.CAL_JULIAN,
'héber' : Date.CAL_HEBREW,
'iszlám' : Date.CAL_ISLAMIC,
'francia köztársasági' : Date.CAL_FRENCH,
'perzsa' : Date.CAL_PERSIAN,
'svéd' : Date.CAL_SWEDISH,
}
def init_strings(self):
# Compiles regular expression strings for matching dates
DateParser.init_strings(self)
self._numeric = re.compile(
r"((\d+)[/\.])?\s*((\d+)[/\.])?\s*(\d+)[/\. ]?$")
# this next RE has the (possibly-slashed) year at the string's start
self._text2 = re.compile(r'((\d+)(/\d+)?\.)?\s+?%s\.?\s*(\d+\.)?\s*$'
% self._mon_str, re.IGNORECASE)
_span_1 = [r'-tó\(ő\)l', '-tól', '-től']
_span_2 = ['-ig']
_range_1 = ['és']
_range_2 = ['között']
self._span = re.compile(r"(?P<start>.+)(%s)\s+(?P<stop>.+)(%s)" %
('|'.join(_span_1), '|'.join(_span_2)),
re.IGNORECASE)
self._range = re.compile(r"(?P<start>.+)\s+(%s)\s+(?P<stop>.+)\s+(%s)"
% ('|'.join(_range_1), '|'.join(_range_2)),
re.IGNORECASE)
def _get_int(self, val):
"""
Convert the string to an integer if the value is not None. If the
value is None, a zero is returned
"""
if val is None:
return 0
else:
return int(val.replace('.', ''))
#-------------------------------------------------------------------------
#
# Hungarian display
#
#-------------------------------------------------------------------------
class DateDisplayHU(DateDisplay):
"""
Hungarian language date display class.
"""
_bce_str = "i. e. %s"
roman_months=(
"-.", "I.", "II.", "III.", "IV.", "V.", "VI.",
"VII.", "VIII.", "IX.", "X.", "XI.", "XII."
)
    formats = (
        "ÉÉÉÉ-HH-NN (ISO)",               # 0
        "Alapértelmezett éééé. hh. nn.",  # 1
        "Év hónap nap",                   # 2: year, full month name, day
        "Év hó nap",                      # 3: year, short month name, day
        "Év római h.sz. nap",             # 4: year, Roman number, day
    )
# this definition must agree with its "_display_calendar" method
display = DateDisplay.display_formatted
def _display_calendar(self, date_val, long_months, short_months = None,
inflect=""):
# this must agree with its locale-specific "formats" definition
year = self._slash_year(date_val[2], date_val[3])
if short_months is None:
# Let the short formats work the same as long formats
short_months = long_months
if self.format == 0:
return self.display_iso(date_val)
elif self.format == 1:
            # default Hungarian form
            if date_val[3]:
                return self.display_iso(date_val)
            else:
                if date_val[0] == 0:  # no day
                    if date_val[1] == 0:  # no month -> year only
                        value = "%s" % year
                    else:  # no day -> year, month
                        value = "%s. %02d." % (year, date_val[1])
                else:
                    value = "%s. %02d. %02d." % (year, date_val[1], date_val[0])
        elif self.format == 2:
            # year, full month name, day
            if date_val[0] == 0:
                if date_val[1] == 0:
                    value = "%s" % year
                else:  # no day -> year, month
                    value = "%s. %s" % (year, self.long_months[date_val[1]])
            else:
                if date_val[1] == 0:
                    # use '-' to mark a missing month
                    value = "%s. %s %02d." % (year, '-', date_val[0])
                else:
                    value = "%s. %s %02d." % (year,
                                              self.long_months[date_val[1]],
                                              date_val[0])
        elif self.format == 3:
            # year, short month name, day
            if date_val[0] == 0:
                if date_val[1] == 0:
                    value = "%s" % year
                else:  # no day -> year, month
                    value = "%s. %s" % (year, self.short_months[date_val[1]])
            else:
                if date_val[1] == 0:
                    # use '-.' to mark a missing month
                    value = "%s. %s %02d." % (year, '-.', date_val[0])
                else:
                    value = "%s. %s %02d." % (year,
                                              self.short_months[date_val[1]],
                                              date_val[0])
        elif self.format == 4:
            # year, Roman number, day
            if date_val[0] == 0:
                if date_val[1] == 0:
                    value = "%s" % year
                else:  # no day -> year, month
                    value = "%s. %s" % (year, self.roman_months[date_val[1]])
            else:
                value = "%s. %s %02d." % (year,
                                          self.roman_months[date_val[1]],
                                          date_val[0])
else:
# day month_name year
value = self.dd_dformat04(date_val, inflect, long_months)
if date_val[2] < 0:
# TODO fix BUG 7064: non-Gregorian calendars wrongly use BCE notation for negative dates
return self._bce_str % value
else:
return value
#-------------------------------------------------------------------------
#
# Register classes
#
#-------------------------------------------------------------------------
register_datehandler(
('hu_HU', 'hu', 'hungarian', 'Hungarian', 'magyar', ('%Y-%m-%d',)),
DateParserHU, DateDisplayHU)
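# Illustrative output sketch, not part of the original file: for a plain
# Gregorian date 2004-02-11, _display_calendar() above yields roughly:
#   format 1: "2004. 02. 11."
#   format 2: "2004. február 11."
#   format 3: "2004. <short month> 11."  (name taken from short_months)
#   format 4: "2004. II. 11."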
|
colab/colab-superarchives-plugin
|
refs/heads/master
|
src/colab_superarchives/widgets/dashboard_latest_threads.py
|
1
|
from colab.widgets.widget_manager import Widget
from colab_superarchives.utils.collaborations import get_user_threads
from colab_superarchives.utils import mailman
from colab_superarchives.models import Thread
class DashboardLatestThreadsWidget(Widget):
name = 'latest threads'
template = 'widgets/dashboard_latest_threads.html'
def generate_content(self, **kwargs):
all_threads = Thread.objects.all()
lists_for_user = []
if kwargs['context']['user'].is_authenticated():
lists_for_user = mailman.get_user_mailinglists(
kwargs['context']['user'])
kwargs['context']['latest_threads'] = get_user_threads(
all_threads, lists_for_user, lambda t: t)[:10]
return super(DashboardLatestThreadsWidget,
self).generate_content(**kwargs)
|
xiangel/hue
|
refs/heads/master
|
desktop/core/ext-py/tablib-0.10.0/tablib/packages/xlrd/licences.py
|
153
|
# -*- coding: cp1252 -*-
"""
Portions copyright © 2005-2009, Stephen John Machin, Lingfo Pty Ltd
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. None of the names of Stephen John Machin, Lingfo Pty Ltd and any
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
/*-
* Copyright (c) 2001 David Giffin.
* All rights reserved.
*
 * Based on the Java version: Andrew Khan Copyright (c) 2000.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. All advertising materials mentioning features or use of this
* software must display the following acknowledgment:
* "This product includes software developed by
* David Giffin <david@giffin.org>."
*
* 4. Redistributions of any form whatsoever must retain the following
* acknowledgment:
* "This product includes software developed by
* David Giffin <david@giffin.org>."
*
* THIS SOFTWARE IS PROVIDED BY DAVID GIFFIN ``AS IS'' AND ANY
* EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DAVID GIFFIN OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
"""
|
commonsearch/cosr-back
|
refs/heads/master
|
plugins/webgraph.py
|
1
|
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import shutil
from pyspark.sql import types as SparkTypes
from cosrlib.url import URL
from cosrlib.spark import createDataFrame, sql, SparkPlugin
from cosrlib import re, py2_long
from urlserver.id_generator import _fast_make_domain_id
_RE_STRIP_FRAGMENT = re.compile(r"#.*")
class WebGraphPlugin(SparkPlugin):
""" Base class for WebGraph plugins """
include_external = True
include_internal = True
def hook_spark_pipeline_init(self, sc, sqlc, schema, indexer):
if self.include_external:
schema.append(
SparkTypes.StructField("external_links", SparkTypes.ArrayType(SparkTypes.StructType([
SparkTypes.StructField("href", SparkTypes.StringType(), nullable=False),
SparkTypes.StructField("text", SparkTypes.StringType(), nullable=True)
])), nullable=True)
)
if self.include_internal:
schema.append(
SparkTypes.StructField("internal_links", SparkTypes.ArrayType(SparkTypes.StructType([
SparkTypes.StructField("path", SparkTypes.StringType(), nullable=False),
SparkTypes.StructField("text", SparkTypes.StringType(), nullable=True)
])), nullable=True)
)
def hook_document_post_index(self, document, metadata):
""" Collect all unique normalized external URLs """
if self.include_external:
seen = set()
for link in document.get_external_hyperlinks(exclude_nofollow=self.exclude_nofollow):
key = (link["href"].normalized, link["text"])
if key not in seen:
seen.add(key)
metadata.setdefault("external_links", [])
metadata["external_links"].append(key)
if self.include_internal:
seen = set()
metadata["internal_links"] = []
for link in document.get_internal_hyperlinks(): # exclude_nofollow=self.exclude_nofollow):
key = (_RE_STRIP_FRAGMENT.sub("", link["path"]), link["text"])
if key not in seen:
seen.add(key)
metadata.setdefault("internal_links", [])
metadata["internal_links"].append(key)
def init(self):
self.exclude_nofollow = (self.args.get("include_nofollow") != "1")
if self.args.get("output"):
if os.path.isdir(os.path.join(self.args["output"], "edges")):
shutil.rmtree(os.path.join(self.args["output"], "edges"))
if os.path.isdir(os.path.join(self.args["output"], "vertices")):
shutil.rmtree(os.path.join(self.args["output"], "vertices"))
class DomainToDomain(WebGraphPlugin):
""" Saves a graph of domain=>domain links in text format """
include_internal = False
def hook_spark_pipeline_action(self, sc, sqlc, df, indexer):
# Get all unique (host1 => host2) pairs
domain_pairs = sql(sqlc, """
SELECT parse_url(url, "HOST") as d1, parse_url(CONCAT("http://", link), "HOST") as d2
FROM (
SELECT url, EXPLODE(external_links.href) as link FROM df
) as pairs
""", {"df": df}).distinct()
# Format as csv
lines = sql(sqlc, """
SELECT CONCAT(d1, " ", d2) as r
FROM pairs
""", {"pairs": domain_pairs})
self.save_dataframe(lines, "text")
return True
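# Illustrative output sketch (hypothetical domains, not from the source):
# each line of the saved text file is one space-separated source/target
# host pair, e.g.
#   example.com wikipedia.org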
class DomainToDomainParquet(WebGraphPlugin):
""" Saves a graph of domain=>domain links in Apache Parquet format """
include_internal = False
def hook_spark_pipeline_action(self, sc, sqlc, df, indexer):
self.save_vertex_graph(sqlc, df)
self.save_edge_graph(sqlc, df)
return True
def save_vertex_graph(self, sqlc, df):
""" Transforms a document metadata DataFrame into a Parquet dump of the vertices of the webgraph """
vertex_graph_schema = SparkTypes.StructType([
SparkTypes.StructField("id", SparkTypes.LongType(), nullable=False),
SparkTypes.StructField("domain", SparkTypes.StringType(), nullable=False)
])
# TODO ?!
if self.args.get("shuffle_partitions"):
sqlc.setConf("spark.sql.shuffle.partitions", self.args["shuffle_partitions"])
# We collect all unique domains from the page URLs & destination of all external links
d1_df = sql(sqlc, """
SELECT parse_url(url, "HOST") as domain from df
""", {"df": df}).distinct()
d2_df = sql(sqlc, """
SELECT parse_url(CONCAT("http://", link), "HOST") as domain
FROM (
SELECT EXPLODE(external_links.href) as link FROM df
) as pairs
""", {"df": df})
all_domains_df = d1_df.unionAll(d2_df).distinct()
def iter_domain(record):
""" Transforms Row(domain=www.example.com) into tuple([int64 ID], "example.com") """
domain = record["domain"]
if not domain or not domain.strip():
return []
name = URL("http://" + domain).normalized_domain
try:
_id = _fast_make_domain_id(name)
except Exception: # pylint: disable=broad-except
return []
return [(py2_long(_id), str(name))]
rdd_domains = all_domains_df.rdd.flatMap(iter_domain)
vertex_df = createDataFrame(sqlc, rdd_domains, vertex_graph_schema).distinct()
coalesce = int(self.args.get("coalesce_vertices") or self.args.get("coalesce", 1) or 0)
if coalesce > 0:
vertex_df = vertex_df.coalesce(coalesce)
vertex_df.write.parquet(os.path.join(self.args["output"], "vertices"))
def save_edge_graph(self, sqlc, df):
""" Transforms a document metadata DataFrame into a Parquet dump of the edges of the webgraph """
edge_graph_schema = SparkTypes.StructType([
SparkTypes.StructField("src", SparkTypes.LongType(), nullable=False),
SparkTypes.StructField("dst", SparkTypes.LongType(), nullable=False),
# Sum of weights must be 1
# This field will automatically be added by the SQL query
# SparkTypes.StructField("weight", SparkTypes.FloatType(), nullable=True)
])
# TODO?!
if self.args.get("shuffle_partitions"):
sqlc.setConf("spark.sql.shuffle.partitions", self.args["shuffle_partitions"])
# Get all unique (host1 => host2) pairs
new_df = sql(sqlc, """
SELECT parse_url(url, "HOST") as d1, parse_url(CONCAT("http://", link), "HOST") as d2
FROM (
SELECT url, EXPLODE(external_links.href) as link FROM df
) as pairs
""", {"df": df}).distinct()
def iter_links_domain(record):
""" Transforms Row(d1="x.com", d2="y.com") into tuple([int64 ID], [int64 ID]) """
d1 = record["d1"]
d2 = record["d2"]
if not d1 or not d2:
return []
try:
from_domain = _fast_make_domain_id(d1)
to_domain = _fast_make_domain_id(d2)
except Exception: # pylint: disable=broad-except
return []
if from_domain == to_domain:
return []
else:
return [(py2_long(from_domain), py2_long(to_domain))]
rdd_couples = new_df.rdd.flatMap(iter_links_domain)
edge_df = createDataFrame(sqlc, rdd_couples, edge_graph_schema).distinct()
# After collecting all the unique (from_id, to_id) pairs, we add the weight of every edge
# The current algorithm is naive: edge weight is equally split between all the links, with
# the sum of all weights for a source domain always = 1.
weights_df = sql(sqlc, """
SELECT src id, cast(1 / count(*) as float) weight
FROM edges
GROUP BY src
""", {"edges": edge_df})
weighted_edge_df = sql(sqlc, """
SELECT cast(src as long) src, cast(dst as long) dst, cast(weights.weight as float) weight
FROM edges
JOIN weights on edges.src = weights.id
""", {"edges": edge_df, "weights": weights_df})
coalesce = int(self.args.get("coalesce_edges") or self.args.get("coalesce", 1) or 0)
if coalesce > 0:
weighted_edge_df = weighted_edge_df.coalesce(coalesce)
weighted_edge_df.write.parquet(os.path.join(self.args["output"], "edges"))
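# Illustrative weight example, not part of the original file: a source
# domain with four distinct outgoing edges gets weight 1/4 = 0.25 on each
# edge in weighted_edge_df, so the weights of its outgoing edges sum to 1
# as the comment above requires.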
|
denys-duchier/django
|
refs/heads/master
|
tests/inline_formsets/__init__.py
|
12133432
| |
Quikling/gpdb
|
refs/heads/master
|
src/test/tinc/tincrepo/mpp/gpdb/lib/models/sql/__init__.py
|
12133432
| |
mttr/django
|
refs/heads/master
|
django/core/cache/backends/__init__.py
|
12133432
| |
willysbrewing/willys_website
|
refs/heads/main
|
willys_website/core/migrations/__init__.py
|
12133432
| |
AO-StreetArt/FinalFlowChartExample
|
refs/heads/master
|
src/__init__.py
|
12133432
| |
brianjgeiger/osf.io
|
refs/heads/develop
|
website/registries/__init__.py
|
12133432
| |
tbeadle/django
|
refs/heads/master
|
django/urls/exceptions.py
|
133
|
from __future__ import unicode_literals
from django.http import Http404
class Resolver404(Http404):
pass
class NoReverseMatch(Exception):
pass
|
krvss/django-social-auth
|
refs/heads/master
|
social_auth/backends/contrib/orkut.py
|
5
|
"""
Orkut OAuth support.
This contribution adds support for Orkut OAuth service. The scope is
limited to http://orkut.gmodules.com/social/ by default, but can be
extended with ORKUT_EXTRA_SCOPE on project settings. Also name, display
name and emails are the default requested user data, but extra values
can be specified by defining ORKUT_EXTRA_DATA setting.
OAuth settings ORKUT_CONSUMER_KEY and ORKUT_CONSUMER_SECRET are needed
to enable this service support.
"""
from django.utils import simplejson
from social_auth.utils import setting, dsa_urlopen
from social_auth.backends import OAuthBackend
from social_auth.backends.google import BaseGoogleOAuth
# Orkut configuration
# default scope, specify extra scope in settings as in:
# ORKUT_EXTRA_SCOPE = ['...']
ORKUT_SCOPE = ['http://orkut.gmodules.com/social/']
ORKUT_REST_ENDPOINT = 'http://www.orkut.com/social/rpc'
ORKUT_DEFAULT_DATA = 'name,displayName,emails'
class OrkutBackend(OAuthBackend):
"""Orkut OAuth authentication backend"""
name = 'orkut'
def get_user_details(self, response):
"""Return user details from Orkut account"""
try:
emails = response['emails'][0]['value']
except (KeyError, IndexError):
emails = ''
return {'username': response['displayName'],
'email': emails,
'fullname': response['displayName'],
'first_name': response['name']['givenName'],
'last_name': response['name']['familyName']}
class OrkutAuth(BaseGoogleOAuth):
"""Orkut OAuth authentication mechanism"""
AUTH_BACKEND = OrkutBackend
SETTINGS_KEY_NAME = 'ORKUT_CONSUMER_KEY'
SETTINGS_SECRET_NAME = 'ORKUT_CONSUMER_SECRET'
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from Orkut service"""
fields = ORKUT_DEFAULT_DATA
if setting('ORKUT_EXTRA_DATA'):
fields += ',' + setting('ORKUT_EXTRA_DATA')
scope = ORKUT_SCOPE + setting('ORKUT_EXTRA_SCOPE', [])
params = {'method': 'people.get',
'id': 'myself',
'userId': '@me',
'groupId': '@self',
'fields': fields,
'scope': ' '.join(scope)}
request = self.oauth_request(access_token, ORKUT_REST_ENDPOINT, params)
response = dsa_urlopen(request.to_url()).read()
try:
return simplejson.loads(response)['data']
except (ValueError, KeyError):
return None
def oauth_request(self, token, url, extra_params=None):
extra_params = extra_params or {}
scope = ORKUT_SCOPE + setting('ORKUT_EXTRA_SCOPE', [])
extra_params['scope'] = ' '.join(scope)
return super(OrkutAuth, self).oauth_request(token, url, extra_params)
# Backend definition
BACKENDS = {
'orkut': OrkutAuth,
}
|
MarcJoan/django
|
refs/heads/master
|
django/core/urlresolvers.py
|
42
|
"""
This module converts requested URLs to callback view functions.
RegexURLResolver is the main class here. Its resolve() method takes a URL (as
a string) and returns a ResolverMatch object which provides access to all
attributes of the resolved URL match.
"""
from __future__ import unicode_literals
import functools
import re
from importlib import import_module
from threading import local
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.http import Http404
from django.utils import lru_cache, six
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_str, force_text, iri_to_uri
from django.utils.functional import cached_property, lazy
from django.utils.http import RFC3986_SUBDELIMS, urlquote
from django.utils.module_loading import module_has_submodule
from django.utils.regex_helper import normalize
from django.utils.six.moves.urllib.parse import urlsplit, urlunsplit
from django.utils.translation import get_language, override
# SCRIPT_NAME prefixes for each thread are stored here. If there's no entry for
# the current thread (which is the only one we ever access), it is assumed to
# be empty.
_prefixes = local()
# Overridden URLconfs for each thread are stored here.
_urlconfs = local()
class ResolverMatch(object):
def __init__(self, func, args, kwargs, url_name=None, app_names=None, namespaces=None):
self.func = func
self.args = args
self.kwargs = kwargs
self.url_name = url_name
        # If a RegexURLResolver doesn't have a namespace or app_name, it
        # passes in an empty value.
self.app_names = [x for x in app_names if x] if app_names else []
self.app_name = ':'.join(self.app_names)
if namespaces:
self.namespaces = [x for x in namespaces if x]
else:
self.namespaces = []
self.namespace = ':'.join(self.namespaces)
if not hasattr(func, '__name__'):
# A class-based view
self._func_path = '.'.join([func.__class__.__module__, func.__class__.__name__])
else:
# A function-based view
self._func_path = '.'.join([func.__module__, func.__name__])
view_path = url_name or self._func_path
self.view_name = ':'.join(self.namespaces + [view_path])
def __getitem__(self, index):
return (self.func, self.args, self.kwargs)[index]
def __repr__(self):
return "ResolverMatch(func=%s, args=%s, kwargs=%s, url_name=%s, app_names=%s, namespaces=%s)" % (
self._func_path, self.args, self.kwargs, self.url_name, self.app_names, self.namespaces)
class Resolver404(Http404):
pass
class NoReverseMatch(Exception):
pass
@lru_cache.lru_cache(maxsize=None)
def get_callable(lookup_view):
"""
Return a callable corresponding to lookup_view.
* If lookup_view is already a callable, return it.
* If lookup_view is a string import path that can be resolved to a callable,
import that callable and return it, otherwise raise an exception
(ImportError or ViewDoesNotExist).
"""
if callable(lookup_view):
return lookup_view
if not isinstance(lookup_view, six.string_types):
raise ViewDoesNotExist("'%s' is not a callable or a dot-notation path" % lookup_view)
mod_name, func_name = get_mod_func(lookup_view)
if not func_name: # No '.' in lookup_view
raise ImportError("Could not import '%s'. The path must be fully qualified." % lookup_view)
try:
mod = import_module(mod_name)
except ImportError:
parentmod, submod = get_mod_func(mod_name)
if submod and not module_has_submodule(import_module(parentmod), submod):
raise ViewDoesNotExist(
"Could not import '%s'. Parent module %s does not exist." %
(lookup_view, mod_name)
)
else:
raise
else:
try:
view_func = getattr(mod, func_name)
except AttributeError:
raise ViewDoesNotExist(
"Could not import '%s'. View does not exist in module %s." %
(lookup_view, mod_name)
)
else:
if not callable(view_func):
raise ViewDoesNotExist(
"Could not import '%s.%s'. View is not callable." %
(mod_name, func_name)
)
return view_func
@lru_cache.lru_cache(maxsize=None)
def get_resolver(urlconf=None):
if urlconf is None:
from django.conf import settings
urlconf = settings.ROOT_URLCONF
return RegexURLResolver(r'^/', urlconf)
@lru_cache.lru_cache(maxsize=None)
def get_ns_resolver(ns_pattern, resolver):
# Build a namespaced resolver for the given parent urlconf pattern.
# This makes it possible to have captured parameters in the parent
# urlconf pattern.
ns_resolver = RegexURLResolver(ns_pattern, resolver.url_patterns)
return RegexURLResolver(r'^/', [ns_resolver])
def get_mod_func(callback):
# Converts 'django.views.news.stories.story_detail' to
# ['django.views.news.stories', 'story_detail']
try:
dot = callback.rindex('.')
except ValueError:
return callback, ''
return callback[:dot], callback[dot + 1:]
class LocaleRegexProvider(object):
"""
A mixin to provide a default regex property which can vary by active
language.
"""
def __init__(self, regex):
# regex is either a string representing a regular expression, or a
# translatable string (using ugettext_lazy) representing a regular
# expression.
self._regex = regex
self._regex_dict = {}
@property
def regex(self):
"""
Returns a compiled regular expression, depending upon the activated
language-code.
"""
language_code = get_language()
if language_code not in self._regex_dict:
if isinstance(self._regex, six.string_types):
regex = self._regex
else:
regex = force_text(self._regex)
try:
compiled_regex = re.compile(regex, re.UNICODE)
except re.error as e:
raise ImproperlyConfigured(
'"%s" is not a valid regular expression: %s' %
(regex, six.text_type(e)))
self._regex_dict[language_code] = compiled_regex
return self._regex_dict[language_code]
class RegexURLPattern(LocaleRegexProvider):
def __init__(self, regex, callback, default_args=None, name=None):
LocaleRegexProvider.__init__(self, regex)
self.callback = callback # the view
self.default_args = default_args or {}
self.name = name
def __repr__(self):
return force_str('<%s %s %s>' % (self.__class__.__name__, self.name, self.regex.pattern))
def resolve(self, path):
match = self.regex.search(path)
if match:
# If there are any named groups, use those as kwargs, ignoring
# non-named groups. Otherwise, pass all non-named arguments as
# positional arguments.
kwargs = match.groupdict()
if kwargs:
args = ()
else:
args = match.groups()
# In both cases, pass any extra_kwargs as **kwargs.
kwargs.update(self.default_args)
return ResolverMatch(self.callback, args, kwargs, self.name)
@cached_property
def lookup_str(self):
"""
A string that identifies the view (e.g. 'path.to.view_function' or
'path.to.ClassBasedView').
"""
callback = self.callback
if isinstance(callback, functools.partial):
callback = callback.func
if not hasattr(callback, '__name__'):
return callback.__module__ + "." + callback.__class__.__name__
else:
return callback.__module__ + "." + callback.__name__
class RegexURLResolver(LocaleRegexProvider):
def __init__(self, regex, urlconf_name, default_kwargs=None, app_name=None, namespace=None):
LocaleRegexProvider.__init__(self, regex)
# urlconf_name is the dotted Python path to the module defining
# urlpatterns. It may also be an object with an urlpatterns attribute
# or urlpatterns itself.
self.urlconf_name = urlconf_name
self.callback = None
self.default_kwargs = default_kwargs or {}
self.namespace = namespace
self.app_name = app_name
self._reverse_dict = {}
self._namespace_dict = {}
self._app_dict = {}
# set of dotted paths to all functions and classes that are used in
# urlpatterns
self._callback_strs = set()
self._populated = False
def __repr__(self):
if isinstance(self.urlconf_name, list) and len(self.urlconf_name):
# Don't bother to output the whole list, it can be huge
urlconf_repr = '<%s list>' % self.urlconf_name[0].__class__.__name__
else:
urlconf_repr = repr(self.urlconf_name)
return str('<%s %s (%s:%s) %s>') % (
self.__class__.__name__, urlconf_repr, self.app_name,
self.namespace, self.regex.pattern)
def _populate(self):
lookups = MultiValueDict()
namespaces = {}
apps = {}
language_code = get_language()
for pattern in reversed(self.url_patterns):
if isinstance(pattern, RegexURLPattern):
self._callback_strs.add(pattern.lookup_str)
p_pattern = pattern.regex.pattern
if p_pattern.startswith('^'):
p_pattern = p_pattern[1:]
if isinstance(pattern, RegexURLResolver):
if pattern.namespace:
namespaces[pattern.namespace] = (p_pattern, pattern)
if pattern.app_name:
apps.setdefault(pattern.app_name, []).append(pattern.namespace)
else:
parent_pat = pattern.regex.pattern
for name in pattern.reverse_dict:
for matches, pat, defaults in pattern.reverse_dict.getlist(name):
new_matches = normalize(parent_pat + pat)
lookups.appendlist(
name,
(
new_matches,
p_pattern + pat,
dict(defaults, **pattern.default_kwargs),
)
)
for namespace, (prefix, sub_pattern) in pattern.namespace_dict.items():
namespaces[namespace] = (p_pattern + prefix, sub_pattern)
for app_name, namespace_list in pattern.app_dict.items():
apps.setdefault(app_name, []).extend(namespace_list)
self._callback_strs.update(pattern._callback_strs)
else:
bits = normalize(p_pattern)
lookups.appendlist(pattern.callback, (bits, p_pattern, pattern.default_args))
if pattern.name is not None:
lookups.appendlist(pattern.name, (bits, p_pattern, pattern.default_args))
self._reverse_dict[language_code] = lookups
self._namespace_dict[language_code] = namespaces
self._app_dict[language_code] = apps
self._populated = True
@property
def reverse_dict(self):
language_code = get_language()
if language_code not in self._reverse_dict:
self._populate()
return self._reverse_dict[language_code]
@property
def namespace_dict(self):
language_code = get_language()
if language_code not in self._namespace_dict:
self._populate()
return self._namespace_dict[language_code]
@property
def app_dict(self):
language_code = get_language()
if language_code not in self._app_dict:
self._populate()
return self._app_dict[language_code]
def _is_callback(self, name):
if not self._populated:
self._populate()
return name in self._callback_strs
def resolve(self, path):
path = force_text(path) # path may be a reverse_lazy object
tried = []
match = self.regex.search(path)
if match:
new_path = path[match.end():]
for pattern in self.url_patterns:
try:
sub_match = pattern.resolve(new_path)
except Resolver404 as e:
sub_tried = e.args[0].get('tried')
if sub_tried is not None:
tried.extend([pattern] + t for t in sub_tried)
else:
tried.append([pattern])
else:
if sub_match:
# Merge captured arguments in match with submatch
sub_match_dict = dict(match.groupdict(), **self.default_kwargs)
sub_match_dict.update(sub_match.kwargs)
# If there are *any* named groups, ignore all non-named groups.
# Otherwise, pass all non-named arguments as positional arguments.
sub_match_args = sub_match.args
if not sub_match_dict:
sub_match_args = match.groups() + sub_match.args
return ResolverMatch(
sub_match.func,
sub_match_args,
sub_match_dict,
sub_match.url_name,
[self.app_name] + sub_match.app_names,
[self.namespace] + sub_match.namespaces
)
tried.append([pattern])
raise Resolver404({'tried': tried, 'path': new_path})
raise Resolver404({'path': path})
@cached_property
def urlconf_module(self):
if isinstance(self.urlconf_name, six.string_types):
return import_module(self.urlconf_name)
else:
return self.urlconf_name
@cached_property
def url_patterns(self):
# urlconf_module might be a valid set of patterns, so we default to it
patterns = getattr(self.urlconf_module, "urlpatterns", self.urlconf_module)
try:
iter(patterns)
except TypeError:
msg = (
"The included urlconf '{name}' does not appear to have any "
"patterns in it. If you see valid patterns in the file then "
"the issue is probably caused by a circular import."
)
raise ImproperlyConfigured(msg.format(name=self.urlconf_name))
return patterns
def resolve_error_handler(self, view_type):
callback = getattr(self.urlconf_module, 'handler%s' % view_type, None)
if not callback:
# No handler specified in file; use default
# Lazy import, since django.urls imports this file
from django.conf import urls
callback = getattr(urls, 'handler%s' % view_type)
return get_callable(callback), {}
def _reverse_with_prefix(self, lookup_view, _prefix, *args, **kwargs):
if args and kwargs:
raise ValueError("Don't mix *args and **kwargs in call to reverse()!")
text_args = [force_text(v) for v in args]
text_kwargs = {k: force_text(v) for (k, v) in kwargs.items()}
if not self._populated:
self._populate()
possibilities = self.reverse_dict.getlist(lookup_view)
for possibility, pattern, defaults in possibilities:
for result, params in possibility:
if args:
if len(args) != len(params):
continue
candidate_subs = dict(zip(params, text_args))
else:
if (set(kwargs.keys()) | set(defaults.keys()) != set(params) |
set(defaults.keys())):
continue
matches = True
for k, v in defaults.items():
if kwargs.get(k, v) != v:
matches = False
break
if not matches:
continue
candidate_subs = text_kwargs
# WSGI provides decoded URLs, without %xx escapes, and the URL
# resolver operates on such URLs. First substitute arguments
# without quoting to build a decoded URL and look for a match.
# Then, if we have a match, redo the substitution with quoted
# arguments in order to return a properly encoded URL.
candidate_pat = _prefix.replace('%', '%%') + result
if re.search('^%s%s' % (re.escape(_prefix), pattern), candidate_pat % candidate_subs, re.UNICODE):
# safe characters from `pchar` definition of RFC 3986
url = urlquote(candidate_pat % candidate_subs, safe=RFC3986_SUBDELIMS + str('/~:@'))
# Don't allow construction of scheme relative urls.
if url.startswith('//'):
url = '/%%2F%s' % url[2:]
return url
# lookup_view can be URL name or callable, but callables are not
# friendly in error messages.
m = getattr(lookup_view, '__module__', None)
n = getattr(lookup_view, '__name__', None)
if m is not None and n is not None:
lookup_view_s = "%s.%s" % (m, n)
else:
lookup_view_s = lookup_view
patterns = [pattern for (possibility, pattern, defaults) in possibilities]
raise NoReverseMatch("Reverse for '%s' with arguments '%s' and keyword "
"arguments '%s' not found. %d pattern(s) tried: %s" %
(lookup_view_s, args, kwargs, len(patterns), patterns))
class LocaleRegexURLResolver(RegexURLResolver):
"""
A URL resolver that always matches the active language code as URL prefix.
    Rather than taking a regex argument, we just override the ``regex``
    property to always return the active language-code as regex.
"""
def __init__(self, urlconf_name, default_kwargs=None, app_name=None, namespace=None):
super(LocaleRegexURLResolver, self).__init__(
None, urlconf_name, default_kwargs, app_name, namespace)
@property
def regex(self):
language_code = get_language()
if language_code not in self._regex_dict:
regex_compiled = re.compile('^%s/' % language_code, re.UNICODE)
self._regex_dict[language_code] = regex_compiled
return self._regex_dict[language_code]
def resolve(path, urlconf=None):
if urlconf is None:
urlconf = get_urlconf()
return get_resolver(urlconf).resolve(path)
def reverse(viewname, urlconf=None, args=None, kwargs=None, current_app=None):
if urlconf is None:
urlconf = get_urlconf()
resolver = get_resolver(urlconf)
args = args or []
kwargs = kwargs or {}
prefix = get_script_prefix()
if not isinstance(viewname, six.string_types):
view = viewname
else:
parts = viewname.split(':')
parts.reverse()
view = parts[0]
path = parts[1:]
if current_app:
current_path = current_app.split(':')
current_path.reverse()
else:
current_path = None
resolved_path = []
ns_pattern = ''
while path:
ns = path.pop()
current_ns = current_path.pop() if current_path else None
# Lookup the name to see if it could be an app identifier
try:
app_list = resolver.app_dict[ns]
# Yes! Path part matches an app in the current Resolver
if current_ns and current_ns in app_list:
# If we are reversing for a particular app,
# use that namespace
ns = current_ns
elif ns not in app_list:
# The name isn't shared by one of the instances
# (i.e., the default) so just pick the first instance
# as the default.
ns = app_list[0]
except KeyError:
pass
if ns != current_ns:
current_path = None
try:
extra, resolver = resolver.namespace_dict[ns]
resolved_path.append(ns)
ns_pattern = ns_pattern + extra
except KeyError as key:
if resolved_path:
raise NoReverseMatch(
"%s is not a registered namespace inside '%s'" %
(key, ':'.join(resolved_path)))
else:
raise NoReverseMatch("%s is not a registered namespace" %
key)
if ns_pattern:
resolver = get_ns_resolver(ns_pattern, resolver)
return force_text(iri_to_uri(resolver._reverse_with_prefix(view, prefix, *args, **kwargs)))
reverse_lazy = lazy(reverse, six.text_type)
def clear_url_caches():
get_callable.cache_clear()
get_resolver.cache_clear()
get_ns_resolver.cache_clear()
def set_script_prefix(prefix):
"""
Sets the script prefix for the current thread.
"""
if not prefix.endswith('/'):
prefix += '/'
_prefixes.value = prefix
def get_script_prefix():
"""
Returns the currently active script prefix. Useful for client code that
    wishes to construct its own URLs manually (although accessing the request
instance is normally going to be a lot cleaner).
"""
return getattr(_prefixes, "value", '/')
def clear_script_prefix():
"""
Unsets the script prefix for the current thread.
"""
try:
del _prefixes.value
except AttributeError:
pass
def set_urlconf(urlconf_name):
"""
Sets the URLconf for the current thread (overriding the default one in
settings). Set to None to revert back to the default.
"""
if urlconf_name:
_urlconfs.value = urlconf_name
else:
if hasattr(_urlconfs, "value"):
del _urlconfs.value
def get_urlconf(default=None):
"""
Returns the root URLconf to use for the current thread if it has been
changed from the default one.
"""
return getattr(_urlconfs, "value", default)
def is_valid_path(path, urlconf=None):
"""
Returns True if the given path resolves against the default URL resolver,
False otherwise.
This is a convenience method to make working with "is this a match?" cases
easier, avoiding unnecessarily indented try...except blocks.
"""
try:
resolve(path, urlconf)
return True
except Resolver404:
return False
def translate_url(url, lang_code):
"""
Given a URL (absolute or relative), try to get its translated version in
the `lang_code` language (either by i18n_patterns or by translated regex).
Return the original URL if no translated version is found.
"""
parsed = urlsplit(url)
try:
match = resolve(parsed.path)
except Resolver404:
pass
else:
to_be_reversed = "%s:%s" % (match.namespace, match.url_name) if match.namespace else match.url_name
with override(lang_code):
try:
url = reverse(to_be_reversed, args=match.args, kwargs=match.kwargs)
except NoReverseMatch:
pass
else:
url = urlunsplit((parsed.scheme, parsed.netloc, url, parsed.query, parsed.fragment))
return url
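# Usage sketch (an illustration, not part of this module), assuming a URLconf
# entry such as url(r'^articles/(?P<year>[0-9]{4})/$', views.year_archive,
# name='archive'):
#
#     match = resolve('/articles/2016/')
#     match.url_name, match.kwargs            # ('archive', {'year': '2016'})
#     reverse('archive', kwargs={'year': '2016'})     # '/articles/2016/'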
|
codeboy/coddy-sitetools
|
refs/heads/master
|
sitetools/coddy_site/views.py
|
1
|
# -*- coding: utf-8 -*-
from django.views.generic.simple import direct_to_template
from django.template import Context, RequestContext
from django.shortcuts import render_to_response, render, redirect
from django.template.loader import render_to_string, get_template
from django.http import HttpResponse
from django.core.serializers.json import DjangoJSONEncoder
from django.utils import simplejson
from django.core import serializers
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
#from projectile.baseapp.models import Home
from sitetools.coddy_auth.forms import ConsumerLoginForm
from procesors import custom_processor as cp
from utils import page_message
#@login_required
def index(request):
"""
    Render the main page.
"""
template = 'site/main.html'
data = dict()
# try:
# q_company = Company.objects.get(owner = request.user)
# data['list'] = q_company
# except (WorkObject.DoesNotExist, Company.DoesNotExist):
# page_message(request, 50, None, 'error')
# return redirect('/')
t, c = (get_template(template), RequestContext(request,data, processors=[cp]))
return HttpResponse(t.render(c))
def site_login(request):
    url = request.GET.get('next') or '/'
    if request.user.is_authenticated():
        return redirect(url)
else:
template = 'baseapp/ba-auth.html'
data = dict()
if request.method == 'POST':
form = ConsumerLoginForm(request.POST)
if form.is_valid():
user = authenticate(username=form.cleaned_data['user_code'],
password=form.cleaned_data['user_password'])
if user:
login(request, user)
return redirect(url)
else:
page_message(request, 40, None, 'error')
                    data['message'] = 'Invalid username or password.'
form = ConsumerLoginForm(request.POST)
else:
form = ConsumerLoginForm()
data['form'] = form
t, c = (get_template(template), RequestContext(request,data, processors=[cp]))
return HttpResponse(t.render(c))
def site_logout(request):
logout(request)
return redirect('base:ba_base')
|
saurabhbajaj207/CarpeDiem
|
refs/heads/master
|
venv/Lib/encodings/zlib_codec.py
|
58
|
""" Python 'zlib_codec' Codec - zlib compression encoding
Unlike most of the other codecs which target Unicode, this codec
will return Python string objects for both encode and decode.
Written by Marc-Andre Lemburg (mal@lemburg.com).
"""
import codecs
import zlib # this codec needs the optional zlib module !
### Codec APIs
def zlib_encode(input,errors='strict'):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = zlib.compress(input)
return (output, len(input))
def zlib_decode(input,errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = zlib.decompress(input)
return (output, len(input))
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return zlib_encode(input, errors)
def decode(self, input, errors='strict'):
return zlib_decode(input, errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
def __init__(self, errors='strict'):
assert errors == 'strict'
self.errors = errors
self.compressobj = zlib.compressobj()
def encode(self, input, final=False):
if final:
c = self.compressobj.compress(input)
return c + self.compressobj.flush()
else:
return self.compressobj.compress(input)
def reset(self):
self.compressobj = zlib.compressobj()
class IncrementalDecoder(codecs.IncrementalDecoder):
def __init__(self, errors='strict'):
assert errors == 'strict'
self.errors = errors
self.decompressobj = zlib.decompressobj()
def decode(self, input, final=False):
if final:
c = self.decompressobj.decompress(input)
return c + self.decompressobj.flush()
else:
return self.decompressobj.decompress(input)
def reset(self):
self.decompressobj = zlib.decompressobj()
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='zlib',
encode=zlib_encode,
decode=zlib_decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
_is_text_encoding=False,
)
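# Usage sketch (Python 2; this codec is registered under the name 'zlib'):
#
#     data = 'spam and eggs' * 100
#     packed = data.encode('zlib')
#     assert packed.decode('zlib') == data
#
# The incremental classes accept input chunk by chunk; pass final=True on the
# last call so the compressor/decompressor is flushed.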
|
oaastest/azure-linux-extensions
|
refs/heads/master
|
OSPatching/test/FakePatching3.py
|
8
|
#!/usr/bin/python
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.4+
import sys
import time
sys.path.append('../patch')
from AbstractPatching import AbstractPatching
class FakePatching(AbstractPatching):
def __init__(self, hutil=None):
super(FakePatching,self).__init__(hutil)
self.pkg_query_cmd = 'dpkg-query -L'
self.gap_between_stage = 20
self.download_duration = 60
self.security_download_list = ['a', 'b', 'c', 'd', 'e']
self.all_download_list = ['1', '2', '3', '4', 'a', 'b', 'c', 'd', 'e']
def install(self):
"""
        Install dependencies.
"""
pass
def check(self, category):
"""
        Check for valid upgrades and return the package
        list to download and upgrade.
"""
if category == 'important':
return 0, self.security_download_list
else:
return 0, self.all_download_list
def download_package(self, package):
return 0
def patch_package(self, package):
return 0
def check_reboot(self):
return False
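# Usage sketch (assumes AbstractPatching tolerates hutil=None, as the default
# argument suggests):
#
#     patcher = FakePatching()
#     status, packages = patcher.check('important')
#     assert status == 0 and packages == ['a', 'b', 'c', 'd', 'e']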
|
unindented/streamcode
|
refs/heads/master
|
client/static/jsrepl/extern/python/reloop-closured/lib/python2.7/pdb.py
|
60
|
#! /usr/bin/env python
"""A Python debugger."""
# (See pdb.doc for documentation.)
import sys
import linecache
import cmd
import bdb
from repr import Repr
import os
import re
import pprint
import traceback
class Restart(Exception):
"""Causes a debugger to be restarted for the debugged python program."""
pass
# Create a custom safe Repr instance and increase its maxstring.
# The default of 30 truncates error messages too easily.
_repr = Repr()
_repr.maxstring = 200
_saferepr = _repr.repr
__all__ = ["run", "pm", "Pdb", "runeval", "runctx", "runcall", "set_trace",
"post_mortem", "help"]
def find_function(funcname, filename):
cre = re.compile(r'def\s+%s\s*[(]' % re.escape(funcname))
try:
fp = open(filename)
except IOError:
return None
# consumer of this info expects the first line to be 1
lineno = 1
answer = None
while 1:
line = fp.readline()
if line == '':
break
if cre.match(line):
answer = funcname, filename, lineno
break
lineno = lineno + 1
fp.close()
return answer
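# Sketch: for a file /tmp/mod.py whose first line is "def spam():",
# find_function('spam', '/tmp/mod.py') returns ('spam', '/tmp/mod.py', 1);
# it returns None when the file is unreadable or no matching def is found.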
# Interaction prompt line will separate file and call info from code
# text using value of line_prefix string. A newline and arrow may
# be to your liking. You can set it once pdb is imported using the
# command "pdb.line_prefix = '\n% '".
# line_prefix = ': ' # Use this to get the old situation back
line_prefix = '\n-> ' # Probably a better default
class Pdb(bdb.Bdb, cmd.Cmd):
def __init__(self, completekey='tab', stdin=None, stdout=None, skip=None):
bdb.Bdb.__init__(self, skip=skip)
cmd.Cmd.__init__(self, completekey, stdin, stdout)
if stdout:
self.use_rawinput = 0
self.prompt = '(Pdb) '
self.aliases = {}
self.mainpyfile = ''
self._wait_for_mainpyfile = 0
# Try to load readline if it exists
try:
import readline
except ImportError:
pass
# Read $HOME/.pdbrc and ./.pdbrc
self.rcLines = []
if 'HOME' in os.environ:
envHome = os.environ['HOME']
try:
rcFile = open(os.path.join(envHome, ".pdbrc"))
except IOError:
pass
else:
for line in rcFile.readlines():
self.rcLines.append(line)
rcFile.close()
try:
rcFile = open(".pdbrc")
except IOError:
pass
else:
for line in rcFile.readlines():
self.rcLines.append(line)
rcFile.close()
self.commands = {} # associates a command list to breakpoint numbers
self.commands_doprompt = {} # for each bp num, tells if the prompt
# must be disp. after execing the cmd list
self.commands_silent = {} # for each bp num, tells if the stack trace
# must be disp. after execing the cmd list
self.commands_defining = False # True while in the process of defining
# a command list
self.commands_bnum = None # The breakpoint number for which we are
# defining a list
def reset(self):
bdb.Bdb.reset(self)
self.forget()
def forget(self):
self.lineno = None
self.stack = []
self.curindex = 0
self.curframe = None
def setup(self, f, t):
self.forget()
self.stack, self.curindex = self.get_stack(f, t)
self.curframe = self.stack[self.curindex][0]
# The f_locals dictionary is updated from the actual frame
# locals whenever the .f_locals accessor is called, so we
# cache it here to ensure that modifications are not overwritten.
self.curframe_locals = self.curframe.f_locals
self.execRcLines()
# Can be executed earlier than 'setup' if desired
def execRcLines(self):
if self.rcLines:
# Make local copy because of recursion
rcLines = self.rcLines
# executed only once
self.rcLines = []
for line in rcLines:
line = line[:-1]
if len(line) > 0 and line[0] != '#':
self.onecmd(line)
# Override Bdb methods
def user_call(self, frame, argument_list):
"""This method is called when there is the remote possibility
that we ever need to stop in this function."""
if self._wait_for_mainpyfile:
return
if self.stop_here(frame):
print >>self.stdout, '--Call--'
self.interaction(frame, None)
def user_line(self, frame):
"""This function is called when we stop or break at this line."""
if self._wait_for_mainpyfile:
if (self.mainpyfile != self.canonic(frame.f_code.co_filename)
                or frame.f_lineno <= 0):
return
self._wait_for_mainpyfile = 0
if self.bp_commands(frame):
self.interaction(frame, None)
def bp_commands(self,frame):
"""Call every command that was set for the current active breakpoint
(if there is one).
Returns True if the normal interaction function must be called,
False otherwise."""
# self.currentbp is set in bdb in Bdb.break_here if a breakpoint was hit
if getattr(self, "currentbp", False) and \
self.currentbp in self.commands:
currentbp = self.currentbp
self.currentbp = 0
lastcmd_back = self.lastcmd
self.setup(frame, None)
for line in self.commands[currentbp]:
self.onecmd(line)
self.lastcmd = lastcmd_back
if not self.commands_silent[currentbp]:
self.print_stack_entry(self.stack[self.curindex])
if self.commands_doprompt[currentbp]:
self.cmdloop()
self.forget()
return
return 1
def user_return(self, frame, return_value):
"""This function is called when a return trap is set here."""
if self._wait_for_mainpyfile:
return
frame.f_locals['__return__'] = return_value
print >>self.stdout, '--Return--'
self.interaction(frame, None)
def user_exception(self, frame, exc_info):
"""This function is called if an exception occurs,
but only if we are to stop at or just below this level."""
if self._wait_for_mainpyfile:
return
exc_type, exc_value, exc_traceback = exc_info
frame.f_locals['__exception__'] = exc_type, exc_value
if type(exc_type) == type(''):
exc_type_name = exc_type
else: exc_type_name = exc_type.__name__
print >>self.stdout, exc_type_name + ':', _saferepr(exc_value)
self.interaction(frame, exc_traceback)
# General interaction function
def interaction(self, frame, traceback):
self.setup(frame, traceback)
self.print_stack_entry(self.stack[self.curindex])
self.cmdloop()
self.forget()
def displayhook(self, obj):
"""Custom displayhook for the exec in default(), which prevents
assignment of the _ variable in the builtins.
"""
# reproduce the behavior of the standard displayhook, not printing None
if obj is not None:
print repr(obj)
def default(self, line):
if line[:1] == '!': line = line[1:]
locals = self.curframe_locals
globals = self.curframe.f_globals
try:
code = compile(line + '\n', '<stdin>', 'single')
save_stdout = sys.stdout
save_stdin = sys.stdin
save_displayhook = sys.displayhook
try:
sys.stdin = self.stdin
sys.stdout = self.stdout
sys.displayhook = self.displayhook
exec code in globals, locals
finally:
sys.stdout = save_stdout
sys.stdin = save_stdin
sys.displayhook = save_displayhook
except:
t, v = sys.exc_info()[:2]
if type(t) == type(''):
exc_type_name = t
else: exc_type_name = t.__name__
print >>self.stdout, '***', exc_type_name + ':', v
def precmd(self, line):
"""Handle alias expansion and ';;' separator."""
if not line.strip():
return line
args = line.split()
while args[0] in self.aliases:
line = self.aliases[args[0]]
ii = 1
for tmpArg in args[1:]:
line = line.replace("%" + str(ii),
tmpArg)
ii = ii + 1
line = line.replace("%*", ' '.join(args[1:]))
args = line.split()
# split into ';;' separated commands
# unless it's an alias command
if args[0] != 'alias':
marker = line.find(';;')
if marker >= 0:
# queue up everything after marker
next = line[marker+2:].lstrip()
self.cmdqueue.append(next)
line = line[:marker].rstrip()
return line
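    # Expansion sketch: with self.aliases == {'pi': 'p %1.__dict__'}, the
    # input 'pi self' is rewritten to 'p self.__dict__' before dispatch; a
    # ';;' in a non-alias line queues the remainder as the next command.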
def onecmd(self, line):
"""Interpret the argument as though it had been typed in response
to the prompt.
Checks whether this line is typed at the normal prompt or in
a breakpoint command list definition.
"""
if not self.commands_defining:
return cmd.Cmd.onecmd(self, line)
else:
return self.handle_command_def(line)
def handle_command_def(self,line):
"""Handles one command line during command list definition."""
cmd, arg, line = self.parseline(line)
if not cmd:
return
if cmd == 'silent':
self.commands_silent[self.commands_bnum] = True
return # continue to handle other cmd def in the cmd list
elif cmd == 'end':
self.cmdqueue = []
return 1 # end of cmd list
cmdlist = self.commands[self.commands_bnum]
if arg:
cmdlist.append(cmd+' '+arg)
else:
cmdlist.append(cmd)
# Determine if we must stop
try:
func = getattr(self, 'do_' + cmd)
except AttributeError:
func = self.default
# one of the resuming commands
if func.func_name in self.commands_resuming:
self.commands_doprompt[self.commands_bnum] = False
self.cmdqueue = []
return 1
return
# Command definitions, called by cmdloop()
# The argument is the remaining string on the command line
# Return true to exit from the command loop
do_h = cmd.Cmd.do_help
def do_commands(self, arg):
"""Defines a list of commands associated to a breakpoint.
Those commands will be executed whenever the breakpoint causes
the program to stop execution."""
if not arg:
bnum = len(bdb.Breakpoint.bpbynumber)-1
else:
try:
bnum = int(arg)
except:
print >>self.stdout, "Usage : commands [bnum]\n ..." \
"\n end"
return
self.commands_bnum = bnum
self.commands[bnum] = []
self.commands_doprompt[bnum] = True
self.commands_silent[bnum] = False
prompt_back = self.prompt
self.prompt = '(com) '
self.commands_defining = True
try:
self.cmdloop()
finally:
self.commands_defining = False
self.prompt = prompt_back
def do_break(self, arg, temporary = 0):
# break [ ([filename:]lineno | function) [, "condition"] ]
if not arg:
if self.breaks: # There's at least one
print >>self.stdout, "Num Type Disp Enb Where"
for bp in bdb.Breakpoint.bpbynumber:
if bp:
bp.bpprint(self.stdout)
return
# parse arguments; comma has lowest precedence
# and cannot occur in filename
filename = None
lineno = None
cond = None
comma = arg.find(',')
if comma > 0:
# parse stuff after comma: "condition"
cond = arg[comma+1:].lstrip()
arg = arg[:comma].rstrip()
# parse stuff before comma: [filename:]lineno | function
colon = arg.rfind(':')
funcname = None
if colon >= 0:
filename = arg[:colon].rstrip()
f = self.lookupmodule(filename)
if not f:
print >>self.stdout, '*** ', repr(filename),
print >>self.stdout, 'not found from sys.path'
return
else:
filename = f
arg = arg[colon+1:].lstrip()
try:
lineno = int(arg)
except ValueError, msg:
print >>self.stdout, '*** Bad lineno:', arg
return
else:
# no colon; can be lineno or function
try:
lineno = int(arg)
except ValueError:
try:
func = eval(arg,
self.curframe.f_globals,
self.curframe_locals)
except:
func = arg
try:
if hasattr(func, 'im_func'):
func = func.im_func
code = func.func_code
#use co_name to identify the bkpt (function names
#could be aliased, but co_name is invariant)
funcname = code.co_name
lineno = code.co_firstlineno
filename = code.co_filename
except:
# last thing to try
(ok, filename, ln) = self.lineinfo(arg)
if not ok:
print >>self.stdout, '*** The specified object',
print >>self.stdout, repr(arg),
print >>self.stdout, 'is not a function'
print >>self.stdout, 'or was not found along sys.path.'
return
funcname = ok # ok contains a function name
lineno = int(ln)
if not filename:
filename = self.defaultFile()
# Check for reasonable breakpoint
line = self.checkline(filename, lineno)
if line:
# now set the break point
err = self.set_break(filename, line, temporary, cond, funcname)
if err: print >>self.stdout, '***', err
else:
bp = self.get_breaks(filename, line)[-1]
print >>self.stdout, "Breakpoint %d at %s:%d" % (bp.number,
bp.file,
bp.line)
# To be overridden in derived debuggers
def defaultFile(self):
"""Produce a reasonable default."""
filename = self.curframe.f_code.co_filename
if filename == '<string>' and self.mainpyfile:
filename = self.mainpyfile
return filename
do_b = do_break
def do_tbreak(self, arg):
self.do_break(arg, 1)
def lineinfo(self, identifier):
failed = (None, None, None)
# Input is identifier, may be in single quotes
idstring = identifier.split("'")
if len(idstring) == 1:
# not in single quotes
id = idstring[0].strip()
elif len(idstring) == 3:
# quoted
id = idstring[1].strip()
else:
return failed
if id == '': return failed
parts = id.split('.')
# Protection for derived debuggers
if parts[0] == 'self':
del parts[0]
if len(parts) == 0:
return failed
# Best first guess at file to look at
fname = self.defaultFile()
if len(parts) == 1:
item = parts[0]
else:
# More than one part.
# First is module, second is method/class
f = self.lookupmodule(parts[0])
if f:
fname = f
item = parts[1]
answer = find_function(item, fname)
return answer or failed
def checkline(self, filename, lineno):
"""Check whether specified line seems to be executable.
Return `lineno` if it is, 0 if not (e.g. a docstring, comment, blank
line or EOF). Warning: testing is not comprehensive.
"""
# this method should be callable before starting debugging, so default
# to "no globals" if there is no current frame
globs = self.curframe.f_globals if hasattr(self, 'curframe') else None
line = linecache.getline(filename, lineno, globs)
if not line:
print >>self.stdout, 'End of file'
return 0
line = line.strip()
# Don't allow setting breakpoint at a blank line
if (not line or (line[0] == '#') or
(line[:3] == '"""') or line[:3] == "'''"):
print >>self.stdout, '*** Blank or comment'
return 0
return lineno
def do_enable(self, arg):
args = arg.split()
for i in args:
try:
i = int(i)
except ValueError:
print >>self.stdout, 'Breakpoint index %r is not a number' % i
continue
if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
print >>self.stdout, 'No breakpoint numbered', i
continue
bp = bdb.Breakpoint.bpbynumber[i]
if bp:
bp.enable()
def do_disable(self, arg):
args = arg.split()
for i in args:
try:
i = int(i)
except ValueError:
print >>self.stdout, 'Breakpoint index %r is not a number' % i
continue
if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
print >>self.stdout, 'No breakpoint numbered', i
continue
bp = bdb.Breakpoint.bpbynumber[i]
if bp:
bp.disable()
def do_condition(self, arg):
# arg is breakpoint number and condition
args = arg.split(' ', 1)
try:
bpnum = int(args[0].strip())
except ValueError:
# something went wrong
print >>self.stdout, \
'Breakpoint index %r is not a number' % args[0]
return
try:
cond = args[1]
except:
cond = None
try:
bp = bdb.Breakpoint.bpbynumber[bpnum]
except IndexError:
print >>self.stdout, 'Breakpoint index %r is not valid' % args[0]
return
if bp:
bp.cond = cond
if not cond:
print >>self.stdout, 'Breakpoint', bpnum,
print >>self.stdout, 'is now unconditional.'
def do_ignore(self,arg):
"""arg is bp number followed by ignore count."""
args = arg.split()
try:
bpnum = int(args[0].strip())
except ValueError:
# something went wrong
print >>self.stdout, \
'Breakpoint index %r is not a number' % args[0]
return
try:
count = int(args[1].strip())
except:
count = 0
try:
bp = bdb.Breakpoint.bpbynumber[bpnum]
except IndexError:
print >>self.stdout, 'Breakpoint index %r is not valid' % args[0]
return
if bp:
bp.ignore = count
if count > 0:
reply = 'Will ignore next '
if count > 1:
reply = reply + '%d crossings' % count
else:
reply = reply + '1 crossing'
print >>self.stdout, reply + ' of breakpoint %d.' % bpnum
else:
print >>self.stdout, 'Will stop next time breakpoint',
print >>self.stdout, bpnum, 'is reached.'
def do_clear(self, arg):
"""Three possibilities, tried in this order:
clear -> clear all breaks, ask for confirmation
clear file:lineno -> clear all breaks at file:lineno
clear bpno bpno ... -> clear breakpoints by number"""
if not arg:
try:
reply = raw_input('Clear all breaks? ')
except EOFError:
reply = 'no'
reply = reply.strip().lower()
if reply in ('y', 'yes'):
self.clear_all_breaks()
return
if ':' in arg:
# Make sure it works for "clear C:\foo\bar.py:12"
i = arg.rfind(':')
filename = arg[:i]
arg = arg[i+1:]
try:
lineno = int(arg)
except ValueError:
err = "Invalid line number (%s)" % arg
else:
err = self.clear_break(filename, lineno)
if err: print >>self.stdout, '***', err
return
numberlist = arg.split()
for i in numberlist:
try:
i = int(i)
except ValueError:
print >>self.stdout, 'Breakpoint index %r is not a number' % i
continue
if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
print >>self.stdout, 'No breakpoint numbered', i
continue
err = self.clear_bpbynumber(i)
if err:
print >>self.stdout, '***', err
else:
print >>self.stdout, 'Deleted breakpoint', i
do_cl = do_clear # 'c' is already an abbreviation for 'continue'
def do_where(self, arg):
self.print_stack_trace()
do_w = do_where
do_bt = do_where
def do_up(self, arg):
if self.curindex == 0:
print >>self.stdout, '*** Oldest frame'
else:
self.curindex = self.curindex - 1
self.curframe = self.stack[self.curindex][0]
self.curframe_locals = self.curframe.f_locals
self.print_stack_entry(self.stack[self.curindex])
self.lineno = None
do_u = do_up
def do_down(self, arg):
if self.curindex + 1 == len(self.stack):
print >>self.stdout, '*** Newest frame'
else:
self.curindex = self.curindex + 1
self.curframe = self.stack[self.curindex][0]
self.curframe_locals = self.curframe.f_locals
self.print_stack_entry(self.stack[self.curindex])
self.lineno = None
do_d = do_down
def do_until(self, arg):
self.set_until(self.curframe)
return 1
do_unt = do_until
def do_step(self, arg):
self.set_step()
return 1
do_s = do_step
def do_next(self, arg):
self.set_next(self.curframe)
return 1
do_n = do_next
def do_run(self, arg):
"""Restart program by raising an exception to be caught in the main
debugger loop. If arguments were given, set them in sys.argv."""
if arg:
import shlex
argv0 = sys.argv[0:1]
sys.argv = shlex.split(arg)
sys.argv[:0] = argv0
raise Restart
do_restart = do_run
def do_return(self, arg):
self.set_return(self.curframe)
return 1
do_r = do_return
def do_continue(self, arg):
self.set_continue()
return 1
do_c = do_cont = do_continue
def do_jump(self, arg):
if self.curindex + 1 != len(self.stack):
print >>self.stdout, "*** You can only jump within the bottom frame"
return
try:
arg = int(arg)
except ValueError:
print >>self.stdout, "*** The 'jump' command requires a line number."
else:
try:
# Do the jump, fix up our copy of the stack, and display the
# new position
self.curframe.f_lineno = arg
self.stack[self.curindex] = self.stack[self.curindex][0], arg
self.print_stack_entry(self.stack[self.curindex])
except ValueError, e:
print >>self.stdout, '*** Jump failed:', e
do_j = do_jump
def do_debug(self, arg):
sys.settrace(None)
globals = self.curframe.f_globals
locals = self.curframe_locals
p = Pdb(self.completekey, self.stdin, self.stdout)
p.prompt = "(%s) " % self.prompt.strip()
print >>self.stdout, "ENTERING RECURSIVE DEBUGGER"
sys.call_tracing(p.run, (arg, globals, locals))
print >>self.stdout, "LEAVING RECURSIVE DEBUGGER"
sys.settrace(self.trace_dispatch)
self.lastcmd = p.lastcmd
def do_quit(self, arg):
self._user_requested_quit = 1
self.set_quit()
return 1
do_q = do_quit
do_exit = do_quit
def do_EOF(self, arg):
print >>self.stdout
self._user_requested_quit = 1
self.set_quit()
return 1
def do_args(self, arg):
co = self.curframe.f_code
dict = self.curframe_locals
n = co.co_argcount
if co.co_flags & 4: n = n+1
if co.co_flags & 8: n = n+1
for i in range(n):
name = co.co_varnames[i]
print >>self.stdout, name, '=',
if name in dict: print >>self.stdout, dict[name]
else: print >>self.stdout, "*** undefined ***"
do_a = do_args
def do_retval(self, arg):
if '__return__' in self.curframe_locals:
print >>self.stdout, self.curframe_locals['__return__']
else:
print >>self.stdout, '*** Not yet returned!'
do_rv = do_retval
def _getval(self, arg):
try:
return eval(arg, self.curframe.f_globals,
self.curframe_locals)
except:
t, v = sys.exc_info()[:2]
if isinstance(t, str):
exc_type_name = t
else: exc_type_name = t.__name__
print >>self.stdout, '***', exc_type_name + ':', repr(v)
raise
def do_p(self, arg):
try:
print >>self.stdout, repr(self._getval(arg))
except:
pass
def do_pp(self, arg):
try:
pprint.pprint(self._getval(arg), self.stdout)
except:
pass
def do_list(self, arg):
self.lastcmd = 'list'
last = None
if arg:
try:
x = eval(arg, {}, {})
if type(x) == type(()):
first, last = x
first = int(first)
last = int(last)
if last < first:
# Assume it's a count
last = first + last
else:
first = max(1, int(x) - 5)
except:
print >>self.stdout, '*** Error in argument:', repr(arg)
return
elif self.lineno is None:
first = max(1, self.curframe.f_lineno - 5)
else:
first = self.lineno + 1
if last is None:
last = first + 10
filename = self.curframe.f_code.co_filename
breaklist = self.get_file_breaks(filename)
try:
for lineno in range(first, last+1):
line = linecache.getline(filename, lineno,
self.curframe.f_globals)
if not line:
print >>self.stdout, '[EOF]'
break
else:
s = repr(lineno).rjust(3)
if len(s) < 4: s = s + ' '
if lineno in breaklist: s = s + 'B'
else: s = s + ' '
if lineno == self.curframe.f_lineno:
s = s + '->'
print >>self.stdout, s + '\t' + line,
self.lineno = lineno
except KeyboardInterrupt:
pass
do_l = do_list
def do_whatis(self, arg):
try:
value = eval(arg, self.curframe.f_globals,
self.curframe_locals)
except:
t, v = sys.exc_info()[:2]
if type(t) == type(''):
exc_type_name = t
else: exc_type_name = t.__name__
print >>self.stdout, '***', exc_type_name + ':', repr(v)
return
code = None
# Is it a function?
try: code = value.func_code
except: pass
if code:
print >>self.stdout, 'Function', code.co_name
return
# Is it an instance method?
try: code = value.im_func.func_code
except: pass
if code:
print >>self.stdout, 'Method', code.co_name
return
# None of the above...
print >>self.stdout, type(value)
def do_alias(self, arg):
args = arg.split()
if len(args) == 0:
keys = self.aliases.keys()
keys.sort()
for alias in keys:
print >>self.stdout, "%s = %s" % (alias, self.aliases[alias])
return
if args[0] in self.aliases and len(args) == 1:
print >>self.stdout, "%s = %s" % (args[0], self.aliases[args[0]])
else:
self.aliases[args[0]] = ' '.join(args[1:])
def do_unalias(self, arg):
args = arg.split()
if len(args) == 0: return
if args[0] in self.aliases:
del self.aliases[args[0]]
#list of all the commands making the program resume execution.
commands_resuming = ['do_continue', 'do_step', 'do_next', 'do_return',
'do_quit', 'do_jump']
# Print a traceback starting at the top stack frame.
# The most recently entered frame is printed last;
# this is different from dbx and gdb, but consistent with
# the Python interpreter's stack trace.
# It is also consistent with the up/down commands (which are
# compatible with dbx and gdb: up moves towards 'main()'
# and down moves towards the most recent stack frame).
def print_stack_trace(self):
try:
for frame_lineno in self.stack:
self.print_stack_entry(frame_lineno)
except KeyboardInterrupt:
pass
def print_stack_entry(self, frame_lineno, prompt_prefix=line_prefix):
frame, lineno = frame_lineno
if frame is self.curframe:
print >>self.stdout, '>',
else:
print >>self.stdout, ' ',
print >>self.stdout, self.format_stack_entry(frame_lineno,
prompt_prefix)
# Help methods (derived from pdb.doc)
def help_help(self):
self.help_h()
def help_h(self):
print >>self.stdout, """h(elp)
Without argument, print the list of available commands.
With a command name as argument, print help about that command
"help pdb" pipes the full documentation file to the $PAGER
"help exec" gives help on the ! command"""
def help_where(self):
self.help_w()
def help_w(self):
print >>self.stdout, """w(here)
Print a stack trace, with the most recent frame at the bottom.
An arrow indicates the "current frame", which determines the
context of most commands. 'bt' is an alias for this command."""
help_bt = help_w
def help_down(self):
self.help_d()
def help_d(self):
print >>self.stdout, """d(own)
Move the current frame one level down in the stack trace
(to a newer frame)."""
def help_up(self):
self.help_u()
def help_u(self):
print >>self.stdout, """u(p)
Move the current frame one level up in the stack trace
(to an older frame)."""
def help_break(self):
self.help_b()
def help_b(self):
print >>self.stdout, """b(reak) ([file:]lineno | function) [, condition]
With a line number argument, set a break there in the current
file. With a function name, set a break at first executable line
of that function. Without argument, list all breaks. If a second
argument is present, it is a string specifying an expression
which must evaluate to true before the breakpoint is honored.
The line number may be prefixed with a filename and a colon,
to specify a breakpoint in another file (probably one that
hasn't been loaded yet). The file is searched for on sys.path;
the .py suffix may be omitted."""
def help_clear(self):
self.help_cl()
def help_cl(self):
print >>self.stdout, "cl(ear) filename:lineno"
print >>self.stdout, """cl(ear) [bpnumber [bpnumber...]]
With a space separated list of breakpoint numbers, clear
those breakpoints. Without argument, clear all breaks (but
first ask confirmation). With a filename:lineno argument,
clear all breaks at that line in that file.
Note that the argument is different from previous versions of
the debugger (in python distributions 1.5.1 and before) where
a linenumber was used instead of either filename:lineno or
breakpoint numbers."""
def help_tbreak(self):
print >>self.stdout, """tbreak same arguments as break, but breakpoint
is removed when first hit."""
def help_enable(self):
print >>self.stdout, """enable bpnumber [bpnumber ...]
Enables the breakpoints given as a space separated list of
bp numbers."""
def help_disable(self):
print >>self.stdout, """disable bpnumber [bpnumber ...]
Disables the breakpoints given as a space separated list of
bp numbers."""
def help_ignore(self):
print >>self.stdout, """ignore bpnumber count
Sets the ignore count for the given breakpoint number. A breakpoint
becomes active when the ignore count is zero. When non-zero, the
count is decremented each time the breakpoint is reached and the
breakpoint is not disabled and any associated condition evaluates
to true."""
def help_condition(self):
print >>self.stdout, """condition bpnumber str_condition
str_condition is a string specifying an expression which
must evaluate to true before the breakpoint is honored.
If str_condition is absent, any existing condition is removed;
i.e., the breakpoint is made unconditional."""
def help_step(self):
self.help_s()
def help_s(self):
print >>self.stdout, """s(tep)
Execute the current line, stop at the first possible occasion
(either in a function that is called or in the current function)."""
def help_until(self):
self.help_unt()
def help_unt(self):
print """unt(il)
Continue execution until the line with a number greater than the current
one is reached or until the current frame returns"""
def help_next(self):
self.help_n()
def help_n(self):
print >>self.stdout, """n(ext)
Continue execution until the next line in the current function
is reached or it returns."""
def help_return(self):
self.help_r()
def help_r(self):
print >>self.stdout, """r(eturn)
Continue execution until the current function returns."""
def help_continue(self):
self.help_c()
def help_cont(self):
self.help_c()
def help_c(self):
print >>self.stdout, """c(ont(inue))
Continue execution, only stop when a breakpoint is encountered."""
def help_jump(self):
self.help_j()
def help_j(self):
print >>self.stdout, """j(ump) lineno
Set the next line that will be executed."""
def help_debug(self):
print >>self.stdout, """debug code
Enter a recursive debugger that steps through the code argument
(which is an arbitrary expression or statement to be executed
in the current environment)."""
def help_list(self):
self.help_l()
def help_l(self):
print >>self.stdout, """l(ist) [first [,last]]
List source code for the current file.
Without arguments, list 11 lines around the current line
or continue the previous listing.
With one argument, list 11 lines starting at that line.
With two arguments, list the given range;
if the second argument is less than the first, it is a count."""
def help_args(self):
self.help_a()
def help_a(self):
print >>self.stdout, """a(rgs)
Print the arguments of the current function."""
def help_p(self):
print >>self.stdout, """p expression
Print the value of the expression."""
def help_pp(self):
print >>self.stdout, """pp expression
Pretty-print the value of the expression."""
def help_exec(self):
print >>self.stdout, """(!) statement
Execute the (one-line) statement in the context of
the current stack frame.
The exclamation point can be omitted unless the first word
of the statement resembles a debugger command.
To assign to a global variable you must always prefix the
command with a 'global' command, e.g.:
(Pdb) global list_options; list_options = ['-l']
(Pdb)"""
def help_run(self):
print """run [args...]
Restart the debugged python program. If a string is supplied, it is
splitted with "shlex" and the result is used as the new sys.argv.
History, breakpoints, actions and debugger options are preserved.
"restart" is an alias for "run"."""
help_restart = help_run
def help_quit(self):
self.help_q()
def help_q(self):
print >>self.stdout, """q(uit) or exit - Quit from the debugger.
The program being executed is aborted."""
help_exit = help_q
def help_whatis(self):
print >>self.stdout, """whatis arg
Prints the type of the argument."""
def help_EOF(self):
print >>self.stdout, """EOF
Handles the receipt of EOF as a command."""
def help_alias(self):
print >>self.stdout, """alias [name [command [parameter parameter ...]]]
Creates an alias called 'name' that executes 'command'. The command
must *not* be enclosed in quotes. Replaceable parameters are
indicated by %1, %2, and so on, while %* is replaced by all the
parameters. If no command is given, the current alias for name
is shown. If no name is given, all aliases are listed.
Aliases may be nested and can contain anything that can be
legally typed at the pdb prompt. Note! You *can* override
internal pdb commands with aliases! Those internal commands
are then hidden until the alias is removed. Aliasing is recursively
applied to the first word of the command line; all other words
in the line are left alone.
Some useful aliases (especially when placed in the .pdbrc file) are:
#Print instance variables (usage "pi classInst")
alias pi for k in %1.__dict__.keys(): print "%1.",k,"=",%1.__dict__[k]
#Print instance variables in self
alias ps pi self
"""
def help_unalias(self):
print >>self.stdout, """unalias name
Deletes the specified alias."""
def help_commands(self):
print >>self.stdout, """commands [bpnumber]
(com) ...
(com) end
(Pdb)
Specify a list of commands for breakpoint number bpnumber. The
commands themselves appear on the following lines. Type a line
containing just 'end' to terminate the commands.
To remove all commands from a breakpoint, type commands and
follow it immediately with end; that is, give no commands.
With no bpnumber argument, commands refers to the last
breakpoint set.
You can use breakpoint commands to start your program up again.
Simply use the continue command, or step, or any other
command that resumes execution.
Specifying any command resuming execution (currently continue,
step, next, return, jump, quit and their abbreviations) terminates
the command list (as if that command was immediately followed by end).
This is because any time you resume execution
(even with a simple next or step), you may encounter
another breakpoint--which could have its own command list, leading to
ambiguities about which list to execute.
If you use the 'silent' command in the command list, the
usual message about stopping at a breakpoint is not printed. This may
be desirable for breakpoints that are to print a specific message and
then continue. If none of the other commands print anything, you
see no sign that the breakpoint was reached.
"""
def help_pdb(self):
help()
def lookupmodule(self, filename):
"""Helper function for break/clear parsing -- may be overridden.
lookupmodule() translates (possibly incomplete) file or module name
into an absolute file name.
"""
if os.path.isabs(filename) and os.path.exists(filename):
return filename
f = os.path.join(sys.path[0], filename)
if os.path.exists(f) and self.canonic(f) == self.mainpyfile:
return f
root, ext = os.path.splitext(filename)
if ext == '':
filename = filename + '.py'
if os.path.isabs(filename):
return filename
for dirname in sys.path:
while os.path.islink(dirname):
dirname = os.readlink(dirname)
fullname = os.path.join(dirname, filename)
if os.path.exists(fullname):
return fullname
return None
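    # Sketch: lookupmodule('/abs/path/script.py') returns the path unchanged
    # when it exists; lookupmodule('string') appends '.py' and returns the
    # first 'string.py' found on sys.path; otherwise it returns None.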
def _runscript(self, filename):
# The script has to run in __main__ namespace (or imports from
# __main__ will break).
#
# So we clear up the __main__ and set several special variables
# (this gets rid of pdb's globals and cleans old variables on restarts).
import __main__
__main__.__dict__.clear()
__main__.__dict__.update({"__name__" : "__main__",
"__file__" : filename,
"__builtins__": __builtins__,
})
# When bdb sets tracing, a number of call and line events happens
# BEFORE debugger even reaches user's code (and the exact sequence of
# events depends on python version). So we take special measures to
# avoid stopping before we reach the main script (see user_line and
# user_call for details).
self._wait_for_mainpyfile = 1
self.mainpyfile = self.canonic(filename)
self._user_requested_quit = 0
statement = 'execfile( "%s")' % filename
self.run(statement)
# Simplified interface
def run(statement, globals=None, locals=None):
Pdb().run(statement, globals, locals)
def runeval(expression, globals=None, locals=None):
return Pdb().runeval(expression, globals, locals)
def runctx(statement, globals, locals):
# B/W compatibility
run(statement, globals, locals)
def runcall(*args, **kwds):
return Pdb().runcall(*args, **kwds)
def set_trace():
Pdb().set_trace(sys._getframe().f_back)
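# Typical entry points (a usage sketch; 'mymodule' and 'some_function' are
# placeholders, not part of this module):
#
#     import pdb; pdb.set_trace()         # break into the debugger here
#     pdb.run('mymodule.main()')          # debug a statement from the start
#     pdb.runcall(some_function, 1, 2)    # debug a single call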
# Post-Mortem interface
def post_mortem(t=None):
# handling the default
if t is None:
# sys.exc_info() returns (type, value, traceback) if an exception is
# being handled, otherwise it returns None
t = sys.exc_info()[2]
if t is None:
raise ValueError("A valid traceback must be passed if no "
"exception is being handled")
p = Pdb()
p.reset()
p.interaction(None, t)
def pm():
post_mortem(sys.last_traceback)
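# Post-mortem sketch: pm() reuses the traceback of the last uncaught
# exception at the interactive prompt, while inside an except block
# post_mortem() falls back to sys.exc_info()[2]:
#
#     try:
#         risky()                 # placeholder for failing code
#     except Exception:
#         post_mortem()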
# Main program for testing
TESTCMD = 'import x; x.main()'
def test():
run(TESTCMD)
# print help
def help():
for dirname in sys.path:
fullname = os.path.join(dirname, 'pdb.doc')
if os.path.exists(fullname):
sts = os.system('${PAGER-more} '+fullname)
if sts: print '*** Pager exit status:', sts
break
else:
print 'Sorry, can\'t find the help file "pdb.doc"',
print 'along the Python search path'
def main():
if not sys.argv[1:] or sys.argv[1] in ("--help", "-h"):
print "usage: pdb.py scriptfile [arg] ..."
sys.exit(2)
mainpyfile = sys.argv[1] # Get script filename
if not os.path.exists(mainpyfile):
print 'Error:', mainpyfile, 'does not exist'
sys.exit(1)
del sys.argv[0] # Hide "pdb.py" from argument list
# Replace pdb's dir with script's dir in front of module search path.
sys.path[0] = os.path.dirname(mainpyfile)
# Note on saving/restoring sys.argv: it's a good idea when sys.argv was
# modified by the script being debugged. It's a bad idea when it was
# changed by the user from the command line. There is a "restart" command
# which allows explicit specification of command line arguments.
pdb = Pdb()
while True:
try:
pdb._runscript(mainpyfile)
if pdb._user_requested_quit:
break
print "The program finished and will be restarted"
except Restart:
print "Restarting", mainpyfile, "with arguments:"
print "\t" + " ".join(sys.argv[1:])
except SystemExit:
# In most cases SystemExit does not warrant a post-mortem session.
print "The program exited via sys.exit(). Exit status: ",
print sys.exc_info()[1]
except:
traceback.print_exc()
print "Uncaught exception. Entering post mortem debugging"
print "Running 'cont' or 'step' will restart the program"
t = sys.exc_info()[2]
pdb.interaction(None, t)
print "Post mortem debugger finished. The " + mainpyfile + \
" will be restarted"
# When invoked as main program, invoke the debugger on a script
if __name__ == '__main__':
import pdb
pdb.main()
|
leonardocsantoss/ehventos
|
refs/heads/master
|
lib/grappelli/admin.py
|
1
|
# coding: utf-8
from django.contrib import admin
from models import Navigation, NavigationItem
class NavigationItemInline(admin.StackedInline):
model = NavigationItem
extra = 1
classes = ('collapse-open',)
fieldsets = (
('', {
'fields': ('title', 'link', 'category',)
}),
('', {
'fields': ('groups', 'users',),
}),
('', {
'fields': ('order',),
}),
)
filter_horizontal = ('users',)
# Grappelli Options
allow_add = True
class NavigationOptions(admin.ModelAdmin):
# List Options
list_display = ('order', 'title',)
list_display_links = ('title',)
# Fieldsets
fieldsets = (
('', {
'fields': ('title', 'order',)
}),
)
# Misc
save_as = True
# Inlines
inlines = [NavigationItemInline]
# Grappelli Options
order = 0
admin.site.register(Navigation, NavigationOptions)
|
bigswitch/horizon
|
refs/heads/master
|
openstack_dashboard/dashboards/project/images/images/urls.py
|
3
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import url
from openstack_dashboard.dashboards.project.images.images import views
urlpatterns = [
url(r'^create/$', views.CreateView.as_view(), name='create'),
url(r'^(?P<image_id>[^/]+)/update/$',
views.UpdateView.as_view(), name='update'),
url(r'^(?P<image_id>[^/]+)/$', views.DetailView.as_view(), name='detail'),
]
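# Usage sketch: once this module is pulled in via include() under a namespace
# (hypothetically 'images'; the real prefix depends on the including URLconf),
# the named routes reverse as usual. image_id is a placeholder:
#
#     from django.core.urlresolvers import reverse
#     reverse('images:detail', args=[image_id])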
|
mozbhearsum/balrog
|
refs/heads/master
|
auslib/web/admin/views/history.py
|
2
|
import connexion
from flask import Response, jsonify
from auslib.web.admin.views.base import AdminView
from auslib.web.admin.views.problem import problem
class HistoryView(AdminView):
"""Base class for history views. Provides basics operations to get all
object revisions and revert object to specific revision.
@param table: Table.
@type table: auslib.db.AUSTable
"""
def __init__(self, table, *args, **kwargs):
self.table = table
self.history_table = table.history
super(HistoryView, self).__init__(*args, **kwargs)
def get_revisions(
self,
get_object_callback,
history_filters_callback,
revisions_order_by,
process_revisions_callback=None,
obj_not_found_msg="Requested object does not exist",
response_key="revisions",
):
"""Get revisions for Releases, Rules or ScheduledChanges.
Uses callable parameters to handle specific AUS object data.
@param get_object_callback: A callback to get requested AUS object.
@type get_object_callback: callable
        @param history_filters_callback: A callback that gets the filters list
        to query the history.
        @type history_filters_callback: callable
        @param process_revisions_callback: A callback that processes revisions
        according to the requested AUS object.
@type process_revisions_callback: callable
@param revisions_order_by: Fields list to sort history.
@type revisions_order_by: list
@param obj_not_found_msg: Error message for not found AUS object.
@type obj_not_found_msg: string
@param response_key: Dictionary key to wrap returned revisions.
@type response_key: string
"""
page = int(connexion.request.args.get("page", 1))
limit = int(connexion.request.args.get("limit", 10))
obj = get_object_callback()
if not obj:
return problem(status=404, title="Not Found", detail=obj_not_found_msg)
offset = limit * (page - 1)
filters = history_filters_callback(obj)
total_count = self.history_table.count(where=filters)
revisions = self.history_table.select(where=filters, limit=limit, offset=offset, order_by=revisions_order_by)
if process_revisions_callback:
revisions = process_revisions_callback(revisions)
ret = dict()
ret[response_key] = revisions
ret["count"] = total_count
return jsonify(ret)
def revert_to_revision(
self,
get_object_callback,
change_field,
get_what_callback,
changed_by,
response_message,
transaction,
obj_not_found_msg="Requested object does not exist",
):
"""Reverts Releases, Rules or ScheduledChanges object to specific
revision. Uses callable parameters to handle specific AUS object data.
@param get_object_callback: A callback to get requested AUS object.
@type get_object_callback: callable
@param change_field: Specific table field to match revision.
@type change_field: string
@param get_what_callback: Criteria to revert revision.
@type get_what_callback: callable
@param changed_by: User.
@type changed_by: string
@param response_message: Success message.
@type response_message: string
@param transaction: Transaction
@type transaction: auslib.db.AUSTransaction
@param obj_not_found_msg: Error message for not found AUS object.
@type obj_not_found_msg: string
"""
obj = get_object_callback()
if not obj:
return problem(404, "Not Found", obj_not_found_msg)
change_id = None
if connexion.request.get_json():
change_id = connexion.request.get_json().get("change_id")
if not change_id:
self.log.warning("Bad input: %s", "no change_id")
return problem(400, "Bad Request", "No change_id passed in the request body")
change = self.history_table.getChange(change_id=change_id)
if change is None:
return problem(400, "Bad Request", "Invalid change_id : {0} passed in the request body".format(change_id))
obj_id = obj[change_field]
if change[change_field] != obj_id:
return problem(400, "Bad Request", "Bad {0} passed in the request".format(change_field))
old_data_version = obj["data_version"]
# now we're going to make a new insert based on this
what = get_what_callback(change)
where = dict()
where[change_field] = obj_id
self.table.update(changed_by=changed_by, where=where, what=what, old_data_version=old_data_version, transaction=transaction)
return Response(response_message)
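# A minimal subclass sketch (dbo.rules, getRule and the rule_id/timestamp
# columns below are illustrative assumptions, not part of this module):
#
#     class RuleHistoryView(HistoryView):
#         def __init__(self, **kwargs):
#             super(RuleHistoryView, self).__init__(dbo.rules, **kwargs)
#
#         def get(self, rule_id):
#             return self.get_revisions(
#                 get_object_callback=lambda: self.table.getRule(rule_id),
#                 history_filters_callback=lambda rule: [
#                     self.history_table.rule_id == rule["rule_id"]],
#                 revisions_order_by=[self.history_table.timestamp.desc()],
#                 obj_not_found_msg="Requested rule does not exist",
#             )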
|
grap/OCB
|
refs/heads/7.0
|
addons/portal_claim/__init__.py
|
346
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import portal_claim
|
kostyll/micropython
|
refs/heads/master
|
tests/basics/fun_str.py
|
110
|
# test str of function
def f():
pass
# only the first 8 chars ("<functio") are portable; the tail of the repr
# differs between runs and implementations
print(str(f)[:8])
|
amirouche/hivi.mx
|
refs/heads/master
|
hivimx/hivimx/__init__.py
|
12133432
| |
sbalde/edxplatform
|
refs/heads/master
|
lms/djangoapps/lms_migration/management/commands/__init__.py
|
12133432
| |
fabricedesre/rust
|
refs/heads/master
|
src/etc/unicode.py
|
5
|
#!/usr/bin/env python
# xfail-license
# This digests UnicodeData.txt and DerivedCoreProperties.txt and emits rust
# code covering the core properties. Since this is a pretty rare event we
# just store this out-of-line and check the unicode.rs file into git.
#
# The emitted code is "the minimum we think is necessary for libstd", that
# is, to support basic operations of the compiler and "most nontrivial rust
# programs". It is not meant to be a complete implementation of unicode.
# For that we recommend you use a proper binding to libicu.
import fileinput, re, os, sys
def fetch(f):
if not os.path.exists(f):
os.system("curl -O http://www.unicode.org/Public/UNIDATA/%s"
% f)
if not os.path.exists(f):
        sys.stderr.write("cannot load %s\n" % f)
exit(1)
def load_unicode_data(f):
fetch(f)
gencats = {}
combines = []
canon_decomp = {}
compat_decomp = {}
curr_cat = ""
curr_combine = ""
c_lo = 0
c_hi = 0
com_lo = 0
com_hi = 0
for line in fileinput.input(f):
fields = line.split(";")
if len(fields) != 15:
continue
[code, name, gencat, combine, bidi,
decomp, deci, digit, num, mirror,
old, iso, upcase, lowcase, titlecase ] = fields
code = int(code, 16)
if decomp != "":
if decomp.startswith('<'):
seq = []
for i in decomp.split()[1:]:
seq.append(int(i, 16))
compat_decomp[code] = seq
else:
seq = []
for i in decomp.split():
seq.append(int(i, 16))
canon_decomp[code] = seq
if curr_cat == "":
curr_cat = gencat
c_lo = code
c_hi = code
if curr_cat == gencat:
c_hi = code
else:
if curr_cat not in gencats:
gencats[curr_cat] = []
gencats[curr_cat].append((c_lo, c_hi))
curr_cat = gencat
c_lo = code
c_hi = code
if curr_combine == "":
curr_combine = combine
com_lo = code
com_hi = code
if curr_combine == combine:
com_hi = code
else:
if curr_combine != "0":
combines.append((com_lo, com_hi, curr_combine))
curr_combine = combine
com_lo = code
com_hi = code
return (canon_decomp, compat_decomp, gencats, combines)
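# For reference, a UnicodeData.txt record is 15 semicolon-separated fields
# (code point, name, general category, combining class, bidi class,
# decomposition, three numeric fields, mirrored flag, two legacy fields and
# the three case mappings), e.g.:
#
#   0041;LATIN CAPITAL LETTER A;Lu;0;L;;;;;N;;;;0061;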
def load_properties(f, interestingprops):
fetch(f)
props = {}
    re1 = re.compile(r"^([0-9A-F]+) +; (\w+)")
    re2 = re.compile(r"^([0-9A-F]+)\.\.([0-9A-F]+) +; (\w+)")
for line in fileinput.input(f):
prop = None
d_lo = 0
d_hi = 0
m = re1.match(line)
if m:
d_lo = m.group(1)
d_hi = m.group(1)
prop = m.group(2)
else:
m = re2.match(line)
if m:
d_lo = m.group(1)
d_hi = m.group(2)
prop = m.group(3)
else:
continue
if prop not in interestingprops:
continue
d_lo = int(d_lo, 16)
d_hi = int(d_hi, 16)
if prop not in props:
props[prop] = []
props[prop].append((d_lo, d_hi))
return props
def escape_char(c):
if c <= 0xff:
return "'\\x%2.2x'" % c
if c <= 0xffff:
return "'\\u%4.4x'" % c
return "'\\U%8.8x'" % c
def ch_prefix(ix):
if ix == 0:
return " "
if ix % 2 == 0:
return ",\n "
else:
return ", "
def emit_bsearch_range_table(f):
f.write("""
fn bsearch_range_table(c: char, r: &'static [(char,char)]) -> bool {
use cmp::{Equal, Less, Greater};
use vec::ImmutableVector;
use option::None;
r.bsearch(|&(lo,hi)| {
if lo <= c && c <= hi { Equal }
else if hi < c { Less }
else { Greater }
}) != None
}\n\n
""");
def emit_property_module(f, mod, tbl):
f.write("pub mod %s {\n" % mod)
keys = tbl.keys()
keys.sort()
emit_bsearch_range_table(f);
for cat in keys:
if cat == "Cs": continue
f.write(" static %s_table : &'static [(char,char)] = &[\n" % cat)
ix = 0
for pair in tbl[cat]:
f.write(ch_prefix(ix))
f.write("(%s, %s)" % (escape_char(pair[0]), escape_char(pair[1])))
ix += 1
f.write("\n ];\n\n")
f.write(" pub fn %s(c: char) -> bool {\n" % cat)
f.write(" bsearch_range_table(c, %s_table)\n" % cat)
f.write(" }\n\n")
f.write("}\n")
def emit_property_module_old(f, mod, tbl):
f.write("mod %s {\n" % mod)
keys = tbl.keys()
keys.sort()
for cat in keys:
f.write(" fn %s(c: char) -> bool {\n" % cat)
f.write(" ret alt c {\n")
prefix = ' '
for pair in tbl[cat]:
if pair[0] == pair[1]:
f.write(" %c %s\n" %
(prefix, escape_char(pair[0])))
else:
f.write(" %c %s to %s\n" %
(prefix,
escape_char(pair[0]),
escape_char(pair[1])))
prefix = '|'
f.write(" { true }\n")
f.write(" _ { false }\n")
f.write(" };\n")
f.write(" }\n\n")
f.write("}\n")
def format_table_content(f, content, indent):
line = " "*indent
first = True
for chunk in content.split(","):
if len(line) + len(chunk) < 98:
if first:
line += chunk
else:
line += ", " + chunk
first = False
else:
f.write(line + ",\n")
line = " "*indent + chunk
f.write(line)
def emit_decomp_module(f, canon, compat, combine):
canon_keys = canon.keys()
canon_keys.sort()
compat_keys = compat.keys()
compat_keys.sort()
    f.write("pub mod decompose {\n")
    f.write(" use option::Option;\n")
    f.write(" use option::{Some, None};\n")
    f.write(" use vec::ImmutableVector;\n")
f.write("""
fn bsearch_table(c: char, r: &'static [(char, &'static [char])]) -> Option<&'static [char]> {
use cmp::{Equal, Less, Greater};
match r.bsearch(|&(val, _)| {
if c == val { Equal }
else if val < c { Less }
else { Greater }
}) {
Some(idx) => {
let (_, result) = r[idx];
Some(result)
}
None => None
}
}\n
""")
f.write("""
fn bsearch_range_value_table(c: char, r: &'static [(char, char, u8)]) -> u8 {
use cmp::{Equal, Less, Greater};
match r.bsearch(|&(lo, hi, _)| {
if lo <= c && c <= hi { Equal }
else if hi < c { Less }
else { Greater }
}) {
Some(idx) => {
let (_, _, result) = r[idx];
result
}
None => 0
}
}\n\n
""")
f.write(" // Canonical decompositions\n")
f.write(" static canonical_table : &'static [(char, &'static [char])] = &[\n")
data = ""
first = True
for char in canon_keys:
if not first:
data += ","
first = False
data += "(%s,&[" % escape_char(char)
first2 = True
for d in canon[char]:
if not first2:
data += ","
first2 = False
data += escape_char(d)
data += "])"
format_table_content(f, data, 8)
f.write("\n ];\n\n")
f.write(" // Compatibility decompositions\n")
f.write(" static compatibility_table : &'static [(char, &'static [char])] = &[\n")
data = ""
first = True
for char in compat_keys:
if not first:
data += ","
first = False
data += "(%s,&[" % escape_char(char)
first2 = True
for d in compat[char]:
if not first2:
data += ","
first2 = False
data += escape_char(d)
data += "])"
format_table_content(f, data, 8)
f.write("\n ];\n\n")
f.write(" static combining_class_table : &'static [(char, char, u8)] = &[\n")
ix = 0
for pair in combine:
f.write(ch_prefix(ix))
f.write("(%s, %s, %s)" % (escape_char(pair[0]), escape_char(pair[1]), pair[2]))
ix += 1
f.write("\n ];\n")
f.write(" pub fn canonical(c: char, i: |char|) "
+ "{ d(c, i, false); }\n\n")
f.write(" pub fn compatibility(c: char, i: |char|) "
+"{ d(c, i, true); }\n\n")
f.write(" pub fn canonical_combining_class(c: char) -> u8 {\n"
+ " bsearch_range_value_table(c, combining_class_table)\n"
+ " }\n\n")
f.write(" fn d(c: char, i: |char|, k: bool) {\n")
    f.write(" use iter::Iterator;\n")
f.write(" if c <= '\\x7f' { i(c); return; }\n")
# First check the canonical decompositions
f.write("""
match bsearch_table(c, canonical_table) {
Some(canon) => {
for x in canon.iter() {
d(*x, |b| i(b), k);
}
return;
}
None => ()
}\n\n""")
# Bottom out if we're not doing compat.
f.write(" if !k { i(c); return; }\n")
# Then check the compatibility decompositions
f.write("""
match bsearch_table(c, compatibility_table) {
Some(compat) => {
for x in compat.iter() {
d(*x, |b| i(b), k);
}
return;
}
None => ()
}\n\n""")
# Finally bottom out.
f.write(" i(c);\n")
f.write(" }\n")
f.write("}\n\n")
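# For example, the generated decompose::canonical applied to '\xe9' yields
# 'e' then the combining acute accent: U+00E9 canonically decomposes to
# U+0065 U+0301, and d() recurses until no further decomposition applies.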
r = "unicode.rs"
for i in [r]:
if os.path.exists(i):
        os.remove(i)
rf = open(r, "w")
(canon_decomp, compat_decomp, gencats, combines) = load_unicode_data("UnicodeData.txt")
# Preamble
rf.write('''// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// The following code was generated by "src/etc/unicode.py"
#[allow(missing_doc)];
#[allow(non_uppercase_statics)];
''')
emit_property_module(rf, "general_category", gencats)
emit_decomp_module(rf, canon_decomp, compat_decomp, combines)
derived = load_properties("DerivedCoreProperties.txt",
["XID_Start", "XID_Continue", "Alphabetic", "Lowercase", "Uppercase"])
emit_property_module(rf, "derived_property", derived)
props = load_properties("PropList.txt", ["White_Space"])
emit_property_module(rf, "property", props)
|
lab305itep/linux
|
refs/heads/vme_patches2_rebase2
|
scripts/gdb/linux/tasks.py
|
630
|
#
# gdb helper commands and functions for Linux kernel debugging
#
# task & thread tools
#
# Copyright (c) Siemens AG, 2011-2013
#
# Authors:
# Jan Kiszka <jan.kiszka@siemens.com>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
from linux import utils
task_type = utils.CachedType("struct task_struct")
def task_lists():
task_ptr_type = task_type.get_type().pointer()
init_task = gdb.parse_and_eval("init_task").address
t = g = init_task
while True:
while True:
yield t
t = utils.container_of(t['thread_group']['next'],
task_ptr_type, "thread_group")
if t == g:
break
t = g = utils.container_of(g['tasks']['next'],
task_ptr_type, "tasks")
if t == init_task:
return
def get_task_by_pid(pid):
for task in task_lists():
if int(task['pid']) == pid:
return task
return None
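# Illustrative use of the generators above (a hypothetical snippet, not part
# of the helpers): counting every task and thread known to the target kernel.
#   num_tasks = sum(1 for _ in task_lists())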
class LxTaskByPidFunc(gdb.Function):
"""Find Linux task by PID and return the task_struct variable.
$lx_task_by_pid(PID): Given PID, iterate over all tasks of the target and
return the task_struct variable whose PID matches."""
def __init__(self):
super(LxTaskByPidFunc, self).__init__("lx_task_by_pid")
def invoke(self, pid):
task = get_task_by_pid(pid)
if task:
return task.dereference()
else:
            raise gdb.GdbError("No task with PID " + str(pid))
LxTaskByPidFunc()
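# Example from the gdb prompt:
#   (gdb) p $lx_task_by_pid(1)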
class LxPs(gdb.Command):
"""Dump Linux tasks."""
def __init__(self):
super(LxPs, self).__init__("lx-ps", gdb.COMMAND_DATA)
def invoke(self, arg, from_tty):
for task in task_lists():
gdb.write("{address} {pid} {comm}\n".format(
address=task,
pid=task["pid"],
comm=task["comm"].string()))
LxPs()
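# "lx-ps" prints one "<address> <pid> <comm>" line per task, e.g.
# (illustrative output): 0xffff88003c8a0000 1 init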
thread_info_type = utils.CachedType("struct thread_info")
ia64_task_size = None
def get_thread_info(task):
thread_info_ptr_type = thread_info_type.get_type().pointer()
if utils.is_target_arch("ia64"):
global ia64_task_size
if ia64_task_size is None:
ia64_task_size = gdb.parse_and_eval("sizeof(struct task_struct)")
thread_info_addr = task.address + ia64_task_size
thread_info = thread_info_addr.cast(thread_info_ptr_type)
else:
thread_info = task['stack'].cast(thread_info_ptr_type)
return thread_info.dereference()
class LxThreadInfoFunc(gdb.Function):
"""Calculate Linux thread_info from task variable.
$lx_thread_info(TASK): Given TASK, return the corresponding thread_info
variable."""
def __init__(self):
super(LxThreadInfoFunc, self).__init__("lx_thread_info")
def invoke(self, task):
return get_thread_info(task)
LxThreadInfoFunc()
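# Example from the gdb prompt, combining both helper functions:
#   (gdb) p $lx_thread_info($lx_task_by_pid(1))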
|
twobraids/socorro
|
refs/heads/master
|
socorro/unittest/external/postgresql/test_graphics_devices.py
|
11
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
from nose.plugins.attrib import attr
from nose.tools import eq_, assert_raises
from socorro.external import MissingArgumentError
from socorro.external.postgresql.graphics_devices import GraphicsDevices
from .unittestbase import PostgreSQLTestCase
#==============================================================================
@attr(integration='postgres') # for nosetests
class IntegrationTestGraphicsDevices(PostgreSQLTestCase):
def tearDown(self):
""" Cleanup the database, delete tables and functions """
cursor = self.connection.cursor()
cursor.execute("""
TRUNCATE graphics_device
CASCADE
""")
self.connection.commit()
super(IntegrationTestGraphicsDevices, self).tearDown()
def _insert(self, vendor_hex, adapter_hex,
vendor_name='', adapter_name=''):
assert vendor_hex and adapter_hex
assert vendor_name or adapter_name
sql = """
INSERT INTO graphics_device (
vendor_hex,
adapter_hex,
vendor_name,
adapter_name
) VALUES (%s, %s, %s, %s)
"""
cursor = self.connection.cursor()
params = (vendor_hex, adapter_hex, vendor_name, adapter_name)
cursor.execute(sql, params)
self.connection.commit()
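    # Illustrative call (mirroring the tests below):
    #   self._insert('0x1002', '0x0166',
    #                vendor_name='Logitech Inc.',
    #                adapter_name='Unknown Webcam Pro 9000')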
def test_get(self):
"""returning rows by matching vendor_hex and adapter_hex"""
api = GraphicsDevices(config=self.config)
params = {
'vendor_hex': '0x1002',
'adapter_hex': '0x0166',
}
res = api.get(**params)
res_expected = {
'hits': [],
'total': 0
}
eq_(res, res_expected)
# insert something similar
self._insert(
'0x1002', '0x0166',
vendor_name='Logitech Inc.',
adapter_name='Unknown Webcam Pro 9000'
)
self._insert(
'0x1002', '0xc064',
vendor_name='Logitech Inc.',
adapter_name='k251d DELL 6-Button mouse'
)
self._insert(
'0x1222', '0x0166',
vendor_name='Chicony Electronics Co.',
adapter_name='Unknown Webcam Pro 9000'
)
# now we should get something
res = api.get(**params)
res_expected = {
'hits': [{
'vendor_hex': '0x1002',
'adapter_hex': '0x0166',
'vendor_name': 'Logitech Inc.',
'adapter_name': 'Unknown Webcam Pro 9000'
}],
'total': 1
}
eq_(res, res_expected)
def test_get_missing_arguments(self):
"""on .get() the adapter_hex and the vendor_hex is mandatory"""
api = GraphicsDevices(config=self.config)
assert_raises(
MissingArgumentError,
api.get
)
assert_raises(
MissingArgumentError,
api.get,
adapter_hex='something'
)
assert_raises(
MissingArgumentError,
api.get,
vendor_hex='something'
)
assert_raises(
MissingArgumentError,
api.get,
vendor_hex='something',
adapter_hex='' # empty!
)
assert_raises(
MissingArgumentError,
api.get,
vendor_hex='', # empty!
adapter_hex='something'
)
def test_post_insert(self):
payload = [
{
'vendor_hex': '0x1002',
'adapter_hex': '0x0166',
'vendor_name': 'Logitech Inc.',
'adapter_name': 'Unknown Webcam Pro 9000'
},
]
api = GraphicsDevices(config=self.config)
res = api.post(data=json.dumps(payload))
eq_(res, True)
cursor = self.connection.cursor()
cursor.execute("""
select vendor_hex, adapter_hex, vendor_name, adapter_name
from graphics_device
order by vendor_hex, adapter_hex
""")
expect = []
keys = 'vendor_hex', 'adapter_hex', 'vendor_name', 'adapter_name'
for row in cursor.fetchall():
expect.append(dict(zip(keys, row)))
eq_(expect, payload)
def test_post_update(self):
self._insert(
'0x1002', '0x0166',
vendor_name='Logitech Inc.',
adapter_name='Unknown Webcam Pro 9000'
)
payload = [
{
'vendor_hex': '0x1002',
'adapter_hex': '0x0166',
'vendor_name': 'Logitech Inc.',
'adapter_name': 'Known Webcam Pro 10000' # the change
}
]
api = GraphicsDevices(config=self.config)
res = api.post(data=json.dumps(payload))
eq_(res, True)
cursor = self.connection.cursor()
cursor.execute("""
select vendor_hex, adapter_hex, vendor_name, adapter_name
from graphics_device
order by vendor_hex, adapter_hex
""")
expect = []
keys = 'vendor_hex', 'adapter_hex', 'vendor_name', 'adapter_name'
for row in cursor.fetchall():
expect.append(dict(zip(keys, row)))
eq_(expect, payload)
def test_post_upsert(self):
"""on .post() every item you send in the payload causes an upsert"""
        # first, insert rows that the subsequent upsert will either leave
        # unchanged or update in place
self._insert(
'0x1002', '0x0166',
vendor_name='Logitech Inc.',
adapter_name='Unknown Webcam Pro 9000'
)
self._insert(
'0x1222', '0x0166',
vendor_name='Chicony Electronics Co.',
adapter_name='Unknown Webcam Pro 9000'
)
# note, this is conveniently sorted by
# vendor_hex followed by adapter_hex
payload = [
{
'vendor_hex': '0x1002',
'adapter_hex': '0x0166',
'vendor_name': 'Logitech Inc.',
'adapter_name': 'Unknown Webcam Pro 9000'
},
{
'vendor_hex': '0x1222',
'adapter_hex': '0x0166',
'vendor_name': 'Chicony Electronics Co.',
'adapter_name': 'Something else'
},
{
'vendor_hex': '0x1333',
'adapter_hex': '0x0177',
'vendor_name': 'IBM',
'adapter_name': ''
},
]
api = GraphicsDevices(config=self.config)
res = api.post(data=json.dumps(payload))
eq_(res, True)
cursor = self.connection.cursor()
cursor.execute("""
select vendor_hex, adapter_hex, vendor_name, adapter_name
from graphics_device
order by vendor_hex, adapter_hex
""")
expect = []
keys = 'vendor_hex', 'adapter_hex', 'vendor_name', 'adapter_name'
for row in cursor.fetchall():
expect.append(dict(zip(keys, row)))
eq_(expect, payload)
def test_post_fail(self):
payload = [
{
'rubbish': 'Crap'
},
]
api = GraphicsDevices(config=self.config)
res = api.post(data=json.dumps(payload))
eq_(res, False)
|
samthor/intellij-community
|
refs/heads/master
|
python/testData/formatter/wrapAssignment_after.py
|
83
|
current_report_group = status_reports.values('report_group_id').annotate(rcount=Count('report_group_id')).order_by(
"-report_group_id")[:1]
|
orangeduck/PyAutoC
|
refs/heads/master
|
Python27/Lib/lib2to3/fixes/fix_future.py
|
529
|
"""Remove __future__ imports
from __future__ import foo is replaced with an empty line.
"""
# Author: Christian Heimes
# Local imports
from .. import fixer_base
from ..fixer_util import BlankLine
class FixFuture(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """import_from< 'from' module_name="__future__" 'import' any >"""
# This should be run last -- some things check for the import
run_order = 10
def transform(self, node, results):
new = BlankLine()
new.prefix = node.prefix
return new
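# For example (illustrative), running this fixer turns
#     from __future__ import print_function
# into a blank line; node.prefix is copied so leading comments survive.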
|
gurneyalex/odoo
|
refs/heads/13.0-improve_sale_coupon_perf
|
addons/test_mail_full/tests/test_sms_composer.py
|
5
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.sms.tests import common as sms_common
from odoo.addons.test_mail_full.tests import common as test_mail_full_common
class TestSMSComposerComment(test_mail_full_common.BaseFunctionalTest, sms_common.MockSMS, test_mail_full_common.TestRecipients):
""" TODO LIST
* add test for default_res_model / default_res_id and stuff like that;
* add test for comment put in queue;
* add test for language support (set template lang context);
* add test for sanitized / wrong numbers;
"""
@classmethod
def setUpClass(cls):
super(TestSMSComposerComment, cls).setUpClass()
cls._test_body = 'VOID CONTENT'
cls.test_record = cls.env['mail.test.sms'].with_context(**cls._test_context).create({
'name': 'Test',
'customer_id': cls.partner_1.id,
'mobile_nbr': cls.test_numbers[0],
'phone_nbr': cls.test_numbers[1],
})
cls.test_record = cls._reset_mail_context(cls.test_record)
cls.sms_template = cls.env['sms.template'].create({
'name': 'Test Template',
'model_id': cls.env['ir.model']._get('mail.test.sms').id,
'body': 'Dear ${object.display_name} this is an SMS.',
})
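    # Note: for the record above (name 'Test') the template body
    # 'Dear ${object.display_name} this is an SMS.' should render as
    # 'Dear Test this is an SMS.', assuming display_name falls back to name.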
def test_composer_comment_not_mail_thread(self):
with self.sudo('employee'):
record = self.env['test_performance.base'].create({'name': 'TestBase'})
composer = self.env['sms.composer'].with_context(
active_model='test_performance.base', active_id=record.id
).create({
'body': self._test_body,
'numbers': ','.join(self.random_numbers),
})
with self.mockSMSGateway():
composer._action_send_sms()
self.assertSMSSent(self.random_numbers_san, self._test_body)
def test_composer_comment_default(self):
with self.sudo('employee'):
composer = self.env['sms.composer'].with_context(
active_model='mail.test.sms', active_id=self.test_record.id
).create({
'body': self._test_body,
})
with self.mockSMSGateway():
messages = composer._action_send_sms()
self.assertSMSNotification([{'partner': self.test_record.customer_id, 'number': self.test_numbers_san[1]}], self._test_body, messages)
def test_composer_comment_field_1(self):
with self.sudo('employee'):
composer = self.env['sms.composer'].with_context(
active_model='mail.test.sms', active_id=self.test_record.id,
).create({
'body': self._test_body,
'number_field_name': 'mobile_nbr',
})
with self.mockSMSGateway():
messages = composer._action_send_sms()
self.assertSMSNotification([{'partner': self.test_record.customer_id, 'number': self.test_numbers_san[0]}], self._test_body, messages)
def test_composer_comment_field_2(self):
with self.sudo('employee'):
composer = self.env['sms.composer'].with_context(
active_model='mail.test.sms', active_id=self.test_record.id,
).create({
'body': self._test_body,
'number_field_name': 'phone_nbr',
})
with self.mockSMSGateway():
messages = composer._action_send_sms()
self.assertSMSNotification([{'partner': self.test_record.customer_id, 'number': self.test_numbers_san[1]}], self._test_body, messages)
def test_composer_comment_field_w_numbers(self):
with self.sudo('employee'):
composer = self.env['sms.composer'].with_context(
active_model='mail.test.sms', active_id=self.test_record.id,
default_number_field_name='mobile_nbr',
).create({
'body': self._test_body,
'numbers': ','.join(self.random_numbers),
})
with self.mockSMSGateway():
messages = composer._action_send_sms()
self.assertSMSNotification([
{'partner': self.test_record.customer_id, 'number': self.test_record.mobile_nbr},
{'number': self.random_numbers_san[0]}, {'number': self.random_numbers_san[1]}], self._test_body, messages)
def test_composer_comment_field_w_template(self):
with self.sudo('employee'):
composer = self.env['sms.composer'].with_context(
active_model='mail.test.sms', active_id=self.test_record.id,
default_template_id=self.sms_template.id,
default_number_field_name='mobile_nbr',
).create({})
with self.mockSMSGateway():
messages = composer._action_send_sms()
self.assertSMSNotification([{'partner': self.test_record.customer_id, 'number': self.test_record.mobile_nbr}], 'Dear %s this is an SMS.' % self.test_record.display_name, messages)
def test_composer_numbers_no_model(self):
with self.sudo('employee'):
composer = self.env['sms.composer'].with_context(
default_composition_mode='numbers'
).create({
'body': self._test_body,
'numbers': ','.join(self.random_numbers),
})
with self.mockSMSGateway():
composer._action_send_sms()
self.assertSMSSent(self.random_numbers_san, self._test_body)
class TestSMSComposerBatch(test_mail_full_common.BaseFunctionalTest, sms_common.MockSMS):
@classmethod
def setUpClass(cls):
super(TestSMSComposerBatch, cls).setUpClass()
cls._test_body = 'Zizisse an SMS.'
cls._create_records_for_batch('mail.test.sms', 3)
cls.sms_template = cls._create_sms_template('mail.test.sms')
def test_composer_batch_active_domain(self):
with self.sudo('employee'):
composer = self.env['sms.composer'].with_context(
default_composition_mode='comment',
default_res_model='mail.test.sms',
default_use_active_domain=True,
active_domain=[('id', 'in', self.records.ids)],
).create({
'body': self._test_body,
})
with self.mockSMSGateway():
messages = composer._action_send_sms()
        # the notification covers all records at once, so a single assertion suffices
        self.assertSMSNotification([{'partner': r.customer_id} for r in self.records], 'Zizisse an SMS.', messages)
def test_composer_batch_active_ids(self):
with self.sudo('employee'):
composer = self.env['sms.composer'].with_context(
default_composition_mode='comment',
default_res_model='mail.test.sms',
active_ids=self.records.ids
).create({
'body': self._test_body,
})
with self.mockSMSGateway():
messages = composer._action_send_sms()
        self.assertSMSNotification([{'partner': r.customer_id} for r in self.records], 'Zizisse an SMS.', messages)
def test_composer_batch_domain(self):
with self.sudo('employee'):
composer = self.env['sms.composer'].with_context(
default_composition_mode='comment',
default_res_model='mail.test.sms',
default_use_active_domain=True,
default_active_domain=repr([('id', 'in', self.records.ids)]),
).create({
'body': self._test_body,
})
with self.mockSMSGateway():
messages = composer._action_send_sms()
        self.assertSMSNotification([{'partner': r.customer_id} for r in self.records], 'Zizisse an SMS.', messages)
def test_composer_batch_res_ids(self):
with self.sudo('employee'):
composer = self.env['sms.composer'].with_context(
default_composition_mode='comment',
default_res_model='mail.test.sms',
default_res_ids=repr(self.records.ids),
).create({
'body': self._test_body,
})
with self.mockSMSGateway():
messages = composer._action_send_sms()
        self.assertSMSNotification([{'partner': r.customer_id} for r in self.records], 'Zizisse an SMS.', messages)
class TestSMSComposerMass(test_mail_full_common.BaseFunctionalTest, sms_common.MockSMS):
@classmethod
def setUpClass(cls):
super(TestSMSComposerMass, cls).setUpClass()
cls._test_body = 'Zizisse an SMS.'
cls._create_records_for_batch('mail.test.sms', 3)
cls.sms_template = cls._create_sms_template('mail.test.sms')
def test_composer_mass_active_domain(self):
with self.sudo('employee'):
composer = self.env['sms.composer'].with_context(
default_composition_mode='mass',
default_res_model='mail.test.sms',
default_use_active_domain=True,
active_domain=[('id', 'in', self.records.ids)],
).create({
'body': self._test_body,
'mass_keep_log': False,
})
with self.mockSMSGateway():
composer.action_send_sms()
for record in self.records:
self.assertSMSOutgoing(record.customer_id, None, self._test_body)
def test_composer_mass_active_domain_w_template(self):
with self.sudo('employee'):
composer = self.env['sms.composer'].with_context(
default_composition_mode='mass',
default_res_model='mail.test.sms',
default_use_active_domain=True,
active_domain=[('id', 'in', self.records.ids)],
default_template_id=self.sms_template.id,
).create({
'mass_keep_log': False,
})
with self.mockSMSGateway():
composer.action_send_sms()
for record in self.records:
self.assertSMSOutgoing(record.customer_id, None, 'Dear %s this is an SMS.' % record.display_name)
def test_composer_mass_active_ids(self):
with self.sudo('employee'):
composer = self.env['sms.composer'].with_context(
default_composition_mode='mass',
default_res_model='mail.test.sms',
active_ids=self.records.ids,
).create({
'body': self._test_body,
'mass_keep_log': False,
})
with self.mockSMSGateway():
composer.action_send_sms()
for partner in self.partners:
self.assertSMSOutgoing(partner, None, self._test_body)
def test_composer_mass_active_ids_w_blacklist(self):
self.env['phone.blacklist'].create([{
'number': p.phone_sanitized,
'active': True,
} for p in self.partners[:5]])
with self.sudo('employee'):
composer = self.env['sms.composer'].with_context(
default_composition_mode='mass',
default_res_model='mail.test.sms',
active_ids=self.records.ids,
).create({
'body': self._test_body,
'mass_keep_log': False,
'mass_use_blacklist': True,
})
with self.mockSMSGateway():
composer.action_send_sms()
for partner in self.partners[5:]:
self.assertSMSOutgoing(partner, partner.phone_sanitized, content=self._test_body)
for partner in self.partners[:5]:
self.assertSMSCanceled(partner, partner.phone_sanitized, 'sms_blacklist', content=self._test_body)
def test_composer_mass_active_ids_wo_blacklist(self):
self.env['phone.blacklist'].create([{
'number': p.phone_sanitized,
'active': True,
} for p in self.partners[:5]])
with self.sudo('employee'):
composer = self.env['sms.composer'].with_context(
default_composition_mode='mass',
default_res_model='mail.test.sms',
active_ids=self.records.ids,
).create({
'body': self._test_body,
'mass_keep_log': False,
'mass_use_blacklist': False,
})
with self.mockSMSGateway():
composer.action_send_sms()
for partner in self.partners:
self.assertSMSOutgoing(partner, partner.phone_sanitized, content=self._test_body)
def test_composer_mass_active_ids_w_blacklist_and_done(self):
self.env['phone.blacklist'].create([{
'number': p.phone_sanitized,
'active': True,
} for p in self.partners[:5]])
for p in self.partners[8:]:
p.mobile = self.partners[8].mobile
self.assertEqual(p.phone_sanitized, self.partners[8].phone_sanitized)
with self.sudo('employee'):
composer = self.env['sms.composer'].with_context(
default_composition_mode='mass',
default_res_model='mail.test.sms',
active_ids=self.records.ids,
).create({
'body': self._test_body,
'mass_keep_log': False,
'mass_use_blacklist': True,
})
with self.mockSMSGateway():
composer.action_send_sms()
for partner in self.partners[8:]:
self.assertSMSOutgoing(partner, partner.phone_sanitized, content=self._test_body)
for partner in self.partners[5:8]:
self.assertSMSCanceled(partner, partner.phone_sanitized, 'sms_duplicate', content=self._test_body)
for partner in self.partners[:5]:
self.assertSMSCanceled(partner, partner.phone_sanitized, 'sms_blacklist', content=self._test_body)
def test_composer_mass_active_ids_w_template(self):
with self.sudo('employee'):
composer = self.env['sms.composer'].with_context(
default_composition_mode='mass',
default_res_model='mail.test.sms',
active_ids=self.records.ids,
default_template_id=self.sms_template.id,
).create({
'mass_keep_log': False,
})
with self.mockSMSGateway():
composer.action_send_sms()
for record in self.records:
self.assertSMSOutgoing(record.customer_id, None, 'Dear %s this is an SMS.' % record.display_name)
def test_composer_mass_active_ids_w_template_and_lang(self):
self.env.ref('base.lang_fr').write({'active': True})
self.env['ir.translation'].create({
'type': 'model',
'name': 'sms.template,body',
'lang': 'fr_FR',
'res_id': self.sms_template.id,
'src': self.sms_template.body,
'value': 'Cher·e· ${object.display_name} ceci est un SMS.',
})
# set template to try to use customer lang
self.sms_template.write({
'lang': '${object.customer_id.lang}',
})
# set one customer as french speaking
self.partners[2].write({'lang': 'fr_FR'})
with self.sudo('employee'):
composer = self.env['sms.composer'].with_context(
default_composition_mode='mass',
default_res_model='mail.test.sms',
active_ids=self.records.ids,
default_template_id=self.sms_template.id,
).create({
'mass_keep_log': False,
})
with self.mockSMSGateway():
composer.action_send_sms()
for record in self.records:
if record.customer_id == self.partners[2]:
self.assertSMSOutgoing(record.customer_id, None, 'Cher·e· %s ceci est un SMS.' % record.display_name)
else:
self.assertSMSOutgoing(record.customer_id, None, 'Dear %s this is an SMS.' % record.display_name)
def test_composer_mass_active_ids_w_template_and_log(self):
with self.sudo('employee'):
composer = self.env['sms.composer'].with_context(
default_composition_mode='mass',
default_res_model='mail.test.sms',
active_ids=self.records.ids,
default_template_id=self.sms_template.id,
).create({
'mass_keep_log': True,
})
with self.mockSMSGateway():
composer.action_send_sms()
for record in self.records:
self.assertSMSOutgoing(record.customer_id, None, 'Dear %s this is an SMS.' % record.display_name)
self.assertSMSLogged(record, 'Dear %s this is an SMS.' % record.display_name)
def test_composer_template_context_action(self):
""" Test the context action from a SMS template (Add context action button)
and the usage with the sms composer """
# Create the lang info
self.env.ref('base.lang_fr').write({'active': True})
self.env['ir.translation'].create({
'type': 'model',
'name': 'sms.template,body',
'lang': 'fr_FR',
'res_id': self.sms_template.id,
'src': self.sms_template.body,
'value': "Hello ${object.display_name} ceci est en français.",
})
# set template to try to use customer lang
self.sms_template.write({
'lang': '${object.customer_id.lang}',
})
# create a second record linked to a customer in another language
self.partners[2].write({'lang': 'fr_FR'})
test_record_2 = self.env['mail.test.sms'].create({
'name': 'Test',
'customer_id': self.partners[2].id,
})
test_record_1 = self.env['mail.test.sms'].create({
'name': 'Test',
'customer_id': self.partners[1].id,
})
# Composer creation with context from a template context action (simulate) - comment (single recipient)
with self.sudo('employee'):
composer = self.env['sms.composer'].with_context(
default_composition_mode='guess',
default_res_ids=[test_record_2.id],
default_res_id=test_record_2.id,
active_ids=[test_record_2.id],
active_id=test_record_2.id,
active_model='mail.test.sms',
default_template_id=self.sms_template.id,
).create({
'mass_keep_log': False,
})
            # manually trigger the onchange
            composer._onchange_template_id()
            self.assertEqual(composer.composition_mode, "comment")
            self.assertEqual(composer.body, "Hello %s ceci est en français." % test_record_2.display_name)
with self.mockSMSGateway():
messages = composer._action_send_sms()
number = self.partners[2].phone_get_sanitized_number()
self.assertSMSNotification([{'partner': test_record_2.customer_id, 'number': number}], "Hello %s ceci est en français." % test_record_2.display_name, messages)
# Composer creation with context from a template context action (simulate) - mass (multiple recipient)
with self.sudo('employee'):
composer = self.env['sms.composer'].with_context(
default_composition_mode='guess',
default_res_ids=[test_record_1.id, test_record_2.id],
default_res_id=test_record_1.id,
active_ids=[test_record_1.id, test_record_2.id],
active_id=test_record_1.id,
active_model='mail.test.sms',
default_template_id=self.sms_template.id,
).create({
'mass_keep_log': True,
})
            # manually trigger the onchange
            composer._onchange_template_id()
            self.assertEqual(composer.composition_mode, "mass")
            # the default body stays in English; the per-record language is applied when sending
            self.assertEqual(composer.body, "Dear ${object.display_name} this is an SMS.")
with self.mockSMSGateway():
composer.action_send_sms()
self.assertSMSOutgoing(test_record_1.customer_id, None, 'Dear %s this is an SMS.' % test_record_1.display_name)
self.assertSMSOutgoing(test_record_2.customer_id, None, "Hello %s ceci est en français." % test_record_2.display_name)
|