text stringlengths 4 1.02M | meta dict |
|---|---|
import logging
import numpy as np
import time

from ray.rllib.models.jax.jax_modelv2 import JAXModelV2
from ray.rllib.models.jax.misc import SlimFC
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_jax

# jax/flax are optional dependencies; presumably `try_import_jax` returns
# placeholders when they are unavailable — confirm against the framework util.
jax, flax = try_import_jax()

logger = logging.getLogger(__name__)
class FullyConnectedNetwork(JAXModelV2):
    """Generic fully connected network (JAX/flax version).

    Builds a stack of `SlimFC` hidden layers from `model_config`
    ("fcnet_hiddens" / "fcnet_activation"), an optional final logits layer,
    and a value branch that either shares the hidden layers
    (`vf_share_layers=True`) or is a parallel stack of its own.
    """

    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name):
        super().__init__(obs_space, action_space, num_outputs, model_config,
                         name)

        # PRNG key seeded from wall-clock time (runs are non-deterministic).
        self.key = jax.random.PRNGKey(int(time.time()))

        activation = model_config.get("fcnet_activation")
        hiddens = model_config.get("fcnet_hiddens", [])
        no_final_linear = model_config.get("no_final_linear")
        self.vf_share_layers = model_config.get("vf_share_layers")
        self.free_log_std = model_config.get("free_log_std")

        # Generate free-floating bias variables for the second half of
        # the outputs.
        if self.free_log_std:
            assert num_outputs % 2 == 0, (
                "num_outputs must be divisible by two", num_outputs)
            num_outputs = num_outputs // 2

        self._hidden_layers = []
        prev_layer_size = int(np.product(obs_space.shape))
        self._logits = None

        # Create layers 0 to second-last.
        for size in hiddens[:-1]:
            self._hidden_layers.append(
                SlimFC(
                    in_size=prev_layer_size,
                    out_size=size,
                    activation_fn=activation))
            prev_layer_size = size

        # The last layer is adjusted to be of size num_outputs, but it's a
        # layer with activation.
        if no_final_linear and num_outputs:
            self._hidden_layers.append(
                SlimFC(
                    in_size=prev_layer_size,
                    out_size=num_outputs,
                    activation_fn=activation))
            prev_layer_size = num_outputs
        # Finish the layers with the provided sizes (`hiddens`), plus -
        # iff num_outputs > 0 - a last linear layer of size num_outputs.
        else:
            if len(hiddens) > 0:
                self._hidden_layers.append(
                    SlimFC(
                        in_size=prev_layer_size,
                        out_size=hiddens[-1],
                        activation_fn=activation))
                prev_layer_size = hiddens[-1]
            if num_outputs:
                self._logits = SlimFC(
                    in_size=prev_layer_size,
                    out_size=num_outputs,
                    activation_fn=None)
            else:
                # No logits layer requested: expose the last hidden size
                # (or the flattened obs size if there are no hiddens).
                self.num_outputs = (
                    [int(np.product(obs_space.shape))] + hiddens[-1:])[-1]

        # Layer to add the log std vars to the state-dependent means.
        if self.free_log_std and self._logits:
            raise ValueError("`free_log_std` not supported for JAX yet!")

        self._value_branch_separate = None
        if not self.vf_share_layers:
            # Build a parallel set of hidden layers for the value net.
            prev_vf_layer_size = int(np.product(obs_space.shape))
            vf_layers = []
            for size in hiddens:
                vf_layers.append(
                    SlimFC(
                        in_size=prev_vf_layer_size,
                        out_size=size,
                        activation_fn=activation,
                    ))
                prev_vf_layer_size = size
            self._value_branch_separate = vf_layers

        # NOTE(review): `prev_layer_size` here is the shared-stack output
        # size; when the value branch is separate with a different last
        # hidden size, verify this matches `vf_layers[-1]`'s out_size.
        self._value_branch = SlimFC(
            in_size=prev_layer_size, out_size=1, activation_fn=None)
        # Holds the current "base" output (before logits layer).
        self._features = None
        # Holds the last input, in case value branch is separate.
        self._last_flat_in = None

    @override(JAXModelV2)
    def forward(self, input_dict, state, seq_lens):
        """Run the hidden stack on `obs_flat` and return (logits, state)."""
        self._last_flat_in = input_dict["obs_flat"]
        x = self._last_flat_in
        for layer in self._hidden_layers:
            x = layer(x)
        self._features = x
        logits = self._logits(self._features) if self._logits else \
            self._features
        if self.free_log_std:
            logits = self._append_free_log_std(logits)
        return logits, state

    @override(JAXModelV2)
    def value_function(self):
        """Return the value estimate for the last `forward()` input."""
        assert self._features is not None, "must call forward() first"
        if self._value_branch_separate:
            # BUG FIX: `_value_branch_separate` is a plain list of SlimFC
            # layers (built in __init__) and is not itself callable; the
            # original code called it like a function, raising TypeError
            # whenever vf_share_layers was False. Apply the layers in
            # sequence, mirroring forward()'s hidden-layer loop.
            x = self._last_flat_in
            for layer in self._value_branch_separate:
                x = layer(x)
            return self._value_branch(x).squeeze(1)
        else:
            return self._value_branch(self._features).squeeze(1)
| {
"content_hash": "2f7598bdf6d7cf0c47aac9add27be7b1",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 76,
"avg_line_length": 38.168,
"alnum_prop": 0.5535527143156571,
"repo_name": "pcmoritz/ray-1",
"id": "1cec5eb5e8a665b1dcab83bd061a9aa84d82f17e",
"size": "4771",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rllib/models/jax/fcnet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "70670"
},
{
"name": "C++",
"bytes": "4670851"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Dockerfile",
"bytes": "14159"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1338604"
},
{
"name": "JavaScript",
"bytes": "914"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "Python",
"bytes": "10523389"
},
{
"name": "Shell",
"bytes": "117557"
},
{
"name": "Smarty",
"bytes": "239"
},
{
"name": "Starlark",
"bytes": "238506"
},
{
"name": "TypeScript",
"bytes": "259269"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from unittest import main, TestCase
from tempfile import mkdtemp
from os.path import join, exists, dirname, isdir, abspath, realpath
from urllib import quote  # Python 2 stdlib location (urllib.parse in Py3)
from os import environ
from shutil import rmtree, copytree
from uuid import uuid4
import sys
from chime.repo_functions import ChimeRepo
from slugify import slugify
import json
import logging
import tempfile

# Silence all logging output while the tests run.
logging.disable(logging.CRITICAL)

# Make the repository root importable before pulling in project modules.
repo_root = abspath(join(dirname(__file__), '..'))
sys.path.insert(0, repo_root)

from git.cmd import GitCommandError
from box.util.rotunicode import RotUnicode
from chime import jekyll_functions, repo_functions, edit_functions, view_functions
from chime import constants
from chime import chime_activity
import codecs

# Register the RotUnicode codec — presumably used by tests exercising
# non-ASCII content elsewhere in this suite; confirm against those tests.
codecs.register(RotUnicode.search_function)

# these patterns help us search the HTML of a response to determine if the expected page loaded
PATTERN_BRANCH_COMMENT = u'<!-- branch: {} -->'
PATTERN_AUTHOR_COMMENT = u'<!-- author: {} -->'
PATTERN_TASK_COMMENT = u'<!-- task: {} -->'
PATTERN_TEMPLATE_COMMENT = u'<!-- template name: {} -->'
PATTERN_FILE_COMMENT = u'<!-- file type: {file_type}, file name: {file_name}, file title: {file_title} -->'
# Activity-overview page snippets.
PATTERN_OVERVIEW_ITEM_CREATED = u'<p>The "{created_name}" {created_type} was created by {author_email}.</p>'
PATTERN_OVERVIEW_ACTIVITY_STARTED = u'<p>The "{activity_name}" activity was started by {author_email}.</p>'
PATTERN_OVERVIEW_COMMENT_BODY = u'<div class="comment__body">{comment_body}</div>'
PATTERN_OVERVIEW_ITEM_DELETED = u'<p>The "{deleted_name}" {deleted_type} {deleted_also}was deleted by {author_email}.</p>'
# Flash-message snippets shown after create/save/delete actions.
PATTERN_FLASH_TASK_DELETED = u'You deleted the "{description}" activity!'
PATTERN_FLASH_SAVED_CATEGORY = u'<li class="flash flash--notice">Saved changes to the {title} topic! Remember to submit this change for feedback when you\'re ready to go live.</li>'
PATTERN_FLASH_CREATED_CATEGORY = u'Created a new topic named {title}! Remember to submit this change for feedback when you\'re ready to go live.'
PATTERN_FLASH_CREATED_ARTICLE = u'Created a new article named {title}! Remember to submit this change for feedback when you\'re ready to go live.'
PATTERN_FLASH_SAVED_ARTICLE = u'Saved changes to the {title} article! Remember to submit this change for feedback when you\'re ready to go live.'
PATTERN_FLASH_DELETED_ARTICLE = u'The "{title}" article was deleted! Remember to submit this change for feedback when you\'re ready to go live.'
# Edit-form field snippets.
PATTERN_FORM_CATEGORY_TITLE = u'<input name="en-title" type="text" value="{title}" class="directory-modify__name" placeholder="Crime Statistics and Maps">'
PATTERN_FORM_CATEGORY_DESCRIPTION = u'<textarea name="en-description" class="directory-modify__description" placeholder="Crime statistics and reports by district and map">{description}</textarea>'
# review stuff
PATTERN_REQUEST_FEEDBACK_BUTTON = u'<button class="toolbar__item button button--orange" type="submit" name="request_feedback">Request Feedback</button>'
PATTERN_UNREVIEWED_EDITS_LINK = u'<a href="/tree/{branch_name}/" class="toolbar__item button">Unreviewed Edits</a>'
PATTERN_ENDORSE_BUTTON = u'<button class="toolbar__item button button--green" type="submit" name="endorse_edits">Endorse Edits</button>'
PATTERN_FEEDBACK_REQUESTED_LINK = u'<a href="/tree/{branch_name}/" class="toolbar__item button">Feedback requested</a>'
PATTERN_PUBLISH_BUTTON = u'<button class="toolbar__item button button--blue" type="submit" name="merge">Publish</button>'
PATTERN_READY_TO_PUBLISH_LINK = u'<a href="/tree/{branch_name}/" class="toolbar__item button">Ready to publish</a>'
class TestRepo (TestCase):
def setUp(self):
    """Create a bare origin repo plus two working clones in temp dirs."""
    # Redirect the tempfile module's default directory to a fresh per-test
    # dir; tearDown() removes it wholesale and restores the old default.
    self.old_tempdir, tempfile.tempdir = tempfile.tempdir, mkdtemp(prefix='chime-TestRepo-')
    self.work_path = mkdtemp(prefix='chime-repo-clones-')
    # Copy the bare fixture repo that ships next to the test suite, so
    # each test mutates its own private origin.
    repo_path = dirname(abspath(__file__)) + '/../test-app.git'
    temp_repo_dir = mkdtemp(prefix='chime-root')
    temp_repo_path = temp_repo_dir + '/test-app.git'
    copytree(repo_path, temp_repo_path)
    self.origin = ChimeRepo(temp_repo_path)
    repo_functions.ignore_task_metadata_on_merge(self.origin)
    # Two independent clones simulate two users collaborating via origin.
    self.clone1 = self.origin.clone(mkdtemp(prefix='chime-'))
    repo_functions.ignore_task_metadata_on_merge(self.clone1)
    self.clone2 = self.origin.clone(mkdtemp(prefix='chime-'))
    repo_functions.ignore_task_metadata_on_merge(self.clone2)
    # A random email identifies this "session" in commits; git picks up
    # author/committer identity from these environment variables.
    self.session = dict(email=str(uuid4()))
    environ['GIT_AUTHOR_NAME'] = ' '
    environ['GIT_COMMITTER_NAME'] = ' '
    environ['GIT_AUTHOR_EMAIL'] = self.session['email']
    environ['GIT_COMMITTER_EMAIL'] = self.session['email']
def tearDown(self):
    """Remove everything under the redirected tempdir and restore it."""
    # All fixture repos and clones were created under tempfile.tempdir
    # (redirected in setUp), so one rmtree cleans them all up.
    rmtree(tempfile.tempdir)
    tempfile.tempdir = self.old_tempdir
# in TestRepo
def test_repo_features(self):
    """The fixture origin is bare and has exactly the expected branches."""
    self.assertTrue(self.origin.bare)
    found_branches = set(branch.name for branch in self.origin.branches)
    self.assertEqual(found_branches, set(['master', 'title', 'body']))
# in TestRepo
def test_get_start_branch(self):
    ''' Make a simple edit in a clone, verify that it appears in the other.
    '''
    task_description = str(uuid4())
    work_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, u'erica@example.com')
    # The new branch exists both locally and upstream.
    self.assertTrue(work_branch.name in self.clone1.branches)
    self.assertTrue(work_branch.name in self.origin.branches)

    # Append some text to index.md on the branch and save it.
    work_branch.checkout()
    commit_message = str(uuid4())
    with open(join(self.clone1.working_dir, 'index.md'), 'a') as fp:
        fp.write('\n\n...')
    repo_functions.save_working_file(self.clone1, 'index.md', commit_message, work_branch.commit.hexsha, 'master')

    # The same branch fetched into the second clone carries the commit.
    mirrored_branch = repo_functions.get_existing_branch(self.clone2, 'master', work_branch.name)
    self.assertTrue(mirrored_branch.name in self.clone2.branches)
    self.assertEqual(mirrored_branch.commit.hexsha, work_branch.commit.hexsha)
    self.assertEqual(mirrored_branch.commit.message, commit_message)
# in TestRepo
def test_get_start_branch_2(self):
    ''' Start a branch after master has advanced; the branch is based on the new master.

        (Docstring fixed: it was copy-pasted from test_get_start_branch
        and described a different scenario.)
    '''
    task_description = str(uuid4())
    #
    # Check out both clones.
    #
    self.clone1.branches.master.checkout()
    self.clone2.branches.master.checkout()
    self.assertEqual(self.clone1.refs['master'].commit.hexsha, self.origin.refs['master'].commit.hexsha)
    self.assertEqual(self.clone1.refs['master'].commit.hexsha, self.clone2.refs['master'].commit.hexsha)
    #
    # Make a change to the first clone and push it.
    #
    with open(join(self.clone1.working_dir, 'index.md'), 'a') as file:
        file.write('\n\n...')
    message = str(uuid4())
    args = self.clone1, 'index.md', message, self.clone1.commit().hexsha, 'master'
    repo_functions.save_working_file(*args)
    #
    # Origin now has the updated master, but the second clone does not.
    #
    self.assertEqual(self.clone1.refs['master'].commit.hexsha, self.origin.refs['master'].commit.hexsha)
    # assertNotEquals is a deprecated unittest alias; use assertNotEqual
    # for consistency with the rest of this suite.
    self.assertNotEqual(self.clone1.refs['master'].commit.hexsha, self.clone2.refs['master'].commit.hexsha)
    #
    # Now start a new branch from the second clone, and look for the new master commit.
    #
    branch2 = repo_functions.get_start_branch(self.clone2, 'master', task_description, self.session['email'])
    self.assertTrue(branch2.name in self.clone2.branches)
    # compare the second-to-last commit on branch2 (by adding ".parents[0]", as
    # the most recent one is the creation of the task metadata file
    self.assertEqual(branch2.commit.parents[0].hexsha, self.origin.refs['master'].commit.hexsha)
# in TestRepo
def test_delete_missing_branch(self):
    ''' Delete a branch in a clone that's still in origin, see if it can be deleted anyway.
    '''
    description = str(uuid4())
    doomed_branch = repo_functions.get_start_branch(self.clone1, 'master', description, u'erica@example.com')
    self.assertTrue(doomed_branch.name in self.origin.branches)
    # Let the second clone learn about the branch, then abandon it there.
    self.clone2.git.fetch('origin')
    self.assertTrue(doomed_branch.name in self.origin.branches)
    self.assertTrue('origin/' + doomed_branch.name in self.clone2.refs)
    repo_functions.abandon_branch(self.clone2, 'master', doomed_branch.name)
    # The branch is gone from origin, from clone2's remote refs, and locally.
    self.assertFalse(doomed_branch.name in self.origin.branches)
    self.assertFalse('origin/' + doomed_branch.name in self.clone2.refs)
    self.assertFalse(doomed_branch.name in self.clone2.branches)
# in TestRepo
def test_new_file(self):
    ''' Make a new file and delete an old file in a clone, verify that the changes appear in the other.
    '''
    task_description = str(uuid4())
    branch1 = repo_functions.get_start_branch(self.clone1, 'master', task_description, self.session['email'])
    self.assertTrue(branch1.name in self.clone1.branches)
    self.assertTrue(branch1.name in self.origin.branches)
    #
    # Make a new file in the branch and push it.
    #
    branch1.checkout()
    edit_functions.create_new_page(self.clone1, '', 'hello.md',
                                   dict(title='Hello'), 'Hello hello.')
    args = self.clone1, 'hello.md', str(uuid4()), branch1.commit.hexsha, 'master'
    repo_functions.save_working_file(*args)
    #
    # Delete an existing file in the branch and push it.
    #
    message = str(uuid4())
    edit_functions.delete_file(self.clone1, 'index.md')
    args = self.clone1, 'index.md', message, branch1.commit.hexsha, 'master'
    repo_functions.save_working_file(*args)
    #
    # See if the changes made it to clone 2
    #
    branch2 = repo_functions.get_existing_branch(self.clone2, 'master', branch1.name)
    self.assertTrue(branch2.name in self.clone2.branches)
    # The fetched branch points at the same commit, carrying the
    # author/committer identity set via environment vars in setUp().
    self.assertEqual(branch2.commit.hexsha, branch1.commit.hexsha)
    self.assertEqual(branch2.commit.message, message)
    self.assertEqual(branch2.commit.author.email, self.session['email'])
    self.assertEqual(branch2.commit.committer.email, self.session['email'])
    branch2.checkout()
    with open(join(self.clone2.working_dir, 'hello.md')) as file:
        front, body = jekyll_functions.load_jekyll_doc(file)
    self.assertEqual(front['title'], 'Hello')
    self.assertEqual(body, 'Hello hello.')
    self.assertFalse(exists(join(self.clone2.working_dir, 'index.md')))
# in TestRepo
def test_try_to_create_existing_category(self):
    ''' We can't create a category that exists already.
    '''
    # The first creation attempt succeeds...
    create_result = view_functions.add_article_or_category(self.clone1, 'categories', 'My New Category', constants.CATEGORY_LAYOUT)
    self.assertEqual(u'The "My New Category" topic was created\n\n[{"action": "create", "file_path": "categories/my-new-category/index.markdown", "display_type": "category", "title": "My New Category"}]', create_result[0])
    self.assertEqual(u'categories/my-new-category/index.markdown', create_result[1])
    self.assertEqual(u'categories/my-new-category/', create_result[2])
    self.assertEqual(True, create_result[3])
    # ...and the retry reports that the topic already exists.
    retry_result = view_functions.add_article_or_category(self.clone1, 'categories', 'My New Category', constants.CATEGORY_LAYOUT)
    self.assertEqual('Topic "My New Category" already exists', retry_result[0])
    self.assertEqual(u'categories/my-new-category/index.markdown', retry_result[1])
    self.assertEqual(u'categories/my-new-category/', retry_result[2])
    self.assertEqual(False, retry_result[3])
# in TestRepo
def test_try_to_create_existing_article(self):
    ''' We can't create an article that exists already
    '''
    first_result = view_functions.add_article_or_category(self.clone1, 'categories/example', 'New Article', constants.ARTICLE_LAYOUT)
    self.assertEqual(u'The "New Article" article was created\n\n[{"action": "create", "file_path": "categories/example/new-article/index.markdown", "display_type": "article", "title": "New Article"}]', first_result[0])
    self.assertEqual(u'categories/example/new-article/index.markdown', first_result[1])
    self.assertEqual(u'categories/example/new-article/index.markdown', first_result[2])
    self.assertEqual(True, first_result[3])
    second_result = view_functions.add_article_or_category(self.clone1, 'categories/example', 'New Article', constants.ARTICLE_LAYOUT)
    self.assertEqual('Article "New Article" already exists', second_result[0])
    # BUG FIX: these two assertions previously re-checked first_result
    # (copy-paste slip — compare the sibling category test), so the
    # second call's returned paths were never actually verified.
    self.assertEqual(u'categories/example/new-article/index.markdown', second_result[1])
    self.assertEqual(u'categories/example/new-article/index.markdown', second_result[2])
    self.assertEqual(False, second_result[3])
# in TestRepo
def test_create_category_with_slash_in_name(self):
    ''' Trying to create an category with /s in its name creates a single category
    '''
    requested_name = u'Kristen/Melissa/Kate/Leslie'
    expected_slug = slugify(requested_name)
    result = view_functions.add_article_or_category(self.clone1, 'categories', requested_name, constants.CATEGORY_LAYOUT)
    # The slashes are folded into one slug rather than nested directories.
    expected_message = u'The "{category_name}" topic was created\n\n[{{"action": "create", "file_path": "categories/{category_slug}/index.markdown", "display_type": "category", "title": "{category_name}"}}]'.format(category_name=requested_name, category_slug=expected_slug)
    self.assertEqual(expected_message, result[0])
    self.assertEqual(u'categories/{category_slug}/index.markdown'.format(category_slug=expected_slug), result[1])
    self.assertEqual(u'categories/{category_slug}/'.format(category_slug=expected_slug), result[2])
    self.assertEqual(True, result[3])
# in TestRepo
def test_create_article_with_slash_in_name(self):
    ''' Trying to create an article with /s in its name creates a single article
    '''
    requested_name = u'Erin/Abby/Jillian/Patty'
    expected_slug = slugify(requested_name)
    result = view_functions.add_article_or_category(self.clone1, 'categories/example', requested_name, constants.ARTICLE_LAYOUT)
    # The slashes are folded into one slug rather than nested directories.
    expected_message = u'The "{article_name}" article was created\n\n[{{"action": "create", "file_path": "categories/example/{article_slug}/index.markdown", "display_type": "article", "title": "{article_name}"}}]'.format(article_name=requested_name, article_slug=expected_slug)
    self.assertEqual(expected_message, result[0])
    self.assertEqual(u'categories/example/{article_slug}/index.markdown'.format(article_slug=expected_slug), result[1])
    self.assertEqual(u'categories/example/{article_slug}/index.markdown'.format(article_slug=expected_slug), result[2])
    self.assertEqual(True, result[3])
# in TestRepo
def test_delete_directory(self):
    ''' Make a new file and directory and delete them.
    '''
    task_description = str(uuid4())
    branch1 = repo_functions.get_start_branch(self.clone1, 'master', task_description, u'erica@example.com')
    self.assertTrue(branch1.name in self.clone1.branches)
    self.assertTrue(branch1.name in self.origin.branches)
    #
    # Make a new file in a directory on the branch and push it.
    #
    branch1.checkout()
    edit_functions.create_new_page(self.clone1, 'hello/', 'hello.md',
                                   dict(title='Hello'), 'Hello hello.')
    args = self.clone1, 'hello/hello.md', str(uuid4()), branch1.commit.hexsha, 'master'
    repo_functions.save_working_file(*args)
    #
    # Delete the file and folder just created and push the changes.
    #
    message = str(uuid4())
    edit_functions.delete_file(self.clone1, 'hello/hello.md')
    args = self.clone1, 'hello/hello.md', message, branch1.commit.hexsha, 'master'
    repo_functions.save_working_file(*args)
    self.assertFalse(exists(join(self.clone1.working_dir, 'hello/hello.md')))
    # Deleting the (now-empty) directory should leave nothing behind.
    edit_functions.delete_file(self.clone1, 'hello/')
    self.assertFalse(exists(join(self.clone1.working_dir, 'hello/')))
# in TestRepo
def test_move_file(self):
    ''' Change the path of a file.
    '''
    task_description = str(uuid4())
    edit_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, u'erica@example.com')
    self.assertTrue(edit_branch.name in self.clone1.branches)
    self.assertTrue(edit_branch.name in self.origin.branches)

    # Move index.md into a subdirectory on the branch.
    edit_branch.checkout()
    repo_functions.move_existing_file(self.clone1, 'index.md', 'hello/world.md', edit_branch.commit.hexsha, 'master')

    # The second clone should see the file at its new path only.
    tracking_branch = repo_functions.get_existing_branch(self.clone2, 'master', edit_branch.name)
    tracking_branch.checkout()
    self.assertTrue(exists(join(self.clone2.working_dir, 'hello/world.md')))
    self.assertFalse(exists(join(self.clone2.working_dir, 'index.md')))
# in TestRepo
def test_content_merge(self):
    ''' Test that non-conflicting changes on the same file merge cleanly.
    '''
    # The fixture repo ships with 'title' and 'body' branches that touch
    # different parts of index.md.
    title_branch_name = 'title'
    body_branch_name = 'body'
    title_branch = repo_functions.get_existing_branch(self.clone1, 'master', title_branch_name)
    body_branch = repo_functions.get_existing_branch(self.clone2, 'master', body_branch_name)
    title_branch.checkout()
    body_branch.checkout()
    # Snapshot each branch's contribution before merging.
    with open(self.clone1.working_dir + '/index.md') as file:
        front1, _ = jekyll_functions.load_jekyll_doc(file)
    with open(self.clone2.working_dir + '/index.md') as file:
        _, body2 = jekyll_functions.load_jekyll_doc(file)
    #
    # Show that only the title branch title is now present on master.
    #
    repo_functions.complete_branch(self.clone1, 'master', title_branch_name)
    with open(self.clone1.working_dir + '/index.md') as file:
        front1b, body1b = jekyll_functions.load_jekyll_doc(file)
    title_key_name = 'title'
    self.assertEqual(front1b[title_key_name], front1[title_key_name])
    self.assertNotEqual(body1b, body2)
    #
    # Show that the body branch body is also now present on master.
    #
    repo_functions.complete_branch(self.clone2, 'master', 'body')
    with open(self.clone2.working_dir + '/index.md') as file:
        front2b, body2b = jekyll_functions.load_jekyll_doc(file)
    self.assertEqual(front2b[title_key_name], front1[title_key_name])
    self.assertEqual(body2b, body2)
    self.assertTrue(repo_functions.ACTIVITY_PUBLISHED_MESSAGE in self.clone2.commit().message)
# in TestRepo
def test_content_merge_extra_change(self):
    ''' Test that non-conflicting changes on the same file merge cleanly.
    '''
    title_branch_name = 'title'
    body_branch_name = 'body'
    title_branch = repo_functions.get_existing_branch(self.clone1, 'master', title_branch_name)
    body_branch = repo_functions.get_existing_branch(self.clone2, 'master', body_branch_name)
    title_branch.checkout()
    body_branch.checkout()
    # Snapshot each branch's view of index.md before merging.
    with open(self.clone1.working_dir + '/index.md') as file:
        front1, _ = jekyll_functions.load_jekyll_doc(file)
    with open(self.clone2.working_dir + '/index.md') as file:
        front2, body2 = jekyll_functions.load_jekyll_doc(file)
    #
    # Show that only the title branch title is now present on master.
    #
    repo_functions.complete_branch(self.clone1, 'master', title_branch_name)
    with open(self.clone1.working_dir + '/index.md') as file:
        front1b, body1b = jekyll_functions.load_jekyll_doc(file)
    title_key_name = 'title'
    self.assertEqual(front1b[title_key_name], front1[title_key_name])
    self.assertNotEqual(body1b, body2)
    #
    # Show that the body branch body is also now present on master.
    #
    edit_functions.update_page(self.clone2, 'index.md',
                               front2, 'Another change to the body')
    repo_functions.save_working_file(self.clone2, 'index.md', 'A new change',
                                     self.clone2.commit().hexsha, 'master')
    #
    # Show that upstream changes from master have NOT been merged here.
    #
    with open(self.clone2.working_dir + '/index.md') as file:
        front2b, body2b = jekyll_functions.load_jekyll_doc(file)
    self.assertNotEqual(front2b[title_key_name], front1[title_key_name])
    self.assertEqual(body2b.strip(), 'Another change to the body')
    self.assertFalse(self.clone2.commit().message.startswith('Merged work from'))
    # There's no conflict; the merge would be clean.
    self.assertIsNone(repo_functions.get_conflict(self.clone2, 'master'), "There is no conflict")
    self.assertIsNotNone(repo_functions.get_changed(self.clone2, 'master'), "A change should be visible")
# in TestRepo
def test_multifile_merge(self):
    ''' Test that two non-conflicting new files merge cleanly.
    '''
    fake_author_email = u'erica@example.com'
    task_description = str(uuid4())
    # NOTE: both clones start a branch with the SAME task description, so
    # they share one activity branch name.
    branch1 = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
    branch2 = repo_functions.get_start_branch(self.clone2, 'master', task_description, fake_author_email)
    branch1_name, branch2_name = branch1.name, branch2.name
    #
    # Make new files in each branch and save them.
    #
    branch1.checkout()
    branch2.checkout()
    edit_functions.create_new_page(self.clone1, '', 'file1.md',
                                   dict(title='Hello'), 'Hello hello.')
    edit_functions.create_new_page(self.clone2, '', 'file2.md',
                                   dict(title='Goodbye'), 'Goodbye goodbye.')
    #
    # Show that the changes from the first branch made it to origin.
    #
    args1 = self.clone1, 'file1.md', '...', branch1.commit.hexsha, 'master'
    commit1 = repo_functions.save_working_file(*args1)
    self.assertEqual(self.origin.branches[branch1_name].commit, commit1)
    self.assertEqual(self.origin.branches[branch1_name].commit.author.email, self.session['email'])
    self.assertEqual(self.origin.branches[branch1_name].commit.committer.email, self.session['email'])
    self.assertEqual(commit1, branch1.commit)
    #
    # Show that the changes from the second branch also made it to origin.
    #
    args2 = self.clone2, 'file2.md', '...', branch2.commit.hexsha, 'master'
    commit2 = repo_functions.save_working_file(*args2)
    self.assertEqual(self.origin.branches[branch2_name].commit, commit2)
    self.assertEqual(self.origin.branches[branch2_name].commit.author.email, self.session['email'])
    self.assertEqual(self.origin.branches[branch2_name].commit.committer.email, self.session['email'])
    self.assertEqual(commit2, branch2.commit)
    #
    # Show that the merge from the second branch made it back to the first.
    #
    branch1b = repo_functions.get_existing_branch(self.clone1, 'master', branch2.name)
    self.assertEqual(branch1b.commit, branch2.commit)
    self.assertEqual(branch1b.commit.author.email, self.session['email'])
    self.assertEqual(branch1b.commit.committer.email, self.session['email'])
# in TestRepo
def test_same_branch_conflict(self):
    ''' Test that a conflict in two branches appears at the right spot.
    '''
    fake_author_email = u'erica@example.com'
    task_description = str(uuid4())
    # Both clones work on the SAME activity branch.
    branch1 = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
    branch2 = repo_functions.get_existing_branch(self.clone2, 'master', branch1.name)
    branch1_name = branch1.name
    self.assertIsNotNone(branch2)
    self.assertEqual(branch2.name, branch1_name)
    #
    # Make new files in each branch and save them.
    #
    branch1.checkout()
    branch2.checkout()
    # Same file path, different content — guaranteed conflict.
    edit_functions.create_new_page(self.clone1, '', 'conflict.md',
                                   dict(title='Hello'), 'Hello hello.')
    edit_functions.create_new_page(self.clone2, '', 'conflict.md',
                                   dict(title='Goodbye'), 'Goodbye goodbye.')
    #
    # Show that the changes from the first branch made it to origin.
    #
    args1 = self.clone1, 'conflict.md', '...', branch1.commit.hexsha, 'master'
    commit1 = repo_functions.save_working_file(*args1)
    self.assertEqual(self.origin.branches[branch1_name].commit, commit1)
    self.assertEqual(commit1, branch1.commit)
    #
    # Show that the changes from the second branch conflict with the first.
    #
    with self.assertRaises(repo_functions.MergeConflict) as conflict:
        args2 = self.clone2, 'conflict.md', '...', branch2.commit.hexsha, 'master'
        repo_functions.save_working_file(*args2)
    self.assertEqual(conflict.exception.remote_commit, commit1)
    # The only difference between the two sides is the conflicting file.
    diffs = conflict.exception.remote_commit.diff(conflict.exception.local_commit)
    self.assertEqual(len(diffs), 1)
    self.assertEqual(diffs[0].a_blob.name, 'conflict.md')
    self.assertEqual(diffs[0].b_blob.name, 'conflict.md')
# in TestRepo
def test_upstream_pull_conflict(self):
    ''' Test that a conflict in two branches appears at the right spot.
    '''
    fake_author_email = u'erica@example.com'
    # DIFFERENT task descriptions this time: two separate activity branches.
    task_description1, task_description2 = str(uuid4()), str(uuid4())
    branch1 = repo_functions.get_start_branch(self.clone1, 'master', task_description1, fake_author_email)
    branch2 = repo_functions.get_start_branch(self.clone2, 'master', task_description2, fake_author_email)
    branch1_name = branch1.name
    #
    # Make new files in each branch and save them.
    #
    branch1.checkout()
    branch2.checkout()
    edit_functions.create_new_page(self.clone1, '', 'conflict.md',
                                   dict(title='Hello'), 'Hello hello.')
    #
    # Show that the changes from the first branch made it to origin.
    #
    args1 = self.clone1, 'conflict.md', '...', branch1.commit.hexsha, 'master'
    commit1 = repo_functions.save_working_file(*args1)
    self.assertEqual(self.origin.branches[branch1_name].commit, commit1)
    self.assertEqual(commit1, branch1.commit)
    #
    # Merge the first branch to master.
    #
    commit2 = repo_functions.complete_branch(self.clone1, 'master', branch1_name)
    self.assertFalse(branch1_name in self.origin.branches)
    #
    # Show that the changes from the second branch conflict with the first.
    #
    self.assertIsNone(repo_functions.get_conflict(self.clone2, 'master'),
                      "Shouldn't see any conflict yet")
    edit_functions.create_new_page(self.clone2, '', 'conflict.md',
                                   dict(title='Goodbye'), 'Goodbye goodbye.')
    args2 = self.clone2, 'conflict.md', '...', branch2.commit.hexsha, 'master'
    repo_functions.save_working_file(*args2)
    conflict = repo_functions.get_conflict(self.clone2, 'master')
    self.assertTrue(bool(conflict))
    diffs = conflict.remote_commit.diff(conflict.local_commit)
    # there are two diffs; the first is addition of the
    # task metadata file, the second is the conflict file
    self.assertEqual(len(diffs), 2)
    self.assertIsNone(diffs[0].a_blob)
    self.assertEqual(diffs[0].b_blob.name, repo_functions.TASK_METADATA_FILENAME)
    self.assertEqual(diffs[1].a_blob.name, 'conflict.md')
    self.assertEqual(diffs[1].b_blob.name, 'conflict.md')
# in TestRepo
def test_upstream_push_conflict(self):
    ''' Test that a conflict in two branches appears at the right spot.
    '''
    fake_author_email = u'erica@example.com'
    # Two separate activity branches that both add the same file.
    task_description1, task_description2 = str(uuid4()), str(uuid4())
    branch1 = repo_functions.get_start_branch(self.clone1, 'master', task_description1, fake_author_email)
    branch2 = repo_functions.get_start_branch(self.clone2, 'master', task_description2, fake_author_email)
    branch1_name, branch2_name = branch1.name, branch2.name
    #
    # Make new files in each branch and save them.
    #
    branch1.checkout()
    branch2.checkout()
    edit_functions.create_new_page(self.clone1, '', 'conflict.md',
                                   dict(title='Hello'), 'Hello hello.')
    edit_functions.create_new_page(self.clone2, '', 'conflict.md',
                                   dict(title='Goodbye'), 'Goodbye goodbye.')
    #
    # Push changes from the two branches to origin.
    #
    args1 = self.clone1, 'conflict.md', '...', branch1.commit.hexsha, 'master'
    repo_functions.save_working_file(*args1)
    args2 = self.clone2, 'conflict.md', '...', branch2.commit.hexsha, 'master'
    repo_functions.save_working_file(*args2)
    #
    # Merge the two branches to master; show that second merge will fail.
    #
    repo_functions.complete_branch(self.clone1, 'master', branch1_name)
    self.assertFalse(branch1_name in self.origin.branches)
    with self.assertRaises(repo_functions.MergeConflict) as conflict:
        repo_functions.complete_branch(self.clone2, 'master', branch2_name)
    self.assertEqual(conflict.exception.remote_commit, self.origin.commit())
    self.assertEqual(conflict.exception.local_commit, self.clone2.commit())
    # conflict.exception is the MergeConflict exception object
    conflict_files = conflict.exception.files()
    edited_files = [item for item in conflict_files if item['actions'] == repo_functions.CONFLICT_ACTION_EDITED]
    self.assertEqual(len(edited_files), 1)
    self.assertEqual(edited_files[0]['path'], 'conflict.md')
# in TestRepo
def test_conflict_resolution_clobber(self):
    ''' Test that a conflict in two branches can be clobbered.

        Merges the stock 'title' branch (which adds goner.md), then shows a
        conflicting task branch fails to merge, then clobbers master with the
        task branch and verifies master no longer contains goner.md.
    '''
    fake_author_email = u'erica@example.com'
    task_description = str(uuid4())
    title_branch = repo_functions.get_existing_branch(self.clone1, 'master', 'title')
    compare_branch = repo_functions.get_start_branch(self.clone2, 'master', task_description, fake_author_email)
    title_branch_name, compare_branch_name = title_branch.name, compare_branch.name
    #
    # Add goner.md in title_branch.
    #
    title_branch.checkout()
    edit_functions.create_new_page(self.clone1, '', 'goner.md',
                                   dict(title=task_description), 'Woooo woooo.')
    args = self.clone1, 'goner.md', '...', title_branch.commit.hexsha, 'master'
    commit = repo_functions.save_working_file(*args)
    #
    # Change index.md in compare_branch so it conflicts with the title branch.
    #
    compare_branch.checkout()
    edit_functions.update_page(self.clone2, 'index.md',
                               dict(title=task_description), 'Hello hello.')
    args = self.clone2, 'index.md', '...', compare_branch.commit.hexsha, 'master'
    commit = repo_functions.save_working_file(*args)
    #
    # Merge the original title branch, fail to merge our conflicting branch.
    #
    repo_functions.complete_branch(self.clone1, 'master', title_branch_name)
    with self.assertRaises(repo_functions.MergeConflict) as conflict:
        repo_functions.complete_branch(self.clone2, 'master', compare_branch_name)
    self.assertEqual(conflict.exception.local_commit, commit)
    diffs = conflict.exception.remote_commit.diff(conflict.exception.local_commit)
    self.assertEqual(len(diffs), 3)
    for diff in diffs:
        if diff.a_blob:
            self.assertTrue(diff.a_blob.name in ('index.md', 'goner.md'))
    #
    # Merge our conflicting branch and clobber the default branch.
    #
    repo_functions.clobber_default_branch(self.clone2, 'master', compare_branch_name)
    with open(join(self.clone2.working_dir, 'index.md')) as file:
        front, body = jekyll_functions.load_jekyll_doc(file)
    self.assertEqual(front['title'], task_description)
    self.assertFalse(compare_branch_name in self.origin.branches)
    # If goner.md is still around, then master wasn't fully clobbered.
    self.clone1.branches['master'].checkout()
    self.clone1.git.pull('origin', 'master')
    # Check clone1's working dir: it was just synced to origin's clobbered
    # master. (Checking clone2 here was a bug — clone2 never had goner.md,
    # so that assertion passed vacuously and the pull above was dead code.)
    self.assertFalse(exists(join(self.clone1.working_dir, 'goner.md')))
    self.assertTrue(self.clone2.commit().message.startswith('Clobbered with work from'))
# in TestRepo
def test_conflict_resolution_abandon(self):
    ''' Test that a conflict in two branches can be abandoned.

        Merges the stock 'title' branch first, shows that a conflicting
        task branch then fails to merge, abandons the task branch, and
        verifies that all of its work is discarded.
    '''
    fake_author_email = u'erica@example.com'
    task_description = str(uuid4())
    title_branch_name = 'title'
    repo_functions.get_existing_branch(self.clone1, 'master', title_branch_name)
    compare_branch = repo_functions.get_start_branch(self.clone2, 'master', task_description, fake_author_email)
    compare_branch_name = compare_branch.name
    #
    # Change index.md in compare_branch so it conflicts with title branch.
    # Also add goner.md, which we'll later want to disappear.
    #
    compare_branch.checkout()
    edit_functions.update_page(self.clone2, 'index.md',
                               dict(title=task_description), 'Hello hello.')
    edit_functions.create_new_page(self.clone2, '', 'goner.md',
                                   dict(title=task_description), 'Woooo woooo.')
    args = self.clone2, 'index.md', '...', compare_branch.commit.hexsha, 'master'
    commit = repo_functions.save_working_file(*args)
    args = self.clone2, 'goner.md', '...', compare_branch.commit.hexsha, 'master'
    commit = repo_functions.save_working_file(*args)
    #
    # Merge the original title branch, fail to merge our conflicting branch.
    #
    repo_functions.complete_branch(self.clone1, 'master', title_branch_name)
    with self.assertRaises(repo_functions.MergeConflict) as conflict:
        repo_functions.complete_branch(self.clone2, 'master', compare_branch_name)
    self.assertEqual(conflict.exception.local_commit, commit)
    diffs = conflict.exception.remote_commit.diff(conflict.exception.local_commit)
    # The diff should cover exactly our two edited files plus the task
    # metadata file; the order of entries is not guaranteed.
    self.assertEqual(len(diffs), 3)
    self.assertTrue(diffs[0].b_blob.name in ('index.md', 'goner.md', repo_functions.TASK_METADATA_FILENAME))
    self.assertTrue(diffs[1].b_blob.name in ('index.md', 'goner.md', repo_functions.TASK_METADATA_FILENAME))
    self.assertTrue(diffs[2].b_blob.name in ('index.md', 'goner.md', repo_functions.TASK_METADATA_FILENAME))
    #
    # Merge our conflicting branch and abandon it to the default branch.
    #
    repo_functions.abandon_branch(self.clone2, 'master', compare_branch_name)
    with open(join(self.clone2.working_dir, 'index.md')) as file:
        front, body = jekyll_functions.load_jekyll_doc(file)
    # Abandoning should have discarded the conflicting title change.
    self.assertNotEqual(front['title'], task_description)
    self.assertFalse(compare_branch_name in self.origin.branches)
    # If goner.md is still around, then the branch wasn't fully abandoned.
    self.assertFalse(exists(join(self.clone2.working_dir, 'goner.md')))
    self.assertTrue(self.clone2.commit().message.startswith('Abandoned work from'))
# in TestRepo
def test_peer_review(self):
    ''' Exercise the review process

        Walks an activity through the full review cycle twice: fresh ->
        edited -> feedback requested -> endorsed, checking at each step
        who is authorized to act, then publishes the work.
    '''
    fake_author_email = u'erica@example.com'
    task_description = str(uuid4())
    branch1 = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
    branch1_name = branch1.name
    #
    # Make a commit.
    #
    # Impersonate a content creator via git's author/committer env vars.
    fake_creator_email = u'creator@example.com'
    environ['GIT_AUTHOR_NAME'] = 'Jim Content Creator'
    environ['GIT_COMMITTER_NAME'] = 'Jim Content Creator'
    environ['GIT_AUTHOR_EMAIL'] = fake_creator_email
    environ['GIT_COMMITTER_EMAIL'] = fake_creator_email
    branch1.checkout()
    # A brand-new activity starts in the FRESH state.
    review_state, review_authorized = repo_functions.get_review_state_and_authorized(self.clone1, 'master', branch1_name, fake_author_email)
    self.assertEqual(review_state, constants.REVIEW_STATE_FRESH)
    self.assertTrue(review_authorized)
    edit_functions.update_page(self.clone1, 'index.md',
                               dict(title=task_description), 'Hello you-all.')
    repo_functions.save_working_file(self.clone1, 'index.md', 'I made a change',
                                     self.clone1.commit().hexsha, 'master')
    # verify that the activity has unreviewed edits and that Jim Content Creator is authorized to request feedback
    review_state, review_authorized = repo_functions.get_review_state_and_authorized(self.clone1, 'master', branch1_name, fake_creator_email)
    self.assertEqual(review_state, constants.REVIEW_STATE_EDITED)
    self.assertTrue(review_authorized)
    # request feedback as Jim Content Creator
    repo_functions.update_review_state(self.clone1, constants.REVIEW_STATE_FEEDBACK)
    # verify that the activity has feedback requested and that fake is authorized to endorse
    review_state, review_authorized = repo_functions.get_review_state_and_authorized(self.clone1, 'master', branch1_name, fake_author_email)
    self.assertEqual(review_state, constants.REVIEW_STATE_FEEDBACK)
    self.assertTrue(review_authorized)
    #
    # Approve the work as someone else.
    #
    fake_reviewer_email = u'reviewer@example.com'
    environ['GIT_AUTHOR_NAME'] = 'Joe Reviewer'
    environ['GIT_COMMITTER_NAME'] = 'Joe Reviewer'
    environ['GIT_AUTHOR_EMAIL'] = fake_reviewer_email
    environ['GIT_COMMITTER_EMAIL'] = fake_reviewer_email
    # endorse
    repo_functions.update_review_state(self.clone1, constants.REVIEW_STATE_ENDORSED)
    # verify that the activity has been endorsed and that Joe Reviewer is authorized to publish
    review_state, review_authorized = repo_functions.get_review_state_and_authorized(self.clone1, 'master', branch1_name, fake_reviewer_email)
    self.assertEqual(review_state, constants.REVIEW_STATE_ENDORSED)
    self.assertTrue(review_authorized)
    #
    # Make another commit.
    #
    # A new edit drops the activity back to the EDITED state.
    edit_functions.update_page(self.clone1, 'index.md',
                               dict(title=task_description), 'Hello you there.')
    repo_functions.save_working_file(self.clone1, 'index.md', 'I made a change',
                                     self.clone1.commit().hexsha, 'master')
    # verify that the activity has unreviewed edits and that Joe Reviewer is authorized to request feedback
    review_state, review_authorized = repo_functions.get_review_state_and_authorized(self.clone1, 'master', branch1_name, fake_reviewer_email)
    self.assertEqual(review_state, constants.REVIEW_STATE_EDITED)
    self.assertTrue(review_authorized)
    # request feedback as Joe Reviewer
    repo_functions.update_review_state(self.clone1, constants.REVIEW_STATE_FEEDBACK)
    # verify that the activity has feedback requested and that Joe Reviewer is not authorized to endorse
    # (the person who requested feedback may not endorse their own request)
    review_state, review_authorized = repo_functions.get_review_state_and_authorized(self.clone1, 'master', branch1_name, fake_reviewer_email)
    self.assertEqual(review_state, constants.REVIEW_STATE_FEEDBACK)
    self.assertFalse(review_authorized)
    #
    # Approve the work as someone else.
    #
    fake_nonprofit_email = u'reviewer@example.org'
    environ['GIT_AUTHOR_NAME'] = 'Jane Reviewer'
    environ['GIT_COMMITTER_NAME'] = 'Jane Reviewer'
    environ['GIT_AUTHOR_EMAIL'] = fake_nonprofit_email
    environ['GIT_COMMITTER_EMAIL'] = fake_nonprofit_email
    # endorse
    repo_functions.update_review_state(self.clone1, constants.REVIEW_STATE_ENDORSED)
    # verify that the activity has been endorsed and that Jane Reviewer is authorized to publish
    review_state, review_authorized = repo_functions.get_review_state_and_authorized(self.clone1, 'master', branch1_name, fake_nonprofit_email)
    self.assertEqual(review_state, constants.REVIEW_STATE_ENDORSED)
    self.assertTrue(review_authorized)
    #
    # Publish the work
    #
    merge_commit = repo_functions.complete_branch(clone=self.clone1, default_branch_name='master', working_branch_name=branch1_name)
    # The commit message is expected
    self.assertTrue(repo_functions.ACTIVITY_PUBLISHED_MESSAGE in merge_commit.message)
    # The branch is gone
    self.assertFalse(branch1_name in self.origin.branches)
    self.assertFalse(branch1_name in self.clone1.branches)
# in TestRepo
def test_article_creation_with_unicode(self):
    ''' An article with unicode in its title is created as expected.
    '''
    # Start a new working branch in a per-user clone of origin.
    author_email = u'erica@example.com'
    description = u'suck blood from a mammal for mosquito larvae'
    source_repo = self.origin
    first_commit = list(source_repo.iter_commits())[-1].hexsha
    dir_name = 'repo-{}-{}'.format(first_commit[:8], slugify(author_email))
    user_dir = realpath(join(self.work_path, quote(dir_name)))
    # Reuse an existing clone directory when possible, otherwise clone fresh.
    if isdir(user_dir):
        new_clone = ChimeRepo(user_dir)
        new_clone.git.reset(hard=True)
        new_clone.remotes.origin.fetch()
    else:
        new_clone = source_repo.clone(user_dir, bare=False)
    # tell git to ignore merge conflicts on the task metadata file
    repo_functions.ignore_task_metadata_on_merge(new_clone)
    working_branch = repo_functions.get_start_branch(new_clone, 'master', description, author_email)
    self.assertTrue(working_branch.name in new_clone.branches)
    self.assertTrue(working_branch.name in self.origin.branches)
    working_branch.checkout()

    # Create an article whose title is entirely non-ASCII.
    art_title = u'快速狐狸'
    art_slug = slugify(art_title)
    add_message, file_path, redirect_path, do_save = view_functions.add_article_or_category(new_clone, u'', art_title, constants.ARTICLE_LAYOUT)
    expected_index = u'{}/index.{}'.format(art_slug, constants.CONTENT_FILE_EXTENSION)
    self.assertEqual(expected_index, file_path)
    self.assertEqual(u'The "{art_title}" article was created\n\n[{{"action": "create", "file_path": "{file_path}", "display_type": "article", "title": "{art_title}"}}]'.format(art_title=art_title, file_path=file_path), add_message)
    self.assertEqual(expected_index, redirect_path)
    self.assertEqual(True, do_save)

    # Commit the article.
    repo_functions.save_working_file(new_clone, file_path, add_message, new_clone.commit().hexsha, 'master')
# in TestRepo
def test_edit_category_title_and_description(self):
    ''' Edits to category details are saved

        Creates a category, confirms its fresh front matter, then writes
        new title/description values through save_page and confirms they
        round-trip into the category's index file.
    '''
    # start a new branch
    fake_author_email = u'erica@example.com'
    task_description = u'squeezing lemons for lemonade lovers'
    source_repo = self.origin
    first_commit = list(source_repo.iter_commits())[-1].hexsha
    dir_name = 'repo-{}-{}'.format(first_commit[:8], slugify(fake_author_email))
    user_dir = realpath(join(self.work_path, quote(dir_name)))
    # reuse an existing clone directory when possible, otherwise clone fresh
    if isdir(user_dir):
        new_clone = ChimeRepo(user_dir)
        new_clone.git.reset(hard=True)
        new_clone.remotes.origin.fetch()
    else:
        new_clone = source_repo.clone(user_dir, bare=False)
    # tell git to ignore merge conflicts on the task metadata file
    repo_functions.ignore_task_metadata_on_merge(new_clone)
    working_branch = repo_functions.get_start_branch(new_clone, 'master', task_description, fake_author_email)
    self.assertTrue(working_branch.name in new_clone.branches)
    self.assertTrue(working_branch.name in self.origin.branches)
    working_branch.checkout()
    # create a category
    cat_title = u'快速狐狸'
    cat_slug = slugify(cat_title)
    add_message, file_path, redirect_path, do_save = view_functions.add_article_or_category(new_clone, u'', cat_title, constants.CATEGORY_LAYOUT)
    self.assertEqual(u'{}/index.{}'.format(cat_slug, constants.CONTENT_FILE_EXTENSION), file_path)
    self.assertEqual(u'The "{cat_title}" topic was created\n\n[{{"action": "create", "file_path": "{file_path}", "display_type": "category", "title": "{cat_title}"}}]'.format(cat_title=cat_title, file_path=file_path), add_message)
    self.assertEqual(u'{}/'.format(cat_slug), redirect_path)
    self.assertEqual(True, do_save)
    # commit the category
    repo_functions.save_working_file(new_clone, file_path, add_message, new_clone.commit().hexsha, 'master')
    index_path = join(new_clone.working_dir, file_path)
    # verify the values
    with open(index_path) as file:
        front_matter, body = jekyll_functions.load_jekyll_doc(file)
    # a freshly-created category has its title set and empty description/body
    self.assertEqual(front_matter['title'], cat_title)
    self.assertEqual(front_matter['description'], u'')
    self.assertEqual(body, u'')
    # change the values
    # 'en-' prefixed keys are the form-field names save_page expects
    fake_changes = {'en-title': u'Drink Craw', 'en-description': u'Pink Straw', 'en-body': u'', 'hexsha': new_clone.commit().hexsha}
    new_values = dict(front_matter)
    new_values.update(fake_changes)
    new_path, did_save = view_functions.save_page(repo=new_clone, default_branch_name='master', working_branch_name=working_branch.name, file_path=file_path, new_values=new_values)
    # check for the new values!
    with open(index_path) as file:
        front_matter, body = jekyll_functions.load_jekyll_doc(file)
    self.assertEqual(front_matter['title'], new_values['en-title'])
    self.assertEqual(front_matter['description'], new_values['en-description'])
    self.assertEqual(body, u'')
# in TestRepo
def test_delete_category(self):
    ''' Create and delete a category

        Builds a nested structure (category / category / article), then
        deletes the inner category and verifies that it and its contained
        article are removed and reported in the commit message.
    '''
    # start a new branch
    fake_author_email = u'erica@example.com'
    task_description = u'grating lemons for zest lovers'
    source_repo = self.origin
    first_commit = list(source_repo.iter_commits())[-1].hexsha
    dir_name = 'repo-{}-{}'.format(first_commit[:8], slugify(fake_author_email))
    user_dir = realpath(join(self.work_path, quote(dir_name)))
    # reuse an existing clone directory when possible, otherwise clone fresh
    if isdir(user_dir):
        new_clone = ChimeRepo(user_dir)
        new_clone.git.reset(hard=True)
        new_clone.remotes.origin.fetch()
    else:
        new_clone = source_repo.clone(user_dir, bare=False)
    # tell git to ignore merge conflicts on the task metadata file
    repo_functions.ignore_task_metadata_on_merge(new_clone)
    working_branch = repo_functions.get_start_branch(new_clone, 'master', task_description, fake_author_email)
    self.assertTrue(working_branch.name in new_clone.branches)
    self.assertTrue(working_branch.name in self.origin.branches)
    working_branch.checkout()
    # create a category
    cat_title = u'Daffodils and Drop Cloths'
    cat_slug = slugify(cat_title)
    add_message, file_path, redirect_path, do_save = view_functions.add_article_or_category(new_clone, u'', cat_title, constants.CATEGORY_LAYOUT)
    self.assertEqual(u'{}/index.{}'.format(cat_slug, constants.CONTENT_FILE_EXTENSION), file_path)
    self.assertEqual(u'The "{cat_title}" topic was created\n\n[{{"action": "create", "file_path": "{file_path}", "display_type": "category", "title": "{cat_title}"}}]'.format(cat_title=cat_title, file_path=file_path), add_message)
    self.assertEqual(u'{}/'.format(cat_slug), redirect_path)
    self.assertEqual(True, do_save)
    # commit the category
    repo_functions.save_working_file(new_clone, file_path, add_message, new_clone.commit().hexsha, 'master')
    # verify that the directory and index file exist
    cat_index_path = join(new_clone.working_dir, file_path)
    self.assertTrue(exists(cat_index_path))
    self.assertTrue(exists(repo_functions.strip_index_file(cat_index_path)))
    # create a category inside that
    cat2_title = u'Drain Bawlers'
    cat2_slug = slugify(cat2_title)
    add_message, cat2_path, redirect_path, do_save = view_functions.add_article_or_category(new_clone, cat_slug, cat2_title, constants.CATEGORY_LAYOUT)
    self.assertEqual(u'{}/{}/index.{}'.format(cat_slug, cat2_slug, constants.CONTENT_FILE_EXTENSION), cat2_path)
    self.assertEqual(u'The "{cat_title}" topic was created\n\n[{{"action": "create", "file_path": "{file_path}", "display_type": "category", "title": "{cat_title}"}}]'.format(cat_title=cat2_title, file_path=cat2_path), add_message)
    self.assertEqual(u'{}/{}/'.format(cat_slug, cat2_slug), redirect_path)
    self.assertEqual(True, do_save)
    # commit the category
    repo_functions.save_working_file(new_clone, cat2_path, add_message, new_clone.commit().hexsha, 'master')
    # verify that the directory and index file exist
    cat2_index_path = join(new_clone.working_dir, cat2_path)
    self.assertTrue(exists(cat2_index_path))
    self.assertTrue(exists(repo_functions.strip_index_file(cat2_index_path)))
    # and an article inside that (with a unicode title)
    art_title = u'သံပုရာဖျော်ရည်'
    art_slug = slugify(art_title)
    # dir_path is the inner category's directory, e.g. 'cat/cat2'
    dir_path = redirect_path.rstrip('/')
    add_message, art_path, redirect_path, do_save = view_functions.add_article_or_category(new_clone, dir_path, art_title, constants.ARTICLE_LAYOUT)
    self.assertEqual(u'{}/{}/{}/index.{}'.format(cat_slug, cat2_slug, art_slug, constants.CONTENT_FILE_EXTENSION), art_path)
    self.assertEqual(u'The "{art_title}" article was created\n\n[{{"action": "create", "file_path": "{file_path}", "display_type": "article", "title": "{art_title}"}}]'.format(art_title=art_title, file_path=art_path), add_message)
    self.assertEqual(u'{}/{}/{}/index.{}'.format(cat_slug, cat2_slug, art_slug, constants.CONTENT_FILE_EXTENSION), redirect_path)
    self.assertEqual(True, do_save)
    # commit the article
    repo_functions.save_working_file(new_clone, art_path, add_message, new_clone.commit().hexsha, 'master')
    # verify that the directory and index file exist
    art_index_path = join(new_clone.working_dir, art_path)
    self.assertTrue(exists(art_index_path))
    self.assertTrue(exists(repo_functions.strip_index_file(art_index_path)))
    # now delete the second category
    browse_path = repo_functions.strip_index_file(art_path)
    redirect_path, do_save, commit_message = view_functions.delete_page(repo=new_clone, browse_path=browse_path, target_path=dir_path)
    # deleting should redirect to the outer category
    self.assertEqual(cat_slug.rstrip('/'), redirect_path.rstrip('/'))
    self.assertEqual(True, do_save)
    # the commit message should enumerate the deleted category and article
    self.assertEqual(u'The "{cat2_title}" topic (containing 1 article) was deleted\n\n[{{"action": "delete", "file_path": "{cat2_path}", "display_type": "category", "title": "{cat2_title}"}}, {{"action": "delete", "file_path": "{art_path}", "display_type": "article", "title": "{art_title}"}}]'.format(cat2_title=cat2_title, cat2_path=cat2_path, art_path=art_path, art_title=art_title), commit_message)
    repo_functions.save_working_file(clone=new_clone, path=dir_path, message=commit_message, base_sha=new_clone.commit().hexsha, default_branch_name='master')
    # verify that the files are gone
    self.assertFalse(exists(cat2_index_path))
    self.assertFalse(exists(repo_functions.strip_index_file(cat2_index_path)))
    self.assertFalse(exists(art_index_path))
    self.assertFalse(exists(repo_functions.strip_index_file(art_index_path)))
# in TestRepo
def test_activity_history_recorded(self):
    ''' The activity history accurately records activity events.

        Creates a mix of categories and articles, leaves two comments,
        deletes a populated category, then checks that the activity's
        history lists every event with the expected subject, body and
        commit type, newest first.
    '''
    # start a new branch
    fake_author_email = u'erica@example.com'
    task_description = u'shake trees until coconuts fall off for castaways'
    source_repo = self.origin
    first_commit = list(source_repo.iter_commits())[-1].hexsha
    dir_name = 'repo-{}-{}'.format(first_commit[:8], slugify(fake_author_email))
    user_dir = realpath(join(self.work_path, quote(dir_name)))
    # reuse an existing clone directory when possible, otherwise clone fresh
    if isdir(user_dir):
        new_clone = ChimeRepo(user_dir)
        new_clone.git.reset(hard=True)
        new_clone.remotes.origin.fetch()
    else:
        new_clone = source_repo.clone(user_dir, bare=False)
    # tell git to ignore merge conflicts on the task metadata file
    repo_functions.ignore_task_metadata_on_merge(new_clone)
    working_branch = repo_functions.get_start_branch(new_clone, 'master', task_description, fake_author_email)
    self.assertTrue(working_branch.name in new_clone.branches)
    self.assertTrue(working_branch.name in self.origin.branches)
    working_branch.checkout()
    # create some category/article structure
    # each entry: (containing dir, title, layout)
    create_details = [
        ('', 'Tree', constants.CATEGORY_LAYOUT),
        ('tree', 'Coconut', constants.CATEGORY_LAYOUT),
        ('tree/coconut', 'Coconut Milk', constants.ARTICLE_LAYOUT),
        ('', 'Rock', constants.CATEGORY_LAYOUT),
        ('rock', 'Barnacle', constants.CATEGORY_LAYOUT),
        ('', 'Sand', constants.CATEGORY_LAYOUT)
    ]
    updated_details = []
    for detail in create_details:
        add_message, file_path, redirect_path, do_save = view_functions.add_article_or_category(new_clone, detail[0], detail[1], detail[2])
        # remember the created file path alongside the creation details
        updated_details.append(detail + (file_path,))
        repo_functions.save_working_file(new_clone, file_path, add_message, new_clone.commit().hexsha, 'master')
    # add a comment
    funny_comment = u'I like coconuts ᶘ ᵒᴥᵒᶅ'
    repo_functions.provide_feedback(new_clone, funny_comment)
    # add another comment with newlines
    newline_comment = u'You wound me sir.\n\nI thought we were friends\nBut I guess we are not.'
    repo_functions.provide_feedback(new_clone, newline_comment)
    # delete a category with stuff in it
    commit_message = view_functions.make_delete_display_commit_message(new_clone, 'tree')
    deleted_file_paths, do_save = edit_functions.delete_file(new_clone, 'tree')
    # commit
    repo_functions.save_working_file(new_clone, 'tree', commit_message, new_clone.commit().hexsha, 'master')
    # checkout
    working_branch.checkout()
    # get and check the history
    activity = chime_activity.ChimeActivity(repo=new_clone, branch_name=working_branch.name, default_branch_name='master', actor_email=fake_author_email)
    activity_history = activity.history
    # 1 start + 6 creations + 2 comments + 1 delete = 10 events
    self.assertEqual(len(activity_history), 10)
    # check the creation of the activity (oldest event, at the end)
    check_item = activity_history.pop()
    self.assertEqual(u'The "{}" activity was started'.format(task_description), check_item['commit_subject'])
    self.assertEqual(u'Created task metadata file "{}"\nSet author_email to {}\nSet task_description to {}'.format(repo_functions.TASK_METADATA_FILENAME, fake_author_email, task_description), check_item['commit_body'])
    self.assertEqual(constants.COMMIT_TYPE_ACTIVITY_UPDATE, check_item['commit_type'])
    # check the delete (newest event, at the front)
    check_item = activity_history.pop(0)
    self.assertEqual(u'The "{}" topic (containing 1 topic and 1 article) was deleted'.format(updated_details[0][1]), check_item['commit_subject'])
    self.assertEqual(u'[{{"action": "delete", "file_path": "{cat1_path}", "display_type": "category", "title": "{cat1_title}"}}, {{"action": "delete", "file_path": "{cat2_path}", "display_type": "category", "title": "{cat2_title}"}}, {{"action": "delete", "file_path": "{art1_path}", "display_type": "article", "title": "{art1_title}"}}]'.format(cat1_path=updated_details[0][3], cat1_title=updated_details[0][1], cat2_path=updated_details[1][3], cat2_title=updated_details[1][1], art1_path=updated_details[2][3], art1_title=updated_details[2][1]), check_item['commit_body'])
    self.assertEqual(constants.COMMIT_TYPE_EDIT, check_item['commit_type'])
    # check the comments
    check_item = activity_history.pop(0)
    self.assertEqual(u'Provided feedback.', check_item['commit_subject'])
    self.assertEqual(newline_comment, check_item['commit_body'])
    self.assertEqual(constants.COMMIT_TYPE_COMMENT, check_item['commit_type'])
    check_item = activity_history.pop(0)
    self.assertEqual(u'Provided feedback.', check_item['commit_subject'])
    self.assertEqual(funny_comment, check_item['commit_body'])
    self.assertEqual(constants.COMMIT_TYPE_COMMENT, check_item['commit_type'])
    # check the category & article creations: history is newest-first,
    # create_details is oldest-first, so walk them in opposite directions.
    # (the throwaway list() around enumerate() was dropped — nothing
    # mutates activity_history during this loop)
    for pos, check_item in enumerate(activity_history):
        check_detail = updated_details[len(updated_details) - (pos + 1)]
        self.assertEqual(u'The "{}" {} was created'.format(check_detail[1], view_functions.file_display_name(check_detail[2])), check_item['commit_subject'])
        self.assertEqual(u'[{{"action": "create", "file_path": "{file_path}", "display_type": "{display_type}", "title": "{title}"}}]'.format(file_path=check_detail[3], display_type=check_detail[2], title=check_detail[1]), check_item['commit_body'])
        self.assertEqual(constants.COMMIT_TYPE_EDIT, check_item['commit_type'])
# in TestRepo
def test_newlines_in_commit_message_body(self):
    ''' Newlines in the commit message body are preserved.
    '''
    # Start a new working branch in a per-user clone of origin.
    author_email = u'erica@example.com'
    description = u'cling to a rock and scrape bacteria and algae off of it with a radula for mollusks'
    source_repo = self.origin
    first_commit = list(source_repo.iter_commits())[-1].hexsha
    dir_name = 'repo-{}-{}'.format(first_commit[:8], slugify(author_email))
    user_dir = realpath(join(self.work_path, quote(dir_name)))
    # Reuse an existing clone directory when possible, otherwise clone fresh.
    if isdir(user_dir):
        new_clone = ChimeRepo(user_dir)
        new_clone.git.reset(hard=True)
        new_clone.remotes.origin.fetch()
    else:
        new_clone = source_repo.clone(user_dir, bare=False)
    # tell git to ignore merge conflicts on the task metadata file
    repo_functions.ignore_task_metadata_on_merge(new_clone)
    working_branch = repo_functions.get_start_branch(new_clone, 'master', description, author_email)
    self.assertTrue(working_branch.name in new_clone.branches)
    self.assertTrue(working_branch.name in self.origin.branches)
    working_branch.checkout()

    # Leave two comments whose bodies contain single and double newlines.
    striking_comment = u'A striking feature of molluscs is the use of the same organ for multiple functions.\n(و ˃̵ᴗ˂̵)و\n\nFor example, the heart and nephridia ("kidneys") are important parts of the reproductive system, as well as the circulatory and excretory systems\nᶘ ᵒᴥᵒᶅ'
    repo_functions.provide_feedback(new_clone, striking_comment)
    universal_comment = u'The three most universal features defining modern molluscs are:\n\n1. A mantle with a significant cavity used for breathing and excretion,\n\n2. the presence of a radula, and\n\n3. the structure of the nervous system.'
    repo_functions.provide_feedback(new_clone, universal_comment)
    working_branch.checkout()

    # The branch tip holds the second comment and its parent the first;
    # both bodies should round-trip with every newline intact.
    _, universal_body = repo_functions.get_commit_message_subject_and_body(working_branch.commit)
    _, striking_body = repo_functions.get_commit_message_subject_and_body(working_branch.commit.parents[0])
    self.assertEqual(universal_comment, universal_body)
    self.assertEqual(striking_comment, striking_body)
# in TestRepo
def test_delete_full_folders(self):
    ''' Make sure that full folders can be deleted, and that what's reported as deleted matches what's expected.
    '''
    # build some nested categories
    view_functions.add_article_or_category(self.clone1, '', 'quick', constants.CATEGORY_LAYOUT)
    view_functions.add_article_or_category(self.clone1, 'quick', 'brown', constants.CATEGORY_LAYOUT)
    view_functions.add_article_or_category(self.clone1, 'quick/brown', 'fox', constants.CATEGORY_LAYOUT)
    view_functions.add_article_or_category(self.clone1, 'quick', 'red', constants.CATEGORY_LAYOUT)
    view_functions.add_article_or_category(self.clone1, 'quick', 'yellow', constants.CATEGORY_LAYOUT)
    view_functions.add_article_or_category(self.clone1, 'quick/yellow', 'banana', constants.CATEGORY_LAYOUT)
    view_functions.add_article_or_category(self.clone1, 'quick', 'orange', constants.CATEGORY_LAYOUT)
    view_functions.add_article_or_category(self.clone1, 'quick/brown', 'potato', constants.CATEGORY_LAYOUT)
    view_functions.add_article_or_category(self.clone1, 'quick/yellow', 'lemon', constants.CATEGORY_LAYOUT)
    view_functions.add_article_or_category(self.clone1, 'quick/red', 'tomato', constants.CATEGORY_LAYOUT)
    view_functions.add_article_or_category(self.clone1, 'quick/red', 'balloon', constants.CATEGORY_LAYOUT)
    view_functions.add_article_or_category(self.clone1, 'quick/orange', 'peanut', constants.CATEGORY_LAYOUT)
    # add in some articles
    view_functions.add_article_or_category(self.clone1, 'quick/brown/fox', 'fur', constants.ARTICLE_LAYOUT)
    view_functions.add_article_or_category(self.clone1, 'quick/brown/fox', 'ears', constants.ARTICLE_LAYOUT)
    view_functions.add_article_or_category(self.clone1, 'quick/yellow/lemon', 'rind', constants.ARTICLE_LAYOUT)
    view_functions.add_article_or_category(self.clone1, 'quick/yellow/lemon', 'pulp', constants.ARTICLE_LAYOUT)
    view_functions.add_article_or_category(self.clone1, 'quick/orange/peanut', 'shell', constants.ARTICLE_LAYOUT)
    view_functions.add_article_or_category(self.clone1, 'quick/red/balloon', 'string', constants.ARTICLE_LAYOUT)
    # add and commit
    self.clone1.index.add(['*'])
    self.clone1.index.commit(u'cats and arts committed for testing purposes')
    # verify that everything's there as expected:
    # 12 categories + 6 articles, one index file each = 18 files
    file_paths = edit_functions.list_contained_files(self.clone1, join(self.clone1.working_dir, 'quick'))
    file_paths.sort()
    self.assertEqual(len(file_paths), 18)
    self.assertTrue(exists(join(self.clone1.working_dir, 'quick')))
    # delete everything, and get a file list back from the git rm command
    deleted_file_paths, do_save = edit_functions.delete_file(self.clone1, 'quick')
    deleted_file_paths.sort()
    self.assertTrue(do_save)
    self.assertEqual(len(deleted_file_paths), 18)
    self.assertFalse(exists(join(self.clone1.working_dir, 'quick')))
    # both lists were sorted above, so wholesale equality verifies the same
    # paths in the same order (replaces the old index-by-index loop, which
    # could not catch extras in deleted_file_paths on its own)
    self.assertEqual(file_paths, deleted_file_paths)
# in TestRepo
def test_task_metadata_creation(self):
    ''' The task metadata file is created when a branch is started, and contains the expected information.
    '''
    author_email = u'erica@example.com'
    description = str(uuid4())
    working_branch = repo_functions.get_start_branch(self.clone1, 'master', description, author_email)
    working_branch.checkout()

    # The tip commit of the fresh branch should be the one that wrote the
    # task metadata file; its message names that file.
    self.assertTrue(repo_functions.TASK_METADATA_FILENAME in working_branch.commit.message)

    # The metadata file should parse into a non-empty dictionary.
    metadata = repo_functions.get_task_metadata_for_branch(self.clone1, working_branch.name)
    self.assertEqual(type(metadata), dict)
    self.assertTrue(len(metadata) > 0)

    # It should record who started the activity and what it's about.
    self.assertEqual(metadata['author_email'], author_email)
    self.assertEqual(metadata['task_description'], description)
# in TestRepo
def test_task_metadata_creation_with_unicode(self):
    ''' The task metadata file is created when a branch is started, and contains the expected information.

        Same as test_task_metadata_creation, but with non-ASCII author
        email and task description.
    '''
    # Doubled backslash keeps the literal backslash in the shrug: a bare
    # '\_' is an invalid escape sequence (DeprecationWarning on Python 3,
    # slated to become an error) even though it yields the same string.
    fake_author_email = u'¯\\_(ツ)_/¯@快速狐狸.com'
    task_description = u'(╯°□°)╯︵ ┻━┻ for ૮(꒦ິ ˙̫̮ ꒦ິ)ა'
    branch1 = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
    branch1_name = branch1.name
    branch1.checkout()
    # verify that the most recent commit on the new branch is for the task metadata file
    # by checking for the name of the file in the commit message
    self.assertTrue(repo_functions.TASK_METADATA_FILENAME in branch1.commit.message)
    # validate the existence of the task metadata file
    task_metadata = repo_functions.get_task_metadata_for_branch(self.clone1, branch1_name)
    self.assertEqual(type(task_metadata), dict)
    self.assertTrue(len(task_metadata) > 0)
    # validate the contents of the task metadata file
    self.assertEqual(task_metadata['author_email'], fake_author_email)
    self.assertEqual(task_metadata['task_description'], task_description)
# in TestRepo
def test_task_metadata_update(self):
    ''' The task metadata file can be updated
    '''
    author_email = u'erica@example.com'
    description = str(uuid4())
    work_branch = repo_functions.get_start_branch(self.clone1, 'master', description, author_email)
    work_branch_name = work_branch.name
    work_branch.checkout()
    # the tip of the new branch should be the task metadata commit:
    # its commit message mentions the metadata file by name
    self.assertTrue(repo_functions.TASK_METADATA_FILENAME in work_branch.commit.message)
    # the task metadata file exists and parses to a non-empty dict
    metadata = repo_functions.get_task_metadata_for_branch(self.clone1, work_branch_name)
    self.assertEqual(type(metadata), dict)
    self.assertTrue(len(metadata) > 0)
    # the metadata contents match what was passed when starting the branch
    self.assertEqual(metadata['author_email'], author_email)
    self.assertEqual(metadata['task_description'], description)
    # write a new task name and some other arbitrary data
    metadata_update = {'task_description': u'Changed my mind', 'lead_singer': u'Johnny Rotten'}
    repo_functions.save_task_metadata_for_branch(self.clone1, 'master', metadata_update)
    # the saved changes are reflected in the metadata file
    updated_metadata = repo_functions.get_task_metadata_for_branch(self.clone1, work_branch_name)
    self.assertEqual(type(updated_metadata), dict)
    self.assertTrue(len(updated_metadata) > 0)
    self.assertEqual(updated_metadata['task_description'], metadata_update['task_description'])
    self.assertEqual(updated_metadata['lead_singer'], metadata_update['lead_singer'])
# in TestRepo
def test_task_metadata_deletion(self):
    ''' The task metadata file is deleted when a branch is completed, and isn't merged.
    '''
    author_email = u'erica@example.com'
    description = str(uuid4())
    work_branch = repo_functions.get_start_branch(self.clone1, 'master', description, author_email)
    work_branch_name = work_branch.name
    work_branch.checkout()
    # Add a file and complete the branch
    edit_functions.create_new_page(self.clone1, '', 'happy.md', dict(title='Hello'), 'Hello hello.')
    save_args = self.clone1, 'happy.md', 'added cool file', work_branch.commit.hexsha, 'master'
    repo_functions.save_working_file(*save_args)
    merge_commit = repo_functions.complete_branch(self.clone1, 'master', work_branch_name)
    # The commit message is expected
    self.assertTrue(repo_functions.ACTIVITY_PUBLISHED_MESSAGE in merge_commit.message)
    # The branch is gone
    self.assertFalse(work_branch_name in self.origin.branches)
    self.assertFalse(work_branch_name in self.clone1.branches)
    # The file we created exists
    self.assertTrue(repo_functions.verify_file_exists_in_branch(self.clone1, 'happy.md', 'master'))
    self.assertTrue(repo_functions.verify_file_exists_in_branch(self.origin, 'happy.md', 'master'))
    # the task metadata file doesn't exist
    self.assertFalse(repo_functions.verify_file_exists_in_branch(self.clone1, repo_functions.TASK_METADATA_FILENAME, 'master'))
    self.assertFalse(repo_functions.verify_file_exists_in_branch(self.origin, repo_functions.TASK_METADATA_FILENAME, 'master'))
# in TestRepo
def test_merge_tagged_with_branch_metadata(self):
    ''' The merge commit is tagged with branch metadata on publish.
    '''
    # start an activity on clone1
    erica_email = u'erica@example.com'
    task_description = u'Attract Insects With Anthocyanin Pigments To The Cavity Formed By A Cupped Leaf for Nepenthes'
    clone1_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, erica_email)
    branch_name = clone1_branch.name
    clone1_branch.checkout()
    clone1_branch_task_metadata = repo_functions.get_task_metadata_for_branch(self.clone1, branch_name)
    # check out the branch on clone2 and verify that it's the same
    clone2_branch = repo_functions.get_existing_branch(self.clone2, 'master', branch_name)
    clone2_branch.checkout()
    clone2_branch_task_metadata = repo_functions.get_task_metadata_for_branch(self.clone2, branch_name)
    self.assertEqual(clone2_branch.commit.hexsha, clone1_branch.commit.hexsha)
    self.assertEqual(clone1_branch_task_metadata, clone2_branch_task_metadata)
    # On clone1, add a file and complete the branch
    edit_functions.create_new_page(self.clone1, '', 'happy.md', dict(title='Hello'), 'Hello hello.')
    args1 = self.clone1, 'happy.md', 'added cool file', clone1_branch.commit.hexsha, 'master'
    repo_functions.save_working_file(*args1)
    merge_commit = repo_functions.complete_branch(self.clone1, 'master', branch_name)
    # update clone2
    self.clone2.git.fetch('origin')
    # the branch is no longer in clone1 or origin
    self.assertFalse(branch_name in self.clone1.branches)
    self.assertFalse(branch_name in self.origin.branches)
    # but it's still there in clone2
    self.assertTrue(branch_name in self.clone2.branches)
    # collect the tag ref, object, name
    # NOTE(review): indexing tags[0] assumes publish created exactly one
    # tag in each repo - confirm against complete_branch's behavior
    clone1_tag_ref = self.clone1.tags[0]
    clone1_tag = clone1_tag_ref.tag
    clone1_tag_name = clone1_tag.tag
    clone2_tag_ref = self.clone2.tags[0]
    clone2_tag = clone2_tag_ref.tag
    clone2_tag_name = clone2_tag.tag
    origin_tag_ref = self.origin.tags[0]
    origin_tag = origin_tag_ref.tag
    origin_tag_name = origin_tag.tag
    # the tag exists
    self.assertIsNotNone(clone1_tag_ref)
    self.assertIsNotNone(clone2_tag_ref)
    self.assertIsNotNone(origin_tag_ref)
    # it's attached to the merge commit
    self.assertEqual(clone1_tag_ref.commit, merge_commit)
    self.assertEqual(clone2_tag_ref.commit, merge_commit)
    self.assertEqual(origin_tag_ref.commit, merge_commit)
    # it has the same name as the branch
    self.assertEqual(clone1_tag_name, branch_name)
    self.assertEqual(clone2_tag_name, branch_name)
    self.assertEqual(origin_tag_name, branch_name)
    # the tag message is the jsonified task metadata
    clone1_tag_metadata = json.loads(clone1_tag.message)
    clone2_tag_metadata = json.loads(clone2_tag.message)
    origin_tag_metadata = json.loads(origin_tag.message)
    self.assertEqual(clone1_tag_metadata, clone1_branch_task_metadata)
    self.assertEqual(clone2_tag_metadata, clone1_branch_task_metadata)
    self.assertEqual(origin_tag_metadata, clone1_branch_task_metadata)
    # the file we published in clone1 is in clone2's local branch and master
    self.clone2.git.pull('origin', branch_name)
    self.assertTrue(repo_functions.verify_file_exists_in_branch(self.clone2, 'happy.md', branch_name))
    self.clone2.branches['master'].checkout()
    self.clone2.git.pull('origin', 'master')
    self.assertTrue(repo_functions.verify_file_exists_in_branch(self.clone2, 'happy.md', 'master'))
# in TestRepo
def test_task_metadata_merge_conflict(self):
    ''' Task metadata file merge conflict is handled correctly
    '''
    fake_author_email1 = u'erica@example.com'
    fake_author_email2 = u'nobody@example.com'
    task_description1, task_description2 = str(uuid4()), str(uuid4())
    branch1 = repo_functions.get_start_branch(self.clone1, 'master', task_description1, fake_author_email1)
    branch2 = repo_functions.get_start_branch(self.clone2, 'master', task_description2, fake_author_email2)
    branch1_name = branch1.name
    # Check out the branches
    branch1.checkout()
    branch2.checkout()
    # Save a task metadata file
    fake_metadata = {'task_description': u'Changed my mind', 'lead_singer': u'Johnny Rotten'}
    commit1 = repo_functions.save_task_metadata_for_branch(self.clone1, 'master', fake_metadata)
    self.assertEqual(self.origin.branches[branch1_name].commit, commit1)
    self.assertEqual(commit1, branch1.commit)
    # merge the branch to master manually so the task metadata file will be included
    message = u'Manual merge including task metadata'
    self.clone1.git.checkout('master')
    self.clone1.git.pull('origin', 'master')
    try:
        self.clone1.git.merge(branch1_name, '--no-ff', m=message)
    except GitCommandError:
        # raise the two commits in conflict.
        remote_commit = self.clone1.refs['origin/master'].commit
        self.clone1.git.reset('master', hard=True)
        self.clone1.git.checkout(branch1_name)
        raise repo_functions.MergeConflict(remote_commit, self.clone1.commit())
    self.clone1.git.push('origin', 'master')
    # Delete the working branch.
    self.clone1.remotes.origin.push(':' + branch1_name)
    self.clone1.delete_head([branch1_name])
    # verify that the branch has been deleted
    self.assertFalse(branch1_name in self.origin.branches)
    # Although there are conflicting changes in the two task metadata files,
    # there should be no conflict raised!
    try:
        args = self.clone2, repo_functions.TASK_METADATA_FILENAME, '...', branch2.commit.hexsha, 'master'
        repo_functions.save_working_file(*args)
    except repo_functions.MergeConflict:
        # self.fail() produces a meaningful failure message,
        # unlike the previous assertTrue(False)
        self.fail('Unexpected MergeConflict raised for the task metadata file')
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    main()
| {
"content_hash": "461419ab8f1cbd653370c8a696506cce",
"timestamp": "",
"source": "github",
"line_count": 1506,
"max_line_length": 578,
"avg_line_length": 50.76892430278885,
"alnum_prop": 0.660074812315258,
"repo_name": "yudiutomo/chime",
"id": "be0391e586b74c9caee4499fbe17c9969b47c914",
"size": "76597",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/unit/repo.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "489908"
},
{
"name": "HTML",
"bytes": "1357401"
},
{
"name": "JavaScript",
"bytes": "59090"
},
{
"name": "Python",
"bytes": "617425"
},
{
"name": "Ruby",
"bytes": "16142"
},
{
"name": "Shell",
"bytes": "2988"
}
],
"symlink_target": ""
} |
"""Module with common classes for the controller."""
from enum import Enum
from kytos.core.config import KytosConfig
__all__ = ('GenericEntity',)
class EntityStatus(Enum):
    """Possible statuses reported by :class:`GenericEntity` instances."""

    # pylint: disable=invalid-name
    UP = 1
    DISABLED = 2
    DOWN = 3
class GenericEntity:
    """Generic Class that represents any Entity."""

    def __init__(self):
        """Create the GenericEntity object with empty metadata dictionary."""
        options = KytosConfig().options['daemon']

        self.metadata = {}
        # operational status: whether the entity is currently working
        self.active = True
        # administrative status, taken from the daemon configuration
        self.enabled = options.enable_entities_by_default

    @property
    def status(self):
        """Return the current status of the Entity."""
        if self.enabled and self.active:
            return EntityStatus.UP
        if self.is_administrative_down():
            return EntityStatus.DISABLED
        return EntityStatus.DOWN

    def is_administrative_down(self):
        """Return True for disabled Entities."""
        return not self.enabled

    def enable(self):
        """Administratively enable the Entity.

        Although this method only sets an 'enabled' flag, always prefer to use
        it instead of setting it manually. This allows us to change the
        behavior on the future.
        """
        self.enabled = True

    def disable(self):
        """Administratively disable the Entity.

        This method can disable other related entities. For this behavior,
        rewrite it on the child classes.
        """
        self.enabled = False

    def add_metadata(self, key, value):
        """Add a new metadata (key, value); never overwrite an existing key."""
        if key not in self.metadata:
            self.metadata[key] = value
            return True
        return False

    def remove_metadata(self, key):
        """Try to remove a specific metadata."""
        if key in self.metadata:
            del self.metadata[key]
            return True
        return False

    def get_metadata(self, key):
        """Try to get a specific metadata."""
        return self.metadata.get(key)

    def update_metadata(self, key, value):
        """Overwrite a specific metadata."""
        self.metadata[key] = value

    def clear_metadata(self):
        """Remove all metadata information."""
        self.metadata = {}

    def extend_metadata(self, metadatas, force=True):
        """Extend the metadata information.

        If force is True any existing value is overwritten.
        """
        if not force:
            # Preserve existing keys: only add what is missing
            for key, value in metadatas.items():
                self.add_metadata(key, value)
            return None
        return self.metadata.update(metadatas)
| {
"content_hash": "6546c930e6866f880c1d4dc29fc4c75e",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 78,
"avg_line_length": 28.927083333333332,
"alnum_prop": 0.6157724162765574,
"repo_name": "renanrodrigo/kytos",
"id": "6fa0c37aad44e12ee453fd9add6f55b77ab93622",
"size": "2777",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kytos/core/common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "164432"
}
],
"symlink_target": ""
} |
import sqlalchemy.types as saTypes
# Local imports
from gp10.util import to_ord, from_ord
__all__ = [
'StripString',
'Ordinal',
]
class StripString(saTypes.TypeDecorator):
    """String column type that strips surrounding whitespace from values
    read out of the database.
    """

    impl = saTypes.String

    def process_result_value(self, value, dialect):
        """Return the value with whitespace stripped.

        SQL NULL arrives here as None; previously this called
        value.strip() unconditionally and raised AttributeError for
        NULL columns. None is now passed through unchanged.
        """
        return value.strip() if value is not None else value

    def copy(self):
        """Produce a copy that preserves the underlying String length."""
        return StripString(self.impl.length)
class Ordinal(saTypes.TypeDecorator):
    """Integer column type converted to/from the application's ordinal
    representation via the gp10.util helpers.
    """

    impl = saTypes.Integer

    def process_result_value(self, value, dialect):
        """Convert the raw integer read from the database."""
        return from_ord(value)

    def process_bind_param(self, value, dialect):
        """Convert the application value before it is written."""
        return to_ord(value)
| {
"content_hash": "7b010ea56716e6baf7bc40ca0b0a8616",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 51,
"avg_line_length": 21,
"alnum_prop": 0.6751700680272109,
"repo_name": "pacopablo/gp10",
"id": "8dc29bad03eb17d93745bfc74e9cd08b0c62b811",
"size": "926",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gp10/types.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "67601"
}
],
"symlink_target": ""
} |
""" Messaging API
API to send & receive messages:
- currently SMS, Email, RSS & Twitter
Messages get sent to the Outbox (& Log)
From there, the Scheduler tasks collect them & send them
@copyright: 2009-2014 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3Msg",
"S3Compose",
)
import base64
import datetime
import string
import urllib
import urllib2
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except:
import gluon.contrib.simplejson as json # fallback to pure-Python module
try:
from lxml import etree
except ImportError:
import sys
print >> sys.stderr, "ERROR: lxml module needed for XML handling"
raise
from gluon import current, redirect
from gluon.html import *
from s3codec import S3Codec
from s3crud import S3CRUD
from s3forms import S3SQLDefaultForm
from s3utils import s3_unicode
from s3validators import IS_IN_SET, IS_ONE_OF
from s3widgets import S3PentityAutocompleteWidget
# Identity translation table for str.translate (Python 2 idiom);
# ALLCHARS doubles as the full 256-char table used to derive the
# "delete everything except..." tables below
IDENTITYTRANS = ALLCHARS = string.maketrans("", "")
# Every character that is NOT a digit (deleted when sanitising phone numbers)
NOTPHONECHARS = ALLCHARS.translate(IDENTITYTRANS, string.digits)
# Every character that is NOT valid in a Twitter handle (digits, letters, "_")
NOTTWITTERCHARS = ALLCHARS.translate(IDENTITYTRANS,
                                     "%s%s_" % (string.digits, string.letters))

# Maximum length of a single tweet
TWITTER_MAX_CHARS = 140
# Continuation markers (ellipsis) for tweets split across messages
TWITTER_HAS_NEXT_SUFFIX = u' \u2026'
TWITTER_HAS_PREV_PREFIX = u'\u2026 '
# =============================================================================
class S3Msg(object):
""" Messaging framework """
def __init__(self,
             modem=None):
    """
    Constructor

    @param modem: optional modem instance used for SMS sending
    """

    T = current.T
    self.modem = modem

    # http://docs.oasis-open.org/emergency/edxl-have/cs01/xPIL-types.xsd
    # <xs:simpleType name="CommunicationMediaTypeList">
    #  <xs:enumeration value="Cellphone"/>
    #  <xs:enumeration value="Fax"/>
    #  <xs:enumeration value="Pager"/>
    #  <xs:enumeration value="Telephone"/>
    #  <xs:enumeration value="VOIP"/>
    # <xs:simpleType name="ElectronicAddressIdentifierTypeList">
    #  <xs:enumeration value="AIM"/>
    #  <xs:enumeration value="EMAIL"/>
    #  <xs:enumeration value="GOOGLE"/>
    #  <xs:enumeration value="GIZMO"/>
    #  <xs:enumeration value="ICQ"/>
    #  <xs:enumeration value="JABBER"/>
    #  <xs:enumeration value="MSN"/>
    #  <xs:enumeration value="SIP"/>
    #  <xs:enumeration value="SKYPE"/>
    #  <xs:enumeration value="URL"/>
    #  <xs:enumeration value="XRI"/>
    #  <xs:enumeration value="YAHOO"/>

    # @ToDo: Remove the T from the init() & T upon usage instead

    MOBILE = current.deployment_settings.get_ui_label_mobile_phone()
    # Full range of contact options
    self.CONTACT_OPTS = {"EMAIL": T("Email"),
                         "FACEBOOK": T("Facebook"),
                         "FAX": T("Fax"),
                         "HOME_PHONE": T("Home phone"),
                         "RADIO": T("Radio Callsign"),
                         "RSS": T("RSS Feed"),
                         "SKYPE": T("Skype"),
                         "SMS": MOBILE,
                         "TWITTER": T("Twitter"),
                         #"XMPP": "XMPP",
                         #"WEB": T("Website"),
                         "WORK_PHONE": T("Work phone"),
                         "OTHER": T("other")
                         }

    # Those contact options to which we can send notifications
    # NB Coded into hrm_map_popup & s3.msg.js
    self.MSG_CONTACT_OPTS = {"EMAIL": T("Email"),
                             "SMS": MOBILE,
                             "TWITTER": T("Twitter"),
                             #"XMPP": "XMPP",
                             }

    # SMS Gateways
    self.GATEWAY_OPTS = {"MODEM": T("Modem"),
                         "SMTP": T("SMTP"),
                         "TROPO": T("Tropo"),
                         # Currently only available for Inbound
                         #"TWILIO": T("Twilio"),
                         "WEB_API": T("Web API"),
                         }
# -------------------------------------------------------------------------
@staticmethod
def sanitise_phone(phone):
    """
    Strip out unnecessary characters from the string:
    +()- & space

    @param phone: the raw phone number string
    @return: a digits-only number, with the default country code
             substituted for a leading 0 (empty string if the input
             contained no digits)
    """

    settings = current.deployment_settings
    default_country_code = settings.get_L10n_default_country_code()

    clean = phone.translate(IDENTITYTRANS, NOTPHONECHARS)

    # Guard against input with no digits at all:
    # previously clean[0] raised IndexError in that case
    if not clean:
        return clean

    # If number starts with a 0 then need to remove this & add the country code in
    if clean[0] == "0":
        # Add default country code
        if default_country_code == 39:
            # Italy keeps 0 after country code
            clean = "%s%s" % (default_country_code, clean)
        else:
            clean = "%s%s" % (default_country_code,
                              clean.lstrip("0"))

    return clean
# =========================================================================
# Inbound Messages
# =========================================================================
@staticmethod
def sort_by_sender(row):
    """
    Helper method to sort messages according to sender priority.

    @param row: a msg_parsing_status Row (must carry its record id)
    @return: the priority of the message's sender, or sys.maxint
             (i.e. lowest priority) if it cannot be determined
    """

    s3db = current.s3db
    db = current.db
    ptable = s3db.msg_parsing_status
    mtable = s3db.msg_message
    stable = s3db.msg_sender

    try:
        # @ToDo: Look at doing a Join?
        # NB select() returns a Rows object, so .first() is required
        # before reading the column; previously it was missing, which
        # made this always fall through to the except-branch & assign
        # every message the lowest priority
        pmessage = db(ptable.id == row.id).select(ptable.message_id,
                                                  limitby=(0, 1)).first()
        m_id = pmessage.message_id

        message = db(mtable.id == m_id).select(mtable.from_address,
                                               limitby=(0, 1)).first()
        sender = message.from_address

        srecord = db(stable.sender == sender).select(stable.priority,
                                                     limitby=(0, 1)).first()

        return srecord.priority
    except:
        import sys
        # Return max value i.e. assign lowest priority
        return sys.maxint
# -------------------------------------------------------------------------
@staticmethod
def parse(channel_id, function_name):
    """
    Parse unparsed Messages from Channel with Parser
    - called from Scheduler

    @param channel_id: Channel
    @param function_name: Parser
    """

    from s3parser import S3Parsing

    run_parser = S3Parsing.parser
    stable = current.s3db.msg_parsing_status

    # All messages on this channel which haven't been parsed yet
    unparsed = current.db((stable.channel_id == channel_id) &
                          (stable.is_parsed == False)).select(stable.id,
                                                              stable.message_id,
                                                              )
    for entry in unparsed:
        # Parse the Message
        reply_id = run_parser(function_name, entry.message_id)
        # Mark it as parsed & provide a link to the reply
        entry.update_record(is_parsed=True,
                            reply_id=reply_id)
    return
# =========================================================================
# Outbound Messages
# =========================================================================
def compose(self,
            type = "SMS",
            recipient_type = None,
            recipient = None,
            #hide = True,
            subject = "",
            message = "",
            url = None,
            # @ToDo: re-implement
            #formid = None,
            ):
    """
    Form to Compose a Message

    @param type: The default message type: None, EMAIL, SMS or TWITTER
    @param recipient_type: Send to Persons or Groups? (pr_person or pr_group)
    @param recipient: The pe_id of the person/group to send the message to
                      - this can also be set by setting one of
                        (in priority order, if multiple found):
                        request.vars.pe_id
                        request.vars.person_id @ToDo
                        request.vars.group_id @ToDo
                        request.vars.hrm_id @ToDo
    @param subject: The default subject text (for Emails)
    @param message: The default message text
    @param url: Redirect to the specified URL() after message sent
    @param formid: If set, allows multiple forms open in different tabs
    """

    if not url:
        url = URL(c="msg", f="compose")

    # Unauthenticated users aren't allowed to Compose Messages
    auth = current.auth
    if not (auth.is_logged_in() or auth.basic()):
        redirect(URL(c="default", f="user", args="login",
                     vars={"_next" : url}))

    # Authenticated users need to have update rights on the msg controller
    if not auth.permission.has_permission("update", c="msg"):
        current.session.error = current.T("You do not have permission to send messages")
        redirect(URL(f="index"))

    # Configure an instance of S3Compose
    composer = S3Compose()
    composer.contact_method = type
    composer.recipient = recipient
    composer.recipients = None
    composer.recipient_type = recipient_type
    composer.subject = subject
    composer.message = message
    #composer.formid = formid
    composer.resource = None
    composer.url = url

    # Generate the form
    form = composer._compose_form()

    # Default title
    # - can be overridden by the calling function
    title = current.T("Send Message")

    return dict(form = form,
                title = title)
# -------------------------------------------------------------------------
@staticmethod
def send(recipient, message, subject=None):
    """
    Send a single message to an Address

    @param recipient: "email@address", "+4412345678", "@nick"
    @param message: message body
    @param subject: message subject (Email only)
    """

    # Infer the channel from the shape of the recipient address
    if recipient.startswith("@"):
        # Twitter handle
        tablename = "msg_twitter"
    elif "@" in recipient:
        # Email address
        tablename = "msg_email"
    else:
        # Phone number => SMS
        tablename = "msg_sms"

    # @ToDo: Complete this
# -------------------------------------------------------------------------
@staticmethod
def send_by_pe_id(pe_id,
subject = "",
message = "",
contact_method = "EMAIL",
from_address = None,
system_generated = False):
"""
Send a single message to a Person Entity (or list thereof)
@ToDo: contact_method = ALL
- look up the pr_contact options available for the pe & send via all
@ToDo: This is not transaction safe
- power failure in the middle will cause no message in the outbox
"""
s3db = current.s3db
# Place the Message in the appropriate Log
if contact_method == "EMAIL":
if not from_address:
from_address = current.deployment_settings.get_mail_sender()
table = s3db.msg_email
_id = table.insert(body=message,
subject=subject,
from_address=from_address,
#to_address=pe_id,
inbound=False,
)
record = dict(id=_id)
s3db.update_super(table, record)
message_id = record["message_id"]
elif contact_method == "SMS":
table = s3db.msg_sms
_id = table.insert(body=message,
from_address=from_address,
inbound=False,
)
record = dict(id=_id)
s3db.update_super(table, record)
message_id = record["message_id"]
elif contact_method == "TWITTER":
table = s3db.msg_twitter
_id = table.insert(body=message,
from_address=from_address,
inbound=False,
)
record = dict(id=_id)
s3db.update_super(table, record)
message_id = record["message_id"]
else:
# @ToDo
raise
# Place the Message in the main OutBox
table = s3db.msg_outbox
if isinstance(pe_id, list):
# Add an entry per recipient
listindex = 0
insert = table.insert
for _id in pe_id:
try:
insert(message_id = message_id,
pe_id = _id,
contact_method = contact_method,
system_generated = system_generated)
listindex = listindex + 1
except:
return listindex
else:
try:
table.insert(message_id = message_id,
pe_id = pe_id,
contact_method = contact_method,
system_generated = system_generated)
except:
return False
# Process OutBox async
current.s3task.async("msg_process_outbox",
args = [contact_method])
return message_id
# -------------------------------------------------------------------------
def process_outbox(self, contact_method="EMAIL"):
    """
    Send pending messages from outbox (usually called from scheduler)

    @param contact_method: the output channel (see pr_contact.method)

    @todo: contact_method = "ALL"
    """

    db = current.db
    s3db = current.s3db

    if contact_method == "SMS":
        # Read all enabled Gateways
        # - we assume there are relatively few & we may need to decide which to use based on the message's organisation
        table = s3db.msg_sms_outbound_gateway
        etable = db.msg_channel
        query = (table.deleted == False) & \
                (table.channel_id == etable.channel_id)
        rows = db(query).select(table.channel_id,
                                table.organisation_id,
                                etable.instance_type,
                                )
        if not rows:
            # Raise exception here to make the scheduler
            # task fail permanently until manually reset
            raise ValueError("No SMS handler defined!")
        if len(rows) == 1:
            # Single gateway: use it for all messages
            lookup_org = False
            row = rows.first()
            outgoing_sms_handler = row["msg_channel.instance_type"]
            channel_id = row["msg_sms_outbound_gateway.channel_id"]
        else:
            # Multiple gateways: select per-message by organisation
            lookup_org = True
            org_branches = current.deployment_settings.get_org_branches()
            if org_branches:
                org_parents = s3db.org_parents
            channels = {}
            for row in rows:
                channels[row["msg_sms_outbound_gateway.organisation_id"]] = \
                    dict(outgoing_sms_handler = row["msg_channel.instance_type"],
                         channel_id = row["msg_sms_outbound_gateway.channel_id"])

    elif contact_method == "TWITTER":
        twitter_settings = self.get_twitter_api()
        if not twitter_settings:
            # Raise exception here to make the scheduler
            # task fail permanently
            raise ValueError("No Twitter API available!")

    def dispatch_to_pe_id(pe_id,
                          subject,
                          message,
                          outbox_id,
                          message_id,
                          organisation_id = None,
                          contact_method = contact_method):
        """
        Helper method to send messages by pe_id

        @param pe_id: the pe_id
        @param subject: the message subject
        @param message: the message body
        @param outbox_id: the outbox record ID
        @param message_id: the message_id
        @param organisation_id: the organisation_id (for SMS)
        @param contact_method: the contact method
        """

        # Get the recipient's contact info
        table = s3db.pr_contact
        query = (table.pe_id == pe_id) & \
                (table.contact_method == contact_method) & \
                (table.deleted == False)
        contact_info = db(query).select(table.value,
                                        orderby=table.priority,
                                        limitby=(0, 1)).first()
        # Send the message
        if contact_info:
            address = contact_info.value
            if contact_method == "EMAIL":
                return self.send_email(address,
                                       subject,
                                       message)
            elif contact_method == "SMS":
                if lookup_org:
                    # Pick the gateway matching this message's organisation
                    channel = channels.get(organisation_id)
                    if not channel and \
                       org_branches:
                        orgs = org_parents(organisation_id)
                        for org in orgs:
                            channel = channels.get(org)
                            if channel:
                                break
                    if not channel:
                        # Look for an unrestricted channel
                        channel = channels.get(None)
                    if not channel:
                        # We can't send this message as there is no unrestricted channel & none which matches this Org
                        return False
                    outgoing_sms_handler = channel["outgoing_sms_handler"]
                    channel_id = channel["channel_id"]
                if outgoing_sms_handler == "msg_sms_webapi_channel":
                    return self.send_sms_via_api(address,
                                                 message,
                                                 message_id,
                                                 channel_id)
                elif outgoing_sms_handler == "msg_sms_smtp_channel":
                    return self.send_sms_via_smtp(address,
                                                  message,
                                                  channel_id)
                elif outgoing_sms_handler == "msg_sms_modem_channel":
                    return self.send_sms_via_modem(address,
                                                   message,
                                                   channel_id)
                elif outgoing_sms_handler == "msg_sms_tropo_channel":
                    # NB This does not mean the message is sent
                    return self.send_sms_via_tropo(outbox_id,
                                                   message_id,
                                                   address,
                                                   message,
                                                   channel_id)
            elif contact_method == "TWITTER":
                return self.send_tweet(message, address)

        return False

    outbox = s3db.msg_outbox

    query = (outbox.contact_method == contact_method) & \
            (outbox.status == 1) & \
            (outbox.deleted == False)

    petable = s3db.pr_pentity
    left = [petable.on(petable.pe_id == outbox.pe_id)]

    fields = [outbox.id,
              outbox.message_id,
              outbox.pe_id,
              outbox.retries,
              petable.instance_type,
              ]

    if contact_method == "EMAIL":
        mailbox = s3db.msg_email
        fields.extend([mailbox.subject, mailbox.body])
        left.append(mailbox.on(mailbox.message_id == outbox.message_id))
    elif contact_method == "SMS":
        mailbox = s3db.msg_sms
        fields.append(mailbox.body)
        if lookup_org:
            fields.append(mailbox.organisation_id)
        left.append(mailbox.on(mailbox.message_id == outbox.message_id))
    elif contact_method == "TWITTER":
        mailbox = s3db.msg_twitter
        fields.append(mailbox.body)
        left.append(mailbox.on(mailbox.message_id == outbox.message_id))
    else:
        # @ToDo
        raise

    rows = db(query).select(*fields,
                            left=left,
                            orderby=~outbox.retries)
    if not rows:
        return

    htable = s3db.hrm_human_resource
    otable = db.org_organisation
    ptable = db.pr_person
    gtable = s3db.pr_group
    mtable = db.pr_group_membership

    # Left joins for multi-recipient lookups
    gleft = [mtable.on((mtable.group_id == gtable.id) &
                       (mtable.person_id != None) &
                       (mtable.deleted != True)),
             ptable.on((ptable.id == mtable.person_id) &
                       (ptable.deleted != True))
             ]
    oleft = [htable.on((htable.organisation_id == otable.id) &
                       (htable.person_id != None) &
                       (htable.deleted != True)),
             ptable.on((ptable.id == htable.person_id) &
                       (ptable.deleted != True))
             ]

    atable = s3db.table("deploy_alert", None)
    if atable:
        ltable = db.deploy_alert_recipient
        aleft = [ltable.on(ltable.alert_id == atable.id),
                 htable.on((htable.id == ltable.human_resource_id) &
                           (htable.person_id != None) &
                           (htable.deleted != True)),
                 ptable.on((ptable.id == htable.person_id) &
                           (ptable.deleted != True))
                 ]

    # chainrun: used to fire process_outbox again,
    # when messages are sent to groups or organisations
    chainrun = False

    # Set a default for non-SMS
    organisation_id = None

    for row in rows:
        status = True
        if contact_method == "EMAIL":
            subject = row["msg_email.subject"] or ""
            message = row["msg_email.body"] or ""
        elif contact_method == "SMS":
            subject = None
            message = row["msg_sms.body"] or ""
            if lookup_org:
                organisation_id = row["msg_sms.organisation_id"]
        elif contact_method == "TWITTER":
            subject = None
            message = row["msg_twitter.body"] or ""
        else:
            # @ToDo
            continue

        entity_type = row["pr_pentity"].instance_type
        if not entity_type:
            current.log.warning("s3msg", "Entity type unknown")
            continue

        row = row["msg_outbox"]
        pe_id = row.pe_id
        message_id = row.message_id

        if entity_type == "pr_group":
            # Re-queue the message for each member in the group
            gquery = (gtable.pe_id == pe_id)
            recipients = db(gquery).select(ptable.pe_id, left=gleft)
            pe_ids = set(r.pe_id for r in recipients)
            pe_ids.discard(None)
            if pe_ids:
                for pe_id in pe_ids:
                    outbox.insert(message_id=message_id,
                                  pe_id=pe_id,
                                  contact_method=contact_method,
                                  system_generated=True)
                chainrun = True
                status = True

        elif entity_type == "deploy_alert":
            # Re-queue the message for each HR in the group
            aquery = (atable.pe_id == pe_id)
            recipients = db(aquery).select(ptable.pe_id, left=aleft)
            pe_ids = set(r.pe_id for r in recipients)
            pe_ids.discard(None)
            if pe_ids:
                for pe_id in pe_ids:
                    outbox.insert(message_id=message_id,
                                  pe_id=pe_id,
                                  contact_method=contact_method,
                                  system_generated=True)
                chainrun = True
                status = True

        elif entity_type == "org_organisation":
            # Re-queue the message for each HR in the organisation
            oquery = (otable.pe_id == pe_id)
            recipients = db(oquery).select(ptable.pe_id, left=oleft)
            pe_ids = set(r.pe_id for r in recipients)
            pe_ids.discard(None)
            if pe_ids:
                for pe_id in pe_ids:
                    outbox.insert(message_id=message_id,
                                  pe_id=pe_id,
                                  contact_method=contact_method,
                                  system_generated=True)
                chainrun = True
                status = True

        elif entity_type == "pr_person":
            # Send the message to this person
            try:
                status = dispatch_to_pe_id(pe_id,
                                           subject,
                                           message,
                                           row.id,
                                           message_id,
                                           organisation_id)
            except:
                status = False
        else:
            # Unsupported entity type
            row.update_record(status = 4) # Invalid
            db.commit()
            continue

        if status:
            row.update_record(status = 2) # Sent
            db.commit()
        else:
            if row.retries > 0:
                row.update_record(retries = row.retries - 1)
                db.commit()
            elif row.retries is not None:
                row.update_record(status = 5) # Failed

    if chainrun:
        self.process_outbox(contact_method)

    return
# -------------------------------------------------------------------------
# Send Email
# -------------------------------------------------------------------------
def send_email(self,
               to,
               subject,
               message,
               attachments=None,
               cc=None,
               bcc=None,
               reply_to=None,
               sender=None,
               encoding="utf-8",
               #from_address=None,
               ):
    """
    Function to send Email
    - simple Wrapper over Web2Py's Email API
    """

    if not to:
        return False

    settings = current.deployment_settings
    default_sender = settings.get_mail_sender()
    if not default_sender:
        current.log.warning("Email sending disabled until the Sender address has been set in models/000_config.py")
        return False
    # Fall back to the deployment default sender
    sender = sender or default_sender

    limit = settings.get_mail_limit()
    if limit:
        # Check whether we've reached our daily limit
        day = datetime.timedelta(hours=24)
        cutoff = current.request.utcnow - day
        table = current.s3db.msg_channel_limit
        # @ToDo: Include Channel Info
        check = current.db(table.created_on > cutoff).count()
        if check >= limit:
            return False
        # Log the sending
        table.insert()

    sent = current.mail.send(to,
                             subject=subject,
                             message=message,
                             attachments=attachments,
                             cc=cc,
                             bcc=bcc,
                             reply_to=reply_to,
                             sender=sender,
                             encoding=encoding,
                             # Added to Web2Py 2014-03-04
                             # - defaults to sender
                             #from_address=from_address,
                             )
    # Surface the mailer error (or clear it) via the session
    current.session.error = current.mail.error if not sent else None

    return sent
# -------------------------------------------------------------------------
    def send_email_by_pe_id(self,
                            pe_id,
                            subject="",
                            message="",
                            from_address=None,
                            system_generated=False):
        """
            API wrapper over send_by_pe_id

            @param pe_id: the person entity to address the email to
            @param subject: the subject line
            @param message: the message body
            @param from_address: sender address, passed through unchanged
            @param system_generated: whether the message was generated by the
                                     system rather than entered by a user
        """
        return self.send_by_pe_id(pe_id,
                                  subject,
                                  message,
                                  "EMAIL",
                                  from_address,
                                  system_generated)
# =========================================================================
# SMS
# =========================================================================
# -------------------------------------------------------------------------
# OpenGeoSMS
# -------------------------------------------------------------------------
@staticmethod
def prepare_opengeosms(location_id, code="S", map="google", text=""):
"""
Function to create an OpenGeoSMS
@param: location_id - reference to record in gis_location table
@param: code - the type of OpenGeoSMS:
S = Sahana
SI = Incident Report
ST = Task Dispatch
@param: map: "google" or "osm"
@param: text - the rest of the message
Returns the formatted OpenGeoSMS or None if it can't find
an appropriate location
"""
if not location_id:
return text
db = current.db
s3db = current.s3db
table = s3db.gis_location
query = (table.id == location_id)
location = db(query).select(table.lat,
table.lon,
#table.path,
#table.parent,
limitby=(0, 1)).first()
if not location:
return text
lat = location.lat
lon = location.lon
if lat is None or lon is None:
# @ToDo: Should we try parents? Or would that not be granular enough anyway?
return text
code = "GeoSMS=%s" % code
if map == "google":
url = "http://maps.google.com/?q=%f,%f" % (lat, lon)
elif map == "osm":
# NB Not sure how this will work in OpenGeoSMS client
url = "http://openstreetmap.org?mlat=%f&mlon=%f&zoom=14" % (lat, lon)
opengeosms = "%s&%s\n%s" % (url, code, text)
return opengeosms
# -------------------------------------------------------------------------
@staticmethod
def parse_opengeosms(message):
"""
Function to parse an OpenGeoSMS
@param: message - Inbound message to be parsed for OpenGeoSMS.
Returns the lat, lon, code and text contained in the message.
"""
lat = ""
lon = ""
code = ""
text = ""
words = message.split(" ")
if "http://maps.google.com/?q" in words[0]:
# Parse OpenGeoSMS
pwords = words[0].split("?q=")[1].split(",")
lat = pwords[0]
lon = pwords[1].split("&")[0]
code = pwords[1].split("&")[1].split("=")[1]
text = ""
for a in range(1, len(words)):
text = text + words[a] + " "
return lat, lon, code, text
# -------------------------------------------------------------------------
# Send SMS
# -------------------------------------------------------------------------
def send_sms_via_api(self,
mobile,
text = "",
message_id = None,
channel_id = None,
):
"""
Function to send SMS via Web API
"""
db = current.db
s3db = current.s3db
table = s3db.msg_sms_webapi_channel
# Get Configuration
if channel_id:
sms_api = db(table.channel_id == channel_id).select(limitby=(0, 1)
).first()
else:
sms_api = db(table.enabled == True).select(limitby=(0, 1)).first()
if not sms_api:
return False
post_data = {}
parts = sms_api.parameters.split("&")
for p in parts:
post_data[p.split("=")[0]] = p.split("=")[1]
mobile = self.sanitise_phone(mobile)
# To send non-ASCII characters in UTF-8 encoding, we'd need
# to hex-encode the text and activate unicode=1, but this
# would limit messages to 70 characters, and many mobile
# phones can't display unicode anyway.
# To be however able to send messages with at least special
# European characters like á or ø, we convert the UTF-8 to
# the default ISO-8859-1 (latin-1) here:
text_latin1 = s3_unicode(text).encode("utf-8") \
.decode("utf-8") \
.encode("iso-8859-1")
post_data[sms_api.message_variable] = text_latin1
post_data[sms_api.to_variable] = str(mobile)
url = sms_api.url
clickatell = "clickatell" in url
if clickatell:
text_len = len(text)
if text_len > 480:
current.log.error("Clickatell messages cannot exceed 480 chars")
return False
elif text_len > 320:
post_data["concat"] = 3
elif text_len > 160:
post_data["concat"] = 2
request = urllib2.Request(url)
query = urllib.urlencode(post_data)
if sms_api.username and sms_api.password:
# e.g. Mobile Commons
base64string = base64.encodestring("%s:%s" % (sms_api.username, sms_api.password)).replace("\n", "")
request.add_header("Authorization", "Basic %s" % base64string)
try:
result = urllib2.urlopen(request, query)
except urllib2.HTTPError, e:
current.log.error("SMS message send failed: %s" % e)
return False
else:
# Parse result
output = result.read()
if clickatell:
if output.startswith("ERR"):
current.log.error("Clickatell message send failed: %s" % output)
return False
elif message_id and output.startswith("ID"):
# Store ID from Clickatell to be able to followup
remote_id = output[4:]
db(s3db.msg_sms.message_id == message_id).update(remote_id=remote_id)
elif "mcommons" in url:
# http://www.mobilecommons.com/mobile-commons-api/rest/#errors
# Good = <response success="true"></response>
# Bad = <response success="false"><errror id="id" message="message"></response>
if "error" in output:
current.log.error("Mobile Commons message send failed: %s" % output)
return False
return True
# -------------------------------------------------------------------------
    def send_sms_via_modem(self, mobile, text="", channel_id=None):
        """
            Function to send SMS via locally-attached Modem
            - needs to have the cron/sms_handler_modem.py script running

            @param mobile: recipient phone number (sanitised, then prefixed
                           with "+")
            @param text: the message body
            @param channel_id: unused here; present for a signature consistent
                               with the other SMS senders

            Returns True if the message was handed to the modem, else False
        """
        mobile = self.sanitise_phone(mobile)
        # Add '+' before country code
        mobile = "+%s" % mobile
        try:
            self.modem.send_sms(mobile, text)
            return True
        except KeyError:
            # NOTE(review): only KeyError is caught - if self.modem were
            # simply absent this would raise AttributeError instead; confirm
            # how self.modem is populated when the handler script runs
            current.log.error("s3msg", "Modem not available: need to have the cron/sms_handler_modem.py script running")
            return False
# -------------------------------------------------------------------------
def send_sms_via_smtp(self, mobile, text="", channel_id=None):
"""
Function to send SMS via SMTP
NB Different Gateways have different requirements for presence/absence of International code
http://en.wikipedia.org/wiki/List_of_SMS_gateways
http://www.obviously.com/tech_tips/SMS_Text_Email_Gateway.html
"""
table = current.s3db.msg_sms_smtp_channel
if channel_id:
query = (table.channel_id == channel_id)
else:
query = (table.enabled == True)
settings = current.db(query).select(limitby=(0, 1)
).first()
if not settings:
return False
mobile = self.sanitise_phone(mobile)
to = "%s@%s" % (mobile,
settings.address)
try:
result = self.send_email(to=to,
subject="",
message= text)
return result
except:
return False
#-------------------------------------------------------------------------------------------------
    def send_sms_via_tropo(self,
                           row_id,
                           message_id,
                           recipient,
                           message,
                           network = "SMS",
                           channel_id = None,
                           ):
        """
            Send a URL request to Tropo to pick a message up

            @param row_id: msg_outbox record ID (passed to Tropo so its
                           callback can retrieve the right message)
            @param message_id: msg_message record ID
            @param recipient: recipient address (phone number for SMS)
            @param message: the message body
            @param network: "SMS" (phone number gets sanitised) or another
                            Tropo-supported network
            @param channel_id: which msg_tropo_channel to use
                               (defaults to the first enabled one)

            Returns False (Tropo calls back later to fetch the actual
            message), or None if no channel is configured
        """
        db = current.db
        s3db = current.s3db
        table = s3db.msg_tropo_channel
        base_url = "http://api.tropo.com/1.0/sessions"
        action = "create"
        if channel_id:
            query = (table.channel_id == channel_id)
        else:
            query = (table.enabled == True)
        tropo_settings = db(query).select(table.token_messaging,
                                          limitby=(0, 1)).first()
        if tropo_settings:
            tropo_token_messaging = tropo_settings.token_messaging
            #tropo_token_voice = tropo_settings.token_voice
        else:
            return
        if network == "SMS":
            recipient = self.sanitise_phone(recipient)
        try:
            # Stash the message so that Tropo's callback can pick it up
            s3db.msg_tropo_scratch.insert(row_id = row_id,
                                          message_id = message_id,
                                          recipient = recipient,
                                          message = message,
                                          network = network)
            params = urllib.urlencode([("action", action),
                                       ("token", tropo_token_messaging),
                                       ("outgoing", "1"),
                                       ("row_id", row_id)
                                       ])
            xml = urllib2.urlopen("%s?%s" % (base_url, params)).read()
            # Parse Response (actual message is sent as a response to the POST which will happen in parallel)
            #root = etree.fromstring(xml)
            #elements = root.getchildren()
            #if elements[0].text == "false":
            #    session.error = T("Message sending failed! Reason:") + " " + elements[2].text
            #    redirect(URL(f='index'))
            #else:
            #    session.flash = T("Message Sent")
            #    redirect(URL(f='index'))
        except:
            # NOTE(review): failures (scratch insert or HTTP) are silently
            # swallowed here and never logged - consider logging them
            pass
        return False # Returning False because the API needs to ask us for the message again.
# -------------------------------------------------------------------------
    def send_sms_by_pe_id(self,
                          pe_id,
                          message="",
                          from_address=None,
                          system_generated=False):
        """
            API wrapper over send_by_pe_id

            @param pe_id: the person entity to send the SMS to
            @param message: the message body
            @param from_address: sender address, passed through unchanged
            @param system_generated: whether the message was generated by the
                                     system rather than entered by a user

            NOTE(review): send_email_by_pe_id passes (pe_id, subject, message,
            contact_method, ...) positionally, while here "message" occupies
            the second positional slot and "subject" is passed by keyword -
            verify this matches the send_by_pe_id signature
        """
        return self.send_by_pe_id(pe_id,
                                  message,
                                  "SMS",
                                  from_address,
                                  system_generated,
                                  subject=""
                                  )
# -------------------------------------------------------------------------
# Twitter
# -------------------------------------------------------------------------
    @staticmethod
    def _sanitise_twitter_account(account):
        """
            Only keep characters that are legal for a twitter account:
            letters, digits, and _

            @param account: the raw account name

            Returns the account name with illegal characters removed
        """

        # Python-2 str.translate(table, deletechars): presumably IDENTITYTRANS
        # maps every char to itself and NOTTWITTERCHARS is the deletion set -
        # both are defined at module level; confirm outside this view
        return account.translate(IDENTITYTRANS, NOTTWITTERCHARS)
# -------------------------------------------------------------------------
@staticmethod
def _break_to_chunks(text,
chunk_size=TWITTER_MAX_CHARS,
suffix = TWITTER_HAS_NEXT_SUFFIX,
prefix = TWITTER_HAS_PREV_PREFIX):
"""
Breaks text to <=chunk_size long chunks. Tries to do this at a space.
All chunks, except for last, end with suffix.
All chunks, except for first, start with prefix.
"""
res = []
current_prefix = "" # first chunk has no prefix
while text:
if len(current_prefix + text) <= chunk_size:
res.append(current_prefix + text)
return res
else: # break a chunk
c = text[:chunk_size - len(current_prefix) - len(suffix)]
i = c.rfind(" ")
if i > 0: # got a blank
c = c[:i]
text = text[len(c):].lstrip()
res.append((current_prefix + c.rstrip() + suffix))
current_prefix = prefix # from now on, we want a prefix
# -------------------------------------------------------------------------
@staticmethod
def get_twitter_api(channel_id=None):
"""
Initialize Twitter API
"""
try:
import tweepy
except ImportError:
current.log.error("s3msg", "Tweepy not available, so non-Tropo Twitter support disabled")
return None
table = current.s3db.msg_twitter_channel
if not channel_id:
# Try the 1st enabled one in the DB
query = (table.enabled == True)
else:
query = (table.channel_id == channel_id)
c = current.db(query).select(table.twitter_account,
table.consumer_key,
table.consumer_secret,
table.access_token,
table.access_token_secret,
limitby=(0, 1)
).first()
try:
oauth = tweepy.OAuthHandler(c.consumer_key,
c.consumer_secret)
oauth.set_access_token(c.access_token,
c.access_token_secret)
twitter_api = tweepy.API(oauth)
return (twitter_api, c.twitter_account)
except:
return None
# -------------------------------------------------------------------------
def send_tweet(self, text="", recipient=None):
"""
Function to tweet.
If a recipient is specified then we send via direct message if the recipient follows us.
- falls back to @mention (leaves less characters for the message).
Breaks long text to chunks if needed.
@ToDo: Option to Send via Tropo
"""
# Initialize Twitter API
twitter_settings = self.get_twitter_api()
if not twitter_settings:
# Abort
return False
import tweepy
twitter_api = twitter_settings[0]
twitter_account = twitter_settings[1]
from_address = twitter_api.me().screen_name
db = current.db
s3db = current.s3db
table = s3db.msg_twitter
otable = s3db.msg_outbox
def log_tweet(tweet, recipient, from_address):
# Log in msg_twitter
_id = table.insert(body=tweet,
from_address=from_address,
)
record = db(table.id == _id).select(table.id,
limitby=(0, 1)
).first()
s3db.update_super(table, record)
message_id = record.message_id
# Log in msg_outbox
otable.insert(message_id = message_id,
address = recipient,
status = 2,
contact_method = "TWITTER",
)
if recipient:
recipient = self._sanitise_twitter_account(recipient)
try:
can_dm = recipient == twitter_account or \
twitter_api.get_user(recipient).id in twitter_api.followers_ids(twitter_account)
except tweepy.TweepError:
# recipient not found
return False
if can_dm:
chunks = self._break_to_chunks(text)
for c in chunks:
try:
# Note: send_direct_message() requires explicit kwargs (at least in tweepy 1.5)
# See http://groups.google.com/group/tweepy/msg/790fcab8bc6affb5
if twitter_api.send_direct_message(screen_name=recipient,
text=c):
log_tweet(c, recipient, from_address)
except tweepy.TweepError:
current.log.error("Unable to Tweet DM")
else:
prefix = "@%s " % recipient
chunks = self._break_to_chunks(text,
TWITTER_MAX_CHARS - len(prefix))
for c in chunks:
try:
twitter_api.update_status("%s %s" % prefix, c)
except tweepy.TweepError:
current.log.error("Unable to Tweet @mention")
else:
log_tweet(c, recipient, from_address)
else:
chunks = self._break_to_chunks(text)
for c in chunks:
try:
twitter_api.update_status(c)
except tweepy.TweepError:
current.log.error("Unable to Tweet")
else:
log_tweet(c, recipient, from_address)
return True
#------------------------------------------------------------------------------
def post_to_facebook(self, text="", channel_id=None):
"""
Posts a message on Facebook
https://developers.facebook.com/docs/graph-api
@ToDo: Log messages in msg_facebook
"""
table = current.s3db.msg_facebook_channel
if not channel_id:
# Try the 1st enabled one in the DB
query = (table.enabled == True)
else:
query = (table.channel_id == channel_id)
c = current.db(query).select(table.app_id,
table.app_secret,
table.page_id,
table.page_access_token,
limitby=(0, 1)
).first()
import facebook
try:
app_access_token = facebook.get_app_access_token(c.app_id,
c.app_secret)
except:
import sys
message = sys.exc_info()[1]
current.log.error("S3MSG: %s" % message)
return
graph = facebook.GraphAPI(app_access_token)
page_id = c.page_id
if page_id:
graph = facebook.GraphAPI(c.page_access_token)
graph.put_object(page_id, "feed", message=text)
else:
graph.put_object(user_id, "feed", message=text)
# -------------------------------------------------------------------------
def poll(self, tablename, channel_id):
"""
Poll a Channel for New Messages
"""
channel_type = tablename.split("_", 2)[1]
# Launch the correct Poller
function_name = "poll_%s" % channel_type
try:
fn = getattr(S3Msg, function_name)
except:
error = "Unsupported Channel: %s" % channel_type
current.log.error(error)
return error
result = fn(channel_id)
return result
# -------------------------------------------------------------------------
    @staticmethod
    def poll_email(channel_id):
        """
            This is a simple mailbox polling script for the Messaging Module.
            It is normally called from the scheduler.

            @param channel_id: the msg_email_channel record ID to poll

            Returns an error message on failure, else None

            @ToDo: Handle MIME attachments
                   http://docs.python.org/2/library/email-examples.html
            @ToDo: If there is a need to collect from non-compliant mailers
                   then suggest using the robust Fetchmail to collect & store
                   in a more compliant mailer!
            @ToDo: If delete_from_server is false, we don't want to download
                   the same messages repeatedly. Perhaps record time of fetch
                   runs (or use info from the scheduler_run table), compare
                   w/ message timestamp, as a filter. That may not be
                   completely accurate, so could check msg_email for messages
                   close to the last fetch time. Or just advise people to have
                   a dedicated account to which email is sent, that does not
                   also need to be read by humans. Or don't delete the fetched
                   mail until the next run.
        """
        db = current.db
        s3db = current.s3db
        table = s3db.msg_email_channel
        # Read-in configuration from Database
        query = (table.channel_id == channel_id)
        channel = db(query).select(table.username,
                                   table.password,
                                   table.server,
                                   table.protocol,
                                   table.use_ssl,
                                   table.port,
                                   table.delete_from_server,
                                   limitby=(0, 1)).first()
        if not channel:
            return "No Such Email Channel: %s" % channel_id
        import email
        #import mimetypes
        import socket
        from dateutil import parser
        date_parse = parser.parse
        username = channel.username
        password = channel.password
        host = channel.server
        protocol = channel.protocol
        ssl = channel.use_ssl
        port = int(channel.port)
        delete = channel.delete_from_server
        # Cache table handles & insert methods for the loop below
        mtable = db.msg_email
        minsert = mtable.insert
        stable = db.msg_channel_status
        sinsert = stable.insert
        atable = s3db.msg_attachment
        ainsert = atable.insert
        dtable = db.doc_document
        dinsert = dtable.insert
        store = dtable.file.store
        update_super = s3db.update_super
        # Is this channel connected to a parser?
        # NB "parser" deliberately shadows dateutil.parser here -
        # date_parse was bound above before the shadowing
        parser = s3db.msg_parser_enabled(channel_id)
        if parser:
            ptable = db.msg_parsing_status
            pinsert = ptable.insert
        # ---------------------------------------------------------------------
        def parse_email(message):
            """
                Helper to parse one raw mail into msg_email
                (+ doc_document/msg_attachment for attachments)
            """
            # Create a Message object
            msg = email.message_from_string(message)
            # Parse the Headers
            sender = msg["from"]
            subject = msg.get("subject", "")
            date_sent = msg.get("date", None)
            # Store the whole raw message
            raw = msg.as_string()
            # Parse out the 'Body'
            # Look for Attachments
            attachments = []
            # http://docs.python.org/2/library/email-examples.html
            body = ""
            for part in msg.walk():
                if part.get_content_maintype() == "multipart":
                    # multipart/* are just containers
                    continue
                filename = part.get_filename()
                if not filename:
                    # Assume this is the Message Body (plain text or HTML)
                    if not body:
                        # Plain text will come first
                        body = part.get_payload(decode=True)
                    continue
                attachments.append((filename, part.get_payload(decode=True)))
            # Store in DB
            # NB subject truncated to the msg_email field length (78)
            data = dict(channel_id=channel_id,
                        from_address=sender,
                        subject=subject[:78],
                        body=body,
                        raw=raw,
                        inbound=True,
                        )
            if date_sent:
                data["date"] = date_parse(date_sent)
            _id = minsert(**data)
            record = dict(id=_id)
            update_super(mtable, record)
            message_id = record["message_id"]
            for a in attachments:
                # Linux ext2/3 max filename length = 255
                # b16encode doubles length & need to leave room for doc_document.file.16charsuuid.
                # store doesn't support unicode, so need an ascii string
                filename = s3_unicode(a[0][:92]).encode("ascii", "ignore")
                fp = StringIO()
                fp.write(a[1])
                fp.seek(0)
                newfilename = store(fp, filename)
                fp.close()
                document_id = dinsert(name=filename,
                                      file=newfilename)
                update_super(dtable, dict(id=document_id))
                ainsert(message_id=message_id,
                        document_id=document_id)
            if parser:
                pinsert(message_id=message_id,
                        channel_id=channel_id)
        dellist = []
        if protocol == "pop3":
            import poplib
            # http://docs.python.org/library/poplib.html
            try:
                if ssl:
                    p = poplib.POP3_SSL(host, port)
                else:
                    p = poplib.POP3(host, port)
            except socket.error, e:
                error = "Cannot connect: %s" % e
                current.log.error(error)
                # Store status in the DB
                sinsert(channel_id=channel_id,
                        status=error)
                return error
            try:
                # Attempting APOP authentication...
                p.apop(username, password)
            except poplib.error_proto:
                # Attempting standard authentication...
                try:
                    p.user(username)
                    p.pass_(password)
                except poplib.error_proto, e:
                    error = "Login failed: %s" % e
                    current.log.error(error)
                    # Store status in the DB
                    sinsert(channel_id=channel_id,
                            status=error)
                    return error
            mblist = p.list()[1]
            for item in mblist:
                number, octets = item.split(" ")
                # Retrieve the message (storing it in a list of lines)
                lines = p.retr(number)[1]
                parse_email("\n".join(lines))
                if delete:
                    # Add it to the list of messages to delete later
                    dellist.append(number)
            # Iterate over the list of messages to delete
            for number in dellist:
                p.dele(number)
            p.quit()
        elif protocol == "imap":
            import imaplib
            # http://docs.python.org/library/imaplib.html
            try:
                if ssl:
                    M = imaplib.IMAP4_SSL(host, port)
                else:
                    M = imaplib.IMAP4(host, port)
            except socket.error, e:
                error = "Cannot connect: %s" % e
                current.log.error(error)
                # Store status in the DB
                sinsert(channel_id=channel_id,
                        status=error)
                return error
            try:
                M.login(username, password)
            except M.error, e:
                error = "Login failed: %s" % e
                current.log.error(error)
                # Store status in the DB
                sinsert(channel_id=channel_id,
                        status=error)
                # Explicitly commit DB operations when running from Cron
                db.commit()
                return error
            # Select inbox
            M.select()
            # Search for Messages to Download
            typ, data = M.search(None, "ALL")
            mblist = data[0].split()
            for number in mblist:
                typ, msg_data = M.fetch(number, "(RFC822)")
                for response_part in msg_data:
                    if isinstance(response_part, tuple):
                        parse_email(response_part[1])
                        if delete:
                            # Add it to the list of messages to delete later
                            dellist.append(number)
            # Iterate over the list of messages to delete
            for number in dellist:
                typ, response = M.store(number, "+FLAGS", r"(\Deleted)")
            M.close()
            M.logout()
# -------------------------------------------------------------------------
    @staticmethod
    def poll_mcommons(channel_id):
        """
            Fetches the inbound SMS from Mobile Commons API
            http://www.mobilecommons.com/mobile-commons-api/rest/#ListIncomingMessages

            @param channel_id: the msg_mcommons_channel record ID to poll

            Returns "OK" on success, or an error message
        """
        db = current.db
        s3db = current.s3db
        table = s3db.msg_mcommons_channel
        query = (table.channel_id == channel_id)
        channel = db(query).select(table.url,
                                   table.campaign_id,
                                   table.username,
                                   table.password,
                                   table.query,
                                   table.timestmp,
                                   limitby=(0, 1)).first()
        if not channel:
            return "No Such MCommons Channel: %s" % channel_id
        url = channel.url
        username = channel.username
        password = channel.password
        _query = channel.query
        timestamp = channel.timestmp
        url = "%s?campaign_id=%s" % (url, channel.campaign_id)
        if timestamp:
            # Only ask for messages since the last poll
            url = "%s&start_time=%s" % (url, timestamp)
        if _query:
            url = "%s&query=%s" % (url, _query)
        # Create a password manager
        passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
        passman.add_password(None, url, username, password)
        # Create the AuthHandler
        authhandler = urllib2.HTTPBasicAuthHandler(passman)
        opener = urllib2.build_opener(authhandler)
        urllib2.install_opener(opener)
        # Update the timestamp
        # NB Ensure MCommons account is in UTC
        db(query).update(timestmp = current.request.utcnow)
        try:
            _response = urllib2.urlopen(url)
        except urllib2.HTTPError, e:
            return "Error: %s" % e.code
        else:
            sms_xml = _response.read()
            tree = etree.XML(sms_xml)
            messages = tree.findall(".//message")
            mtable = s3db.msg_sms
            minsert = mtable.insert
            update_super = s3db.update_super
            decode = S3Codec.decode_iso_datetime
            # Is this channel connected to a parser?
            parser = s3db.msg_parser_enabled(channel_id)
            if parser:
                ptable = db.msg_parsing_status
                pinsert = ptable.insert
            for message in messages:
                sender_phone = message.find("phone_number").text
                body = message.find("body").text
                received_on = decode(message.find("received_at").text)
                _id = minsert(channel_id = channel_id,
                              sender_phone = sender_phone,
                              body = body,
                              received_on = received_on,
                              )
                record = dict(id=_id)
                update_super(mtable, record)
                if parser:
                    # Queue the message for parsing
                    pinsert(message_id = record["message_id"],
                            channel_id = channel_id)
            return "OK"
# -------------------------------------------------------------------------
    @staticmethod
    def poll_twilio(channel_id):
        """
            Fetches the inbound SMS from Twilio API
            http://www.twilio.com/docs/api/rest

            @param channel_id: the msg_twilio_channel record ID to poll

            Returns "OK" on success, or an error message
        """
        db = current.db
        s3db = current.s3db
        table = s3db.msg_twilio_channel
        query = (table.channel_id == channel_id)
        channel = db(query).select(table.account_sid,
                                   table.auth_token,
                                   table.url,
                                   limitby=(0, 1)).first()
        if not channel:
            return "No Such Twilio Channel: %s" % channel_id
        # @ToDo: Do we really have to download *all* messages every time
        # & then only import the ones we don't yet have?
        account_sid = channel.account_sid
        url = "%s/%s/SMS/Messages.json" % (channel.url, account_sid)
        # Create a password manager
        passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
        passman.add_password(None, url, account_sid, channel.auth_token)
        # Create the AuthHandler
        authhandler = urllib2.HTTPBasicAuthHandler(passman)
        opener = urllib2.build_opener(authhandler)
        urllib2.install_opener(opener)
        try:
            smspage = urllib2.urlopen(url)
        except urllib2.HTTPError, e:
            error = "Error: %s" % e.code
            current.log.error(error)
            # Store status in the DB
            db(table.channel_id == channel_id).update(status=error)
            return error
        else:
            sms_list = json.loads(smspage.read())
            messages = sms_list["sms_messages"]
            # Find all the SIDs we have already downloaded
            # (even if message was deleted)
            stable = db.msg_twilio_sid
            sids = db(stable.id > 0).select(stable.sid)
            downloaded_sms = [s.sid for s in sids]
            mtable = s3db.msg_sms
            minsert = mtable.insert
            sinsert = stable.insert
            update_super = s3db.update_super
            # Is this channel connected to a parser?
            parser = s3db.msg_parser_enabled(channel_id)
            if parser:
                ptable = db.msg_parsing_status
                pinsert = ptable.insert
            for sms in messages:
                if (sms["direction"] == "inbound") and \
                   (sms["sid"] not in downloaded_sms):
                    sender = "<" + sms["from"] + ">"
                    _id = minsert(channel_id=channel_id,
                                  body=sms["body"],
                                  status=sms["status"],
                                  from_address=sender,
                                  received_on=sms["date_sent"])
                    record = dict(id=_id)
                    update_super(mtable, record)
                    message_id = record["message_id"]
                    # Remember the SID so we don't import this message again
                    sinsert(message_id = message_id,
                            sid=sms["sid"])
                    if parser:
                        pinsert(message_id = message_id,
                                channel_id = channel_id)
        return "OK"
# -------------------------------------------------------------------------
@staticmethod
def poll_rss(channel_id):
"""
Fetches all new messages from a subscribed RSS Feed
"""
db = current.db
s3db = current.s3db
table = s3db.msg_rss_channel
query = (table.channel_id == channel_id)
channel = db(query).select(table.date,
table.etag,
table.url,
limitby=(0, 1)).first()
if not channel:
return "No Such RSS Channel: %s" % channel_id
# http://pythonhosted.org/feedparser
import feedparser
if channel.etag:
# http://pythonhosted.org/feedparser/http-etag.html
# NB This won't help for a server like Drupal 7 set to not allow caching & hence generating a new ETag/Last Modified each request!
d = feedparser.parse(channel.url, etag=channel.etag)
elif channel.date:
d = feedparser.parse(channel.url, modified=channel.date.utctimetuple())
else:
# We've not polled this feed before
d = feedparser.parse(channel.url)
if d.bozo:
# Something doesn't seem right
S3Msg.update_channel_status(channel_id,
status=d.bozo_exception.message,
period=(300, 3600))
return
# Update ETag/Last-polled
now = current.request.utcnow
data = dict(date=now)
etag = d.get("etag", None)
if etag:
data["etag"] = etag
db(query).update(**data)
from time import mktime, struct_time
gis = current.gis
geocode_r = gis.geocode_r
hierarchy_level_keys = gis.hierarchy_level_keys
utcfromtimestamp = datetime.datetime.utcfromtimestamp
gtable = db.gis_location
ginsert = gtable.insert
mtable = db.msg_rss
minsert = mtable.insert
update_super = s3db.update_super
# Is this channel connected to a parser?
parser = s3db.msg_parser_enabled(channel_id)
if parser:
ptable = db.msg_parsing_status
pinsert = ptable.insert
entries = d.entries
if entries:
# Check how many we have already to see if any are new
count_old = db(mtable.id > 0).count()
for entry in entries:
link = entry.get("link", None)
# Check for duplicates
# (ETag just saves bandwidth, doesn't filter the contents of the feed)
exists = db(mtable.from_address == link).select(mtable.id,
mtable.location_id,
mtable.message_id,
limitby=(0, 1)
).first()
if exists:
location_id = exists.location_id
else:
location_id = None
title = entry.title
content = entry.get("content", None)
if content:
content = content[0].value
else:
content = entry.get("description", None)
# Consider using dateutil.parser.parse(entry.get("published"))
# http://www.deadlybloodyserious.com/2007/09/feedparser-v-django/
date_published = entry.get("published_parsed", entry.get("updated_parsed"))
if isinstance(date_published, struct_time):
date_published = utcfromtimestamp(mktime(date_published))
else:
date_published = now
tags = entry.get("tags", None)
if tags:
tags = [t.term.encode("utf-8") for t in tags]
location = False
lat = entry.get("geo_lat", None)
lon = entry.get("geo_long", None)
if lat is None or lon is None:
# Try GeoRSS
georss = entry.get("georss_point", None)
if georss:
location = True
lat, lon = georss.split(" ")
else:
location = True
if location:
try:
query = (gtable.lat == lat) &\
(gtable.lon == lon)
exists = db(query).select(gtable.id,
limitby=(0, 1),
orderby=gtable.level,
).first()
if exists:
location_id = exists.id
else:
data = dict(lat=lat,
lon=lon,
)
results = geocode_r(lat, lon)
if isinstance(results, dict):
for key in hierarchy_level_keys:
v = results.get(key, None)
if v:
data[key] = v
#if location_id:
# Location has been changed
#db(gtable.id == location_id).update(**data)
location_id = ginsert(**data)
data["id"] = location_id
gis.update_location_tree(data)
except:
# Don't die on badly-formed Geo
pass
if exists:
db(mtable.id == exists.id).update(channel_id = channel_id,
title = title,
from_address = link,
body = content,
author = entry.get("author", None),
date = date_published,
location_id = location_id,
tags = tags,
# @ToDo: Enclosures
)
if parser:
pinsert(message_id = exists.message_id,
channel_id = channel_id)
else:
_id = minsert(channel_id = channel_id,
title = entry.title,
from_address = link,
body = content,
author = entry.get("author", None),
date = date_published,
location_id = location_id,
tags = tags,
# @ToDo: Enclosures
)
record = dict(id=_id)
update_super(mtable, record)
if parser:
pinsert(message_id = record["message_id"],
channel_id = channel_id)
if entries:
# Check again to see if there were any new ones
count_new = db(mtable.id > 0).count()
if count_new == count_old:
# No new posts?
# Back-off in-case the site isn't respecting ETags/Last-Modified
S3Msg.update_channel_status(channel_id,
status="+1",
period=(300, 3600))
return "OK"
#-------------------------------------------------------------------------
    @staticmethod
    def poll_twitter(channel_id):
        """
            Function to call to fetch tweets into msg_twitter table
            - called via Scheduler or twitter_inbox controller

            @param channel_id: the msg_twitter_channel record ID to poll

            Returns True on success, False on failure
        """

        # Initialize Twitter API
        twitter_settings = S3Msg.get_twitter_api(channel_id)
        if not twitter_settings:
            # Abort
            return False

        import tweepy

        twitter_api = twitter_settings[0]

        db = current.db
        s3db = current.s3db
        table = s3db.msg_twitter

        # Get the latest Twitter message ID to use it as since_id
        query = (table.channel_id == channel_id) & \
                (table.inbound == True)
        latest = db(query).select(table.msg_id,
                                  orderby=~table.date,
                                  limitby=(0, 1)
                                  ).first()

        try:
            if latest:
                messages = twitter_api.direct_messages(since_id=latest.msg_id)
            else:
                messages = twitter_api.direct_messages()
        except tweepy.TweepError as e:
            error = e.message[0]["message"]
            current.log.error("Unable to get the Tweets for the user: %s" % error)
            return False

        # API returns newest-first: store in chronological order
        messages.reverse()

        tinsert = table.insert
        update_super = s3db.update_super
        for message in messages:
            _id = tinsert(channel_id = channel_id,
                          body = message.text,
                          from_address = message.sender_screen_name,
                          to_address = message.recipient_screen_name,
                          date = message.created_at,
                          inbound = True,
                          msg_id = message.id,
                          )
            update_super(table, dict(id=_id))
        return True
# -------------------------------------------------------------------------
@staticmethod
def update_channel_status(channel_id, status, period=None):
"""
Update the Status for a Channel
"""
db = current.db
# Read current status
stable = current.s3db.msg_channel_status
query = (stable.channel_id == channel_id)
old_status = db(query).select(stable.status,
limitby=(0, 1)
).first()
if old_status:
# Update
if status[0] == "+":
# Increment status if-numeric
old_status = old_status.status
try:
old_status = int(old_status)
except:
new_status = status
else:
new_status = old_status + int(status[1:])
else:
new_status = status
db(query).update(status = new_status)
else:
# Initialise
stable.insert(channel_id = channel_id,
status = status)
if period:
# Amend the frequency of the scheduled task
ttable = db.scheduler_task
args = '["msg_rss_channel", %s]' % channel_id
query = ((ttable.function_name == "msg_poll") & \
(ttable.args == args) & \
(ttable.status.belongs(["RUNNING", "QUEUED", "ALLOCATED"])))
exists = db(query).select(ttable.id,
ttable.period,
limitby=(0, 1)).first()
if not exists:
return
old_period = exists.period
max_period = period[1]
if old_period < max_period:
new_period = old_period + period[0]
new_period = min(new_period, max_period)
db(ttable.id == exists.id).update(period=new_period)
# -------------------------------------------------------------------------
@staticmethod
def twitter_search(search_id):
    """
    Fetch Results for a Twitter Search Query

    @param search_id: the msg_twitter_search record id identifying the
                      saved query (keywords, language, count)

    @return: "OK" on success, False is never returned here; on auth
             failure the TwitterSearch error message string is returned
    """

    try:
        import TwitterSearch
    except ImportError:
        error = "Unresolved dependency: TwitterSearch required for fetching results from twitter keyword queries"
        current.log.error("s3msg", error)
        current.session.error = error
        redirect(URL(f="index"))

    db = current.db
    s3db = current.s3db

    # Read Settings
    table = s3db.msg_twitter_channel
    # Doesn't need to be enabled for Polling
    settings = db(table.id > 0).select(table.consumer_key,
                                       table.consumer_secret,
                                       table.access_token,
                                       table.access_token_secret,
                                       limitby=(0, 1)).first()

    if not settings:
        error = "Twitter Search requires an account configuring"
        current.log.error("s3msg", error)
        current.session.error = error
        redirect(URL(f="twitter_channel"))

    qtable = s3db.msg_twitter_search
    rtable = db.msg_twitter_result
    search_query = db(qtable.id == search_id).select(qtable.id,
                                                     qtable.keywords,
                                                     qtable.lang,
                                                     qtable.count,
                                                     qtable.include_entities,
                                                     limitby=(0, 1)).first()

    # Build the search order from the stored query parameters
    tso = TwitterSearch.TwitterSearchOrder()
    tso.setKeywords(search_query.keywords.split(" "))
    tso.setLanguage(search_query.lang)
    # @ToDo Handle more than 100 results per page
    # This may have to be changed upstream
    tso.setCount(int(search_query.count))
    tso.setIncludeEntities(search_query.include_entities)

    try:
        ts = TwitterSearch.TwitterSearch(
                consumer_key = settings.consumer_key,
                consumer_secret = settings.consumer_secret,
                access_token = settings.access_token,
                access_token_secret = settings.access_token_secret
                )
    except TwitterSearch.TwitterSearchException as e:
        return(str(e))

    from dateutil import parser
    date_parse = parser.parse

    gtable = db.gis_location
    # Disable validation
    rtable.location_id.requires = None
    update_super = s3db.update_super

    for tweet in ts.searchTweetsIterable(tso):
        user = tweet["user"]["screen_name"]
        body = tweet["text"]
        tweet_id = tweet["id_str"]
        lang = tweet["lang"]
        created_at = date_parse(tweet["created_at"])
        lat = None
        lon = None
        if tweet["coordinates"]:
            # GeoJSON coordinate order is [lon, lat]
            lat = tweet["coordinates"]["coordinates"][1]
            lon = tweet["coordinates"]["coordinates"][0]
            location_id = gtable.insert(lat=lat, lon=lon)
        else:
            location_id = None
        _id = rtable.insert(from_address = user,
                            search_id = search_id,
                            body = body,
                            tweet_id = tweet_id,
                            lang = lang,
                            date = created_at,
                            #inbound = True,
                            location_id = location_id,
                            )
        update_super(rtable, dict(id=_id))

    # This is simplistic as we may well want to repeat the same search multiple times
    db(qtable.id == search_id).update(is_searched = True)

    return "OK"
# -------------------------------------------------------------------------
@staticmethod
def process_keygraph(search_id):
    """
    Process results of twitter search with KeyGraph.

    @param search_id: the msg_twitter_search record id, used to name the
                      results file

    @return: the exit code of the external KeyGraph java process
    """

    import subprocess
    import os
    import tempfile

    db = current.db
    s3db = current.s3db
    curpath = os.getcwd()
    preprocess = S3Msg.preprocess_tweet

    def generateFiles():
        """ Dump each tweet body into a numbered file in a temp folder """
        dirpath = tempfile.mkdtemp()
        os.chdir(dirpath)
        rtable = s3db.msg_twitter_search_results
        tweets = db(rtable.deleted == False).select(rtable.body)
        tweetno = 1
        for tweet in tweets:
            filename = "%s.txt" % tweetno
            # Context manager ensures each file handle is closed
            # (previously leaked: open() without close())
            with open(filename, "w") as f:
                f.write(preprocess(tweet.body))
            tweetno += 1
        return dirpath

    try:
        tpath = generateFiles()
    finally:
        # generateFiles() changes the process-wide working directory:
        # restore it so subsequent relative-path code isn't broken
        os.chdir(curpath)

    jarpath = os.path.join(curpath, "static", "KeyGraph", "keygraph.jar")
    resultpath = os.path.join(curpath, "static", "KeyGraph", "results", "%s.txt" % search_id)
    return subprocess.call(["java", "-jar", jarpath, tpath, resultpath])
# -------------------------------------------------------------------------
@staticmethod
def preprocess_tweet(tweet):
    """
    Preprocesses tweets to remove URLs,
    RTs, extra whitespaces and replace hashtags
    with their definitions.

    @param tweet: the tweet body as a string

    @return: the cleaned-up tweet text
    """

    import re

    tagdef = S3Msg.tagdef

    tweet = tweet.lower()
    # Raw strings: '\s' in a plain literal is an invalid escape sequence
    # (DeprecationWarning since Python 3.6, SyntaxError in 3.12+);
    # the pattern values are unchanged.
    # Strip URLs
    tweet = re.sub(r'((www\.[\s]+)|(https?://[^\s]+))', "", tweet)
    # Strip @mentions
    tweet = re.sub(r'@[^\s]+', "", tweet)
    # Collapse whitespace runs to single spaces
    tweet = re.sub(r'[\s]+', " ", tweet)
    # Replace #hashtags with their definitions
    tweet = re.sub(r'#([^\s]+)', lambda m:tagdef(m.group(0)), tweet)
    tweet = tweet.strip('\'"')
    return tweet
# -------------------------------------------------------------------------
@staticmethod
def tagdef(hashtag):
    """
    Returns the definition of a hashtag.

    Looks up the tag via the public tagdef.com JSON API; falls back to
    returning the bare tag text if the lookup or parsing fails.

    @param hashtag: the hashtag including the leading "#"

    @return: the definition text, or the tag itself on failure
    """

    # Drop the leading "#"
    hashtag = hashtag.split("#")[1]

    turl = "http://api.tagdef.com/one.%s.json" % hashtag
    try:
        # urllib2/json are imported at module level (Python 2 codebase)
        hashstr = urllib2.urlopen(turl).read()
        hashdef = json.loads(hashstr)
    except:
        # Deliberate best-effort: any network/parse error just returns
        # the raw tag unchanged
        return hashtag
    else:
        return hashdef["defs"]["def"]["text"]
# =============================================================================
class S3Compose(S3CRUD):
    """ RESTful method for messaging: renders and processes a compose-form
        built from the msg_message, msg_outbox and msg_email tables """

    # -------------------------------------------------------------------------
    def apply_method(self, r, **attr):
        """
        API entry point

        @param r: the S3Request instance
        @param attr: controller attributes for the request
        """

        if r.http in ("GET", "POST"):
            output = self.compose(r, **attr)
        else:
            r.error(405, current.ERROR.BAD_METHOD)
        return output

    # -------------------------------------------------------------------------
    def compose(self, r, **attr):
        """
        Generate a form to send a message

        @param r: the S3Request instance
        @param attr: controller attributes for the request
        """

        T = current.T
        auth = current.auth

        # Remember the current URL so onvalidation can redirect back here
        self.url = url = r.url()

        # @ToDo: Use API
        if auth.is_logged_in() or auth.basic():
            pass
        else:
            redirect(URL(c="default", f="user", args="login",
                         vars={"_next": url}))

        if not current.deployment_settings.has_module("msg"):
            current.session.error = T("Cannot send messages if Messaging module disabled")
            redirect(URL(f="index"))

        if not auth.permission.has_permission("update", c="msg"):
            current.session.error = T("You do not have permission to send messages")
            redirect(URL(f="index"))

        #_vars = r.get_vars

        # Set defaults (used if coming via msg.compose())
        self.contact_method = None
        self.recipient = None
        self.recipients = None
        self.recipient_type = None
        self.subject = None
        self.message = None
        #self.formid = None
        form = self._compose_form()
        # @ToDo: A 2nd Filter form
        # if form.accepts(r.post_vars, current.session,
        #                 formname="compose",
        #                 keepvalues=True):
        #     query, errors = self._process_filter_options(form)
        #     if r.http == "POST" and not errors:
        #         self.resource.add_filter(query)
        #     _vars = form.vars

        # Apply method
        if self.method == "compose":
            output = dict(form=form)
        else:
            r.error(501, current.ERROR.BAD_METHOD)

        # Complete the page
        if r.representation == "html":
            title = self.crud_string(self.tablename, "title_compose")
            if not title:
                title = T("Send Message")

            # subtitle = self.crud_string(self.tablename, "subtitle_compose")
            # if not subtitle:
            #     subtitle = ""

            # Maintain RHeader for consistency
            if attr.get("rheader"):
                rheader = attr["rheader"](r)
                if rheader:
                    output["rheader"] = rheader

            output["title"] = title
            #output["subtitle"] = subtitle
            #output["form"] = form
            #current.response.view = self._view(r, "list_filter.html")
            current.response.view = self._view(r, "create.html")

        return output

    # -------------------------------------------------------------------------
    def _compose_onvalidation(self, form):
        """
        Set the sender
        Route the message

        @param form: the compose form (unused: values are read from
                     request.post_vars directly)
        """

        post_vars = current.request.post_vars
        settings = current.deployment_settings

        # Optionally prefix the subject with the system name
        if settings.get_mail_default_subject():
            system_name_short = "%s - " % settings.get_system_name_short()
        else:
            system_name_short = ""

        # Optionally prefix the subject with the sending user's name
        if settings.get_mail_auth_user_in_subject():
            user = current.auth.user
            if user:
                authenticated_user = "%s %s - " % (user.first_name,
                                                   user.last_name)
            else:
                authenticated_user = ""
        # NOTE(review): if get_mail_auth_user_in_subject() is False then
        # authenticated_user is never assigned and the concatenation below
        # would raise NameError - confirm intended control flow
        post_vars.subject = authenticated_user + system_name_short + post_vars.subject

        contact_method = post_vars.contact_method

        recipients = self.recipients
        if not recipients:
            if not post_vars.pe_id:
                if contact_method != "TWITTER":
                    current.session.error = current.T("Please enter the recipient(s)")
                    redirect(self.url)
                else:
                    # This must be a Status Update
                    if current.msg.send_tweet(post_vars.body):
                        current.session.confirmation = current.T("Check outbox for the message status")
                    else:
                        current.session.error = current.T("Error sending message!")
                    redirect(self.url)
            else:
                recipients = post_vars.pe_id

        # Route the message through the outbox
        if current.msg.send_by_pe_id(recipients,
                                     post_vars.subject,
                                     post_vars.body,
                                     contact_method):
            current.session.confirmation = current.T("Check outbox for the message status")
            redirect(self.url)
        else:
            if current.mail.error:
                # set by mail.error
                current.session.error = "%s: %s" % (current.T("Error sending message"),
                                                    current.mail.error)
            else:
                current.session.error = current.T("Error sending message!")
            redirect(self.url)

    # -------------------------------------------------------------------------
    def _compose_form(self):
        """ Creates the form for composing the message """

        T = current.T
        db = current.db
        s3db = current.s3db

        request = current.request
        get_vars = request.get_vars

        mtable = s3db.msg_message
        otable = s3db.msg_outbox

        mtable.body.label = T("Message")
        mtable.body.default = self.message
        mtable.inbound.default = False
        mtable.inbound.writable = False

        resource = self.resource

        recipient_type = self.recipient_type # from msg.compose()
        if not recipient_type and resource:
            # See if we have defined a custom recipient type for this table
            # pr_person or pr_group
            recipient_type = self._config("msg_recipient_type", None)

        contact_method = self.contact_method # from msg.compose()
        if not contact_method and resource:
            # See if we have defined a custom default contact method for this table
            contact_method = self._config("msg_contact_method", "EMAIL")
        otable.contact_method.default = contact_method

        recipient = self.recipient # from msg.compose()
        if not recipient:
            if "pe_id" in get_vars:
                recipient = get_vars.pe_id
            elif "person_id" in get_vars:
                # @ToDo
                pass
            elif "group_id" in get_vars:
                # @ToDo
                pass
            elif "human_resource.id" in get_vars:
                # @ToDo
                pass

        if recipient:
            recipients = [recipient]
        else:
            recipients = []
            if resource:
                # Derive the recipients from the filtered resource
                table = resource.table
                if "pe_id" in table:
                    field = "pe_id"
                elif "person_id" in table:
                    field = "person_id$pe_id"
                #elif "group_id" in table:
                #    # @ToDo
                #    field = "group_id$pe_id"
                else:
                    field = None
                if field:
                    records = resource.select([field], limit=None)["rows"]
                    recipients = [record.values()[0] for record in records]

        pe_field = otable.pe_id
        pe_field.label = T("Recipient(s)")
        pe_field.writable = True
        if recipients:
            # Don't download a SELECT
            pe_field.requires = None
            # Tell onvalidation about them
            self.recipients = recipients

            pe_field.default = recipients

            if len(recipients) == 1:
                recipient = recipients[0]
                represent = s3db.pr_PersonEntityRepresent(show_label=False)(recipient)
                # Restrict message options to those available for the entity
                petable = s3db.pr_pentity
                entity_type = db(petable.pe_id == recipient).select(petable.instance_type,
                                                                    limitby=(0, 1)
                                                                    ).first().instance_type
                if entity_type == "pr_person":
                    # Only offer contact methods this person actually has
                    all_contact_opts = current.msg.MSG_CONTACT_OPTS
                    contact_method_opts = {}
                    ctable = s3db.pr_contact
                    query = (ctable.deleted != True) & \
                            (ctable.pe_id == recipient)
                    rows = db(query).select(ctable.contact_method)
                    for row in rows:
                        if row.contact_method in all_contact_opts:
                            contact_method_opts[row.contact_method] = all_contact_opts[row.contact_method]
                    if not contact_method_opts:
                        current.session.error = T("There are no contacts available for this person!")
                        # Redirect to the contacts tab appropriate for the controller
                        controller = request.controller
                        if controller == "hrm":
                            url = URL(c="hrm", f="person", args="contacts",
                                      vars={"group": "staff",
                                            "human_resource.id": get_vars.get("human_resource.id")})
                        elif controller == "vol":
                            url = URL(c="vol", f="person", args="contacts",
                                      vars={"group": "volunteer",
                                            "human_resource.id": get_vars.get("human_resource.id")})
                        elif controller == "member":
                            url = URL(c="member", f="person", args="contacts",
                                      vars={"membership.id": get_vars.get("membership.id")})
                        else:
                            # @ToDo: Lookup the type
                            url = URL(f="index")
                        redirect(url)
                    otable.contact_method.requires = IS_IN_SET(contact_method_opts,
                                                               zero=None)
                    if contact_method not in contact_method_opts:
                        otable.contact_method.default = contact_method_opts.popitem()[0]
                #elif entity_type = "pr_group":
                    # @ToDo: Loop through members
            else:
                # @ToDo: This should display all the Recipients (truncated with option to see all)
                # - use pr_PersonEntityRepresent for bulk representation
                represent = T("%(count)s Recipients") % dict(count=len(recipients))
        else:
            if recipient_type:
                # Filter by Recipient Type
                pe_field.requires = IS_ONE_OF(db,
                                              "pr_pentity.pe_id",
                                              # Breaks PG
                                              #orderby="instance_type",
                                              filterby="instance_type",
                                              filter_opts=(recipient_type,))
                pe_field.widget = S3PentityAutocompleteWidget(types=(recipient_type,))
            else:
                # @ToDo A new widget (tree?) required to handle multiple persons and groups
                pe_field.widget = S3PentityAutocompleteWidget()
            pe_field.comment = DIV(_class="tooltip",
                                   _title="%s|%s" % \
                                          (T("Recipients"),
                                           T("Please enter the first few letters of the Person/Group for the autocomplete.")))

        # Build the three source forms whose widgets are recombined below
        sqlform = S3SQLDefaultForm()
        logform = sqlform(request=request,
                          resource=s3db.resource("msg_message"),
                          onvalidation=self._compose_onvalidation,
                          message="Message Sent",
                          format="html")
        outboxform = sqlform(request=request,
                             resource=s3db.resource("msg_outbox"),
                             message="Message Sent",
                             format="html")
        mailform = sqlform(request=request,
                           resource=s3db.resource("msg_email"),
                           message="Message Sent",
                           format="html")

        # Shortcuts
        lcustom = logform.custom
        ocustom = outboxform.custom
        mcustom = mailform.custom

        pe_row = TR(TD(LABEL(ocustom.label.pe_id)),
                    _id="msg_outbox_pe_id__row")
        if recipients:
            # Recipients are preset: hide the widget, show the representation
            ocustom.widget.pe_id["_class"] = "hide"
            pe_row.append(TD(ocustom.widget.pe_id,
                             represent))
        else:
            pe_row.append(TD(ocustom.widget.pe_id))
            pe_row.append(TD(ocustom.comment.pe_id))

        # Build a custom form from the 2 source forms
        form = DIV(lcustom.begin,
                   TABLE(TBODY(TR(TD(LABEL(ocustom.label.contact_method)),
                                  TD(ocustom.widget.contact_method),
                                  TD(ocustom.comment.contact_method),
                                  _id="msg_outbox_contact_method__row"
                                  ),
                               pe_row,
                               TR(TD(LABEL(mcustom.label.subject)),
                                  TD(mcustom.widget.subject),
                                  TD(mcustom.comment.subject),
                                  _id="msg_log_subject__row"
                                  ),
                               TR(TD(LABEL(lcustom.label.body)),
                                  TD(lcustom.widget.body),
                                  TD(lcustom.comment.body),
                                  _id="msg_log_message__row"
                                  ),
                               #TR(TD(LABEL(lcustom.label.priority)),
                               #TD(lcustom.widget.priority),
                               #TD(lcustom.comment.priority),
                               #_id="msg_log_priority__row"
                               #),
                               TR(TD(),
                                  TD(INPUT(_type="submit",
                                           _value=T("Send message"),
                                           _id="dummy_submit")),
                                  _id="submit_record__row"
                                  ),
                               ),
                         ),
                   lcustom.end)

        # Client-side support script (minified unless in debug mode)
        s3 = current.response.s3
        if s3.debug:
            s3.scripts.append("/%s/static/scripts/S3/s3.msg.js" % request.application)
        else:
            s3.scripts.append("/%s/static/scripts/S3/s3.msg.min.js" % request.application)
        script = '''i18n.none_of_the_above="%s"''' % T("None of the above")
        s3.js_global.append(script)

        # @ToDo: Port SMS maxLength from alert_create_script() in controllers/deploy.py

        return form
# END =========================================================================
| {
"content_hash": "aa112ea4e9ce81306998d2c30e44ddf8",
"timestamp": "",
"source": "github",
"line_count": 2585,
"max_line_length": 142,
"avg_line_length": 39.26266924564797,
"alnum_prop": 0.452194218377441,
"repo_name": "gnarula/eden_deployment",
"id": "e980a6331500914c7a947c3fff604dc9edb32380",
"size": "101565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/s3/s3msg.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1305178"
},
{
"name": "JavaScript",
"bytes": "16338028"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Perl",
"bytes": "500"
},
{
"name": "Python",
"bytes": "28218113"
},
{
"name": "Shell",
"bytes": "893"
},
{
"name": "XSLT",
"bytes": "2491556"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.views.decorators.cache import never_cache
from advisers.admin import admin_site
from moj_irat.views import HealthcheckView
# URL routing for the project (laalaa/urls.py).
urlpatterns = [
    # Uncached healthcheck endpoint for monitoring probes
    url(r"^healthcheck.json$", never_cache(HealthcheckView.as_view()), name="healthcheck_json"),
    url(r"^admin/", admin_site.urls),
    # All remaining URLs are delegated to the advisers app
    url(r"^", include("advisers.urls")),
# Serve static assets directly (only effective when DEBUG is enabled)
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| {
"content_hash": "c1c65f835f65f06eedada30569d836e0",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 96,
"avg_line_length": 39.61538461538461,
"alnum_prop": 0.7592233009708738,
"repo_name": "ministryofjustice/laa-legal-adviser-api",
"id": "b3bbf162623c4411bfa043dbf3096aa1453c4cd5",
"size": "515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "laalaa/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1100"
},
{
"name": "HTML",
"bytes": "6367"
},
{
"name": "Python",
"bytes": "85938"
},
{
"name": "Shell",
"bytes": "4624"
}
],
"symlink_target": ""
} |
"""Estimator classes for BoostedTrees."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import functools
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator.canned import boosted_trees_utils
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import boosted_trees_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.array_ops import identity as tf_identity
from tensorflow.python.ops.losses import losses
from tensorflow.python.summary import summary
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
from tensorflow.python.util.tf_export import estimator_export
# TODO(nponomareva): Reveal pruning params here.
# Hyperparameters for a boosted-trees ensemble, threaded through the
# estimator and grower classes below.
_TreeHParams = collections.namedtuple('TreeHParams', [
    'n_trees', 'max_depth', 'learning_rate', 'l1', 'l2', 'tree_complexity',
    'min_node_weight', 'center_bias', 'pruning_mode'
])

# Sentinel placeholders keeping signatures stable until multi-class /
# multi-dimensional support is implemented.
_HOLD_FOR_MULTI_CLASS_SUPPORT = object()
_HOLD_FOR_MULTI_DIM_SUPPORT = object()

# Placeholder bucket-size key; replaced by the real max once it is computed
# in _group_features_by_num_buckets().
_DUMMY_NUM_BUCKETS = -1
# Node id marking "no cached entry" in the training-state caches.
_DUMMY_NODE_ID = -1
def _get_transformed_features(features, sorted_feature_columns):
  """Gets the transformed features from features/feature_columns pair.

  Args:
    features: a dictionary of name to Tensor.
    sorted_feature_columns: a list/set of tf.feature_column, sorted by name.

  Returns:
    result_features: a list of the transformed features, sorted by the name.

  Raises:
    ValueError: when unsupported features/columns are tried.
  """
  # pylint:disable=protected-access
  transformed_features = feature_column_lib._transform_features(
      features, sorted_feature_columns)
  result_features = []
  for column in sorted_feature_columns:
    if isinstance(column, feature_column_lib._BucketizedColumn):
      source_name = column.source_column.name
      # Bucketized output carries an inner dim of 1; squeeze it to rank 1.
      squeezed_tensor = array_ops.squeeze(transformed_features[column], axis=1)
      if len(squeezed_tensor.shape) > 1:
        raise ValueError('For now, only supports features equivalent to rank 1 '
                         'but column `{}` got: {}'.format(
                             source_name, features[source_name].shape))
      result_features.append(squeezed_tensor)
    elif isinstance(column, feature_column_lib._IndicatorColumn):
      source_name = column.categorical_column.name
      tensor = math_ops.to_int32(transformed_features[column])
      if len(tensor.shape) > 2:
        raise ValueError('Rank of indicator column must be no more than 2, '
                         'but column `{}` got: {}'.format(
                             source_name, features[source_name].shape))
      # Unstack the indicator column so every category becomes a separate
      # rank-1 feature tensor downstream.
      unstacked = array_ops.unstack(tensor, axis=1)
      result_features.extend(unstacked)
    else:
      raise ValueError(
          'For now, only bucketized_column and indicator_column is supported '
          'but got: {}'.format(column))
  # pylint:enable=protected-access

  return result_features
def _local_variable(initial_value, name=None):
  """Stores a tensor as a local Variable for faster read.

  Args:
    initial_value: the value (Tensor or python value) to initialize with.
    name: optional variable name.

  Returns:
    A non-trainable variable in the LOCAL_VARIABLES collection.
  """
  result = variable_scope.variable(
      initial_value=initial_value,
      trainable=False,
      collections=[ops.GraphKeys.LOCAL_VARIABLES],
      validate_shape=False,
      name=name)
  if isinstance(initial_value, ops.Tensor):
    # Match the resulting variable's shape if the initial_value is a Tensor.
    # (validate_shape=False above drops the static shape information.)
    result.set_shape(initial_value.shape)
  return result
def _group_features_by_num_buckets(sorted_feature_columns):
  """Groups feature ids by the number of buckets.

  Derives the feature ids based on iterating through ordered feature columns
  and groups them by the number of buckets each feature require. Returns a
  sorted list of buckets and a list of lists of feature ids for each of those
  buckets.

  Args:
    sorted_feature_columns: a list/set of tf.feature_column sorted by name.

  Returns:
    bucket_size_list: a list of required bucket sizes.
    feature_ids_list: a list of lists of feature ids for each bucket size.

  Raises:
    ValueError: when unsupported features columns are provided.
  """
  bucket_size_to_feature_ids_dict = collections.OrderedDict()

  # TODO(nponomareva) for now we preserve the previous functionality and bucket
  # all numeric into the same num of buckets. Can be easily changed to using
  # each numeric's real buckets num, but we need to test that it does not cause
  # a performance hit.

  # We will replace this dummy key with the real max after we calculate it.
  bucket_size_to_feature_ids_dict[_DUMMY_NUM_BUCKETS] = []

  max_buckets_for_bucketized = 2
  max_buckets_for_indicator = 2

  feature_idx = 0
  # pylint:disable=protected-access
  for column in sorted_feature_columns:
    if isinstance(column, feature_column_lib._IndicatorColumn):
      num_categorical_features = column.categorical_column._num_buckets
      if max_buckets_for_indicator not in bucket_size_to_feature_ids_dict:
        bucket_size_to_feature_ids_dict[max_buckets_for_indicator] = []
      for _ in range(num_categorical_features):
        # We use bucket size of 2 for categorical.
        bucket_size_to_feature_ids_dict[max_buckets_for_indicator].append(
            feature_idx)
        feature_idx += 1
    elif isinstance(column, feature_column_lib._BucketizedColumn):
      max_buckets_for_bucketized = max(max_buckets_for_bucketized,
                                       len(column.boundaries) + 1)
      bucket_size_to_feature_ids_dict[_DUMMY_NUM_BUCKETS].append(feature_idx)
      feature_idx += 1
    else:
      # Any column that is neither indicator nor bucketized is unsupported;
      # a plain else (rather than a redundant `elif not isinstance(...,
      # _IndicatorColumn)`) guarantees nothing is silently skipped.
      raise ValueError(
          'For now, only bucketized_column and indicator column are supported '
          'but got: {}'.format(column))
  # pylint:enable=protected-access

  # Replace the dummy key with the real max num of buckets for all bucketized
  # columns.
  if max_buckets_for_bucketized not in bucket_size_to_feature_ids_dict:
    bucket_size_to_feature_ids_dict[max_buckets_for_bucketized] = []
  bucket_size_to_feature_ids_dict[max_buckets_for_bucketized].extend(
      bucket_size_to_feature_ids_dict[_DUMMY_NUM_BUCKETS])
  del bucket_size_to_feature_ids_dict[_DUMMY_NUM_BUCKETS]

  feature_ids_list = list(bucket_size_to_feature_ids_dict.values())
  bucket_size_list = list(bucket_size_to_feature_ids_dict.keys())
  return bucket_size_list, feature_ids_list
def _calculate_num_features(sorted_feature_columns):
  """Returns the total feature count contributed by the given columns.

  Indicator columns contribute one feature per category; every other
  column contributes exactly one feature.
  """
  total = 0
  # pylint:disable=protected-access
  for col in sorted_feature_columns:
    if isinstance(col, feature_column_lib._IndicatorColumn):
      total += col.categorical_column._num_buckets
    else:
      total += 1
  # pylint:enable=protected-access
  return total
def _cache_transformed_features(features, sorted_feature_columns, batch_size):
  """Transform features and cache, then returns (cached_features, cache_op).

  Args:
    features: a dictionary of name to Tensor.
    sorted_feature_columns: a list/set of tf.feature_column, sorted by name.
    batch_size: the fixed batch size the cache variables are sized for.

  Returns:
    A tuple (input_feature_list, cache_flip_op): the (possibly cached)
    transformed features, plus an op that must be run so the cache update
    is included in the graph.
  """
  num_features = _calculate_num_features(sorted_feature_columns)
  # One int32 local variable of shape [batch_size] per transformed feature.
  cached_features = [
      _local_variable(
          array_ops.zeros([batch_size], dtype=dtypes.int32),
          name='cached_feature_{}'.format(i)) for i in range(num_features)
  ]
  are_features_cached = _local_variable(False, name='are_features_cached')

  def cache_features_and_return():
    """Caches transformed features.

    The intention is to hide get_transformed_features() from the graph by
    caching the result except the first step, since bucketize operation
    (inside get_transformed_features) is expensive.

    Returns:
      input_feature_list: a list of input features.
      cache_flip_op: op to add to graph to make sure cache update is included to
        the graph.
    """
    transformed_features = _get_transformed_features(features,
                                                     sorted_feature_columns)
    cached = [
        state_ops.assign(cached_features[i], transformed_features[i])
        for i in range(num_features)
    ]
    # TODO(youngheek): Try other combination of dependencies so that the
    # function returns a single result, not a tuple.
    # Only flip the flag once all assignments above have executed.
    with ops.control_dependencies(cached):
      cache_flip_op = are_features_cached.assign(True)
    return cached, cache_flip_op

  # First step: transform + fill the cache; later steps: read the cache.
  input_feature_list, cache_flip_op = control_flow_ops.cond(
      are_features_cached, lambda: (cached_features, control_flow_ops.no_op()),
      cache_features_and_return)
  return input_feature_list, cache_flip_op
class _CacheTrainingStatesUsingHashTable(object):
  """Caching logits, etc. using MutableHashTable."""

  def __init__(self, example_ids, logits_dimension):
    """Creates a cache with the given configuration.

    It maintains a MutableDenseHashTable for all values.
    The API lookup() and insert() would have those specs,
      tree_ids: shape=[batch_size], dtype=int32
      node_ids: shape=[batch_size], dtype=int32
      logits: shape=[batch_size, logits_dimension], dtype=float32
    However in the MutableDenseHashTable, ids are bitcasted into float32 and
    all values are concatenated as a single tensor (of float32).

    Hence conversion happens internally before inserting to the HashTable and
    after lookup from it.

    Args:
      example_ids: a Rank 1 tensor to be used as a key of the cache.
      logits_dimension: a constant (int) for the dimension of logits.

    Raises:
      ValueError: if example_ids is other than int64 or string.
    """
    if dtypes.as_dtype(dtypes.int64).is_compatible_with(example_ids.dtype):
      # An int64 value that cannot clash with real example ids.
      empty_key = -1 << 62
    elif dtypes.as_dtype(dtypes.string).is_compatible_with(example_ids.dtype):
      empty_key = ''
    else:
      raise ValueError(
          'Unsupported example_id_feature dtype %s.' % example_ids.dtype)
    # Cache holds latest <tree_id, node_id, logits> for each example.
    # tree_id and node_id are both int32 but logits is a float32.
    # To reduce the overhead, we store all of them together as float32 and
    # bitcast the ids to int32.
    self._table_ref = lookup_ops.mutable_dense_hash_table_v2(
        empty_key=empty_key, value_dtype=dtypes.float32, value_shape=[3])
    self._example_ids = ops.convert_to_tensor(example_ids)
    if self._example_ids.shape.ndims not in (None, 1):
      raise ValueError(
          'example_id should have rank 1, but got %s' % self._example_ids)
    self._logits_dimension = logits_dimension

  def lookup(self):
    """Returns cached_tree_ids, cached_node_ids, cached_logits."""
    # NOTE(review): the default_value is given in float space, while inserted
    # ids are bit patterns of int32 values - confirm the _DUMMY_NODE_ID
    # default round-trips as intended through the bitcast below.
    cached_tree_ids, cached_node_ids, cached_logits = array_ops.split(
        lookup_ops.lookup_table_find_v2(
            self._table_ref,
            self._example_ids,
            default_value=[0.0, _DUMMY_NODE_ID, 0.0]),
        [1, 1, self._logits_dimension],
        axis=1)
    # Reinterpret the stored float bits as the original int32 ids.
    cached_tree_ids = array_ops.squeeze(
        array_ops.bitcast(cached_tree_ids, dtypes.int32))
    cached_node_ids = array_ops.squeeze(
        array_ops.bitcast(cached_node_ids, dtypes.int32))
    if self._example_ids.shape.ndims is not None:
      cached_logits.set_shape(
          [self._example_ids.shape[0], self._logits_dimension])
    return (cached_tree_ids, cached_node_ids, cached_logits)

  def insert(self, tree_ids, node_ids, logits):
    """Inserts values and returns the op."""
    # Pack <tree_id, node_id, logits> into one float32 row per example,
    # bitcasting the int32 ids so they survive the float storage.
    insert_op = lookup_ops.lookup_table_insert_v2(
        self._table_ref, self._example_ids,
        array_ops.concat(
            [
                array_ops.expand_dims(
                    array_ops.bitcast(tree_ids, dtypes.float32), 1),
                array_ops.expand_dims(
                    array_ops.bitcast(node_ids, dtypes.float32), 1),
                logits,
            ],
            axis=1,
            name='value_concat_for_cache_insert'))
    return insert_op
class _CacheTrainingStatesUsingVariables(object):
  """Caching logits, etc. using Variables."""

  def __init__(self, batch_size, logits_dimension):
    """Creates a cache with the given configuration.

    It maintains three variables, tree_ids, node_ids, logits, for caching.
      tree_ids: shape=[batch_size], dtype=int32
      node_ids: shape=[batch_size], dtype=int32
      logits: shape=[batch_size, logits_dimension], dtype=float32

    Note, this can be used only with in-memory data setting.

    Args:
      batch_size: `int`, the size of the cache.
      logits_dimension: a constant (int) for the dimension of logits.
    """
    self._logits_dimension = logits_dimension
    self._tree_ids = _local_variable(
        array_ops.zeros([batch_size], dtype=dtypes.int32),
        name='tree_ids_cache')
    # Node ids start at the dummy value so an uncached entry is detectable.
    self._node_ids = _local_variable(
        _DUMMY_NODE_ID * array_ops.ones([batch_size], dtype=dtypes.int32),
        name='node_ids_cache')
    self._logits = _local_variable(
        array_ops.zeros([batch_size, logits_dimension], dtype=dtypes.float32),
        name='logits_cache')

  def lookup(self):
    """Returns cached_tree_ids, cached_node_ids, cached_logits."""
    return (self._tree_ids, self._node_ids, self._logits)

  def insert(self, tree_ids, node_ids, logits):
    """Inserts values and returns the op."""
    # Group the three assignments so callers get a single update op.
    return control_flow_ops.group(
        [
            self._tree_ids.assign(tree_ids),
            self._node_ids.assign(node_ids),
            self._logits.assign(logits)
        ],
        name='cache_insert')
class _StopAtAttemptsHook(session_run_hook.SessionRunHook):
  """Hook that requests stop at the number of attempts."""

  def __init__(self, num_finalized_trees_tensor, num_attempted_layers_tensor,
               max_trees, max_depth):
    # Tensors tracking ensemble growth, fetched each step via before_run().
    self._num_finalized_trees_tensor = num_finalized_trees_tensor
    self._num_attempted_layers_tensor = num_attempted_layers_tensor
    self._max_trees = max_trees
    self._max_depth = max_depth

  def before_run(self, run_context):
    # Fetch the current counters alongside the training step.
    return session_run_hook.SessionRunArgs(
        [self._num_finalized_trees_tensor, self._num_attempted_layers_tensor])

  def after_run(self, run_context, run_values):
    # num_* tensors should be retrieved by a separate session than the training
    # one, in order to read the values after growing.
    # So, if it's approaching to the limit, get the actual value by additional
    # session.
    num_finalized_trees, num_attempted_layers = run_values.results
    if (num_finalized_trees >= self._max_trees - 1 or
        num_attempted_layers > 2 * self._max_trees * self._max_depth - 1):
      # Near the limit: re-read the post-growth values with a fresh run.
      num_finalized_trees, num_attempted_layers = run_context.session.run(
          [self._num_finalized_trees_tensor, self._num_attempted_layers_tensor])
    if (num_finalized_trees >= self._max_trees or
        num_attempted_layers > 2 * self._max_trees * self._max_depth):
      run_context.request_stop()
def _get_max_splits(tree_hparams):
"""Calculates the max possible number of splits based on tree params."""
# maximum number of splits possible in the whole tree =2^(D-1)-1
max_splits = (1 << tree_hparams.max_depth) - 1
return max_splits
class _EnsembleGrower(object):
"""Abstract base class for different types of ensemble growers.
Use it to receive training ops for growing and centering bias, depending
on the implementation (for example, in memory or accumulator-based
distributed):
grower = ...create subclass grower(tree_ensemble, tree_hparams)
grow_op = grower.grow_tree(stats_summaries_list, feature_ids_list,
last_layer_nodes_range)
training_ops.append(grow_op)
"""
def __init__(self, tree_ensemble, tree_hparams, feature_ids_list):
  """Initializes a grower object.

  Args:
    tree_ensemble: A TreeEnsemble variable.
    tree_hparams: TODO. collections.namedtuple for hyper parameters.
    feature_ids_list: a list of lists of feature ids for each bucket size.

  Raises:
    ValueError: when pruning mode is invalid or pruning is used and no tree
      complexity is set.
  """
  self._tree_ensemble = tree_ensemble
  self._tree_hparams = tree_hparams
  self._feature_ids_list = feature_ids_list
  # pylint: disable=protected-access
  self._pruning_mode_parsed = boosted_trees_ops.PruningMode.from_str(
      tree_hparams.pruning_mode)
  # tree_complexity and pruning must be configured together: a complexity
  # penalty only takes effect with pruning, and pruning requires a penalty.
  if tree_hparams.tree_complexity > 0:
    if self._pruning_mode_parsed == boosted_trees_ops.PruningMode.NO_PRUNING:
      raise ValueError(
          'Tree complexity have no effect unless pruning mode is chosen.')
  else:
    if self._pruning_mode_parsed != boosted_trees_ops.PruningMode.NO_PRUNING:
      raise ValueError('For pruning, tree_complexity must be positive.')
  # pylint: enable=protected-access
@abc.abstractmethod
def center_bias(self, center_bias_var, gradients, hessians):
  """Centers bias, if ready, based on statistics.

  Abstract: subclasses implement the in-memory vs. accumulator-based
  (distributed) variants.

  Args:
    center_bias_var: A variable that will be updated when bias centering
      finished.
    gradients: A rank 2 tensor of gradients.
    hessians: A rank 2 tensor of hessians.

  Returns:
    An operation for centering bias.
  """
@abc.abstractmethod
def grow_tree(self, stats_summaries_list, last_layer_nodes_range):
"""Grows a tree, if ready, based on provided statistics.
Args:
stats_summaries_list: List of stats summary tensors, representing sums of
gradients and hessians for each feature bucket.
last_layer_nodes_range: A tensor representing ids of the nodes in the
current layer, to be split.
Returns:
An op for growing a tree.
"""
def chief_init_op(self):
"""Ops that chief needs to run to initialize the state."""
return control_flow_ops.no_op()
# ============= Helper methods ===========
def _center_bias_fn(self, center_bias_var, mean_gradients, mean_hessians):
"""Updates the ensembles and cache (if needed) with logits prior."""
continue_centering = boosted_trees_ops.center_bias(
self._tree_ensemble.resource_handle,
mean_gradients=mean_gradients,
mean_hessians=mean_hessians,
l1=self._tree_hparams.l1,
l2=self._tree_hparams.l2)
return center_bias_var.assign(continue_centering)
def _grow_tree_from_stats_summaries(self, stats_summaries_list,
last_layer_nodes_range):
"""Updates ensemble based on the best gains from stats summaries."""
node_ids_per_feature = []
gains_list = []
thresholds_list = []
left_node_contribs_list = []
right_node_contribs_list = []
all_feature_ids = []
assert len(stats_summaries_list) == len(self._feature_ids_list)
max_splits = _get_max_splits(self._tree_hparams)
for i, feature_ids in enumerate(self._feature_ids_list):
(numeric_node_ids_per_feature, numeric_gains_list,
numeric_thresholds_list, numeric_left_node_contribs_list,
numeric_right_node_contribs_list) = (
boosted_trees_ops.calculate_best_gains_per_feature(
node_id_range=last_layer_nodes_range,
stats_summary_list=stats_summaries_list[i],
l1=self._tree_hparams.l1,
l2=self._tree_hparams.l2,
tree_complexity=self._tree_hparams.tree_complexity,
min_node_weight=self._tree_hparams.min_node_weight,
max_splits=max_splits))
all_feature_ids += feature_ids
node_ids_per_feature += numeric_node_ids_per_feature
gains_list += numeric_gains_list
thresholds_list += numeric_thresholds_list
left_node_contribs_list += numeric_left_node_contribs_list
right_node_contribs_list += numeric_right_node_contribs_list
grow_op = boosted_trees_ops.update_ensemble(
# Confirm if local_tree_ensemble or tree_ensemble should be used.
self._tree_ensemble.resource_handle,
feature_ids=all_feature_ids,
node_ids=node_ids_per_feature,
gains=gains_list,
thresholds=thresholds_list,
left_node_contribs=left_node_contribs_list,
right_node_contribs=right_node_contribs_list,
learning_rate=self._tree_hparams.learning_rate,
max_depth=self._tree_hparams.max_depth,
pruning_mode=self._pruning_mode_parsed)
return grow_op
class _InMemoryEnsembleGrower(_EnsembleGrower):
  """Ensemble grower for single-machine, in-memory training.

  Because the entire dataset arrives as one batch, bias centering and tree
  growing can happen immediately — no cross-batch accumulation is required.
  """

  def __init__(self, tree_ensemble, tree_hparams, feature_ids_list):
    super(_InMemoryEnsembleGrower, self).__init__(
        tree_ensemble=tree_ensemble, tree_hparams=tree_hparams,
        feature_ids_list=feature_ids_list)

  def center_bias(self, center_bias_var, gradients, hessians):
    """Centers bias right away using full-batch statistics."""
    # The batch is the whole dataset, so its means are the dataset means.
    grad_mean = array_ops.expand_dims(math_ops.reduce_mean(gradients, 0), 0)
    hess_mean = array_ops.expand_dims(math_ops.reduce_mean(hessians, 0), 0)
    return self._center_bias_fn(center_bias_var, grad_mean, hess_mean)

  def grow_tree(self, stats_summaries_list, last_layer_nodes_range):
    """Grows the tree immediately from the full-batch stats summaries."""
    return self._grow_tree_from_stats_summaries(stats_summaries_list,
                                                last_layer_nodes_range)
class _AccumulatorEnsembleGrower(_EnsembleGrower):
  """An accumulator based ensemble grower.

  Gradient/hessian statistics from `n_batches_per_layer` batches are
  aggregated in `ConditionalAccumulator`s; only the chief takes the
  accumulated stats out and grows a layer (or centers the bias) once enough
  batches have been applied. Non-chief workers only contribute stats.
  """

  def __init__(self, tree_ensemble, tree_hparams, stamp_token,
               n_batches_per_layer, bucket_size_list, is_chief, center_bias,
               feature_ids_list):
    """Initializes the grower and its per-feature-group accumulators.

    Args:
      tree_ensemble: A TreeEnsemble variable.
      tree_hparams: TODO. collections.namedtuple for hyper parameters.
      stamp_token: A token used to stamp/invalidate accumulator contents.
      n_batches_per_layer: Number of batches to accumulate before growing.
      bucket_size_list: Bucket counts, one per feature-id group.
      is_chief: Whether this worker is the chief (performs the updates).
      center_bias: Whether a bias-centering accumulator is needed.
      feature_ids_list: a list of lists of feature ids for each bucket size.
    """
    super(_AccumulatorEnsembleGrower, self).__init__(
        tree_ensemble=tree_ensemble, tree_hparams=tree_hparams,
        feature_ids_list=feature_ids_list)
    self._stamp_token = stamp_token
    self._n_batches_per_layer = n_batches_per_layer
    self._bucket_size_list = bucket_size_list
    self._is_chief = is_chief
    self._growing_accumulators = []
    self._chief_init_ops = []
    max_splits = _get_max_splits(self._tree_hparams)
    # One accumulator per bucket-size group of features.
    for i, feature_ids in enumerate(self._feature_ids_list):
      accumulator = data_flow_ops.ConditionalAccumulator(
          dtype=dtypes.float32,
          # The stats consist of grads and hessians (the last dimension).
          shape=[len(feature_ids), max_splits, self._bucket_size_list[i], 2],
          shared_name='numeric_stats_summary_accumulator_' + str(i))
      self._chief_init_ops.append(
          accumulator.set_global_step(self._stamp_token))
      self._growing_accumulators.append(accumulator)
    self._center_bias = center_bias
    if center_bias:
      self._bias_accumulator = data_flow_ops.ConditionalAccumulator(
          dtype=dtypes.float32,
          # The stats consist of grads and hessians means only.
          # TODO(nponomareva): this will change for a multiclass
          shape=[2, 1],
          shared_name='bias_accumulator')
      self._chief_init_ops.append(
          self._bias_accumulator.set_global_step(self._stamp_token))

  def center_bias(self, center_bias_var, gradients, hessians):
    """Accumulates bias stats; the chief centers bias when enough batches arrive.

    Args:
      center_bias_var: Bool variable, cleared once centering has converged.
      gradients: A rank 2 tensor of gradients.
      hessians: A rank 2 tensor of hessians.

    Returns:
      An op that applies this batch's stats and, on the chief, conditionally
      performs the bias-centering update.

    Raises:
      RuntimeError: if this grower was constructed with center_bias=False.
    """
    # For the non-in-memory case we need to accumulate enough batches before
    # proceeding with centering bias.
    if not self._center_bias:
      raise RuntimeError('center_bias called but bias centering is disabled.')
    bias_dependencies = []
    grads_and_hess = array_ops.stack([gradients, hessians], axis=0)
    grads_and_hess = math_ops.reduce_mean(grads_and_hess, axis=1)
    apply_grad = self._bias_accumulator.apply_grad(grads_and_hess,
                                                   self._stamp_token)
    bias_dependencies.append(apply_grad)

    # Center bias if enough batches were processed.
    with ops.control_dependencies(bias_dependencies):
      if not self._is_chief:
        return control_flow_ops.no_op()

      def _set_accumulators_stamp():
        # Advancing the stamp invalidates stale pre-centering stats in the
        # growing accumulators.
        return control_flow_ops.group(
            [acc.set_global_step(self._stamp_token + 1) for acc in
             self._growing_accumulators])

      def center_bias_from_accumulator():
        accumulated = array_ops.unstack(self._bias_accumulator.take_grad(1),
                                        axis=0)
        center_bias_op = self._center_bias_fn(
            center_bias_var,
            array_ops.expand_dims(accumulated[0], 0),
            array_ops.expand_dims(accumulated[1], 0))
        with ops.control_dependencies([center_bias_op]):
          # If centering needs more iterations, keep going; otherwise bump
          # the accumulator stamps so tree growing starts fresh.
          return control_flow_ops.cond(center_bias_var,
                                       control_flow_ops.no_op,
                                       _set_accumulators_stamp)

      center_bias_op = control_flow_ops.cond(
          math_ops.greater_equal(self._bias_accumulator.num_accumulated(),
                                 self._n_batches_per_layer),
          center_bias_from_accumulator,
          control_flow_ops.no_op,
          name='wait_until_n_batches_for_bias_accumulated')
      return center_bias_op

  def grow_tree(self, stats_summaries_list, last_layer_nodes_range):
    """Accumulates layer stats; the chief grows a layer when enough batches arrive."""
    dependencies = []
    for i in range(len(self._feature_ids_list)):
      stats_summaries = stats_summaries_list[i]
      apply_grad = self._growing_accumulators[i].apply_grad(
          array_ops.stack(stats_summaries, axis=0), self._stamp_token)
      dependencies.append(apply_grad)

    # Grow the tree if enough batches are accumulated.
    with ops.control_dependencies(dependencies):
      if not self._is_chief:
        return control_flow_ops.no_op()

      # All accumulators must have seen enough batches; take the slowest one.
      min_accumulated = math_ops.reduce_min(
          array_ops.stack([acc.num_accumulated() for acc in
                           self._growing_accumulators]))

      def grow_tree_from_accumulated_summaries_fn():
        """Updates tree with the best layer from accumulated summaries."""
        # Take out the accumulated summaries from the accumulator and grow.
        # (Removed a dead `stats_summaries_list = []` assignment that was
        # immediately overwritten by this comprehension.)
        stats_summaries_list = [
            array_ops.unstack(accumulator.take_grad(1), axis=0)
            for accumulator in self._growing_accumulators
        ]
        grow_op = self._grow_tree_from_stats_summaries(
            stats_summaries_list, last_layer_nodes_range)
        return grow_op

      grow_model = control_flow_ops.cond(
          math_ops.greater_equal(min_accumulated, self._n_batches_per_layer),
          grow_tree_from_accumulated_summaries_fn,
          control_flow_ops.no_op,
          name='wait_until_n_batches_accumulated')
      return grow_model

  def chief_init_op(self):
    """Ops that chief needs to run to initialize the state."""
    return control_flow_ops.group(self._chief_init_ops)
def _bt_model_fn(
    features,
    labels,
    mode,
    head,
    feature_columns,
    tree_hparams,
    n_batches_per_layer,
    config,
    closed_form_grad_and_hess_fn=None,
    example_id_column_name=None,
    # TODO(youngheek): replace this later using other options.
    train_in_memory=False,
    name='boosted_trees'):
  """Gradient Boosted Trees model_fn.

  Args:
    features: dict of `Tensor`.
    labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
      dtype `int32` or `int64` in the range `[0, n_classes)`.
    mode: Defines whether this is training, evaluation or prediction.
      See `ModeKeys`.
    head: A `head_lib._Head` instance.
    feature_columns: Iterable of `feature_column._FeatureColumn` model inputs.
    tree_hparams: TODO. collections.namedtuple for hyper parameters.
    n_batches_per_layer: A `Tensor` of `int64`. Each layer is built after at
      least n_batches_per_layer accumulations.
    config: `RunConfig` object to configure the runtime settings.
    closed_form_grad_and_hess_fn: a function that accepts logits and labels
      and returns gradients and hessians. By default, they are created by
      tf.gradients() from the loss.
    example_id_column_name: Name of the feature for a unique ID per example.
      Currently experimental -- not exposed to public API.
    train_in_memory: `bool`, when true, it assumes the dataset is in memory,
      i.e., input_fn should return the entire dataset as a single batch, and
      also n_batches_per_layer should be set as 1.
    name: Name to use for the model.

  Returns:
    An `EstimatorSpec` instance.

  Raises:
    ValueError: mode or params are invalid, or features has the wrong type.
  """
  # Sorting gives a deterministic feature order across workers and runs.
  sorted_feature_columns = sorted(feature_columns, key=lambda tc: tc.name)
  with ops.name_scope(name) as name:
    # Prepare.
    global_step = training_util.get_or_create_global_step()
    bucket_size_list, feature_ids_list = _group_features_by_num_buckets(
        sorted_feature_columns)
    # Create Ensemble resources.
    tree_ensemble = boosted_trees_ops.TreeEnsemble(name=name)
    # Create logits.
    if mode != model_fn_lib.ModeKeys.TRAIN:
      input_feature_list = _get_transformed_features(features,
                                                     sorted_feature_columns)
      logits = boosted_trees_ops.predict(
          # For non-TRAIN mode, ensemble doesn't change after initialization,
          # so no local copy is needed; using tree_ensemble directly.
          tree_ensemble_handle=tree_ensemble.resource_handle,
          bucketized_features=input_feature_list,
          logits_dimension=head.logits_dimension)
      # EVAL/PREDICT never trains, so the train_op is a no_op.
      return head.create_estimator_spec(
          features=features,
          mode=mode,
          labels=labels,
          train_op_fn=control_flow_ops.no_op,
          logits=logits)

    # ============== Training graph ==============
    center_bias = tree_hparams.center_bias
    is_single_machine = (config.num_worker_replicas <= 1)

    if train_in_memory:
      assert n_batches_per_layer == 1, (
          'When train_in_memory is enabled, input_fn should return the entire '
          'dataset as a single batch, and n_batches_per_layer should be set as '
          '1.')
      if (not config.is_chief or config.num_worker_replicas > 1 or
          config.num_ps_replicas > 0):
        raise ValueError('train_in_memory is supported only for '
                         'non-distributed training.')
    worker_device = control_flow_ops.no_op().device
    train_op = []
    # Extract input features and set up cache for training.
    training_state_cache = None
    if train_in_memory:
      # cache transformed features as well for in-memory training.
      batch_size = array_ops.shape(labels)[0]
      input_feature_list, input_cache_op = (
          _cache_transformed_features(features, sorted_feature_columns,
                                      batch_size))
      train_op.append(input_cache_op)
      training_state_cache = _CacheTrainingStatesUsingVariables(
          batch_size, head.logits_dimension)
    else:
      input_feature_list = _get_transformed_features(features,
                                                     sorted_feature_columns)
      if example_id_column_name:
        # With per-example ids, training state can be cached across batches
        # in a hash table keyed by the id.
        example_ids = features[example_id_column_name]
        training_state_cache = _CacheTrainingStatesUsingHashTable(
            example_ids, head.logits_dimension)
    if training_state_cache:
      cached_tree_ids, cached_node_ids, cached_logits = (
          training_state_cache.lookup())
    else:
      # Always start from the beginning when no cache is set up.
      batch_size = array_ops.shape(labels)[0]
      cached_tree_ids, cached_node_ids, cached_logits = (
          array_ops.zeros([batch_size], dtype=dtypes.int32),
          _DUMMY_NODE_ID * array_ops.ones([batch_size], dtype=dtypes.int32),
          array_ops.zeros(
              [batch_size, head.logits_dimension], dtype=dtypes.float32))

    if is_single_machine:
      local_tree_ensemble = tree_ensemble
      ensemble_reload = control_flow_ops.no_op()
    else:
      # Have a local copy of ensemble for the distributed setting.
      with ops.device(worker_device):
        local_tree_ensemble = boosted_trees_ops.TreeEnsemble(
            name=name + '_local', is_local=True)
      # TODO(soroush): Do partial updates if this becomes a bottleneck.
      ensemble_reload = local_tree_ensemble.deserialize(
          *tree_ensemble.serialize())

    with ops.control_dependencies([ensemble_reload]):
      (stamp_token, num_trees, num_finalized_trees, num_attempted_layers,
       last_layer_nodes_range) = local_tree_ensemble.get_states()
      partial_logits, tree_ids, node_ids = boosted_trees_ops.training_predict(
          tree_ensemble_handle=local_tree_ensemble.resource_handle,
          cached_tree_ids=cached_tree_ids,
          cached_node_ids=cached_node_ids,
          bucketized_features=input_feature_list,
          logits_dimension=head.logits_dimension)
    # Final logits = cached partial sums + contribution of trees grown since
    # the cache entry was written.
    logits = cached_logits + partial_logits

    if train_in_memory:
      grower = _InMemoryEnsembleGrower(tree_ensemble, tree_hparams,
                                       feature_ids_list=feature_ids_list)
    else:
      grower = _AccumulatorEnsembleGrower(tree_ensemble, tree_hparams,
                                          stamp_token, n_batches_per_layer,
                                          bucket_size_list, config.is_chief,
                                          center_bias=center_bias,
                                          feature_ids_list=feature_ids_list)

    summary.scalar('ensemble/num_trees', num_trees)
    summary.scalar('ensemble/num_finalized_trees', num_finalized_trees)
    summary.scalar('ensemble/num_attempted_layers', num_attempted_layers)
    # Variable that determines whether bias centering is needed.
    center_bias_var = variable_scope.variable(
        initial_value=center_bias, name='center_bias_needed', trainable=False,
        use_resource=True)

    # Create training graph.
    def _train_op_fn(loss):
      """Run one training iteration."""
      if training_state_cache:
        # Cache logits only after center_bias is complete, if it's in progress.
        train_op.append(
            control_flow_ops.cond(
                center_bias_var, control_flow_ops.no_op,
                lambda: training_state_cache.insert(tree_ids, node_ids, logits))
        )
      if closed_form_grad_and_hess_fn:
        gradients, hessians = closed_form_grad_and_hess_fn(logits, labels)
      else:
        # Hessians are the gradients of the gradients (diagonal only).
        gradients = gradients_impl.gradients(loss, logits, name='Gradients')[0]
        hessians = gradients_impl.gradients(
            gradients, logits, name='Hessians')[0]
      # TODO(youngheek): perhaps storage could be optimized by storing stats
      # with the dimension max_splits_per_layer, instead of max_splits (for the
      # entire tree).
      max_splits = _get_max_splits(tree_hparams)

      # Build one stats summary per feature, grouped by bucket size.
      stats_summaries_list = []
      for i, feature_ids in enumerate(feature_ids_list):
        num_buckets = bucket_size_list[i]
        summaries = [
            array_ops.squeeze(
                boosted_trees_ops.make_stats_summary(
                    node_ids=node_ids,
                    gradients=gradients,
                    hessians=hessians,
                    bucketized_features_list=[input_feature_list[f]],
                    max_splits=max_splits,
                    num_buckets=num_buckets),
                axis=0) for f in feature_ids
        ]
        stats_summaries_list.append(summaries)

      if center_bias:
        # While centering is still in progress, update the bias; once it
        # converges (center_bias_var becomes False), grow trees instead.
        update_model = control_flow_ops.cond(
            center_bias_var,
            functools.partial(
                grower.center_bias,
                center_bias_var,
                gradients,
                hessians,
            ),
            functools.partial(grower.grow_tree, stats_summaries_list,
                              last_layer_nodes_range))
      else:
        update_model = grower.grow_tree(stats_summaries_list,
                                        last_layer_nodes_range)
      train_op.append(update_model)

      with ops.control_dependencies([update_model]):
        increment_global = state_ops.assign_add(global_step, 1).op
        train_op.append(increment_global)

      return control_flow_ops.group(train_op, name='train_op')

    estimator_spec = head.create_estimator_spec(
        features=features,
        mode=mode,
        labels=labels,
        train_op_fn=_train_op_fn,
        logits=logits)
    # Add an early stop hook.
    estimator_spec = estimator_spec._replace(
        training_hooks=estimator_spec.training_hooks +
        (_StopAtAttemptsHook(num_finalized_trees, num_attempted_layers,
                             tree_hparams.n_trees, tree_hparams.max_depth),),
        training_chief_hooks=[GrowerInitializationHook(grower.chief_init_op())] +
        list(estimator_spec.training_chief_hooks))
    return estimator_spec
class GrowerInitializationHook(session_run_hook.SessionRunHook):
  """A SessionRunHook handles initialization of `_EnsembleGrower`."""

  def __init__(self, init_op):
    """Stores the grower initialization op to run once a session exists."""
    self._grower_init_op = init_op

  def after_create_session(self, session, coord):
    """Executes the stored initialization op in the newly created session."""
    session.run(self._grower_init_op)
def _create_classification_head(n_classes,
weight_column=None,
label_vocabulary=None):
"""Creates a classification head. Refer to canned.head for details on args."""
# TODO(nponomareva): Support multi-class cases.
if n_classes == 2:
# pylint: disable=protected-access
return head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
weight_column=weight_column,
label_vocabulary=label_vocabulary,
loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)
# pylint: enable=protected-access
else:
raise ValueError('For now only binary classification is supported.'
'n_classes given as {}'.format(n_classes))
def _create_classification_head_and_closed_form(n_classes, weight_column,
                                                label_vocabulary):
  """Creates a head for classifier and the closed form gradients/hessians."""
  head = _create_classification_head(n_classes, weight_column, label_vocabulary)
  # Closed-form grads/hessians only apply to plain, unweighted binary
  # classification with scalar logits.
  use_closed_form = (n_classes == 2 and head.logits_dimension == 1 and
                     weight_column is None and label_vocabulary is None)
  if not use_closed_form:
    return (head, None)

  def _grad_and_hess_for_logloss(logits, labels):
    """A closed form gradient and hessian for logistic loss."""
    # TODO(youngheek): add weights handling.
    # sigmoid(logits) = 1 / (1 + exp(-logits))
    predictions = math_ops.reciprocal(math_ops.exp(-logits) + 1.0)
    # Normalize by example count, matching SUM_OVER_BATCH_SIZE loss reduction.
    normalizer = math_ops.reciprocal(
        math_ops.cast(array_ops.size(predictions), dtypes.float32))
    labels = math_ops.cast(labels, dtypes.float32)
    labels = head_lib._check_dense_labels_match_logits_and_reshape(  # pylint: disable=protected-access
        labels, logits, head.logits_dimension)
    gradients = (predictions - labels) * normalizer
    hessians = predictions * (1.0 - predictions) * normalizer
    return gradients, hessians

  return (head, _grad_and_hess_for_logloss)
def _create_regression_head(label_dimension, weight_column=None):
if label_dimension != 1:
raise ValueError('For now only 1 dimension regression is supported.'
'label_dimension given as {}'.format(label_dimension))
# pylint: disable=protected-access
return head_lib._regression_head(
label_dimension=label_dimension,
weight_column=weight_column,
loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)
# pylint: enable=protected-access
def _bt_explanations_fn(features,
                        head,
                        sorted_feature_columns,
                        name='boosted_trees'):
  """Gradient Boosted Trees predict with explanations model_fn.

  Args:
    features: dict of `Tensor`.
    head: A `head_lib._Head` instance.
    sorted_feature_columns: Sorted iterable of `feature_column._FeatureColumn`
      model inputs.
    name: Name used for the model.

  Returns:
    An `EstimatorSpec` instance whose predictions additionally contain the
    serialized per-example debug proto under `_DEBUG_PROTO_KEY`.

  Raises:
    ValueError: mode or params are invalid, or features has the wrong type.
  """
  # Explanations are only produced at prediction time.
  mode = model_fn_lib.ModeKeys.PREDICT
  with ops.name_scope(name) as name:
    # Create Ensemble resources.
    tree_ensemble = boosted_trees_ops.TreeEnsemble(name=name)

    input_feature_list = _get_transformed_features(features,
                                                   sorted_feature_columns)

    logits = boosted_trees_ops.predict(
        # For non-TRAIN mode, ensemble doesn't change after initialization,
        # so no local copy is needed; using tree_ensemble directly.
        tree_ensemble_handle=tree_ensemble.resource_handle,
        bucketized_features=input_feature_list,
        logits_dimension=head.logits_dimension)

    estimator_spec = head.create_estimator_spec(
        features=features,
        mode=mode,
        labels=None,
        train_op_fn=control_flow_ops.no_op,
        logits=logits)
    # Per-example debug outputs (serialized proto) are parsed later into
    # directional feature contributions (DFCs) and bias.
    debug_op = boosted_trees_ops.example_debug_outputs(
        tree_ensemble.resource_handle,
        bucketized_features=input_feature_list,
        logits_dimension=head.logits_dimension)
    estimator_spec.predictions[boosted_trees_utils._DEBUG_PROTO_KEY] = debug_op  # pylint: disable=protected-access
    return estimator_spec
class _BoostedTreesBase(estimator.Estimator):
  """Base class for boosted trees estimators.

  This class is intended to keep tree-specific functions (E.g., methods for
  feature importances and directional feature contributions) in one central
  place.

  It is not a valid (working) Estimator on its own and should only be used as a
  base class.
  """

  def __init__(self, model_fn, model_dir, config, feature_columns, head,
               center_bias, is_classification):
    """Initializes a `_BoostedTreesBase` instance.

    Args:
      model_fn: Model function. See base class for more detail.
      model_dir: Directory to save model parameters, graph and etc. See base
        class for more detail.
      config: `estimator.RunConfig` configuration object.
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the set should be instances of classes derived
        from `FeatureColumn`
      head: A `head_lib._Head` instance.
      center_bias: Whether bias centering needs to occur. Bias centering refers
        to the first node in the very first tree returning the prediction that
        is aligned with the original labels distribution. For example, for
        regression problems, the first node will return the mean of the labels.
        For binary classification problems, it will return a logit for a prior
        probability of label 1.
      is_classification: If the estimator is for classification.
    """
    super(_BoostedTreesBase, self).__init__(
        model_fn=model_fn, model_dir=model_dir, config=config)
    # Sorted for a deterministic feature order, matching _bt_model_fn.
    self._sorted_feature_columns = sorted(
        feature_columns, key=lambda tc: tc.name)
    self._head = head
    self._n_features = _calculate_num_features(self._sorted_feature_columns)
    self._center_bias = center_bias
    self._is_classification = is_classification

  def experimental_predict_with_explanations(self,
                                             input_fn,
                                             predict_keys=None,
                                             hooks=None,
                                             checkpoint_path=None):
    """Computes model explainability outputs per example along with predictions.

    Currently supports directional feature contributions (DFCs). For each
    instance, DFCs indicate the aggregate contribution of each feature. See
    https://arxiv.org/abs/1312.1121 and
    http://blog.datadive.net/interpreting-random-forests/ for more details.

    Args:
      input_fn: A function that provides input data for predicting as
        minibatches. See [Premade Estimators](
        https://tensorflow.org/guide/premade_estimators#create_input_functions)
        for more information. The function should construct and return one of
        the following: * A `tf.data.Dataset` object: Outputs of `Dataset`
        object must be a tuple `(features, labels)` with same constraints as
        below. * A tuple `(features, labels)`: Where `features` is a `tf.Tensor`
        or a dictionary of string feature name to `Tensor` and `labels` is a
        `Tensor` or a dictionary of string label name to `Tensor`. Both
        `features` and `labels` are consumed by `model_fn`. They should
        satisfy the expectation of `model_fn` from inputs.
      predict_keys: list of `str`, name of the keys to predict. It is used if
        the `tf.estimator.EstimatorSpec.predictions` is a `dict`. If
        `predict_keys` is used then rest of the predictions will be filtered
        from the dictionary, with the exception of 'bias' and 'dfc', which will
        always be in the dictionary. If `None`, returns all keys in prediction
        dict, as well as two new keys 'dfc' and 'bias'.
      hooks: List of `tf.train.SessionRunHook` subclass instances. Used for
        callbacks inside the prediction call.
      checkpoint_path: Path of a specific checkpoint to predict. If `None`, the
        latest checkpoint in `model_dir` is used. If there are no checkpoints
        in `model_dir`, prediction is run with newly initialized `Variables`
        instead of ones restored from checkpoint.

    Yields:
      Evaluated values of `predictions` tensors. The `predictions` tensors will
      contain at least two keys 'dfc' and 'bias' for model explanations. The
      `dfc` value corresponds to the contribution of each feature to the overall
      prediction for this instance (positive indicating that the feature makes
      it more likely to select class 1 and negative less likely). The 'bias'
      value will be the same across all the instances, corresponding to the
      probability (classification) or prediction (regression) of the training
      data distribution.

    Raises:
      ValueError: when wrong arguments are given or unsupported functionalities
        are requested.
    """
    if not self._center_bias:
      raise ValueError('center_bias must be enabled during estimator '
                       'instantiation when using '
                       'experimental_predict_with_explanations.')
    # pylint: disable=protected-access
    if not self._is_classification:
      # For regression, DFC parsing assumes logits equal predictions, so only
      # an identity (or absent) inverse link function is supported.
      identity_inverse_link_fn = self._head._inverse_link_fn in (None,
                                                                 tf_identity)
      # pylint:enable=protected-access
      if not identity_inverse_link_fn:
        raise ValueError(
            'For now only identity inverse_link_fn in regression_head is '
            'supported for experimental_predict_with_explanations.')

    # pylint:disable=unused-argument
    def new_model_fn(features, labels, mode):
      # Replaces the training model_fn with one that also emits debug outputs.
      return _bt_explanations_fn(features, self._head,
                                 self._sorted_feature_columns)

    # pylint:enable=unused-argument
    est = estimator.Estimator(
        model_fn=new_model_fn,
        model_dir=self.model_dir,
        config=self.config,
        warm_start_from=self._warm_start_settings)
    # Make sure bias and dfc will be in prediction dict.
    user_supplied_predict_keys = predict_keys is not None
    if user_supplied_predict_keys:
      predict_keys = set(predict_keys)
      predict_keys.add(boosted_trees_utils._DEBUG_PROTO_KEY)
    predictions = est.predict(
        input_fn,
        predict_keys=predict_keys,
        hooks=hooks,
        checkpoint_path=checkpoint_path,
        yield_single_examples=True)
    for pred in predictions:
      bias, dfcs = boosted_trees_utils._parse_explanations_from_prediction(
          pred[boosted_trees_utils._DEBUG_PROTO_KEY], self._n_features,
          self._is_classification)
      pred['bias'] = bias
      pred['dfc'] = dfcs
      # Don't need to expose serialized proto to end user.
      del pred[boosted_trees_utils._DEBUG_PROTO_KEY]
      yield pred
# pylint: disable=protected-access
@estimator_export('estimator.BoostedTreesClassifier')
class BoostedTreesClassifier(_BoostedTreesBase):
  """A Classifier for Tensorflow Boosted Trees models.

  @compatibility(eager)
  Estimators can be used while eager execution is enabled. Note that `input_fn`
  and all hooks are executed inside a graph context, so they have to be written
  to be compatible with graph mode. Note that `input_fn` code using `tf.data`
  generally works in both graph and eager modes.
  @end_compatibility
  """

  def __init__(self,
               feature_columns,
               n_batches_per_layer,
               model_dir=None,
               n_classes=_HOLD_FOR_MULTI_CLASS_SUPPORT,
               weight_column=None,
               label_vocabulary=None,
               n_trees=100,
               max_depth=6,
               learning_rate=0.1,
               l1_regularization=0.,
               l2_regularization=0.,
               tree_complexity=0.,
               min_node_weight=0.,
               config=None,
               center_bias=False,
               pruning_mode='none'):
    """Initializes a `BoostedTreesClassifier` instance.

    Example:

    ```python
    bucketized_feature_1 = bucketized_column(
      numeric_column('feature_1'), BUCKET_BOUNDARIES_1)
    bucketized_feature_2 = bucketized_column(
      numeric_column('feature_2'), BUCKET_BOUNDARIES_2)

    # Need to see a large portion of the data before we can build a layer, for
    # example half of data n_batches_per_layer = 0.5 * NUM_EXAMPLES / BATCH_SIZE
    classifier = estimator.BoostedTreesClassifier(
        feature_columns=[bucketized_feature_1, bucketized_feature_2],
        n_batches_per_layer=n_batches_per_layer,
        n_trees=100,
        ... <some other params>
    )

    def input_fn_train():
      ...
      return dataset

    classifier.train(input_fn=input_fn_train)

    def input_fn_eval():
      ...
      return dataset

    metrics = classifier.evaluate(input_fn=input_fn_eval)
    ```

    Args:
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the set should be instances of classes derived
        from `FeatureColumn`.
      n_batches_per_layer: the number of batches to collect statistics per
        layer. The total number of batches is total number of data divided by
        batch size.
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator
        to continue training a previously saved model.
      n_classes: number of label classes. Default is binary classification.
        Multiclass support is not yet implemented.
      weight_column: A string or a `_NumericColumn` created by
        `tf.feature_column.numeric_column` defining feature column representing
        weights. It is used to downweight or boost examples during training. It
        will be multiplied by the loss of the example. If it is a string, it is
        used as a key to fetch weight tensor from the `features`. If it is a
        `_NumericColumn`, raw tensor is fetched by key `weight_column.key`,
        then weight_column.normalizer_fn is applied on it to get weight tensor.
      label_vocabulary: A list of strings represents possible label values. If
        given, labels must be string type and have any value in
        `label_vocabulary`. If it is not given, that means labels are
        already encoded as integer or float within [0, 1] for `n_classes=2` and
        encoded as integer values in {0, 1,..., n_classes-1} for `n_classes`>2 .
        Also there will be errors if vocabulary is not provided and labels are
        string.
      n_trees: number trees to be created.
      max_depth: maximum depth of the tree to grow.
      learning_rate: shrinkage parameter to be used when a tree added to the
        model.
      l1_regularization: regularization multiplier applied to the absolute
        weights of the tree leafs.
      l2_regularization: regularization multiplier applied to the square weights
        of the tree leafs.
      tree_complexity: regularization factor to penalize trees with more leaves.
      min_node_weight: minimum hessian a node must have for a
        split to be considered. The value will be compared with
        sum(leaf_hessian)/(batch_size * n_batches_per_layer).
      config: `RunConfig` object to configure the runtime settings.
      center_bias: Whether bias centering needs to occur. Bias centering refers
        to the first node in the very first tree returning the prediction that
        is aligned with the original labels distribution. For example, for
        regression problems, the first node will return the mean of the labels.
        For binary classification problems, it will return a logit for a prior
        probability of label 1.
      pruning_mode: one of 'none', 'pre', 'post' to indicate no pruning, pre-
        pruning (do not split a node if not enough gain is observed) and post
        pruning (build the tree up to a max depth and then prune branches with
        negative gain). For pre and post pruning, you MUST provide
        tree_complexity >0.

    Raises:
      ValueError: when wrong arguments are given or unsupported functionalities
        are requested.
    """
    # TODO(nponomareva): Support multi-class cases.
    if n_classes == _HOLD_FOR_MULTI_CLASS_SUPPORT:
      n_classes = 2
    # For plain binary classification a closed-form gradient/hessian is
    # available and used instead of tf.gradients().
    head, closed_form = _create_classification_head_and_closed_form(
        n_classes, weight_column, label_vocabulary=label_vocabulary)
    # HParams for the model.
    tree_hparams = _TreeHParams(
        n_trees, max_depth, learning_rate, l1_regularization, l2_regularization,
        tree_complexity, min_node_weight, center_bias, pruning_mode)

    def _model_fn(features, labels, mode, config):
      return _bt_model_fn(
          features,
          labels,
          mode,
          head,
          feature_columns,
          tree_hparams,
          n_batches_per_layer,
          config,
          closed_form_grad_and_hess_fn=closed_form)

    super(BoostedTreesClassifier, self).__init__(
        model_fn=_model_fn,
        model_dir=model_dir,
        config=config,
        feature_columns=feature_columns,
        head=head,
        center_bias=center_bias,
        is_classification=True)
@estimator_export('estimator.BoostedTreesRegressor')
class BoostedTreesRegressor(_BoostedTreesBase):
  """A Regressor for Tensorflow Boosted Trees models.

  @compatibility(eager)
  Estimators can be used while eager execution is enabled. Note that `input_fn`
  and all hooks are executed inside a graph context, so they have to be written
  to be compatible with graph mode. Note that `input_fn` code using `tf.data`
  generally works in both graph and eager modes.
  @end_compatibility
  """

  def __init__(self,
               feature_columns,
               n_batches_per_layer,
               model_dir=None,
               label_dimension=_HOLD_FOR_MULTI_DIM_SUPPORT,
               weight_column=None,
               n_trees=100,
               max_depth=6,
               learning_rate=0.1,
               l1_regularization=0.,
               l2_regularization=0.,
               tree_complexity=0.,
               min_node_weight=0.,
               config=None,
               center_bias=False,
               pruning_mode='none'):
    """Initializes a `BoostedTreesRegressor` instance.

    Example:

    ```python
    bucketized_feature_1 = bucketized_column(
      numeric_column('feature_1'), BUCKET_BOUNDARIES_1)
    bucketized_feature_2 = bucketized_column(
      numeric_column('feature_2'), BUCKET_BOUNDARIES_2)

    # Need to see a large portion of the data before we can build a layer, for
    # example half of data n_batches_per_layer = 0.5 * NUM_EXAMPLES / BATCH_SIZE
    regressor = estimator.BoostedTreesRegressor(
        feature_columns=[bucketized_feature_1, bucketized_feature_2],
        n_batches_per_layer=n_batches_per_layer,
        n_trees=100,
        ... <some other params>
    )

    regressor.train(input_fn=input_fn_train)
    metrics = regressor.evaluate(input_fn=input_fn_eval)
    ```

    Args:
      feature_columns: An iterable of `FeatureColumn`-derived instances
        containing all the feature columns used by the model.
      n_batches_per_layer: the number of batches to collect statistics per
        layer; the total number of batches is the dataset size divided by the
        batch size.
      model_dir: Directory to save model parameters, graph and etc. Also used
        to load checkpoints so training can resume from a saved model.
      label_dimension: Number of regression targets per example.
        Multi-dimensional support is not yet implemented.
      weight_column: A string or a `_NumericColumn` created by
        `tf.feature_column.numeric_column` defining the weights feature; used
        to down-weight or boost examples during training (it is multiplied by
        the example's loss). A string is used as a key into `features`; a
        `_NumericColumn` fetches the raw tensor by `weight_column.key` and
        then applies `weight_column.normalizer_fn` to it.
      n_trees: number of trees to be created.
      max_depth: maximum depth of the tree to grow.
      learning_rate: shrinkage parameter applied when a tree is added to the
        model.
      l1_regularization: regularization multiplier applied to the absolute
        weights of the tree leafs.
      l2_regularization: regularization multiplier applied to the square
        weights of the tree leafs.
      tree_complexity: regularization factor penalizing trees with more
        leaves.
      min_node_weight: minimum hessian a node must have for a split to be
        considered; compared with
        sum(leaf_hessian)/(batch_size * n_batches_per_layer).
      config: `RunConfig` object to configure the runtime settings.
      center_bias: Whether bias centering needs to occur, i.e. whether the
        first node of the very first tree returns the prediction aligned with
        the original label distribution (the label mean for regression, a
        prior-probability logit for binary classification).
      pruning_mode: one of 'none', 'pre', 'post'. Pre-pruning skips splits
        without enough gain; post-pruning builds the tree to max depth and
        then removes branches with negative gain. Both require
        tree_complexity > 0.

    Raises:
      ValueError: when wrong arguments are given or unsupported functionalities
        are requested.
    """
    # TODO(nponomareva): Extend it to multi-dimension cases.
    if label_dimension == _HOLD_FOR_MULTI_DIM_SUPPORT:
      label_dimension = 1
    head = _create_regression_head(label_dimension, weight_column)

    # Hyper-parameters describing the forest to be grown.
    hparams = _TreeHParams(
        n_trees, max_depth, learning_rate, l1_regularization,
        l2_regularization, tree_complexity, min_node_weight, center_bias,
        pruning_mode)

    def _model_fn(features, labels, mode, config):
      # Delegate to the shared boosted-trees model function.
      return _bt_model_fn(features, labels, mode, head, feature_columns,
                          hparams, n_batches_per_layer, config)

    super(BoostedTreesRegressor, self).__init__(
        model_fn=_model_fn,
        model_dir=model_dir,
        config=config,
        feature_columns=feature_columns,
        head=head,
        center_bias=center_bias,
        is_classification=False)
# pylint: enable=protected-access
| {
"content_hash": "9598b27617f2160aca660d0d32835618",
"timestamp": "",
"source": "github",
"line_count": 1423,
"max_line_length": 115,
"avg_line_length": 42.53548840477864,
"alnum_prop": 0.66298242135871,
"repo_name": "xodus7/tensorflow",
"id": "756d32d03fe52ad265420ffd6e9b921934976498",
"size": "61217",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/estimator/canned/boosted_trees.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1286"
},
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "340946"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "48861698"
},
{
"name": "CMake",
"bytes": "195699"
},
{
"name": "Dockerfile",
"bytes": "36400"
},
{
"name": "Go",
"bytes": "1240309"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "834061"
},
{
"name": "Jupyter Notebook",
"bytes": "2604756"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "52618"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "40952138"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "459258"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
from typing import NoReturn, TypeVar
from nacl import exceptions as exc
from nacl._sodium import ffi, lib
from nacl.exceptions import ensure
# BLAKE2b size limits, queried once from libsodium at import time.
# Default / min / max digest lengths in bytes.
crypto_generichash_BYTES: int = lib.crypto_generichash_blake2b_bytes()
crypto_generichash_BYTES_MIN: int = lib.crypto_generichash_blake2b_bytes_min()
crypto_generichash_BYTES_MAX: int = lib.crypto_generichash_blake2b_bytes_max()
# Default / min / max key lengths in bytes (zero-length key = unkeyed hash).
crypto_generichash_KEYBYTES: int = lib.crypto_generichash_blake2b_keybytes()
crypto_generichash_KEYBYTES_MIN: int = (
    lib.crypto_generichash_blake2b_keybytes_min()
)
crypto_generichash_KEYBYTES_MAX: int = (
    lib.crypto_generichash_blake2b_keybytes_max()
)
# Fixed lengths of the salt and personalization parameters.
crypto_generichash_SALTBYTES: int = lib.crypto_generichash_blake2b_saltbytes()
crypto_generichash_PERSONALBYTES: int = (
    lib.crypto_generichash_blake2b_personalbytes()
)
# Size of the opaque incremental-hashing state structure.
crypto_generichash_STATEBYTES: int = lib.crypto_generichash_statebytes()

# Error-message templates shared by the parameter checks below.
_OVERLONG = "{0} length greater than {1} bytes"
_TOOBIG = "{0} greater than {1}"
def _checkparams(
    digest_size: int, key: bytes, salt: bytes, person: bytes
) -> None:
    """Validate the parameter set shared by the one-shot and init APIs.

    Raises :py:exc:`nacl.exceptions.TypeError` for wrongly typed arguments
    and :py:exc:`nacl.exceptions.ValueError` for out-of-range sizes.
    """
    # Type checks first, in the same order as the original checks so the
    # first failing argument determines the raised exception.
    for value, label in ((key, "Key"), (salt, "Salt"), (person, "Person")):
        ensure(
            isinstance(value, bytes),
            "%s must be a bytes sequence" % label,
            raising=exc.TypeError,
        )
    ensure(
        isinstance(digest_size, int),
        "Digest size must be an integer number",
        raising=exc.TypeError,
    )

    # Size limits, as exposed by libsodium.
    ensure(
        digest_size <= crypto_generichash_BYTES_MAX,
        _TOOBIG.format("Digest_size", crypto_generichash_BYTES_MAX),
        raising=exc.ValueError,
    )
    for value, label, limit in (
        (key, "Key", crypto_generichash_KEYBYTES_MAX),
        (salt, "Salt", crypto_generichash_SALTBYTES),
        (person, "Person", crypto_generichash_PERSONALBYTES),
    ):
        ensure(
            len(value) <= limit,
            _OVERLONG.format(label, limit),
            raising=exc.ValueError,
        )
def generichash_blake2b_salt_personal(
    data: bytes,
    digest_size: int = crypto_generichash_BYTES,
    key: bytes = b"",
    salt: bytes = b"",
    person: bytes = b"",
) -> bytes:
    """One shot hash interface

    :param data: the input data to the hash function
    :type data: bytes
    :param digest_size: must be at most
        :py:data:`.crypto_generichash_BYTES_MAX`; the default digest size
        is :py:data:`.crypto_generichash_BYTES`
    :type digest_size: int
    :param key: must be at most
        :py:data:`.crypto_generichash_KEYBYTES_MAX` long
    :type key: bytes
    :param salt: must be at most
        :py:data:`.crypto_generichash_SALTBYTES` long; will be zero-padded
        if needed
    :type salt: bytes
    :param person: must be at most
        :py:data:`.crypto_generichash_PERSONALBYTES` long; will be
        zero-padded if needed
    :type person: bytes
    :return: digest_size long digest
    :rtype: bytes
    """
    _checkparams(digest_size, key, salt, person)
    ensure(
        isinstance(data, bytes),
        "Input data must be a bytes sequence",
        raising=exc.TypeError,
    )

    # libsodium expects salt/person at their full fixed lengths, so copy the
    # caller's (possibly shorter) values into zero-initialized buffers.
    padded_salt = ffi.new("unsigned char []", crypto_generichash_SALTBYTES)
    padded_person = ffi.new("unsigned char []", crypto_generichash_PERSONALBYTES)
    ffi.memmove(padded_salt, salt, len(salt))
    ffi.memmove(padded_person, person, len(person))

    out = ffi.new("unsigned char[]", digest_size)
    rc = lib.crypto_generichash_blake2b_salt_personal(
        out, digest_size, data, len(data), key, len(key),
        padded_salt, padded_person
    )
    ensure(rc == 0, "Unexpected failure", raising=exc.RuntimeError)

    return ffi.buffer(out, digest_size)[:]
_Blake2State = TypeVar("_Blake2State", bound="Blake2State")


class Blake2State:
    """
    Python-level wrapper for the crypto_generichash_blake2b state buffer
    """

    __slots__ = ["_statebuf", "digest_size"]

    def __init__(self, digest_size: int):
        # Opaque, fixed-size state structure managed by libsodium.
        self.digest_size = digest_size
        self._statebuf = ffi.new(
            "unsigned char[]", crypto_generichash_STATEBYTES
        )

    def __reduce__(self) -> NoReturn:
        """
        Raise the same exception as hashlib's blake implementation
        on copy.copy()
        """
        raise TypeError(
            "can't pickle {} objects".format(self.__class__.__name__)
        )

    def copy(self: _Blake2State) -> _Blake2State:
        # Duplicate the raw state so both objects can be finalized
        # independently.
        clone = type(self)(self.digest_size)
        ffi.memmove(
            clone._statebuf, self._statebuf, crypto_generichash_STATEBYTES
        )
        return clone
def generichash_blake2b_init(
    key: bytes = b"",
    salt: bytes = b"",
    person: bytes = b"",
    digest_size: int = crypto_generichash_BYTES,
) -> Blake2State:
    """
    Create a new initialized blake2b hash state

    :param key: must be at most
        :py:data:`.crypto_generichash_KEYBYTES_MAX` long
    :type key: bytes
    :param salt: must be at most
        :py:data:`.crypto_generichash_SALTBYTES` long; will be zero-padded
        if needed
    :type salt: bytes
    :param person: must be at most
        :py:data:`.crypto_generichash_PERSONALBYTES` long; will be
        zero-padded if needed
    :type person: bytes
    :param digest_size: must be at most
        :py:data:`.crypto_generichash_BYTES_MAX`; the default digest size
        is :py:data:`.crypto_generichash_BYTES`
    :type digest_size: int
    :return: a initialized :py:class:`.Blake2State`
    :rtype: object
    """
    _checkparams(digest_size, key, salt, person)

    # libsodium expects salt/person at their full fixed lengths, so copy the
    # caller's (possibly shorter) values into zero-initialized buffers.
    padded_salt = ffi.new("unsigned char []", crypto_generichash_SALTBYTES)
    padded_person = ffi.new("unsigned char []", crypto_generichash_PERSONALBYTES)
    ffi.memmove(padded_salt, salt, len(salt))
    ffi.memmove(padded_person, person, len(person))

    state = Blake2State(digest_size)
    rc = lib.crypto_generichash_blake2b_init_salt_personal(
        state._statebuf, key, len(key), digest_size, padded_salt, padded_person
    )
    ensure(rc == 0, "Unexpected failure", raising=exc.RuntimeError)

    return state
def generichash_blake2b_update(state: Blake2State, data: bytes) -> None:
    """Update the blake2b hash state

    :param state: a initialized Blake2bState object as returned from
        :py:func:`.crypto_generichash_blake2b_init`
    :type state: :py:class:`.Blake2State`
    :param data:
    :type data: bytes
    """
    # Validate the state first, then the payload, mirroring the C API order.
    ensure(
        isinstance(state, Blake2State),
        "State must be a Blake2State object",
        raising=exc.TypeError,
    )
    ensure(
        isinstance(data, bytes),
        "Input data must be a bytes sequence",
        raising=exc.TypeError,
    )

    rc = lib.crypto_generichash_blake2b_update(state._statebuf, data, len(data))
    ensure(rc == 0, "Unexpected failure", raising=exc.RuntimeError)
def generichash_blake2b_final(state: Blake2State) -> bytes:
    """Finalize the blake2b hash state and return the digest.

    :param state: a initialized Blake2bState object as returned from
        :py:func:`.crypto_generichash_blake2b_init`
    :type state: :py:class:`.Blake2State`
    :return: the blake2 digest of the passed-in data stream
    :rtype: bytes
    """
    ensure(
        isinstance(state, Blake2State),
        "State must be a Blake2State object",
        raising=exc.TypeError,
    )

    # Finalize into a maximum-size scratch buffer, then slice the result
    # down to the digest size recorded in the state.
    scratch = ffi.new("unsigned char[]", crypto_generichash_BYTES_MAX)
    rc = lib.crypto_generichash_blake2b_final(
        state._statebuf, scratch, state.digest_size
    )
    ensure(rc == 0, "Unexpected failure", raising=exc.RuntimeError)

    return ffi.buffer(scratch, state.digest_size)[:]
| {
"content_hash": "5d83f7cd113f9bf9eebe3f32a6e3fb65",
"timestamp": "",
"source": "github",
"line_count": 268,
"max_line_length": 78,
"avg_line_length": 30.76865671641791,
"alnum_prop": 0.6293960708222168,
"repo_name": "pyca/pynacl",
"id": "6ab385a59718221f54738f1727c757447427b04d",
"size": "8852",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/nacl/bindings/crypto_generichash.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "67265"
},
{
"name": "Batchfile",
"bytes": "5642"
},
{
"name": "C",
"bytes": "4746076"
},
{
"name": "C#",
"bytes": "1924"
},
{
"name": "CMake",
"bytes": "10446"
},
{
"name": "M4",
"bytes": "82907"
},
{
"name": "Makefile",
"bytes": "612466"
},
{
"name": "PHP",
"bytes": "563"
},
{
"name": "Python",
"bytes": "366871"
},
{
"name": "Shell",
"bytes": "507692"
},
{
"name": "Smarty",
"bytes": "26875"
},
{
"name": "VBScript",
"bytes": "294"
}
],
"symlink_target": ""
} |
import sys
from vpnporthole.session import Session
from vpnporthole.settings import Settings
from vpnporthole.argparsetree import ArgParseTree
class Main(ArgParseTree):
    """Root of the vpn-porthole command tree; sub-commands register on it."""
    def args(self, parser):
        # The root parser defines no options of its own.
        pass
class Action(ArgParseTree):
    """Base class for profile-scoped sub-commands.

    Resolves the "profile" argument into one or more Session objects and
    delegates the actual work to go().
    """
    settings = None

    def args(self, parser):
        parser.add_argument("profile", help='Profile name or "all"')

    def run(self, args):
        if args.profile != 'all':
            self.settings = Settings(args.profile)
            return self.go(Session(self.settings), args)
        # "all": apply the action to every known profile in name order.
        for profile_name in sorted(Settings.list_profile_names()):
            self.settings = Settings(profile_name)
            self.go(Session(self.settings), args)

    def go(self, session, args):
        raise NotImplementedError()
class Build(Action):
    """\
    Build profile
    Build the docker image for this profile
    """
    def go(self, session, args):
        # 0 on success, 1 on failure (shell convention).
        return 0 if session.build() else 1
class Start(Action):
    """\
    Start profile
    Start the docker container for this profile, requires user to enter password none configured
    """
    def go(self, session, args):
        try:
            return 0 if session.start() else 1
        except KeyboardInterrupt:
            # Treat an interrupted password prompt / startup as a failure.
            return 1
class Stop(Action):
    """\
    Stop profile
    Stop the docker container for this profile
    """
    def go(self, session, args):
        # 0 on success, 1 on failure (shell convention).
        return 0 if session.stop() else 1
class Status(Action):
    """\
    Profile status
    Determine if the docker container for this image is running
    """
    def go(self, session, args):
        running = session.status()
        label = 'RUNNING' if running else 'STOPPED'
        sys.stdout.write("%s %s %s@%s\n" % (label, self.settings.profile_name,
                                            self.settings.username(),
                                            self.settings.vpn()))
        # Exit code mirrors the status: 0 when running, 1 when stopped.
        return 0 if running else 1
class Health(Action):
    """\
    Profile health
    Run the user defined "health" hook inside the container
    """
    def go(self, session, args):
        exitcode = session.health()
        # The hook's exit code is passed through; 0 means healthy.
        label = 'OK' if exitcode == 0 else 'BAD'
        sys.stdout.write("%s %s %s@%s\n" % (label, self.settings.profile_name,
                                            self.settings.username(),
                                            self.settings.vpn()))
        return exitcode
class Refresh(Action):
    """\
    Profile refresh
    Run the user defined "refresh" hook inside the container
    """
    def go(self, session, args):
        # Propagate the hook's exit code directly.
        return session.refresh()
class Shell(Action):
    """\
    Shell into active profile
    Open shell in Docker container
    """
    def go(self, session, args):
        # 0 on success, 1 on failure (shell convention).
        return 0 if session.shell() else 1
class Info(Action):
    """\
    Docker container info for profile
    """
    def go(self, session, args):
        # 0 on success, 1 on failure (shell convention).
        return 0 if session.info() else 1
class Rm(Action):
    """\
    Stop the profile, and remove the docker container
    Remove any running/stopped containers and images for this profile
    """
    def go(self, session, args):
        # purge() removes containers and images for the profile.
        return 0 if session.purge() else 1
class Restart(Action):
    """\
    Restart profile
    Restart Docker container for this profile
    """
    def go(self, session, args):
        # Guard clauses: refuse when not running, bail out on a failed stop,
        # then report the outcome of the fresh start.
        if not session.status():
            sys.stderr.write("Not running!\n")
            return 1
        if not session.stop():
            sys.stderr.write("Failed to stop!\n")
            return 1
        if session.start():
            return 0
        sys.stderr.write("Failed to start!\n")
        return 1
class RouteAction(Action):
    # Shared base for the add-route/del-route commands: extends the common
    # profile argument with the subnet to operate on.
    def args(self, parser):
        super(RouteAction, self).args(parser)
        parser.add_argument('subnet', help="IPv4 subnet to route into active profile, e.g.: 10.1.2.0/24")
class AddRoute(RouteAction):
    """\
    Add route to active profile
    """
    # Command is spelled "add-route" on the CLI.
    name = 'add-route'

    def go(self, session, args):
        return 0 if session.add_route(args.subnet) else 1
class DelRoute(RouteAction):
    """\
    Remove route to active profile
    """
    # Command is spelled "del-route" on the CLI.
    name = 'del-route'

    def go(self, session, args):
        return 0 if session.del_route(args.subnet) else 1
class DomainAction(Action):
    # Shared base for the add-domain/del-domain commands: extends the common
    # profile argument with the DNS sub-domain to operate on.
    def args(self, parser):
        super(DomainAction, self).args(parser)
        parser.add_argument('domain', help="DNS sub-domain to delegate into the active profile, e.g.: example.com")
class AddDomain(DomainAction):
    """\
    Add DNS domain to active profile
    """
    # Command is spelled "add-domain" on the CLI.
    name = 'add-domain'

    def go(self, session, args):
        return 0 if session.add_domain(args.domain) else 1
class DelDomain(DomainAction):
    """\
    Remove DNS domain to active profile
    """
    # Command is spelled "del-domain" on the CLI.
    name = 'del-domain'

    def go(self, session, args):
        return 0 if session.del_domain(args.domain) else 1
class Docs(ArgParseTree):
    """\
    vpn-porthole documentation
    """
    def run(self, args):
        # Imported lazily: pkg_resources is slow and only needed here.
        import pkg_resources
        try:
            # Link to the docs tagged with the installed release version.
            tag = 'v' + pkg_resources.get_distribution('vpn-porthole').version
        except pkg_resources.DistributionNotFound:
            # Running from a source checkout; fall back to the master branch.
            tag = 'master'
        print("vpn-porthole documentation can be found at:")
        print("  https://github.com/sourcesimian/vpn-porthole/blob/%s/README.md" % tag)
        return 0
def main():
    """Build the command tree, dispatch to the selected action, and return
    its exit status (3 on Ctrl-C)."""
    root = Main()
    # Registration order determines help output order.
    for action_cls in (Build, Start, Status, Health, Refresh, Stop, Restart,
                       AddRoute, DelRoute, AddDomain, DelDomain, Info, Shell,
                       Rm, Docs):
        action_cls(root)
    try:
        return root.main()
    except KeyboardInterrupt:
        sys.stderr.write('^C\n')
        return 3
# Script entry point: exit status comes from the command tree (3 on Ctrl-C).
if __name__ == "__main__":
    exit(main())
| {
"content_hash": "1718ed4cef31dca6752f6b0b4783fc79",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 116,
"avg_line_length": 22.18705035971223,
"alnum_prop": 0.5643644617380026,
"repo_name": "sourcesimian/vpn-porthole",
"id": "49eec758495bfc50f78e745fc23ef966aedc4a9c",
"size": "6191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vpnporthole/cli.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1138"
},
{
"name": "Python",
"bytes": "46451"
},
{
"name": "Shell",
"bytes": "137"
}
],
"symlink_target": ""
} |
import math
import re
from wtforms import fields, widgets
from flask import request, url_for, redirect, flash, abort
from flask_superadmin.babel import gettext
from flask_superadmin.base import BaseView, expose
from flask_superadmin.form import (BaseForm, ChosenSelectWidget, FileField,
DatePickerWidget, DateTimePickerWidget)
import traceback
class AdminModelConverter(object):
    """Converter mixin that swaps the default form widgets for the admin's
    enhanced ones (Chosen selects, date/datetime pickers, file field)."""

    def convert(self, *args, **kwargs):
        field = super(AdminModelConverter, self).convert(*args, **kwargs)
        if not field:
            return field
        # The explicitly configured widget wins over the field-class default.
        widget = field.kwargs.get('widget', field.field_class.widget)
        if isinstance(widget, widgets.Select):
            field.kwargs['widget'] = ChosenSelectWidget(
                multiple=widget.multiple)
        elif issubclass(field.field_class, fields.DateTimeField):
            field.kwargs['widget'] = DateTimePickerWidget()
        elif issubclass(field.field_class, fields.DateField):
            field.kwargs['widget'] = DatePickerWidget()
        elif issubclass(field.field_class, fields.FileField):
            field.field_class = FileField
        return field
first_cap_re = re.compile('(.)([A-Z][a-z]+)')
def camelcase_to_space(name):
return first_cap_re.sub(r'\1 \2', name)
def prettify(name):
    """Turn a snake_case identifier into a title-cased label.

    The parameter was renamed from ``str`` to ``name`` to stop shadowing the
    ``str`` builtin; all in-module callers pass it positionally.
    """
    return name.replace('_', ' ').title()
class BaseModelAdmin(BaseView):
    """ BaseModelAdmin provides create/edit/delete functionality for an
    abstract Model. The abstraction is further customized by the
    backend-specific model admin (see flask_superadmin/model/backends/) and
    by the user-defined admin classes inheriting from ModelAdmin.

    Fixes applied in review:
      * ``raise NotImplemented()`` raised ``TypeError: 'NotImplementedType'
        object is not callable`` instead of signalling an abstract method;
        replaced with ``raise NotImplementedError()`` throughout.
      * ``except Exception, ex`` / ``print x`` were Python-3-incompatible;
        rewritten as ``except Exception as ex`` / ``print(...)``, both of
        which are also valid on Python 2.6+.
    """
    # Number of objects to display per page in the list view
    list_per_page = 20

    # Columns to display in the list index - can be field names or callables.
    # Admin's methods have higher priority than the fields/methods on
    # the model or document.
    list_display = tuple()

    # Only fields with names specified in `fields` will be displayed in the
    # form (minus the ones mentioned in `exclude`). The order is preserved,
    # too. You can also include methods that are on the model admin, or on the
    # model/document, as long as they are marked as read-only (i.e. included
    # in `readonly_fields`). Priority of fields' lookup: methods on the model
    # admin, methods/fields on the model/document.
    fields = tuple()
    readonly_fields = tuple()
    exclude = tuple()
    form = None

    # Per-action permission switches checked by the corresponding views.
    can_edit = True
    can_create = True
    can_delete = True

    list_template = 'admin/model/list.html'
    edit_template = 'admin/model/edit.html'
    add_template = 'admin/model/add.html'
    delete_template = 'admin/model/delete.html'

    search_fields = tuple()
    field_overrides = {}

    # A dictionary of field_name: overridden_params_dict, e.g.
    # { 'name': { 'label': 'Name', 'description': 'This is a name' } }
    # Parameters that can be overridden: label, description, validators,
    # filters, default
    field_args = None

    @staticmethod
    def model_detect(model):
        """Return True if this admin can handle *model*; backends override."""
        return False

    def __init__(self, model=None, name=None, category=None, endpoint=None,
                 url=None):
        """Derive a display name and endpoint from the model class when not
        given explicitly, then register with the BaseView machinery."""
        if name is None:
            name = '%s' % camelcase_to_space(model.__name__)
        if endpoint is None:
            endpoint = ('%s' % model.__name__).lower()
        super(BaseModelAdmin, self).__init__(name, category, endpoint, url)
        if model:
            self.model = model

    def get_display_name(self):
        """Human-readable name of the managed model class."""
        return self.model.__name__

    def allow_pk(self):
        """Whether the form should expose the primary key (i.e. it is not
        auto-incremented by the backend)."""
        return not self.model._meta.auto_increment

    def get_column(self, instance, name):
        """Resolve a (possibly dotted) column *name* against *instance*.

        Admin-level callables take priority and receive the instance;
        otherwise attributes are looked up on the object chain, calling
        zero-argument callables along the way. Traversal stops early on a
        falsy intermediate value.
        """
        parts = name.split('.')
        value = instance
        for p in parts:
            # admin's methods have higher priority than the fields/methods on
            # the model or document. If a callable is found on the admin
            # level, it's also passed an instance object
            if hasattr(self, p) and callable(getattr(self, p)):
                value = getattr(self, p)(instance)
            else:
                value = getattr(value, p, None)
                if callable(value):
                    value = value()
            if not value:
                break
        return value

    def get_reference(self, column_value):
        """Return the admin URL for *column_value* if its exact type is
        registered with the admin, else None (implicit)."""
        for model, model_view in self.admin._models:
            # Deliberate exact-type match: subclasses map to their own views.
            if type(column_value) == model:
                return '/admin/%s/%s/' % (model_view.endpoint,
                                          self.get_pk(column_value))

    def get_readonly_fields(self, instance):
        """Build {field_name: {'label', 'value', 'url'}} for every entry in
        ``readonly_fields``, preferring admin-level callables over model
        attributes and linking referenced registered models."""
        ret_vals = {}
        for field in self.readonly_fields:
            self_field = getattr(self, field, None)
            if callable(self_field):
                val = self_field(instance)
            else:
                val = getattr(instance, field)
                if callable(val):
                    val = val()
            if not isinstance(val, dict):
                # Check if the value is a reference field to a doc/model
                # registered in the admin. If so, link to it.
                reference = self.get_reference(val)
                val = {
                    'label': prettify(field),
                    'value': val,
                    'url': reference if reference else None
                }
            ret_vals[field] = val
        return ret_vals

    def get_converter(self):
        """Return the backend's form-field converter; backends override."""
        raise NotImplementedError()

    def get_model_form(self):
        """ Returns the model form, should get overridden in backend-specific
        view.
        """
        raise NotImplementedError()

    def get_form(self):
        """Build the WTForms form class for the managed model, honouring the
        fields/readonly/exclude/field_args configuration."""
        model_form = self.get_model_form()
        converter = self.get_converter()
        if isinstance(converter, type):
            # A converter class was returned: instantiate it.
            converter = converter()
        form = model_form(self.model, base_class=BaseForm, fields=self.fields,
                          readonly_fields=self.readonly_fields,
                          exclude=self.exclude, field_args=self.field_args,
                          converter=converter)
        return form

    def get_add_form(self):
        """Form used by the add view; defaults to the edit form."""
        return self.get_form()

    def get_objects(self, *pks):
        """Fetch several instances by primary key; backends override."""
        raise NotImplementedError()

    def get_object(self, pk):
        """Fetch one instance by primary key; backends override."""
        raise NotImplementedError()

    def get_pk(self, instance):
        """Return the primary key of *instance*; backends override.
        The base implementation returns None."""
        return

    def save_model(self, instance, form, adding=False):
        """Persist *form* onto *instance*; backends override."""
        raise NotImplementedError()

    def delete_models(self, *pks):
        """Delete the instances with the given pks; backends override."""
        raise NotImplementedError()

    def is_sortable(self, column):
        """Whether the list view can sort on *column*; backends override."""
        return False

    def field_name(self, field):
        """Display label for a field name."""
        return prettify(field)

    def construct_search(self, field_name):
        """Build a backend search criterion; backends override."""
        raise NotImplementedError()

    def get_queryset(self):
        """Return the backend queryset; backends override."""
        raise NotImplementedError()

    def get_list(self):
        """Return (total_count, page_of_objects); backends override."""
        raise NotImplementedError()

    def get_url_name(self, name):
        """Map a logical view name to its blueprint-relative endpoint."""
        URLS = {
            'index': '.list',
            'add': '.add',
            'delete': '.delete',
            'edit': '.edit'
        }
        return URLS[name]

    def dispatch_save_redirect(self, instance):
        """Redirect after a successful save, honouring the pressed submit
        button (continue editing / add another / back to list)."""
        if '_edit' in request.form:
            return redirect(
                url_for(self.get_url_name('edit'), pk=self.get_pk(instance))
            )
        elif '_add_another' in request.form:
            return redirect(url_for(self.get_url_name('add')))
        else:
            return redirect(url_for(self.get_url_name('index')))

    @expose('/add/', methods=('GET', 'POST'))
    def add(self):
        """Create view: render the empty form on GET, validate and save on
        POST. Returns 403 when creation is disabled."""
        if not self.can_create:
            abort(403)
        Form = self.get_add_form()
        if request.method == 'POST':
            form = Form()
            if form.validate_on_submit():
                try:
                    instance = self.save_model(self.model(), form, adding=True)
                    flash(gettext('New %(model)s saved successfully',
                                  model=self.get_display_name()), 'success')
                    return self.dispatch_save_redirect(instance)
                except Exception as ex:
                    print(traceback.format_exc())
                    if hasattr(self, 'session'):
                        # SQLAlchemy-style backends need an explicit rollback.
                        self.session.rollback()
                    flash(gettext('Failed to add model. %(error)s',
                                  error=str(ex)), 'error')
        else:
            try:
                form = Form(obj=self.model())
            except TypeError:
                raise Exception('The database model for %r should have an '
                                '__init__ with all arguments set to defaults.'
                                % self.model.__name__)
        return self.render(self.add_template, model=self.model, form=form)

    @property
    def page(self):
        """Current zero-based page number from the query string."""
        return request.args.get('page', 0, type=int)

    def total_pages(self, count):
        """Number of pages needed for *count* objects."""
        return int(math.ceil(float(count) / self.list_per_page))

    @property
    def sort(self):
        """(column, descending) parsed from the 'sort' query parameter;
        a leading '-' requests descending order."""
        sort = request.args.get('sort', None)
        if sort and sort.startswith('-'):
            desc = True
            sort = sort[1:]
        else:
            desc = False
        return sort, desc

    @property
    def search(self):
        """Free-text search query from the query string, or None."""
        return request.args.get('q', None)

    def page_url(self, page):
        """URL of the list view for *page*, preserving sort and search."""
        search_query = self.search
        sort, desc = self.sort
        if sort and desc:
            sort = '-' + sort
        if page == 0:
            # Omit the parameter entirely for the first page.
            page = None
        return url_for(self.get_url_name('index'), page=page, sort=sort,
                       q=search_query)

    def sort_url(self, sort, desc=None):
        """URL of the list view sorted by *sort*, preserving the search."""
        if sort and desc:
            sort = '-' + sort
        search_query = self.search
        return url_for(self.get_url_name('index'), sort=sort, q=search_query)

    @expose('/', methods=('GET', 'POST',))
    def list(self):
        """
        List view
        """
        # Grab parameters from URL
        if request.method == 'POST':
            # Bulk actions: currently only "delete selected".
            id_list = request.form.getlist('_selected_action')
            if id_list and (request.form.get('action-delete') or
                            request.form.get('action', None) == 'delete'):
                return self.delete(*id_list)
        sort, sort_desc = self.sort
        page = self.page
        search_query = self.search
        count, data = self.get_list(page=page, sort=sort, sort_desc=sort_desc,
                                    search_query=search_query)
        return self.render(self.list_template, data=data, page=page,
                           total_pages=self.total_pages(count), sort=sort,
                           sort_desc=sort_desc, count=count, modeladmin=self,
                           search_query=search_query)

    @expose('/<pk>/', methods=('GET', 'POST'))
    def edit(self, pk):
        """Edit view: 404 on unknown pk, validate and save on POST."""
        try:
            instance = self.get_object(pk)
        except self.model.DoesNotExist:
            abort(404)
        Form = self.get_form()
        if request.method == 'POST':
            form = Form(obj=instance)
            if form.validate_on_submit():
                try:
                    self.save_model(instance, form, adding=False)
                    flash(
                        'Changes to %s saved successfully' % self.get_display_name(),
                        'success'
                    )
                    return self.dispatch_save_redirect(instance)
                except Exception as ex:
                    print(traceback.format_exc())
                    flash(gettext('Failed to edit model. %(error)s',
                                  error=str(ex)), 'error')
        else:
            form = Form(obj=instance)
        return self.render(self.edit_template, model=self.model, form=form,
                           pk=self.get_pk(instance), instance=instance)

    @expose('/<pk>/delete/', methods=('GET', 'POST'))
    def delete(self, pk=None, *pks):
        """Delete view: show a confirmation page on GET, delete on confirmed
        POST. Accepts one pk from the URL or several from a bulk action."""
        if not self.can_delete:
            abort(403)
        if pk:
            pks += pk,
        if request.method == 'POST' and 'confirm_delete' in request.form:
            count = len(pks)
            self.delete_models(*pks)
            flash(
                'Successfully deleted %s %ss' % (count, self.get_display_name()),
                'success'
            )
            return redirect(url_for(self.get_url_name('index')))
        else:
            instances = self.get_objects(*pks)
            return self.render(self.delete_template, instances=instances)
class ModelAdmin(BaseModelAdmin):
    # Public base class users subclass to customize a model's admin; all
    # behavior lives in BaseModelAdmin and the backend-specific mixins.
    pass
| {
"content_hash": "0c9bd7bfa2dbc525fdae8acf9ad4335e",
"timestamp": "",
"source": "github",
"line_count": 376,
"max_line_length": 85,
"avg_line_length": 33.172872340425535,
"alnum_prop": 0.5504690130682274,
"repo_name": "0x1997/Flask-SuperAdmin",
"id": "6644cab2436793a55df866be6227489f53022fbc",
"size": "12473",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask_superadmin/model/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import sys
import os
import unittest
import pkgutil
import webbrowser
import coverage
import optparse
from utrunner import jsontestrunner, wrappers
def discover_and_run_tests(test_dir, timer=False, debug=False, json_file_path=None):
    """Discover every test module under *test_dir* and run the suite.

    timer: wrap each test's run() to record per-test durations and print a
        sorted timing table afterwards.
    debug: wrap each test case so a failure drops into the debugger
        (via wrappers.debug_testcase).
    json_file_path: when given, write results in JSON form to that file
        instead of the plain-text runner's stderr output.
    Returns the unittest result object.
    """
    # Separate output from the invoking command
    print("=" * 70)
    # use the default shared TestLoader instance
    test_loader = unittest.defaultTestLoader
    # create a TestSuite
    test_suite = unittest.TestSuite()
    # discover all tests in .\tests directory
    timings = []
    for imp, modname, _ in pkgutil.walk_packages([test_dir]):
        # `imp` is the module finder yielded by walk_packages; import the
        # discovered test module through it.
        mod = imp.find_module(modname).load_module(modname)
        for test in test_loader.loadTestsFromModule(mod):
            if timer:
                # Monkeypatch run() so each execution appends
                # (name, elapsed) to `timings`.
                for item in test._tests:
                    item.run = wrappers.timing(item.run, item._testMethodName, timings)
            if debug:
                for item in test._tests:
                    wrappers.debug_testcase(item)
            test_suite.addTests(test)
    if json_file_path is not None:
        # open file
        with open(json_file_path, 'w') as f:
            # use the custom JSON test runner
            test_runner = jsontestrunner.JSONTestRunner(f)
            results = test_runner.run(test_suite)
    else:
        # use the basic test runner that outputs to sys.stderr
        test_runner = unittest.TextTestRunner()
        results = test_runner.run(test_suite)
    if timer:
        # Slowest tests first.
        sorted_timings = sorted(timings, key=lambda x: x[1], reverse=True)
        print()
        print("Timings (in milliseconds):")
        print()
        for item in sorted_timings:
            print("{} {}".format(item[0], item[1]))
    return results
def test_with_coverage(source_directory=None, test_directory=None, xml=False, html=False, html_and_launch=False, timer=False, debug=False, report=False, json_file_path=None, force=False):
    """Discover and run the unit tests, optionally under coverage.

    source_directory: package to measure coverage for; defaults to a
        directory named after the current working directory.
    test_directory: where to discover tests; defaults to "unittests".
    xml / html / report: emit the corresponding coverage report.
    html_and_launch: emit the HTML report and open it in the browser.
    timer / debug / json_file_path: passed through to
        discover_and_run_tests().
    force: produce reports even when the test run failed.
    Returns the unittest result object.
    """
    current_dir = os.getcwd()
    if source_directory is None:
        source_directory = os.path.split(current_dir)[1]
    if test_directory is None:
        test_directory = "unittests"
    source_directory = os.path.join(current_dir, source_directory)
    test_directory = os.path.join(current_dir, test_directory)

    # BUG FIX: `xml` was missing from this condition, so `--xml` on its own
    # never started coverage and silently produced no report.
    if report or xml or html or html_and_launch:
        cov = coverage.Coverage(source=[source_directory])
        cov.start()
        results = discover_and_run_tests(test_directory, timer, debug, json_file_path)
        cov.stop()
        cov.save()
        if results.wasSuccessful() or force:
            # BUG FIX: `-w` (html_and_launch) must also *generate* the HTML
            # report; previously it opened a possibly stale or missing file.
            if html or html_and_launch:
                cov.html_report()
            if html_and_launch:
                webbrowser.open(os.path.join(current_dir, 'htmlcov', 'index.html'))
            if report:
                cov.report()
            if xml:
                cov.xml_report()
    else:
        results = discover_and_run_tests(test_directory, timer, debug, json_file_path)
    return results
def main():
    """Command-line entry point: parse options, run the tests, and exit.

    Exits with status 0 when all tests pass, 1 otherwise.
    """
    parser = optparse.OptionParser("usage: %prog [options]")
    parser.add_option("-s", "--source", dest="source_directory", default=None, type="string",
                      help="Location of source files (for determining code coverage)")
    parser.add_option("-t", "--tests", dest="test_directory", default=None, type="string",
                      help="Location of unit test files")
    parser.add_option('--timer', action="store_true", default=False, dest="timer",
                      help="Times the individual unittest execution times")
    parser.add_option('-d', '--debug', action="store_true", default=False, dest="debug",
                      help="Attach debugger when a test case fails")
    parser.add_option("-c", '--coverage', action="store_true", default=False, dest="html",
                      help="Generate an HTML report")
    parser.add_option("-w", '--web', action="store_true", default=False, dest="html_and_launch",
                      help="Generate an HTML report and opens the report in the default web browser")
    parser.add_option("-x", '--xml', action="store_true", default=False, dest="xml",
                      help="Generate an XML report")
    parser.add_option("-r", '--report', action="store_true", default=False, dest="report",
                      help="Generate a text report and displays to the console")
    parser.add_option("-f", '--force', action="store_true", default=False, dest="force",
                      help="Continue with specified reporting even if unit tests fail")
    parser.add_option("-j", "--json", dest="json_file_path", default=None, type="string",
                      help="Output via JSON test results format to FILE", metavar="FILE")
    (options, args) = parser.parse_args()
    # The option dest names match test_with_coverage's keyword parameters.
    input_args = options.__dict__
    # Validate json file input
    file_path = input_args.get('json_file_path', None)
    if file_path is not None:
        # Ensure the output directory exists, defaulting to the current
        # directory when only a bare filename was given.
        # BUG FIX: the original called os.path.dirname(file_path[0]), which
        # took the dirname of the *first character* of the path (always ''),
        # so nested output directories were never created.
        directory = os.path.dirname(file_path) or '.'
        os.makedirs(directory, exist_ok=True)
    results = test_with_coverage(**input_args)
    # Exit code 0 on success, 1 on failure (clearer than the original
    # `not results.wasSuccessful() * 1` precedence puzzle).
    sys.exit(0 if results.wasSuccessful() else 1)
| {
"content_hash": "376f3a4157f50d77cfc7773a09b47a36",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 187,
"avg_line_length": 44.57017543859649,
"alnum_prop": 0.6181853965754772,
"repo_name": "kevwo/utrunner",
"id": "f9202d880a9fc7f09d4c17d99914bcbb58a0ca8b",
"size": "5081",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utrunner/utrunner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10133"
}
],
"symlink_target": ""
} |
from lxml import etree
from io import StringIO
from lxml import html
def parse_html_to_tree(html_string):
    """
    Parse an HTML string from a response (e.g. request.text) with lxml.

    :param html_string: String of html.
    :return: lxml ElementTree of said html, or None when the page reports
        that no records were found.
    """
    no_record_markers = (
        'Sorry, no records matching',
        'BEGIN: No records found error',
    )
    if any(marker in html_string for marker in no_record_markers):
        return None
    return html.fromstring(html_string)
| {
"content_hash": "0125dbc2a0bbd1d084e9651c7ca38860",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 116,
"avg_line_length": 28.944444444444443,
"alnum_prop": 0.7024952015355086,
"repo_name": "arthurtyukayev/python-safer",
"id": "df5947331a49923f861c871d75ffc44ab19e56f3",
"size": "521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "safer/crawler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28982"
}
],
"symlink_target": ""
} |
import os
import tarfile
from bento.core.package import \
PackageDescription, file_list
from bento.commands.errors \
import \
UsageException
from bento.commands.core \
import \
Command, Option
from bento.core.utils \
import \
ensure_dir
from bento._config import \
BENTO_SCRIPT
def tarball_basename(dist_name, version=None):
    """Return the tarball base name: "<name>-<version>", or just the name
    when no (truthy) version is given."""
    return "%s-%s" % (dist_name, version) if version else dist_name
class SdistCommand(Command):
    """bentomaker ``sdist`` command: build a source tarball for the project."""
    long_descr = """\
Purpose: create a tarball for the project
Usage: bentomaker sdist [OPTIONS]."""
    short_descr = "create a tarball."
    common_options = Command.common_options \
            + [Option("--output-dir",
                      help="Output directory", default="dist")]

    def __init__(self):
        Command.__init__(self)
        # Filled in by run(): absolute path of the generated tarball and
        # the name of the top-level directory inside it.
        self.tarname = None
        self.topdir = None

    def run(self, ctx):
        argv = ctx.get_command_arguments()
        p = ctx.options_context.parser
        o, a = p.parse_args(argv)
        if o.help:
            p.print_help()
            return
        filename = BENTO_SCRIPT
        # Only verify the bento script exists when no positional argument
        # was given.  NOTE(review): positional arguments are otherwise
        # ignored and `filename` is always BENTO_SCRIPT -- confirm intended.
        if not len(a) > 0:
            if not os.path.exists(filename):
                raise UsageException("Missing %s file" % BENTO_SCRIPT)
        pkg = PackageDescription.from_file(filename)
        tarname = tarball_basename(pkg.name, pkg.version) + ".tar.gz"
        self.tarname = os.path.abspath(os.path.join(o.output_dir, tarname))
        self.topdir = "%s-%s" % (pkg.name, pkg.version)
        create_tarball(pkg, ctx.top_node, self.tarname, self.topdir)
def create_tarball(pkg, top_node, tarname=None, topdir=None):
    """Write a gzipped source tarball for *pkg* and return its path.

    Defaults: tarname is "<name>-<version>.tar.gz", topdir (the directory
    prefix inside the archive) is "<name>-<version>".
    """
    if tarname is None:
        tarname = "%s.tar.gz" % tarball_basename(pkg.name, pkg.version)
    if topdir is None:
        topdir = "%s-%s" % (pkg.name, pkg.version)
    ensure_dir(tarname)
    # Context manager guarantees the archive is closed even on error,
    # exactly like the original try/finally.
    with tarfile.open(tarname, "w:gz") as tf:
        for relpath in file_list(pkg, top_node):
            tf.add(relpath, os.path.join(topdir, relpath))
    return tarname
| {
"content_hash": "592834fcbe318e30b057a5a882cd1b75",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 75,
"avg_line_length": 29.830985915492956,
"alnum_prop": 0.5906515580736544,
"repo_name": "abadger/Bento",
"id": "b318343f8bb639a4f24d12f37547abd9c7191d14",
"size": "2118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bento/commands/sdist.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "8367"
},
{
"name": "C++",
"bytes": "165"
},
{
"name": "FORTRAN",
"bytes": "97"
},
{
"name": "Python",
"bytes": "1018735"
},
{
"name": "Shell",
"bytes": "5067"
}
],
"symlink_target": ""
} |
"""Templates for generating event classes for structured metrics."""
import codegen
########
# HEADER
########
HEADER_FILE_TEMPLATE = """\
// Generated from gen_events.py. DO NOT EDIT!
// source: structured.xml
#ifndef {file.guard_path}
#define {file.guard_path}
#include <cstdint>
#include <string>
#include "components/metrics/structured/event_base.h"
namespace metrics {{
namespace structured {{
namespace events {{
constexpr uint64_t kProjectNameHashes[] = {project_name_hashes};\
{event_code}
}} // namespace events
}} // namespace structured
}} // namespace metrics
#endif // {file.guard_path}\
"""
HEADER_EVENT_TEMPLATE = """
class {event.name} final : public ::metrics::structured::EventBase {{
public:
{event.name}();
~{event.name}() override;
static constexpr uint64_t kEventNameHash = UINT64_C({event.name_hash});
static constexpr uint64_t kProjectNameHash = UINT64_C({event.project_name_hash});\
{metric_code}
}};\
"""
HEADER_METRIC_TEMPLATE = """
static constexpr uint64_t k{metric.name}NameHash = UINT64_C({metric.hash});
{event.name}& Set{metric.name}(const {metric.type} value);\
"""
HEADER = codegen.Template(
basename="structured_events.h",
file_template=HEADER_FILE_TEMPLATE,
event_template=HEADER_EVENT_TEMPLATE,
metric_template=HEADER_METRIC_TEMPLATE)
######
# IMPL
######
IMPL_FILE_TEMPLATE = """\
// Generated from gen_events.py. DO NOT EDIT!
// source: structured.xml
#include "{file.dir_path}/structured_events.h"
namespace metrics {{
namespace structured {{
namespace events {{\
{event_code}
}} // namespace events
}} // namespace structured
}} // namespace metrics\
"""
IMPL_EVENT_TEMPLATE = """
{event.name}::{event.name}() :
::metrics::structured::EventBase(kEventNameHash, kProjectNameHash) {{}}
{event.name}::~{event.name}() = default;\
{metric_code}\
"""
IMPL_METRIC_TEMPLATE = """
{event.name}& {event.name}::Set{metric.name}(const {metric.type} value) {{
{metric.setter}(k{metric.name}NameHash, value);
return *this;
}}\
"""
IMPL = codegen.Template(
basename="structured_events.cc",
file_template=IMPL_FILE_TEMPLATE,
event_template=IMPL_EVENT_TEMPLATE,
metric_template=IMPL_METRIC_TEMPLATE)
def WriteFiles(outdir, relpath, data):
    """Generate both the header and the implementation file from *data*."""
    for template in (HEADER, IMPL):
        template.WriteFile(outdir, relpath, data)
| {
"content_hash": "b1cca8bb0469bb123ebea40e89d4e462",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 84,
"avg_line_length": 22.259615384615383,
"alnum_prop": 0.6950323974082073,
"repo_name": "endlessm/chromium-browser",
"id": "4ead3094891fd763d1a6f817f0e5b19c8702362c",
"size": "2478",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/metrics/structured/events_template.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from setuptools import setup, find_packages
from sftpcloudfs.constants import version, project_url
def readme():
    """Return the contents of README.md, or '' if it cannot be read.

    The original used a bare ``except`` (which also swallows
    KeyboardInterrupt/SystemExit) and never closed the file handle.
    """
    try:
        with open('README.md') as f:
            return f.read()
    except (IOError, OSError):
        return ""
# Distribution metadata for the sftp-cloudfs package; version and project
# URL come from sftpcloudfs.constants (imported above).
setup(name='sftp-cloudfs',
      version=version,
      description='SFTP interface to OpenStack Object Storage (Swift)',
      long_description=readme(),
      long_description_content_type="text/markdown",
      author='Nick Craig-Wood',
      author_email='nick@memset.com',
      url=project_url,
      license='MIT',
      include_package_data=True,
      zip_safe=False,
      install_requires=['paramiko>=1.7.6', 'python-swiftclient>=2.0.0', 'python-daemon>=1.5',
                        'python-memcached>=1.45', 'ftp-cloudfs>=0.35'],
      scripts=['bin/sftpcloudfs'],
      packages = find_packages(exclude=['tests']),
      tests_require = ["nose"],
      classifiers = [
          'Development Status :: 4 - Beta',
          'Environment :: Console',
          'Programming Language :: Python',
          'Operating System :: OS Independent',
          'Environment :: No Input/Output (Daemon)',
          'License :: OSI Approved :: MIT License',
      ],
      test_suite = "nose.collector",
      )
| {
"content_hash": "fb7d8c6509006ce79f10eae26f0b3a05",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 93,
"avg_line_length": 34.22857142857143,
"alnum_prop": 0.6010016694490818,
"repo_name": "Memset/sftpcloudfs",
"id": "afea04ecb7322cc7cc3d9456893ca6f56c08407f",
"size": "1220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "591"
},
{
"name": "Python",
"bytes": "78269"
},
{
"name": "Shell",
"bytes": "820"
}
],
"symlink_target": ""
} |
import urllib
import json
import os
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout: the module-level `app` object is
# referenced by the @app.route decorator below and by app.run() in __main__.
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def webhook():
    """Handle an API.AI webhook POST and return the fulfillment JSON."""
    req = request.get_json(silent=True, force=True)
    print("Request:")
    print(json.dumps(req, indent=4))
    # Build and serialize the fulfillment payload, echoing it for debugging.
    body = json.dumps(makeWebhookResult(req), indent=4)
    print(body)
    response = make_response(body)
    response.headers['Content-Type'] = 'application/json'
    return response
def makeWebhookResult(req):
    """Build the API.AI fulfillment response for a shipping-cost request.

    :param req: parsed webhook request dict from API.AI.
    :return: fulfillment dict with speech/displayText/source, or {} when
        the request is not a "shipping.cost" action.
    """
    if req.get("result").get("action") != "shipping.cost":
        return {}
    result = req.get("result")
    parameters = result.get("parameters")
    zone = parameters.get("shipping-zone")
    # Flat-rate shipping table, in euros.  An unknown zone raises KeyError.
    cost = {'Europe': 100, 'North America': 200, 'South America': 300, 'Asia': 400, 'Africa': 500}
    speech = "The cost of shipping to " + zone + " is " + str(cost[zone]) + " euros."
    print("Response:")
    print(os.getcwd())
    print(speech)
    # Debug trace of the spoken response.  BUG FIXES: the original opened
    # the file *before* the early return (leaking the handle and truncating
    # new.txt for non-matching actions) and wrote the literal string
    # "speech" instead of the variable.
    with open("new.txt", 'w') as debug_file:
        debug_file.write(speech)
    return {
        "speech": speech,
        "displayText": speech,
        #"data": {},
        # "contextOut": [],
        "source": "apiai-onlinestore-shipping"
    }
if __name__ == '__main__':
    # Honor the hosting platform's $PORT (e.g. Heroku); default to 5000.
    port = int(os.getenv('PORT', 5000))
    print("Starting app on port %d" % port)
    # NOTE(review): debug=True enables the Werkzeug debugger -- not safe
    # for production deployments; confirm this is development-only.
    app.run(debug=True, port=port, host='0.0.0.0')
| {
"content_hash": "40917824af9d1132e8e5455b34ff25eb",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 93,
"avg_line_length": 23.383333333333333,
"alnum_prop": 0.6022808267997148,
"repo_name": "happyeye01/api-ai",
"id": "ba158730182564fea2973238dec7e8e18599f39b",
"size": "1426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1426"
}
],
"symlink_target": ""
} |
'''maintain a pickled cache file on disk'''
import cPickle as pickle
import os
import pdb
import time
import unittest
# Usage example; the `if False:` guard means it is documentation only and
# never executes.
if False:
    # example
    class Cache(object):
        pass

    def read_data(dictionary):
        'return the data; it will be pickled and written to the file at path_to_cache'
        return None

    c = Cache(verbose=True)
    path_to_cache = os.path.join('a', 'b', 'c')
    dictionary = {'arg1': 123}
    returned_value_from_read_data = c.read(read_data, path_to_cache, dictionary)
class Cache(object):
def __init__(self, verbose=False):
self.verbose = verbose
def read(self, read_data_function, path_to_cache, dictionary):
'return whatever read_data_function(**kwds) returns'
start_time = time.time()
if os.path.exists(path_to_cache):
with open(path_to_cache, 'r') as f:
cache = pickle.load(f)
if self.verbose:
print 'read cache; elapsed wall clock time', time.time() - start_time
else:
cache = read_data_function(dictionary)
if self.verbose:
print 'read underlying data; elapsed wall clock time', time.time() - start_time
start_time = time.time()
with open(path_to_cache, 'w') as f:
pickle.dump(cache, f)
if self.verbose:
print 'write cache: elapsed wall clock time', time.time() - start_time
return cache
class CacheTest(unittest.TestCase):
    """Exercise Cache.read: a miss populates the file, a hit skips the reader."""

    def test_1(self):
        read_data_result = 'my data'
        dictionary = {'abc': 123}

        # Counts how many times the underlying (expensive) read happens.
        class Reader(object):
            def __init__(self):
                self.invocations = 0

            def read(self):
                self.invocations += 1
                return read_data_result

        reader = Reader()

        def read_data(dictionary):
            self.assertEqual(dictionary['abc'], 123)
            return reader.read()

        verbose = False
        dir_temp = os.getenv('temp')  # for now, just support Windows
        path_to_cache = os.path.join(dir_temp, 'Cache-test.pickle')
        # Start from a clean slate: no cache file on disk.
        if os.path.isfile(path_to_cache):
            os.remove(path_to_cache)
        self.assertFalse(os.path.isfile(path_to_cache))
        c = Cache(verbose=verbose)
        # First read: cache miss -> reader invoked once, file created.
        cached_data_1 = c.read(read_data, path_to_cache, dictionary)
        self.assertEqual(read_data_result, cached_data_1)
        self.assertTrue(os.path.isfile(path_to_cache))
        self.assertEqual(reader.invocations, 1)
        # Second read: cache hit -> reader NOT invoked again.
        cached_data_2 = c.read(read_data, path_to_cache, dictionary)
        self.assertEqual(read_data_result, cached_data_2)
        self.assertTrue(os.path.isfile(path_to_cache))
        self.assertEqual(reader.invocations, 1)
        self.assertEqual(cached_data_1, cached_data_2)
        # remove cache file
        os.remove(path_to_cache)
        self.assertFalse(os.path.isfile(path_to_cache))
if __name__ == '__main__':
    unittest.main()

# Referencing pdb here keeps linters from flagging the unused import; the
# branch never executes.
if False:
    # avoid linter warnings about imports not used
    pdb
| {
"content_hash": "e62b42cf04c9a2cb92de252fd7229a18",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 95,
"avg_line_length": 31.71875,
"alnum_prop": 0.5924466338259442,
"repo_name": "rlowrance/re-avm",
"id": "d9b27617baaff7d2a46089f3f49e14f7f144a03d",
"size": "3045",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Cache.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "18083"
},
{
"name": "Python",
"bytes": "523158"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, connection
from bluebottle.clients import properties
class Migration(migrations.Migration):
    """No-op migration: carries no schema operations and only anchors the
    dependency chain after funding migration 0025."""

    dependencies = [
        ('funding', '0025_auto_20190904_1154'),
    ]

    # Intentionally empty -- NOTE(review): presumably data operations were
    # removed or reverted here; confirm against project history.
    operations = [
    ]
| {
"content_hash": "10756fc102a024fe2234f510b3f3fe90",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 47,
"avg_line_length": 17.0625,
"alnum_prop": 0.6886446886446886,
"repo_name": "onepercentclub/bluebottle",
"id": "715476341e5f5bdac3699cb4ce168c45d01fbb44",
"size": "347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bluebottle/funding/migrations/0026_auto_20190904_1200.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "41694"
},
{
"name": "HTML",
"bytes": "246695"
},
{
"name": "Handlebars",
"bytes": "63"
},
{
"name": "JavaScript",
"bytes": "139123"
},
{
"name": "PHP",
"bytes": "35"
},
{
"name": "PLpgSQL",
"bytes": "1369882"
},
{
"name": "PostScript",
"bytes": "2927"
},
{
"name": "Python",
"bytes": "4983116"
},
{
"name": "Rich Text Format",
"bytes": "39109"
},
{
"name": "SCSS",
"bytes": "99555"
},
{
"name": "Shell",
"bytes": "3068"
},
{
"name": "Smarty",
"bytes": "3814"
}
],
"symlink_target": ""
} |
'''
Get system os language, region, support win, osx
'''
import sys
import locale
import subprocess
def osx_read_language():
    """Return the user's preferred language code on OS X (e.g. 'en-US').

    Shells out to ``defaults read -g AppleLanguages`` and returns the first
    entry of the reported list, or '' when the command is unavailable or
    fails (e.g. on non-Mac platforms).
    """
    try:
        arglist = 'defaults read -g AppleLanguages'.split(' ')
        proc = subprocess.Popen(arglist,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                startupinfo=None)
        # BUG FIX: communicate() waits for the child and drains both pipes;
        # the original read stdout without ever waiting on the process.
        out, _ = proc.communicate()
        text = out.decode(locale.getpreferredencoding())
        # Output looks like a parenthesized, quoted, comma-separated list;
        # strip all the punctuation/whitespace, leaving "en-US,fr,..." .
        for ch in ('\r', '\n', ' ', '(', ')', '"'):
            text = text.replace(ch, '')
        return text.split(',')[0]
    except Exception:
        # Deliberate best-effort (matches the original): any failure means
        # "language unknown".
        return ''
def os_language():
    """Return the OS UI language code (e.g. 'en', 'zh'), or '' if unknown.

    Supports Windows and OS X; other platforms yield ''.
    """
    platform = sys.platform.lower()
    if platform.startswith('win'):
        try:
            from ctypes import windll
            # Result: en_US, zh_CN
            locale_name = locale.windows_locale[windll.kernel32.GetUserDefaultUILanguage()]
            return locale_name.split('_')[0]
        except:
            return ''
    if platform.startswith('darwin'):
        return osx_read_language().split('-')[0]
    return ''
def os_region():
    """Return the OS region/country code (e.g. 'US', 'CN'), or '' if unknown.

    Supports Windows and OS X; other platforms yield ''.
    """
    platform = sys.platform.lower()
    if platform.startswith('win'):
        try:
            # Result: locale.getdefaultlocale() = ('zh_CN', 'cp936')
            return locale.getdefaultlocale()[0].split('_')[1]
        except:
            return ''
    if platform.startswith('darwin'):
        # BUG FIX: guard against languages without a region part ('' or
        # plain 'en'), which made the original raise IndexError.
        parts = osx_read_language().split('-')
        return parts[1] if len(parts) > 1 else ''
    return ''
print('Language', os_language())
print('Region', os_region()) | {
"content_hash": "c320180e5f7ed41be59c6aedb6616a8a",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 107,
"avg_line_length": 27.183333333333334,
"alnum_prop": 0.5321888412017167,
"repo_name": "tcp813/mouTools",
"id": "bd3f3b297d72aa73b871ce03197c5e4ea951cb66",
"size": "1631",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/lang_region.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "374"
},
{
"name": "Batchfile",
"bytes": "553"
},
{
"name": "C",
"bytes": "18596"
},
{
"name": "C#",
"bytes": "300"
},
{
"name": "C++",
"bytes": "13278"
},
{
"name": "CMake",
"bytes": "530"
},
{
"name": "HTML",
"bytes": "15189"
},
{
"name": "JavaScript",
"bytes": "285"
},
{
"name": "Makefile",
"bytes": "450"
},
{
"name": "Python",
"bytes": "183617"
},
{
"name": "QMake",
"bytes": "689"
},
{
"name": "Shell",
"bytes": "159"
}
],
"symlink_target": ""
} |
from django.db import models
class News(models.Model):
    """A news article stored in the `swan_news` table."""
    id = models.AutoField(primary_key=True)
    title = models.CharField(max_length=255)
    summary = models.CharField(max_length=300)
    content = models.CharField(max_length=9000)
    author = models.CharField(max_length=255)
    category = models.CharField(max_length=24)
    pubtime = models.DateTimeField(auto_now_add=True)
    # NOTE(review): auto_now_add only sets this at creation; a modification
    # timestamp usually wants auto_now=True -- confirm intended behavior.
    modtime = models.DateTimeField(auto_now_add=True)
    hits = models.IntegerField(default=0)
    status = models.CharField(max_length=8)

    class Meta:
        db_table = 'swan_news'

    def __str__(self):
        # BUG FIX: __str__ must return a string; self.id is an int
        # (AutoField), so the original raised TypeError.
        return str(self.id)
class Comment(models.Model):
    """A user comment stored in the `swan_comment` table; `sid` references
    the commented item."""
    id = models.AutoField(primary_key=True)
    sid = models.CharField(max_length=255)
    content = models.CharField(max_length=300)
    author = models.CharField(max_length=255)
    avatar = models.CharField(max_length=255)
    pubtime = models.DateTimeField(auto_now_add=True)
    status = models.CharField(max_length=8)

    class Meta:
        db_table = 'swan_comment'

    def __str__(self):
        # BUG FIX: __str__ must return a string; self.id is an int
        # (AutoField), so the original raised TypeError.
        return str(self.id)
| {
"content_hash": "7c3b3b3e1d7a33f1d4e5fa6dca93c677",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 55,
"avg_line_length": 34.5625,
"alnum_prop": 0.6636528028933092,
"repo_name": "huaiping/pandora",
"id": "d0b6ef729f9cbb6226cb1cb7def3ebfe0af62682",
"size": "1106",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "news/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "21967"
},
{
"name": "JavaScript",
"bytes": "136848"
},
{
"name": "Python",
"bytes": "25954"
}
],
"symlink_target": ""
} |
from office365.runtime.client_value import ClientValue
from office365.runtime.client_value_collection import ClientValueCollection
class TaxonomyFieldValue(ClientValue):

    def __init__(self, label=None, term_guid=None, wss_id=-1):
        """
        Represents a single value held in a TaxonomyField (section 3.1.5.27) object.

        :param str label: Specifies the label of the TaxonomyField (section 3.1.5.27) object.
        :param str term_guid: Specifies a string representing Term (section 3.1.5.16) GUID.
        :param int wss_id: Specifies the list item identifier of the list item containing the TaxonomyFieldValue
            that is encapsulated by the TaxonomyFieldValue (section 3.1.5.13) object.
        """
        super(TaxonomyFieldValue, self).__init__()
        self.Label = label
        self.TermGuid = term_guid
        self.WssId = wss_id

    def __str__(self):
        # SharePoint's "WssId;#Label|TermGuid" wire format for taxonomy values.
        return "{0};#{1}|{2}".format(self.WssId, self.Label, self.TermGuid)

    @property
    def entity_type_name(self):
        # Server-side type name used for (de)serialization.
        return "SP.Taxonomy.TaxonomyFieldValue"
class TaxonomyFieldValueCollection(ClientValueCollection):
    """Represents the multi-value object for the taxonomy column."""

    def __init__(self, initial_values):
        super(TaxonomyFieldValueCollection, self).__init__(TaxonomyFieldValue, initial_values)

    def __str__(self):
        # Values are joined with the ";#" delimiter SharePoint expects.
        return ";#".join(str(item) for item in self._data)
| {
"content_hash": "7d47425e9bff9179222de99362c36b44",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 111,
"avg_line_length": 39.30555555555556,
"alnum_prop": 0.6798586572438162,
"repo_name": "vgrem/Office365-REST-Python-Client",
"id": "b00990aeb4cc0cd47c357a66a32af889799ef2bb",
"size": "1415",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "office365/sharepoint/taxonomy/field_value.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1659292"
}
],
"symlink_target": ""
} |
from . import Pointer, Structure, Reference, Index, const_index, is_aggregate
from .. import llvm
import ctypes
try:
import numpy as np
except ImportError:
np = None
__all__ = ["Any", "Array", "FastSlice", "Slice"]
class _AnyClass(object):
    """Singleton marker type whose repr reads as ``Any`` in shape strings."""
    def __repr__(self):
        return "Any"

#: Used in a slice or array specification to indicate variable shape dimension.
Any = _AnyClass()
class _ItemAccessor(object):
    """Mixin for common Array/Slice item accessing routines.

    Host classes provide `element_type`, `shape`, `tag` and an
    `_item_gep(builder, v, i)` method returning a GEP to element *i*.
    """

    def emit_getitem(self, builder, v, i):
        # A partial index (fewer indices than dimensions) yields a
        # sub-slice over the remaining dimensions, not a single element.
        if len(i) < len(self.shape):
            return self._emit_subslice(builder, v, i)
        else:
            gep = self._item_gep(builder, v, i)
            if is_aggregate(self.element_type):
                # Aggregates are handed out by reference rather than loaded.
                return gep, Reference(self.element_type)
            else:
                v = llvm.BuildLoad(builder, gep, "getitem")
                set_tbaa(v, "n2o.{0}.element".format(self.tag))
                return v, self.element_type

    def emit_setitem(self, builder, v, i, e):
        # Element type must match exactly; no implicit conversions.
        if not llvm.types_equal(self.element_type.llvm_type, llvm.TypeOf(e)):
            # FIXME because we don't have e's nitrous type, for now just state
            # what the type *should* be for assignment to succeed.
            raise TypeError("Element value must be a(n) {0}".format(self.element_type))
        gep = self._item_gep(builder, v, i)
        v = llvm.BuildStore(builder, e, gep)
        set_tbaa(v, "n2o.{0}.element".format(self.tag))

    def _emit_subslice(self, builder, v, i):
        """Emits a sub-slice based on partial index *i*"""
        from ..function import entry_alloca
        SSTy = Slice(self.element_type, self.shape[len(i):])
        ss = entry_alloca(builder, SSTy.llvm_type, "subslice")
        # Setting shape dimensions
        subshape, subshape_ty = SSTy._struct.emit_getattr(builder, ss, "shape")
        # shape is a reference
        shape, shape_ty = self.emit_getattr(builder, v, "shape")
        # Copy the trailing dimensions of our shape into the sub-slice shape.
        for j in range(len(self.shape) - len(i)):
            dim, _ = shape_ty.value_type.emit_getitem(builder, shape, (const_index(j + len(i)),))
            subshape_ty.value_type.emit_setitem(builder, subshape, (const_index(j),), dim)
        # Setting pointer to data sub-block: GEP at (*i, 0, 0, ...).
        data_idx = i + (const_index(0),) * (len(self.shape) - len(i))
        SSTy._struct.emit_setattr(builder, ss, "data", self._item_gep(builder, v, data_idx))
        return ss, SSTy
class Array(_ItemAccessor):
    """Array backed by llvm.ArrayType rather than pointer to memory.

    This enables us to declare it as an aggregate type which can be returned by value.

    TODO describe constructor initialization etc.
    """

    def __init__(self, element_type, shape):
        self.element_type = element_type
        self.shape = shape

    def __repr__(self):
        return "Array({0}, shape={1})".format(self.element_type, repr(self.shape))

    def __str__(self):
        return "<Array {0}>".format(shape_str(self.element_type, self.shape))

    def __call__(self, values=None):
        # Emits a stack-allocated array, optionally initialized from the
        # nested sequence *values*.
        from nitrous.lib import ValueEmitter
        from nitrous.function import entry_alloca
        from itertools import product

        def emit(builder):
            v = entry_alloca(builder, self.llvm_type, "v.array")
            if values is not None:
                # Visit every N-d index and store the matching nested value.
                for i in product(*(range(d) for d in self.shape)):
                    ii = tuple(const_index(j) for j in i)
                    vi = values
                    for k in i:
                        vi = vi[k]
                    self.emit_setitem(builder, v, ii, vi)
            return v, Reference(self)

        return ValueEmitter(emit)

    @property
    def llvm_type(self):
        from operator import mul
        # Flat LLVM array of prod(shape) elements.
        n = reduce(mul, self.shape, 1)
        return llvm.ArrayType(self.element_type.llvm_type, n)

    @property
    def c_type(self):
        from operator import mul
        # Nested ctypes array type; shape reversed so the first dimension
        # ends up outermost.
        return reduce(mul, self.shape[::-1], self.element_type.c_type)

    @property
    def tag(self):
        # Type tag used in TBAA metadata names ("A" marks Array).
        shape_tag = "".join("d{0}".format(d) for d in self.shape)
        return "A{0}{1}".format(shape_tag, self.element_type.tag)

    def convert(self, p):
        # Adapt a numpy array argument to ctypes; anything else passes through.
        if np and isinstance(p, np.ndarray):
            p = np.ctypeslib.as_ctypes(p)
        return p

    def emit_getattr(self, builder, ref, attr):
        ndim = len(self.shape)
        if attr == "ndim":
            return const_index(ndim), Index
        elif attr == "shape":
            # First time, initialize a global constant array
            # and then use it on every access.
            module = llvm.GetParentModule__(builder)
            shape_name = "__n2o_array_shape_{0}".format(id(self))
            shape = llvm.GetNamedGlobal(module, shape_name)
            if not shape:
                dims = (llvm.ValueRef * ndim)(*(const_index(d) for d in self.shape))
                shape_init = llvm.ConstArray(Index.llvm_type, dims, ndim)
                shape = llvm.AddGlobal(module, llvm.TypeOf(shape_init), shape_name)
                llvm.SetInitializer(shape, shape_init)
                llvm.SetGlobalConstant(shape, llvm.TRUE)
            return shape, Array(Index, (ndim,))
        else:
            raise AttributeError(attr)

    def _item_gep(self, builder, v, i):
        if len(i) != len(self.shape):
            raise TypeError("Index and array shapes don't match ({0} != {1})"
                            .format(len(i), len(self.shape)))
        # TODO check const shape dimension values?
        # Build conversion from ND-index to flat memory offset
        # FIXME currently assumes row-major memory alignment, first dimension can vary
        const_shape = map(const_index, self.shape[1:])
        ii = flatten_index(builder, i, const_shape)
        # Cast so that we can get GEP to a particular element.
        p_type = llvm.PointerType(self.element_type.llvm_type, 0)
        p = llvm.BuildPointerCast(builder, v, p_type, "array.ptr")
        return llvm.BuildGEP(builder, p, ctypes.byref(ii), 1, "addr")
class FastSlice(_ItemAccessor):
    """Bare-pointer array view with shape fixed at declaration time.

    Unlike Slice, no runtime shape struct is carried.  NOTE(review):
    `_item_gep` builds constants from shape[1:], so trailing dimensions
    appear to require concrete sizes (Any only in the first dimension) --
    confirm.
    """

    def __init__(self, element_type, shape=(Any,)):
        self.element_type = element_type
        self.shape = shape
        self.ndim = len(shape)

    def __repr__(self):
        return "FastSlice({0}, shape={1})".format(self.element_type, repr(self.shape))

    def __str__(self):
        return "<FastSlice {0}>".format(shape_str(self.element_type, self.shape))

    @property
    def llvm_type(self):
        return llvm.PointerType(self.element_type.llvm_type, 0)

    @property
    def c_type(self):
        return ctypes.POINTER(self.element_type.c_type)

    @property
    def tag(self):
        # Type tag used in TBAA metadata names ("F" marks FastSlice).
        shape_tag = "".join("d{0}".format(d) for d in self.shape)
        return "F{0}{1}".format(shape_tag, self.element_type.tag)

    def convert(self, p):
        pointer_type = ctypes.POINTER(self.element_type.c_type)
        # FIXME conversions are unsafe, since they force-cast
        # anything to pointer to element_type.
        if np and isinstance(p, np.ndarray):
            return p.ctypes.data_as(pointer_type)
        return ctypes.cast(p, pointer_type)

    def emit_getattr(self, builder, ref, attr):
        ndim = len(self.shape)
        if attr == "ndim":
            return const_index(ndim), Index
        elif attr == "shape":
            # First time, initialize a global constant array
            # and then use it on every access.
            module = llvm.GetParentModule__(builder)
            shape_name = "__n2o_slice_shape_{0}".format(id(self))
            shape = llvm.GetNamedGlobal(module, shape_name)
            if not shape:
                dims = (llvm.ValueRef * ndim)(*(const_index(d) for d in self.shape))
                shape_init = llvm.ConstArray(Index.llvm_type, dims, ndim)
                shape = llvm.AddGlobal(module, llvm.TypeOf(shape_init), shape_name)
                llvm.SetInitializer(shape, shape_init)
                llvm.SetGlobalConstant(shape, llvm.TRUE)
            return shape, Array(Index, (ndim,))
        else:
            raise AttributeError(attr)

    def _item_gep(self, builder, v, i):
        if len(i) != len(self.shape):
            raise TypeError("Index and array shapes don't match ({0} != {1})"
                            .format(len(i), len(self.shape)))
        # TODO check const shape dimension values?
        # Build conversion from ND-index to flat memory offset
        # FIXME currently assumes row-major memory alignment, first dimension can vary
        const_shape = [const_index(d) for d in self.shape[1:]]
        ii = flatten_index(builder, i, const_shape)
        return llvm.BuildGEP(builder, v, ctypes.byref(ii), 1, "addr")
# Cache of Slice struct types keyed by (element llvm type address, shape)
# so repeated Slice(...) declarations share one LLVM struct type.
_slice_types = {}
class Slice(_ItemAccessor):
    # Wraps incoming np.array or ctypes array into a structure
    # with standard shape/number-of-dimensions attributes that can be
    # used from compiled function.
    #
    # The resulting structure supports getitem/setitem so that there's
    # no need to address it's `data` attribute.

    def __init__(self, element_type, shape=(Any,)):
        self.element_type = element_type
        self.shape = shape
        # Prevent distinct slice LLVM types being allocated every single
        # time one declares them. This is a problem in places like
        # templates where only the data types being passed in and slice
        # type gets derived from it. Key types on their data type and shape.
        k = (llvm.address_of(element_type.llvm_type), shape)
        try:
            self._struct = _slice_types[k]
        except KeyError:
            # setdefault keeps this race-free if another declaration filled
            # the cache in between.
            self._struct = _slice_types.setdefault(
                k, Structure("Slice",
                             ("data", Pointer(element_type)),
                             ("shape", Array(Index, (len(shape),))))
            )

    def __repr__(self):
        return "Slice({0}, shape={1})".format(self.element_type, repr(self.shape))

    def __str__(self):
        return "<Slice {0}>".format(shape_str(self.element_type, self.shape))

    @property
    def llvm_type(self):
        return self._struct.llvm_type

    @property
    def c_type(self):
        return self._struct.c_type

    @property
    def tag(self):
        # Type tag used in TBAA metadata names ("B" marks Slice).
        shape_tag = "".join("d{0}".format(d) for d in self.shape)
        return "B{0}{1}".format(shape_tag, self.element_type.tag)

    def convert(self, p):
        # Wrap a numpy or ctypes array into the (data, shape) struct.
        pointer_type = ctypes.POINTER(self.element_type.c_type)
        # FIXME conversions are unsafe, since they force-cast
        # anything to pointer to element_type.
        if np and isinstance(p, np.ndarray):
            return self._struct.c_type(p.ctypes.data_as(pointer_type),
                                       (Index.c_type * len(p.shape))(*p.shape))
        shape = ctypes_shape(p)
        conv_p = ctypes.cast(p, pointer_type)
        return self._struct.c_type(conv_p, (Index.c_type * len(shape))(*shape))

    def emit_getattr(self, builder, ref, attr):
        if attr == "ndim":
            return const_index(len(self.shape)), None
        elif attr in ("shape", "data"):
            v, t = self._struct.emit_getattr(builder, ref, attr)
            set_tbaa(v, "n2o.{0}.{1}".format(self.tag, attr))
            return v, t
        else:
            raise AttributeError(attr)

    def emit_setattr(self, builder, ref, attr, v):
        # Slice fields (data pointer / shape) may not be reassigned.
        raise TypeError("Slice is immutable")

    def _item_gep(self, builder, v, i):
        if len(i) != len(self.shape):
            raise TypeError("Index and slice shapes don't match ({0} != {1})"
                            .format(len(i), len(self.shape)))
        # Get array shape from struct value
        shape_value, shape_type = self.emit_getattr(builder, v, "shape")
        data_value, data_type = self.emit_getattr(builder, v, "data")

        def emit_dimension(i):
            # Use direct constants, if possible; otherwise load from actual shape array.
            if self.shape[i] == Any:
                # Shape type is a reference to array, use the actual type
                dim, _ = shape_type.value_type.emit_getitem(builder, shape_value, (const_index(i),))
            else:
                dim = const_index(self.shape[i])
            return dim

        # Build conversion from ND-index to flat memory offset
        # FIXME currently assumes row-major memory alignment, first dimension can vary
        const_shape = [emit_dimension(d) for d in range(1, len(self.shape))]
        ii = flatten_index(builder, i, const_shape)
        return llvm.BuildGEP(builder, data_value, ctypes.byref(ii), 1, "addr")
def flatten_index(builder, index, const_shape):
    """Converts N-dimensional index into 1-dimensional one.

    index is of a form ``(i0, i1, ... iN)``, where *i* is ValueRefs
    holding individual dimension indices.

    First dimension is considered to be variable. Given array shape
    ``(d0, d1, ... dN)``, *const_shape* contains ``(d1, d2, ... dN)``.

    If array is 1-dimensional, *const_shape* is an empty tuple.
    """
    mul_ = lambda x, y: llvm.BuildMul(builder, x, y, "v")
    # out = 0
    out = const_index(0)
    # NOTE(review): relies on a global ``reduce``; on Python 3 this needs
    # ``from functools import reduce`` at the top of the file -- confirm.
    for i in range(0, len(const_shape)):
        # out += index[i-1] * reduce(mul, const_shape[i:], 1)
        tmp = reduce(mul_, const_shape[i:], const_index(1))
        rhs = llvm.BuildMul(builder, index[i], tmp, "v")
        out = llvm.BuildAdd(builder, out, rhs, "v")
    # return out + index[-1]
    return llvm.BuildAdd(builder, out, index[-1], "v")
def ctypes_shape(x):
    """Infer the shape of a (possibly nested) ctypes array as a tuple of ints."""
    dims = []
    while True:
        try:
            dims.append(x._length_)
        except AttributeError:
            # Not an array type (no _length_): we hit the scalar element.
            return tuple(dims)
        x = x[0]
def shape_str(element_type, shape):
    """Return human-friendly description of array shape, e.g. ``[? x [3 x i]]``."""
    head = shape[0]
    first = "?" if head in (Any, None) else head
    if len(shape) == 1:
        inner = element_type
    else:
        inner = shape_str(element_type, shape[1:])
    return "[{0} x {1}]".format(first, inner)
def set_tbaa(v, name):
    """Attach a two-level TBAA metadata node named *name* to value *v*.

    The node is rooted at a common "n2o.tbaa" metadata string so that
    distinct tags are known not to alias each other.
    """
    root = llvm.MDNode__((llvm.ValueRef * 1)(llvm.MDString("n2o.tbaa", 8)), 1)
    node = llvm.MDNode__((llvm.ValueRef * 2)(llvm.MDString(name, len(name)), root), 2)
    llvm.SetNamedMetadata__(v, "tbaa", node)
| {
"content_hash": "bf283a23edfdfeb2fe0d128bb621aadf",
"timestamp": "",
"source": "github",
"line_count": 392,
"max_line_length": 100,
"avg_line_length": 36.18622448979592,
"alnum_prop": 0.5897779344377864,
"repo_name": "dtcaciuc/nitrous",
"id": "1069ce5b03ef2f767c670259293a90e63c9e2885",
"size": "14185",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nitrous/types/array.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "4560"
},
{
"name": "Python",
"bytes": "182339"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class ApiConfig(AppConfig):
    """Django application configuration for the ``api`` app."""
    name = 'api'
| {
"content_hash": "888bdb25c4b155e6d1f6168cfdb45d2c",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 33,
"avg_line_length": 16.2,
"alnum_prop": 0.7283950617283951,
"repo_name": "jminuscula/dixit-online",
"id": "90fb1640d1421c2653d5854a5f2d1248c35091dc",
"size": "82",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/src/dixit/api/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9361"
},
{
"name": "HTML",
"bytes": "3442"
},
{
"name": "JavaScript",
"bytes": "3714"
},
{
"name": "Python",
"bytes": "81638"
},
{
"name": "TypeScript",
"bytes": "22157"
}
],
"symlink_target": ""
} |
"""XLA backend that runs XRT operators via TensorFlow remote eager.
This module implements the Python XLA client's `Backend` abstraction using
XRT, which embeds XLA's compiler/runtime operations as TensorFlow
operations. The module uses TensorFlow's remote eager RPC API to invoke XRT
operations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=g-direct-tensorflow-import
from tensorflow.compiler.xla.python import xla_client
from tensorflow.compiler.xla.python import xla_extension as _xla
# pylint: enable=g-direct-tensorflow-import
def _make_xla_shape(shape):
    """Recursively mirror a client-side shape into an ``_xla.Shape``."""
    if shape.is_tuple():
        elements = [_make_xla_shape(s) for s in shape.tuple_shapes()]
        return _xla.Shape.Tuple(elements)
    return _xla.Shape.Array(shape.xla_element_type(),
                            shape.dimensions(),
                            shape.minor_to_major())
def get_tf_context(target, worker):
    """Returns a TensorFlow RPC client object.

    Args:
      target: string; a host:port pair (e.g., '10.0.101.1:8470') naming an XRT
        server.
      worker: string; the task name of the remote TensorFlow worker.
    """
    client = _xla.xrt.GetTfClient(target, worker)
    options = _xla.xrt.XrtTfContextOptions()
    # Allow many ops to be queued up before the context blocks.
    options.max_queue_size = 10000
    return _xla.xrt.XrtTfContext.Create(options, client, worker, 0)
class XrtBackend(xla_client.Backend):
  """XLA backend using XRT.

  Args:
    tf_context: an XrtTfContext object.
    tf_device_type: the type of TensorFlow device to use for XRT (e.g. `"TPU"`).
  """

  def __init__(self, tf_context, tf_device_type):
    self.tf_device_type = tf_device_type
    self.context = _xla.xrt.XrtContext.Create(tf_context, tf_device_type)

  def device_count(self):
    """Returns the number of devices known to the XRT context."""
    return self.context.DeviceCount()

  def buffer_from_pyval(self, pyval, device=0):
    """Transfers a Python value to device memory as an XRT buffer."""
    return _xla.xrt.XrtBuffer.FromLiteral(self.context, device, pyval)

  def delete_buffer(self, c_buffer):
    c_buffer.Delete()

  def destructure_tuple(self, c_buffer):
    """Splits a tuple buffer into its element buffers."""
    return c_buffer.DestructureTuple()

  def compile(self, computation, arg_shapes, result_shape, compile_options):
    """Compiles *computation* into an XrtExecutable.

    The caller-provided arg/result shapes are discarded; shapes are derived
    from the computation's own program shape, with layouts filled in where
    absent.
    """
    del arg_shapes
    del result_shape
    # pylint: disable=protected-access
    program_shape = xla_client._wrap_program_shape(
        computation.GetProgramShape())
    # pylint: enable=protected-access
    proto = computation.GetSerializedProto()
    arg_shapes = [
        _make_xla_shape(shape.with_major_to_minor_layout_if_absent())
        for shape in program_shape.parameter_shapes
    ]
    result_shape = _make_xla_shape(
        program_shape.result_shape.with_major_to_minor_layout_if_absent())
    device_assignment = _xla.xrt.AssignDevices(compile_options.num_replicas, 1)
    return _xla.xrt.XrtExecutable.Compile(self.context, proto, arg_shapes,
                                          result_shape, device_assignment)

  def delete_executable(self, executable):
    executable.Delete()

  def execute(self, executable, args):
    return executable.Execute(args)

  def execute_replicated(self, executable, per_replica_args):
    # The extra list packing and unpacking is to handle multiple
    # computations per replica, which we don't support yet.
    return executable.ExecuteReplicated([per_replica_args])[0]
| {
"content_hash": "357cae160661b8cfc19f2030d084c7bb",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 80,
"avg_line_length": 34.784946236559136,
"alnum_prop": 0.7075734157650696,
"repo_name": "kevin-coder/tensorflow-fork",
"id": "ccf574f6842aef00dbc0e31cf1ae9086df4ca497",
"size": "3924",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/compiler/xla/python/xrt.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9117"
},
{
"name": "C",
"bytes": "340300"
},
{
"name": "C++",
"bytes": "39383425"
},
{
"name": "CMake",
"bytes": "194940"
},
{
"name": "Go",
"bytes": "1046987"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "567239"
},
{
"name": "Jupyter Notebook",
"bytes": "1940883"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "48231"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94385"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "33617202"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "425910"
}
],
"symlink_target": ""
} |
"""
dicom2nifti
@author: abrys
"""
import os
import traceback
import logging
import nibabel
import numpy
import pydicom.config as pydicom_config
from pydicom.tag import Tag
import dicom2nifti.common as common
import dicom2nifti.settings as settings
import dicom2nifti.convert_generic as convert_generic
from dicom2nifti.exceptions import ConversionError, ConversionValidationError
# Real-world Philips data is often nonconforming; don't reject invalid values.
pydicom_config.enforce_valid_values = False

logger = logging.getLogger(__name__)
def dicom_to_nifti(dicom_input, output_file=None):
    """
    This is the main dicom to nifti conversion function for philips images.
    As input philips images are required. It will then determine the type of images and do the correct conversion

    Examples: See unit test

    :param output_file: file path to the output nifti
    :param dicom_input: directory with dicom files for 1 scan
    """
    assert common.is_philips(dicom_input)

    # remove duplicate slices based on position and data
    dicom_input = convert_generic.remove_duplicate_slices(dicom_input)

    # remove localizers based on image type
    dicom_input = convert_generic.remove_localizers_by_imagetype(dicom_input)

    # remove_localizers based on image orientation (only valid if slicecount is validated)
    dicom_input = convert_generic.remove_localizers_by_orientation(dicom_input)

    # if no dicoms remain raise exception
    if not dicom_input:
        raise ConversionValidationError('TOO_FEW_SLICES/LOCALIZER')

    if common.is_multiframe_dicom(dicom_input):
        _assert_explicit_vr(dicom_input)
        logger.info('Found multiframe dicom')
        if _is_multiframe_4d(dicom_input):
            logger.info('Found sequence type: MULTIFRAME 4D')
            return _multiframe_to_nifti(dicom_input, output_file)
        if _is_multiframe_anatomical(dicom_input):
            logger.info('Found sequence type: MULTIFRAME ANATOMICAL')
            return convert_generic.multiframe_to_nifti(dicom_input, output_file)
    else:
        logger.info('Found singleframe dicom')
        grouped_dicoms = _get_grouped_dicoms(dicom_input)
        if _is_singleframe_4d(dicom_input):
            logger.info('Found sequence type: SINGLEFRAME 4D')
            return _singleframe_to_nifti(grouped_dicoms, output_file)

    # fall-through: neither 4D nor a recognised multiframe anatomical series
    logger.info('Assuming anatomical data')
    return convert_generic.dicom_to_nifti(dicom_input, output_file)
def _assert_explicit_vr(dicom_input):
    """
    Assert that explicit vr is used

    :raises ConversionError: when the transfer syntax is Implicit VR Little
        Endian (UID 1.2.840.10008.1.2) and validation is enabled in settings.
    """
    if settings.validate_multiframe_implicit:
        header = dicom_input[0]
        # (0002,0010) is the Transfer Syntax UID of the file meta header.
        if header.file_meta[0x0002, 0x0010].value == '1.2.840.10008.1.2':
            raise ConversionError('IMPLICIT_VR_ENHANCED_DICOM')
def _is_multiframe_diffusion_imaging(dicom_input):
    """
    Detect whether a dicom series is a philips multiframe dti dataset.

    NOTE: We already assume this is a 4D dataset as input
    """
    header = dicom_input[0]

    if "PerFrameFunctionalGroupsSequence" not in header:
        return False

    # diffusion info lives in the MR Diffusion Sequence tag of a frame
    diffusion_tag = Tag(0x0018, 0x9117)
    return any(diffusion_tag in frame
               for frame in header.PerFrameFunctionalGroupsSequence)
def _is_multiframe_4d(dicom_input):
    """
    Detect whether a dicom series is a philips multiframe 4D dataset.
    """
    # must be multiframe dicom in the first place
    if not common.is_multiframe_dicom(dicom_input):
        return False

    # 4D data contains more than one stack
    number_of_stacks, _ = common.multiframe_get_stack_count(dicom_input)
    return number_of_stacks > 1
def _is_multiframe_anatomical(dicom_input):
    """
    Detect whether a dicom series is a philips multiframe anatomical dataset.

    NOTE: Only the first slice will be checked so you can only provide an already sorted dicom directory
    (containing one series)
    """
    # must be multiframe dicom in the first place
    if not common.is_multiframe_dicom(dicom_input):
        return False

    # anatomical data has a single stack (more than one means 4D)
    number_of_stacks, _ = common.multiframe_get_stack_count(dicom_input)
    return number_of_stacks <= 1
def _is_singleframe_4d(dicom_input):
    """
    Detect whether a dicom series is a philips singleframe 4D dataset.
    """
    header = dicom_input[0]

    # stack information (private philips slice-number tag) must be present
    if Tag(0x2001, 0x100a) not in header:
        return False

    # and there must be more than one timepoint
    return len(_get_grouped_dicoms(dicom_input)) > 1
def _is_singleframe_diffusion_imaging(grouped_dicoms):
    """
    Detect whether a dicom series is a philips singleframe dti dataset.

    NOTE: We already assume singleframe 4D input
    """
    # bval information can be stored in either of the two known layouts
    return _is_bval_type_b(grouped_dicoms) or _is_bval_type_a(grouped_dicoms)
def _is_bval_type_a(grouped_dicoms):
    """
    Check if the bvals are stored in the first of 2 currently known ways for single frame dti
    """
    # private philips tags holding the bval and the three bvec components
    bval_tag = Tag(0x2001, 0x1003)
    bvec_x_tag = Tag(0x2005, 0x10b0)
    bvec_y_tag = Tag(0x2005, 0x10b1)
    bvec_z_tag = Tag(0x2005, 0x10b2)
    # type A requires all four tags present with float values and a nonzero bval
    for group in grouped_dicoms:
        if bvec_x_tag in group[0] and _is_float(common.get_fl_value(group[0][bvec_x_tag])) and \
                bvec_y_tag in group[0] and _is_float(common.get_fl_value(group[0][bvec_y_tag])) and \
                bvec_z_tag in group[0] and _is_float(common.get_fl_value(group[0][bvec_z_tag])) and \
                bval_tag in group[0] and _is_float(common.get_fl_value(group[0][bval_tag])) and \
                common.get_fl_value(group[0][bval_tag]) != 0:
            return True
    return False
def _is_bval_type_b(grouped_dicoms):
    """
    Check if the bvals are stored in the second of 2 currently known ways for single frame dti
    """
    # standard DICOM diffusion tags (b-value / gradient orientation)
    bval_tag = Tag(0x0018, 0x9087)
    bvec_tag = Tag(0x0018, 0x9089)
    for group in grouped_dicoms:
        if bvec_tag in group[0] and bval_tag in group[0]:
            bvec = common.get_fd_array_value(group[0][bvec_tag], 3)
            bval = common.get_fd_value(group[0][bval_tag])
            # a fully-float, nonzero diffusion vector marks type B
            if _is_float(bvec[0]) and _is_float(bvec[1]) and _is_float(bvec[2]) and _is_float(bval) and bval != 0:
                return True
    return False
def _is_float(value):
"""
Check if float
"""
try:
float(value)
return True
except ValueError:
return False
def _multiframe_to_nifti(dicom_input, output_file):
    """
    This function will convert philips 4D or anatomical multiframe series to a nifti

    :param dicom_input: list containing the (single) multiframe dicom dataset
    :param output_file: path of the nifti to write, or None to skip saving
    :return: dict with the nifti image and, for diffusion data, bval/bvec info
    """
    # Read the multiframe dicom file
    logger.info('Read dicom file')
    multiframe_dicom = dicom_input[0]

    # Create mosaic block
    logger.info('Creating data block')
    full_block = common.multiframe_to_block(multiframe_dicom)

    logger.info('Creating affine')
    # Create the nifti header info
    affine, max_slice_increment = common.multiframe_create_affine([multiframe_dicom], full_block)

    logger.info('Creating nifti')
    # Convert to nifti
    if full_block.ndim > 3:  # do not squeeze single slice data
        full_block = full_block.squeeze()
    nii_image = nibabel.Nifti1Image(full_block, affine)

    try:
        timing_parameters = multiframe_dicom.SharedFunctionalGroupsSequence[0].MRTimingAndRelatedParametersSequence[0]
        first_frame = multiframe_dicom[Tag(0x5200, 0x9230)][0]
        common.set_tr_te(nii_image, float(timing_parameters.RepetitionTime),
                         float(first_frame.MREchoSequence[0].EchoTime))
    # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit are
    # not swallowed; timing info stays best-effort.
    except Exception:
        logger.info('Unable to set timing info')

    # Save to disk
    if output_file is not None:
        logger.info('Saving nifti to disk %s' % output_file)
        nii_image.header.set_slope_inter(1, 0)
        nii_image.header.set_xyzt_units(2)  # set units for xyz (leave t as unknown)
        nii_image.to_filename(output_file)

    if _is_multiframe_diffusion_imaging(dicom_input):
        bval_file = None
        bvec_file = None
        if output_file is not None:
            # Create the bval en bvec files
            base_path = os.path.dirname(output_file)
            base_name = os.path.splitext(os.path.splitext(os.path.basename(output_file))[0])[0]
            logger.info('Creating bval en bvec files')
            bval_file = '%s/%s.bval' % (base_path, base_name)
            bvec_file = '%s/%s.bvec' % (base_path, base_name)
        bval, bvec, bval_file, bvec_file = _create_bvals_bvecs(multiframe_dicom, bval_file, bvec_file, nii_image,
                                                               output_file)
        return {'NII_FILE': output_file,
                'BVAL_FILE': bval_file,
                'BVEC_FILE': bvec_file,
                'NII': nii_image,
                'BVAL': bval,
                'BVEC': bvec}

    return {'NII_FILE': output_file,
            'NII': nii_image,
            'MAX_SLICE_INCREMENT': max_slice_increment}
def _singleframe_to_nifti(grouped_dicoms, output_file):
    """
    This function will convert a philips singleframe series to a nifti

    :param grouped_dicoms: list of stacks (lists of dicom datasets), one per timepoint
    :param output_file: path of the nifti to write, or None to skip saving
    """
    # Create mosaic block
    logger.info('Creating data block')
    full_block = _singleframe_to_block(grouped_dicoms)

    logger.info('Creating affine')
    # Create the nifti header info
    affine, slice_increment = common.create_affine(grouped_dicoms[0])

    logger.info('Creating nifti')
    # Convert to nifti
    if full_block.ndim > 3:  # do not squeeze single slice data
        full_block = full_block.squeeze()
    nii_image = nibabel.Nifti1Image(full_block, affine)
    common.set_tr_te(nii_image, float(grouped_dicoms[0][0].RepetitionTime), float(grouped_dicoms[0][0].EchoTime))

    if output_file is not None:
        # Save to disk
        logger.info('Saving nifti to disk %s' % output_file)
        nii_image.header.set_slope_inter(1, 0)
        nii_image.header.set_xyzt_units(2)  # set units for xyz (leave t as unknown)
        nii_image.to_filename(output_file)

    if _is_singleframe_diffusion_imaging(grouped_dicoms):
        bval_file = None
        bvec_file = None
        # Create the bval en bvec files
        if output_file is not None:
            base_name = os.path.splitext(output_file)[0]
            if base_name.endswith('.nii'):
                # handle double extension .nii.gz
                base_name = os.path.splitext(base_name)[0]
            logger.info('Creating bval en bvec files')
            bval_file = '%s.bval' % base_name
            bvec_file = '%s.bvec' % base_name
        nii_image, bval, bvec, bval_file, bvec_file = _create_singleframe_bvals_bvecs(grouped_dicoms,
                                                                                      bval_file,
                                                                                      bvec_file,
                                                                                      nii_image,
                                                                                      output_file)
        return {'NII_FILE': output_file,
                'BVAL_FILE': bval_file,
                'BVEC_FILE': bvec_file,
                'NII': nii_image,
                'BVAL': bval,
                'BVEC': bvec,
                'MAX_SLICE_INCREMENT': slice_increment}

    return {'NII_FILE': output_file,
            'NII': nii_image,
            'MAX_SLICE_INCREMENT': slice_increment}
def _singleframe_to_block(grouped_dicoms):
    """
    Generate a full datablock containing all timepoints

    :param grouped_dicoms: list of stacks (lists of dicom datasets), one per timepoint
    :return: 4D numpy array with the timepoints concatenated on axis 3
    """
    # For each slice / mosaic create a data volume block
    data_blocks = []
    for index in range(0, len(grouped_dicoms)):
        logger.info('Creating block %s of %s' % (index + 1, len(grouped_dicoms)))
        current_block = _stack_to_block(grouped_dicoms[index])
        current_block = current_block[:, :, :, numpy.newaxis]
        data_blocks.append(current_block)

    try:
        full_block = numpy.concatenate(data_blocks, axis=3)
    # Narrowed from a bare ``except:``; shape mismatches here mean slices
    # are missing from the series.
    except Exception:
        traceback.print_exc()
        raise ConversionError("MISSING_DICOM_FILES")

    # Apply the rescaling if needed
    common.apply_scaling(full_block, grouped_dicoms[0][0])

    return full_block
def _stack_to_block(timepoint_dicoms):
    """
    Convert a mosaic slice to a block of data by reading the headers, splitting the mosaic and appending

    Thin wrapper around the generic volume pixel-data reader.
    """
    return common.get_volume_pixeldata(timepoint_dicoms)
def _get_grouped_dicoms(dicom_input):
    """
    Search all dicoms in the dicom directory, sort and validate them

    fast_read = True will only read the headers not the data

    :param dicom_input: list of dicom datasets for one series
    :return: list of lists, one inner list (stack) per timepoint
    """
    # If ALL dicoms have an instance number, sort by instance number, else by
    # position. (The previous check used "any", which crashed with
    # AttributeError on mixed input where some datasets lack InstanceNumber.)
    if all('InstanceNumber' in d for d in dicom_input):
        dicoms = sorted(dicom_input, key=lambda x: x.InstanceNumber)
    else:
        dicoms = common.sort_dicoms(dicom_input)

    # now group per stack
    grouped_dicoms = [[]]  # list with first element a list
    timepoint_index = 0
    previous_stack_position = -1

    # loop over all sorted dicoms
    stack_position_tag = Tag(0x2001, 0x100a)  # put this there as this is a slow step and used a lot
    for index in range(0, len(dicoms)):
        dicom_ = dicoms[index]
        stack_position = 0
        if stack_position_tag in dicom_:
            stack_position = common.get_is_value(dicom_[stack_position_tag])
        if previous_stack_position == stack_position:
            # if the stack number is the same we move to the next timepoint
            timepoint_index += 1
            if len(grouped_dicoms) <= timepoint_index:
                grouped_dicoms.append([])
        else:
            # if it changes move back to the first timepoint
            timepoint_index = 0
        grouped_dicoms[timepoint_index].append(dicom_)
        previous_stack_position = stack_position

    return grouped_dicoms
def _create_bvals_bvecs(multiframe_dicom, bval_file, bvec_file, nifti, nifti_file):
    """
    Write the bvals from the sorted dicom files to a bval file

    Inspired by https://github.com/IBIC/ibicUtils/blob/master/ibicBvalsBvecs.py
    """
    # create the empty arrays
    number_of_stacks, number_of_stack_slices = common.multiframe_get_stack_count([multiframe_dicom])
    bvals = numpy.zeros([number_of_stacks], dtype=numpy.int32)
    bvecs = numpy.zeros([number_of_stacks, 3])

    # loop over all timepoints and create a list with all bvals and bvecs
    for stack_index in range(0, number_of_stacks):
        # (5200,9230) is the Per-frame Functional Groups Sequence
        stack = multiframe_dicom[Tag(0x5200, 0x9230)][stack_index]
        # only DIRECTIONAL frames carry an actual diffusion direction
        if str(stack[Tag(0x0018, 0x9117)][0][Tag(0x0018, 0x9075)].value) == 'DIRECTIONAL':
            bvals[stack_index] = common.get_fd_value(stack[Tag(0x0018, 0x9117)][0][Tag(0x0018, 0x9087)])
            bvecs[stack_index, :] = common.get_fd_array_value(stack[Tag(0x0018, 0x9117)][0]
                                                              [Tag(0x0018, 0x9076)][0][Tag(0x0018, 0x9089)], 3)

    # truncate nifti if needed
    nifti, bvals, bvecs = _fix_diffusion_images(bvals, bvecs, nifti, nifti_file)

    # save the found bvecs to the file
    if numpy.count_nonzero(bvals) > 0 or numpy.count_nonzero(bvecs) > 0:
        common.write_bval_file(bvals, bval_file)
        common.write_bvec_file(bvecs, bvec_file)
    else:
        # no diffusion information found at all; report nothing
        bval_file = None
        bvec_file = None
        bvals = None
        bvecs = None
    return bvals, bvecs, bval_file, bvec_file
def _fix_diffusion_images(bvals, bvecs, nifti, nifti_file):
    """
    This function will remove the last timepoint from the nifti, bvals and bvecs if the last vector is 0,0,0
    This is sometimes added at the end by philips
    """
    # if all zero continue or if the last bval is not zero continue
    if numpy.count_nonzero(bvecs) == 0 or not numpy.count_nonzero(bvals[-1]) == 0:
        # nothing needs to be done here
        return nifti, bvals, bvecs
    # remove last elements from bvals and bvecs
    bvals = bvals[:-1]
    bvecs = bvecs[:-1]

    # remove last elements from the nifti
    new_nifti = nibabel.Nifti1Image(common.get_nifti_data(nifti)[:, :, :, :-1].squeeze(), nifti.affine)
    new_nifti.header.set_slope_inter(1, 0)
    new_nifti.header.set_xyzt_units(2)  # set units for xyz (leave t as unknown)
    # overwrite the previously written nifti with the truncated one
    new_nifti.to_filename(nifti_file)

    return new_nifti, bvals, bvecs
def _create_singleframe_bvals_bvecs(grouped_dicoms, bval_file, bvec_file, nifti, nifti_file):
    """
    Write the bvals from the sorted dicom files to a bval file

    :param grouped_dicoms: list of stacks (lists of dicom datasets), one per timepoint
    :param bval_file: target path for the bval file (may be None)
    :param bvec_file: target path for the bvec file (may be None)
    :param nifti: the nifti image the diffusion info belongs to
    :param nifti_file: path of the written nifti (rewritten when truncated)
    """
    # create the empty arrays
    bvals = numpy.zeros([len(grouped_dicoms)], dtype=numpy.int32)
    bvecs = numpy.zeros([len(grouped_dicoms), 3])

    # loop over all timepoints and create a list with all bvals and bvecs
    if _is_bval_type_a(grouped_dicoms):
        # private philips tags (type A layout)
        bval_tag = Tag(0x2001, 0x1003)
        bvec_x_tag = Tag(0x2005, 0x10b0)
        bvec_y_tag = Tag(0x2005, 0x10b1)
        bvec_z_tag = Tag(0x2005, 0x10b2)
        for stack_index in range(0, len(grouped_dicoms)):
            bvals[stack_index] = common.get_fl_value(grouped_dicoms[stack_index][0][bval_tag])
            bvecs[stack_index, :] = [common.get_fl_value(grouped_dicoms[stack_index][0][bvec_x_tag]),
                                     common.get_fl_value(grouped_dicoms[stack_index][0][bvec_y_tag]),
                                     common.get_fl_value(grouped_dicoms[stack_index][0][bvec_z_tag])]
    elif _is_bval_type_b(grouped_dicoms):
        # standard DICOM diffusion tags (type B layout)
        bval_tag = Tag(0x0018, 0x9087)
        bvec_tag = Tag(0x0018, 0x9089)
        for stack_index in range(0, len(grouped_dicoms)):
            bvals[stack_index] = common.get_fd_value(grouped_dicoms[stack_index][0][bval_tag])
            bvecs[stack_index, :] = common.get_fd_array_value(grouped_dicoms[stack_index][0][bvec_tag], 3)

    # truncate nifti if needed
    nifti, bvals, bvecs = _fix_diffusion_images(bvals, bvecs, nifti, nifti_file)

    # save the found bvecs to the file
    if numpy.count_nonzero(bvals) > 0 or numpy.count_nonzero(bvecs) > 0:
        common.write_bval_file(bvals, bval_file)
        common.write_bvec_file(bvecs, bvec_file)
    else:
        # no diffusion information found at all; report nothing
        bval_file = None
        bvec_file = None
        bvals = None
        bvecs = None
    return nifti, bvals, bvecs, bval_file, bvec_file
| {
"content_hash": "65f6efc4a884aca4ba569a6c531e8f81",
"timestamp": "",
"source": "github",
"line_count": 500,
"max_line_length": 118,
"avg_line_length": 37.002,
"alnum_prop": 0.6319658396843414,
"repo_name": "icometrix/dicom2nifti",
"id": "b1494c81f55a36caf1ea1a1eb5703d281dd78ae4",
"size": "18525",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "dicom2nifti/convert_philips.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "268301"
},
{
"name": "Shell",
"bytes": "564"
}
],
"symlink_target": ""
} |
import unittest
import pytest
from django.db import models
from dirtyfields import DirtyFieldsMixin
# Optional dependency: the model below is only defined when the jsonfield
# package is importable; the test is skipped otherwise.
JSON_FIELD_AVAILABLE = False
try:
    from jsonfield import JSONField
    JSON_FIELD_AVAILABLE = True
except ImportError:
    pass

if JSON_FIELD_AVAILABLE:
    class JSONFieldModel(DirtyFieldsMixin, models.Model):
        # single JSON column exercised by test_json_field below
        json_field = JSONField()
@unittest.skipIf(not JSON_FIELD_AVAILABLE, 'django jsonfield library required')
@pytest.mark.django_db
def test_json_field():
    """In-place mutation of a JSON value must be reported as a dirty field."""
    tm = JSONFieldModel.objects.create(json_field={'data': [1, 2, 3]})
    # mutate the nested list without reassigning the model attribute
    data = tm.json_field['data']
    data.append(4)
    # the dirty value is the ORIGINAL (saved) content, not the mutated one
    assert tm.get_dirty_fields() == {
        'json_field': {'data': [1, 2, 3]}
    }
| {
"content_hash": "89ec80edd1b03fd9add8f99c51225f82",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 79,
"avg_line_length": 23.75862068965517,
"alnum_prop": 0.6966618287373004,
"repo_name": "jdotjdot/django-dirtyfields",
"id": "34db1aeeef809ffc630d347ad4b73827d88fc151",
"size": "689",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/test_json_field.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "38514"
}
],
"symlink_target": ""
} |
# Python 2/3 compatibility: on Python 2 include long and use the lazy xrange.
try:
    integer_types = (int, long)
    range = xrange
except NameError:   # Python 3
    integer_types = (int,)
import random
from graphtheory.structures.edges import Edge
from graphtheory.structures.graphs import Graph
def swap(L, i, j):
    """Exchange the items at positions *i* and *j* of list *L* in place."""
    item = L[i]
    L[i] = L[j]
    L[j] = item
def make_random_spgraph(n):
    """Make a series-parallel graph with n vertices.

    Starts from the single edge (source, sink) and repeatedly expands a
    randomly chosen edge with a series, parallel or jackknife operation
    until all n vertices are used; edges get consecutive weights.
    """
    if n < 2:
        raise ValueError("bad n")
    graph = Graph(n)
    for node in range(n):
        graph.add_node(node)
    source = 0
    sink = n-1
    idx = 1
    edge_list = [Edge(source, sink, idx)]
    idx += 1
    node = n-2
    while node > 0:
        # Pick a random edge to apply the operation to.
        i = random.randrange(0, len(edge_list))
        swap(edge_list, i, -1)
        edge = edge_list[-1]
        # Pick a random operation (jackknife is only allowed at the sink).
        if edge.target == sink:
            action = random.choice(["series", "parallel", "jackknife"])
        else:
            action = random.choice(["series", "parallel"])
        if action == "series":
            edge_list.pop()
            edge_list.append(Edge(edge.source, node, idx))
            idx += 1
            edge_list.append(Edge(node, edge.target, idx))
            idx += 1
        elif action == "parallel":
            edge_list.append(Edge(edge.source, node, idx))
            idx += 1
            edge_list.append(Edge(node, edge.target, idx))
            idx += 1
        elif action == "jackknife":
            edge_list.append(Edge(edge.target, node, idx))
            idx += 1
        node -= 1
    for edge in edge_list:
        graph.add_edge(edge)
    return graph
def make_random_ktree(n, k):   # using list
    """Make a random k-tree with n vertices.

    Builds a (k+1)-clique on the highest-numbered vertices, then attaches
    each remaining vertex to a randomly chosen k-clique.
    """
    if k >= n:
        raise ValueError("bad k")   # run time error possible
    graph = Graph(n)
    if n < 1:
        raise ValueError("bad n")
    elif n == 1:
        graph.add_node(0)
    else:
        for node in range(n):
            graph.add_node(node)
        # Make {n-k-1, ..., n-1} into (k+1)-clique in graph.
        for source in range(n-k-1, n):
            for target in range(n-k-1, n):
                if source < target:
                    graph.add_edge(Edge(source, target))
        node = n-k-2
        while node >= 0:
            # Choose source from the range node+1 .. n-k-1;
            # this effectively selects a big (k+1)-clique.
            source = random.choice(range(node+1, n-k))
            # Gather that clique's vertices, but keep only the ones
            # with numbers greater than source.
            neighbors = list(target for target in graph.iteradjacent(source)
                             if source < target)
            neighbors.append(source)   # closed neighborhood
            # From the big (k+1)-clique pick a small k-clique.
            idx = random.randrange(0, len(neighbors))
            swap(neighbors, idx, -1)
            neighbors.pop()
            # Connect node to all nodes from neighbors.
            for target in neighbors:
                graph.add_edge(Edge(node, target))
            node -= 1
    return graph
def find_peo_spgraph1(graph):   # graph has to be connected
    """Find PEO for a supergraph (2-tree) of an sp-graph.

    Uses degree buckets to peel off degree-2 vertices one by one;
    raises ValueError if the graph is not series-parallel.
    """
    if graph.is_directed():
        raise ValueError("the graph is directed")
    order = list()   # PEO of 2-tree
    graph_copy = graph.copy()
    degree_dict = dict((node, graph.degree(node))
                       for node in graph.iternodes())   # O(V) time
    bucket = list(set() for deg in range(graph.v()))   # O(V) time
    for node in graph.iternodes():   # put nodes into degree buckets, O(V) time
        bucket[graph.degree(node)].add(node)
    # Keep peeling off while vertices of degree 2 remain.
    deg = 2
    while bucket[deg]:
        source = bucket[deg].pop()
        order.append(source)
        node1, node2 = list(graph_copy.iteradjacent(source))
        edge = Edge(node1, node2)
        if graph_copy.has_edge(edge):
            # The edge already exists, so the endpoint degrees must be
            # updated: removing the edges at source decreases them.
            deg1 = degree_dict[node1]   # old degree
            bucket[deg1].remove(node1)
            bucket[deg1-1].add(node1)
            degree_dict[node1] = deg1-1   # new degree
            deg2 = degree_dict[node2]   # old degree
            bucket[deg2].remove(node2)
            bucket[deg2-1].add(node2)
            degree_dict[node2] = deg2-1   # new degree
        else:   # no degree updates needed here
            graph_copy.add_edge(edge)
        # Remove the edges incident to source.
        graph_copy.del_edge(Edge(source, node1))
        graph_copy.del_edge(Edge(source, node2))
    # Check what is left.
    len1 = len(bucket[1])
    if len1 == 2 and len(order) + len1 == graph.v():
        # A single edge remains; append its endpoints to the PEO.
        order.append(bucket[1].pop())
        order.append(bucket[1].pop())
    elif len(bucket[len1]) == 1 and len(order) + len1 + 1 == graph.v():
        # A star remains (the jackknife case).
        while bucket[1]:
            order.append(bucket[1].pop())
        order.append(bucket[len1].pop())
    else:
        raise ValueError("not an sp-graph")
    return order
def find_peo_spgraph2(graph):   # graph has to be connected
    """Find PEO for a supergraph (2-tree) of an sp-graph.

    Variant of find_peo_spgraph1 that tracks a plain set of active
    degree-2 vertices instead of degree buckets.
    """
    if graph.is_directed():
        raise ValueError("the graph is directed")
    order = list()   # PEO of 2-tree
    graph_copy = graph.copy()
    degree2 = set(node for node in graph.iternodes()
                  if graph.degree(node) == 2)   # active nodes with degree 2
    # Keep peeling off while vertices of degree 2 remain.
    while degree2:
        source = degree2.pop()
        if graph_copy.degree(source) != 2:
            # A vertex's degree may have decreased in the meantime!
            continue
        order.append(source)
        node1, node2 = tuple(graph_copy.iteradjacent(source))
        edge = Edge(node1, node2)
        if graph_copy.has_edge(edge):
            # The edge already exists, so the endpoint degrees must be
            # updated: removing the edges at source decreases them.
            if graph_copy.degree(node1) == 3:
                degree2.add(node1)
            if graph_copy.degree(node2) == 3:
                degree2.add(node2)
        else:   # no degree updates needed here
            graph_copy.add_edge(edge)
        # Remove the edges incident to source.
        graph_copy.del_edge(Edge(source, node1))
        graph_copy.del_edge(Edge(source, node2))
    # Check what is left.
    degree1 = set(node for node in graph_copy.iternodes()
                  if graph_copy.degree(node) == 1)
    if len(degree1) == 2 and len(order) + 2 == graph.v():
        # A single edge remains; append its endpoints to the PEO.
        order.append(degree1.pop())
        order.append(degree1.pop())
    elif len(order) + len(degree1) + 1 == graph.v():
        # A star remains (the jackknife case).
        # Find the star's center.
        for node in graph_copy.iternodes():
            deg = graph_copy.degree(node)
            if deg > 1:
                if deg == len(degree1):
                    break
                else:
                    raise ValueError("not an sp-graph")
        while degree1:
            order.append(degree1.pop())
        order.append(node)
    else:
        raise ValueError("not an sp-graph")
    return order
# Default implementation: the bucket-based variant.
find_peo_spgraph = find_peo_spgraph1

# EOF
| {
"content_hash": "fa8725f1fb57631c1b17106e4c79c2c1",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 76,
"avg_line_length": 36.995049504950494,
"alnum_prop": 0.5660377358490566,
"repo_name": "ufkapano/graphs-dict",
"id": "1e552863946f633178b301536dd52d498d66aa37",
"size": "7492",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graphtheory/seriesparallel/sptools.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "970894"
}
],
"symlink_target": ""
} |
'''Regenerates the strong name verification scripts based on the list of
assemblies stored in this file.
The generated files should be checked in.
'''
# Assemblies whose strong-name verification is toggled by the generated
# .reg scripts; kept sorted so regenerated files diff cleanly.
ASSEMBLIES = sorted([
    "AnalysisTest",
    "AnalysisTests",
    "AzurePublishingUITests",
    "DebuggerTests",
    "DebuggerUITests",
    "DjangoTests",
    "DjangoUITests",
    "FastCgiTest",
    "IronPythonTests",
    "Microsoft.IronPythonTools.Resolver",
    "Microsoft.PythonTools",
    "Microsoft.PythonTools.Analysis",
    "Microsoft.PythonTools.Analysis.Browser",
    "Microsoft.PythonTools.Analyzer",
    "Microsoft.PythonTools.Attacher",
    "Microsoft.PythonTools.AttacherX86",
    "Microsoft.PythonTools.AzureSetup",
    "Microsoft.PythonTools.BuildTasks",
    "Microsoft.PythonTools.Debugger",
    "Microsoft.PythonTools.Django",
    "Microsoft.PythonTools.EnvironmentsList",
    "Microsoft.PythonTools.EnvironmentsList.Host",
    "Microsoft.PythonTools.ExpressInteractiveWorkaround",
    "Microsoft.PythonTools.ImportWizard",
    "Microsoft.PythonTools.IronPython",
    "Microsoft.PythonTools.IronPython.Interpreter",
    "Microsoft.PythonTools.ML",
    "Microsoft.PythonTools.Profiling",
    "Microsoft.PythonTools.ProjectWizards",
    "Microsoft.PythonTools.PyKinect",
    "Microsoft.PythonTools.TestAdapter",
    "Microsoft.PythonTools.Uwp",
    "Microsoft.PythonTools.VSInterpreters",
    "Microsoft.PythonTools.VsLogger",
    "Microsoft.PythonTools.WebRole",
    "MockVsTests",
    "ProfilingUITests",
    "PythonToolsTests",
    "PythonToolsMockTests",
    "PythonToolsUITests",
    "ReplWindowUITests",
    "SharedProjectTests",
    "TestAdapterTests",
    "TestSccPackage",
    "TestUtilities",
    "TestUtilities.Python",
    "TestUtilities.Python.Analysis",
    "TestUtilities.UI",
    "VSInterpretersTests",
])
def EnableSkipVerification():
    """Yield the lines of a .reg file that enables skip-verification for
    every assembly, in both the 64-bit and 32-bit (Wow6432Node) registry
    views."""
    yield 'Windows Registry Editor Version 5.00'
    yield ''
    # Fix: raw strings. The originals relied on `\S`, `\M`, `\V`, `\{`
    # being unrecognized escapes that pass through unchanged — a
    # DeprecationWarning since Python 3.6 and a SyntaxWarning in 3.12+.
    for name in ASSEMBLIES:
        yield r'[HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\StrongName\Verification\{},B03F5F7F11D50A3A]'.format(name)
    for name in ASSEMBLIES:
        yield r'[HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\Microsoft\StrongName\Verification\{},B03F5F7F11D50A3A]'.format(name)
def EnableSkipVerificationX86():
    """Yield the lines of a .reg file that enables skip-verification on a
    32-bit-only OS (no Wow6432Node view)."""
    yield 'Windows Registry Editor Version 5.00'
    yield ''
    # Fix: raw string avoids invalid escape sequences (SyntaxWarning on
    # modern Python); the emitted text is unchanged.
    for name in ASSEMBLIES:
        yield r'[HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\StrongName\Verification\{},B03F5F7F11D50A3A]'.format(name)
def DisableSkipVerification():
    """Yield the lines of a .reg file that removes the skip-verification
    entries from both 64-bit and 32-bit registry views (leading '-' in a
    .reg key name deletes the key)."""
    yield 'Windows Registry Editor Version 5.00'
    yield ''
    # Fix: raw strings avoid invalid escape sequences; output is unchanged.
    for name in ASSEMBLIES:
        yield r'[-HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\StrongName\Verification\{},B03F5F7F11D50A3A]'.format(name)
    for name in ASSEMBLIES:
        yield r'[-HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\Microsoft\StrongName\Verification\{},B03F5F7F11D50A3A]'.format(name)
def DisableSkipVerificationX86():
    """Yield the lines of a .reg file that removes the skip-verification
    entries on a 32-bit-only OS."""
    yield 'Windows Registry Editor Version 5.00'
    yield ''
    # Fix: raw string avoids invalid escape sequences; output is unchanged.
    for name in ASSEMBLIES:
        yield r'[-HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\StrongName\Verification\{},B03F5F7F11D50A3A]'.format(name)
# Each generator yields the content of one .reg file; the file is named
# after the function's __name__ by the __main__ block below.
FILES = [
    EnableSkipVerification,
    EnableSkipVerificationX86,
    DisableSkipVerification,
    DisableSkipVerificationX86,
]
if __name__ == '__main__':
    # Regenerate one .reg file per generator, named after the function.
    for generator in FILES:
        reg_name = generator.__name__ + '.reg'
        with open(reg_name, 'w', encoding='utf-8') as reg_file:
            reg_file.writelines(entry + '\n' for entry in generator())
        print('Wrote {}.reg'.format(generator.__name__))
| {
"content_hash": "571bc2e509476a1d13b6b44afdb3d30c",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 125,
"avg_line_length": 34.775510204081634,
"alnum_prop": 0.7153755868544601,
"repo_name": "alanch-ms/PTVS",
"id": "2594426aede680df7b3ebca09f1e4bb5bae224d5",
"size": "3432",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/Prerequisites/generate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "109"
},
{
"name": "Batchfile",
"bytes": "2973"
},
{
"name": "C",
"bytes": "452311"
},
{
"name": "C#",
"bytes": "12610548"
},
{
"name": "C++",
"bytes": "189225"
},
{
"name": "CSS",
"bytes": "7024"
},
{
"name": "HTML",
"bytes": "55775"
},
{
"name": "JavaScript",
"bytes": "85712"
},
{
"name": "Objective-C",
"bytes": "4201"
},
{
"name": "PowerShell",
"bytes": "55709"
},
{
"name": "Python",
"bytes": "2635441"
},
{
"name": "Smarty",
"bytes": "8356"
},
{
"name": "Tcl",
"bytes": "24968"
}
],
"symlink_target": ""
} |
import math
import hashlib
def sxor(s1, s2):
    """
    XOR strings

    Pairs up characters from both inputs (truncating to the shorter one)
    and XORs their code points.
    """
    pairs = zip(s1, s2)
    return ''.join(chr(ord(left) ^ ord(right)) for left, right in pairs)
# Base58 alphabet (Bitcoin variant: no 0, O, I, or l).
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)


def b58encode(v):
    """ encode v, which is a string of bytes, to base58.

    Leading zero bytes become leading '1' characters (Bitcoin's
    leading-zero compression convention).
    """
    # Fix: plain `0` replaces the Python-2-only literal `0L`. Ints
    # auto-promote to arbitrary precision on Python 2, and `0L` is a
    # syntax error on Python 3, so this is strictly more portable.
    long_value = 0
    for (i, c) in enumerate(v[::-1]):
        long_value += ord(c) << (8 * i)  # 2x speedup vs. exponentiation
    result = ''
    while long_value >= __b58base:
        div, mod = divmod(long_value, __b58base)
        result = __b58chars[mod] + result
        long_value = div
    result = __b58chars[long_value] + result
    # Bitcoin does a little leading-zero-compression:
    # leading 0-bytes in the input become leading-1s
    nPad = 0
    for c in v:
        if c == '\0':
            nPad += 1
        else:
            break
    return (__b58chars[0] * nPad) + result
def b58decode(v, length=None):
    """ decode v into a string of len bytes

    Returns None when `length` is given and the decoded result is not
    exactly that many bytes.
    """
    # Fix: plain `0` replaces the Python-2-only literal `0L`; ints already
    # have arbitrary precision on both Python 2 and 3.
    long_value = 0
    for (i, c) in enumerate(v[::-1]):
        long_value += __b58chars.find(c) * (__b58base ** i)
    result = ''
    while long_value >= 256:
        div, mod = divmod(long_value, 256)
        result = chr(mod) + result
        long_value = div
    result = chr(long_value) + result
    # Leading '1' characters decode to leading zero bytes.
    nPad = 0
    for c in v:
        if c == __b58chars[0]:
            nPad += 1
        else:
            break
    result = chr(0) * nPad + result
    if length is not None and len(result) != length:
        return None
    return result
def get_code_string(base):
    """Return the digit alphabet for `base` (10, 16, 58 or 256).

    Raises ValueError for any other base.
    """
    alphabets = {
        10: "0123456789",
        16: "0123456789abcdef",
        58: "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz",
    }
    if base in alphabets:
        return alphabets[base]
    if base == 256:
        return ''.join([chr(code) for code in range(256)])
    raise ValueError("Invalid base!")
def encode(val, base, minlen=0):
    """Encode integer `val` in the given base, left-padding with the zero
    digit up to `minlen` characters. Returns '' for val == 0 and minlen == 0
    (same as the original behavior)."""
    code_string = get_code_string(base)
    result = ""
    val = int(val)
    while val > 0:
        result = code_string[val % base] + result
        # Fix: floor division. The original `val /= base` only works on
        # Python 2; on Python 3 true division yields a float, breaking both
        # the loop condition and the indexing above.
        val //= base
    if len(result) < minlen:
        result = code_string[0] * (int(minlen) - len(result)) + result
    return result
def decode(string, base):
    """Decode `string`, written in the given base, to an integer.

    Hex input is lowercased first so 'FF' and 'ff' decode identically.
    """
    code_string = get_code_string(base)
    result = 0
    if base == 16:
        string = string.lower()
    # Iterate the characters directly instead of repeatedly re-slicing the
    # string (`string = string[1:]`), which made the original loop O(n^2).
    for char in string:
        result = result * base + code_string.find(char)
    return result
| {
"content_hash": "48bdf1eabe80a06ae73a810f5d2af1cb",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 75,
"avg_line_length": 23.12621359223301,
"alnum_prop": 0.593198992443325,
"repo_name": "inuitwallet/bippy",
"id": "b3ea522562b02b3cc949941f6668ef6ad3084265",
"size": "2382",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "num/enc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "158559"
}
],
"symlink_target": ""
} |
"""Testing the linked_list class - CF 401 Python Week 2 Assignment."""
import pytest
@pytest.fixture
def new_empty_stack():
    """Create an empty object of type Stack to be used in test functions."""
    from stack import Stack
    return Stack()
@pytest.fixture
def stack_123():
    """Return a object of type Stack containing 3 nodes with values 1, 2, 3."""
    from stack import Stack
    return Stack([1, 2, 3])
def test_create_new_empty_Stack_out_of_Linked_List():
    """Test if a new object of type Stack gets instantiated correctly."""
    from stack import Stack
    fresh = Stack()
    # An empty stack's underlying linked list has no head node.
    assert fresh._container.head is None
def test_create_empty_Linked_List():
    """Test if a new empty object of type Stack gets created correctly."""
    from linked_list import Linked_List
    fresh_list = Linked_List()
    assert fresh_list.head is None
def test_empty_new_node_object_has_none():
    """Test the emptyiness of a node."""
    from linked_list import Node
    bare_node = Node()
    # A default-constructed node carries neither a value nor a successor.
    assert bare_node.value is None
    assert bare_node.nxt is None
def test_new_node_has_data():
    """Test if a new node is correctly created with data."""
    from linked_list import Node
    filled_node = Node("five")
    assert filled_node.value == "five"
def test_when_initialized_with_iterable_makes_nodes():
    """Test if the number of nodes matches the number of elements passed with
    the iterable."""
    from stack import Stack
    seed_values = [1, 2, 3]
    built = Stack(seed_values)
    assert len(built._container) == len(seed_values)
def test_that_pop_fails_when_called_on_empty_stack(new_empty_stack):
    """Test that calling pop on empty Stack raises an exception."""
    # Cleanup: the `from stack import Stack` import here was unused — the
    # fixture already supplies the instance.
    with pytest.raises(IndexError):
        new_empty_stack.pop()
def test_that_pop_removes_the_head(stack_123):
    """Test that Stack.pop() removes the top element of the Stack."""
    # Cleanup: dropped the unused `from stack import Stack` import.
    stack_123.pop()
    assert len(stack_123._container) == 2
def test_that_push_adds_on_top_of_existing_stack(stack_123):
    """Test that Stack.push() adds on top of an existing stack."""
    # Cleanup: dropped the unused `from stack import Stack` import.
    stack_123.push(4)
    assert stack_123._container.head.value == 4
def test_that_push_adds_on_top_of_an_empty_stack(new_empty_stack):
    """Test that Stack.push() adds on top of an empty stack by moving the head"""
    # Bug fix: the original precondition line lacked `assert`, so it was a
    # no-op expression that verified nothing. Also dropped the unused
    # `from stack import Stack` import.
    assert new_empty_stack._container.head is None
    new_empty_stack.push(1)
    assert new_empty_stack._container.head.value == 1
| {
"content_hash": "0914d0192df991d042ccd6756a777829",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 81,
"avg_line_length": 30.61904761904762,
"alnum_prop": 0.6819595645412131,
"repo_name": "CCallahanIV/data-structures",
"id": "cffc2ecc01382ae503dfab4f2f1b4eb3bec0c56e",
"size": "2572",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/test_stack.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "142894"
}
],
"symlink_target": ""
} |
import json
import logging
import unittest
from mixbox.binding_utils import ExternalEncoding
from mixbox.entities import Entity
from mixbox.vendor import six
import cybox.utils
from cybox.compat import MutableSequence
logger = logging.getLogger(__name__)
def assert_equal_ignore(item1, item2, ignore_keys=None):
    """Recursively compare two dictionaries, ignoring differences in some keys.

    Dicts are compared key-by-key (minus `ignore_keys`), lists element-wise,
    and everything else with plain equality. Raises AssertionError on the
    first mismatch.
    """
    ignored = set(ignore_keys or [])
    if isinstance(item1, dict) and isinstance(item2, dict):
        shared_keys = (set(item1.keys()) | set(item2.keys())) - ignored
        for key in shared_keys:
            assert key in item1, "Item 1 is missing %s" % key
            assert key in item2, "Item 2 is missing %s" % key
            assert_equal_ignore(item1.get(key), item2.get(key), ignore_keys)
    elif isinstance(item1, list) and isinstance(item2, list):
        assert len(item1) == len(item2), "Lists are of different lengths"
        for (left, right) in zip(item1, item2):
            assert_equal_ignore(left, right, ignore_keys)
    else:
        assert item1 == item2, "%s != %s" % (item1, item2)
def assert_entity_equals(entity, other, name=None, stack=None):
    """Assert all of the TypedFields in two Entities are equal.

    Recurses through Entity fields and "multiple"-field sequences.
    `stack` accumulates the field-name path for error messages; callers
    leave it as None and a fresh list is built per top-level call.
    """
    # Shorten the lines.
    is_entity = lambda x: isinstance(x, Entity)
    is_mutableseq = lambda x: isinstance(x, MutableSequence)
    if stack is None:
        stack = []
    # Track the current field name so mismatch messages show the path.
    if name is not None:
        stack.append(name)
    if is_entity(entity) and is_entity(other):
        # Compare every TypedField of the two entities, descriptor by
        # descriptor, recursing into nested entities.
        for name, var in entity.typed_fields_with_attrnames():
            assert_entity_equals(
                var.__get__(entity),
                var.__get__(other),
                name=name,
                stack=stack
            )
    elif is_mutableseq(entity) and is_mutableseq(other):
        # "multiple" TypedFields store their contents in mutable sequences.
        assert len(entity) == len(other)
        for x, y in zip(entity, other):
            assert_entity_equals(x, y, None, stack=stack)
    else:
        # Leaf values: plain equality, with the field path in the message.
        assert entity == other, "(%s) %r != %r stack=%s" % (name, entity, other, stack)
    # Pop only what this call pushed, restoring the caller's path.
    if name is not None and stack:
        stack.pop()
def round_trip(o, output=False, list_=False):
    """ Performs all eight conversions to verify import/export functionality.
    1. cybox.Entity -> dict/list
    2. dict/list -> JSON string
    3. JSON string -> dict/list
    4. dict/list -> cybox.Entity
    5. cybox.Entity -> Bindings Object
    6. Bindings Object -> XML String
    7. XML String -> Bindings Object
    8. Bindings object -> cybox.Entity
    It returns the final object, so tests which call this function can check to
    ensure it was not modified during any of the transforms.

    Set output=True to log each intermediate representation; set list_=True
    for entities that serialize to a list rather than a dict.
    """
    klass = o.__class__
    if output:
        logger.debug("Class: {0}".format(klass))
        logger.debug("-" * 40)
    # 1. cybox.Entity -> dict/list
    if list_:
        d = o.to_list()
    else:
        d = o.to_dict()
    # 2. dict/list -> JSON string
    json_string = json.dumps(d)
    if output:
        logger.debug(json_string)
        logger.debug("-" * 40)
    # Before parsing the JSON, make sure the cache is clear
    cybox.utils.cache_clear()
    # 3. JSON string -> dict/list
    d2 = json.loads(json_string)
    # 4. dict/list -> cybox.Entity
    if list_:
        o2 = klass.from_list(d2)
    else:
        o2 = klass.from_dict(d2)
    # 5. cybox.Entity -> Bindings Object
    # NOTE(review): xobj is never used afterwards; presumably to_obj() is
    # exercised only to prove the conversion succeeds — confirm.
    xobj = o2.to_obj()
    # 6. Bindings Object -> XML String
    xml_string = o2.to_xml(encoding=ExternalEncoding)
    # Explicitly check to see if it's a Unicode string before trying to decode
    # it.
    if not isinstance(xml_string, six.text_type):
        xml_string = xml_string.decode(ExternalEncoding)
    if output:
        logger.debug(xml_string)
        logger.debug("-" * 40)
    # Before parsing the XML, make sure the cache is clear
    cybox.utils.cache_clear()
    # 7. XML String -> Bindings Object
    xobj2 = klass._binding.parseString(xml_string)
    # 8. Bindings object -> cybox.Entity
    o3 = klass.from_obj(xobj2)
    return o3
def round_trip_dict(cls, dict_):
    """Convert `dict_` to a binding object of `cls` and back to a dict."""
    intermediate = cls.object_from_dict(dict_)
    return cls.dict_from_object(intermediate)
def round_trip_list(cls, list_):
    """Convert `list_` to a binding object of `cls` and back to a list."""
    intermediate = cls.object_from_list(list_)
    return cls.list_from_object(intermediate)
class EntityTestCase(object):
    """A mixin class for testing CybOX Entities.

    Subclasses provide `klass` (the Entity subclass under test) and
    `_full_dict` (a complete dict representation of one instance).
    """

    def setUp(self):
        self.assertNotEqual(self.klass, None)
        self.assertNotEqual(self._full_dict, None)

    def test_round_trip_dict(self):
        # round_trip_dict never builds Python Entity objects, so this is a
        # weaker check than the entity-based tests below.
        result_dict = round_trip_dict(self.klass, self._full_dict)
        self.maxDiff = None
        self.assertEqual(self._full_dict, result_dict)

    def test_round_trip(self):
        # Start from the dict fixture, push the entity through all eight
        # conversions, then compare dict representations — currently the
        # only practical way to compare two entity values.
        entity = self.klass.from_dict(self._full_dict)
        round_tripped = round_trip(entity, output=True)
        self.maxDiff = None
        self.assertEqual(self._full_dict, round_tripped.to_dict())

    def test_round_trip_entity(self):
        # Field-by-field comparison of the original and round-tripped
        # entities via their TypedFields.
        entity = self.klass.from_dict(self._full_dict)
        round_tripped = round_trip(entity, output=True)
        assert_entity_equals(entity, round_tripped)

    # TODO: Assert to_xml() on two round-tripped entities are identical.
# Allow running this test-helper module directly via unittest discovery.
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "c7c3837b38bb9b15f841e9777c753edd",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 87,
"avg_line_length": 30.87958115183246,
"alnum_prop": 0.6256358087487284,
"repo_name": "CybOXProject/python-cybox",
"id": "60c27a36dcb55f0182496940b88ca67ac1424e60",
"size": "6003",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cybox/test/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "4610747"
}
],
"symlink_target": ""
} |
import telnetlib
from pprint import pprint as pp
import time
# Standard telnet port and a conservative read timeout (seconds).
TELNET_PORT = 23
TELNET_TIMEOUT = 6


def login(remote_conn, username, password):
    """Answer the username/password prompts on an open telnet session.

    Returns the prompt text that was read while logging in. The prompt
    markers omit their first letter to match regardless of capitalization.
    """
    prompt_text = remote_conn.read_until("sername:", TELNET_TIMEOUT)
    remote_conn.write(username + '\n')
    prompt_text += remote_conn.read_until("assword:", TELNET_TIMEOUT)
    remote_conn.write(password + '\n')
    return prompt_text
def send_command(remote_conn, cmd):
    """Send one command over the telnet session and return its output.

    Sleeps briefly so the device has time to respond before the
    non-blocking read.
    """
    remote_conn.write(cmd.rstrip() + '\n')
    time.sleep(1)
    return remote_conn.read_very_eager()
def main():
    """Log in to the lab router, capture ARP table and config, and save
    each to its own text file."""
    # NOTE(review): credentials are hard-coded; fine for a lab exercise,
    # but move them to env vars/prompting for anything real.
    ip_addr = '184.105.247.70'
    username = 'pyclass'
    password = '88newclass'
    remote_conn = telnetlib.Telnet(ip_addr, TELNET_PORT, TELNET_TIMEOUT)
    output = login(remote_conn, username, password)
    print("doing stuff and thangs\n")
    # Disable paging so each 'show' command returns in a single read.
    output = send_command(remote_conn, 'terminal length 0')
    arp_output = send_command(remote_conn, 'show ip arp')
    config_output = send_command(remote_conn, 'show config')
    output = remote_conn.read_very_eager()
    print(output)
    # Fix: context managers guarantee the files are closed even if a write
    # fails (the originals leaked the handles on error). Dead commented-out
    # code removed.
    with open("config.txt", 'w') as config_file:
        config_file.write(config_output)
    with open("arp.txt", 'w') as arp_file:
        arp_file.write(arp_output)


if __name__ == '__main__':
    # Fix: guard the entry point so importing this module no longer opens
    # a network connection as a side effect.
    main()
| {
"content_hash": "43c8f9c556f1da14b0063e184e896743",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 72,
"avg_line_length": 20.915254237288135,
"alnum_prop": 0.6426256077795786,
"repo_name": "sgreen1/pynet",
"id": "e079c7273b03cfdab709ff44d5fec375ed93e373",
"size": "1257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "week2/week1-1.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16349"
}
],
"symlink_target": ""
} |
"""
Add color cycling fun to your i3bar.
This is the most pointless yet most exciting module you can imagine.
It allows color cycling of modules. Imagine the joy of having the current time
change through the colors of the rainbow.
If you were completely insane you could also use it to implement the i3bar
equivalent of the <blink> tag and cause yourself endless headaches and the
desire to vomit.
The color for the contained module(s) is changed and cycles through your chosen
gradient by default this is the colors of the rainbow. This module will
increase the amount of updates that py3status needs to do so should be used
sparingly.
Configuration parameters:
cycle_time: How often we change this color in seconds
(default 1)
force: If True then the color will always be set. If false the color will
only be changed if it has not been set by a module.
(default False)
format: display format for this module (default '{output}')
gradient: The colors we will cycle through, This is a list of hex values
*(default [ '#FF0000', '#FFFF00', '#00FF00', '#00FFFF',
'#0000FF', '#FF00FF', '#FF0000', ])*
multi_color: If True then each module the rainbow contains will be colored
differently (default True)
steps: Number of steps between each color in the gradient
(default 10)
Example config:
```
order += "rainbow time"
# show time colorfully
rainbow time {
time {}
}
```
Example blinking config:
```
order += "rainbow blink_time"
# blinking text black/white
rainbow blink_time{
gradient = [
'#FFFFFF',
'#000000',
]
steps = 1
time {}
}
```
@author tobes
SAMPLE OUTPUT
[
{'color': '#FF0000', 'full_text': 'module 1'},
{'color': '#CCFF00', 'full_text': 'module 2', 'separator': True},
{'color': '#00FF66', 'full_text': 'module 3', 'separator': True},
{'color': '#0066FF', 'full_text': 'module 4', 'separator': True},
{'color': '#CC00FF', 'full_text': 'module 5', 'separator': True}
]
"""
from __future__ import division
import re
import math
from time import time
HEX_RE = re.compile('#([0-9a-fA-F]{3}|[0-9a-fA-F]{6})')
class Py3status:
    """
    """
    # configuration parameters (overridable from the user's config)
    cycle_time = 1
    force = False
    format = '{output}'
    # Default gradient: rainbow hues, repeating the starting red at the end
    # so the interpolated cycle wraps around smoothly.
    gradient = [
        '#FF0000',
        '#FFFF00',
        '#00FF00',
        '#00FFFF',
        '#0000FF',
        '#FF00FF',
        '#FF0000',
    ]
    multi_color = True
    steps = 10
    class Meta:
        # Marks this module as a container that wraps other modules.
        container = True
    def post_config_hook(self):
        # Precompute the full interpolated color table once, after config.
        def from_hex(color):
            """
            Convert hex color #xxx or #xxxxxx to [r, g, b].
            """
            # Anything that isn't a valid hex color falls back to white.
            if not HEX_RE.match(color):
                color = '#FFF'
            if len(color) == 7:
                return (int(color[1:3], 16), int(color[3:5], 16),
                        int(color[5:], 16))
            # Short #xyz form: each nibble is doubled (0xF * 17 == 0xFF).
            return (int(color[1], 16) * 17, int(color[2], 16) * 17,
                    int(color[3], 16) * 17)
        def to_hex(color):
            """
            Convert [r, g, b] to hex.
            """
            return '#{:02X}{:02X}{:02X}'.format(
                int(color[0]), int(color[1]), int(color[2]))
        def make_color(c1, c2, t):
            """
            Generate a mid color between c1 and c2.
            """
            # Linear interpolation per channel at position t in [0, 1].
            def fade(i):
                a = c1[i]
                b = c2[i]
                x = (b * t)
                x += (a * (1 - t))
                return x
            c1 = from_hex(c1)
            c2 = from_hex(c2)
            return (fade(0), fade(1), fade(2))
        colors = []
        if self.steps == 1:
            # steps == 1 means no interpolation: use the stops as given.
            colors = [to_hex(from_hex(x)) for x in self.gradient]
        else:
            # `steps` interpolated colors between each consecutive pair.
            for i in range(len(self.gradient) - 1):
                for j in range(self.steps):
                    colors.append(to_hex(make_color(self.gradient[
                        i], self.gradient[i + 1], j / (self.steps))))
        self.colors = colors
        self.active_color = 0
        self._set_cycle_time()
    def _set_cycle_time(self):
        """
        Set next cycle update time synced to nearest second or 0.1 of second.
        """
        now = time()
        try:
            # Drift: how far past the previously scheduled tick we are.
            cycle_time = now - self._cycle_time
            if cycle_time < 0:
                cycle_time = 0
        except AttributeError:
            # First call: no previous tick has been recorded yet.
            cycle_time = 0
        cycle_time += self.cycle_time
        if cycle_time == int(cycle_time):
            self._cycle_time = math.ceil(now + cycle_time)
        else:
            self._cycle_time = math.ceil((now + cycle_time) * 10) / 10
        # NOTE(review): this assignment overwrites the ceil()-synchronized
        # value computed just above, making that whole branch dead code.
        # Either it is leftover debugging or the syncing was deliberately
        # abandoned — confirm intent before changing either part.
        self._cycle_time = now + self.cycle_time
    def _get_current_output(self):
        """
        Get child modules output.
        """
        output = []
        for item in self.items:
            out = self.py3.get_output(item)
            # Ensure each child's final block carries a separator so the
            # contained modules stay visually distinct in the bar.
            if out and 'separator' not in out[-1]:
                out[-1]['separator'] = True
            output += out
        return output
    def rainbow(self):
        """
        Make a rainbow!
        """
        if not self.items:
            # Nothing contained: render empty and never update again.
            return {
                'full_text': '',
                'cached_until': self.py3.CACHE_FOREVER
            }
        # Advance the palette when within 10% of the scheduled tick.
        if time() >= self._cycle_time - (self.cycle_time / 10):
            self.active_color = (self.active_color + 1) % len(self.colors)
            self._set_cycle_time()
        color = self.colors[self.active_color]
        content = self._get_current_output()
        output = []
        if content:
            # Offset between adjacent blocks so the palette is spread
            # evenly across all contained modules.
            step = len(self.colors) // len(content)
            for index, item in enumerate(content):
                if self.multi_color:
                    offset = (self.active_color + (index * step)) % len(self.colors)
                    color = self.colors[offset]
                obj = item.copy()
                # Respect a color the module set itself unless `force`.
                if self.force or not obj.get('color'):
                    obj['color'] = color
                output.append(obj)
        composites = {'output': self.py3.composite_create(output)}
        rainbow = self.py3.safe_format(self.format, composites)
        return {
            'cached_until': self._cycle_time,
            'full_text': rainbow
        }
if __name__ == "__main__":
    """
    Run module in test mode.
    """
    # module_test repeatedly renders this module's output for manual
    # inspection outside a running i3bar.
    from py3status.module_test import module_test
    module_test(Py3status)
| {
"content_hash": "6812e909b96b0a2170c833422840a0bb",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 80,
"avg_line_length": 28.385650224215247,
"alnum_prop": 0.5298578199052133,
"repo_name": "docwalter/py3status",
"id": "62096c8972e10178862f1566a783dced9888d3c8",
"size": "6354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py3status/modules/rainbow.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "670177"
}
],
"symlink_target": ""
} |
import argparse
import csv
# parse args
parser = argparse.ArgumentParser(
    description='collect running time of workers of a query')
# Bug fix: the -o/-i help strings were swapped in the original
# (-o was described as "input file" and -i as "output file").
parser.add_argument("-o", "--output", type=str, help="output file")
parser.add_argument("-i", "--input", type=str, help="input file")
parser.add_argument("-s", "--shuffle",
                    help="shuffle file or not", action="store_true")
args = parser.parse_args()
# sort events according to its time stamp
def sort_events(sortfunc):
    """Read CSV events from args.input, sort them with `sortfunc` as the
    key, and write the sorted rows to args.output (echoing each row)."""
    with open(args.input, 'rb') as f:
        events = [row for row in csv.reader(f)]
    # Bug fix: sorted() returns a NEW list; the original discarded its
    # result, so the rows were written back in their original order.
    events.sort(key=sortfunc)
    with open(args.output, 'wb') as w:
        csvwriter = csv.writer(w)
        for event in events:
            print(event)
            csvwriter.writerow(event)
def main():
    """Sort by the shuffle timestamp (column 3) when --shuffle is set,
    otherwise by the regular timestamp (column 2)."""
    print(args.output)
    print(args.input)
    column = 3 if args.shuffle else 2
    sort_events(lambda row: long(row[column]))
# Script entry point.
if __name__ == "__main__":
    main()
| {
"content_hash": "41d05ea9dc2a6a344050d65c8ffd04b4",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 68,
"avg_line_length": 27.37837837837838,
"alnum_prop": 0.6199407699901284,
"repo_name": "stechu/QuerySimulator",
"id": "46a577538df5e1a249aa9e990c6d7454ce98bdf7",
"size": "1036",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utilities/sort.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7758"
}
],
"symlink_target": ""
} |
from unittest import mock
from django.db import connection, transaction
from django.test import TestCase, skipUnlessDBFeature
from django.test.utils import CaptureQueriesContext
from .models import (
Article, InheritedArticleA, InheritedArticleB, NullablePublicationThrough,
NullableTargetArticle, Publication,
)
class ManyToManyTests(TestCase):
    def setUp(self):
        """Create four Publications and four Articles with m2m links:
        a1 -> p1; a2 -> all four; a3 and a4 -> p2."""
        # Create a couple of Publications.
        self.p1 = Publication.objects.create(title='The Python Journal')
        self.p2 = Publication.objects.create(title='Science News')
        self.p3 = Publication.objects.create(title='Science Weekly')
        self.p4 = Publication.objects.create(title='Highlights for Children')
        self.a1 = Article.objects.create(headline='Django lets you build Web apps easily')
        self.a1.publications.add(self.p1)
        self.a2 = Article.objects.create(headline='NASA uses Python')
        self.a2.publications.add(self.p1, self.p2, self.p3, self.p4)
        self.a3 = Article.objects.create(headline='NASA finds intelligent life on Earth')
        self.a3.publications.add(self.p2)
        self.a4 = Article.objects.create(headline='Oxygen-free diet works wonders')
        self.a4.publications.add(self.p2)
    def test_add(self):
        """add()/create() on the forward side of the m2m relation: unsaved
        instances are rejected, duplicates are ignored, and wrong types
        raise TypeError."""
        # Create an Article.
        a5 = Article(headline='Django lets you create Web apps easily')
        # You can't associate it with a Publication until it's been saved.
        msg = (
            '"<Article: Django lets you create Web apps easily>" needs to have '
            'a value for field "id" before this many-to-many relationship can be used.'
        )
        with self.assertRaisesMessage(ValueError, msg):
            getattr(a5, 'publications')
        # Save it!
        a5.save()
        # Associate the Article with a Publication.
        a5.publications.add(self.p1)
        self.assertQuerysetEqual(a5.publications.all(), ['<Publication: The Python Journal>'])
        # Create another Article, and set it to appear in both Publications.
        a6 = Article(headline='ESA uses Python')
        a6.save()
        a6.publications.add(self.p1, self.p2)
        a6.publications.add(self.p3)
        # Adding a second time is OK
        a6.publications.add(self.p3)
        self.assertQuerysetEqual(
            a6.publications.all(),
            [
                '<Publication: Science News>',
                '<Publication: Science Weekly>',
                '<Publication: The Python Journal>',
            ]
        )
        # Adding an object of the wrong type raises TypeError
        with self.assertRaisesMessage(TypeError, "'Publication' instance expected, got <Article"):
            with transaction.atomic():
                a6.publications.add(a5)
        # Add a Publication directly via publications.add by using keyword arguments.
        a6.publications.create(title='Highlights for Adults')
        self.assertQuerysetEqual(
            a6.publications.all(),
            [
                '<Publication: Highlights for Adults>',
                '<Publication: Science News>',
                '<Publication: Science Weekly>',
                '<Publication: The Python Journal>',
            ]
        )
    def test_reverse_add(self):
        """add()/create() via the reverse accessor (publication.article_set)
        updates both sides of the relation."""
        # Adding via the 'other' end of an m2m
        a5 = Article(headline='NASA finds intelligent life on Mars')
        a5.save()
        self.p2.article_set.add(a5)
        self.assertQuerysetEqual(
            self.p2.article_set.all(),
            [
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: NASA finds intelligent life on Mars>',
                '<Article: NASA uses Python>',
                '<Article: Oxygen-free diet works wonders>',
            ]
        )
        self.assertQuerysetEqual(a5.publications.all(), ['<Publication: Science News>'])
        # Adding via the other end using keywords
        self.p2.article_set.create(headline='Carbon-free diet works wonders')
        self.assertQuerysetEqual(
            self.p2.article_set.all(),
            [
                '<Article: Carbon-free diet works wonders>',
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: NASA finds intelligent life on Mars>',
                '<Article: NASA uses Python>',
                '<Article: Oxygen-free diet works wonders>',
            ])
        # Index 3 relies on the queryset ordering shown just above
        # (presumably Article.Meta orders by headline — confirm in models).
        a6 = self.p2.article_set.all()[3]
        self.assertQuerysetEqual(
            a6.publications.all(),
            [
                '<Publication: Highlights for Children>',
                '<Publication: Science News>',
                '<Publication: Science Weekly>',
                '<Publication: The Python Journal>',
            ]
        )
    def test_related_sets(self):
        """Both the forward accessor (article.publications) and the reverse
        accessor (publication.article_set) expose the related objects."""
        # Article objects have access to their related Publication objects.
        self.assertQuerysetEqual(self.a1.publications.all(), ['<Publication: The Python Journal>'])
        self.assertQuerysetEqual(
            self.a2.publications.all(),
            [
                '<Publication: Highlights for Children>',
                '<Publication: Science News>',
                '<Publication: Science Weekly>',
                '<Publication: The Python Journal>',
            ]
        )
        # Publication objects have access to their related Article objects.
        self.assertQuerysetEqual(
            self.p2.article_set.all(),
            [
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: NASA uses Python>',
                '<Article: Oxygen-free diet works wonders>',
            ]
        )
        self.assertQuerysetEqual(
            self.p1.article_set.all(),
            [
                '<Article: Django lets you build Web apps easily>',
                '<Article: NASA uses Python>',
            ]
        )
        self.assertQuerysetEqual(
            Publication.objects.get(id=self.p4.id).article_set.all(),
            ['<Article: NASA uses Python>']
        )
    def test_selects(self):
        """Filtering across the m2m relation: by id, pk, instance, lookup on
        related fields, __in lists, distinct(), count(), and exclude()."""
        # We can perform kwarg queries across m2m relationships
        self.assertQuerysetEqual(
            Article.objects.filter(publications__id__exact=self.p1.id),
            [
                '<Article: Django lets you build Web apps easily>',
                '<Article: NASA uses Python>',
            ])
        self.assertQuerysetEqual(
            Article.objects.filter(publications__pk=self.p1.id),
            [
                '<Article: Django lets you build Web apps easily>',
                '<Article: NASA uses Python>',
            ]
        )
        self.assertQuerysetEqual(
            Article.objects.filter(publications=self.p1.id),
            [
                '<Article: Django lets you build Web apps easily>',
                '<Article: NASA uses Python>',
            ]
        )
        self.assertQuerysetEqual(
            Article.objects.filter(publications=self.p1),
            [
                '<Article: Django lets you build Web apps easily>',
                '<Article: NASA uses Python>',
            ]
        )
        # Without distinct(), a2 appears once per matching publication
        # (it belongs to both "Science News" and "Science Weekly").
        self.assertQuerysetEqual(
            Article.objects.filter(publications__title__startswith="Science"),
            [
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: NASA uses Python>',
                '<Article: NASA uses Python>',
                '<Article: Oxygen-free diet works wonders>',
            ]
        )
        self.assertQuerysetEqual(
            Article.objects.filter(publications__title__startswith="Science").distinct(),
            [
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: NASA uses Python>',
                '<Article: Oxygen-free diet works wonders>',
            ]
        )
        # The count() function respects distinct() as well.
        self.assertEqual(Article.objects.filter(publications__title__startswith="Science").count(), 4)
        self.assertEqual(Article.objects.filter(publications__title__startswith="Science").distinct().count(), 3)
        self.assertQuerysetEqual(
            Article.objects.filter(publications__in=[self.p1.id, self.p2.id]).distinct(),
            [
                '<Article: Django lets you build Web apps easily>',
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: NASA uses Python>',
                '<Article: Oxygen-free diet works wonders>',
            ])
        # __in accepts a mix of pks and model instances.
        self.assertQuerysetEqual(
            Article.objects.filter(publications__in=[self.p1.id, self.p2]).distinct(),
            [
                '<Article: Django lets you build Web apps easily>',
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: NASA uses Python>',
                '<Article: Oxygen-free diet works wonders>',
            ]
        )
        self.assertQuerysetEqual(
            Article.objects.filter(publications__in=[self.p1, self.p2]).distinct(),
            [
                '<Article: Django lets you build Web apps easily>',
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: NASA uses Python>',
                '<Article: Oxygen-free diet works wonders>',
            ]
        )
        # Excluding a related item works as you would expect, too (although the SQL
        # involved is a little complex).
        self.assertQuerysetEqual(
            Article.objects.exclude(publications=self.p2),
            ['<Article: Django lets you build Web apps easily>']
        )
    def test_reverse_selects(self):
        """The same filtering works from the side without the
        ManyToManyField, using the lowercased related model name."""
        # Reverse m2m queries are supported (i.e., starting at the table that
        # doesn't have a ManyToManyField).
        python_journal = ['<Publication: The Python Journal>']
        self.assertQuerysetEqual(Publication.objects.filter(id__exact=self.p1.id), python_journal)
        self.assertQuerysetEqual(Publication.objects.filter(pk=self.p1.id), python_journal)
        # "Science News" appears twice: once for each matching NASA article.
        self.assertQuerysetEqual(
            Publication.objects.filter(article__headline__startswith="NASA"),
            [
                '<Publication: Highlights for Children>',
                '<Publication: Science News>',
                '<Publication: Science News>',
                '<Publication: Science Weekly>',
                '<Publication: The Python Journal>',
            ])
        self.assertQuerysetEqual(Publication.objects.filter(article__id__exact=self.a1.id), python_journal)
        self.assertQuerysetEqual(Publication.objects.filter(article__pk=self.a1.id), python_journal)
        self.assertQuerysetEqual(Publication.objects.filter(article=self.a1.id), python_journal)
        self.assertQuerysetEqual(Publication.objects.filter(article=self.a1), python_journal)
        # __in accepts pks, instances, or a mix of both.
        self.assertQuerysetEqual(
            Publication.objects.filter(article__in=[self.a1.id, self.a2.id]).distinct(),
            [
                '<Publication: Highlights for Children>',
                '<Publication: Science News>',
                '<Publication: Science Weekly>',
                '<Publication: The Python Journal>',
            ])
        self.assertQuerysetEqual(
            Publication.objects.filter(article__in=[self.a1.id, self.a2]).distinct(),
            [
                '<Publication: Highlights for Children>',
                '<Publication: Science News>',
                '<Publication: Science Weekly>',
                '<Publication: The Python Journal>',
            ])
        self.assertQuerysetEqual(
            Publication.objects.filter(article__in=[self.a1, self.a2]).distinct(),
            [
                '<Publication: Highlights for Children>',
                '<Publication: Science News>',
                '<Publication: Science Weekly>',
                '<Publication: The Python Journal>',
            ])
    def test_delete(self):
        """Deleting either endpoint of the relation removes the m2m rows, so
        the surviving side no longer sees the deleted object."""
        # If we delete a Publication, its Articles won't be able to access it.
        self.p1.delete()
        self.assertQuerysetEqual(
            Publication.objects.all(),
            [
                '<Publication: Highlights for Children>',
                '<Publication: Science News>',
                '<Publication: Science Weekly>',
            ]
        )
        self.assertQuerysetEqual(self.a1.publications.all(), [])
        # If we delete an Article, its Publications won't be able to access it.
        self.a2.delete()
        self.assertQuerysetEqual(
            Article.objects.all(),
            [
                '<Article: Django lets you build Web apps easily>',
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: Oxygen-free diet works wonders>',
            ]
        )
        self.assertQuerysetEqual(
            self.p2.article_set.all(),
            [
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: Oxygen-free diet works wonders>',
            ]
        )
    def test_bulk_delete(self):
        """QuerySet.delete() on either model drops the M2M links to the deleted rows."""
        # Bulk delete some Publications - references to deleted publications should go
        Publication.objects.filter(title__startswith='Science').delete()
        self.assertQuerysetEqual(
            Publication.objects.all(),
            [
                '<Publication: Highlights for Children>',
                '<Publication: The Python Journal>',
            ]
        )
        self.assertQuerysetEqual(
            Article.objects.all(),
            [
                '<Article: Django lets you build Web apps easily>',
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: NASA uses Python>',
                '<Article: Oxygen-free diet works wonders>',
            ]
        )
        self.assertQuerysetEqual(
            self.a2.publications.all(),
            [
                '<Publication: Highlights for Children>',
                '<Publication: The Python Journal>',
            ]
        )
        # Bulk delete some articles - references to deleted objects should go
        q = Article.objects.filter(headline__startswith='Django')
        self.assertQuerysetEqual(q, ['<Article: Django lets you build Web apps easily>'])
        q.delete()
        # After the delete, the QuerySet cache needs to be cleared,
        # and the referenced objects should be gone
        self.assertQuerysetEqual(q, [])
        self.assertQuerysetEqual(self.p1.article_set.all(), ['<Article: NASA uses Python>'])
    def test_remove(self):
        """remove() unlinks a single related object, from either end of the relation."""
        # Removing publication from an article:
        self.assertQuerysetEqual(
            self.p2.article_set.all(),
            [
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: NASA uses Python>',
                '<Article: Oxygen-free diet works wonders>',
            ]
        )
        self.a4.publications.remove(self.p2)
        self.assertQuerysetEqual(
            self.p2.article_set.all(),
            [
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: NASA uses Python>',
            ]
        )
        self.assertQuerysetEqual(self.a4.publications.all(), [])
        # And from the other end
        self.p2.article_set.remove(self.a3)
        self.assertQuerysetEqual(self.p2.article_set.all(), ['<Article: NASA uses Python>'])
        self.assertQuerysetEqual(self.a3.publications.all(), [])
    def test_set(self):
        """set() replaces the related set with objects, ids, or [], with and without clear=True."""
        self.p2.article_set.set([self.a4, self.a3])
        self.assertQuerysetEqual(
            self.p2.article_set.all(),
            [
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: Oxygen-free diet works wonders>',
            ]
        )
        self.assertQuerysetEqual(self.a4.publications.all(), ['<Publication: Science News>'])
        self.a4.publications.set([self.p3.id])
        self.assertQuerysetEqual(self.p2.article_set.all(), ['<Article: NASA finds intelligent life on Earth>'])
        self.assertQuerysetEqual(self.a4.publications.all(), ['<Publication: Science Weekly>'])
        self.p2.article_set.set([])
        self.assertQuerysetEqual(self.p2.article_set.all(), [])
        self.a4.publications.set([])
        self.assertQuerysetEqual(self.a4.publications.all(), [])
        # clear=True deletes all links first instead of diffing against the
        # current set; the observable end state is the same as above.
        self.p2.article_set.set([self.a4, self.a3], clear=True)
        self.assertQuerysetEqual(
            self.p2.article_set.all(),
            [
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: Oxygen-free diet works wonders>',
            ]
        )
        self.assertQuerysetEqual(self.a4.publications.all(), ['<Publication: Science News>'])
        self.a4.publications.set([self.p3.id], clear=True)
        self.assertQuerysetEqual(self.p2.article_set.all(), ['<Article: NASA finds intelligent life on Earth>'])
        self.assertQuerysetEqual(self.a4.publications.all(), ['<Publication: Science Weekly>'])
        self.p2.article_set.set([], clear=True)
        self.assertQuerysetEqual(self.p2.article_set.all(), [])
        self.a4.publications.set([], clear=True)
        self.assertQuerysetEqual(self.a4.publications.all(), [])
    def test_assign_forward(self):
        """Direct assignment to a reverse M2M manager raises TypeError."""
        msg = (
            "Direct assignment to the reverse side of a many-to-many set is "
            "prohibited. Use article_set.set() instead."
        )
        with self.assertRaisesMessage(TypeError, msg):
            self.p2.article_set = [self.a4, self.a3]
    def test_assign_reverse(self):
        """Direct assignment to a forward M2M manager raises TypeError."""
        msg = (
            "Direct assignment to the forward side of a many-to-many "
            "set is prohibited. Use publications.set() instead."
        )
        with self.assertRaisesMessage(TypeError, msg):
            self.a1.publications = [self.p1, self.p2]
    def test_assign(self):
        """set() assigns relation sets from objects, ids, and the empty list."""
        # Relation sets can be assigned using set().
        self.p2.article_set.set([self.a4, self.a3])
        self.assertQuerysetEqual(
            self.p2.article_set.all(), [
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: Oxygen-free diet works wonders>',
            ]
        )
        self.assertQuerysetEqual(self.a4.publications.all(), ['<Publication: Science News>'])
        self.a4.publications.set([self.p3.id])
        self.assertQuerysetEqual(self.p2.article_set.all(), ['<Article: NASA finds intelligent life on Earth>'])
        self.assertQuerysetEqual(self.a4.publications.all(), ['<Publication: Science Weekly>'])
        # An alternate to calling clear() is to set an empty set.
        self.p2.article_set.set([])
        self.assertQuerysetEqual(self.p2.article_set.all(), [])
        self.a4.publications.set([])
        self.assertQuerysetEqual(self.a4.publications.all(), [])
    def test_assign_ids(self):
        """set() accepts primary key values in place of model instances."""
        # Relation sets can also be set using primary key values
        self.p2.article_set.set([self.a4.id, self.a3.id])
        self.assertQuerysetEqual(
            self.p2.article_set.all(),
            [
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: Oxygen-free diet works wonders>',
            ]
        )
        self.assertQuerysetEqual(self.a4.publications.all(), ['<Publication: Science News>'])
        self.a4.publications.set([self.p3.id])
        self.assertQuerysetEqual(self.p2.article_set.all(), ['<Article: NASA finds intelligent life on Earth>'])
        self.assertQuerysetEqual(self.a4.publications.all(), ['<Publication: Science Weekly>'])
    def test_forward_assign_with_queryset(self):
        """A queryset passed to set() is evaluated before the old links are cleared (#19816)."""
        # Querysets used in m2m assignments are pre-evaluated so their value
        # isn't affected by the clearing operation in ManyRelatedManager.set()
        # (#19816).
        self.a1.publications.set([self.p1, self.p2])
        qs = self.a1.publications.filter(title='The Python Journal')
        self.a1.publications.set(qs)
        self.assertEqual(1, self.a1.publications.count())
        self.assertEqual(1, qs.count())
    def test_reverse_assign_with_queryset(self):
        """Same as the forward case, from the reverse manager (#19816)."""
        # Querysets used in M2M assignments are pre-evaluated so their value
        # isn't affected by the clearing operation in ManyRelatedManager.set()
        # (#19816).
        self.p1.article_set.set([self.a1, self.a2])
        qs = self.p1.article_set.filter(headline='Django lets you build Web apps easily')
        self.p1.article_set.set(qs)
        self.assertEqual(1, self.p1.article_set.count())
        self.assertEqual(1, qs.count())
    def test_clear(self):
        """clear() empties the related set from either end without touching the other relations."""
        # Relation sets can be cleared:
        self.p2.article_set.clear()
        self.assertQuerysetEqual(self.p2.article_set.all(), [])
        self.assertQuerysetEqual(self.a4.publications.all(), [])
        # And you can clear from the other end
        self.p2.article_set.add(self.a3, self.a4)
        self.assertQuerysetEqual(
            self.p2.article_set.all(),
            [
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: Oxygen-free diet works wonders>',
            ]
        )
        self.assertQuerysetEqual(self.a4.publications.all(), ['<Publication: Science News>'])
        self.a4.publications.clear()
        self.assertQuerysetEqual(self.a4.publications.all(), [])
        self.assertQuerysetEqual(self.p2.article_set.all(), ['<Article: NASA finds intelligent life on Earth>'])
    def test_clear_after_prefetch(self):
        """clear() invalidates a prefetched relation cache."""
        a4 = Article.objects.prefetch_related('publications').get(id=self.a4.id)
        self.assertQuerysetEqual(a4.publications.all(), ['<Publication: Science News>'])
        a4.publications.clear()
        self.assertQuerysetEqual(a4.publications.all(), [])
    def test_remove_after_prefetch(self):
        """remove() invalidates a prefetched relation cache."""
        a4 = Article.objects.prefetch_related('publications').get(id=self.a4.id)
        self.assertQuerysetEqual(a4.publications.all(), ['<Publication: Science News>'])
        a4.publications.remove(self.p2)
        self.assertQuerysetEqual(a4.publications.all(), [])
    def test_add_after_prefetch(self):
        """add() invalidates a prefetched relation cache."""
        a4 = Article.objects.prefetch_related('publications').get(id=self.a4.id)
        self.assertEqual(a4.publications.count(), 1)
        a4.publications.add(self.p1)
        self.assertEqual(a4.publications.count(), 2)
    def test_set_after_prefetch(self):
        """set() invalidates a prefetched relation cache."""
        a4 = Article.objects.prefetch_related('publications').get(id=self.a4.id)
        self.assertEqual(a4.publications.count(), 1)
        a4.publications.set([self.p2, self.p1])
        self.assertEqual(a4.publications.count(), 2)
        a4.publications.set([self.p1])
        self.assertEqual(a4.publications.count(), 1)
    def test_add_then_remove_after_prefetch(self):
        """add() followed by remove() keeps the cached relation consistent after prefetch."""
        a4 = Article.objects.prefetch_related('publications').get(id=self.a4.id)
        self.assertEqual(a4.publications.count(), 1)
        a4.publications.add(self.p1)
        self.assertEqual(a4.publications.count(), 2)
        a4.publications.remove(self.p1)
        self.assertQuerysetEqual(a4.publications.all(), ['<Publication: Science News>'])
    def test_inherited_models_selects(self):
        """
        #24156 - Objects from child models where the parent's m2m field uses
        related_name='+' should be retrieved correctly.
        """
        a = InheritedArticleA.objects.create()
        b = InheritedArticleB.objects.create()
        a.publications.add(self.p1, self.p2)
        # Additions on one subclass must not leak into the other subclass.
        self.assertQuerysetEqual(
            a.publications.all(),
            [
                '<Publication: Science News>',
                '<Publication: The Python Journal>',
            ])
        self.assertQuerysetEqual(b.publications.all(), [])
        b.publications.add(self.p3)
        self.assertQuerysetEqual(
            a.publications.all(),
            [
                '<Publication: Science News>',
                '<Publication: The Python Journal>',
            ]
        )
        self.assertQuerysetEqual(b.publications.all(), ['<Publication: Science Weekly>'])
class ManyToManyQueryTests(TestCase):
    """
    M2M count()/exists() can skip the JOIN to the target table when the
    backend enforces foreign keys; these tests pin that optimization down,
    including a through row whose target FK is NULL.
    """
    @classmethod
    def setUpTestData(cls):
        cls.article = Article.objects.create(headline='Django lets you build Web apps easily')
        cls.nullable_target_article = NullableTargetArticle.objects.create(headline='The python is good')
        # Through row with publication=None: must not be counted as a relation.
        NullablePublicationThrough.objects.create(article=cls.nullable_target_article, publication=None)
    @skipUnlessDBFeature('supports_foreign_keys')
    def test_count_join_optimization(self):
        """count() emits no JOIN when the backend supports foreign keys."""
        with CaptureQueriesContext(connection) as query:
            self.article.publications.count()
        self.assertNotIn('JOIN', query[0]['sql'])
        self.assertEqual(self.nullable_target_article.publications.count(), 0)
    def test_count_join_optimization_disabled(self):
        """count() keeps the JOIN when foreign-key support is mocked off."""
        with mock.patch.object(connection.features, 'supports_foreign_keys', False), \
                CaptureQueriesContext(connection) as query:
            self.article.publications.count()
        self.assertIn('JOIN', query[0]['sql'])
    @skipUnlessDBFeature('supports_foreign_keys')
    def test_exists_join_optimization(self):
        """exists() emits no JOIN when the backend supports foreign keys."""
        with CaptureQueriesContext(connection) as query:
            self.article.publications.exists()
        self.assertNotIn('JOIN', query[0]['sql'])
        self.assertIs(self.nullable_target_article.publications.exists(), False)
    def test_exists_join_optimization_disabled(self):
        """exists() keeps the JOIN when foreign-key support is mocked off."""
        with mock.patch.object(connection.features, 'supports_foreign_keys', False), \
                CaptureQueriesContext(connection) as query:
            self.article.publications.exists()
        self.assertIn('JOIN', query[0]['sql'])
| {
"content_hash": "a0e44784dd50a806379c8d329a9b3ce2",
"timestamp": "",
"source": "github",
"line_count": 596,
"max_line_length": 113,
"avg_line_length": 42.93456375838926,
"alnum_prop": 0.5938098401656962,
"repo_name": "timgraham/django",
"id": "933eb23a7a67cd346ee36595e45c6446dc8f648d",
"size": "25589",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "tests/many_to_many/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "84974"
},
{
"name": "HTML",
"bytes": "224563"
},
{
"name": "JavaScript",
"bytes": "257097"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "12931531"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
def crypt(word, salt):  # real signature unknown; restored from __doc__
    """
    crypt(word, salt) -> string

    word will usually be a user's password. salt is a 2-character string
    which will be used to select one of 4096 variations of DES. The characters
    in salt must be either ".", "/", or an alphanumeric character. Returns
    the hashed password as a string, composed of characters from the same
    alphabet as the salt.

    This is an IDE stub: it documents the signature only and always
    returns the empty string.
    """
    stub_result = ""
    return stub_result
# no classes
| {
"content_hash": "0d785adac71351e8b2b676f07bb4a53e",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 78,
"avg_line_length": 40.666666666666664,
"alnum_prop": 0.6844262295081968,
"repo_name": "akiokio/centralfitestoque",
"id": "c12387758a62767573f9555faad5596e53f700f9",
"size": "667",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/.pycharm_helpers/python_stubs/-1384406770/crypt.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "253279"
},
{
"name": "JavaScript",
"bytes": "253299"
},
{
"name": "Python",
"bytes": "6144500"
},
{
"name": "Ruby",
"bytes": "168219"
},
{
"name": "Shell",
"bytes": "21"
}
],
"symlink_target": ""
} |
import nltk as nltk
from collections import Counter
import itertools
# TODO: this part is language-specific
# Pre-trained Punkt model for English sentence boundaries, loaded once at
# import time.
sentence_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
def separate_sentences(text):
    """Split *text* into a list of sentence strings using the Punkt tokenizer."""
    return sentence_tokenizer.tokenize(text)
| {
"content_hash": "f50e59eb057418ffea359c0bc3c14bd4",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 70,
"avg_line_length": 28.444444444444443,
"alnum_prop": 0.80078125,
"repo_name": "chrishokamp/maxent-decoder",
"id": "acc6bdea7e9ce6889df6bc3a6702fd3f0137593c",
"size": "303",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "preprocess/sentence_tokenize.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28454"
}
],
"symlink_target": ""
} |
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)

# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np

from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph

# Load the 8x8 digit images and flatten each into a 64-feature row vector.
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
# Connectivity graph linking each pixel to its grid neighbours, so only
# spatially adjacent features may be merged.
connectivity = grid_to_graph(*images[0].shape)

# Merge the 64 pixel features down to 32 clusters, then project back to the
# original feature space for display.
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
                                     n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)

plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
    # Row 1: original digits.
    plt.subplot(3, 4, i + 1)
    plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
    plt.xticks(())
    plt.yticks(())
    if i == 1:
        plt.title('Original data')
    # Row 2: the same digits after agglomeration + inverse transform.
    plt.subplot(3, 4, 4 + i + 1)
    plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
               interpolation='nearest')
    if i == 1:
        plt.title('Agglomerated data')
    plt.xticks(())
    plt.yticks(())

# Row 3: which cluster each pixel was assigned to.
# NOTE: the 'spectral' colormap was removed from matplotlib (2.2);
# 'nipy_spectral' is its documented replacement.
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
           interpolation='nearest', cmap=plt.cm.nipy_spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
| {
"content_hash": "09112f52a60798d5eb9f15a1ac690b62",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 77,
"avg_line_length": 28.92982456140351,
"alnum_prop": 0.6258338386901152,
"repo_name": "DailyActie/Surrogate-Model",
"id": "cc0640e4bec119a937e366a4e5ee845ba447c861",
"size": "1693",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "01-codes/scikit-learn-master/examples/cluster/plot_digits_agglomeration.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "345"
},
{
"name": "Batchfile",
"bytes": "18746"
},
{
"name": "C",
"bytes": "13004913"
},
{
"name": "C++",
"bytes": "14692003"
},
{
"name": "CMake",
"bytes": "72831"
},
{
"name": "CSS",
"bytes": "303488"
},
{
"name": "Fortran",
"bytes": "7339415"
},
{
"name": "HTML",
"bytes": "854774"
},
{
"name": "Java",
"bytes": "38854"
},
{
"name": "JavaScript",
"bytes": "2432846"
},
{
"name": "Jupyter Notebook",
"bytes": "829689"
},
{
"name": "M4",
"bytes": "1379"
},
{
"name": "Makefile",
"bytes": "48708"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "PHP",
"bytes": "93585"
},
{
"name": "Pascal",
"bytes": "1449"
},
{
"name": "Perl",
"bytes": "1152272"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "34668203"
},
{
"name": "Roff",
"bytes": "5925"
},
{
"name": "Ruby",
"bytes": "92498"
},
{
"name": "Shell",
"bytes": "94698"
},
{
"name": "TeX",
"bytes": "156540"
},
{
"name": "TypeScript",
"bytes": "41691"
}
],
"symlink_target": ""
} |
"""
Authors: Porebski Elvis C00170343
Tyrakowski Bartosz C00155128
Date: February, 2016
"""
import json
from os.path import exists
from flask import Flask
from flask_socketio import SocketIO, emit
from drs.drs import Drs
from drs.partition.ntfspartition import NtfsPartition
from drs.partition.partitionmanager import PartitionManager
# Flask serves the front end from the static folder; Socket.IO carries the
# client/server messages.
app = Flask(__name__, static_url_path='')
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)
# Single shared data-recovery engine instance used by all handlers below.
drs = Drs()
@socketio.on('connect')
def connected():
    """Acknowledge a new Socket.IO client with a 'connection status' event."""
    print('Client is connected')
    emit('connection status', {'status': 'connected'})
@app.route('/')
def angular():
    """Serve the single-page front end (index.html) from the static folder."""
    return app.send_static_file('index.html')
@socketio.on('request:partitions')
def get_partitions():
    """Load the available partitions and emit them to the client as JSON."""
    print('client requested partitions...')
    partitions = PartitionManager.load_partitions()
    response = [partition.to_json() for partition in partitions]
    print(response)
    emit('response:partitions', json.dumps(response))
@socketio.on('request:recover_all')
def recover_all():
    """Recover every deleted file found by a previous analysis run."""
    print('request received: recover_all')
    # Defensive truthiness guard on the module-level engine instance.
    if drs:
        print('recovering all files...')
        drs.recover_all()
        print('recovery completed!')
@socketio.on('request:mft_analyse')
def analyse_mft(path):
    """
    Scan the MFT of the partition at *path* for deleted records and emit the
    results to the client.

    Uses an already-known partition from ``drs.get_partitions()`` when one
    matches *path*; otherwise wraps *path* in a fresh NtfsPartition.  Progress
    is reported through ``socket_callback``; the final list of deleted files
    is emitted as the 'deleted_file_found' event.
    """
    if not exists(path):
        # Silently ignore requests for paths that do not exist (same
        # behavior as the original no-else guard).
        return
    source = None
    for partition in drs.get_partitions():
        if partition.path == path:
            source = partition
            break
    if source is None:
        source = NtfsPartition(path=path, size=0, label=path)
    drs.analyse(partition=source, callback=socket_callback)
    print('MFT analysis completed.', '{} deleted records found.'.format(len(drs.data_bank.values())))
    results = []
    for deleted_record in drs.data_bank.values():
        print('{}\n'.format(deleted_record),
              'Record Number: {}\n'.format(deleted_record['data'].record_number),
              'Size: {}\n'.format(deleted_record['data'].attrs['size']),
              'Parent: {}\n'.format(deleted_record['data'].attrs['parent_dir_file_req_no']),
              'Parent Sequence Number: {}\n'.format(deleted_record['data'].attrs['parent_dir_seq_no']))
        # NOTE: the original rebound the *path* parameter here; use a distinct
        # local name so the function argument is never clobbered.
        results.append({
            'file_name': deleted_record['data'].attrs['file_name'],
            'dir_path': deleted_record['path'],
            'is_orphan': deleted_record['is_orphan']
        })
    emit('deleted_file_found', json.dumps(results))
def socket_callback(record_number, total):
    """Emit MFT scan progress every 1000 records, and once more at the end."""
    if record_number % 1000 == 0 or record_number == total:
        emit('mft_analyser_progress', {
            'current': record_number,
            'total': total
        })
# Debug mode off; run the Socket.IO server when executed directly.
app.debug = False
if __name__ == '__main__':
    socketio.run(app)
| {
"content_hash": "7d2747933ad24191f42cc4b9da3818fe",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 107,
"avg_line_length": 29.632653061224488,
"alnum_prop": 0.609504132231405,
"repo_name": "eepDev/DRS",
"id": "5b422a469f3a0143ba59e3002ccd855747964fea",
"size": "2904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "drs/ui/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "226"
},
{
"name": "HTML",
"bytes": "7853"
},
{
"name": "JavaScript",
"bytes": "4285"
},
{
"name": "Python",
"bytes": "74185"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
url(r'^signin/$', 'ale.views.sign_in', name='sign in'),
url(r'^signup/$', 'ale.views.sign_up', name='sign in'),
url(r'^logout/$', 'ale.views.log_out', name='log out'),
url(r'^$', 'ale.views.dashboard', name='dashboard'),
url(r'^project/create/$', 'ale.views.create_project', name='dashboard'),
url(r'^project/import/(?P<project_path>.*)/$', 'ale.io_views.import_user_project',
name='import'),
url(r'^project/export/(?P<project_path>.*)/$', 'ale.io_views.export_user_project',
name='import'),
url(r'^project/(?P<project_path>.*)/$', 'ale.views.show_user_project', name='project'),
url(r'^json/project/(?P<project_path>.*)/cell/modify/$', 'ale.views.modify_cell',
name='modify cell'),
url(r'^json/project/(?P<project_path>.*)/$', 'ale.views.cells_data_json', name='project json'),
url(r'^json/shares/project/(?P<project_path>.*)/$', 'ale.share_views.get_shares',
name='get shares'),
url(r'^json/share/project/(?P<project_path>.*)/$', 'ale.share_views.share_project',
name='share project'),
url(r'^json/share/remove/project/(?P<project_path>.*)/$', 'ale.share_views.remove_share',
name='remove share'),
url(r'^share/(?P<hash_key>.*)/$', 'ale.share_views.show_shared_project', name='shared project'),
url(r'^json/share/cells/(?P<hash_key>.*)/$', 'ale.share_views.get_json_shared_cells',
name='shared project cells'),
url(r'^json/share/modify/cell/(?P<hash_key>.*)/$', 'ale.share_views.modify_shared_project_cell',
name='modify shared project'),
url(r'^import/shared/(?P<hash_key>.*)/$', 'ale.share_views.import_shared_project',
name='import shared project'),
url(r'^export/shared/(?P<hash_key>.*)/$', 'ale.share_views.export_shared_project',
name='export shared project'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
) | {
"content_hash": "43906726efeac60c95d022e69c54659f",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 119,
"avg_line_length": 60.43181818181818,
"alnum_prop": 0.48251222264009025,
"repo_name": "stermedia/AndroidLanguageEditor",
"id": "ed99b66b9311e45e76d9bcba43ce0406e213c062",
"size": "2659",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AndroidLanguageEditor/urls.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "124028"
},
{
"name": "JavaScript",
"bytes": "689395"
},
{
"name": "Python",
"bytes": "21840"
}
],
"symlink_target": ""
} |
'''
Created on 1.12.2016
@author: Darren
'''
'''
There is a fence with n posts, each post can be painted with one of the k colors.
You have to paint all the posts such that no more than two adjacent fence posts have the same color.
Return the total number of ways you can paint the fence.
Note:
n and k are non-negative integers
'''
| {
"content_hash": "54ac212ffcb6b64469dfa501a8be3344",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 100,
"avg_line_length": 22.466666666666665,
"alnum_prop": 0.7299703264094956,
"repo_name": "darrencheng0817/AlgorithmLearning",
"id": "11140c8b3cd1d80a0e7bbcff4de93996ca798dc0",
"size": "337",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/leetcode/PaintFence.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "2663"
},
{
"name": "Java",
"bytes": "89490"
},
{
"name": "Python",
"bytes": "600854"
}
],
"symlink_target": ""
} |
__author__ = 'Rohan'
from Tkinter import *
from tkFileDialog import *
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import vgprice
def getelements(vglist, type):
tmplist = []
for i in vglist:
tmplist.append(getattr(i, type))
return tmplist
def createstructarray(looseprices, cibprices, newprices, reviews, genres, years, consolenames, newloosespread):
    """
    Pack the parallel per-game columns into one numpy structured array.

    Float32 fields: looseprices, cibprices, newprices, reviews,
    newloosespread.  Fixed-width string fields: genres ('a30'),
    years ('a5'), consolenames ('a30').  All inputs must share the
    length of *looseprices*.
    """
    record_dtype = [('looseprices', 'f4'), ('cibprices', 'f4'),
                    ('newprices', 'f4'), ('reviews', 'f4'),
                    ('genres', 'a30'), ('years', 'a5'),
                    ('consolenames', 'a30'), ('newloosespread', 'f4')]
    columns = {'looseprices': looseprices, 'cibprices': cibprices,
               'newprices': newprices, 'reviews': reviews,
               'genres': genres, 'years': years,
               'consolenames': consolenames,
               'newloosespread': newloosespread}
    packed = np.zeros((looseprices.size,), dtype = record_dtype)
    for field_name, values in columns.items():
        packed[field_name] = values
    return packed
def firstplot(vgarray):
    """
    One scatter subplot per genre: new/loose price spread (x) vs. review
    score (y), each with a least-squares fit line and the Pearson
    correlation appended to the subplot title.

    :param vgarray: structured array from createstructarray.
    """
    def scatter_with_fit(spread, review, label):
        # One shared mask keeps the (spread, review) pairs aligned; the CSV
        # columns contain NaN where a price or score is missing.
        mask = ~np.isnan(spread) & ~np.isnan(review)
        s, r = spread[mask], review[mask]
        plt.plot(s, r, 'ro')
        try:
            m, b, rvalue, pvalue, stderror = stats.linregress(s, r)
            # 11 evenly spaced x positions across the observed spread range.
            x = np.r_[np.amin(s):np.amax(s):11j]
            plt.plot(x, m * x + b)
        except Exception:
            # amin/linregress fail on empty or degenerate data; skip the fit
            # line (narrowed from the original bare `except:`).
            pass
        cor = "{0:.2f}".format(stats.pearsonr(s, r)[0])
        plt.title(label + " --- " + cor, size = 9)

    plt.figure(1)
    sp = 1
    for genre in np.unique(vgarray['genres']):
        plt.subplot(7, 4, sp)
        rows = vgarray[np.where(vgarray['genres'] == genre)]
        scatter_with_fit(rows['newloosespread'], rows['reviews'], genre)
        sp = sp + 1
    plt.tight_layout()
    plt.show()
def secondplot(vgarray):
"""
asks for genre and then plots that genre and everything sans that genre
:param vgarray:
"""
listGenres = np.unique(vgarray['genres'])
print "Please input one of the following genres:"
for i in listGenres:
print i
var = raw_input("Enter a genre:")
if var not in listGenres:
print "wrong entry"
return
spreadgenre = vgarray[np.where(vgarray['genres'] == var)]['newloosespread']
reviewgenre = vgarray[np.where(vgarray['genres'] == var)]['reviews']
spreadelse = vgarray[np.where(vgarray['genres'] != var)]['newloosespread']
reviewelse = vgarray[np.where(vgarray['genres'] != var)]['reviews']
plt.figure(1)
plt.subplot(2, 1, 1)
plt.plot(spreadgenre[~np.isnan(spreadgenre)&~np.isnan(reviewgenre)], reviewgenre[~np.isnan(reviewgenre)&~np.isnan(spreadgenre)], 'ro')
try:
m, b, rvalue, pvalue, stderror = stats.linregress(spreadgenre[~np.isnan(spreadgenre)&~np.isnan(reviewgenre)],
reviewgenre[~np.isnan(reviewgenre)&~np.isnan(spreadgenre)])
x1, x2, n = np.amin(spreadgenre[~np.isnan(spreadgenre)&~np.isnan(reviewgenre)]), np.amax(spreadgenre[~np.isnan(spreadgenre)&~np.isnan(reviewgenre)]), 11
x = np.r_[x1:x2:n * 1j]
plt.plot(x, m * x + b)
except:
pass
cor = stats.pearsonr(spreadgenre[~np.isnan(spreadgenre)&~np.isnan(reviewgenre)], reviewgenre[~np.isnan(reviewgenre)&~np.isnan(spreadgenre)])
cor = "{0:.2f}".format(cor[0])
plt.title(var + " --- " + str(cor), size = 9)
plt.subplot(2, 1, 2)
plt.plot(spreadelse[~np.isnan(spreadelse)&~np.isnan(reviewelse)], reviewelse[~np.isnan(reviewelse)&~np.isnan(spreadelse)], 'ro')
try:
m, b, rvalue, pvalue, stderror = stats.linregress(spreadelse[~np.isnan(spreadelse)&~np.isnan(reviewelse)],
reviewelse[~np.isnan(reviewelse)&~np.isnan(spreadelse)])
x1, x2, n = np.amin(spreadelse[~np.isnan(spreadelse)&~np.isnan(reviewelse)]), np.amax(spreadelse[~np.isnan(spreadelse)&~np.isnan(reviewelse)]), 11
x = np.r_[x1:x2:n * 1j]
plt.plot(x, m * x + b)
except:
pass
cor = stats.pearsonr(spreadelse[~np.isnan(spreadelse)&~np.isnan(reviewelse)], reviewelse[~np.isnan(reviewelse)&~np.isnan(spreadelse)])
cor = "{0:.2f}".format(cor[0])
plt.title("everything but " + var + " --- " + str(cor), size = 9)
plt.tight_layout()
plt.show()
def thirdplot(vgarray):
"""
asks for year and plots that year and everything sans that year
:param vgarray:
:return:
"""
listYears = np.sort(np.unique(vgarray['years']))
print "Please input one of the following year:"
for i in listYears:
print i
var = raw_input("Enter a year:")
if var not in listYears:
print "wrong entry"
return
spreadyear = vgarray[np.where(vgarray['years'] == var)]['newloosespread']
reviewyear = vgarray[np.where(vgarray['years'] == var)]['reviews']
spreadelse = vgarray[np.where(vgarray['years'] != var)]['newloosespread']
reviewelse = vgarray[np.where(vgarray['years'] != var)]['reviews']
plt.figure(1)
plt.subplot(2, 1, 1)
plt.plot(spreadyear[~np.isnan(spreadyear)&~np.isnan(reviewyear)], reviewyear[~np.isnan(reviewyear)&~np.isnan(spreadyear)], 'ro')
try:
m, b, rvalue, pvalue, stderror = stats.linregress(spreadyear[~np.isnan(spreadyear)&~np.isnan(reviewyear)], reviewyear[~np.isnan(reviewyear)&~np.isnan(spreadyear)])
x1, x2, n = np.amin(spreadyear[~np.isnan(spreadyear)&~np.isnan(reviewyear)]), np.amax(spreadyear[~np.isnan(spreadyear)&~np.isnan(reviewyear)]), 11
x = np.r_[x1:x2:n * 1j]
plt.plot(x, m * x + b)
except:
pass
cor = stats.pearsonr(spreadyear[~np.isnan(spreadyear)&~np.isnan(reviewyear)], reviewyear[~np.isnan(reviewyear)&~np.isnan(spreadyear)])
cor = "{0:.2f}".format(cor[0])
plt.title(var + " --- " + cor, size = 9)
plt.subplot(2, 1, 2)
plt.plot(spreadelse[~np.isnan(spreadelse)&~np.isnan(reviewelse)], reviewelse[~np.isnan(reviewelse)&~np.isnan(spreadelse)], 'ro')
try:
m, b, rvalue, pvalue, stderror = stats.linregress(spreadelse[~np.isnan(spreadelse)&~np.isnan(reviewelse)], reviewelse[~np.isnan(reviewelse)&~np.isnan(spreadelse)])
x1, x2, n = np.amin(spreadelse[~np.isnan(spreadelse)&~np.isnan(reviewelse)]), np.amax(spreadelse[~np.isnan(spreadelse)&~np.isnan(reviewelse)]), 11
x = np.r_[x1:x2:n * 1j]
plt.plot(x, m * x + b)
except:
pass
cor = stats.pearsonr(spreadelse[~np.isnan(spreadelse)&~np.isnan(reviewelse)], reviewelse[~np.isnan(reviewelse)&~np.isnan(spreadelse)])
cor = "{0:.2f}".format(cor[0])
plt.title("everything but " + var + " --- " + cor, size = 9)
plt.tight_layout()
plt.show()
def fourthplot(vgarray):
"""
asks for console name and plots that and everything sans that
:param vgarray:
:return:
"""
listConsoleNames = np.sort(np.unique(vgarray['consolenames']))
print "Please input one of the following consolenames:"
for i in listConsoleNames:
print i
var = raw_input("Enter a consolenames:")
if var not in listConsoleNames:
print "wrong entry"
return
spreadconsolename = vgarray[np.where(vgarray['consolenames'] == var)]['newloosespread']
reviewconsolename = vgarray[np.where(vgarray['consolenames'] == var)]['reviews']
spreadelse = vgarray[np.where(vgarray['consolenames'] != var)]['newloosespread']
reviewelse = vgarray[np.where(vgarray['consolenames'] != var)]['reviews']
plt.figure(1)
plt.subplot(2, 1, 1)
plt.plot(spreadconsolename[~np.isnan(spreadconsolename)&~np.isnan(reviewconsolename)], reviewconsolename[~np.isnan(reviewconsolename)&~np.isnan(reviewconsolename)], 'ro')
try:
m, b, rvalue, pvalue, stderror = stats.linregress(spreadconsolename[~np.isnan(spreadconsolename)&~np.isnan(reviewconsolename)], reviewconsolename[~np.isnan(reviewconsolename)&~np.isnan(reviewconsolename)])
x1, x2, n = np.amin(spreadconsolename[~np.isnan(spreadconsolename)&~np.isnan(reviewconsolename)]), np.amax(spreadconsolename[~np.isnan(spreadconsolename)&~np.isnan(reviewconsolename)]), 11
x = np.r_[x1:x2:n * 1j]
plt.plot(x, m * x + b)
except:
pass
cor = stats.pearsonr(spreadconsolename[~np.isnan(spreadconsolename)&~np.isnan(reviewconsolename)], reviewconsolename[~np.isnan(reviewconsolename)&~np.isnan(reviewconsolename)])
cor = "{0:.2f}".format(cor[0])
plt.title(var + " --- " + cor, size = 9)
plt.subplot(2, 1, 2)
plt.plot(spreadelse[~np.isnan(spreadelse)&~np.isnan(reviewelse)], reviewelse[~np.isnan(reviewelse)&~np.isnan(spreadelse)], 'ro')
try:
m, b, rvalue, pvalue, stderror = stats.linregress(spreadelse[~np.isnan(spreadelse)&~np.isnan(reviewelse)], reviewelse[~np.isnan(reviewelse)&~np.isnan(spreadelse)])
x1, x2, n = np.amin(spreadelse[~np.isnan(spreadelse)&~np.isnan(reviewelse)]), np.amax(spreadelse[~np.isnan(spreadelse)&~np.isnan(reviewelse)]), 11
x = np.r_[x1:x2:n * 1j]
plt.plot(x, m * x + b)
except:
pass
cor = stats.pearsonr(spreadelse[~np.isnan(spreadelse)&~np.isnan(reviewelse)], reviewelse[~np.isnan(reviewelse)&~np.isnan(spreadelse)])
cor = "{0:.2f}".format(cor[0])
plt.title("everything but " + var + " --- " + cor, size = 9)
plt.tight_layout()
plt.show()
def main():
    """
    Entry point: prompt for the vgprice CSV via a Tk file dialog, parse each
    row into a vgprice.vgpriceObj, build a structured numpy array of
    log-prices plus metadata, then show the four exploratory plots.
    """
    root = Tk()
    root.withdraw()
    openfilename = askopenfilename(filetypes = [("csv", "*.csv")], parent = root, title = "Please select vgprice csv ")
    root.destroy()
    VgpriceList = []
    try:
        vgFile = open(openfilename, 'r')
        try:
            # Skip the CSV header row.
            vgFile.readline()
            for line in vgFile:
                args = line.rstrip("\n").split(",")
                c = vgprice.vgpriceObj(*args)
                #print c
                VgpriceList.append(c)
        finally:
            vgFile.close()
    except IOError:
        print "file does not exist"
        return
    reviewscores = np.array(getelements(VgpriceList, 'score'))
    genres = np.array(getelements(VgpriceList, 'genre'))
    years = np.array(getelements(VgpriceList, 'year'))
    consolenames = np.array(getelements(VgpriceList, 'consolename'))
    # Prices are log-transformed, so the spread below is log(new/loose).
    looseprices = np.log(np.array(getelements(VgpriceList, 'looseprice'), dtype = np.float))
    cibprices = np.log(np.array(getelements(VgpriceList, 'cibprice'), dtype = np.float))
    newprices = np.log(np.array(getelements(VgpriceList, 'newprice'), dtype = np.float))
    newloosespread = newprices - looseprices
    vgarray = createstructarray(looseprices, cibprices, newprices, reviewscores, genres, years, consolenames,
                                newloosespread)
    firstplot(vgarray)
    secondplot(vgarray)
    thirdplot(vgarray)
    fourthplot(vgarray)
if __name__ == '__main__':
main() | {
"content_hash": "405e32225b3198b994b10240148d0567",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 213,
"avg_line_length": 41.11721611721612,
"alnum_prop": 0.6212026726057907,
"repo_name": "rohanfray/IS602-Final",
"id": "80cc907284a8fda37472f28a87ecb1d66731759e",
"size": "11225",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "IS602 Final.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13793"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from django.db import IntegrityError, transaction
from rest_framework.response import Response
from sentry import features
from sentry.api.bases import GroupEndpoint
from sentry.api.serializers import serialize
from sentry.api.serializers.models.integration import IntegrationIssueConfigSerializer
from sentry.integrations import IntegrationFeatures
from sentry.integrations.exceptions import IntegrationError, IntegrationFormError
from sentry.models import Activity, ExternalIssue, GroupLink, Integration
from sentry.signals import integration_issue_created, integration_issue_linked
MISSING_FEATURE_MESSAGE = "Your organization does not have access to this feature."
class GroupIntegrationDetailsEndpoint(GroupEndpoint):
    """Manage the link between a Sentry group and an issue in an external integration.

    GET returns the integration's link/create form configuration, PUT links an
    existing provider issue, POST creates a new provider issue and links it,
    and DELETE unlinks (and garbage-collects the ExternalIssue row when no
    other group references it).
    """

    def _has_issue_feature(self, organization, user):
        """Return True when the org has issue-basic or issue-sync enabled for *user*."""
        has_issue_basic = features.has(
            "organizations:integrations-issue-basic", organization, actor=user
        )
        has_issue_sync = features.has(
            "organizations:integrations-issue-sync", organization, actor=user
        )
        return has_issue_sync or has_issue_basic

    def create_issue_activity(self, request, group, installation, external_issue):
        """Record a CREATE_ISSUE activity entry on the group for *external_issue*."""
        issue_information = {
            "title": external_issue.title,
            "provider": installation.model.get_provider().name,
            "location": installation.get_issue_url(external_issue.key),
            "label": installation.get_issue_display_name(external_issue) or external_issue.key,
        }
        Activity.objects.create(
            project=group.project,
            group=group,
            type=Activity.CREATE_ISSUE,
            user=request.user,
            data=issue_information,
        )

    def get(self, request, group, integration_id):
        """Return the serialized issue config for either the link or create form."""
        if not self._has_issue_feature(group.organization, request.user):
            return Response({"detail": MISSING_FEATURE_MESSAGE}, status=400)

        # Keep link/create separate since create will likely require
        # many external API calls that aren't necessary if the user is
        # just linking
        action = request.GET.get("action")
        if action not in {"link", "create"}:
            # BUG FIX: this validation failure previously came back as HTTP 200;
            # return 400 like every other invalid-parameter response here.
            return Response(
                {"detail": "Action is required and should be either link or create"}, status=400
            )

        organization_id = group.project.organization_id
        try:
            integration = Integration.objects.get(id=integration_id, organizations=organization_id)
        except Integration.DoesNotExist:
            return Response(status=404)

        if not (
            integration.has_feature(IntegrationFeatures.ISSUE_BASIC)
            or integration.has_feature(IntegrationFeatures.ISSUE_SYNC)
        ):
            return Response(
                {"detail": "This feature is not supported for this integration."}, status=400
            )

        try:
            return Response(
                serialize(
                    integration,
                    request.user,
                    IntegrationIssueConfigSerializer(group, action, params=request.GET),
                    organization_id=organization_id,
                )
            )
        except IntegrationError as exc:
            return Response({"detail": exc.message}, status=400)

    # was thinking put for link an existing issue, post for create new issue?
    def put(self, request, group, integration_id):
        """Link an existing provider issue (by its provider-side id) to the group."""
        if not self._has_issue_feature(group.organization, request.user):
            return Response({"detail": MISSING_FEATURE_MESSAGE}, status=400)

        external_issue_id = request.data.get("externalIssue")
        if not external_issue_id:
            return Response({"externalIssue": ["Issue ID is required"]}, status=400)

        organization_id = group.project.organization_id
        try:
            integration = Integration.objects.get(id=integration_id, organizations=organization_id)
        except Integration.DoesNotExist:
            return Response(status=404)

        if not (
            integration.has_feature(IntegrationFeatures.ISSUE_BASIC)
            or integration.has_feature(IntegrationFeatures.ISSUE_SYNC)
        ):
            return Response(
                {"detail": "This feature is not supported for this integration."}, status=400
            )

        installation = integration.get_installation(organization_id)
        try:
            data = installation.get_issue(external_issue_id, data=request.data)
        except IntegrationFormError as exc:
            return Response(exc.field_errors, status=400)
        except IntegrationError as exc:
            return Response({"non_field_errors": [exc.message]}, status=400)

        defaults = {
            "title": data.get("title"),
            "description": data.get("description"),
            "metadata": data.get("metadata"),
        }

        external_issue_key = installation.make_external_key(data)
        external_issue, created = ExternalIssue.objects.get_or_create(
            organization_id=organization_id,
            integration_id=integration.id,
            key=external_issue_key,
            defaults=defaults,
        )

        if created:
            integration_issue_linked.send_robust(
                integration=integration,
                organization=group.project.organization,
                user=request.user,
                sender=self.__class__,
            )
        else:
            # Issue row already existed: refresh the cached provider fields.
            external_issue.update(**defaults)

        installation.store_issue_last_defaults(group.project_id, request.data)
        try:
            installation.after_link_issue(external_issue, data=request.data)
        except IntegrationFormError as exc:
            return Response(exc.field_errors, status=400)
        except IntegrationError as exc:
            return Response({"non_field_errors": [exc.message]}, status=400)

        try:
            with transaction.atomic():
                GroupLink.objects.create(
                    group_id=group.id,
                    project_id=group.project_id,
                    linked_type=GroupLink.LinkedType.issue,
                    linked_id=external_issue.id,
                    relationship=GroupLink.Relationship.references,
                )
        except IntegrityError:
            return Response({"non_field_errors": ["That issue is already linked"]}, status=400)

        self.create_issue_activity(request, group, installation, external_issue)

        # TODO(jess): would be helpful to return serialized external issue
        # once we have description, title, etc
        url = data.get("url") or installation.get_issue_url(external_issue.key)
        context = {
            "id": external_issue.id,
            "key": external_issue.key,
            "url": url,
            "integrationId": external_issue.integration_id,
            "displayName": installation.get_issue_display_name(external_issue),
        }
        return Response(context, status=201)

    def post(self, request, group, integration_id):
        """Create a new issue in the external provider and link it to the group."""
        if not self._has_issue_feature(group.organization, request.user):
            return Response({"detail": MISSING_FEATURE_MESSAGE}, status=400)

        organization_id = group.project.organization_id
        try:
            integration = Integration.objects.get(id=integration_id, organizations=organization_id)
        except Integration.DoesNotExist:
            return Response(status=404)

        if not (
            integration.has_feature(IntegrationFeatures.ISSUE_BASIC)
            or integration.has_feature(IntegrationFeatures.ISSUE_SYNC)
        ):
            return Response(
                {"detail": "This feature is not supported for this integration."}, status=400
            )

        installation = integration.get_installation(organization_id)
        try:
            data = installation.create_issue(request.data)
        except IntegrationFormError as exc:
            return Response(exc.field_errors, status=400)
        except IntegrationError as exc:
            return Response({"non_field_errors": [exc.message]}, status=400)

        external_issue_key = installation.make_external_key(data)
        external_issue, created = ExternalIssue.objects.get_or_create(
            organization_id=organization_id,
            integration_id=integration.id,
            key=external_issue_key,
            defaults={
                "title": data.get("title"),
                "description": data.get("description"),
                "metadata": data.get("metadata"),
            },
        )

        try:
            with transaction.atomic():
                GroupLink.objects.create(
                    group_id=group.id,
                    project_id=group.project_id,
                    linked_type=GroupLink.LinkedType.issue,
                    linked_id=external_issue.id,
                    relationship=GroupLink.Relationship.references,
                )
        except IntegrityError:
            return Response({"detail": "That issue is already linked"}, status=400)

        if created:
            integration_issue_created.send_robust(
                integration=integration,
                organization=group.project.organization,
                user=request.user,
                sender=self.__class__,
            )

        installation.store_issue_last_defaults(group.project_id, request.data)
        self.create_issue_activity(request, group, installation, external_issue)

        # TODO(jess): return serialized issue
        url = data.get("url") or installation.get_issue_url(external_issue.key)
        context = {
            "id": external_issue.id,
            "key": external_issue.key,
            "url": url,
            "integrationId": external_issue.integration_id,
            "displayName": installation.get_issue_display_name(external_issue),
        }
        return Response(context, status=201)

    def delete(self, request, group, integration_id):
        """Unlink an external issue; delete its row when no other group links it."""
        if not self._has_issue_feature(group.organization, request.user):
            return Response({"detail": MISSING_FEATURE_MESSAGE}, status=400)

        # note here externalIssue refers to `ExternalIssue.id` whereas above
        # it refers to the id from the provider
        external_issue_id = request.GET.get("externalIssue")
        if not external_issue_id:
            return Response({"detail": "External ID required"}, status=400)

        organization_id = group.project.organization_id
        try:
            integration = Integration.objects.get(id=integration_id, organizations=organization_id)
        except Integration.DoesNotExist:
            return Response(status=404)

        if not (
            integration.has_feature(IntegrationFeatures.ISSUE_BASIC)
            or integration.has_feature(IntegrationFeatures.ISSUE_SYNC)
        ):
            return Response(
                {"detail": "This feature is not supported for this integration."}, status=400
            )

        try:
            external_issue = ExternalIssue.objects.get(
                organization_id=organization_id, integration_id=integration.id, id=external_issue_id
            )
        except ExternalIssue.DoesNotExist:
            return Response(status=404)

        with transaction.atomic():
            GroupLink.objects.filter(
                group_id=group.id,
                project_id=group.project_id,
                linked_type=GroupLink.LinkedType.issue,
                linked_id=external_issue_id,
                relationship=GroupLink.Relationship.references,
            ).delete()

            # check if other groups reference this external issue
            # and delete if not
            if not GroupLink.objects.filter(
                linked_type=GroupLink.LinkedType.issue, linked_id=external_issue_id
            ).exists():
                external_issue.delete()

        return Response(status=204)
| {
"content_hash": "0522803198b8d48512450d8e1bb095b0",
"timestamp": "",
"source": "github",
"line_count": 291,
"max_line_length": 100,
"avg_line_length": 40.83848797250859,
"alnum_prop": 0.6168798384382362,
"repo_name": "mvaled/sentry",
"id": "643a3d95f8b11bb714c9c217e07b8a37d3f224ff",
"size": "11884",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/api/endpoints/group_integration_details.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "226439"
},
{
"name": "Dockerfile",
"bytes": "6431"
},
{
"name": "HTML",
"bytes": "173429"
},
{
"name": "JavaScript",
"bytes": "9314175"
},
{
"name": "Lua",
"bytes": "65885"
},
{
"name": "Makefile",
"bytes": "9225"
},
{
"name": "Python",
"bytes": "50385401"
},
{
"name": "Ruby",
"bytes": "168"
},
{
"name": "Shell",
"bytes": "5685"
},
{
"name": "TypeScript",
"bytes": "773664"
}
],
"symlink_target": ""
} |
import json
import csv
import re
# Only plain ASCII letters and spaces are accepted as colour names.
name_pattern = re.compile("^[a-zA-Z ]+$")

def name_valid(name):
    """Return True when *name* consists solely of ASCII letters and spaces."""
    return name_pattern.match(name) is not None
def hex_to_rgb(hex):
    """Split a 6-digit hex colour string into an (r, g, b) tuple of ints."""
    value = int(hex, 16)
    return ((value >> 16) & 0xFF, (value >> 8) & 0xFF, value & 0xFF)
# Load the scraped colour map (code -> colour record) from JSON.
colors = {}
with open("./colors.json", "r") as colorfile:
    colors = json.load(colorfile)
    colors = colors["colors"]
# Names already emitted, so the CSV carries one row per unique lowercase name.
colors_written = {}
# NOTE(review): 'wb' for csv.writer is the Python 2 idiom; on Python 3 this
# would need mode "w" with newline="".
with open("db.csv", "wb") as csvfile:
    writer = csv.writer(csvfile)
    for code in colors:
        color = colors[code]
        name = color["name"]
        # Skip names with digits/punctuation (see name_valid's letters+spaces rule).
        if not name_valid(name):
            continue
        name = name.lower()
        if name not in colors_written:
            colors_written[name] = 1
            hex = color["hex"]
            (r, g, b) = hex_to_rgb(hex)
            # Row format: name, red, green, blue.
            writer.writerow([name, r, g, b])
| {
"content_hash": "f2db22eb1419b22161ab74ac8caef02a",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 48,
"avg_line_length": 20.513513513513512,
"alnum_prop": 0.6100131752305665,
"repo_name": "andrewortman/colorbot",
"id": "b904575653247810d12454818473cacf3e54c610",
"size": "782",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data/scraped/benjaminmoore/tocsv.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "5923091"
},
{
"name": "Python",
"bytes": "23820"
}
],
"symlink_target": ""
} |
from flask import Flask, render_template, request
app = Flask(__name__)
@app.route("/")
def index():
    """Render the landing page with an empty template context."""
    return render_template("index.html", p={})
@app.route('/pjax/<query>')
def pjax_index(query):
    """Serve just the content fragment for PJAX requests, the full page otherwise."""
    if "X-PJAX" in request.headers and query:
        payload = {'placeholder': "you requested '%s' in pjax" % query}
        return render_template("container.html", p=payload)
    payload = {
        'placeholder': "you requested '%s' in static" % query,
        'static': True,
    }
    return render_template("index.html", p=payload)
@app.route('/hoge/')
@app.route('/hoge/<title>')
def show_title(title=None):
    """Render the page using *title* (minus its leading '#') as the placeholder.

    Falls back to 'no title' when *title* is absent or lacks the '#' prefix.
    """
    # BUG FIX: 'startwith' is not a str method and raised AttributeError for
    # any non-empty title; the correct name is 'startswith'.
    if title and title.startswith('#'):
        return render_template("index.html", p=dict(placeholder=title[1:]))
    else:
        return render_template("index.html", p=dict(placeholder='no title'))
if __name__ == '__main__':
    # Development server: debug mode on, reachable on all interfaces, port 8888.
    app.debug = True
    app.run(host='0.0.0.0', port=8888)
| {
"content_hash": "434ea83e5b1fad01a15da2073235d42e",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 72,
"avg_line_length": 26.176470588235293,
"alnum_prop": 0.6303370786516854,
"repo_name": "ymotongpoo/restroom",
"id": "b9eb753cd3e179177e7eeaf54b02c3d0412a40a5",
"size": "915",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "python/pjaxtest/pjax-server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4513"
},
{
"name": "C++",
"bytes": "26009"
},
{
"name": "CSS",
"bytes": "1634"
},
{
"name": "D",
"bytes": "838"
},
{
"name": "Go",
"bytes": "11639"
},
{
"name": "HTML",
"bytes": "705490"
},
{
"name": "JavaScript",
"bytes": "10224"
},
{
"name": "Makefile",
"bytes": "4858"
},
{
"name": "OCaml",
"bytes": "10006"
},
{
"name": "Python",
"bytes": "224305"
},
{
"name": "Shell",
"bytes": "462"
},
{
"name": "TypeScript",
"bytes": "2406"
}
],
"symlink_target": ""
} |
"""
Generate prefix list from file
"""
import sys
import fileinput
import getopt
from ipv4seq import numipv4
from ipv4holes import getholes
from prefixin import *
USAGE_MSG = """
Get IPv4 prefixes from a list
(c) POWERNET ISP 2016
Usage:
ipv4full.py [-a|l|s] [-g] [-p] [-n] [-d] <file>
Options:
-a|--all Show prefixes, holes and special (default, prefer)
-l|--hole Only holes
-s|--summary Only summary of prefixes
-g|--aggregate Allow aggregate sequences, otherwise only nested prefixes aggregated
-p|--special Without special
-n|--as_path Show AS_PATH
-d|--prepend Without prepend +|-|*, only with -l or -s
Input file format is A.B.C.D/X,PATH in each line
"""
def main(opt_all=False, opt_hole=False, opt_summary=False, opt_aggregate=False, opt_special=False, opt_aspath=False, opt_prepend=False):
    """Parse CLI options, read prefixes from file/stdin, print prefixes and holes.

    Returns a process exit code: 0 on success, 1 for an invalid input prefix,
    2 for an input read error, 3 for bad command-line options.
    """
    ipstack = []  # carry-over prefix state threaded through getholes() between lines
    prefix_spec_i = 0  # special-prefix index state, reset whenever the AS changes
    prefix_asn = 0  # AS number of the previously processed prefix
    opt_list = "alsgpnd"
    lopt_list = ("all", "hole", "summary", "aggregate", "special", "aspath", "prepend")
    input_flow_name = "-"  # "-" means read stdin (fileinput convention)
    err_id = 0
    # Python 2 nested helper (note the parenthesized first parameter).
    # Prints each prefix in `netin` with the marker `lp` ('+' summary, '-' hole,
    # '*' special); returns the updated (index, asn) state.
    def prefix_out((netin), lp, i, a, opt_s=False, opt_p=False, opt_n=False):
        for net in netin:
            if net[2] != a:
                i = 0
            prefixes, i = prefix_spec(net, i)
            for p in prefixes:
                if p in PREFIX_SPEC:
                    # Special prefixes are marked '*', or suppressed under -p.
                    if not opt_s:
                        sids = '*'
                    else:
                        continue
                else:
                    sids = lp
                if opt_p:
                    sids = ""
                # -n appends the AS_PATH after the prefix.
                print ("{}{}/{}{}".format(sids, numipv4(p[0]), p[1], ", " + str(p[2]) if opt_n else ""))
            a = net[2]
        return i,a
    try:
        opts, args = getopt.getopt(sys.argv[1:], opt_list, lopt_list)
        for opt, arg in opts:
            if opt in ("-a", "--all"):
                opt_all = True
            elif opt in ("-l", "--hole") and not (opt_all or opt_summary):
                opt_hole = True
            elif opt in ("-s", "--summary") and not (opt_all or opt_hole):
                opt_summary = True
            elif opt in ("-g", "--aggregate"):
                opt_aggregate = True
            elif opt in ("-p", "--special"):
                opt_special = True
            elif opt in ("-n", "--aspath"):
                opt_aspath = True
            elif opt in ("-d", "--prepend"):
                opt_prepend = True
        # Default mode is -a (show everything); -d only applies with -l or -s.
        if not (opt_summary or opt_hole):
            opt_all = True
            opt_prepend = False
        if len(args) > 0:
            input_flow_name = args[-1]
        # Main cycle
        for line in fileinput.input(input_flow_name):
            rline = line.strip()
            prefix = ipv4num(rline)
            # ipv4num returns an empty result for a malformed line.
            if not len(prefix):
                print "Invalid prefix '{}'".format(rline)
                err_id = 1
                break
            # Filter too long mask
            if prefix[1] > PREFIX_MAX:
                continue
            # New origin AS: restart the special-prefix index.
            if prefix_asn != prefix[2]:
                prefix_spec_i = 0
            prefix = inprefix_spec(prefix, prefix_spec_i)
            # Main executes
            holes, netunion, ipstack[:] = getholes(prefix, ipstack, opt_aggregate)
            # Print output
            if opt_summary or opt_all:
                prefix_spec_i, prefix_asn = prefix_out(netunion, "+", prefix_spec_i, prefix_asn, opt_special, opt_prepend, opt_aspath)
            if opt_hole or opt_all:
                prefix_spec_i, prefix_asn = prefix_out(holes, "-", prefix_spec_i, prefix_asn, opt_special, opt_prepend, opt_aspath)
    except IOError:
        print ("Input read error in '{}'".format(input_flow_name))
        err_id = 2
    except getopt.GetoptError:
        print (USAGE_MSG)
        err_id = 3
    finally:
        fileinput.close()
    if not err_id:
        if opt_summary or opt_all:
            # After execute print
            prefix_out(ipstack, "+", prefix_spec_i, prefix_asn, opt_special, opt_prepend, opt_aspath)
    return err_id
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit status.
    exit(main())
| {
"content_hash": "9819ed161b89203f2fef8958abe06751",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 136,
"avg_line_length": 29.324137931034482,
"alnum_prop": 0.49106302916274697,
"repo_name": "Urlandi/bgptablehole",
"id": "5bc3a8aba17980ad71cd9d4dfbca415a5a8655d5",
"size": "4277",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ipv4full.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5151"
},
{
"name": "Python",
"bytes": "13837"
}
],
"symlink_target": ""
} |
import hashlib
import mimetypes
import os
import os.path
import re
import shutil
import time
from datetime import datetime
class connector():
"""Connector for elFinder"""
_options = {
'root': '/afs/',
'URL': '',
'rootAlias': 'Home',
'dotFiles': False,
'dirSize': True,
'fileMode': 0644,
'dirMode': 0755,
'imgLib': 'auto',
'tmbDir': '.tmb',
'tmbAtOnce': 5,
'tmbSize': 48,
'fileURL': True,
'uploadMaxSize': 256,
'uploadWriteChunk': 8192,
'uploadAllow': [],
'uploadDeny': [],
'uploadOrder': ['deny', 'allow'],
# 'aclObj': None, # TODO
# 'aclRole': 'user', # TODO
'defaults': {
'read': True,
'write': True,
'rm': True
},
'perms': {},
'archiveMimes': {},
'archivers': {
'create': {},
'extract': {}
},
'disabled': [],
'debug': False
}
_commands = {
'open': '__open',
'reload': '__reload',
'mkdir': '__mkdir',
'mkfile': '__mkfile',
'rename': '__rename',
'upload': '__upload',
'paste': '__paste',
'rm': '__rm',
'duplicate': '__duplicate',
'read': '__read',
'edit': '__edit',
'extract': '__extract',
'archive': '__archive',
'resize': '__resize',
'tmb': '__thumbnails',
'ping': '__ping'
}
_mimeType = {
# text
'txt': 'text/plain',
'conf': 'text/plain',
'ini': 'text/plain',
'php': 'text/x-php',
'html': 'text/html',
'htm': 'text/html',
'js' : 'text/javascript',
'css': 'text/css',
'rtf': 'text/rtf',
'rtfd': 'text/rtfd',
'py' : 'text/x-python',
'java': 'text/x-java-source',
'rb' : 'text/x-ruby',
'sh' : 'text/x-shellscript',
'pl' : 'text/x-perl',
'sql': 'text/x-sql',
# apps
'doc': 'application/msword',
'ogg': 'application/ogg',
'7z': 'application/x-7z-compressed',
# video
'ogm': 'appllication/ogm',
'mkv': 'video/x-matroska'
}
_time = 0
_request = {}
_response = {}
_errorData = {}
_form = {}
_im = None
_sp = None
_today = 0
_yesterday = 0
# public variables
httpAllowedParameters = ('cmd', 'target', 'targets[]', 'current', 'tree', 'name',
'content', 'src', 'dst', 'cut', 'init', 'type', 'width', 'height', 'upload[]')
# return variables
httpStatusCode = 0
httpHeader = {}
httpResponse = None
def __init__(self, opts):
for opt in opts:
self._options[opt] = opts.get(opt)
self._response['debug'] = {}
self._options['URL'] = self.__checkUtf8(self._options['URL'])
self._options['URL'] = self._options['URL'].rstrip('/')
self._options['root'] = self.__checkUtf8(self._options['root'])
self._options['root'] = self._options['root'].rstrip(os.sep)
self.__debug('URL', self._options['URL'])
self.__debug('root', self._options['root'])
for cmd in self._options['disabled']:
if cmd in self._commands:
del self._commands[cmd]
if self._options['tmbDir']:
self._options['tmbDir'] = os.path.join(self._options['root'], self._options['tmbDir'])
if not os.path.exists(self._options['tmbDir']):
self._options['tmbDir'] = False
def __reset(self):
"""Flush per request variables"""
self.httpStatusCode = 0
self.httpHeader = {}
self.httpResponse = None
self._request = {}
self._response = {}
self._errorData = {}
self._form = {}
self._time = time.time()
t = datetime.fromtimestamp(self._time)
self._today = time.mktime(datetime(t.year, t.month, t.day).timetuple())
self._yesterday = self._today - 86400
self._response['debug'] = {}
def run(self, httpRequest = []):
"""main function"""
self.__reset()
rootOk = True
if not os.path.exists(self._options['root']) or self._options['root'] == '':
rootOk = False
self._response['error'] = 'Invalid backend configuration'
elif not self.__isAllowed(self._options['root'], 'read'):
rootOk = False
self._response['error'] = 'Access denied'
for field in self.httpAllowedParameters:
if field in httpRequest:
self._request[field] = httpRequest[field]
if rootOk is True:
if 'cmd' in self._request:
if self._request['cmd'] in self._commands:
cmd = self._commands[self._request['cmd']]
func = getattr(self, '_' + self.__class__.__name__ + cmd, None)
if callable(func):
try:
func()
except Exception, e:
self._response['error'] = 'Command Failed'
self.__debug('exception', str(e))
else:
self._response['error'] = 'Unknown command'
else:
self.__open()
if 'init' in self._request:
self.__checkArchivers()
self._response['disabled'] = self._options['disabled']
if not self._options['fileURL']:
url = ''
else:
url = self._options['URL']
self._response['params'] = {
'dotFiles': self._options['dotFiles'],
'uplMaxSize': str(self._options['uploadMaxSize']) + 'M',
'archives': self._options['archivers']['create'].keys(),
'extract': self._options['archivers']['extract'].keys(),
'url': url
}
if self._errorData:
self._response['errorData'] = self._errorData
if self._options['debug']:
self.__debug('time', (time.time() - self._time))
else:
if 'debug' in self._response:
del self._response['debug']
if self.httpStatusCode < 100:
self.httpStatusCode = 200
if not 'Content-type' in self.httpHeader:
if ('cmd' in self._request and self._request['cmd'] == 'upload') or self._options['debug']:
self.httpHeader['Content-type'] = 'text/html'
else:
self.httpHeader['Content-type'] = 'application/json'
self.httpResponse = self._response
return self.httpStatusCode, self.httpHeader, self.httpResponse
def __open(self):
"""Open file or directory"""
# try to open file
if 'current' in self._request:
curDir = self.__findDir(self._request['current'], None)
curFile = self.__find(self._request['target'], curDir)
if not curDir or not curFile or os.path.isdir(curFile):
self.httpStatusCode = 404
self.httpHeader['Content-type'] = 'text/html'
self.httpResponse = 'File not found'
return
if not self.__isAllowed(curDir, 'read') or not self.__isAllowed(curFile, 'read'):
self.httpStatusCode = 403
self.httpHeader['Content-type'] = 'text/html'
self.httpResponse = 'Access denied'
return
if os.path.islink(curFile):
curFile = self.__readlink(curFile)
if not curFile or os.path.isdir(curFile):
self.httpStatusCode = 404
self.httpHeader['Content-type'] = 'text/html'
self.httpResponse = 'File not found'
return
if (
not self.__isAllowed(os.path.dirname(curFile), 'read')
or not self.__isAllowed(curFile, 'read')
):
self.httpStatusCode = 403
self.httpHeader['Content-type'] = 'text/html'
self.httpResponse = 'Access denied'
return
mime = self.__mimetype(curFile)
parts = mime.split('/', 2)
if parts[0] == 'image': disp = 'image'
elif parts[0] == 'text': disp = 'inline'
else: disp = 'attachments'
self.httpStatusCode = 200
self.httpHeader['Content-type'] = mime
self.httpHeader['Content-Disposition'] = disp + '; filename=' + os.path.basename(curFile)
self.httpHeader['Content-Location'] = curFile.replace(self._options['root'], '')
self.httpHeader['Content-Transfer-Encoding'] = 'binary'
self.httpHeader['Content-Length'] = str(os.lstat(curFile).st_size)
self.httpHeader['Connection'] = 'close'
self._response['file'] = open(curFile, 'r')
return
# try dir
else:
path = self._options['root']
if 'target' in self._request and self._request['target']:
target = self.__findDir(self._request['target'], None)
if not target:
self._response['error'] = 'Invalid parameters'
elif not self.__isAllowed(target, 'read'):
self._response['error'] = 'Access denied'
else:
path = target
self.__content(path, 'tree' in self._request)
pass
def __rename(self):
"""Rename file or dir"""
current = name = target = None
curDir = curName = newName = None
if 'name' in self._request and 'current' in self._request and 'target' in self._request:
name = self._request['name']
current = self._request['current']
target = self._request['target']
curDir = self.__findDir(current, None)
curName = self.__find(target, curDir)
newName = os.path.join(curDir, name)
if not curDir or not curName:
self._response['error'] = 'File not found'
elif not self.__isAllowed(curDir, 'write') and self.__isAllowed(curName, 'rm'):
self._response['error'] = 'Access denied'
elif not self.__checkName(name):
self._response['error'] = 'Invalid name'
elif os.path.exists(newName):
self._response['error'] = 'File or folder with the same name already exists'
else:
self.__rmTmb(curName)
try:
os.rename(curName, newName)
self._response['select'] = [self.__hash(newName)]
self.__content(curDir, os.path.isdir(newName))
except:
self._response['error'] = 'Unable to rename file'
def __mkdir(self):
"""Create new directory"""
current = None
path = None
newDir = None
if 'name' in self._request and 'current' in self._request:
name = self._request['name']
current = self._request['current']
path = self.__findDir(current, None)
newDir = os.path.join(path, name)
if not path:
self._response['error'] = 'Invalid parameters'
elif not self.__isAllowed(path, 'write'):
self._response['error'] = 'Access denied'
elif not self.__checkName(name):
self._response['error'] = 'Invalid name'
elif os.path.exists(newDir):
self._response['error'] = 'File or folder with the same name already exists'
else:
try:
os.mkdir(newDir, int(self._options['dirMode']))
self._response['select'] = [self.__hash(newDir)]
self.__content(path, True)
except:
self._response['error'] = 'Unable to create folder'
def __mkfile(self):
"""Create new file"""
name = current = None
curDir = newFile = None
if 'name' in self._request and 'current' in self._request:
name = self._request['name']
current = self._request['current']
curDir = self.__findDir(current, None)
newFile = os.path.join(curDir, name)
if not curDir or not name:
self._response['error'] = 'Invalid parameters'
elif not self.__isAllowed(curDir, 'write'):
self._response['error'] = 'Access denied'
elif not self.__checkName(name):
self._response['error'] = 'Invalid name'
elif os.path.exists(newFile):
self._response['error'] = 'File or folder with the same name already exists'
else:
try:
open(newFile, 'w').close()
self._response['select'] = [self.__hash(newFile)]
self.__content(curDir, False)
except:
self._response['error'] = 'Unable to create file'
def __rm(self):
"""Delete files and directories"""
current = rmList = None
curDir = rmFile = None
if 'current' in self._request and 'targets[]' in self._request:
current = self._request['current']
rmList = self._request['targets[]']
curDir = self.__findDir(current, None)
if not rmList or not curDir:
self._response['error'] = 'Invalid parameters'
return False
if not isinstance(rmList, list):
rmList = [rmList]
for rm in rmList:
rmFile = self.__find(rm, curDir)
if not rmFile: continue
self.__remove(rmFile)
# TODO if errorData not empty return error
self.__content(curDir, True)
def __upload(self):
"""Upload files"""
try: # Windows needs stdio set for binary mode.
import msvcrt
msvcrt.setmode (0, os.O_BINARY) # stdin = 0
msvcrt.setmode (1, os.O_BINARY) # stdout = 1
except ImportError:
pass
if 'current' in self._request:
curDir = self.__findDir(self._request['current'], None)
if not curDir:
self._response['error'] = 'Invalid parameters'
return
if not self.__isAllowed(curDir, 'write'):
self._response['error'] = 'Access denied'
return
if not 'upload[]' in self._request:
self._response['error'] = 'No file to upload'
return
upFiles = self._request['upload[]']
# invalid format
# must be dict('filename1': 'filedescriptor1', 'filename2': 'filedescriptor2', ...)
if not isinstance(upFiles, dict):
self._response['error'] = 'Invalid parameters'
return
self._response['select'] = []
total = 0
upSize = 0
maxSize = self._options['uploadMaxSize'] * 1024 * 1024
for name, data in upFiles.iteritems():
if name:
total += 1
name = os.path.basename(name)
if not self.__checkName(name):
self.__errorData(name, 'Invalid name')
else:
name = os.path.join(curDir, name)
try:
f = open(name, 'wb', self._options['uploadWriteChunk'])
for chunk in self.__fbuffer(data):
f.write(chunk)
f.close()
upSize += os.lstat(name).st_size
if self.__isUploadAllow(name):
os.chmod(name, self._options['fileMode'])
self._response['select'].append(self.__hash(name))
else:
self.__errorData(name, 'Not allowed file type')
try:
os.unlink(name)
except:
pass
except:
self.__errorData(name, 'Unable to save uploaded file')
if upSize > maxSize:
try:
os.unlink(name)
self.__errorData(name, 'File exceeds the maximum allowed filesize')
except:
pass
# TODO ?
self.__errorData(name, 'File was only partially uploaded')
break
if self._errorData:
if len(self._errorData) == total:
self._response['error'] = 'Unable to upload files'
else:
self._response['error'] = 'Some files was not uploaded'
self.__content(curDir, False)
return
def __paste(self):
"""Copy or cut files/directories"""
if 'current' in self._request and 'src' in self._request and 'dst' in self._request:
curDir = self.__findDir(self._request['current'], None)
src = self.__findDir(self._request['src'], None)
dst = self.__findDir(self._request['dst'], None)
if not curDir or not src or not dst or not 'targets[]' in self._request:
self._response['error'] = 'Invalid parameters'
return
files = self._request['targets[]']
if not isinstance(files, list):
files = [files]
cut = False
if 'cut' in self._request:
if self._request['cut'] == '1':
cut = True
if not self.__isAllowed(src, 'read') or not self.__isAllowed(dst, 'write'):
self._response['error'] = 'Access denied'
return
for fhash in files:
f = self.__find(fhash, src)
if not f:
self._response['error'] = 'File not found'
return
newDst = os.path.join(dst, os.path.basename(f))
if dst.find(f) == 0:
self._response['error'] = 'Unable to copy into itself'
return
if cut:
if not self.__isAllowed(f, 'rm'):
self._response['error'] = 'Move failed'
self._errorData(f, 'Access denied')
self.__content(curDir, True)
return
# TODO thumbs
if os.path.exists(newDst):
self._response['error'] = 'Unable to move files'
self._errorData(f, 'File or folder with the same name already exists')
self.__content(curDir, True)
return
try:
os.rename(f, newDst)
self.__rmTmb(f)
continue
except:
self._response['error'] = 'Unable to move files'
self._errorData(f, 'Unable to move')
self.__content(curDir, True)
return
else:
if not self.__copy(f, newDst):
self._response['error'] = 'Unable to copy files'
self.__content(curDir, True)
return
continue
self.__content(curDir, True)
else:
self._response['error'] = 'Invalid parameters'
return
def __duplicate(self):
"""Create copy of files/directories"""
if 'current' in self._request and 'target' in self._request:
curDir = self.__findDir(self._request['current'], None)
target = self.__find(self._request['target'], curDir)
if not curDir or not target:
self._response['error'] = 'Invalid parameters'
return
if not self.__isAllowed(target, 'read') or not self.__isAllowed(curDir, 'write'):
self._response['error'] = 'Access denied'
newName = self.__uniqueName(target)
if not self.__copy(target, newName):
self._response['error'] = 'Unable to create file copy'
return
self.__content(curDir, True)
return
def __resize(self):
"""Scale image size"""
if not (
'current' in self._request and 'target' in self._request
and 'width' in self._request and 'height' in self._request
):
self._response['error'] = 'Invalid parameters'
return
width = int(self._request['width'])
height = int(self._request['height'])
curDir = self.__findDir(self._request['current'], None)
curFile = self.__find(self._request['target'], curDir)
if width < 1 or height < 1 or not curDir or not curFile:
self._response['error'] = 'Invalid parameters'
return
if not self.__isAllowed(curFile, 'write'):
self._response['error'] = 'Access denied'
return
if not self.__mimetype(curFile).find('image') == 0:
self._response['error'] = 'File is not an image'
return
self.__debug('resize ' + curFile, str(width) + ':' + str(height))
self.__initImgLib()
try:
im = self._im.open(curFile)
imResized = im.resize((width, height), self._im.ANTIALIAS)
imResized.save(curFile)
except Exception, e:
self.__debug('resizeFailed_' + path, str(e))
self._response['error'] = 'Unable to resize image'
return
self._response['select'] = [self.__hash(curFile)]
self.__content(curDir, False)
return
    def __thumbnails(self):
        """Create up to `tmbAtOnce` missing previews for images in 'current'.

        Sets response 'images' to {hash: thumbnail-url} for each thumbnail
        created this round, and response 'tmb' = True when more thumbnails
        remain so the client can call again.
        """
        if 'current' in self._request:
            curDir = self.__findDir(self._request['current'], None)
            # never thumbnail the thumbnail directory itself
            if not curDir or curDir == self._options['tmbDir']:
                return False
        else:
            return False
        self.__initImgLib()
        if self.__canCreateTmb():
            # cap the number of thumbnails generated per request
            if self._options['tmbAtOnce'] > 0:
                tmbMax = self._options['tmbAtOnce']
            else:
                tmbMax = 5
            self._response['current'] = self.__hash(curDir)
            self._response['images'] = {}
            i = 0
            for f in os.listdir(curDir):
                path = os.path.join(curDir, f)
                fhash = self.__hash(path)
                if self.__canCreateTmb(path) and self.__isAllowed(path, 'read'):
                    # thumbnails are stored as <hash>.png inside tmbDir
                    tmb = os.path.join(self._options['tmbDir'], fhash + '.png')
                    if not os.path.exists(tmb):
                        if self.__tmb(path, tmb):
                            self._response['images'].update({
                                fhash: self.__path2url(tmb)
                            })
                        i += 1
                        if i >= tmbMax:
                            # budget exhausted; tell the client to retry
                            self._response['tmb'] = True
                            break
        else:
            return False
        return
def __content(self, path, tree):
"""CWD + CDC + maybe(TREE)"""
self.__cwd(path)
self.__cdc(path)
if tree:
self._response['tree'] = self.__tree(self._options['root'])
def __cwd(self, path):
"""Current Working Directory"""
name = os.path.basename(path)
if path == self._options['root']:
name = self._options['rootAlias']
root = True
else:
root = False
if self._options['rootAlias']:
basename = self._options['rootAlias']
else:
basename = os.path.basename(self._options['root'])
rel = basename + path[len(self._options['root']):]
self._response['cwd'] = {
'hash': self.__hash(path),
'name': self.__checkUtf8(name),
'mime': 'directory',
'rel': self.__checkUtf8(rel),
'size': 0,
'date': datetime.fromtimestamp(os.stat(path).st_mtime).strftime("%d %b %Y %H:%M"),
'read': True,
'write': self.__isAllowed(path, 'write'),
'rm': not root and self.__isAllowed(path, 'rm')
}
def __cdc(self, path):
"""Current Directory Content"""
files = []
dirs = []
for f in sorted(os.listdir(path)):
if not self.__isAccepted(f): continue
pf = os.path.join(path, f)
info = {}
info = self.__info(pf)
info['hash'] = self.__hash(pf)
if info['mime'] == 'directory':
dirs.append(info)
else:
files.append(info)
dirs.extend(files)
self._response['cdc'] = dirs
    def __info(self, path):
        """Build the client-facing metadata dict for one file-system entry.

        Resolves symlinks (marking broken ones), formats the mtime in a
        human-friendly way, and attaches url/dim/tmb fields for images.
        """
        mime = ''
        filetype = 'file'
        if os.path.isfile(path): filetype = 'file'
        if os.path.isdir(path): filetype = 'dir'
        # islink is checked last so links win over file/dir classification
        if os.path.islink(path): filetype = 'link'
        stat = os.lstat(path)
        statDate = datetime.fromtimestamp(stat.st_mtime)
        fdate = ''
        # render mtime as Today/Yesterday/absolute date
        if stat.st_mtime >= self._today:
            fdate = 'Today ' + statDate.strftime("%H:%M")
        elif stat.st_mtime >= self._yesterday and stat.st_mtime < self._today:
            fdate = 'Yesterday ' + statDate.strftime("%H:%M")
        else:
            fdate = statDate.strftime("%d %b %Y %H:%M")
        info = {
            'name': self.__checkUtf8(os.path.basename(path)),
            'hash': self.__hash(path),
            'mime': 'directory' if filetype == 'dir' else self.__mimetype(path),
            'date': fdate,
            'size': self.__dirSize(path) if filetype == 'dir' else stat.st_size,
            'read': self.__isAllowed(path, 'read'),
            'write': self.__isAllowed(path, 'write'),
            'rm': self.__isAllowed(path, 'rm')
        }
        if filetype == 'link':
            lpath = self.__readlink(path)
            if not lpath:
                info['mime'] = 'symlink-broken'
                return info
            if os.path.isdir(lpath):
                info['mime'] = 'directory'
            else:
                info['parent'] = self.__hash(os.path.dirname(lpath))
                info['mime'] = self.__mimetype(lpath)
            if self._options['rootAlias']:
                basename = self._options['rootAlias']
            else:
                basename = os.path.basename(self._options['root'])
            info['link'] = self.__hash(lpath)
            # root-relative display path of the link target
            info['linkTo'] = basename + lpath[len(self._options['root']):]
            # effective permissions are the intersection with the target's
            info['read'] = info['read'] and self.__isAllowed(lpath, 'read')
            info['write'] = info['write'] and self.__isAllowed(lpath, 'write')
            info['rm'] = self.__isAllowed(lpath, 'rm')
        else:
            lpath = False
        if not info['mime'] == 'directory':
            if self._options['fileURL'] and info['read'] is True:
                # for symlinks the URL points at the resolved target
                if lpath:
                    info['url'] = self.__path2url(lpath)
                else:
                    info['url'] = self.__path2url(path)
            if info['mime'][0:5] == 'image':
                if self.__canCreateTmb():
                    dim = self.__getImgSize(path)
                    if dim:
                        info['dim'] = dim
                        info['resize'] = True
                    # if we are in tmb dir, files are thumbs itself
                    if os.path.dirname(path) == self._options['tmbDir']:
                        info['tmb'] = self.__path2url(path)
                        return info
                    tmb = os.path.join(self._options['tmbDir'], info['hash'] + '.png')
                    if os.path.exists(tmb):
                        tmbUrl = self.__path2url(tmb)
                        info['tmb'] = tmbUrl
                    else:
                        # signal the client that thumbnails still need creating
                        self._response['tmb'] = True
        return info
def __tree(self, path):
"""Return directory tree starting from path"""
if not os.path.isdir(path): return ''
if os.path.islink(path): return ''
if path == self._options['root'] and self._options['rootAlias']:
name = self._options['rootAlias']
else:
name = os.path.basename(path)
tree = {
'hash': self.__hash(path),
'name': self.__checkUtf8(name),
'read': self.__isAllowed(path, 'read'),
'write': self.__isAllowed(path, 'write'),
'dirs': []
}
if self.__isAllowed(path, 'read'):
for d in sorted(os.listdir(path)):
pd = os.path.join(path, d)
if os.path.isdir(pd) and not os.path.islink(pd) and self.__isAccepted(d):
tree['dirs'].append(self.__tree(pd))
return tree
    def __uniqueName(self, path, copy = ' copy'):
        """Generate a non-existing name for a copy of `path`.

        Appends `copy` before the extension; when that name is taken,
        falls back to an incrementing ' N' index ("name copy 2.ext", ...).
        Double extensions like .tar.gz/.tar.bz2 are kept intact.
        """
        curDir = os.path.dirname(path)
        curName = os.path.basename(path)
        lastDot = curName.rfind('.')
        ext = newName = ''
        # special-case compound extensions: .xxx.gz / .xxx.bz / .xxx.bz2
        if not os.path.isdir(path) and re.search(r'\..{3}\.(gz|bz|bz2)$', curName):
            pos = -7
            if curName[-1:] == '2':
                pos -= 1
            ext = curName[pos:]
            oldName = curName[0:pos]
            newName = oldName + copy
        elif os.path.isdir(path) or lastDot <= 0:
            # directories and dot-less names: no extension to preserve
            oldName = curName
            newName = oldName + copy
            pass
        else:
            ext = curName[lastDot:]
            oldName = curName[0:lastDot]
            newName = oldName + copy
        pos = 0
        if oldName[-len(copy):] == copy:
            # already ends with the copy suffix — reuse it and go indexed
            newName = oldName
        elif re.search(r'' + copy +'\s\d+$', oldName):
            # already an indexed copy ("name copy 3"); remember the index pos
            pos = oldName.rfind(copy) + len(copy)
            newName = oldName[0:pos]
        else:
            newPath = os.path.join(curDir, newName + ext)
            if not os.path.exists(newPath):
                return newPath
        # if we are here then copy already exists or making copy of copy
        # we will make new indexed copy *black magic*
        idx = 1
        if pos > 0: idx = int(oldName[pos:])
        while True:
            idx += 1
            newNameExt = newName + ' ' + str(idx) + ext
            newPath = os.path.join(curDir, newNameExt)
            if not os.path.exists(newPath):
                return newPath
            # if idx >= 1000: break # possible loop
        return
def __remove(self, target):
"""Internal remove procedure"""
if not self.__isAllowed(target, 'rm'):
self.__errorData(target, 'Access denied')
if not os.path.isdir(target):
try:
os.unlink(target)
return True
except:
self.__errorData(target, 'Remove failed')
return False
else:
for i in os.listdir(target):
if self.__isAccepted(i):
self.__remove(os.path.join(target, i))
try:
os.rmdir(target)
return True
except:
self.__errorData(target, 'Remove failed')
return False
pass
    def __copy(self, src, dst):
        """Recursively copy `src` to `dst`; True on success, False otherwise.

        Failures are recorded via __errorData() and the copy aborts early.
        """
        dstDir = os.path.dirname(dst)
        if not self.__isAllowed(src, 'read'):
            self.__errorData(src, 'Access denied')
            return False
        if not self.__isAllowed(dstDir, 'write'):
            self.__errorData(dstDir, 'Access denied')
            return False
        if os.path.exists(dst):
            self.__errorData(dst, 'File or folder with the same name already exists')
            return False
        if not os.path.isdir(src):
            try:
                # copy data and permission bits, but not mtime/owner
                shutil.copyfile(src, dst)
                shutil.copymode(src, dst)
                return True
            except:
                self.__errorData(src, 'Unable to copy files')
                return False
        else:
            try:
                os.mkdir(dst)
                shutil.copymode(src, dst)
            except:
                self.__errorData(src, 'Unable to copy files')
                return False
            # recurse into the directory; abort on the first failure
            for i in os.listdir(src):
                newSrc = os.path.join(src, i)
                newDst = os.path.join(dst, i)
                if not self.__copy(newSrc, newDst):
                    self.__errorData(newSrc, 'Unable to copy files')
                    return False
        return True
def __checkName(self, name):
"""Check for valid file/dir name"""
pattern = r'[\/\\\:\<\>]'
if re.search(pattern, name):
return False
return True
def __findDir(self, fhash, path):
"""Find directory by hash"""
fhash = str(fhash)
if not path:
path = self._options['root']
if fhash == self.__hash(path):
return path
if not os.path.isdir(path):
return None
for d in os.listdir(path):
pd = os.path.join(path, d)
if os.path.isdir(pd) and not os.path.islink(pd):
if fhash == self.__hash(pd):
return pd
else:
ret = self.__findDir(fhash, pd)
if ret:
return ret
return None
def __find(self, fhash, parent):
"""Find file/dir by hash"""
fhash = str(fhash)
if os.path.isdir(parent):
for i in os.listdir(parent):
path = os.path.join(parent, i)
if fhash == self.__hash(path):
return path
return None
def __read(self):
if 'current' in self._request and 'target' in self._request:
curDir = self.__findDir(self._request['current'], None)
curFile = self.__find(self._request['target'], curDir)
if curDir and curFile:
if self.__isAllowed(curFile, 'read'):
self._response['content'] = open(curFile, 'r').read()
else:
self._response['error'] = 'Access denied'
return
self._response['error'] = 'Invalid parameters'
return
def __edit(self):
"""Save content in file"""
error = ''
if 'current' in self._request and 'target' in self._request and 'content' in self._request:
curDir = self.__findDir(self._request['current'], None)
curFile = self.__find(self._request['target'], curDir)
error = curFile
if curFile and curDir:
if self.__isAllowed(curFile, 'write'):
try:
f = open(curFile, 'w+')
f.write(self._request['content'])
f.close()
self._response['target'] = self.__info(curFile)
except:
self._response['error'] = 'Unable to write to file'
else:
self._response['error'] = 'Access denied'
return
self._response['error'] = 'Invalid parameters'
return
    def __archive(self):
        """Compress the requested targets into an archive in 'current'.

        The archive type must be one of the detected creators in
        options['archivers']['create'] and enabled in 'archiveMimes'.
        """
        # probe which archiver binaries exist on this host
        self.__checkArchivers()
        if (
            not self._options['archivers']['create']
            or not 'type' in self._request
            or not 'current' in self._request
            or not 'targets[]' in self._request
        ):
            self._response['error'] = 'Invalid parameters'
            return
        curDir = self.__findDir(self._request['current'], None)
        archiveType = self._request['type']
        if (
            not archiveType in self._options['archivers']['create']
            or not archiveType in self._options['archiveMimes']
            or not curDir
            or not self.__isAllowed(curDir, 'write')
        ):
            self._response['error'] = 'Unable to create archive'
            return
        files = self._request['targets[]']
        if not isinstance(files, list):
            files = [files]
        realFiles = []
        for fhash in files:
            curFile = self.__find(fhash, curDir)
            if not curFile:
                self._response['error'] = 'File not found'
                return
            realFiles.append(os.path.basename(curFile))
        arc = self._options['archivers']['create'][archiveType]
        # single target: name the archive after it; otherwise 'Archive'
        if len(realFiles) > 1:
            archiveName = 'Archive'
        else:
            archiveName = realFiles[0]
        archiveName += '.' + arc['ext']
        archiveName = self.__uniqueName(archiveName, '')
        archivePath = os.path.join(curDir, archiveName)
        cmd = [arc['cmd']]
        for a in arc['argc'].split():
            cmd.append(a)
        cmd.append(archiveName)
        for f in realFiles:
            cmd.append(f)
        # run the archiver from inside curDir so relative names resolve
        curCwd = os.getcwd()
        os.chdir(curDir)
        self.__runSubProcess(cmd)
        os.chdir(curCwd)
        if os.path.exists(archivePath):
            self.__content(curDir, False)
            self._response['select'] = [self.__hash(archivePath)]
        else:
            self._response['error'] = 'Unable to create archive'
        return
def __extract(self):
"""Uncompress archive"""
if not 'current' in self._request or not 'target' in self._request:
self._response['error'] = 'Invalid parameters'
return
curDir = self.__findDir(self._request['current'], None)
curFile = self.__find(self._request['target'], curDir)
mime = self.__mimetype(curFile)
self.__checkArchivers()
if (
not mime in self._options['archivers']['extract']
or not curDir
or not curFile
or not self.__isAllowed(curDir, 'write')
):
self._response['error'] = 'Invalid parameters'
return
arc = self._options['archivers']['extract'][mime]
cmd = [arc['cmd']]
for a in arc['argc'].split():
cmd.append(a)
cmd.append(os.path.basename(curFile))
curCwd = os.getcwd()
os.chdir(curDir)
ret = self.__runSubProcess(cmd)
os.chdir(curCwd)
if ret:
self.__content(curDir, True)
else:
self._response['error'] = 'Unable to extract files from archive'
return
def __ping(self):
"""Workaround for Safari"""
self.httpStatusCode = 200
self.httpHeader['Connection'] = 'close'
return
def __mimetype(self, path):
"""Detect mimetype of file"""
mime = mimetypes.guess_type(path)[0] or 'unknown'
ext = path[path.rfind('.') + 1:]
if mime == 'unknown' and ('.' + ext) in mimetypes.types_map:
mime = mimetypes.types_map['.' + ext]
if mime == 'text/plain' and ext == 'pl':
mime = self._mimeType[ext]
if mime == 'application/vnd.ms-office' and ext == 'doc':
mime = self._mimeType[ext]
if mime == 'unknown':
if os.path.basename(path) in ['README', 'ChangeLog']:
mime = 'text/plain'
else:
if ext in self._mimeType:
mime = self._mimeType[ext]
# self.__debug('mime ' + os.path.basename(path), ext + ' ' + mime)
return mime
def __tmb(self, path, tmb):
"""Internal thumbnail create procedure"""
try:
im = self._im.open(path).copy()
size = self._options['tmbSize'], self._options['tmbSize']
box = self.__cropTuple(im.size)
if box:
im = im.crop(box)
im.thumbnail(size, self._im.ANTIALIAS)
im.save(tmb, 'PNG')
except Exception, e:
self.__debug('tmbFailed_' + path, str(e))
return False
return True
def __rmTmb(self, path):
tmb = self.__tmbPath(path)
if self._options['tmbDir']:
if os.path.exists(tmb):
try:
os.unlink(tmb)
except:
pass
def __cropTuple(self, size):
w, h = size
if w > h: # landscape
l = int((w - h) / 2)
u = 0
r = l + h
d = h
return (l, u, r, d)
elif h > w: # portrait
l = 0
u = int((h - w) / 2)
r = w
d = u + w
return (l, u, r, d)
else: # cube
pass
return False
def __readlink(self, path):
"""Read link and return real path if not broken"""
target = os.readlink(path);
if not target[0] == '/':
target = os.path.join(os.path.dirname(path), target)
target = os.path.normpath(target)
if os.path.exists(target):
if not target.find(self._options['root']) == -1:
return target
return False
def __dirSize(self, path):
total_size = 0
if self._options['dirSize']:
for dirpath, dirnames, filenames in os.walk(path):
for f in filenames:
fp = os.path.join(dirpath, f)
if os.path.exists(fp):
total_size += os.stat(fp).st_size
else:
total_size = os.lstat(path).st_size
return total_size
    def __fbuffer(self, f, chunk_size = _options['uploadWriteChunk']):
        """Yield successive fixed-size chunks read from file object `f`.

        NOTE: the default chunk size is bound from the class-level
        _options dict at class-definition time, not per call.
        """
        while True:
            chunk = f.read(chunk_size)
            if not chunk: break
            yield chunk
def __canCreateTmb(self, path = None):
if self._options['imgLib'] and self._options['tmbDir']:
if path is not None:
mime = self.__mimetype(path)
if not mime[0:5] == 'image':
return False
return True
else:
return False
def __tmbPath(self, path):
tmb = False
if self._options['tmbDir']:
if not os.path.dirname(path) == self._options['tmbDir']:
tmb = os.path.join(self._options['tmbDir'], self.__hash(path) + '.png')
return tmb
def __isUploadAllow(self, name):
allow = False
deny = False
mime = self.__mimetype(name)
if 'all' in self._options['uploadAllow']:
allow = True
else:
for a in self._options['uploadAllow']:
if mime.find(a) == 0:
allow = True
if 'all' in self._options['uploadDeny']:
deny = True
else:
for d in self._options['uploadDeny']:
if mime.find(d) == 0:
deny = True
if self._options['uploadOrder'][0] == 'allow': # ,deny
if deny is True:
return False
elif allow is True:
return True
else:
return False
else: # deny,allow
if allow is True:
return True
elif deny is True:
return False
else:
return True
def __isAccepted(self, target):
if target == '.' or target == '..':
return False
if target[0:1] == '.' and not self._options['dotFiles']:
return False
return True
    def __isAllowed(self, path, access):
        """Check `access` ('read'/'write'/'rm') on `path`.

        Combines OS-level permissions with the configured per-path
        regex rules in options['perms']; falls back to
        options['defaults']. Denials are recorded via __errorData().
        """
        if not os.path.exists(path):
            return False
        if access == 'read':
            if not os.access(path, os.R_OK):
                self.__errorData(path, access)
                return False
        elif access == 'write':
            if not os.access(path, os.W_OK):
                self.__errorData(path, access)
                return False
        elif access == 'rm':
            # removal requires write permission on the parent directory
            if not os.access(os.path.dirname(path), os.W_OK):
                self.__errorData(path, access)
                return False
        else:
            # unknown access kind — deny
            return False
        # match the root-relative path against configured perm patterns
        path = path[len(os.path.normpath(self._options['root'])):]
        for ppath in self._options['perms']:
            regex = r'' + ppath
            if re.search(regex, path) and access in self._options['perms'][ppath]:
                return self._options['perms'][ppath][access]
        return self._options['defaults'][access]
    def __hash(self, path):
        """MD5 hex digest of the path, used as a stable node identifier."""
        m = hashlib.md5()
        # NOTE(review): update() is fed a byte string here; fine on
        # Python 2, but would need path.encode() on Python 3
        m.update(path)
        return str(m.hexdigest())
    def __path2url(self, path):
        """Map an absolute path below root to its public URL."""
        curDir = path
        length = len(self._options['root'])
        url = self.__checkUtf8(self._options['URL'] + curDir[length:]).replace(os.sep, '/')
        try:
            # percent-encode, keeping '/', ':' and '~' (Python 2 urllib);
            # best-effort: fall back to the raw URL if quoting fails
            import urllib
            url = urllib.quote(url, '/:~')
        except:
            pass
        return url
def __errorData(self, path, msg):
"""Collect error/warning messages"""
self._errorData[path] = msg
    def __initImgLib(self):
        """Lazily import PIL and cache the outcome in options['imgLib'].

        After the first call, options['imgLib'] is either 'PIL' or False
        and self._im holds the Image module (or False).
        """
        if not self._options['imgLib'] is False and self._im is None:
            try:
                import Image  # classic PIL layout (pre-Pillow namespace)
                Image
                self._im = Image
                self._options['imgLib'] = 'PIL'
            except:
                # no usable imaging library — disable thumbnails/resizing
                self._options['imgLib'] = False
                self._im = False
        self.__debug('imgLib', self._options['imgLib'])
        return self._options['imgLib']
def __getImgSize(self, path):
self.__initImgLib();
if self.__canCreateTmb():
try:
im = self._im.open(path)
return str(im.size[0]) + 'x' + str(im.size[1])
except:
pass
return False
def __debug(self, k, v):
if self._options['debug']:
self._response['debug'].update({k: v})
return
    def __checkArchivers(self):
        """Detect available archiver binaries and build the create/extract
        command tables in options['archivers'].

        Each entry maps a mimetype to {'cmd', 'argc', 'ext'}; 7z variants
        act as a fallback for formats whose native tool is missing.
        """
        # import subprocess
        # sp = subprocess.Popen(['tar', '--version'], shell = False,
        # stdout = subprocess.PIPE, stderr=subprocess.PIPE)
        # out, err = sp.communicate()
        # print 'out:', out, '\nerr:', err, '\n'
        archive = { 'create': {}, 'extract': {} }
        # both commands disabled: publish empty tables and stop probing
        if 'archive' in self._options['disabled'] and 'extract' in self._options['disabled']:
            self._options['archiveMimes'] = []
            self._options['archivers'] = archive
            return
        # probe each candidate binary by running a harmless flag
        tar = self.__runSubProcess(['tar', '--version'])
        gzip = self.__runSubProcess(['gzip', '--version'])
        bzip2 = self.__runSubProcess(['bzip2', '--version'])
        zipc = self.__runSubProcess(['zip', '--version'])
        unzip = self.__runSubProcess(['unzip', '--help'])
        # rar/unrar exit with 7 on bare invocation; treat that as present
        rar = self.__runSubProcess(['rar', '--version'], validReturn = [0, 7])
        unrar = self.__runSubProcess(['unrar'], validReturn = [0, 7])
        p7z = self.__runSubProcess(['7z', '--help'])
        p7za = self.__runSubProcess(['7za', '--help'])
        p7zr = self.__runSubProcess(['7zr', '--help'])
        # tar = False
        # tar = gzip = bzip2 = zipc = unzip = rar = unrar = False
        # print tar, gzip, bzip2, zipc, unzip, rar, unrar, p7z, p7za, p7zr
        c = archive['create']
        e = archive['extract']
        if tar:
            mime = 'application/x-tar'
            c.update({mime: {'cmd': 'tar', 'argc': '-cf', 'ext': 'tar'}})
            e.update({mime: {'cmd': 'tar', 'argc': '-xf', 'ext': 'tar'}})
        if tar and gzip:
            mime = 'application/x-gzip'
            c.update({mime: {'cmd': 'tar', 'argc': '-czf', 'ext': 'tar.gz'}})
            e.update({mime: {'cmd': 'tar', 'argc': '-xzf', 'ext': 'tar.gz'}})
        if tar and bzip2:
            mime = 'application/x-bzip2'
            c.update({mime: {'cmd': 'tar', 'argc': '-cjf', 'ext': 'tar.bz2'}})
            e.update({mime: {'cmd': 'tar', 'argc': '-xjf', 'ext': 'tar.bz2'}})
        mime = 'application/zip'
        if zipc:
            c.update({mime: {'cmd': 'zip', 'argc': '-r9', 'ext': 'zip'}})
        if unzip:
            e.update({mime: {'cmd': 'unzip', 'argc': '', 'ext': 'zip'}})
        mime = 'application/x-rar'
        if rar:
            c.update({mime: {'cmd': 'rar', 'argc': 'a -inul', 'ext': 'rar'}})
            e.update({mime: {'cmd': 'rar', 'argc': 'x -y', 'ext': 'rar'}})
        elif unrar:
            e.update({mime: {'cmd': 'unrar', 'argc': 'x -y', 'ext': 'rar'}})
        # pick the first available 7zip flavour
        p7zip = None
        if p7z:
            p7zip = '7z'
        elif p7za:
            p7zip = '7za'
        elif p7zr:
            p7zip = '7zr'
        if p7zip:
            mime = 'application/x-7z-compressed'
            c.update({mime: {'cmd': p7zip, 'argc': 'a -t7z', 'ext': '7z'}})
            e.update({mime: {'cmd': p7zip, 'argc': 'e -y', 'ext': '7z'}})
            # 7z can also stand in for tar/gzip/bzip2/zip when missing
            mime = 'application/x-tar'
            if not mime in c:
                c.update({mime: {'cmd': p7zip, 'argc': 'a -ttar', 'ext': 'tar'}})
            if not mime in e:
                e.update({mime: {'cmd': p7zip, 'argc': 'e -y', 'ext': 'tar'}})
            mime = 'application/x-gzip'
            if not mime in c:
                c.update({mime: {'cmd': p7zip, 'argc': 'a -tgzip', 'ext': 'gz'}})
            if not mime in e:
                e.update({mime: {'cmd': p7zip, 'argc': 'e -y', 'ext': 'tar.gz'}})
            mime = 'application/x-bzip2'
            if not mime in c:
                c.update({mime: {'cmd': p7zip, 'argc': 'a -tbzip2', 'ext': 'bz2'}})
            if not mime in e:
                e.update({mime: {'cmd': p7zip, 'argc': 'e -y', 'ext': 'tar.bz2'}})
            mime = 'application/zip'
            if not mime in c:
                c.update({mime: {'cmd': p7zip, 'argc': 'a -tzip', 'ext': 'zip'}})
            if not mime in e:
                e.update({mime: {'cmd': p7zip, 'argc': 'e -y', 'ext': 'zip'}})
        # default archiveMimes to everything we can create
        if not self._options['archiveMimes']:
            self._options['archiveMimes'] = c.keys()
        else:
            pass
        self._options['archivers'] = archive
        pass
    def __runSubProcess(self, cmd, validReturn = [0]):
        """Run `cmd` without a shell; True when the exit code is in
        `validReturn`, False on any failure (including a missing binary).
        """
        # lazily import subprocess once and cache the module
        if self._sp is None:
            import subprocess
            self._sp = subprocess
        try:
            sp = self._sp.Popen(cmd, shell = False, stdout = self._sp.PIPE, stderr = self._sp.PIPE, stdin = self._sp.PIPE)
            # NOTE(review): communicate('') passes a str; on Python 3 the
            # default pipes expect bytes — verify if ever ported
            out, err = sp.communicate('')
            ret = sp.returncode
            # print cmd, ret, out, err
        except:
            # e.g. OSError when the binary does not exist
            return False
        if not ret in validReturn:
            return False
        return True
    def __checkUtf8(self, name):
        """Return `name` as valid UTF-8, replacing undecodable bytes.

        Python 2 specific: relies on str.decode and the `unicode` builtin.
        """
        try:
            name.decode('utf-8')
        except UnicodeDecodeError:
            # rebuild as a unicode string with invalid bytes replaced
            name = unicode(name, 'utf-8', 'replace')
            self.__debug('invalid encoding', name)
            #name += ' (invalid encoding)'
        return name
| {
"content_hash": "3bbabbbe1765653840f18180cba55802",
"timestamp": "",
"source": "github",
"line_count": 1497,
"max_line_length": 113,
"avg_line_length": 26.828991315965265,
"alnum_prop": 0.6111097278589747,
"repo_name": "bfaviero/ok",
"id": "879b73193e47744306b8d599b3cfa87ffff8e92f",
"size": "40274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ok_filebrowser/elFinder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "31240"
},
{
"name": "HTML",
"bytes": "9606"
},
{
"name": "JavaScript",
"bytes": "229636"
},
{
"name": "Python",
"bytes": "476991"
}
],
"symlink_target": ""
} |
import boto3
import sure # noqa # pylint: disable=unused-import
from moto import mock_ec2, settings
from moto.core import DEFAULT_ACCOUNT_ID as ACCOUNT_ID
from unittest import SkipTest
@mock_ec2
def test_describe_transit_gateway_peering_attachment_empty():
    """With no attachments created, describe returns an empty list."""
    # other tests may leave attachments behind on a shared live server
    if settings.TEST_SERVER_MODE:
        raise SkipTest("ServerMode is not guaranteed to be empty")
    client = boto3.client("ec2", region_name="us-west-1")
    described = client.describe_transit_gateway_peering_attachments()
    described["TransitGatewayPeeringAttachments"].should.equal([])
@mock_ec2
def test_create_and_describe_transit_gateway_peering_attachment():
    """A newly created peering attachment is visible via describe."""
    client = boto3.client("ec2", region_name="us-west-1")
    gateway_ids = []
    for description in ("my first gateway", "my second gateway"):
        created = client.create_transit_gateway(Description=description)
        gateway_ids.append(created["TransitGateway"]["TransitGatewayId"])
    response = client.create_transit_gateway_peering_attachment(
        TransitGatewayId=gateway_ids[0],
        PeerTransitGatewayId=gateway_ids[1],
        PeerAccountId=ACCOUNT_ID,
        PeerRegion="us-east-1",
    )
    response.should.have.key("TransitGatewayPeeringAttachment")
    attachment = response["TransitGatewayPeeringAttachment"]
    attachment.should.have.key("TransitGatewayAttachmentId").match(
        "tgw-attach-[a-z0-9]+"
    )
    attachment["RequesterTgwInfo"]["TransitGatewayId"].should.equal(gateway_ids[0])
    attachment["AccepterTgwInfo"]["TransitGatewayId"].should.equal(gateway_ids[1])
    listed = client.describe_transit_gateway_peering_attachments()[
        "TransitGatewayPeeringAttachments"
    ]
    matching = [
        item
        for item in listed
        if item["TransitGatewayAttachmentId"] == attachment["TransitGatewayAttachmentId"]
    ]
    matching.should.equal([attachment])
@mock_ec2
def test_describe_transit_gateway_peering_attachment_by_filters():
    """Describe supports id lists, Filters, and combinations of both."""
    ec2 = boto3.client("ec2", region_name="us-west-1")
    gateway_id1 = ec2.create_transit_gateway(Description="my first gateway")[
        "TransitGateway"
    ]["TransitGatewayId"]
    gateway_id2 = ec2.create_transit_gateway(Description="my second gateway")[
        "TransitGateway"
    ]["TransitGatewayId"]
    gateway_id3 = ec2.create_transit_gateway(Description="my second gateway")[
        "TransitGateway"
    ]["TransitGatewayId"]
    # three attachments pairing the gateways with each other
    attchmnt1 = create_peering_attachment(ec2, gateway_id1, gateway_id2)
    attchmnt2 = create_peering_attachment(ec2, gateway_id1, gateway_id3)
    attchmnt3 = create_peering_attachment(ec2, gateway_id2, gateway_id3)
    all_attachments = ec2.describe_transit_gateway_peering_attachments()[
        "TransitGatewayPeeringAttachments"
    ]
    ours = [
        a
        for a in all_attachments
        if a["TransitGatewayAttachmentId"] in [attchmnt1, attchmnt2, attchmnt3]
    ]
    ours.should.have.length_of(3)
    # filter by a single explicit attachment id
    find_1 = ec2.describe_transit_gateway_peering_attachments(
        TransitGatewayAttachmentIds=[attchmnt1]
    )["TransitGatewayPeeringAttachments"]
    [a["TransitGatewayAttachmentId"] for a in find_1].should.equal([attchmnt1])
    # filter by multiple explicit attachment ids
    find_1_3 = ec2.describe_transit_gateway_peering_attachments(
        TransitGatewayAttachmentIds=[attchmnt1, attchmnt3]
    )["TransitGatewayPeeringAttachments"]
    [a["TransitGatewayAttachmentId"] for a in find_1_3].should.equal(
        [attchmnt1, attchmnt3]
    )
    # filter by attachment id through the Filters parameter
    find_3 = ec2.describe_transit_gateway_peering_attachments(
        Filters=[{"Name": "transit-gateway-attachment-id", "Values": [attchmnt3]}]
    )["TransitGatewayPeeringAttachments"]
    [a["TransitGatewayAttachmentId"] for a in find_3].should.equal([attchmnt3])
    # new attachments start in state 'available' and match a state filter
    filters = [{"Name": "state", "Values": ["available"]}]
    find_all = retrieve_all_attachments(ec2, filters)
    all_ids = [a["TransitGatewayAttachmentId"] for a in find_all]
    all_ids.should.contain(attchmnt1)
    all_ids.should.contain(attchmnt2)
    all_ids.should.contain(attchmnt3)
    # a state nobody is in matches nothing
    find_none = ec2.describe_transit_gateway_peering_attachments(
        Filters=[{"Name": "state", "Values": ["unknown"]}]
    )["TransitGatewayPeeringAttachments"]
    find_none.should.equal([])
    # rejecting one attachment removes it from the 'available' filter,
    # even when it is named explicitly in the id list
    ec2.reject_transit_gateway_peering_attachment(TransitGatewayAttachmentId=attchmnt2)
    find_available = ec2.describe_transit_gateway_peering_attachments(
        TransitGatewayAttachmentIds=[attchmnt1, attchmnt2],
        Filters=[{"Name": "state", "Values": ["available"]}],
    )["TransitGatewayPeeringAttachments"]
    [a["TransitGatewayAttachmentId"] for a in find_available].should.equal([attchmnt1])
@mock_ec2
def test_create_and_accept_transit_gateway_peering_attachment():
    """Accepting a pending attachment moves it to state 'available'."""
    client = boto3.client("ec2", region_name="us-west-1")
    first = client.create_transit_gateway(Description="my first gateway")
    second = client.create_transit_gateway(Description="my second gateway")
    attachment_id = create_peering_attachment(
        client,
        first["TransitGateway"]["TransitGatewayId"],
        second["TransitGateway"]["TransitGatewayId"],
    )
    client.accept_transit_gateway_peering_attachment(
        TransitGatewayAttachmentId=attachment_id
    )
    described = client.describe_transit_gateway_peering_attachments(
        TransitGatewayAttachmentIds=[attachment_id]
    )["TransitGatewayPeeringAttachments"][0]
    described.should.have.key("TransitGatewayAttachmentId").equal(attachment_id)
    described.should.have.key("State").equal("available")
@mock_ec2
def test_create_and_reject_transit_gateway_peering_attachment():
    """Rejecting a pending attachment moves it to state 'rejected'."""
    client = boto3.client("ec2", region_name="us-west-1")
    first = client.create_transit_gateway(Description="my first gateway")
    second = client.create_transit_gateway(Description="my second gateway")
    attachment_id = create_peering_attachment(
        client,
        first["TransitGateway"]["TransitGatewayId"],
        second["TransitGateway"]["TransitGatewayId"],
    )
    client.reject_transit_gateway_peering_attachment(
        TransitGatewayAttachmentId=attachment_id
    )
    described = client.describe_transit_gateway_peering_attachments(
        TransitGatewayAttachmentIds=[attachment_id]
    )["TransitGatewayPeeringAttachments"][0]
    described.should.have.key("TransitGatewayAttachmentId").equal(attachment_id)
    described.should.have.key("State").equal("rejected")
@mock_ec2
def test_create_and_delete_transit_gateway_peering_attachment():
    """Deleting an attachment moves it to state 'deleted' (still listed)."""
    client = boto3.client("ec2", region_name="us-west-1")
    first = client.create_transit_gateway(Description="my first gateway")
    second = client.create_transit_gateway(Description="my second gateway")
    attachment_id = create_peering_attachment(
        client,
        first["TransitGateway"]["TransitGatewayId"],
        second["TransitGateway"]["TransitGatewayId"],
    )
    client.delete_transit_gateway_peering_attachment(
        TransitGatewayAttachmentId=attachment_id
    )
    described = client.describe_transit_gateway_peering_attachments(
        TransitGatewayAttachmentIds=[attachment_id]
    )["TransitGatewayPeeringAttachments"][0]
    described.should.have.key("TransitGatewayAttachmentId").equal(attachment_id)
    described.should.have.key("State").equal("deleted")
def create_peering_attachment(ec2, gateway_id1, gateway_id2):
    """Create a peering attachment between two gateways; return its id."""
    attachment = ec2.create_transit_gateway_peering_attachment(
        TransitGatewayId=gateway_id1,
        PeerTransitGatewayId=gateway_id2,
        PeerAccountId=ACCOUNT_ID,
        PeerRegion="us-east-1",
    )["TransitGatewayPeeringAttachment"]
    return attachment["TransitGatewayAttachmentId"]
def retrieve_all_attachments(client, filters=None):
    """Collect all transit gateway peering attachments across pagination.

    :param client: boto3 EC2 client (or compatible object)
    :param filters: optional list of Filters dicts passed to each call
    :return: combined list of attachment dicts from every page
    """
    # FIX: replaced the mutable default argument `filters=[]` — a shared
    # list default persists across calls; normalize None to a fresh list
    filters = filters if filters is not None else []
    resp = client.describe_transit_gateway_peering_attachments(Filters=filters)
    attachments = resp["TransitGatewayPeeringAttachments"]
    token = resp.get("NextToken")
    # follow NextToken until the service stops returning one
    while token:
        resp = client.describe_transit_gateway_peering_attachments(
            Filters=filters, NextToken=token
        )
        attachments.extend(resp["TransitGatewayPeeringAttachments"])
        token = resp.get("NextToken")
    return attachments
| {
"content_hash": "c8a6b58ac9d4664d2dcfe6e2251ed351",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 88,
"avg_line_length": 39.95073891625616,
"alnum_prop": 0.7135635018495684,
"repo_name": "spulec/moto",
"id": "a61cd5f914397dbd211f8d4d8cae0bb3f86f010c",
"size": "8110",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_ec2/test_transit_gateway_peering_attachments.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "255"
},
{
"name": "HTML",
"bytes": "5983"
},
{
"name": "Java",
"bytes": "1688"
},
{
"name": "JavaScript",
"bytes": "1424"
},
{
"name": "Jinja",
"bytes": "2502"
},
{
"name": "Makefile",
"bytes": "2284"
},
{
"name": "Python",
"bytes": "14737868"
},
{
"name": "Ruby",
"bytes": "188"
},
{
"name": "Scala",
"bytes": "782"
},
{
"name": "Shell",
"bytes": "5515"
}
],
"symlink_target": ""
} |
"""
WSGI config for Dota2Stats project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "Dota2Stats.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Dota2Stats.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| {
"content_hash": "d1c42f32c1723a4a30e7ca48ffec1c30",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 79,
"avg_line_length": 44.71875,
"alnum_prop": 0.7945492662473794,
"repo_name": "mmbob/Dota2Stats",
"id": "f33a4381a846a97b69887244624af90ae04092b9",
"size": "1431",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Dota2Stats/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "61055"
},
{
"name": "JavaScript",
"bytes": "1468"
},
{
"name": "Python",
"bytes": "24884"
}
],
"symlink_target": ""
} |
import pyexcel as pe
import os
from base import clean_up_files
from nose.tools import raises
class TestSpliting:
    """Tests for splitting a multi-sheet book into individual sheet files."""

    def setUp(self):
        # Write a three-sheet workbook that every test uses as its source.
        self.testfile4 = "multiple_sheets.xls"
        self.content4 = {
            "Sheet1": [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]],
            "Sheet2": [[4, 4, 4, 4], [5, 5, 5, 5], [6, 6, 6, 6]],
            "Sheet3": [[u'X', u'Y', u'Z'], [1, 4, 7], [2, 5, 8], [3, 6, 9]]
        }
        book_writer = pe.BookWriter(self.testfile4)
        book_writer.write_book_from_dict(self.content4)
        book_writer.close()

    def test_split_a_book(self):
        pe.cookbook.split_a_book(self.testfile4, "extracted.csv")
        # One output file per sheet, prefixed with the sheet name.
        for sheet_name in ("Sheet1", "Sheet2", "Sheet3"):
            assert os.path.exists("%s_extracted.csv" % sheet_name)

    def test_split_a_book_2(self):
        """Split using the default output file name."""
        pe.cookbook.split_a_book(self.testfile4)
        for sheet_name in ("Sheet1", "Sheet2", "Sheet3"):
            assert os.path.exists("%s_%s" % (sheet_name, self.testfile4))

    def test_extract_a_book(self):
        pe.cookbook.extract_a_sheet_from_a_book(self.testfile4, "Sheet1", "extracted.csv")
        assert os.path.exists("Sheet1_extracted.csv")

    def test_extract_a_book_2(self):
        """Extract using the default output file name."""
        pe.cookbook.extract_a_sheet_from_a_book(self.testfile4, "Sheet1")
        assert os.path.exists("Sheet1_%s" % self.testfile4)

    def tearDown(self):
        # Remove the source workbook plus every file a test may have produced.
        leftovers = [self.testfile4]
        for sheet_name in ("Sheet1", "Sheet2", "Sheet3"):
            leftovers.append("%s_extracted.csv" % sheet_name)
        for sheet_name in ("Sheet1", "Sheet2", "Sheet3"):
            leftovers.append("%s_multiple_sheets.xls" % sheet_name)
        clean_up_files(leftovers)
class TestCookbook:
    """Exercises pyexcel.cookbook helpers: column/row updates, merging of
    files/readers, filtered merges and merging into a multi-sheet book.

    Several tests rely on nose's @raises: the expected exception must be
    raised by the LAST cookbook call in the method (marked "bang"), after
    the preceding assertions have already passed.
    """
    def setUp(self):
        """
        Make a test csv file as:
        1,1,1,1
        2,2,2,2
        3,3,3,3
        """
        self.testfile = "test1.xls"
        self.content = {
            "X": [1, 2, 3, 4, 5],
            "Y": [6, 7, 8, 9, 10],
            "Z": [11, 12, 13, 14, 15],
        }
        w = pe.Writer(self.testfile)
        w.write_dict(self.content)
        w.close()
        self.testfile2 = "test.csv"
        self.content2 = {
            "O": [1, 2, 3, 4, 5],
            "P": [6, 7, 8, 9, 10],
            "Q": [11, 12, 13, 14, 15],
        }
        w = pe.Writer(self.testfile2)
        w.write_dict(self.content2)
        w.close()
        self.testfile3 = "test.xls"
        self.content3 = {
            "R": [1, 2, 3, 4, 5],
            "S": [6, 7, 8, 9, 10],
            "T": [11, 12, 13, 14, 15],
        }
        w = pe.Writer(self.testfile3)
        w.write_dict(self.content3)
        w.close()
        self.testfile4 = "multiple_sheets.xls"
        self.content4 = {
            "Sheet1": [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]],
            "Sheet2": [[4, 4, 4, 4], [5, 5, 5, 5], [6, 6, 6, 6]],
            "Sheet3": [[u'X', u'Y', u'Z'], [1, 4, 7], [2, 5, 8], [3, 6, 9]]
        }
        w = pe.BookWriter(self.testfile4)
        w.write_book_from_dict(self.content4)
        w.close()
    @raises(ValueError)
    def test_update_columns(self):
        # Updating a column that does not exist must raise ValueError.
        bad_column = {"A": [31, 1, 1, 1, 1]}
        # try non-existent column first
        pe.cookbook.update_columns(self.testfile, bad_column)
    @raises(NotImplementedError)
    def test_update_columns2(self):
        custom_column = {"Z": [33, 44, 55, 66, 77]}
        pe.cookbook.update_columns(self.testfile, custom_column)
        r = pe.SeriesReader("pyexcel_%s" % self.testfile)
        data = pe.utils.to_dict(r)
        assert data["Z"] == custom_column["Z"]
        pe.cookbook.update_columns(self.testfile, custom_column, "test4.xls")
        r = pe.SeriesReader("test4.xls")
        data = pe.utils.to_dict(r)
        assert data["Z"] == custom_column["Z"]
        # the default output file now exists, so repeating the update must
        # refuse to overwrite it and raise NotImplementedError
        pe.cookbook.update_columns(self.testfile, custom_column) #bang
    def test_update_rows(self):
        bad_column = {100: [31, 1, 1, 1, 1]}
        custom_column = {"1.0": [3,4]}
        try:
            # try non-existent row first
            pe.cookbook.update_rows(self.testfile, bad_column)
            assert 1==2
        except ValueError:
            assert 1==1
        pe.cookbook.update_rows(self.testfile, custom_column)
        r = pe.Reader("pyexcel_%s" % self.testfile)
        assert custom_column["1.0"] == r.row_at(1)[1:]
        try:
            # the output file already exists: must not overwrite it
            pe.cookbook.update_rows(self.testfile, custom_column)
            r = pe.SeriesReader("pyexcel_%s" % self.testfile)
            assert 1==2
        except NotImplementedError:
            assert 1==1
        pe.cookbook.update_rows(self.testfile, custom_column, "test4.xls")
        r = pe.Reader("test4.xls")
        assert custom_column["1.0"] == r.row_at(1)[1:]
    @raises(NotImplementedError)
    def test_merge_two_files(self):
        pe.cookbook.merge_two_files(self.testfile, self.testfile2)
        r = pe.SeriesReader("pyexcel_merged.csv")
        r.apply_formatter(pe.formatters.SheetFormatter(int))
        data = pe.utils.to_dict(r)
        content = {}
        content.update(self.content)
        content.update(self.content2)
        assert data == content
        # second merge must refuse to overwrite pyexcel_merged.csv
        pe.cookbook.merge_two_files(self.testfile, self.testfile2) # bang
    @raises(NotImplementedError)
    def test_merge_files(self):
        file_array = [self.testfile, self.testfile2, self.testfile3]
        pe.cookbook.merge_files(file_array)
        r = pe.SeriesReader("pyexcel_merged.csv")
        r.apply_formatter(pe.formatters.SheetFormatter(int))
        data = pe.utils.to_dict(r)
        content = {}
        content.update(self.content)
        content.update(self.content2)
        content.update(self.content3)
        assert data == content
        pe.cookbook.merge_files(file_array) # bang, do not overwrite
    @raises(NotImplementedError)
    def test_merge_two_readers(self):
        r1 = pe.SeriesReader(self.testfile)
        r2 = pe.SeriesReader(self.testfile2)
        pe.cookbook.merge_two_readers(r1, r2)
        r = pe.SeriesReader("pyexcel_merged.csv")
        r.apply_formatter(pe.formatters.SheetFormatter(int))
        data = pe.utils.to_dict(r)
        content = {}
        content.update(self.content)
        content.update(self.content2)
        assert data == content
        pe.cookbook.merge_two_readers(r1, r2) # bang, do not overwrite
    @raises(NotImplementedError)
    def test_merge_readers(self):
        r1 = pe.SeriesReader(self.testfile)
        r2 = pe.SeriesReader(self.testfile2)
        r3 = pe.SeriesReader(self.testfile3)
        file_array = [r1, r2, r3]
        pe.cookbook.merge_readers(file_array)
        r = pe.SeriesReader("pyexcel_merged.csv")
        r.apply_formatter(pe.formatters.SheetFormatter(int))
        data = pe.utils.to_dict(r)
        content = {}
        content.update(self.content)
        content.update(self.content2)
        content.update(self.content3)
        assert data == content
        pe.cookbook.merge_readers(file_array) # bang, do not overwrite
    def test_merge_two_row_filter_hat_readers(self):
        # Plain merge of two readers without any filters.
        r1 = pe.SeriesReader(self.testfile)
        r2 = pe.SeriesReader(self.testfile2)
        pe.cookbook.merge_two_readers(r1, r2)
        r = pe.SeriesReader("pyexcel_merged.csv")
        r.apply_formatter(pe.formatters.SheetFormatter(int))
        data = pe.utils.to_dict(r)
        content = {}
        content.update(self.content)
        content.update(self.content2)
        assert data == content
    def test_merge_two_row_filter_hat_readers_2(self):
        """
        Now start row filtering
        """
        r1 = pe.SeriesReader(self.testfile)
        r1.add_filter(pe.filters.OddRowFilter())
        r2 = pe.SeriesReader(self.testfile2)
        r2.add_filter(pe.filters.EvenRowFilter())
        pe.cookbook.merge_two_readers(r1, r2)
        r = pe.SeriesReader("pyexcel_merged.csv")
        r.apply_formatter(pe.formatters.SheetFormatter(int))
        data = pe.utils.to_dict(r)
        # shorter filtered columns are padded with 0 by the merge
        content = {
            'Y': [7, 9, 0],
            'X': [2, 4, 0],
            'Z': [12, 14, 0],
            'O': [1, 3, 5],
            'Q': [11, 13, 15],
            'P': [6, 8, 10]
        }
        assert data == content
    def test_merge_two_row_filter_hat_readers_3(self):
        """
        Now start column filtering
        """
        r1 = pe.SeriesReader(self.testfile)
        r1.add_filter(pe.filters.OddColumnFilter())
        r2 = pe.SeriesReader(self.testfile2)
        r2.add_filter(pe.filters.EvenColumnFilter())
        pe.cookbook.merge_two_readers(r1, r2)
        r = pe.SeriesReader("pyexcel_merged.csv")
        r.apply_formatter(pe.formatters.SheetFormatter(int))
        data = pe.utils.to_dict(r)
        content = {
            "Y": [6, 7, 8, 9, 10],
            "O": [1, 2, 3, 4, 5],
            "Q": [11, 12, 13, 14, 15]
        }
        assert data == content
    def test_merge_any_files_to_a_book(self):
        # Merge three single-sheet files plus a three-sheet book: the result
        # should contain one sheet per input file/sheet.
        file_array = [self.testfile, self.testfile2,
                      self.testfile3, self.testfile4]
        pe.cookbook.merge_all_to_a_book(file_array, "merged.xlsx")
        r = pe.BookReader("merged.xlsx")
        r[self.testfile].name_columns_by_row(0)
        content = r[self.testfile].to_dict()
        assert content == self.content
        r[self.testfile2].apply_formatter(pe.formatters.SheetFormatter(int))
        r[self.testfile2].name_columns_by_row(0)
        content2 = r[self.testfile2].to_dict()
        assert content2 == self.content2
        r[self.testfile3].name_columns_by_row(0)
        content3 = r[self.testfile3].to_dict()
        assert content3 == self.content3
        content4 = pe.utils.to_array(r["Sheet1"])
        assert content4 == self.content4["Sheet1"]
        content5 = pe.utils.to_array(r["Sheet2"])
        assert content5 == self.content4["Sheet2"]
        content6 = pe.utils.to_array(r["Sheet3"])
        assert content6 == self.content4["Sheet3"]
    def test_merge_csv_files_to_a_book(self):
        file_array = [self.testfile, self.testfile2,
                      self.testfile3]
        pe.cookbook.merge_csv_to_a_book(file_array, "merged.xlsx")
        r = pe.BookReader("merged.xlsx")
        r[self.testfile].name_columns_by_row(0)
        content = r[self.testfile].to_dict()
        assert content == self.content
        r[self.testfile2].format(int)
        r[self.testfile2].name_columns_by_row(0)
        content2 = r[self.testfile2].to_dict()
        assert content2 == self.content2
        r[self.testfile3].name_columns_by_row(0)
        content3 = r[self.testfile3].to_dict()
        assert content3 == self.content3
    def tearDown(self):
        # Remove every file any test may have created.
        file_list = [
            self.testfile,
            self.testfile2,
            self.testfile3,
            self.testfile4,
            "pyexcel_%s" % self.testfile,
            "pyexcel_merged.csv",
            "merged.xlsx",
            "merged.xls",
            "test4.xls"
        ]
        clean_up_files(file_list)
| {
"content_hash": "7fe92ba30a23a06fa41a4648e82270a0",
"timestamp": "",
"source": "github",
"line_count": 302,
"max_line_length": 90,
"avg_line_length": 37.01655629139073,
"alnum_prop": 0.5636461221933984,
"repo_name": "lordakshaya/pyexcel",
"id": "58b07204fc842f501fa0915fa11b4a2e24ef05e1",
"size": "11179",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_cookbook.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "455"
},
{
"name": "HTML",
"bytes": "235"
},
{
"name": "Makefile",
"bytes": "145"
},
{
"name": "Python",
"bytes": "433260"
},
{
"name": "Shell",
"bytes": "541"
}
],
"symlink_target": ""
} |
import abc
import imghdr
import os
import re
import sys
import semantic_version
import six
from murano.packages import exceptions
from murano.packages import package
class PackageBase(package.Package):
    """Common base for Murano package implementations.

    Parses the manifest fields shared by all package formats (FullName,
    Version, Type, Name, Description, Author, Supplier, Logo, Tags) and
    exposes them as read-only properties.  Concrete formats must implement
    the abstract ``requirements``/``classes``/``ui`` members.
    """

    def __init__(self, format_name, runtime_version,
                 source_directory, manifest):
        super(PackageBase, self).__init__(
            format_name, runtime_version, source_directory)
        self._full_name = manifest.get('FullName')
        if not self._full_name:
            raise exceptions.PackageFormatError('FullName is not specified')
        self._check_full_name(self._full_name)
        # Coerce loose version strings (e.g. "1.2") into a full semver value.
        self._version = semantic_version.Version.coerce(str(manifest.get(
            'Version', '0.0.0')))
        self._package_type = manifest.get('Type')
        if self._package_type not in package.PackageType.ALL:
            raise exceptions.PackageFormatError(
                'Invalid package Type {0}'.format(self._package_type))
        self._display_name = manifest.get('Name', self._full_name)
        self._description = manifest.get('Description')
        self._author = manifest.get('Author')
        self._supplier = manifest.get('Supplier') or {}
        self._logo = manifest.get('Logo')
        self._tags = manifest.get('Tags')
        self._logo_cache = None
        self._supplier_logo_cache = None
        self._source_directory = source_directory

    @abc.abstractproperty
    def requirements(self):
        raise NotImplementedError()

    @abc.abstractproperty
    def classes(self):
        raise NotImplementedError()

    @abc.abstractmethod
    def get_class(self, name):
        raise NotImplementedError()

    @abc.abstractproperty
    def ui(self):
        raise NotImplementedError()

    @property
    def full_name(self):
        return self._full_name

    @property
    def source_directory(self):
        return self._source_directory

    @property
    def version(self):
        return self._version

    @property
    def package_type(self):
        return self._package_type

    @property
    def display_name(self):
        return self._display_name

    @property
    def description(self):
        return self._description

    @property
    def author(self):
        return self._author

    @property
    def supplier(self):
        return self._supplier

    @property
    def tags(self):
        # Return a copy so callers cannot mutate the manifest data, and
        # tolerate manifests without a Tags section (previously list(None)
        # raised TypeError).
        return list(self._tags or [])

    @property
    def logo(self):
        return self._load_image(self._logo, 'logo.png', 'logo')

    @property
    def meta(self):
        return None

    @property
    def supplier_logo(self):
        return self._load_image(
            self._supplier.get('Logo'), 'supplier_logo.png', 'supplier logo')

    def get_resource(self, name):
        """Return the absolute path for *name* under the package's Resources
        directory, creating the directory if it does not yet exist."""
        resources_dir = os.path.join(self._source_directory, 'Resources')
        if not os.path.exists(resources_dir):
            os.makedirs(resources_dir)
        return os.path.join(resources_dir, name)

    def _load_image(self, file_name, default_name, what_image):
        """Read a PNG image from the package.

        :param file_name: image path from the manifest, or falsy to use
            ``default_name``.
        :param default_name: fallback file name for an optional image.
        :param what_image: human-readable label used in error messages.
        :return: raw image bytes, or None if the optional default image
            is absent.
        :raises exceptions.PackageLoadError: if the file is not PNG or
            cannot be read.
        """
        full_path = os.path.join(
            self._source_directory, file_name or default_name)
        if not os.path.isfile(full_path) and not file_name:
            # Optional default image simply is not present - not an error.
            return
        try:
            if imghdr.what(full_path) != 'png':
                raise exceptions.PackageLoadError(
                    '{0} is not in PNG format'.format(what_image))
            # Open in binary mode: PNG data is not text.  Text mode fails
            # to decode (Python 3) or corrupts bytes on some platforms.
            with open(full_path, 'rb') as stream:
                return stream.read()
        except Exception as ex:
            trace = sys.exc_info()[2]
            six.reraise(exceptions.PackageLoadError,
                        exceptions.PackageLoadError(
                            'Unable to load {0}: {1}'.format(what_image, ex)),
                        trace)

    @staticmethod
    def _check_full_name(full_name):
        """Validate a dotted package name: word characters and dots only,
        no leading/trailing dot and no empty segments ('..')."""
        error = exceptions.PackageFormatError('Invalid FullName ' + full_name)
        if re.match(r'^[\w\.]+$', full_name):
            if full_name.startswith('.') or full_name.endswith('.'):
                raise error
            if '..' in full_name:
                raise error
        else:
            raise error
| {
"content_hash": "fd56eefd93d4c99911688610e5517387",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 78,
"avg_line_length": 29.768115942028984,
"alnum_prop": 0.5971275559883155,
"repo_name": "satish-avninetworks/murano",
"id": "40598c63f5931915f403d91ff83801797560a076",
"size": "4691",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "murano/packages/package_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "152"
},
{
"name": "Mako",
"bytes": "1013"
},
{
"name": "PowerShell",
"bytes": "2772"
},
{
"name": "Puppet",
"bytes": "86"
},
{
"name": "Python",
"bytes": "1315378"
},
{
"name": "Ruby",
"bytes": "444"
},
{
"name": "Shell",
"bytes": "25729"
}
],
"symlink_target": ""
} |
from django.contrib.auth.decorators import login_required
class DisableCacheProtectAdminPages(object):
    """Middleware that marks grapher admin responses as non-cacheable and
    forces authentication on admin views, except the login and invitation
    pages (which must stay reachable anonymously)."""

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        response = self.get_response(request)
        on_admin_page = '/grapher/admin' in request.path
        if on_admin_page and 'Cache-Control' not in response:
            # Only set the header when the view did not choose one itself.
            response['Cache-Control'] = 'no-cache'
        return response

    def process_view(self, request, view_func, view_args, view_kwargs):
        path = request.path
        if '/grapher/admin' not in path:
            return None
        exempt = ('/grapher/admin/login' in path
                  or '/grapher/admin/invitation' in path)
        if exempt:
            return None
        return login_required(view_func)(request, *view_args, **view_kwargs)
| {
"content_hash": "9d785aacbc236a37cdc1a83f959a4bba",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 110,
"avg_line_length": 33,
"alnum_prop": 0.6561264822134387,
"repo_name": "aaldaber/owid-grapher",
"id": "e9b63d9c41b45f6191a05ba7e0239ec260f4784f",
"size": "759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grapher_admin/disable_cache_protect_admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "41787"
},
{
"name": "HTML",
"bytes": "44942"
},
{
"name": "JavaScript",
"bytes": "5376"
},
{
"name": "Python",
"bytes": "1689167"
},
{
"name": "Shell",
"bytes": "2596"
},
{
"name": "TypeScript",
"bytes": "899184"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from smartfields.processors import ImageProcessor, ImageFormat
class ImageProcessorsTestCase(TestCase):
    """Unit tests for smartfields' ImageProcessor dimension logic and
    ImageFormat helpers."""

    def test_dimensions_scaling(self):
        """get_dimensions honours hard/min/max constraints, preserving the
        aspect ratio unless preserve=False."""
        p = ImageProcessor()
        # scaling up: hard set dims
        self.assertEqual(p.get_dimensions(200, 100, width=100), (100, 50))
        self.assertEqual(p.get_dimensions(200, 100, height=200), (400, 200))
        # scaling up: single
        self.assertEqual(p.get_dimensions(200, 100, min_width=300), (300, 150))
        self.assertEqual(p.get_dimensions(200, 100, min_height=200), (400, 200))
        # scaling up: both
        self.assertEqual(p.get_dimensions(200, 100, min_width=300, min_height=200), (400, 200))
        self.assertEqual(p.get_dimensions(200, 100, min_width=600, min_height=200), (600, 300))
        # scaling up: mixing
        self.assertEqual(p.get_dimensions(200, 100, min_width=300, max_width=400), (300, 150))
        self.assertEqual(p.get_dimensions(200, 100, min_height=200, max_height=400), (400, 200))
        # scaling down: single
        self.assertEqual(p.get_dimensions(200, 100, max_width=50), (50, 25))
        self.assertEqual(p.get_dimensions(200, 100, max_height=25), (50, 25))
        # scaling down: both
        self.assertEqual(p.get_dimensions(200, 100, max_width=100, max_height=75), (100, 50))
        self.assertEqual(p.get_dimensions(200, 100, max_width=150, max_height=50), (100, 50))
        # scaling down: mixin
        self.assertEqual(p.get_dimensions(200, 100, min_width=50, max_width=100), (100, 50))
        self.assertEqual(p.get_dimensions(200, 100, min_height=10, max_height=50), (100, 50))
        # no scaling: single
        self.assertEqual(p.get_dimensions(200, 100, min_width=100), (200, 100))
        self.assertEqual(p.get_dimensions(200, 100, min_height=50), (200, 100))
        self.assertEqual(p.get_dimensions(200, 100, max_width=300), (200, 100))
        self.assertEqual(p.get_dimensions(200, 100, max_height=150), (200, 100))
        # no scaling: both
        self.assertEqual(p.get_dimensions(200, 100, min_width=50, min_height=50), (200, 100))
        self.assertEqual(p.get_dimensions(200, 100, max_width=400, max_height=200), (200, 100))
        # without preserving ratio
        self.assertEqual(p.get_dimensions(
            200, 100, min_width=50, max_width=100, min_height=2000,
            max_height=2001, preserve=False), (100, 2000))
        self.assertEqual(p.get_dimensions(
            200, 100, height=500, min_width=300, max_width=400, preserve=False), (300, 500))

    def test_dimensions_checking(self):
        """Contradictory or (with preserve=True) over-constrained parameter
        combinations are rejected by _check_scale_params."""
        p = ImageProcessor()
        # ones that totally don't make sense
        self.assertRaises(AssertionError, p._check_scale_params, width=100, min_width=50)
        self.assertRaises(AssertionError, p._check_scale_params, width=100, max_width=50)
        self.assertRaises(AssertionError, p._check_scale_params, height=50, min_height=50)
        self.assertRaises(AssertionError, p._check_scale_params, height=50, max_height=50)
        self.assertRaises(AssertionError, p._check_scale_params, min_width=100, max_width=50)
        self.assertRaises(AssertionError, p._check_scale_params, min_height=100, max_height=50)
        # ones that make no sense with preserve=True
        self.assertRaises(AssertionError, p._check_scale_params, width=100, height=50)
        self.assertRaises(AssertionError, p._check_scale_params, width=100, min_height=50)
        self.assertRaises(AssertionError, p._check_scale_params, width=100, max_height=50)
        self.assertRaises(AssertionError, p._check_scale_params, height=50, min_width=50)
        self.assertRaises(AssertionError, p._check_scale_params, height=50, min_height=50)
        self.assertRaises(AssertionError, p._check_scale_params, min_width=100, max_height=50)
        self.assertRaises(AssertionError, p._check_scale_params, max_width=100, min_height=50)

    def test_misc(self):
        """ImageFormat equality, extensions, mode mapping and processor
        format validation."""
        f1 = ImageFormat('BMP', ext='dib')
        f2 = ImageFormat('BMP')
        f3 = ImageFormat('PSD')
        self.assertEqual(f1, f2)
        self.assertEqual(f1, 'BMP')
        self.assertNotEqual(f1, f3)
        self.assertTrue(f1.can_read)
        self.assertEqual(f1.get_ext(), 'dib')
        self.assertEqual(f2.get_ext(), 'bmp')
        self.assertEqual(f1.get_exts(), 'bmp,dib')
        self.assertEqual(f1.get_mode(), 'RGB')
        self.assertEqual(f1.get_mode(old_mode='non_existent'), 'RGB')
        self.assertEqual(f1.get_mode(old_mode='CMYK'), 'RGB')
        self.assertEqual(f1.get_mode(old_mode='LA'), 'P')
        p = ImageProcessor(format=f3)
        self.assertRaises(AssertionError, p.check_params)
        self.assertEqual(set(p.supported_formats.input_exts.split(',')),
                         set('sgi,pcx,xpm,tif,tiff,jpg,jpe,jpeg,jfif,xbm,gif,bmp,dib,tga,'
                             'tpic,im,psd,ppm,pgm,pbm,png'.split(',')))
        p = ImageProcessor()
        self.assertIsNone(p.get_ext())
        # assertEqual replaces the deprecated assertEquals alias.
        self.assertEqual(p.get_ext(format=ImageFormat('TIFF', ext='')), '')
| {
"content_hash": "088d08eb180fd70d13fa96b14390bd47",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 96,
"avg_line_length": 59.31764705882353,
"alnum_prop": 0.6493454978183261,
"repo_name": "lehins/django-smartfields",
"id": "ad3369a7b37e8c7a78386d43acf8070f14fa6ed6",
"size": "5042",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_suite/test_processors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "433"
},
{
"name": "CoffeeScript",
"bytes": "14579"
},
{
"name": "HTML",
"bytes": "5381"
},
{
"name": "Python",
"bytes": "137111"
}
],
"symlink_target": ""
} |
import unittest, sys, os
from stripe import Stripe
class TestStripe(unittest.TestCase):
    """Test cases for the stripe module."""

    def setUp(self):
        # No fixtures are needed yet.
        pass

    def testSanity(self):
        """Smoke test proving the harness itself runs."""
        self.assertEqual(0, 0)
| {
"content_hash": "d1d1f5c1cb756af98c99e9b431dd199e",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 38,
"avg_line_length": 15.933333333333334,
"alnum_prop": 0.6276150627615062,
"repo_name": "DisorderlyZen/python-stripe",
"id": "7e74a654209c1fda9e6eee05739c591b36fa8ddd",
"size": "239",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/stripe_test.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "5124"
}
],
"symlink_target": ""
} |
""" P1 tests for networks in advanced zone with security groups
"""
#Import Local Modules
import marvin
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.integration.lib.utils import *
from marvin.integration.lib.base import *
from marvin.integration.lib.common import *
import netaddr
from nose.plugins.attrib import attr
class Services:
    """ Test networks in advanced zone with security groups"""
    # Central bag of test data passed to marvin's factory helpers; each key
    # mirrors the API parameters of the resource it creates.
    def __init__(self):
        self.services = {
            "domain": {
                "name": "DOM",
            },
            "project": {
                "name": "Project",
                "displaytext": "Test project",
            },
            "account": {
                "email": "admin-XABU1@test.com",
                "firstname": "admin-XABU1",
                "lastname": "admin-XABU1",
                "username": "admin-XABU1",
                # Random characters are appended for unique
                # username
                "password": "fr3sca",
            },
            "service_offering": {
                "name": "Tiny Instance",
                "displaytext": "Tiny Instance",
                "cpunumber": 1,
                "cpuspeed": 100,    # in MHz
                "memory": 128,      # In MBs
            },
            # Shared offering WITH a SecurityGroup provider (valid in an
            # advanced zone with security groups).
            "shared_network_offering_sg": {
                "name": 'MySharedOffering-sg',
                "displaytext": 'MySharedOffering-sg',
                "guestiptype": 'Shared',
                "supportedservices": 'Dhcp,Dns,UserData,SecurityGroup',
                # NOTE(review): string "False" (not bool) - presumably passed
                # verbatim to the API; confirm against marvin's serializer.
                "specifyVlan" : "False",
                "specifyIpRanges" : "False",
                "traffictype": 'GUEST',
                "serviceProviderList" : {
                    "Dhcp": 'VirtualRouter',
                    "Dns": 'VirtualRouter',
                    "UserData": 'VirtualRouter',
                    "SecurityGroup": 'SecurityGroupProvider'
                },
            },
            # Shared offering WITHOUT a SecurityGroup provider (expected to
            # be rejected by the negative tests).
            "shared_network_offering": {
                "name": 'MySharedOffering',
                "displaytext": 'MySharedOffering',
                "guestiptype": 'Shared',
                "supportedservices": 'Dhcp,Dns,UserData',
                "specifyVlan" : "False",
                "specifyIpRanges" : "False",
                "traffictype": 'GUEST',
                "serviceProviderList" : {
                    "Dhcp": 'VirtualRouter',
                    "Dns": 'VirtualRouter',
                    "UserData": 'VirtualRouter'
                },
            },
            "shared_network_sg": {
                "name": "MyIsolatedNetwork - Test",
                "displaytext": "MyIsolatedNetwork",
                "networkofferingid":"1",
                "vlan" :1200,
                "gateway" :"172.16.15.1",
                "netmask" :"255.255.255.0",
                "startip" :"172.16.15.2",
                "endip" :"172.16.15.20",
                "acltype" : "Domain",
                "scope":"all",
            },
            "shared_network": {
                "name": "MySharedNetwork - Test",
                "displaytext": "MySharedNetwork",
                "vlan" :1201,
                "gateway" :"172.16.15.1",
                "netmask" :"255.255.255.0",
                "startip" :"172.16.15.21",
                "endip" :"172.16.15.41",
                "acltype" : "Domain",
                "scope":"all",
            },
            "isolated_network_offering": {
                "name": 'Network offering-DA services',
                "displaytext": 'Network offering-DA services',
                "guestiptype": 'Isolated',
                "supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Vpn,Firewall,Lb,UserData,StaticNat',
                "traffictype": 'GUEST',
                "availability": 'Optional',
                "serviceProviderList": {
                    "Dhcp": 'VirtualRouter',
                    "Dns": 'VirtualRouter',
                    "SourceNat": 'VirtualRouter',
                    "PortForwarding": 'VirtualRouter',
                    "Vpn": 'VirtualRouter',
                    "Firewall": 'VirtualRouter',
                    "Lb": 'VirtualRouter',
                    "UserData": 'VirtualRouter',
                    "StaticNat": 'VirtualRouter',
                },
            },
            "isolated_network": {
                "name": "Isolated Network",
                "displaytext": "Isolated Network",
            },
            "virtual_machine": {
                "displayname": "Test VM",
                "username": "root",
                "password": "password",
                "ssh_port": 22,
                "hypervisor": 'XenServer',
                # Hypervisor type should be same as
                # hypervisor type of cluster
                "privateport": 22,
                "publicport": 22,
                "protocol": 'TCP',
            },
            "ostype": 'CentOS 5.3 (64-bit)',
            # Cent OS 5.3 (64 bit)
            "sleep": 90,    # seconds to wait before network cleanup
            "timeout": 10,
            "mode": 'advanced',
            "securitygroupenabled": 'true'
        }
class TestNetworksInAdvancedSG(cloudstackTestCase):
    @classmethod
    def setUpClass(cls):
        # One-time fixture: build the API client, resolve zone/domain/template
        # and create the shared service offering used by every test.
        cls.api_client = super(
                               TestNetworksInAdvancedSG,
                               cls
                               ).getClsTestClient().getApiClient()
        cls.services = Services().services
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client, cls.services)
        cls.zone = get_zone(cls.api_client, cls.services)
        cls.template = get_template(
                            cls.api_client,
                            cls.zone.id,
                            cls.services["ostype"]
                            )
        # Every VM in these tests is deployed in this zone from this template.
        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.services["virtual_machine"]["template"] = cls.template.id
        cls.service_offering = ServiceOffering.create(
                                        cls.api_client,
                                        cls.services["service_offering"]
                                        )
        # Resources released in tearDownClass.
        cls._cleanup = [
                        cls.service_offering,
                        ]
        return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.api_client = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
self.cleanup_networks = []
self.cleanup_accounts = []
self.cleanup_domains = []
self.cleanup_projects = []
self.cleanup_vms = []
return
    def tearDown(self):
        # Deletion order is deliberate: generic resources, then VMs, projects,
        # accounts and domains, and networks LAST (they cannot be removed
        # while anything inside them still exists).
        try:
            # Clean up / terminate the created network offerings.
            cleanup_resources(self.api_client, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        # The components below are kept out of self.cleanup to enforce the
        # deletion order required before networks can be removed.
        try:
            for vm in self.cleanup_vms:
                vm.delete(self.api_client)
        except Exception as e:
            raise Exception("Warning: Exception during virtual machines cleanup : %s" % e)
        try:
            for project in self.cleanup_projects:
                project.delete(self.api_client)
        except Exception as e:
            raise Exception("Warning: Exception during project cleanup : %s" % e)
        try:
            for account in self.cleanup_accounts:
                account.delete(self.api_client)
        except Exception as e:
            raise Exception("Warning: Exception during account cleanup : %s" % e)
        try:
            for domain in self.cleanup_domains:
                domain.delete(self.api_client)
        except Exception as e:
            raise Exception("Warning: Exception during domain cleanup : %s" % e)
        # Wait till all resources created are cleaned up completely and then
        # attempt to delete the networks.
        time.sleep(self.services["sleep"])
        try:
            for network in self.cleanup_networks:
                network.delete(self.api_client)
        except Exception as e:
            raise Exception("Warning: Exception during network cleanup : %s" % e)
        return
@attr(tags = ["advancedsg"])
def test_createIsolatedNetwork(self):
""" Test Isolated Network """
# Steps,
# 1. create an Admin Account - admin-XABU1
# 2. listPhysicalNetworks in available zone
# 3. createNetworkOffering:
# 4. Enable network offering - updateNetworkOffering - state=Enabled
# 5. createNetwork
# Validations,
# 1. listAccounts name=admin-XABU1, state=enabled returns your account
# 2. listPhysicalNetworks should return at least one active physical network
# 4. listNetworkOfferings - name=myisolatedoffering, should list enabled offering
# 5. network creation should FAIL since isolated network is not supported in advanced zone with security groups.
#Create admin account
self.admin_account = Account.create(
self.api_client,
self.services["account"],
admin=True,
domainid=self.domain.id
)
self.cleanup_accounts.append(self.admin_account)
#verify that the account got created with state enabled
list_accounts_response = Account.list(
self.api_client,
id=self.admin_account.id,
listall=True
)
self.assertEqual(
isinstance(list_accounts_response, list),
True,
"listAccounts returned invalid object in response."
)
self.assertNotEqual(
len(list_accounts_response),
0,
"listAccounts returned empty list."
)
self.assertEqual(
list_accounts_response[0].state,
"enabled",
"The admin account created is not enabled."
)
self.debug("Admin type account created: %s" % self.admin_account.name)
#Create an user account
self.user_account = Account.create(
self.api_client,
self.services["account"],
admin=False,
domainid=self.domain.id
)
self.cleanup_accounts.append(self.user_account)
#verify that the account got created with state enabled
list_accounts_response = Account.list(
self.api_client,
id=self.user_account.id,
listall=True
)
self.assertEqual(
isinstance(list_accounts_response, list),
True,
"listAccounts returned invalid object in response."
)
self.assertNotEqual(
len(list_accounts_response),
0,
"listAccounts returned empty list."
)
self.assertEqual(
list_accounts_response[0].state,
"enabled",
"The user account created is not enabled."
)
self.debug("User type account created: %s" % self.user_account.name)
#Verify that there should be at least one physical network present in zone.
list_physical_networks_response = PhysicalNetwork.list(
self.api_client,
zoneid=self.zone.id
)
self.assertEqual(
isinstance(list_physical_networks_response, list),
True,
"listPhysicalNetworks returned invalid object in response."
)
self.assertNotEqual(
len(list_physical_networks_response),
0,
"listPhysicalNetworks should return at least one physical network."
)
physical_network = list_physical_networks_response[0]
self.debug("Physical network found: %s" % physical_network.id)
#Create Network Offering
self.isolated_network_offering = NetworkOffering.create(
self.api_client,
self.services["isolated_network_offering"],
conservemode=False
)
self.cleanup.append(self.isolated_network_offering)
#Verify that the network offering got created
list_network_offerings_response = NetworkOffering.list(
self.api_client,
id=self.isolated_network_offering.id
)
self.assertEqual(
isinstance(list_network_offerings_response, list),
True,
"listNetworkOfferings returned invalid object in response."
)
self.assertNotEqual(
len(list_network_offerings_response),
0,
"listNetworkOfferings returned empty list."
)
self.assertEqual(
list_network_offerings_response[0].state,
"Disabled",
"The network offering created should be bydefault disabled."
)
self.debug("Isolated Network offering created: %s" % self.isolated_network_offering.id)
#Update network offering state from disabled to enabled.
network_offering_update_response = NetworkOffering.update(
self.isolated_network_offering,
self.api_client,
id=self.isolated_network_offering.id,
state="enabled"
)
#Verify that the state of the network offering is updated
list_network_offerings_response = NetworkOffering.list(
self.api_client,
id=self.isolated_network_offering.id
)
self.assertEqual(
isinstance(list_network_offerings_response, list),
True,
"listNetworkOfferings returned invalid object in response."
)
self.assertNotEqual(
len(list_network_offerings_response),
0,
"listNetworkOfferings returned empty list."
)
self.assertEqual(
list_network_offerings_response[0].state,
"Enabled",
"The network offering state should get updated to Enabled."
)
#create network using the isolated network offering created
try:
self.isolated_network = Network.create(
self.api_client,
self.services["isolated_network"],
networkofferingid=self.isolated_network_offering.id,
zoneid=self.zone.id,
)
self.cleanup_networks.append(self.isolated_network)
self.fail("Create isolated network is invalid in advanced zone with security groups.")
except Exception as e:
self.debug("Network creation failed because create isolated network is invalid in advanced zone with security groups.")
@attr(tags = ["advancedsg"])
def test_createSharedNetwork_withoutSG(self):
    """Test shared network creation without a SecurityProvider (must fail)."""
    # Steps,
    # 1. create an Admin account
    # 2. create a shared NetworkOffering
    # 3. enable the network offering
    # 4. listPhysicalNetworks
    # 5. createNetwork
    # Validations,
    # 1. listAccounts state=enabled returns your account
    # 2. listNetworkOfferings - name=mysharedoffering , should list offering in disabled state
    # 3. listNetworkOfferings - name=mysharedoffering, should list enabled offering
    # 4. listPhysicalNetworks should return at least one active physical network
    # 5. network creation should FAIL since there is no SecurityProvide in the network offering

    # Create admin account
    self.admin_account = Account.create(
        self.api_client,
        self.services["account"],
        admin=True,
        domainid=self.domain.id
    )
    self.cleanup_accounts.append(self.admin_account)

    # Verify that the account got created with state enabled
    list_accounts_response = Account.list(
        self.api_client,
        id=self.admin_account.id,
        listall=True
    )
    self.assertEqual(
        isinstance(list_accounts_response, list),
        True,
        "listAccounts returned invalid object in response."
    )
    self.assertNotEqual(
        len(list_accounts_response),
        0,
        "listAccounts returned empty list."
    )
    self.assertEqual(
        list_accounts_response[0].state,
        "enabled",
        "The admin account created is not enabled."
    )
    self.debug("Domain admin account created: %s" % self.admin_account.id)

    # Verify that there should be at least one physical network present in zone.
    list_physical_networks_response = PhysicalNetwork.list(
        self.api_client,
        zoneid=self.zone.id
    )
    self.assertEqual(
        isinstance(list_physical_networks_response, list),
        True,
        "listPhysicalNetworks returned invalid object in response."
    )
    self.assertNotEqual(
        len(list_physical_networks_response),
        0,
        "listPhysicalNetworks should return at least one physical network."
    )
    physical_network = list_physical_networks_response[0]
    self.debug("Physical Network found: %s" % physical_network.id)

    # Shared offerings must carry explicit VLAN and IP-range settings.
    self.services["shared_network_offering"]["specifyVlan"] = "True"
    self.services["shared_network_offering"]["specifyIpRanges"] = "True"

    # Create Network Offering
    self.shared_network_offering = NetworkOffering.create(
        self.api_client,
        self.services["shared_network_offering"],
        conservemode=False
    )
    self.cleanup.append(self.shared_network_offering)

    # Verify that the network offering got created (new offerings start Disabled).
    list_network_offerings_response = NetworkOffering.list(
        self.api_client,
        id=self.shared_network_offering.id
    )
    self.assertEqual(
        isinstance(list_network_offerings_response, list),
        True,
        "listNetworkOfferings returned invalid object in response."
    )
    self.assertNotEqual(
        len(list_network_offerings_response),
        0,
        "listNetworkOfferings returned empty list."
    )
    self.assertEqual(
        list_network_offerings_response[0].state,
        "Disabled",
        "The network offering created should be bydefault disabled."
    )
    self.debug("Shared Network Offering created: %s" % self.shared_network_offering.id)

    # Update network offering state from disabled to enabled.
    network_offering_update_response = NetworkOffering.update(
        self.shared_network_offering,
        self.api_client,
        id=self.shared_network_offering.id,
        state="enabled"
    )

    # Verify that the state of the network offering is updated
    list_network_offerings_response = NetworkOffering.list(
        self.api_client,
        id=self.shared_network_offering.id
    )
    self.assertEqual(
        isinstance(list_network_offerings_response, list),
        True,
        "listNetworkOfferings returned invalid object in response."
    )
    self.assertNotEqual(
        len(list_network_offerings_response),
        0,
        "listNetworkOfferings returned empty list."
    )
    self.assertEqual(
        list_network_offerings_response[0].state,
        "Enabled",
        "The network offering state should get updated to Enabled."
    )

    # create network using the shared network offering created
    self.services["shared_network"]["acltype"] = "domain"
    self.services["shared_network"]["networkofferingid"] = self.shared_network_offering.id
    self.services["shared_network"]["physicalnetworkid"] = physical_network.id
    try:
        self.shared_network = Network.create(
            self.api_client,
            self.services["shared_network"],
            networkofferingid=self.shared_network_offering.id,
            zoneid=self.zone.id
        )
        # If creation unexpectedly succeeded, queue the network for cleanup
        # before failing the test.
        self.cleanup_networks.append(self.shared_network)
        self.fail("Network created without SecurityProvider , which is invalid")
    except Exception as e:
        # Expected path: the offering carries no SecurityProvider, so the
        # network cannot be created in an advanced SG-enabled zone.
        self.debug("Network creation failed because there is no SecurityProvider in the network offering.")
@attr(tags = ["advancedsg"])
def test_deployVM_SharedwithSG(self):
    """Test VM deployment in shared networks with SecurityProvider.

    Steps:
    0. create a user account
    1. Create one shared Network (scope=ALL, different IP ranges)
    2. deployVirtualMachine in the above networkid within the user account
    3. delete the user account
    Validations:
    1. shared network should be created successfully
    2. VM should deploy successfully and obtain an IP from the network's
       configured range
    """
    # Create admin account
    self.admin_account = Account.create(
        self.api_client,
        self.services["account"],
        admin=True,
        domainid=self.domain.id
    )
    self.cleanup_accounts.append(self.admin_account)

    # Verify that the account got created with state enabled.
    # BUGFIX: the keyword was previously misspelled "liistall", so the
    # listall flag was silently ignored by the API call.
    list_accounts_response = Account.list(
        self.api_client,
        id=self.admin_account.id,
        listall=True
    )
    self.assertEqual(
        isinstance(list_accounts_response, list),
        True,
        "listAccounts returned invalid object in response."
    )
    self.assertNotEqual(
        len(list_accounts_response),
        0,
        "listAccounts returned empty list."
    )
    self.assertEqual(
        list_accounts_response[0].state,
        "enabled",
        "The admin account created is not enabled."
    )
    self.debug("Admin type account created: %s" % self.admin_account.name)

    # Shared offerings must carry explicit VLAN and IP-range settings.
    self.services["shared_network_offering_sg"]["specifyVlan"] = "True"
    self.services["shared_network_offering_sg"]["specifyIpRanges"] = "True"

    # Create Network Offering
    self.shared_network_offering_sg = NetworkOffering.create(
        self.api_client,
        self.services["shared_network_offering_sg"],
        conservemode=False
    )
    self.cleanup.append(self.shared_network_offering_sg)

    # Verify that the network offering got created (new offerings start Disabled).
    list_network_offerings_response = NetworkOffering.list(
        self.api_client,
        id=self.shared_network_offering_sg.id
    )
    self.assertEqual(
        isinstance(list_network_offerings_response, list),
        True,
        "listNetworkOfferings returned invalid object in response."
    )
    self.assertNotEqual(
        len(list_network_offerings_response),
        0,
        "listNetworkOfferings returned empty list."
    )
    self.assertEqual(
        list_network_offerings_response[0].state,
        "Disabled",
        "The network offering created should be bydefault disabled."
    )
    self.debug("Shared Network offering created: %s" % self.shared_network_offering_sg.id)

    # Update network offering state from disabled to enabled.
    network_offering_update_response = NetworkOffering.update(
        self.shared_network_offering_sg,
        self.api_client,
        id=self.shared_network_offering_sg.id,
        state="enabled"
    )

    # Verify that the state of the network offering is updated
    list_network_offerings_response = NetworkOffering.list(
        self.api_client,
        id=self.shared_network_offering_sg.id
    )
    self.assertEqual(
        isinstance(list_network_offerings_response, list),
        True,
        "listNetworkOfferings returned invalid object in response."
    )
    self.assertNotEqual(
        len(list_network_offerings_response),
        0,
        "listNetworkOfferings returned empty list."
    )
    self.assertEqual(
        list_network_offerings_response[0].state,
        "Enabled",
        "The network offering state should get updated to Enabled."
    )

    # NOTE(review): unlike the sibling tests this lookup is not filtered by
    # zoneid; confirm the first physical network is the intended one.
    physical_network = PhysicalNetwork.list(self.api_client)[0]

    # create network using the shared network offering created
    self.services["shared_network_sg"]["acltype"] = "domain"
    self.services["shared_network_sg"]["networkofferingid"] = self.shared_network_offering_sg.id
    self.services["shared_network_sg"]["physicalnetworkid"] = physical_network.id
    self.shared_network_sg = Network.create(
        self.api_client,
        self.services["shared_network_sg"],
        domainid=self.admin_account.domainid,
        networkofferingid=self.shared_network_offering_sg.id,
        zoneid=self.zone.id
    )
    self.cleanup_networks.append(self.shared_network_sg)
    list_networks_response = Network.list(
        self.api_client,
        id=self.shared_network_sg.id
    )
    self.assertEqual(
        isinstance(list_networks_response, list),
        True,
        "listNetworks returned invalid object in response."
    )
    self.assertNotEqual(
        len(list_networks_response),
        0,
        "listNetworks returned empty list."
    )
    self.assertEqual(
        list_networks_response[0].specifyipranges,
        True,
        "The network is created with ip range but the flag is set to False."
    )
    self.debug("Shared Network created: %s" % self.shared_network_sg.id)

    # Deploy a VM into the shared SG-enabled network.
    self.shared_network_admin_account_virtual_machine = VirtualMachine.create(
        self.api_client,
        self.services["virtual_machine"],
        accountid=self.admin_account.name,
        domainid=self.admin_account.domainid,
        networkids=self.shared_network_sg.id,
        serviceofferingid=self.service_offering.id
    )
    self.cleanup_vms.append(self.shared_network_admin_account_virtual_machine)
    vms = VirtualMachine.list(
        self.api_client,
        id=self.shared_network_admin_account_virtual_machine.id,
        listall=True
    )
    self.assertEqual(
        isinstance(vms, list),
        True,
        "listVirtualMachines returned invalid object in response."
    )
    self.assertNotEqual(
        len(vms),
        0,
        "listVirtualMachines returned empty list."
    )
    self.debug("Virtual Machine created: %s" % self.shared_network_admin_account_virtual_machine.id)

    # The VM's first NIC address must fall inside the range configured on
    # the shared network.
    ip_range = list(netaddr.iter_iprange(unicode(self.services["shared_network_sg"]["startip"]), unicode(self.services["shared_network_sg"]["endip"])))
    if netaddr.IPAddress(unicode(vms[0].nic[0].ipaddress)) not in ip_range:
        self.fail("Virtual machine ip should be from the ip range assigned to network created.")
| {
"content_hash": "eab5b411dbfdacb2349d9361ed2ce5a8",
"timestamp": "",
"source": "github",
"line_count": 739,
"max_line_length": 155,
"avg_line_length": 46.39918809201624,
"alnum_prop": 0.45206917670389923,
"repo_name": "mufaddalq/cloudstack-datera-driver",
"id": "483435188ca6005859541de9f9b40da2f203531b",
"size": "35075",
"binary": false,
"copies": "1",
"ref": "refs/heads/4.2",
"path": "test/integration/component/test_advancedsg_networks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "250"
},
{
"name": "Batchfile",
"bytes": "6317"
},
{
"name": "CSS",
"bytes": "302008"
},
{
"name": "FreeMarker",
"bytes": "4917"
},
{
"name": "HTML",
"bytes": "38671"
},
{
"name": "Java",
"bytes": "79758943"
},
{
"name": "JavaScript",
"bytes": "4237188"
},
{
"name": "Perl",
"bytes": "1879"
},
{
"name": "Python",
"bytes": "5187499"
},
{
"name": "Shell",
"bytes": "803262"
}
],
"symlink_target": ""
} |
'''
@author: Dallas Fraser
@date: 2019-03-25
@organization: MLSB API
@summary: Test suite that runs all importing classes
'''
from unittest import TestLoader, TextTestRunner
from api.test.importer import testAdvancedImportLeague
from api.test.importer import testAdvancedImportTeam
if __name__ == "__main__":
    # Run each importer test module through its own text runner.
    for module in (testAdvancedImportLeague, testAdvancedImportTeam):
        suite = TestLoader().loadTestsFromModule(module)
        TextTestRunner().run(suite)
| {
"content_hash": "9bb44fd66199f2086a777b6de9f40ff9",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 74,
"avg_line_length": 35.375,
"alnum_prop": 0.7014134275618374,
"repo_name": "fras2560/mlsb-platform",
"id": "c8245fbacb6e738c4007ce7bd692611e9e1e072c",
"size": "566",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/test/importer/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "29191"
},
{
"name": "Dockerfile",
"bytes": "445"
},
{
"name": "Gherkin",
"bytes": "3723"
},
{
"name": "HTML",
"bytes": "203137"
},
{
"name": "JavaScript",
"bytes": "132426"
},
{
"name": "Python",
"bytes": "535371"
}
],
"symlink_target": ""
} |
import sys
from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union, overload
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
# ``Literal`` moved into ``typing`` in Python 3.8; older interpreters need the
# typing_extensions backport.
if sys.version_info >= (3, 8):
    from typing import Literal  # pylint: disable=no-name-in-module, ungrouped-imports
else:
    from typing_extensions import Literal  # type: ignore # pylint: disable=ungrouped-imports

T = TypeVar("T")
# Signature of the optional ``cls`` response-transformation callback passed
# through **kwargs to operations.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

_SERIALIZER = Serializer()
# Disable client-side validation for requests built in this module.
_SERIALIZER.client_side_validation = False
def build_create_request(subscription_id: str, **kwargs: Any) -> HttpRequest:
    """Assemble the POST request for the generateDeploymentLicense endpoint."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: Literal["2022-06-01"] = kwargs.pop("api_version", params.pop("api-version", "2022-06-01"))
    content_type: Optional[str] = kwargs.pop("content_type", headers.pop("Content-Type", None))
    accept = headers.pop("Accept", "application/json")

    # Fill the subscription id into the URL template (overridable via template_url).
    template = kwargs.pop(
        "template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.AzureStack/generateDeploymentLicense"
    )  # pylint: disable=line-too-long
    url: str = _format_url_section(  # type: ignore
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
    )

    # Query string.
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Headers: Content-Type only when a body content type was supplied.
    if content_type is not None:
        headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="POST", url=url, params=params, headers=headers, **kwargs)
class DeploymentLicenseOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.azurestack.AzureStackManagementClient`'s
        :attr:`deployment_license` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs):
        # The generated client injects (client, config, serializer,
        # deserializer) positionally; keywords are supported as a fallback.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @overload
    def create(
        self,
        deployment_license_request: _models.DeploymentLicenseRequest,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.DeploymentLicenseResponse:
        """Creates a license that can be used to deploy an Azure Stack device.

        :param deployment_license_request: Request body for creating a deployment license. Required.
        :type deployment_license_request: ~azure.mgmt.azurestack.models.DeploymentLicenseRequest
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: DeploymentLicenseResponse or the result of cls(response)
        :rtype: ~azure.mgmt.azurestack.models.DeploymentLicenseResponse
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    def create(
        self, deployment_license_request: IO, *, content_type: str = "application/json", **kwargs: Any
    ) -> _models.DeploymentLicenseResponse:
        """Creates a license that can be used to deploy an Azure Stack device.

        :param deployment_license_request: Request body for creating a deployment license. Required.
        :type deployment_license_request: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: DeploymentLicenseResponse or the result of cls(response)
        :rtype: ~azure.mgmt.azurestack.models.DeploymentLicenseResponse
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace
    def create(
        self, deployment_license_request: Union[_models.DeploymentLicenseRequest, IO], **kwargs: Any
    ) -> _models.DeploymentLicenseResponse:
        """Creates a license that can be used to deploy an Azure Stack device.

        :param deployment_license_request: Request body for creating a deployment license. Is either a
         model type or a IO type. Required.
        :type deployment_license_request: ~azure.mgmt.azurestack.models.DeploymentLicenseRequest or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: DeploymentLicenseResponse or the result of cls(response)
        :rtype: ~azure.mgmt.azurestack.models.DeploymentLicenseResponse
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map well-known HTTP failures to the azure-core exception hierarchy;
        # callers may extend/override via the error_map kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: Literal["2022-06-01"] = kwargs.pop(
            "api_version", _params.pop("api-version", self._config.api_version)
        )
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.DeploymentLicenseResponse] = kwargs.pop("cls", None)

        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Raw streams/bytes are sent as-is; model objects are serialized to JSON.
        if isinstance(deployment_license_request, (IO, bytes)):
            _content = deployment_license_request
        else:
            _json = self._serialize.body(deployment_license_request, "DeploymentLicenseRequest")

        request = build_create_request(
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self.create.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        # Only 200 is a documented success for this operation.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize("DeploymentLicenseResponse", pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    create.metadata = {
        "url": "/subscriptions/{subscriptionId}/providers/Microsoft.AzureStack/generateDeploymentLicense"
    }
| {
"content_hash": "6053aa1735d65e0a3f0f88939c9f1ca6",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 114,
"avg_line_length": 43.56923076923077,
"alnum_prop": 0.6790254237288136,
"repo_name": "Azure/azure-sdk-for-python",
"id": "7f6f179cbd35dd9fc01505d764013442f31a5b5b",
"size": "8996",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/azurestack/azure-mgmt-azurestack/azure/mgmt/azurestack/operations/_deployment_license_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import sys
import json
import datetime
from socket import*
from socket import error
from time import sleep
import struct
from ctypes import *
import paho.mqtt.client as mqtt
#------------------------------------------------------------#
ID_STRING = "V0.1"
#------------------------------------------------------------#
# UDP listen port, command reply port, and receive buffer size.
PORT = 5678
CMD_PORT = 8765
BUFSIZE = 1024
#------------------------------------------------------------#
# Feature toggles: forward readings to MQTT / dump each datagram to stdout.
ENABLE_MQTT = 1
ENABLE_LOG = 0
#------------------------------------------------------------#
DEBUG_PRINT_JSON = 1
#------------------------------------------------------------#
# If using a client based on the Z1 mote, then enable by equal to 1, else if
# using the RE-Mote equal to 0
EXAMPLE_WITH_Z1 = 1
#------------------------------------------------------------#
# Broker connection settings and the topic layout used for publish/subscribe.
MQTT_URL = "iot.eclipse.org"
MQTT_PORT = 1883
MQTT_KEEPALIVE = 60
MQTT_URL_PUB = "v2/zolertia/tutorialthings/"
MQTT_URL_TOPIC = "/cmd"
#------------------------------------------------------------#
# Message structure
#------------------------------------------------------------#
# Field names differ between the Z1 (temperature + accelerometer axes) and
# the RE-Mote (core temperature + ADC channels); pick the set matching the
# firmware in use.
if EXAMPLE_WITH_Z1:
    var1 = "temperature"
    var2 = "x_axis"
    var3 = "y_axis"
    var4 = "z_axis"
else:
    var1 = "core_temp"
    var2 = "ADC1"
    var3 = "ADC2"
    var4 = "ADC3"
class SENSOR(Structure):
    """Packed C struct mirroring the sensor payload sent over UDP.

    The field layout (no padding, `_pack_ = 1`) must match the mote
    firmware's message struct byte-for-byte.
    """
    _pack_ = 1
    _fields_ = [
        ("id", c_uint8),
        ("counter", c_uint16),
        (var1, c_int16),
        (var2, c_int16),
        (var3, c_int16),
        (var4, c_int16),
        ("battery", c_uint16)
    ]

    def __new__(cls, socket_buffer):
        # BUGFIX(idiom): first parameter of __new__ is the class, not an
        # instance - renamed self -> cls. Build the instance directly from
        # the raw datagram bytes.
        return cls.from_buffer_copy(socket_buffer)

    def __init__(self, socket_buffer):
        # All fields were populated in __new__; nothing further to do.
        pass
#------------------------------------------------------------#
# Helper functions
#------------------------------------------------------------#
def print_recv_data(msg):
    """Dump every field of a received SENSOR struct to stdout (Python 2)."""
    print "***"
    # Trailing comma keeps all fields on one line.
    for f_name, f_type in msg._fields_:
        print "{0}:{1} ".format(f_name, getattr(msg, f_name)),
    print
    print "***"
# -----------------------------------------------------------#
def publish_recv_data(data, pubid, conn, addr):
try:
res, mid = conn.publish(MQTT_URL_PUB + str(pubid), payload=data, qos=1)
print "MQTT: Publishing to {0}... " + "{1} ({2})".format(mid, res, str(pubid))
except Exception as error:
print error
# -----------------------------------------------------------#
def jsonify(keyval, val):
    """Serialize one (key, value) sensor reading as a JSON object string."""
    reading = {"value": val, "key": keyval}
    return json.dumps(reading)
# -----------------------------------------------------------#
def jsonify_recv_data(msg):
    """Render a SENSOR struct as a '{"values": [...]}' JSON string.

    Returns the serialized string (not a dict): the payload is handed to
    paho-mqtt, which publishes plain strings.
    """
    sensordata = '{"values":['
    for f_name, f_type in msg._fields_:
        sensordata += jsonify(f_name, getattr(msg, f_name)) + ","
    # Drop the trailing comma before closing the array.
    sensordata = sensordata[:-1]
    sensordata += ']}'
    # Paho MQTT client doesn't support sending JSON objects
    json_parsed = json.loads(sensordata)
    if DEBUG_PRINT_JSON:
        print json.dumps(json_parsed, indent=2)
    return sensordata
# -----------------------------------------------------------#
def send_udp_cmd(addr):
    """Send a fixed greeting datagram back to `addr` on CMD_PORT (best effort)."""
    client = socket(AF_INET6, SOCK_DGRAM)
    print "Sending reply to " + addr
    try:
        client.sendto("Hello from the server", (addr, CMD_PORT))
    except Exception as error:
        # Best-effort: log and continue; the receive loop must not die here.
        print error
    client.close()
# -----------------------------------------------------------#
# MQTT related functions
# -----------------------------------------------------------#
def on_connect(client, userdata, rc):
    """MQTT connect callback: log the result code and subscribe to the /cmd topic."""
    # NOTE(review): ``print(...).format(...)`` only works under Python 2,
    # where ``print`` is a statement; under Python 3 this would raise.
    print("MQTT: Connected ({0}) ").format(str(rc))
    client.subscribe(MQTT_URL_PUB + MQTT_URL_TOPIC)
#------------------------------------------------------------#
def on_message(client, userdata, msg):
    """MQTT message callback: log the topic and payload of a received message."""
    print("MQTT: RX: %s : %s" % (msg.topic, str(msg.payload)))
#------------------------------------------------------------#
def on_publish(client, packet, mid):
    """MQTT publish callback: log the message id that completed."""
    # NOTE(review): Python-2-only idiom; ``print(...).format(...)`` would
    # raise under Python 3.
    print("MQTT: Published {0}").format(mid)
#------------------------------------------------------------#
# UDP6 and MQTT client session
#------------------------------------------------------------#
def start_client():
    """Bind the UDP6 socket, optionally connect to MQTT, and serve forever.

    Each received datagram is parsed as a SENSOR struct, serialized to JSON,
    optionally published to the broker, and answered with a command packet.
    """
    now = datetime.datetime.now()
    print "UDP6-MQTT server side application " + ID_STRING
    print "Started " + str(now)
    try:
        s = socket(AF_INET6, SOCK_DGRAM)
        s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
        # Replace address below with "aaaa::1" if tunslip6 has created a tunnel
        # interface with this address
        s.bind(('', PORT))
    except Exception:
        print "ERROR: Server Port Binding Failed"
        return
    print 'UDP6-MQTT server ready: %s'% PORT
    print "msg structure size: ", sizeof(SENSOR)
    print
    if ENABLE_MQTT:
        # Initialize MQTT connection
        try:
            client = mqtt.Client()
        except Exception as error:
            print error
            raise
        client.on_connect = on_connect
        client.on_message = on_message
        client.on_publish = on_publish
        try:
            client.connect(MQTT_URL, MQTT_PORT, MQTT_KEEPALIVE)
        except Exception as error:
            print error
            raise
        # Start the MQTT thread and handle reconnections, also ensures the callbacks
        # being triggered
        client.loop_start()
    while True:
        # Block until the next sensor datagram arrives.
        data, addr = s.recvfrom(BUFSIZE)
        now = datetime.datetime.now()
        print str(now)[:19] + " -> " + str(addr[0]) + ":" + str(addr[1]) + " " + str(len(data))
        msg_recv = SENSOR(data)
        if ENABLE_LOG:
            print_recv_data(msg_recv)
        sensordata = jsonify_recv_data(msg_recv)
        if ENABLE_MQTT:
            publish_recv_data(sensordata, msg_recv.id, client, addr[0])
        # Echo a command packet back to the sender's CMD_PORT.
        send_udp_cmd(addr[0])
    # NOTE(review): unreachable -- the ``while True`` loop above never exits.
    client.loop_stop()
#------------------------------------------------------------#
# MAIN APP
#------------------------------------------------------------#
# Run the UDP6->MQTT bridge when executed as a script.
if __name__ == "__main__":
    start_client()
| {
"content_hash": "bc6828facdd1ee0e561351fd13273425",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 91,
"avg_line_length": 31.91891891891892,
"alnum_prop": 0.4575783234546994,
"repo_name": "miarcompanies/sdn-wise-contiki",
"id": "4c9492f84769daeed9ae03f7f979eb2bddd0697d",
"size": "6148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contiki/examples/zolertia/tutorial/02-ipv6/03-udp-client-and-server/UDP-MQTT-server.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "150953"
},
{
"name": "Awk",
"bytes": "95"
},
{
"name": "Batchfile",
"bytes": "56"
},
{
"name": "C",
"bytes": "19590553"
},
{
"name": "C++",
"bytes": "1232938"
},
{
"name": "CSS",
"bytes": "10705"
},
{
"name": "GDB",
"bytes": "212"
},
{
"name": "Gnuplot",
"bytes": "1671"
},
{
"name": "HTML",
"bytes": "13655"
},
{
"name": "Java",
"bytes": "2862829"
},
{
"name": "JavaScript",
"bytes": "20835"
},
{
"name": "Makefile",
"bytes": "2349582"
},
{
"name": "Objective-C",
"bytes": "278368"
},
{
"name": "Perl",
"bytes": "97819"
},
{
"name": "Python",
"bytes": "617825"
},
{
"name": "Shell",
"bytes": "17305"
},
{
"name": "XSLT",
"bytes": "4947"
}
],
"symlink_target": ""
} |
import numpy as np
from keras.layers import Dense, Activation
from keras.models import Sequential
from keras.optimizers import SGD
np.random.seed(0)

# XOR truth table: inputs and expected outputs.
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
Y = np.array([[0], [1], [1], [0]])

# Deliberate failure example: a single sigmoid unit is a linear classifier
# and cannot separate XOR, so the predicted probabilities stay near 0.5.
model = Sequential([Dense(input_dim=2, units=1),
                    Activation('sigmoid')])
model.compile(loss='binary_crossentropy', optimizer=SGD(lr=0.1))
model.fit(X, Y, epochs=200, batch_size=1)
prob = model.predict_proba(X, batch_size=1)
print('-------失敗例------')
print(prob)
print('-------------')

# keras XOR
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
Y = np.array([[0], [1], [1], [0]])

# Working example: one hidden layer of 2 sigmoid units makes XOR separable.
model = Sequential()
model.add(Dense(input_dim=2, units=2))
model.add(Activation('sigmoid'))
model.add(Dense(units=1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy', optimizer=SGD(lr=0.1))
model.fit(X, Y, epochs=6000, batch_size=4)
classes = model.predict_classes(X, batch_size=4)
prob = model.predict_proba(X, batch_size=4)
print('classified:')
print(np.argmax(model.predict(X), axis=1) == classes)
print()
print('output probability:')
print(prob)
| {
"content_hash": "3966c24b22bad63a4bd7737457b68fa0",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 64,
"avg_line_length": 27.341463414634145,
"alnum_prop": 0.6467439785905441,
"repo_name": "hide-tono/python-training",
"id": "9034a709369fe1ec495ca98ef13ff8f663ac9be3",
"size": "1169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deep-learning-tf-keras/ch03/xor-keras.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2232788"
},
{
"name": "Python",
"bytes": "30876"
}
],
"symlink_target": ""
} |
from openerp import fields, models
class SomeObj(models.Model):
    """Minimal model used by the access-rights test suite."""
    _name = 'test_access_right.some_obj'

    # Single integer field to exercise read/write permissions against.
    val = fields.Integer()
| {
"content_hash": "088633abd148e7c721642e2f7b6ef333",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 40,
"avg_line_length": 22.333333333333332,
"alnum_prop": 0.7014925373134329,
"repo_name": "diogocs1/comps",
"id": "b7752cb3665b94d153e532593fa0b6b521806483",
"size": "134",
"binary": false,
"copies": "299",
"ref": "refs/heads/master",
"path": "web/openerp/addons/test_access_rights/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "701"
},
{
"name": "CSS",
"bytes": "856533"
},
{
"name": "HTML",
"bytes": "299671"
},
{
"name": "Java",
"bytes": "620166"
},
{
"name": "JavaScript",
"bytes": "5844302"
},
{
"name": "Makefile",
"bytes": "21002"
},
{
"name": "PHP",
"bytes": "14259"
},
{
"name": "Python",
"bytes": "10647376"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "17746"
},
{
"name": "XSLT",
"bytes": "120278"
}
],
"symlink_target": ""
} |
"""Tests for Grappler LayoutOptimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import device_properties_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.grappler import cluster as gcluster
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.layers import convolutional as conv_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import saver as saver_lib
def _weight(shape):
    """Create a truncated-normal weight tensor of the given shape (fixed seed)."""
    initial = random_ops.truncated_normal(shape, seed=0, stddev=0.1)
    return initial
def _bias(shape):
    """Create a constant bias tensor (all 0.1) of the given shape."""
    initial = constant_op.constant(0.1, shape=shape)
    return initial
def _conv2d(x, w):
    """Apply a stride-1, SAME-padded 2-D convolution of filter `w` over `x`."""
    unit_strides = [1, 1, 1, 1]
    return nn.conv2d(x, w, strides=unit_strides, padding='SAME')
def _max_pool_2x2(x):
    """Halve the spatial dimensions of `x` with non-overlapping 2x2 max pooling."""
    window = [1, 2, 2, 1]
    return nn.max_pool(x, ksize=window, strides=window, padding='SAME')
# Taken from tensorflow/examples/tutorials/mnist/mnist_deep.py
def _two_layer_model(x):
    """Two conv/relu/maxpool blocks over a flattened 28x28 single-channel input."""
    image = array_ops.reshape(x, [-1, 28, 28, 1])
    # First block: 5x5 kernel, 1 -> 32 channels.
    conv1 = nn.relu(_conv2d(image, _weight([5, 5, 1, 32])) + _bias([32]))
    pool1 = _max_pool_2x2(conv1)
    # Second block: 5x5 kernel, 32 -> 64 channels.
    conv2 = nn.relu(_conv2d(pool1, _weight([5, 5, 32, 64])) + _bias([64]))
    return _max_pool_2x2(conv2)
def _model_with_second_port():
  """Builds a graph that consumes fused_batch_norm's second output (mean)."""
  random_seed.set_random_seed(0)
  x = random_ops.truncated_normal([2, 5, 5, 4], seed=0)
  scale = constant_op.constant(0.1, shape=[4])
  offset = constant_op.constant(0.3, shape=[4])
  y, mean, _ = nn.fused_batch_norm(x, scale, offset)
  # NOTE(review): despite the name, this is an add op, not a multiply.
  mul = math_ops.add(y, mean)
  output = array_ops.identity(mul)
  return output
def _model_with_branch(x):
  """Builds two parallel convolutions over the same input, joined by an add."""
  x_image = array_ops.reshape(x, [-1, 28, 28, 1])
  w_conv1 = _weight([5, 5, 1, 32])
  w_conv2 = _weight([5, 5, 1, 32])
  c_conv1 = _conv2d(x_image, w_conv1)
  c_conv2 = _conv2d(x_image, w_conv2)
  add = math_ops.add(c_conv1, c_conv2)
  return add
def _model_with_vec_and_4d(x):
  """Adds a 1-D constant vector to a 4-D conv output (broadcast add)."""
  x_image = array_ops.reshape(x, [-1, 28, 28, 1])
  w_conv1 = _weight([5, 5, 1, 32])
  c_conv1 = _conv2d(x_image, w_conv1)
  vector = constant_op.constant(6.4, shape=[32])
  add = math_ops.add(c_conv1, vector)
  return add
def _loop():
  """Runs _two_layer_model under map_fn over four random inputs."""
  random_seed.set_random_seed(0)
  x1 = random_ops.truncated_normal([1, 784], seed=0)
  x2 = random_ops.truncated_normal([1, 784], seed=0)
  x3 = random_ops.truncated_normal([1, 784], seed=0)
  x4 = random_ops.truncated_normal([1, 784], seed=0)
  elems = (x1, x2, x3, x4)
  outputs = functional_ops.map_fn(_two_layer_model, elems, dtype=dtypes.float32)
  return outputs
def _loop_with_branch():
  """Runs _model_with_branch under map_fn over four random inputs."""
  random_seed.set_random_seed(0)
  x1 = random_ops.truncated_normal([1, 784], seed=0)
  x2 = random_ops.truncated_normal([1, 784], seed=0)
  x3 = random_ops.truncated_normal([1, 784], seed=0)
  x4 = random_ops.truncated_normal([1, 784], seed=0)
  elems = (x1, x2, x3, x4)
  outputs = functional_ops.map_fn(
      _model_with_branch, elems, dtype=dtypes.float32)
  return outputs
def _loop_with_vec_and_4d():
  """Runs _model_with_vec_and_4d under map_fn over four random inputs."""
  random_seed.set_random_seed(0)
  x1 = random_ops.truncated_normal([1, 784], seed=0)
  x2 = random_ops.truncated_normal([1, 784], seed=0)
  x3 = random_ops.truncated_normal([1, 784], seed=0)
  x4 = random_ops.truncated_normal([1, 784], seed=0)
  elems = (x1, x2, x3, x4)
  outputs = functional_ops.map_fn(
      _model_with_vec_and_4d, elems, dtype=dtypes.float32)
  return outputs
def _get_config(layout_optimizer=True):
  """Builds a session config with the layout optimizer toggled on or off.

  Args:
    layout_optimizer: whether the Grappler layout optimizer is enabled.

  Returns:
    A config_pb2.ConfigProto with cost-model building enabled and all other
    graph optimizations disabled (opt_level -1), so that only the layout
    optimizer's effect is observed.
  """
  # Fold the two near-identical branches into a single construction.
  toggle = (
      rewriter_config_pb2.RewriterConfig.ON
      if layout_optimizer else rewriter_config_pb2.RewriterConfig.OFF)
  rewrite_options = rewriter_config_pb2.RewriterConfig(layout_optimizer=toggle)
  graph_options = config_pb2.GraphOptions(
      rewrite_options=rewrite_options, build_cost_model=1)
  config = config_pb2.ConfigProto(graph_options=graph_options)
  config.graph_options.optimizer_options.opt_level = -1
  return config
def _simple_metagraph(depthwise=False):
  """Exports a metagraph of a two-conv (or separable-conv) training graph.

  Args:
    depthwise: if True, uses separable_conv2d layers instead of conv2d.

  Returns:
    A MetaGraphDef with a 'train_op' collection entry.
  """
  random_seed.set_random_seed(0)
  x = variables.Variable(random_ops.truncated_normal([1, 200, 200, 3], seed=0))
  conv = conv_layers.separable_conv2d if depthwise else conv_layers.conv2d
  y = conv(x, 32, [3, 3])
  z = conv(y, 32, [3, 3])
  optimizer = gradient_descent.GradientDescentOptimizer(1e-4)
  loss = math_ops.reduce_mean(z)
  train_op = optimizer.minimize(loss)
  graph = ops.get_default_graph()
  graph.add_to_collection('train_op', train_op)
  meta_graph = saver_lib.export_meta_graph(graph_def=graph.as_graph_def())
  return meta_graph
def _get_cluster():
  """Creates a single-GPU virtual Grappler cluster for cost estimation."""
  device = device_properties_pb2.NamedDevice()
  device.name = '/GPU:0'
  gpu = device.properties
  gpu.type = 'GPU'
  gpu.num_cores = 24
  gpu.frequency = 1000
  gpu.environment['architecture'] = '4'
  return gcluster.Cluster(devices=[device])
def _is_transpose(node):
return node.endswith('TransposeNHWCToNCHW-LayoutOptimizer') or node.endswith(
'TransposeNCHWToNHWC-LayoutOptimizer')
def _is_permute(node):
return node.endswith('VecPermuteNHWCToNCHW-LayoutOptimizer') or node.endswith(
'VecPermuteNCHWToNHWC-LayoutOptimizer')
class LayoutOptimizerTest(test.TestCase):
"""Tests the Grappler layout optimizer."""
def _assert_trans_nchw_to_nhwc(self, name, nodes):
self.assertIn(name + '-TransposeNCHWToNHWC-LayoutOptimizer', nodes)
def _assert_trans_nhwc_to_nchw(self, name, nodes):
self.assertIn(name + '-TransposeNHWCToNCHW-LayoutOptimizer', nodes)
def _assert_map_nhwc_to_nchw(self, name, nodes):
self.assertIn(name + '-DimMapNHWCToNCHW-LayoutOptimizer', nodes)
def _assert_vec_nchw_to_nhwc(self, name, nodes):
self.assertIn(name + '-VecPermuteNCHWToNHWC-LayoutOptimizer', nodes)
def _assert_vec_nhwc_to_nchw(self, name, nodes):
self.assertIn(name + '-VecPermuteNHWCToNCHW-LayoutOptimizer', nodes)
  def _train(self, checkpoint_path, layout_optimizer=False, restore=False):
    """Trains a small two-conv model for two steps, saving or restoring.

    Args:
      checkpoint_path: path used by the Saver to save/restore variables.
      layout_optimizer: whether the Grappler layout optimizer is enabled.
      restore: if True, restores variables from checkpoint_path before
        training and returns their values afterwards; if False, initializes
        fresh variables and saves them after training.

    Returns:
      A list of all global variable values when restore is True,
      otherwise None.
    """
    ops.reset_default_graph()
    graph = ops.get_default_graph()
    with session.Session(
        config=_get_config(layout_optimizer), graph=graph) as sess:
      batch = 2
      height = 6
      width = 7
      input_channels = 3
      shape = [batch, height, width, input_channels]
      image = array_ops.placeholder(dtype='float32', shape=shape)
      conv1 = conv_layers.conv2d(image, 32, [3, 3])
      conv2 = conv_layers.conv2d(conv1, 32, [3, 3])
      optimizer = gradient_descent.GradientDescentOptimizer(0.01)
      loss = math_ops.reduce_mean(conv2)
      train_op = optimizer.minimize(loss)
      saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2)
      if restore:
        saver.restore(sess, checkpoint_path)
      else:
        sess.run(variables.global_variables_initializer())
      # Fixed numpy seed keeps the two-step training deterministic so the
      # save and restore runs can be compared.
      np.random.seed(0)
      for _ in range(2):
        image_val = np.random.rand(*shape).astype(np.float32)
        sess.run([loss, train_op], feed_dict={image: image_val})
      if restore:
        all_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
        all_vars_values = [var.eval(session=sess) for var in all_vars]
        return all_vars_values
      else:
        saver.save(sess, checkpoint_path)
  def testTwoConvLayers(self):
    """Checks transpose insertion/cancellation around a two-conv model."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      output = _two_layer_model(x)
      # Reference run with the layout optimizer off; the second run records
      # the optimized cost graph in `metadata` for node-name inspection.
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('Relu_1-0-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testSplitWithNonConstAxis(self):
    """Checks layout handling of Split when the axis is fed at run time."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      dim = array_ops.placeholder(dtype='int32')
      split = array_ops.split(conv, 2, axis=dim)
      scale = constant_op.constant(0.1, shape=[32])
      offset = constant_op.constant(0.3, shape=[32])
      bn0 = nn.fused_batch_norm(split[0], scale, offset)
      bn1 = nn.fused_batch_norm(split[1], scale, offset)
      add = bn0[0] + bn1[0]
      output = array_ops.identity(add)
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output, feed_dict={dim: 3})
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata, feed_dict={dim: 3})
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('add_2-0-0', nodes)
      # The non-const axis input must be remapped from NHWC to NCHW indices.
      self._assert_map_nhwc_to_nchw('split-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testSplitVWithNonConstAxis(self):
    """Checks layout handling of SplitV when the axis is fed at run time."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      dim = array_ops.placeholder(dtype='int32')
      sizes = constant_op.constant([50, 10, 4], shape=[3])
      split = gen_array_ops.split_v(
          value=conv, size_splits=sizes, axis=dim, num_split=3)
      output = math_ops.reduce_sum(split[0])
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output, feed_dict={dim: 3})
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata, feed_dict={dim: 3})
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('SplitV-0-0', nodes)
      self._assert_map_nhwc_to_nchw('SplitV-2', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testPadWithConstPaddings(self):
    """Checks that constant Pad paddings are permuted at graph-rewrite time."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      paddings_val = [[1, 2], [3, 4], [5, 6], [7, 8]]
      paddings = constant_op.constant(
          paddings_val, dtype='int32', name='PaddingsConst')
      pad = array_ops.pad(conv, paddings)
      output = array_ops.identity(pad)
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('Pad-0-0', nodes)
      # Const paddings are rewritten in place (no runtime permute node).
      self.assertIn('Pad-1-LayoutOptimizer', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testReduceSum(self):
    """Checks that a full ReduceSum after conv needs only one transpose."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      reduce_sum = math_ops.reduce_sum(conv)
      output = array_ops.identity(reduce_sum)
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Three transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 1
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testCast(self):
    """Checks layout handling across a Cast (float -> bool) op."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      cast = math_ops.cast(conv, dtype='bool')
      output = array_ops.identity(cast)
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('Cast-0-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testSqueeze(self):
    """Checks Squeeze after a reduction over the H and W dimensions."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      reduce_sum = math_ops.reduce_sum(conv, axis=[1, 2])
      squeeze = array_ops.squeeze(reduce_sum)
      output = array_ops.identity(squeeze)
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Three transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 1
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testSqueezeAlongHW(self):
    """Checks Squeeze with explicit H and W axes after a keepdims reduce."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      reduce_sum = math_ops.reduce_sum(conv, axis=[1, 2], keepdims=True)
      squeeze = array_ops.squeeze(reduce_sum, axis=[1, 2])
      output = array_ops.identity(squeeze)
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Three transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 1
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testSqueezeAlongNHW(self):
    """Checks Squeeze with explicit N, H and W axes after a keepdims reduce."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      reduce_sum = math_ops.reduce_sum(conv, axis=[0, 1, 2], keepdims=True)
      squeeze = array_ops.squeeze(reduce_sum, axis=[0, 1, 2])
      output = array_ops.identity(squeeze)
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Three transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 1
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testReduceSumAlongHWC(self):
    """Checks ReduceSum along H, W and C."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      reduce_sum = math_ops.reduce_sum(conv, axis=[1, 2, 3])
      output = array_ops.identity(reduce_sum)
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Three transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 1
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testReduceSumAlongNHW(self):
    """Checks ReduceSum along N, H and W."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      reduce_sum = math_ops.reduce_sum(conv, axis=[0, 1, 2])
      output = array_ops.identity(reduce_sum)
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Three transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 1
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testReduceSumAlongC(self):
    """Checks ReduceSum along the channel dimension only."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      reduce_sum = math_ops.reduce_sum(conv, axis=[3])
      output = array_ops.identity(reduce_sum)
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Three transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 1
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testReduceSumAlongCKeepDims(self):
    """Checks ReduceSum along C with keepdims=True (rank preserved)."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      reduce_sum = math_ops.reduce_sum(conv, axis=[3], keepdims=True)
      output = array_ops.identity(reduce_sum)
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('Sum-0-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testReduceSumAlongHKeepDims(self):
    """Checks ReduceSum along H with keepdims=True."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      reduce_sum = math_ops.reduce_sum(conv, axis=[2], keepdims=True)
      output = array_ops.identity(reduce_sum)
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testReduceSumAlongWCKeepDims(self):
    """Checks ReduceSum along W and C with keepdims=True."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      reduce_sum = math_ops.reduce_sum(conv, axis=[2, 3], keepdims=True)
      output = array_ops.identity(reduce_sum)
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testConcatWithControlDependency(self):
    """Checks Concat layout handling under a control dependency."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      axis = constant_op.constant(3)
      var = variables.Variable(3)
      assign = state_ops.assign(var, 6)
      with ops.control_dependencies([assign]):
        concat = array_ops.concat([conv, conv], axis)
      output = array_ops.identity(concat)
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('concat-0-0', nodes)
      # The const concat axis is rewritten in place by the optimizer.
      self.assertIn('concat-2-LayoutOptimizer', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testFill(self):
    """Checks Fill whose shape comes from a conv output's runtime shape."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = array_ops.placeholder(dtype='float32')
      conv = _two_layer_model(x)
      shape = array_ops.shape(conv)
      scalar = array_ops.constant(5.7)
      fill = array_ops.fill(shape, scalar)
      output = array_ops.identity(fill)
      x_val = [3.4] * 784
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output, feed_dict={x: x_val})
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(
            output, run_metadata=metadata, feed_dict={
                x: x_val
            })
      nodes = []
      num_transposes = 0
      num_vec_permute = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        if _is_permute(node.name):
          num_vec_permute += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      # Two vector permute nodes were initially added in the Expand phase of
      # LayoutOptimizer; they cancelled out each other in the Collapse phase.
      expected_vec_permute = 0
      self.assertEqual(expected_vec_permute, num_vec_permute)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('Fill-0-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testTile(self):
    """Checks Tile whose multiples vector is fed at run time."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      multiple = array_ops.placeholder(dtype='int32')
      tile = array_ops.tile(conv, multiple)
      output = array_ops.identity(tile)
      multiple_val = [2, 3, 4, 1]
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output, feed_dict={multiple: multiple_val})
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(
            output, run_metadata=metadata, feed_dict={
                multiple: multiple_val
            })
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('Tile-0-0', nodes)
      # The runtime multiples vector needs a vector permute node.
      self._assert_vec_nhwc_to_nchw('Tile-1', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testReverseWithConstDims(self):
    """Checks ReverseV2 with constant reversal dims (rewritten in place)."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      dims = constant_op.constant([3, 1], name='DimsConst')
      reverse = array_ops.reverse(conv, dims)
      output = array_ops.identity(reverse)
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('ReverseV2-0-0', nodes)
      self.assertIn('ReverseV2-1-LayoutOptimizer', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testReverseWithNonConstDims(self):
    """Checks ReverseV2 with runtime-fed dims (needs a dim-map node)."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      dims = array_ops.placeholder(dtype='int32')
      reverse = array_ops.reverse(conv, dims)
      output = array_ops.identity(reverse)
      dims_val = [2, 3]
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output, feed_dict={dims: dims_val})
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(
            output, run_metadata=metadata, feed_dict={
                dims: dims_val
            })
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('ReverseV2-0-0', nodes)
      self._assert_map_nhwc_to_nchw('ReverseV2-1', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testSelectOp(self):
    """Checks Select whose condition is a full 4-D tensor."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      add = math_ops.add(conv, conv)
      mean = math_ops.reduce_mean(conv)
      condition = math_ops.less(conv, mean)
      select = gen_math_ops.select(condition, conv, add)
      output = array_ops.identity(select)
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('Select-0-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testSelectOpConditionUnknownShape(self):
    """Checks Select when the condition's shape is unknown at graph time."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      add = math_ops.add(conv, conv)
      condition = array_ops.placeholder(dtype='bool')
      select = gen_math_ops.select(condition, conv, add)
      output = array_ops.identity(select)
      condition_val = np.zeros((1, 7, 7, 64))
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output, feed_dict={condition: condition_val})
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(
            output, run_metadata=metadata, feed_dict={condition: condition_val})
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # With an unknown-shape condition the Select cannot be converted, so an
      # extra transpose remains.
      expected_num_transposes = 3
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testSelectOpScalarCondition(self):
    """Checks Select whose condition is a scalar constant."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      add = math_ops.add(conv, conv)
      condition = constant_op.constant(True)
      select = gen_math_ops.select(condition, conv, add)
      output = array_ops.identity(select)
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('Select-0-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testPadWithNonConstPaddings(self):
    """Checks Pad with runtime-fed paddings (needs a vector permute node)."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      paddings = array_ops.placeholder(dtype='int32')
      pad = array_ops.pad(conv, paddings)
      output = array_ops.identity(pad)
      paddings_val = [[1, 2], [3, 4], [5, 6], [7, 8]]
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output, feed_dict={paddings: paddings_val})
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(
            output, run_metadata=metadata, feed_dict={
                paddings: paddings_val
            })
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('Pad-0-0', nodes)
      self._assert_vec_nhwc_to_nchw('Pad-1', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testMaxPoolV2(self):
  """MaxPoolV2 with placeholder strides is converted with two transposes."""
  if not test.is_gpu_available(cuda_only=True):
    return
  random_seed.set_random_seed(0)
  x = random_ops.truncated_normal([1, 784], seed=0)
  conv = _two_layer_model(x)
  ksize = constant_op.constant([1, 2, 3, 1], shape=[4])
  strides = array_ops.placeholder(dtype='int32', shape=[4])
  max_pool = gen_nn_ops.max_pool_v2(conv, ksize, strides, 'VALID')
  output = array_ops.identity(max_pool)
  strides_val = [1, 3, 2, 1]
  feed = {strides: strides_val}
  # Reference run with the layout optimizer disabled.
  with session.Session(config=_get_config(False)) as sess:
    output_val_ref = sess.run(output, feed_dict=feed)
  # Optimized run; collect the cost graph to inspect inserted transposes.
  with session.Session(config=_get_config()) as sess:
    metadata = config_pb2.RunMetadata()
    output_val = sess.run(output, run_metadata=metadata, feed_dict=feed)
  nodes = [node.name for node in metadata.cost_graph.node]
  num_transposes = sum(1 for name in nodes if _is_transpose(name))
  self.assertEqual(2, num_transposes)
  self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
  self._assert_trans_nchw_to_nhwc('MaxPoolV2-0-0', nodes)
  self._assert_vec_nhwc_to_nchw('MaxPoolV2-2', nodes)
  self.assertIn('MaxPoolV2-1-LayoutOptimizer', nodes)
  self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testMaxPoolGradV2(self):
  """MaxPoolGradV2 with placeholder strides is converted with two transposes."""
  if not test.is_gpu_available(cuda_only=True):
    return
  random_seed.set_random_seed(0)
  x = random_ops.truncated_normal([1, 784], seed=0)
  conv = _two_layer_model(x)
  ksize = constant_op.constant([1, 2, 3, 1], shape=[4])
  strides = array_ops.placeholder(dtype='int32', shape=[4])
  max_pool_grad = gen_nn_ops.max_pool_grad_v2(conv, conv, conv, ksize,
                                              strides, 'VALID')
  output = array_ops.identity(max_pool_grad)
  strides_val = [1, 3, 2, 1]
  feed = {strides: strides_val}
  # Reference run with the layout optimizer disabled.
  with session.Session(config=_get_config(False)) as sess:
    output_val_ref = sess.run(output, feed_dict=feed)
  # Optimized run; collect the cost graph to inspect inserted transposes.
  with session.Session(config=_get_config()) as sess:
    metadata = config_pb2.RunMetadata()
    output_val = sess.run(output, run_metadata=metadata, feed_dict=feed)
  nodes = [node.name for node in metadata.cost_graph.node]
  num_transposes = sum(1 for name in nodes if _is_transpose(name))
  self.assertEqual(2, num_transposes)
  self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
  self._assert_trans_nchw_to_nhwc('MaxPoolGradV2-0-0', nodes)
  self._assert_vec_nhwc_to_nchw('MaxPoolGradV2-4', nodes)
  self.assertIn('MaxPoolGradV2-3-LayoutOptimizer', nodes)
  self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testSliceWithNonConstAxis(self):
  """Slice whose size tensor is a placeholder still gets NCHW layout."""
  if not test.is_gpu_available(cuda_only=True):
    return
  random_seed.set_random_seed(0)
  x = random_ops.truncated_normal([1, 784], seed=0)
  conv = _two_layer_model(x)
  size = array_ops.placeholder(dtype='int32')
  s = array_ops.slice(conv, [0, 0, 0, 0], size)
  output = array_ops.identity(s)
  size_val = [1, 2, 3, 4]
  feed = {size: size_val}
  # Reference run with the layout optimizer disabled.
  with session.Session(config=_get_config(False)) as sess:
    output_val_ref = sess.run(output, feed_dict=feed)
  # Optimized run; collect the cost graph to inspect inserted transposes.
  with session.Session(config=_get_config()) as sess:
    metadata = config_pb2.RunMetadata()
    output_val = sess.run(output, run_metadata=metadata, feed_dict=feed)
  nodes = [node.name for node in metadata.cost_graph.node]
  num_transposes = sum(1 for name in nodes if _is_transpose(name))
  # Four transposes were initially added in the Expand phase of
  # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
  self.assertEqual(2, num_transposes)
  self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
  self._assert_trans_nchw_to_nhwc('Slice-0-0', nodes)
  self._assert_vec_nhwc_to_nchw('Slice-2', nodes)
  self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testStridedSliceWithNonConstAxis(self):
  """StridedSlice whose end tensor is a placeholder still gets NCHW layout."""
  if not test.is_gpu_available(cuda_only=True):
    return
  random_seed.set_random_seed(0)
  x = random_ops.truncated_normal([1, 784], seed=0)
  conv = _two_layer_model(x)
  end = array_ops.placeholder(dtype='int32')
  s = array_ops.strided_slice(conv, [0, 0, 0, 0], end, strides=[1, 2, 3, 1])
  output = array_ops.identity(s)
  end_val = [1, 2, 3, 4]
  feed = {end: end_val}
  # Reference run with the layout optimizer disabled.
  with session.Session(config=_get_config(False)) as sess:
    output_val_ref = sess.run(output, feed_dict=feed)
  # Optimized run; collect the cost graph to inspect inserted transposes.
  with session.Session(config=_get_config()) as sess:
    metadata = config_pb2.RunMetadata()
    output_val = sess.run(output, run_metadata=metadata, feed_dict=feed)
  nodes = [node.name for node in metadata.cost_graph.node]
  num_transposes = sum(1 for name in nodes if _is_transpose(name))
  # Four transposes were initially added in the Expand phase of
  # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
  self.assertEqual(2, num_transposes)
  self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
  self._assert_trans_nchw_to_nhwc('StridedSlice-0-0', nodes)
  self._assert_vec_nhwc_to_nchw('StridedSlice-2', nodes)
  self.assertIn('StridedSlice-1-LayoutOptimizer', nodes)
  self.assertIn('StridedSlice-3-LayoutOptimizer', nodes)
  self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testStridedSliceWithMask1011(self):
  """StridedSlice produced by slicing syntax (mask 1011) gets NCHW layout."""
  if not test.is_gpu_available(cuda_only=True):
    return
  random_seed.set_random_seed(0)
  x = random_ops.truncated_normal([1, 784], seed=0)
  conv = _two_layer_model(x)
  # This will generate a StridedSlice op with begin mask and
  # end mask 11(1011).
  s = conv[:, :, 1:-1, :]
  output = array_ops.identity(s)
  # Reference run with the layout optimizer disabled.
  with session.Session(config=_get_config(False)) as sess:
    output_val_ref = sess.run(output)
  # Optimized run; collect the cost graph to inspect inserted transposes.
  with session.Session(config=_get_config()) as sess:
    metadata = config_pb2.RunMetadata()
    output_val = sess.run(output, run_metadata=metadata)
  nodes = [node.name for node in metadata.cost_graph.node]
  num_transposes = sum(1 for name in nodes if _is_transpose(name))
  # Four transposes were initially added in the Expand phase of
  # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
  self.assertEqual(2, num_transposes)
  self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
  self._assert_trans_nchw_to_nhwc('strided_slice-0-0', nodes)
  self.assertIn('strided_slice-1-LayoutOptimizer', nodes)
  self.assertIn('strided_slice-2-LayoutOptimizer', nodes)
  self.assertIn('strided_slice-3-LayoutOptimizer', nodes)
  self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testStridedSliceWithMask0111(self):
  """StridedSlice produced by slicing syntax (mask 0111) gets NCHW layout."""
  if not test.is_gpu_available(cuda_only=True):
    return
  random_seed.set_random_seed(0)
  x = random_ops.truncated_normal([1, 784], seed=0)
  conv = _two_layer_model(x)
  # This will generate a StridedSlice op with begin mask and
  # end mask 7(0111).
  s = conv[:, :, :, 1:-1]
  output = array_ops.identity(s)
  # Reference run with the layout optimizer disabled.
  with session.Session(config=_get_config(False)) as sess:
    output_val_ref = sess.run(output)
  # Optimized run; collect the cost graph to inspect inserted transposes.
  with session.Session(config=_get_config()) as sess:
    metadata = config_pb2.RunMetadata()
    output_val = sess.run(output, run_metadata=metadata)
  nodes = [node.name for node in metadata.cost_graph.node]
  num_transposes = sum(1 for name in nodes if _is_transpose(name))
  # Four transposes were initially added in the Expand phase of
  # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
  self.assertEqual(2, num_transposes)
  self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
  self._assert_trans_nchw_to_nhwc('strided_slice-0-0', nodes)
  self.assertIn('strided_slice-1-LayoutOptimizer', nodes)
  self.assertIn('strided_slice-2-LayoutOptimizer', nodes)
  self.assertIn('strided_slice-3-LayoutOptimizer', nodes)
  self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testStridedSliceGradWithNonConstAxis(self):
  """StridedSliceGrad with a placeholder `end` still gets NCHW layout."""
  if not test.is_gpu_available(cuda_only=True):
    return
  random_seed.set_random_seed(0)
  x = random_ops.truncated_normal([1, 784], seed=0)
  conv = _two_layer_model(x)
  end = array_ops.placeholder(dtype='int32')
  shape = array_ops.shape(conv)
  end_val = [1, 2, 3, 4]
  s = array_ops.strided_slice(
      conv, [0, 0, 0, 0], end_val, strides=[1, 2, 3, 1])
  s_grad = array_ops.strided_slice_grad(shape, [0, 0, 0, 0], end,
                                        [1, 2, 3, 1], s)
  output = array_ops.identity(s_grad)
  feed = {end: end_val}
  # Reference run with the layout optimizer disabled.
  with session.Session(config=_get_config(False)) as sess:
    output_val_ref = sess.run(output, feed_dict=feed)
  # Optimized run; collect the cost graph to inspect inserted transposes.
  with session.Session(config=_get_config()) as sess:
    metadata = config_pb2.RunMetadata()
    output_val = sess.run(output, run_metadata=metadata, feed_dict=feed)
  nodes = [node.name for node in metadata.cost_graph.node]
  num_transposes = sum(1 for name in nodes if _is_transpose(name))
  # Four transposes were initially added in the Expand phase of
  # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
  self.assertEqual(2, num_transposes)
  self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
  self._assert_trans_nchw_to_nhwc('StridedSliceGrad-0-0', nodes)
  self._assert_vec_nhwc_to_nchw('StridedSliceGrad-2', nodes)
  self.assertIn('StridedSlice-1-LayoutOptimizer', nodes)
  self.assertIn('StridedSlice-2-LayoutOptimizer', nodes)
  self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testShapeN(self):
  """ShapeN consumers of a converted conv need only a single transpose."""
  if not test.is_gpu_available(cuda_only=True):
    return
  x = array_ops.placeholder(dtype='float32')
  conv = _two_layer_model(x)
  shapen = array_ops.shape_n([conv, conv])
  output = math_ops.add(shapen[0], shapen[1])
  x_val = [1.7] * 784
  feed = {x: x_val}
  # Reference run with the layout optimizer disabled.
  with session.Session(config=_get_config(False)) as sess:
    output_val_ref = sess.run(output, feed_dict=feed)
  # Optimized run; collect the cost graph to inspect inserted transposes.
  with session.Session(config=_get_config()) as sess:
    metadata = config_pb2.RunMetadata()
    output_val = sess.run(output, run_metadata=metadata, feed_dict=feed)
  nodes = [node.name for node in metadata.cost_graph.node]
  num_transposes = sum(1 for name in nodes if _is_transpose(name))
  self.assertEqual(1, num_transposes)
  self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
  self._assert_vec_nchw_to_nhwc('ShapeN-0-0', nodes)
  self.assertAllEqual(output_val_ref, output_val)
def testShapeNFollowedByNotConvertibleNodeReshape(self):
  """ShapeN feeding a non-convertible Reshape keeps two transposes."""
  if not test.is_gpu_available(cuda_only=True):
    return
  x = array_ops.placeholder(dtype='float32')
  conv = _two_layer_model(x)
  conv_reshape = array_ops.reshape(conv, [1, 1, 1, -1])
  shapen = array_ops.shape_n([conv, conv_reshape])
  shape = array_ops.identity(shapen[1])
  ones = array_ops.ones(shape)
  output = math_ops.add_n([conv_reshape, ones])
  x_val = [1.7] * 784
  feed = {x: x_val}
  # Reference run with the layout optimizer disabled.
  with session.Session(config=_get_config(False)) as sess:
    output_val_ref = sess.run(output, feed_dict=feed)
  # Optimized run; collect the cost graph to inspect inserted transposes.
  with session.Session(config=_get_config()) as sess:
    metadata = config_pb2.RunMetadata()
    output_val = sess.run(output, run_metadata=metadata, feed_dict=feed)
  nodes = [node.name for node in metadata.cost_graph.node]
  num_transposes = sum(1 for name in nodes if _is_transpose(name))
  self.assertEqual(2, num_transposes)
  self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
  self.assertAllEqual(output_val_ref, output_val)
def testLoop(self):
  """A conv inside a while loop is still converted, with two boundary transposes.

  Fix: the original repeated `self.assertEqual(expected_num_transposes,
  num_transposes)` on two consecutive lines; the duplicate is removed.
  """
  if test.is_gpu_available(cuda_only=True):
    output = _loop()
    # Reference run with the layout optimizer disabled.
    with session.Session(config=_get_config(False)) as sess:
      output_val_ref = sess.run(output)
    # Optimized run; collect the cost graph to inspect inserted transposes.
    with session.Session(config=_get_config()) as sess:
      metadata = config_pb2.RunMetadata()
      output_val = sess.run(output, run_metadata=metadata)
    nodes = []
    num_transposes = 0
    for node in metadata.cost_graph.node:
      if _is_transpose(node.name):
        num_transposes += 1
      nodes.append(node.name)
    # Four transposes were initially added in the Expand phase of
    # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
    expected_num_transposes = 2
    self.assertEqual(expected_num_transposes, num_transposes)
    self._assert_trans_nhwc_to_nchw('map/while/Conv2D-0', nodes)
    self._assert_trans_nchw_to_nhwc('map/while/MaxPool_1-0-2', nodes)
    self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testLoopWithBranch(self):
  """A loop containing a branch is converted with three transposes."""
  if not test.is_gpu_available(cuda_only=True):
    return
  output = _loop_with_branch()
  # Reference run with the layout optimizer disabled.
  with session.Session(config=_get_config(False)) as sess:
    output_val_ref = sess.run(output)
  # Optimized run; collect the cost graph to inspect inserted transposes.
  with session.Session(config=_get_config()) as sess:
    metadata = config_pb2.RunMetadata()
    output_val = sess.run(output, run_metadata=metadata)
  nodes = [node.name for node in metadata.cost_graph.node]
  num_transposes = sum(1 for name in nodes if _is_transpose(name))
  self.assertEqual(3, num_transposes)
  self._assert_trans_nhwc_to_nchw('map/while/Conv2D-0', nodes)
  self._assert_trans_nchw_to_nhwc('map/while/Add-0-2', nodes)
  self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testLoopWithVecAnd4D(self):
  """A loop mixing vector and 4-D tensors is converted with two transposes."""
  if not test.is_gpu_available(cuda_only=True):
    return
  output = _loop_with_vec_and_4d()
  # Reference run with the layout optimizer disabled.
  with session.Session(config=_get_config(False)) as sess:
    output_val_ref = sess.run(output)
  # Optimized run; collect the cost graph to inspect inserted transposes.
  with session.Session(config=_get_config()) as sess:
    metadata = config_pb2.RunMetadata()
    output_val = sess.run(output, run_metadata=metadata)
  nodes = [node.name for node in metadata.cost_graph.node]
  num_transposes = sum(1 for name in nodes if _is_transpose(name))
  self.assertEqual(2, num_transposes)
  self._assert_trans_nhwc_to_nchw('map/while/Conv2D-0', nodes)
  self._assert_trans_nchw_to_nhwc('map/while/Add-0-2', nodes)
  self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testBinaryOpSecondPort(self):
  """A binary op fed on its second port by a converted node is handled."""
  if not test.is_gpu_available(cuda_only=True):
    return
  output = _model_with_second_port()
  # Reference run with the layout optimizer disabled.
  with session.Session(config=_get_config(False)) as sess:
    output_val_ref = sess.run(output)
  # Optimized run; collect the cost graph to inspect inserted transposes.
  with session.Session(config=_get_config()) as sess:
    metadata = config_pb2.RunMetadata()
    output_val = sess.run(output, run_metadata=metadata)
  nodes = [node.name for node in metadata.cost_graph.node]
  num_transposes = sum(1 for name in nodes if _is_transpose(name))
  self.assertEqual(2, num_transposes)
  self._assert_trans_nhwc_to_nchw('FusedBatchNorm-0', nodes)
  self._assert_trans_nchw_to_nhwc('Add-0-0', nodes)
  self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testGradient(self):
  """Forward and backward Conv2D ops are all rewritten to NCHW."""
  meta_graph = _simple_metagraph()
  rewrite_options = rewriter_config_pb2.RewriterConfig(
      layout_optimizer=rewriter_config_pb2.RewriterConfig.ON)
  optimized_graph = tf_optimizer.OptimizeGraph(
      rewrite_options, meta_graph, cluster=_get_cluster())
  conv_nodes = [
      node for node in optimized_graph.node
      if node.op in ['Conv2D', 'Conv2DBackpropFilter', 'Conv2DBackpropInput']
  ]
  for node in conv_nodes:
    self.assertEqual(node.attr['data_format'].s, b'NCHW')
  self.assertEqual(len(conv_nodes), 5)
def testDepthwise(self):
  """Depthwise convs (forward and both backprops) are rewritten to NCHW."""
  meta_graph = _simple_metagraph(depthwise=True)
  rewrite_options = rewriter_config_pb2.RewriterConfig(
      layout_optimizer=rewriter_config_pb2.RewriterConfig.ON)
  optimized_graph = tf_optimizer.OptimizeGraph(
      rewrite_options, meta_graph, cluster=_get_cluster())
  depthwise_nodes = [
      node for node in optimized_graph.node
      if node.op in [
          'DepthwiseConv2dNative', 'DepthwiseConv2dNativeBackpropFilter',
          'DepthwiseConv2dNativeBackpropInput'
      ]
  ]
  for node in depthwise_nodes:
    self.assertEqual(node.attr['data_format'].s, b'NCHW')
  self.assertEqual(len(depthwise_nodes), 6)
def testCheckpointCompatibility(self):
  """A checkpoint written without layout optimization restores under it."""
  if not test.is_gpu_available(cuda_only=True):
    self.skipTest('GPU required')
  checkpoint_path = self.get_temp_dir()
  # First run writes the checkpoint; the next two restore from it,
  # with and without the layout optimizer enabled.
  self._train(checkpoint_path)
  vars_expected = self._train(checkpoint_path, restore=True)
  vars_layout_optimized = self._train(
      checkpoint_path, restore=True, layout_optimizer=True)
  for expected, optimized in zip(vars_expected, vars_layout_optimized):
    self.assertAllClose(expected, optimized, atol=1e-6)
if __name__ == '__main__':
  # Run all tests in this module via the TensorFlow test runner.
  test.main()
| {
"content_hash": "eab741cc0135dd1ff2c83455a279355d",
"timestamp": "",
"source": "github",
"line_count": 1472,
"max_line_length": 80,
"avg_line_length": 38.72214673913044,
"alnum_prop": 0.6521693363041456,
"repo_name": "eaplatanios/tensorflow",
"id": "e3dd4b0bdfbb28480c78b0add5947c79852fb44d",
"size": "57688",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/grappler/layout_optimizer_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9274"
},
{
"name": "C",
"bytes": "163987"
},
{
"name": "C++",
"bytes": "34944901"
},
{
"name": "CMake",
"bytes": "5123"
},
{
"name": "CSS",
"bytes": "9206"
},
{
"name": "Go",
"bytes": "1047216"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "423531"
},
{
"name": "JavaScript",
"bytes": "3127"
},
{
"name": "Jupyter Notebook",
"bytes": "1833814"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "19718973"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Scala",
"bytes": "3606806"
},
{
"name": "Shell",
"bytes": "352897"
},
{
"name": "Smarty",
"bytes": "6870"
}
],
"symlink_target": ""
} |
import os
try:
    # When running under IPython, the builtin name __IPYTHON__ exists; strip
    # any extra command-line arguments IPython may have injected so that the
    # option parsing below sees a clean sys.argv.
    __IPYTHON__
    import sys
    del sys.argv[1:]
except:
    # Plain interpreter: __IPYTHON__ is undefined (NameError) — keep argv as-is.
    # NOTE(review): bare `except:` also swallows other errors; presumably
    # intentional best-effort behavior in this generated script.
    pass
import srwl_bl
import srwlib
import srwlpy
import math
import srwl_uti_smp
def set_optics(v=None):
    """Build the SRW beamline container for this simulation.

    Walks the fixed element sequence (two ellipsoidal mirrors, drifts, a
    transmission mask, and watchpoints) and creates one SRW optical element
    plus one propagation-parameter entry per beamline element, reading every
    numeric setting from the attributes of `v` (the parsed `varParam`
    namespace — assumed to be populated by srwl_bl; TODO confirm caller).

    Returns:
        srwlib.SRWLOptC: the optical container holding the element list and
        the per-element propagation parameters (plus the final `op_fin_pp`).
    """
    el = []  # optical elements, in propagation order
    pp = []  # propagation parameters, parallel to `el`
    names = ['VFM', 'VFM_HFM', 'HFM', 'HFM_Watchpoint', 'Watchpoint', 'Watchpoint_Mask', 'Mask', 'Watchpoint2']
    for el_name in names:
        if el_name == 'VFM':
            # VFM: ellipsoidMirror 50.0m
            el.append(srwlib.SRWLOptMirEl(
                _p=v.op_VFM_p,
                _q=v.op_VFM_q,
                _ang_graz=v.op_VFM_ang,
                _size_tang=v.op_VFM_size_tang,
                _size_sag=v.op_VFM_size_sag,
                _nvx=v.op_VFM_nvx,
                _nvy=v.op_VFM_nvy,
                _nvz=v.op_VFM_nvz,
                _tvx=v.op_VFM_tvx,
                _tvy=v.op_VFM_tvy,
                _x=v.op_VFM_x,
                _y=v.op_VFM_y,
            ))
            pp.append(v.op_VFM_pp)
        elif el_name == 'VFM_HFM':
            # VFM_HFM: drift 50.0m
            el.append(srwlib.SRWLOptD(
                _L=v.op_VFM_HFM_L,
            ))
            pp.append(v.op_VFM_HFM_pp)
        elif el_name == 'HFM':
            # HFM: ellipsoidMirror 50.2m
            el.append(srwlib.SRWLOptMirEl(
                _p=v.op_HFM_p,
                _q=v.op_HFM_q,
                _ang_graz=v.op_HFM_ang,
                _size_tang=v.op_HFM_size_tang,
                _size_sag=v.op_HFM_size_sag,
                _nvx=v.op_HFM_nvx,
                _nvy=v.op_HFM_nvy,
                _nvz=v.op_HFM_nvz,
                _tvx=v.op_HFM_tvx,
                _tvy=v.op_HFM_tvy,
                _x=v.op_HFM_x,
                _y=v.op_HFM_y,
            ))
            pp.append(v.op_HFM_pp)
        elif el_name == 'HFM_Watchpoint':
            # HFM_Watchpoint: drift 50.2m
            el.append(srwlib.SRWLOptD(
                _L=v.op_HFM_Watchpoint_L,
            ))
            pp.append(v.op_HFM_Watchpoint_pp)
        elif el_name == 'Watchpoint':
            # Watchpoint: watch 50.4m
            # Watchpoints are observation planes only — no optical element.
            pass
        elif el_name == 'Watchpoint_Mask':
            # Watchpoint_Mask: drift 50.4m
            el.append(srwlib.SRWLOptD(
                _L=v.op_Watchpoint_Mask_L,
            ))
            pp.append(v.op_Watchpoint_Mask_pp)
        elif el_name == 'Mask':
            # Mask: mask 50.6m
            el.append(srwlib.srwl_opt_setup_mask(
                _delta=v.op_Mask_delta,
                _atten_len=v.op_Mask_atten_len,
                _thick=v.op_Mask_thick,
                _grid_sh=v.op_Mask_grid_sh,
                _grid_dx=v.op_Mask_grid_dx,
                _grid_dy=v.op_Mask_grid_dy,
                _pitch_x=v.op_Mask_pitch_x,
                _pitch_y=v.op_Mask_pitch_y,
                _grid_nx=v.op_Mask_grid_nx,
                _grid_ny=v.op_Mask_grid_ny,
                _mask_Nx=v.op_Mask_mask_Nx,
                _mask_Ny=v.op_Mask_mask_Ny,
                _grid_angle=v.op_Mask_gridTiltAngle,
                _hx=v.op_Mask_hx,
                _hy=v.op_Mask_hy,
                _mask_x0=v.op_Mask_mask_x0,
                _mask_y0=v.op_Mask_mask_y0,
            ))
            pp.append(v.op_Mask_pp)
        elif el_name == 'Watchpoint2':
            # Watchpoint2: watch 50.6m
            pass
    # Final (post-propagation) parameter set applied after the last element.
    pp.append(v.op_fin_pp)
    return srwlib.SRWLOptC(el, pp)
varParam = [
['name', 's', 'Mask example', 'simulation name'],
#---Data Folder
['fdir', 's', '', 'folder (directory) name for reading-in input and saving output data files'],
['gbm_x', 'f', 0.0, 'average horizontal coordinates of waist [m]'],
['gbm_y', 'f', 0.0, 'average vertical coordinates of waist [m]'],
['gbm_z', 'f', 0.0, 'average longitudinal coordinate of waist [m]'],
['gbm_xp', 'f', 0.0, 'average horizontal angle at waist [rad]'],
['gbm_yp', 'f', 0.0, 'average verical angle at waist [rad]'],
['gbm_ave', 'f', 9000.0, 'average photon energy [eV]'],
['gbm_pen', 'f', 0.001, 'energy per pulse [J]'],
['gbm_rep', 'f', 1, 'rep. rate [Hz]'],
['gbm_pol', 'f', 1, 'polarization 1- lin. hor., 2- lin. vert., 3- lin. 45 deg., 4- lin.135 deg., 5- circ. right, 6- circ. left'],
['gbm_sx', 'f', 3e-06, 'rms beam size vs horizontal position [m] at waist (for intensity)'],
['gbm_sy', 'f', 3e-06, 'rms beam size vs vertical position [m] at waist (for intensity)'],
['gbm_st', 'f', 1e-13, 'rms pulse duration [s] (for intensity)'],
['gbm_mx', 'f', 0, 'transverse Gauss-Hermite mode order in horizontal direction'],
['gbm_my', 'f', 0, 'transverse Gauss-Hermite mode order in vertical direction'],
['gbm_ca', 's', 'c', 'treat _sigX, _sigY as sizes in [m] in coordinate representation (_presCA="c") or as angular divergences in [rad] in angular representation (_presCA="a")'],
['gbm_ft', 's', 't', 'treat _sigT as pulse duration in [s] in time domain/representation (_presFT="t") or as bandwidth in [eV] in frequency domain/representation (_presFT="f")'],
#---Calculation Types
# Electron Trajectory
['tr', '', '', 'calculate electron trajectory', 'store_true'],
['tr_cti', 'f', 0.0, 'initial time moment (c*t) for electron trajectory calculation [m]'],
['tr_ctf', 'f', 0.0, 'final time moment (c*t) for electron trajectory calculation [m]'],
['tr_np', 'f', 10000, 'number of points for trajectory calculation'],
['tr_mag', 'i', 1, 'magnetic field to be used for trajectory calculation: 1- approximate, 2- accurate'],
['tr_fn', 's', 'res_trj.dat', 'file name for saving calculated trajectory data'],
['tr_pl', 's', '', 'plot the resulting trajectiry in graph(s): ""- dont plot, otherwise the string should list the trajectory components to plot'],
#Single-Electron Spectrum vs Photon Energy
['ss', '', '', 'calculate single-e spectrum vs photon energy', 'store_true'],
['ss_ei', 'f', 100.0, 'initial photon energy [eV] for single-e spectrum vs photon energy calculation'],
['ss_ef', 'f', 20000.0, 'final photon energy [eV] for single-e spectrum vs photon energy calculation'],
['ss_ne', 'i', 10000, 'number of points vs photon energy for single-e spectrum vs photon energy calculation'],
['ss_x', 'f', 0.0, 'horizontal position [m] for single-e spectrum vs photon energy calculation'],
['ss_y', 'f', 0.0, 'vertical position [m] for single-e spectrum vs photon energy calculation'],
['ss_meth', 'i', 1, 'method to use for single-e spectrum vs photon energy calculation: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"'],
['ss_prec', 'f', 0.01, 'relative precision for single-e spectrum vs photon energy calculation (nominal value is 0.01)'],
['ss_pol', 'i', 6, 'polarization component to extract after spectrum vs photon energy calculation: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['ss_mag', 'i', 1, 'magnetic field to be used for single-e spectrum vs photon energy calculation: 1- approximate, 2- accurate'],
['ss_ft', 's', 'f', 'presentation/domain: "f"- frequency (photon energy), "t"- time'],
['ss_u', 'i', 1, 'electric field units: 0- arbitrary, 1- sqrt(Phot/s/0.1%bw/mm^2), 2- sqrt(J/eV/mm^2) or sqrt(W/mm^2), depending on representation (freq. or time)'],
['ss_fn', 's', 'res_spec_se.dat', 'file name for saving calculated single-e spectrum vs photon energy'],
['ss_pl', 's', '', 'plot the resulting single-e spectrum in a graph: ""- dont plot, "e"- show plot vs photon energy'],
#Multi-Electron Spectrum vs Photon Energy (taking into account e-beam emittance, energy spread and collection aperture size)
['sm', '', '', 'calculate multi-e spectrum vs photon energy', 'store_true'],
['sm_ei', 'f', 100.0, 'initial photon energy [eV] for multi-e spectrum vs photon energy calculation'],
['sm_ef', 'f', 20000.0, 'final photon energy [eV] for multi-e spectrum vs photon energy calculation'],
['sm_ne', 'i', 10000, 'number of points vs photon energy for multi-e spectrum vs photon energy calculation'],
['sm_x', 'f', 0.0, 'horizontal center position [m] for multi-e spectrum vs photon energy calculation'],
['sm_rx', 'f', 0.001, 'range of horizontal position / horizontal aperture size [m] for multi-e spectrum vs photon energy calculation'],
['sm_nx', 'i', 1, 'number of points vs horizontal position for multi-e spectrum vs photon energy calculation'],
['sm_y', 'f', 0.0, 'vertical center position [m] for multi-e spectrum vs photon energy calculation'],
['sm_ry', 'f', 0.001, 'range of vertical position / vertical aperture size [m] for multi-e spectrum vs photon energy calculation'],
['sm_ny', 'i', 1, 'number of points vs vertical position for multi-e spectrum vs photon energy calculation'],
['sm_mag', 'i', 1, 'magnetic field to be used for calculation of multi-e spectrum spectrum or intensity distribution: 1- approximate, 2- accurate'],
['sm_hi', 'i', 1, 'initial UR spectral harmonic to be taken into account for multi-e spectrum vs photon energy calculation'],
['sm_hf', 'i', 15, 'final UR spectral harmonic to be taken into account for multi-e spectrum vs photon energy calculation'],
['sm_prl', 'f', 1.0, 'longitudinal integration precision parameter for multi-e spectrum vs photon energy calculation'],
['sm_pra', 'f', 1.0, 'azimuthal integration precision parameter for multi-e spectrum vs photon energy calculation'],
['sm_meth', 'i', -1, 'method to use for spectrum vs photon energy calculation in case of arbitrary input magnetic field: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler", -1- dont use this accurate integration method (rather use approximate if possible)'],
['sm_prec', 'f', 0.01, 'relative precision for spectrum vs photon energy calculation in case of arbitrary input magnetic field (nominal value is 0.01)'],
['sm_nm', 'i', 1, 'number of macro-electrons for calculation of spectrum in case of arbitrary input magnetic field'],
['sm_na', 'i', 5, 'number of macro-electrons to average on each node at parallel (MPI-based) calculation of spectrum in case of arbitrary input magnetic field'],
['sm_ns', 'i', 5, 'saving periodicity (in terms of macro-electrons) for intermediate intensity at calculation of multi-electron spectrum in case of arbitrary input magnetic field'],
['sm_type', 'i', 1, 'calculate flux (=1) or flux per unit surface (=2)'],
['sm_pol', 'i', 6, 'polarization component to extract after calculation of multi-e flux or intensity: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['sm_rm', 'i', 1, 'method for generation of pseudo-random numbers for e-beam phase-space integration: 1- standard pseudo-random number generator, 2- Halton sequences, 3- LPtau sequences (to be implemented)'],
['sm_fn', 's', 'res_spec_me.dat', 'file name for saving calculated milti-e spectrum vs photon energy'],
['sm_pl', 's', '', 'plot the resulting spectrum-e spectrum in a graph: ""- dont plot, "e"- show plot vs photon energy'],
#to add options for the multi-e calculation from "accurate" magnetic field
#Power Density Distribution vs horizontal and vertical position
['pw', '', '', 'calculate SR power density distribution', 'store_true'],
['pw_x', 'f', 0.0, 'central horizontal position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_rx', 'f', 0.015, 'range of horizontal position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_nx', 'i', 100, 'number of points vs horizontal position for calculation of power density distribution'],
['pw_y', 'f', 0.0, 'central vertical position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_ry', 'f', 0.015, 'range of vertical position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_ny', 'i', 100, 'number of points vs vertical position for calculation of power density distribution'],
['pw_pr', 'f', 1.0, 'precision factor for calculation of power density distribution'],
['pw_meth', 'i', 1, 'power density computation method (1- "near field", 2- "far field")'],
['pw_zst', 'f', 0., 'initial longitudinal position along electron trajectory of power density distribution (effective if pow_sst < pow_sfi)'],
['pw_zfi', 'f', 0., 'final longitudinal position along electron trajectory of power density distribution (effective if pow_sst < pow_sfi)'],
['pw_mag', 'i', 1, 'magnetic field to be used for power density calculation: 1- approximate, 2- accurate'],
['pw_fn', 's', 'res_pow.dat', 'file name for saving calculated power density distribution'],
['pw_pl', 's', '', 'plot the resulting power density distribution in a graph: ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
#Single-Electron Intensity distribution vs horizontal and vertical position
['si', '', '', 'calculate single-e intensity distribution (without wavefront propagation through a beamline) vs horizontal and vertical position', 'store_true'],
#Single-Electron Wavefront Propagation
['ws', '', '', 'calculate single-electron (/ fully coherent) wavefront propagation', 'store_true'],
#Multi-Electron (partially-coherent) Wavefront Propagation
['wm', '', '', 'calculate multi-electron (/ partially coherent) wavefront propagation', 'store_true'],
['w_e', 'f', 9000.0, 'photon energy [eV] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ef', 'f', -1.0, 'final photon energy [eV] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ne', 'i', 1, 'number of points vs photon energy for calculation of intensity distribution'],
['w_x', 'f', 0.0, 'central horizontal position [m] for calculation of intensity distribution'],
['w_rx', 'f', 0.002, 'range of horizontal position [m] for calculation of intensity distribution'],
['w_nx', 'i', 2048, 'number of points vs horizontal position for calculation of intensity distribution'],
['w_y', 'f', 0.0, 'central vertical position [m] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ry', 'f', 0.002, 'range of vertical position [m] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ny', 'i', 2048, 'number of points vs vertical position for calculation of intensity distribution'],
['w_smpf', 'f', 1.0, 'sampling factor for calculation of intensity distribution vs horizontal and vertical position'],
['w_meth', 'i', 2, 'method to use for calculation of intensity distribution vs horizontal and vertical position: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"'],
['w_prec', 'f', 0.01, 'relative precision for calculation of intensity distribution vs horizontal and vertical position'],
['w_u', 'i', 1, 'electric field units: 0- arbitrary, 1- sqrt(Phot/s/0.1%bw/mm^2), 2- sqrt(J/eV/mm^2) or sqrt(W/mm^2), depending on representation (freq. or time)'],
['si_pol', 'i', 6, 'polarization component to extract after calculation of intensity distribution: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['si_type', 'i', 0, 'type of a characteristic to be extracted after calculation of intensity distribution: 0- Single-Electron Intensity, 1- Multi-Electron Intensity, 2- Single-Electron Flux, 3- Multi-Electron Flux, 4- Single-Electron Radiation Phase, 5- Re(E): Real part of Single-Electron Electric Field, 6- Im(E): Imaginary part of Single-Electron Electric Field, 7- Single-Electron Intensity, integrated over Time or Photon Energy'],
['w_mag', 'i', 1, 'magnetic field to be used for calculation of intensity distribution vs horizontal and vertical position: 1- approximate, 2- accurate'],
['si_fn', 's', 'res_int_se.dat', 'file name for saving calculated single-e intensity distribution (without wavefront propagation through a beamline) vs horizontal and vertical position'],
['si_pl', 's', '', 'plot the input intensity distributions in graph(s): ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
['ws_fni', 's', 'res_int_pr_se.dat', 'file name for saving propagated single-e intensity distribution vs horizontal and vertical position'],
['ws_pl', 's', '', 'plot the resulting intensity distributions in graph(s): ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
['wm_nm', 'i', 1000, 'number of macro-electrons (coherent wavefronts) for calculation of multi-electron wavefront propagation'],
['wm_na', 'i', 5, 'number of macro-electrons (coherent wavefronts) to average on each node for parallel (MPI-based) calculation of multi-electron wavefront propagation'],
['wm_ns', 'i', 5, 'saving periodicity (in terms of macro-electrons / coherent wavefronts) for intermediate intensity at multi-electron wavefront propagation calculation'],
['wm_ch', 'i', 0, 'type of a characteristic to be extracted after calculation of multi-electron wavefront propagation: #0- intensity (s0); 1- four Stokes components; 2- mutual intensity cut vs x; 3- mutual intensity cut vs y; 40- intensity(s0), mutual intensity cuts and degree of coherence vs X & Y'],
['wm_ap', 'i', 0, 'switch specifying representation of the resulting Stokes parameters: coordinate (0) or angular (1)'],
['wm_x0', 'f', 0.0, 'horizontal center position for mutual intensity cut calculation'],
['wm_y0', 'f', 0.0, 'vertical center position for mutual intensity cut calculation'],
['wm_ei', 'i', 0, 'integration over photon energy is required (1) or not (0); if the integration is required, the limits are taken from w_e, w_ef'],
['wm_rm', 'i', 1, 'method for generation of pseudo-random numbers for e-beam phase-space integration: 1- standard pseudo-random number generator, 2- Halton sequences, 3- LPtau sequences (to be implemented)'],
['wm_am', 'i', 0, 'multi-electron integration approximation method: 0- no approximation (use the standard 5D integration method), 1- integrate numerically only over e-beam energy spread and use convolution to treat transverse emittance'],
['wm_fni', 's', 'res_int_pr_me.dat', 'file name for saving propagated multi-e intensity distribution vs horizontal and vertical position'],
['wm_fbk', '', '', 'create backup file(s) with propagated multi-e intensity distribution vs horizontal and vertical position and other radiation characteristics', 'store_true'],
#to add options
['op_r', 'f', 20.0, 'longitudinal position of the first optical element [m]'],
# Former appParam:
['rs_type', 's', 'g', 'source type, (u) idealized undulator, (t), tabulated undulator, (m) multipole, (g) gaussian beam'],
#---Beamline optics:
# VFM: ellipsoidMirror
['op_VFM_hfn', 's', 'None', 'heightProfileFile'],
['op_VFM_dim', 's', 'x', 'orientation'],
['op_VFM_p', 'f', 50.0, 'firstFocusLength'],
['op_VFM_q', 'f', 0.4, 'focalLength'],
['op_VFM_ang', 'f', 0.003, 'grazingAngle'],
['op_VFM_amp_coef', 'f', 1.0, 'heightAmplification'],
['op_VFM_size_tang', 'f', 0.2, 'tangentialSize'],
['op_VFM_size_sag', 'f', 0.01, 'sagittalSize'],
['op_VFM_nvx', 'f', 0.0, 'normalVectorX'],
['op_VFM_nvy', 'f', 0.999995500003375, 'normalVectorY'],
['op_VFM_nvz', 'f', -0.002999995500002025, 'normalVectorZ'],
['op_VFM_tvx', 'f', 0.0, 'tangentialVectorX'],
['op_VFM_tvy', 'f', -0.002999995500002025, 'tangentialVectorY'],
['op_VFM_x', 'f', 0.0, 'horizontalOffset'],
['op_VFM_y', 'f', 0.0, 'verticalOffset'],
# VFM_HFM: drift
['op_VFM_HFM_L', 'f', 0.20000000000000284, 'length'],
# HFM: ellipsoidMirror
['op_HFM_hfn', 's', 'None', 'heightProfileFile'],
['op_HFM_dim', 's', 'x', 'orientation'],
['op_HFM_p', 'f', 50.0, 'firstFocusLength'],
['op_HFM_q', 'f', 0.2, 'focalLength'],
['op_HFM_ang', 'f', 0.003, 'grazingAngle'],
['op_HFM_amp_coef', 'f', 1.0, 'heightAmplification'],
['op_HFM_size_tang', 'f', 0.2, 'tangentialSize'],
['op_HFM_size_sag', 'f', 0.01, 'sagittalSize'],
['op_HFM_nvx', 'f', 0.999995500003375, 'normalVectorX'],
['op_HFM_nvy', 'f', 0.0, 'normalVectorY'],
['op_HFM_nvz', 'f', -0.002999995500002025, 'normalVectorZ'],
['op_HFM_tvx', 'f', -0.002999995500002025, 'tangentialVectorX'],
['op_HFM_tvy', 'f', 0.0, 'tangentialVectorY'],
['op_HFM_x', 'f', 0.0, 'horizontalOffset'],
['op_HFM_y', 'f', 0.0, 'verticalOffset'],
# HFM_Watchpoint: drift
['op_HFM_Watchpoint_L', 'f', 0.19999999999999574, 'length'],
# Watchpoint_Mask: drift
['op_Watchpoint_Mask_L', 'f', 0.20000000000000284, 'length'],
# Mask: mask
['op_Mask_delta', 'f', 1.0, 'refractiveIndex'],
['op_Mask_atten_len', 'f', 1.0, 'attenuationLength'],
['op_Mask_thick', 'f', 1.0, 'maskThickness'],
['op_Mask_grid_sh', 'f', 0, 'gridShape'],
['op_Mask_grid_dx', 'f', 5e-06, 'horizontalGridDimension'],
['op_Mask_grid_dy', 'f', 5e-06, 'verticalGridDimension'],
['op_Mask_pitch_x', 'f', 2e-05, 'horizontalGridPitch'],
['op_Mask_pitch_y', 'f', 2e-05, 'verticalGridPitch'],
['op_Mask_gridTiltAngle', 'f', 0.4363323129985824, 'gridTiltAngle'],
['op_Mask_hx', 'f', 7.319999999999999e-07, 'horizontalSamplingInterval'],
['op_Mask_hy', 'f', 7.319999999999999e-07, 'verticalSamplingInterval'],
['op_Mask_mask_x0', 'f', 0.0, 'horizontalMaskCoordinate'],
['op_Mask_mask_y0', 'f', 0.0, 'verticalMaskCoordinate'],
['op_Mask_mask_Nx', 'i', 1024, 'horizontalPixelsNumber'],
['op_Mask_mask_Ny', 'i', 1024, 'verticalPixelsNumber'],
['op_Mask_grid_nx', 'i', 21, 'horizontalGridsNumber'],
['op_Mask_grid_ny', 'i', 21, 'verticalGridsNumber'],
#---Propagation parameters
['op_VFM_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'VFM'],
['op_VFM_HFM_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'VFM_HFM'],
['op_HFM_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'HFM'],
['op_HFM_Watchpoint_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'HFM_Watchpoint'],
['op_Watchpoint_Mask_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Watchpoint_Mask'],
['op_Mask_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Mask'],
['op_fin_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'final post-propagation (resize) parameters'],
#[ 0]: Auto-Resize (1) or not (0) Before propagation
#[ 1]: Auto-Resize (1) or not (0) After propagation
#[ 2]: Relative Precision for propagation with Auto-Resizing (1. is nominal)
#[ 3]: Allow (1) or not (0) for semi-analytical treatment of the quadratic (leading) phase terms at the propagation
#[ 4]: Do any Resizing on Fourier side, using FFT, (1) or not (0)
#[ 5]: Horizontal Range modification factor at Resizing (1. means no modification)
#[ 6]: Horizontal Resolution modification factor at Resizing
#[ 7]: Vertical Range modification factor at Resizing
#[ 8]: Vertical Resolution modification factor at Resizing
#[ 9]: Type of wavefront Shift before Resizing (not yet implemented)
#[10]: New Horizontal wavefront Center position after Shift (not yet implemented)
#[11]: New Vertical wavefront Center position after Shift (not yet implemented)
#[12]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Horizontal Coordinate
#[13]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Vertical Coordinate
#[14]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Longitudinal Coordinate
#[15]: Optional: Orientation of the Horizontal Base vector of the Output Frame in the Incident Beam Frame: Horizontal Coordinate
#[16]: Optional: Orientation of the Horizontal Base vector of the Output Frame in the Incident Beam Frame: Vertical Coordinate
]
def main():
    """Parse SRW options, build the beamline optics, and run all calculations."""
    opts = srwl_bl.srwl_uti_parse_options(
        srwl_bl.srwl_uti_ext_options(varParam), use_sys_argv=True)
    optics = set_optics(opts)
    # Request single-electron intensity and wavefront plots vs x and y.
    opts.si = True
    opts.si_pl = 'xy'
    opts.ws = True
    opts.ws_pl = 'xy'
    approx_field = None
    if opts.rs_type == 'm':
        # Multipole source: assemble an approximate magnetic field container.
        approx_field = srwlib.SRWLMagFldC()
        approx_field.arXc.append(0)
        approx_field.arYc.append(0)
        approx_field.arMagFld.append(srwlib.SRWLMagFldM(
            opts.mp_field, opts.mp_order, opts.mp_distribution, opts.mp_len))
        approx_field.arZc.append(opts.mp_zc)
    srwl_bl.SRWLBeamline(_name=opts.name, _mag_approx=approx_field).calc_all(opts, optics)


main()
| {
"content_hash": "a1593a3613a04c41e5b416144d9515b0",
"timestamp": "",
"source": "github",
"line_count": 357,
"max_line_length": 440,
"avg_line_length": 70.12605042016807,
"alnum_prop": 0.6359097263830638,
"repo_name": "mkeilman/sirepo",
"id": "9ddba14cbf8e9a71389dc61c0101c875f30c4ccd",
"size": "25057",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/template/srw_generate_data/mask-example.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "152"
},
{
"name": "CSS",
"bytes": "261510"
},
{
"name": "HTML",
"bytes": "346870"
},
{
"name": "JavaScript",
"bytes": "2737116"
},
{
"name": "Opal",
"bytes": "38855"
},
{
"name": "Python",
"bytes": "1982222"
},
{
"name": "Shell",
"bytes": "13951"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from category.models import Category
class CategoryAdmin(admin.ModelAdmin):
    """Admin options for the Category model."""

    # Columns shown in the admin change-list view.
    list_display = ('id', 'name')
admin.site.register(Category, CategoryAdmin)
| {
"content_hash": "52980f72f827e81b7aa583fb729a6da4",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 44,
"avg_line_length": 21.333333333333332,
"alnum_prop": 0.7708333333333334,
"repo_name": "c-rhodes/hack2014",
"id": "78f4c710e4fb633bd2dae75fb254678db80ebd93",
"size": "192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hack2014/category/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "104715"
},
{
"name": "JavaScript",
"bytes": "2341"
},
{
"name": "Python",
"bytes": "46831"
},
{
"name": "Shell",
"bytes": "5099"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from .models import Location
from .models import Event
from .models import Resource
from .models import Discipline
class EventAdmin(admin.ModelAdmin):
    """Admin options for the Event model."""

    # Columns shown in the admin change-list view.
    list_display = ['event_name', 'setup_start', 'teardown_end']
# Register the scheduling models with the Django admin site; only Event
# gets a customized change list (via EventAdmin).
admin.site.register(Location)
admin.site.register(Event, EventAdmin)
admin.site.register(Resource)
admin.site.register(Discipline)
| {
"content_hash": "ef248e45fbb1021327f89ccd06fcc0a1",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 64,
"avg_line_length": 24.0625,
"alnum_prop": 0.787012987012987,
"repo_name": "bable5/schdlr",
"id": "e4c78922f0b852b988ab99a703c3780ea78e0b55",
"size": "385",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/schedule/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12692"
},
{
"name": "JavaScript",
"bytes": "179952"
},
{
"name": "PHP",
"bytes": "6147"
},
{
"name": "Puppet",
"bytes": "479"
},
{
"name": "Python",
"bytes": "11020"
},
{
"name": "Shell",
"bytes": "210"
}
],
"symlink_target": ""
} |
import argparse
from google.cloud import firestore
def seed_approver(email):
    """Create an active test "approver" document in Firestore for *email*."""
    db = firestore.Client()
    print("Seeding data into Google Cloud Project '{}'.".format(db.project))
    # Use an auto-generated document id; the payload mirrors the approver schema.
    db.collection("approvers").document().set({
        "kind": "approvers",
        "email": email,
        "active": True,
        "name": "Seeded test user",
    })
# Command-line entry point: read the approver email from argv and seed it.
parser = argparse.ArgumentParser(description='Seed a test "approver" resource')
parser.add_argument("email", help="email address of the seeded approver")
args = parser.parse_args()
email = args.email
seed_approver(email)
print("Approver with email {} added.".format(email))
| {
"content_hash": "89d9830f96b7b13c5e9f4a453c7d83e0",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 80,
"avg_line_length": 24.678571428571427,
"alnum_prop": 0.6743849493487699,
"repo_name": "GoogleCloudPlatform/emblem",
"id": "8b6ffadb9eb915a6f4f54b986d243a42dcd27b0b",
"size": "1267",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "content-api/data/seed_test_approver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4990"
},
{
"name": "Dockerfile",
"bytes": "5231"
},
{
"name": "HCL",
"bytes": "44794"
},
{
"name": "HTML",
"bytes": "33127"
},
{
"name": "JavaScript",
"bytes": "46125"
},
{
"name": "Python",
"bytes": "88953"
},
{
"name": "Shell",
"bytes": "32833"
}
],
"symlink_target": ""
} |
import os
import sys
import json
import copy
import psutil
import threading
import netifaces
import socket
import time
import signal
import Tkinter as tk
from macdivert import MacDivert
from tkMessageBox import showerror, showwarning
from enum import Defaults
from tkFileDialog import askopenfilename, askdirectory
from ctypes import POINTER, pointer, cast
from ctypes import (c_uint8, c_void_p, c_int32, c_char_p, c_int, c_float,
create_string_buffer, c_size_t, c_ssize_t, c_uint64)
# import pydevd
# pydevd.settrace('localhost', port=9999, stdoutToServer=True, stderrToServer=True)
__author__ = 'huangyan13@baidu.com'
class Flags(object):
    """Constants mirroring the libdivert/emulator C enums and flag bits."""

    # direction flags: packet travel direction relative to this host
    DIRECTION_IN = 0
    DIRECTION_OUT = 1
    DIRECTION_UNKNOWN = 2
    # feature flags: bit mask describing emulator state/options
    EMULATOR_IS_RUNNING = 1
    EMULATOR_DUMP_PCAP = (1 << 1)
    EMULATOR_RECHECKSUM = (1 << 2)
    # pipe flags: discriminators for each pipe (effect) type
    PIPE_DROP = 0
    PIPE_DELAY = 1
    PIPE_THROTTLE = 2
    PIPE_DISORDER = 3
    PIPE_BITERR = 4
    PIPE_DUPLICATE = 5
    PIPE_BANDWIDTH = 6
    PIPE_REINJECT = 7
    # buffer sizes; NOTE: the "EMULALTOR" spelling is a typo but is kept
    # because other code references this name
    EMULALTOR_BUF_SIZE = 8172
    DELAY_QUEUE_SIZE = 8172
class BasicPipe(object):
    """Base class for emulator pipes; caches the shared libdivert reference."""

    def __init__(self):
        self.handle = None  # C-side pipe handle, filled in by subclasses
        lib = Emulator.libdivert_ref
        if lib is None:
            raise RuntimeError("Should first instantiate an Emulator object")
        self._lib = lib
class DelayPipe(BasicPipe):
    """Pipe that delays matching packets by a (possibly time-varying) amount."""

    def __init__(self, delay_time, t=None,
                 queue_size=Flags.DELAY_QUEUE_SIZE,
                 ip_filter_obj=None, size_filter_obj=None):
        super(DelayPipe, self).__init__()
        # Declare the C prototype before the first call.
        create = self._lib.delay_pipe_create
        create.argtypes = [c_void_p, c_void_p, c_size_t,
                           POINTER(c_float), POINTER(c_float), c_size_t]
        create.restype = c_void_p
        num_points = len(delay_time)
        float_arr = c_float * num_points
        # Unwrap optional filter objects into raw C handles.
        ip_handle = None if ip_filter_obj is None else ip_filter_obj.handle
        size_handle = None if size_filter_obj is None else size_filter_obj.handle
        self.handle = create(ip_handle, size_handle, num_points,
                             float_arr(*list(t)) if t else None,
                             float_arr(*list(delay_time)),
                             queue_size)
class DropPipe(BasicPipe):
    """Pipe that drops matching packets with a (possibly time-varying) rate."""

    def __init__(self, drop_rate, t=None,
                 ip_filter_obj=None, size_filter_obj=None):
        super(DropPipe, self).__init__()
        # Declare the C prototype before the first call.
        create = self._lib.drop_pipe_create
        create.argtypes = [c_void_p, c_void_p, c_size_t,
                           POINTER(c_float), POINTER(c_float)]
        create.restype = c_void_p
        num_points = len(drop_rate)
        float_arr = c_float * num_points
        # Unwrap optional filter objects into raw C handles.
        ip_handle = None if ip_filter_obj is None else ip_filter_obj.handle
        size_handle = None if size_filter_obj is None else size_filter_obj.handle
        self.handle = create(ip_handle, size_handle, num_points,
                             float_arr(*list(t)) if t else None,
                             float_arr(*list(drop_rate)))
class BandwidthPipe(BasicPipe):
    """Pipe that limits throughput to a (possibly time-varying) bandwidth."""

    def __init__(self, t, bandwidth, queue_size=Flags.DELAY_QUEUE_SIZE,
                 ip_filter_obj=None, size_filter_obj=None):
        super(BandwidthPipe, self).__init__()
        # Declare the C prototype before the first call.
        create = self._lib.bandwidth_pipe_create
        create.argtypes = [c_void_p, c_void_p, c_size_t,
                           POINTER(c_float), POINTER(c_float), c_size_t]
        create.restype = c_void_p
        num_points = len(t)
        float_arr = c_float * num_points
        # Unwrap optional filter objects into raw C handles.
        ip_handle = None if ip_filter_obj is None else ip_filter_obj.handle
        size_handle = None if size_filter_obj is None else size_filter_obj.handle
        self.handle = create(ip_handle, size_handle,
                             num_points, float_arr(*list(t)),
                             float_arr(*list(bandwidth)),
                             queue_size)
class BiterrPipe(BasicPipe):
    """Pipe that flips bits in matching packets with a given error rate."""

    def __init__(self, t, biterr_rate, max_flip, ip_filter_obj=None, size_filter_obj=None):
        super(BiterrPipe, self).__init__()
        # Declare the C prototype before the first call.
        create = self._lib.biterr_pipe_create
        create.argtypes = [c_void_p, c_void_p, c_size_t,
                           POINTER(c_float), POINTER(c_float), c_int]
        create.restype = c_void_p
        num_points = len(t)
        float_arr = c_float * num_points
        # Unwrap optional filter objects into raw C handles.
        ip_handle = None if ip_filter_obj is None else ip_filter_obj.handle
        size_handle = None if size_filter_obj is None else size_filter_obj.handle
        self.handle = create(ip_handle, size_handle,
                             num_points, float_arr(*list(t)),
                             float_arr(*list(biterr_rate)), max_flip)
class DisorderPipe(BasicPipe):
    """Pipe that reorders matching packets with a given disorder rate."""

    def __init__(self, t, disorder_rate, queue_size, max_disorder,
                 ip_filter_obj=None, size_filter_obj=None):
        super(DisorderPipe, self).__init__()
        # Declare the C prototype before the first call.
        create = self._lib.disorder_pipe_create
        create.argtypes = [c_void_p, c_void_p, c_size_t,
                           POINTER(c_float), POINTER(c_float), c_size_t, c_int]
        create.restype = c_void_p
        num_points = len(t)
        float_arr = c_float * num_points
        # Unwrap optional filter objects into raw C handles.
        ip_handle = None if ip_filter_obj is None else ip_filter_obj.handle
        size_handle = None if size_filter_obj is None else size_filter_obj.handle
        self.handle = create(ip_handle, size_handle,
                             num_points, float_arr(*list(t)),
                             float_arr(*list(disorder_rate)),
                             queue_size, max_disorder)
class DuplicatePipe(BasicPipe):
    """Pipe that duplicates matching packets with a given rate."""

    def __init__(self, t, duplicate_rate, max_duplicate,
                 ip_filter_obj=None, size_filter_obj=None):
        super(DuplicatePipe, self).__init__()
        # Declare the C prototype before the first call.
        create = self._lib.duplicate_pipe_create
        create.argtypes = [c_void_p, c_void_p, c_size_t,
                           POINTER(c_float), POINTER(c_float), c_size_t]
        create.restype = c_void_p
        num_points = len(t)
        float_arr = c_float * num_points
        # Unwrap optional filter objects into raw C handles.
        ip_handle = None if ip_filter_obj is None else ip_filter_obj.handle
        size_handle = None if size_filter_obj is None else size_filter_obj.handle
        self.handle = create(ip_handle, size_handle, num_points,
                             float_arr(*list(t)),
                             float_arr(*list(duplicate_rate)),
                             max_duplicate)
class ThrottlePipe(BasicPipe):
    """Pipe that holds matching packets during [t_start, t_end) windows."""

    def __init__(self, t_start, t_end, queue_size, ip_filter_obj=None, size_filter_obj=None):
        super(ThrottlePipe, self).__init__()
        # Declare the C prototype before the first call.
        create = self._lib.throttle_pipe_create
        create.argtypes = [c_void_p, c_void_p, c_size_t,
                           POINTER(c_float), POINTER(c_float), c_size_t]
        create.restype = c_void_p
        num_points = len(t_start)
        float_arr = c_float * num_points
        # Unwrap optional filter objects into raw C handles.
        ip_handle = None if ip_filter_obj is None else ip_filter_obj.handle
        size_handle = None if size_filter_obj is None else size_filter_obj.handle
        self.handle = create(ip_handle, size_handle,
                             num_points, float_arr(*list(t_start)),
                             float_arr(*list(t_end)),
                             queue_size)
class Emulator(object):
    """ctypes front-end for the libdivert network emulator.

    Owns a divert handle plus an emulator configuration, wires the
    emulator callback into the divert loop, and runs that loop on a
    background thread.  Pipes (delay/drop/...) are attached per direction.
    """

    # Shared reference to the loaded libdivert library (set by first instance).
    libdivert_ref = None

    # ctypes argument prototypes for the emulator C API.
    emulator_argtypes = {
        'emulator_callback': [c_void_p, c_void_p, c_char_p, c_char_p],
        'emulator_create_config': [c_void_p],
        'emulator_destroy_config': [c_void_p],
        'emulator_flush': [c_void_p],
        'emulator_add_pipe': [c_void_p, c_void_p, c_int],
        'emulator_del_pipe': [c_void_p, c_void_p, c_int],
        'emulator_add_flag': [c_void_p, c_uint64],
        'emulator_clear_flags': [c_void_p],
        'emulator_clear_flag': [c_void_p, c_uint64],
        'emulator_set_dump_pcap': [c_void_p, c_char_p],
        'emulator_set_pid_list': [c_void_p, POINTER(c_int32), c_ssize_t],
        'emulator_config_check': [c_void_p, c_char_p],
        'emulator_is_running': [c_void_p],
        'emulator_data_size': [c_void_p, c_int],
        'emulator_create_ip_filter': [c_char_p, c_char_p, c_char_p, c_char_p, c_int, c_int],
        'emulator_create_size_filter': [c_size_t, POINTER(c_size_t), POINTER(c_float)],
    }

    # ctypes return types for the emulator C API.
    emulator_restypes = {
        'emulator_callback': None,
        'emulator_create_config': c_void_p,
        'emulator_destroy_config': None,
        'emulator_flush': None,
        'emulator_add_pipe': c_int,
        'emulator_del_pipe': c_int,
        'emulator_add_flag': None,
        'emulator_clear_flags': None,
        'emulator_clear_flag': None,
        'emulator_set_dump_pcap': None,
        'emulator_set_pid_list': None,
        'emulator_config_check': c_int,
        'emulator_is_running': c_int,
        'emulator_data_size': c_uint64,
        'emulator_create_ip_filter': c_void_p,
        'emulator_create_size_filter': c_void_p,
    }

    class PacketIPFilter(object):
        """Wrapper for a C-side filter matching source/destination IP and port."""

        def __init__(self, ip_src, ip_src_mask, ip_dst,
                     ip_dst_mask, port_src, port_dst):
            lib = Emulator.libdivert_ref
            self.handle = lib.emulator_create_ip_filter(ip_src, ip_src_mask, ip_dst,
                                                        ip_dst_mask, port_src, port_dst)

    class PacketSizeFilter(object):
        """Wrapper for a C-side filter matching packets by size with given rates."""

        def __init__(self, size_arr, rate_arr):
            # Each size bucket needs a matching rate entry.
            if len(size_arr) != len(rate_arr):
                raise RuntimeError('Invalid packet size filter')
            arr_len = len(size_arr)
            lib = Emulator.libdivert_ref
            self.handle = lib.emulator_create_size_filter(len(size_arr),
                                                          (c_size_t * arr_len)(*size_arr),
                                                          (c_float * arr_len)(*rate_arr))

    def __init__(self):
        """Load libdivert (once per process), then create handle and config."""
        # get reference for libdivert
        if Emulator.libdivert_ref is None:
            lib_obj = MacDivert()
            Emulator.libdivert_ref = lib_obj.get_reference()
            # initialize prototype of functions
            self._init_func_proto()
        # create divert handle and emulator config
        self.handle, self.config = self._create_config()
        # background thread for divert loop (None while stopped)
        self.thread = None
        # PIDs (int) and/or process-name substrings (str) to divert
        self.pid_list = []
        # buffer receiving error text from the C side
        self.errmsg = create_string_buffer(Defaults.DIVERT_ERRBUF_SIZE)
        self.quit_loop = False
        self.is_waiting = False

    def __del__(self):
        # NOTE(review): raising from __del__ is unreliable (exceptions in
        # finalizers are only reported, not propagated); behavior kept as-is.
        lib = self.libdivert_ref
        lib.emulator_destroy_config(self.config)
        if lib.divert_close(self.handle) != 0:
            raise RuntimeError('Divert handle could not be cleaned.')

    def _init_func_proto(self):
        """Attach argtypes/restypes from the class tables to the C functions."""
        # set the types of parameters
        for func_name, argtypes in self.emulator_argtypes.items():
            # a missing symbol means the wrong library was loaded
            if not hasattr(self.libdivert_ref, func_name):
                raise RuntimeError("Not a valid libdivert library")
            getattr(self.libdivert_ref, func_name).argtypes = argtypes
        # set the types of return value
        for func_name, restype in self.emulator_restypes.items():
            getattr(self.libdivert_ref, func_name).restype = restype

    def _create_config(self):
        """Create and activate the divert handle plus its emulator config.

        Returns (divert_handle, config); raises RuntimeError on any failure.
        """
        lib = self.libdivert_ref
        # create divert handle
        divert_handle = lib.divert_create(0, 0)
        if not divert_handle:
            raise RuntimeError('Fail to create divert handle.')
        # create config handle
        config = lib.emulator_create_config(divert_handle,
                                            Flags.EMULALTOR_BUF_SIZE)
        if not config:
            raise RuntimeError('Fail to create emulator configuration')
        # set callback function and callback data for divert handle
        if lib.divert_set_callback(divert_handle,
                                   lib.emulator_callback,
                                   config) != 0:
            raise RuntimeError(divert_handle.errmsg)
        # activate divert handle
        if lib.divert_activate(divert_handle) != 0:
            raise RuntimeError(divert_handle.errmsg)
        return divert_handle, config

    def _divert_loop(self, filter_str):
        """Thread target: resolve target PIDs, then run the divert loop."""
        # first add all PIDs into list
        self._wait_pid()
        if self.quit_loop:
            # stop() was requested while still waiting for the processes
            self.quit_loop = False
            return
        lib = self.libdivert_ref
        lib.divert_loop(self.handle, -1)

    def _divert_loop_stop(self):
        """Stop the divert loop, wait for it, then flush queued packets."""
        lib = self.libdivert_ref
        lib.divert_loop_stop(self.handle)
        lib.divert_loop_wait(self.handle)
        print('Emulator stop OK')
        lib.emulator_flush(self.config)
        print('Emulator flush OK')

    def add_pipe(self, pipe, direction=Flags.DIRECTION_IN):
        """Attach *pipe* to traffic flowing in the given direction."""
        lib = self.libdivert_ref
        if lib.emulator_add_pipe(self.config, pipe.handle, direction) != 0:
            raise RuntimeError("Pipe already exists.")

    def del_pipe(self, pipe, free_mem=False):
        """Detach *pipe*; optionally free its C-side memory."""
        lib = self.libdivert_ref
        if lib.emulator_del_pipe(self.config, pipe.handle, int(free_mem)) != 0:
            raise RuntimeError("Pipe do not exists.")

    def add_pid(self, pid):
        """Register a PID (int) or process-name substring (str) to divert."""
        self.pid_list.append(pid)

    def set_device(self, dev_name):
        """Capture on a specific network device (e.g. the sharing bridge)."""
        lib = self.libdivert_ref
        if lib.divert_set_device(self.handle, dev_name) != 0:
            raise RuntimeError('Could not set capture device.')

    def _wait_pid(self):
        """Poll running processes until every name in pid_list maps to a PID,
        then hand the resolved PID list to the C side."""
        # first wait until all processes are started
        proc_list = filter(lambda x: isinstance(x, str) or isinstance(x, unicode), self.pid_list)
        real_pid_list = filter(lambda x: isinstance(x, int), self.pid_list)
        self.is_waiting = True
        while not self.quit_loop:
            if len(real_pid_list) == len(self.pid_list):
                break
            for proc in psutil.process_iter():
                proc_name = proc.name().lower()
                for name in proc_list:
                    if name.lower() in proc_name:
                        real_pid_list.append(proc.pid)
            print('Waiting for process: %s' % ', '.join(proc_list))
            time.sleep(0.2)
        self.is_waiting = False
        if self.quit_loop:
            return
        print('Found PID: %s' % ', '.join(map(str, real_pid_list)))
        lib = self.libdivert_ref
        arr_len = len(real_pid_list)
        arr_type = c_int32 * arr_len
        lib.emulator_set_pid_list(self.config, arr_type(*real_pid_list), arr_len)

    def set_dump(self, directory):
        """Dump processed traffic as .pcap files into *directory*.

        Raises RuntimeError when *directory* is not an existing directory.
        """
        lib = self.libdivert_ref
        # BUG FIX: previously tested the function object `os.path.isdir`
        # (always truthy) instead of calling it, so the check never fired.
        if not os.path.isdir(directory):
            raise RuntimeError('Invalid save position.')
        lib.emulator_set_dump_pcap(self.config, directory)

    def start(self, filter_str=''):
        """Validate the config, apply the ipfw filter, start the loop thread."""
        # first check the config
        lib = self.libdivert_ref
        if lib.emulator_config_check(self.config, self.errmsg) != 0:
            raise RuntimeError('Invalid configuration:\n%s' % self.errmsg.value)
        print('Config check OK')
        # then apply filter string
        if filter_str:
            if lib.divert_update_ipfw(self.handle, filter_str) != 0:
                raise RuntimeError(self.handle.errmsg)
        # start a new thread to run emulator
        self.thread = threading.Thread(target=self._divert_loop, args=(filter_str,))
        self.thread.start()

    def stop(self):
        """Stop the loop thread; raises RuntimeError if it will not exit."""
        # if emulator is waiting on PIDs
        # then just use a quit loop flag
        if self.is_waiting:
            self.quit_loop = True
        else:
            self._divert_loop_stop()
        self.thread.join(timeout=1.0)
        if self.thread.isAlive():
            raise RuntimeError('Divert loop failed to stop.')
        self.thread = None

    @property
    def is_looping(self):
        """True while the background divert-loop thread is alive."""
        return self.thread is not None and self.thread.isAlive()

    def data_size(self, direction):
        """Return the byte count processed in the given direction."""
        lib = self.libdivert_ref
        return lib.emulator_data_size(self.config, direction)
class EmulatorGUI(object):
    """Tkinter front-end for configuring and running the network Emulator."""

    # GUI operating modes: emulate local traffic vs shared-WiFi (router) traffic.
    LOCAL_MODE = 0
    ROUTER_MODE = 1
    # Placeholder text shown in the process-list entry widget.
    prompt_str = 'PID / comma separated process name'
    # Default capture device used in router (WiFi-sharing) mode.
    default_device = 'bridge100'
    # Instructions shown when the libdivert kernel extension cannot load.
    kext_errmsg = """
    Kernel extension load failed.
    Please check if you have root privilege on your Mac.
    Since we do not have a valid developer certificate,
    you should manually disable the kernel extension protection.
    For Mac OS X 10.11:
    1. Start your computer from recovery mode: restart your Mac
    and hold down the Command and R keys at startup.
    2. Run "csrutil enable --without kext" under recovery mode.
    3. Reboot.
    For Mac OS X 10.10:
    1. Run "sudo nvram boot-args=kext-dev-mode=1" from terminal.
    2. Reboot.
    """
    # Maps the "pipe" field of a JSON config entry to its pipe class.
    pipe_name2type = {
        'drop': DropPipe,
        'delay': DelayPipe,
        'biterr': BiterrPipe,
        'disorder': DisorderPipe,
        'throttle': ThrottlePipe,
        'duplicate': DuplicatePipe,
        'bandwidth': BandwidthPipe,
    }
def exit_func(self):
if self.emulator is not None:
try:
self.emulator.stop()
self.emulator = None
except Exception as e:
print e.message
self._flush_ipfw()
self.master.quit()
self.master.destroy()
def _flush_ipfw(self):
if Emulator.libdivert_ref is not None:
buf = create_string_buffer(256)
lib = Emulator.libdivert_ref
lib.ipfw_flush(buf)
def decide_iface(self):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("gmail.com", 80))
local_ip = s.getsockname()[0]
s.close()
except:
showwarning('Network Error',
('Your host machine may not have a valid network connection.\n'
'You should **manually** choose your network device name in filter rule.'))
return
iface_lst = netifaces.interfaces()
for iface in iface_lst:
addrs = netifaces.ifaddresses(iface)
if netifaces.AF_INET in addrs:
addr_dict = addrs[netifaces.AF_INET][0]
if 'addr' in addr_dict:
if addr_dict['addr'] == local_ip:
print 'Found activate network interface: %s' % iface
self.iface = iface
return
    def __init__(self, master):
        """Build the GUI on *master*, verify root privilege, and load libdivert.

        Withdraws and destroys the window early when not running as root or
        when the kernel extension cannot be loaded.
        """
        self.master = master
        self.emulator = None
        # Maps config file path -> parsed JSON pipe list.
        self.conf_dict = {}
        self.conf_name = tk.StringVar()
        self.conf_frame = None
        master.title("Wireless Network Reproduction")
        master.protocol("WM_DELETE_WINDOW", self.exit_func)
        # first check root privilege (divert sockets require root)
        if os.getuid() != 0:
            self.master.withdraw()
            showerror('Privilege Error', 'You should run this program as root.')
            self.master.destroy()
            return
        # then find the current activate network interface
        self.iface = '<network device name>'
        self.decide_iface()
        self.default_rule = 'ip from any to any via %s' % self.iface
        self.inbound_list = []
        self.outbound_list = []
        # Tk variables backing the entry widgets.
        self.filter_str = tk.StringVar(value=self.default_rule)
        self.proc_str = tk.StringVar(value=self.prompt_str)
        self.dev_str = tk.StringVar()
        self.dump_pos = tk.StringVar()
        self.divert_unknown = tk.IntVar(value=1)
        self.start_btn = None
        self.filter_entry = None
        self.proc_entry = None
        self.dev_entry = None
        # NOTE(review): this passes LOCAL_MODE as IntVar's *master* argument,
        # not its value — probably intended tk.IntVar(value=self.LOCAL_MODE);
        # confirm before changing.
        self.mode = tk.IntVar(self.LOCAL_MODE)
        self.init_GUI()
        # Probe-construct an Emulator once so kext/load errors surface now.
        try:
            Emulator()
        except OSError:
            # Kernel extension missing: show the manual-fix instructions.
            def close_func():
                self.master.quit()
                self.master.destroy()
            self.master.withdraw()
            top = tk.Toplevel(self.master)
            top.title('Kernel Extension Error')
            tk.Message(top, text=self.kext_errmsg)\
                .pack(side=tk.TOP, fill=tk.BOTH, expand=True)
            tk.Button(top, text="Close", command=close_func).pack(side=tk.TOP)
            top.protocol("WM_DELETE_WINDOW", close_func)
        except Exception as e:
            self.master.withdraw()
            showerror('Emulator Loading Error', e.message)
            self.master.destroy()
    def init_GUI(self):
        """Lay out all widgets: config picker, dump path, filter, process list,
        mode selector, and the start/stop button (top to bottom)."""
        # Row 1: "Add Configuration" button plus radio buttons for loaded configs.
        new_frame = tk.Frame(master=self.master)
        tk.Button(master=new_frame, text='Add Configuration',
                  command=self.load_data_file).pack(side=tk.LEFT)
        self.conf_frame = tk.Frame(master=new_frame)
        self.conf_frame.pack(side=tk.RIGHT, fill=tk.X, expand=True)
        new_frame.pack(side=tk.TOP, fill=tk.X, expand=True)
        # Row 2: .pcap dump directory entry and chooser.
        new_frame = tk.Frame(master=self.master)
        tk.Label(master=new_frame, text='Dump .pcap to').pack(side=tk.LEFT)
        tk.Entry(master=new_frame, textvariable=self.dump_pos)\
            .pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
        tk.Button(master=new_frame, text='Select',
                  command=self.load_dump_pos).pack(side=tk.LEFT)
        new_frame.pack(side=tk.TOP, fill=tk.X, expand=True)
        # Row 3: ipfw filter rule entry.
        new_frame = tk.Frame(master=self.master)
        tk.Label(master=new_frame, text='Filter Rule').pack(side=tk.LEFT)
        self.filter_entry = tk.Entry(master=new_frame, textvariable=self.filter_str, font='Monaco')
        self.filter_entry.pack(side=tk.LEFT, fill=tk.X, expand=True)
        new_frame.pack(side=tk.TOP, fill=tk.X, expand=True)
        # Row 4: process list entry plus "divert unknown traffic" checkbox.
        new_frame = tk.Frame(master=self.master)
        tk.Label(master=new_frame, text='Process List').pack(side=tk.LEFT)
        self.proc_entry = tk.Entry(master=new_frame, textvariable=self.proc_str,
                                   font='Monaco', width=len(self.proc_str.get()))
        self.proc_entry.pack(side=tk.LEFT, fill=tk.X, expand=True)
        tk.Label(master=new_frame, text='unknown').pack(side=tk.LEFT)
        tk.Checkbutton(master=new_frame, variable=self.divert_unknown).pack(side=tk.LEFT)
        new_frame.pack(side=tk.TOP, fill=tk.X, expand=True)
        # Row 5: Local/WiFi mode radio buttons, device entry, ipfw reset button.
        new_frame = tk.Frame(master=self.master)
        tk.Label(master=new_frame, text='Mode').pack(side=tk.LEFT)
        tk.Radiobutton(master=new_frame, text="Local", variable=self.mode,
                       value=0, command=self._switch_mode).pack(side=tk.LEFT)
        tk.Radiobutton(master=new_frame, text="WiFi", variable=self.mode,
                       value=1, command=self._switch_mode).pack(side=tk.LEFT)
        self.dev_entry = tk.Entry(master=new_frame, textvariable=self.dev_str,
                                  state=tk.DISABLED, font='Monaco', width=12)
        self.dev_entry.pack(side=tk.LEFT)
        tk.Button(master=new_frame, text='Fix network',
                  command=self._flush_ipfw).pack(side=tk.LEFT)
        new_frame.pack(side=tk.TOP, fill=tk.X, expand=True)
        # Row 6: the start/stop toggle button.
        new_frame = tk.Frame(master=self.master)
        self.start_btn = tk.Button(master=new_frame, text='Start',
                                   command=self.start, font=('Monaco', 20))
        self.start_btn.pack(side=tk.TOP)
        new_frame.pack(side=tk.TOP, fill=tk.X, expand=True)
def _switch_mode(self):
if self.mode.get() == self.LOCAL_MODE:
# local mode
self.dev_str.set('')
self.dev_entry.config(state=tk.DISABLED)
self.filter_entry.config(state=tk.NORMAL)
self.proc_entry.config(state=tk.NORMAL)
self.filter_str.set(self.default_rule)
self.proc_str.set(self.prompt_str)
elif self.mode.get() == self.ROUTER_MODE:
self.dev_entry.config(state=tk.NORMAL)
self.dev_str.set(self.default_device)
self.filter_str.set('ip from any to any')
self.proc_str.set('')
self.filter_entry.config(state=tk.DISABLED)
self.proc_entry.config(state=tk.DISABLED)
else:
raise RuntimeError('Unknown Mode!')
def load_data_file(self):
dir_name, file_name = os.path.split(__file__)
dir_name = os.path.join(dir_name, 'examples')
file_path = askopenfilename(title='Choose .json file', initialdir=dir_name)
if file_path and os.path.isfile(file_path):
try:
_, fname = os.path.split(file_path)
with open(file_path, 'r') as fid:
data = fid.read()
self.conf_dict[file_path] = json.loads(data)
fname_sec = fname.split('.')
if len(fname_sec) > 1:
fname = '.'.join(fname_sec[:-1])
tk.Radiobutton(self.conf_frame, text=fname,
variable=self.conf_name,
value=file_path).pack(side=tk.LEFT)
self.conf_name.set(file_path)
except Exception as e:
showerror(title='Open file',
message='Unable to load json: %s' % e.message)
def load_dump_pos(self):
dir_name, file_name = os.path.split(__file__)
dir_name = os.path.join(dir_name, 'examples')
dir_path = askdirectory(title='Choose dump position',
initialdir=dir_name)
self.dump_pos.set(dir_path)
    def start(self):
        """Toggle the emulator: start it when stopped, stop it when running.

        Also flips the button label between 'Start' and 'Stop'.  Shows an
        error dialog (and resets state) on validation or runtime failure.
        """
        # A configuration file must have been loaded and selected.
        if self.conf_name.get() not in self.conf_dict:
            showerror(title='Configuration Error',
                      message='No available conf file.')
            return
        # The process-list entry must not still show the placeholder text.
        if self.proc_str.get() == self.prompt_str:
            showerror(title='Process/PID Error',
                      message='You should set legal PIDs or leave it blank.')
            return
        if self.emulator is None:
            # Not running: build a fresh Emulator, load config, start loop.
            try:
                self.emulator = Emulator()
                self._load_config()
                self.emulator.start(self.filter_str.get())
                self.start_btn.config(text='Stop')
            except Exception as e:
                self.emulator = None
                showerror(title='Runtime error',
                          message='Unable to start emulator:\n%s' % e.message)
        else:
            # Running: stop and discard the current emulator.
            try:
                self.emulator.stop()
                self.emulator = None
                self.start_btn.config(text='Start')
            except Exception as e:
                self.emulator = None
                showerror(title='Runtime error',
                          message='Unable to stop emulator:\n%s' % e.message)
def _load_config(self):
if self.emulator is None:
return
# set dump position
dump_path = self.dump_pos.get()
if dump_path and os.path.isdir(dump_path):
self.emulator.set_dump(dump_path)
# set emulation device
dev_name = self.dev_str.get()
if dev_name:
self.emulator.set_device(dev_name)
# set pid list if not empty
if self.mode.get() == self.LOCAL_MODE:
pid_str = self.proc_str.get().strip()
if pid_str and pid_str != self.prompt_str:
if self.divert_unknown.get():
self.emulator.add_pid(-1)
for pid in map(lambda x: x.strip(), pid_str.split(',')):
try:
pid_int = int(pid)
self.emulator.add_pid(pid_int)
except:
self.emulator.add_pid(pid)
elif self.mode.get() == self.ROUTER_MODE:
# this is a fake PID, nothing would match
self.emulator.add_pid(-2)
else:
raise RuntimeError("Unknown Mode!")
# finally load all pipes
for pipe in copy.deepcopy(self.conf_dict[self.conf_name.get()]):
if not isinstance(pipe, dict):
raise TypeError('Invalid configuration')
pipe_name = pipe.pop('pipe', None)
if not pipe_name:
raise RuntimeError('Configuration do not have pipe type')
direction = pipe.pop('direction', None)
if not direction:
raise RuntimeError('Configuration do not have direction field')
if direction == "out":
dir_flag = Flags.DIRECTION_OUT
elif direction == "in":
dir_flag = Flags.DIRECTION_IN
else:
raise RuntimeError('Unknown direction flag')
ip_filter = self._create_ip_filter(pipe.pop('ip_filter', None))
size_filter = self._create_size_filter(pipe.pop('size_filter', None))
try:
pipe_type = self.pipe_name2type[pipe_name.lower()]
except:
raise RuntimeError('Invalid pipe type')
pipe_obj = pipe_type(ip_filter_obj=ip_filter,
size_filter_obj=size_filter, **pipe)
self.emulator.add_pipe(pipe_obj, dir_flag)
def _create_size_filter(self, filter_dict):
if not filter_dict:
return None
size_arr = filter_dict['size']
rate_arr = filter_dict['rate']
return Emulator.PacketSizeFilter(size_arr, rate_arr)
def _create_ip_filter(self, filter_dict):
if not filter_dict:
return None
src_str = filter_dict['src']
dst_str = filter_dict['dst']
strip_func = lambda x: x.strip()
src_addr, port_src = map(strip_func, src_str.split(':'))
src_addr, src_mask = map(strip_func, src_addr.split('/'))
dst_addr, port_dst = map(strip_func, dst_str.split(':'))
dst_addr, dst_mask = map(strip_func, dst_addr.split('/'))
return Emulator.PacketIPFilter(src_addr, src_mask,
dst_addr, dst_mask,
int(port_src), int(port_dst))
    def mainloop(self):
        # Delegate to Tk's event loop; blocks until the window is closed.
        self.master.mainloop()
if __name__ == '__main__':
pid_num = 0
try:
pid_num = int(sys.argv[1])
except Exception as e:
print 'Exception: %s' % e.message
print 'Usage: python emulator.py <PID>'
exit(-1)
emulator = Emulator()
emulator.add_pid(pid_num)
emulator.add_pid(-1)
emulator.set_dump('/Users/baidu/Downloads')
emulator.add_pipe(DelayPipe([0, 10], [0.1, 0.6], 1024), Flags.DIRECTION_IN)
is_looping = True
# register signal handler
def sig_handler(signum, frame):
print 'Catch signal: %d' % signum
global is_looping
is_looping = False
signal.signal(signal.SIGINT, sig_handler)
signal.signal(signal.SIGTSTP, sig_handler)
perMB = 1024 * 1024
trans_size = 0
# start loop
emulator.start('ip from any to any via en0')
while is_looping:
data_size = emulator.data_size(Flags.DIRECTION_IN)
if data_size > 5 * perMB:
print 'Finish'
break
if data_size > (trans_size + 1) * perMB:
trans_size = data_size / perMB
print 'Transfer %d MB data.' % trans_size
time.sleep(0.5)
# stop loop
emulator.stop()
print 'Program exited.'
| {
"content_hash": "bcdb6db9c66196d2ca0a2b2d3dac199d",
"timestamp": "",
"source": "github",
"line_count": 784,
"max_line_length": 100,
"avg_line_length": 41.732142857142854,
"alnum_prop": 0.5647350082523381,
"repo_name": "FinalTheory/wireless-network-reproduction",
"id": "f2093ecaa82c98d615aacd5695ef1c1c1bfa451e",
"size": "32736",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "macdivert/emulator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "56962"
}
],
"symlink_target": ""
} |
from .core import PanState
| {
"content_hash": "78fbd5ad0fd2d282329f2da7a6d16354",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 26,
"avg_line_length": 27,
"alnum_prop": 0.8148148148148148,
"repo_name": "fmin2958/POCS",
"id": "d8c6e12c30ee02289f3b7fda5da845b26c2e3e05",
"size": "27",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "panoptes/state_machine/states/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "35742"
},
{
"name": "HTML",
"bytes": "88175"
},
{
"name": "JavaScript",
"bytes": "436727"
},
{
"name": "Python",
"bytes": "303564"
}
],
"symlink_target": ""
} |
from .basededatos import BaseDeDatos
class PerDocumento(BaseDeDatos):
    """Persistence layer for rows of the ``documentos`` table."""

    def obtener_uno(self, id_):
        """
        Fetch and return the document row matching the given id.

        :param id_: int >= 0
        :return: list of matching rows, or None if id_ is negative
        """
        if id_ >= 0:
            id_ = (id_,)
            sql = 'SELECT * FROM documentos WHERE id=?'
            return list(self.obtener(sql, id_))
        else:
            # User-facing message, intentionally kept in Spanish.
            print 'El parámetro debe ser mayor o igual a 0.'
            return None

    def obtener_listado(self, **kwargs):
        """
        Fetch and return a listing of rows matching the given filters.

        :param kwargs: dict
        :return: dict

        NOTE(review): not implemented yet; callers currently get None.
        """
        pass

    def agregar_objeto(self, obj):
        """
        Serialize an object and insert it as a new row.

        :param obj: object with ``tipo``, ``numero`` and ``baja`` attributes
        :return: int — primary key of the inserted row
        """
        sql = 'INSERT INTO documentos VALUES (null, ?, ?, ?)'
        pk = self.salvar(sql, (obj.tipo, obj.numero, obj.baja))
        return pk

    def actualizar_objeto(self, obj):
        """
        Serialize an object and update its corresponding row.

        :param obj: object (must expose tipo, numero, baja and pk)
        :return: whatever ``self.actualizar`` returns
        """
        sql = 'UPDATE documentos SET tipo = ?, numero = ?, baja = ? ' \
              'WHERE id = ?'
        return self.actualizar(sql, (obj.tipo, obj.numero, obj.baja, obj.pk))

    def baja_objeto(self, obj):
        """
        Soft-delete: set the ``baja`` flag on the row identified by obj.pk.

        :param obj: object
        :return: bool
        """
        sql = 'UPDATE documentos SET baja = ? WHERE id = ?'
        return self.actualizar(sql, (1, obj.pk))
| {
"content_hash": "9c733bb95ab89e2ccc8f0b39b7ecf147",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 78,
"avg_line_length": 30.43859649122807,
"alnum_prop": 0.5400576368876081,
"repo_name": "gabofer82/taller_programacion_2017",
"id": "1313df4c7a823b24de35b9ac32f48791176d7f95",
"size": "1763",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Programa/persistencia/perdocumento.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "286708"
}
],
"symlink_target": ""
} |
"""Test for general configuration"""
def tests_config():
    """Verify the global CONFIG exposes mapping-typed sections."""
    from collections.abc import Mapping
    from compilertools._config import CONFIG

    # Each expected section must be present and behave like a mapping.
    for section in ("architectures", "compilers"):
        assert isinstance(CONFIG.get(section), Mapping)
| {
"content_hash": "4c46ca50c972664ec9e7c3680416b29b",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 59,
"avg_line_length": 29.272727272727273,
"alnum_prop": 0.717391304347826,
"repo_name": "JGoutin/compilertools",
"id": "50d9acabb759ae64362068940204cae3b75399df",
"size": "322",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test__config.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "144292"
}
],
"symlink_target": ""
} |
# Sphinx build configuration for the Util documentation site.
import sys, os, datetime
# Make the helper module `sbt_versions` importable before importing it.
sys.path.append(os.path.abspath('utils'))
import sbt_versions
# Default language for highlighted literal blocks.
highlight_language = 'scala'
extensions = ['sphinx.ext.extlinks']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
# The Flask theme is vendored under _themes (git submodule).
sys.path.append(os.path.abspath('_themes'))
html_theme_path = ['_themes']
html_theme = 'flask'
html_short_title = 'Util'
html_static_path = ['_static']
html_sidebars = {
    'index': ['sidebarintro.html', 'searchbox.html'],
    '**': ['sidebarintro.html', 'localtoc.html', 'relations.html', 'searchbox.html']
}
html_favicon = '_static/favicon.ico'
html_theme_options = {
    'index_logo': None
}
project = 'Util'
copyright = '{} Twitter, Inc'.format(datetime.datetime.now().year)
htmlhelp_basename = "util"
# Derive release/version strings from the sbt build definition.
release = sbt_versions.find_release(os.path.abspath('../../../project/Build.scala'))
version = sbt_versions.release_to_version(release)
pygments_style = 'flask_theme_support.FlaskyStyle'
# fall back if theme is not there
try:
    __import__('flask_theme_support')
except ImportError as e:
    print('-' * 74)
    print('Warning: Flask themes unavailable. Building with default theme')
    print('If you want the Flask themes, run this command and build again:')
    print()
    print('  git submodule update --init')
    print('-' * 74)
    pygments_style = 'tango'
    html_theme = 'default'
    html_theme_options = {}
| {
"content_hash": "7c39a81b057ab94d956a8de4aa498dba",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 89,
"avg_line_length": 28.5625,
"alnum_prop": 0.675419401896426,
"repo_name": "twitter/util",
"id": "65dc7b6c358876fe36f4c1b1db50ec9411c5440e",
"size": "1423",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "doc/src/sphinx/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3006"
},
{
"name": "Java",
"bytes": "176316"
},
{
"name": "Makefile",
"bytes": "396"
},
{
"name": "Mako",
"bytes": "3465"
},
{
"name": "Scala",
"bytes": "3032537"
},
{
"name": "Shell",
"bytes": "2167"
},
{
"name": "Starlark",
"bytes": "87606"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import ehs_esports.users.models
class Migration(migrations.Migration):
    """Auto-generated migration: adds the ``User.teams`` many-to-many
    field and updates help text on the profile image fields."""

    dependencies = [
        ('teams', '0001_initial'),
        ('users', '0014_auto_20160913_2319'),
    ]

    operations = [
        migrations.AddField(
            model_name='user',
            name='teams',
            field=models.ManyToManyField(blank=True, to='teams.Team'),
        ),
        migrations.AlterField(
            model_name='user',
            name='profile_banner',
            field=models.ImageField(blank=True, help_text='This is a banner image for your profile. It must be 16:9 aspect ratio.', upload_to=ehs_esports.users.models.generate_banner_filename, verbose_name='Profile Banner'),
        ),
        migrations.AlterField(
            model_name='user',
            name='profile_icon',
            field=models.ImageField(blank=True, help_text='This is a icon image for your profile. It should be a picture of yourself cropped shoulders up, and must be a square image.', upload_to=ehs_esports.users.models.generate_icon_filename, verbose_name='Profile Icon'),
        ),
    ]
| {
"content_hash": "a2040bc04f5f52f8efcbf00c1c0be628",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 273,
"avg_line_length": 39.03333333333333,
"alnum_prop": 0.6362083689154568,
"repo_name": "ReilySiegel/ehs_esports",
"id": "e30ad08ea5001264d3fe15a91794765b2cacd985",
"size": "1243",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ehs_esports/users/migrations/0015_auto_20160919_0820.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1932"
},
{
"name": "HTML",
"bytes": "32397"
},
{
"name": "JavaScript",
"bytes": "3106"
},
{
"name": "Python",
"bytes": "85591"
},
{
"name": "Shell",
"bytes": "4232"
}
],
"symlink_target": ""
} |
from random import randrange
# TODO(review): consider moving this module-level global into a class.
# 3x3 board; "" marks an empty cell, "X" the human, "O" the computer.
tabuleiro = [
    ["", "", ""],
    ["", "", ""],
    ["", "", ""]
]
def tabuleiro_cheio():
    """Return True when no empty cell remains on the board."""
    return all(casa != "" for linha in tabuleiro for casa in linha)
# could also take a tuple or dict instead of separate coordinates
def fazer_jogada(x, y, jogador_humano=True):
    '''
    Place the current player's mark at board cell (x, y).

    Returns True if the move was valid (the cell was empty),
    False otherwise.
    '''
    if tabuleiro[x][y] == "":
        if jogador_humano is True:
            tabuleiro[x][y] = "X"
        else:
            tabuleiro[x][y] = "O"
        # Bug fix: the docstring always promised a boolean, but the
        # original returned None on every path.
        return True
    return False
def imprimir_tabuleiro():
    # Render the board to stdout, showing "_" for empty cells.
    print "--------------"
    for linha in tabuleiro:
        for posicao in linha:
            if posicao == "":
                print "_",  # trailing comma: print without a newline
            else:
                print posicao,  # print without a newline
        print ""  # newline after each row
    print "--------------"
def verificar_vencedor(jogador_humano=True):
    '''
    Return True if the given player has three in a row, column
    or diagonal; False otherwise.
    '''
    # The original only checked rows; the dangling "verificar colunas /
    # diagonais?" comments showed columns and diagonals were intended.
    simbolo = "X" if jogador_humano else "O"
    # rows
    for linha in tabuleiro:
        if all(posicao == simbolo for posicao in linha):
            return True
    # columns
    for col in range(3):
        if all(tabuleiro[lin][col] == simbolo for lin in range(3)):
            return True
    # diagonals
    if all(tabuleiro[i][i] == simbolo for i in range(3)):
        return True
    if all(tabuleiro[i][2 - i] == simbolo for i in range(3)):
        return True
    return False
def realizar_turno(jogador_humano=True):
    # One game turn: print the board, make a move (human via stdin,
    # computer via random empty cell), check for a winner or a full
    # board, then recurse with the other player.
    imprimir_tabuleiro()
    if jogador_humano is True:
        posicao = raw_input("diga as coordenadas em formato xy: ")
        # NOTE(review): no try/except here -- non-numeric or short input
        # crashes with ValueError/IndexError. TODO: validate input.
        x = int(posicao[0])
        y = int(posicao[1])
        while not tabuleiro[x][y] == "":
            posicao = raw_input("diga as coordenadas em formato xy: ")
            # same missing validation as above
            x = int(posicao[0])
            y = int(posicao[1])
        # fill the chosen cell
        tabuleiro[x][y] = "X"
    else:
        # could instead collect the list of empty cells and pick one
        x = randrange(3)
        y = randrange(3)
        while not tabuleiro[x][y] == "":
            x = randrange(3)
            y = randrange(3)
        # fill the chosen cell
        tabuleiro[x][y] = "O"
    if verificar_vencedor(True) is True:
        print "Jogador venceu"
        imprimir_tabuleiro()
    elif verificar_vencedor(False) is True:
        print "Computador venceu"
        imprimir_tabuleiro()
    elif tabuleiro_cheio():
        print "Empate"
    else:
        realizar_turno(not jogador_humano)
# Entry point: the computer makes the first move.
realizar_turno(False)
| {
"content_hash": "5871a37a7564cb61d3a756a8c4f69037",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 70,
"avg_line_length": 24.3781512605042,
"alnum_prop": 0.5249913822819717,
"repo_name": "Painatalman/python101",
"id": "5997630e8a82a8ec598794fc68a828e2da2049b2",
"size": "2901",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sources/ttt.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1022"
},
{
"name": "Python",
"bytes": "23991"
}
],
"symlink_target": ""
} |
"""This module contains a helper function for Telegram's ReplyMarkups
.. versionchanged:: 20.0
Previously, the contents of this module were available through the (no longer existing)
class ``telegram.ReplyMarkup``.
Warning:
Contents of this module are intended to be used internally by the library and *not* by the
user. Changes to this module are not considered breaking changes and may not be documented in
the changelog.
"""
def check_keyboard_type(keyboard: object) -> bool:
    """Checks if the keyboard provided is of the correct type - A list of lists.

    Implicitly tested in the init-tests of `{Inline, Reply}KeyboardMarkup`
    """
    # A keyboard is valid iff it is a list whose every element is a list.
    return isinstance(keyboard, list) and all(
        isinstance(row, list) for row in keyboard
    )
| {
"content_hash": "4aeb5964e88d740456e06b6c5efa2ce2",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 97,
"avg_line_length": 36.04347826086956,
"alnum_prop": 0.7080820265379976,
"repo_name": "tzpBingo/github-trending",
"id": "888937ba60158f157a7e603916c498e544fbfa46",
"size": "1637",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "codespace/python/telegram/_utils/markup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "11470"
},
{
"name": "HTML",
"bytes": "1543"
},
{
"name": "Python",
"bytes": "49985109"
},
{
"name": "Shell",
"bytes": "18039"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf import settings
from django.contrib.auth.tokens import default_token_generator
from django.utils import translation
from emailing.emails import HtmlEmail
def send_confirmation_mail(user, template, extra_context, subject):
    """Render and send an account/password confirmation e-mail to *user*."""
    translation.activate(settings.LANGUAGE_CODE)
    extra_context = extra_context or dict()
    conf = user.appconfig
    subject = subject or conf.CONFIRM_EMAIL_SUBJECT
    # Optionally redirect the mail to the configured extra recipients
    # instead of the user's own address.
    if settings.IGNORE_USER_EMAIL:
        recipients = settings.ADDITIONALLY_SEND_TO
        bcc = None
    else:
        recipients = [user.email]
        bcc = settings.ADDITIONALLY_SEND_TO
    token = default_token_generator.make_token(user)
    context = {
        'user': user,
        'password_reset_confirm_url': user.get_confirm_link(
            user.urlnames.password_reset_confirm_urlname, token),
        'account_confirm_url': user.get_confirm_link(
            user.urlnames.account_confirm_urlname, token),
        'login_url': user._get_domain() + settings.LOGIN_URL,
    }
    context.update(extra_context)
    HtmlEmail(
        from_email=conf.FROM_EMAIL,
        to=recipients,
        bcc=bcc,
        subject=subject,
        template=template,
        context=context
    ).send()
| {
"content_hash": "a0c6124c6a1b833835c706284795ea85",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 113,
"avg_line_length": 30.170731707317074,
"alnum_prop": 0.677445432497979,
"repo_name": "tonimichel/djpl-users",
"id": "031969547083b46f126c16180f94d70c56e1d953",
"size": "1237",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "users/schedule.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7916"
},
{
"name": "Python",
"bytes": "25463"
}
],
"symlink_target": ""
} |
from .base import Template, dump, dumps
from .compute import VirtualMachine
from .storage import StorageAccount
from .network import VirtualNetwork, PublicIPAddress, NetworkInterface
| {
"content_hash": "e5ae1277466bcf388479f0c745f23737",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 70,
"avg_line_length": 30.833333333333332,
"alnum_prop": 0.8378378378378378,
"repo_name": "hoffmann/tropo",
"id": "d343ead9e93f236330c3f9a06861a7e2dd8400f0",
"size": "185",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tropo/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "46268"
}
],
"symlink_target": ""
} |
import unittest
# set the library path, otherwise upscaledb.so/.dll is not found
import os
import sys
import distutils.util
p = distutils.util.get_platform()
ps = ".%s-%s" % (p, sys.version[0:3])
sys.path.insert(0, os.path.join('build', 'lib' + ps))
sys.path.insert(1, os.path.join('..', 'build', 'lib' + ps))
import upscaledb
class TransactionTestCase(unittest.TestCase):
    """Exercises upscaledb transactions: abort, commit and txn cursors.

    Note: this file uses Python 2 `except Type, target:` syntax.
    """

    def testBeginAbort(self):
        # A freshly-begun transaction can simply be aborted.
        env = upscaledb.env()
        env.create("test.db", upscaledb.UPS_ENABLE_TRANSACTIONS)
        db = env.create_db(1)
        txn = upscaledb.txn(env)
        txn.abort()
        db.close()

    def testBeginCommit(self):
        # Erased keys must be invisible within the same transaction.
        env = upscaledb.env()
        env.create("test.db", upscaledb.UPS_ENABLE_TRANSACTIONS)
        db = env.create_db(1)
        txn = upscaledb.txn(env)
        db.insert(txn, "key1", "value1")
        db.insert(txn, "key2", "value2")
        db.insert(txn, "key3", "value3")
        db.erase(txn, "key1")
        db.erase(txn, "key2")
        # NOTE(review): if find() unexpectedly succeeds, these try blocks
        # pass silently; consider self.fail() after the find() call.
        try:
            db.find(txn, "key1")
        except upscaledb.error, (errno, strerror):
            assert upscaledb.UPS_KEY_NOT_FOUND == errno
        try:
            db.find(txn, "key2")
        except upscaledb.error, (errno, strerror):
            assert upscaledb.UPS_KEY_NOT_FOUND == errno
        txn.commit()
        db.close()

    def testCursor(self):
        # Cursor bound to a transaction: insert, find, erase, find again.
        env = upscaledb.env()
        env.create("test.db", upscaledb.UPS_ENABLE_TRANSACTIONS)
        db = env.create_db(1)
        txn = upscaledb.txn(env)
        c = upscaledb.cursor(db, txn)
        c.insert("key1", "value1")
        c.insert("key2", "value2")
        c.insert("key3", "value3")
        c.find("key1")
        c.erase()
        # The erased key must no longer be found.
        try:
            c.find("key2")
        except upscaledb.error, (errno, strerror):
            assert upscaledb.UPS_KEY_NOT_FOUND == errno
        c.close()
        txn.commit()
        db.close()
# Runs all tests on import/execution (no __main__ guard in this file).
unittest.main()
| {
"content_hash": "9f21367ea0ff252f54ca1424fafeb907",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 64,
"avg_line_length": 27.49206349206349,
"alnum_prop": 0.6287528868360277,
"repo_name": "yorickdewid/Mavicona",
"id": "730cee53b8c80df6bdbba6631efa6a12602b7727",
"size": "2363",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/upscaledb/python/unittests/transaction.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "553"
},
{
"name": "C",
"bytes": "313673"
},
{
"name": "C++",
"bytes": "1031794"
},
{
"name": "HTML",
"bytes": "3319"
},
{
"name": "Lex",
"bytes": "6147"
},
{
"name": "Makefile",
"bytes": "18921"
},
{
"name": "PHP",
"bytes": "18560"
},
{
"name": "Protocol Buffer",
"bytes": "2181"
},
{
"name": "Python",
"bytes": "35980"
},
{
"name": "Shell",
"bytes": "2263"
},
{
"name": "Yacc",
"bytes": "20558"
}
],
"symlink_target": ""
} |
"""Tests for the Question Editor controller."""
from __future__ import annotations
import os
from core import feconf
from core import utils
from core.constants import constants
from core.domain import question_fetchers
from core.domain import question_services
from core.domain import skill_services
from core.domain import topic_domain
from core.domain import topic_fetchers
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
# MYPY guard: the mypy_imports module only exists for the type checker.
MYPY = False
if MYPY: # pragma: no cover
    from mypy_imports import question_models

# Resolve the concrete question storage model via the platform registry.
(question_models,) = models.Registry.import_models([models.Names.QUESTION])
class BaseQuestionEditorControllerTests(test_utils.GenericTestBase):
    """Shared fixture for question editor controller tests: signs up a
    set of users with different roles and creates a topic, a skill and
    one question linked to that skill."""

    def setUp(self) -> None:
        """Completes the sign-up process for the various users."""
        super().setUp()
        # Sign up one user per role exercised by the tests.
        self.signup(self.TOPIC_MANAGER_EMAIL, self.TOPIC_MANAGER_USERNAME)
        self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME)
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME)

        self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL)
        self.topic_manager_id = self.get_user_id_from_email(
            self.TOPIC_MANAGER_EMAIL)
        self.new_user_id = self.get_user_id_from_email(
            self.NEW_USER_EMAIL)
        self.editor_id = self.get_user_id_from_email(
            self.EDITOR_EMAIL)

        self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME])

        # A topic with one subtopic is needed so a topic manager role can
        # be assigned for it below.
        self.topic_id = topic_fetchers.get_new_topic_id()
        subtopic_1 = topic_domain.Subtopic.create_default_subtopic(
            1, 'Subtopic Title 1', 'url-frag-one')
        subtopic_1.skill_ids = ['skill_id_1']
        subtopic_1.url_fragment = 'sub-one-frag'
        self.save_new_topic(
            self.topic_id, self.admin_id, name='Name',
            description='Description', canonical_story_ids=[],
            additional_story_ids=[], uncategorized_skill_ids=[],
            subtopics=[subtopic_1], next_subtopic_id=2)
        self.set_topic_managers([self.TOPIC_MANAGER_USERNAME], self.topic_id)

        # UserActionsInfo handles for permission checks in tests.
        self.topic_manager = user_services.get_user_actions_info(
            self.topic_manager_id)
        self.admin = user_services.get_user_actions_info(self.admin_id)
        self.new_user = user_services.get_user_actions_info(self.new_user_id)
        self.editor = user_services.get_user_actions_info(self.editor_id)

        # One skill and one question linked to it, used as the baseline
        # payload by the handler tests.
        self.skill_id = skill_services.get_new_skill_id()
        self.save_new_skill(
            self.skill_id, self.admin_id, description='Skill Description')

        self.question_id = question_services.get_new_question_id()
        self.question = self.save_new_question(
            self.question_id,
            self.editor_id,
            self._create_valid_question_data('ABC'),
            [self.skill_id])
class QuestionCreationHandlerTest(BaseQuestionEditorControllerTests):
"""Tests returning of new question ids and creating questions."""
    def test_post_with_non_admin_or_topic_manager_email_disallows_access(
        self
    ) -> None:
        """A plain signed-in user gets HTTP 401 from question creation."""
        self.login(self.NEW_USER_EMAIL)
        csrf_token = self.get_new_csrf_token()
        self.post_json(
            feconf.NEW_QUESTION_URL, {
                'skill_ids': [self.skill_id]
            }, csrf_token=csrf_token, expected_status_int=401)
        self.logout()
    def test_post_with_editor_email_does_not_allow_question_creation(
        self
    ) -> None:
        """An exploration editor without question rights gets HTTP 401."""
        self.login(self.EDITOR_EMAIL)
        csrf_token = self.get_new_csrf_token()
        question_dict = self.question.to_dict()
        # Here we use MyPy ignore because the 'id' of a question can only
        # be of string type but here we are assigning it with None because
        # we want to test the scenario where the question is just created
        # and the id is still needed to be assigned.
        question_dict['id'] = None # type: ignore[arg-type]
        self.post_json(
            feconf.NEW_QUESTION_URL, {
                'question_dict': question_dict,
                'skill_ids': [self.skill_id]
            }, csrf_token=csrf_token, expected_status_int=401)
        self.logout()
    def test_post_with_incorrect_skill_id_returns_404(self) -> None:
        """A well-formed but nonexistent skill id yields HTTP 404."""
        self.login(self.CURRICULUM_ADMIN_EMAIL)
        csrf_token = self.get_new_csrf_token()
        incorrect_skill_id = 'abc123456789'
        self.post_json(
            feconf.NEW_QUESTION_URL, {
                'skill_ids': [incorrect_skill_id]
            }, csrf_token=csrf_token, expected_status_int=404)
        self.logout()
def test_post_with_no_skill_ids_returns_400(self) -> None:
self.login(self.CURRICULUM_ADMIN_EMAIL)
csrf_token = self.get_new_csrf_token()
self.post_json(
feconf.NEW_QUESTION_URL, {},
csrf_token=csrf_token, expected_status_int=400)
self.logout()
    def test_post_with_incorrect_list_of_skill_ids_returns_400(self) -> None:
        """Non-string entries in skill_ids are rejected with HTTP 400."""
        self.login(self.CURRICULUM_ADMIN_EMAIL)
        csrf_token = self.get_new_csrf_token()
        incorrect_skill_ids = [1, 2]
        self.post_json(
            feconf.NEW_QUESTION_URL, {
                'skill_ids': incorrect_skill_ids
            }, csrf_token=csrf_token, expected_status_int=400)
        self.logout()
    def test_post_with_incorrect_type_of_skill_ids_returns_400(
        self
    ) -> None:
        """A non-string skill id is rejected with HTTP 400."""
        self.login(self.CURRICULUM_ADMIN_EMAIL)
        csrf_token = self.get_new_csrf_token()
        incorrect_skill_id = 1
        self.post_json(
            feconf.NEW_QUESTION_URL, {
                'skill_ids': [incorrect_skill_id],
            }, csrf_token=csrf_token, expected_status_int=400)
        self.logout()
    def test_post_with_incorrect_question_id_returns_400(self) -> None:
        """Supplying a pre-set (non-None) question id is rejected."""
        self.login(self.CURRICULUM_ADMIN_EMAIL)
        csrf_token = self.get_new_csrf_token()
        question_dict = self.question.to_dict()
        question_dict['id'] = 'abc123456789'
        question_dict['version'] = 0
        self.post_json(
            feconf.NEW_QUESTION_URL, {
                'question_dict': question_dict,
                'skill_ids': [self.skill_id]
            }, csrf_token=csrf_token, expected_status_int=400)
        self.logout()
    def test_post_with_incorrect_question_schema_returns_400(self) -> None:
        """A question dict missing a required state field is rejected."""
        self.login(self.CURRICULUM_ADMIN_EMAIL)
        csrf_token = self.get_new_csrf_token()
        question_dict = self.question.to_dict()
        # Here we use MyPy ignore because MyPy doesn't allow key deletion
        # from TypedDict.
        del question_dict['question_state_data']['content'] # type: ignore[misc]
        question_dict['version'] = 0
        self.post_json(
            feconf.NEW_QUESTION_URL, {
                'question_dict': question_dict,
                'skill_ids': [self.skill_id],
            }, csrf_token=csrf_token, expected_status_int=400)
        self.logout()
    def test_post_with_no_skill_difficulty_returns_400(self) -> None:
        """Omitting skill_difficulties is rejected with HTTP 400."""
        self.login(self.CURRICULUM_ADMIN_EMAIL)
        csrf_token = self.get_new_csrf_token()
        question_dict = self.question.to_dict()
        # Here we use MyPy ignore because the 'id' of a question can only
        # be of string type but here we are assigning it with None because
        # we want to test the scenario where the question is just created
        # and the id is still needed to be assigned.
        question_dict['id'] = None # type: ignore[arg-type]
        question_dict['version'] = 0
        self.post_json(
            feconf.NEW_QUESTION_URL, {
                'question_dict': question_dict,
                'skill_ids': [self.skill_id]
            }, csrf_token=csrf_token, expected_status_int=400)
        self.logout()
    def test_post_with_incorrect_version_returns_400(self) -> None:
        """A nonzero version on a newly-created question is rejected."""
        self.login(self.CURRICULUM_ADMIN_EMAIL)
        csrf_token = self.get_new_csrf_token()
        question_dict = self.question.to_dict()
        question_dict['version'] = 1
        self.post_json(
            feconf.NEW_QUESTION_URL, {
                'question_dict': question_dict,
                'skill_ids': [self.skill_id]
            }, csrf_token=csrf_token, expected_status_int=400)
        self.logout()
    def test_post_with_wrong_skill_difficulty_length_returns_400(self) -> None:
        """skill_difficulties length must match skill_ids length."""
        self.login(self.CURRICULUM_ADMIN_EMAIL)
        csrf_token = self.get_new_csrf_token()
        question_dict = self.question.to_dict()
        # Here we use MyPy ignore because the 'id' of a question can only
        # be of string type but here we are assigning it with None because
        # we want to test the scenario where the question is just created
        # and the id is still needed to be assigned.
        question_dict['id'] = None # type: ignore[arg-type]
        question_dict['version'] = 0
        self.post_json(
            feconf.NEW_QUESTION_URL, {
                'question_dict': question_dict,
                'skill_ids': [self.skill_id],
                'skill_difficulties': [0.6, 0.8]
            }, csrf_token=csrf_token, expected_status_int=400)
        self.logout()
    def test_post_with_invalid_skill_difficulty_type_returns_400(
        self
    ) -> None:
        """Non-numeric skill difficulties are rejected with HTTP 400."""
        self.login(self.CURRICULUM_ADMIN_EMAIL)
        csrf_token = self.get_new_csrf_token()
        question_dict = self.question.to_dict()
        # Here we use MyPy ignore because the 'id' of a question can only
        # be of string type but here we are assigning it with None because
        # we want to test the scenario where the question is just created
        # and the id is still needed to be assigned.
        question_dict['id'] = None # type: ignore[arg-type]
        question_dict['version'] = 0
        self.post_json(
            feconf.NEW_QUESTION_URL, {
                'question_dict': question_dict,
                'skill_ids': [self.skill_id],
                'skill_difficulties': ['test']
            }, csrf_token=csrf_token, expected_status_int=400)
        self.logout()
    def test_post_with_invalid_skill_difficulty_value_returns_400(
        self
    ) -> None:
        """An out-of-range skill difficulty (> 1) is rejected."""
        self.login(self.CURRICULUM_ADMIN_EMAIL)
        csrf_token = self.get_new_csrf_token()
        question_dict = self.question.to_dict()
        # Here we use MyPy ignore because the 'id' of a question can only
        # be of string type but here we are assigning it with None because
        # we want to test the scenario where the question is just created
        # and the id is still needed to be assigned.
        question_dict['id'] = None # type: ignore[arg-type]
        question_dict['version'] = 0
        self.post_json(
            feconf.NEW_QUESTION_URL, {
                'question_dict': question_dict,
                'skill_ids': [self.skill_id],
                'skill_difficulties': [2.0]
            }, csrf_token=csrf_token, expected_status_int=400)
        self.logout()
    def test_post_with_admin_email_allows_question_creation(self) -> None:
        """A curriculum admin can create a question; afterwards a second
        question model exists in storage."""
        self.login(self.CURRICULUM_ADMIN_EMAIL)
        csrf_token = self.get_new_csrf_token()
        question_dict = self.question.to_dict()
        # Here we use MyPy ignore because the 'id' of a question can only
        # be of string type but here we are assigning it with None because
        # we want to test the scenario where the question is just created
        # and the id is still needed to be assigned.
        question_dict['id'] = None # type: ignore[arg-type]
        question_dict['version'] = 0
        self.post_json(
            feconf.NEW_QUESTION_URL, {
                'question_dict': question_dict,
                'skill_ids': [self.skill_id],
                'skill_difficulties': [0.6]
            }, csrf_token=csrf_token, expected_status_int=200)
        # The fixture created one question; the POST should add a second.
        all_models = question_models.QuestionModel.get_all()
        questions = [
            question_fetchers.get_question_from_model(model)
            for model in all_models
        ]
        self.assertEqual(len(questions), 2)
        self.logout()
    def test_post_with_topic_manager_email_allows_question_creation(
        self
    ) -> None:
        """A topic manager can create a question; afterwards a second
        question model exists in storage."""
        self.login(self.TOPIC_MANAGER_EMAIL)
        csrf_token = self.get_new_csrf_token()
        question_dict = self.question.to_dict()
        # Here we use MyPy ignore because the 'id' of a question can only
        # be of string type but here we are assigning it with None because
        # we want to test the scenario where the question is just created
        # and the id is still needed to be assigned.
        question_dict['id'] = None # type: ignore[arg-type]
        question_dict['version'] = 0
        self.post_json(
            feconf.NEW_QUESTION_URL, {
                'question_dict': question_dict,
                'skill_ids': [self.skill_id],
                'skill_difficulties': [0.6]
            }, csrf_token=csrf_token)
        # The fixture created one question; the POST should add a second.
        all_models = question_models.QuestionModel.get_all()
        questions = [
            question_fetchers.get_question_from_model(model)
            for model in all_models
        ]
        self.assertEqual(len(questions), 2)
        self.logout()
    def test_post_with_invalid_question_returns_400_status(self) -> None:
        """A non-dict question_state_data payload is rejected."""
        self.login(self.CURRICULUM_ADMIN_EMAIL)
        csrf_token = self.get_new_csrf_token()
        question_dict = self.question.to_dict()
        # Here we use MyPy ignore because the 'id' of a question can only
        # be of string type but here we are assigning it with None because
        # we want to test the scenario where the question is just created
        # and the id is still needed to be assigned.
        question_dict['id'] = None # type: ignore[arg-type]
        # TODO(#13059): Here we use MyPy ignore because after we fully type
        # the codebase we plan to get rid of the tests that intentionally
        # test wrong inputs that we can normally catch by typing.
        question_dict['question_state_data'] = 'invalid_question_state_data' # type: ignore[arg-type]
        question_dict['version'] = 0
        self.post_json(
            feconf.NEW_QUESTION_URL, {
                'question_dict': question_dict,
                'skill_ids': [self.skill_id],
            }, csrf_token=csrf_token, expected_status_int=400)
        self.logout()
def test_post_with_too_many_skills_returns_400(self) -> None:
self.login(self.CURRICULUM_ADMIN_EMAIL)
csrf_token = self.get_new_csrf_token()
skill_ids = [1, 2, 3, 4]
self.post_json(
feconf.NEW_QUESTION_URL, {
'skill_ids': skill_ids
}, csrf_token=csrf_token, expected_status_int=400)
self.logout()
def test_post_with_valid_images(self) -> None:
"""Test question creation with valid images."""
self.login(self.CURRICULUM_ADMIN_EMAIL)
csrf_token = self.get_new_csrf_token()
filename = 'img.png'
question_dict = self.question.to_dict()
# Here we use MyPy ignore because the 'id' of a question can only
# be of string type but here we are assigning it with None because
# we want to test the scenario where the question is just created
# and the id is still needed to be assigned.
question_dict['id'] = None # type: ignore[arg-type]
question_dict['version'] = 0
# The state content references img.png, which is uploaded below.
content_html = (
'<oppia-noninteractive-image filepath-with-value='
'""img.png"" caption-with-value="""" '
'alt-with-value=""Image""></oppia-noninteractive-image>'
)
question_dict['question_state_data']['content']['html'] = content_html
post_data = {
'question_dict': question_dict,
'skill_ids': [self.skill_id],
'skill_difficulties': [0.6]
}
with utils.open_file(
os.path.join(feconf.TESTS_DATA_DIR, 'img.png'),
'rb', encoding=None
) as f:
raw_image = f.read()
# Upload the referenced image alongside the question payload.
self.post_json(
feconf.NEW_QUESTION_URL, post_data,
csrf_token=csrf_token,
upload_files=[(filename, filename, raw_image)]
)
all_models = question_models.QuestionModel.get_all()
questions = [
question_fetchers.get_question_from_model(model)
for model in all_models
]
# Two questions: presumably one from setUp plus the new one — confirm.
self.assertEqual(len(questions), 2)
self.logout()
def test_post_with_invalid_images(self) -> None:
"""Test question creation with invalid images."""
self.login(self.CURRICULUM_ADMIN_EMAIL)
csrf_token = self.get_new_csrf_token()
question_dict = self.question.to_dict()
# Here we use MyPy ignore because the 'id' of a question can only
# be of string type but here we are assigning it with None because
# we want to test the scenario where the question is just created
# and the id is still needed to be assigned.
question_dict['id'] = None # type: ignore[arg-type]
question_dict['version'] = 0
# The content references img.svg, which is deliberately NOT uploaded
# in the first request below.
content_html = (
'<oppia-noninteractive-image filepath-with-value='
'""img.svg"" caption-with-value="""" '
'alt-with-value=""Image""></oppia-noninteractive-image>'
)
question_dict['question_state_data']['content']['html'] = content_html
post_data = {
'question_dict': question_dict,
'skill_ids': [self.skill_id],
'skill_difficulties': [0.6]
}
# Case 1: referenced image missing from the upload -> HTTP 400.
response_dict = self.post_json(
feconf.NEW_QUESTION_URL, post_data,
csrf_token=csrf_token,
expected_status_int=400)
self.assertIn(
'No image data provided for file with name img.svg.',
response_dict['error'])
# Case 2: image present but larger than the 100 KB limit -> HTTP 400.
large_image = b'<svg><path d="%s" /></svg>' % (
b'M150 0 L75 200 L225 200 Z ' * 4000)
response_dict = self.post_json(
feconf.NEW_QUESTION_URL, post_data,
csrf_token=csrf_token,
upload_files=[
('img.svg', 'img.svg', large_image)
], expected_status_int=400)
self.assertIn(
'Image exceeds file size limit of 100 KB.',
response_dict['error'])
self.logout()
class QuestionSkillLinkHandlerTest(BaseQuestionEditorControllerTests):
"""Tests link and unlink question from skills."""
def setUp(self) -> None:
"""Completes the setup for QuestionSkillLinkHandlerTest."""
super().setUp()
self.skill_id = skill_services.get_new_skill_id()
self.save_new_skill(
self.skill_id, self.admin_id, description='Skill Description')
self.skill_id_2 = skill_services.get_new_skill_id()
self.save_new_skill(
self.skill_id_2, self.admin_id, description='Skill Description 2')
self.question_id_2 = question_services.get_new_question_id()
self.save_new_question(
self.question_id_2, self.editor_id,
self._create_valid_question_data('ABC'), [self.skill_id])
def test_put_with_non_admin_or_topic_manager_disallows_access(self) -> None:
"""A plain user may not edit question-skill links (HTTP 401)."""
self.login(self.NEW_USER_EMAIL)
csrf_token = self.get_new_csrf_token()
self.put_json(
'%s/%s' % (
feconf.QUESTION_SKILL_LINK_URL_PREFIX, self.question_id
), {
'skill_ids_task_list': [{
'id': 'skill_2',
'task': 'update_difficulty',
'difficulty': 0.9
}]
},
csrf_token=csrf_token,
expected_status_int=401)
self.logout()
def test_put_with_admin_email_allows_updation(self) -> None:
"""A curriculum admin can update, add and remove skill links."""
question_services.create_new_question_skill_link(
self.editor_id, self.question_id, self.skill_id, 0.5)
(
question_summaries, merged_question_skill_links) = (
question_services.get_displayable_question_skill_link_details(
5, [self.skill_id], 0))
self.assertEqual(len(question_summaries), 1)
self.assertEqual(
merged_question_skill_links[0].skill_difficulties, [0.5])
self.login(self.CURRICULUM_ADMIN_EMAIL)
csrf_token = self.get_new_csrf_token()
# Task 1: change the difficulty of the existing link to 0.9.
self.put_json(
'%s/%s' % (
feconf.QUESTION_SKILL_LINK_URL_PREFIX, self.question_id
), {
'skill_ids_task_list': [{
'id': self.skill_id,
'task': 'update_difficulty',
'difficulty': 0.9
}]
}, csrf_token=csrf_token)
# Task 2: link the question to 'skill_2' at difficulty 0.6.
self.put_json(
'%s/%s' % (
feconf.QUESTION_SKILL_LINK_URL_PREFIX, self.question_id
), {
'skill_ids_task_list': [{
'id': 'skill_2',
'task': 'add',
'difficulty': 0.6
}]
}, csrf_token=csrf_token)
(
question_summaries, merged_question_skill_links) = (
question_services.get_displayable_question_skill_link_details(
5, [self.skill_id, 'skill_2'], 0))
self.assertEqual(len(question_summaries), 1)
self.assertEqual(len(merged_question_skill_links), 1)
self.assertEqual(
merged_question_skill_links[0].skill_difficulties, [0.6, 0.9])
# Task 3: unlink 'skill_2' again and verify nothing remains for it.
self.put_json(
'%s/%s' % (
feconf.QUESTION_SKILL_LINK_URL_PREFIX, self.question_id
), {
'skill_ids_task_list': [{
'id': 'skill_2',
'task': 'remove',
'difficulty': 0
}]
}, csrf_token=csrf_token)
question_summaries, _, = (
question_services.get_displayable_question_skill_link_details(
5, ['skill_2'], 0))
self.assertEqual(len(question_summaries), 0)
self.logout()
def test_put_with_invalid_input_throws_error(self) -> None:
"""Malformed skill_ids_task_list payloads are all rejected with 400."""
self.login(self.CURRICULUM_ADMIN_EMAIL)
csrf_token = self.get_new_csrf_token()
# Task entry without an 'id'.
self.put_json(
'%s/%s' % (
feconf.QUESTION_SKILL_LINK_URL_PREFIX, self.question_id
), {
'skill_ids_task_list': [{
'task': 'update_difficulty',
'difficulty': 0.9
}]
}, csrf_token=csrf_token, expected_status_int=400)
# skill_ids_task_list given as a dict instead of a list.
self.put_json(
'%s/%s' % (
feconf.QUESTION_SKILL_LINK_URL_PREFIX, self.question_id
), {
'skill_ids_task_list': {
'task': 'invalid_task'
}
}, csrf_token=csrf_token, expected_status_int=400)
# Empty payload.
self.put_json(
'%s/%s' % (
feconf.QUESTION_SKILL_LINK_URL_PREFIX, self.question_id
), {}, csrf_token=csrf_token, expected_status_int=400)
# Unknown task name.
self.put_json(
'%s/%s' % (
feconf.QUESTION_SKILL_LINK_URL_PREFIX, self.question_id
), {
'skill_ids_task_list': [{
'id': 'skill_2',
'task': 'invalid'
}]
}, csrf_token=csrf_token, expected_status_int=400)
# 'add' task with no id or difficulty.
self.put_json(
'%s/%s' % (
feconf.QUESTION_SKILL_LINK_URL_PREFIX, self.question_id
), {
'skill_ids_task_list': [{
'task': 'add'
}]
}, csrf_token=csrf_token, expected_status_int=400)
self.logout()
def test_put_with_topic_manager_email_allows_updation(self) -> None:
"""A topic manager can update the difficulty of a skill link."""
question_services.create_new_question_skill_link(
self.editor_id, self.question_id, self.skill_id, 0.3)
self.login(self.TOPIC_MANAGER_EMAIL)
csrf_token = self.get_new_csrf_token()
self.put_json(
'%s/%s' % (
feconf.QUESTION_SKILL_LINK_URL_PREFIX, self.question_id
), {
'skill_ids_task_list': [{
'id': self.skill_id,
'task': 'update_difficulty',
'difficulty': 0.6
}]
}, csrf_token=csrf_token)
(
question_summaries, merged_question_skill_links) = (
question_services.get_displayable_question_skill_link_details(
5, [self.skill_id], 0))
self.assertEqual(len(question_summaries), 1)
self.assertEqual(len(merged_question_skill_links), 1)
self.assertEqual(
merged_question_skill_links[0].skill_difficulties, [0.6])
self.logout()
class EditableQuestionDataHandlerTest(BaseQuestionEditorControllerTests):
"""Tests get, put and delete methods of editable questions data handler."""
def test_get_can_not_access_handler_with_invalid_question_id(self) -> None:
"""GET with a malformed question id returns HTTP 400."""
self.login(self.CURRICULUM_ADMIN_EMAIL)
self.get_json(
'%s/%s' % (
feconf.QUESTION_EDITOR_DATA_URL_PREFIX, 'invalid_question_id'),
expected_status_int=400)
self.logout()
def test_delete_with_guest_does_not_allow_question_deletion(self) -> None:
"""A logged-out user cannot delete a question (HTTP 401)."""
response = self.delete_json(
'%s/%s' % (
feconf.QUESTION_EDITOR_DATA_URL_PREFIX, self.question_id),
expected_status_int=401)
self.assertEqual(
response['error'],
'You must be logged in to access this resource.')
def test_delete_with_new_user_does_not_allow_question_deletion(
self
) -> None:
"""A user without question rights cannot delete one (HTTP 401)."""
self.login(self.NEW_USER_EMAIL)
response = self.delete_json(
'%s/%s' % (
feconf.QUESTION_EDITOR_DATA_URL_PREFIX, self.question_id),
expected_status_int=401)
self.assertIn(
'does not have enough rights to delete the question.',
response['error'])
self.logout()
def test_get_with_non_admin_or_topic_manager_email_disallows_access(
self
) -> None:
"""A regular user may not fetch editable question data (HTTP 401)."""
self.login(self.NEW_USER_EMAIL)
self.get_json(
'%s/%s' % (
feconf.QUESTION_EDITOR_DATA_URL_PREFIX, self.question_id),
expected_status_int=401)
self.logout()
def test_get_with_admin_email_allows_question_fetching(self) -> None:
"""A curriculum admin can fetch the question and its linked skills."""
self.login(self.CURRICULUM_ADMIN_EMAIL)
response_dict = self.get_json('%s/%s' % (
feconf.QUESTION_EDITOR_DATA_URL_PREFIX, self.question_id))
self.assertEqual(
response_dict['question_dict']['id'], self.question_id)
self.assertEqual(
response_dict['question_dict']['version'], 1)
self.assertEqual(
response_dict['question_dict']['question_state_data'],
self.question.question_state_data.to_dict())
self.assertEqual(
len(response_dict['associated_skill_dicts']), 1)
self.assertEqual(
response_dict['associated_skill_dicts'][0]['id'],
self.skill_id)
self.logout()
def test_get_with_topic_manager_email_allows_question_fetching(
self
) -> None:
"""A topic manager can fetch the question and its linked skills."""
self.login(self.TOPIC_MANAGER_EMAIL)
response_dict = self.get_json('%s/%s' % (
feconf.QUESTION_EDITOR_DATA_URL_PREFIX, self.question_id))
self.assertEqual(
response_dict['question_dict']['id'], self.question_id)
self.assertEqual(
response_dict['question_dict']['version'], 1)
self.assertEqual(
response_dict['question_dict']['question_state_data'],
self.question.question_state_data.to_dict())
self.assertEqual(
len(response_dict['associated_skill_dicts']), 1)
self.assertEqual(
response_dict['associated_skill_dicts'][0]['id'],
self.skill_id)
self.logout()
def test_get_with_invalid_question_id_returns_404_status(self) -> None:
"""If the question lookup yields None, the handler returns 404."""
def _mock_get_question_by_id(
unused_question_id: str, **unused_kwargs: str
) -> None:
"""Mocks '_get_question_by_id'. Returns None."""
return None
question_services_swap = self.swap(
question_services, 'get_question_by_id', _mock_get_question_by_id)
with question_services_swap:
self.login(self.EDITOR_EMAIL)
self.get_json(
'%s/%s' % (
feconf.QUESTION_EDITOR_DATA_URL_PREFIX,
self.question_id), expected_status_int=404)
self.logout()
def test_delete_with_incorrect_question_id_returns_404_status(self) -> None:
"""DELETE with a well-formed but unknown id returns HTTP 404."""
self.login(self.CURRICULUM_ADMIN_EMAIL)
self.delete_json(
'%s/%s' % (
feconf.QUESTION_EDITOR_DATA_URL_PREFIX, 'abc123456789'),
expected_status_int=404)
self.logout()
def test_delete_with_admin_email_allows_question_deletion(self) -> None:
"""A curriculum admin can delete a question (HTTP 200)."""
self.login(self.CURRICULUM_ADMIN_EMAIL)
self.delete_json(
'%s/%s' % (
feconf.QUESTION_EDITOR_DATA_URL_PREFIX, self.question_id),
expected_status_int=200)
self.logout()
def test_put_with_long_commit_message_fails(self) -> None:
"""Commit messages longer than the schema's maximum are rejected."""
new_question_data = self._create_valid_question_data('DEF')
change_list = [{
'cmd': 'update_question_property',
'property_name': 'question_state_data',
'new_value': new_question_data.to_dict(),
'old_value': self.question.question_state_data.to_dict()
}]
payload = {
'change_list': change_list,
'commit_message': ('a' * (constants.MAX_COMMIT_MESSAGE_LENGTH + 1))
}
self.login(self.CURRICULUM_ADMIN_EMAIL)
csrf_token = self.get_new_csrf_token()
response_json = self.put_json(
'%s/%s' % (
feconf.QUESTION_EDITOR_DATA_URL_PREFIX, self.question_id),
payload,
csrf_token=csrf_token, expected_status_int=400)
# 376 chars matches MAX_COMMIT_MESSAGE_LENGTH + 1 above — so the
# constant is presumably 375; confirm against constants.
max_len_object = 'a' * 376
self.assertEqual(
response_json['error'],
'Schema validation for \'commit_message\' failed: Validation '
'failed: has_length_at_most ({\'max_value\': 375}) for object %s'
% max_len_object
)
def test_put_with_admin_email_allows_question_editing(self) -> None:
"""Admin PUT applies the change list; incomplete payloads fail."""
new_question_data = self._create_valid_question_data('DEF')
change_list = [{
'cmd': 'update_question_property',
'property_name': 'question_state_data',
'new_value': new_question_data.to_dict(),
'old_value': self.question.question_state_data.to_dict()
}]
payload = {
'change_list': change_list,
'commit_message': 'update question data'
}
self.login(self.CURRICULUM_ADMIN_EMAIL)
csrf_token = self.get_new_csrf_token()
response_json = self.put_json(
'%s/%s' % (
feconf.QUESTION_EDITOR_DATA_URL_PREFIX, self.question_id),
payload,
csrf_token=csrf_token)
self.assertEqual(
response_json['question_dict']['language_code'], 'en')
self.assertEqual(
response_json['question_dict']['question_state_data'],
new_question_data.to_dict())
self.assertEqual(
response_json['question_dict']['id'], self.question_id)
# Missing change_list must be rejected with 400.
del payload['change_list']
self.put_json(
'%s/%s' % (
feconf.QUESTION_EDITOR_DATA_URL_PREFIX,
self.question_id), payload,
csrf_token=csrf_token, expected_status_int=400)
# Missing commit_message must be rejected with 400.
del payload['commit_message']
payload['change_list'] = change_list
self.put_json(
'%s/%s' % (
feconf.QUESTION_EDITOR_DATA_URL_PREFIX,
self.question_id), payload,
csrf_token=csrf_token, expected_status_int=400)
# PUT without a question id in the URL is a 404.
payload['commit_message'] = 'update question data'
self.put_json(
feconf.QUESTION_EDITOR_DATA_URL_PREFIX, payload,
csrf_token=csrf_token, expected_status_int=404)
self.logout()
def test_put_with_topic_manager_email_allows_question_editing(self) -> None:
"""A topic manager's PUT with a valid change list is applied."""
new_question_data = self._create_valid_question_data('DEF')
change_list = [{
'cmd': 'update_question_property',
'property_name': 'question_state_data',
'new_value': new_question_data.to_dict(),
'old_value': self.question.question_state_data.to_dict()
}]
payload = {
'change_list': change_list,
'commit_message': 'update question data'
}
self.login(self.TOPIC_MANAGER_EMAIL)
csrf_token = self.get_new_csrf_token()
new_question_data = self._create_valid_question_data('GHI')
change_list = [{
'cmd': 'update_question_property',
'property_name': 'question_state_data',
'new_value': new_question_data.to_dict(),
'old_value': self.question.question_state_data.to_dict()
}]
payload['change_list'] = change_list
payload['commit_message'] = 'update question data'
response_json = self.put_json(
'%s/%s' % (
feconf.QUESTION_EDITOR_DATA_URL_PREFIX, self.question_id),
payload, csrf_token=csrf_token)
self.assertEqual(
response_json['question_dict']['language_code'], 'en')
self.assertEqual(
response_json['question_dict']['question_state_data'],
new_question_data.to_dict())
self.assertEqual(
response_json['question_dict']['id'], self.question_id)
self.logout()
def test_put_with_creating_new_fully_specified_question_returns_400(
self
) -> None:
"""The create_new_fully_specified_question cmd is disallowed in PUT."""
self._create_valid_question_data('XXX')
change_list = [{
'cmd': 'create_new_fully_specified_question',
'question_dict': {},
'skill_id': 'abc123'
}]
payload = {
'change_list': change_list,
'commit_message': 'update question data'
}
self.login(self.CURRICULUM_ADMIN_EMAIL)
csrf_token = self.get_new_csrf_token()
self.put_json(
'%s/%s' % (
feconf.QUESTION_EDITOR_DATA_URL_PREFIX, self.question_id),
payload,
csrf_token=csrf_token, expected_status_int=400)
self.logout()
| {
"content_hash": "d0df896422887fd55b511af8d0e18615",
"timestamp": "",
"source": "github",
"line_count": 842,
"max_line_length": 102,
"avg_line_length": 41.28978622327791,
"alnum_prop": 0.5756486222171087,
"repo_name": "oppia/oppia",
"id": "e266ff10b36d241ff3741c056b57211063e6e0ec",
"size": "35371",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "core/controllers/question_editor_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "476480"
},
{
"name": "HTML",
"bytes": "2092923"
},
{
"name": "JavaScript",
"bytes": "1247116"
},
{
"name": "PEG.js",
"bytes": "71377"
},
{
"name": "Python",
"bytes": "17628953"
},
{
"name": "Shell",
"bytes": "2240"
},
{
"name": "TypeScript",
"bytes": "15541372"
}
],
"symlink_target": ""
} |
import pandas as pd
import numpy as np
import random
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.externals import joblib
from sklearn.decomposition import PCA
import random
class Standardiser:
# Loads the training CSV, splits it, fits/applies a sklearn
# StandardScaler, and persists the scaler for later forecasts.
# NOTE(review): the usecols indices below are magic numbers — document
# what the selected columns mean at the data source.
def __init__(self):
pass
def initialise(self):
"""Load the dataset, split it and fit the standard scaler."""
#self.input = input
print("standardiser initialising")
self.data = self.loadData()
self.X_train, self.X_test, self.y_train, self.y_test = self.splitData(self.data)
self.std_scale, self.X_train_std, self.X_test_std = self.standardise(self.X_train, self.X_test, self.y_train, self.y_test)
#self.std_scale, self.X_train_std, self.X_test_std = self.PCAan(self.X_train, self.X_test, self.y_train)
#standardised
#pca_std, X_train_stdpca, X_test_stdpca = PCAan(X_train_std, X_test_std)
#non standardised
#pca_std, X_train_stdpca, X_test_stdpca = PCAan(X_train, X_test)
#genGraph(pca_std, X_train_stdpca, X_test_stdpca, y_train)
def get_std_X_train(self):
"""Return the standardised training features."""
return self.X_train_std
def get_std_X_test(self):
"""Return the standardised test features."""
return self.X_test_std
def get_y_train(self):
"""Return the training labels."""
return self.y_train
def get_y_test(self):
"""Return the test labels."""
return self.y_test
def loadData(self):
"""Read the training CSV, keeping only the eight columns used later."""
df = pd.io.parsers.read_csv(
'Data/NewBalanced.csv',
header=None,
skiprows = [0],
usecols=[5,10,15,17,18,19,20,22])
return df
def loadForecast(self,forecast_loc):
"""Read a forecast CSV and return its standardised feature columns."""
#load in forecast csv
self.foredf = pd.io.parsers.read_csv(
forecast_loc,
header=None,
skiprows = [0],
usecols=[1,2,3,4,5,6,8,9,10])
# Columns from index 3 onward are the model features; the first three
# are presumably identifying fields — confirm against the CSV layout.
X_forecast = self.foredf.values[:,3:]
X_forecast = self.standardise_Pred(X_forecast)
return X_forecast
def splitData(self, data):
"""Split into 70% train / 30% test with a fresh random seed each call."""
X = data.values[:,:7]
y = data.values[:,7]
#split the data into training and testing data
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.30, random_state=random.randint(10,100000))
return X_train, X_test, y_train, y_test
def standardise(self, X_train, X_test, y_train, y_test):
"""Fit a StandardScaler on X_train, transform both splits and persist
the scaler. The y_train/y_test arguments are unused here."""
#standardisation using sklearn
self.std_scale = preprocessing.StandardScaler().fit(X_train)
X_train_std = self.std_scale.transform(X_train)
X_test_std = self.std_scale.transform(X_test)
self.saveScale()
return self.std_scale, X_train_std, X_test_std
def standardise_Pred(self, X_forecast):
"""Apply the already-fitted scaler to forecast features."""
X_forecast_std = self.std_scale.transform(X_forecast)
return X_forecast_std
def PCAan(self, X_train_std, X_test_std, y_train):
"""Project train/test data onto the first two principal components."""
pca_std = PCA(n_components=2).fit(X_train_std)
X_train_std = pca_std.transform(X_train_std)
X_test_std = pca_std.transform(X_test_std)
#genGraph(pca_std, X_train_std, X_test_std, y_train)
return pca_std, X_train_std, X_test_std
def make_CSV(self, fore_pred, fore_prob,outputfile):
"""Append each prediction and its class-1 probability to the forecast
rows and write the result to outputfile."""
print("make_CSV")
forearray = self.foredf.values.tolist()
i = 0
for element in forearray:
element.append(fore_pred[i])
element.append(fore_prob[i][1])
i +=1
df = pd.DataFrame(forearray)
df.to_csv(outputfile)
def saveScale(self):
"""Persist the fitted scaler to disk."""
print("saveScale")
joblib.dump(self.std_scale, 'Models/Scaler.pkl')
def loadScale(self):
"""Load a previously saved scaler."""
# NOTE(review): saveScale writes 'Models/Scaler.pkl' but this reads
# 'Scaler.pkl' — the paths look inconsistent; confirm the intended
# working directory. Also sklearn.externals.joblib was removed in
# modern sklearn; the standalone joblib package is the replacement.
print("loadScale")
self.std_scale = joblib.load('Scaler.pkl')
| {
"content_hash": "2b6531aa7b179dc3f91b26ec55c7d997",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 124,
"avg_line_length": 30.13821138211382,
"alnum_prop": 0.5848394928513623,
"repo_name": "TheProgrammingDuck/Europa-Challenge",
"id": "6e27b8c8e8a92d147842bed25f18fa52340a3153",
"size": "3707",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Site/WPSsite/Standardiser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "19508"
},
{
"name": "HTML",
"bytes": "440631"
},
{
"name": "JavaScript",
"bytes": "8759"
},
{
"name": "Jupyter Notebook",
"bytes": "58850"
},
{
"name": "Python",
"bytes": "73808"
}
],
"symlink_target": ""
} |
# VM regression fixture: the bare `1/0` must raise ZeroDivisionError
# outside any try block, so execution never reaches the lines after it
# (err stays set, doc stays "Exception not in block").
doc="Exception not in block"
err=ZeroDivisionError()
1/0
err=None
doc="finished"
| {
"content_hash": "4305834ab376a94807b034c49af30b8e",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 28,
"avg_line_length": 13.666666666666666,
"alnum_prop": 0.7560975609756098,
"repo_name": "go-python/gpython",
"id": "26f7a7e6adc257e5d90c383878c71c8a650d88ac",
"size": "247",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "vm/tests/raise2.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1205"
},
{
"name": "Go",
"bytes": "1479898"
},
{
"name": "HTML",
"bytes": "1653"
},
{
"name": "JavaScript",
"bytes": "13418"
},
{
"name": "Makefile",
"bytes": "227"
},
{
"name": "Python",
"bytes": "301848"
},
{
"name": "Shell",
"bytes": "1276"
},
{
"name": "Yacc",
"bytes": "36569"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import unittest
import mock
import pytest
from artman.tasks import protoc_tasks, package_metadata_tasks
from artman.pipelines import code_generation
from artman.pipelines import batch_generation
from artman.pipelines import grpc_generation
class GrpcClientPipelineTests(unittest.TestCase):
"""Tests construction of grpc_generation.GrpcClientPipeline."""
@mock.patch.object(grpc_generation, 'get_grpc_task_factory')
@mock.patch.object(code_generation.CodeGenerationPipelineBase, '__init__')
def test_constructor(self, cgpb, ggtf):
"""The pipeline forwards its grpc task factory and kwargs to the base."""
grpc_generation.GrpcClientPipeline(foo='bar')
cgpb.assert_called_once_with(ggtf(), foo='bar')
class ProtoClientPipelineTests(unittest.TestCase):
"""Tests construction of grpc_generation.ProtoClientPipeline."""
@mock.patch.object(grpc_generation, 'get_proto_task_factory')
@mock.patch.object(code_generation.CodeGenerationPipelineBase, '__init__')
def test_constructor(self, cgpb, gptf):
"""The pipeline forwards its proto task factory and kwargs to the base."""
grpc_generation.ProtoClientPipeline(foo='bar')
cgpb.assert_called_once_with(gptf(), foo='bar')
class GrpcTaskFactoryBaseTests(unittest.TestCase):
"""Tests GrpcTaskFactoryBase kwarg validation and task construction."""
def setUp(self):
self._gtfb = grpc_generation.GrpcTaskFactoryBase()
def test_get_validate_kwargs(self):
"""Required kwargs are exactly the shared COMMON_REQUIRED list."""
COMMON_REQUIRED = code_generation.COMMON_REQUIRED
assert self._gtfb.get_validate_kwargs() == COMMON_REQUIRED
def test_get_invalid_kwargs(self):
"""The base factory treats no kwargs as invalid."""
assert self._gtfb.get_invalid_kwargs() == []
def test_get_tasks(self):
"""get_tasks yields a GrpcPackmanTask instance."""
expected = [protoc_tasks.GrpcPackmanTask]
actual = self._gtfb.get_tasks(publish='noop')
for task, class_ in zip(actual, expected):
assert isinstance(task, class_)
class GrpcClientBatchPipelineTests(unittest.TestCase):
"""Tests construction of grpc_generation.GrpcClientBatchPipeline."""
@mock.patch.object(grpc_generation, '_make_grpc_batch_pipeline_tasks')
@mock.patch.object(batch_generation.BatchPipeline, '__init__')
def test_constructor(self, bp, mgbpt):
"""The batch pipeline passes its task-maker callable to BatchPipeline."""
grpc_generation.GrpcClientBatchPipeline(foo='bar')
bp.assert_called_once_with(mgbpt, foo='bar')
class ProtoClientBatchPipelineTests(unittest.TestCase):
"""Tests construction of grpc_generation.ProtoClientBatchPipeline."""
@mock.patch.object(grpc_generation, '_make_proto_batch_pipeline_tasks')
@mock.patch.object(batch_generation.BatchPipeline, '__init__')
def test_constructor(self, bp, mpbpt):
"""The batch pipeline passes its task-maker callable to BatchPipeline."""
grpc_generation.ProtoClientBatchPipeline(foo='bar')
bp.assert_called_once_with(mpbpt, foo='bar')
class MakeGrpcBatchPipelineTasksTest(unittest.TestCase):
"""Tests _make_grpc_batch_pipeline_tasks language handling."""
def test_make_java(self):
"""For Java, the expected proto+grpc+metadata task sequence is built."""
expected = [
protoc_tasks.ProtoDescGenTask,
protoc_tasks.ProtoCodeGenTask,
protoc_tasks.GrpcCodeGenTask,
package_metadata_tasks.PackageMetadataConfigGenTask,
package_metadata_tasks.ProtoPackageMetadataGenTask,
package_metadata_tasks.GrpcPackageMetadataGenTask
]
actual = grpc_generation._make_grpc_batch_pipeline_tasks(language='java')
for task, class_ in zip(actual, expected):
assert isinstance(task, class_)
def test_no_language(self):
"""Omitting the language raises ValueError."""
with pytest.raises(ValueError):
grpc_generation._make_grpc_batch_pipeline_tasks()
def test_bad_language(self):
"""An unsupported language ('cpp') raises ValueError."""
with pytest.raises(ValueError):
grpc_generation._make_grpc_batch_pipeline_tasks(language='cpp')
class MakeProtoBatchPipelineTasksTest(unittest.TestCase):
"""Tests _make_proto_batch_pipeline_tasks language handling."""
def test_make_java(self):
"""For Java, the expected proto-only task sequence is built."""
expected = [
protoc_tasks.ProtoDescGenTask,
protoc_tasks.ProtoCodeGenTask,
package_metadata_tasks.PackageMetadataConfigGenTask,
package_metadata_tasks.ProtoPackageMetadataGenTask,
protoc_tasks.JavaProtoCopyTask,
]
actual = grpc_generation._make_proto_batch_pipeline_tasks(language='java')
for task, class_ in zip(actual, expected):
assert isinstance(task, class_)
def test_no_language(self):
"""Omitting the language raises ValueError."""
with pytest.raises(ValueError):
grpc_generation._make_proto_batch_pipeline_tasks()
def test_bad_language(self):
"""An unsupported language ('python') raises ValueError."""
with pytest.raises(ValueError):
grpc_generation._make_proto_batch_pipeline_tasks(language='python')
| {
"content_hash": "aab95193bf3a41a37a11e88a5372810d",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 82,
"avg_line_length": 38.367924528301884,
"alnum_prop": 0.6960904843865257,
"repo_name": "ethanbao/artman",
"id": "a3538e7e4115faa9fb54e7dc411ebe3c252188ab",
"size": "4637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/pipelines/test_grpc_generation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PHP",
"bytes": "1359"
},
{
"name": "Python",
"bytes": "319976"
}
],
"symlink_target": ""
} |
import numpy as np
import random
import json
import cma
from es import SimpleGA, CMAES, PEPG, OpenES
from env import make_env
def sigmoid(x):
    """Elementwise logistic sigmoid: 1 / (1 + exp(-x))."""
    denom = 1 + np.exp(-x)
    return 1 / denom
def relu(x):
    """Rectified linear unit, elementwise: max(x, 0)."""
    rectified = np.maximum(0, x)
    return rectified
def passthru(x):
    """Identity activation: returns its input unchanged."""
    return x
# useful for discrete actions
def softmax(x):
    """Numerically stable softmax along axis 0 (shift by the max first)."""
    shifted = x - np.max(x)
    exps = np.exp(shifted)
    return exps / exps.sum(axis=0)
# useful for discrete actions
def sample(p):
    """Draw one index from the categorical distribution given by p."""
    one_hot_draw = np.random.multinomial(1, p)
    return np.argmax(one_hot_draw)
"""
learning the model
"""
class RNNCell:
    """Vanilla tanh RNN cell for numpy-only inference.

    The weight matrix acts on the concatenation [x, h]; one call
    returns the next hidden state tanh([x, h] @ W + b).
    """

    def __init__(self, input_size, weight, bias):
        self.input_size = input_size
        self.weight = weight
        self.bias = bias

    def __call__(self, x, h):
        stacked = np.concatenate((x, h), axis=1)
        pre_activation = np.matmul(stacked, self.weight) + self.bias
        return np.tanh(pre_activation)
# LSTM in a few lines of numpy
class LSTMCell:
    """Numpy LSTM cell used for inference only.

    `weight` is the stacked input+recurrent matrix
    (np.concatenate((Wxh, Whh), axis=0)) producing the four gates
    [i, g, f, o] in one matmul; `forget_bias` is added to the forget
    gate pre-activation (TensorFlow convention, default 1.0).
    """

    def __init__(self, input_size, weight, bias, forget_bias=1.0):
        self.input_size = input_size
        self.W_full = weight  # np.concatenate((Wxh, Whh), axis=0)
        self.bias = bias
        # Bug fix: this was hard-coded to 1.0, silently ignoring the
        # forget_bias argument. Store the actual parameter instead
        # (default unchanged, so existing callers behave the same).
        self.forget_bias = forget_bias

    def __call__(self, x, h, c):
        """One step: returns (new_h, new_c) for input x and state (h, c)."""
        concat = np.concatenate((x, h), axis=1)
        hidden = np.matmul(concat, self.W_full) + self.bias
        i, g, f, o = np.split(hidden, 4, axis=1)
        i = sigmoid(i)
        g = np.tanh(g)
        f = sigmoid(f + self.forget_bias)
        o = sigmoid(o)
        new_c = np.multiply(c, f) + np.multiply(g, i)
        new_h = np.multiply(np.tanh(new_c), o)
        return new_h, new_c
class RNNModel:
"""Recurrent policy: an RNNCell plus a two-layer tanh action head.
Parameters are injected externally via set_model_params (a single
flat vector), not trained here."""
def __init__(self, game):
# `game` is expected to expose env_name, layers, input_size and
# output_size — confirm against the game config object used here.
self.env_name = game.env_name
self.hidden_size = game.layers[0]
self.layer_1 = game.layers[1]
# layer_2 is stored but never referenced again in this class.
self.layer_2 = game.layers[2]
self.rnn_mode = True
self.input_size = game.input_size
self.output_size = game.output_size
self.render_mode = False
self.shapes = [ (self.input_size + self.hidden_size, 1*self.hidden_size), # RNN weights
(self.input_size + self.hidden_size, self.layer_1),# predict actions output
(self.layer_1, self.output_size)] # predict actions output
self.weight = []
self.bias = []
self.param_count = 0
idx = 0
for shape in self.shapes:
self.weight.append(np.zeros(shape=shape))
self.bias.append(np.zeros(shape=shape[1]))
self.param_count += (np.product(shape) + shape[1])
idx += 1
self.init_h = np.zeros((1, self.hidden_size))
self.h = self.init_h
# The initial hidden state is itself part of the parameter vector.
self.param_count += 1*self.hidden_size
self.rnn = RNNCell(self.input_size, self.weight[0], self.bias[0])
def reset(self):
"""Reset the recurrent state to the initial hidden state."""
self.h = self.init_h
def make_env(self, seed=-1, render_mode=False):
"""Instantiate the environment this model is evaluated in."""
self.render_mode = render_mode
self.env = make_env(self.env_name, seed=seed, render_mode=render_mode)
def get_action(self, real_obs):
"""Advance the RNN one step on the observation; return a tanh action."""
# NOTE(review): the hard-coded reshape assumes a 3-dimensional
# observation — confirm for the configured environment.
obs = real_obs.reshape(1, 3)
# update rnn:
#update_obs = np.concatenate([obs, action], axis=1)
self.h = self.rnn(obs, self.h)
# get action
total_obs = np.concatenate([obs, self.h], axis=1)
# calculate action using 2 layer network from output
hidden = np.tanh(np.matmul(total_obs, self.weight[1]) + self.bias[1])
action = np.tanh(np.matmul(hidden, self.weight[2]) + self.bias[2])
return action[0]
def set_model_params(self, model_params):
"""Unpack a flat parameter vector into weights, biases and init_h."""
pointer = 0
for i in range(len(self.shapes)):
w_shape = self.shapes[i]
b_shape = self.shapes[i][1]
s_w = np.product(w_shape)
s = s_w + b_shape
chunk = np.array(model_params[pointer:pointer+s])
self.weight[i] = chunk[:s_w].reshape(w_shape)
self.bias[i] = chunk[s_w:].reshape(b_shape)
pointer += s
# rnn states
s = self.hidden_size
self.init_h = model_params[pointer:pointer+s].reshape((1, self.hidden_size))
self.h = self.init_h
# Rebuild the cell so it sees the freshly assigned weight/bias arrays.
self.rnn = RNNCell(self.input_size, self.weight[0], self.bias[0])
def load_model(self, filename):
"""Load parameters from a JSON file whose first element is the flat vector."""
with open(filename) as f:
data = json.load(f)
print('loading file %s' % (filename))
self.data = data
model_params = np.array(data[0]) # assuming other stuff is in data
self.set_model_params(model_params)
def get_random_model_params(self, stdev=0.1):
"""Gaussian random parameter vector of length param_count."""
return np.random.randn(self.param_count)*stdev
| {
"content_hash": "abc698a6b8afd6ce23a07d4f4d21cbe8",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 95,
"avg_line_length": 27.37908496732026,
"alnum_prop": 0.6268799236094533,
"repo_name": "google/brain-tokyo-workshop",
"id": "e729e78cbbf98432eb4fe42e17e0e8c3c1a64290",
"size": "4229",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "learntopredict/carracing/nn.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "671"
},
{
"name": "HTML",
"bytes": "1031"
},
{
"name": "Jupyter Notebook",
"bytes": "47079538"
},
{
"name": "Python",
"bytes": "1037153"
},
{
"name": "Shell",
"bytes": "6053"
}
],
"symlink_target": ""
} |
import autograd.numpy as np
from numpy import testing as np_testing
from pymanopt.manifolds import Euclidean
from ._manifold_tests import ManifoldTestCase
class TestEuclideanManifold(ManifoldTestCase):
def setUp(self):
self.m = m = 10
self.n = n = 5
self.manifold = Euclidean(m, n)
super().setUp()
def test_dim(self):
assert self.manifold.dim == self.m * self.n
def test_typical_dist(self):
np_testing.assert_almost_equal(
self.manifold.typical_dist, np.sqrt(self.m * self.n)
)
def test_dist(self):
e = self.manifold
x, y = np.random.normal(size=(2, self.m, self.n))
np_testing.assert_almost_equal(e.dist(x, y), np.linalg.norm(x - y))
def test_inner_product(self):
e = self.manifold
x = e.random_point()
y = e.random_tangent_vector(x)
z = e.random_tangent_vector(x)
np_testing.assert_almost_equal(np.sum(y * z), e.inner_product(x, y, z))
def test_projection(self):
e = self.manifold
x = e.random_point()
u = e.random_tangent_vector(x)
np_testing.assert_allclose(e.projection(x, u), u)
def test_euclidean_to_riemannian_hessian(self):
e = self.manifold
x = e.random_point()
u = e.random_tangent_vector(x)
egrad, ehess = np.random.normal(size=(2, self.m, self.n))
np_testing.assert_allclose(
e.euclidean_to_riemannian_hessian(x, egrad, ehess, u), ehess
)
def test_retraction(self):
e = self.manifold
x = e.random_point()
u = e.random_tangent_vector(x)
np_testing.assert_allclose(e.retraction(x, u), x + u)
def test_euclidean_to_riemannian_gradient(self):
e = self.manifold
x = e.random_point()
u = e.random_tangent_vector(x)
np_testing.assert_allclose(e.euclidean_to_riemannian_gradient(x, u), u)
def test_first_order_function_approximation(self):
self.run_gradient_approximation_test()
def test_second_order_function_approximation(self):
self.run_hessian_approximation_test()
def test_norm(self):
e = self.manifold
x = e.random_point()
u = np.random.normal(size=(self.m, self.n))
np_testing.assert_almost_equal(np.sqrt(np.sum(u**2)), e.norm(x, u))
def test_random_point(self):
e = self.manifold
x = e.random_point()
y = e.random_point()
assert np.shape(x) == (self.m, self.n)
assert np.linalg.norm(x - y) > 1e-6
def test_random_tangent_vector(self):
e = self.manifold
x = e.random_point()
u = e.random_tangent_vector(x)
v = e.random_tangent_vector(x)
assert np.shape(u) == (self.m, self.n)
np_testing.assert_almost_equal(np.linalg.norm(u), 1)
assert np.linalg.norm(u - v) > 1e-6
def test_transport(self):
e = self.manifold
x = e.random_point()
y = e.random_point()
u = e.random_tangent_vector(x)
np_testing.assert_allclose(e.transport(x, y, u), u)
def test_exp_log_inverse(self):
s = self.manifold
X = s.random_point()
Y = s.random_point()
Yexplog = s.exp(X, s.log(X, Y))
np_testing.assert_array_almost_equal(Y, Yexplog)
def test_log_exp_inverse(self):
s = self.manifold
X = s.random_point()
U = s.random_tangent_vector(X)
Ulogexp = s.log(X, s.exp(X, U))
np_testing.assert_array_almost_equal(U, Ulogexp)
def test_pair_mean(self):
    """The pair mean is equidistant from both input points."""
    manifold = self.manifold
    x, y = manifold.random_point(), manifold.random_point()
    midpoint = manifold.pair_mean(x, y)
    np_testing.assert_array_almost_equal(
        manifold.dist(x, midpoint), manifold.dist(y, midpoint)
    )
| {
"content_hash": "6a7f7b01f8505d0b5ccbf56471586df3",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 79,
"avg_line_length": 31.872881355932204,
"alnum_prop": 0.5865461313480458,
"repo_name": "pymanopt/pymanopt",
"id": "5f273298ea1aa9dd5f4d8199312d166a3b65a4d2",
"size": "3761",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/manifolds/test_euclidean.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "340544"
},
{
"name": "Shell",
"bytes": "638"
}
],
"symlink_target": ""
} |
"""
Task
You are given an HTML code snippet of N lines.
Your task is to print start tags, end tags and empty tags separately.
Format for printing the result is:
Start : Tag1
End : Tag1
Start : Tag2
-> Attribute2[0] > Attribute_value2[0]
-> Attribute2[1] > Attribute_value2[1]
-> Attribute2[2] > Attribute_value2[2]
Start : Tag3
-> Attribute3[0] > None
Empty : Tag4
-> Attribute4[0] > Attribute_value4[0]
End : Tag3
End : Tag2
-> symbol indicates that tag contains an attribute. It is immediately followed by the name of attribute and attribute value.
> symbol acts as a separator of attribute and attribute value.
If an HTML tag has no attribute then simply print the name of the tag.
If an attribute has no attribute value then simply print the name of attribute value as None.
Note: Do not detect any HTML tag, attribute and attribute value, inside the HTML comment tags (<!-- Comments -->).Comments can be multiline also.
Input Format
First line contains, integer N, number of lines in HTML code snippet.
Next N lines contain, HTML code.
Constraints
0<N<100
Output Format
Print the HTML tags, attributes and attribute values in order of their occurence from top to bottom in the snippet.
Ensure proper formatting, as explained in the problem statement.
Sample Input
2
<html><head><title>HTML Parser - I</title></head>
<body data-modal-target class='1'><h1>HackerRank</h1><br /></body></html>
Sample Output
Start : html
Start : head
Start : title
End : title
End : head
Start : body
-> data-modal-target > None
-> class > 1
Start : h1
End : h1
Empty : br
End : body
End : html
"""
# NOTE(review): this is Python 2 code -- `print` statements, `raw_input`
# and the `HTMLParser` module name are all Python-2-only.
from HTMLParser import HTMLParser


class MyHTMLParser(HTMLParser):
    """Prints every start/end/empty tag with its attributes as parsed."""

    def handle_starttag(self, tag, attrs):
        # `attrs` is a list of (name, value) pairs; value is None when the
        # attribute has no value (e.g. `data-modal-target`).
        print "Start :", tag
        for i in attrs:
            print "->", i[0], ">", i[1]

    def handle_endtag(self, tag):
        print "End   :", tag

    def handle_startendtag(self, tag, attrs):
        # Self-closing tags such as <br />.
        print "Empty :", tag
        for i in attrs:
            print "->", i[0], ">", i[1]


# Read N lines of HTML, strip <!-- ... --> comments (possibly multi-line),
# and feed the remaining markup to the parser as a single string.
s = ""
comment = False  # True while inside a multi-line comment
for _ in range(int(raw_input())):
    temp = raw_input()
    if temp.find("<!--") != -1 and temp.find("-->") != -1:
        # Comment opens and closes on the same line: keep only the text
        # preceding it.
        # NOTE(review): markup AFTER the closing --> on the same line is
        # dropped too -- a latent bug for input like "<!-- c --><p>".
        temp = temp[:temp.find("<!--")]
    elif temp.find("<!--") != -1:
        # Comment opens here and continues on later lines.
        # NOTE(review): text before the "<!--" on this line is also
        # discarded by the `not comment` check below.
        comment = True
    elif temp.find("-->") != -1:
        comment = False
        # NOTE(review): `continue` discards any markup after the closing
        # --> on this line.
        continue
    if temp.strip() and not comment:
        s += temp
parser = MyHTMLParser()
parser.feed(s)
"content_hash": "567154e2655c88b62683359165ef6d54",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 145,
"avg_line_length": 26.32608695652174,
"alnum_prop": 0.6581337737407101,
"repo_name": "spradeepv/dive-into-python",
"id": "c270c4f80c5327287a890c6a1214e697ecaf375f",
"size": "2422",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hackerrank/domain/python/regex/html_parser_1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "530165"
}
],
"symlink_target": ""
} |
from . import perma_base
| {
"content_hash": "8309fc1862594cb3914d38154b60d375",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 24,
"avg_line_length": 25,
"alnum_prop": 0.76,
"repo_name": "permamodel/permamodel",
"id": "45984ddfe4d122e78d176c1538db7773807e558f",
"size": "25",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "permamodel/components/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "263623"
},
{
"name": "Python",
"bytes": "382478"
},
{
"name": "Shell",
"bytes": "5013"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from etsdevtools.developer.helper.fbibp import *
| {
"content_hash": "c0e20030e62b320632a9c8dd63958157",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 48,
"avg_line_length": 44,
"alnum_prop": 0.8068181818181818,
"repo_name": "enthought/etsproxy",
"id": "445c02b4011a7056a8efba7bea4dc73ff0a1de7e",
"size": "103",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enthought/developer/helper/fbibp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "363714"
}
],
"symlink_target": ""
} |
import os
import ctypes
from ctypes.util import find_library

# Locate the system SSL library.  AES support is optional: when libssl
# cannot be found, `AES` is exported as None and callers must check for it.
lib = find_library('ssl')
if not lib:
    AES = None
else:
    # The bare string below is kept purely as in-source documentation of
    # the OpenSSL <aes.h> C API wrapped via ctypes.
    """ <aes.h>
    # define AES_ENCRYPT 1
    # define AES_DECRYPT 0
    # define AES_MAXNR 14
    struct aes_key_st {
    # ifdef AES_LONG
    unsigned long rd_key[4 * (AES_MAXNR + 1)];
    # else
    unsigned int rd_key[4 * (AES_MAXNR + 1)];
    # endif
    int rounds;
    };
    typedef struct aes_key_st AES_KEY;
    int AES_set_encrypt_key(const unsigned char *userKey, const int bits,
    AES_KEY *key);
    int AES_set_decrypt_key(const unsigned char *userKey, const int bits,
    AES_KEY *key);
    void AES_ige_encrypt(const unsigned char *in, unsigned char *out,
    size_t length, const AES_KEY *key,
    unsigned char *ivec, const int enc);
    """
    _libssl = ctypes.cdll.LoadLibrary(lib)

    AES_MAXNR = 14
    # Direction flags for AES_ige_encrypt (see the header excerpt above).
    AES_ENCRYPT = ctypes.c_int(1)
    AES_DECRYPT = ctypes.c_int(0)

    class AES_KEY(ctypes.Structure):
        """ctypes mirror of OpenSSL's `struct aes_key_st` (non-AES_LONG build)."""
        _fields_ = [
            ('rd_key', ctypes.c_uint32 * (4*(AES_MAXNR + 1))),
            ('rounds', ctypes.c_uint),
        ]

    class AES:
        """AES-IGE encryption/decryption backed by the system libssl."""

        @staticmethod
        def decrypt_ige(cipher_text, key, iv):
            """Decrypt `cipher_text` with `key` and `iv` using AES in IGE mode.

            NOTE(review): assumes len(cipher_text) is a multiple of the
            16-byte AES block size -- confirm at the call sites.
            """
            aeskey = AES_KEY()
            ckey = (ctypes.c_ubyte * len(key))(*key)
            # OpenSSL expects the key length in bits.
            cklen = ctypes.c_int(len(key)*8)
            cin = (ctypes.c_ubyte * len(cipher_text))(*cipher_text)
            ctlen = ctypes.c_size_t(len(cipher_text))
            cout = (ctypes.c_ubyte * len(cipher_text))()
            civ = (ctypes.c_ubyte * len(iv))(*iv)
            _libssl.AES_set_decrypt_key(ckey, cklen, ctypes.byref(aeskey))
            # AES_ige_encrypt performs both directions; the last argument
            # selects decryption here.
            _libssl.AES_ige_encrypt(
                ctypes.byref(cin),
                ctypes.byref(cout),
                ctlen,
                ctypes.byref(aeskey),
                ctypes.byref(civ),
                AES_DECRYPT
            )
            return bytes(cout)

        @staticmethod
        def encrypt_ige(plain_text, key, iv):
            """Encrypt `plain_text` with `key` and `iv` using AES in IGE mode.

            The input is padded with random bytes up to a 16-byte boundary,
            so the returned ciphertext may be longer than the input.
            """
            # Add random padding iff it's not evenly divisible by 16 already
            if len(plain_text) % 16 != 0:
                padding_count = 16 - len(plain_text) % 16
                plain_text += os.urandom(padding_count)

            aeskey = AES_KEY()
            ckey = (ctypes.c_ubyte * len(key))(*key)
            # OpenSSL expects the key length in bits.
            cklen = ctypes.c_int(len(key)*8)
            cin = (ctypes.c_ubyte * len(plain_text))(*plain_text)
            ctlen = ctypes.c_size_t(len(plain_text))
            cout = (ctypes.c_ubyte * len(plain_text))()
            civ = (ctypes.c_ubyte * len(iv))(*iv)
            _libssl.AES_set_encrypt_key(ckey, cklen, ctypes.byref(aeskey))
            _libssl.AES_ige_encrypt(
                ctypes.byref(cin),
                ctypes.byref(cout),
                ctlen,
                ctypes.byref(aeskey),
                ctypes.byref(civ),
                AES_ENCRYPT
            )
            return bytes(cout)
| {
"content_hash": "c467e919445ef630ab6f8d47c1773974",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 76,
"avg_line_length": 33.02197802197802,
"alnum_prop": 0.5104825291181364,
"repo_name": "andr-04/Telethon",
"id": "f9917d71319a891da78fabe105f53da31fea5837",
"size": "3005",
"binary": false,
"copies": "1",
"ref": "refs/heads/asyncio",
"path": "telethon/crypto/libssl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "276159"
}
],
"symlink_target": ""
} |
import sys
import os
import random
import unittest
DIRNAME_MODULE = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0])))) + os.sep
sys.path.append(DIRNAME_MODULE)
sys.path.append(DIRNAME_MODULE + "pyss" + os.sep)
from pyss import pyssobject
from pyss.pyss_model import PyssModel
from pyss.segment import Segment
from pyss.generate import Generate
from pyss.terminate import Terminate
from pyss import logger
from pyss.table import Table
from pyss.handle import Handle
from pyss.enter import Enter
from pyss.leave import Leave
from pyss.assign import Assign
from pyss.storage import Storage
from pyss.advance import Advance
from pyss.preempt import Preempt
from pyss.g_return import GReturn
from pyss.facility import Facility
from pyss.seize import Seize
from pyss.release import Release
from pyss.transfer import Transfer
from pyss.test import Test
from pyss.queue import Queue
from pyss.depart import Depart
from pyss.split import Split
from pyss.pyss_const import *
class TestEnterLeave(unittest.TestCase):
    """Tests for the Split block: copy creation, routing and attributes.

    Comments translated from the original Russian.
    """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_init_001(self):
        # A Split block requires a segment: None must raise ErrorIsNone.
        with self.assertRaises(pyssobject.ErrorIsNone) as context:
            Split(None, funcCountCopies=2, funcNextBlockLabel="A", paramName=P1)

    def test_init_002(self):
        m = PyssModel(optionz=None)
        m[OPTIONS].setAllFalse()
        sgm = Segment(m)
        # Construction with a valid segment must succeed.
        Split(sgm, funcCountCopies=2, funcNextBlockLabel="A", paramName=P1)

    def test_001(self):
        """
        One transact is created.
        Passing through the split block produces one copy of the transact.
        The copy moves on to the next block.
        """
        logger.info("--- test_001 ----------------------------------")
        ### MODEL ----------------------------------
        m = PyssModel()
        sgm = Segment(m)
        #
        m[OPTIONS].setAllFalse()
        MAX_TIME = 20
        #
        list_all_transact = []

        # for test
        def funcTransactTo_list_all_transact(owner, transact):
            # collect transacts into a list
            list_all_transact.append(transact)

        ### SEGMENT ----------------------
        Generate(sgm, modificatorFunc=[0])
        # sgm.addBlock(advance.Advance())
        COPY_NUM = "COPY_NUM"
        Split(sgm, funcCountCopies=1, funcNextBlockLabel=None, paramName=COPY_NUM)
        # for test
        Handle(sgm, handlerFunc=funcTransactTo_list_all_transact)
        #
        Terminate(sgm, deltaTerminate=1)
        # RUN ----------------------
        m.start(terminationCount=MAX_TIME, maxTime=MAX_TIME)
        # CHECKS ----------------------
        parentTransact = None
        for i, t in enumerate(list_all_transact):
            if i == 0:
                # The original transact: no copy marker, no parent.
                parentTransact = t
                self.assertTrue(COPY_NUM not in t)
                self.assertEqual(t[ASSEMBLY_SET], 1)
                self.assertEqual(t[PARENT], None)
                self.assertEqual(t.strTrack(),
                    '[0]:[1]:[GENERATE]; [0]:[2]:[SPLIT]; [0]:[3]:[HANDLE]; [0]:[4]:[TERMINATE]')
            elif i == 1:
                # The copy: carries the copy index, parent link, and the
                # parent's NUM as its assembly set.
                self.assertTrue(COPY_NUM in t)
                self.assertEqual(t[COPY_NUM], 1)
                self.assertEqual(t[NUM], 2)
                self.assertEqual(t[ASSEMBLY_SET], parentTransact[NUM])  # pylint: disable=unsubscriptable-object
                self.assertEqual(t[PARENT], parentTransact)
                self.assertEqual(t[TIME_CREATED], 0)
                self.assertEqual(t.strTrack(),
                    "[0]:[3]:[HANDLE]; [0]:[4]:[TERMINATE]")

    def test_002(self):
        """
        One transact is created.
        It is delayed for 3 time units.
        Passing through the split block produces one copy of the transact.
        The copy moves on to the next block.
        The copy's creation time is 3.
        Its parent is transact 1.
        And so on.
        """
        logger.info("--- test_002 ----------------------------------")
        ### MODEL ----------------------------------
        m = PyssModel()
        sgm = Segment(m)
        #
        m[OPTIONS].setAllFalse()
        MAX_TIME = 20
        #
        list_all_transact = []

        # for test
        def funcTransactTo_list_all_transact(owner, transact):
            # collect transacts into a list
            list_all_transact.append(transact)

        ### SEGMENT ----------------------
        Generate(sgm, modificatorFunc=[0])
        Advance(sgm, meanTime=3)
        COPY_NUM = "COPY_NUM"
        Split(sgm, funcCountCopies=1, funcNextBlockLabel=None, paramName=COPY_NUM)
        # for test
        Handle(sgm, handlerFunc=funcTransactTo_list_all_transact)
        #
        Terminate(sgm, deltaTerminate=1)
        # RUN ----------------------
        m.start(terminationCount=MAX_TIME, maxTime=MAX_TIME)
        # CHECKS ----------------------
        parentTransact = None
        for i, t in enumerate(list_all_transact):
            if i == 0:
                parentTransact = t
                self.assertTrue(COPY_NUM not in t)
                self.assertEqual(t[ASSEMBLY_SET], 1)
                self.assertEqual(t[PARENT], None)
                self.assertEqual(t.strTrack(),
                    "[0]:[1]:[GENERATE]; [0]:[2]:[ADVANCE]; [3]:[3]:[SPLIT]; [3]:[4]:[HANDLE]; [3]:[5]:[TERMINATE]")
            elif i == 1:
                # The copy is created when the parent leaves the Advance,
                # hence TIME_CREATED == 3.
                self.assertTrue(COPY_NUM in t)
                self.assertEqual(t[COPY_NUM], 1)
                self.assertEqual(t[NUM], 2)
                self.assertEqual(t[ASSEMBLY_SET], parentTransact[NUM])  # pylint: disable=unsubscriptable-object
                self.assertEqual(t[PARENT], parentTransact)
                self.assertEqual(t[TIME_CREATED], 3)
                self.assertEqual(t.strTrack(), "[3]:[4]:[HANDLE]; [3]:[5]:[TERMINATE]")

    def test_003(self):
        """
        One transact is created.
        It is delayed for 3 time units.
        Passing through the split block produces one copy of the transact.
        The copy moves to the block labeled ALTERNATIVE.
        The parent transact proceeds to the next block.
        The copy's creation time is 3.
        Its parent is transact 1.
        And so on.
        """
        logger.info("--- test_003 ----------------------------------")
        ### MODEL ----------------------------------
        m = PyssModel()
        sgm = Segment(m)
        #
        m[OPTIONS].setAllFalse()
        m[OPTIONS].printResult = True
        MAX_TIME = 20
        #
        list_all_transact = []

        # for test
        def funcTransactTo_list_all_transact(owner, transact):
            # collect transacts into a list
            list_all_transact.append(transact)

        ### MODEL ----------------------
        Generate(sgm, modificatorFunc=[0])
        # for test
        Handle(sgm, handlerFunc=funcTransactTo_list_all_transact)
        #
        Advance(sgm, meanTime=3)
        COPY_NUM = "COPY_NUM"
        ALTERNATIVE = "ALTER"
        Split(sgm, funcCountCopies=1, funcNextBlockLabel=ALTERNATIVE, paramName=COPY_NUM)
        # only the parent transact reaches this point
        # for test
        MY_PARENT = "my_parent"

        def handlerFunc(o, t):
            t[MY_PARENT] = None

        Handle(sgm, handlerFunc=handlerFunc)
        #
        Terminate(sgm, deltaTerminate=1)
        # for test
        Handle(sgm, label=ALTERNATIVE, handlerFunc=funcTransactTo_list_all_transact)
        Terminate(sgm, deltaTerminate=1)
        # RUN ----------------------
        m.start(terminationCount=MAX_TIME, maxTime=MAX_TIME)
        # CHECKS ----------------------
        parentTransact = None
        for i, t in enumerate(list_all_transact):
            if i == 0:
                self.assertTrue(MY_PARENT in t, "MY_PARENT in t")
                parentTransact = t
                self.assertTrue(COPY_NUM not in t)
                # the parent gets ASSEMBLY_SET equal to its NUM
                self.assertEqual(t[ASSEMBLY_SET], 1)
                self.assertEqual(t[ASSEMBLY_SET], t[NUM])
                self.assertEqual(t[PARENT], None)
                self.assertEqual(t.strTrack(),
                    "[0]:[1]:[GENERATE]; [0]:[2]:[HANDLE]; [0]:[3]:[ADVANCE]; [3]:[4]:[SPLIT]; [3]:[5]:[HANDLE]; [3]:[6]:[TERMINATE]")
            elif i == 1:
                self.assertTrue(MY_PARENT not in t, "MY_PARENT in t")
                self.assertTrue(COPY_NUM in t)
                self.assertEqual(t[COPY_NUM], 1)
                self.assertEqual(t[NUM], 2)
                self.assertEqual(t[ASSEMBLY_SET], parentTransact[NUM])  # pylint: disable=unsubscriptable-object
                self.assertEqual(t[PARENT], parentTransact)
                self.assertEqual(t[TIME_CREATED], 3)
                self.assertEqual(t.strTrack(),
                    "[3]:[ALTER]:[HANDLE]; [3]:[8]:[TERMINATE]")

    def test_004(self):
        """
        One transact is created.
        It is delayed for 3 time units.
        Passing through the split block produces one copy of the transact.
        The copy moves to the block whose label is computed by the function
        `alter`.
        The copy's creation time is 3.
        Its parent is transact 1.
        And so on.
        """
        logger.info("--- test_004 ----------------------------------")
        ### MODEL ----------------------------------
        m = PyssModel()
        sgm = Segment(m)
        #
        m[OPTIONS].setAllFalse()
        #
        list_all_transact = []
        #
        MAX_TIME = 20

        # for test
        def funcTransactTo_list_all_transact(owner, transact):
            # collect transacts into a list
            list_all_transact.append(transact)

        ### MODEL ----------------------
        Generate(sgm, modificatorFunc=[0])
        Advance(sgm, meanTime=3)
        COPY_NUM = "COPY_NUM"
        ALTERNATIVE = "ALTER"

        def alter(t):
            # funcNextBlockLabel may be a callable computing the target label.
            return ALTERNATIVE

        Split(sgm, funcCountCopies=1, funcNextBlockLabel=alter, paramName=COPY_NUM)
        # for test
        Handle(sgm, handlerFunc=funcTransactTo_list_all_transact)
        #
        Terminate(sgm, deltaTerminate=1)
        Terminate(sgm, deltaTerminate=1, label=ALTERNATIVE)
        # RUN ----------------------
        m.start(terminationCount=MAX_TIME, maxTime=MAX_TIME)
        # CHECKS ----------------------
        parentTransact = None
        for i, t in enumerate(list_all_transact):
            if i == 0:
                parentTransact = t
                self.assertTrue(COPY_NUM not in t)
                self.assertEqual(t[ASSEMBLY_SET], 1)
                self.assertEqual(t[PARENT], None)
                self.assertEqual(t.strTrack(),
                    "[0]:[1]:[GENERATE]; [0]:[2]:[ADVANCE]; [3]:[3]:[SPLIT]; [3]:[4]:[HANDLE]; [3]:[5]:[TERMINATE]")
            elif i == 1:
                self.assertTrue(COPY_NUM in t)
                self.assertEqual(t[COPY_NUM], 1)
                self.assertEqual(t[NUM], 2)
                self.assertEqual(t[ASSEMBLY_SET], parentTransact[NUM])  # pylint: disable=unsubscriptable-object
                self.assertEqual(t[PARENT], parentTransact)
                self.assertEqual(t[TIME_CREATED], 3)
                # NOTE(review): the expected format here ("[3.000000]" and
                # the literal "[ALTERNATIVE]") differs from the other tests'
                # track strings -- confirm against Transact.strTrack().
                self.assertEqual(t.strTrack(), "[3.000000]:[6]:[HANDLE]; [3.000000]:[ALTERNATIVE]:[TERMINATE]")
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main(module="test_split")
| {
"content_hash": "6e91583840bb3cc876280460537deb62",
"timestamp": "",
"source": "github",
"line_count": 334,
"max_line_length": 147,
"avg_line_length": 34.40119760479042,
"alnum_prop": 0.5378590078328982,
"repo_name": "vpv11110000/pyss",
"id": "4c79647ed360906584940289089db5bf8b2f7f3d",
"size": "12454",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_split.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "875"
},
{
"name": "Python",
"bytes": "545152"
},
{
"name": "Shell",
"bytes": "42"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""code generator for GLES2 command buffers."""
import itertools
import os
import os.path
import sys
import re
from optparse import OptionParser
# Sizes in bytes used when computing command-buffer layouts.
_SIZE_OF_UINT32 = 4
_SIZE_OF_COMMAND_HEADER = 4
# First command id available to GLES2-specific commands; ids below this are
# presumably reserved for common commands -- confirm in the command buffer
# headers.
_FIRST_SPECIFIC_COMMAND_ID = 256
_LICENSE = """// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
"""
_DO_NOT_EDIT_WARNING = """// This file is auto-generated from
// gpu/command_buffer/build_gles2_cmd_buffer.py
// DO NOT EDIT!
"""
# This string is copied directly out of the gl2.h file from GLES2.0
#
# Edits:
#
# *) Any argument that is a resourceID has been changed to GLid<Type>.
#    (not pointer arguments) and if it's allowed to be zero it's
#    GLidZero<Type> If it's allowed to not exist it's GLidBind<Type>
#
# *) All GLenums have been changed to GLenumTypeOfEnum
#
# Maps each GL typedef name to the underlying C type used when emitting
# generated code.
_GL_TYPES = {
  'GLenum': 'unsigned int',
  'GLboolean': 'unsigned char',
  'GLbitfield': 'unsigned int',
  'GLbyte': 'signed char',
  'GLshort': 'short',
  'GLint': 'int',
  'GLsizei': 'int',
  'GLubyte': 'unsigned char',
  'GLushort': 'unsigned short',
  'GLuint': 'unsigned int',
  'GLfloat': 'float',
  'GLclampf': 'float',
  'GLvoid': 'void',
  'GLfixed': 'int',
  'GLclampx': 'int',
  'GLintptr': 'long int',
  'GLsizeiptr': 'long int',
}
# Capabilities selected with glEnable.  Each entry may also carry:
#   'default':    True when the capability is initially enabled
#                 (all others default to disabled).
#   'state_flag': a decoder dirty flag to raise when the capability changes.
_CAPABILITY_FLAGS = [
  {'name': 'blend'},
  {'name': 'cull_face'},
  {'name': 'depth_test', 'state_flag': 'clear_state_dirty_'},
  {'name': 'dither', 'default': True},
  {'name': 'polygon_offset_fill'},
  {'name': 'sample_alpha_to_coverage'},
  {'name': 'sample_coverage'},
  {'name': 'scissor_test', 'state_flag': 'clear_state_dirty_'},
  {'name': 'stencil_test', 'state_flag': 'clear_state_dirty_'},
]
# GL state table, grouped by the GL call that sets each group.  Schema:
#   'type':       'Normal' (one call sets every value), or 'SrcDst' /
#                 'FrontBack' (state split across a *Separate-style call).
#   'func':       the GL function used to apply/restore the group.
#   'enum':       the GLenum queried for the group (or per-state 'enum').
#   'states':     individual values: name, GL type, default, and optionally
#                 'enum', 'expected', 'range_checks'.
#   'state_flag': a decoder dirty flag to raise when the group changes.
_STATES = {
  'ClearColor': {
    'type': 'Normal',
    'func': 'ClearColor',
    'enum': 'GL_COLOR_CLEAR_VALUE',
    'states': [
      {'name': 'color_clear_red', 'type': 'GLfloat', 'default': '0.0f'},
      {'name': 'color_clear_green', 'type': 'GLfloat', 'default': '0.0f'},
      {'name': 'color_clear_blue', 'type': 'GLfloat', 'default': '0.0f'},
      {'name': 'color_clear_alpha', 'type': 'GLfloat', 'default': '0.0f'},
    ],
  },
  'ClearDepthf': {
    'type': 'Normal',
    'func': 'ClearDepth',
    'enum': 'GL_DEPTH_CLEAR_VALUE',
    'states': [
      {'name': 'depth_clear', 'type': 'GLclampf', 'default': '1.0f'},
    ],
  },
  'ColorMask': {
    'type': 'Normal',
    'func': 'ColorMask',
    'enum': 'GL_COLOR_WRITEMASK',
    'states': [
      {'name': 'color_mask_red', 'type': 'GLboolean', 'default': 'true'},
      {'name': 'color_mask_green', 'type': 'GLboolean', 'default': 'true'},
      {'name': 'color_mask_blue', 'type': 'GLboolean', 'default': 'true'},
      {'name': 'color_mask_alpha', 'type': 'GLboolean', 'default': 'true'},
    ],
    'state_flag': 'clear_state_dirty_',
  },
  'ClearStencil': {
    'type': 'Normal',
    'func': 'ClearStencil',
    'enum': 'GL_STENCIL_CLEAR_VALUE',
    'states': [
      {'name': 'stencil_clear', 'type': 'GLint', 'default': '0'},
    ],
  },
  'BlendColor': {
    'type': 'Normal',
    'func': 'BlendColor',
    'enum': 'GL_BLEND_COLOR',
    'states': [
      {'name': 'blend_color_red', 'type': 'GLfloat', 'default': '0.0f'},
      {'name': 'blend_color_green', 'type': 'GLfloat', 'default': '0.0f'},
      {'name': 'blend_color_blue', 'type': 'GLfloat', 'default': '0.0f'},
      {'name': 'blend_color_alpha', 'type': 'GLfloat', 'default': '0.0f'},
    ],
  },
  'BlendEquation': {
    'type': 'SrcDst',
    'func': 'BlendEquationSeparate',
    'states': [
      {
        'name': 'blend_equation_rgb',
        'type': 'GLenum',
        'enum': 'GL_BLEND_EQUATION_RGB',
        'default': 'GL_FUNC_ADD',
      },
      {
        'name': 'blend_equation_alpha',
        'type': 'GLenum',
        'enum': 'GL_BLEND_EQUATION_ALPHA',
        'default': 'GL_FUNC_ADD',
      },
    ],
  },
  'BlendFunc': {
    'type': 'SrcDst',
    'func': 'BlendFuncSeparate',
    'states': [
      {
        'name': 'blend_source_rgb',
        'type': 'GLenum',
        'enum': 'GL_BLEND_SRC_RGB',
        'default': 'GL_ONE',
      },
      {
        'name': 'blend_dest_rgb',
        'type': 'GLenum',
        'enum': 'GL_BLEND_DST_RGB',
        'default': 'GL_ZERO',
      },
      {
        'name': 'blend_source_alpha',
        'type': 'GLenum',
        'enum': 'GL_BLEND_SRC_ALPHA',
        'default': 'GL_ONE',
      },
      {
        'name': 'blend_dest_alpha',
        'type': 'GLenum',
        'enum': 'GL_BLEND_DST_ALPHA',
        'default': 'GL_ZERO',
      },
    ],
  },
  'PolygonOffset': {
    'type': 'Normal',
    'func': 'PolygonOffset',
    'states': [
      {
        'name': 'polygon_offset_factor',
        'type': 'GLfloat',
        'enum': 'GL_POLYGON_OFFSET_FACTOR',
        'default': '0.0f',
      },
      {
        'name': 'polygon_offset_units',
        'type': 'GLfloat',
        'enum': 'GL_POLYGON_OFFSET_UNITS',
        'default': '0.0f',
      },
    ],
  },
  'CullFace': {
    'type': 'Normal',
    'func': 'CullFace',
    'enum': 'GL_CULL_FACE_MODE',
    'states': [
      {
        'name': 'cull_mode',
        'type': 'GLenum',
        'default': 'GL_BACK',
      },
    ],
  },
  'FrontFace': {
    'type': 'Normal',
    'func': 'FrontFace',
    'enum': 'GL_FRONT_FACE',
    'states': [{'name': 'front_face', 'type': 'GLenum', 'default': 'GL_CCW'}],
  },
  'DepthFunc': {
    'type': 'Normal',
    'func': 'DepthFunc',
    'enum': 'GL_DEPTH_FUNC',
    'states': [{'name': 'depth_func', 'type': 'GLenum', 'default': 'GL_LESS'}],
  },
  'DepthRange': {
    'type': 'Normal',
    'func': 'DepthRange',
    'enum': 'GL_DEPTH_RANGE',
    'states': [
      {'name': 'z_near', 'type': 'GLclampf', 'default': '0.0f'},
      {'name': 'z_far', 'type': 'GLclampf', 'default': '1.0f'},
    ],
  },
  'SampleCoverage': {
    'type': 'Normal',
    'func': 'SampleCoverage',
    'states': [
      {
        'name': 'sample_coverage_value',
        'type': 'GLclampf',
        'enum': 'GL_SAMPLE_COVERAGE_VALUE',
        'default': '1.0f',
      },
      {
        'name': 'sample_coverage_invert',
        'type': 'GLboolean',
        'enum': 'GL_SAMPLE_COVERAGE_INVERT',
        'default': 'false',
      },
    ],
  },
  'StencilMask': {
    'type': 'FrontBack',
    'func': 'StencilMaskSeparate',
    'state_flag': 'clear_state_dirty_',
    'states': [
      {
        'name': 'stencil_front_writemask',
        'type': 'GLuint',
        'enum': 'GL_STENCIL_WRITEMASK',
        'default': '0xFFFFFFFFU',
      },
      {
        'name': 'stencil_back_writemask',
        'type': 'GLuint',
        'enum': 'GL_STENCIL_BACK_WRITEMASK',
        'default': '0xFFFFFFFFU',
      },
    ],
  },
  'StencilOp': {
    'type': 'FrontBack',
    'func': 'StencilOpSeparate',
    'states': [
      {
        'name': 'stencil_front_fail_op',
        'type': 'GLenum',
        'enum': 'GL_STENCIL_FAIL',
        'default': 'GL_KEEP',
      },
      {
        'name': 'stencil_front_z_fail_op',
        'type': 'GLenum',
        'enum': 'GL_STENCIL_PASS_DEPTH_FAIL',
        'default': 'GL_KEEP',
      },
      {
        'name': 'stencil_front_z_pass_op',
        'type': 'GLenum',
        'enum': 'GL_STENCIL_PASS_DEPTH_PASS',
        'default': 'GL_KEEP',
      },
      {
        'name': 'stencil_back_fail_op',
        'type': 'GLenum',
        'enum': 'GL_STENCIL_BACK_FAIL',
        'default': 'GL_KEEP',
      },
      {
        'name': 'stencil_back_z_fail_op',
        'type': 'GLenum',
        'enum': 'GL_STENCIL_BACK_PASS_DEPTH_FAIL',
        'default': 'GL_KEEP',
      },
      {
        'name': 'stencil_back_z_pass_op',
        'type': 'GLenum',
        'enum': 'GL_STENCIL_BACK_PASS_DEPTH_PASS',
        'default': 'GL_KEEP',
      },
    ],
  },
  'StencilFunc': {
    'type': 'FrontBack',
    'func': 'StencilFuncSeparate',
    'states': [
      {
        'name': 'stencil_front_func',
        'type': 'GLenum',
        'enum': 'GL_STENCIL_FUNC',
        'default': 'GL_ALWAYS',
      },
      {
        'name': 'stencil_front_ref',
        'type': 'GLint',
        'enum': 'GL_STENCIL_REF',
        'default': '0',
      },
      {
        'name': 'stencil_front_mask',
        'type': 'GLuint',
        'enum': 'GL_STENCIL_VALUE_MASK',
        'default': '0xFFFFFFFFU',
      },
      {
        'name': 'stencil_back_func',
        'type': 'GLenum',
        'enum': 'GL_STENCIL_BACK_FUNC',
        'default': 'GL_ALWAYS',
      },
      {
        'name': 'stencil_back_ref',
        'type': 'GLint',
        'enum': 'GL_STENCIL_BACK_REF',
        'default': '0',
      },
      {
        'name': 'stencil_back_mask',
        'type': 'GLuint',
        'enum': 'GL_STENCIL_BACK_VALUE_MASK',
        'default': '0xFFFFFFFFU',
      },
    ],
  },
  # TODO: Consider implementing these states
  # GL_GENERATE_MIPMAP_HINT
  # GL_ACTIVE_TEXTURE,
  # GL_PACK_ALIGNMENT,
  # GL_UNPACK_ALIGNMENT
  'LineWidth': {
    'type': 'Normal',
    'func': 'LineWidth',
    'enum': 'GL_LINE_WIDTH',
    'states': [
      {
        'name': 'line_width',
        'type': 'GLfloat',
        'default': '1.0f',
        'range_checks': [{'check': "<= 0.0f", 'test_value': "0.0f"}],
      }],
  },
  'DepthMask': {
    'type': 'Normal',
    'func': 'DepthMask',
    'enum': 'GL_DEPTH_WRITEMASK',
    'states': [
      {'name': 'depth_mask', 'type': 'GLboolean', 'default': 'true'},
    ],
    'state_flag': 'clear_state_dirty_',
  },
  'Scissor': {
    'type': 'Normal',
    'func': 'Scissor',
    'enum': 'GL_SCISSOR_BOX',
    'states': [
      # NOTE: These defaults reset at GLES2DecoderImpl::Initialization.
      {
        'name': 'scissor_x',
        'type': 'GLint',
        'default': '0',
        'expected': 'kViewportX',
      },
      {
        'name': 'scissor_y',
        'type': 'GLint',
        'default': '0',
        'expected': 'kViewportY',
      },
      {
        'name': 'scissor_width',
        'type': 'GLsizei',
        'default': '1',
        'expected': 'kViewportWidth',
      },
      {
        'name': 'scissor_height',
        'type': 'GLsizei',
        'default': '1',
        'expected': 'kViewportHeight',
      },
    ],
  },
  'Viewport': {
    'type': 'Normal',
    'func': 'Viewport',
    'enum': 'GL_VIEWPORT',
    'states': [
      # NOTE: These defaults reset at GLES2DecoderImpl::Initialization.
      {
        'name': 'viewport_x',
        'type': 'GLint',
        'default': '0',
        'expected': 'kViewportX',
      },
      {
        'name': 'viewport_y',
        'type': 'GLint',
        'default': '0',
        'expected': 'kViewportY',
      },
      {
        'name': 'viewport_width',
        'type': 'GLsizei',
        'default': '1',
        'expected': 'kViewportWidth',
      },
      {
        'name': 'viewport_height',
        'type': 'GLsizei',
        'default': '1',
        'expected': 'kViewportHeight',
      },
    ],
  },
}
# This is a list of enum names and their valid values. It is used to map
# GLenum arguments to a specific set of valid values.
_ENUM_LISTS = {
'BlitFilter': {
'type': 'GLenum',
'valid': [
'GL_NEAREST',
'GL_LINEAR',
],
'invalid': [
'GL_LINEAR_MIPMAP_LINEAR',
],
},
'FrameBufferTarget': {
'type': 'GLenum',
'valid': [
'GL_FRAMEBUFFER',
],
'invalid': [
'GL_DRAW_FRAMEBUFFER' ,
'GL_READ_FRAMEBUFFER' ,
],
},
'RenderBufferTarget': {
'type': 'GLenum',
'valid': [
'GL_RENDERBUFFER',
],
'invalid': [
'GL_FRAMEBUFFER',
],
},
'BufferTarget': {
'type': 'GLenum',
'valid': [
'GL_ARRAY_BUFFER',
'GL_ELEMENT_ARRAY_BUFFER',
],
'invalid': [
'GL_RENDERBUFFER',
],
},
'BufferUsage': {
'type': 'GLenum',
'valid': [
'GL_STREAM_DRAW',
'GL_STATIC_DRAW',
'GL_DYNAMIC_DRAW',
],
'invalid': [
'GL_STATIC_READ',
],
},
'CompressedTextureFormat': {
'type': 'GLenum',
'valid': [
],
},
'GLState': {
'type': 'GLenum',
'valid': [
# NOTE: State an Capability entries added later.
'GL_ACTIVE_TEXTURE',
'GL_ALIASED_LINE_WIDTH_RANGE',
'GL_ALIASED_POINT_SIZE_RANGE',
'GL_ALPHA_BITS',
'GL_ARRAY_BUFFER_BINDING',
'GL_BLUE_BITS',
'GL_COMPRESSED_TEXTURE_FORMATS',
'GL_CURRENT_PROGRAM',
'GL_DEPTH_BITS',
'GL_DEPTH_RANGE',
'GL_ELEMENT_ARRAY_BUFFER_BINDING',
'GL_FRAMEBUFFER_BINDING',
'GL_GENERATE_MIPMAP_HINT',
'GL_GREEN_BITS',
'GL_IMPLEMENTATION_COLOR_READ_FORMAT',
'GL_IMPLEMENTATION_COLOR_READ_TYPE',
'GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS',
'GL_MAX_CUBE_MAP_TEXTURE_SIZE',
'GL_MAX_FRAGMENT_UNIFORM_VECTORS',
'GL_MAX_RENDERBUFFER_SIZE',
'GL_MAX_TEXTURE_IMAGE_UNITS',
'GL_MAX_TEXTURE_SIZE',
'GL_MAX_VARYING_VECTORS',
'GL_MAX_VERTEX_ATTRIBS',
'GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS',
'GL_MAX_VERTEX_UNIFORM_VECTORS',
'GL_MAX_VIEWPORT_DIMS',
'GL_NUM_COMPRESSED_TEXTURE_FORMATS',
'GL_NUM_SHADER_BINARY_FORMATS',
'GL_PACK_ALIGNMENT',
'GL_RED_BITS',
'GL_RENDERBUFFER_BINDING',
'GL_SAMPLE_BUFFERS',
'GL_SAMPLE_COVERAGE_INVERT',
'GL_SAMPLE_COVERAGE_VALUE',
'GL_SAMPLES',
'GL_SCISSOR_BOX',
'GL_SHADER_BINARY_FORMATS',
'GL_SHADER_COMPILER',
'GL_SUBPIXEL_BITS',
'GL_STENCIL_BITS',
'GL_TEXTURE_BINDING_2D',
'GL_TEXTURE_BINDING_CUBE_MAP',
'GL_UNPACK_ALIGNMENT',
'GL_UNPACK_FLIP_Y_CHROMIUM',
'GL_UNPACK_PREMULTIPLY_ALPHA_CHROMIUM',
'GL_UNPACK_UNPREMULTIPLY_ALPHA_CHROMIUM',
# we can add this because we emulate it if the driver does not support it.
'GL_VERTEX_ARRAY_BINDING_OES',
'GL_VIEWPORT',
],
'invalid': [
'GL_FOG_HINT',
],
},
'GetTexParamTarget': {
'type': 'GLenum',
'valid': [
'GL_TEXTURE_2D',
'GL_TEXTURE_CUBE_MAP',
],
'invalid': [
'GL_PROXY_TEXTURE_CUBE_MAP',
]
},
'TextureTarget': {
'type': 'GLenum',
'valid': [
'GL_TEXTURE_2D',
'GL_TEXTURE_CUBE_MAP_POSITIVE_X',
'GL_TEXTURE_CUBE_MAP_NEGATIVE_X',
'GL_TEXTURE_CUBE_MAP_POSITIVE_Y',
'GL_TEXTURE_CUBE_MAP_NEGATIVE_Y',
'GL_TEXTURE_CUBE_MAP_POSITIVE_Z',
'GL_TEXTURE_CUBE_MAP_NEGATIVE_Z',
],
'invalid': [
'GL_PROXY_TEXTURE_CUBE_MAP',
]
},
'TextureBindTarget': {
'type': 'GLenum',
'valid': [
'GL_TEXTURE_2D',
'GL_TEXTURE_CUBE_MAP',
],
'invalid': [
'GL_TEXTURE_1D',
'GL_TEXTURE_3D',
],
},
'ShaderType': {
'type': 'GLenum',
'valid': [
'GL_VERTEX_SHADER',
'GL_FRAGMENT_SHADER',
],
'invalid': [
'GL_GEOMETRY_SHADER',
],
},
'FaceType': {
'type': 'GLenum',
'valid': [
'GL_FRONT',
'GL_BACK',
'GL_FRONT_AND_BACK',
],
},
'FaceMode': {
'type': 'GLenum',
'valid': [
'GL_CW',
'GL_CCW',
],
},
'CmpFunction': {
'type': 'GLenum',
'valid': [
'GL_NEVER',
'GL_LESS',
'GL_EQUAL',
'GL_LEQUAL',
'GL_GREATER',
'GL_NOTEQUAL',
'GL_GEQUAL',
'GL_ALWAYS',
],
},
'Equation': {
'type': 'GLenum',
'valid': [
'GL_FUNC_ADD',
'GL_FUNC_SUBTRACT',
'GL_FUNC_REVERSE_SUBTRACT',
],
'invalid': [
'GL_MIN',
'GL_MAX',
],
},
'SrcBlendFactor': {
'type': 'GLenum',
'valid': [
'GL_ZERO',
'GL_ONE',
'GL_SRC_COLOR',
'GL_ONE_MINUS_SRC_COLOR',
'GL_DST_COLOR',
'GL_ONE_MINUS_DST_COLOR',
'GL_SRC_ALPHA',
'GL_ONE_MINUS_SRC_ALPHA',
'GL_DST_ALPHA',
'GL_ONE_MINUS_DST_ALPHA',
'GL_CONSTANT_COLOR',
'GL_ONE_MINUS_CONSTANT_COLOR',
'GL_CONSTANT_ALPHA',
'GL_ONE_MINUS_CONSTANT_ALPHA',
'GL_SRC_ALPHA_SATURATE',
],
},
'DstBlendFactor': {
'type': 'GLenum',
'valid': [
'GL_ZERO',
'GL_ONE',
'GL_SRC_COLOR',
'GL_ONE_MINUS_SRC_COLOR',
'GL_DST_COLOR',
'GL_ONE_MINUS_DST_COLOR',
'GL_SRC_ALPHA',
'GL_ONE_MINUS_SRC_ALPHA',
'GL_DST_ALPHA',
'GL_ONE_MINUS_DST_ALPHA',
'GL_CONSTANT_COLOR',
'GL_ONE_MINUS_CONSTANT_COLOR',
'GL_CONSTANT_ALPHA',
'GL_ONE_MINUS_CONSTANT_ALPHA',
],
},
'Capability': {
'type': 'GLenum',
'valid': ["GL_%s" % cap['name'].upper() for cap in _CAPABILITY_FLAGS],
'invalid': [
'GL_CLIP_PLANE0',
'GL_POINT_SPRITE',
],
},
'DrawMode': {
'type': 'GLenum',
'valid': [
'GL_POINTS',
'GL_LINE_STRIP',
'GL_LINE_LOOP',
'GL_LINES',
'GL_TRIANGLE_STRIP',
'GL_TRIANGLE_FAN',
'GL_TRIANGLES',
],
'invalid': [
'GL_QUADS',
'GL_POLYGON',
],
},
'IndexType': {
'type': 'GLenum',
'valid': [
'GL_UNSIGNED_BYTE',
'GL_UNSIGNED_SHORT',
],
'invalid': [
'GL_UNSIGNED_INT',
'GL_INT',
],
},
'GetMaxIndexType': {
'type': 'GLenum',
'valid': [
'GL_UNSIGNED_BYTE',
'GL_UNSIGNED_SHORT',
'GL_UNSIGNED_INT',
],
'invalid': [
'GL_INT',
],
},
'Attachment': {
'type': 'GLenum',
'valid': [
'GL_COLOR_ATTACHMENT0',
'GL_DEPTH_ATTACHMENT',
'GL_STENCIL_ATTACHMENT',
],
},
'BackbufferAttachment': {
'type': 'GLenum',
'valid': [
'GL_COLOR_EXT',
'GL_DEPTH_EXT',
'GL_STENCIL_EXT',
],
},
'BufferParameter': {
'type': 'GLenum',
'valid': [
'GL_BUFFER_SIZE',
'GL_BUFFER_USAGE',
],
'invalid': [
'GL_PIXEL_PACK_BUFFER',
],
},
'FrameBufferParameter': {
'type': 'GLenum',
'valid': [
'GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE',
'GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME',
'GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL',
'GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE',
],
},
'ProgramParameter': {
'type': 'GLenum',
'valid': [
'GL_DELETE_STATUS',
'GL_LINK_STATUS',
'GL_VALIDATE_STATUS',
'GL_INFO_LOG_LENGTH',
'GL_ATTACHED_SHADERS',
'GL_ACTIVE_ATTRIBUTES',
'GL_ACTIVE_ATTRIBUTE_MAX_LENGTH',
'GL_ACTIVE_UNIFORMS',
'GL_ACTIVE_UNIFORM_MAX_LENGTH',
],
},
'QueryObjectParameter': {
'type': 'GLenum',
'valid': [
'GL_QUERY_RESULT_EXT',
'GL_QUERY_RESULT_AVAILABLE_EXT',
],
},
'QueryParameter': {
'type': 'GLenum',
'valid': [
'GL_CURRENT_QUERY_EXT',
],
},
'QueryTarget': {
'type': 'GLenum',
'valid': [
'GL_ANY_SAMPLES_PASSED_EXT',
'GL_ANY_SAMPLES_PASSED_CONSERVATIVE_EXT',
'GL_COMMANDS_ISSUED_CHROMIUM',
'GL_LATENCY_QUERY_CHROMIUM',
'GL_ASYNC_PIXEL_TRANSFERS_COMPLETED_CHROMIUM',
],
},
'RenderBufferParameter': {
'type': 'GLenum',
'valid': [
'GL_RENDERBUFFER_RED_SIZE',
'GL_RENDERBUFFER_GREEN_SIZE',
'GL_RENDERBUFFER_BLUE_SIZE',
'GL_RENDERBUFFER_ALPHA_SIZE',
'GL_RENDERBUFFER_DEPTH_SIZE',
'GL_RENDERBUFFER_STENCIL_SIZE',
'GL_RENDERBUFFER_WIDTH',
'GL_RENDERBUFFER_HEIGHT',
'GL_RENDERBUFFER_INTERNAL_FORMAT',
],
},
'ShaderParameter': {
'type': 'GLenum',
'valid': [
'GL_SHADER_TYPE',
'GL_DELETE_STATUS',
'GL_COMPILE_STATUS',
'GL_INFO_LOG_LENGTH',
'GL_SHADER_SOURCE_LENGTH',
'GL_TRANSLATED_SHADER_SOURCE_LENGTH_ANGLE',
],
},
'ShaderPrecision': {
'type': 'GLenum',
'valid': [
'GL_LOW_FLOAT',
'GL_MEDIUM_FLOAT',
'GL_HIGH_FLOAT',
'GL_LOW_INT',
'GL_MEDIUM_INT',
'GL_HIGH_INT',
],
},
'StringType': {
'type': 'GLenum',
'valid': [
'GL_VENDOR',
'GL_RENDERER',
'GL_VERSION',
'GL_SHADING_LANGUAGE_VERSION',
'GL_EXTENSIONS',
],
},
'TextureParameter': {
'type': 'GLenum',
'valid': [
'GL_TEXTURE_MAG_FILTER',
'GL_TEXTURE_MIN_FILTER',
'GL_TEXTURE_POOL_CHROMIUM',
'GL_TEXTURE_WRAP_S',
'GL_TEXTURE_WRAP_T',
],
'invalid': [
'GL_GENERATE_MIPMAP',
],
},
'TexturePool': {
'type': 'GLenum',
'valid': [
'GL_TEXTURE_POOL_MANAGED_CHROMIUM',
'GL_TEXTURE_POOL_UNMANAGED_CHROMIUM',
],
},
'TextureWrapMode': {
'type': 'GLenum',
'valid': [
'GL_CLAMP_TO_EDGE',
'GL_MIRRORED_REPEAT',
'GL_REPEAT',
],
},
'TextureMinFilterMode': {
'type': 'GLenum',
'valid': [
'GL_NEAREST',
'GL_LINEAR',
'GL_NEAREST_MIPMAP_NEAREST',
'GL_LINEAR_MIPMAP_NEAREST',
'GL_NEAREST_MIPMAP_LINEAR',
'GL_LINEAR_MIPMAP_LINEAR',
],
},
'TextureMagFilterMode': {
'type': 'GLenum',
'valid': [
'GL_NEAREST',
'GL_LINEAR',
],
},
'TextureUsage': {
'type': 'GLenum',
'valid': [
'GL_NONE',
'GL_FRAMEBUFFER_ATTACHMENT_ANGLE',
],
},
'VertexAttribute': {
'type': 'GLenum',
'valid': [
# some enum that the decoder actually passes through to GL needs
# to be the first listed here since it's used in unit tests.
'GL_VERTEX_ATTRIB_ARRAY_NORMALIZED',
'GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING',
'GL_VERTEX_ATTRIB_ARRAY_ENABLED',
'GL_VERTEX_ATTRIB_ARRAY_SIZE',
'GL_VERTEX_ATTRIB_ARRAY_STRIDE',
'GL_VERTEX_ATTRIB_ARRAY_TYPE',
'GL_CURRENT_VERTEX_ATTRIB',
],
},
'VertexPointer': {
'type': 'GLenum',
'valid': [
'GL_VERTEX_ATTRIB_ARRAY_POINTER',
],
},
'HintTarget': {
'type': 'GLenum',
'valid': [
'GL_GENERATE_MIPMAP_HINT',
],
'invalid': [
'GL_PERSPECTIVE_CORRECTION_HINT',
],
},
'HintMode': {
'type': 'GLenum',
'valid': [
'GL_FASTEST',
'GL_NICEST',
'GL_DONT_CARE',
],
},
'PixelStore': {
'type': 'GLenum',
'valid': [
'GL_PACK_ALIGNMENT',
'GL_UNPACK_ALIGNMENT',
'GL_UNPACK_FLIP_Y_CHROMIUM',
'GL_UNPACK_PREMULTIPLY_ALPHA_CHROMIUM',
'GL_UNPACK_UNPREMULTIPLY_ALPHA_CHROMIUM',
],
'invalid': [
'GL_PACK_SWAP_BYTES',
'GL_UNPACK_SWAP_BYTES',
],
},
'PixelStoreAlignment': {
'type': 'GLint',
'valid': [
'1',
'2',
'4',
'8',
],
'invalid': [
'3',
'9',
],
},
'ReadPixelFormat': {
'type': 'GLenum',
'valid': [
'GL_ALPHA',
'GL_RGB',
'GL_RGBA',
],
},
'PixelType': {
'type': 'GLenum',
'valid': [
'GL_UNSIGNED_BYTE',
'GL_UNSIGNED_SHORT_5_6_5',
'GL_UNSIGNED_SHORT_4_4_4_4',
'GL_UNSIGNED_SHORT_5_5_5_1',
],
'invalid': [
'GL_SHORT',
'GL_INT',
],
},
'ReadPixelType': {
'type': 'GLenum',
'valid': [
'GL_UNSIGNED_BYTE',
'GL_UNSIGNED_SHORT_5_6_5',
'GL_UNSIGNED_SHORT_4_4_4_4',
'GL_UNSIGNED_SHORT_5_5_5_1',
],
'invalid': [
'GL_SHORT',
'GL_INT',
],
},
'RenderBufferFormat': {
'type': 'GLenum',
'valid': [
'GL_RGBA4',
'GL_RGB565',
'GL_RGB5_A1',
'GL_DEPTH_COMPONENT16',
'GL_STENCIL_INDEX8',
],
},
'ShaderBinaryFormat': {
'type': 'GLenum',
'valid': [
],
},
'StencilOp': {
'type': 'GLenum',
'valid': [
'GL_KEEP',
'GL_ZERO',
'GL_REPLACE',
'GL_INCR',
'GL_INCR_WRAP',
'GL_DECR',
'GL_DECR_WRAP',
'GL_INVERT',
],
},
'TextureFormat': {
'type': 'GLenum',
'valid': [
'GL_ALPHA',
'GL_LUMINANCE',
'GL_LUMINANCE_ALPHA',
'GL_RGB',
'GL_RGBA',
],
'invalid': [
'GL_BGRA',
'GL_BGR',
],
},
'TextureInternalFormat': {
'type': 'GLenum',
'valid': [
'GL_ALPHA',
'GL_LUMINANCE',
'GL_LUMINANCE_ALPHA',
'GL_RGB',
'GL_RGBA',
],
'invalid': [
'GL_BGRA',
'GL_BGR',
],
},
'TextureInternalFormatStorage': {
'type': 'GLenum',
'valid': [
'GL_RGB565',
'GL_RGBA4',
'GL_RGB5_A1',
'GL_ALPHA8_EXT',
'GL_LUMINANCE8_EXT',
'GL_LUMINANCE8_ALPHA8_EXT',
'GL_RGB8_OES',
'GL_RGBA8_OES',
],
},
'VertexAttribType': {
'type': 'GLenum',
'valid': [
'GL_BYTE',
'GL_UNSIGNED_BYTE',
'GL_SHORT',
'GL_UNSIGNED_SHORT',
# 'GL_FIXED', // This is not available on Desktop GL.
'GL_FLOAT',
],
'invalid': [
'GL_DOUBLE',
],
},
'TextureBorder': {
'type': 'GLint',
'valid': [
'0',
],
'invalid': [
'1',
],
},
'VertexAttribSize': {
'type': 'GLint',
'valid': [
'1',
'2',
'3',
'4',
],
'invalid': [
'0',
'5',
],
},
'ZeroOnly': {
'type': 'GLint',
'valid': [
'0',
],
'invalid': [
'1',
],
},
'FalseOnly': {
'type': 'GLboolean',
'valid': [
'false',
],
'invalid': [
'true',
],
},
'ResetStatus': {
'type': 'GLenum',
'valid': [
'GL_GUILTY_CONTEXT_RESET_ARB',
'GL_INNOCENT_CONTEXT_RESET_ARB',
'GL_UNKNOWN_CONTEXT_RESET_ARB',
],
},
}
# This table specifies the different pepper interfaces that are supported for
# GL commands. 'dev' is true if it's a dev interface.
_PEPPER_INTERFACES = [
  # The unnamed entry is presumably the core (suffix-less) interface — verify
  # against the pepper code generation templates.
  {'name': '', 'dev': False},
  {'name': 'InstancedArrays', 'dev': False},
  {'name': 'FramebufferBlit', 'dev': False},
  {'name': 'FramebufferMultisample', 'dev': False},
  {'name': 'ChromiumEnableFeature', 'dev': False},
  {'name': 'ChromiumMapSub', 'dev': False},
  {'name': 'Query', 'dev': False},
]
# This table specifies types and other special data for the commands that
# will be generated.
#
# Must match function names specified in "cmd_buffer_functions.txt".
#
# cmd_comment: A comment added to the cmd format.
# type: defines which handler will be used to generate code.
# decoder_func: defines which function to call in the decoder to execute the
# corresponding GL command. If not specified the GL command will
# be called directly.
# gl_test_func: GL function that is expected to be called when testing.
# cmd_args: The arguments to use for the command. This overrides generating
# them based on the GL function arguments.
# a NonImmediate type is a type that stays a pointer even in
# an immediate version of a command.
# gen_cmd: Whether or not this function generates a command. Default = True.
# immediate: Whether or not to generate an immediate command for the GL
# function. The default is if there is exactly 1 pointer argument
# in the GL function an immediate command is generated.
# bucket: True to generate a bucket version of the command.
# impl_func: Whether or not to generate the GLES2Implementation part of this
# command.
# impl_decl: Whether or not to generate the GLES2Implementation declaration
# for this command.
# needs_size: If true a data_size field is added to the command.
# data_type: The type of data the command uses. For PUTn or PUT types.
# count: The number of units per element. For PUTn or PUT types.
# unit_test: If False no service side unit test will be generated.
# client_test: If False no client side unit test will be generated.
# expectation: If False the unit test will have no expected calls.
# gen_func: Name of function that generates GL resource for corresponding
# bind function.
# states: array of states that get set by this function corresponding to
# the given arguments
# state_flag: name of flag that is set to true when function is called.
# no_gl: no GL function is called.
# valid_args: A dictionary of argument indices to args to use in unit tests
# when they can not be automatically determined.
# pepper_interface: The pepper interface that is used for this extension
# invalid_test: False if no invalid test needed.
_FUNCTION_INFO = {
'ActiveTexture': {
'decoder_func': 'DoActiveTexture',
'unit_test': False,
'impl_func': False,
'client_test': False,
},
'AttachShader': {'decoder_func': 'DoAttachShader'},
'BindAttribLocation': {'type': 'GLchar', 'bucket': True, 'needs_size': True},
'BindBuffer': {
'type': 'Bind',
'decoder_func': 'DoBindBuffer',
'gen_func': 'GenBuffersARB',
},
'BindFramebuffer': {
'type': 'Bind',
'decoder_func': 'DoBindFramebuffer',
'gl_test_func': 'glBindFramebufferEXT',
'gen_func': 'GenFramebuffersEXT',
},
'BindRenderbuffer': {
'type': 'Bind',
'decoder_func': 'DoBindRenderbuffer',
'gl_test_func': 'glBindRenderbufferEXT',
'gen_func': 'GenRenderbuffersEXT',
},
'BindTexture': {
'type': 'Bind',
'decoder_func': 'DoBindTexture',
'gen_func': 'GenTextures',
# TODO(gman): remove this once client side caching works.
'client_test': False,
},
'BlitFramebufferEXT': {
'decoder_func': 'DoBlitFramebufferEXT',
'unit_test': False,
'extension': True,
'pepper_interface': 'FramebufferBlit',
'defer_reads': True,
'defer_draws': True,
},
'BufferData': {
'type': 'Manual',
'immediate': True,
'client_test': False,
},
'BufferSubData': {
'type': 'Data',
'client_test': False,
'decoder_func': 'DoBufferSubData',
},
'CheckFramebufferStatus': {
'type': 'Is',
'decoder_func': 'DoCheckFramebufferStatus',
'gl_test_func': 'glCheckFramebufferStatusEXT',
'error_value': 'GL_FRAMEBUFFER_UNSUPPORTED',
'result': ['GLenum'],
},
'Clear': {
'decoder_func': 'DoClear',
'defer_draws': True,
},
'ClearColor': {
'type': 'StateSet',
'state': 'ClearColor',
},
'ClearDepthf': {
'type': 'StateSet',
'state': 'ClearDepthf',
'decoder_func': 'glClearDepth',
'gl_test_func': 'glClearDepth',
'valid_args': {
'0': '0.5f'
},
},
'ColorMask': {
'type': 'StateSet',
'state': 'ColorMask',
'no_gl': True,
'expectation': False,
},
'ConsumeTextureCHROMIUM': {
'decoder_func': 'DoConsumeTextureCHROMIUM',
'type': 'PUT',
'data_type': 'GLbyte',
'count': 64,
'unit_test': False,
'extension': True,
'chromium': True,
},
'ClearStencil': {
'type': 'StateSet',
'state': 'ClearStencil',
},
'EnableFeatureCHROMIUM': {
'type': 'Custom',
'immediate': False,
'decoder_func': 'DoEnableFeatureCHROMIUM',
'expectation': False,
'cmd_args': 'GLuint bucket_id, GLint* result',
'result': ['GLint'],
'extension': True,
'chromium': True,
'pepper_interface': 'ChromiumEnableFeature',
},
'CompileShader': {'decoder_func': 'DoCompileShader', 'unit_test': False},
'CompressedTexImage2D': {
'type': 'Manual',
'immediate': True,
'bucket': True,
},
'CompressedTexSubImage2D': {
'type': 'Data',
'bucket': True,
'decoder_func': 'DoCompressedTexSubImage2D',
},
'CopyTexImage2D': {
'decoder_func': 'DoCopyTexImage2D',
'unit_test': False,
'defer_reads': True,
},
'CopyTexSubImage2D': {
'decoder_func': 'DoCopyTexSubImage2D',
'defer_reads': True,
},
'CreateProgram': {
'type': 'Create',
'client_test': False,
},
'CreateShader': {
'type': 'Create',
'client_test': False,
},
'BlendColor': {
'type': 'StateSet',
'state': 'BlendColor',
},
'BlendEquation': {
'type': 'StateSetRGBAlpha',
'state': 'BlendEquation',
'valid_args': {
'0': 'GL_FUNC_SUBTRACT'
},
},
'BlendEquationSeparate': {
'type': 'StateSet',
'state': 'BlendEquation',
'valid_args': {
'0': 'GL_FUNC_SUBTRACT'
},
},
'BlendFunc': {
'type': 'StateSetRGBAlpha',
'state': 'BlendFunc',
},
'BlendFuncSeparate': {
'type': 'StateSet',
'state': 'BlendFunc',
},
'SampleCoverage': {'decoder_func': 'DoSampleCoverage'},
'StencilFunc': {
'type': 'StateSetFrontBack',
'state': 'StencilFunc',
},
'StencilFuncSeparate': {
'type': 'StateSetFrontBackSeparate',
'state': 'StencilFunc',
},
'StencilOp': {
'type': 'StateSetFrontBack',
'state': 'StencilOp',
'valid_args': {
'1': 'GL_INCR'
},
},
'StencilOpSeparate': {
'type': 'StateSetFrontBackSeparate',
'state': 'StencilOp',
'valid_args': {
'1': 'GL_INCR'
},
},
'Hint': {'decoder_func': 'DoHint'},
'CullFace': {'type': 'StateSet', 'state': 'CullFace'},
'FrontFace': {'type': 'StateSet', 'state': 'FrontFace'},
'DepthFunc': {'type': 'StateSet', 'state': 'DepthFunc'},
'LineWidth': {
'type': 'StateSet',
'state': 'LineWidth',
'valid_args': {
'0': '0.5f'
},
},
'PolygonOffset': {
'type': 'StateSet',
'state': 'PolygonOffset',
},
'DeleteBuffers': {
'type': 'DELn',
'gl_test_func': 'glDeleteBuffersARB',
'resource_type': 'Buffer',
'resource_types': 'Buffers',
},
'DeleteFramebuffers': {
'type': 'DELn',
'gl_test_func': 'glDeleteFramebuffersEXT',
'resource_type': 'Framebuffer',
'resource_types': 'Framebuffers',
},
'DeleteProgram': {'type': 'Delete', 'decoder_func': 'DoDeleteProgram'},
'DeleteRenderbuffers': {
'type': 'DELn',
'gl_test_func': 'glDeleteRenderbuffersEXT',
'resource_type': 'Renderbuffer',
'resource_types': 'Renderbuffers',
},
'DeleteShader': {'type': 'Delete', 'decoder_func': 'DoDeleteShader'},
'DeleteSharedIdsCHROMIUM': {
'type': 'Custom',
'decoder_func': 'DoDeleteSharedIdsCHROMIUM',
'impl_func': False,
'expectation': False,
'immediate': False,
'extension': True,
'chromium': True,
},
'DeleteTextures': {
'type': 'DELn',
'resource_type': 'Texture',
'resource_types': 'Textures',
},
'DepthRangef': {
'decoder_func': 'DoDepthRangef',
'gl_test_func': 'glDepthRange',
},
'DepthMask': {
'type': 'StateSet',
'state': 'DepthMask',
'no_gl': True,
'expectation': False,
},
'DetachShader': {'decoder_func': 'DoDetachShader'},
'Disable': {
'decoder_func': 'DoDisable',
'impl_func': False,
'client_test': False,
},
'DisableVertexAttribArray': {
'decoder_func': 'DoDisableVertexAttribArray',
'impl_decl': False,
},
'DrawArrays': {
'type': 'Manual',
'cmd_args': 'GLenumDrawMode mode, GLint first, GLsizei count',
'defer_draws': True,
},
'DrawElements': {
'type': 'Manual',
'cmd_args': 'GLenumDrawMode mode, GLsizei count, '
'GLenumIndexType type, GLuint index_offset',
'client_test': False,
'defer_draws': True,
},
'Enable': {
'decoder_func': 'DoEnable',
'impl_func': False,
'client_test': False,
},
'EnableVertexAttribArray': {
'decoder_func': 'DoEnableVertexAttribArray',
'impl_decl': False,
},
'Finish': {
'impl_func': False,
'client_test': False,
'decoder_func': 'DoFinish',
},
'Flush': {
'impl_func': False,
'decoder_func': 'DoFlush',
},
'ShallowFlushCHROMIUM': {
'impl_func': False,
'gen_cmd': False,
'extension': True,
'chromium': True,
'client_test': False,
},
'FramebufferRenderbuffer': {
'decoder_func': 'DoFramebufferRenderbuffer',
'gl_test_func': 'glFramebufferRenderbufferEXT',
},
'FramebufferTexture2D': {
'decoder_func': 'DoFramebufferTexture2D',
'gl_test_func': 'glFramebufferTexture2DEXT',
},
'GenerateMipmap': {
'decoder_func': 'DoGenerateMipmap',
'gl_test_func': 'glGenerateMipmapEXT',
},
'GenBuffers': {
'type': 'GENn',
'gl_test_func': 'glGenBuffersARB',
'resource_type': 'Buffer',
'resource_types': 'Buffers',
},
'GenMailboxCHROMIUM': {
'type': 'Manual',
'cmd_args': 'GLuint bucket_id',
'result': ['SizedResult<GLint>'],
'client_test': False,
'unit_test': False,
'extension': True,
'chromium': True,
},
'GenFramebuffers': {
'type': 'GENn',
'gl_test_func': 'glGenFramebuffersEXT',
'resource_type': 'Framebuffer',
'resource_types': 'Framebuffers',
},
'GenRenderbuffers': {
'type': 'GENn', 'gl_test_func': 'glGenRenderbuffersEXT',
'resource_type': 'Renderbuffer',
'resource_types': 'Renderbuffers',
},
'GenTextures': {
'type': 'GENn',
'gl_test_func': 'glGenTextures',
'resource_type': 'Texture',
'resource_types': 'Textures',
},
'GenSharedIdsCHROMIUM': {
'type': 'Custom',
'decoder_func': 'DoGenSharedIdsCHROMIUM',
'impl_func': False,
'expectation': False,
'immediate': False,
'extension': True,
'chromium': True,
},
'GetActiveAttrib': {
'type': 'Custom',
'immediate': False,
'cmd_args':
'GLidProgram program, GLuint index, uint32 name_bucket_id, '
'void* result',
'result': [
'int32 success',
'int32 size',
'uint32 type',
],
},
'GetActiveUniform': {
'type': 'Custom',
'immediate': False,
'cmd_args':
'GLidProgram program, GLuint index, uint32 name_bucket_id, '
'void* result',
'result': [
'int32 success',
'int32 size',
'uint32 type',
],
},
'GetAttachedShaders': {
'type': 'Custom',
'immediate': False,
'cmd_args': 'GLidProgram program, void* result, uint32 result_size',
'result': ['SizedResult<GLuint>'],
},
'GetAttribLocation': {
'type': 'HandWritten',
'immediate': True,
'bucket': True,
'needs_size': True,
'cmd_args':
'GLidProgram program, const char* name, NonImmediate GLint* location',
'result': ['GLint'],
},
'GetBooleanv': {
'type': 'GETn',
'result': ['SizedResult<GLboolean>'],
'decoder_func': 'DoGetBooleanv',
'gl_test_func': 'glGetBooleanv',
},
'GetBufferParameteriv': {'type': 'GETn', 'result': ['SizedResult<GLint>']},
'GetError': {
'type': 'Is',
'decoder_func': 'GetGLError',
'impl_func': False,
'result': ['GLenum'],
'client_test': False,
},
'GetFloatv': {
'type': 'GETn',
'result': ['SizedResult<GLfloat>'],
'decoder_func': 'DoGetFloatv',
'gl_test_func': 'glGetFloatv',
},
'GetFramebufferAttachmentParameteriv': {
'type': 'GETn',
'decoder_func': 'DoGetFramebufferAttachmentParameteriv',
'gl_test_func': 'glGetFramebufferAttachmentParameterivEXT',
'result': ['SizedResult<GLint>'],
},
'GetIntegerv': {
'type': 'GETn',
'result': ['SizedResult<GLint>'],
'decoder_func': 'DoGetIntegerv',
'client_test': False,
},
'GetMaxValueInBufferCHROMIUM': {
'type': 'Is',
'decoder_func': 'DoGetMaxValueInBufferCHROMIUM',
'result': ['GLuint'],
'unit_test': False,
'client_test': False,
'extension': True,
'chromium': True,
'impl_func': False,
},
'GetMultipleIntegervCHROMIUM': {
'type': 'Custom',
'immediate': False,
'expectation': False,
'extension': True,
'chromium': True,
'client_test': False,
},
'GetProgramiv': {
'type': 'GETn',
'decoder_func': 'DoGetProgramiv',
'result': ['SizedResult<GLint>'],
'expectation': False,
},
'GetProgramInfoCHROMIUM': {
'type': 'Custom',
'immediate': False,
'expectation': False,
'impl_func': False,
'extension': True,
'chromium': True,
'client_test': False,
'cmd_args': 'GLidProgram program, uint32 bucket_id',
'result': [
'uint32 link_status',
'uint32 num_attribs',
'uint32 num_uniforms',
],
},
'GetProgramInfoLog': {
'type': 'STRn',
'expectation': False,
},
'GetRenderbufferParameteriv': {
'type': 'GETn',
'decoder_func': 'DoGetRenderbufferParameteriv',
'gl_test_func': 'glGetRenderbufferParameterivEXT',
'result': ['SizedResult<GLint>'],
},
'GetShaderiv': {
'type': 'GETn',
'decoder_func': 'DoGetShaderiv',
'result': ['SizedResult<GLint>'],
},
'GetShaderInfoLog': {
'type': 'STRn',
'get_len_func': 'glGetShaderiv',
'get_len_enum': 'GL_INFO_LOG_LENGTH',
'unit_test': False,
},
'GetShaderPrecisionFormat': {
'type': 'Custom',
'immediate': False,
'cmd_args':
'GLenumShaderType shadertype, GLenumShaderPrecision precisiontype, '
'void* result',
'result': [
'int32 success',
'int32 min_range',
'int32 max_range',
'int32 precision',
],
},
'GetShaderSource': {
'type': 'STRn',
'get_len_func': 'DoGetShaderiv',
'get_len_enum': 'GL_SHADER_SOURCE_LENGTH',
'unit_test': False,
'client_test': False,
},
'GetString': {
'type': 'Custom',
'client_test': False,
'cmd_args': 'GLenumStringType name, uint32 bucket_id',
},
'GetTexParameterfv': {'type': 'GETn', 'result': ['SizedResult<GLfloat>']},
'GetTexParameteriv': {'type': 'GETn', 'result': ['SizedResult<GLint>']},
'GetTranslatedShaderSourceANGLE': {
'type': 'STRn',
'get_len_func': 'DoGetShaderiv',
'get_len_enum': 'GL_TRANSLATED_SHADER_SOURCE_LENGTH_ANGLE',
'unit_test': False,
'extension': True,
},
'GetUniformfv': {
'type': 'Custom',
'immediate': False,
'result': ['SizedResult<GLfloat>'],
},
'GetUniformiv': {
'type': 'Custom',
'immediate': False,
'result': ['SizedResult<GLint>'],
},
'GetUniformLocation': {
'type': 'HandWritten',
'immediate': True,
'bucket': True,
'needs_size': True,
'cmd_args':
'GLidProgram program, const char* name, NonImmediate GLint* location',
'result': ['GLint'],
},
'GetVertexAttribfv': {
'type': 'GETn',
'result': ['SizedResult<GLfloat>'],
'impl_decl': False,
'decoder_func': 'DoGetVertexAttribfv',
'expectation': False,
'client_test': False,
},
'GetVertexAttribiv': {
'type': 'GETn',
'result': ['SizedResult<GLint>'],
'impl_decl': False,
'decoder_func': 'DoGetVertexAttribiv',
'expectation': False,
'client_test': False,
},
'GetVertexAttribPointerv': {
'type': 'Custom',
'immediate': False,
'result': ['SizedResult<GLuint>'],
'client_test': False,
},
'IsBuffer': {
'type': 'Is',
'decoder_func': 'DoIsBuffer',
'expectation': False,
},
'IsEnabled': {
'type': 'Is',
'decoder_func': 'DoIsEnabled',
'impl_func': False,
'expectation': False,
},
'IsFramebuffer': {
'type': 'Is',
'decoder_func': 'DoIsFramebuffer',
'expectation': False,
},
'IsProgram': {
'type': 'Is',
'decoder_func': 'DoIsProgram',
'expectation': False,
},
'IsRenderbuffer': {
'type': 'Is',
'decoder_func': 'DoIsRenderbuffer',
'expectation': False,
},
'IsShader': {
'type': 'Is',
'decoder_func': 'DoIsShader',
'expectation': False,
},
'IsTexture': {
'type': 'Is',
'decoder_func': 'DoIsTexture',
'expectation': False,
},
'LinkProgram': {
'decoder_func': 'DoLinkProgram',
'impl_func': False,
},
'MapBufferCHROMIUM': {
'gen_cmd': False,
'extension': True,
'chromium': True,
'client_test': False,
'chromium': True,
},
'MapBufferSubDataCHROMIUM': {
'gen_cmd': False,
'extension': True,
'chromium': True,
'client_test': False,
'pepper_interface': 'ChromiumMapSub',
},
'MapTexSubImage2DCHROMIUM': {
'gen_cmd': False,
'extension': True,
'chromium': True,
'client_test': False,
'pepper_interface': 'ChromiumMapSub',
},
'PixelStorei': {'type': 'Manual'},
'PostSubBufferCHROMIUM': {
'type': 'Custom',
'impl_func': False,
'unit_test': False,
'client_test': False,
'extension': True,
'chromium': True,
},
'ProduceTextureCHROMIUM': {
'decoder_func': 'DoProduceTextureCHROMIUM',
'type': 'PUT',
'data_type': 'GLbyte',
'count': 64,
'unit_test': False,
'extension': True,
'chromium': True,
},
'RenderbufferStorage': {
'decoder_func': 'DoRenderbufferStorage',
'gl_test_func': 'glRenderbufferStorageEXT',
'expectation': False,
},
'RenderbufferStorageMultisampleEXT': {
'decoder_func': 'DoRenderbufferStorageMultisample',
'gl_test_func': 'glRenderbufferStorageMultisampleEXT',
'expectation': False,
'unit_test': False,
'extension': True,
'pepper_interface': 'FramebufferMultisample',
},
'ReadPixels': {
'cmd_comment':
'// ReadPixels has the result separated from the pixel buffer so that\n'
'// it is easier to specify the result going to some specific place\n'
'// that exactly fits the rectangle of pixels.\n',
'type': 'Custom',
'immediate': False,
'impl_func': False,
'client_test': False,
'cmd_args':
'GLint x, GLint y, GLsizei width, GLsizei height, '
'GLenumReadPixelFormat format, GLenumReadPixelType type, '
'uint32 pixels_shm_id, uint32 pixels_shm_offset, '
'uint32 result_shm_id, uint32 result_shm_offset',
'result': ['uint32'],
'defer_reads': True,
},
'RegisterSharedIdsCHROMIUM': {
'type': 'Custom',
'decoder_func': 'DoRegisterSharedIdsCHROMIUM',
'impl_func': False,
'expectation': False,
'immediate': False,
'extension': True,
'chromium': True,
},
'ReleaseShaderCompiler': {
'decoder_func': 'DoReleaseShaderCompiler',
'unit_test': False,
},
'ShaderBinary': {
'type': 'Custom',
'client_test': False,
},
'ShaderSource': {
'type': 'Manual',
'immediate': True,
'bucket': True,
'needs_size': True,
'client_test': False,
'cmd_args':
'GLuint shader, const char* data',
},
'StencilMask': {
'type': 'StateSetFrontBack',
'state': 'StencilMask',
'no_gl': True,
'expectation': False,
},
'StencilMaskSeparate': {
'type': 'StateSetFrontBackSeparate',
'state': 'StencilMask',
'no_gl': True,
'expectation': False,
},
'SwapBuffers': {
'type': 'Custom',
'impl_func': False,
'unit_test': False,
'client_test': False,
'extension': True,
},
'TexImage2D': {
'type': 'Manual',
'immediate': True,
'client_test': False,
},
'TexParameterf': {
'decoder_func': 'DoTexParameterf',
'valid_args': {
'2': 'GL_NEAREST'
},
},
'TexParameteri': {
'decoder_func': 'DoTexParameteri',
'valid_args': {
'2': 'GL_NEAREST'
},
},
'TexParameterfv': {
'type': 'PUT',
'data_type': 'GLfloat',
'data_value': 'GL_NEAREST',
'count': 1,
'decoder_func': 'DoTexParameterfv',
},
'TexParameteriv': {
'type': 'PUT',
'data_type': 'GLint',
'data_value': 'GL_NEAREST',
'count': 1,
'decoder_func': 'DoTexParameteriv',
},
'TexSubImage2D': {
'type': 'Manual',
'immediate': True,
'client_test': False,
'cmd_args': 'GLenumTextureTarget target, GLint level, '
'GLint xoffset, GLint yoffset, '
'GLsizei width, GLsizei height, '
'GLenumTextureFormat format, GLenumPixelType type, '
'const void* pixels, GLboolean internal'
},
'Uniform1f': {'type': 'PUTXn', 'data_type': 'GLfloat', 'count': 1},
'Uniform1fv': {
'type': 'PUTn',
'data_type': 'GLfloat',
'count': 1,
'decoder_func': 'DoUniform1fv',
},
'Uniform1i': {'decoder_func': 'DoUniform1i', 'unit_test': False},
'Uniform1iv': {
'type': 'PUTn',
'data_type': 'GLint',
'count': 1,
'decoder_func': 'DoUniform1iv',
'unit_test': False,
},
'Uniform2i': {'type': 'PUTXn', 'data_type': 'GLint', 'count': 2},
'Uniform2f': {'type': 'PUTXn', 'data_type': 'GLfloat', 'count': 2},
'Uniform2fv': {
'type': 'PUTn',
'data_type': 'GLfloat',
'count': 2,
'decoder_func': 'DoUniform2fv',
},
'Uniform2iv': {
'type': 'PUTn',
'data_type': 'GLint',
'count': 2,
'decoder_func': 'DoUniform2iv',
},
'Uniform3i': {'type': 'PUTXn', 'data_type': 'GLint', 'count': 3},
'Uniform3f': {'type': 'PUTXn', 'data_type': 'GLfloat', 'count': 3},
'Uniform3fv': {
'type': 'PUTn',
'data_type': 'GLfloat',
'count': 3,
'decoder_func': 'DoUniform3fv',
},
'Uniform3iv': {
'type': 'PUTn',
'data_type': 'GLint',
'count': 3,
'decoder_func': 'DoUniform3iv',
},
'Uniform4i': {'type': 'PUTXn', 'data_type': 'GLint', 'count': 4},
'Uniform4f': {'type': 'PUTXn', 'data_type': 'GLfloat', 'count': 4},
'Uniform4fv': {
'type': 'PUTn',
'data_type': 'GLfloat',
'count': 4,
'decoder_func': 'DoUniform4fv',
},
'Uniform4iv': {
'type': 'PUTn',
'data_type': 'GLint',
'count': 4,
'decoder_func': 'DoUniform4iv',
},
'UniformMatrix2fv': {
'type': 'PUTn',
'data_type': 'GLfloat',
'count': 4,
'decoder_func': 'DoUniformMatrix2fv',
},
'UniformMatrix3fv': {
'type': 'PUTn',
'data_type': 'GLfloat',
'count': 9,
'decoder_func': 'DoUniformMatrix3fv',
},
'UniformMatrix4fv': {
'type': 'PUTn',
'data_type': 'GLfloat',
'count': 16,
'decoder_func': 'DoUniformMatrix4fv',
},
'UnmapBufferCHROMIUM': {
'gen_cmd': False,
'extension': True,
'chromium': True,
'client_test': False,
'chromium': True,
},
'UnmapBufferSubDataCHROMIUM': {
'gen_cmd': False,
'extension': True,
'chromium': True,
'client_test': False,
'pepper_interface': 'ChromiumMapSub',
},
'UnmapTexSubImage2DCHROMIUM': {
'gen_cmd': False,
'extension': True,
'chromium': True,
'client_test': False,
'pepper_interface': 'ChromiumMapSub',
},
'UseProgram': {
'decoder_func': 'DoUseProgram',
'impl_func': False,
'unit_test': False,
},
'ValidateProgram': {'decoder_func': 'DoValidateProgram'},
'VertexAttrib1f': {'decoder_func': 'DoVertexAttrib1f'},
'VertexAttrib1fv': {
'type': 'PUT',
'data_type': 'GLfloat',
'count': 1,
'decoder_func': 'DoVertexAttrib1fv',
},
'VertexAttrib2f': {'decoder_func': 'DoVertexAttrib2f'},
'VertexAttrib2fv': {
'type': 'PUT',
'data_type': 'GLfloat',
'count': 2,
'decoder_func': 'DoVertexAttrib2fv',
},
'VertexAttrib3f': {'decoder_func': 'DoVertexAttrib3f'},
'VertexAttrib3fv': {
'type': 'PUT',
'data_type': 'GLfloat',
'count': 3,
'decoder_func': 'DoVertexAttrib3fv',
},
'VertexAttrib4f': {'decoder_func': 'DoVertexAttrib4f'},
'VertexAttrib4fv': {
'type': 'PUT',
'data_type': 'GLfloat',
'count': 4,
'decoder_func': 'DoVertexAttrib4fv',
},
'VertexAttribPointer': {
'type': 'Manual',
'cmd_args': 'GLuint indx, GLintVertexAttribSize size, '
'GLenumVertexAttribType type, GLboolean normalized, '
'GLsizei stride, GLuint offset',
'client_test': False,
},
'Scissor': {
'type': 'StateSet',
'state': 'Scissor',
},
'Viewport': {
'decoder_func': 'DoViewport',
},
'ResizeCHROMIUM': {
'type': 'Custom',
'impl_func': False,
'unit_test': False,
'extension': True,
'chromium': True,
},
'GetRequestableExtensionsCHROMIUM': {
'type': 'Custom',
'impl_func': False,
'immediate': False,
'cmd_args': 'uint32 bucket_id',
'extension': True,
'chromium': True,
},
'RequestExtensionCHROMIUM': {
'type': 'Custom',
'impl_func': False,
'immediate': False,
'client_test': False,
'cmd_args': 'uint32 bucket_id',
'extension': True,
'chromium': True,
},
'RateLimitOffscreenContextCHROMIUM': {
'gen_cmd': False,
'extension': True,
'chromium': True,
'client_test': False,
},
'CreateStreamTextureCHROMIUM': {
'type': 'Custom',
'cmd_args': 'GLuint client_id, void* result',
'result': ['GLuint'],
'immediate': False,
'impl_func': False,
'expectation': False,
'extension': True,
'chromium': True,
'client_test': False,
},
'DestroyStreamTextureCHROMIUM': {
'type': 'Custom',
'impl_func': False,
'expectation': False,
'extension': True,
'chromium': True,
},
'TexImageIOSurface2DCHROMIUM': {
'decoder_func': 'DoTexImageIOSurface2DCHROMIUM',
'unit_test': False,
'extension': True,
'chromium': True,
},
'CopyTextureCHROMIUM': {
'decoder_func': 'DoCopyTextureCHROMIUM',
'unit_test': False,
'extension': True,
'chromium': True,
},
'TexStorage2DEXT': {
'unit_test': False,
'extension': True,
'decoder_func': 'DoTexStorage2DEXT',
},
'DrawArraysInstancedANGLE': {
'type': 'Manual',
'cmd_args': 'GLenumDrawMode mode, GLint first, GLsizei count, '
'GLsizei primcount',
'extension': True,
'unit_test': False,
'pepper_interface': 'InstancedArrays',
'defer_draws': True,
},
'DrawElementsInstancedANGLE': {
'type': 'Manual',
'cmd_args': 'GLenumDrawMode mode, GLsizei count, '
'GLenumIndexType type, GLuint index_offset, GLsizei primcount',
'extension': True,
'unit_test': False,
'client_test': False,
'pepper_interface': 'InstancedArrays',
'defer_draws': True,
},
'VertexAttribDivisorANGLE': {
'type': 'Manual',
'cmd_args': 'GLuint index, GLuint divisor',
'extension': True,
'unit_test': False,
'pepper_interface': 'InstancedArrays',
},
'GenQueriesEXT': {
'type': 'GENn',
'gl_test_func': 'glGenQueriesARB',
'resource_type': 'Query',
'resource_types': 'Queries',
'unit_test': False,
'pepper_interface': 'Query',
},
'DeleteQueriesEXT': {
'type': 'DELn',
'gl_test_func': 'glDeleteQueriesARB',
'resource_type': 'Query',
'resource_types': 'Queries',
'unit_test': False,
'pepper_interface': 'Query',
},
'IsQueryEXT': {
'gen_cmd': False,
'client_test': False,
'pepper_interface': 'Query',
},
'BeginQueryEXT': {
'type': 'Manual',
'cmd_args': 'GLenumQueryTarget target, GLidQuery id, void* sync_data',
'immediate': False,
'gl_test_func': 'glBeginQuery',
'pepper_interface': 'Query',
},
'EndQueryEXT': {
'type': 'Manual',
'cmd_args': 'GLenumQueryTarget target, GLuint submit_count',
'gl_test_func': 'glEndnQuery',
'client_test': False,
'pepper_interface': 'Query',
},
'GetQueryivEXT': {
'gen_cmd': False,
'client_test': False,
'gl_test_func': 'glGetQueryiv',
'pepper_interface': 'Query',
},
'GetQueryObjectuivEXT': {
'gen_cmd': False,
'client_test': False,
'gl_test_func': 'glGetQueryObjectuiv',
'pepper_interface': 'Query',
},
'BindUniformLocationCHROMIUM': {
'type': 'GLchar',
'bucket': True,
'needs_size': True,
'gl_test_func': 'DoBindUniformLocationCHROMIUM',
},
'InsertEventMarkerEXT': {
'type': 'GLcharN',
'decoder_func': 'DoInsertEventMarkerEXT',
'expectation': False,
},
'PushGroupMarkerEXT': {
'type': 'GLcharN',
'decoder_func': 'DoPushGroupMarkerEXT',
'expectation': False,
},
'PopGroupMarkerEXT': {
'decoder_func': 'DoPopGroupMarkerEXT',
'expectation': False,
'impl_func': False,
},
'GenVertexArraysOES': {
'type': 'GENn',
'gl_test_func': 'glGenVertexArraysOES',
'resource_type': 'VertexArray',
'resource_types': 'VertexArrays',
'unit_test': False,
},
'BindVertexArrayOES': {
'type': 'Bind',
'gl_test_func': 'glBindVertexArrayOES',
'decoder_func': 'DoBindVertexArrayOES',
'gen_func': 'GenVertexArraysOES',
'unit_test': False,
'client_test': False,
},
'DeleteVertexArraysOES': {
'type': 'DELn',
'gl_test_func': 'glDeleteVertexArraysOES',
'resource_type': 'VertexArray',
'resource_types': 'VertexArrays',
'unit_test': False,
},
'IsVertexArrayOES': {
'type': 'Is',
'gl_test_func': 'glIsVertexArrayOES',
'decoder_func': 'DoIsVertexArrayOES',
'expectation': False,
'unit_test': False,
},
'BindTexImage2DCHROMIUM': {
'decoder_func': 'DoBindTexImage2DCHROMIUM',
'unit_test': False,
'extension': True,
'chromium': True,
},
'ReleaseTexImage2DCHROMIUM': {
'decoder_func': 'DoReleaseTexImage2DCHROMIUM',
'unit_test': False,
'extension': True,
'chromium': True,
},
'TraceBeginCHROMIUM': {
'type': 'Custom',
'impl_func': False,
'immediate': False,
'client_test': False,
'cmd_args': 'GLuint bucket_id',
'extension': True,
'chromium': True,
},
'TraceEndCHROMIUM': {
'impl_func': False,
'immediate': False,
'client_test': False,
'decoder_func': 'DoTraceEndCHROMIUM',
'unit_test': False,
'extension': True,
'chromium': True,
},
'AsyncTexImage2DCHROMIUM': {
'type': 'Manual',
'immediate': False,
'client_test': False,
'extension': True,
'chromium': True,
},
'AsyncTexSubImage2DCHROMIUM': {
'type': 'Manual',
'immediate': False,
'client_test': False,
'extension': True,
'chromium': True,
},
'DiscardFramebufferEXT': {
'type': 'PUTn',
'count': 1,
'data_type': 'GLenum',
'cmd_args': 'GLenum target, GLsizei count, '
'const GLenum* attachments',
'decoder_func': 'DoDiscardFramebufferEXT',
'unit_test': False,
'client_test': False,
'extension': True,
},
'LoseContextCHROMIUM': {
'type': 'Manual',
'impl_func': True,
'extension': True,
'chromium': True,
},
}
def Grouper(n, iterable, fillvalue=None):
  """Collect data into fixed-length chunks or blocks.

  Grouper(3, 'ABCDEFG', 'x') -> ('A','B','C'), ('D','E','F'), ('G','x','x')

  Args:
    n: the chunk size.
    iterable: the sequence to chunk.
    fillvalue: value used to pad the final chunk when len(iterable) is not
        a multiple of n.
  Returns:
    an iterator of n-tuples.
  """
  args = [iter(iterable)] * n
  # itertools.izip_longest (Python 2) was renamed to zip_longest in Python 3;
  # resolve whichever one this interpreter provides.
  try:
    zip_longest = itertools.izip_longest
  except AttributeError:
    zip_longest = itertools.zip_longest
  return zip_longest(fillvalue=fillvalue, *args)
def SplitWords(input_string):
  """Transforms a input_string into a list of lower-case components.

  Args:
    input_string: the input string.

  Returns:
    a list of lower-case words.
  """
  if '_' in input_string:
    # Underscore-delimited: 'some_TEXT_' -> ['some', 'text'].
    return input_string.replace('_', ' ').strip().lower().split()
  has_upper = re.search('[A-Z]', input_string)
  has_lower = re.search('[a-z]', input_string)
  if has_upper and has_lower:
    # Mixed case: cut before each capital ('SomeText' -> 'Some Text'),
    # then before digit runs ('Vector3' -> 'Vector 3').
    input_string = re.sub('([A-Z])', r' \1', input_string).strip()
    input_string = re.sub('([^0-9])([0-9])', r'\1 \2', input_string)
  return input_string.lower().split()
def Lower(words):
  """Makes a lower-case identifier from words.

  Args:
    words: a list of lower-case words.
  Returns:
    the lower-case identifier (the words joined with underscores).
  """
  separator = '_'
  return separator.join(words)
def ToUnderscore(input_string):
  """converts CamelCase to camel_case."""
  # Split into lower-case words, then rejoin with underscores.
  return Lower(SplitWords(input_string))
class CWriter(object):
  """Writes to a file formatting it for Google's style guidelines.

  Output is buffered in memory and only flushed to disk in Close(), and
  only when the content actually changed (to avoid triggering rebuilds).
  Lines longer than 80 columns are split heuristically and annotated with
  '// NOLINT' when a clean split is impossible.
  """

  def __init__(self, filename):
    # Destination path; nothing is written until Close().
    self.filename = filename
    self.file_num = 0
    # Accumulated output fragments, joined in Close().
    self.content = []

  def SetFileNum(self, num):
    """Used to help write number files and tests."""
    self.file_num = num

  def Write(self, string):
    """Writes a string to a file splitting if it's > 80 characters."""
    lines = string.splitlines()
    num_lines = len(lines)
    for ii in range(0, num_lines):
      # Every line but the last keeps its newline; the last line keeps one
      # only if the input string itself ended with a newline.
      self.__WriteLine(lines[ii], ii < (num_lines - 1) or string[-1] == '\n')

  def __FindSplit(self, string):
    """Finds a place to split a string.

    Returns the index of the character to split after, or -1 when no
    acceptable split point exists.
    """
    # Prefer splitting right after an '=' (but not '==') within 80 columns.
    splitter = string.find('=')
    if splitter >= 1 and not string[splitter + 1] == '=' and splitter < 80:
      return splitter
    # parts = string.split('(')
    # Next, try splitting after an opening paren that is not inside quotes.
    parts = re.split("(?<=[^\"])\((?!\")", string)
    fptr = re.compile('\*\w*\)')
    if len(parts) > 1:
      splitter = len(parts[0])
      for ii in range(1, len(parts)):
        # Don't split on the dot in "if (.condition)".
        if (not parts[ii - 1][-3:] == "if " and
            # Don't split "(.)" or "(.*fptr)".
            (len(parts[ii]) > 0 and
             not parts[ii][0] == ")" and not fptr.match(parts[ii]))
            and splitter < 80):
          return splitter
        splitter += len(parts[ii]) + 1
    # Fall back to the right-most comma before column 80, skipping commas
    # that directly follow a double quote.
    done = False
    end = len(string)
    last_splitter = -1
    while not done:
      splitter = string[0:end].rfind(',')
      if splitter < 0 or (splitter > 0 and string[splitter - 1] == '"'):
        return last_splitter
      elif splitter >= 80:
        end = splitter
      else:
        return splitter

  def __WriteLine(self, line, ends_with_eol):
    """Given a single line, writes it to a file, splitting if it's > 80 chars"""
    if len(line) >= 80:
      i = self.__FindSplit(line)
      if i > 0:
        line1 = line[0:i + 1]
        if line1[-1] == ' ':
          line1 = line1[:-1]
        lineend = ''
        if line1[0] == '#':
          # Preprocessor directives need a continuation backslash.
          lineend = ' \\'
        nolint = ''
        if len(line1) > 80:
          nolint = '  // NOLINT'
        self.__AddLine(line1 + nolint + lineend + '\n')
        # Continuation lines inherit the original indentation; non-comma
        # splits get an extra level of indent.
        match = re.match("( +)", line1)
        indent = ""
        if match:
          indent = match.group(1)
        splitter = line[i]
        if not splitter == ',':
          indent = "    " + indent
        # Recurse in case the remainder is still too long.
        self.__WriteLine(indent + line[i + 1:].lstrip(), True)
        return
    nolint = ''
    if len(line) > 80:
      # No split point found; emit over-long line with a lint suppression.
      nolint = '  // NOLINT'
    self.__AddLine(line + nolint)
    if ends_with_eol:
      self.__AddLine('\n')

  def __AddLine(self, line):
    # Raw append; all formatting decisions happen in __WriteLine.
    self.content.append(line)

  def Close(self):
    """Close the file."""
    content = "".join(self.content)
    write_file = True
    if os.path.exists(self.filename):
      # Skip the write when the content is unchanged so file timestamps
      # stay stable and incremental builds are not invalidated.
      old_file = open(self.filename, "rb");
      old_content = old_file.read()
      old_file.close();
      if content == old_content:
        write_file = False
    if write_file:
      file = open(self.filename, "wb")
      file.write(content)
      file.close()
class CHeaderWriter(CWriter):
  """Writes a C Header file.

  Emits the license header, a do-not-edit warning, an optional file comment
  and an include guard derived from the header's path relative to the
  nearest enclosing 'src' directory.
  """

  _non_alnum_re = re.compile(r'[^a-zA-Z0-9]')

  def __init__(self, filename, file_comment = None):
    """Args:
      filename: path of the header to write; must live under a 'src'
          directory so the include guard can be made repo-relative.
      file_comment: optional comment block written after the boilerplate.
    """
    CWriter.__init__(self, filename)
    # Walk up the path until the 'src' directory is found so the guard
    # reflects the repository-relative path.
    base = os.path.abspath(filename)
    while os.path.basename(base) != 'src':
      new_base = os.path.dirname(base)
      assert new_base != base  # Prevent infinite loop.
      base = new_base
    hpath = os.path.relpath(filename, base)
    self.guard = self._non_alnum_re.sub('_', hpath).upper() + '_'
    self.Write(_LICENSE)
    self.Write(_DO_NOT_EDIT_WARNING)
    # Idiomatic None test (was: `not file_comment == None`).
    if file_comment is not None:
      self.Write(file_comment)
    self.Write("#ifndef %s\n" % self.guard)
    self.Write("#define %s\n\n" % self.guard)

  def Close(self):
    """Writes the closing include-guard line and flushes the file."""
    self.Write("#endif  // %s\n\n" % self.guard)
    CWriter.Close(self)
class TypeHandler(object):
  """This class emits code for a particular type of function.

  Subclasses override individual Write* methods to customize the generated
  command structs, decoder handlers, client implementations and unit tests
  for each command category listed in _FUNCTION_INFO.
  """

  # Matches a whole EXPECT_CALL statement so it can be stripped from test
  # templates for functions with 'expectation': False.
  _remove_expected_call_re = re.compile(r'  EXPECT_CALL.*?;\n', re.S)

  def __init__(self):
    pass

  def InitFunction(self, func):
    """Add or adjust anything type specific for this function."""
    if func.GetInfo('needs_size') and not func.name.endswith('Bucket'):
      func.AddCmdArg(DataSizeArgument('data_size'))

  def AddImmediateFunction(self, generator, func):
    """Adds an immediate version of a function."""
    # Generate an immediate command if there is only 1 pointer arg.
    immediate = func.GetInfo('immediate')  # can be True, False or None
    if immediate == True or immediate == None:
      if func.num_pointer_args == 1 or immediate:
        generator.AddFunction(ImmediateFunction(func))

  def AddBucketFunction(self, generator, func):
    """Adds a bucket version of a function."""
    # Generate an immediate command if there is only 1 pointer arg.
    bucket = func.GetInfo('bucket')  # can be True, False or None
    if bucket:
      generator.AddFunction(BucketFunction(func))

  def WriteStruct(self, func, file):
    """Writes a structure that matches the arguments to a function."""
    comment = func.GetInfo('cmd_comment')
    if not comment == None:
      file.Write(comment)
    file.Write("struct %s {\n" % func.name)
    file.Write("  typedef %s ValueType;\n" % func.name)
    file.Write("  static const CommandId kCmdId = k%s;\n" % func.name)
    func.WriteCmdArgFlag(file)
    file.Write("\n")
    result = func.GetInfo('result')
    if not result == None:
      if len(result) == 1:
        file.Write("  typedef %s Result;\n\n" % result[0])
      else:
        file.Write("  struct Result {\n")
        for line in result:
          file.Write("    %s;\n" % line)
        file.Write("  };\n\n")
    func.WriteCmdComputeSize(file)
    func.WriteCmdSetHeader(file)
    func.WriteCmdInit(file)
    func.WriteCmdSet(file)
    file.Write("  gpu::CommandHeader header;\n")
    args = func.GetCmdArgs()
    for arg in args:
      file.Write("  %s %s;\n" % (arg.cmd_type, arg.name))
    file.Write("};\n")
    file.Write("\n")
    # Emit compile-time checks that the struct layout matches the wire
    # format: one uint32 per argument after the command header.
    size = len(args) * _SIZE_OF_UINT32 + _SIZE_OF_COMMAND_HEADER
    file.Write("COMPILE_ASSERT(sizeof(%s) == %d,\n" % (func.name, size))
    file.Write("               Sizeof_%s_is_not_%d);\n" % (func.name, size))
    file.Write("COMPILE_ASSERT(offsetof(%s, header) == 0,\n" % func.name)
    file.Write("               OffsetOf_%s_header_not_0);\n" % func.name)
    offset = _SIZE_OF_COMMAND_HEADER
    for arg in args:
      file.Write("COMPILE_ASSERT(offsetof(%s, %s) == %d,\n" %
                 (func.name, arg.name, offset))
      file.Write("               OffsetOf_%s_%s_not_%d);\n" %
                 (func.name, arg.name, offset))
      offset += _SIZE_OF_UINT32
    if not result == None and len(result) > 1:
      # Also pin the layout of each field of a multi-field Result struct.
      offset = 0;
      for line in result:
        parts = line.split()
        name = parts[-1]
        check = """
COMPILE_ASSERT(offsetof(%(cmd_name)s::Result, %(field_name)s) == %(offset)d,
               OffsetOf_%(cmd_name)s_Result_%(field_name)s_not_%(offset)d);
"""
        file.Write((check.strip() + "\n") % {
              'cmd_name': func.name,
              'field_name': name,
              'offset': offset,
            })
        offset += _SIZE_OF_UINT32
    file.Write("\n")

  def WriteHandlerImplementation(self, func, file):
    """Writes the handler implementation for this command."""
    file.Write("  %s(%s);\n" %
               (func.GetGLFunctionName(), func.MakeOriginalArgString("")))

  def WriteCmdSizeTest(self, func, file):
    """Writes the size test for a command."""
    file.Write("  EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);\n")

  def WriteFormatTest(self, func, file):
    """Writes a format test for a command."""
    file.Write("TEST_F(GLES2FormatTest, %s) {\n" % func.name)
    file.Write("  %s& cmd = *GetBufferAs<%s>();\n" % (func.name, func.name))
    file.Write("  void* next_cmd = cmd.Set(\n")
    file.Write("      &cmd")
    args = func.GetCmdArgs()
    # Distinct per-argument values (11, 12, ...) so field mixups show up.
    for value, arg in enumerate(args):
      file.Write(",\n      static_cast<%s>(%d)" % (arg.type, value + 11))
    file.Write(");\n")
    file.Write("  EXPECT_EQ(static_cast<uint32>(%s::kCmdId),\n" % func.name)
    file.Write("            cmd.header.command);\n")
    func.type_handler.WriteCmdSizeTest(func, file)
    for value, arg in enumerate(args):
      file.Write("  EXPECT_EQ(static_cast<%s>(%d), cmd.%s);\n" %
                 (arg.type, value + 11, arg.name))
    file.Write("  CheckBytesWrittenMatchesExpectedSize(\n")
    file.Write("      next_cmd, sizeof(cmd));\n")
    file.Write("}\n")
    file.Write("\n")

  def WriteImmediateFormatTest(self, func, file):
    """Writes a format test for an immediate version of a command."""
    pass

  def WriteBucketFormatTest(self, func, file):
    """Writes a format test for a bucket version of a command."""
    pass

  def WriteGetDataSizeCode(self, func, file):
    """Writes the code to set data_size used in validation"""
    pass

  def WriteImmediateCmdSizeTest(self, func, file):
    """Writes a size test for an immediate version of a command."""
    file.Write("  // TODO(gman): Compute correct size.\n")
    file.Write("  EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);\n")

  def WriteImmediateHandlerImplementation (self, func, file):
    """Writes the handler impl for the immediate version of a command."""
    file.Write("  %s(%s);\n" %
               (func.GetGLFunctionName(), func.MakeOriginalArgString("")))

  def WriteBucketHandlerImplementation (self, func, file):
    """Writes the handler impl for the bucket version of a command."""
    file.Write("  %s(%s);\n" %
               (func.GetGLFunctionName(), func.MakeOriginalArgString("")))

  def WriteServiceImplementation(self, func, file):
    """Writes the service implementation for a command."""
    file.Write(
        "error::Error GLES2DecoderImpl::Handle%s(\n" % func.name)
    file.Write(
        "    uint32 immediate_data_size, const gles2::%s& c) {\n" % func.name)
    self.WriteHandlerDeferReadWrite(func, file);
    if len(func.GetOriginalArgs()) > 0:
      # The last argument's GetCode may depend on data_size, so write the
      # size computation before it.
      last_arg = func.GetLastOriginalArg()
      all_but_last_arg = func.GetOriginalArgs()[:-1]
      for arg in all_but_last_arg:
        arg.WriteGetCode(file)
      self.WriteGetDataSizeCode(func, file)
      last_arg.WriteGetCode(file)
    func.WriteHandlerValidation(file)
    func.WriteHandlerImplementation(file)
    file.Write("  return error::kNoError;\n")
    file.Write("}\n")
    file.Write("\n")

  def WriteImmediateServiceImplementation(self, func, file):
    """Writes the service implementation for an immediate version of command."""
    file.Write(
        "error::Error GLES2DecoderImpl::Handle%s(\n" % func.name)
    file.Write(
        "    uint32 immediate_data_size, const gles2::%s& c) {\n" % func.name)
    self.WriteHandlerDeferReadWrite(func, file);
    last_arg = func.GetLastOriginalArg()
    all_but_last_arg = func.GetOriginalArgs()[:-1]
    for arg in all_but_last_arg:
      arg.WriteGetCode(file)
    self.WriteGetDataSizeCode(func, file)
    last_arg.WriteGetCode(file)
    func.WriteHandlerValidation(file)
    func.WriteHandlerImplementation(file)
    file.Write("  return error::kNoError;\n")
    file.Write("}\n")
    file.Write("\n")

  def WriteBucketServiceImplementation(self, func, file):
    """Writes the service implementation for a bucket version of command."""
    file.Write(
        "error::Error GLES2DecoderImpl::Handle%s(\n" % func.name)
    file.Write(
        "    uint32 immediate_data_size, const gles2::%s& c) {\n" % func.name)
    self.WriteHandlerDeferReadWrite(func, file);
    last_arg = func.GetLastOriginalArg()
    all_but_last_arg = func.GetOriginalArgs()[:-1]
    for arg in all_but_last_arg:
      arg.WriteGetCode(file)
    self.WriteGetDataSizeCode(func, file)
    last_arg.WriteGetCode(file)
    func.WriteHandlerValidation(file)
    func.WriteHandlerImplementation(file)
    file.Write("  return error::kNoError;\n")
    file.Write("}\n")
    file.Write("\n")

  def WriteHandlerDeferReadWrite(self, func, file):
    """Writes the code to handle deferring reads or writes."""
    defer_reads = func.GetInfo('defer_reads')
    defer_draws = func.GetInfo('defer_draws')
    conditions = []
    if defer_draws:
      conditions.append('ShouldDeferDraws()');
    if defer_reads:
      conditions.append('ShouldDeferReads()');
    if not conditions:
      return
    file.Write("  if (%s)\n" % ' || '.join(conditions))
    file.Write("    return error::kDeferCommandUntilLater;\n")

  # NOTE(review): `extra = {}` is a mutable default argument; it is only
  # read (vars.update(extra)) here, never mutated, so it is benign.
  def WriteValidUnitTest(self, func, file, test, extra = {}):
    """Writes a valid unit test."""
    if func.GetInfo('expectation') == False:
      test = self._remove_expected_call_re.sub('', test)
    name = func.name
    arg_strings = []
    for count, arg in enumerate(func.GetOriginalArgs()):
      arg_strings.append(arg.GetValidArg(func, count, 0))
    gl_arg_strings = []
    for count, arg in enumerate(func.GetOriginalArgs()):
      gl_arg_strings.append(arg.GetValidGLArg(func, count, 0))
    gl_func_name = func.GetGLTestFunctionName()
    vars = {
      'test_name': 'GLES2DecoderTest%d' % file.file_num,
      'name':name,
      'gl_func_name': gl_func_name,
      'args': ", ".join(arg_strings),
      'gl_args': ", ".join(gl_arg_strings),
    }
    vars.update(extra)
    file.Write(test % vars)

  # NOTE(review): same benign mutable default as WriteValidUnitTest above.
  def WriteInvalidUnitTest(self, func, file, test, extra = {}):
    """Writes a invalid unit test."""
    # One test per (argument, invalid value) pair; all other arguments get
    # their valid values.
    for arg_index, arg in enumerate(func.GetOriginalArgs()):
      num_invalid_values = arg.GetNumInvalidValues(func)
      for value_index in range(0, num_invalid_values):
        arg_strings = []
        parse_result = "kNoError"
        gl_error = None
        for count, arg in enumerate(func.GetOriginalArgs()):
          if count == arg_index:
            (arg_string, parse_result, gl_error) = arg.GetInvalidArg(
                count, value_index)
          else:
            arg_string = arg.GetValidArg(func, count, 0)
          arg_strings.append(arg_string)
        gl_arg_strings = []
        for arg in func.GetOriginalArgs():
          gl_arg_strings.append("_")
        gl_func_name = func.GetGLTestFunctionName()
        gl_error_test = ''
        if not gl_error == None:
          gl_error_test = '\n  EXPECT_EQ(%s, GetGLError());' % gl_error
        vars = {
          'test_name': 'GLES2DecoderTest%d' % file.file_num ,
          'name': func.name,
          'arg_index': arg_index,
          'value_index': value_index,
          'gl_func_name': gl_func_name,
          'args': ", ".join(arg_strings),
          'all_but_last_args': ", ".join(arg_strings[:-1]),
          'gl_args': ", ".join(gl_arg_strings),
          'parse_result': parse_result,
          'gl_error_test': gl_error_test,
        }
        vars.update(extra)
        file.Write(test % vars)

  def WriteServiceUnitTest(self, func, file):
    """Writes the service unit test for a command."""
    valid_test = """
TEST_F(%(test_name)s, %(name)sValidArgs) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s));
  SpecializedSetup<%(name)s, 0>(true);
  %(name)s cmd;
  cmd.Init(%(args)s);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
"""
    self.WriteValidUnitTest(func, file, valid_test)
    invalid_test = """
TEST_F(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s)).Times(0);
  SpecializedSetup<%(name)s, 0>(false);
  %(name)s cmd;
  cmd.Init(%(args)s);
  EXPECT_EQ(error::%(parse_result)s, ExecuteCmd(cmd));%(gl_error_test)s
}
"""
    self.WriteInvalidUnitTest(func, file, invalid_test)

  def WriteImmediateServiceUnitTest(self, func, file):
    """Writes the service unit test for an immediate command."""
    file.Write("// TODO(gman): %s\n" % func.name)

  def WriteImmediateValidationCode(self, func, file):
    """Writes the validation code for an immediate version of a command."""
    pass

  def WriteBucketServiceUnitTest(self, func, file):
    """Writes the service unit test for a bucket command."""
    file.Write("// TODO(gman): %s\n" % func.name)

  def WriteBucketValidationCode(self, func, file):
    """Writes the validation code for a bucket version of a command."""
    file.Write("// TODO(gman): %s\n" % func.name)

  def WriteGLES2ImplementationDeclaration(self, func, file):
    """Writes the GLES2 Implementation declaration."""
    impl_decl = func.GetInfo('impl_decl')
    if impl_decl == None or impl_decl == True:
      file.Write("virtual %s %s(%s) OVERRIDE;\n" %
                 (func.return_type, func.original_name,
                  func.MakeTypedOriginalArgString("")))
      file.Write("\n")

  def WriteGLES2CLibImplementation(self, func, file):
    """Writes the C-linkage wrapper that forwards to the current context."""
    file.Write("%s GLES2%s(%s) {\n" %
               (func.return_type, func.name,
                func.MakeTypedOriginalArgString("")))
    result_string = "return "
    if func.return_type == "void":
      result_string = ""
    file.Write("  %sgles2::GetGLContext()->%s(%s);\n" %
               (result_string, func.original_name,
                func.MakeOriginalArgString("")))
    file.Write("}\n")

  def WriteGLES2Header(self, func, file):
    """Writes a re-write macro for GLES"""
    file.Write("#define gl%s GLES2_GET_FUN(%s)\n" %(func.name, func.name))

  def WriteClientGLCallLog(self, func, file):
    """Writes a logging macro for the client side code."""
    comma = ""
    if len(func.GetOriginalArgs()):
      comma = " << "
    file.Write(
        '  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] gl%s("%s%s << ")");\n' %
        (func.original_name, comma, func.MakeLogArgString()))

  def WriteClientGLReturnLog(self, func, file):
    """Writes the return value logging code."""
    if func.return_type != "void":
      file.Write('  GPU_CLIENT_LOG("return:" << result)\n')

  def WriteGLES2ImplementationHeader(self, func, file):
    """Writes the GLES2 Implementation."""
    self.WriteGLES2ImplementationDeclaration(func, file)

  def WriteGLES2TraceImplementationHeader(self, func, file):
    """Writes the GLES2 Trace Implementation header."""
    file.Write("virtual %s %s(%s) OVERRIDE;\n" %
               (func.return_type, func.original_name,
                func.MakeTypedOriginalArgString("")))

  def WriteGLES2TraceImplementation(self, func, file):
    """Writes the GLES2 Trace Implementation."""
    file.Write("%s GLES2TraceImplementation::%s(%s) {\n" %
               (func.return_type, func.original_name,
                func.MakeTypedOriginalArgString("")))
    result_string = "return "
    if func.return_type == "void":
      result_string = ""
    file.Write('  TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::%s");\n' %
               func.name)
    file.Write("  %sgl_->%s(%s);\n" %
               (result_string, func.name, func.MakeOriginalArgString("")))
    file.Write("}\n")
    file.Write("\n")

  def WriteGLES2Implementation(self, func, file):
    """Writes the GLES2 Implementation."""
    impl_func = func.GetInfo('impl_func')
    impl_decl = func.GetInfo('impl_decl')
    gen_cmd = func.GetInfo('gen_cmd')
    # Only auto-generate when nothing in _FUNCTION_INFO opts out.
    if (func.can_auto_generate and
        (impl_func == None or impl_func == True) and
        (impl_decl == None or impl_decl == True) and
        (gen_cmd == None or gen_cmd == True)):
      file.Write("%s GLES2Implementation::%s(%s) {\n" %
                 (func.return_type, func.original_name,
                  func.MakeTypedOriginalArgString("")))
      file.Write("  GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
      self.WriteClientGLCallLog(func, file)
      func.WriteDestinationInitalizationValidation(file)
      for arg in func.GetOriginalArgs():
        arg.WriteClientSideValidationCode(file, func)
      file.Write("  helper_->%s(%s);\n" %
                 (func.name, func.MakeOriginalArgString("")))
      file.Write("  CheckGLError();\n")
      self.WriteClientGLReturnLog(func, file)
      file.Write("}\n")
      file.Write("\n")

  def WriteGLES2InterfaceHeader(self, func, file):
    """Writes the GLES2 Interface."""
    file.Write("virtual %s %s(%s) = 0;\n" %
               (func.return_type, func.original_name,
                func.MakeTypedOriginalArgString("")))

  def WriteGLES2InterfaceStub(self, func, file):
    """Writes the GLES2 Interface stub declaration."""
    file.Write("virtual %s %s(%s) OVERRIDE;\n" %
               (func.return_type, func.original_name,
                func.MakeTypedOriginalArgString("")))

  def WriteGLES2InterfaceStubImpl(self, func, file):
    """Writes the GLES2 Interface stub declaration."""
    args = func.GetOriginalArgs()
    arg_string = ", ".join(
        ["%s /* %s */" % (arg.type, arg.name) for arg in args])
    file.Write("%s GLES2InterfaceStub::%s(%s) {\n" %
               (func.return_type, func.original_name, arg_string))
    if func.return_type != "void":
      file.Write("  return 0;\n")
    file.Write("}\n")

  def WriteGLES2ImplementationUnitTest(self, func, file):
    """Writes the GLES2 Implementation unit test."""
    client_test = func.GetInfo('client_test')
    if (func.can_auto_generate and
        (client_test == None or client_test == True)):
      code = """
TEST_F(GLES2ImplementationTest, %(name)s) {
  struct Cmds {
    %(name)s cmd;
  };
  Cmds expected;
  expected.cmd.Init(%(cmd_args)s);
  gl_->%(name)s(%(args)s);
  EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
}
"""
      cmd_arg_strings = []
      for count, arg in enumerate(func.GetCmdArgs()):
        cmd_arg_strings.append(arg.GetValidClientSideCmdArg(func, count, 0))
        # NOTE(review): this increment is dead — enumerate() rebinds count
        # on every iteration.
        count += 1
      gl_arg_strings = []
      for count, arg in enumerate(func.GetOriginalArgs()):
        gl_arg_strings.append(arg.GetValidClientSideArg(func, count, 0))
      file.Write(code % {
            'name': func.name,
            'args': ", ".join(gl_arg_strings),
            'cmd_args': ", ".join(cmd_arg_strings),
          })
    else:
      if client_test != False:
        file.Write("// TODO: Implement unit test for %s\n" % func.name)

  def WriteDestinationInitalizationValidation(self, func, file):
    """Writes the client side destination initialization validation."""
    for arg in func.GetOriginalArgs():
      arg.WriteDestinationInitalizationValidation(file, func)

  def WriteTraceEvent(self, func, file):
    """Writes the TRACE_EVENT0 line for the client implementation."""
    file.Write('  TRACE_EVENT0("gpu", "GLES2Implementation::%s");\n' %
               func.original_name)

  def WriteImmediateCmdComputeSize(self, func, file):
    """Writes the size computation code for the immediate version of a cmd."""
    file.Write("  static uint32 ComputeSize(uint32 size_in_bytes) {\n")
    file.Write("    return static_cast<uint32>(\n")
    file.Write("        sizeof(ValueType) +  // NOLINT\n")
    file.Write("        RoundSizeToMultipleOfEntries(size_in_bytes));\n")
    file.Write("  }\n")
    file.Write("\n")

  def WriteImmediateCmdSetHeader(self, func, file):
    """Writes the SetHeader function for the immediate version of a cmd."""
    file.Write("  void SetHeader(uint32 size_in_bytes) {\n")
    file.Write("    header.SetCmdByTotalSize<ValueType>(size_in_bytes);\n")
    file.Write("  }\n")
    file.Write("\n")

  def WriteImmediateCmdInit(self, func, file):
    """Writes the Init function for the immediate version of a command."""
    raise NotImplementedError(func.name)

  def WriteImmediateCmdSet(self, func, file):
    """Writes the Set function for the immediate version of a command."""
    raise NotImplementedError(func.name)

  def WriteCmdHelper(self, func, file):
    """Writes the cmd helper definition for a cmd."""
    code = """  void %(name)s(%(typed_args)s) {
    gles2::%(name)s* c = GetCmdSpace<gles2::%(name)s>();
    if (c) {
      c->Init(%(args)s);
    }
  }

"""
    file.Write(code % {
          "name": func.name,
          "typed_args": func.MakeTypedCmdArgString(""),
          "args": func.MakeCmdArgString(""),
        })

  def WriteImmediateCmdHelper(self, func, file):
    """Writes the cmd helper definition for the immediate version of a cmd."""
    code = """  void %(name)s(%(typed_args)s) {
    const uint32 s = 0;  // TODO(gman): compute correct size
    gles2::%(name)s* c =
        GetImmediateCmdSpaceTotalSize<gles2::%(name)s>(s);
    if (c) {
      c->Init(%(args)s);
    }
  }

"""
    file.Write(code % {
          "name": func.name,
          "typed_args": func.MakeTypedCmdArgString(""),
          "args": func.MakeCmdArgString(""),
        })
class StateSetHandler(TypeHandler):
  """Handler for commands that simply set state.

  The states written are looked up from the module-level _STATES table by
  the function's 'state' info key.
  """

  def __init__(self):
    TypeHandler.__init__(self)

  def WriteHandlerImplementation(self, func, file):
    """Overridden from TypeHandler."""
    state_name = func.GetInfo('state')
    state = _STATES[state_name]
    states = state['states']
    args = func.GetOriginalArgs()
    # First emit any range checks; out-of-range values set GL_INVALID_VALUE
    # and return without touching cached state.
    code = []
    for ndx,item in enumerate(states):
      if 'range_checks' in item:
        for range_check in item['range_checks']:
          code.append("%s %s" % (args[ndx].name, range_check['check']))
    if len(code):
      file.Write("  if (%s) {\n" % " ||\n      ".join(code))
      file.Write(
        '    SetGLError(GL_INVALID_VALUE, "%s", "%s out of range");\n' %
        (func.name, args[ndx].name))
      file.Write("    return error::kNoError;\n")
      file.Write("  }\n")
    # Only update cached state (and call GL) when a value actually changed.
    code = []
    for ndx,item in enumerate(states):
      code.append("state_.%s != %s" % (item['name'], args[ndx].name))
    file.Write("  if (%s) {\n" % " ||\n      ".join(code))
    for ndx,item in enumerate(states):
      file.Write("    state_.%s = %s;\n" % (item['name'], args[ndx].name))
    if 'state_flag' in state:
      file.Write("    %s = true;\n" % state['state_flag'])
    if not func.GetInfo("no_gl"):
      file.Write("    %s(%s);\n" %
                 (func.GetGLFunctionName(), func.MakeOriginalArgString("")))
    file.Write("  }\n")

  def WriteServiceUnitTest(self, func, file):
    """Overridden from TypeHandler."""
    TypeHandler.WriteServiceUnitTest(self, func, file)
    # Additionally generate one test per declared range check.
    state_name = func.GetInfo('state')
    state = _STATES[state_name]
    states = state['states']
    for ndx,item in enumerate(states):
      if 'range_checks' in item:
        for check_ndx, range_check in enumerate(item['range_checks']):
          valid_test = """
TEST_F(%(test_name)s, %(name)sInvalidValue%(ndx)d_%(check_ndx)d) {
  SpecializedSetup<%(name)s, 0>(false);
  %(name)s cmd;
  cmd.Init(%(args)s);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
}
"""
          name = func.name
          arg_strings = []
          for count, arg in enumerate(func.GetOriginalArgs()):
            arg_strings.append(arg.GetValidArg(func, count, 0))
          arg_strings[ndx] = range_check['test_value']
          vars = {
            'test_name': 'GLES2DecoderTest%d' % file.file_num,
            'name': name,
            'ndx': ndx,
            'check_ndx': check_ndx,
            'args': ", ".join(arg_strings),
          }
          file.Write(valid_test % vars)
class StateSetRGBAlphaHandler(TypeHandler):
  """Handler for commands that simply set state that have rgb/alpha.

  The states list is expected to be a multiple of the argument count, so
  arguments are reused cyclically (ndx % num_args) across the rgb and
  alpha state entries.
  """

  def __init__(self):
    TypeHandler.__init__(self)

  def WriteHandlerImplementation(self, func, file):
    """Overridden from TypeHandler."""
    state_name = func.GetInfo('state')
    state = _STATES[state_name]
    states = state['states']
    args = func.GetOriginalArgs()
    num_args = len(args)
    # Only update cached state (and call GL) when a value actually changed.
    code = []
    for ndx,item in enumerate(states):
      code.append("state_.%s != %s" % (item['name'], args[ndx % num_args].name))
    file.Write("  if (%s) {\n" % " ||\n      ".join(code))
    for ndx, item in enumerate(states):
      file.Write("    state_.%s = %s;\n" %
                 (item['name'], args[ndx % num_args].name))
    if 'state_flag' in state:
      file.Write("    %s = true;\n" % state['state_flag'])
    if not func.GetInfo("no_gl"):
      file.Write("    %s(%s);\n" %
                 (func.GetGLFunctionName(), func.MakeOriginalArgString("")))
    file.Write("  }\n")
class StateSetFrontBackSeparateHandler(TypeHandler):
  """Handler for commands that simply set state that have front/back.

  The first argument selects the face (GL_FRONT, GL_BACK or
  GL_FRONT_AND_BACK); the remaining arguments are applied to the matching
  group(s) of states.
  """

  def __init__(self):
    TypeHandler.__init__(self)

  def WriteHandlerImplementation(self, func, file):
    """Overridden from TypeHandler."""
    state_name = func.GetInfo('state')
    state = _STATES[state_name]
    states = state['states']
    args = func.GetOriginalArgs()
    face = args[0].name
    num_args = len(args)
    # The states list holds the front group followed by the back group,
    # each of size num_args - 1 (the face argument is not a state).
    file.Write("  bool changed = false;\n")
    for group_ndx, group in enumerate(Grouper(num_args - 1, states)):
      file.Write("  if (%s == %s || %s == GL_FRONT_AND_BACK) {\n" %
                 (face, ('GL_FRONT', 'GL_BACK')[group_ndx], face))
      code = []
      for ndx, item in enumerate(group):
        code.append("state_.%s != %s" % (item['name'], args[ndx + 1].name))
      file.Write("    changed |= %s;\n" % " ||\n        ".join(code))
      file.Write("  }\n")
    file.Write("  if (changed) {\n")
    for group_ndx, group in enumerate(Grouper(num_args - 1, states)):
      file.Write("    if (%s == %s || %s == GL_FRONT_AND_BACK) {\n" %
                 (face, ('GL_FRONT', 'GL_BACK')[group_ndx], face))
      for ndx, item in enumerate(group):
        file.Write("      state_.%s = %s;\n" %
                   (item['name'], args[ndx + 1].name))
      file.Write("    }\n")
    if 'state_flag' in state:
      file.Write("    %s = true;\n" % state['state_flag'])
    if not func.GetInfo("no_gl"):
      file.Write("    %s(%s);\n" %
                 (func.GetGLFunctionName(), func.MakeOriginalArgString("")))
    file.Write("  }\n")
class StateSetFrontBackHandler(TypeHandler):
  """Handler for commands that simply set state that set both front/back.

  Unlike StateSetFrontBackSeparateHandler there is no face argument; the
  same argument values are applied to both the front and the back group.
  """

  def __init__(self):
    TypeHandler.__init__(self)

  def WriteHandlerImplementation(self, func, file):
    """Overridden from TypeHandler."""
    state_name = func.GetInfo('state')
    state = _STATES[state_name]
    states = state['states']
    args = func.GetOriginalArgs()
    num_args = len(args)
    # Only update cached state (and call GL) when a value actually changed.
    code = []
    for group_ndx, group in enumerate(Grouper(num_args, states)):
      for ndx, item in enumerate(group):
        code.append("state_.%s != %s" % (item['name'], args[ndx].name))
    file.Write("  if (%s) {\n" % " ||\n      ".join(code))
    for group_ndx, group in enumerate(Grouper(num_args, states)):
      for ndx, item in enumerate(group):
        file.Write("    state_.%s = %s;\n" % (item['name'], args[ndx].name))
    if 'state_flag' in state:
      file.Write("    %s = true;\n" % state['state_flag'])
    if not func.GetInfo("no_gl"):
      file.Write("    %s(%s);\n" %
                 (func.GetGLFunctionName(), func.MakeOriginalArgString("")))
    file.Write("  }\n")
class CustomHandler(TypeHandler):
  """Handler for commands that are auto-generated but require minor tweaks.

  Service implementations and their unit tests are written by hand; only
  the command structs and helpers are generated.
  """

  def __init__(self):
    TypeHandler.__init__(self)

  def WriteServiceImplementation(self, func, file):
    """Overridden from TypeHandler."""
    pass

  def WriteImmediateServiceImplementation(self, func, file):
    """Overridden from TypeHandler."""
    pass

  def WriteBucketServiceImplementation(self, func, file):
    """Overridden from TypeHandler."""
    pass

  def WriteServiceUnitTest(self, func, file):
    """Overridden from TypeHandler."""
    file.Write("// TODO(gman): %s\n\n" % func.name)

  def WriteImmediateServiceUnitTest(self, func, file):
    """Overridden from TypeHandler."""
    file.Write("// TODO(gman): %s\n\n" % func.name)

  def WriteImmediateCmdGetTotalSize(self, func, file):
    """Overridden from TypeHandler."""
    # Declares the `total_size` local that the generated Init/Set bodies use.
    file.Write("    uint32 total_size = 0;  // TODO(gman): get correct size.\n")

  def WriteImmediateCmdInit(self, func, file):
    """Overridden from TypeHandler."""
    file.Write("  void Init(%s) {\n" % func.MakeTypedCmdArgString("_"))
    self.WriteImmediateCmdGetTotalSize(func, file)
    file.Write("    SetHeader(total_size);\n")
    args = func.GetCmdArgs()
    for arg in args:
      file.Write("    %s = _%s;\n" % (arg.name, arg.name))
    file.Write("  }\n")
    file.Write("\n")

  def WriteImmediateCmdSet(self, func, file):
    """Overridden from TypeHandler."""
    copy_args = func.MakeCmdArgString("_", False)
    file.Write("  void* Set(void* cmd%s) {\n" %
               func.MakeTypedCmdArgString("_", True))
    self.WriteImmediateCmdGetTotalSize(func, file)
    file.Write("    static_cast<ValueType*>(cmd)->Init(%s);\n" % copy_args)
    file.Write("    return NextImmediateCmdAddressTotalSize<ValueType>("
               "cmd, total_size);\n")
    file.Write("  }\n")
    file.Write("\n")
class TodoHandler(CustomHandler):
  """Handle for commands that are not yet implemented.

  Generates stub client/service code that reports GL_INVALID_OPERATION.
  """

  def AddImmediateFunction(self, generator, func):
    """Overridden from TypeHandler."""
    pass

  def WriteImmediateFormatTest(self, func, file):
    """Overridden from TypeHandler."""
    pass

  def WriteGLES2ImplementationUnitTest(self, func, file):
    """Overridden from TypeHandler."""
    pass

  def WriteGLES2Implementation(self, func, file):
    """Overridden from TypeHandler."""
    file.Write("%s GLES2Implementation::%s(%s) {\n" %
               (func.return_type, func.original_name,
                func.MakeTypedOriginalArgString("")))
    file.Write("  // TODO: for now this is a no-op\n")
    file.Write(
        "  SetGLError(GL_INVALID_OPERATION, \"gl%s\", \"not implemented\");\n" %
        func.name)
    if func.return_type != "void":
      file.Write("  return 0;\n")
    file.Write("}\n")
    file.Write("\n")

  def WriteServiceImplementation(self, func, file):
    """Overridden from TypeHandler."""
    file.Write(
        "error::Error GLES2DecoderImpl::Handle%s(\n" % func.name)
    file.Write(
        "    uint32 immediate_data_size, const gles2::%s& c) {\n" % func.name)
    file.Write("  // TODO: for now this is a no-op\n")
    file.Write(
        "  SetGLError(GL_INVALID_OPERATION, \"gl%s\", \"not implemented\");\n" %
        func.name)
    file.Write("  return error::kNoError;\n")
    file.Write("}\n")
    file.Write("\n")
class HandWrittenHandler(CustomHandler):
  """Handler for commands where everything must be written by hand.

  Suppresses all generated structs, helpers, implementations and tests.
  """

  def InitFunction(self, func):
    """Add or adjust anything type specific for this function."""
    CustomHandler.InitFunction(self, func)
    # Prevent any auto-generation for this command.
    func.can_auto_generate = False

  def WriteStruct(self, func, file):
    """Overridden from TypeHandler."""
    pass

  def WriteDocs(self, func, file):
    """Overridden from TypeHandler."""
    pass

  def WriteServiceUnitTest(self, func, file):
    """Overridden from TypeHandler."""
    file.Write("// TODO(gman): %s\n\n" % func.name)

  def WriteImmediateServiceUnitTest(self, func, file):
    """Overridden from TypeHandler."""
    file.Write("// TODO(gman): %s\n\n" % func.name)

  def WriteBucketServiceUnitTest(self, func, file):
    """Overridden from TypeHandler."""
    file.Write("// TODO(gman): %s\n\n" % func.name)

  def WriteServiceImplementation(self, func, file):
    """Overridden from TypeHandler."""
    pass

  def WriteImmediateServiceImplementation(self, func, file):
    """Overridden from TypeHandler."""
    pass

  def WriteBucketServiceImplementation(self, func, file):
    """Overridden from TypeHandler."""
    pass

  def WriteImmediateCmdHelper(self, func, file):
    """Overridden from TypeHandler."""
    pass

  def WriteBucketCmdHelper(self, func, file):
    """Overridden from TypeHandler."""
    pass

  def WriteCmdHelper(self, func, file):
    """Overridden from TypeHandler."""
    pass

  def WriteFormatTest(self, func, file):
    """Overridden from TypeHandler."""
    file.Write("// TODO(gman): Write test for %s\n" % func.name)

  def WriteImmediateFormatTest(self, func, file):
    """Overridden from TypeHandler."""
    file.Write("// TODO(gman): Write test for %s\n" % func.name)

  def WriteBucketFormatTest(self, func, file):
    """Overridden from TypeHandler."""
    file.Write("// TODO(gman): Write test for %s\n" % func.name)
class ManualHandler(CustomHandler):
  """Handler for commands whose handlers must be written by hand."""

  def __init__(self):
    CustomHandler.__init__(self)

  def InitFunction(self, func):
    """Overridden from TypeHandler."""
    # CompressedTexImage2DBucket carries its data in a bucket, so the
    # trailing data argument is replaced with the bucket id.
    if (func.name == 'CompressedTexImage2DBucket'):
      func.cmd_args = func.cmd_args[:-1]
      func.AddCmdArg(Argument('bucket_id', 'GLuint'))
    else:
      CustomHandler.InitFunction(self, func)

  def WriteServiceImplementation(self, func, file):
    """Overridden from TypeHandler. Implementation is written by hand."""
    pass

  def WriteBucketServiceImplementation(self, func, file):
    """Overridden from TypeHandler. Implementation is written by hand."""
    pass

  def WriteServiceUnitTest(self, func, file):
    """Overridden from TypeHandler. Emits a reminder to write the test."""
    file.Write("// TODO(gman): %s\n\n" % func.name)

  def WriteImmediateServiceUnitTest(self, func, file):
    """Overridden from TypeHandler. Emits a reminder to write the test."""
    file.Write("// TODO(gman): %s\n\n" % func.name)

  def WriteImmediateServiceImplementation(self, func, file):
    """Overridden from TypeHandler. Implementation is written by hand."""
    pass

  def WriteImmediateFormatTest(self, func, file):
    """Overridden from TypeHandler. Emits a reminder to write the test."""
    file.Write("// TODO(gman): Implement test for %s\n" % func.name)

  def WriteGLES2Implementation(self, func, file):
    """Overridden from TypeHandler."""
    # Only generate the client-side implementation when 'impl_func' is
    # set for this function; otherwise it is written by hand.
    if func.GetInfo('impl_func'):
      super(ManualHandler, self).WriteGLES2Implementation(func, file)

  def WriteGLES2ImplementationHeader(self, func, file):
    """Overridden from TypeHandler. Declares the method in the header."""
    file.Write("virtual %s %s(%s) OVERRIDE;\n" %
               (func.return_type, func.original_name,
                func.MakeTypedOriginalArgString("")))
    file.Write("\n")

  def WriteImmediateCmdGetTotalSize(self, func, file):
    """Overridden from TypeHandler."""
    # TODO(gman): Move this data to _FUNCTION_INFO?
    if func.name == 'ShaderSourceImmediate':
      file.Write("    uint32 total_size = ComputeSize(_data_size);\n")
    else:
      CustomHandler.WriteImmediateCmdGetTotalSize(self, func, file)
class DataHandler(TypeHandler):
  """Handler for glBufferData, glBufferSubData, glTexImage2D, glTexSubImage2D,
  glCompressedTexImage2D, glCompressedTexImageSub2D.

  These commands carry a variable-size data payload, so the data/total
  size computations are special cased per function name.
  """

  def __init__(self):
    TypeHandler.__init__(self)

  def InitFunction(self, func):
    """Overridden from TypeHandler."""
    # The bucket variant carries its data in a bucket, so the trailing
    # data argument is replaced with the bucket id.
    if func.name == 'CompressedTexSubImage2DBucket':
      func.cmd_args = func.cmd_args[:-1]
      func.AddCmdArg(Argument('bucket_id', 'GLuint'))

  def WriteGetDataSizeCode(self, func, file):
    """Overridden from TypeHandler. Emits code computing 'data_size'."""
    # TODO(gman): Move this data to _FUNCTION_INFO?
    name = func.name
    if name.endswith("Immediate"):
      name = name[0:-9]
    if name == 'BufferData' or name == 'BufferSubData':
      file.Write("  uint32 data_size = size;\n")
    elif (name == 'CompressedTexImage2D' or
          name == 'CompressedTexSubImage2D'):
      file.Write("  uint32 data_size = imageSize;\n")
    elif (name == 'CompressedTexSubImage2DBucket'):
      file.Write("  Bucket* bucket = GetBucket(c.bucket_id);\n")
      file.Write("  uint32 data_size = bucket->size();\n")
      file.Write("  GLsizei imageSize = data_size;\n")
    elif name == 'TexImage2D' or name == 'TexSubImage2D':
      code = """  uint32 data_size;
  if (!GLES2Util::ComputeImageDataSize(
      width, height, format, type, unpack_alignment_, &data_size)) {
    return error::kOutOfBounds;
  }
"""
      file.Write(code)
    else:
      file.Write("// uint32 data_size = 0;  // TODO(gman): get correct size!\n")

  def WriteImmediateCmdGetTotalSize(self, func, file):
    """Overridden from TypeHandler. Emits code computing 'total_size'."""
    # TODO(gman): Move this data to _FUNCTION_INFO?
    if func.name == 'BufferDataImmediate':
      file.Write("    uint32 total_size = ComputeSize(_size);\n")
    elif func.name == 'BufferSubDataImmediate':
      file.Write("    uint32 total_size = ComputeSize(_size);\n")
    elif func.name == 'CompressedTexImage2DImmediate':
      file.Write("    uint32 total_size = ComputeSize(_imageSize);\n")
    elif func.name == 'CompressedTexSubImage2DImmediate':
      file.Write("    uint32 total_size = ComputeSize(_imageSize);\n")
    elif func.name == 'TexImage2DImmediate':
      file.Write(
          "    uint32 total_size = 0;  // TODO(gman): get correct size\n")
    elif func.name == 'TexSubImage2DImmediate':
      file.Write(
          "    uint32 total_size = 0;  // TODO(gman): get correct size\n")

  def WriteImmediateCmdSizeTest(self, func, file):
    """Overridden from TypeHandler. Emits the size check for the cmd test."""
    # TODO(gman): Move this data to _FUNCTION_INFO?
    if func.name == 'BufferDataImmediate':
      file.Write("    uint32 total_size = cmd.ComputeSize(cmd.size);\n")
    elif func.name == 'BufferSubDataImmediate':
      file.Write("    uint32 total_size = cmd.ComputeSize(cmd.size);\n")
    elif func.name == 'CompressedTexImage2DImmediate':
      file.Write("    uint32 total_size = cmd.ComputeSize(cmd.imageSize);\n")
    elif func.name == 'CompressedTexSubImage2DImmediate':
      file.Write("    uint32 total_size = cmd.ComputeSize(cmd.imageSize);\n")
    elif func.name == 'TexImage2DImmediate':
      file.Write(
          "    uint32 total_size = 0;  // TODO(gman): get correct size\n")
    elif func.name == 'TexSubImage2DImmediate':
      file.Write(
          "    uint32 total_size = 0;  // TODO(gman): get correct size\n")
    file.Write("  EXPECT_EQ(sizeof(cmd), total_size);\n")

  def WriteImmediateCmdInit(self, func, file):
    """Overridden from TypeHandler. Emits the cmd's Init() method."""
    file.Write("  void Init(%s) {\n" % func.MakeTypedCmdArgString("_"))
    self.WriteImmediateCmdGetTotalSize(func, file)
    file.Write("    SetHeader(total_size);\n")
    args = func.GetCmdArgs()
    for arg in args:
      file.Write("    %s = _%s;\n" % (arg.name, arg.name))
    file.Write("  }\n")
    file.Write("\n")

  def WriteImmediateCmdSet(self, func, file):
    """Overridden from TypeHandler. Emits the cmd's static Set() helper."""
    copy_args = func.MakeCmdArgString("_", False)
    file.Write("  void* Set(void* cmd%s) {\n" %
               func.MakeTypedCmdArgString("_", True))
    self.WriteImmediateCmdGetTotalSize(func, file)
    file.Write("    static_cast<ValueType*>(cmd)->Init(%s);\n" % copy_args)
    file.Write("    return NextImmediateCmdAddressTotalSize<ValueType>("
               "cmd, total_size);\n")
    file.Write("  }\n")
    file.Write("\n")

  def WriteImmediateFormatTest(self, func, file):
    """Overridden from TypeHandler. Emits a reminder to write the test."""
    # TODO(gman): Remove this exception.
    file.Write("// TODO(gman): Implement test for %s\n" % func.name)
    return

  def WriteServiceUnitTest(self, func, file):
    """Overridden from TypeHandler. Emits a reminder to write the test."""
    file.Write("// TODO(gman): %s\n\n" % func.name)

  def WriteImmediateServiceUnitTest(self, func, file):
    """Overridden from TypeHandler. Emits a reminder to write the test."""
    file.Write("// TODO(gman): %s\n\n" % func.name)

  def WriteBucketServiceImplementation(self, func, file):
    """Overridden from TypeHandler."""
    # CompressedTexSubImage2DBucket is written by hand; everything else
    # uses the generic bucket implementation.
    # Fixed: the original called the misspelled
    # TypeHandler.WriteBucketServiceImplemenation, which would raise
    # AttributeError whenever this branch was taken.
    if func.name != 'CompressedTexSubImage2DBucket':
      TypeHandler.WriteBucketServiceImplementation(self, func, file)
class BindHandler(TypeHandler):
  """Handler for glBind___ type functions."""

  def __init__(self):
    TypeHandler.__init__(self)

  def WriteServiceUnitTest(self, func, file):
    """Overridden from TypeHandler. Emits valid/invalid-arg service tests."""
    # Removed: unused 'gen_func_names' locals that were assigned empty
    # dicts and never read.
    if len(func.GetOriginalArgs()) == 1:
      # Bind functions with only the object name (like BindVertexArrayOES).
      valid_test = """
TEST_F(%(test_name)s, %(name)sValidArgs) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s));
  SpecializedSetup<%(name)s, 0>(true);
  %(name)s cmd;
  cmd.Init(%(args)s);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
TEST_F(%(test_name)s, %(name)sValidArgsNewId) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(kNewServiceId));
  EXPECT_CALL(*gl_, %(gl_gen_func_name)s(1, _))
      .WillOnce(SetArgumentPointee<1>(kNewServiceId));
  SpecializedSetup<%(name)s, 0>(true);
  %(name)s cmd;
  cmd.Init(kNewClientId);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
  EXPECT_TRUE(Get%(resource_type)sInfo(kNewClientId) != NULL);
}
"""
      self.WriteValidUnitTest(func, file, valid_test, {
          'resource_type': func.GetOriginalArgs()[0].resource_type,
          'gl_gen_func_name': func.GetInfo("gen_func"),
      })
    else:
      # Bind functions with a target and an object name (like BindTexture).
      valid_test = """
TEST_F(%(test_name)s, %(name)sValidArgs) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s));
  SpecializedSetup<%(name)s, 0>(true);
  %(name)s cmd;
  cmd.Init(%(args)s);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
TEST_F(%(test_name)s, %(name)sValidArgsNewId) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(%(first_gl_arg)s, kNewServiceId));
  EXPECT_CALL(*gl_, %(gl_gen_func_name)s(1, _))
      .WillOnce(SetArgumentPointee<1>(kNewServiceId));
  SpecializedSetup<%(name)s, 0>(true);
  %(name)s cmd;
  cmd.Init(%(first_arg)s, kNewClientId);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
  EXPECT_TRUE(Get%(resource_type)sInfo(kNewClientId) != NULL);
}
"""
      self.WriteValidUnitTest(func, file, valid_test, {
          'first_arg': func.GetOriginalArgs()[0].GetValidArg(func, 0, 0),
          'first_gl_arg': func.GetOriginalArgs()[0].GetValidGLArg(func, 0, 0),
          'resource_type': func.GetOriginalArgs()[1].resource_type,
          'gl_gen_func_name': func.GetInfo("gen_func"),
      })

    invalid_test = """
TEST_F(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s)).Times(0);
  SpecializedSetup<%(name)s, 0>(false);
  %(name)s cmd;
  cmd.Init(%(args)s);
  EXPECT_EQ(error::%(parse_result)s, ExecuteCmd(cmd));%(gl_error_test)s
}
"""
    self.WriteInvalidUnitTest(func, file, invalid_test)

  def WriteGLES2Implementation(self, func, file):
    """Writes the GLES2 Implementation."""
    impl_func = func.GetInfo('impl_func')
    impl_decl = func.GetInfo('impl_decl')
    # '== None or == True' is deliberate: absent info (None) means
    # "generate"; an explicit False suppresses generation.
    if (func.can_auto_generate and
        (impl_func == None or impl_func == True) and
        (impl_decl == None or impl_decl == True)):
      file.Write("%s GLES2Implementation::%s(%s) {\n" %
                 (func.return_type, func.original_name,
                  func.MakeTypedOriginalArgString("")))
      file.Write("  GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
      func.WriteDestinationInitalizationValidation(file)
      self.WriteClientGLCallLog(func, file)
      for arg in func.GetOriginalArgs():
        arg.WriteClientSideValidationCode(file, func)
      code = """  if (Is%(type)sReservedId(%(id)s)) {
    SetGLError(GL_INVALID_OPERATION, "%(name)s\", \"%(id)s reserved id");
    return;
  }
  if (Bind%(type)sHelper(%(arg_string)s)) {
    helper_->%(name)s(%(arg_string)s);
  }
  CheckGLError();
}
"""
      name_arg = None
      if len(func.GetOriginalArgs()) == 1:
        # Bind functions that have no target (like BindVertexArrayOES)
        name_arg = func.GetOriginalArgs()[0]
      else:
        # Bind functions that have both a target and a name (like BindTexture)
        name_arg = func.GetOriginalArgs()[1]
      file.Write(code % {
          'name': func.name,
          'arg_string': func.MakeOriginalArgString(""),
          'id': name_arg.name,
          'type': name_arg.resource_type,
          'lc_type': name_arg.resource_type.lower(),
      })

  def WriteGLES2ImplementationUnitTest(self, func, file):
    """Overridden from TypeHandler. Emits the client-side unit test."""
    client_test = func.GetInfo('client_test')
    if client_test == False:
      return
    code = """
TEST_F(GLES2ImplementationTest, %(name)s) {
  struct Cmds {
    %(name)s cmd;
  };
  Cmds expected;
  expected.cmd.Init(%(cmd_args)s);
  gl_->%(name)s(%(args)s);
  EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
  ClearCommands();
  gl_->%(name)s(%(args)s);
  EXPECT_TRUE(NoCommandsWritten());
}
"""
    cmd_arg_strings = []
    for count, arg in enumerate(func.GetCmdArgs()):
      cmd_arg_strings.append(arg.GetValidClientSideCmdArg(func, count, 0))
    # Removed: a dead 'count += 1' that was immediately overwritten (or
    # never read) because enumerate() rebinds count.
    gl_arg_strings = []
    for count, arg in enumerate(func.GetOriginalArgs()):
      gl_arg_strings.append(arg.GetValidClientSideArg(func, count, 0))
    file.Write(code % {
        'name': func.name,
        'args': ", ".join(gl_arg_strings),
        'cmd_args': ", ".join(cmd_arg_strings),
    })
class GENnHandler(TypeHandler):
  """Handler for glGen___ type functions."""

  def __init__(self):
    TypeHandler.__init__(self)

  def InitFunction(self, func):
    """Overridden from TypeHandler."""
    pass

  def WriteGetDataSizeCode(self, func, file):
    """Overridden from TypeHandler. Emits an overflow-checked size compute."""
    code = """  uint32 data_size;
  if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
    return error::kOutOfBounds;
  }
"""
    file.Write(code)

  def WriteHandlerImplementation (self, func, file):
    """Overridden from TypeHandler."""
    file.Write("  if (!%sHelper(n, %s)) {\n"
               "    return error::kInvalidArguments;\n"
               "  }\n" %
               (func.name, func.GetLastOriginalArg().name))

  def WriteImmediateHandlerImplementation(self, func, file):
    """Overridden from TypeHandler. Same as above but keyed on original_name."""
    file.Write("  if (!%sHelper(n, %s)) {\n"
               "    return error::kInvalidArguments;\n"
               "  }\n" %
               (func.original_name, func.GetLastOriginalArg().name))

  def WriteGLES2Implementation(self, func, file):
    """Overridden from TypeHandler. Emits the client-side Gen function."""
    log_code = ("""  GPU_CLIENT_LOG_CODE_BLOCK({
    for (GLsizei i = 0; i < n; ++i) {
      GPU_CLIENT_LOG("  " << i << ": " << %s[i]);
    }
  });""" % func.GetOriginalArgs()[1].name)
    args = {
        'log_code': log_code,
        'return_type': func.return_type,
        'name': func.original_name,
        'typed_args': func.MakeTypedOriginalArgString(""),
        'args': func.MakeOriginalArgString(""),
        'resource_types': func.GetInfo('resource_types'),
        'count_name': func.GetOriginalArgs()[0].name,
    }
    file.Write(
        "%(return_type)s GLES2Implementation::%(name)s(%(typed_args)s) {\n" %
        args)
    func.WriteDestinationInitalizationValidation(file)
    self.WriteClientGLCallLog(func, file)
    for arg in func.GetOriginalArgs():
      arg.WriteClientSideValidationCode(file, func)
    # Ids are allocated client side, then sent to the service with the
    # Immediate variant of the command.
    code = """  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GetIdHandler(id_namespaces::k%(resource_types)s)->
      MakeIds(this, 0, %(args)s);
  %(name)sHelper(%(args)s);
  helper_->%(name)sImmediate(%(args)s);
  helper_->CommandBufferHelper::Flush();
%(log_code)s
  CheckGLError();
}
"""
    file.Write(code % args)

  def WriteGLES2ImplementationUnitTest(self, func, file):
    """Overridden from TypeHandler. Emits the client-side unit test."""
    code = """
TEST_F(GLES2ImplementationTest, %(name)s) {
  GLuint ids[2] = { 0, };
  struct Cmds {
    %(name)sImmediate gen;
    GLuint data[2];
  };
  Cmds expected;
  expected.gen.Init(arraysize(ids), &ids[0]);
  expected.data[0] = k%(types)sStartId;
  expected.data[1] = k%(types)sStartId + 1;
  gl_->%(name)s(arraysize(ids), &ids[0]);
  EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
  EXPECT_EQ(k%(types)sStartId, ids[0]);
  EXPECT_EQ(k%(types)sStartId + 1, ids[1]);
}
"""
    file.Write(code % {
        'name': func.name,
        'types': func.GetInfo('resource_types'),
    })

  def WriteServiceUnitTest(self, func, file):
    """Overridden from TypeHandler. Emits service-side unit tests."""
    valid_test = """
TEST_F(%(test_name)s, %(name)sValidArgs) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(1, _))
      .WillOnce(SetArgumentPointee<1>(kNewServiceId));
  GetSharedMemoryAs<GLuint*>()[0] = kNewClientId;
  SpecializedSetup<%(name)s, 0>(true);
  %(name)s cmd;
  cmd.Init(%(args)s);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
  EXPECT_TRUE(Get%(resource_name)sInfo(kNewClientId) != NULL);
}
"""
    self.WriteValidUnitTest(func, file, valid_test, {
        'resource_name': func.GetInfo('resource_type'),
    })
    # NOTE(review): despite the name, WriteValidUnitTest is reused here to
    # emit the invalid-args test with pre-filled substitutions.
    invalid_test = """
TEST_F(%(test_name)s, %(name)sInvalidArgs) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(_, _)).Times(0);
  GetSharedMemoryAs<GLuint*>()[0] = client_%(resource_name)s_id_;
  SpecializedSetup<%(name)s, 0>(false);
  %(name)s cmd;
  cmd.Init(%(args)s);
  EXPECT_EQ(error::kInvalidArguments, ExecuteCmd(cmd));
}
"""
    self.WriteValidUnitTest(func, file, invalid_test, {
        'resource_name': func.GetInfo('resource_type').lower(),
    })

  def WriteImmediateServiceUnitTest(self, func, file):
    """Overridden from TypeHandler. Emits immediate-cmd service tests."""
    valid_test = """
TEST_F(%(test_name)s, %(name)sValidArgs) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(1, _))
      .WillOnce(SetArgumentPointee<1>(kNewServiceId));
  %(name)s* cmd = GetImmediateAs<%(name)s>();
  GLuint temp = kNewClientId;
  SpecializedSetup<%(name)s, 0>(true);
  cmd->Init(1, &temp);
  EXPECT_EQ(error::kNoError,
            ExecuteImmediateCmd(*cmd, sizeof(temp)));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
  EXPECT_TRUE(Get%(resource_name)sInfo(kNewClientId) != NULL);
}
"""
    self.WriteValidUnitTest(func, file, valid_test, {
        'resource_name': func.GetInfo('resource_type'),
    })
    invalid_test = """
TEST_F(%(test_name)s, %(name)sInvalidArgs) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(_, _)).Times(0);
  %(name)s* cmd = GetImmediateAs<%(name)s>();
  SpecializedSetup<%(name)s, 0>(false);
  cmd->Init(1, &client_%(resource_name)s_id_);
  EXPECT_EQ(error::kInvalidArguments,
            ExecuteImmediateCmd(*cmd, sizeof(&client_%(resource_name)s_id_)));
}
"""
    self.WriteValidUnitTest(func, file, invalid_test, {
        'resource_name': func.GetInfo('resource_type').lower(),
    })

  def WriteImmediateCmdComputeSize(self, func, file):
    """Overridden from TypeHandler. Emits ComputeDataSize()/ComputeSize()."""
    file.Write("  static uint32 ComputeDataSize(GLsizei n) {\n")
    file.Write(
        "    return static_cast<uint32>(sizeof(GLuint) * n);  // NOLINT\n")
    file.Write("  }\n")
    file.Write("\n")
    file.Write("  static uint32 ComputeSize(GLsizei n) {\n")
    file.Write("    return static_cast<uint32>(\n")
    file.Write("        sizeof(ValueType) + ComputeDataSize(n));  // NOLINT\n")
    file.Write("  }\n")
    file.Write("\n")

  def WriteImmediateCmdSetHeader(self, func, file):
    """Overridden from TypeHandler. Emits SetHeader(n)."""
    file.Write("  void SetHeader(GLsizei n) {\n")
    file.Write("    header.SetCmdByTotalSize<ValueType>(ComputeSize(n));\n")
    file.Write("  }\n")
    file.Write("\n")

  def WriteImmediateCmdInit(self, func, file):
    """Overridden from TypeHandler. Emits Init(), which copies the id data."""
    last_arg = func.GetLastOriginalArg()
    file.Write("  void Init(%s, %s _%s) {\n" %
               (func.MakeTypedCmdArgString("_"),
                last_arg.type, last_arg.name))
    file.Write("    SetHeader(_n);\n")
    args = func.GetCmdArgs()
    for arg in args:
      file.Write("    %s = _%s;\n" % (arg.name, arg.name))
    file.Write("    memcpy(ImmediateDataAddress(this),\n")
    file.Write("           _%s, ComputeDataSize(_n));\n" % last_arg.name)
    file.Write("  }\n")
    file.Write("\n")

  def WriteImmediateCmdSet(self, func, file):
    """Overridden from TypeHandler. Emits the static Set() helper."""
    last_arg = func.GetLastOriginalArg()
    copy_args = func.MakeCmdArgString("_", False)
    file.Write("  void* Set(void* cmd%s, %s _%s) {\n" %
               (func.MakeTypedCmdArgString("_", True),
                last_arg.type, last_arg.name))
    file.Write("    static_cast<ValueType*>(cmd)->Init(%s, _%s);\n" %
               (copy_args, last_arg.name))
    file.Write("    const uint32 size = ComputeSize(_n);\n")
    file.Write("    return NextImmediateCmdAddressTotalSize<ValueType>("
               "cmd, size);\n")
    file.Write("  }\n")
    file.Write("\n")

  def WriteImmediateCmdHelper(self, func, file):
    """Overridden from TypeHandler. Emits the cmd-helper method."""
    code = """  void %(name)s(%(typed_args)s) {
    const uint32 size = gles2::%(name)s::ComputeSize(n);
    gles2::%(name)s* c =
        GetImmediateCmdSpaceTotalSize<gles2::%(name)s>(size);
    if (c) {
      c->Init(%(args)s);
    }
  }
"""
    file.Write(code % {
        "name": func.name,
        "typed_args": func.MakeTypedOriginalArgString(""),
        "args": func.MakeOriginalArgString(""),
    })

  def WriteImmediateFormatTest(self, func, file):
    """Overridden from TypeHandler. Emits the cmd-format unit test."""
    file.Write("TEST_F(GLES2FormatTest, %s) {\n" % func.name)
    file.Write("  static GLuint ids[] = { 12, 23, 34, };\n")
    file.Write("  %s& cmd = *GetBufferAs<%s>();\n" % (func.name, func.name))
    file.Write("  void* next_cmd = cmd.Set(\n")
    file.Write("      &cmd, static_cast<GLsizei>(arraysize(ids)), ids);\n")
    file.Write("  EXPECT_EQ(static_cast<uint32>(%s::kCmdId),\n" % func.name)
    file.Write("            cmd.header.command);\n")
    file.Write("  EXPECT_EQ(sizeof(cmd) +\n")
    file.Write("            RoundSizeToMultipleOfEntries(cmd.n * 4u),\n")
    file.Write("            cmd.header.size * 4u);\n")
    file.Write("  EXPECT_EQ(static_cast<GLsizei>(arraysize(ids)), cmd.n);\n");
    file.Write("  CheckBytesWrittenMatchesExpectedSize(\n")
    file.Write("      next_cmd, sizeof(cmd) +\n")
    file.Write("      RoundSizeToMultipleOfEntries(arraysize(ids) * 4u));\n")
    file.Write("  // TODO(gman): Check that ids were inserted;\n")
    file.Write("}\n")
    file.Write("\n")
class CreateHandler(TypeHandler):
  """Handler for glCreate___ type functions."""

  def __init__(self):
    TypeHandler.__init__(self)

  def InitFunction(self, func):
    """Overridden from TypeHandler."""
    # The client picks the id; it rides along as an extra cmd argument.
    func.AddCmdArg(Argument("client_id", 'uint32'))

  def WriteServiceUnitTest(self, func, file):
    """Overridden from TypeHandler. Emits service-side unit tests."""
    valid_test = """
TEST_F(%(test_name)s, %(name)sValidArgs) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s))
      .WillOnce(Return(kNewServiceId));
  SpecializedSetup<%(name)s, 0>(true);
  %(name)s cmd;
  cmd.Init(%(args)s%(comma)skNewClientId);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
  EXPECT_TRUE(Get%(resource_type)sInfo(kNewClientId) != NULL);
}
"""
    # Only functions with original args need a separating comma before the
    # appended client id.
    comma = ""
    if len(func.GetOriginalArgs()):
      comma =", "
    self.WriteValidUnitTest(func, file, valid_test, {
        'comma': comma,
        # 'Create' is 6 characters; the remainder is the resource type.
        'resource_type': func.name[6:],
    })
    invalid_test = """
TEST_F(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s)).Times(0);
  SpecializedSetup<%(name)s, 0>(false);
  %(name)s cmd;
  cmd.Init(%(args)s%(comma)skNewClientId);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));%(gl_error_test)s
}
"""
    self.WriteInvalidUnitTest(func, file, invalid_test, {
        'comma': comma,
    })

  def WriteHandlerImplementation (self, func, file):
    """Overridden from TypeHandler."""
    file.Write("  uint32 client_id = c.client_id;\n")
    file.Write("  if (!%sHelper(%s)) {\n" %
               (func.name, func.MakeCmdArgString("")))
    file.Write("    return error::kInvalidArguments;\n")
    file.Write("  }\n")

  def WriteGLES2Implementation(self, func, file):
    """Overridden from TypeHandler. Emits the client-side implementation."""
    file.Write("%s GLES2Implementation::%s(%s) {\n" %
               (func.return_type, func.original_name,
                func.MakeTypedOriginalArgString("")))
    file.Write("  GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
    func.WriteDestinationInitalizationValidation(file)
    self.WriteClientGLCallLog(func, file)
    for arg in func.GetOriginalArgs():
      arg.WriteClientSideValidationCode(file, func)
    # Allocate the client-side id, then send it with the command.
    file.Write("  GLuint client_id;\n")
    file.Write(
        "  GetIdHandler(id_namespaces::kProgramsAndShaders)->\n")
    file.Write("      MakeIds(this, 0, 1, &client_id);\n")
    file.Write("  helper_->%s(%s);\n" %
               (func.name, func.MakeCmdArgString("")))
    file.Write('  GPU_CLIENT_LOG("returned " << client_id);\n')
    file.Write("  CheckGLError();\n")
    file.Write("  return client_id;\n")
    file.Write("}\n")
    file.Write("\n")
class DeleteHandler(TypeHandler):
  """Handler for glDelete___ single resource type functions."""

  def __init__(self):
    TypeHandler.__init__(self)

  def WriteServiceImplementation(self, func, file):
    """Overridden from TypeHandler. The service side is written by hand."""
    pass

  def WriteGLES2Implementation(self, func, file):
    """Overridden from TypeHandler. Emits the client-side implementation."""
    original_args = func.GetOriginalArgs()
    resource_id_name = original_args[-1].name
    signature = "%s GLES2Implementation::%s(%s) {\n" % (
        func.return_type, func.original_name,
        func.MakeTypedOriginalArgString(""))
    file.Write(signature)
    file.Write("  GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
    func.WriteDestinationInitalizationValidation(file)
    self.WriteClientGLCallLog(func, file)
    for original_arg in original_args:
      original_arg.WriteClientSideValidationCode(file, func)
    file.Write(
        "  GPU_CLIENT_DCHECK(%s != 0);\n" % resource_id_name)
    file.Write("  %sHelper(%s);\n" %
               (func.original_name, resource_id_name))
    file.Write("  CheckGLError();\n")
    file.Write("}\n")
    file.Write("\n")
class DELnHandler(TypeHandler):
  """Handler for glDelete___ type functions."""

  def __init__(self):
    TypeHandler.__init__(self)

  def WriteGetDataSizeCode(self, func, file):
    """Overridden from TypeHandler. Emits an overflow-checked size compute."""
    code = """  uint32 data_size;
  if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
    return error::kOutOfBounds;
  }
"""
    file.Write(code)

  def WriteGLES2ImplementationUnitTest(self, func, file):
    """Overridden from TypeHandler. Emits the client-side unit test."""
    code = """
TEST_F(GLES2ImplementationTest, %(name)s) {
  GLuint ids[2] = { k%(types)sStartId, k%(types)sStartId + 1 };
  struct Cmds {
    %(name)sImmediate del;
    GLuint data[2];
  };
  Cmds expected;
  expected.del.Init(arraysize(ids), &ids[0]);
  expected.data[0] = k%(types)sStartId;
  expected.data[1] = k%(types)sStartId + 1;
  gl_->%(name)s(arraysize(ids), &ids[0]);
  EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
}
"""
    file.Write(code % {
        'name': func.name,
        'types': func.GetInfo('resource_types'),
    })

  def WriteServiceUnitTest(self, func, file):
    """Overridden from TypeHandler. Emits service-side unit tests."""
    valid_test = """
TEST_F(%(test_name)s, %(name)sValidArgs) {
  EXPECT_CALL(
      *gl_,
      %(gl_func_name)s(1, Pointee(kService%(upper_resource_name)sId)))
      .Times(1);
  GetSharedMemoryAs<GLuint*>()[0] = client_%(resource_name)s_id_;
  SpecializedSetup<%(name)s, 0>(true);
  %(name)s cmd;
  cmd.Init(%(args)s);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
  EXPECT_TRUE(
      Get%(upper_resource_name)sInfo(client_%(resource_name)s_id_) == NULL);
}
"""
    self.WriteValidUnitTest(func, file, valid_test, {
        'resource_name': func.GetInfo('resource_type').lower(),
        'upper_resource_name': func.GetInfo('resource_type'),
    })
    # Deleting an unknown id is not a GL error; the command still returns
    # kNoError.
    invalid_test = """
TEST_F(%(test_name)s, %(name)sInvalidArgs) {
  GetSharedMemoryAs<GLuint*>()[0] = kInvalidClientId;
  SpecializedSetup<%(name)s, 0>(false);
  %(name)s cmd;
  cmd.Init(%(args)s);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
}
"""
    self.WriteValidUnitTest(func, file, invalid_test)

  def WriteImmediateServiceUnitTest(self, func, file):
    """Overridden from TypeHandler. Emits immediate-cmd service tests."""
    valid_test = """
TEST_F(%(test_name)s, %(name)sValidArgs) {
  EXPECT_CALL(
      *gl_,
      %(gl_func_name)s(1, Pointee(kService%(upper_resource_name)sId)))
      .Times(1);
  %(name)s& cmd = *GetImmediateAs<%(name)s>();
  SpecializedSetup<%(name)s, 0>(true);
  cmd.Init(1, &client_%(resource_name)s_id_);
  EXPECT_EQ(error::kNoError,
            ExecuteImmediateCmd(cmd, sizeof(client_%(resource_name)s_id_)));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
  EXPECT_TRUE(
      Get%(upper_resource_name)sInfo(client_%(resource_name)s_id_) == NULL);
}
"""
    self.WriteValidUnitTest(func, file, valid_test, {
        'resource_name': func.GetInfo('resource_type').lower(),
        'upper_resource_name': func.GetInfo('resource_type'),
    })
    invalid_test = """
TEST_F(%(test_name)s, %(name)sInvalidArgs) {
  %(name)s& cmd = *GetImmediateAs<%(name)s>();
  SpecializedSetup<%(name)s, 0>(false);
  GLuint temp = kInvalidClientId;
  cmd.Init(1, &temp);
  EXPECT_EQ(error::kNoError,
            ExecuteImmediateCmd(cmd, sizeof(temp)));
}
"""
    self.WriteValidUnitTest(func, file, invalid_test)

  def WriteHandlerImplementation (self, func, file):
    """Overridden from TypeHandler."""
    file.Write("  %sHelper(n, %s);\n" %
               (func.name, func.GetLastOriginalArg().name))

  def WriteImmediateHandlerImplementation (self, func, file):
    """Overridden from TypeHandler. Same as above but keyed on original_name."""
    file.Write("  %sHelper(n, %s);\n" %
               (func.original_name, func.GetLastOriginalArg().name))

  def WriteGLES2Implementation(self, func, file):
    """Overridden from TypeHandler. Emits the client-side implementation."""
    impl_decl = func.GetInfo('impl_decl')
    # None means "not specified" and defaults to generating the impl.
    if impl_decl == None or impl_decl == True:
      args = {
          'return_type': func.return_type,
          'name': func.original_name,
          'typed_args': func.MakeTypedOriginalArgString(""),
          'args': func.MakeOriginalArgString(""),
          'resource_type': func.GetInfo('resource_type').lower(),
          'count_name': func.GetOriginalArgs()[0].name,
      }
      file.Write(
          "%(return_type)s GLES2Implementation::%(name)s(%(typed_args)s) {\n" %
          args)
      file.Write("  GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
      func.WriteDestinationInitalizationValidation(file)
      self.WriteClientGLCallLog(func, file)
      file.Write("""  GPU_CLIENT_LOG_CODE_BLOCK({
    for (GLsizei i = 0; i < n; ++i) {
      GPU_CLIENT_LOG("  " << i << ": " << %s[i]);
    }
  });
""" % func.GetOriginalArgs()[1].name)
      file.Write("""  GPU_CLIENT_DCHECK_CODE_BLOCK({
    for (GLsizei i = 0; i < n; ++i) {
      GPU_DCHECK(%s[i] != 0);
    }
  });
""" % func.GetOriginalArgs()[1].name)
      for arg in func.GetOriginalArgs():
        arg.WriteClientSideValidationCode(file, func)
      code = """  %(name)sHelper(%(args)s);
  CheckGLError();
}
"""
      file.Write(code % args)

  def WriteImmediateCmdComputeSize(self, func, file):
    """Overridden from TypeHandler. Emits ComputeDataSize()/ComputeSize()."""
    file.Write("  static uint32 ComputeDataSize(GLsizei n) {\n")
    file.Write(
        "    return static_cast<uint32>(sizeof(GLuint) * n);  // NOLINT\n")
    file.Write("  }\n")
    file.Write("\n")
    file.Write("  static uint32 ComputeSize(GLsizei n) {\n")
    file.Write("    return static_cast<uint32>(\n")
    file.Write("        sizeof(ValueType) + ComputeDataSize(n));  // NOLINT\n")
    file.Write("  }\n")
    file.Write("\n")

  def WriteImmediateCmdSetHeader(self, func, file):
    """Overridden from TypeHandler. Emits SetHeader(n)."""
    file.Write("  void SetHeader(GLsizei n) {\n")
    file.Write("    header.SetCmdByTotalSize<ValueType>(ComputeSize(n));\n")
    file.Write("  }\n")
    file.Write("\n")

  def WriteImmediateCmdInit(self, func, file):
    """Overridden from TypeHandler. Emits Init(), which copies the id data."""
    last_arg = func.GetLastOriginalArg()
    file.Write("  void Init(%s, %s _%s) {\n" %
               (func.MakeTypedCmdArgString("_"),
                last_arg.type, last_arg.name))
    file.Write("    SetHeader(_n);\n")
    args = func.GetCmdArgs()
    for arg in args:
      file.Write("    %s = _%s;\n" % (arg.name, arg.name))
    file.Write("    memcpy(ImmediateDataAddress(this),\n")
    file.Write("           _%s, ComputeDataSize(_n));\n" % last_arg.name)
    file.Write("  }\n")
    file.Write("\n")

  def WriteImmediateCmdSet(self, func, file):
    """Overridden from TypeHandler. Emits the static Set() helper."""
    last_arg = func.GetLastOriginalArg()
    copy_args = func.MakeCmdArgString("_", False)
    file.Write("  void* Set(void* cmd%s, %s _%s) {\n" %
               (func.MakeTypedCmdArgString("_", True),
                last_arg.type, last_arg.name))
    file.Write("    static_cast<ValueType*>(cmd)->Init(%s, _%s);\n" %
               (copy_args, last_arg.name))
    file.Write("    const uint32 size = ComputeSize(_n);\n")
    file.Write("    return NextImmediateCmdAddressTotalSize<ValueType>("
               "cmd, size);\n")
    file.Write("  }\n")
    file.Write("\n")

  def WriteImmediateCmdHelper(self, func, file):
    """Overridden from TypeHandler. Emits the cmd-helper method."""
    code = """  void %(name)s(%(typed_args)s) {
    const uint32 size = gles2::%(name)s::ComputeSize(n);
    gles2::%(name)s* c =
        GetImmediateCmdSpaceTotalSize<gles2::%(name)s>(size);
    if (c) {
      c->Init(%(args)s);
    }
  }
"""
    file.Write(code % {
        "name": func.name,
        "typed_args": func.MakeTypedOriginalArgString(""),
        "args": func.MakeOriginalArgString(""),
    })

  def WriteImmediateFormatTest(self, func, file):
    """Overridden from TypeHandler. Emits the cmd-format unit test."""
    file.Write("TEST_F(GLES2FormatTest, %s) {\n" % func.name)
    file.Write("  static GLuint ids[] = { 12, 23, 34, };\n")
    file.Write("  %s& cmd = *GetBufferAs<%s>();\n" % (func.name, func.name))
    file.Write("  void* next_cmd = cmd.Set(\n")
    file.Write("      &cmd, static_cast<GLsizei>(arraysize(ids)), ids);\n")
    file.Write("  EXPECT_EQ(static_cast<uint32>(%s::kCmdId),\n" % func.name)
    file.Write("            cmd.header.command);\n")
    file.Write("  EXPECT_EQ(sizeof(cmd) +\n")
    file.Write("            RoundSizeToMultipleOfEntries(cmd.n * 4u),\n")
    file.Write("            cmd.header.size * 4u);\n")
    file.Write("  EXPECT_EQ(static_cast<GLsizei>(arraysize(ids)), cmd.n);\n");
    file.Write("  CheckBytesWrittenMatchesExpectedSize(\n")
    file.Write("      next_cmd, sizeof(cmd) +\n")
    file.Write("      RoundSizeToMultipleOfEntries(arraysize(ids) * 4u));\n")
    file.Write("  // TODO(gman): Check that ids were inserted;\n")
    file.Write("}\n")
    file.Write("\n")
class GETnHandler(TypeHandler):
"""Handler for GETn for glGetBooleanv, glGetFloatv, ... type functions."""
  def __init__(self):
    # Nothing beyond the base TypeHandler initialization.
    TypeHandler.__init__(self)
  def AddImmediateFunction(self, generator, func):
    """Overridden from TypeHandler. Suppresses the immediate variant."""
    pass
  def WriteServiceImplementation(self, func, file):
    """Overridden from TypeHandler. Emits the service-side Handle<name>()."""
    file.Write(
        "error::Error GLES2DecoderImpl::Handle%s(\n" % func.name)
    file.Write(
        "    uint32 immediate_data_size, const gles2::%s& c) {\n" % func.name)
    last_arg = func.GetLastOriginalArg()
    all_but_last_args = func.GetOriginalArgs()[:-1]
    for arg in all_but_last_args:
      arg.WriteGetCode(file)
    # The last argument is the result pointer; it is fetched from shared
    # memory sized by GetNumValuesReturnedForGLGet.
    code = """  typedef %(func_name)s::Result Result;
  GLsizei num_values = 0;
  GetNumValuesReturnedForGLGet(pname, &num_values);
  Result* result = GetSharedMemoryAs<Result*>(
      c.params_shm_id, c.params_shm_offset, Result::ComputeSize(num_values));
  %(last_arg_type)s params = result ? result->GetData() : NULL;
"""
    file.Write(code % {
        'last_arg_type': last_arg.type,
        'func_name': func.name,
    })
    func.WriteHandlerValidation(file)
    code = """  // Check that the client initialized the result.
  if (result->size != 0) {
    return error::kInvalidArguments;
  }
  CopyRealGLErrorsToWrapper();
"""
    file.Write(code)
    func.WriteHandlerImplementation(file)
    code = """  GLenum error = glGetError();
  if (error == GL_NO_ERROR) {
    result->SetNumResults(num_values);
  } else {
    SetGLError(error, "", "");
  }
  return error::kNoError;
}
"""
    file.Write(code)
def WriteGLES2Implementation(self, func, file):
"""Overrriden from TypeHandler."""
impl_decl = func.GetInfo('impl_decl')
if impl_decl == None or impl_decl == True:
file.Write("%s GLES2Implementation::%s(%s) {\n" %
(func.return_type, func.original_name,
func.MakeTypedOriginalArgString("")))
file.Write(" GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
func.WriteDestinationInitalizationValidation(file)
self.WriteClientGLCallLog(func, file)
for arg in func.GetOriginalArgs():
arg.WriteClientSideValidationCode(file, func)
all_but_last_args = func.GetOriginalArgs()[:-1]
arg_string = (
", ".join(["%s" % arg.name for arg in all_but_last_args]))
all_arg_string = (
", ".join(["%s" % arg.name for arg in func.GetOriginalArgs()]))
self.WriteTraceEvent(func, file)
code = """ if (%(func_name)sHelper(%(all_arg_string)s)) {
return;
}
typedef %(func_name)s::Result Result;
Result* result = GetResultAs<Result*>();
if (!result) {
return;
}
result->SetNumResults(0);
helper_->%(func_name)s(%(arg_string)s,
GetResultShmId(), GetResultShmOffset());
WaitForCmd();
result->CopyResult(params);
GPU_CLIENT_LOG_CODE_BLOCK({
for (int32 i = 0; i < result->GetNumResults(); ++i) {
GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]);
}
});
CheckGLError();
}
"""
file.Write(code % {
'func_name': func.name,
'arg_string': arg_string,
'all_arg_string': all_arg_string,
})
def WriteGLES2ImplementationUnitTest(self, func, file):
"""Writes the GLES2 Implemention unit test."""
code = """
TEST_F(GLES2ImplementationTest, %(name)s) {
struct Cmds {
%(name)s cmd;
};
typedef %(name)s::Result Result;
Result::Type result = 0;
Cmds expected;
ExpectedMemoryInfo result1 = GetExpectedResultMemory(4);
expected.cmd.Init(%(cmd_args)s, result1.id, result1.offset);
EXPECT_CALL(*command_buffer(), OnFlush())
.WillOnce(SetMemory(result1.ptr, SizedResultHelper<Result::Type>(1)))
.RetiresOnSaturation();
gl_->%(name)s(%(args)s, &result);
EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
EXPECT_EQ(static_cast<Result::Type>(1), result);
}
"""
cmd_arg_strings = []
for count, arg in enumerate(func.GetCmdArgs()[0:-2]):
cmd_arg_strings.append(arg.GetValidClientSideCmdArg(func, count, 0))
cmd_arg_strings[0] = '123'
gl_arg_strings = []
for count, arg in enumerate(func.GetOriginalArgs()[0:-1]):
gl_arg_strings.append(arg.GetValidClientSideArg(func, count, 0))
gl_arg_strings[0] = '123'
file.Write(code % {
'name': func.name,
'args': ", ".join(gl_arg_strings),
'cmd_args': ", ".join(cmd_arg_strings),
})
def WriteServiceUnitTest(self, func, file):
"""Overrriden from TypeHandler."""
valid_test = """
TEST_F(%(test_name)s, %(name)sValidArgs) {
EXPECT_CALL(*gl_, GetError())
.WillOnce(Return(GL_NO_ERROR))
.WillOnce(Return(GL_NO_ERROR))
.RetiresOnSaturation();
SpecializedSetup<%(name)s, 0>(true);
typedef %(name)s::Result Result;
Result* result = static_cast<Result*>(shared_memory_address_);
EXPECT_CALL(*gl_, %(gl_func_name)s(%(local_gl_args)s));
result->size = 0;
%(name)s cmd;
cmd.Init(%(args)s);
EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(
%(valid_pname)s),
result->GetNumResults());
EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
"""
gl_arg_strings = []
valid_pname = ''
for count, arg in enumerate(func.GetOriginalArgs()[:-1]):
arg_value = arg.GetValidGLArg(func, count, 0)
gl_arg_strings.append(arg_value)
if arg.name == 'pname':
valid_pname = arg_value
if func.GetInfo('gl_test_func') == 'glGetIntegerv':
gl_arg_strings.append("_")
else:
gl_arg_strings.append("result->GetData()")
self.WriteValidUnitTest(func, file, valid_test, {
'local_gl_args': ", ".join(gl_arg_strings),
'valid_pname': valid_pname,
})
invalid_test = """
TEST_F(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s)).Times(0);
SpecializedSetup<%(name)s, 0>(false);
%(name)s::Result* result =
static_cast<%(name)s::Result*>(shared_memory_address_);
result->size = 0;
%(name)s cmd;
cmd.Init(%(args)s);
EXPECT_EQ(error::%(parse_result)s, ExecuteCmd(cmd));
EXPECT_EQ(0u, result->size);%(gl_error_test)s
}
"""
self.WriteInvalidUnitTest(func, file, invalid_test)
class PUTHandler(TypeHandler):
  """Handler for glTexParameter_v, glVertexAttrib_v functions.

  PUT commands send a fixed-size array of values (func.info.count elements
  of func.info.data_type), either via shared memory or as an immediate
  command with the data trailing the command struct.
  """

  def __init__(self):
    TypeHandler.__init__(self)

  def WriteServiceUnitTest(self, func, file):
    """Writes the service unit test for a command."""
    valid_test = """
TEST_F(%(test_name)s, %(name)sValidArgs) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s));
  SpecializedSetup<%(name)s, 0>(true);
  %(name)s cmd;
  cmd.Init(%(args)s);
  GetSharedMemoryAs<%(data_type)s*>()[0] = %(data_value)s;
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
"""
    extra = {
      'data_type': func.GetInfo('data_type'),
      'data_value': func.GetInfo('data_value') or '0',
    }
    self.WriteValidUnitTest(func, file, valid_test, extra)
    invalid_test = """
TEST_F(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s)).Times(0);
  SpecializedSetup<%(name)s, 0>(false);
  %(name)s cmd;
  cmd.Init(%(args)s);
  GetSharedMemoryAs<%(data_type)s*>()[0] = %(data_value)s;
  EXPECT_EQ(error::%(parse_result)s, ExecuteCmd(cmd));%(gl_error_test)s
}
"""
    self.WriteInvalidUnitTest(func, file, invalid_test, extra)

  def WriteImmediateServiceUnitTest(self, func, file):
    """Writes the service unit test for the immediate version of a command."""
    valid_test = """
TEST_F(%(test_name)s, %(name)sValidArgs) {
  %(name)s& cmd = *GetImmediateAs<%(name)s>();
  EXPECT_CALL(
      *gl_,
      %(gl_func_name)s(%(gl_args)s,
          reinterpret_cast<%(data_type)s*>(ImmediateDataAddress(&cmd))));
  SpecializedSetup<%(name)s, 0>(true);
  %(data_type)s temp[%(data_count)s] = { %(data_value)s, };
  cmd.Init(%(gl_args)s, &temp[0]);
  EXPECT_EQ(error::kNoError,
            ExecuteImmediateCmd(cmd, sizeof(temp)));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
"""
    # The trailing data pointer is excluded: it is matched as the immediate
    # data address in the EXPECT_CALL above.
    gl_arg_strings = []
    gl_any_strings = []
    for count, arg in enumerate(func.GetOriginalArgs()[0:-1]):
      gl_arg_strings.append(arg.GetValidGLArg(func, count, 0))
      gl_any_strings.append("_")
    extra = {
      'data_type': func.GetInfo('data_type'),
      'data_count': func.GetInfo('count'),
      'data_value': func.GetInfo('data_value') or '0',
      'gl_args': ", ".join(gl_arg_strings),
      'gl_any_args': ", ".join(gl_any_strings),
    }
    self.WriteValidUnitTest(func, file, valid_test, extra)
    invalid_test = """
TEST_F(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
  %(name)s& cmd = *GetImmediateAs<%(name)s>();
  EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_any_args)s, _)).Times(0);
  SpecializedSetup<%(name)s, 0>(false);
  %(data_type)s temp[%(data_count)s] = { %(data_value)s, };
  cmd.Init(%(all_but_last_args)s, &temp[0]);
  EXPECT_EQ(error::%(parse_result)s,
            ExecuteImmediateCmd(cmd, sizeof(temp)));%(gl_error_test)s
}
"""
    self.WriteInvalidUnitTest(func, file, invalid_test, extra)

  def WriteGetDataSizeCode(self, func, file):
    """Overridden from TypeHandler.

    Emits overflow-checked computation of the fixed data size; immediate
    commands additionally bound-check against the available payload.
    """
    code = """  uint32 data_size;
  if (!ComputeDataSize(1, sizeof(%s), %d, &data_size)) {
    return error::kOutOfBounds;
  }
"""
    file.Write(code % (func.info.data_type, func.info.count))
    if func.is_immediate:
      file.Write("  if (data_size > immediate_data_size) {\n")
      file.Write("    return error::kOutOfBounds;\n")
      file.Write("  }\n")

  def WriteGLES2Implementation(self, func, file):
    """Overridden from TypeHandler. Writes the client-side implementation."""
    file.Write("%s GLES2Implementation::%s(%s) {\n" %
               (func.return_type, func.original_name,
                func.MakeTypedOriginalArgString("")))
    file.Write("  GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
    func.WriteDestinationInitalizationValidation(file)
    self.WriteClientGLCallLog(func, file)
    last_arg_name = func.GetLastOriginalArg().name
    # Log every element of the fixed-size value array.
    values_str = ' << ", " << '.join(
        ["%s[%d]" % (last_arg_name, ndx) for ndx in range(0, func.info.count)])
    file.Write('  GPU_CLIENT_LOG("values: " << %s);\n' % values_str)
    for arg in func.GetOriginalArgs():
      arg.WriteClientSideValidationCode(file, func)
    file.Write("  helper_->%sImmediate(%s);\n" %
               (func.name, func.MakeOriginalArgString("")))
    file.Write("  CheckGLError();\n")
    file.Write("}\n")
    file.Write("\n")

  def WriteGLES2ImplementationUnitTest(self, func, file):
    """Writes the GLES2 Implementation unit test."""
    code = """
TEST_F(GLES2ImplementationTest, %(name)s) {
  struct Cmds {
    %(name)sImmediate cmd;
    %(type)s data[%(count)d];
  };

  Cmds expected;
  for (int jj = 0; jj < %(count)d; ++jj) {
    expected.data[jj] = static_cast<%(type)s>(jj);
  }
  expected.cmd.Init(%(cmd_args)s, &expected.data[0]);
  gl_->%(name)s(%(args)s, &expected.data[0]);
  EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
}
"""
    cmd_arg_strings = []
    for count, arg in enumerate(func.GetCmdArgs()[0:-2]):
      cmd_arg_strings.append(arg.GetValidClientSideCmdArg(func, count, 0))
    gl_arg_strings = []
    for count, arg in enumerate(func.GetOriginalArgs()[0:-1]):
      gl_arg_strings.append(arg.GetValidClientSideArg(func, count, 0))
    file.Write(code % {
        'name': func.name,
        'type': func.GetInfo('data_type'),
        'count': func.GetInfo('count'),
        'args': ", ".join(gl_arg_strings),
        'cmd_args': ", ".join(cmd_arg_strings),
      })

  def WriteImmediateCmdComputeSize(self, func, file):
    """Overridden from TypeHandler. Size is fixed: sizeof(type) * count."""
    file.Write("  static uint32 ComputeDataSize() {\n")
    file.Write("    return static_cast<uint32>(\n")
    file.Write("        sizeof(%s) * %d);  // NOLINT\n" %
               (func.info.data_type, func.info.count))
    file.Write("  }\n")
    file.Write("\n")
    file.Write("  static uint32 ComputeSize() {\n")
    file.Write("    return static_cast<uint32>(\n")
    file.Write(
        "        sizeof(ValueType) + ComputeDataSize());  // NOLINT\n")
    file.Write("  }\n")
    file.Write("\n")

  def WriteImmediateCmdSetHeader(self, func, file):
    """Overridden from TypeHandler."""
    file.Write("  void SetHeader() {\n")
    file.Write(
        "    header.SetCmdByTotalSize<ValueType>(ComputeSize());\n")
    file.Write("  }\n")
    file.Write("\n")

  def WriteImmediateCmdInit(self, func, file):
    """Overridden from TypeHandler.

    Init() copies the trailing data array into the immediate payload right
    after the command struct.
    """
    last_arg = func.GetLastOriginalArg()
    file.Write("  void Init(%s, %s _%s) {\n" %
               (func.MakeTypedCmdArgString("_"),
                last_arg.type, last_arg.name))
    file.Write("    SetHeader();\n")
    args = func.GetCmdArgs()
    for arg in args:
      file.Write("    %s = _%s;\n" % (arg.name, arg.name))
    file.Write("    memcpy(ImmediateDataAddress(this),\n")
    file.Write("           _%s, ComputeDataSize());\n" % last_arg.name)
    file.Write("  }\n")
    file.Write("\n")

  def WriteImmediateCmdSet(self, func, file):
    """Overridden from TypeHandler."""
    last_arg = func.GetLastOriginalArg()
    copy_args = func.MakeCmdArgString("_", False)
    file.Write("  void* Set(void* cmd%s, %s _%s) {\n" %
               (func.MakeTypedCmdArgString("_", True),
                last_arg.type, last_arg.name))
    file.Write("    static_cast<ValueType*>(cmd)->Init(%s, _%s);\n" %
               (copy_args, last_arg.name))
    file.Write("    const uint32 size = ComputeSize();\n")
    file.Write("    return NextImmediateCmdAddressTotalSize<ValueType>("
               "cmd, size);\n")
    file.Write("  }\n")
    file.Write("\n")

  def WriteImmediateCmdHelper(self, func, file):
    """Overridden from TypeHandler. Writes the cmd helper."""
    code = """  void %(name)s(%(typed_args)s) {
    const uint32 size = gles2::%(name)s::ComputeSize();
    gles2::%(name)s* c =
        GetImmediateCmdSpaceTotalSize<gles2::%(name)s>(size);
    if (c) {
      c->Init(%(args)s);
    }
  }

"""
    file.Write(code % {
          "name": func.name,
          "typed_args": func.MakeTypedOriginalArgString(""),
          "args": func.MakeOriginalArgString(""),
        })

  def WriteImmediateFormatTest(self, func, file):
    """Overridden from TypeHandler. Writes the cmd format test.

    Scalar cmd args are filled with distinguishable values (11, 12, ...)
    so field-ordering mistakes show up as mismatches.
    """
    file.Write("TEST_F(GLES2FormatTest, %s) {\n" % func.name)
    file.Write("  const int kSomeBaseValueToTestWith = 51;\n")
    file.Write("  static %s data[] = {\n" % func.info.data_type)
    for v in range(0, func.info.count):
      file.Write("    static_cast<%s>(kSomeBaseValueToTestWith + %d),\n" %
                 (func.info.data_type, v))
    file.Write("  };\n")
    file.Write("  %s& cmd = *GetBufferAs<%s>();\n" % (func.name, func.name))
    file.Write("  void* next_cmd = cmd.Set(\n")
    file.Write("      &cmd")
    args = func.GetCmdArgs()
    for value, arg in enumerate(args):
      file.Write(",\n      static_cast<%s>(%d)" % (arg.type, value + 11))
    file.Write(",\n      data);\n")
    args = func.GetCmdArgs()
    file.Write("  EXPECT_EQ(static_cast<uint32>(%s::kCmdId),\n" % func.name)
    file.Write("            cmd.header.command);\n")
    file.Write("  EXPECT_EQ(sizeof(cmd) +\n")
    file.Write("            RoundSizeToMultipleOfEntries(sizeof(data)),\n")
    file.Write("            cmd.header.size * 4u);\n")
    for value, arg in enumerate(args):
      file.Write("  EXPECT_EQ(static_cast<%s>(%d), cmd.%s);\n" %
                 (arg.type, value + 11, arg.name))
    file.Write("  CheckBytesWrittenMatchesExpectedSize(\n")
    file.Write("      next_cmd, sizeof(cmd) +\n")
    file.Write("      RoundSizeToMultipleOfEntries(sizeof(data)));\n")
    file.Write("  // TODO(gman): Check that data was inserted;\n")
    file.Write("}\n")
    file.Write("\n")
class PUTnHandler(TypeHandler):
  """Handler for PUTn 'glUniform__v' type functions.

  Like PUTHandler, but the data size is `count` (a runtime argument) times
  the per-element size, so all ComputeSize/SetHeader helpers take a count.
  """

  def __init__(self):
    TypeHandler.__init__(self)

  def WriteServiceUnitTest(self, func, file):
    """Overridden from TypeHandler.

    Adds a CountTooLarge test on top of the standard tests: the command
    requests more elements than exist, and the decoder is expected to clamp
    the count passed to GL.
    """
    TypeHandler.WriteServiceUnitTest(self, func, file)
    valid_test = """
TEST_F(%(test_name)s, %(name)sValidArgsCountTooLarge) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s));
  SpecializedSetup<%(name)s, 0>(true);
  %(name)s cmd;
  cmd.Init(%(args)s);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
"""
    gl_arg_strings = []
    arg_strings = []
    for count, arg in enumerate(func.GetOriginalArgs()):
      # hardcoded to match unit tests.
      if count == 0:
        # the location of the second element of the 2nd uniform.
        # defined in GLES2DecoderBase::SetupShaderForUniform
        gl_arg_strings.append("3")
        arg_strings.append("ProgramManager::MakeFakeLocation(1, 1)")
      elif count == 1:
        # the number of elements that gl will be called with.
        gl_arg_strings.append("3")
        # the number of elements requested in the command.
        arg_strings.append("5")
      else:
        gl_arg_strings.append(arg.GetValidGLArg(func, count, 0))
        arg_strings.append(arg.GetValidArg(func, count, 0))
    extra = {
      'gl_args': ", ".join(gl_arg_strings),
      'args': ", ".join(arg_strings),
    }
    self.WriteValidUnitTest(func, file, valid_test, extra)

  def WriteImmediateServiceUnitTest(self, func, file):
    """Overridden from TypeHandler.

    The temp buffer is sized count * 2 because the standard test data uses
    two "rows" of values.
    """
    valid_test = """
TEST_F(%(test_name)s, %(name)sValidArgs) {
  %(name)s& cmd = *GetImmediateAs<%(name)s>();
  EXPECT_CALL(
      *gl_,
      %(gl_func_name)s(%(gl_args)s,
          reinterpret_cast<%(data_type)s*>(ImmediateDataAddress(&cmd))));
  SpecializedSetup<%(name)s, 0>(true);
  %(data_type)s temp[%(data_count)s * 2] = { 0, };
  cmd.Init(%(args)s, &temp[0]);
  EXPECT_EQ(error::kNoError,
            ExecuteImmediateCmd(cmd, sizeof(temp)));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
"""
    gl_arg_strings = []
    gl_any_strings = []
    arg_strings = []
    for count, arg in enumerate(func.GetOriginalArgs()[0:-1]):
      gl_arg_strings.append(arg.GetValidGLArg(func, count, 0))
      gl_any_strings.append("_")
      arg_strings.append(arg.GetValidArg(func, count, 0))
    extra = {
      'data_type': func.GetInfo('data_type'),
      'data_count': func.GetInfo('count'),
      'args': ", ".join(arg_strings),
      'gl_args': ", ".join(gl_arg_strings),
      'gl_any_args': ", ".join(gl_any_strings),
    }
    self.WriteValidUnitTest(func, file, valid_test, extra)
    invalid_test = """
TEST_F(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
  %(name)s& cmd = *GetImmediateAs<%(name)s>();
  EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_any_args)s, _)).Times(0);
  SpecializedSetup<%(name)s, 0>(false);
  %(data_type)s temp[%(data_count)s * 2] = { 0, };
  cmd.Init(%(all_but_last_args)s, &temp[0]);
  EXPECT_EQ(error::%(parse_result)s,
            ExecuteImmediateCmd(cmd, sizeof(temp)));%(gl_error_test)s
}
"""
    self.WriteInvalidUnitTest(func, file, invalid_test, extra)

  def WriteGetDataSizeCode(self, func, file):
    """Overridden from TypeHandler.

    Same as PUTHandler but the first ComputeDataSize factor is the runtime
    `count` argument instead of 1.
    """
    code = """  uint32 data_size;
  if (!ComputeDataSize(count, sizeof(%s), %d, &data_size)) {
    return error::kOutOfBounds;
  }
"""
    file.Write(code % (func.info.data_type, func.info.count))
    if func.is_immediate:
      file.Write("  if (data_size > immediate_data_size) {\n")
      file.Write("    return error::kOutOfBounds;\n")
      file.Write("  }\n")

  def WriteGLES2Implementation(self, func, file):
    """Overridden from TypeHandler. Writes the client-side implementation."""
    file.Write("%s GLES2Implementation::%s(%s) {\n" %
               (func.return_type, func.original_name,
                func.MakeTypedOriginalArgString("")))
    file.Write("  GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
    func.WriteDestinationInitalizationValidation(file)
    self.WriteClientGLCallLog(func, file)
    last_arg_name = func.GetLastOriginalArg().name
    # Log count groups of func.info.count values each.
    file.Write("""  GPU_CLIENT_LOG_CODE_BLOCK({
    for (GLsizei i = 0; i < count; ++i) {
""")
    values_str = ' << ", " << '.join(
        ["%s[%d + i * %d]" % (
            last_arg_name, ndx, func.info.count) for ndx in range(
                0, func.info.count)])
    file.Write('       GPU_CLIENT_LOG("  " << i << ": " << %s);\n' %
               values_str)
    file.Write("    }\n  });\n")
    for arg in func.GetOriginalArgs():
      arg.WriteClientSideValidationCode(file, func)
    file.Write("  helper_->%sImmediate(%s);\n" %
               (func.name, func.MakeOriginalArgString("")))
    file.Write("  CheckGLError();\n")
    file.Write("}\n")
    file.Write("\n")

  def WriteGLES2ImplementationUnitTest(self, func, file):
    """Writes the GLES2 Implementation unit test."""
    code = """
TEST_F(GLES2ImplementationTest, %(name)s) {
  struct Cmds {
    %(name)sImmediate cmd;
    %(type)s data[2][%(count)d];
  };

  Cmds expected;
  for (int ii = 0; ii < 2; ++ii) {
    for (int jj = 0; jj < %(count)d; ++jj) {
      expected.data[ii][jj] = static_cast<%(type)s>(ii * %(count)d + jj);
    }
  }
  expected.cmd.Init(%(cmd_args)s, &expected.data[0][0]);
  gl_->%(name)s(%(args)s, &expected.data[0][0]);
  EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
}
"""
    cmd_arg_strings = []
    for count, arg in enumerate(func.GetCmdArgs()[0:-2]):
      cmd_arg_strings.append(arg.GetValidClientSideCmdArg(func, count, 0))
    gl_arg_strings = []
    for count, arg in enumerate(func.GetOriginalArgs()[0:-1]):
      gl_arg_strings.append(arg.GetValidClientSideArg(func, count, 0))
    file.Write(code % {
        'name': func.name,
        'type': func.GetInfo('data_type'),
        'count': func.GetInfo('count'),
        'args': ", ".join(gl_arg_strings),
        'cmd_args': ", ".join(cmd_arg_strings),
      })

  def WriteImmediateCmdComputeSize(self, func, file):
    """Overridden from TypeHandler. Size scales with the runtime count."""
    file.Write("  static uint32 ComputeDataSize(GLsizei count) {\n")
    file.Write("    return static_cast<uint32>(\n")
    file.Write("        sizeof(%s) * %d * count);  // NOLINT\n" %
               (func.info.data_type, func.info.count))
    file.Write("  }\n")
    file.Write("\n")
    file.Write("  static uint32 ComputeSize(GLsizei count) {\n")
    file.Write("    return static_cast<uint32>(\n")
    file.Write(
        "        sizeof(ValueType) + ComputeDataSize(count));  // NOLINT\n")
    file.Write("  }\n")
    file.Write("\n")

  def WriteImmediateCmdSetHeader(self, func, file):
    """Overridden from TypeHandler."""
    file.Write("  void SetHeader(GLsizei count) {\n")
    file.Write(
        "    header.SetCmdByTotalSize<ValueType>(ComputeSize(count));\n")
    file.Write("  }\n")
    file.Write("\n")

  def WriteImmediateCmdInit(self, func, file):
    """Overridden from TypeHandler.

    NOTE: the generated Init() references _count, so the function's cmd
    args are expected to include a `count` argument.
    """
    last_arg = func.GetLastOriginalArg()
    file.Write("  void Init(%s, %s _%s) {\n" %
               (func.MakeTypedCmdArgString("_"),
                last_arg.type, last_arg.name))
    file.Write("    SetHeader(_count);\n")
    args = func.GetCmdArgs()
    for arg in args:
      file.Write("    %s = _%s;\n" % (arg.name, arg.name))
    file.Write("    memcpy(ImmediateDataAddress(this),\n")
    file.Write("           _%s, ComputeDataSize(_count));\n" % last_arg.name)
    file.Write("  }\n")
    file.Write("\n")

  def WriteImmediateCmdSet(self, func, file):
    """Overridden from TypeHandler."""
    last_arg = func.GetLastOriginalArg()
    copy_args = func.MakeCmdArgString("_", False)
    file.Write("  void* Set(void* cmd%s, %s _%s) {\n" %
               (func.MakeTypedCmdArgString("_", True),
                last_arg.type, last_arg.name))
    file.Write("    static_cast<ValueType*>(cmd)->Init(%s, _%s);\n" %
               (copy_args, last_arg.name))
    file.Write("    const uint32 size = ComputeSize(_count);\n")
    file.Write("    return NextImmediateCmdAddressTotalSize<ValueType>("
               "cmd, size);\n")
    file.Write("  }\n")
    file.Write("\n")

  def WriteImmediateCmdHelper(self, func, file):
    """Overridden from TypeHandler. Writes the cmd helper."""
    code = """  void %(name)s(%(typed_args)s) {
    const uint32 size = gles2::%(name)s::ComputeSize(count);
    gles2::%(name)s* c =
        GetImmediateCmdSpaceTotalSize<gles2::%(name)s>(size);
    if (c) {
      c->Init(%(args)s);
    }
  }

"""
    file.Write(code % {
          "name": func.name,
          "typed_args": func.MakeTypedOriginalArgString(""),
          "args": func.MakeOriginalArgString(""),
        })

  def WriteImmediateFormatTest(self, func, file):
    """Overridden from TypeHandler. Writes the cmd format test.

    Uses two "rows" of data (kNumElements = 2) and fills scalar cmd args
    with 1, 2, ... so field-ordering mistakes are detected.
    """
    file.Write("TEST_F(GLES2FormatTest, %s) {\n" % func.name)
    file.Write("  const int kSomeBaseValueToTestWith = 51;\n")
    file.Write("  static %s data[] = {\n" % func.info.data_type)
    for v in range(0, func.info.count * 2):
      file.Write("    static_cast<%s>(kSomeBaseValueToTestWith + %d),\n" %
                 (func.info.data_type, v))
    file.Write("  };\n")
    file.Write("  %s& cmd = *GetBufferAs<%s>();\n" % (func.name, func.name))
    file.Write("  const GLsizei kNumElements = 2;\n")
    file.Write("  const size_t kExpectedCmdSize =\n")
    file.Write("      sizeof(cmd) + kNumElements * sizeof(%s) * %d;\n" %
               (func.info.data_type, func.info.count))
    file.Write("  void* next_cmd = cmd.Set(\n")
    file.Write("      &cmd")
    args = func.GetCmdArgs()
    for value, arg in enumerate(args):
      file.Write(",\n      static_cast<%s>(%d)" % (arg.type, value + 1))
    file.Write(",\n      data);\n")
    args = func.GetCmdArgs()
    file.Write("  EXPECT_EQ(static_cast<uint32>(%s::kCmdId),\n" % func.name)
    file.Write("            cmd.header.command);\n")
    file.Write("  EXPECT_EQ(kExpectedCmdSize, cmd.header.size * 4u);\n")
    for value, arg in enumerate(args):
      file.Write("  EXPECT_EQ(static_cast<%s>(%d), cmd.%s);\n" %
                 (arg.type, value + 1, arg.name))
    file.Write("  CheckBytesWrittenMatchesExpectedSize(\n")
    file.Write("      next_cmd, sizeof(cmd) +\n")
    file.Write("      RoundSizeToMultipleOfEntries(sizeof(data)));\n")
    file.Write("  // TODO(gman): Check that data was inserted;\n")
    file.Write("}\n")
    file.Write("\n")
class PUTXnHandler(TypeHandler):
  """Handler for glUniform?f style functions.

  These GL entry points take their values as individual scalar arguments;
  the generated service code packs the trailing scalars into a temporary
  array and forwards to the corresponding Do<Name>v bulk setter with a
  count of 1.
  """

  def __init__(self):
    TypeHandler.__init__(self)

  def WriteHandlerImplementation(self, func, file):
    """Overridden from TypeHandler.

    Emits code that copies the last `count` scalar args into a temp array
    and calls Do<Name>v(location, 1, &temp[0]).
    """
    code = """  %(type)s temp[%(count)s] = { %(values)s};
  Do%(name)sv(%(location)s, 1, &temp[0]);
"""
    values = ""
    args = func.GetOriginalArgs()
    count = int(func.GetInfo('count'))
    # The last `count` original args are the scalar values to pack.
    for ii in range(count):
      values += "%s, " % args[len(args) - count + ii].name
    file.Write(code % {
        'name': func.name,
        'count': func.GetInfo('count'),
        'type': func.GetInfo('data_type'),
        'location': args[0].name,
        'values': values,
      })

  def WriteServiceUnitTest(self, func, file):
    """Overridden from TypeHandler. Writes the service unit tests.

    The mock expectation targets the `v` variant (e.g. glUniform2fv)
    because that is what the handler implementation calls.
    """
    valid_test = """
TEST_F(%(test_name)s, %(name)sValidArgs) {
  EXPECT_CALL(*gl_, %(name)sv(%(local_args)s));
  SpecializedSetup<%(name)s, 0>(true);
  %(name)s cmd;
  cmd.Init(%(args)s);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
"""
    args = func.GetOriginalArgs()
    local_args = "%s, 1, _" % args[0].GetValidGLArg(func, 0, 0)
    self.WriteValidUnitTest(func, file, valid_test, {
        'name': func.name,
        'count': func.GetInfo('count'),
        'local_args': local_args,
      })
    # BUG FIX: the EXPECT_CALL below was missing the closing parenthesis
    # before .Times(0), which made the generated test uncompilable.
    invalid_test = """
TEST_F(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
  EXPECT_CALL(*gl_, %(name)sv(_, _, _)).Times(0);
  SpecializedSetup<%(name)s, 0>(false);
  %(name)s cmd;
  cmd.Init(%(args)s);
  EXPECT_EQ(error::%(parse_result)s, ExecuteCmd(cmd));%(gl_error_test)s
}
"""
    self.WriteInvalidUnitTest(func, file, invalid_test, {
        # NOTE(review): the valid test uses func.name here while this uses
        # func.GetInfo('name'); kept as-is for compatibility — confirm they
        # are equivalent for every registered function.
        'name': func.GetInfo('name'),
        'count': func.GetInfo('count'),
      })
class GLcharHandler(CustomHandler):
  """Handler for functions that pass a single string.

  The string travels as immediate data after the command struct; an extra
  trailing _data_size cmd field records its length (no NUL terminator is
  transferred).
  """

  def __init__(self):
    CustomHandler.__init__(self)

  def WriteImmediateCmdComputeSize(self, func, file):
    """Overridden from TypeHandler. Total size is struct + raw string bytes."""
    file.Write("  static uint32 ComputeSize(uint32 data_size) {\n")
    file.Write("    return static_cast<uint32>(\n")
    file.Write("        sizeof(ValueType) + data_size);  // NOLINT\n")
    file.Write("  }\n")

  def WriteImmediateCmdSetHeader(self, func, file):
    """Overridden from TypeHandler."""
    code = """
  void SetHeader(uint32 data_size) {
    header.SetCmdBySize<ValueType>(data_size);
  }
"""
    file.Write(code)

  def WriteImmediateCmdInit(self, func, file):
    """Overridden from TypeHandler.

    Init() copies _data_size bytes of the string argument into the
    immediate payload after assigning all scalar cmd fields.
    """
    last_arg = func.GetLastOriginalArg()
    args = func.GetCmdArgs()
    set_code = []
    for arg in args:
      set_code.append("    %s = _%s;" % (arg.name, arg.name))
    code = """
  void Init(%(typed_args)s, uint32 _data_size) {
    SetHeader(_data_size);
%(set_code)s
    memcpy(ImmediateDataAddress(this), _%(last_arg)s, _data_size);
  }

"""
    file.Write(code % {
          "typed_args": func.MakeTypedOriginalArgString("_"),
          "set_code": "\n".join(set_code),
          "last_arg": last_arg.name
        })

  def WriteImmediateCmdSet(self, func, file):
    """Overridden from TypeHandler."""
    last_arg = func.GetLastOriginalArg()
    file.Write("  void* Set(void* cmd%s, uint32 _data_size) {\n" %
               func.MakeTypedOriginalArgString("_", True))
    file.Write("    static_cast<ValueType*>(cmd)->Init(%s, _data_size);\n" %
               func.MakeOriginalArgString("_"))
    file.Write("    return NextImmediateCmdAddress<ValueType>("
               "cmd, _data_size);\n")
    file.Write("  }\n")
    file.Write("\n")

  def WriteImmediateCmdHelper(self, func, file):
    """Overridden from TypeHandler.

    NOTE(review): the generated helper computes strlen(name), i.e. it
    assumes the function has a string parameter literally called `name` —
    confirm this holds for all functions routed through this handler.
    """
    code = """  void %(name)s(%(typed_args)s) {
    const uint32 data_size = strlen(name);
    gles2::%(name)s* c = GetImmediateCmdSpace<gles2::%(name)s>(data_size);
    if (c) {
      c->Init(%(args)s, data_size);
    }
  }

"""
    file.Write(code % {
          "name": func.name,
          "typed_args": func.MakeTypedOriginalArgString(""),
          "args": func.MakeOriginalArgString(""),
        })

  def WriteImmediateFormatTest(self, func, file):
    """Overridden from TypeHandler. Writes the cmd format test.

    Scalar args get distinguishable values (11, 12, ...); the string is
    checked byte-for-byte in the immediate payload.
    """
    init_code = []
    check_code = []
    all_but_last_arg = func.GetCmdArgs()[:-1]
    for value, arg in enumerate(all_but_last_arg):
      init_code.append("      static_cast<%s>(%d)," % (arg.type, value + 11))
    for value, arg in enumerate(all_but_last_arg):
      check_code.append("  EXPECT_EQ(static_cast<%s>(%d), cmd.%s);" %
                        (arg.type, value + 11, arg.name))
    code = """
TEST_F(GLES2FormatTest, %(func_name)s) {
  %(func_name)s& cmd = *GetBufferAs<%(func_name)s>();
  static const char* const test_str = \"test string\";
  void* next_cmd = cmd.Set(
      &cmd,
%(init_code)s
      test_str,
      strlen(test_str));
  EXPECT_EQ(static_cast<uint32>(%(func_name)s::kCmdId),
            cmd.header.command);
  EXPECT_EQ(sizeof(cmd) +
            RoundSizeToMultipleOfEntries(strlen(test_str)),
            cmd.header.size * 4u);
  EXPECT_EQ(static_cast<char*>(next_cmd),
            reinterpret_cast<char*>(&cmd) + sizeof(cmd) +
                RoundSizeToMultipleOfEntries(strlen(test_str)));
%(check_code)s
  EXPECT_EQ(static_cast<uint32>(strlen(test_str)), cmd.data_size);
  EXPECT_EQ(0, memcmp(test_str, ImmediateDataAddress(&cmd), strlen(test_str)));
  CheckBytesWritten(
      next_cmd,
      sizeof(cmd) + RoundSizeToMultipleOfEntries(strlen(test_str)),
      sizeof(cmd) + strlen(test_str));
}

"""
    file.Write(code % {
          'func_name': func.name,
          'init_code': "\n".join(init_code),
          'check_code': "\n".join(check_code),
        })
class GLcharNHandler(CustomHandler):
  """Handler for functions that pass a single string with an optional len.

  Instead of immediate data, the string is transferred through a bucket;
  the command itself carries only the bucket id.
  """

  def __init__(self):
    CustomHandler.__init__(self)

  def InitFunction(self, func):
    """Overridden from TypeHandler.

    Drops all generated cmd args and replaces them with a single
    bucket_id argument.
    """
    func.cmd_args = []
    func.AddCmdArg(Argument('bucket_id', 'GLuint'))

  def AddImmediateFunction(self, generator, func):
    """Overridden from TypeHandler. No immediate variant is generated."""
    pass

  def AddBucketFunction(self, generator, func):
    """Overridden from TypeHandler. The base command already uses a bucket."""
    pass

  def WriteServiceImplementation(self, func, file):
    """Overridden from TypeHandler.

    Emits a handler that looks up the bucket, rejects empty or missing
    buckets, and passes the string to the GL function with a 0 length
    (NUL-terminated form).
    """
    file.Write("""error::Error GLES2DecoderImpl::Handle%(name)s(
  uint32 immediate_data_size, const gles2::%(name)s& c) {
  GLuint bucket_id = static_cast<GLuint>(c.%(bucket_id)s);
  Bucket* bucket = GetBucket(bucket_id);
  if (!bucket || bucket->size() == 0) {
    return error::kInvalidArguments;
  }
  std::string str;
  if (!bucket->GetAsString(&str)) {
    return error::kInvalidArguments;
  }
  %(gl_func_name)s(0, str.c_str());
  return error::kNoError;
}

""" % {
    'name': func.name,
    'gl_func_name': func.GetGLFunctionName(),
    'bucket_id': func.cmd_args[0].name,
  })
class IsHandler(TypeHandler):
  """Handler for glIs____ type and glGetError functions.

  These commands return a single value through a result shm id/offset pair
  that InitFunction appends to the cmd args.
  """

  def __init__(self):
    TypeHandler.__init__(self)

  def InitFunction(self, func):
    """Overridden from TypeHandler. Adds result shm args and a default
    uint32 result type."""
    func.AddCmdArg(Argument("result_shm_id", 'uint32'))
    func.AddCmdArg(Argument("result_shm_offset", 'uint32'))
    if func.GetInfo('result') == None:
      func.AddInfo('result', ['uint32'])

  def WriteServiceUnitTest(self, func, file):
    """Overridden from TypeHandler."""
    valid_test = """
TEST_F(%(test_name)s, %(name)sValidArgs) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s));
  SpecializedSetup<%(name)s, 0>(true);
  %(name)s cmd;
  cmd.Init(%(args)s%(comma)sshared_memory_id_, shared_memory_offset_);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
"""
    # Only emit a separating comma if the function has its own args before
    # the appended shm id/offset pair.
    comma = ""
    if len(func.GetOriginalArgs()):
      comma =", "
    self.WriteValidUnitTest(func, file, valid_test, {
        'comma': comma,
      })
    invalid_test = """
TEST_F(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s)).Times(0);
  SpecializedSetup<%(name)s, 0>(false);
  %(name)s cmd;
  cmd.Init(%(args)s%(comma)sshared_memory_id_, shared_memory_offset_);
  EXPECT_EQ(error::%(parse_result)s, ExecuteCmd(cmd));%(gl_error_test)s
}
"""
    self.WriteInvalidUnitTest(func, file, invalid_test, {
        'comma': comma,
      })
    invalid_test = """
TEST_F(%(test_name)s, %(name)sInvalidArgsBadSharedMemoryId) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s)).Times(0);
  SpecializedSetup<%(name)s, 0>(false);
  %(name)s cmd;
  cmd.Init(%(args)s%(comma)skInvalidSharedMemoryId, shared_memory_offset_);
  EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
  cmd.Init(%(args)s%(comma)sshared_memory_id_, kInvalidSharedMemoryOffset);
  EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
}
"""
    # Written via WriteValidUnitTest on purpose: the GL args here are all
    # valid — only the shared memory id/offset is bad.
    self.WriteValidUnitTest(func, file, invalid_test, {
        'comma': comma,
      })

  def WriteServiceImplementation(self, func, file):
    """Overridden from TypeHandler.

    The handler bounds-checks the result pointer before validation so an
    unwritable destination is rejected up front.
    """
    file.Write(
        "error::Error GLES2DecoderImpl::Handle%s(\n" % func.name)
    file.Write(
        "    uint32 immediate_data_size, const gles2::%s& c) {\n" % func.name)
    args = func.GetOriginalArgs()
    for arg in args:
      arg.WriteGetCode(file)
    code = """  typedef %(func_name)s::Result Result;
  Result* result_dst = GetSharedMemoryAs<Result*>(
      c.result_shm_id, c.result_shm_offset, sizeof(*result_dst));
  if (!result_dst) {
    return error::kOutOfBounds;
  }
"""
    file.Write(code % {'func_name': func.name})
    func.WriteHandlerValidation(file)
    file.Write("  *result_dst = %s(%s);\n" %
               (func.GetGLFunctionName(), func.MakeOriginalArgString("")))
    file.Write("  return error::kNoError;\n")
    file.Write("}\n")
    file.Write("\n")

  def WriteGLES2Implementation(self, func, file):
    """Overridden from TypeHandler.

    The client zeroes the result, issues the command, blocks on WaitForCmd,
    and returns error_value (default GL_FALSE) if the result buffer is
    unavailable.
    """
    impl_func = func.GetInfo('impl_func')
    if impl_func == None or impl_func == True:
      error_value = func.GetInfo("error_value") or "GL_FALSE"
      file.Write("%s GLES2Implementation::%s(%s) {\n" %
                 (func.return_type, func.original_name,
                  func.MakeTypedOriginalArgString("")))
      file.Write("  GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
      self.WriteTraceEvent(func, file)
      func.WriteDestinationInitalizationValidation(file)
      self.WriteClientGLCallLog(func, file)
      file.Write("  typedef %s::Result Result;\n" % func.name)
      file.Write("  Result* result = GetResultAs<Result*>();\n")
      file.Write("  if (!result) {\n")
      file.Write("    return %s;\n" % error_value)
      file.Write("  }\n")
      file.Write("  *result = 0;\n")
      arg_string = func.MakeOriginalArgString("")
      comma = ""
      if len(arg_string) > 0:
        comma = ", "
      file.Write(
          "  helper_->%s(%s%sGetResultShmId(), GetResultShmOffset());\n" %
          (func.name, arg_string, comma))
      file.Write("  WaitForCmd();\n")
      file.Write("  %s result_value = *result;\n" % func.return_type)
      file.Write('  GPU_CLIENT_LOG("returned " << result_value);\n')
      file.Write("  CheckGLError();\n")
      file.Write("  return result_value;\n")
      file.Write("}\n")
      file.Write("\n")

  def WriteGLES2ImplementationUnitTest(self, func, file):
    """Overridden from TypeHandler."""
    client_test = func.GetInfo('client_test')
    if client_test == None or client_test == True:
      code = """
TEST_F(GLES2ImplementationTest, %(name)s) {
  struct Cmds {
    %(name)s cmd;
  };

  typedef %(name)s::Result Result;
  Cmds expected;
  ExpectedMemoryInfo result1 =
      GetExpectedResultMemory(sizeof(%(name)s::Result));
  expected.cmd.Init(1, result1.id, result1.offset);

  EXPECT_CALL(*command_buffer(), OnFlush())
      .WillOnce(SetMemory(result1.ptr, uint32(1)))
      .RetiresOnSaturation();

  GLboolean result = gl_->%(name)s(1);
  EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
  EXPECT_TRUE(result);
}
"""
      file.Write(code % {
            'name': func.name,
          })
class STRnHandler(TypeHandler):
  """Handler for GetProgramInfoLog, GetShaderInfoLog, GetShaderSource, and
  GetTranslatedShaderSourceANGLE.

  These commands return a variable-length string, which travels from the
  service to the client through a bucket rather than through fixed-size
  shared memory.
  """

  def __init__(self):
    TypeHandler.__init__(self)

  def InitFunction(self, func):
    """Overridden from TypeHandler.

    Rewrites the command signature: keep only the first cmd arg (the object
    id) and add a bucket id through which the string comes back.
    """
    # remove all but the first cmd args.
    cmd_args = func.GetCmdArgs()
    func.ClearCmdArgs()
    func.AddCmdArg(cmd_args[0])
    # add on a bucket id.
    func.AddCmdArg(Argument('bucket_id', 'uint32'))

  def WriteGLES2Implementation(self, func, file):
    """Overridden from TypeHandler.

    Client side: clear the result bucket, issue the bucket command, then
    copy the returned bucket string (truncated to bufsize, NUL-terminated)
    into the caller's buffer and report its length.
    """
    code_1 = """%(return_type)s GLES2Implementation::%(func_name)s(%(args)s) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
"""
    # NOTE(review): the template below tests the literal parameter name
    # 'bufsize' although %(bufsize_name)s is available in str_args;
    # presumably every STRn function names that parameter 'bufsize' --
    # confirm before renaming any signature in the function tables.
    code_2 = """ GPU_CLIENT_LOG("[" << GetLogPrefix()
<< "] gl%(func_name)s" << "("
<< %(arg0)s << ", "
<< %(arg1)s << ", "
<< static_cast<void*>(%(arg2)s) << ", "
<< static_cast<void*>(%(arg3)s) << ")");
helper_->SetBucketSize(kResultBucketId, 0);
helper_->%(func_name)s(%(id_name)s, kResultBucketId);
std::string str;
GLsizei max_size = 0;
if (GetBucketAsString(kResultBucketId, &str)) {
if (bufsize > 0) {
max_size =
std::min(static_cast<size_t>(%(bufsize_name)s) - 1, str.size());
memcpy(%(dest_name)s, str.c_str(), max_size);
%(dest_name)s[max_size] = '\\0';
GPU_CLIENT_LOG("------\\n" << %(dest_name)s << "\\n------");
}
}
if (%(length_name)s != NULL) {
*%(length_name)s = max_size;
}
CheckGLError();
}
"""
    # STRn signatures are fixed as (id, bufsize, length, dest).
    args = func.GetOriginalArgs()
    str_args = {
      'return_type': func.return_type,
      'func_name': func.original_name,
      'args': func.MakeTypedOriginalArgString(""),
      'id_name': args[0].name,
      'bufsize_name': args[1].name,
      'length_name': args[2].name,
      'dest_name': args[3].name,
      'arg0': args[0].name,
      'arg1': args[1].name,
      'arg2': args[2].name,
      'arg3': args[3].name,
    }
    file.Write(code_1 % str_args)
    func.WriteDestinationInitalizationValidation(file)
    file.Write(code_2 % str_args)

  def WriteServiceUnitTest(self, func, file):
    """Overridden from TypeHandler.

    Emits decoder tests: the valid case checks the string lands in the
    bucket NUL-terminated; the invalid case checks an unknown client id
    reports GL_INVALID_VALUE without calling GL.
    """
    valid_test = """
TEST_F(%(test_name)s, %(name)sValidArgs) {
const char* kInfo = "hello";
const uint32 kBucketId = 123;
SpecializedSetup<%(name)s, 0>(true);
%(expect_len_code)s
EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s))
.WillOnce(DoAll(SetArgumentPointee<2>(strlen(kInfo)),
SetArrayArgument<3>(kInfo, kInfo + strlen(kInfo) + 1)));
%(name)s cmd;
cmd.Init(%(args)s);
EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
CommonDecoder::Bucket* bucket = decoder_->GetBucket(kBucketId);
ASSERT_TRUE(bucket != NULL);
EXPECT_EQ(strlen(kInfo) + 1, bucket->size());
EXPECT_EQ(0, memcmp(bucket->GetData(0, bucket->size()), kInfo,
bucket->size()));
EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
"""
    args = func.GetOriginalArgs()
    id_name = args[0].GetValidGLArg(func, 0, 0)
    get_len_func = func.GetInfo('get_len_func')
    get_len_enum = func.GetInfo('get_len_enum')
    sub = {
      'id_name': id_name,
      'get_len_func': get_len_func,
      'get_len_enum': get_len_enum,
      'gl_args': '%s, strlen(kInfo) + 1, _, _' %
          args[0].GetValidGLArg(func, 0, 0),
      'args': '%s, kBucketId' % args[0].GetValidArg(func, 0, 0),
      'expect_len_code': '',
    }
    # Functions whose length is queried via a gl* call (e.g.
    # glGetShaderiv(GL_INFO_LOG_LENGTH)) get an extra mock expectation.
    if get_len_func and get_len_func[0:2] == 'gl':
      sub['expect_len_code'] = (
        " EXPECT_CALL(*gl_, %s(%s, %s, _))\n"
        " .WillOnce(SetArgumentPointee<2>(strlen(kInfo) + 1));") % (
          get_len_func[2:], id_name, get_len_enum)
    self.WriteValidUnitTest(func, file, valid_test, sub)
    invalid_test = """
TEST_F(%(test_name)s, %(name)sInvalidArgs) {
const uint32 kBucketId = 123;
EXPECT_CALL(*gl_, %(gl_func_name)s(_, _, _, _))
.Times(0);
%(name)s cmd;
cmd.Init(kInvalidClientId, kBucketId);
EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
}
"""
    # NOTE(review): the invalid-args test is also emitted through
    # WriteValidUnitTest (no 'sub' dict), relying on its default
    # substitutions -- confirm this routing is intended.
    self.WriteValidUnitTest(func, file, invalid_test)

  def WriteServiceImplementation(self, func, file):
    """Overridden from TypeHandler.

    Intentionally emits nothing: the service-side handler for STRn
    commands is not generated by this handler.
    """
    pass
class FunctionInfo(object):
  """Per-function metadata parsed from the function tables.

  Every key of ``info`` becomes an attribute on the instance; ``type``
  defaults to the empty string when absent so handlers can always read it.
  The ``type_handler`` attribute holds the TypeHandler that will generate
  code for functions described by this info.
  """

  def __init__(self, info, type_handler):
    for key, value in info.items():
      setattr(self, key, value)
    self.type_handler = type_handler
    if 'type' not in info:
      self.type = ''
class Argument(object):
  """One argument of a GL entry point.

  Subclasses specialize how an argument appears in the wire format, in
  generated validation code, in unit-test values, and in log statements.
  """

  # GL typedefs that map onto fixed-width command-buffer field types;
  # anything not listed is transported as a uint32.
  cmd_type_map_ = {
    'GLenum': 'uint32',
    'GLint': 'int32',
    'GLintptr': 'int32',
    'GLsizei': 'int32',
    'GLsizeiptr': 'int32',
    'GLfloat': 'float',
    'GLclampf': 'float',
  }
  # Pointer types whose destination must be checked for initialization on
  # the client side.
  need_validation_ = ['GLsizei*', 'GLboolean*', 'GLenum*', 'GLint*']

  def __init__(self, name, type):
    self.name = name
    # A trailing "Optional*" marks a pointer the caller may pass as NULL.
    self.optional = type.endswith("Optional*")
    if self.optional:
      type = type[:-9] + "*"
    self.type = type
    self.cmd_type = self.cmd_type_map_.get(type, 'uint32')

  def IsPointer(self):
    """Returns true if argument is a pointer."""
    return False

  def AddCmdArgs(self, args):
    """Appends this argument's command-struct field(s) to args."""
    return args.append(self)

  def AddInitArgs(self, args):
    """Appends this argument's cmd Init/Set parameter(s) to args."""
    return args.append(self)

  def GetValidArg(self, func, offset, index):
    """Returns a valid test value, honoring per-function overrides."""
    override = func.GetValidArg(offset)
    if override is not None:
      return override
    return str(offset + 1)

  def GetValidClientSideArg(self, func, offset, index):
    """Returns a valid value for client-side tests."""
    return str(offset + 1)

  def GetValidClientSideCmdArg(self, func, offset, index):
    """Returns a valid value for client-side cmd tests."""
    return str(offset + 1)

  def GetValidGLArg(self, func, offset, index):
    """Returns the value the mock GL call is expected to receive."""
    override = func.GetValidArg(offset)
    if override is not None:
      return override
    return str(offset + 1)

  def GetNumInvalidValues(self, func):
    """Returns how many invalid values should be tested (none here)."""
    return 0

  def GetInvalidArg(self, offset, index):
    """Returns (value, parse result, GL error) for an invalid test case."""
    return ("---ERROR0---", "---ERROR2---", None)

  def GetLogArg(self):
    """Returns the C++ expression used for this argument in log macros."""
    formatters = {
      'GLboolean': 'GLES2Util::GetStringBool(%s)',
      'GLenum': 'GLES2Util::GetStringEnum(%s)',
    }
    template = formatters.get(self.type)
    if template is None:
      return self.name
    return template % self.name

  def WriteGetCode(self, file):
    """Writes the code to get an argument from a command structure."""
    file.Write(" %s %s = static_cast<%s>(c.%s);\n" %
               (self.type, self.name, self.type, self.name))

  def WriteValidationCode(self, file, func):
    """Writes service-side validation code (none by default)."""
    pass

  def WriteClientSideValidationCode(self, file, func):
    """Writes client-side validation code (none by default)."""
    pass

  def WriteDestinationInitalizationValidation(self, file, func):
    """Writes the client side destination initialization validation."""
    pass

  def WriteDestinationInitalizationValidatationIfNeeded(self, file, func):
    """Emits the destination-initialization check for listed pointer types."""
    pieces = self.type.split(" ")
    # Multi-word types (e.g. "const GLint*") are never checked.
    if len(pieces) > 1:
      return
    if pieces[0] in self.need_validation_:
      prefix = "OPTIONAL_" if self.optional else ""
      file.Write(
          " GPU_CLIENT_VALIDATE_DESTINATION_%sINITALIZATION(%s, %s);\n" %
          (prefix, self.type[:-1], self.name))

  def WriteGetAddress(self, file):
    """Writes code resolving the address this argument refers to (none)."""
    pass

  def GetImmediateVersion(self):
    """Returns the argument used by the immediate form of the command."""
    return self

  def GetBucketVersion(self):
    """Returns the argument used by the bucket form of the command."""
    return self
class BoolArgument(Argument):
  """Argument class for plain GLboolean parameters.

  The wire type is forced to GLboolean and every "valid" test value is
  simply true.
  """

  def __init__(self, name, type):
    # The declared type is ignored; booleans always travel as GLboolean.
    Argument.__init__(self, name, 'GLboolean')

  def GetValidArg(self, func, offset, index):
    """Overridden from Argument: any boolean test uses true."""
    return 'true'

  def GetValidClientSideArg(self, func, offset, index):
    """Overridden from Argument: any boolean test uses true."""
    return 'true'

  def GetValidClientSideCmdArg(self, func, offset, index):
    """Overridden from Argument: any boolean test uses true."""
    return 'true'

  def GetValidGLArg(self, func, offset, index):
    """Overridden from Argument: any boolean test uses true."""
    return 'true'
class UniformLocationArgument(Argument):
  """class for uniform locations.

  Uniform locations travel as GLint; the decoder-side read uses a template
  instead of the base class's inline Write calls.
  """

  def __init__(self, name):
    Argument.__init__(self, name, "GLint")

  def WriteGetCode(self, file):
    """Writes the code to get an argument from a command structure."""
    code = """ %s %s = static_cast<%s>(c.%s);
"""
    file.Write(code % (self.type, self.name, self.type, self.name))

  def GetValidArg(self, func, offset, index):
    """Gets a valid value for this argument."""
    # Unlike the base class there is no per-function override lookup here;
    # the value is always derived from the argument position.
    return "%d" % (offset + 1)
class DataSizeArgument(Argument):
  """Argument for the implicit data_size parameter.

  Bucket variants of a command derive the payload size from the bucket
  itself, so this argument disappears in the bucket version.
  """

  def __init__(self, name):
    Argument.__init__(self, name, "uint32")

  def GetBucketVersion(self):
    """Overridden from Argument: bucket commands carry no data_size."""
    return None
class SizeArgument(Argument):
  """class for GLsizei and GLsizeiptr.

  Sizes must be non-negative; both the service and the client emit an
  explicit < 0 check that raises GL_INVALID_VALUE.
  """

  def __init__(self, name, type):
    Argument.__init__(self, name, type)

  def GetNumInvalidValues(self, func):
    """overridden from Argument."""
    # Immediate commands derive the size from the command payload, so a
    # negative size cannot be expressed there; nothing to test.
    if func.is_immediate:
      return 0
    return 1

  def GetInvalidArg(self, offset, index):
    """overridden from Argument."""
    # -1 parses fine (kNoError); the emitted validation turns it into
    # GL_INVALID_VALUE.
    return ("-1", "kNoError", "GL_INVALID_VALUE")

  def WriteValidationCode(self, file, func):
    """overridden from Argument."""
    # Service side: reject negative sizes before touching GL.
    file.Write(" if (%s < 0) {\n" % self.name)
    file.Write(" SetGLError(GL_INVALID_VALUE, \"gl%s\", \"%s < 0\");\n" %
               (func.original_name, self.name))
    file.Write(" return error::kNoError;\n")
    file.Write(" }\n")

  def WriteClientSideValidationCode(self, file, func):
    """overridden from Argument."""
    # Client side: same check, but client entry points return void.
    file.Write(" if (%s < 0) {\n" % self.name)
    file.Write(" SetGLError(GL_INVALID_VALUE, \"gl%s\", \"%s < 0\");\n" %
               (func.original_name, self.name))
    file.Write(" return;\n")
    file.Write(" }\n")
class SizeNotNegativeArgument(SizeArgument):
  """GLsizeiNotNegative: a size that is NEVER allowed to be negative.

  A negative value is rejected during command parsing (kOutOfBounds, no GL
  error), so no runtime validation code is emitted at all.
  """

  def __init__(self, name, type, gl_type):
    SizeArgument.__init__(self, name, gl_type)

  def GetInvalidArg(self, offset, index):
    """Overridden from SizeArgument: rejection happens at parse time."""
    return ("-1", "kOutOfBounds", "GL_NO_ERROR")

  def WriteValidationCode(self, file, func):
    """Overridden from SizeArgument: nothing to emit."""
    pass
class EnumBaseArgument(Argument):
  """Base class for EnumArgument, IntArgument and ValidatedBoolArgument.

  The concrete value-set name (e.g. "TextureTarget") is obtained by
  stripping the GL type prefix from `type`; valid/invalid sample values
  come from the module-level _ENUM_LISTS table under that name.
  """

  def __init__(self, name, gl_type, type, gl_error):
    Argument.__init__(self, name, gl_type)
    self.local_type = type
    self.gl_error = gl_error
    # e.g. gl_type="GLenum", type="GLenumTextureTarget" -> "TextureTarget".
    name = type[len(gl_type):]
    self.type_name = name
    self.enum_info = _ENUM_LISTS[name]

  def WriteValidationCode(self, file, func):
    # Emit the validator membership check. GL_INVALID_ENUM uses the
    # dedicated helper that also logs the enum's string name.
    file.Write(" if (!validators_->%s.IsValid(%s)) {\n" %
               (ToUnderscore(self.type_name), self.name))
    if self.gl_error == "GL_INVALID_ENUM":
      file.Write(
        " SetGLErrorInvalidEnum(\"gl%s\", %s, \"%s\");\n" %
        (func.original_name, self.name, self.name))
    else:
      file.Write(
        " SetGLError(%s, \"gl%s\", \"%s %s\");\n" %
        (self.gl_error, func.original_name, self.name, self.gl_error))
    file.Write(" return error::kNoError;\n")
    file.Write(" }\n")

  def GetValidArg(self, func, offset, index):
    # Per-function override wins; otherwise pick from the enum's 'valid'
    # list (clamping index to the last entry); fall back to offset + 1.
    valid_arg = func.GetValidArg(offset)
    if valid_arg != None:
      return valid_arg
    if 'valid' in self.enum_info:
      valid = self.enum_info['valid']
      num_valid = len(valid)
      if index >= num_valid:
        index = num_valid - 1
      return valid[index]
    return str(offset + 1)

  def GetValidClientSideArg(self, func, offset, index):
    """Gets a valid value for this argument."""
    return self.GetValidArg(func, offset, index)

  def GetValidClientSideCmdArg(self, func, offset, index):
    """Gets a valid value for this argument."""
    return self.GetValidArg(func, offset, index)

  def GetValidGLArg(self, func, offset, index):
    """Gets a valid value for this argument."""
    return self.GetValidArg(func, offset, index)

  def GetNumInvalidValues(self, func):
    """returns the number of invalid values to be tested."""
    if 'invalid' in self.enum_info:
      invalid = self.enum_info['invalid']
      return len(invalid)
    return 0

  def GetInvalidArg(self, offset, index):
    """returns an invalid value by index."""
    # Invalid enum values parse fine (kNoError) but must raise the class's
    # configured GL error.
    if 'invalid' in self.enum_info:
      invalid = self.enum_info['invalid']
      num_invalid = len(invalid)
      if index >= num_invalid:
        index = num_invalid - 1
      return (invalid[index], "kNoError", self.gl_error)
    return ("---ERROR1---", "kNoError", self.gl_error)
class EnumArgument(EnumBaseArgument):
  """A true GLenum argument; out-of-set values report GL_INVALID_ENUM."""

  def __init__(self, name, type):
    EnumBaseArgument.__init__(self, name, "GLenum", type, "GL_INVALID_ENUM")

  def GetLogArg(self):
    """Overridden from Argument: log the human-readable enum name."""
    return "GLES2Util::GetString%s(%s)" % (self.type_name, self.name)
class IntArgument(EnumBaseArgument):
  """A GLint restricted to an enumerated set of values.

  glTexImage2D, for example, takes its internalformat as a GLint rather
  than a GLenum, so out-of-set values report GL_INVALID_VALUE.
  """

  def __init__(self, name, type):
    EnumBaseArgument.__init__(self, name, "GLint", type, "GL_INVALID_VALUE")
class ValidatedBoolArgument(EnumBaseArgument):
  """A GLboolean restricted to specific values.

  glUniformMatrix*, for example, requires its transpose argument to be
  false; out-of-set values report GL_INVALID_VALUE.
  """

  def __init__(self, name, type):
    EnumBaseArgument.__init__(self, name, "GLboolean", type,
                              "GL_INVALID_VALUE")

  def GetLogArg(self):
    """Overridden from Argument: log true/false rather than a number."""
    return 'GLES2Util::GetStringBool(%s)' % self.name
class ImmediatePointerArgument(Argument):
  """A class that represents an immediate argument to a function.

  An immediate argument is one where the data follows the command.
  """

  def __init__(self, name, type):
    Argument.__init__(self, name, type)

  def AddCmdArgs(self, args):
    """Overridden from Argument."""
    # Immediate data travels inline after the command; no cmd field.
    pass

  def WriteGetCode(self, file):
    """Overridden from Argument."""
    file.Write(
      " %s %s = GetImmediateDataAs<%s>(\n" %
      (self.type, self.name, self.type))
    file.Write(" c, data_size, immediate_data_size);\n")

  def WriteValidationCode(self, file, func):
    """Overridden from Argument."""
    # NULL here means the inline payload was out of bounds.
    file.Write(" if (%s == NULL) {\n" % self.name)
    file.Write(" return error::kOutOfBounds;\n")
    file.Write(" }\n")

  def GetImmediateVersion(self):
    """Overridden from Argument."""
    # Already immediate; returning None keeps it out of a second pass.
    return None

  def WriteDestinationInitalizationValidation(self, file, func):
    """Overridden from Argument."""
    self.WriteDestinationInitalizationValidatationIfNeeded(file, func)

  def GetLogArg(self):
    """Overridden from Argument."""
    return "static_cast<const void*>(%s)" % self.name
class BucketPointerArgument(Argument):
  """A class that represents a bucket argument to a function.

  The pointed-to data is carried in a bucket; the decoder reads it back
  out of the bucket rather than from shared memory.
  """

  def __init__(self, name, type):
    Argument.__init__(self, name, type)

  def AddCmdArgs(self, args):
    """Overridden from Argument."""
    # The bucket id is added by the handler; no per-argument cmd field.
    pass

  def WriteGetCode(self, file):
    """Overridden from Argument."""
    file.Write(
      " %s %s = bucket->GetData(0, data_size);\n" %
      (self.type, self.name))

  def WriteValidationCode(self, file, func):
    """Overridden from Argument."""
    # Bucket existence/size is validated by the handler before this point.
    pass

  def GetImmediateVersion(self):
    """Overridden from Argument."""
    # Bucket arguments have no immediate form.
    return None

  def WriteDestinationInitalizationValidation(self, file, func):
    """Overridden from Argument."""
    self.WriteDestinationInitalizationValidatationIfNeeded(file, func)

  def GetLogArg(self):
    """Overridden from Argument."""
    return "static_cast<const void*>(%s)" % self.name
class PointerArgument(Argument):
  """A class that represents a pointer argument to a function.

  In the wire format a pointer expands into a shared-memory (shm_id,
  shm_offset) pair of uint32 fields; immediate and bucket variants of the
  argument are produced on demand.
  """

  def __init__(self, name, type):
    Argument.__init__(self, name, type)

  def IsPointer(self):
    """Returns true if argument is a pointer."""
    return True

  def GetValidArg(self, func, offset, index):
    """Overridden from Argument."""
    # A pointer expands to two cmd fields, so the valid value is a pair.
    return "shared_memory_id_, shared_memory_offset_"

  def GetValidGLArg(self, func, offset, index):
    """Overridden from Argument."""
    return "reinterpret_cast<%s>(shared_memory_address_)" % self.type

  def GetNumInvalidValues(self, func):
    """Overridden from Argument."""
    # Two failure modes: bad shm id, bad shm offset.
    return 2

  def GetInvalidArg(self, offset, index):
    """Overridden from Argument."""
    if index == 0:
      return ("kInvalidSharedMemoryId, 0", "kOutOfBounds", None)
    else:
      return ("shared_memory_id_, kInvalidSharedMemoryOffset",
              "kOutOfBounds", None)

  def GetLogArg(self):
    """Overridden from Argument."""
    return "static_cast<const void*>(%s)" % self.name

  def AddCmdArgs(self, args):
    """Overridden from Argument."""
    # The single pointer becomes two uint32 wire fields.
    args.append(Argument("%s_shm_id" % self.name, 'uint32'))
    args.append(Argument("%s_shm_offset" % self.name, 'uint32'))

  def WriteGetCode(self, file):
    """Overridden from Argument."""
    file.Write(
      " %s %s = GetSharedMemoryAs<%s>(\n" %
      (self.type, self.name, self.type))
    file.Write(
      " c.%s_shm_id, c.%s_shm_offset, data_size);\n" %
      (self.name, self.name))

  def WriteGetAddress(self, file):
    """Overridden from Argument."""
    file.Write(
      " %s %s = GetSharedMemoryAs<%s>(\n" %
      (self.type, self.name, self.type))
    file.Write(
      " %s_shm_id, %s_shm_offset, %s_size);\n" %
      (self.name, self.name, self.name))

  def WriteValidationCode(self, file, func):
    """Overridden from Argument."""
    # NULL means the shm id/offset pair did not resolve.
    file.Write(" if (%s == NULL) {\n" % self.name)
    file.Write(" return error::kOutOfBounds;\n")
    file.Write(" }\n")

  def GetImmediateVersion(self):
    """Overridden from Argument."""
    return ImmediatePointerArgument(self.name, self.type)

  def GetBucketVersion(self):
    """Overridden from Argument."""
    # Input strings get a dedicated bucket argument that re-materializes a
    # const char* on the service side.
    if self.type == "const char*":
      return InputStringBucketArgument(self.name, self.type)
    return BucketPointerArgument(self.name, self.type)

  def WriteDestinationInitalizationValidation(self, file, func):
    """Overridden from Argument."""
    self.WriteDestinationInitalizationValidatationIfNeeded(file, func)
class InputStringBucketArgument(Argument):
  """An string input argument where the string is passed in a bucket.

  The wire field holds the bucket id; the decoder fetches the bucket,
  converts it to a std::string, and exposes a const char* under the
  original argument name.
  """

  def __init__(self, name, type):
    # NOTE(review): `type` is ignored; the wire type is always the uint32
    # bucket id -- confirm no caller relies on it.
    Argument.__init__(self, name + "_bucket_id", "uint32")

  def WriteGetCode(self, file):
    """Overridden from Argument."""
    code = """
Bucket* %(name)s_bucket = GetBucket(c.%(name)s);
if (!%(name)s_bucket) {
return error::kInvalidArguments;
}
std::string %(name)s_str;
if (!%(name)s_bucket->GetAsString(&%(name)s_str)) {
return error::kInvalidArguments;
}
const char* %(name)s = %(name)s_str.c_str();
"""
    file.Write(code % {
      'name': self.name,
    })

  def GetValidArg(self, func, offset, index):
    # Tests pre-populate this well-known bucket with a name string.
    return "kNameBucketId"

  def GetValidGLArg(self, func, offset, index):
    # The service passes the re-materialized string; tests match any value.
    return "_"
class NonImmediatePointerArgument(PointerArgument):
  """A pointer argument that stays a pointer even in an immediate cmd.

  Its data is never inlined after the command, so the immediate form of
  the command reuses this argument unchanged.
  """

  def __init__(self, name, type):
    PointerArgument.__init__(self, name, type)

  def IsPointer(self):
    """Overridden from PointerArgument: not treated as pointer data."""
    return False

  def GetImmediateVersion(self):
    """Overridden from PointerArgument: unchanged in the immediate cmd."""
    return self
class ResourceIdArgument(Argument):
  """Argument holding a client resource id (e.g. GLidTexture).

  The "GLid" prefix is stripped to recover the resource type name and the
  wire type becomes GLuint.
  """

  def __init__(self, name, type):
    match = re.match(r"(GLid\w+)", type)
    self.resource_type = match.group(1)[4:]  # drop the "GLid" prefix
    Argument.__init__(self, name, type.replace(match.group(1), "GLuint"))

  def WriteGetCode(self, file):
    """Overridden from Argument."""
    file.Write(" %s %s = c.%s;\n" % (self.type, self.name, self.name))

  def GetValidArg(self, func, offset, index):
    """Returns the test fixture's client-side id for this resource type."""
    return "client_%s_id_" % self.resource_type.lower()

  def GetValidGLArg(self, func, offset, index):
    """Returns the test fixture's service-side id for this resource type."""
    return "kService%sId" % self.resource_type
class ResourceIdBindArgument(Argument):
  """Represents a resource id argument to a bind function.

  e.g. "GLidBindTexture" -> resource_type "Texture", wire type GLuint.
  """

  def __init__(self, name, type):
    match = re.match("(GLidBind\w+)", type)
    # Strip the "GLidBind" prefix to recover the resource type name.
    self.resource_type = match.group(1)[8:]
    type = type.replace(match.group(1), "GLuint")
    Argument.__init__(self, name, type)

  def WriteGetCode(self, file):
    """Writes the code to get an argument from a command structure."""
    code = """ %(type)s %(name)s = c.%(name)s;
"""
    file.Write(code % {'type': self.type, 'name': self.name})

  def GetValidArg(self, func, offset, index):
    # The test fixture's client-side id for this resource type.
    return "client_%s_id_" % self.resource_type.lower()

  def GetValidGLArg(self, func, offset, index):
    # The test fixture's service-side id for this resource type.
    return "kService%sId" % self.resource_type
class ResourceIdZeroArgument(Argument):
  """Resource id argument for functions where 0 (no object) is legal.

  e.g. "GLidZeroTexture" -> resource_type "Texture", wire type GLuint.
  """

  def __init__(self, name, type):
    match = re.match(r"(GLidZero\w+)", type)
    self.resource_type = match.group(1)[8:]  # drop the "GLidZero" prefix
    Argument.__init__(self, name, type.replace(match.group(1), "GLuint"))

  def WriteGetCode(self, file):
    """Overridden from Argument."""
    file.Write(" %s %s = c.%s;\n" % (self.type, self.name, self.name))

  def GetValidArg(self, func, offset, index):
    """Returns the test fixture's client-side id for this resource type."""
    return "client_%s_id_" % self.resource_type.lower()

  def GetValidGLArg(self, func, offset, index):
    """Returns the test fixture's service-side id for this resource type."""
    return "kService%sId" % self.resource_type

  def GetNumInvalidValues(self, func):
    """One invalid case: an id the service has never seen."""
    return 1

  def GetInvalidArg(self, offset, index):
    """Unknown client ids parse fine but raise GL_INVALID_VALUE."""
    return ("kInvalidClientId", "kNoError", "GL_INVALID_VALUE")
class Function(object):
  """A class that represents a function.

  Holds the parsed signature plus the metadata (FunctionInfo) for one GL
  entry point and forwards every generation step to its type handler.
  Argument lists exist in parallel forms: original (as in GL), cmd (wire
  format fields), and init (cmd Init/Set helper parameters).
  """

  def __init__(self, original_name, name, info, return_type, original_args,
               args_for_cmds, cmd_args, init_args, num_pointer_args):
    self.name = name
    self.original_name = original_name
    self.info = info
    self.type_handler = info.type_handler
    self.return_type = return_type
    self.original_args = original_args
    self.num_pointer_args = num_pointer_args
    # Only void functions without pointer args can be fully auto-generated.
    self.can_auto_generate = num_pointer_args == 0 and return_type == "void"
    self.cmd_args = cmd_args
    self.init_args = init_args
    # NOTE(review): InitFunction runs before args_for_cmds/is_immediate are
    # assigned, so handlers' InitFunction must only touch cmd_args --
    # confirm before reordering these statements.
    self.InitFunction()
    self.args_for_cmds = args_for_cmds
    self.is_immediate = False

  def IsType(self, type_name):
    """Returns true if function is a certain type."""
    return self.info.type == type_name

  def InitFunction(self):
    """Calls the init function for the type handler."""
    self.type_handler.InitFunction(self)

  def GetInfo(self, name):
    """Returns a value from the function info for this function."""
    if hasattr(self.info, name):
      return getattr(self.info, name)
    return None

  def GetValidArg(self, index):
    """Gets a valid arg from the function info if one exists."""
    valid_args = self.GetInfo('valid_args')
    # valid_args is keyed by the argument index as a string.
    if valid_args and str(index) in valid_args:
      return valid_args[str(index)]
    return None

  def AddInfo(self, name, value):
    """Adds an info."""
    setattr(self.info, name, value)

  def IsCoreGLFunction(self):
    """True when neither an extension nor tied to a named Pepper interface."""
    return (not self.GetInfo('extension') and
            not self.GetInfo('pepper_interface'))

  def InPepperInterface(self, interface):
    """True if this function belongs to the given Pepper interface."""
    ext = self.GetInfo('pepper_interface')
    # The unnamed (core) interface carries all core GL functions.
    if not interface.GetName():
      return self.IsCoreGLFunction()
    return ext == interface.GetName()

  def InAnyPepperExtension(self):
    """True if exposed through some Pepper interface (core or named)."""
    return self.IsCoreGLFunction() or self.GetInfo('pepper_interface')

  def GetGLFunctionName(self):
    """Gets the function to call to execute GL for this command."""
    # A decoder_func override points at a decoder method instead of GL.
    if self.GetInfo('decoder_func'):
      return self.GetInfo('decoder_func')
    return "gl%s" % self.original_name

  def GetGLTestFunctionName(self):
    """Gets the GL function name expected by mock-GL unit tests."""
    gl_func_name = self.GetInfo('gl_test_func')
    if gl_func_name == None:
      gl_func_name = self.GetGLFunctionName()
    # Mock expectations use the name without the "gl" prefix.
    if gl_func_name.startswith("gl"):
      gl_func_name = gl_func_name[2:]
    else:
      gl_func_name = self.original_name
    return gl_func_name

  def AddCmdArg(self, arg):
    """Adds a cmd argument to this function."""
    self.cmd_args.append(arg)

  def GetCmdArgs(self):
    """Gets the command args for this function."""
    return self.cmd_args

  def ClearCmdArgs(self):
    """Clears the command args for this function."""
    self.cmd_args = []

  def GetInitArgs(self):
    """Gets the init args for this function."""
    return self.init_args

  def GetOriginalArgs(self):
    """Gets the original arguments to this function."""
    return self.original_args

  def GetLastOriginalArg(self):
    """Gets the last original argument to this function."""
    return self.original_args[len(self.original_args) - 1]

  def __GetArgList(self, arg_string, add_comma):
    """Adds a comma if arg_string is not empty and add_comma is true."""
    comma = ""
    if add_comma and len(arg_string):
      comma = ", "
    return "%s%s" % (comma, arg_string)

  def MakeTypedOriginalArgString(self, prefix, add_comma = False):
    """Gets a typed list of arguments as they are in GL."""
    args = self.GetOriginalArgs()
    arg_string = ", ".join(
      ["%s %s%s" % (arg.type, prefix, arg.name) for arg in args])
    return self.__GetArgList(arg_string, add_comma)

  def MakeOriginalArgString(self, prefix, add_comma = False, separator = ", "):
    """Gets the list of arguments as they are in GL."""
    args = self.GetOriginalArgs()
    arg_string = separator.join(
      ["%s%s" % (prefix, arg.name) for arg in args])
    return self.__GetArgList(arg_string, add_comma)

  def MakeTypedCmdArgString(self, prefix, add_comma = False):
    """Gets a typed list of arguments as they need to be for command buffers."""
    args = self.GetCmdArgs()
    arg_string = ", ".join(
      ["%s %s%s" % (arg.type, prefix, arg.name) for arg in args])
    return self.__GetArgList(arg_string, add_comma)

  def MakeCmdArgString(self, prefix, add_comma = False):
    """Gets the list of arguments as they need to be for command buffers."""
    args = self.GetCmdArgs()
    arg_string = ", ".join(
      ["%s%s" % (prefix, arg.name) for arg in args])
    return self.__GetArgList(arg_string, add_comma)

  def MakeTypedInitString(self, prefix, add_comma = False):
    """Gets a typed list of arguments as they need to be for cmd Init/Set."""
    args = self.GetInitArgs()
    arg_string = ", ".join(
      ["%s %s%s" % (arg.type, prefix, arg.name) for arg in args])
    return self.__GetArgList(arg_string, add_comma)

  def MakeInitString(self, prefix, add_comma = False):
    """Gets the list of arguments as they need to be for cmd Init/Set."""
    args = self.GetInitArgs()
    arg_string = ", ".join(
      ["%s%s" % (prefix, arg.name) for arg in args])
    return self.__GetArgList(arg_string, add_comma)

  def MakeLogArgString(self):
    """Makes a string of the arguments for the LOG macros."""
    args = self.GetOriginalArgs()
    return ' << ", " << '.join([arg.GetLogArg() for arg in args])

  def WriteCommandDescription(self, file):
    """Writes a description of the command."""
    file.Write("//! Command that corresponds to gl%s.\n" % self.original_name)

  def WriteHandlerValidation(self, file):
    """Writes validation code for the function."""
    for arg in self.GetOriginalArgs():
      arg.WriteValidationCode(file, self)
    self.WriteValidationCode(file)

  def WriteHandlerImplementation(self, file):
    """Writes the handler implementation for this command."""
    self.type_handler.WriteHandlerImplementation(self, file)

  def WriteValidationCode(self, file):
    """Writes the validation code for a command (none by default)."""
    pass

  def WriteCmdArgFlag(self, file):
    """Writes the cmd kArgFlags constant."""
    file.Write(" static const cmd::ArgFlags kArgFlags = cmd::kFixed;\n")

  def WriteCmdComputeSize(self, file):
    """Writes the ComputeSize function for the command."""
    file.Write(" static uint32 ComputeSize() {\n")
    file.Write(
      " return static_cast<uint32>(sizeof(ValueType)); // NOLINT\n")
    file.Write(" }\n")
    file.Write("\n")

  def WriteCmdSetHeader(self, file):
    """Writes the cmd's SetHeader function."""
    file.Write(" void SetHeader() {\n")
    file.Write(" header.SetCmd<ValueType>();\n")
    file.Write(" }\n")
    file.Write("\n")

  def WriteCmdInit(self, file):
    """Writes the cmd's Init function."""
    file.Write(" void Init(%s) {\n" % self.MakeTypedCmdArgString("_"))
    file.Write(" SetHeader();\n")
    args = self.GetCmdArgs()
    # Copy each "_name" parameter into the matching struct field.
    for arg in args:
      file.Write(" %s = _%s;\n" % (arg.name, arg.name))
    file.Write(" }\n")
    file.Write("\n")

  def WriteCmdSet(self, file):
    """Writes the cmd's Set function."""
    copy_args = self.MakeCmdArgString("_", False)
    file.Write(" void* Set(void* cmd%s) {\n" %
               self.MakeTypedCmdArgString("_", True))
    file.Write(" static_cast<ValueType*>(cmd)->Init(%s);\n" % copy_args)
    file.Write(" return NextCmdAddress<ValueType>(cmd);\n")
    file.Write(" }\n")
    file.Write("\n")

  def WriteStruct(self, file):
    """Writes the command's struct declaration."""
    self.type_handler.WriteStruct(self, file)

  def WriteDocs(self, file):
    """Writes the command's documentation."""
    self.type_handler.WriteDocs(self, file)

  def WriteCmdHelper(self, file):
    """Writes the cmd's helper."""
    self.type_handler.WriteCmdHelper(self, file)

  def WriteServiceImplementation(self, file):
    """Writes the service implementation for a command."""
    self.type_handler.WriteServiceImplementation(self, file)

  def WriteServiceUnitTest(self, file):
    """Writes the service unit test for a command."""
    self.type_handler.WriteServiceUnitTest(self, file)

  def WriteGLES2CLibImplementation(self, file):
    """Writes the GLES2 C Lib Implementation."""
    self.type_handler.WriteGLES2CLibImplementation(self, file)

  def WriteGLES2InterfaceHeader(self, file):
    """Writes the GLES2 Interface declaration."""
    self.type_handler.WriteGLES2InterfaceHeader(self, file)

  def WriteGLES2InterfaceStub(self, file):
    """Writes the GLES2 Interface Stub declaration."""
    self.type_handler.WriteGLES2InterfaceStub(self, file)

  def WriteGLES2InterfaceStubImpl(self, file):
    """Writes the GLES2 Interface Stub definition."""
    self.type_handler.WriteGLES2InterfaceStubImpl(self, file)

  def WriteGLES2ImplementationHeader(self, file):
    """Writes the GLES2 Implementation declaration."""
    self.type_handler.WriteGLES2ImplementationHeader(self, file)

  def WriteGLES2Implementation(self, file):
    """Writes the GLES2 Implementation definition."""
    self.type_handler.WriteGLES2Implementation(self, file)

  def WriteGLES2TraceImplementationHeader(self, file):
    """Writes the GLES2 Trace Implementation declaration."""
    self.type_handler.WriteGLES2TraceImplementationHeader(self, file)

  def WriteGLES2TraceImplementation(self, file):
    """Writes the GLES2 Trace Implementation definition."""
    self.type_handler.WriteGLES2TraceImplementation(self, file)

  def WriteGLES2Header(self, file):
    """Writes the GLES2 header entry."""
    self.type_handler.WriteGLES2Header(self, file)

  def WriteGLES2ImplementationUnitTest(self, file):
    """Writes the GLES2 Implementation unit test."""
    self.type_handler.WriteGLES2ImplementationUnitTest(self, file)

  def WriteDestinationInitalizationValidation(self, file):
    """Writes the client side destination initialization validation."""
    self.type_handler.WriteDestinationInitalizationValidation(self, file)

  def WriteFormatTest(self, file):
    """Writes the cmd's format test."""
    self.type_handler.WriteFormatTest(self, file)
class PepperInterface(object):
  """Description of one Pepper (PPAPI) OpenGLES2 interface.

  `name` is the interface suffix ('' for the core interface); `dev` marks
  developer-channel interfaces, which carry a Dev suffix in their macro,
  string, and struct spellings.
  """

  def __init__(self, info):
    self.name = info["name"]
    self.dev = info["dev"]

  def GetName(self):
    """Returns the interface suffix ('' for the core interface)."""
    return self.name

  def GetInterfaceName(self):
    """Returns the interface macro, e.g. PPB_OPENGLES2_FOO_DEV_INTERFACE."""
    upperint = "_" + self.name.upper() if self.name else ""
    dev = "_DEV" if self.dev else ""
    return "PPB_OPENGLES2%s%s_INTERFACE" % (upperint, dev)

  def GetInterfaceString(self):
    """Returns the human-readable interface string."""
    suffix = "(Dev)" if self.dev else ""
    return "PPB_OpenGLES2%s%s" % (self.name, suffix)

  def GetStructName(self):
    """Returns the C struct name for the interface."""
    suffix = "_Dev" if self.dev else ""
    return "PPB_OpenGLES2%s%s" % (self.name, suffix)
class ImmediateFunction(Function):
  """A class that represents an immediate function command.

  Built from a regular Function: each argument is swapped for its
  immediate version (arguments whose data is inlined after the command
  lose their wire fields; GetImmediateVersion may return None to drop an
  argument), and the command name gains an "Immediate" suffix. Generation
  is delegated to the type handler's Immediate* hooks.
  """

  def __init__(self, func):
    # Original (GL-visible) arguments, in their immediate form.
    new_args = []
    for arg in func.GetOriginalArgs():
      new_arg = arg.GetImmediateVersion()
      if new_arg:
        new_args.append(new_arg)
    # Wire fields and cmd-building args, also in immediate form.
    cmd_args = []
    new_args_for_cmds = []
    for arg in func.args_for_cmds:
      new_arg = arg.GetImmediateVersion()
      if new_arg:
        new_args_for_cmds.append(new_arg)
        new_arg.AddCmdArgs(cmd_args)
    new_init_args = []
    for arg in new_args_for_cmds:
      arg.AddInitArgs(new_init_args)
    Function.__init__(
      self,
      func.original_name,
      "%sImmediate" % func.name,
      func.info,
      func.return_type,
      new_args,
      new_args_for_cmds,
      cmd_args,
      new_init_args,
      0)
    self.is_immediate = True

  def WriteCommandDescription(self, file):
    """Overridden from Function"""
    file.Write("//! Immediate version of command that corresponds to gl%s.\n" %
               self.original_name)

  def WriteServiceImplementation(self, file):
    """Overridden from Function"""
    self.type_handler.WriteImmediateServiceImplementation(self, file)

  def WriteHandlerImplementation(self, file):
    """Overridden from Function"""
    self.type_handler.WriteImmediateHandlerImplementation(self, file)

  def WriteServiceUnitTest(self, file):
    """Writes the service unit test for a command."""
    self.type_handler.WriteImmediateServiceUnitTest(self, file)

  def WriteValidationCode(self, file):
    """Overridden from Function"""
    self.type_handler.WriteImmediateValidationCode(self, file)

  def WriteCmdArgFlag(self, file):
    """Overridden from Function"""
    # Immediate commands carry trailing data, so their size is kAtLeastN.
    file.Write(" static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;\n")

  def WriteCmdComputeSize(self, file):
    """Overridden from Function"""
    self.type_handler.WriteImmediateCmdComputeSize(self, file)

  def WriteCmdSetHeader(self, file):
    """Overridden from Function"""
    self.type_handler.WriteImmediateCmdSetHeader(self, file)

  def WriteCmdInit(self, file):
    """Overridden from Function"""
    self.type_handler.WriteImmediateCmdInit(self, file)

  def WriteCmdSet(self, file):
    """Overridden from Function"""
    self.type_handler.WriteImmediateCmdSet(self, file)

  def WriteCmdHelper(self, file):
    """Overridden from Function"""
    self.type_handler.WriteImmediateCmdHelper(self, file)

  def WriteFormatTest(self, file):
    """Overridden from Function"""
    self.type_handler.WriteImmediateFormatTest(self, file)
class BucketFunction(Function):
  """A class that represents a bucket version of a function command.

  Built from a regular Function: each argument is swapped for its bucket
  version (GetBucketVersion may return None to drop an argument, e.g.
  data_size), and the command name gains a "Bucket" suffix. Generation is
  delegated to the type handler's Bucket* hooks.
  """

  def __init__(self, func):
    # Original (GL-visible) arguments, in their bucket form.
    new_args = []
    for arg in func.GetOriginalArgs():
      new_arg = arg.GetBucketVersion()
      if new_arg:
        new_args.append(new_arg)
    # Wire fields and cmd-building args, also in bucket form.
    cmd_args = []
    new_args_for_cmds = []
    for arg in func.args_for_cmds:
      new_arg = arg.GetBucketVersion()
      if new_arg:
        new_args_for_cmds.append(new_arg)
        new_arg.AddCmdArgs(cmd_args)
    new_init_args = []
    for arg in new_args_for_cmds:
      arg.AddInitArgs(new_init_args)
    Function.__init__(
      self,
      func.original_name,
      "%sBucket" % func.name,
      func.info,
      func.return_type,
      new_args,
      new_args_for_cmds,
      cmd_args,
      new_init_args,
      0)

  # NOTE(review): unlike ImmediateFunction, InitFunction is deliberately
  # NOT overridden here (the commented-out override below records that
  # decision), so the type handler's InitFunction also runs for the bucket
  # variant.
  # def InitFunction(self):
  # """Overridden from Function"""
  # pass

  def WriteCommandDescription(self, file):
    """Overridden from Function"""
    file.Write("//! Bucket version of command that corresponds to gl%s.\n" %
               self.original_name)

  def WriteServiceImplementation(self, file):
    """Overridden from Function"""
    self.type_handler.WriteBucketServiceImplementation(self, file)

  def WriteHandlerImplementation(self, file):
    """Overridden from Function"""
    self.type_handler.WriteBucketHandlerImplementation(self, file)

  def WriteServiceUnitTest(self, file):
    """Writes the service unit test for a command."""
    self.type_handler.WriteBucketServiceUnitTest(self, file)
def CreateArg(arg_string):
  """Creates an Argument for one "type name" declaration string.

  Returns None for a bare 'void'.  The prefix checks below are
  order-sensitive: more specific type prefixes must be tested before the
  generic ones (e.g. GLidBind before GLid, pointer before GLid*).
  """
  arg_parts = arg_string.split()
  if len(arg_parts) == 1 and arg_parts[0] == 'void':
    return None
  arg_name = arg_parts[-1]
  type_string = " ".join(arg_parts[0:-1])
  prefix = arg_parts[0]
  # Pointer arguments (must be checked before the resource-id prefixes).
  if arg_string.find('*') >= 0:
    if prefix == 'NonImmediate':
      return NonImmediatePointerArgument(arg_name, " ".join(arg_parts[1:-1]))
    return PointerArgument(arg_name, type_string)
  # Resource-id arguments.
  if prefix.startswith('GLidBind'):
    return ResourceIdBindArgument(arg_name, type_string)
  if prefix.startswith('GLidZero'):
    return ResourceIdZeroArgument(arg_name, type_string)
  if prefix.startswith('GLid'):
    return ResourceIdArgument(arg_name, type_string)
  # Validated enum/bool arguments use a suffix after the base GL type.
  if prefix.startswith('GLenum') and len(prefix) > 6:
    return EnumArgument(arg_name, type_string)
  if prefix.startswith('GLboolean') and len(prefix) > 9:
    return ValidatedBoolArgument(arg_name, type_string)
  if prefix.startswith('GLboolean'):
    return BoolArgument(arg_name, type_string)
  if prefix.startswith('GLintUniformLocation'):
    return UniformLocationArgument(arg_name)
  if (prefix.startswith('GLint') and len(prefix) > 5 and
      not prefix.startswith('GLintptr')):
    return IntArgument(arg_name, type_string)
  if (prefix.startswith('GLsizeiNotNegative') or
      prefix.startswith('GLintptrNotNegative')):
    # Strip the 'NotNegative' suffix (11 chars) to recover the GL type.
    return SizeNotNegativeArgument(arg_name, type_string, prefix[0:-11])
  if prefix.startswith('GLsize'):
    return SizeArgument(arg_name, type_string)
  return Argument(arg_name, type_string)
class GLGenerator(object):
  """A class to generate GL command buffers."""

  # Matches prototypes like "GL_APICALL void GL_APIENTRY glFlush (void);"
  # group(1) = return type, group(2) = gl-prefixed name, group(3) = raw args.
  _function_re = re.compile(r'GL_APICALL(.*?)GL_APIENTRY (.*?) \((.*?)\);')

  def __init__(self, verbose):
    """Builds the handler tables.

    Args:
      verbose: when true, Log() prints progress messages.
    """
    self.original_functions = []  # every function parsed from the header
    self.functions = []           # subset that gets generated commands
    self.verbose = verbose
    self.errors = 0               # incremented by Error()
    self._function_info = {}      # function name -> FunctionInfo
    self._empty_type_handler = TypeHandler()
    self._empty_function_info = FunctionInfo({}, self._empty_type_handler)
    self.pepper_interfaces = []
    self.interface_info = {}
    # Maps the 'type' field of _FUNCTION_INFO entries to the handler that
    # knows how to emit code for that flavor of command.
    self._type_handlers = {
      'Bind': BindHandler(),
      'Create': CreateHandler(),
      'Custom': CustomHandler(),
      'Data': DataHandler(),
      'Delete': DeleteHandler(),
      'DELn': DELnHandler(),
      'GENn': GENnHandler(),
      'GETn': GETnHandler(),
      'GLchar': GLcharHandler(),
      'GLcharN': GLcharNHandler(),
      'HandWritten': HandWrittenHandler(),
      'Is': IsHandler(),
      'Manual': ManualHandler(),
      'PUT': PUTHandler(),
      'PUTn': PUTnHandler(),
      'PUTXn': PUTXnHandler(),
      'StateSet': StateSetHandler(),
      'StateSetRGBAlpha': StateSetRGBAlphaHandler(),
      'StateSetFrontBack': StateSetFrontBackHandler(),
      'StateSetFrontBackSeparate': StateSetFrontBackSeparateHandler(),
      'STRn': STRnHandler(),
      'Todo': TodoHandler(),
    }
    # NOTE(review): 'type' shadows the builtin here; left unchanged to keep
    # this block byte-identical.
    for func_name in _FUNCTION_INFO:
      info = _FUNCTION_INFO[func_name]
      type = ''
      if 'type' in info:
        type = info['type']
      self._function_info[func_name] = FunctionInfo(info,
                                                    self.GetTypeHandler(type))
    for interface in _PEPPER_INTERFACES:
      interface = PepperInterface(interface)
      self.pepper_interfaces.append(interface)
      self.interface_info[interface.GetName()] = interface
def AddFunction(self, func):
  """Registers func as a function that gets a generated command."""
  self.functions.append(func)
def GetTypeHandler(self, name):
  """Gets the type handler registered for the given type name.

  Args:
    name: the 'type' string from a _FUNCTION_INFO entry; '' selects the
        default (empty) handler.
  Returns:
    the matching handler, or self._empty_type_handler for ''.
  Raises:
    KeyError: if a non-empty name has no registered handler.
  """
  # `if name:` replaces `if len(name):` -- same truthiness, idiomatic.
  if name:
    if name in self._type_handlers:
      return self._type_handlers[name]
    raise KeyError("no such type handler: %s" % name)
  return self._empty_type_handler
def GetFunctionInfo(self, name):
  """Returns the FunctionInfo for name, or the empty info when unknown."""
  return self._function_info.get(name, self._empty_function_info)
def Log(self, msg):
  """Prints msg if verbose mode is enabled."""
  if self.verbose:
    # Parenthesized form works as both the Py2 print statement and the
    # Py3 print function for a single argument.
    print(msg)
def Error(self, msg):
  """Prints an error and bumps the error count consulted at exit."""
  # Parenthesized print is valid in both Python 2 and Python 3.
  print("Error: %s" % msg)
  self.errors += 1
def WriteLicense(self, file):
  """Writes the license."""
  # _LICENSE is the file-scope copyright header constant.
  file.Write(_LICENSE)
def WriteNamespaceOpen(self, file):
  """Writes the opening of the gpu::gles2 namespace."""
  for line in ("namespace gpu {\n", "namespace gles2 {\n", "\n"):
    file.Write(line)
def WriteNamespaceClose(self, file):
  """Writes the closing of the gpu::gles2 namespace."""
  for line in ("} // namespace gles2\n", "} // namespace gpu\n", "\n"):
    file.Write(line)
def ParseArgs(self, arg_string):
  """Parses a comma-separated function argument string.

  Returns a tuple (args, num_pointer_args, is_gl_enum) where is_gl_enum
  is true when any argument uses a bare 'GLenum' type.
  """
  args = []
  num_pointer_args = 0
  is_gl_enum = False
  for part in arg_string.split(','):
    if part.startswith('GLenum '):
      is_gl_enum = True
    parsed = CreateArg(part)
    if parsed:
      args.append(parsed)
      if parsed.IsPointer():
        num_pointer_args += 1
  return (args, num_pointer_args, is_gl_enum)
def ParseGLH(self, filename):
  """Parses the cmd_buffer_functions.txt file and extracts the functions"""
  # NOTE(review): `filename` is never used; the function list is always read
  # from this hard-coded path -- confirm before relying on the parameter.
  f = open("gpu/command_buffer/cmd_buffer_functions.txt", "r")
  functions = f.read()
  f.close()
  for line in functions.splitlines():
    match = self._function_re.match(line)
    if match:
      # Strip the "gl" prefix from the prototype name.
      func_name = match.group(2)[2:]
      func_info = self.GetFunctionInfo(func_name)
      if func_info.type != 'Noop':
        return_type = match.group(1).strip()
        arg_string = match.group(3)
        (args, num_pointer_args, is_gl_enum) = self.ParseArgs(arg_string)
        # comment in to find out which functions use bare enums.
        # if is_gl_enum:
        #   self.Log("%s uses bare GLenum" % func_name)
        args_for_cmds = args
        # A _FUNCTION_INFO entry can override the command's wire arguments.
        if hasattr(func_info, 'cmd_args'):
          (args_for_cmds, num_pointer_args, is_gl_enum) = (
              self.ParseArgs(getattr(func_info, 'cmd_args')))
        cmd_args = []
        for arg in args_for_cmds:
          arg.AddCmdArgs(cmd_args)
        init_args = []
        for arg in args_for_cmds:
          arg.AddInitArgs(init_args)
        # Non-void functions get a synthesized "result" init argument.
        return_arg = CreateArg(return_type + " result")
        if return_arg:
          init_args.append(return_arg)
        f = Function(func_name, func_name, func_info, return_type, args,
                     args_for_cmds, cmd_args, init_args, num_pointer_args)
        self.original_functions.append(f)
        # gen_cmd defaults to enabled when unset (None).
        gen_cmd = f.GetInfo('gen_cmd')
        if gen_cmd == True or gen_cmd == None:
          self.AddFunction(f)
          f.type_handler.AddImmediateFunction(self, f)
          f.type_handler.AddBucketFunction(self, f)
  self.Log("Auto Generated Functions : %d" %
           len([f for f in self.functions if f.can_auto_generate or
                (not f.IsType('') and not f.IsType('Custom') and
                 not f.IsType('Todo'))]))
  funcs = [f for f in self.functions if not f.can_auto_generate and
           (f.IsType('') or f.IsType('Custom') or f.IsType('Todo'))]
  self.Log("Non Auto Generated Functions: %d" % len(funcs))
  for f in funcs:
    self.Log(" %-10s %-20s gl%s" % (f.info.type, f.return_type, f.name))
def WriteCommandIds(self, filename):
  """Writes the command buffer format (the command id list header)."""
  file = CHeaderWriter(filename)
  file.Write("#define GLES2_COMMAND_LIST(OP) \\\n")
  # GLES2 command ids start at 256, after the common commands.  enumerate()
  # replaces the manual counter, which shadowed the builtin `id`.
  for cmd_id, func in enumerate(self.functions, 256):
    file.Write(" %-60s /* %d */ \\\n" %
               ("OP(%s)" % func.name, cmd_id))
  file.Write("\n")
  file.Write("enum CommandId {\n")
  file.Write(" kStartPoint = cmd::kLastCommonId, "
             "// All GLES2 commands start after this.\n")
  file.Write("#define GLES2_CMD_OP(name) k ## name,\n")
  file.Write(" GLES2_COMMAND_LIST(GLES2_CMD_OP)\n")
  file.Write("#undef GLES2_CMD_OP\n")
  file.Write(" kNumCommands\n")
  file.Write("};\n")
  file.Write("\n")
  file.Close()
def WriteFormat(self, filename):
  """Writes the command buffer format"""
  file = CHeaderWriter(filename)
  # The dead `if True:` guard (a leftover from the commented-out gen_cmd
  # filter below) has been removed; every function is emitted.
  for func in self.functions:
    #gen_cmd = func.GetInfo('gen_cmd')
    #if gen_cmd == True or gen_cmd == None:
    func.WriteStruct(file)
  file.Write("\n")
  file.Close()
def WriteDocs(self, filename):
  """Writes the command buffer doc version of the commands"""
  file = CWriter(filename)
  # Dead `if True:` guard removed; every function is documented.
  for func in self.functions:
    #gen_cmd = func.GetInfo('gen_cmd')
    #if gen_cmd == True or gen_cmd == None:
    func.WriteDocs(file)
  file.Write("\n")
  file.Close()
def WriteFormatTest(self, filename):
  """Writes the command buffer format test."""
  file = CHeaderWriter(
      filename,
      "// This file contains unit tests for gles2 commmands\n"
      "// It is included by gles2_cmd_format_test.cc\n"
      "\n")
  # Dead `if True:` guard removed; every function gets a format test.
  for func in self.functions:
    #gen_cmd = func.GetInfo('gen_cmd')
    #if gen_cmd == True or gen_cmd == None:
    func.WriteFormatTest(file)
  file.Close()
def WriteCmdHelperHeader(self, filename):
  """Writes the gles2 command helper."""
  file = CHeaderWriter(filename)
  # Dead `if True:` guard removed; every function gets a helper.
  for func in self.functions:
    #gen_cmd = func.GetInfo('gen_cmd')
    #if gen_cmd == True or gen_cmd == None:
    func.WriteCmdHelper(file)
  file.Close()
def WriteServiceContextStateHeader(self, filename):
  """Writes the service context state header."""
  file = CHeaderWriter(
      filename,
      "// It is included by context_state.h\n")
  # One bool per GL capability (the Enable/Disable flags).
  file.Write("struct EnableFlags {\n")
  file.Write(" EnableFlags();\n")
  for capability in _CAPABILITY_FLAGS:
    file.Write(" bool %s;\n" % capability['name'])
  file.Write("};\n\n")
  # Then one member per individual state value; sorted for stable output.
  for state_name in sorted(_STATES.keys()):
    state = _STATES[state_name]
    for item in state['states']:
      file.Write("%s %s;\n" % (item['type'], item['name']))
  file.Write("\n")
  file.Close()
def WriteClientContextStateHeader(self, filename):
  """Writes the client context state header."""
  writer = CHeaderWriter(
      filename,
      "// It is included by client_context_state.h\n")
  writer.Write("struct EnableFlags {\n")
  writer.Write(" EnableFlags();\n")
  for capability in _CAPABILITY_FLAGS:
    writer.Write(" bool %s;\n" % capability['name'])
  writer.Write("};\n\n")
  writer.Close()
def WriteContextStateGetters(self, file, class_name):
  """Writes the state getters."""
  # Emit GetStateAsGLint and GetStateAsGLfloat for the given class.
  for gl_type in ["GLint", "GLfloat"]:
    file.Write("""
bool %s::GetStateAs%s(
GLenum pname, %s* params, GLsizei* num_written) const {
switch (pname) {
""" % (class_name, gl_type, gl_type))
    for state_name in _STATES.keys():
      state = _STATES[state_name]
      if 'enum' in state:
        # Grouped state: one pname returns all member values at once.
        file.Write(" case %s:\n" % state['enum'])
        file.Write(" *num_written = %d;\n" % len(state['states']))
        file.Write(" if (params) {\n")
        for ndx,item in enumerate(state['states']):
          file.Write(" params[%d] = static_cast<%s>(%s);\n" %
                     (ndx, gl_type, item['name']))
        file.Write(" }\n")
        file.Write(" return true;\n")
      else:
        # Independent states: each member has its own query enum.
        for item in state['states']:
          file.Write(" case %s:\n" % item['enum'])
          file.Write(" *num_written = 1;\n")
          file.Write(" if (params) {\n")
          file.Write(" params[0] = static_cast<%s>(%s);\n" %
                     (gl_type, item['name']))
          file.Write(" }\n")
          file.Write(" return true;\n")
    # Capability flags are queryable through the same switch.
    for capability in _CAPABILITY_FLAGS:
      file.Write(" case GL_%s:\n" % capability['name'].upper())
      file.Write(" *num_written = 1;\n")
      file.Write(" if (params) {\n")
      file.Write(
          " params[0] = static_cast<%s>(enable_flags.%s);\n" %
          (gl_type, capability['name']))
      file.Write(" }\n")
      file.Write(" return true;\n")
    file.Write(""" default:
return false;
}
}
""")
def WriteServiceContextStateImpl(self, filename):
  """Writes the context state service implementation.

  Emits the EnableFlags constructor plus ContextState::Initialize,
  InitCapabilities, InitState and GetEnabled, included by context_state.cc.
  """
  file = CHeaderWriter(
      filename,
      "// It is included by context_state.cc\n")
  # EnableFlags ctor: a capability defaults to true only when its table
  # entry has a 'default' key.
  code = []
  for capability in _CAPABILITY_FLAGS:
    code.append("%s(%s)" %
                (capability['name'],
                 ('false', 'true')['default' in capability]))
  file.Write("ContextState::EnableFlags::EnableFlags()\n : %s {\n}\n" %
             ",\n ".join(code))
  file.Write("\n")
  file.Write("void ContextState::Initialize() {\n")
  for state_name in sorted(_STATES.keys()):
    state = _STATES[state_name]
    for item in state['states']:
      file.Write(" %s = %s;\n" % (item['name'], item['default']))
  file.Write("}\n")
  file.Write("""
void ContextState::InitCapabilities() const {
""")
  for capability in _CAPABILITY_FLAGS:
    file.Write(" EnableDisable(GL_%s, enable_flags.%s);\n" %
               (capability['name'].upper(), capability['name']))
  file.Write("""}
void ContextState::InitState() const {
""")
  # We need to sort the keys so the expectations match
  for state_name in sorted(_STATES.keys()):
    state = _STATES[state_name]
    if state['type'] == 'FrontBack':
      # FrontBack states list the front half then the back half; emit one
      # gl call per half.  `//` makes the integer division explicit (same
      # result on Python 2, and correct if run under Python 3).
      num_states = len(state['states'])
      for ndx, group in enumerate(Grouper(num_states // 2, state['states'])):
        args = []
        for item in group:
          args.append('%s' % item['name'])
        file.Write(
            " gl%s(%s, %s);\n" %
            (state['func'], ('GL_FRONT', 'GL_BACK')[ndx], ", ".join(args)))
    else:
      args = []
      for item in state['states']:
        args.append('%s' % item['name'])
      file.Write(" gl%s(%s);\n" % (state['func'], ", ".join(args)))
  file.Write("}\n")
  file.Write("""bool ContextState::GetEnabled(GLenum cap) const {
switch (cap) {
""")
  for capability in _CAPABILITY_FLAGS:
    file.Write(" case GL_%s:\n" % capability['name'].upper())
    file.Write(" return enable_flags.%s;\n" % capability['name'])
  file.Write(""" default:
GPU_NOTREACHED();
return false;
}
}
""")
  self.WriteContextStateGetters(file, "ContextState")
  file.Close()
def WriteClientContextStateImpl(self, filename):
  """Writes the context state client side implementation."""
  file = CHeaderWriter(
      filename,
      "// It is included by client_context_state.cc\n")
  # EnableFlags ctor: default-true only when the entry has a 'default' key.
  code = []
  for capability in _CAPABILITY_FLAGS:
    code.append("%s(%s)" %
                (capability['name'],
                 ('false', 'true')['default' in capability]))
  file.Write(
      "ClientContextState::EnableFlags::EnableFlags()\n : %s {\n}\n" %
      ",\n ".join(code))
  file.Write("\n")
  # SetCapabilityState flips the cached flag and reports whether it changed.
  file.Write("""
bool ClientContextState::SetCapabilityState(
GLenum cap, bool enabled, bool* changed) {
*changed = false;
switch (cap) {
""")
  for capability in _CAPABILITY_FLAGS:
    file.Write(" case GL_%s:\n" % capability['name'].upper())
    file.Write(""" if (enable_flags.%(name)s != enabled) {
*changed = true;
enable_flags.%(name)s = enabled;
}
return true;
""" % capability)
  file.Write(""" default:
return false;
}
}
""")
  # GetEnabled reads the cached flag; false for unknown capabilities.
  file.Write("""bool ClientContextState::GetEnabled(
GLenum cap, bool* enabled) const {
switch (cap) {
""")
  for capability in _CAPABILITY_FLAGS:
    file.Write(" case GL_%s:\n" % capability['name'].upper())
    file.Write(" *enabled = enable_flags.%s;\n" % capability['name'])
    file.Write(" return true;\n")
  file.Write(""" default:
return false;
}
}
""")
  file.Close()
def WriteServiceImplementation(self, filename):
  """Writes the service decorder implementation."""
  file = CHeaderWriter(
      filename,
      "// It is included by gles2_cmd_decoder.cc\n")
  for func in self.functions:
    if True:
      #gen_cmd = func.GetInfo('gen_cmd')
      #if gen_cmd == True or gen_cmd == None:
      func.WriteServiceImplementation(file)
  file.Write("""
bool GLES2DecoderImpl::SetCapabilityState(GLenum cap, bool enabled) {
switch (cap) {
""")
  for capability in _CAPABILITY_FLAGS:
    file.Write(" case GL_%s:\n" % capability['name'].upper())
    # Capabilities with a 'state_flag' set a dirty flag instead of calling
    # GL immediately and return false to suppress the direct GL call.
    if 'state_flag' in capability:
      file.Write(""" if (state_.enable_flags.%(name)s != enabled) {
state_.enable_flags.%(name)s = enabled;
%(state_flag)s = true;
}
return false;
""" % capability)
    else:
      file.Write(""" state_.enable_flags.%(name)s = enabled;
return true;
""" % capability)
  file.Write(""" default:
NOTREACHED();
return false;
}
}
""")
  file.Close()
def WriteServiceUnitTests(self, filename):
  """Writes the service decorder unit tests."""
  num_tests = len(self.functions)
  FUNCTIONS_PER_FILE = 98  # hard code this so it doesn't change.
  count = 0
  # The tests are sharded across several generated files.
  for test_num in range(0, num_tests, FUNCTIONS_PER_FILE):
    count += 1
    name = filename % count
    file = CHeaderWriter(
        name,
        "// It is included by gles2_cmd_decoder_unittest_%d.cc\n" % count)
    file.SetFileNum(count)
    end = test_num + FUNCTIONS_PER_FILE
    if end > num_tests:
      end = num_tests
    for idx in range(test_num, end):
      func = self.functions[idx]
      if True:
        #gen_cmd = func.GetInfo('gen_cmd')
        #if gen_cmd == True or gen_cmd == None:
        # unit_test == False explicitly disables the generated test.
        if func.GetInfo('unit_test') == False:
          file.Write("// TODO(gman): %s\n" % func.name)
        else:
          func.WriteServiceUnitTest(file)
    file.Close()
  # File 0 holds the shared expectations used by the test base class.
  file = CHeaderWriter(
      filename % 0,
      "// It is included by gles2_cmd_decoder_unittest_base.cc\n")
  file.Write(
      """void GLES2DecoderTestBase::SetupInitCapabilitiesExpectations() {
""")
  for capability in _CAPABILITY_FLAGS:
    file.Write(" ExpectEnableDisable(GL_%s, %s);\n" %
               (capability['name'].upper(),
                ('false', 'true')['default' in capability]))
  file.Write("""}
void GLES2DecoderTestBase::SetupInitStateExpectations() {
""")
  # We need to sort the keys so the expectations match
  for state_name in sorted(_STATES.keys()):
    state = _STATES[state_name]
    if state['type'] == 'FrontBack':
      num_states = len(state['states'])
      # NOTE(review): integer division intended; use // if ported to Py3.
      for ndx, group in enumerate(Grouper(num_states / 2, state['states'])):
        args = []
        for item in group:
          if 'expected' in item:
            args.append(item['expected'])
          else:
            args.append(item['default'])
        file.Write(
            " EXPECT_CALL(*gl_, %s(%s, %s))\n" %
            (state['func'], ('GL_FRONT', 'GL_BACK')[ndx], ", ".join(args)))
        file.Write(" .Times(1)\n")
        file.Write(" .RetiresOnSaturation();\n")
    else:
      args = []
      for item in state['states']:
        if 'expected' in item:
          args.append(item['expected'])
        else:
          args.append(item['default'])
      file.Write(" EXPECT_CALL(*gl_, %s(%s))\n" %
                 (state['func'], ", ".join(args)))
      file.Write(" .Times(1)\n")
      file.Write(" .RetiresOnSaturation();\n")
  file.Write("""}
""")
  file.Close()
def WriteGLES2Header(self, filename):
  """Writes the Chromium-specific GLES2 header."""
  writer = CHeaderWriter(
      filename,
      "// This file contains Chromium-specific GLES2 declarations.\n\n")
  for func in self.original_functions:
    func.WriteGLES2Header(writer)
  writer.Write("\n")
  writer.Close()
def WriteGLES2CLibImplementation(self, filename):
  """Writes the GLES2 c lib implementation."""
  file = CHeaderWriter(
      filename,
      "// These functions emulate GLES2 over command buffers.\n")
  for func in self.original_functions:
    func.WriteGLES2CLibImplementation(file)
  # Name -> function-pointer table used for dynamic lookup by name.
  file.Write("""
namespace gles2 {
NameToFunc g_gles2_function_table[] = {
""")
  for func in self.original_functions:
    file.Write(
        ' { "gl%s", reinterpret_cast<GLES2FunctionPointer>(gl%s), },\n' %
        (func.name, func.name))
  file.Write(""" { NULL, NULL, },
};
} // namespace gles2
""")
  file.Close()
def WriteGLES2InterfaceHeader(self, filename):
  """Writes the GLES2 interface declarations header."""
  writer = CHeaderWriter(
      filename,
      "// This file is included by gles2_interface.h to declare the\n"
      "// GL api functions.\n")
  for func in self.original_functions:
    func.WriteGLES2InterfaceHeader(writer)
  writer.Close()
def WriteGLES2InterfaceStub(self, filename):
  """Writes the GLES2 interface stub header."""
  writer = CHeaderWriter(
      filename,
      "// This file is included by gles2_interface_stub.h.\n")
  for func in self.original_functions:
    func.WriteGLES2InterfaceStub(writer)
  writer.Close()
def WriteGLES2InterfaceStubImpl(self, filename):
  """Writes the GLES2 interface stub implementation."""
  writer = CHeaderWriter(
      filename,
      "// This file is included by gles2_interface_stub.cc.\n")
  for func in self.original_functions:
    func.WriteGLES2InterfaceStubImpl(writer)
  writer.Close()
def WriteGLES2ImplementationHeader(self, filename):
  """Writes the GLES2 Implementation declarations header."""
  writer = CHeaderWriter(
      filename,
      "// This file is included by gles2_implementation.h to declare the\n"
      "// GL api functions.\n")
  for func in self.original_functions:
    func.WriteGLES2ImplementationHeader(writer)
  writer.Close()
def WriteGLES2Implementation(self, filename):
  """Writes the GLES2 Implementation definitions."""
  writer = CHeaderWriter(
      filename,
      "// This file is included by gles2_implementation.cc to define the\n"
      "// GL api functions.\n")
  for func in self.original_functions:
    func.WriteGLES2Implementation(writer)
  writer.Close()
def WriteGLES2TraceImplementationHeader(self, filename):
  """Writes the GLES2 Trace Implementation header."""
  writer = CHeaderWriter(
      filename,
      "// This file is included by gles2_trace_implementation.h\n")
  for func in self.original_functions:
    func.WriteGLES2TraceImplementationHeader(writer)
  writer.Close()
def WriteGLES2TraceImplementation(self, filename):
  """Writes the GLES2 Trace Implementation."""
  writer = CHeaderWriter(
      filename,
      "// This file is included by gles2_trace_implementation.cc\n")
  for func in self.original_functions:
    func.WriteGLES2TraceImplementation(writer)
  writer.Close()
def WriteGLES2ImplementationUnitTests(self, filename):
  """Writes the GLES2 implementation unit tests."""
  writer = CHeaderWriter(
      filename,
      "// This file is included by gles2_implementation.h to declare the\n"
      "// GL api functions.\n")
  for func in self.original_functions:
    func.WriteGLES2ImplementationUnitTest(writer)
  writer.Close()
def WriteServiceUtilsHeader(self, filename):
  """Writes the gles2 auto generated utility header (validator members)."""
  writer = CHeaderWriter(filename)
  for enum_name in sorted(_ENUM_LISTS.keys()):
    writer.Write("ValueValidator<%s> %s;\n" %
                 (_ENUM_LISTS[enum_name]['type'], ToUnderscore(enum_name)))
  writer.Write("\n")
  writer.Close()
def WriteServiceUtilsImplementation(self, filename):
  """Writes the gles2 auto generated utility implementation.

  Emits one static table of valid values per enum list (when the list is
  non-empty), then the Validators constructor that wires each table into
  its ValueValidator member.
  """
  file = CHeaderWriter(filename)
  enums = sorted(_ENUM_LISTS.keys())
  for enum in enums:
    if len(_ENUM_LISTS[enum]['valid']) > 0:
      file.Write("static %s valid_%s_table[] = {\n" %
                 (_ENUM_LISTS[enum]['type'], ToUnderscore(enum)))
      for value in _ENUM_LISTS[enum]['valid']:
        file.Write(" %s,\n" % value)
      file.Write("};\n")
      file.Write("\n")
  file.Write("Validators::Validators()\n")
  # Initializer-list punctuation: ':' before the first member, spaces
  # before later ones, and ' {' instead of ',' after the last.
  pre = ': '
  post = ','
  for count, enum in enumerate(enums):
    if count + 1 == len(enums):
      post = ' {'
    if len(_ENUM_LISTS[enum]['valid']) > 0:
      code = """ %(pre)s%(name)s(
valid_%(name)s_table, arraysize(valid_%(name)s_table))%(post)s
"""
    else:
      code = """ %(pre)s%(name)s()%(post)s
"""
    file.Write(code % {
        'name': ToUnderscore(enum),
        'pre': pre,
        'post': post,
    })
    pre = ' '
  # Stray trailing semicolon removed.
  file.Write("}\n\n")
  file.Close()
def WriteCommonUtilsHeader(self, filename):
  """Writes the gles2 common utility header (GetString declarations)."""
  writer = CHeaderWriter(filename)
  for enum_name in sorted(_ENUM_LISTS.keys()):
    if _ENUM_LISTS[enum_name]['type'] == 'GLenum':
      writer.Write("static std::string GetString%s(uint32 value);\n" %
                   enum_name)
  writer.Write("\n")
  writer.Close()
def WriteCommonUtilsImpl(self, filename):
  """Writes the gles2 common utility implementation.

  Scans the GLES2 headers for #define'd GL_* enum values to build the
  value -> name table, then emits a GetString helper per GLenum list.
  """
  enum_re = re.compile(r'\#define\s+(GL_[a-zA-Z0-9_]+)\s+([0-9A-Fa-fx]+)')
  # Renamed from `dict`, which shadowed the builtin.
  name_by_value = {}
  for fname in ['../../third_party/khronos/GLES2/gl2.h',
                '../../third_party/khronos/GLES2/gl2ext.h',
                '../../gpu/GLES2/gl2chromium.h',
                '../../gpu/GLES2/gl2extchromium.h']:
    # Explicitly close the header files (they were previously leaked until
    # garbage collection).
    header = open(fname)
    try:
      lines = header.readlines()
    finally:
      header.close()
    for line in lines:
      m = enum_re.match(line)
      if m:
        name = m.group(1)
        value = m.group(2)
        # Keep only plausibly-sized literals and the first name per value.
        if len(value) <= 10 and value not in name_by_value:
          name_by_value[value] = name
  file = CHeaderWriter(filename)
  file.Write("static GLES2Util::EnumToString enum_to_string_table[] = {\n")
  for value in name_by_value:
    file.Write(' { %s, "%s", },\n' % (value, name_by_value[value]))
  file.Write("""};
const GLES2Util::EnumToString* GLES2Util::enum_to_string_table_ =
enum_to_string_table;
const size_t GLES2Util::enum_to_string_table_len_ =
sizeof(enum_to_string_table) / sizeof(enum_to_string_table[0]);
""")
  enums = sorted(_ENUM_LISTS.keys())
  for enum in enums:
    if _ENUM_LISTS[enum]['type'] == 'GLenum':
      file.Write("std::string GLES2Util::GetString%s(uint32 value) {\n" %
                 enum)
      if len(_ENUM_LISTS[enum]['valid']) > 0:
        file.Write(" static EnumToString string_table[] = {\n")
        for value in _ENUM_LISTS[enum]['valid']:
          file.Write(' { %s, "%s" },\n' % (value, value))
        file.Write(""" };
return GLES2Util::GetQualifiedEnumString(
string_table, arraysize(string_table), value);
}
""")
      else:
        file.Write(""" return GLES2Util::GetQualifiedEnumString(
NULL, 0, value);
}
""")
  file.Close()
def WritePepperGLES2Interface(self, filename, dev):
  """Writes the Pepper OpenGLES interface definition.

  Args:
    dev: when true, emit only the dev-channel interfaces (and include the
        stable header); otherwise emit the stable interfaces.
  """
  file = CHeaderWriter(
      filename,
      "// OpenGL ES interface.\n",
      2)
  file.Write("#include \"ppapi/c/pp_resource.h\"\n")
  if dev:
    file.Write("#include \"ppapi/c/ppb_opengles2.h\"\n\n")
  else:
    # Emit the GL typedefs only when <GLES2/gl2.h> was not already included.
    file.Write("\n#ifndef __gl2_h_\n")
    for (k, v) in _GL_TYPES.iteritems():
      file.Write("typedef %s %s;\n" % (v, k))
    file.Write("#endif // __gl2_h_\n\n")
  for interface in self.pepper_interfaces:
    if interface.dev != dev:
      continue
    # Interface version macros followed by the function-pointer struct.
    file.Write("#define %s_1_0 \"%s;1.0\"\n" %
               (interface.GetInterfaceName(), interface.GetInterfaceString()))
    file.Write("#define %s %s_1_0\n" %
               (interface.GetInterfaceName(), interface.GetInterfaceName()))
    file.Write("\nstruct %s {\n" % interface.GetStructName())
    for func in self.original_functions:
      if not func.InPepperInterface(interface):
        continue
      # Each entry takes a leading PP_Resource context argument.
      original_arg = func.MakeTypedOriginalArgString("")
      context_arg = "PP_Resource context"
      if len(original_arg):
        arg = context_arg + ", " + original_arg
      else:
        arg = context_arg
      file.Write(" %s (*%s)(%s);\n" % (func.return_type, func.name, arg))
    file.Write("};\n\n")
  file.Close()
def WritePepperGLES2Implementation(self, filename):
  """Writes the Pepper OpenGLES interface implementation."""
  file = CWriter(filename)
  file.Write(_LICENSE)
  file.Write(_DO_NOT_EDIT_WARNING)
  file.Write("#include \"ppapi/shared_impl/ppb_opengles2_shared.h\"\n\n")
  file.Write("#include \"base/logging.h\"\n")
  file.Write("#include \"gpu/command_buffer/client/gles2_implementation.h\"\n")
  file.Write("#include \"ppapi/shared_impl/ppb_graphics_3d_shared.h\"\n")
  file.Write("#include \"ppapi/thunk/enter.h\"\n\n")
  file.Write("namespace ppapi {\n\n")
  file.Write("namespace {\n\n")
  # Helper that resolves a PP_Resource to its GLES2Implementation.
  file.Write("gpu::gles2::GLES2Implementation*"
             " GetGLES(PP_Resource context) {\n")
  file.Write(" thunk::EnterResource<thunk::PPB_Graphics3D_API>"
             " enter_g3d(context, false);\n")
  file.Write(" DCHECK(enter_g3d.succeeded());\n")
  file.Write(" return static_cast<PPB_Graphics3D_Shared*>"
             "(enter_g3d.object())->gles2_impl();\n")
  file.Write("}\n\n")
  # One forwarding function per GL entry point in any Pepper extension.
  for func in self.original_functions:
    if not func.InAnyPepperExtension():
      continue
    original_arg = func.MakeTypedOriginalArgString("")
    context_arg = "PP_Resource context_id"
    if len(original_arg):
      arg = context_arg + ", " + original_arg
    else:
      arg = context_arg
    file.Write("%s %s(%s) {\n" % (func.return_type, func.name, arg))
    return_str = "" if func.return_type == "void" else "return "
    file.Write(" %sGetGLES(context_id)->%s(%s);\n" %
               (return_str, func.original_name,
                func.MakeOriginalArgString("")))
    file.Write("}\n\n")
  file.Write("} // namespace\n")
  # Per-interface static struct of function pointers plus its getter.
  for interface in self.pepper_interfaces:
    file.Write("const %s* PPB_OpenGLES2_Shared::Get%sInterface() {\n" %
               (interface.GetStructName(), interface.GetName()))
    file.Write(" static const struct %s "
               "ppb_opengles2 = {\n" % interface.GetStructName())
    file.Write(" &")
    file.Write(",\n &".join(
        f.name for f in self.original_functions
        if f.InPepperInterface(interface)))
    file.Write("\n")
    file.Write(" };\n")
    file.Write(" return &ppb_opengles2;\n")
    file.Write("}\n")
  file.Write("} // namespace ppapi\n")
  file.Close()
def WriteGLES2ToPPAPIBridge(self, filename):
  """Connects GLES2 helper library to PPB_OpenGLES2 interface"""
  file = CWriter(filename)
  file.Write(_LICENSE)
  file.Write(_DO_NOT_EDIT_WARNING)
  file.Write("#ifndef GL_GLEXT_PROTOTYPES\n")
  file.Write("#define GL_GLEXT_PROTOTYPES\n")
  file.Write("#endif\n")
  file.Write("#include <GLES2/gl2.h>\n")
  file.Write("#include <GLES2/gl2ext.h>\n")
  file.Write("#include \"ppapi/lib/gl/gles2/gl2ext_ppapi.h\"\n\n")
  for func in self.original_functions:
    if not func.InAnyPepperExtension():
      continue
    # Functions without a 'pepper_interface' fall back to the '' interface.
    interface = self.interface_info[func.GetInfo('pepper_interface') or '']
    file.Write("%s GL_APIENTRY gl%s(%s) {\n" %
               (func.return_type, func.name,
                func.MakeTypedOriginalArgString("")))
    return_str = "" if func.return_type == "void" else "return "
    interface_str = "glGet%sInterfacePPAPI()" % interface.GetName()
    original_arg = func.MakeOriginalArgString("")
    context_arg = "glGetCurrentContextPPAPI()"
    if len(original_arg):
      arg = context_arg + ", " + original_arg
    else:
      arg = context_arg
    if interface.GetName():
      # Named (extension) interfaces may be absent at runtime: null-check
      # the struct and fall back to returning 0 for non-void functions.
      file.Write(" const struct %s* ext = %s;\n" %
                 (interface.GetStructName(), interface_str))
      file.Write(" if (ext)\n")
      file.Write(" %sext->%s(%s);\n" %
                 (return_str, func.name, arg))
      if return_str:
        file.Write(" %s0;\n" % return_str)
    else:
      # The core interface is assumed present; call straight through.
      file.Write(" %s%s->%s(%s);\n" %
                 (return_str, interface_str, func.name, arg))
    file.Write("}\n\n")
  file.Close()
def main(argv):
  """Entry point: parse options, patch enum tables, and run the generator.

  Returns 0 on success, 1 if the generator recorded any errors.
  """
  parser = OptionParser()
  parser.add_option(
      "-g", "--generate-implementation-templates", action="store_true",
      help="generates files that are generally hand edited..")
  parser.add_option(
      "--alternate-mode", type="choice",
      choices=("ppapi", "chrome_ppapi", "chrome_ppapi_proxy", "nacl_ppapi"),
      help="generate files for other projects. \"ppapi\" will generate ppapi "
      "bindings. \"chrome_ppapi\" generate chrome implementation for ppapi. "
      "\"chrome_ppapi_proxy\" will generate the glue for the chrome IPC ppapi"
      "proxy. \"nacl_ppapi\" will generate NaCl implementation for ppapi")
  parser.add_option(
      "--output-dir",
      help="base directory for resulting files, under chrome/src. default is "
      "empty. Use this if you want the result stored under gen.")
  parser.add_option(
      "-v", "--verbose", action="store_true",
      help="prints more output.")

  (options, args) = parser.parse_args(args=argv)

  # Add in states and capabilites to GLState
  # (mutates the module-level _ENUM_LISTS table in place before parsing).
  for state_name in sorted(_STATES.keys()):
    state = _STATES[state_name]
    if 'enum' in state:
      _ENUM_LISTS['GLState']['valid'].append(state['enum'])
    else:
      for item in state['states']:
        _ENUM_LISTS['GLState']['valid'].append(item['enum'])
  for capability in _CAPABILITY_FLAGS:
    _ENUM_LISTS['GLState']['valid'].append("GL_%s" % capability['name'].upper())

  # This script lives under gpu/command_buffer, cd to base directory.
  os.chdir(os.path.dirname(__file__) + "/../..")

  gen = GLGenerator(options.verbose)
  gen.ParseGLH("common/GLES2/gl2.h")

  # Support generating files under gen/
  if options.output_dir != None:
    os.chdir(options.output_dir)

  if options.alternate_mode == "ppapi":
    # To trigger this action, do "make ppapi_gles_bindings"
    os.chdir("ppapi");
    gen.WritePepperGLES2Interface("c/ppb_opengles2.h", False)
    gen.WritePepperGLES2Interface("c/dev/ppb_opengles2ext_dev.h", True)
    gen.WriteGLES2ToPPAPIBridge("lib/gl/gles2/gles2.c")

  elif options.alternate_mode == "chrome_ppapi":
    # To trigger this action, do "make ppapi_gles_implementation"
    gen.WritePepperGLES2Implementation(
        "ppapi/shared_impl/ppb_opengles2_shared.cc")

  else:
    # Default mode: regenerate every *_autogen.h under gpu/command_buffer.
    os.chdir("gpu/command_buffer")
    gen.WriteCommandIds("common/gles2_cmd_ids_autogen.h")
    gen.WriteFormat("common/gles2_cmd_format_autogen.h")
    gen.WriteFormatTest("common/gles2_cmd_format_test_autogen.h")
    gen.WriteGLES2InterfaceHeader("client/gles2_interface_autogen.h")
    gen.WriteGLES2InterfaceStub("client/gles2_interface_stub_autogen.h")
    gen.WriteGLES2InterfaceStubImpl(
        "client/gles2_interface_stub_impl_autogen.h")
    gen.WriteGLES2ImplementationHeader("client/gles2_implementation_autogen.h")
    gen.WriteGLES2Implementation("client/gles2_implementation_impl_autogen.h")
    gen.WriteGLES2ImplementationUnitTests(
        "client/gles2_implementation_unittest_autogen.h")
    gen.WriteGLES2TraceImplementationHeader(
        "client/gles2_trace_implementation_autogen.h")
    gen.WriteGLES2TraceImplementation(
        "client/gles2_trace_implementation_impl_autogen.h")
    gen.WriteGLES2CLibImplementation("client/gles2_c_lib_autogen.h")
    gen.WriteCmdHelperHeader("client/gles2_cmd_helper_autogen.h")
    gen.WriteServiceImplementation("service/gles2_cmd_decoder_autogen.h")
    gen.WriteServiceContextStateHeader("service/context_state_autogen.h")
    gen.WriteServiceContextStateImpl("service/context_state_impl_autogen.h")
    gen.WriteClientContextStateHeader("client/client_context_state_autogen.h")
    gen.WriteClientContextStateImpl(
        "client/client_context_state_impl_autogen.h")
    gen.WriteServiceUnitTests("service/gles2_cmd_decoder_unittest_%d_autogen.h")
    gen.WriteServiceUtilsHeader("service/gles2_cmd_validation_autogen.h")
    gen.WriteServiceUtilsImplementation(
        "service/gles2_cmd_validation_implementation_autogen.h")
    gen.WriteCommonUtilsHeader("common/gles2_cmd_utils_autogen.h")
    gen.WriteCommonUtilsImpl("common/gles2_cmd_utils_implementation_autogen.h")
    gen.WriteGLES2Header("../GLES2/gl2chromium_autogen.h")

  if gen.errors > 0:
    print "%d errors" % gen.errors
    return 1
  return 0
if __name__ == '__main__':
  # Run the generator, forwarding its return code as the process exit status.
  sys.exit(main(sys.argv[1:]))
| {
"content_hash": "6a261b73a8bf89366626c55b340f5a8b",
"timestamp": "",
"source": "github",
"line_count": 7576,
"max_line_length": 81,
"avg_line_length": 31.80464625131996,
"alnum_prop": 0.5984220923669444,
"repo_name": "zcbenz/cefode-chromium",
"id": "f2cf609484bdcf8bf744e208782da6f41e03ecb8",
"size": "240952",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gpu/command_buffer/build_gles2_cmd_buffer.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "1174304"
},
{
"name": "Awk",
"bytes": "9519"
},
{
"name": "C",
"bytes": "76026099"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "157904700"
},
{
"name": "DOT",
"bytes": "1559"
},
{
"name": "F#",
"bytes": "381"
},
{
"name": "Java",
"bytes": "3225038"
},
{
"name": "JavaScript",
"bytes": "18180217"
},
{
"name": "Logos",
"bytes": "4517"
},
{
"name": "Matlab",
"bytes": "5234"
},
{
"name": "Objective-C",
"bytes": "7139426"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "932901"
},
{
"name": "Python",
"bytes": "8654916"
},
{
"name": "R",
"bytes": "262"
},
{
"name": "Ragel in Ruby Host",
"bytes": "3621"
},
{
"name": "Shell",
"bytes": "1533012"
},
{
"name": "Tcl",
"bytes": "277077"
},
{
"name": "XML",
"bytes": "13493"
}
],
"symlink_target": ""
} |
import os
import time
| {
"content_hash": "6f202ca6d658e45b414c6af6591504de",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 11,
"avg_line_length": 11,
"alnum_prop": 0.8181818181818182,
"repo_name": "au-chrismor/internetoffish",
"id": "59a3204a18beb15f000e8e2dc4c88299c5af12b0",
"size": "22",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "basic/python/tempreader.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1432"
},
{
"name": "Python",
"bytes": "1624"
}
],
"symlink_target": ""
} |
from threading import Thread, Timer
import websocket
import logging
import time
try:
import simplejson as json
except ImportError:
import json
class Connection(Thread):
    """Daemon thread that owns a Pusher websocket connection.

    Dispatches "pusher:*" protocol events to callbacks registered via
    bind(), forwards channel events to `event_handler`, and manages
    reconnection, ping/pong keepalives and an overall connection timeout.
    """

    def __init__(self, event_handler, url, log_level=logging.INFO, daemon=True, reconnect_interval=10):
        """Set up connection state and bind the pusher protocol handlers.

        :param event_handler: callable(event_name, data, channel_name) invoked
            for channel events.
        :param url: websocket URL to connect to.
        :param log_level: logging level; DEBUG also enables websocket tracing.
        :param daemon: whether to run as a daemon thread.
        :param reconnect_interval: default seconds between reconnect attempts.
        """
        self.event_handler = event_handler
        self.url = url

        self.socket = None
        self.socket_id = ""

        # event name -> list of callbacks
        self.event_callbacks = {}

        self.disconnect_called = False
        self.needs_reconnect = False
        self.default_reconnect_interval = reconnect_interval
        self.reconnect_interval = reconnect_interval

        self.pong_timer = None
        self.pong_received = False
        self.pong_timeout = 30

        self.bind("pusher:connection_established", self._connect_handler)
        self.bind("pusher:connection_failed", self._failed_handler)
        self.bind("pusher:pong", self._pong_handler)
        self.bind("pusher:ping", self._ping_handler)
        self.bind("pusher:error", self._pusher_error_handler)

        self.state = "initialized"

        self.logger = logging.getLogger(self.__module__)  # create a new logger
        if log_level == logging.DEBUG:
            websocket.enableTrace(True)
        self.logger.setLevel(log_level)

        # From Martyn's comment at:
        # https://pusher.tenderapp.com/discussions/problems/36-no-messages-received-after-1-idle-minute-heartbeat
        # "We send a ping every 5 minutes in an attempt to keep connections
        # alive..."
        # This is why we set the connection timeout to 5 minutes, since we can
        # expect a pusher heartbeat message every 5 minutes.  Adding 5 sec to
        # account for small timing delays which may cause messages to not be
        # received in exact 5 minute intervals.
        self.connection_timeout = 305
        self.connection_timer = None

        self.ping_interval = 120
        self.ping_timer = None

        Thread.__init__(self)
        self.daemon = daemon

    def bind(self, event_name, callback):
        """Bind an event to a callback.

        :param event_name: The name of the event to bind to.
        :type event_name: str

        :param callback: The callback to notify of this event.
        """
        if event_name not in self.event_callbacks:
            self.event_callbacks[event_name] = []
        self.event_callbacks[event_name].append(callback)

    def disconnect(self):
        """Permanently close the connection and wait for the thread to end.

        NOTE(review): this joins the connection thread, so it must not be
        called from within the connection thread itself.
        """
        self.needs_reconnect = False
        self.disconnect_called = True
        if self.socket:
            self.socket.close()
        self.join()

    def reconnect(self, reconnect_interval=None):
        """Schedule a reconnect after `reconnect_interval` seconds.

        Closing the socket makes run_forever() return; the loop in
        _connect() then re-opens the connection.
        """
        if reconnect_interval is None:
            reconnect_interval = self.default_reconnect_interval

        self.logger.info("Connection: Reconnect in %s" % reconnect_interval)
        self.reconnect_interval = reconnect_interval

        self.needs_reconnect = True
        if self.socket:
            self.socket.close()

    def run(self):
        self._connect()

    def _connect(self):
        """Open the websocket and keep reconnecting until disconnected."""
        self.state = "connecting"

        self.socket = websocket.WebSocketApp(
            self.url,
            on_open=self._on_open,
            on_message=self._on_message,
            on_error=self._on_error,
            on_close=self._on_close
        )

        self.socket.run_forever()

        while self.needs_reconnect and not self.disconnect_called:
            self.logger.info("Attempting to connect again in %s seconds."
                             % self.reconnect_interval)
            self.state = "unavailable"
            time.sleep(self.reconnect_interval)

            # We need to set this flag since closing the socket will set it to
            # false
            self.socket.keep_running = True
            self.socket.run_forever()

    def _on_open(self, ws):
        self.logger.info("Connection: Connection opened")
        # Send a ping right away to inform that the connection is alive. If you
        # don't do this, it takes the ping interval to subscribe to channel and
        # events
        self.send_ping()
        self._start_timers()

    def _on_error(self, ws, error):
        self.logger.info("Connection: Error - %s" % error)
        self.state = "failed"
        self.needs_reconnect = True

    def _on_message(self, ws, message):
        """Dispatch an incoming frame to connection or channel handlers."""
        self.logger.info("Connection: Message - %s" % message)

        # Stop our timeout timer, since we got some data
        self._stop_timers()

        params = self._parse(message)

        if 'event' in params:
            if 'channel' not in params:
                # We've got a connection event.  Lets handle it.
                if params['event'] in self.event_callbacks:
                    for callback in self.event_callbacks[params['event']]:
                        try:
                            callback(params['data'])
                        except Exception:
                            self.logger.exception("Callback raised unhandled")
                else:
                    self.logger.info("Connection: Unhandled event")
            else:
                # We've got a channel event.  Lets pass it up to the pusher
                # so it can be handled by the appropriate channel.
                self.event_handler(
                    params['event'],
                    params['data'],
                    params['channel']
                )

        # We've handled our data, so restart our connection timeout handler
        self._start_timers()

    def _on_close(self, ws, *args):
        self.logger.info("Connection: Connection closed")
        self.state = "disconnected"
        self._stop_timers()

    @staticmethod
    def _parse(message):
        """Decode a raw websocket frame (JSON text) into a dict."""
        return json.loads(message)

    def _stop_timers(self):
        if self.ping_timer:
            self.ping_timer.cancel()

        if self.connection_timer:
            self.connection_timer.cancel()

        if self.pong_timer:
            self.pong_timer.cancel()

    def _start_timers(self):
        """(Re)arm the ping and connection-timeout timers."""
        self._stop_timers()

        self.ping_timer = Timer(self.ping_interval, self.send_ping)
        self.ping_timer.start()

        self.connection_timer = Timer(self.connection_timeout, self._connection_timed_out)
        self.connection_timer.start()

    def send_event(self, event_name, data, channel_name=None):
        """Send an event frame; best-effort (failures are only logged)."""
        event = {'event': event_name, 'data': data}
        if channel_name:
            event['channel'] = channel_name

        self.logger.info("Connection: Sending event - %s" % event)
        try:
            self.socket.send(json.dumps(event))
        except Exception as e:
            self.logger.error("Failed send event: %s" % e)

    def send_ping(self):
        """Send a pusher:ping and arm the pong-timeout timer."""
        self.logger.info("Connection: ping to pusher")
        try:
            self.socket.send(json.dumps({'event': 'pusher:ping', 'data': ''}))
        except Exception as e:
            self.logger.error("Failed send ping: %s" % e)
        self.pong_timer = Timer(self.pong_timeout, self._check_pong)
        self.pong_timer.start()

    def send_pong(self):
        """Answer a server pusher:ping."""
        self.logger.info("Connection: pong to pusher")
        try:
            self.socket.send(json.dumps({'event': 'pusher:pong', 'data': ''}))
        except Exception as e:
            self.logger.error("Failed send pong: %s" % e)

    def _check_pong(self):
        """Fires after pong_timeout: reconnect if no pong arrived."""
        self.pong_timer.cancel()

        if self.pong_received:
            self.pong_received = False
        else:
            self.logger.info("Did not receive pong in time.  Will attempt to reconnect.")
            self.state = "failed"
            self.reconnect()

    def _connect_handler(self, data):
        """pusher:connection_established - record our socket id."""
        parsed = json.loads(data)

        self.socket_id = parsed['socket_id']

        self.state = "connected"

    def _failed_handler(self, data):
        self.state = "failed"

    def _ping_handler(self, data):
        self.send_pong()
        # Restart our timers since we received something on the connection
        self._start_timers()

    def _pong_handler(self, data):
        self.logger.info("Connection: pong from pusher")
        self.pong_received = True

    def _pusher_error_handler(self, data):
        """Handle pusher:error per the error-code ranges of the protocol."""
        if 'code' in data:
            error_code = None

            try:
                error_code = int(data['code'])
            except (TypeError, ValueError):
                # BUG FIX: was a bare `except:`, which would also swallow
                # KeyboardInterrupt/SystemExit.  Only conversion failures
                # should leave error_code as None.
                pass

            if error_code is not None:
                self.logger.error("Connection: Received error %s" % error_code)

                if (error_code >= 4000) and (error_code <= 4099):
                    # The connection SHOULD NOT be re-established unchanged
                    self.logger.info("Connection: Error is unrecoverable.  Disconnecting")
                    self.disconnect()
                elif (error_code >= 4100) and (error_code <= 4199):
                    # The connection SHOULD be re-established after backing off
                    self.reconnect()
                elif (error_code >= 4200) and (error_code <= 4299):
                    # The connection SHOULD be re-established immediately
                    self.reconnect(0)
                else:
                    pass
            else:
                self.logger.error("Connection: Unknown error code")
        else:
            self.logger.error("Connection: No error code supplied")

    def _connection_timed_out(self):
        self.logger.info("Did not receive any data in time.  Reconnecting.")
        self.state = "failed"
        self.reconnect()
| {
"content_hash": "22957dfcb790f22385a9075cf83cbac8",
"timestamp": "",
"source": "github",
"line_count": 279,
"max_line_length": 113,
"avg_line_length": 34.03942652329749,
"alnum_prop": 0.5777613983363167,
"repo_name": "ekulyk/PythonPusherClient",
"id": "75fe53fd2d56b93efda68fa86299c9e323e4b3ec",
"size": "9497",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pusherclient/connection.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24097"
}
],
"symlink_target": ""
} |
from django.http import HttpResponseNotFound, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response
from django.template import RequestContext
from remindme.forms import ReminderForm
from remindme.models import Reminder
import parsedatetime
import datetime
import time
def home(request):
    """Render the public landing page (no authentication required)."""
    return render_to_response("index.html")
@login_required()
def dashboard(request):
    """Render the dashboard listing the logged-in user's reminders.

    BUG FIX: previously used Reminder.objects.all(), which exposed every
    user's reminders to any logged-in user.  Filter by the requesting user,
    consistent with create(), which stores user=request.user.
    """
    # `reminders` is deliberately a local: locals() feeds the template context.
    reminders = Reminder.objects.filter(user=request.user)
    return render_to_response("dashboard.html", locals(), context_instance=RequestContext(request))
@login_required()
def create(request):
    """Show the reminder-creation form, or create a reminder on valid POST.

    The `form` and `request` names are significant: locals() is passed as
    the template context, so the template sees them by these names.
    """
    if request.method != 'POST':
        form = ReminderForm()
        return render_to_response("create.html", locals(), context_instance=RequestContext(request))

    form = ReminderForm(request.POST)
    if not form.is_valid():
        # Re-render with the bound form so validation errors are shown.
        return render_to_response("create.html", locals(), context_instance=RequestContext(request))

    # parsedatetime returns (struct_time, status); convert the struct_time
    # into a datetime via an epoch timestamp.
    parsed_struct = parsedatetime.Calendar().parse(form.cleaned_data['when'])[0]
    due = datetime.datetime.fromtimestamp(time.mktime(parsed_struct))

    Reminder.objects.create(
        user = request.user,
        description = form.cleaned_data['description'],
        date_and_time = due,
    )
    return HttpResponseRedirect("/dashboard")
| {
"content_hash": "74006088b3fc15fda2f1c6dc43d9f684",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 99,
"avg_line_length": 34.86842105263158,
"alnum_prop": 0.670188679245283,
"repo_name": "hitchtest/django-remindme",
"id": "f8ff1cf7a79fc8c0a20f0e93c37decf1334d8bde",
"size": "1325",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "remindme/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3321"
},
{
"name": "Python",
"bytes": "12257"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from aquilon.exceptions_ import InternalError
class ConsistencyChecker(object):
    """Base class for consistency checks.

    Subclasses override check(); problems found are accumulated per
    (key, item) via failure() and reported by process_failures().
    """

    def __init__(self, config, session, logger):
        self.session = session
        self.config = config
        self.logger = logger
        # key -> item -> list of problem strings
        self._failures = {}

    def check(self, repair=False):  # pylint: disable=W0613
        """Perform the consistency check.

        Must be overridden by concrete checker subclasses.
        """
        raise InternalError("The check method of %s is unimplemented" %
                            self.__class__.__name__)

    def failure(self, key, item, problem):
        """Record a single problem string under the given key and item."""
        self._failures.setdefault(key, {}).setdefault(item, []).append(problem)

    def process_failures(self):
        """Print every recorded failure, sorted by key then item.

        Returns True when no failures were recorded, False otherwise.
        """
        for key in sorted(self._failures):
            per_item = self._failures[key]
            for item in sorted(per_item):
                for problem in per_item[item]:
                    print(item + ' ' + problem)
        return not self._failures
| {
"content_hash": "8f13fd4e9d3be7be652782ed71f5c0e4",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 77,
"avg_line_length": 33.08695652173913,
"alnum_prop": 0.5893561103810775,
"repo_name": "guillaume-philippon/aquilon",
"id": "2638002d8391b470b9d1af4ab6ce39760c02e8e5",
"size": "2233",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/aquilon/consistency/checker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "3791"
},
{
"name": "Makefile",
"bytes": "5024"
},
{
"name": "Mako",
"bytes": "3996"
},
{
"name": "PLSQL",
"bytes": "69088"
},
{
"name": "Perl",
"bytes": "5030"
},
{
"name": "Python",
"bytes": "4257490"
},
{
"name": "SQLPL",
"bytes": "869"
},
{
"name": "Shell",
"bytes": "22083"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import argparse
import json
import sys
import repositorytools
from repositorytools.cli.common import CLI
from repositorytools.lib.repository import logger
__all__ = ['ArtifactCLI', 'artifact_cli']
class ArtifactCLI(CLI):
    """CLI for uploading, deleting, resolving and editing artifact metadata."""

    def _get_parser(self):
        """Build the argparse parser with one subcommand per operation."""
        parser = argparse.ArgumentParser(description='A command line tool for working with artifacts')
        subparsers = parser.add_subparsers()

        # upload
        subparser = subparsers.add_parser('upload', help='Uploads an artifact to repository, can detect name and'
                                                         ' version from filename')
        subparser.add_argument("-s", "--staging", action="store_true", default=False,
                               help="Uploads to a newly created staging repository which targets given repository")
        subparser.add_argument("-x", "--use-existing", action="store_true", default=False,
                               help="To be used with -s, doesn't create a new repo, but uploads directly to an existing"
                                    "staging repo")
        subparser.add_argument("--upload-filelist", action="store_true", default=False, help="uploads list of uploaded "
                                                                                            "files")
        subparser.add_argument("--artifact", help="name of artifact, if omitted, will be detected from filename")
        subparser.add_argument("--version", help="version of artifact, if omitted, will be detected from filename")
        subparser.add_argument("-d", "--description", dest="description", default='No description',
                               help="Description of a staging repository")
        subparser.add_argument("--use-direct-put", action="store_true", help="don't use REST API, but directly put the file to it's probable path. Doesn't generate maven metadata. Good for uploading to snapshot repositories.")
        subparser.add_argument("local_file", help="path to an artifact on your machine")
        subparser.add_argument("repo_id_or_profile_name", help="id of target repository (normal repo) or profile name (staging repo - option -s)")
        subparser.add_argument("group", help="artifact group")
        subparser.set_defaults(func=self.upload)

        # delete
        subparser = subparsers.add_parser('delete', help='Deletes an artifact from repository')
        subparser.add_argument("url", help="URL of the artifact")
        subparser.set_defaults(func=self.delete)

        # get metadata
        subparser = subparsers.add_parser('get-metadata', help="Prints artifact's metadata")
        subparser.add_argument("repo_id", help="id of repository containing the artifact")
        subparser.add_argument("coordinates", help="group:artifact:version[:classifier[:extension]]")
        subparser.set_defaults(func=self.get_metadata)

        # set metadata
        subparser = subparsers.add_parser('set-metadata', help="Sets artifact's metadata")
        subparser.add_argument("metadata", help="Dict in JSON format. All keys and values have to be strings,"
                                                "e.g. '{\"key1\":\"value1\",\"key2\":\"value2\"}'")
        subparser.add_argument("repo_id", help="id of repository containing the artifact")
        subparser.add_argument("coordinates", help="group:artifact:version[:classifier[:extension]]", nargs='+')
        subparser.set_defaults(func=self.set_metadata)

        # resolve
        subparser = subparsers.add_parser('resolve', help="Resolves artifacts' URLs")
        subparser.add_argument("repo_id", help="id of repository containing the artifact")
        subparser.add_argument("coordinates", help="group:artifact:version[:classifier[:extension]]", nargs='+')
        subparser.set_defaults(func=self.resolve)
        return parser

    def resolve(self, args):
        """Resolve each coordinate to a URL; print and return them newline-joined."""
        artifacts = [ repositorytools.RemoteArtifact.from_repo_id_and_coordinates(args.repo_id, coordinates_item)
                      for coordinates_item in args.coordinates ]

        for artifact in artifacts:
            self.repository.resolve_artifact(artifact)

        output = '\n'.join(artifact.url for artifact in artifacts)
        print(output)
        return output

    def upload(self, args):
        """Upload a local file, optionally via a (new or existing) staging repo.

        Exits the process with status 1 when artifact name/version cannot be
        detected from the filename.
        """
        try:
            artifact = repositorytools.LocalArtifact(local_path=args.local_file, group=args.group,
                                                     artifact=args.artifact, version=args.version)
        except repositorytools.NameVerDetectionError as e:
            logger.exception('Unable to create instance of local artifact: %s', e)
            sys.exit(1)

        if args.staging:
            if not args.use_existing:
                return self.repository.upload_artifacts_to_new_staging([artifact], args.repo_id_or_profile_name, True,
                                                                       description=args.description,
                                                                       upload_filelist=args.upload_filelist)
            else:
                return self.repository.upload_artifacts_to_staging([artifact], args.repo_id_or_profile_name, True,
                                                                   upload_filelist=args.upload_filelist)
        else:
            return self.repository.upload_artifacts([artifact], args.repo_id_or_profile_name, use_direct_put=args.use_direct_put)

    def delete(self, args):
        """Delete the artifact at the given URL."""
        self.repository.delete_artifact(args.url)

    def get_metadata(self, args):
        """Print and return the artifact's metadata as a JSON string."""
        artifact = repositorytools.RemoteArtifact.from_repo_id_and_coordinates(args.repo_id, args.coordinates)
        metadata = self.repository.get_artifact_metadata(artifact)
        output = json.dumps(metadata)
        print(output)
        return output

    def set_metadata(self, args):
        """Apply the given JSON metadata dict to each coordinate."""
        metadata = json.loads(args.metadata)

        for coordinates_item in args.coordinates:
            artifact = repositorytools.RemoteArtifact.from_repo_id_and_coordinates(args.repo_id, coordinates_item)
            self.repository.set_artifact_metadata(artifact, metadata)
# Module-level singleton; also used as the console-script entry point.
artifact_cli = ArtifactCLI()

if __name__ == '__main__':
    artifact_cli()
| {
"content_hash": "e4257522de7370b5ff82c0e707c57292",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 226,
"avg_line_length": 53.48717948717949,
"alnum_prop": 0.62320230105465,
"repo_name": "packagemgmt/repositorytools",
"id": "0e54458f17170a84c96ff02497904d01a6b61995",
"size": "6258",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "repositorytools/cli/commands/artifact.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "4727"
},
{
"name": "Python",
"bytes": "52927"
},
{
"name": "Shell",
"bytes": "308"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "deployment.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| {
"content_hash": "fac7753e7e774bfdf0656013aa762494",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 74,
"avg_line_length": 25.666666666666668,
"alnum_prop": 0.7142857142857143,
"repo_name": "strets123/chembiohub_ws",
"id": "442caf703ba649f0a9e22dea44302f986803e0ea",
"size": "253",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "959"
},
{
"name": "Python",
"bytes": "16558"
},
{
"name": "Shell",
"bytes": "12306"
}
],
"symlink_target": ""
} |
from sklearn import svm, datasets as ds, metrics
from sklearn.model_selection import train_test_split as splitter

# Load the breast-cancer dataset bundled with scikit-learn.
dataset = ds.load_breast_cancer()
print('sample # = {}'.format(len(dataset.data)))
print('target # = {}'.format(len(dataset.target)))
print('shape = {}'.format(dataset.data.shape))

# Hold out 40% of the samples for evaluation; fixed seed for repeatability.
X_train, X_test, y_train, y_test = splitter(dataset.data, dataset.target, test_size=0.4, random_state=6265456)

# Support-vector classifier with a linear kernel.
model = svm.SVC(kernel='linear')

print('training...')
model.fit(X_train, y_train)

# Predict on the held-out split.
predicted = model.predict(X_test)

# Report standard binary-classification metrics.
print("Accuracy:", metrics.accuracy_score(y_test, predicted))
print("Precision:", metrics.precision_score(y_test, predicted))
print("Recall:", metrics.recall_score(y_test, predicted))
| {
"content_hash": "5ca537fc5673a34abbd6a82642886795",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 108,
"avg_line_length": 29.263157894736842,
"alnum_prop": 0.6942446043165468,
"repo_name": "hiryou/MLPractice",
"id": "a4af7c1f6c39dd13d4850c3c57d6f9f7bf5de902",
"size": "1208",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "svm/svm_class.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "374211"
}
],
"symlink_target": ""
} |
import numpy as np
import warnings
# Silence DeprecationWarnings raised by the wrapped NMF libraries below.
warnings.simplefilter("ignore", DeprecationWarning)
# --------------------------------------------------------------
class SklNMF:
    """
    Wrapper class backed by the scikit-learn package NMF implementation.
    """
    def __init__( self, max_iters = 100, init_strategy = "random" ):
        """
        max_iters: maximum number of factorization iterations
        init_strategy: scikit-learn NMF initialization method (e.g. "random")
        """
        # BUG FIX: previously hard-coded `self.max_iters = 100`, silently
        # ignoring the max_iters argument.
        self.max_iters = max_iters
        self.init_strategy = init_strategy
        self.W = None  # document-topic factor from the last run
        self.H = None  # topic-term factor from the last run

    def apply( self, X, k = 2 ):
        """
        Apply NMF to the specified document-term matrix X, storing factors
        W (documents x k) and H (k x terms) on the instance.
        """
        from sklearn import decomposition
        self.W = None
        self.H = None
        model = decomposition.NMF(init=self.init_strategy, n_components=k, max_iter=self.max_iters)
        self.W = model.fit_transform(X)
        self.H = model.components_

    def rank_terms( self, topic_index, top = -1 ):
        """
        Return the top ranked terms for the specified topic, generated during
        the last NMF run.  A non-positive `top` (or one larger than the
        vocabulary) returns all term indices, ranked descending by weight.
        """
        if self.H is None:
            raise ValueError("No results for previous run available")
        # NB: reverse, so highest-weight terms come first
        top_indices = np.argsort( self.H[topic_index,:] )[::-1]
        # truncate if necessary
        if top < 1 or top > len(top_indices):
            return top_indices
        return top_indices[0:top]

    def generate_partition( self ):
        """Assign each document to its highest-weight topic."""
        if self.W is None:
            raise ValueError("No results for previous run available")
        return np.argmax( self.W, axis = 1 ).flatten().tolist()
class NimfaNMF:
    """
    Wrapper class backed by the Nimfa package NMF implementation.
    """
    def __init__( self, max_iters = 100, init_strategy = "random", update = "euclidean", method = "lsnmf" ):
        """
        max_iters: maximum number of factorization iterations
        init_strategy: nimfa seeding strategy (e.g. "random")
        update: "euclidean" (Frobenius objective) or divergence-based update
        method: nimfa factorization method name (e.g. "lsnmf")
        """
        self.max_iters = max_iters
        self.init_strategy = init_strategy
        self.W = None  # basis (document-topic) factor from the last run
        self.H = None  # coefficient (topic-term) factor from the last run
        self.update = update
        self.test_conv = 10  # convergence is tested every `test_conv` iterations
        self.method = method

    def apply( self, X, k = 2 ):
        """
        Apply NMF to the specified document-term matrix X, storing factors
        W (documents x k) and H (k x terms) on the instance.
        """
        import nimfa
        self.W = None
        self.H = None
        if self.update == "euclidean":
            objective = "fro"
        else:
            objective = "div"
        alg = nimfa.mf(X, method = self.method, max_iter = self.max_iters, rank = k, seed = self.init_strategy, update = self.update, objective = objective, test_conv = self.test_conv )
        res = nimfa.mf_run(alg)
        # BUG FIX: this was a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit.  The factors are either sparse (need
        # .todense()) or already dense, so only AttributeError is expected.
        try:
            self.W = res.basis().todense()
            self.H = res.coef().todense()
        except AttributeError:
            self.W = res.basis()
            self.H = res.coef()
        # last number of iterations
        self.n_iter = res.n_iter

    def rank_terms( self, topic_index, top = -1 ):
        """
        Return the top ranked terms for the specified topic, generated during
        the last NMF run.  A non-positive `top` (or one larger than the
        vocabulary) returns all term indices, ranked descending by weight.
        """
        if self.H is None:
            raise ValueError("No results for previous run available")
        # H may be a numpy matrix; flatten the row into a 1-D array first.
        h = np.array( self.H[topic_index,:] ).flatten()
        # NB: reverse ordering, so highest-weight terms come first
        top_indices = np.argsort(h)[::-1]
        # truncate
        if top < 1 or top > len(top_indices):
            return top_indices
        return top_indices[0:top]

    def generate_partition( self ):
        """Assign each document to its highest-weight topic.

        NOTE: W is a numpy matrix here, so flatten().tolist() yields a nested
        list and the trailing [0] unwraps it.
        """
        if self.W is None:
            raise ValueError("No results for previous run available")
        return np.argmax( self.W, axis = 1 ).flatten().tolist()[0]
| {
"content_hash": "a10a6dd9fa93d6db6c12d49b23818e74",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 180,
"avg_line_length": 29.07766990291262,
"alnum_prop": 0.6510851419031719,
"repo_name": "akiratu/topic-stability",
"id": "61238dada553226aa90c78a6cabd0cc1476b035f",
"size": "2995",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unsupervised/nmf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "93182"
}
],
"symlink_target": ""
} |
"""Константы для работы JIM протокола"""
# Возможные ключи в сообщениях от клиентов
PRESENCE = 'presence'
MSG = 'msg'
QUIT = 'quit'
# Кортеж возможных действий (будет дополняться)
ACTIONS = (PRESENCE, MSG, QUIT)
# Обязательные ключи в сообщениях от клиента
ACTION = 'action'
TIME = 'time'
# Кортеж из обязательных ключей для сообщений от клиента
MANDATORY_MESSAGE_KEYS = (ACTION, TIME)
# Обязательные ключи в ответах сервера
RESPONSE = 'response'
# Кортеж обязательных ключей в ответах от сервера
MANDATORY_RESPONSE_KEYS = (RESPONSE,)
# Коды ответов (будут дополняться)
BASIC_NOTICE = 100
OK = 200
ACCEPTED = 202
WRONG_REQUEST = 400 # неправильный запрос/json объект
SERVER_ERROR = 500
# Кортеж из кодов ответов
RESPONSE_CODES = (BASIC_NOTICE, OK, ACCEPTED, WRONG_REQUEST, SERVER_ERROR)
# Другие константы
USER = 'user'
ACCOUNT_NAME = 'account_name'
ADD_CONTACT = 'add_contact'
DEL_CONATCT = 'del_contact'
GET_CONTACTS = 'get_contacts'
QUANTITY = 'quantity'
TO = 'to'
FROM = 'from'
MESSAGE = 'message' | {
"content_hash": "14b211964c3ad12089a4058e44da7b29",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 74,
"avg_line_length": 25.023809523809526,
"alnum_prop": 0.7117031398667936,
"repo_name": "OOPSA45/Python-learn-",
"id": "a3c056d32e7b313e3d1d863044a8b62e8f62cc88",
"size": "1395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "my_package/jim/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "54282"
}
],
"symlink_target": ""
} |
from __future__ import division
import os, sys, re
from solid import *
from math import *
# Side indices (0-3).
RIGHT, TOP, LEFT, BOTTOM = range(4)
# Small tolerance value — presumably for float comparison / overlap nudging;
# confirm semantics at call sites.
EPSILON = 0.01
# A full turn in radians (tau = 2*pi).
TAU = 2*pi

# Axis indices into 3-vectors.
X, Y, Z = range(3)

# Unit direction vectors (x, y, z).
ORIGIN      = ( 0, 0, 0)
UP_VEC      = ( 0, 0, 1)
RIGHT_VEC   = ( 1, 0, 0)
FORWARD_VEC = ( 0, 1, 0)
DOWN_VEC    = ( 0, 0,-1)
LEFT_VEC    = (-1, 0, 0)
BACK_VEC    = ( 0,-1, 0)

# ==========
# = Colors =
# ==========
# RGB triples with components in [0, 1]; Transparent adds an alpha channel.
Red         = ( 1, 0, 0)
Green       = ( 0, 1, 0)
Blue        = ( 0, 0, 1)
Cyan        = ( 0, 1, 1)
Magenta     = ( 1, 0, 1)
Yellow      = ( 1, 1, 0)
Black       = ( 0, 0, 0)
White       = ( 1, 1, 1)
Oak         = (0.65, 0.50, 0.40)
Pine        = (0.85, 0.70, 0.45)
Birch       = (0.90, 0.80, 0.60)
FiberBoard  = (0.70, 0.67, 0.60)
BlackPaint  = (0.20, 0.20, 0.20)
Iron        = (0.36, 0.33, 0.33)
Steel       = (0.65, 0.67, 0.72)
Stainless   = (0.45, 0.43, 0.50)
Aluminum    = (0.77, 0.77, 0.80)
Brass       = (0.88, 0.78, 0.50)
Transparent = (1, 1, 1, 0.2)
# ========================
# = Degrees <==> Radians =
# ========================
def degrees( x_radians):
    # Convert radians to degrees (TAU radians == 360 degrees).
    return 360.0*x_radians/TAU
def radians( x_degrees):
    # Convert degrees to radians (360 degrees == TAU radians).
    return x_degrees/360.0*TAU
# ==============
# = Grid Plane =
# ==============
def grid_plane( grid_unit=12, count=10, line_weight=0.1, plane='xz'):
    """Draw a background grid of thin lines for reference while debugging.

    grid_unit:   spacing between adjacent grid lines
    count:       number of grid cells along each axis
    line_weight: cross-section thickness of each line
    plane:       any combination of 'xz', 'xy', 'yz' chooses the plane(s)
    """
    l = count*grid_unit
    t = union()
    t.set_modifier('background')
    # BUG FIX: with `from __future__ import division` in effect, count/2 is a
    # float, which range() rejects on Python 3.  Floor division keeps the
    # original endpoints for even counts and is Python 3 safe.
    for i in range(-count//2, count//2+1):
        if 'xz' in plane:
            # xz-plane
            h = up(   i*grid_unit)( cube( [ l, line_weight, line_weight], center=True))
            v = right(i*grid_unit)( cube( [ line_weight, line_weight, l], center=True))
            t.add([h,v])

        # xy plane
        if 'xy' in plane:
            h = forward(i*grid_unit)( cube([ l, line_weight, line_weight], center=True))
            v = right(  i*grid_unit)( cube( [ line_weight, l, line_weight], center=True))
            t.add([h,v])

        # yz plane
        if 'yz' in plane:
            h = up(     i*grid_unit)( cube([ line_weight, l, line_weight], center=True))
            v = forward(i*grid_unit)( cube([ line_weight, line_weight, l], center=True))
            t.add([h,v])

    return t
def distribute_in_grid( objects, max_bounding_box, rows_and_cols=None):
# Translate each object in objects in a grid with each cell of size
# max_bounding_box.
# If
# objects: array of SCAD objects
# max_bounding_box: 2-tuple with x & y dimensions of grid cells.
# if a single number is passed, x & y will both use it
# rows_and_cols: 2-tuple of how many rows and columns to use. If
# not supplied, rows_and_cols will be the smallest square that
# can contain all members of objects (e.g, if len(objects) == 80,
# rows_and_cols will default to (9,9))
# Distributes object in a grid in the xy plane
# with objects spaced max_bounding_box apart
if isinstance( max_bounding_box, (list, tuple)):
x_trans, y_trans = max_bounding_box[0:2]
elif isinstance(max_bounding_box, (int, long, float, complex)):
x_trans = y_trans = max_bounding_box
else:
pass # TypeError
# If we only got passed one object, just return it
try:
l = len(objects)
except:
return objects
ret = []
if rows_and_cols:
grid_w, grid_h = rows_and_cols
else:
grid_w = grid_h = int(ceil( sqrt(len(objects))))
objs_placed = 0
for y in range( grid_h):
for x in range( grid_w):
if objs_placed < len(objects):
ret.append(translate( [x*x_trans, y*y_trans])( objects[objs_placed]))
objs_placed += 1
else:
break
return union()(ret)
# ==============
# = Directions =
# ==============
def up( z):
return translate( [0,0,z])
def down( z):
return translate( [0,0,-z])
def right( x):
return translate( [x, 0,0])
def left( x):
return translate( [-x, 0,0])
def forward(y):
return translate( [0,y,0])
def back( y):
return translate( [0,-y,0])
# ===========================
# = Box-alignment rotations =
# ===========================
def rot_z_to_up( obj):
# NOTE: Null op
return rotate( a=0, v=FORWARD_VEC)(obj)
def rot_z_to_down( obj):
return rotate( a=180, v=FORWARD_VEC)(obj)
def rot_z_to_right( obj):
return rotate( a=90, v=FORWARD_VEC)(obj)
def rot_z_to_left( obj):
return rotate( a=-90, v=FORWARD_VEC)(obj)
def rot_z_to_forward( obj):
return rotate( a=-90, v=RIGHT_VEC)(obj)
def rot_z_to_back( obj):
return rotate( a=90, v=RIGHT_VEC)(obj)
# ================================
# = Box-alignment and translation =
# ================================
def box_align( obj, direction_func=up, distance=0 ):
# Given a box side (up, left, etc) and a distance,
# rotate obj (assumed to be facing up) in the
# correct direction and move it distance in that
# direction
trans_and_rot = {
up: rot_z_to_up, # Null
down: rot_z_to_down,
right: rot_z_to_right,
left: rot_z_to_left,
forward: rot_z_to_forward,
back: rot_z_to_back,
}
assert( direction_func in trans_and_rot)
rot = trans_and_rot[ direction_func]
return direction_func( distance)( rot( obj))
# =======================
# = 90-degree Rotations =
# =======================
def rot_z_to_x( obj):
return rotate( a=90, v=FORWARD_VEC)(obj)
def rot_z_to_neg_x( obj):
return rotate( a=-90, v=FORWARD_VEC)(obj)
def rot_z_to_neg_y( obj):
return rotate( a=90, v=RIGHT_VEC)(obj)
def rot_z_to_y( obj):
return rotate( a=-90, v=RIGHT_VEC)(obj)
def rot_x_to_y( obj):
return rotate( a=90, v=UP_VEC)(obj)
def rot_x_to_neg_y( obj):
return rotate( a=-90, v=UP_VEC)(obj)
# =======
# = Arc =
# =======
def arc( rad, start_degrees, end_degrees, segments=None):
# Note: the circle that this arc is drawn from gets segments,
# not the arc itself. That means a quarter-circle arc will
# have segments/4 segments.
bottom_half_square = back( rad)(square( [3*rad, 2*rad], center=True))
top_half_square = forward( rad)( square( [3*rad, 2*rad], center=True))
start_shape = circle( rad, segments=segments)
if abs( (end_degrees - start_degrees)%360) <= 180:
end_angle = end_degrees - 180
ret = difference()(
start_shape,
rotate( a=start_degrees)( bottom_half_square.copy()),
rotate( a= end_angle)( bottom_half_square.copy())
)
else:
ret = intersection( )(
start_shape,
union()(
rotate( a=start_degrees)( top_half_square.copy()),
rotate( a=end_degrees)( bottom_half_square.copy())
)
)
return ret
def arc_inverted( rad, start_degrees, end_degrees, segments=None):
# Return the segment of an arc *outside* the circle of radius rad,
# bounded by two tangents to the circle. This is the shape
# needed for fillets.
# Note: the circle that this arc is drawn from gets segments,
# not the arc itself. That means a quarter-circle arc will
# have segments/4 segments.
# Leave the portion of a circumscribed square of sides
# 2*rad that is NOT in the arc behind. This is most useful for 90-degree
# segments, since it's what you'll add to create fillets and take away
# to create rounds.
# NOTE: an inverted arc is only valid for end_degrees-start_degrees <= 180.
# If this isn't true, end_degrees and start_degrees will be swapped so
# that an acute angle can be found. end_degrees-start_degrees == 180
# will yield a long rectangle of width 2*radius, since the tangent lines
# will be parallel and never meet.
# Fix start/end degrees as needed; find a way to make an acute angle
if end_degrees < start_degrees:
end_degrees += 360
if end_degrees - start_degrees >= 180:
start_degrees, end_degrees = end_degrees, start_degrees
# We want the area bounded by:
# -- the circle from start_degrees to end_degrees
# -- line tangent to the circle at start_degrees
# -- line tangent to the circle at end_degrees
# Note that this shape is only valid if end_degrees - start_degrees < 180,
# since if the two angles differ by more than 180 degrees,
# the tangent lines don't converge
if end_degrees - start_degrees == 180:
raise ValueError( "Unable to draw inverted arc over 180 or more "
"degrees. start_degrees: %s end_degrees: %s"
%(start_degrees, end_degrees))
wide = 1000
high = 1000
top_half_square = translate( [-(wide-rad), 0])( square([wide, high], center=False))
bottom_half_square = translate( [-(wide-rad), -high])( square([wide, high], center=False))
a = rotate( start_degrees)( top_half_square)
b = rotate( end_degrees)( bottom_half_square)
ret = (a*b) - circle( rad, segments=segments)
return ret
# TODO: arc_to that creates an arc from point to another point.
# This is useful for making paths. See the SVG path command:
# See: http://www.w3.org/TR/SVG/implnote.html#ArcImplementationNotes
# ======================
# = Bounding Box Class =
# ======================
class BoundingBox(object):
# A basic Bounding Box representation to enable some more introspection about
# objects. For instance, a BB will let us say "put this new object on top of
# that old one". Bounding Boxes *can't* be relied on for boolean operations
# without compiling in OpenSCAD, so they're limited, but good for some purposes.
# Be careful to understand what things this BB implementation can and can't do -ETJ 15 Oct 2013
# Basically you can use a BoundingBox to describe the extents of an object
# the moment it's created, but once you perform any CSG operation on it, it's
# more or less useless.
def __init__( self, size, loc=None):
loc = loc if loc else [0,0,0]
# self.w, self.h, self.d = size
# self.x, self.y, self.z = loc
self.set_size( size)
self.set_position( loc)
def size( self):
return [ self.w, self.h, self.d]
def position( self):
return [ self.x, self.y, self.z]
def set_position( self, position):
self.x, self.y, self.z = position
def set_size(self, size):
self.w, self.h, self.d = size
def split_planar( self, cutting_plane_normal= RIGHT_VEC, cut_proportion=0.5, add_wall_thickness=0):
cpd = {RIGHT_VEC: 0, LEFT_VEC:0, FORWARD_VEC:1, BACK_VEC:1, UP_VEC:2, DOWN_VEC:2}
cutting_plane = cpd.get( cutting_plane_normal, 2)
# Figure what the cutting plane offset should be
dim_center = self.position()[cutting_plane]
dim = self.size()[cutting_plane]
dim_min = dim_center - dim/2
dim_max = dim_center + dim/2
cut_point = (cut_proportion) * dim_min + (1-cut_proportion)*dim_max
# Now create bounding boxes with the appropriate sizes
part_bbs = []
a_sum = 0
for i, part in enumerate([ cut_proportion, (1-cut_proportion)]):
part_size = self.size()
part_size[cutting_plane] = part_size[ cutting_plane] * part
part_loc = self.position()
part_loc[ cutting_plane] = dim_min + a_sum + dim * (part/2)
# If extra walls are requested around the slices, add them here
if add_wall_thickness != 0:
# Expand the walls as requested
for j in [X, Y, Z]:
part_size[j] += 2*add_wall_thickness
# Don't expand in the direction of the cutting_plane, only away from it
part_size[cutting_plane] -= add_wall_thickness
# add +/- add_wall_thickness/2 to the location in the
# slicing dimension so we stay at the center of the piece
loc_offset = -add_wall_thickness/2 + i*add_wall_thickness
part_loc[ cutting_plane] += loc_offset
part_bbs.append( BoundingBox( part_size, part_loc))
a_sum += part * dim
return part_bbs
def cube( self, larger=False):
c_size = self.size() if not larger else [s + 2*EPSILON for s in self.size()]
c = translate( self.position())(
cube( c_size, center=True)
)
return c
def min( self, which_dim=None):
min_pt = [p-s/2 for p, s in zip( self.position(), self.size())]
if which_dim:
return min_pt[ which_dim]
else:
return min_pt
def max( self, which_dim=None):
max_pt = [p+s/2 for p, s in zip( self.position(), self.size())]
if which_dim:
return max_pt[ which_dim]
else:
return max_pt
# ===================
# = Model Splitting =
# ===================
def split_body_planar( obj, obj_bb, cutting_plane_normal=UP_VEC, cut_proportion=0.5, dowel_holes=False, dowel_rad=4.5, hole_depth=15, add_wall_thickness=0):
# Split obj along the specified plane, returning two pieces and
# general bounding boxes for each.
# Note that the bounding boxes are NOT accurate to the sections,
# they just indicate which portion of the original BB is in each
# section. Given the limits of OpenSCAD, this is the best we can do -ETJ 17 Oct 2013
# Optionally, leave holes in both bodies to allow the pieces to be put
# back together with short dowels.
# Find the splitting bounding boxes
part_bbs = obj_bb.split_planar( cutting_plane_normal, cut_proportion, add_wall_thickness=add_wall_thickness)
# And intersect the bounding boxes with the object itself
slices = [obj*part_bb.cube() for part_bb in part_bbs]
# Make holes for dowels if requested.
# In case the bodies need to be aligned properly, make two holes,
# separated by one dowel-width
if dowel_holes:
cpd = {RIGHT_VEC: 0, LEFT_VEC:0, FORWARD_VEC:1, BACK_VEC:1, UP_VEC:2, DOWN_VEC:2}
cutting_plane = cpd.get( cutting_plane_normal, 2)
dowel = cylinder( r=dowel_rad, h=hole_depth*2, center=True)
# rotate dowels to correct axis
if cutting_plane != 2:
rot_vec = RIGHT_VEC if cutting_plane == 1 else FORWARD_VEC
dowel = rotate( a=90, v=rot_vec)( dowel)
cut_point = part_bbs[0].position()[ cutting_plane] + part_bbs[0].size()[ cutting_plane]/2
# Move dowels away from center of face by 2*dowel_rad in each
# appropriate direction
dowel_trans_a = part_bbs[0].position()
dowel_trans_a[ cutting_plane] = cut_point
separation_index = {0:1, 1:2, 2:0}[cutting_plane]
dowel_trans_a[ separation_index] -= 2*dowel_rad
dowel_trans_b = dowel_trans_a[:]
dowel_trans_b[ separation_index] += 4*dowel_rad
dowel_a = translate( dowel_trans_a)(dowel)
dowel_b = translate( dowel_trans_b)(dowel)
dowels = dowel_a + dowel_b
# subtract dowels from each slice
slices = [ s - dowels for s in slices]
slices_and_bbs = [slices[0], part_bbs[0], slices[1], part_bbs[1]]
return slices_and_bbs
def section_cut_xz( body, y_cut_point=0):
big_w = 10000
d = 2
c = forward( d/2 + y_cut_point)( cube( [big_w, d, big_w], center=True))
return c * body
# =====================
# = Bill of Materials =
# =====================
# Any part defined in a method can be automatically counted using the
# @bom_part() decorator. After all parts have been created, call
# bill_of_materials()
# to generate a report. See examples/bom_scad.py for usage
g_parts_dict = {}
def bom_part( description='', per_unit_price=None, currency='US$'):
def wrap(f):
name = description if description else f.__name__
g_parts_dict[name] = [0, currency, per_unit_price]
def wrapped_f( *args):
name = description if description else f.__name__
g_parts_dict[name][0] += 1
return f(*args)
return wrapped_f
return wrap
def bill_of_materials():
res = ''
res += "%8s\t%8s\t%8s\t%8s\n"%("Desc.", "Count", "Unit Price", "Total Price")
all_costs = {}
for desc,(count, currency, price) in g_parts_dict.items():
if count > 0:
if price:
total = price*count
try:
all_costs[currency] += total
except:
all_costs[currency] = total
res += "%8s\t%8d\t%s %8f\t%s %8.2f\n"%(desc, count, currency, price, currency, total)
else:
res += "%8s\t%8d\n"%(desc, count)
if all_costs > 0:
res += "_"*60+'\n'
res += "Total Cost:\n"
for currency in all_costs.keys():
res += "\t\t%s %.2f\n"%(currency, all_costs[currency])
res+="\n"
return res
#FIXME: finish this.
def bill_of_materials_justified():
res = ''
columns = [s.rjust(8) for s in ("Desc.", "Count", "Unit Price", "Total Price")]
all_costs = {}
for desc, (count, currency, price) in g_parts_dict.items():
if count > 0:
if price:
total = price*count
try:
all_costs[currency] += total
except:
all_costs[currency] = total
res += "%(desc)s %(count)s %(currency)s %(price)s %(currency)s %(total)s \n"%vars()
else:
res += "%(desc)s %(count)s "%vars()
if all_costs > 0:
res += "_"*60+'\n'
res += "Total Cost:\n"
for currency in all_costs.keys():
res += "\t\t%s %.2f\n"%(currency, all_costs[currency])
res+="\n"
return res
# ================
# = Bounding Box =
# ================
def bounding_box( points):
all_x = []; all_y = []; all_z = []
for p in points:
all_x.append( p[0])
all_y.append( p[1])
if len(p) > 2:
all_z.append( p[2])
else:
all_z.append(0)
return [ [min(all_x), min(all_y), min(all_z)], [max(all_x), max(all_y), max(all_z)]]
# =======================
# = Hardware dimensions =
# =======================
screw_dimensions = {
'm3': { 'nut_thickness':2.4, 'nut_inner_diam': 5.4, 'nut_outer_diam':6.1, 'screw_outer_diam':3.0, 'cap_diam':5.5 ,'cap_height':3.0 },
'm4': { 'nut_thickness':3.1, 'nut_inner_diam': 7.0, 'nut_outer_diam':7.9, 'screw_outer_diam':4.0, 'cap_diam':6.9 ,'cap_height':3.9 },
'm5': { 'nut_thickness':4.7, 'nut_inner_diam': 7.9, 'nut_outer_diam':8.8, 'screw_outer_diam':5.0, 'cap_diam':8.7 ,'cap_height':5 },
}
def screw( screw_type='m3', screw_length=16):
dims = screw_dimensions[screw_type.lower()]
shaft_rad = dims['screw_outer_diam']/2
cap_rad = dims['cap_diam']/2
cap_height = dims['cap_height']
ret = union()(
cylinder( shaft_rad, screw_length),
up(screw_length)(
cylinder( cap_rad, cap_height)
)
)
return ret
def nut( screw_type='m3'):
dims = screw_dimensions[screw_type.lower()]
outer_rad = dims['nut_outer_diam']
inner_rad = dims['screw_outer_diam']
ret = difference()(
circle( outer_rad, segments=6),
circle( inner_rad)
)
return ret
# ==================
# = PyEuclid Utils =
# = -------------- =
try:
import euclid
from euclid import *
# NOTE: The PyEuclid on PyPi doesn't include several elements added to
# the module as of 13 Feb 2013. Add them here until euclid supports them
# TODO: when euclid updates, remove this cruft. -ETJ 13 Feb 2013
import patch_euclid
patch_euclid.run_patch()
def euclidify( an_obj, intended_class=Vector3):
# If an_obj is an instance of the appropriate PyEuclid class,
# return it. Otherwise, try to turn an_obj into the appropriate
# class and throw an exception on failure
# Since we often want to convert an entire array
# of objects (points, etc.) accept arrays of arrays
ret = an_obj
# See if this is an array of arrays. If so, convert all sublists
if isinstance( an_obj, (list, tuple)):
if isinstance( an_obj[0], (list,tuple)):
ret = [intended_class(*p) for p in an_obj]
elif isinstance( an_obj[0], intended_class):
# this array is already euclidified; return it
ret = an_obj
else:
try:
ret = intended_class( *an_obj)
except:
raise TypeError( "Object: %s ought to be PyEuclid class %s or "
"able to form one, but is not."%(an_obj, intended_class.__name__))
elif not isinstance( an_obj, intended_class):
try:
ret = intended_class( *an_obj)
except:
raise TypeError( "Object: %s ought to be PyEuclid class %s or "
"able to form one, but is not."%(an_obj, intended_class.__name__))
return ret
def euc_to_arr( euc_obj_or_list): # Inverse of euclidify()
# Call as_arr on euc_obj_or_list or on all its members if it's a list
if hasattr(euc_obj_or_list, "as_arr"):
return euc_obj_or_list.as_arr()
elif isinstance( euc_obj_or_list, (list, tuple)) and hasattr(euc_obj_or_list[0], 'as_arr'):
return [euc_to_arr( p) for p in euc_obj_or_list]
else:
# euc_obj_or_list is neither an array-based PyEuclid object,
# nor a list of them. Assume it's a list of points or vectors,
# and return the list unchanged. We could be wrong about this, though.
return euc_obj_or_list
def is_scad( obj):
return isinstance( obj, openscad_object)
def scad_matrix( euclid_matrix4):
a = euclid_matrix4
return [[a.a, a.b, a.c, a.d],
[a.e, a.f, a.g, a.h],
[a.i, a.j, a.k, a.l],
[a.m, a.n, a.o, a.p]
]
# ==============
# = Transforms =
# ==============
def transform_to_point( body, dest_point, dest_normal, src_point=Point3(0,0,0), src_normal=Vector3(0,1,0), src_up=Vector3(0,0,1)):
# Transform body to dest_point, looking at dest_normal.
# Orientation & offset can be changed by supplying the src arguments
# Body may be:
# -- an openSCAD object
# -- a list of 3-tuples or PyEuclid Point3s
# -- a single 3-tuple or Point3
dest_point = euclidify( dest_point, Point3)
dest_normal = euclidify( dest_normal, Vector3)
at = dest_point + dest_normal
EUC_UP = euclidify( UP_VEC)
EUC_FORWARD = euclidify( FORWARD_VEC)
EUC_ORIGIN = euclidify( ORIGIN, Vector3)
# if dest_normal and src_up are parallel, the transform collapses
# all points to dest_point. Instead, use EUC_FORWARD if needed
if dest_normal.cross( src_up) == EUC_ORIGIN:
if src_up.cross( EUC_UP) == EUC_ORIGIN:
src_up = EUC_FORWARD
else: src_up = EUC_UP
look_at_matrix = Matrix4.new_look_at( eye=dest_point, at=at, up=src_up )
if is_scad( body):
# If the body being altered is a SCAD object, do the matrix mult
# in OpenSCAD
sc_matrix = scad_matrix( look_at_matrix)
res = multmatrix( m=sc_matrix)( body)
else:
body = euclidify( body, Point3)
if isinstance( body, (list, tuple)):
res = [look_at_matrix * p for p in body]
else:
res = look_at_matrix * body
return res
# ========================================
# = Vector drawing: 3D arrow from a line =
# = -------------- =======================
def draw_segment( euc_line=None, endless=False, arrow_rad=7, vec_color=None):
# Draw a tradtional arrow-head vector in 3-space.
vec_arrow_rad = arrow_rad
vec_arrow_head_rad = vec_arrow_rad * 1.5
vec_arrow_head_length = vec_arrow_rad * 3
if isinstance( euc_line, Vector3):
p = Point3( *ORIGIN)
v = euc_line
elif isinstance( euc_line, Line3):
p = euc_line.p
v = -euc_line.v
elif isinstance( euc_line, list) or isinstance( euc_line, tuple):
# TODO: This assumes p & v are PyEuclid classes.
# Really, they could as easily be two 3-tuples. Should
# check for this.
p, v = euc_line[0], euc_line[1]
shaft_length = v.magnitude() - vec_arrow_head_length
arrow = cylinder( r= vec_arrow_rad, h = shaft_length)
arrow += up( shaft_length )(
cylinder(r1=vec_arrow_head_rad, r2=0, h = vec_arrow_head_length)
)
if endless:
endless_length = max( v.magnitude()*10, 200)
arrow += cylinder( r=vec_arrow_rad/3, h = endless_length, center=True)
arrow = transform_to_point( body=arrow, dest_point=p, dest_normal=v)
if vec_color:
arrow = color( vec_color)(arrow)
return arrow
# ==========
# = Offset =
# = ------ =
LEFT, RIGHT = radians(90), radians(-90)
def offset_polygon( point_arr, offset, inside=True, closed_poly=True):
# returns a closed solidPython polygon offset by offset distance
# from the polygon described by point_arr.
op = offset_points( point_arr, offset=offset, inside=inside, closed_poly=closed_poly)
return polygon( euc_to_arr(op))
def offset_points( point_arr, offset, inside=True, closed_poly=True):
# Given a set of points, return a set of points offset from
# them.
# To get reasonable results, the points need to be all in a plane.
# ( Non-planar point_arr will still return results, but what constitutes
# 'inside' or 'outside' would be different in that situation.)
#
# What direction inside and outside lie in is determined by the first
# three points (first corner). In a convex closed shape, this corresponds
# to inside and outside. If the first three points describe a concave
# portion of a closed shape, inside and outside will be switched.
#
# Basically this means that if you're offsetting a complicated shape,
# you'll likely have to try both directions (inside=True/False) to
# figure out which direction you're offsetting to.
#
# CAD programs generally require an interactive user choice about which
# side is outside and which is inside. Robust behavior with this
# function will require similar checking.
# Also note that short segments or narrow areas can cause problems
# as well. This method suffices for most planar convex figures where
# segment length is greater than offset, but changing any of those
# assumptions will cause unattractive results. If you want real
# offsets, use SolidWorks.
# TODO: check for self-intersections in the line connecting the
# offset points, and remove them.
# Using the first three points in point_arr, figure out which direction
# is inside and what plane to put the points in
point_arr = euclidify( point_arr[:], Point3)
in_dir = _inside_direction( *point_arr[0:3])
normal = _three_point_normal( *point_arr[0:3])
direction = in_dir if inside else _other_dir( in_dir)
# Generate offset points for the correct direction
# for all of point_arr.
segs = []
offset_pts = []
point_arr += point_arr[ 0:2] # Add first two points to the end as well
if closed_poly:
for i in range( len(point_arr) - 1):
a, b = point_arr[i:i+2]
par_seg = _parallel_seg( a, b, normal=normal, offset=offset, direction=direction )
segs.append( par_seg)
if len(segs) > 1:
int_pt = segs[-2].intersect(segs[-1])
if int_pt:
offset_pts.append( int_pt)
# When calculating based on a closed curve, we can't find the
# first offset point until all others have been calculated.
# Now that we've done so, put the last point back to first place
last = offset_pts[-1]
offset_pts.insert( 0, last)
del( offset_pts[-1])
else:
for i in range( len(point_arr)-2):
a, b = point_arr[i:i+2]
par_seg = _parallel_seg( a, b, normal=normal, offset=offset, direction=direction )
segs.append( par_seg)
# In an open poly, first and last points will be parallel
# to the first and last segments, not intersecting other segs
if i == 0:
offset_pts.append( par_seg.p1)
elif i == len(point_arr) - 3:
offset_pts.append( segs[-2].p2)
else:
int_pt = segs[-2].intersect(segs[-1])
if int_pt:
offset_pts.append( int_pt)
return offset_pts
# ==================
# = Offset helpers =
# ==================
def _parallel_seg( p, q, offset, normal=Vector3( 0, 0, 1), direction=LEFT):
# returns a PyEuclid Line3 parallel to pq, in the plane determined
# by p,normal, to the left or right of pq.
v = q - p
angle = direction
rot_v = v.rotate_around( axis=normal, theta=angle)
rot_v.set_length( offset)
return Line3( p+rot_v, v )
def _inside_direction( a, b, c, offset=10):
# determines which direction (LEFT, RIGHT) is 'inside' the triangle
# made by a, b, c. If ab and bc are parallel, return LEFT
x = _three_point_normal( a, b, c)
# Make two vectors (left & right) for each segment.
l_segs = [_parallel_seg( p, q, normal=x, offset=offset, direction=LEFT) for p,q in ( (a,b), (b,c))]
r_segs = [_parallel_seg( p, q, normal=x, offset=offset, direction=RIGHT) for p,q in ( (a,b), (b,c))]
# Find their intersections.
p1 = l_segs[0].intersect( l_segs[1])
p2 = r_segs[0].intersect( r_segs[1])
# The only way I've figured out to determine which direction is
# 'inside' or 'outside' a joint is to calculate both inner and outer
# vectors and then to find the intersection point closest to point a.
# This ought to work but it seems like there ought to be a more direct
# way to figure this out. -ETJ 21 Dec 2012
# The point that's closer to point a is the inside point.
if a.distance( p1) <= a.distance( p2):
return LEFT
else:
return RIGHT
def _other_dir( left_or_right):
if left_or_right == LEFT:
return RIGHT
else:
return LEFT
def _three_point_normal( a, b, c):
ab = b - a
bc = c - b
seg_ab = Line3( a, ab)
seg_bc = Line3( b, bc)
x = seg_ab.v.cross( seg_bc.v)
return x
# =============
# = 2D Fillet =
# =============
def _widen_angle_for_fillet( start_degrees, end_degrees):
# Fix start/end degrees as needed; find a way to make an acute angle
if end_degrees < start_degrees:
end_degrees += 360
if end_degrees - start_degrees >= 180:
start_degrees, end_degrees = end_degrees, start_degrees
epsilon_degrees = 2
return start_degrees - epsilon_degrees, end_degrees + epsilon_degrees
def fillet_2d( three_point_sets, orig_poly, fillet_rad, remove_material=True):
# NOTE: three_point_sets must be a list of sets of three points
# (i.e., a list of 3-tuples of points), even if only one fillet is being done:
# e.g. [[a, b, c]]
# a, b, and c are three points that form a corner at b.
# Return a negative arc (the area NOT covered by a circle) of radius rad
# in the direction of the more acute angle between
# Note that if rad is greater than a.distance(b) or c.distance(b), for a
# 90-degree corner, the returned shape will include a jagged edge.
# TODO: use fillet_rad = min( fillet_rad, a.distance(b), c.distance(b))
# If a shape is being filleted in several places, it is FAR faster
# to add/ remove its set of shapes all at once rather than
# to cycle through all the points, since each method call requires
# a relatively complex boolean with the original polygon.
# So... three_point_sets is either a list of three Euclid points that
# determine the corner to be filleted, OR, a list of those lists, in
# which case everything will be removed / added at once.
# NOTE that if material is being added (fillets) or removed (rounds)
# each must be called separately.
if len( three_point_sets) == 3 and isinstance( three_point_sets[0], (Vector2, Vector3)):
three_point_sets = [three_point_sets]
arc_objs = []
for three_points in three_point_sets:
assert len(three_points) in (2,3)
# make two vectors out of the three points passed in
a, b, c = euclidify( three_points, Point3)
# Find the center of the arc we'll have to make
offset = offset_points( [a, b, c], offset=fillet_rad, inside=True)
center_pt = offset[1]
a2, b2, c2, cp2 = [Point2( p.x, p.y) for p in (a,b,c, center_pt)]
a2b2 = LineSegment2( a2, b2)
c2b2 = LineSegment2( c2, b2)
# Find the point on each segment where the arc starts; Point2.connect()
# returns a segment with two points; Take the one that's not the center
afs = cp2.connect( a2b2)
cfs = cp2.connect( c2b2)
afp, cfp = [seg.p1 if seg.p1 != cp2 else seg.p2 for seg in (afs, cfs)]
a_degs, c_degs = [ (degrees(math.atan2( seg.v.y, seg.v.x)))%360 for seg in (afs, cfs)]
start_degs = a_degs
end_degs = c_degs
# Widen start_degs and end_degs slightly so they overlap the areas
# they're supposed to join/ remove.
start_degs, end_degs = _widen_angle_for_fillet( start_degs, end_degs)
arc_obj = translate( center_pt.as_arr() )(
arc_inverted( rad=fillet_rad, start_degrees=start_degs, end_degrees=end_degs)
)
arc_objs.append( arc_obj)
if remove_material:
poly = orig_poly - arc_objs
else:
poly = orig_poly + arc_objs
return poly
# ==========================
# = Extrusion along a path =
# = ---------------------- =
def extrude_along_path( shape_pts, path_pts, scale_factors=None): # Possible: twist
# Extrude the convex curve defined by shape_pts along path_pts.
# -- For predictable results, shape_pts must be planar, convex, and lie
# in the XY plane centered around the origin.
#
# -- len( scale_factors) should equal len( path_pts). If not present, scale
# will be assumed to be 1.0 for each point in path_pts
# -- Future additions might include corner styles (sharp, flattened, round)
# or a twist factor
polyhedron_pts = []
facet_indices = []
if not scale_factors:
scale_factors = [1.0] * len(path_pts)
# Make sure we've got Euclid Point3's for all elements
shape_pts = euclidify( shape_pts, Point3)
path_pts = euclidify( path_pts, Point3)
src_up = Vector3( *UP_VEC)
for which_loop in range( len( path_pts) ):
path_pt = path_pts[which_loop]
scale = scale_factors[which_loop]
# calculate the tangent to the curve at this point
if which_loop > 0 and which_loop < len(path_pts) - 1:
prev_pt = path_pts[which_loop-1]
next_pt = path_pts[which_loop+1]
v_prev = path_pt - prev_pt
v_next = next_pt - path_pt
tangent = v_prev + v_next
elif which_loop == 0:
tangent = path_pts[which_loop+1] - path_pt
elif which_loop == len( path_pts) - 1:
tangent = path_pt - path_pts[ which_loop -1]
# Scale points
if scale != 1.0:
this_loop = [ (scale*sh) for sh in shape_pts]
# Convert this_loop back to points; scaling changes them to Vectors
this_loop= [Point3(v.x, v.y, v.z) for v in this_loop]
else:
this_loop = shape_pts[:]
# Rotate & translate
this_loop = transform_to_point( this_loop, dest_point=path_pt, dest_normal=tangent, src_up=src_up)
# Add the transformed points to our final list
polyhedron_pts += this_loop
# And calculate the facet indices
shape_pt_count = len(shape_pts)
segment_start = which_loop*shape_pt_count
segment_end = segment_start + shape_pt_count - 1
if which_loop < len(path_pts) - 1:
for i in range( segment_start, segment_end):
facet_indices.append( [i, i+shape_pt_count, i+1])
facet_indices.append( [i+1, i+shape_pt_count, i+shape_pt_count+1])
facet_indices.append( [segment_start, segment_end, segment_end + shape_pt_count])
facet_indices.append( [segment_start, segment_end + shape_pt_count, segment_start+shape_pt_count])
# Cap the start of the polyhedron
for i in range(1, shape_pt_count - 1):
facet_indices.append( [0, i, i+1])
# And the end ( could be rolled into the earlier loop)
# FIXME: concave cross-sections will cause this end-capping algorithm to fail
end_cap_base = len( polyhedron_pts) - shape_pt_count
for i in range( end_cap_base + 1, len(polyhedron_pts) -1):
facet_indices.append( [ end_cap_base, i+1, i])
return polyhedron( points = euc_to_arr(polyhedron_pts), triangles=facet_indices)
except:
# euclid isn't available; these methods won't be either
pass
## {{{ http://code.activestate.com/recipes/577068/ (r1)
def frange(*args):
"""frange([start, ] end [, step [, mode]]) -> generator
A float range generator. If not specified, the default start is 0.0
and the default step is 1.0.
Optional argument mode sets whether frange outputs an open or closed
interval. mode must be an int. Bit zero of mode controls whether start is
included (on) or excluded (off); bit one does the same for end. Hence:
0 -> open interval (start and end both excluded)
1 -> half-open (start included, end excluded)
2 -> half open (start excluded, end included)
3 -> closed (start and end both included)
By default, mode=1 and only start is included in the output.
"""
mode = 1 # Default mode is half-open.
n = len(args)
if n == 1:
args = (0.0, args[0], 1.0)
elif n == 2:
args = args + (1.0,)
elif n == 4:
mode = args[3]
args = args[0:3]
elif n != 3:
raise TypeError('frange expects 1-4 arguments, got %d' % n)
assert len(args) == 3
try:
start, end, step = [a + 0.0 for a in args]
except TypeError:
raise TypeError('arguments must be numbers')
if step == 0.0:
raise ValueError('step must not be zero')
if not isinstance(mode, int):
raise TypeError('mode must be an int')
if mode & 1:
i, x = 0, start
else:
i, x = 1, start+step
if step > 0:
if mode & 2:
from operator import le as comp
else:
from operator import lt as comp
else:
if mode & 2:
from operator import ge as comp
else:
from operator import gt as comp
while comp(x, end):
yield x
i += 1
x = start + i*step
## end of http://code.activestate.com/recipes/577068/ }}}
# =====================
# = D e b u g g i n g =
# =====================
def obj_tree_str( sp_obj, vars_to_print=None):
    """Debugging helper: render sp_obj and all of its descendants as an
    indented tree, one object per line.

    vars_to_print is an optional list of attribute names (e.g.
    ['is_part_root', 'is_hole', 'name']); each attribute that exists on an
    object is appended to that object's line as "name: value".
    """
    attrs = vars_to_print or []
    # Mark roots (no parent) with '*' and child nodes with 'L'.
    marker = "\nL " if sp_obj.parent else "\n* "
    pieces = [marker, str(sp_obj), "\t"]
    # Append any requested attributes that this object actually has.
    for attr in attrs:
        if hasattr(sp_obj, attr):
            pieces.append("%s: %s\t" % (attr, getattr(sp_obj, attr)))
    result = "".join(pieces)
    # Recurse into children, indenting each subtree one level deeper.
    for child in sp_obj.children:
        result += indent(obj_tree_str(child, attrs))
    return result
| {
"content_hash": "d85a1f05278d9378fbb069d774b0c495",
"timestamp": "",
"source": "github",
"line_count": 1123,
"max_line_length": 156,
"avg_line_length": 37.82546749777382,
"alnum_prop": 0.5495079806017232,
"repo_name": "vishnubob/rockit",
"id": "74f5c14b284b7d4a8b2386c105c11266f4c849f0",
"size": "42521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/rockit/solid/utils.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "119729"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
    """Auto-generated schema migration for the paloma app.

    Adjusts field options (verbose names, help texts, defaults, indexes)
    on the Message, Site and Template models.  Do not edit the field
    kwargs by hand: Django's migration state must match them exactly.
    """
    dependencies = [
        ('paloma', '0005_auto_20150418_1524'),
    ]
    operations = [
        # Message.circle: nullable FK to Circle, kept as NULL when the
        # referenced Circle is deleted (SET_NULL).
        migrations.AlterField(
            model_name='message',
            name='circle',
            field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to='paloma.Circle', null=True, verbose_name='Circle'),
            preserve_default=True,
        ),
        # NOTE(review): b'...' defaults below are Python-2-era bytes
        # literals emitted by makemigrations; left untouched on purpose.
        migrations.AlterField(
            model_name='site',
            name='domain',
            field=models.CharField(default=b'localhost', max_length=100, help_text='@Domain', unique=True, verbose_name='@Domain', db_index=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='site',
            name='name',
            field=models.CharField(help_text='Owner Site Name', unique=True, max_length=100, verbose_name='Owner Site Name', db_index=True),
            preserve_default=True,
        ),
        # Site.operators: M2M against the project's configured user model.
        migrations.AlterField(
            model_name='site',
            name='operators',
            field=models.ManyToManyField(help_text='User', to=settings.AUTH_USER_MODEL, verbose_name='Site Operators'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='site',
            name='url',
            field=models.CharField(default=b'/', max_length=150, help_text='URL', unique=True, verbose_name='URL', db_index=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='template',
            name='name',
            field=models.CharField(max_length=200, verbose_name='Template Name', db_index=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='template',
            name='text',
            field=models.TextField(default=b'', verbose_name='Template Text'),
            preserve_default=True,
        ),
    ]
| {
"content_hash": "6f4a50eade211fdff0c7f84a9777a4ee",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 162,
"avg_line_length": 37.1578947368421,
"alnum_prop": 0.5878186968838527,
"repo_name": "hdknr/paloma",
"id": "427d3bdd5457abc5f0ca24a17e892d18a16da9e2",
"size": "2142",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/paloma/migrations/0006_auto_20150906_0657.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "3054"
},
{
"name": "Python",
"bytes": "162971"
},
{
"name": "Shell",
"bytes": "1786"
}
],
"symlink_target": ""
} |
from redberry.tests import RedTestCase
from redberry.models import RedPost, RedCategory
class FrontendRoutesTest(RedTestCase):
    """Integration tests for the public (frontend) blog routes."""

    def test_home(self):
        home_url = '/%s/' % self.url_prefix
        resp = self.test_client.get(home_url)

        # A published post is visible on the index page.
        post = RedPost.query.first()
        assert post.title in resp.data
        assert post.content in resp.data

        # Unpublishing hides the post and the empty-state copy appears.
        post.published = False
        resp = self.test_client.get(home_url)
        assert post.title not in resp.data
        assert post.content not in resp.data
        assert "No posts yet." in resp.data

    def test_show_post(self):
        post = RedPost.query.first()
        post_url = "/%s/%s" % (self.url_prefix, post.slug)

        # Fetching a post by its slug renders title and body.
        resp = self.test_client.get(post_url)
        assert post.title in resp.data
        assert post.content in resp.data

        # An unknown slug redirects back home with a flash message.
        resp = self.test_client.get(post_url + "-invalid")
        assert resp.status_code == 302
        assert resp.location == 'http://localhost/%s/' % self.url_prefix
        self.assert_flashes("Post not found!")

    def test_show_category(self):
        category = RedCategory.query.first()
        category_url = "/%s/category/%s" % (self.url_prefix, category.slug)

        # Fetching a category by slug renders its title and description.
        resp = self.test_client.get(category_url)
        assert category.title in resp.data
        assert category.description in resp.data

        # Posts belonging to the category are listed on its page.
        if category.posts:
            assert category.posts[0].title in resp.data

        # An unknown slug redirects back home with a flash message.
        resp = self.test_client.get(category_url + "-invalid")
        assert resp.status_code == 302
        assert resp.location == 'http://localhost/%s/' % self.url_prefix
        self.assert_flashes("Category not found!")
| {
"content_hash": "c1c80041a5aca6e96656b05d16e9d6ef",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 76,
"avg_line_length": 33.892857142857146,
"alnum_prop": 0.631190727081138,
"repo_name": "michaelcho/redberry",
"id": "c4f8edb7140a01c09212572f8c6aee99654409c2",
"size": "1898",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "redberry/tests/frontend_routes_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2932"
},
{
"name": "HTML",
"bytes": "22169"
},
{
"name": "JavaScript",
"bytes": "1237"
},
{
"name": "Python",
"bytes": "29103"
},
{
"name": "Shell",
"bytes": "952"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.