text stringlengths 4 1.02M | meta dict |
|---|---|
"""
Reattach URLs/Evidences from Comments to Assessments
Create Date: 2017-01-04 10:04:18.770087
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
# revision identifiers, used by Alembic.
revision = '4aa9f2ee7969'  # id of this migration
down_revision = '3f615f3b5192'  # migration that must be applied before this one
def upgrade():
    """Upgrade database schema and/or data, creating a new revision.

    Documents that were attached to Comments are re-attached to the
    Assessments those Comments relate to, then orphaned Comment/Document
    rows and their ObjectOwners entries are cleaned up.  The SQL is
    MySQL-specific ("update ignore", multi-table delete) and the statement
    order is significant.
    """
    # Case 1: relationship stored as Comment (source) -> Assessment (dest).
    # "update ignore" silently skips updates that would collide with an
    # existing (documentable, document) row.
    op.execute("""
        update ignore object_documents as od join
        relationships as r on od.documentable_type = r.source_type and
        od.documentable_id = r.source_id
        set od.documentable_type = r.destination_type,
        od.documentable_id = r.destination_id
        where od.documentable_type = 'Comment' and
        r.destination_type = 'Assessment'
    """)
    # Case 2: relationship stored as Assessment (source) -> Comment (dest).
    op.execute("""
        update ignore object_documents as od join
        relationships as r on od.documentable_type = r.destination_type and
        od.documentable_id = r.destination_id
        set od.documentable_type = r.source_type,
        od.documentable_id = r.source_id
        where od.documentable_type = 'Comment' and
        r.source_type = 'Assessment'
    """)
    # Template that re-points a Comment endpoint of a Comment<->Document
    # relationship at the Assessment the same Comment is mapped to; the
    # four endpoint orientations are substituted in the loop below.
    relationship_update_skeleton = """
        update ignore relationships as rel1 join
        relationships as rel2 on
        {rel1_comment}_type = 'Comment' and
        {rel2_comment}_type = 'Comment' and
        {rel1_comment}_id = {rel2_comment}_id
        set {rel1_comment}_type = {assessment}_type,
        {rel1_comment}_id = {assessment}_id
        where {assessment}_type = 'Assessment' and
        {document}_type = 'Document'
    """
    # (rel1.src_type <-> rel1.dst_type & rel2.src_type <-> rel2.dst_type)
    # Comment <-> Document & Comment <-> Assessment
    # Comment <-> Document & Assessment <-> Comment
    # Document <-> Comment & Comment <-> Assessment
    # Document <-> Comment & Assessment <-> Comment
    for rel1_comment, rel2_comment, document, assessment in (
            ("rel1.source", "rel2.source", "rel1.destination", "rel2.destination"),
            ("rel1.source", "rel2.destination", "rel1.destination", "rel2.source"),
            ("rel1.destination", "rel2.source", "rel1.source", "rel2.destination"),
            ("rel1.destination", "rel2.destination", "rel1.source", "rel2.source"),
    ):
        op.execute(relationship_update_skeleton.format(
            rel1_comment=rel1_comment, rel2_comment=rel2_comment,
            document=document, assessment=assessment,
        ))
    # Every remaining Comment<->Document mapping relates to orphaned Comments
    op.execute("""
        delete from object_documents
        where documentable_type = 'Comment'
    """)
    # Remove Comment<->NotAssessment mappings: only Assessment is Commentable
    op.execute("""
        delete from relationships
        where source_type = 'Comment' and destination_type != 'Assessment'
    """)
    op.execute("""
        delete from relationships
        where source_type != 'Assessment' and destination_type = 'Comment'
    """)
    # Remove unmapped Documents (no object_documents row and no relationship)
    op.execute("""
        delete d from documents as d left join
        object_documents as od on od.document_id = d.id left join
        relationships as r on r.source_type = 'Document' and
        r.source_id = d.id or
        r.destination_type = 'Document' and
        r.destination_id = d.id
        where od.id is NULL and r.id is NULL
    """)
    # Remove unmapped Comments (no relationship on either endpoint)
    op.execute("""
        delete c from comments as c left join
        relationships as r on r.source_type = 'Comment' and
        r.source_id = c.id or
        r.destination_type = 'Comment' and
        r.destination_id = c.id
        where r.id is NULL
    """)
    # Remove ObjectOwners rows whose owned Comment/Document no longer exists.
    models_tables = (
        ("Comment", "comments"),
        ("Document", "documents"),
    )
    for model, table in models_tables:
        op.execute("""
            delete object_owners
            from object_owners left join
            {table} on object_owners.ownable_id = {table}.id and
            object_owners.ownable_type = '{model}'
            where object_owners.ownable_type = '{model}' and {table}.id is NULL
        """.format(table=table, model=model))
def downgrade():
    """Downgrade database schema and/or data back to the previous revision.

    Intentionally a no-op: the upgrade is a destructive data migration
    whose original state cannot be reconstructed.
    """
| {
"content_hash": "d75e0ee0d0b5f74bcbe9dc009dbcc90c",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 79,
"avg_line_length": 38.226890756302524,
"alnum_prop": 0.5990327544515278,
"repo_name": "VinnieJohns/ggrc-core",
"id": "6cc2711e643dcfac20db586c4efe096ff3ef635f",
"size": "4662",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/ggrc/migrations/versions/20170104100418_4aa9f2ee7969_reattach_urls_evidences_from_comments_.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "226950"
},
{
"name": "HTML",
"bytes": "1060386"
},
{
"name": "JavaScript",
"bytes": "1927277"
},
{
"name": "Makefile",
"bytes": "7044"
},
{
"name": "Mako",
"bytes": "4320"
},
{
"name": "Python",
"bytes": "2762348"
},
{
"name": "Shell",
"bytes": "31100"
}
],
"symlink_target": ""
} |
import os
import ssl
import sys
import logging
import datetime
from tornado import httpserver, ioloop, netutil, web
def start_server(app: web.Application, https_port: int = 443):
    """Start an HTTPS server for *app* and serve requests until interrupted.

    :param app: tornado.web.Application to use
    :param https_port: int the port number to use for HTTPS process
    :raises FileNotFoundError: if the SSL certificate or key file is missing
    """
    cert_file = 'cert/default.cert'
    key_file = 'cert/default.key'
    # Verify the SSL files *before* binding the port: the original bound the
    # socket first, which leaked a bound socket when the files were missing.
    if not (os.path.isfile(cert_file) and os.path.isfile(key_file)):
        logging.error('No ssl cert and / or key files, aborting ...')
        raise FileNotFoundError
    ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)  # define ssl context
    ssl_ctx.load_cert_chain(cert_file, key_file)  # load ssl required files
    https_socket = netutil.bind_sockets(https_port)  # HTTPS socket
    # Lazy %-style args: formatting only happens if the record is emitted.
    logging.info('%s Start an HTTPS request handler on port : %s',
                 datetime.datetime.utcnow(), https_port)
    httpserver.HTTPServer(app, ssl_options=ssl_ctx).add_sockets(https_socket)  # bind https port
    try:
        ioloop.IOLoop.current().start()  # loop forever to satisfy user's requests
    except KeyboardInterrupt:  # except KeyboardInterrupt to properly exit
        logging.info('%s Exiting an HTTPS request handler on port : %s',
                     datetime.datetime.utcnow(), https_port)
        ioloop.IOLoop.current().stop()  # stop process
        sys.exit(0)  # exit
class BaseHandler(web.RequestHandler):
    """Base class for request handlers that require an authenticated user."""

    def get_current_user(self):
        """Return the currently connected user from the secure cookie.

        :return: current connected user
        """
        cookie_user = self.get_secure_cookie("user")
        return cookie_user
| {
"content_hash": "ac83516124fa4adda8626dc8ac348aa7",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 120,
"avg_line_length": 40.72093023255814,
"alnum_prop": 0.6790405482581382,
"repo_name": "emeric254/CMangosAdminServer",
"id": "c8dc0f3147a71cc17bb68eb346ca4547ab5f8658",
"size": "1798",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/core/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6938"
},
{
"name": "HTML",
"bytes": "12994"
},
{
"name": "Python",
"bytes": "99853"
}
],
"symlink_target": ""
} |
"""This example generates forecast metrics for a keyword plan.
To create a keyword plan, run the add_keyword_plan.py example.
"""
import argparse
import sys
from google.ads.googleads.client import GoogleAdsClient
from google.ads.googleads.errors import GoogleAdsException
# [START generate_forecast_metrics]
def main(client, customer_id, keyword_plan_id):
    """Print forecast metrics for every keyword in the given keyword plan."""
    keyword_plan_service = client.get_service("KeywordPlanService")
    plan_resource = keyword_plan_service.keyword_plan_path(
        customer_id, keyword_plan_id
    )
    response = keyword_plan_service.generate_forecast_metrics(
        keyword_plan=plan_resource
    )

    for index, forecast in enumerate(response.keyword_forecasts, start=1):
        print(f"#{index} Keyword ID: {forecast.keyword_plan_ad_group_keyword}")
        metrics = forecast.keyword_forecast
        # Zero/unset metrics are reported as "unspecified"; the last metric
        # line carries a trailing newline to separate forecasts.
        for label, value, tail in (
            ("Estimated total clicks", metrics.clicks, ""),
            ("Estimated total impressions", metrics.impressions, ""),
            ("Estimated average cpc", metrics.average_cpc, "\n"),
        ):
            rendered = f"{value:.2f}" if value else "unspecified"
            print(f"{label}: {rendered}{tail}")
# [END generate_forecast_metrics]
if __name__ == "__main__":
    # GoogleAdsClient will read the google-ads.yaml configuration file in the
    # home directory if none is specified.
    googleads_client = GoogleAdsClient.load_from_storage(version="v12")

    parser = argparse.ArgumentParser(
        description="Generates forecast metrics for a keyword plan."
    )
    # The following argument(s) should be provided to run the example.
    for short_flag, long_flag, help_text in (
        ("-c", "--customer_id", "The Google Ads customer ID."),
        ("-k", "--keyword_plan_id", "A Keyword Plan ID."),
    ):
        parser.add_argument(
            short_flag,
            long_flag,
            type=str,
            required=True,
            help=help_text,
        )
    args = parser.parse_args()

    try:
        main(googleads_client, args.customer_id, args.keyword_plan_id)
    except GoogleAdsException as ex:
        print(
            f'Request with ID "{ex.request_id}" failed with status '
            f'"{ex.error.code().name}" and includes the following errors:'
        )
        for error in ex.failure.errors:
            print(f'\tError with message "{error.message}".')
            if error.location:
                for element in error.location.field_path_elements:
                    print(f"\t\tOn field: {element.field_name}")
        sys.exit(1)
| {
"content_hash": "1206755771a0f0393ee83c88b58ca0a3",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 77,
"avg_line_length": 33.03703703703704,
"alnum_prop": 0.640881913303438,
"repo_name": "googleads/google-ads-python",
"id": "a6691d256c9b3aee61cf90f43d0b70a848460e2a",
"size": "3273",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/planning/generate_forecast_metrics.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23399881"
}
],
"symlink_target": ""
} |
from django import forms
from django.core.validators import validate_slug
class SubmissionForm(forms.Form):
    """Form with a single slug-validated field for suggesting a topic."""

    submission = forms.CharField(
        label='Suggest Topic',
        help_text='144 characters max',
        max_length=144,
        required=True,
        validators=[validate_slug],
        widget=forms.TextInput(
            attrs={'size': '50', 'placeholder': "large-4.columns"}
        ),
    )
| {
"content_hash": "ddc77175fdeb1c93a76037954993caa7",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 85,
"avg_line_length": 32,
"alnum_prop": 0.6692708333333334,
"repo_name": "CSUChico-CINS465/CINS465-Fall2016-Lecture-Examples",
"id": "497f43695cbedb08c670534ad2f10a4bbb523757",
"size": "384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lecture10/lecture10/formexample/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "71"
},
{
"name": "HTML",
"bytes": "25721"
},
{
"name": "JavaScript",
"bytes": "2540"
},
{
"name": "Python",
"bytes": "159760"
},
{
"name": "Shell",
"bytes": "32668"
}
],
"symlink_target": ""
} |
from calvin.runtime.south.async import async
from calvin.runtime.south.async import server_connection
from calvin.runtime.south.async import threads
from calvin.utilities.calvinlogger import get_logger
import pytest
import socket
# Module-level logger for this test module.
_log = get_logger(__name__)
def data_available(conn):
    """Busy-wait until the connection reports data available; returns True.

    NOTE(review): spins without sleeping and has no timeout -- relies on an
    external test timeout if the condition never becomes true.
    """
    first_print = True
    # Compares `is False` explicitly, not truthiness, matching the
    # data_available flag's initial value.
    while conn.data_available is False:
        if first_print:
            print "waiting for conn.data_available ... ",
            first_print = False
    print ""
    return True
def connection_made(factory):
    """Busy-wait until the factory has at least one connection; returns True."""
    first_print = True
    while not factory.connections:
        if first_print:
            print "waiting for connection ... ",
            first_print = False
    print ""
    return True
def hundred_connection_made(factory):
    """Busy-wait until the factory holds exactly 100 connections; returns True."""
    first_print = True
    while not len(factory.connections) == 100:
        if first_print:
            print "waiting for 100 connection ... ",
            first_print = False
    print ""
    return True
def no_more_connections(factory):
    """Busy-wait until every factory connection has closed; returns True."""
    first_print = True
    while factory.connections:
        if first_print:
            print "waiting for connections to close ... ",
            first_print = False
    print ""
    return True
def print_header(string):
    """Log a prominent section header for test output readability."""
    # Lazy %-formatting: the message is only built if INFO is enabled.
    _log.info("\n\n### %s ###", string)
# Stub standing in for the runtime scheduler expected by the factory.
class Scheduler_stub(object):
    def trigger_loop(self, actor_ids=None):
        """ Trigger the loop_once """
        # Re-arms itself on the reactor; actor_ids is accepted but unused.
        async.DelayedCall(0, self.trigger_loop)
        return
class TestServer(object):
    """Socket-level tests for server_connection.ServerProtocolFactory.

    Each test starts a factory on localhost:8123, drives it with plain
    client sockets, and polls factory/connection state from worker threads
    via threads.defer_to_thread (the busy-wait helpers above).
    """

    @pytest.mark.essential
    @pytest.inlineCallbacks
    def test_default_line_mode(self):
        # Default mode delivers data line-by-line, delimiter stripped.
        print_header("TEST_DEFAULT_LINE_MODE")

        print_header("Setup")
        scheduler = Scheduler_stub()
        self.factory = server_connection.ServerProtocolFactory(scheduler.trigger_loop)
        self.factory.start('localhost', 8123)
        self.conn = None
        self.client_socket = None

        print_header("Test_Connection")
        ##################################################################
        self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # No connections before the client connects.
        assert not self.factory.connections
        assert not self.factory.pending_connections
        yield threads.defer_to_thread(self.client_socket.connect, ('localhost', 8123))
        yield threads.defer_to_thread(connection_made, self.factory)
        assert self.factory.pending_connections
        _, self.conn = self.factory.accept()
        ####################################################################

        ####################################################################
        print_header("Test_Line_Received")
        ####################################################################
        assert self.conn.data_available is False
        yield threads.defer_to_thread(self.client_socket.send, "sending string \r\n")
        yield threads.defer_to_thread(data_available, self.conn)
        # The '\r\n' delimiter is stripped from the delivered line.
        assert self.conn.data_get() == "sending string "

        print_header("Teardown")
        self.factory.stop()
        yield threads.defer_to_thread(no_more_connections, self.factory)

    @pytest.mark.essential
    @pytest.inlineCallbacks
    def test_args_in_line_mode(self):
        # Custom delimiter; max_length appears advisory here: a 4-char line
        # is still delivered whole despite max_length=3.
        print_header("TEST_ARGS_IN_LINE_MODE")

        print_header("Setup")
        scheduler = Scheduler_stub()
        self.factory = server_connection.ServerProtocolFactory(scheduler.trigger_loop, delimiter='end', max_length=3)
        self.factory.start('localhost', 8123)
        self.conn = None
        self.client_socket = None
        self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        yield threads.defer_to_thread(self.client_socket.connect, ('localhost', 8123))
        yield threads.defer_to_thread(connection_made, self.factory)
        _, self.conn = self.factory.accept()

        print_header("Test_Short_Line_Received")
        ####################################################################
        yield threads.defer_to_thread(self.client_socket.send, "123end")
        yield threads.defer_to_thread(data_available, self.conn)
        assert self.conn.data_get() == "123"

        print_header("Test_Long_Line_Received")
        ####################################################################
        yield threads.defer_to_thread(self.client_socket.send, "1234end")
        yield threads.defer_to_thread(data_available, self.conn)
        assert self.conn.data_get() == "1234"

        print_header("Teardown")
        self.factory.stop()
        yield threads.defer_to_thread(no_more_connections, self.factory)

    @pytest.mark.essential
    @pytest.inlineCallbacks
    def test_raw_mode(self):
        # Raw mode chunks incoming bytes into max_length-sized pieces.
        print_header("TEST_RAW_MODE")

        print_header("Setup")
        scheduler = Scheduler_stub()
        self.factory = server_connection.ServerProtocolFactory(scheduler.trigger_loop, mode='raw', max_length=10)
        self.factory.start('localhost', 8123)
        self.conn = None
        self.client_socket = None

        print_header("Test_Connection")
        ##################################################################
        self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        yield threads.defer_to_thread(self.client_socket.connect, ('localhost', 8123))
        yield threads.defer_to_thread(connection_made, self.factory)
        assert self.factory.pending_connections
        _, self.conn = self.factory.accept()
        # accept() moves the connection out of the pending set.
        assert not self.factory.pending_connections

        print_header("Test_Data_Received")
        ####################################################################
        assert self.conn.data_available is False
        yield threads.defer_to_thread(self.client_socket.send, "abcdefghijklmnopqrstuvxyz123456789")
        yield threads.defer_to_thread(data_available, self.conn)
        # 34 bytes arrive as three 10-byte chunks plus a 4-byte remainder.
        assert self.conn.data_get() == "abcdefghij"
        assert self.conn.data_get() == "klmnopqrst"
        assert self.conn.data_get() == "uvxyz12345"
        assert self.conn.data_get() == "6789"

        print_header("Teardown")
        self.factory.stop()
        yield threads.defer_to_thread(no_more_connections, self.factory)

    @pytest.mark.slow
    @pytest.inlineCallbacks
    def test_many_clients(self):
        # NOTE(review): unlike the other tests, this one has no visible
        # teardown (factory.stop()) -- confirm against the full source.
        print_header("TEST_MANY_CLIENTS")

        print_header("Setup")
        scheduler = Scheduler_stub()
        self.factory = server_connection.ServerProtocolFactory(scheduler.trigger_loop, mode='raw', max_length=10)
        self.factory.start('localhost', 8123)
        self.conn = None
        self.client_socket = None

        print_header("Test_Connection")
        ##################################################################
        clients = []
        for i in range(100):
            clients.append(socket.socket(socket.AF_INET, socket.SOCK_STREAM))
        for c in clients:
            yield threads.defer_to_thread(c.connect, ('localhost', 8123))
        yield threads.defer_to_thread(hundred_connection_made, self.factory)
        assert len(self.factory.pending_connections) == 100
        # Accepting all 100 drains the pending set completely.
        for i in range(100):
            _, self.conn = self.factory.accept()
        assert not self.factory.pending_connections
| {
"content_hash": "0caacdadc8de685aa9df2d72cf3a89e1",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 117,
"avg_line_length": 35.84,
"alnum_prop": 0.5895647321428571,
"repo_name": "EricssonResearch/calvin-base",
"id": "f61580345563b587bcdf508faa855cf18df518f9",
"size": "7773",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "calvin/runtime/south/async/twistedimpl/tests/test_server_connection.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "769"
},
{
"name": "Dockerfile",
"bytes": "612"
},
{
"name": "HTML",
"bytes": "24571"
},
{
"name": "JavaScript",
"bytes": "78325"
},
{
"name": "Makefile",
"bytes": "816"
},
{
"name": "Python",
"bytes": "3291484"
},
{
"name": "Shell",
"bytes": "37140"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Add the optional `url` field to multilingual_events.Event."""

    def forwards(self, orm):
        """Apply: add the `url` column (blank-allowed URLField) to Event."""
        # Adding field 'Event.url'
        db.add_column('multilingual_events_event', 'url',
                      self.gf('django.db.models.fields.URLField')(default='', max_length=200, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        """Revert: drop the `url` column from Event."""
        # Deleting field 'Event.url'
        db.delete_column('multilingual_events_event', 'url')

    # Frozen ORM state at the time of this migration (South-generated
    # boilerplate); do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'cms.cmsplugin': {
            'Meta': {'object_name': 'CMSPlugin'},
            'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 25, 0, 0)'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
            'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
            'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'cms.placeholder': {
            'Meta': {'object_name': 'Placeholder'},
            'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'document_library.document': {
            'Meta': {'ordering': "('position', '-creation_date')", 'object_name': 'Document'},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['document_library.DocumentCategory']", 'null': 'True', 'blank': 'True'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'download_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_on_front_page': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'position': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'source_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'document_library.documentcategory': {
            'Meta': {'object_name': 'DocumentCategory'},
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '32'})
        },
        'multilingual_events.event': {
            'Meta': {'ordering': "('start_date',)", 'object_name': 'Event'},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['multilingual_events.EventCategory']"}),
            'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'end_time': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'placeholders': ('djangocms_utils.fields.M2MPlaceholderField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
            'start_date': ('django.db.models.fields.DateField', [], {}),
            'start_time': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
            'timezone': ('django.db.models.fields.CharField', [], {'max_length': '65', 'blank': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'multilingual_events.eventagendaday': {
            'Meta': {'object_name': 'EventAgendaDay', 'db_table': "'cmsplugin_eventagendaday'", '_ormbases': ['cms.CMSPlugin']},
            'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
            'date': ('django.db.models.fields.DateField', [], {}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        'multilingual_events.eventagendasession': {
            'Meta': {'object_name': 'EventAgendaSession', 'db_table': "'cmsplugin_eventagendasession'", '_ormbases': ['cms.CMSPlugin']},
            'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'max_length': '4000', 'blank': 'True'}),
            'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['document_library.Document']", 'null': 'True', 'blank': 'True'}),
            'end_time': ('django.db.models.fields.DateTimeField', [], {}),
            'start_time': ('django.db.models.fields.DateTimeField', [], {}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        'multilingual_events.eventagendatalk': {
            'Meta': {'object_name': 'EventAgendaTalk', 'db_table': "'cmsplugin_eventagendatalk'", '_ormbases': ['cms.CMSPlugin']},
            'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'max_length': '4000', 'blank': 'True'}),
            'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['document_library.Document']", 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        'multilingual_events.eventcategory': {
            'Meta': {'object_name': 'EventCategory'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'position': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '32'})
        },
        'multilingual_events.eventcategorytitle': {
            'Meta': {'object_name': 'EventCategoryTitle'},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['multilingual_events.EventCategory']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        'multilingual_events.eventtitle': {
            'Meta': {'object_name': 'EventTitle'},
            'address': ('django.db.models.fields.TextField', [], {'max_length': '2000', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['multilingual_events.Event']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '512'})
        }
    }

    complete_apps = ['multilingual_events']
| {
"content_hash": "1d392105494e976ca8ba1052b69e9f5f",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 182,
"avg_line_length": 77.0377358490566,
"alnum_prop": 0.5564535880480039,
"repo_name": "bitmazk/django-multilingual-events",
"id": "6abbeb79bda6392c31fc1fbed8ccbff2b09fab2e",
"size": "12288",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "multilingual_events/south_migrations/0020_auto__add_field_event_url.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4247"
},
{
"name": "Python",
"bytes": "532006"
}
],
"symlink_target": ""
} |
from flask import Blueprint, render_template, session, redirect, url_for, \
request, flash, g, jsonify, abort, current_app
from flask.ext.principal import identity_changed, identity_loaded, Identity, \
AnonymousIdentity, RoleNeed
from NHDH import app
from NHDH.database import db_session
from NHDH.models.user import User, roles
# This blueprint does not provide url prefix!
# Routes below are registered at the application root (e.g. /login, /logout).
mod = Blueprint('authentication', __name__)
@mod.route('/login', methods=['GET', 'POST'])
@oid.loginhandler
def login():
    """OpenID login entry point.

    NOTE(review): neither `oid` nor `COMMON_PROVIDERS` is imported in this
    module as shown -- presumably provided by a shared extension module;
    verify the imports.
    """
    # Already signed in: skip login entirely.
    if g.user is not None:
        return redirect(url_for('profile.view'))
    if 'cancel' in request.form:
        flash(u'Cancelled. The OpenID was not changed.')
        return redirect(oid.get_next_url())
    openid = request.values.get('openid')
    if not openid:
        # Fall back to a well-known provider selected via ?provider=...
        openid = COMMON_PROVIDERS.get(request.args.get('provider'))
    if openid:
        return oid.try_login(openid, ask_for=['fullname', 'email', 'language'])
    # No OpenID supplied: surface any pending error and re-render the form.
    error = oid.fetch_error()
    if error:
        flash(u'Error: ' + error)
    return render_template('authentication/login.html', next=oid.get_next_url())
@mod.route('/logout')
def logout():
    """Log the current user out and redirect to the home page."""
    if 'user' in session:
        # Tell Principal the identity reverted to anonymous.
        identity_changed.send(
            current_app._get_current_object(),
            identity = AnonymousIdentity()
        )
    # Drop every session key tied to the authenticated identity.
    for stale_key in ('user', 'openid', 'identity.name', 'identity.auth_type'):
        session.pop(stale_key, None)
    flash(u'Logged out')
    return redirect(url_for('home.index'))
@oid.after_login
def create_or_login(response):
    '''
    This is the hook for OpenID.try_login and is being called after a response
    has been received.

    Unknown OpenIDs are redirected to the first-login flow; known users are
    signed in and their stored OpenID is refreshed if it changed.
    '''
    session['user'] = {}
    session['openid'] = response.identity_url
    user = g.user or User.query.filter_by(openid=response.identity_url).first()
    if user is None:
        # First visit with this OpenID: stash the email and send the user
        # to the account-creation step.
        name = response.fullname or response.nickname
        session['user']['email'] = response.email
        params = dict(next=oid.get_next_url(), name=name)
        return redirect(url_for('.first_login', **params))
    g.user = user
    identity = Identity(user.id)
    # Notify Principal of the identity change
    identity_changed.send(
        current_app._get_current_object(),
        identity=identity
    )
    if user.openid != response.identity_url:
        # Persist the new OpenID for this existing account.
        user.openid = response.identity_url
        db_session.commit()
        flash(u'OpenID identity changed')
    else:
        flash(u'Successfully signed in', 'hurray')
    return redirect(oid.get_next_url())
@identity_loaded.connect_via(app)
def on_identity_loaded(sender, identity):
    """Populate a freshly loaded identity with the user's role needs."""
    # identity.name carries the user id set at login time.
    user = User.query.filter_by(id=identity.name).first()
    if not user:
        return
    load_identity(identity, user)
@identity_changed.connect_via(app)
def on_identity_changed(sender, identity):
    """Re-apply role needs when the identity changes mid-request."""
    # g.user is set by create_or_login; nothing to grant when anonymous.
    if not g.user:
        return
    load_identity(identity, g.user)
def load_identity(identity, user):
    '''
    Handles loading the user identity

    Everyone gets the base "user" need.  Active, verified users with a role
    additionally receive their own role plus every role with a higher
    numeric key (the `roles` mapping is keyed by rank).
    '''
    identity.provides.add(RoleNeed("user"))
    # Inactive or unverified accounts keep only the base "user" need.
    if not user.active or not user.verified:
        return
    if not user.role:
        return
    identity.provides.add(RoleNeed(roles.get(user.role)))
    # Grant every role ranked above the user's own.
    # (Unused locals `top` and `role` from the original were removed.)
    for rank in roles.keys():
        if rank > user.role:
            identity.provides.add(RoleNeed(roles.get(rank)))
"content_hash": "ec2dc248cdbea4a23ca257a4bd8a5a8e",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 80,
"avg_line_length": 27.5,
"alnum_prop": 0.6411326378539494,
"repo_name": "monk-ee/NHDH",
"id": "69b43b803ca9def590e5986e858854a3328bab3f",
"size": "3355",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "NHDH/views/authentication.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "119585"
},
{
"name": "JavaScript",
"bytes": "640635"
},
{
"name": "Python",
"bytes": "25013"
}
],
"symlink_target": ""
} |
import os.path

# Django settings for the test (Travis CI) environment.
PROJECT_DIR = os.path.dirname(__file__)
STATIC_DOC_ROOT = os.path.join(PROJECT_DIR, "static")
STATICFILES_DIRS = (os.path.join(PROJECT_DIR, "static"),
    )

# NOTE: DEBUG was previously assigned twice (True near the top, False
# further down); only the final False value ever took effect, so the
# duplicate assignment has been removed.
DEBUG = False
TEMPLATE_DEBUG = DEBUG

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_TZ = True
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = ('django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader')
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'debug_toolbar.middleware.DebugToolbarMiddleware'
    )
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.contrib.messages.context_processors.messages',
    'django.core.context_processors.static',
)
ROOT_URLCONF = 'experimentdb.urls'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(PROJECT_DIR, "templates"),
)
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.comments',
    'django.contrib.messages',
    'django.contrib.sites',
    'django.contrib.admin',
    'django.contrib.admindocs',
    'django.contrib.staticfiles',
    'projects',
    'proteins',
    'reagents',
    'external',
    'cloning',
    'datasets',
    'sharing',
    'data',
    'hypotheses',
    'ajax_select',
    'south',
    'PIL',
    'braces'
)
INTERNAL_IPS = ('127.0.0.1',)
AJAX_LOOKUP_CHANNELS = {
    'antibody' : ('reagents.lookups', 'AntibodyLookup'),
    'construct' : ('reagents.lookups', 'ConstructLookup'),
    'chemical' : ('reagents.lookups', 'ChemicalLookup'),
    'siRNA' : ('reagents.lookups', 'SiRNALookup'),
    'strain' : ('reagents.lookups', 'StrainLookup'),
    'cell' : ('reagents.lookups', 'CellLineLookup'),
    'protein' : ('proteins.lookups', 'ProteinLookup'),
    'protocol' : ('data.lookups', 'ProtocolLookup'),
}
# magically include jqueryUI/js/css
AJAX_SELECT_BOOTSTRAP = True
AJAX_SELECT_INLINES = 'inline'
# ADMINS must be a sequence of (name, email) pairs.  BUG FIX: without the
# trailing comma the original value collapsed into a single
# ('Your Name', 'email@company.com') tuple of two strings.
ADMINS = (
    ('Your Name', 'email@company.com'),
)
MANAGERS = ADMINS
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/' #serve the MEDIA_ROOT to this url
LOGIN_URL = '/accounts/login/'
STATIC_URL = '/static/'
#these locations can be absolue paths or relative to the installation (as is shown here)
MEDIA_ROOT = "/var/www/media/files" #set to where pictures and files will be stored. Default is media folder and this is where MEDIA_URL on your webserver should point
STATIC_ROOT = "/var/www/served-static" #this folder is populated by the collectstatic command and is where STATIC_URL on your webserver should point
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'ci%^08ig-0qu*&b(kz_=n6lvbx*puyx6=8!yxzm0+*z)w@7+%6'
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Detroit'
DATABASES = {
    'default': {
        'NAME': 'default.db', # Or path to database file if using sqlite3.
        'ENGINE': 'django.db.backends.sqlite3', # Choose one of 'django.db.backends.postgresql_psycopg2','django.db.backends.postgresql', 'django.db.backends.mysql', 'django.db.backends.sqlite3', 'django.db.backends.oracle'
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3
        'HOST':'', # Set to empty string for localhost. Not used with sqlite3.
        'PORT':'', # Set to empty string for default. Not used with sqlite3.
    }
}
| {
"content_hash": "42e28254ad0312f1171f46feb0a09459",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 224,
"avg_line_length": 34.88148148148148,
"alnum_prop": 0.6893183266086218,
"repo_name": "davebridges/ExperimentDB",
"id": "92d2e33f7e804e203be396f2b9ce0c315c27d4c2",
"size": "4754",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "experimentdb/travis_settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "3273"
},
{
"name": "CSS",
"bytes": "171399"
},
{
"name": "JavaScript",
"bytes": "858989"
},
{
"name": "PHP",
"bytes": "21302"
},
{
"name": "Python",
"bytes": "247551"
},
{
"name": "Shell",
"bytes": "6713"
}
],
"symlink_target": ""
} |
"""
WSGI config for eucapp project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "eucapp.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "eucapp.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| {
"content_hash": "33b621f4ed7ffd8bc1d8fb8df80e222d",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 79,
"avg_line_length": 44.34375,
"alnum_prop": 0.7928118393234672,
"repo_name": "eucalyptus/eucalyptopotamus",
"id": "9442fdfa3cf3a9b93c98402737a0a2128657d524",
"size": "1419",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django/eucapp/eucapp/wsgi.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "17715"
}
],
"symlink_target": ""
} |
"""
Application Load Balancers
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import six
from collections import defaultdict
from c7n.actions import ActionRegistry, BaseAction, ModifyVpcSecurityGroupsAction
from c7n.exceptions import PolicyValidationError
from c7n.filters import (
Filter, FilterRegistry, DefaultVpcBase, MetricsFilter, ValueFilter)
import c7n.filters.vpc as net_filters
from c7n import tags
from c7n.manager import resources
from c7n.query import QueryResourceManager, DescribeSource, ConfigSource, TypeInfo
from c7n.utils import (
local_session, chunks, type_schema, get_retry, set_annotation)
from c7n.resources.shield import IsShieldProtected, SetShieldProtection
log = logging.getLogger('custodian.app-elb')
@resources.register('app-elb')
class AppELB(QueryResourceManager):
    """Resource manager for v2 ELBs (AKA ALBs and NLBs).
    """

    # Tells the generic query machinery how to enumerate and identify
    # elbv2 load balancers.
    class resource_type(TypeInfo):
        service = 'elbv2'
        # IAM actions use the 'elasticloadbalancing' prefix, not the
        # boto service name ('elbv2').
        permission_prefix = 'elasticloadbalancing'
        enum_spec = ('describe_load_balancers', 'LoadBalancers', None)
        name = 'LoadBalancerName'
        id = 'LoadBalancerArn'
        filter_name = "Names"
        filter_type = "list"
        dimension = "LoadBalancer"
        date = 'CreatedTime'
        config_type = 'AWS::ElasticLoadBalancingV2::LoadBalancer'
        arn = "LoadBalancerArn"
        # The suffix varies by type of loadbalancer (app vs net)
        arn_type = 'loadbalancer/app'

    # Retry throttled API calls instead of failing the policy run.
    retry = staticmethod(get_retry(('Throttling',)))

    @classmethod
    def get_permissions(cls):
        # override as the service is not the iam prefix
        return ("elasticloadbalancing:DescribeLoadBalancers",
                "elasticloadbalancing:DescribeLoadBalancerAttributes",
                "elasticloadbalancing:DescribeTags")

    def get_source(self, source_type):
        # Resources are loaded either live (describe api) or from AWS
        # Config snapshots.
        if source_type == 'describe':
            return DescribeAppElb(self)
        elif source_type == 'config':
            return ConfigAppElb(self)
        raise ValueError("Unsupported source: %s for %s" % (
            source_type, self.resource_type.config_type))
class DescribeAppElb(DescribeSource):
    """Describe-API source for ALBs/NLBs with tag augmentation."""

    def get_resources(self, ids, cache=True):
        """Support server side filtering on arns or names.

        The ids list is treated as homogeneous: the first element
        decides whether it holds ARNs or names.
        """
        if ids[0].startswith('arn:'):
            query = {'LoadBalancerArns': ids}
        else:
            query = {'Names': ids}
        return self.query.filter(self.manager, **query)

    def augment(self, albs):
        # describe_load_balancers does not return tags; fetch them in a
        # second pass.
        _describe_appelb_tags(
            albs,
            self.manager.session_factory,
            self.manager.executor_factory,
            self.manager.retry)
        return albs
class ConfigAppElb(ConfigSource):
    """Load an ALB/NLB resource record from an AWS Config item."""

    def load_resource(self, item):
        resource = super(ConfigAppElb, self).load_resource(item)
        item_tags = item['supplementaryConfiguration']['Tags']
        # Config originally stored supplementaryconfig on elbv2 as json
        # strings. Support that format for historical queries.
        if isinstance(item_tags, six.string_types):
            item_tags = json.loads(item_tags)
        # Normalize Config's lower-case key/value tag records to the
        # boto 'Key'/'Value' shape used everywhere else.
        resource['Tags'] = [
            {'Key': t['key'], 'Value': t['value']} for t in item_tags]
        item_attrs = item['supplementaryConfiguration'][
            'LoadBalancerAttributes']
        if isinstance(item_attrs, six.string_types):
            item_attrs = json.loads(item_attrs)
        # Matches annotation of AppELBAttributeFilterBase filter
        resource['Attributes'] = {
            attr['key']: parse_attribute_value(attr['value']) for
            attr in item_attrs}
        return resource
def _describe_appelb_tags(albs, session_factory, executor_factory, retry):
    """Annotate each alb dict in place with its 'Tags'.

    describe_tags is called in batches of 20 arns, two batches at a time.
    """
    client = local_session(session_factory).client('elbv2')

    def _process_tags(batch):
        by_arn = {alb['LoadBalancerArn']: alb for alb in batch}
        response = retry(client.describe_tags, ResourceArns=list(by_arn.keys()))
        for desc in response['TagDescriptions']:
            arn = desc.get('ResourceArn')
            if arn in by_arn:
                by_arn[arn]['Tags'] = desc['Tags']

    with executor_factory(max_workers=2) as w:
        list(w.map(_process_tags, chunks(albs, 20)))
# Generic tag and shield filters/actions shared with other resource types.
AppELB.filter_registry.register('tag-count', tags.TagCountFilter)
AppELB.filter_registry.register('marked-for-op', tags.TagActionFilter)
AppELB.filter_registry.register('shield-enabled', IsShieldProtected)
AppELB.filter_registry.register('network-location', net_filters.NetworkLocation)
AppELB.action_registry.register('set-shield', SetShieldProtection)
@AppELB.filter_registry.register('metrics')
class AppElbMetrics(MetricsFilter):
    """Filter app load balancers by cloudwatch metric values.

    Available metrics:
    https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-cloudwatch-metrics.html

    Only the load balancer dimension is emitted; the Target Group
    dimension is not supported at the moment.
    """

    def get_dimensions(self, resource):
        # Cloudwatch expects the dimension value as app/<name>/<id-suffix>.
        lb_name = resource[self.model.name]
        lb_suffix = resource[self.model.id].rsplit('/')[-1]
        return [{
            'Name': self.model.dimension,
            'Value': 'app/%s/%s' % (lb_name, lb_suffix)}]
@AppELB.filter_registry.register('security-group')
class SecurityGroupFilter(net_filters.SecurityGroupFilter):
    """Filter ALBs by attributes of their attached security groups."""

    RelatedIdsExpression = "SecurityGroups[]"
@AppELB.filter_registry.register('subnet')
class SubnetFilter(net_filters.SubnetFilter):
    """Filter ALBs by attributes of the subnets they are placed in."""

    RelatedIdsExpression = "AvailabilityZones[].SubnetId"
@AppELB.filter_registry.register('vpc')
class VpcFilter(net_filters.VpcFilter):
    """Filter ALBs by attributes of their containing VPC."""

    RelatedIdsExpression = "VpcId"
@AppELB.filter_registry.register('waf-enabled')
class WafEnabled(Filter):
    """Filter ALBs on their regional WAF web-acl association.

    With no `web-acl` given, `state` selects resources with any (true)
    or no (false) association; with `web-acl` (name or id), `state`
    selects association with that specific acl.
    """
    schema = type_schema(
        'waf-enabled', **{
            'web-acl': {'type': 'string'},
            'state': {'type': 'boolean'}})

    permissions = ('waf-regional:ListResourcesForWebACL', 'waf-regional:ListWebACLs')

    # TODO verify name uniqueness within region/account
    # TODO consider associated resource fetch in augment
    def process(self, resources, event=None):
        client = local_session(self.manager.session_factory).client(
            'waf-regional')

        target_acl = self.data.get('web-acl')
        state = self.data.get('state', False)

        # Build name->id and associated-arn->id maps, caching each acl's
        # association list on the waf resource itself.
        name_id_map = {}
        resource_map = {}

        wafs = self.manager.get_resource_manager('waf-regional').resources()

        for w in wafs:
            if 'c7n:AssociatedResources' not in w:
                arns = client.list_resources_for_web_acl(
                    WebACLId=w['WebACLId']).get('ResourceArns', [])
                w['c7n:AssociatedResources'] = arns
            name_id_map[w['Name']] = w['WebACLId']
            for r in w['c7n:AssociatedResources']:
                resource_map[r] = w['WebACLId']

        # `web-acl` may be given by name or by id.
        target_acl_id = name_id_map.get(target_acl, target_acl)

        # generally frown on runtime validation errors, but also frown on
        # api calls during validation.
        if target_acl and target_acl_id not in name_id_map.values():
            raise ValueError("Invalid target acl:%s, acl not found" % target_acl)

        arn_key = self.manager.resource_type.id

        # Record, per resource arn, whether it satisfies the requested
        # association; annotate the associated acl id as 'c7n_webacl'.
        state_map = {}
        for r in resources:
            arn = r[arn_key]
            if arn in resource_map:
                r['c7n_webacl'] = resource_map[arn]
                if not target_acl:
                    state_map[arn] = True
                    continue
                r_acl = resource_map[arn]
                if r_acl == target_acl_id:
                    state_map[arn] = True
                    continue
                state_map[arn] = False
            else:
                state_map[arn] = False
        return [r for r in resources if state_map[r[arn_key]] == state]
@AppELB.action_registry.register('set-waf')
class SetWaf(BaseAction):
    """Enable/Disable waf protection on applicable resource.
    """
    # NOTE(review): disassociate_web_acl is also called below; this
    # probably needs waf-regional:DisassociateWebACL as well -- confirm.
    permissions = ('waf-regional:AssociateWebACL', 'waf-regional:ListWebACLs')

    schema = type_schema(
        'set-waf', required=['web-acl'], **{
            'web-acl': {'type': 'string'},
            # 'force': {'type': 'boolean'},
            'state': {'type': 'boolean'}})

    def validate(self):
        # Require the waf-enabled filter in the same policy so this
        # action only runs on resources whose association state was
        # actually checked.
        found = False
        for f in self.manager.iter_filters():
            if isinstance(f, WafEnabled):
                found = True
                break
        if not found:
            # try to ensure idempotent usage
            raise PolicyValidationError(
                "set-waf should be used in conjunction with waf-enabled filter on %s" % (
                    self.manager.data,))
        return self

    def process(self, resources):
        wafs = self.manager.get_resource_manager('waf-regional').resources()
        name_id_map = {w['Name']: w['WebACLId'] for w in wafs}
        # `web-acl` may be given by name or by id.
        target_acl = self.data.get('web-acl')
        target_acl_id = name_id_map.get(target_acl, target_acl)
        state = self.data.get('state', True)

        # Only validate the acl id when associating; disassociation does
        # not need an existing acl.
        if state and target_acl_id not in name_id_map.values():
            raise ValueError("invalid web acl: %s" % (target_acl_id))

        client = local_session(
            self.manager.session_factory).client('waf-regional')

        arn_key = self.manager.resource_type.id

        # TODO implement force to reassociate.
        # TODO investigate limits on waf association.
        for r in resources:
            if state:
                client.associate_web_acl(
                    WebACLId=target_acl_id, ResourceArn=r[arn_key])
            else:
                client.disassociate_web_acl(
                    WebACLId=target_acl_id, ResourceArn=r[arn_key])
@AppELB.action_registry.register('set-s3-logging')
class SetS3Logging(BaseAction):
    """Action to enable/disable S3 logging for an application loadbalancer.

    :example:

    .. code-block:: yaml

            policies:
              - name: elbv2-test
                resource: app-elb
                filters:
                  - type: value
                    key: Attributes."access_logs.s3.enabled"
                    value: False
                actions:
                  - type: set-s3-logging
                    bucket: elbv2logtest
                    prefix: dahlogs
                    state: enabled
    """
    schema = type_schema(
        'set-s3-logging',
        state={'enum': ['enabled', 'disabled']},
        bucket={'type': 'string'},
        prefix={'type': 'string'},
        required=('state',))
    permissions = ("elasticloadbalancing:ModifyLoadBalancerAttributes",)

    def validate(self):
        # Bucket and prefix are mandatory when enabling logging.
        if self.data.get('state') == 'enabled':
            if 'bucket' not in self.data or 'prefix' not in self.data:
                raise PolicyValidationError((
                    "alb logging enablement requires `bucket` "
                    "and `prefix` specification on %s" % (self.manager.data,)))
        return self

    def process(self, resources):
        client = local_session(self.manager.session_factory).client('elbv2')
        enabled = self.data.get('state') == 'enabled'
        for elb in resources:
            elb_arn = elb['LoadBalancerArn']
            # BUG FIX: the disable branch previously sent the literal
            # string 'value' instead of 'false' (broken `and/or` idiom),
            # so logging was never actually disabled.
            attributes = [{
                'Key': 'access_logs.s3.enabled',
                'Value': 'true' if enabled else 'false'}]

            if enabled:
                attributes.append({
                    'Key': 'access_logs.s3.bucket',
                    'Value': self.data['bucket']})

                # The prefix may interpolate the elb's tags, DNS name,
                # account id and load balancer name.
                prefix_template = self.data['prefix']
                info = {t['Key']: t['Value'] for t in elb.get('Tags', ())}
                info['DNSName'] = elb.get('DNSName', '')
                info['AccountId'] = elb['LoadBalancerArn'].split(':')[4]
                info['LoadBalancerName'] = elb['LoadBalancerName']
                attributes.append({
                    'Key': 'access_logs.s3.prefix',
                    'Value': prefix_template.format(**info)})

            self.manager.retry(
                client.modify_load_balancer_attributes,
                LoadBalancerArn=elb_arn, Attributes=attributes)
@AppELB.action_registry.register('mark-for-op')
class AppELBMarkForOpAction(tags.TagDelayedAction):
    """Action to create a delayed action on an ELB to start at a later date

    :example:

    .. code-block:: yaml

            policies:
              - name: appelb-failed-mark-for-op
                resource: app-elb
                filters:
                  - "tag:custodian_elb_cleanup": absent
                  - State: failed
                actions:
                  - type: mark-for-op
                    tag: custodian_elb_cleanup
                    msg: "AppElb failed: {op}@{action_date}"
                    op: delete
                    days: 1
    """

    # Tag one load balancer per API call rather than batching.
    batch_size = 1
@AppELB.action_registry.register('tag')
class AppELBTagAction(tags.Tag):
    """Action to create tag/tags on an ELB

    :example:

    .. code-block:: yaml

            policies:
              - name: appelb-create-required-tag
                resource: app-elb
                filters:
                  - "tag:RequiredTag": absent
                actions:
                  - type: tag
                    key: RequiredTag
                    value: RequiredValue
    """

    batch_size = 1
    permissions = ("elasticloadbalancing:AddTags",)

    def process_resource_set(self, client, resource_set, ts):
        # Apply the tag set to every load balancer arn in the batch.
        arns = [alb['LoadBalancerArn'] for alb in resource_set]
        client.add_tags(ResourceArns=arns, Tags=ts)
@AppELB.action_registry.register('remove-tag')
class AppELBRemoveTagAction(tags.RemoveTag):
    """Action to remove tag/tags from an ELB

    :example:

    .. code-block:: yaml

            policies:
              - name: appelb-delete-expired-tag
                resource: app-elb
                filters:
                  - "tag:ExpiredTag": present
                actions:
                  - type: remove-tag
                    tags: ["ExpiredTag"]
    """

    batch_size = 1
    permissions = ("elasticloadbalancing:RemoveTags",)

    def process_resource_set(self, client, resource_set, tag_keys):
        # Strip the given tag keys from every load balancer in the batch.
        arns = [alb['LoadBalancerArn'] for alb in resource_set]
        client.remove_tags(ResourceArns=arns, TagKeys=tag_keys)
@AppELB.action_registry.register('delete')
class AppELBDeleteAction(BaseAction):
    """Action to delete an ELB

    To avoid unwanted deletions of ELB, it is recommended to apply a filter
    to the rule

    :example:

    .. code-block:: yaml

            policies:
              - name: appelb-delete-failed-elb
                resource: app-elb
                filters:
                  - State: failed
                actions:
                  - delete
    """

    # `force: true` clears deletion protection before deleting.
    schema = type_schema('delete', force={'type': 'boolean'})
    permissions = (
        "elasticloadbalancing:DeleteLoadBalancer",
        "elasticloadbalancing:ModifyLoadBalancerAttributes",)

    def process(self, load_balancers):
        client = local_session(self.manager.session_factory).client('elbv2')
        for lb in load_balancers:
            self.process_alb(client, lb)

    def process_alb(self, client, alb):
        try:
            # With force, disable deletion protection first; without it,
            # a protected alb surfaces as OperationNotPermitted below.
            if self.data.get('force'):
                client.modify_load_balancer_attributes(
                    LoadBalancerArn=alb['LoadBalancerArn'],
                    Attributes=[{
                        'Key': 'deletion_protection.enabled',
                        'Value': 'false',
                    }])
            self.manager.retry(
                client.delete_load_balancer, LoadBalancerArn=alb['LoadBalancerArn'])
        except client.exceptions.LoadBalancerNotFoundException:
            # Already gone; treat as success.
            pass
        except client.exceptions.OperationNotPermittedException as e:
            # e.g. deletion protection still enabled; log and continue.
            self.log.warning(
                "Exception trying to delete ALB: %s error: %s",
                alb['LoadBalancerArn'], e)
class AppELBListenerFilterBase(object):
    """Mixin base class for filters that query LB listeners."""

    permissions = ("elasticloadbalancing:DescribeListeners",)

    def initialize(self, albs):
        """Build self.listener_map: alb arn -> list of listener dicts."""
        client = local_session(self.manager.session_factory).client('elbv2')
        self.listener_map = defaultdict(list)
        for alb in albs:
            arn = alb['LoadBalancerArn']
            try:
                response = client.describe_listeners(LoadBalancerArn=arn)
            except client.exceptions.LoadBalancerNotFoundException:
                # The alb disappeared since enumeration; skip it.
                continue
            self.listener_map[arn] = response['Listeners']
def parse_attribute_value(v):
    """Cast a raw ELB attribute string to int or bool where recognizable.

    Digit-only strings become ints, the literals 'true'/'false' become
    booleans, anything else is returned unchanged.
    """
    if v.isdigit():
        return int(v)
    if v == 'true':
        return True
    if v == 'false':
        return False
    return v
class AppELBAttributeFilterBase(object):
    """Mixin base class for filters that query LB attributes."""

    def initialize(self, albs):
        """Annotate each alb with its 'Attributes' dict (cast values)."""
        client = local_session(self.manager.session_factory).client('elbv2')

        def _fetch(alb):
            attrs = alb.setdefault('Attributes', {})
            response = client.describe_load_balancer_attributes(
                LoadBalancerArn=alb['LoadBalancerArn'])
            # Flatten the Key/Value pair list into a dict, casting the
            # string values to int/bool where possible.
            for pair in response['Attributes']:
                attrs[pair['Key']] = parse_attribute_value(pair['Value'])

        with self.manager.executor_factory(max_workers=2) as w:
            list(w.map(_fetch, albs))
@AppELB.filter_registry.register('is-logging')
class IsLoggingFilter(Filter, AppELBAttributeFilterBase):
    """ Matches AppELBs that are logging to S3.
    bucket and prefix are optional

    :example:

    .. code-block:: yaml

            policies:
                - name: alb-is-logging-test
                  resource: app-elb
                  filters:
                    - type: is-logging

                - name: alb-is-logging-bucket-and-prefix-test
                  resource: app-elb
                  filters:
                    - type: is-logging
                      bucket: prodlogs
                      prefix: alblogs
    """
    permissions = ("elasticloadbalancing:DescribeLoadBalancerAttributes",)
    schema = type_schema('is-logging',
                         bucket={'type': 'string'},
                         prefix={'type': 'string'}
                         )

    def process(self, resources, event=None):
        self.initialize(resources)
        want_bucket = self.data.get('bucket', None)
        want_prefix = self.data.get('prefix', None)
        matched = []
        for alb in resources:
            attrs = alb['Attributes']
            if not attrs['access_logs.s3.enabled']:
                continue
            # When given, bucket/prefix must match exactly.
            if want_bucket and want_bucket != attrs.get('access_logs.s3.bucket', None):
                continue
            if want_prefix and want_prefix != attrs.get('access_logs.s3.prefix', None):
                continue
            matched.append(alb)
        return matched
@AppELB.filter_registry.register('is-not-logging')
class IsNotLoggingFilter(Filter, AppELBAttributeFilterBase):
    """ Matches AppELBs that are NOT logging to S3.
    or do not match the optional bucket and/or prefix.

    :example:

    .. code-block:: yaml

            policies:
                - name: alb-is-not-logging-test
                  resource: app-elb
                  filters:
                    - type: is-not-logging

                - name: alb-is-not-logging-bucket-and-prefix-test
                  resource: app-elb
                  filters:
                    - type: is-not-logging
                      bucket: prodlogs
                      prefix: alblogs
    """
    permissions = ("elasticloadbalancing:DescribeLoadBalancerAttributes",)
    schema = type_schema('is-not-logging',
                         bucket={'type': 'string'},
                         prefix={'type': 'string'}
                         )

    def process(self, resources, event=None):
        self.initialize(resources)
        want_bucket = self.data.get('bucket', None)
        want_prefix = self.data.get('prefix', None)
        results = []
        # Only application load balancers support s3 access logs.
        for alb in resources:
            if alb['Type'] != 'application':
                continue
            attrs = alb['Attributes']
            mismatch = (
                not attrs['access_logs.s3.enabled'] or
                (want_bucket and
                 want_bucket != attrs.get('access_logs.s3.bucket', None)) or
                (want_prefix and
                 want_prefix != attrs.get('access_logs.s3.prefix', None)))
            if mismatch:
                results.append(alb)
        return results
class AppELBTargetGroupFilterBase(object):
    """Mixin base class for filters that query LB target groups."""

    def initialize(self, albs):
        """Build self.target_group_map: alb arn -> attached target groups."""
        self.target_group_map = defaultdict(list)
        groups = self.manager.get_resource_manager(
            'app-elb-target-group').resources()
        for group in groups:
            for lb_arn in group['LoadBalancerArns']:
                self.target_group_map[lb_arn].append(group)
@AppELB.filter_registry.register('listener')
class AppELBListenerFilter(ValueFilter, AppELBListenerFilterBase):
    """Filter ALB based on matching listener attributes

    Adding the `matched` flag will filter on previously matched listeners

    :example:

    .. code-block:: yaml

            policies:
              - name: app-elb-invalid-ciphers
                resource: app-elb
                filters:
                  - type: listener
                    key: Protocol
                    value: HTTPS
                  - type: listener
                    key: SslPolicy
                    value: ['ELBSecurityPolicy-TLS-1-1-2017-01','ELBSecurityPolicy-TLS-1-2-2017-01']
                    op: ni
                    matched: true
                actions:
                  - type: modify-listener
                    sslpolicy: "ELBSecurityPolicy-TLS-1-2-2017-01"
    """

    schema = type_schema(
        'listener', rinherit=ValueFilter.schema, matched={'type': 'boolean'})
    schema_alias = False
    permissions = ("elasticloadbalancing:DescribeLoadBalancerAttributes",)

    def validate(self):
        # A `matched: true` filter only makes sense after a preceding
        # listener filter has populated c7n:MatchedListeners.
        if not self.data.get('matched'):
            return
        listeners = list(self.manager.iter_filters())
        found = False
        # NOTE(review): this scans all filters preceding self and accepts
        # any one without `matched` set -- it does not verify f.type is
        # 'listener'; presumably relies on policy authors ordering their
        # listener filters.  Confirm before tightening.
        for f in listeners[:listeners.index(self)]:
            if not f.data.get('matched', False):
                found = True
                break
        if not found:
            raise PolicyValidationError(
                "matched listener filter, requires preceding listener filter on %s " % (
                    self.manager.data,))
        return self

    def process(self, albs, event=None):
        # Fetch listeners for all albs, then run the value filter.
        self.initialize(albs)
        return super(AppELBListenerFilter, self).process(albs, event)

    def __call__(self, alb):
        listeners = self.listener_map[alb['LoadBalancerArn']]
        if self.data.get('matched', False):
            # Consume (pop) the previous filter's matches so chained
            # matched-filters progressively narrow the listener set.
            listeners = alb.pop('c7n:MatchedListeners', [])
        found_listeners = False
        for listener in listeners:
            if self.match(listener):
                # Record the matching listener for downstream actions.
                set_annotation(alb, 'c7n:MatchedListeners', listener)
                found_listeners = True
        return found_listeners
@AppELB.action_registry.register('modify-listener')
class AppELBModifyListenerPolicy(BaseAction):
    """Action to modify the policy for an App ELB

    :example:

    .. code-block:: yaml

            policies:
              - name: appelb-modify-listener
                resource: app-elb
                filters:
                  - type: listener
                    key: Protocol
                    value: HTTP
                actions:
                  - type: modify-listener
                    protocol: HTTPS
                    sslpolicy: "ELBSecurityPolicy-TLS-1-2-2017-01"
                    certificate: "arn:aws:acm:region:123456789012:certificate/12345678-\
                    1234-1234-1234-123456789012"
    """

    schema = type_schema(
        'modify-listener',
        port={'type': 'integer'},
        protocol={'enum': ['HTTP', 'HTTPS']},
        sslpolicy={'type': 'string'},
        certificate={'type': 'string'}
    )
    permissions = ("elasticloadbalancing:ModifyListener",)

    def validate(self):
        # The listener filter annotates c7n:MatchedListeners, which this
        # action requires to know what to modify.
        if any(f.type == 'listener' for f in self.manager.iter_filters()):
            return self
        raise PolicyValidationError(
            "modify-listener action requires the listener filter %s" % (
                self.manager.data,))

    def process(self, load_balancers):
        # Translate policy keys into ModifyListener api parameters.
        params = {}
        for data_key, api_key in (
                ('port', 'Port'),
                ('protocol', 'Protocol'),
                ('sslpolicy', 'SslPolicy')):
            if data_key in self.data:
                params[api_key] = self.data.get(data_key)
        if 'certificate' in self.data:
            params['Certificates'] = [
                {'CertificateArn': self.data.get('certificate')}]

        client = local_session(self.manager.session_factory).client('elbv2')
        for alb in load_balancers:
            for matched_listener in alb.get('c7n:MatchedListeners', ()):
                client.modify_listener(
                    ListenerArn=matched_listener['ListenerArn'],
                    **params)
@AppELB.action_registry.register('modify-security-groups')
class AppELBModifyVpcSecurityGroups(ModifyVpcSecurityGroupsAction):
    """Replace the security groups attached to each matched ALB."""

    permissions = ("elasticloadbalancing:SetSecurityGroups",)

    def process(self, albs):
        client = local_session(self.manager.session_factory).client('elbv2')
        # get_groups returns the resolved group list per resource, in
        # the same order as albs.
        groups = super(AppELBModifyVpcSecurityGroups, self).get_groups(albs)
        for position, alb in enumerate(albs):
            try:
                client.set_security_groups(
                    LoadBalancerArn=alb['LoadBalancerArn'],
                    SecurityGroups=groups[position])
            except client.exceptions.LoadBalancerNotFoundException:
                # Deleted since enumeration; skip.
                continue
@AppELB.filter_registry.register('healthcheck-protocol-mismatch')
class AppELBHealthCheckProtocolMismatchFilter(Filter,
                                              AppELBTargetGroupFilterBase):
    """Filter AppELBs with mismatched health check protocols

    A mismatched health check protocol is where the protocol on the target group
    does not match the load balancer health check protocol

    :example:

    .. code-block:: yaml

            policies:
              - name: appelb-healthcheck-mismatch
                resource: app-elb
                filters:
                  - healthcheck-protocol-mismatch
    """

    schema = type_schema('healthcheck-protocol-mismatch')
    permissions = ("elasticloadbalancing:DescribeTargetGroups",)

    def process(self, albs, event=None):
        self.initialize(albs)

        def _mismatched(alb):
            # Any attached target group whose traffic protocol differs
            # from its health check protocol counts as a mismatch.
            groups = self.target_group_map[alb['LoadBalancerArn']]
            return any(
                tg['Protocol'] != tg['HealthCheckProtocol'] for tg in groups)

        return [alb for alb in albs if _mismatched(alb)]
@AppELB.filter_registry.register('target-group')
class AppELBTargetGroupFilter(ValueFilter, AppELBTargetGroupFilterBase):
    """Filter ALB based on matching target group value"""

    schema = type_schema('target-group', rinherit=ValueFilter.schema)
    schema_alias = False
    permissions = ("elasticloadbalancing:DescribeTargetGroups",)

    def process(self, albs, event=None):
        # Load the target group map before the value filter runs.
        self.initialize(albs)
        return super(AppELBTargetGroupFilter, self).process(albs, event)

    def __call__(self, alb):
        # Match against the list of target groups attached to this alb.
        return self.match(self.target_group_map[alb['LoadBalancerArn']])
@AppELB.filter_registry.register('default-vpc')
class AppELBDefaultVpcFilter(DefaultVpcBase):
    """Filter all ELB that exist within the default vpc

    :example:

    .. code-block:: yaml

            policies:
              - name: appelb-in-default-vpc
                resource: app-elb
                filters:
                  - default-vpc
    """

    schema = type_schema('default-vpc')

    def __call__(self, alb):
        # Fixed: replaced the error-prone `x and match(x) or False`
        # idiom, which also looked 'VpcId' up twice; returns a real bool.
        vpc_id = alb.get('VpcId')
        return bool(vpc_id and self.match(vpc_id))
@resources.register('app-elb-target-group')
class AppELBTargetGroup(QueryResourceManager):
    """Resource manager for v2 ELB target groups.
    """

    class resource_type(TypeInfo):
        service = 'elbv2'
        arn_type = 'target-group'
        enum_spec = ('describe_target_groups', 'TargetGroups', None)
        name = 'TargetGroupName'
        id = 'TargetGroupArn'
        # IAM actions use 'elasticloadbalancing', not the boto name.
        permission_prefix = 'elasticloadbalancing'

    filter_registry = FilterRegistry('app-elb-target-group.filters')
    action_registry = ActionRegistry('app-elb-target-group.actions')
    # Retry throttled API calls instead of failing the policy run.
    retry = staticmethod(get_retry(('Throttling',)))

    # Registered in the class body so they apply to this resource's own
    # registries (distinct from AppELB's).
    filter_registry.register('tag-count', tags.TagCountFilter)
    filter_registry.register('marked-for-op', tags.TagActionFilter)

    @classmethod
    def get_permissions(cls):
        # override as the service is not the iam prefix
        return ("elasticloadbalancing:DescribeTargetGroups",
                "elasticloadbalancing:DescribeTags")

    def augment(self, target_groups):
        # Enrich each target group with its member health and its tags;
        # neither is returned by describe_target_groups.
        client = local_session(self.session_factory).client('elbv2')

        def _describe_target_group_health(target_group):
            result = self.retry(client.describe_target_health,
                                TargetGroupArn=target_group['TargetGroupArn'])
            target_group['TargetHealthDescriptions'] = result[
                'TargetHealthDescriptions']

        with self.executor_factory(max_workers=2) as w:
            list(w.map(_describe_target_group_health, target_groups))

        _describe_target_group_tags(
            target_groups, self.session_factory,
            self.executor_factory, self.retry)
        return target_groups
def _describe_target_group_tags(target_groups, session_factory,
                                executor_factory, retry):
    """Annotate each target group dict in place with its 'Tags'.

    describe_tags is called in batches of 20 arns, two batches at a time.
    """
    client = local_session(session_factory).client('elbv2')

    def _process_tags(batch):
        by_arn = {tg['TargetGroupArn']: tg for tg in batch}
        response = retry(
            client.describe_tags,
            ResourceArns=list(by_arn.keys()))
        for desc in response['TagDescriptions']:
            arn = desc.get('ResourceArn')
            if arn in by_arn:
                by_arn[arn]['Tags'] = desc['Tags']

    with executor_factory(max_workers=2) as w:
        list(w.map(_process_tags, chunks(target_groups, 20)))
@AppELBTargetGroup.action_registry.register('mark-for-op')
class AppELBTargetGroupMarkForOpAction(tags.TagDelayedAction):
    """Action to specify a delayed action on an ELB target group"""
    # All behavior is inherited from tags.TagDelayedAction.
@AppELBTargetGroup.action_registry.register('tag')
class AppELBTargetGroupTagAction(tags.Tag):
    """Action to create tag/tags on an ELB target group

    :example:

    .. code-block:: yaml

            policies:
              - name: appelb-targetgroup-add-required-tag
                resource: app-elb-target-group
                filters:
                  - "tag:RequiredTag": absent
                actions:
                  - type: tag
                    key: RequiredTag
                    value: RequiredValue
    """

    batch_size = 1
    permissions = ("elasticloadbalancing:AddTags",)

    def process_resource_set(self, client, resource_set, ts):
        # Apply the tag set to every target group arn in the batch.
        arns = [tgroup['TargetGroupArn'] for tgroup in resource_set]
        client.add_tags(ResourceArns=arns, Tags=ts)
@AppELBTargetGroup.action_registry.register('remove-tag')
class AppELBTargetGroupRemoveTagAction(tags.RemoveTag):
    """Action to remove tag/tags from ELB target group

    :example:

    .. code-block:: yaml

            policies:
              - name: appelb-targetgroup-remove-expired-tag
                resource: app-elb-target-group
                filters:
                  - "tag:ExpiredTag": present
                actions:
                  - type: remove-tag
                    tags: ["ExpiredTag"]
    """

    batch_size = 1
    permissions = ("elasticloadbalancing:RemoveTags",)

    def process_resource_set(self, client, resource_set, tag_keys):
        # Strip the given tag keys from every target group in the batch.
        arns = [tgroup['TargetGroupArn'] for tgroup in resource_set]
        client.remove_tags(ResourceArns=arns, TagKeys=tag_keys)
@AppELBTargetGroup.filter_registry.register('default-vpc')
class AppELBTargetGroupDefaultVpcFilter(DefaultVpcBase):
    """Filter application ELB target groups that live in the default VPC.

    :example:

    .. code-block:: yaml

            policies:
              - name: appelb-targetgroups-default-vpc
                resource: app-elb-target-group
                filters:
                  - default-vpc
    """

    schema = type_schema('default-vpc')

    def __call__(self, target_group):
        # A target group without a VpcId can never match the default VPC.
        vpc_id = target_group.get('VpcId')
        if not vpc_id:
            return False
        return self.match(vpc_id) or False
@AppELBTargetGroup.action_registry.register('delete')
class AppELBTargetGroupDeleteAction(BaseAction):
    """Delete ELB target groups.

    It is recommended to apply a filter to the delete policy to avoid
    unwanted deletion of any app elb target groups.

    :example:

    .. code-block:: yaml

            policies:
              - name: appelb-targetgroups-delete-unused
                resource: app-elb-target-group
                filters:
                  - "tag:SomeTag": absent
                actions:
                  - delete
    """

    schema = type_schema('delete')
    permissions = ('elasticloadbalancing:DeleteTargetGroup',)

    def process(self, resources):
        client = local_session(self.manager.session_factory).client('elbv2')
        for group in resources:
            self.process_target_group(client, group)

    def process_target_group(self, client, target_group):
        # Route the call through the manager's retry helper so transient
        # API throttling is absorbed.
        arn = target_group['TargetGroupArn']
        self.manager.retry(client.delete_target_group, TargetGroupArn=arn)
| {
"content_hash": "7649902528f80a5c7234d0deaf5b40bb",
"timestamp": "",
"source": "github",
"line_count": 1023,
"max_line_length": 109,
"avg_line_length": 33.78103616813294,
"alnum_prop": 0.5868105793159326,
"repo_name": "kapilt/cloud-custodian",
"id": "798271a94ad009d3dc4e032a4d86f9ed5de573b4",
"size": "35148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "c7n/resources/appelb.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "8163"
},
{
"name": "Go",
"bytes": "146630"
},
{
"name": "HTML",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "9971"
},
{
"name": "PowerShell",
"bytes": "1804"
},
{
"name": "Python",
"bytes": "5354902"
},
{
"name": "Shell",
"bytes": "13032"
},
{
"name": "Smarty",
"bytes": "359"
}
],
"symlink_target": ""
} |
import config
import json
import datetime
import time
import hashlib
import atexit
from sets import Set
try:
from flask import Flask
from flask import jsonify, render_template, request
except ImportError:
print '[X] Please install Flask:'
print ' $ pip install flask\n'
exit()
try:
import serial
except ImportError:
print '[X] Please install Flask:'
print ' $ pip install pySerial\n'
exit()
# Open the RFM12Pi radio board on the Raspberry Pi's serial port; the
# server still starts (without radio control) when the board is absent.
try:
    rfm12pi = serial.Serial('/dev/ttyAMA0', baudrate=9600, timeout=3.0)
except OSError:
    rfm12pi = None
    print '[X] RFM12Pi not found. Start server anyway...'

app = Flask('simplehomeautomation')

# Identifiers of the switches currently known to be on
# (format: 'switch-<system>-<device>', see control()).
active_switches = Set()
@app.route('/')
def main():
    """Render the dashboard with the current switch and login state."""
    context = {
        'config': config,
        'active_switches': active_switches,
        'logged_in': logged_in(request),
    }
    return render_template('index.html', **context)
@app.route('/login', methods=['POST'])
def login():
    """Check the submitted password; on success return a signed response."""
    password = request.form.get('password')
    if password is not None and password == config.PASSWORD:
        return signed_response(jsonify({'status': True}))
    return jsonify({'status': False})
@app.route('/control', methods=['POST'])
def control():
    """Switch a radio-controlled device on or off.

    Requires 'system' and 'device' form fields; an optional 'state'
    field forces the state, otherwise the current state is toggled.
    """
    if not logged_in(request):
        return jsonify({'status': False})

    form = request.form
    if 'system' not in form or 'device' not in form:
        return signed_response(jsonify({'status': False}))

    system = form['system']
    device = form['device']
    switch = ('switch-%s-%s' % (system, device))

    if 'state' in form:
        state = form['state']
    else:
        # No explicit state: toggle relative to the remembered state.
        state = '0' if switch in active_switches else '1'

    # Send command if the radio board is available
    if rfm12pi:
        rfm12pi.write('%s,%s,%se' % (system, device, state))

    # Remember the new state
    if state == '1':
        active_switches.add(switch)
    else:
        active_switches.discard(switch)

    return signed_response(jsonify({'status': True}))
@app.route('/status')
def status():
    """Report active switches; an empty list for unauthenticated callers."""
    switches = list(active_switches) if logged_in(request) else []
    return jsonify({'switches': switches})
def signed_response(response):
    """Attach the authentication cookie (valid for 7 days) to *response*."""
    week_from_now = datetime.date.today() + datetime.timedelta(days=7)
    expires = time.mktime(week_from_now.timetuple())
    response.set_cookie(
        config.COOKIE,
        value=str(current_secret()),
        expires=expires,
    )
    return response
def logged_in(request):
    """Return True when the request carries a valid cookie or the secret."""
    cookie = request.cookies.get(config.COOKIE)
    if cookie is not None and cookie == str(current_secret()):
        return True
    secret = request.form.get('secret')
    return secret is not None and secret == config.SECRET
def current_secret():
    """Derive the per-process session secret from the app identity."""
    seed = hash(app) * hash(config.SECRET)
    return sha256(str(seed))
def sha256(string):
    """Return the hex-encoded SHA-256 digest of *string*.

    Bug fix: despite its name this helper previously computed a SHA-224
    digest (hashlib.sha224).  Cookie secrets are per-process anyway
    (they include hash(app)), so no persisted value is invalidated by
    the change.
    """
    return hashlib.sha256(string).hexdigest()
if __name__ == '__main__':
app.run(host=config.HOST, port=config.PORT, debug=config.DEBUG)
def close():
print '[X] Shutting down server...'
if rfm12pi:
rfm12pi.close()
atexit.register(close)
| {
"content_hash": "036eea2d976742c532079ed136221c63",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 76,
"avg_line_length": 23.551470588235293,
"alnum_prop": 0.5916328442085544,
"repo_name": "fniephaus/SimpleHomeAutomation",
"id": "475e89828eac58d9cb7641cb7d7310baaaf1588b",
"size": "3225",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "5700"
},
{
"name": "CSS",
"bytes": "264"
},
{
"name": "HTML",
"bytes": "6225"
},
{
"name": "JavaScript",
"bytes": "1886"
},
{
"name": "Python",
"bytes": "3953"
}
],
"symlink_target": ""
} |
import discretize
from scipy.constants import mu_0
from SimPEG import maps, tests, utils
from SimPEG.electromagnetics import frequency_domain as fdem
from SimPEG.electromagnetics.utils import omega
# Prefer the Pardiso direct solver when pymatsolver is available,
# otherwise fall back to SimPEG's bundled solver.
try:
    from pymatsolver import Pardiso as Solver
except ImportError:
    # Bug fix: the fallback previously bound the import to the name
    # ``SolverLU`` while the rest of this module uses ``Solver``,
    # raising NameError whenever pymatsolver is not installed.
    from SimPEG import Solver
import time
import os
import numpy as np
import unittest
# This could be reduced if we refine the meshes
TOL_FWD = 5e-1  # relative tolerance for prim-sec comparison
TOL_JT = 1e-10
FLR = 1e-20  # "zero", so if residual below this --> pass regardless of order

# Fixed seed so the random Jvec / adjoint test vectors are reproducible.
np.random.seed(2016)

# To test the primary secondary-source, we make sure that doing primary
# secondary for a simple model gives comparable results to just solving a
# 3D problem
# Also run a sensitivity test, adjoint test

# physical properties
sigmaback = 1e-1
sigmablock = 5e-1
block_x = np.r_[125.0, 225.0]
block_y = np.r_[-50.0, 50.0]
block_z = np.r_[-50.0, 50.0]

# model vector: two log-conductivities followed by the block geometry
# (center and width per axis, derived from the ranges above)
model = np.r_[
    np.log(sigmaback),
    np.log(sigmablock),
    np.mean(block_z),
    np.diff(block_z),
    np.mean(block_x),
    np.diff(block_x),
    np.mean(block_y),
    np.diff(block_y),
]

# source
src_loc = np.r_[0.0, 0.0, 0.0]
freq = 10

# receivers: an 8 x 8 grid of locations at z = 175 m
rx_x = np.linspace(-175.0, 175.0, 8)
rx_y = rx_x.copy()
rx_z = np.r_[175.0]
rx_locs = utils.ndgrid(rx_x, rx_y, rx_z)

# mesh cell sizes, core counts and padding counts
csx, ncx, npadx = 25.0, 16, 10
csz, ncz, npadz = 25.0, 8, 10
pf = 1.5  # padding expansion factor

# primary mesh (cylindrically symmetric)
hx = [(csx, ncx), (csx, npadx, pf)]
hz = [(csz, npadz, -pf), (csz, ncz), (csz, npadz, pf)]
meshp = discretize.CylindricalMesh([hx, 1.0, hz], x0="0CC")

# secondary mesh (3D tensor mesh, same spacing recipe on all axes)
h = [(csz, npadz - 4, -pf), (csz, ncz), (csz, npadz - 4, pf)]
meshs = discretize.TensorMesh(3 * [h], x0="CCC")

# mappings from the 8-parameter model vector to cell conductivities
primaryMapping = (
    maps.ExpMap(meshp) * maps.SurjectFull(meshp) * maps.Projection(nP=8, index=[0])
)

mapping = (
    maps.ExpMap(meshs)
    * maps.ParametricBlockInLayer(meshs)
    * maps.Projection(nP=8, index=np.hstack([np.r_[0], np.arange(0, 8)]))
)

primaryMap2Meshs = (
    maps.ExpMap(meshs) * maps.SurjectFull(meshs) * maps.Projection(nP=8, index=[0])
)
class PrimSecFDEMTest(object):
    """Mixin with the shared checks for the primary-secondary FDEM tests.

    Subclasses (unittest.TestCase subclasses) are expected to provide,
    typically in setUpClass: rxlist, secondarySimulation,
    secondarySurvey, simulation3D, fields_primsec and fields_3D.
    """

    # --------------------- Run some tests! --------------------- #

    def DataTest(self):
        """Compare predicted data from the prim-sec and full 3D simulations."""
        print("\nTesting Data")
        dpred_primsec = self.secondarySimulation.dpred(model, f=self.fields_primsec)
        dpred_3D = self.simulation3D.dpred(model, f=self.fields_3D)
        nrx_locs = rx_locs.shape[0]
        # Reshape the flat data vectors to (receiver location, receiver channel)
        # so each channel can be compared independently.
        dpred_primsec = dpred_primsec.reshape(nrx_locs, len(self.rxlist))
        dpred_3D = dpred_3D.reshape(nrx_locs, len(self.rxlist))
        for i in range(len(self.rxlist)):
            rx = self.rxlist[i]
            normps = np.linalg.norm(dpred_primsec[:, i])
            norm3D = np.linalg.norm(dpred_3D[:, i])
            normdiff = np.linalg.norm(dpred_primsec[:, i] - dpred_3D[:, i])
            # Relative check: the difference must be small compared with the
            # mean magnitude of the two predictions.
            passed = normdiff < TOL_FWD * np.mean([normps, norm3D])
            print(
                " Testing {rxfield}{rxorient} {rxcomp}... "
                "prim-sec: {normps:10.5e}, 3D: {norm3D:10.5e}, "
                "diff: {diff:10.5e}, passed? {passed}".format(
                    rxfield=rx.projField,
                    rxorient=rx.orientation,
                    rxcomp=rx.component,
                    normps=normps,
                    norm3D=norm3D,
                    diff=normdiff,
                    passed=passed,
                )
            )
            self.assertTrue(passed)
        return True

    def JvecTest(self):
        """Finite-difference derivative test of Jvec around the model."""
        print("\nTesting Jvec")
        x0 = model

        def fun(x):
            return [
                self.secondarySimulation.dpred(x),
                lambda x: self.secondarySimulation.Jvec(x0, x, f=self.fields_primsec),
            ]

        return tests.check_derivative(fun, x0, num=2, plotIt=False)

    def AdjointTest(self):
        """Adjoint test: v.(J w) must match w.(J^T v) to within tolerance.

        NOTE(review): the pass/fail flag is only *returned*; callers should
        assert on it, otherwise a failure goes unnoticed.
        """
        print("\nTesting adjoint")
        m = model
        f = self.fields_primsec
        v = np.random.rand(self.secondarySurvey.nD)
        w = np.random.rand(self.secondarySimulation.sigmaMap.nP)
        vJw = v.dot(self.secondarySimulation.Jvec(m, w, f))
        wJtv = w.dot(self.secondarySimulation.Jtvec(m, v, f))
        # Tolerance scales with the magnitude of vJw, floored at FLR.
        tol = np.max([TOL_JT * (10 ** int(np.log10(np.abs(vJw)))), FLR])
        passed = np.abs(vJw - wJtv) < tol
        print(
            " J: {}, JT: {}, diff: {}, tol: {}, passed? {}".format(
                vJw, wJtv, vJw - wJtv, tol, passed
            )
        )
        return passed
class PrimSecFDEMSrcTest_Cyl2Cart_EB_EB(unittest.TestCase, PrimSecFDEMTest):
    """Primary (cylindrical, B-formulation) -> secondary (3D, B-formulation)."""

    @classmethod
    def setUpClass(self):
        print("\n------- Testing Primary Secondary Source EB -> EB --------\n")
        # receivers: B and E, all orientations, real and imaginary parts
        self.rxlist = []
        for rxtype in ["MagneticFluxDensity", "ElectricField"]:
            rx = getattr(fdem.Rx, "Point{}".format(rxtype))
            for orientation in ["x", "y", "z"]:
                for comp in ["real", "imag"]:
                    self.rxlist.append(
                        rx(rx_locs, component=comp, orientation=orientation)
                    )
        # primary problem: magnetic dipole on the cylindrical mesh
        self.primarySimulation = fdem.Simulation3DMagneticFluxDensity(
            meshp, sigmaMap=primaryMapping
        )
        self.primarySimulation.solver = Solver
        primarySrc = fdem.Src.MagDipole(self.rxlist, frequency=freq, location=src_loc)
        self.primarySurvey = fdem.Survey([primarySrc])
        self.secondarySrc = fdem.Src.PrimSecMappedSigma(
            self.rxlist,
            freq,
            self.primarySimulation,
            self.primarySurvey,
            primaryMap2Meshs,
        )
        self.secondarySurvey = fdem.Survey([self.secondarySrc])
        # Secondary Problem
        self.secondarySimulation = fdem.Simulation3DMagneticFluxDensity(
            meshs, survey=self.secondarySurvey, sigmaMap=mapping
        )
        self.secondarySimulation.solver = Solver
        # Full 3D problem to compare with
        self.survey3D = fdem.Survey([primarySrc])
        self.simulation3D = fdem.Simulation3DMagneticFluxDensity(
            meshs, survey=self.survey3D, sigmaMap=mapping
        )
        self.simulation3D.solver = Solver
        # solve and store fields
        # (bug fix: the prim-sec fields were previously solved twice in a
        # row, doubling the setup time for no benefit)
        print(" solving primary - secondary")
        self.fields_primsec = self.secondarySimulation.fields(model)
        print(" ... done")
        print(" solving 3D")
        self.fields_3D = self.simulation3D.fields(model)
        print(" ... done")
        return None

    # --------------------- Run some tests! --------------------- #
    def test_data_EB(self):
        self.DataTest()

    def test_Jvec_EB(self):
        # Bug fix: assert on the returned flag so a failed derivative
        # test actually fails the unit test.
        self.assertTrue(self.JvecTest())

    def test_Jadjoint_EB(self):
        # Bug fix: AdjointTest only returns its pass flag; assert on it.
        self.assertTrue(self.AdjointTest())
class PrimSecFDEMSrcTest_Cyl2Cart_HJ_EB(unittest.TestCase, PrimSecFDEMTest):
    """Primary (cylindrical, J-formulation) -> secondary (3D, E-formulation)."""

    @classmethod
    def setUpClass(self):
        print("\n------- Testing Primary Secondary Source HJ -> EB --------\n")
        # receivers: B and E, all orientations, real and imaginary parts
        self.rxlist = []
        for rxtype in ["MagneticFluxDensity", "ElectricField"]:
            rx = getattr(fdem.Rx, "Point{}".format(rxtype))
            for orientation in ["x", "y", "z"]:
                for comp in ["real", "imag"]:
                    self.rxlist.append(
                        rx(rx_locs, component=comp, orientation=orientation)
                    )
        # primary problem: vertical current source on the cylindrical mesh
        self.primarySimulation = fdem.Simulation3DCurrentDensity(
            meshp, sigmaMap=primaryMapping
        )
        self.primarySimulation.solver = Solver
        s_e = np.zeros(meshp.nF)
        inds = meshp.nFx + meshp.closest_points_index(src_loc, grid_loc="Fz")
        s_e[inds] = 1.0 / csz
        primarySrc = fdem.Src.RawVec_e(
            self.rxlist, frequency=freq, s_e=s_e / meshp.face_areas
        )
        self.primarySurvey = fdem.Survey([primarySrc])
        # Secondary Problem
        self.secondarySrc = fdem.Src.PrimSecMappedSigma(
            self.rxlist,
            freq,
            self.primarySimulation,
            self.primarySurvey,
            primaryMap2Meshs,
        )
        self.secondarySurvey = fdem.Survey([self.secondarySrc])
        self.secondarySimulation = fdem.Simulation3DElectricField(
            meshs,
            survey=self.secondarySurvey,
            sigmaMap=mapping,
        )
        self.secondarySimulation.solver = Solver
        # Full 3D problem to compare with: equivalent edge source on meshs
        s_e3D = np.zeros(meshs.nE)
        inds = (
            meshs.nEx + meshs.nEy + meshs.closest_points_index(src_loc, grid_loc="Ez")
        )
        s_e3D[inds] = [1.0 / (len(inds))] * len(inds)
        src3D = fdem.Src.RawVec_e(self.rxlist, frequency=freq, s_e=s_e3D)
        self.survey3D = fdem.Survey([src3D])
        self.simulation3D = fdem.Simulation3DElectricField(
            meshs, survey=self.survey3D, sigmaMap=mapping
        )
        self.simulation3D.solver = Solver
        self.simulation3D.model = model
        # solve and store fields
        # (bug fix: the prim-sec fields were previously solved twice in a
        # row, doubling the setup time for no benefit)
        print(" solving primary - secondary")
        self.fields_primsec = self.secondarySimulation.fields(model)
        print(" ... done")
        print(" solving 3D")
        self.fields_3D = self.simulation3D.fields(model)
        print(" ... done")
        return None

    # --------------------- Run some tests! --------------------- #
    def test_data_HJ(self):
        self.DataTest()

    def test_Jvec_HJ(self):
        # Bug fix: assert on the returned flag so a failed derivative
        # test actually fails the unit test.
        self.assertTrue(self.JvecTest())

    def test_Jadjoint_HJ(self):
        # Bug fix: AdjointTest only returns its pass flag; assert on it.
        self.assertTrue(self.AdjointTest())
| {
"content_hash": "a98d59ab77feec5c0af8c839961e6bba",
"timestamp": "",
"source": "github",
"line_count": 309,
"max_line_length": 86,
"avg_line_length": 31.116504854368934,
"alnum_prop": 0.5790951638065522,
"repo_name": "simpeg/simpeg",
"id": "aa1a382b7de309fb354f521d3209c2528a3af9b2",
"size": "9660",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/em/fdem/forward/test_FDEM_primsec.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "685"
},
{
"name": "Python",
"bytes": "3476002"
}
],
"symlink_target": ""
} |
"""CelebA."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import zipfile
# Dependency imports
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import image_utils
from tensor2tensor.utils import registry
import tensorflow as tf
@registry.register_problem
class ImageCeleba(image_utils.ImageProblem):
  """CelebA dataset, aligned and cropped images."""

  IMG_DATA = ("img_align_celeba.zip",
              "https://drive.google.com/uc?export=download&"
              "id=0B7EVK8r0v71pZjFTYXZWM3FlRnM")
  LANDMARKS_DATA = ("celeba_landmarks_align",
                    "https://drive.google.com/uc?export=download&"
                    "id=0B7EVK8r0v71pd0FJY3Blby1HUTQ")
  ATTR_DATA = ("celeba_attr", "https://drive.google.com/uc?export=download&"
               "id=0B7EVK8r0v71pblRyaVFSWGxPY0U")

  LANDMARK_HEADINGS = ("lefteye_x lefteye_y righteye_x righteye_y "
                       "nose_x nose_y leftmouth_x leftmouth_y rightmouth_x "
                       "rightmouth_y").split()
  ATTR_HEADINGS = (
      "5_o_Clock_Shadow Arched_Eyebrows Attractive Bags_Under_Eyes Bald Bangs "
      "Big_Lips Big_Nose Black_Hair Blond_Hair Blurry Brown_Hair "
      "Bushy_Eyebrows Chubby Double_Chin Eyeglasses Goatee Gray_Hair "
      "Heavy_Makeup High_Cheekbones Male Mouth_Slightly_Open Mustache "
      "Narrow_Eyes No_Beard Oval_Face Pale_Skin Pointy_Nose Receding_Hairline "
      "Rosy_Cheeks Sideburns Smiling Straight_Hair Wavy_Hair Wearing_Earrings "
      "Wearing_Hat Wearing_Lipstick Wearing_Necklace Wearing_Necktie Young"
  ).split()

  def hparams(self, defaults, unused_model_hparams):
    """Set problem hyperparameters: identity image modalities, 256 symbols."""
    p = defaults
    p.input_modality = {"inputs": ("image:identity", 256)}
    p.target_modality = ("image:identity", 256)
    p.batch_size_multiplier = 256
    p.input_space_id = 1
    p.target_space_id = 1

  def generator(self, tmp_dir, how_many, start_from=0):
    """Image generator for CELEBA dataset.

    Args:
      tmp_dir: path to temporary storage directory.
      how_many: how many images and labels to generate.
      start_from: from which image to start.

    Yields:
      A dictionary representing the images with the following fields:
      * image/encoded: the string encoding the image as JPEG,
      * image/format: the string "jpeg" representing image format,
    """
    out_paths = []
    for fname, url in [self.IMG_DATA, self.LANDMARKS_DATA, self.ATTR_DATA]:
      path = generator_utils.maybe_download_from_drive(tmp_dir, fname, url)
      out_paths.append(path)

    img_path, landmarks_path, attr_path = out_paths  # pylint: disable=unbalanced-tuple-unpacking
    unzipped_folder = img_path[:-4]
    if not tf.gfile.Exists(unzipped_folder):
      zipfile.ZipFile(img_path, "r").extractall(tmp_dir)

    with tf.gfile.Open(landmarks_path) as f:
      landmarks_raw = f.read()

    with tf.gfile.Open(attr_path) as f:
      attr_raw = f.read()

    def process_annotations(raw_data):
      """Parse a CelebA annotation file into {image_name: [int values]}.

      Both the landmarks and the attributes files share the same layout:
      a count line, a headings line, then one whitespace-separated row
      per image (previously parsed by two byte-identical functions).
      """
      annotations = {}
      lines = raw_data.split("\n")
      headings = lines[1].strip().split()
      for line in lines[2:-1]:
        values = line.strip().split()
        img_name = values[0]
        annotations[img_name] = [int(v) for v in values[1:]]
      return annotations, headings

    img_landmarks, _ = process_annotations(landmarks_raw)
    img_attrs, _ = process_annotations(attr_raw)

    image_files = tf.gfile.Glob(unzipped_folder + "/*.jpg")
    for filename in image_files[start_from:start_from + how_many]:
      img_name = os.path.basename(filename)
      landmarks = img_landmarks[img_name]
      attrs = img_attrs[img_name]
      # Bug fix: JPEG bytes must be read in binary mode; text mode ("r")
      # decodes/corrupts the data on Python 3.
      with tf.gfile.Open(filename, "rb") as f:
        encoded_image_data = f.read()
      yield {
          "image/encoded": [encoded_image_data],
          "image/format": ["jpeg"],
          "attributes": attrs,
          "landmarks": landmarks,
      }

  @property
  def train_shards(self):
    return 100

  @property
  def dev_shards(self):
    return 10

  def generate_data(self, data_dir, tmp_dir, task_id=-1):
    """Generate, shard and shuffle the train and dev splits."""
    generator_utils.generate_dataset_and_shuffle(
        self.generator(tmp_dir, 162770),  # train
        self.training_filepaths(data_dir, self.train_shards, shuffled=False),
        self.generator(tmp_dir, 19867, 162770),  # dev
        self.dev_filepaths(data_dir, self.dev_shards, shuffled=False))
@registry.register_problem
class Img2imgCeleba(ImageCeleba):
  """8px to 32px problem."""

  def dataset_filename(self):
    # Share the generated data files with the base ImageCeleba problem.
    return "image_celeba"

  def preprocess_example(self, example, unused_mode, unused_hparams):
    """Crop away the CelebA border, then build 8x8 inputs and 32x32 targets."""
    image = example["inputs"]
    # Remove boundaries in CelebA images. Remove 40 pixels each side
    # vertically and 20 pixels each side horizontally.
    cropped = tf.image.crop_to_bounding_box(image, 40, 20, 218 - 80, 178 - 40)
    example["inputs"] = image_utils.resize_by_area(cropped, 8)
    example["targets"] = image_utils.resize_by_area(cropped, 32)
    return example
@registry.register_problem
class Img2imgCeleba64(Img2imgCeleba):
  """8px to 64px problem."""

  def preprocess_example(self, example, unused_mode, unused_hparams):
    """Crop away the CelebA border, then build 8x8 inputs and 64x64 targets."""
    image = example["inputs"]
    # Remove boundaries in CelebA images. Remove 40 pixels each side
    # vertically and 20 pixels each side horizontally.
    cropped = tf.image.crop_to_bounding_box(image, 40, 20, 218 - 80, 178 - 40)
    example["inputs"] = image_utils.resize_by_area(cropped, 8)
    example["targets"] = image_utils.resize_by_area(cropped, 64)
    return example
| {
"content_hash": "3ffab2320c859197308d3a8c92e421ed",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 97,
"avg_line_length": 35.23391812865497,
"alnum_prop": 0.6582572614107883,
"repo_name": "rsepassi/tensor2tensor",
"id": "7fd3bddb5cea41fb34e96d67b112042fd24cc210",
"size": "6631",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensor2tensor/data_generators/celeba.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "34646"
},
{
"name": "JavaScript",
"bytes": "78396"
},
{
"name": "Jupyter Notebook",
"bytes": "2328225"
},
{
"name": "Python",
"bytes": "1702690"
},
{
"name": "Shell",
"bytes": "1260"
}
],
"symlink_target": ""
} |
def extractWwwBlexbinNet(item):
    """
    Parser for 'www.blexbin.net'
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])

    # Skip items without chapter/volume information, and preview posts.
    if not (chp or vol) or "preview" in item['title'].lower():
        return None

    tagmap = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tag, series, tl_type in tagmap:
        if tag in item['tags']:
            return buildReleaseMessageWithType(
                item, series, vol, chp,
                frag=frag, postfix=postfix, tl_type=tl_type)

    return False
| {
"content_hash": "4fbd75b1e97660f002c198fa28960dc9",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 104,
"avg_line_length": 25.761904761904763,
"alnum_prop": 0.6229205175600739,
"repo_name": "fake-name/ReadableWebProxy",
"id": "8d8c21dc1200f38ce5c3c1775f5bea6885493e8d",
"size": "542",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractWwwBlexbinNet.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
from pyduino import *
# Imports Généraux
import time, sched
import os
import threading
import signal
import json
import sys
# Pour la détection d'adresse IP
import socket
import fcntl
import struct
# Pour le serveur de socket
import tornado.httpserver
import tornado.ioloop
from tornado.ioloop import PeriodicCallback
import tornado.web
import tornado.websocket
import tornado.template
# Gestion de l'IMU
from mpu9250 import MPU9250
# Hostname (used later on to figure out which board this
# program is running on)
hostname = socket.gethostname()

# i2c communication with the Arduino Mega
from mega import Mega
mega = Mega(hostname = hostname)
# Motors: measured wheel speed, current and previous encoder tick deltas
# for each of the four mecanum wheels (rear/front, right/left).
Nmoy = 1  # number of samples averaged in the speed computation

omegaArriereDroit = 0.
codeurArriereDroitDeltaPos = 0
codeurArriereDroitDeltaPosPrec = 0

omegaArriereGauche = 0.
codeurArriereGaucheDeltaPos = 0
codeurArriereGaucheDeltaPosPrec = 0

omegaAvantDroit = 0.
codeurAvantDroitDeltaPos = 0
codeurAvantDroitDeltaPosPrec = 0

omegaAvantGauche = 0.
codeurAvantGaucheDeltaPos = 0
codeurAvantGaucheDeltaPosPrec = 0

# Voltage actually applied to each motor
commandeArriereDroit = 0.
commandeArriereGauche = 0.
commandeAvantDroit = 0.
commandeAvantGauche = 0.

# Saturations
umax = 6.  # max value of the motor command voltage
umin = -6.  # min value (i.e. max in negative) of the motor command voltage

# Closed-loop control gains
Kplongi = 4.3  # proportional gain of the longitudinal PID
Kilongi = 109.0  # integral gain of the longitudinal PID
Kdlongi = 0.  # derivative gain of the longitudinal PID
Tflongi = 0.02  # filter time constant of the longitudinal PID derivative action
Kplat = 4.3  # proportional gain of the lateral PID
Kilat = 109.0  # integral gain of the lateral PID
Kdlat = 0.  # derivative gain of the lateral PID
Tflat = 0.02  # filter time constant of the lateral PID derivative action
Kprot = 0.37  # proportional gain of the rotation PID
Kirot = 12.5  # integral gain of the rotation PID
Kdrot = 0.  # derivative gain of the rotation PID
Tfrot = 0.02  # filter time constant of the rotation PID derivative action

# Per-controller PID state (index 0: longitudinal, 1: lateral, 2: rotation)
I_x = [0., 0., 0., 0.]  # integral terms
D_x = [0., 0., 0., 0.]  # filtered derivative terms
yprec = [0., 0., 0., 0.]  # measurement at the previous computation step

# Measured robot velocities
vxmes = 0.
vymes = 0.
ximes = 0.

# Mechanical parameters
R = 0.0225  # wheel radius
W = 0.18  # distance between the robot's center of rotation and the wheels

# Variables holding the received setpoint data
vxref = 0.
vyref = 0.
xiref = 0.
source_ximes = 0

# Receive-data timeout
timeout = 2
timeLastReceived = 0
timedOut = False

T0 = time.time()
dt = 0.01
i = 0
tprec = time.time()
tdebut = 0

# Scheduler used to execute operations at a fixed rate
s = sched.scheduler(time.time, time.sleep)

# Decimation counters for the (slow) battery voltage reading
idecimLectureTension = 0
decimLectureTension = 6000
decimErreurLectureTension = 100
# Battery voltage measurement.
# Constrained to be at least 7 V to avoid a division by zero later on
# should anything go wrong with the reading.
lectureTensionOK = False
tensionAlim = 7.4
while not lectureTensionOK:
    try:
        tensionAlim = max(7.0, float(mega.read_battery_millivolts()) / 1000.)
        lectureTensionOK = True
    except Exception:
        # Bug fix: the previous bare "except:" also swallowed
        # KeyboardInterrupt/SystemExit, making this retry loop
        # impossible to interrupt when the Mega never answers.
        print("Erreur lecture tension")
# Ultrasonic distance sensor state (read at a decimated rate and
# low-pass filtered in CalculVitesse)
idecimDistance = 0
decimDistance = 20
distance = 0
distancePrec = 0
distanceFiltre = 0
tauFiltreDistance = 0.03  # filter time constant (s)
# IMU initialisation
ax = 0.
ay = 0.
gz = 0.

# Select the i2c bus number according to the host board
if (hostname == "pcduino"):
    I2CBUS = 2
elif (hostname == "raspberrypi"):
    I2CBUS = 1
else:
    # pcDuino by default
    I2CBUS = 2

initIMU_OK = False
while not initIMU_OK:
    try:
        imu = MPU9250(i2cbus=I2CBUS, address=0x69)
        initIMU_OK = True
    except Exception:
        # Bug fix: the previous bare "except:" also swallowed
        # KeyboardInterrupt/SystemExit, making this retry loop
        # impossible to interrupt when the IMU is absent.
        print("Erreur init IMU")
#--- setup ---
def setup():
    """One-time initialisation: make sure all four motors are stopped."""
    # Motor initialisation
    CommandeMoteurs(0, 0, 0, 0)

# -- end setup --
# -- loop --
def loop():
    """Schedule and run the next control iteration at a fixed rate."""
    global i, T0
    i += 1
    # Schedule CalculVitesse at the absolute time T0 + i*dt so timing
    # errors do not accumulate from one iteration to the next.
    s.enterabs(T0 + (i * dt), 1, CalculVitesse, ())
    s.run()

# -- end loop --
def CalculVitesse():
    """One control-loop step, run every dt seconds by the scheduler.

    Reads the four wheel encoders and the IMU, derives the measured
    robot velocities (vxmes, vymes, ximes), runs the three PID loops
    against the last received setpoints (vxref, vyref, xiref), converts
    the commands into the four mecanum-wheel motor voltages, and at a
    decimated rate refreshes the battery voltage and the (filtered)
    ultrasonic distance.
    """
    global omegaArriereDroit, omegaArriereGauche, omegaAvantDroit, omegaAvantGauche, timeLastReceived, timeout, timedOut, \
        tdebut, codeurArriereDroitDeltaPos, codeurArriereGaucheDeltaPos, codeurAvantDroitDeltaPos, codeurAvantGaucheDeltaPos, \
        commandeArriereDroit, commandeArriereGauche, commandeAvantDroit, commandeAvantGauche, \
        codeurArriereDroitDeltaPosPrec, codeurArriereGaucheDeltaPosPrec, codeurAvantDroitDeltaPosPrec, codeurAvantGaucheDeltaPosPrec, tprec, \
        idecimLectureTension, decimLectureTension, decimErreurLectureTension, tensionAlim, \
        distance, idecimDistance, decimDistance, distancePrec, \
        distanceFiltre, tauFiltreDistance, imu, gz, R, W, vxmes, vymes, ximes, vxref, vyref, xiref, font, source_ximes, hostname, ax, ay

    tdebut = time.time()

    # Measure the motor speeds from the incremental encoders
    try:
        codeursDeltaPos = mega.read_codeursDeltaPos()
        codeurArriereDroitDeltaPos = codeursDeltaPos[0]
        codeurArriereGaucheDeltaPos = codeursDeltaPos[1]
        codeurAvantDroitDeltaPos = codeursDeltaPos[2]
        codeurAvantGaucheDeltaPos = codeursDeltaPos[3]

        # Outlier rejection: a jump of more than 10 ticks on any wheel is
        # treated as a read glitch and the previous values are reused
        if (abs(codeurArriereDroitDeltaPos - codeurArriereDroitDeltaPosPrec) > 10) or (abs(codeurArriereGaucheDeltaPos - codeurArriereGaucheDeltaPosPrec) > 10) or (abs(codeurAvantDroitDeltaPos - codeurAvantDroitDeltaPosPrec) > 10) or (abs(codeurAvantGaucheDeltaPos - codeurAvantGaucheDeltaPosPrec) > 10):
            codeurArriereDroitDeltaPos = codeurArriereDroitDeltaPosPrec
            codeurArriereGaucheDeltaPos = codeurArriereGaucheDeltaPosPrec
            codeurAvantDroitDeltaPos = codeurAvantDroitDeltaPosPrec
            codeurAvantGaucheDeltaPos = codeurAvantGaucheDeltaPosPrec

        codeurArriereDroitDeltaPosPrec = codeurArriereDroitDeltaPos
        codeurArriereGaucheDeltaPosPrec = codeurArriereGaucheDeltaPos
        codeurAvantDroitDeltaPosPrec = codeurAvantDroitDeltaPos
        codeurAvantGaucheDeltaPosPrec = codeurAvantGaucheDeltaPos
    except:
        #print "Erreur lecture codeurs"
        codeurArriereDroitDeltaPos = codeurArriereDroitDeltaPosPrec
        codeurArriereGaucheDeltaPos = codeurArriereGaucheDeltaPosPrec
        codeurAvantDroitDeltaPos = codeurAvantDroitDeltaPosPrec
        codeurAvantGaucheDeltaPos = codeurAvantGaucheDeltaPosPrec

    # 1200 encoder ticks per revolution; the right-hand motors are
    # mounted mirrored, hence the sign flips
    omegaArriereDroit = -2 * ((2 * 3.141592 * codeurArriereDroitDeltaPos) / 1200) / (Nmoy * dt)  # in rad/s
    omegaArriereGauche = 2 * ((2 * 3.141592 * codeurArriereGaucheDeltaPos) / 1200) / (Nmoy * dt)  # in rad/s
    omegaAvantDroit = -2 * ((2 * 3.141592 * codeurAvantDroitDeltaPos) / 1200) / (Nmoy * dt)  # in rad/s
    omegaAvantGauche = 2 * ((2 * 3.141592 * codeurAvantGaucheDeltaPos) / 1200) / (Nmoy * dt)  # in rad/s

    # Measured robot velocities (mecanum-wheel kinematics)
    vxmes = (omegaArriereDroit + omegaArriereGauche + omegaAvantDroit + omegaAvantGauche) * R / 4
    vymes = (-omegaArriereDroit + omegaArriereGauche + omegaAvantDroit - omegaAvantGauche) * R / 4
    ximes = (omegaArriereDroit - omegaArriereGauche + omegaAvantDroit - omegaAvantGauche) * R / W / 2

    # Read the acceleration and the rotation speed about the vertical axis
    try:
        accel = imu.readAccel()
        gyro = imu.readGyro()
        ax = 9.81 * accel['x']
        ay = 9.81 * accel['y']
        gz = gyro['z'] * math.pi / 180
    except:
        #print("Erreur lecture IMU")
        pass

    # Actual elapsed time since the previous step
    dt2 = time.time() - tprec
    tprec = time.time()

    # If no data has been received for a while, the setpoints are dropped
    if (time.time()-timeLastReceived) > timeout and not timedOut:
        timedOut = True

    if timedOut:
        commandeLongi = 0.
        commandeLat = 0.
        commandeRot = 0.
    else:
        commandeLongi = PID(0, vxref, vxmes, Kplongi, Kilongi, Kdlongi, Tflongi, umax, umin, dt);
        commandeLat = PID(1, vyref, vymes, Kplat, Kilat, Kdlat, Tflat, umax, umin, dt);
        # source_ximes selects the rotation feedback: gyro (1) or encoders
        if (source_ximes == 1):
            commandeRot = PID(2, xiref, gz, Kprot, Kirot, Kdrot, Tfrot, umax, umin, dt);
        else:
            commandeRot = PID(2, xiref, ximes, Kprot, Kirot, Kdrot, Tfrot, umax, umin, dt);

    # Convert the longitudinal/lateral/rotation commands to motor voltages
    commandeArriereDroit = -(commandeLongi - commandeLat + commandeRot)  # negative voltage to spin this motor positively
    commandeArriereGauche = commandeLongi + commandeLat - commandeRot
    commandeAvantDroit = -(commandeLongi + commandeLat + commandeRot)  # negative voltage to spin this motor positively
    commandeAvantGauche = commandeLongi - commandeLat - commandeRot

    CommandeMoteurs(commandeArriereDroit, commandeArriereGauche, commandeAvantDroit, commandeAvantGauche)

    # Read the supply voltage (heavily decimated: expensive i2c read)
    if idecimLectureTension >= decimLectureTension:
        try:
            tensionAlim = max(7.0, float(mega.read_battery_millivolts()) / 1000.)
            idecimLectureTension = 0
        except:
            # Retry the read in decimErreurLectureTension * dt
            idecimLectureTension = idecimLectureTension - decimErreurLectureTension
            #print("Erreur lecture tension dans Loop")
    else:
        idecimLectureTension = idecimLectureTension + 1

    # Compute the distance measured by the ultrasonic sensor.
    # Done after everything else so it uses whatever time remains in
    # the cycle and does not disturb the control loop.
    if idecimDistance >= decimDistance:
        idecimDistance = 0
        try:
            distance = mega.read_distance()
            if distance == 0:
                # Actually means a distance greater than 200 cm
                distance = 200
            # print "Distance: ", distance, " cm"
        except:
            print "Probleme lecture distance"
            pass

        # First-order low-pass filter on the distance
        distanceFiltre = (dt2 * distance + tauFiltreDistance * distancePrec) / (dt2 + tauFiltreDistance)
        distancePrec = distanceFiltre
    else:
        idecimDistance = idecimDistance + 1

    #print time.time() - tdebut
def PID(iMoteur, omegaref, omega, Kp, Ki, Kd, Tf, umax, umin, dt2):
    """Discrete PID step with filtered derivative and anti-windup.

    iMoteur indexes the persistent state arrays I_x, D_x, yprec
    (0: longitudinal, 1: lateral, 2: rotation, as used in CalculVitesse).
    omegaref/omega are the setpoint and measurement; Kp/Ki/Kd the gains;
    Tf the derivative filter time constant; umax/umin the saturation
    bounds; dt2 the sampling period.  Returns the saturated command.
    """
    global I_x, D_x, yprec
    # PID computation
    # Intermediate parameters
    Ti = Ki/(Kp+0.01)  # +0.01 guards against division by zero when Kp == 0
    if (Kd>0):  # PID case
        ad = Tf/(Tf+dt2)
        bd = Kd/(Tf+dt2)
        Td = Kp/Kd
        Tt = sqrt(Ti*Td)  # anti-windup tracking time constant
    else:  # PI case
        ad = 0
        bd = 0
        Td = 0
        Tt = 0.5*Ti
    br = dt2/(Tt+0.01)  # back-calculation gain (guarded against Tt == 0)

    # Command computed before saturation
    # Proportional term
    P_x = Kp * (omegaref - omega)

    # Filtered derivative term (acts on the measurement, not the error,
    # so setpoint steps do not produce derivative kicks)
    D_x[iMoteur] = ad * D_x[iMoteur] - bd * (omega - yprec[iMoteur])

    # Command before saturation
    commande_avant_sat = P_x + I_x[iMoteur] + D_x[iMoteur]

    # Apply output saturation
    if (commande_avant_sat > umax):
        commande = umax
    elif (commande_avant_sat < umin):
        commande = umin
    else:
        commande = commande_avant_sat

    # Integral term (used at the next sampling step); the br*(...) term is
    # the back-calculation anti-windup correction, non-zero only when the
    # command saturated
    I_x[iMoteur] = I_x[iMoteur] + Ki * dt2 * (omegaref - omega) + br * (commande - commande_avant_sat)

    # Store the current measurement for the next sampling step
    yprec[iMoteur] = omega

    return commande
def _clamp_pwm(valeur):
    """Clamp an integer PWM command to the H-bridge safe range [-255, 255]."""
    if valeur > 255:
        return 255
    if valeur < -255:
        return -255
    return valeur


def CommandeMoteurs(commandeArriereDroit, commandeArriereGauche, commandeAvantDroit, commandeAvantGauche):
    """Compute and send the PWM signals to the H-bridges for the four motors.

    Each voltage command is normalized by the measured supply voltage
    (module-level tensionAlim), scaled to the signed 8-bit PWM range and
    clamped for safety before being written to the `mega` board.
    """
    global tensionAlim
    # The H-bridge + motor assembly may not be linear; for now the commanded
    # voltage is used as-is.
    tensionArriereDroit = commandeArriereDroit
    tensionArriereGauche = commandeArriereGauche
    tensionAvantDroit = commandeAvantDroit
    tensionAvantGauche = commandeAvantGauche
    # Normalize by the supply voltage, scale to [-255, 255] and clamp.
    tension_int_ArriereDroit = _clamp_pwm(int(255 * tensionArriereDroit / tensionAlim))
    tension_int_ArriereGauche = _clamp_pwm(int(255 * tensionArriereGauche / tensionAlim))
    tension_int_AvantDroit = _clamp_pwm(int(255 * tensionAvantDroit / tensionAlim))
    tension_int_AvantGauche = _clamp_pwm(int(255 * tensionAvantGauche / tensionAlim))
    # PWM commands: a transient link error must not stop the control loop.
    try:
        mega.moteursArriere(tension_int_ArriereDroit, tension_int_ArriereGauche)
        mega.moteursAvant(tension_int_AvantDroit, tension_int_AvantGauche)
        mega.moteursCRC(tension_int_ArriereDroit + tension_int_ArriereGauche, tension_int_AvantDroit + tension_int_AvantGauche)
    except Exception:
        # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        pass
def emitData():
    """Run the control loop until the module-level noLoop flag is set."""
    global tprec
    # Reference time for the first loop iteration (server start-up delay
    # was handled here historically).
    tprec = time.time()
    while not noLoop:
        loop()
class WSHandler(tornado.websocket.WebSocketHandler):
    """WebSocket endpoint: receives setpoints from the web UI and streams
    the measured robot state back to the client every 100 ms."""
    def open(self):
        """Client connected: start the periodic measurement broadcast."""
        global socketOK
        print 'connection opened...'
        socketOK = True
        # Push the current state to the client every 100 ms.
        self.callback = PeriodicCallback(self.sendToSocket, 100)
        self.callback.start()
    def on_message(self, message):
        """Update the global setpoints from a JSON message sent by the UI.

        Speeds arrive in cm/s and the heading rate in degrees; they are
        converted to m/s and rad/s respectively.
        """
        global vxref, vyref, xiref, source_ximes, timeLastReceived, timedOut
        jsonMessage = json.loads(message)
        # Cancel the data-reception timeout.
        timeLastReceived = time.time()
        timedOut = False;
        if jsonMessage.get('vxref') != None:
            vxref = float(jsonMessage.get('vxref')) / 100.
        if jsonMessage.get('vyref') != None:
            vyref = float(jsonMessage.get('vyref')) / 100.
        if jsonMessage.get('xiref') != None:
            xiref = float(jsonMessage.get('xiref')) * math.pi / 180.
        if jsonMessage.get('source_ximes') != None:
            # Source of the measured rotation speed: 1: gyro, 0: wheel speeds
            source_ximes = int(jsonMessage.get('source_ximes'))
        # Safety: zero the setpoints while no socket is up.
        if not socketOK:
            vxref = 0.
            vyref = 0.
            xiref = 0.
    def on_close(self):
        """Client disconnected: stop the robot by zeroing the setpoints."""
        global socketOK, vxref, vyref, xiref
        print 'connection closed...'
        socketOK = False
        vxref = 0.
        vyref = 0.
        xiref = 0.
    def sendToSocket(self):
        """Send the current measurements to the client as JSON.

        The payload carries each value as a formatted string plus a 'Raw'
        CSV field with the same values, for client-side logging.
        """
        global socketOK, vxmes, vymes, ximes, omegaArriereDroit, omegaArriereGauche, omegaAvantDroit, omegaAvantGauche, \
            gz, vxref, vyref, xiref, ax, ay
        tcourant = time.time() - T0
        aEnvoyer = json.dumps({'Temps':("%.2f" % tcourant), \
                               'consigne_vx':("%.2f" % vxref), \
                               'consigne_vy':("%.2f" % vyref), \
                               'consigne_xi':("%.2f" % xiref), \
                               'vxmes':("%.2f" % vxmes), \
                               'vymes':("%.2f" % vymes), \
                               'ximes':("%.2f" % ximes), \
                               'omegaArriereDroit':("%.2f" % omegaArriereDroit), \
                               'omegaArriereGauche':("%.2f" % omegaArriereGauche), \
                               'omegaAvantDroit':("%.2f" % omegaAvantDroit), \
                               'omegaAvantGauche':("%.2f" % omegaAvantGauche), \
                               'ax':("%.2f" % ax), \
                               'ay':("%.2f" % ay), \
                               'gz':("%.2f" % gz), \
                               'Raw':("%.2f" % tcourant) \
                               + "," + ("%.2f" % vxmes) \
                               + "," + ("%.2f" % vymes) \
                               + "," + ("%.2f" % ximes) \
                               + "," + ("%.2f" % omegaArriereDroit) \
                               + "," + ("%.2f" % omegaArriereGauche) \
                               + "," + ("%.2f" % omegaAvantDroit) \
                               + "," + ("%.2f" % omegaAvantGauche) \
                               + "," + ("%.2f" % ax) \
                               + "," + ("%.2f" % ay) \
                               + "," + ("%.2f" % gz) \
                               })
        if socketOK:
            try:
                self.write_message(aEnvoyer)
            except:
                # Client may have vanished between checks; ignore.
                pass
    def check_origin(self, origin):
        # Accept cross-origin WebSocket connections; see
        # http://www.tornadoweb.org/en/stable/websocket.html#tornado.websocket.WebSocketHandler.check_origin
        # and http://www.arundhaj.com/blog/tornado-error-during-websocket-handshake.html
        return True
def get_ip_address(ifname):
    """Return the IPv4 address bound to network interface `ifname`.

    Uses the SIOCGIFADDR ioctl on a throwaway UDP socket; raises if the
    interface has no address.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    packed_name = struct.pack('256s', ifname[:15])
    ifreq = fcntl.ioctl(sock.fileno(), 0x8915, packed_name)  # SIOCGIFADDR
    # Bytes 20..24 of the returned ifreq structure hold the IPv4 address.
    return socket.inet_ntoa(ifreq[20:24])
# Tornado application: route /ws to the WebSocket handler above.
application = tornado.web.Application([
    (r'/ws', WSHandler)
])
def startTornado():
    """Serve the WebSocket application on port 9090 (blocks forever)."""
    server = tornado.httpserver.HTTPServer(application)
    server.listen(9090)
    tornado.ioloop.IOLoop.instance().start()
# CTRL-C / termination handling
def signal_handler(signal, frame):
    """Stop the motors and zero the setpoints before exiting the process."""
    global vxref, vyref, xiref
    print 'Sortie du programme'
    vxref = 0.
    vyref = 0.
    xiref = 0.
    CommandeMoteurs(0, 0, 0, 0)
    sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
#--- required to launch the program --
if __name__=="__main__": # makes the script executable
    setup() # hardware and controller initialisation
    print "Setup done."
    # Run the control loop in a daemon thread so Tornado owns the main thread.
    th = threading.Thread(None, emitData, None, (), {})
    th.daemon = True
    th.start()
    print "Starting Tornado."
    # Best effort: print connection URLs for whichever interfaces are up.
    try:
        print "Connect to ws://" + get_ip_address('eth0') + ":9090/ws with Ethernet."
    except:
        pass
    try:
        print "Connect to ws://" + get_ip_address('wlan0') + ":9090/ws with Wifi."
    except:
        pass
    socketOK = False
    startTornado()
| {
"content_hash": "972b2e57233e83704872ff823046d51e",
"timestamp": "",
"source": "github",
"line_count": 540,
"max_line_length": 304,
"avg_line_length": 35.120370370370374,
"alnum_prop": 0.6314790403374637,
"repo_name": "3sigma/T-Quad-Quatre-Roues",
"id": "4da9eece7742f5ad2c40408b19edc1903b3af37f",
"size": "19490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "programmes_python/QuatreRoues_Accelero.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "74744"
},
{
"name": "Shell",
"bytes": "417"
}
],
"symlink_target": ""
} |
from PIL import Image
import os
from sklearn import svm, metrics
import numpy as np
def doSVM(img_train, target_train, img_test, target_test):
    """Train an SVC on the training split and print evaluation reports."""
    model = svm.SVC(gamma=0.001)
    model.fit(img_train, target_train)
    expected = target_test
    predicted = model.predict(img_test)
    report = metrics.classification_report(expected, predicted)
    print("Classification report for classifier %s:\n%s\n" % (model, report))
    print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
def prepareTraningDataSet(images_true, targets_true, images_false, targets_false, ratio):
    """Split paired positive/negative samples into train/test numpy arrays.

    Indices below len(images_true)*ratio go to the training split, the rest
    to the test split.  Samples are interleaved true/false with labels 1/0.
    (targets_true/targets_false are accepted for interface compatibility but
    unused: labels are regenerated as 1 and 0.  Assumes images_false has at
    least as many entries as images_true.)
    """
    img_train, target_train, img_test, target_test = [], [], [], []
    cutoff = len(images_true) * ratio
    for idx, true_sample in enumerate(images_true):
        if idx < cutoff:
            dest_imgs, dest_targets = img_train, target_train
        else:
            dest_imgs, dest_targets = img_test, target_test
        dest_imgs.append(true_sample)
        dest_targets.append(1)
        dest_imgs.append(images_false[idx])
        dest_targets.append(0)
    return np.array(img_train), np.array(target_train), np.array(img_test), np.array(target_test)
def main():
    """Load grayscale image vectors from disk, split them, and run the SVM."""
    img_true_dir = "dataset/true_resize_square/"
    img_false_dir = "dataset/false_resize_square/"

    def load_gray_vector(path):
        # Flatten an image into a 1-D grayscale pixel vector.
        return np.array(list(Image.open(path).convert('L').getdata()))

    # Positive samples (label 1).
    images_true = [load_gray_vector(img_true_dir + name) for name in os.listdir(img_true_dir)]
    targets_true = [1] * len(images_true)
    # Negative samples (label 0), capped at the number of positives so the
    # classes stay balanced.
    n_samples = len(images_true)
    images_false = []
    targets_false = []
    count = 0
    for name in os.listdir(img_false_dir):
        images_false.append(load_gray_vector(img_false_dir + name))
        targets_false.append(0)
        count += 1
        if count >= n_samples:
            break
    test_ratio = 0.9  # train_data : dataset = 9 : 10
    img_train, target_train, img_test, target_test = prepareTraningDataSet(
        images_true, targets_true, images_false, targets_false, test_ratio)
    print("len of img_train, target_train, img_test, target_test : %d, %d, %d, %d" % (len(img_train), len(target_train), len(img_test), len(target_test) ))
    doSVM(img_train, target_train, img_test, target_test)
if __name__ == '__main__' :
main() | {
"content_hash": "80308778ab2fe761661e5dc46f7d076d",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 155,
"avg_line_length": 34.93150684931507,
"alnum_prop": 0.6250980392156863,
"repo_name": "MagicUmom/pattern_recognition_project",
"id": "0134859a510af6bb20bc5e9862b490ff04cd5b1e",
"size": "2587",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "svm_line.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53516"
}
],
"symlink_target": ""
} |
import logging
import sys
import threading
import traceback
# Module-scoped logger shared by pykka's debug helpers.
logger = logging.getLogger("pykka")
# Public API of this module.
__all__ = ["log_thread_tracebacks"]
def log_thread_tracebacks(*args, **kwargs):
    """Log a :attr:`logging.CRITICAL`-level traceback for every running
    thread.

    A convenient tool for debugging deadlocks.  All arguments are ignored,
    so the function can be registered directly as a signal handler.

    To use it as a signal handler, set up logging with a
    :attr:`logging.CRITICAL` threshold or lower and register it from your
    main thread::

        import logging
        import signal
        import pykka.debug

        logging.basicConfig(level=logging.DEBUG)
        signal.signal(signal.SIGUSR1, pykka.debug.log_thread_tracebacks)

    If your application deadlocks, send ``SIGUSR1`` to the process::

        kill -SIGUSR1 <pid of your process>

    Signal handler caveats:

    - The function *must* be registered by your main thread; otherwise
      :func:`signal.signal` raises a :exc:`ValueError`.

    - All signals in Python are handled by the main thread, so the
      tracebacks are only logged if your main thread is available to do
      some work.  Idling in :func:`time.sleep` is fine -- the signal wakes
      it up -- but blocking on e.g. :func:`queue.Queue.get` or
      :meth:`pykka.Future.get` breaks signal handling.

    The morale is: set up signals in your main thread, start your actors,
    then let your main thread relax for the rest of your application's
    life cycle.

    .. versionadded:: 1.1
    """
    names_by_ident = {thread.ident: thread.name for thread in threading.enumerate()}
    for thread_ident, frame in sys._current_frames().items():
        thread_name = names_by_ident.get(thread_ident, "?")
        formatted_stack = "".join(traceback.format_stack(frame))
        logger.critical(f"Current state of {thread_name} (ident: {thread_ident}):\n{formatted_stack}")
| {
"content_hash": "5dfb294d61e6894ab658ec6b7e6555ae",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 78,
"avg_line_length": 35.17741935483871,
"alnum_prop": 0.692801467216873,
"repo_name": "jodal/pykka",
"id": "230baf122a3b3e8bf51a5fa63d23765b2b8cbfb9",
"size": "2181",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/pykka/debug.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "112432"
}
],
"symlink_target": ""
} |
from string import Template
from pyPaSWAS.Core import resource_filename, read_file
class Code(object):
    '''
    Builds device source code by substituting configuration parameters into
    the CUDA/OpenCL templates located in Core/cuda and Core/ocl.  Subclasses
    point the *_source attributes at the appropriate template files.
    '''
    def __init__(self, logger):
        self.logger = logger
        # Formatted code fragments, filled in by the set_* methods.
        self.shared_xy_code = ''
        self.directions = ''
        self.score_part = ''
        self.variable_part = ''
        # Template file locations; overridden by subclasses.
        self.variable_source = ''
        self.direction_source = ''
        self.score_source = ''
        self.main_source = ''
    def read_source(self, filename):
        '''Read source code from the specified file and prefix it
        with line number and file name info for better compilation error messages.
        '''
        return '#line 1 "{}"\n'.format(filename) + read_file(filename)
    def set_shared_xy_code(self, sharedx=8, sharedy=8):
        '''
        Sets the horizontal and the vertical sizes of the smallest alignment
        matrices in shared memory.
        :param sharedx: width of the shared-memory tile
        :param sharedy: height of the shared-memory tile
        '''
        code_t = Template(self.read_source(self.main_source))
        self.shared_xy_code = code_t.safe_substitute(SHARED_X=sharedx, SHARED_Y=sharedy)
    def set_direction_code(self, no_direction=0, up_left=1, up=2, left=3, stop=4):
        '''
        Substitutes the numeric encoding of the traceback directions into the
        direction template.
        :param no_direction: value encoding "no direction"
        :param up_left: value encoding the diagonal direction
        :param up: value encoding the up direction
        :param left: value encoding the left direction
        :param stop: value encoding the stop marker
        '''
        direction_t = Template(self.read_source(self.direction_source))
        self.directions = direction_t.safe_substitute(NO_DIRECTION=no_direction,
                                                      UP_LEFT_DIRECTION=up_left,
                                                      UP_DIRECTION=up,
                                                      LEFT_DIRECTION=left,
                                                      STOP_DIRECTION=stop)
    def set_score_code(self, score):
        '''Substitutes the parameters of a score object into the score template.'''
        score_part_t = Template(self.read_source(self.score_source))
        # A missing gap-extension penalty defaults to 0.0 (linear gap model).
        # Was `score.gap_extension != None`; identity comparison is the
        # correct idiom for None.
        gap_extension = score.gap_extension if score.gap_extension is not None else 0.0
        self.score_part = score_part_t.safe_substitute(SCORE_TYPE=score.score_type,
                                                       LOWER_LIMIT=score.lower_limit_score,
                                                       MINIMUM_SCORE=score.minimum_score,
                                                       MAX_SCORE=score.lower_limit_max_score,
                                                       GAP_SCORE=score.gap_score,
                                                       GAP_EXTENSION=gap_extension,
                                                       HIGHEST_SCORE=score.highest_score,
                                                       MATRIX=str(score),
                                                       DIMENSION=score.dimensions)
    def set_variable_code(self, number_sequences, number_targets, x_val, y_val, char_offset):
        '''Sets the variable (problem-size) part of the code.'''
        variable_t = Template(self.read_source(self.variable_source))
        self.variable_part = variable_t.safe_substitute(N_SEQUENCES=number_sequences,
                                                        N_TARGETS=number_targets,
                                                        X=x_val,
                                                        Y=y_val,
                                                        CHAR_OFFSET=char_offset)
    def get_code(self, score, number_sequences, number_targets, x_sequence_length, y_sequence_length):
        '''Assembles and returns the complete device source for the given
        score settings and problem size.  set_direction_code() and
        set_shared_xy_code() must have been called beforehand.'''
        self.set_score_code(score)
        self.set_variable_code(number_sequences, number_targets, x_sequence_length, y_sequence_length, score.char_offset)
        return self.variable_part + self.directions + self.score_part + self.shared_xy_code
class Cudacode(Code):
    '''
    CUDA variant of Code: points the template attributes at the CUDA
    templates located in Core/cuda.
    '''
    def __init__(self, logger):
        Code.__init__(self, logger)
        # Map each template attribute to its CUDA template file.
        for attribute, template in (('variable_source', 'cuda/default_variable.cu'),
                                    ('direction_source', 'cuda/default_direction.cu'),
                                    ('score_source', 'cuda/default_score.cu'),
                                    ('main_source', 'cuda/default_main.cu')):
            setattr(self, attribute, resource_filename(__name__, template))
class OCLcode(Code):
    '''
    OpenCL variant of Code: points the direction and score templates at the
    OpenCL templates located in Core/ocl.  Subclasses supply the main and
    variable templates for their device type.
    '''
    def __init__(self, logger):
        Code.__init__(self, logger)
        for attribute, template in (('direction_source', 'ocl/default_direction.cl'),
                                    ('score_source', 'ocl/default_score.cl')):
            setattr(self, attribute, resource_filename(__name__, template))
class GPUcode(OCLcode):
    '''
    GPU-device OpenCL variant of OCLcode: uses the GPU main and variable
    templates located in Core/ocl.
    '''
    def __init__(self, logger):
        OCLcode.__init__(self, logger)
        for attribute, template in (('main_source', 'ocl/default_main_gpu.cl'),
                                    ('variable_source', 'ocl/default_variable_gpu.cl')):
            setattr(self, attribute, resource_filename(__name__, template))
class CPUcode(OCLcode):
    '''
    CPU-device OpenCL variant of OCLcode: uses the CPU main and variable
    templates and adds per-work-item workload parameters.
    '''
    def __init__(self, logger):
        OCLcode.__init__(self, logger)
        for attribute, template in (('main_source', 'ocl/default_main_cpu.cl'),
                                    ('variable_source', 'ocl/default_variable_cpu.cl')):
            setattr(self, attribute, resource_filename(__name__, template))
    def set_shared_xy_code(self, sharedx=8, sharedy=8, workloadx=4, workloady=4):
        '''
        Sets the shared-memory tile sizes and the workload handled per work
        item.
        :param sharedx: width of the shared-memory tile
        :param sharedy: height of the shared-memory tile
        :param workloadx: horizontal workload per work item
        :param workloady: vertical workload per work item
        '''
        template = Template(self.read_source(self.main_source))
        self.shared_xy_code = template.safe_substitute(SHARED_X=sharedx,
                                                       SHARED_Y=sharedy,
                                                       WORKLOAD_X=workloadx,
                                                       WORKLOAD_Y=workloady)
| {
"content_hash": "4069a7ac260db20967f4f4f1a172a2b6",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 132,
"avg_line_length": 48.854166666666664,
"alnum_prop": 0.5656005685856432,
"repo_name": "swarris/pyPaSWAS",
"id": "b2f871ae14158b1525104b9fe802ca1e1739ab57",
"size": "7035",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyPaSWAS/Core/PaSWAS.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "55486"
},
{
"name": "Cuda",
"bytes": "39887"
},
{
"name": "Dockerfile",
"bytes": "4587"
},
{
"name": "Makefile",
"bytes": "1520"
},
{
"name": "Python",
"bytes": "235631"
},
{
"name": "Shell",
"bytes": "14043"
}
],
"symlink_target": ""
} |
import voxie
#
# Copyright (c) 2014-2022 The Voxie Authors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import voxie.json_dbus
import sys
import functools
import dbus
import math
# PYTHONPATH=pythonlib/ python3 -c 'import voxie, voxie.serialize_state; args = voxie.parser.parse_args(); context = voxie.VoxieContext(args); instance = context.createInstance(); voxie.serialize_state.serialize(instance); context.client.destroy()'
class NodeInfo:
    """Lightweight snapshot of a voxie node: caches the object path,
    prototype, prototype name and node kind for sorting/serialization."""

    def __init__(self, obj):
        self.obj = obj
        self.path = obj._objectPath
        self.prototype = obj.Prototype
        self.prototypeName = self.prototype.Name
        self.kind = self.prototype.NodeKind

    def __repr__(self):
        # Delegate to the wrapped DBus object's repr.
        return repr(self.obj)
def isDigit(c):
    """Return True if c is an ASCII decimal digit ('0'..'9')."""
    return '0' <= c <= '9'
def compareStringNumeric(str1, str2):
    """Three-way compare (-1/0/1) of two strings where, at the first
    differing position, the side with more ASCII digits sorts larger."""
    # Skip the common prefix.
    pos = 0
    while pos < len(str1) and pos < len(str2) and str1[pos] == str2[pos]:
        pos += 1
    # From the first difference on: a digit beats a non-digit; when both
    # sides run out of digits, fall back to plain string comparison.
    while True:
        digit1 = pos < len(str1) and '0' <= str1[pos] <= '9'
        digit2 = pos < len(str2) and '0' <= str2[pos] <= '9'
        if digit1 != digit2:
            return 1 if digit1 else -1
        if not digit1:
            if str1 == str2:
                return 0
            return -1 if str1 < str2 else 1
        pos += 1
# This nodeKindOrder should make sure that all objects which appear as
# properties of other objects are created before those objects.
nodeKindOrder = {
    'de.uni_stuttgart.Voxie.NodeKind.NodeGroup': 1,
    'de.uni_stuttgart.Voxie.NodeKind.Property': 2,
    'de.uni_stuttgart.Voxie.NodeKind.Data': 3,
    'de.uni_stuttgart.Voxie.NodeKind.Object3D': 4,
    'de.uni_stuttgart.Voxie.NodeKind.SegmentationStep': 5,
    'de.uni_stuttgart.Voxie.NodeKind.Filter': 6,
    'de.uni_stuttgart.Voxie.NodeKind.Visualizer': 7,
}
def compareNode(obj1, obj2):
    """Three-way compare of NodeInfo objects: first by node-kind priority
    (kinds in nodeKindOrder sort before unknown kinds), then numeric-aware
    by kind name, prototype name and object path."""
    known1 = obj1.kind in nodeKindOrder
    known2 = obj2.kind in nodeKindOrder
    if known1 and known2:
        rank1 = nodeKindOrder[obj1.kind]
        rank2 = nodeKindOrder[obj2.kind]
        if rank1 != rank2:
            return -1 if rank1 < rank2 else 1
        # Equal ranks fall through to the string tie-breakers below.
    elif known1:
        # Known kinds always sort before unknown ones.
        return -1
    elif known2:
        return 1
    # Tie-break on kind, prototype name, then object path.
    for attribute in ('kind', 'prototypeName', 'path'):
        res = compareStringNumeric(str(getattr(obj1, attribute)), str(getattr(obj2, attribute)))
        if res < 0:
            return -1
        if res > 0:
            return 1
    return 0
# TODO: remove
def serializePropertyValueSimple(serializedNodes, value):
    """Recursively convert a DBus property value into plain Python data,
    mapping object paths through serializedNodes (legacy helper)."""
    # Exact type checks are deliberate: dbus numeric types subclass int.
    if type(value) == tuple or type(value) == list:
        return [serializePropertyValueSimple(serializedNodes, item) for item in value]
    if type(value) == dbus.ObjectPath:
        if value == dbus.ObjectPath('/'):
            # '/' means "no node".
            return None
        if value in serializedNodes:
            return serializedNodes[value]  # TODO: must not be quoted
        print('Warning: Could not find node %s' %
              str(value), file=sys.stderr)
        return None
    if type(value) in [bool, int, float]:
        return value
    print('Got type: %s' % type(value), file=sys.stderr)
    return value
def serializePropertyValue(serializedNodes, sig, value):
    """Render a DBus-typed property value as a Python source expression.

    sig is the DBus type signature of value.  Object paths are replaced by
    the variable names recorded in serializedNodes; unknown signatures are
    rendered as 'None' with a warning.
    """
    sig = dbus.Signature(sig)
    # return repr(value)
    # return repr(serializePropertyValueSimple(serializedNodes, value))
    if sig == 'b':
        return repr(bool(value))
    elif sig in ['y', 'n', 'q', 'i', 'u', 'x', 't']:
        # All DBus integer types.
        return repr(int(value))
    elif sig == 'd':
        f = float(value)
        # repr() of NaN/inf does not round-trip; emit explicit float() calls.
        if math.isnan(f):
            return "float('NaN')"
        elif f == float('Infinity'):
            return "float('Infinity')"
        elif f == float('-Infinity'):
            return "float('-Infinity')"
        else:
            return repr(f)
    elif sig == 's':
        return repr(str(value))
    elif sig == 'o':
        # Object path: '/' means "no node"; otherwise emit the variable name
        # assigned to the already-serialized node.
        if value == dbus.ObjectPath('/'):
            return 'None'
        # return "dbus.ObjectPath('/')"
        elif value in serializedNodes:
            return '%s' % (serializedNodes[value],)
        else:
            print('Warning: Could not find node %s' %
                  str(value), file=sys.stderr)
            return 'None'
        # return "dbus.ObjectPath('/')"
    elif sig[0] == 'a' and sig[1] != '{':
        # Array: serialize each element with the element signature.
        return '[' + ', '.join([serializePropertyValue(serializedNodes, sig[1:], v) for v in value]) + ']'
    elif sig[0] == '(' and sig[-1] == ')':
        # Struct: serialize each member with its own signature.
        sigs = dbus.Signature(sig[1:-1])
        return '(' + ', '.join([serializePropertyValue(serializedNodes, s, v) for s, v in zip(sigs, value)]) + ')'
    elif sig == 'a{sv}':  # TODO: This assumes this is a JSON-like value
        obj = voxie.json_dbus.dbus_to_json_dict(value)
        # TODO: Is repr() the right thing here?
        return 'voxie.json_dbus.json_to_dbus_dict(' + repr(obj) + ')'
    else:
        print('Warning: Could not serialize DBus signature %s' %
              str(sig), file=sys.stderr)
        return 'None'
def serializeNode(instance, file, serializedNodes, obj, oname):
    """Write the Python statements that recreate one node.

    obj is a NodeInfo; oname is the variable name to bind the node to in the
    generated script.  Data nodes backed by a file are re-imported from that
    file; all other nodes are created from their prototype and properties.
    """
    # Build a dict literal of all readable node properties.
    propertiesExpr = '{'
    propertiesExpr += '\n'
    for property in obj.prototype.ListProperties():
        propertiesExpr += ' '
        # TODO: Handle errors
        name = property.Name
        try:
            value = obj.obj.GetProperty(name)
        except Exception as e:
            print('Warning: Could not get property %s for node %s: %s' %
                  (repr(name), str(obj.path), e))
            continue
        propertiesExpr += '%s: voxie.Variant(%s, %s),' % (repr(name), repr(str(
            value.signature)), serializePropertyValue(serializedNodes, value.signature, value.value))
        propertiesExpr += '\n'
    propertiesExpr += '}'
    # Data nodes that were loaded from a file are re-imported instead of
    # being created from scratch.
    filename = ''
    if obj.kind == 'de.uni_stuttgart.Voxie.NodeKind.Data':
        filename = voxie.cast(
            obj.obj, ['de.uni_stuttgart.Voxie.DataNode']).FileName
    if filename != '':
        objData = obj.obj.CastTo('de.uni_stuttgart.Voxie.DataNode')
        importProperties = objData.ImportProperties
        if len(importProperties) == 0:
            # Simple case: default importer settings.
            print('%s = instance.OpenFileChecked(%s) # %s' %
                  (oname, repr(filename), repr(obj.prototypeName)), file=file)
        else:
            # TODO: More error handling
            # Re-import with the same importer and its recorded settings.
            importer = objData.Importer
            importerName = importer.Name
            propertiesExpr2 = '{'
            propertiesExpr2 += '\n'
            for name in importProperties:
                propertiesExpr2 += ' '
                value = importProperties[name]
                propertiesExpr2 += '%s: voxie.Variant(%s, %s),' % (repr(name), repr(str(
                    value.signature)), serializePropertyValue(serializedNodes, value.signature, value.value))
                propertiesExpr2 += '\n'
            propertiesExpr2 += '}'
            print('%s = instance.Components.GetComponent(\'de.uni_stuttgart.Voxie.ComponentType.Importer\', %s).CastTo(\'de.uni_stuttgart.Voxie.Importer\').ImportNode(%s, {\'Properties\': voxie.Variant(\'a{sv}\', %s)}) # %s' %
                  (oname, repr(importerName), repr(filename), propertiesExpr2, repr(obj.prototypeName)), file=file)
        print('%s.SetPropertiesChecked(%s)' %
              (oname, propertiesExpr), file=file)
    else:
        print('%s = instance.CreateNodeChecked(%s, %s)' %
              (oname, repr(obj.prototypeName), propertiesExpr), file=file)
    # Display name and graph position are restored for every node.
    print('%s.ManualDisplayName = %s' %
          (oname, obj.obj.ManualDisplayName), file=file)
    print('%s.GraphPosition = %s' % (oname, obj.obj.GraphPosition), file=file)
    if obj.kind == 'de.uni_stuttgart.Voxie.NodeKind.Visualizer':
        # Visualizers additionally carry window attachment/position/size.
        visObj = voxie.cast(
            obj.obj, ['de.uni_stuttgart.Voxie.VisualizerNode'])
        if instance.Gui.MdiViewMode == 'de.uni_stuttgart.Voxie.MdiViewMode.SubWindow' or not visObj.IsAttached:
            print('%s.CastTo(\'de.uni_stuttgart.Voxie.VisualizerNode\').IsAttached = %s' % (
                oname, visObj.IsAttached), file=file)
        print('%s.CastTo(\'de.uni_stuttgart.Voxie.VisualizerNode\').VisualizerPosition = %s' % (
            oname, visObj.VisualizerPosition), file=file)
        print('%s.CastTo(\'de.uni_stuttgart.Voxie.VisualizerNode\').VisualizerSize = %s' % (
            oname, visObj.VisualizerSize), file=file)
def serializeGui(instance, file):
    """Write the GUI state (MDI view mode) to the serialized script."""
    print("### GUI ###", file=file)
    view_mode = instance.Gui.MdiViewMode
    print("instance.Gui.MdiViewMode = '%s'" % view_mode, file=file)
def isObjectInNodeGroup(obj, nodeGroupPath):
    """Return True if obj is (directly or transitively) inside the node
    group identified by the DBus object path nodeGroupPath.

    :param obj: node wrapper exposing obj.obj.ParentNodeGroup
    :param nodeGroupPath: DBus object path of the node group
    """
    parent = obj.obj.ParentNodeGroup
    if parent is None:
        # Top-level node: not inside any group.  (Previously this path fell
        # through and returned None implicitly.)
        return False
    if parent._objectPath == nodeGroupPath:
        return True
    # NOTE(review): the recursive call passes the parent group itself as
    # `obj`, so the parent must also expose an `.obj` attribute -- confirm
    # against the voxie proxy API.
    return isObjectInNodeGroup(parent, nodeGroupPath)
def serialize(instance, file=sys.stdout, nodeGroupPath=None):
    """Serialize a voxie instance to a file.

    Args:
        instance: The instance which should be serialized.
        file: File to which the serialized output will be written. Defaults to sys.stdout.
        nodeGroupPath: Defaults to None. If set to none all objects in the project will be serialized.
            If set to the dbus object path of a node group in this instance then only that node group and
            all objects which are (indirect) children of it will be serialized.
    """
    versionInfo = instance.VersionInformation
    # Script header: shebang, version comment and bootstrap code.
    print('#!/usr/bin/python3', file=file)
    # TODO: print version string in a different format?
    print('# Stored using voxie version %s' %
          (repr(versionInfo['VersionString'].getValue('s')),), file=file)
    print('import voxie, dbus', file=file)
    print('instance = voxie.instanceFromArgs()\n', file=file)
    lastKind = None
    lastPrototypeName = None
    nodes = list(map(NodeInfo, instance.ListNodes()))
    # print(list(map(lambda x: x.prototype._objectPath, nodes)))
    # Sort so that nodes referenced by others are created first.
    nodes.sort(key=functools.cmp_to_key(compareNode))
    # TODO: support for reordering if necessary or for use SetProperty() to set the properties later when the ordering above is not sufficient?
    serializedNodes = {}
    # id = 0
    usedIDs = {}
    # if nodeGroupPath is set then filter out all objs in the 'objects' list that are not (indirect) children of the node group.
    if nodeGroupPath is not None:
        nodeGroupObjects = []
        for obj in nodes:
            if isObjectInNodeGroup(obj, nodeGroupPath) or obj.obj._objectPath == nodeGroupPath:
                nodeGroupObjects.append(obj)
        nodes = nodeGroupObjects
        nodes.sort(key=functools.cmp_to_key(compareNode))
    for obj in nodes:
        # print(obj.kind, obj.prototypeName, obj.path)
        # Emit section headers whenever the node kind / prototype changes
        # (nodes are sorted by kind, then prototype).
        if obj.kind != lastKind:
            print('\n### %s ###' % (obj.kind,), file=file)
            lastKind = obj.kind
        if obj.prototypeName != lastPrototypeName:
            print('# %s #' % (obj.prototypeName,), file=file)
            lastPrototypeName = obj.prototypeName
        # id = id + 1
        # name = 'obj%d' % id
        # Build a readable, unique variable name from the prototype name:
        # keep the last dotted component, drop non-identifier characters and
        # append a per-prototype counter.
        pnameShort = obj.prototypeName
        if '.' in pnameShort:
            pnameShort = pnameShort[(pnameShort.rindex('.') + 1):]
        pnameShortFiltered = ''
        for c in pnameShort:
            if (c >= 'a' and c <= 'z') or (c >= 'A' and c <= 'Z') or (c >= '0' and c <= '9') or c == '_':
                pnameShortFiltered += c
        if pnameShortFiltered not in usedIDs:
            usedIDs[pnameShortFiltered] = 0
        usedIDs[pnameShortFiltered] += 1
        name = 'o_%s_%d' % (pnameShortFiltered, usedIDs[pnameShortFiltered])
        serializeNode(instance, file, serializedNodes, obj, name)
        serializedNodes[obj.path] = name
    # node group relations. We can only set these after we're done with serializing all nodes because we
    # can't guarantee that nested node groups are serialized in the "correct" order (top-level first)
    print("\n### Node Group Relations ###", file=file)
    for obj in nodes:
        parentNodeGroup = obj.obj.ParentNodeGroup
        if parentNodeGroup is not None:
            print('%s.ParentNodeGroup = %s' % (
                serializedNodes[obj.path], serializedNodes[parentNodeGroup._objectPath]), file=file)
            print('%s.ExportedProperties = %s' %
                  (serializedNodes[obj.path], obj.obj.ExportedProperties), file=file)
    serializeGui(instance, file)
| {
"content_hash": "85796791992a4f3f95b779fb063805d8",
"timestamp": "",
"source": "github",
"line_count": 333,
"max_line_length": 248,
"avg_line_length": 41.66066066066066,
"alnum_prop": 0.6210624954948462,
"repo_name": "voxie-viewer/voxie",
"id": "dd24bc7dafabc9ebd713406e63de6a3963171135",
"size": "13873",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pythonlib/voxie/serialize_state.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "151"
},
{
"name": "C",
"bytes": "582785"
},
{
"name": "C++",
"bytes": "1860251"
},
{
"name": "CMake",
"bytes": "1934"
},
{
"name": "JavaScript",
"bytes": "3194"
},
{
"name": "Makefile",
"bytes": "2053"
},
{
"name": "Python",
"bytes": "36220"
},
{
"name": "QMake",
"bytes": "20169"
},
{
"name": "Shell",
"bytes": "2367"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2015 Michael Bright and Bamboo HR LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# pylint: disable=broad-except
import os
import re
import sys
import hmac
import logging
import datetime
from hashlib import sha1
from rapid.lib.version import Version
logger = logging.getLogger("rapid")
def deep_merge(_a, _b, path=None):
    """Recursively merge _b into _a (in place) and return _a.

    Nested dicts are merged key by key, lists are concatenated and equal
    leaf values are kept; any other clash raises an Exception naming the
    dotted path of the conflicting key.
    """
    if path is None:
        path = []
    for key in _b:
        if key not in _a:
            # New key: take _b's value as-is (nothing to merge with).
            _a[key] = _b[key]
            continue
        a_value = _a[key]
        b_value = _b[key]
        if isinstance(a_value, dict) and isinstance(b_value, dict):
            deep_merge(a_value, b_value, path + [str(key)])
        elif isinstance(a_value, list) and isinstance(b_value, list):
            a_value.extend(b_value)
        elif a_value == b_value:
            pass  # identical leaf: nothing to do
        else:
            raise Exception('Conflict at %s' % '.'.join(path + [str(key)]))
    return _a
class RoutingUtil(object):
    """Helpers for validating API requests and computing request signatures."""

    @staticmethod
    def is_valid_request(sent_api_key, api_key):
        """Return True when the supplied API key matches the configured one
        (both compared as strings)."""
        return str(sent_api_key) == str(api_key)

    @staticmethod
    def get_cihub_signature(secret_key, data):
        """Return the hex HMAC-SHA1 digest of data keyed with secret_key
        (non-ASCII characters in the key are dropped)."""
        key_bytes = secret_key.encode('ascii', 'ignore')
        return hmac.new(key_bytes, data, sha1).hexdigest()
class ORMUtil(object):
    @staticmethod
    def get_filtered_query(query, _filter, clazz):  # pylint: disable=too-many-locals
        """Apply a ':=:'-separated filter expression to a SQLAlchemy query.

        Each token has the form ``<column><_op_><value>`` where the operator
        is one of ``_eq_``, ``_ne_``, ``_gt_``, ``_lt_``, ``_in_``, ``_ge_``,
        ``_le_`` or ``_like_``.  Dotted column names traverse relationships
        from clazz, joining each related class at most once.  A token that
        does not match the pattern stops processing and returns the query
        built so far.
        """
        if _filter is not None:
            joined_classes = []
            for token in _filter.split(':=:'):
                _m = re.match('(.*)(_eq_|_ne_|_gt_|_lt_|_in_|_ge_|_le_|_like_)(.*)', token)
                if not _m:
                    # Malformed token: abort and return what we have so far.
                    return query
                group1 = _m.group(1).split('.')
                _op = _m.group(2).replace('_', '')
                value = _m.group(3)
                if len(group1) > 1:
                    # Dotted path: walk the relationship chain, joining each
                    # related class once, then filter on the final column.
                    final_class = None
                    final_column = None
                    current_class = clazz
                    while len(group1) > 1:
                        nested_attribute = group1[1]
                        column_name = group1.pop(0)
                        attr = getattr(current_class, column_name, None)
                        final_class = attr.property.mapper.class_
                        final_column = getattr(final_class, nested_attribute, None)
                        parent_name = final_column.parent.mapper.class_
                        if parent_name not in joined_classes:
                            query = query.join(final_column.parent.mapper.class_)
                            joined_classes.append(parent_name)
                        current_class = final_column.parent.mapper.class_
                    if final_class is not None and final_column is not None:
                        query = ORMUtil.get_embedded_filter(final_class, final_column.name, _op, value, query)
                else:
                    # Simple column directly on clazz.
                    column_name = group1[0]
                    if hasattr(clazz, column_name):
                        query = ORMUtil.get_column_filter(clazz, column_name, _op, value, query)
                    else:
                        logger.info("Not found: {}".format(_m.group(1)))
        return query
@staticmethod
def get_column_filter(clazz, column_name, _op, value, query):
if hasattr(clazz, column_name):
query = ORMUtil.get_embedded_filter(clazz, column_name, _op, value, query)
return query
raise Exception("Missing attribute")
@staticmethod
def get_embedded_filter(clazz, column_name, _op, value, query):
if hasattr(clazz, column_name):
column = getattr(clazz, column_name, None)
filt = None
if _op == 'in':
filt = column.in_(value.split(','))
else:
attr = None
try:
for _e in ['%s', '%s_', '__%s__']:
if hasattr(column, _e % _op):
attr = _e % _op
except Exception as exception:
logger.info("\n\n")
logger.error(exception)
if value == 'null':
value = None
if column.type.python_type == bool:
value = value == 'True'
elif column.type.python_type == datetime.datetime:
try:
value = datetime.datetime.utcfromtimestamp(float(value)) # Python timestamp
except ValueError:
value = datetime.datetime.utcfromtimestamp(float(value / 1e3)) # Traditional linux timestamp
filt = getattr(column, attr)(value)
query = query.filter(filt)
return query
class UpgradeUtil(object):
    """Self-upgrade helpers: pip-install a new framework version and reload
    the running uwsgi server, restoring the previous version on failure."""

    @staticmethod
    def upgrade_version(version, configuration, attempted_reinstall=False):
        """Install *version* and reload the server, restoring on failure.

        :param version: version string to install ("";""-separated extras are
            stripped by ``_install``).
        :param configuration: object providing ``install_uri`` and
            ``install_options`` for pip.
        :param attempted_reinstall: internal flag marking the recursive
            restore attempt; callers should omit it.
        :return: True when the upgrade installed and the server reloaded;
            False when the install succeeded but uwsgi is unavailable, or when
            the upgrade failed and the previous version was restored.
        :raises Exception: when neither the upgrade nor the restore works.
        """
        if UpgradeUtil._install(version, configuration):
            try:
                import uwsgi
                uwsgi.reload()
            except ImportError:
                # Not running under uwsgi (e.g. dev server): the new code
                # cannot be activated by a reload, so report failure.
                import traceback
                traceback.print_exc()
                return False
            return True
        if not attempted_reinstall:
            # Upgrade failed: try to reinstall the currently running version.
            # NOTE(review): on that restore path a *successful* recursive call
            # returns True, which this frame reports as False (upgrade failed
            # but server restored).
            if UpgradeUtil.upgrade_version(Version.get_version(), configuration, True):
                return False
            raise Exception("Server was unable to upgrade nor restore to previous version!")

    @staticmethod
    def _install(version, configuration):
        """pip-install ``rapid-framework==version`` into the current prefix.

        :return: True when pip exits with status 0, False otherwise.
        """
        version = version.split(';')[0]
        # NOTE(review): ``assert`` is stripped under ``python -O``; this
        # sanitization would then be skipped before the os.system call below.
        assert re.search(r'[^a-zA-Z\-0-9.]', version) is None, "Invalid Version string."
        try:
            logger.info("installing version: {}".format(version))
            return_code = os.system("{}/bin/pip install -i {} {} rapid-framework=={}".format(sys.prefix, configuration.install_uri, configuration.install_options, version))
            return return_code == 0
        except Exception as exception:
            logger.error(exception)
            return False
class OSUtil:
    """Path helpers honoring an optional separator override from the environment."""

    @staticmethod
    def separator():
        """Return the path separator, preferring the ``os_path_override`` env var."""
        override = os.getenv('os_path_override', os.path.sep)
        return override

    @staticmethod
    def path_join(*args):
        """Join *args* with the active separator.

        :param args: List[str]
        :return: joined path string
        """
        sep = OSUtil.separator()
        return sep.join(args)
| {
"content_hash": "65b424ec9a0101263b276cd6d983b63b",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 172,
"avg_line_length": 35.927083333333336,
"alnum_prop": 0.5407364453464772,
"repo_name": "BambooHR/rapid",
"id": "d5b762622348fe933a87ef764adc808376aa1cf1",
"size": "6898",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rapid/lib/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1235"
},
{
"name": "Mako",
"bytes": "1069"
},
{
"name": "Python",
"bytes": "665011"
},
{
"name": "Shell",
"bytes": "6411"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class EventsResults(Model):
    """Result payload of an events query.

    :param odatacontext: OData context metadata endpoint for this response
    :type odatacontext: str
    :param aimessages: OData messages for this response.
    :type aimessages: list[~azure.applicationinsights.models.ErrorInfo]
    :param value: Contents of the events query result.
    :type value: list[~azure.applicationinsights.models.EventsResultData]
    """

    # Maps Python attribute names to their wire-format keys and msrest types.
    _attribute_map = {
        'odatacontext': {'key': '@odata\\.context', 'type': 'str'},
        'aimessages': {'key': '@ai\\.messages', 'type': '[ErrorInfo]'},
        'value': {'key': 'value', 'type': '[EventsResultData]'},
    }

    def __init__(self, *, odatacontext: str=None, aimessages=None, value=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.value = value
        self.aimessages = aimessages
        self.odatacontext = odatacontext
| {
"content_hash": "ca1060b486f69dd3fb67236179c445b0",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 97,
"avg_line_length": 38.52,
"alnum_prop": 0.6542056074766355,
"repo_name": "Azure/azure-sdk-for-python",
"id": "97e9c80082cd921d7d99426ca14d0144cee7f43d",
"size": "1437",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/applicationinsights/azure-applicationinsights/azure/applicationinsights/models/events_results_py3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_vmnic_info
short_description: Gathers info about vmnics available on the given ESXi host
description:
- This module can be used to gather information about vmnics available on the given ESXi host.
- If C(cluster_name) is provided, then vmnic information about all hosts from given cluster will be returned.
- If C(esxi_hostname) is provided, then vmnic information about given host system will be returned.
- Additional details about vswitch and dvswitch with respective vmnic is also provided which is added in 2.7 version.
version_added: '2.9'
author:
- Abhijeet Kasurde (@Akasurde)
- Christian Kotte (@ckotte)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
capabilities:
description:
- Gather information about general capabilities (Auto negotiation, Wake On LAN, and Network I/O Control).
type: bool
default: false
directpath_io:
description:
- Gather information about DirectPath I/O capabilities and configuration.
type: bool
default: false
sriov:
description:
- Gather information about SR-IOV capabilities and configuration.
type: bool
default: false
esxi_hostname:
description:
- Name of the host system to work with.
- Vmnic information about this ESXi server will be returned.
- This parameter is required if C(cluster_name) is not specified.
type: str
cluster_name:
description:
- Name of the cluster from which all host systems will be used.
- Vmnic information about each ESXi server will be returned for the given cluster.
- This parameter is required if C(esxi_hostname) is not specified.
type: str
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Gather info about vmnics of all ESXi Host in the given Cluster
vmware_host_vmnic_info:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: '{{ cluster_name }}'
delegate_to: localhost
register: cluster_host_vmnics
- name: Gather info about vmnics of an ESXi Host
vmware_host_vmnic_info:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
delegate_to: localhost
register: host_vmnics
'''
RETURN = r'''
hosts_vmnics_info:
description:
- dict with hostname as key and dict with vmnics information as value.
- for C(num_vmnics), only NICs starting with vmnic are counted. NICs like vusb* are not counted.
- details about vswitch and dvswitch was added in version 2.7.
- details about vmnics was added in version 2.8.
returned: hosts_vmnics_info
type: dict
sample:
{
"10.76.33.204": {
"all": [
"vmnic0",
"vmnic1"
],
"available": [],
"dvswitch": {
"dvs_0002": [
"vmnic1"
]
},
"num_vmnics": 2,
"used": [
"vmnic1",
"vmnic0"
],
"vmnic_details": [
{
"actual_duplex": "Full Duplex",
"actual_speed": 10000,
"adapter": "Intel(R) 82599 10 Gigabit Dual Port Network Connection",
"configured_duplex": "Auto negotiate",
"configured_speed": "Auto negotiate",
"device": "vmnic0",
"driver": "ixgbe",
"location": "0000:01:00.0",
"mac": "aa:bb:cc:dd:ee:ff",
"status": "Connected",
},
{
"actual_duplex": "Full Duplex",
"actual_speed": 10000,
"adapter": "Intel(R) 82599 10 Gigabit Dual Port Network Connection",
"configured_duplex": "Auto negotiate",
"configured_speed": "Auto negotiate",
"device": "vmnic1",
"driver": "ixgbe",
"location": "0000:01:00.1",
"mac": "ab:ba:cc:dd:ee:ff",
"status": "Connected",
},
],
"vswitch": {
"vSwitch0": [
"vmnic0"
]
}
}
}
'''
try:
from pyVmomi import vim
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi, get_all_objs
class HostVmnicMgr(PyVmomi):
    """Gathers physical NIC (vmnic) information from one or more ESXi hosts."""

    def __init__(self, module):
        """Resolve module options and the target host list.

        Fails the module when neither cluster nor hostname yields any host.
        """
        super(HostVmnicMgr, self).__init__(module)
        # Optional detail toggles from the module arguments.
        self.capabilities = self.params.get('capabilities')
        self.directpath_io = self.params.get('directpath_io')
        self.sriov = self.params.get('sriov')
        cluster_name = self.params.get('cluster_name', None)
        esxi_host_name = self.params.get('esxi_hostname', None)
        self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
        if not self.hosts:
            self.module.fail_json(msg="Failed to find host system.")

    def find_dvs_by_uuid(self, uuid=None):
        """Return the distributed virtual switch with the given UUID, or None."""
        dvs_obj = None
        if uuid is None:
            return dvs_obj
        dvswitches = get_all_objs(self.content, [vim.DistributedVirtualSwitch])
        for dvs in dvswitches:
            if dvs.uuid == uuid:
                dvs_obj = dvs
                break
        return dvs_obj

    def gather_host_vmnic_info(self):
        """Build the per-host vmnic report.

        :return: dict keyed by host name; each value holds ``all``,
            ``available``, ``used``, ``vswitch``, ``dvswitch``,
            ``num_vmnics`` and ``vmnic_details`` entries.
        """
        hosts_vmnic_info = {}
        for host in self.hosts:
            host_vmnic_info = dict(all=[], available=[], used=[], vswitch=dict(), dvswitch=dict())
            host_nw_system = host.configManager.networkSystem
            if host_nw_system:
                nw_config = host_nw_system.networkConfig
                # Only NICs named vmnic* count toward num_vmnics; others
                # (e.g. vusb*) still appear in 'all'.
                vmnics = [pnic.device for pnic in nw_config.pnic if pnic.device.startswith('vmnic')]
                host_vmnic_info['all'] = [pnic.device for pnic in nw_config.pnic]
                host_vmnic_info['num_vmnics'] = len(vmnics)
                host_vmnic_info['vmnic_details'] = []
                for pnic in host.config.network.pnic:
                    pnic_info = dict()
                    if pnic.device.startswith('vmnic'):
                        if pnic.pci:
                            pnic_info['location'] = pnic.pci
                            # Resolve the PCI address to a human-readable adapter name.
                            for pci_device in host.hardware.pciDevice:
                                if pci_device.id == pnic.pci:
                                    pnic_info['adapter'] = pci_device.vendorName + ' ' + pci_device.deviceName
                                    break
                        else:
                            pnic_info['location'] = 'PCI'
                        pnic_info['device'] = pnic.device
                        pnic_info['driver'] = pnic.driver
                        # linkSpeed is unset when the link is down.
                        if pnic.linkSpeed:
                            pnic_info['status'] = 'Connected'
                            pnic_info['actual_speed'] = pnic.linkSpeed.speedMb
                            pnic_info['actual_duplex'] = 'Full Duplex' if pnic.linkSpeed.duplex else 'Half Duplex'
                        else:
                            pnic_info['status'] = 'Disconnected'
                            pnic_info['actual_speed'] = 'N/A'
                            pnic_info['actual_duplex'] = 'N/A'
                        # spec.linkSpeed is unset when the NIC auto-negotiates.
                        if pnic.spec.linkSpeed:
                            pnic_info['configured_speed'] = pnic.spec.linkSpeed.speedMb
                            pnic_info['configured_duplex'] = 'Full Duplex' if pnic.spec.linkSpeed.duplex else 'Half Duplex'
                        else:
                            pnic_info['configured_speed'] = 'Auto negotiate'
                            pnic_info['configured_duplex'] = 'Auto negotiate'
                        pnic_info['mac'] = pnic.mac
                        # General NIC capabilities
                        if self.capabilities:
                            pnic_info['nioc_status'] = 'Allowed' if pnic.resourcePoolSchedulerAllowed else 'Not allowed'
                            pnic_info['auto_negotiation_supported'] = pnic.autoNegotiateSupported
                            pnic_info['wake_on_lan_supported'] = pnic.wakeOnLanSupported
                        # DirectPath I/O and SR-IOV capabilities and configuration
                        if self.directpath_io:
                            pnic_info['directpath_io_supported'] = pnic.vmDirectPathGen2Supported
                        if self.directpath_io or self.sriov:
                            if pnic.pci:
                                for pci_device in host.configManager.pciPassthruSystem.pciPassthruInfo:
                                    if pci_device.id == pnic.pci:
                                        if self.directpath_io:
                                            pnic_info['passthru_enabled'] = pci_device.passthruEnabled
                                            pnic_info['passthru_capable'] = pci_device.passthruCapable
                                            pnic_info['passthru_active'] = pci_device.passthruActive
                                        if self.sriov:
                                            try:
                                                if pci_device.sriovCapable:
                                                    pnic_info['sriov_status'] = (
                                                        'Enabled' if pci_device.sriovEnabled else 'Disabled'
                                                    )
                                                    pnic_info['sriov_active'] = \
                                                        pci_device.sriovActive
                                                    pnic_info['sriov_virt_functions'] = \
                                                        pci_device.numVirtualFunction
                                                    pnic_info['sriov_virt_functions_requested'] = \
                                                        pci_device.numVirtualFunctionRequested
                                                    pnic_info['sriov_virt_functions_supported'] = \
                                                        pci_device.maxVirtualFunctionSupported
                                                else:
                                                    pnic_info['sriov_status'] = 'Not supported'
                                            except AttributeError:
                                                # Older hosts lack the sriovCapable attribute entirely.
                                                pnic_info['sriov_status'] = 'Not supported'
                        host_vmnic_info['vmnic_details'].append(pnic_info)
                vswitch_vmnics = []
                proxy_switch_vmnics = []
                if nw_config.vswitch:
                    for vswitch in nw_config.vswitch:
                        host_vmnic_info['vswitch'][vswitch.name] = []
                        # Workaround for "AttributeError: 'NoneType' object has no attribute 'nicDevice'"
                        # this issue doesn't happen every time; vswitch.spec.bridge.nicDevice exists!
                        try:
                            for vnic in vswitch.spec.bridge.nicDevice:
                                vswitch_vmnics.append(vnic)
                                host_vmnic_info['vswitch'][vswitch.name].append(vnic)
                        except AttributeError:
                            pass
                if nw_config.proxySwitch:
                    for proxy_config in nw_config.proxySwitch:
                        dvs_obj = self.find_dvs_by_uuid(uuid=proxy_config.uuid)
                        if dvs_obj:
                            host_vmnic_info['dvswitch'][dvs_obj.name] = []
                        for proxy_nic in proxy_config.spec.backing.pnicSpec:
                            proxy_switch_vmnics.append(proxy_nic.pnicDevice)
                            if dvs_obj:
                                host_vmnic_info['dvswitch'][dvs_obj.name].append(proxy_nic.pnicDevice)
                # NICs attached to neither a vswitch nor a proxy switch are 'available'.
                used_vmics = proxy_switch_vmnics + vswitch_vmnics
                host_vmnic_info['used'] = used_vmics
                host_vmnic_info['available'] = [pnic.device for pnic in nw_config.pnic if pnic.device not in used_vmics]
            hosts_vmnic_info[host.name] = host_vmnic_info
        return hosts_vmnic_info
def main():
    """Module entry point: parse arguments, gather vmnic info, exit with results."""
    argument_spec = vmware_argument_spec()
    extra_options = dict(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
        capabilities=dict(type='bool', required=False, default=False),
        directpath_io=dict(type='bool', required=False, default=False),
        sriov=dict(type='bool', required=False, default=False),
    )
    argument_spec.update(extra_options)
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_one_of=[
            ['cluster_name', 'esxi_hostname'],
        ],
        supports_check_mode=True,
    )
    manager = HostVmnicMgr(module)
    module.exit_json(changed=False, hosts_vmnics_info=manager.gather_host_vmnic_info())
if __name__ == "__main__":
main()
| {
"content_hash": "6e3cdbcb10942a16befb422c048e5432",
"timestamp": "",
"source": "github",
"line_count": 310,
"max_line_length": 123,
"avg_line_length": 44.512903225806454,
"alnum_prop": 0.5037321545039496,
"repo_name": "thaim/ansible",
"id": "b2e1bc7dea52e33c2517badc946ab7f3214f95fc",
"size": "14062",
"binary": false,
"copies": "20",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/modules/cloud/vmware/vmware_host_vmnic_info.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
} |
from __future__ import with_statement
import os
import sys
from fabric import api as fab
from fabric.contrib import files as fab_files
from fab_deploy.utils import run_as
from ..base import _, Daemon, Ubuntu
from ..deployment import command
from ..utils import upload_template, upload_first
class Redis(Daemon):
    """Daemon wrapper that installs the redis-server package on the target host."""

    namespace = 'redis'

    def install_development_libraries(self):
        """Install the redis-server OS package via the fabric OS abstraction."""
        package_name = 'redis-server'
        fab.env.os.install_package(package_name)

    @command
    def install(self):
        """Deployment command: install redis and its libraries."""
        self.install_development_libraries()
| {
"content_hash": "bd9ec098f0eb261717616bca25729ad9",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 50,
"avg_line_length": 22.608695652173914,
"alnum_prop": 0.7288461538461538,
"repo_name": "suvit/speedydeploy",
"id": "0f8010d0a5dcdf11018b45183d40ff08869ff1a3",
"size": "544",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "speedydeploy/project/redis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "1110"
},
{
"name": "Nginx",
"bytes": "1900"
},
{
"name": "Python",
"bytes": "90835"
},
{
"name": "Shell",
"bytes": "10296"
}
],
"symlink_target": ""
} |
from logging import getLogger
from functools import partial
from operator import attrgetter
from collections import OrderedDict
import gevent
from gevent_tasks.errors import ForeverRuntimeError, TaskKeyError
from gevent_tasks.pool import TaskPool
from gevent_tasks.tasks import Task
from gevent_tasks.timing import Timing
from gevent_tasks.utils import convert_fn_name
__all__ = ["TaskManager"]
class TaskManager(object):
    # __slots__ keeps per-manager memory small and catches attribute typos.
    __slots__ = ("logger", "_pool", "_tasks")

    FOREVER_POLL_SECS = 0.1
    """float: number of seconds to :func:`gevent.sleep` in our
    :func:`~gevent_tasks.manager.TaskManager.forever` block between
    looking for failed tasks.
    """

    def __init__(self,
                 pool_size=TaskPool.DEFAULT_POOL_SIZE,
                 pool_cls=None,
                 max_task_timings=Timing.MAX_RUN_TIMES,
                 logger=None):
        """Interface for managing tasks and running them in a Gevent Pool.
        Args:
            pool_size (int): maximum concurrent gevents to use in a
                :class:`gevent_tasks.pool.TaskPool`.
            pool_cls (:class:`gevent.pool.Pool`): the concurrency pool that all
                of our underlying periodic tasks will run in. This is
                important to remember since our pool can only process
                its defined size of threads at one time. Tasks that block
                waiting for space in the pool may lapse their rerun period
                and fall into an undefined state.
                The recommended pool to use is
                :obj:`gevent_tasks.pool.TaskPool` which has helper methods
                with information about the current run state of its
                greenlets_.
            max_task_timings (int): number of task runs to track for per-task statistics.
            logger (:obj:`logging.Logger`): logging instance from the
                standard library. If one isn't provided a new one will be
                made for this instance.
        .. _greenlets: http://www.gevent.org/gevent.html#greenlet-objects
        """
        # yapf: disable
        # NOTE: mutates a class-level setting shared by all Timing instances.
        Timing.MAX_RUN_TIMES = max(4, max_task_timings)
        # Enforce a minimum pool size of 2 so tasks can overlap.
        if pool_size < 2:
            pool_size = 2
        if pool_cls and callable(pool_cls) and pool_size:
            pool = pool_cls(pool_size)
        else:  # pool_cls is None:
            pool = TaskPool(size=pool_size)
        self._pool = pool  # type: TaskPool
        # Insertion-ordered registry of tasks, keyed by task name.
        self._tasks = OrderedDict()  # type: OrderedDict[str, Task]
        self.logger = logger or getLogger("%s.TaskManager" % __name__)
        # yapf: enable

    def __repr__(self):
        return "<TaskManager(tasks=%d, capacity=%d)>" % (len(self._tasks), self._pool.size)

    def __iter__(self):
        # Iterating a manager yields its Task objects in registration order.
        yield from self._tasks.values()

    def task(self, _fn=None, **kwargs):
        """Register a method as a task via decorated function.
        Can be used as a simple decorator, ::
            @manager.task
            def some_function(task):
                ...
        or with keyword arguments that match those used for
        :obj:`.Task`, ::
            @manager.task(interval=30.0, timeout=25.0)
            def some_function(task):
                ...
        When keyword arguments are omitted the default values are applied:
        ``name`` is the function's name converted to CamelCase,
        ``timeout`` is 59 seconds, ``interval`` is 60 seconds, and
        ``logger`` is built from the name of the name of
        :obj:`.TaskManager.logger`.
        Args:
            _fn (Callable): function that takes at least one argument,
                ``task``, that will be run on a fixed interval for the
                lifetime of the current process.
            kwargs: the same keyword arguments used for creating a
                :obj:`.Task` object.
        Returns:
            Callable of the underlying function.
        """
        def make_task(f, **kw):
            # Build a Task around f, filling in defaults for missing options.
            name = kw.get("name", convert_fn_name(f.__name__))
            logger = kw.get("logger", None)
            if logger is None:
                logger = getLogger(self.logger.name + ".Task.%s" % name)
            kw.update({
                "fn": f,
                "name": name,
                "manager": self,
                "timeout": kw.get("timeout", 59.0),
                "interval": kw.get("interval", 60.0),
                "logger": logger,
            })
            return Task(**kw)
        if _fn and callable(_fn):
            # Bare decorator form: @manager.task
            self.add(make_task(_fn))
            return _fn
        else:
            # spec'd out task: @manager.task(interval=..., ...)
            def inner(fn, **kwargs):
                self.add(make_task(fn, **kwargs))
                return fn
            return partial(inner, **kwargs)

    @property
    def pool(self):
        """:obj:`.TaskPool`: Reference to the underlying TaskPool instance."""
        return self._pool

    @property
    def task_names(self):
        """list(str): Copy of a list of all the registered task's names."""
        return [t for t in self._tasks.keys()]

    def get(self, name):
        """Get a reference for a Task by its name.
        Returns:
            :obj:`.Task` when ``name`` is registered, ``None`` otherwise.
        """
        return self._tasks.get(name, None)

    def add(self, task, start=False):
        """Add a task to the manager and optionally start executing it.
        Args:
            task (:obj:`.Task`): instance of Task to track in our manager.
            start (bool): if the task is not in a running state, should
                it be started.
        Raises:
            KeyError: when the Task's name is the same as one already being
                tracked.
        Returns:
            ``task``
        """
        if task.name in self._tasks:
            raise TaskKeyError(task.name)
        if task.pool is None:
            # Adopt orphan tasks into this manager's pool.
            task.pool = self._pool
        self._tasks[task.name] = task
        if start and not task.running:
            task.start()
        return task

    def add_many(self, *tasks, start=False):
        """Add many tasks to the manager.
        Args:
            *tasks (:obj:`.Task`): variable amount of Tasks to track.
            start (bool): checks if each task has been started, if it
                hasn't when ``True`` the task will start.
        Raises:
            KeyError: when one of the Task's name is the same as one
                already being tracked.
        Returns:
            None
        """
        for task in tasks:
            self.add(task, start=start)

    def start(self, task_name):
        """Starts a registered Task by name.
        Args:
            task_name (str): will start a task by name if it's currently
                being tracked in the manager.
        Returns:
            None
        Raises:
            Nothing: will "fail" silently if a non-tracked name is given.
        """
        task = self._tasks.get(task_name, None)
        if task:
            task.start()

    def start_all(self):
        """Calls :func:`~start` on each Task being tracked.
        Returns:
            None
        """
        for task in self.task_names:
            self.start(task)

    def stop(self, task_name, force=False):
        """Stop a registered task by name.
        Args:
            task_name (str): will stop a task by name if it's currently
                being tracked in the manager and running.
            force (bool): block the pool and event loop until this task
                can be forcibly terminated.
        Returns:
            None
        Raises:
            Nothing: will "fail" silently if a non-tracked name is given.
        """
        task = self._tasks.get(task_name, None)
        if task:
            task.stop(force)

    def stop_all(self, force=False):
        """Calls :func:`~stop` on each Task being tracked.
        Args:
            force (bool): block the pool and event loop until each task
                can be forcibly terminated.
        Returns:
            None
        """
        for task in self.task_names:
            self.stop(task, force)

    def remove_task(self, task, force=False):
        """Unregister a task from the manager by name or instance.
        Args:
            task (str or :obj:`.Task`): reference to a tracked Task.
            force (bool): calls :func:`.stop` with ``force`` before
                removing the Task from our manager.
        Returns:
            :obj:`Task` or ``None``
        """
        # Accept either a Task instance or a plain name string.
        if hasattr(task, "name"):
            name = task.name
        else:
            name = task
        task_ = self._tasks.pop(name, None)
        if task_:
            task_.stop(force)
        return task_

    def remove_all(self, force=True):
        """Calls :func:`.remove_task` for each Task being tracked.
        Args:
            force (bool): calls :func:`.stop` with ``force`` before
                removing the Task from our manager.
        Yields:
            :obj:`.Task`: each Task as it's removed. Allows for accessing
                additional runtime information before being garbage
                collected.
        """
        for task in self.task_names:
            yield self.remove_task(task, force)

    def forever(self,
                *exceptions,
                stop_after_exc=True,
                stop_on_zero=True,
                polling=None,
                callback=None):
        """Blocks in an infinite loop after starting all registered tasks.
        The only way to break out is if one of the included ``exceptions``
        is raised while being executed in a running task.
        Note:
            The loop will sleep for :attr:`.FOREVER_POLL_SECS` between
            checking Tasks for a failed state.
        Args:
            stop_after_exc (bool): stop the loop after our first exception.
            stop_on_zero (bool): stop the loop if no tasks are running.
            polling (float): overwrites :attr:`.FOREVER_POLL_SECS` if value
                is not ``None``.
            callback (Callable): a function, with no parameters, that is called at the end of
                the forever loop if all tasks were unscheduled/stopped successfully without
                raising any Exceptions in ``*exceptions``.
            *exceptions (Exception): variable number of Exception classes
                to raise if an error occurs in a Task. This will break the
                Forever loop and effectively stop our TaskPool.
        Note:
            :exc:`KeyboardInterrupt` is exempt from ``exceptions``
            and will fail "gracefully" instead of re-raising to
            break the loop.
        Returns:
            Any: the return value of ``callback`` if it's defined, else ``None``.
        """
        if not exceptions:
            exceptions = (ForeverRuntimeError,)
        if polling is not None:
            # Never poll tighter than 5ms.
            polling = max(0.005, polling)
        else:
            polling = self.FOREVER_POLL_SECS
        scheduled_attr = attrgetter('scheduled')
        self.start_all()
        try:
            while True:
                if stop_on_zero:
                    none_scheduled = not any(map(scheduled_attr, self))
                    if self.pool.running == 0 and none_scheduled:
                        self.logger.debug('stop_on_zero=True, no tasks scheduled')
                        break
                # NOTE(review): when stop_after_exc=False, remove_task mutates
                # self._tasks while this generator iterates it — likely
                # RuntimeError; confirm against upstream intent.
                for task in self:
                    err = task.exception_info
                    if err:
                        if stop_after_exc:
                            exc_cls, exc_val, trace = err
                            self.logger.error(exc_val)
                            raise ForeverRuntimeError(*exc_val.args) from exc_cls
                        self.remove_task(task.name)
                gevent.sleep(polling)
        except KeyboardInterrupt:
            self.logger.debug("keyboard interrupt")
        except exceptions as e:
            self.logger.exception(e, exc_info=True)
            raise e
        self._pool.join()
        if callback and callable(callback):
            return callback()
| {
"content_hash": "154aba359d2bc2072d14393c7f9877e9",
"timestamp": "",
"source": "github",
"line_count": 354,
"max_line_length": 93,
"avg_line_length": 34.333333333333336,
"alnum_prop": 0.5446766496626625,
"repo_name": "blakev/gevent-tasks",
"id": "63377c0df561622e6085ea6f4ab55f14b59cbb80",
"size": "12237",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gevent_tasks/manager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34767"
}
],
"symlink_target": ""
} |
"""
.. _l-logreg-example:
Train, convert and predict with ONNX Runtime
============================================
This example demonstrates an end to end scenario
starting with the training of a machine learned model
to its use in its converted from.
.. contents::
:local:
Train a logistic regression
+++++++++++++++++++++++++++
The first step consists in retrieving the iris datset.
"""
from sklearn.datasets import load_iris
iris = load_iris()
X, y = iris.data, iris.target
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y)
####################################
# Then we fit a model.
from sklearn.linear_model import LogisticRegression
clr = LogisticRegression()
clr.fit(X_train, y_train)
####################################
# We compute the prediction on the test set
# and we show the confusion matrix.
from sklearn.metrics import confusion_matrix
pred = clr.predict(X_test)
print(confusion_matrix(y_test, pred))
####################################
# Conversion to ONNX format
# +++++++++++++++++++++++++
#
# We use module
# `sklearn-onnx <https://github.com/onnx/sklearn-onnx>`_
# to convert the model into ONNX format.
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import FloatTensorType
# [None, 4]: dynamic batch size, 4 features per row.
initial_type = [("float_input", FloatTensorType([None, 4]))]
onx = convert_sklearn(clr, initial_types=initial_type)
with open("logreg_iris.onnx", "wb") as f:
    f.write(onx.SerializeToString())
##################################
# We load the model with ONNX Runtime and look at
# its input and output.
import onnxruntime as rt
sess = rt.InferenceSession("logreg_iris.onnx", providers=rt.get_available_providers())
print("input name='{}' and shape={}".format(sess.get_inputs()[0].name, sess.get_inputs()[0].shape))
print("output name='{}' and shape={}".format(sess.get_outputs()[0].name, sess.get_outputs()[0].shape))
##################################
# We compute the predictions.
input_name = sess.get_inputs()[0].name
label_name = sess.get_outputs()[0].name
import numpy
pred_onx = sess.run([label_name], {input_name: X_test.astype(numpy.float32)})[0]
print(confusion_matrix(pred, pred_onx))
###################################
# The predictions are perfectly identical.
#
# Probabilities
# +++++++++++++
#
# Probabilities are needed to compute other
# relevant metrics such as the ROC Curve.
# Let's see how to get them first with
# scikit-learn.
prob_sklearn = clr.predict_proba(X_test)
print(prob_sklearn[:3])
#############################
# And then with ONNX Runtime.
# The probabilities appear in the model's second output.
prob_name = sess.get_outputs()[1].name
prob_rt = sess.run([prob_name], {input_name: X_test.astype(numpy.float32)})[0]
import pprint
pprint.pprint(prob_rt[0:3])
###############################
# Let's benchmark.
from timeit import Timer
def speed(inst, number=10, repeat=20):
    """Benchmark the statement *inst* with timeit and report seconds per run.

    *inst* is evaluated against this module's globals. Prints the average,
    minimum and maximum per-run time and returns the average.
    """
    bench = Timer(inst, globals=globals())
    samples = numpy.array(bench.repeat(repeat, number=number))
    # Each sample is the total for `number` executions; normalize to per-run.
    ave = samples.sum() / len(samples) / number
    mi = samples.min() / number
    ma = samples.max() / number
    print("Average %1.3g min=%1.3g max=%1.3g" % (ave, mi, ma))
    return ave
# Batch prediction timings: whole test set in one call.
print("Execution time for clr.predict")
speed("clr.predict(X_test)")
print("Execution time for ONNX Runtime")
speed("sess.run([label_name], {input_name: X_test.astype(numpy.float32)})[0]")
###############################
# Let's benchmark a scenario similar to what a webservice
# experiences: the model has to do one prediction at a time
# as opposed to a batch of prediction.
def loop(X_test, fct, n=None):
    """Invoke *fct* once per iteration on a one-row slice of *X_test*.

    Rows are visited round-robin; *n* defaults to the number of rows.
    """
    total_rows = X_test.shape[0]
    iterations = total_rows if n is None else n
    for step in range(iterations):
        row = step % total_rows
        fct(X_test[row:row + 1])
# Per-row prediction timings: scikit-learn first.
print("Execution time for clr.predict")
speed("loop(X_test, clr.predict, 100)")
def sess_predict(x):
    """Run the ONNX session on *x* and return the predicted labels."""
    feed = {input_name: x.astype(numpy.float32)}
    return sess.run([label_name], feed)[0]
# Per-row prediction timings: ONNX Runtime.
print("Execution time for sess_predict")
speed("loop(X_test, sess_predict, 100)")
#####################################
# Let's do the same for the probabilities.
print("Execution time for predict_proba")
speed("loop(X_test, clr.predict_proba, 100)")
def sess_predict_proba(x):
    """Run the ONNX session on *x* and return the class probabilities."""
    feed = {input_name: x.astype(numpy.float32)}
    return sess.run([prob_name], feed)[0]
print("Execution time for sess_predict_proba")
speed("loop(X_test, sess_predict_proba, 100)")
#####################################
# This second comparison is better as
# ONNX Runtime, in this experience,
# computes the label and the probabilities
# in every case.
##########################################
# Benchmark with RandomForest
# +++++++++++++++++++++++++++
#
# We first train and save a model in ONNX format.
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier()
rf.fit(X_train, y_train)
# [1, 4]: fixed batch of one row, matching the per-row benchmark below.
initial_type = [("float_input", FloatTensorType([1, 4]))]
onx = convert_sklearn(rf, initial_types=initial_type)
with open("rf_iris.onnx", "wb") as f:
    f.write(onx.SerializeToString())
###################################
# We compare.
# NOTE: rebinds the global `sess` used by the helpers defined above.
sess = rt.InferenceSession("rf_iris.onnx", providers=rt.get_available_providers())
def sess_predict_proba_rf(x):
    """Return class probabilities from the currently loaded random-forest session."""
    feed = {input_name: x.astype(numpy.float32)}
    return sess.run([prob_name], feed)[0]
print("Execution time for predict_proba")
speed("loop(X_test, rf.predict_proba, 100)")
print("Execution time for sess_predict_proba")
speed("loop(X_test, sess_predict_proba_rf, 100)")
##################################
# Let's see with different number of trees.
measures = []
for n_trees in range(5, 51, 5):
    print(n_trees)
    rf = RandomForestClassifier(n_estimators=n_trees)
    rf.fit(X_train, y_train)
    initial_type = [("float_input", FloatTensorType([1, 4]))]
    onx = convert_sklearn(rf, initial_types=initial_type)
    with open("rf_iris_%d.onnx" % n_trees, "wb") as f:
        f.write(onx.SerializeToString())
    sess = rt.InferenceSession("rf_iris_%d.onnx" % n_trees, providers=rt.get_available_providers())
    # Redefined each iteration so it reads the freshly loaded session;
    # speed() runs it immediately, so the late binding of `sess` is safe here.
    def sess_predict_proba_loop(x):
        return sess.run([prob_name], {input_name: x.astype(numpy.float32)})[0]
    tsk = speed("loop(X_test, rf.predict_proba, 100)", number=5, repeat=5)
    trt = speed("loop(X_test, sess_predict_proba_loop, 100)", number=5, repeat=5)
    measures.append({"n_trees": n_trees, "sklearn": tsk, "rt": trt})
# Plot both timing curves against the tree count (log scale).
from pandas import DataFrame
df = DataFrame(measures)
ax = df.plot(x="n_trees", y="sklearn", label="scikit-learn", c="blue", logy=True)
df.plot(x="n_trees", y="rt", label="onnxruntime", ax=ax, c="green", logy=True)
ax.set_xlabel("Number of trees")
ax.set_ylabel("Prediction time (s)")
ax.set_title("Speed comparison between scikit-learn and ONNX Runtime\nFor a random forest on Iris dataset")
ax.legend()
| {
"content_hash": "ec3870cbb8e76e8ac43b0bba2dc22b15",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 107,
"avg_line_length": 28.47457627118644,
"alnum_prop": 0.6352678571428572,
"repo_name": "microsoft/onnxruntime",
"id": "b5033b503b3ebf4aff6c5ac6c0c65abf0132036a",
"size": "6815",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "docs/python/inference/examples/plot_train_convert_predict.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1763425"
},
{
"name": "Batchfile",
"bytes": "17040"
},
{
"name": "C",
"bytes": "955390"
},
{
"name": "C#",
"bytes": "2304597"
},
{
"name": "C++",
"bytes": "39435305"
},
{
"name": "CMake",
"bytes": "514764"
},
{
"name": "CSS",
"bytes": "138431"
},
{
"name": "Cuda",
"bytes": "1104338"
},
{
"name": "Dockerfile",
"bytes": "8089"
},
{
"name": "HLSL",
"bytes": "11234"
},
{
"name": "HTML",
"bytes": "5933"
},
{
"name": "Java",
"bytes": "418665"
},
{
"name": "JavaScript",
"bytes": "212575"
},
{
"name": "Jupyter Notebook",
"bytes": "218327"
},
{
"name": "Kotlin",
"bytes": "4653"
},
{
"name": "Liquid",
"bytes": "5457"
},
{
"name": "NASL",
"bytes": "2628"
},
{
"name": "Objective-C",
"bytes": "151027"
},
{
"name": "Objective-C++",
"bytes": "107084"
},
{
"name": "Pascal",
"bytes": "9597"
},
{
"name": "PowerShell",
"bytes": "16419"
},
{
"name": "Python",
"bytes": "5041661"
},
{
"name": "Roff",
"bytes": "27539"
},
{
"name": "Ruby",
"bytes": "3545"
},
{
"name": "Shell",
"bytes": "116513"
},
{
"name": "Swift",
"bytes": "115"
},
{
"name": "TypeScript",
"bytes": "973087"
}
],
"symlink_target": ""
} |
from machine import Pin
from neopixel import NeoPixel
from time import sleep
# Hardware configuration for the NeoPixel strip.
LED_PIN = 5  # GPIO pin the NeoPixel data line is wired to
NUM_OF_LEDS = 6  # number of pixels on the strip
PIN = Pin(LED_PIN, Pin.OUT)  # configure the data pin as an output
NEOPIXEL = NeoPixel(PIN, NUM_OF_LEDS)  # driver object for the whole strip
def init():
    """Build the initial LED bookkeeping structures.

    Returns:
        (states, leds): `states` maps each LED index to 0 (off);
        `leds` is the list of valid LED indices.
    """
    leds = list(range(NUM_OF_LEDS))
    states = {led: 0 for led in leds}
    return states, leds
def on_off(states, leds):
    """Prompt for an LED number (1-based) and toggle that pixel on/off.

    Args:
        states: dict mapping LED index -> 0 (off) or 1 (on); mutated in place.
        leds: list of valid LED indices.

    Returns:
        False when the user enters "q" (caller should stop), True after an
        invalid entry (caller should re-invoke); otherwise keeps looping and
        servicing toggle requests.
    """
    while True:
        user_input = input("Select an LED to turn on (q to quit): ")
        if user_input == "q":
            print("Bye!")
            return False
        else:
            try:
                # Prompt is 1-based, the pixel buffer is 0-based.
                user_input = int(user_input) - 1
            except ValueError:
                # Bug fix: was a bare `except:` which would also swallow
                # KeyboardInterrupt etc.; only int() conversion can fail here.
                print("Not a valid entry")
                return True
        if user_input not in leds:
            print("Not a valid LED")
            return True
        if states[user_input] == 0:
            NEOPIXEL[user_input] = (255, 255, 255)  # full white = on
            states[user_input] = 1
        elif states[user_input] == 1:
            NEOPIXEL[user_input] = (0, 0, 0)  # black = off
            states[user_input] = 0
        NEOPIXEL.write()  # push the updated pixel buffer to the strip
def main():
    """Run the interactive toggle loop until on_off() signals quit."""
    states, leds = init()
    keep_going = True
    while keep_going:
        keep_going = on_off(states, leds)
if __name__ == '__main__':
main() | {
"content_hash": "1250ab37b5904426cfe7c41fd9f65c81",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 62,
"avg_line_length": 21.06382978723404,
"alnum_prop": 0.6141414141414141,
"repo_name": "kevinpanaro/esp8266_micropython",
"id": "d82f91f6fa336a9d3d834e5a6fd1728690e13803",
"size": "990",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/on_off.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18474"
}
],
"symlink_target": ""
} |
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
# Fallback compile flags handed to clang when no compilation database exists.
flags = [
'-Wall',
'-Wextra',
'-Werror',
# Project-specific preprocessor define — presumably used for conditional
# compilation in this repo; TODO confirm where it is consumed.
'-Dmoskupols',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++11',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
'-I.',
'-Isrc'
]

# Extensions tried when looking for the source file that matches a header.
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
  """Return the absolute directory containing this configuration file."""
  here = os.path.abspath( __file__ )
  return os.path.dirname( here )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
  """Return a copy of `flags` with relative include/sysroot paths anchored
  at `working_directory`.

  Handles both the split form ('-I', 'path') and the joined form ('-Ipath').
  Empty flags are dropped. If no working directory is given, the flags are
  returned unchanged (as a fresh list).
  """
  if not working_directory:
    return list( flags )
  path_prefixes = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
  absolute_flags = []
  expecting_path = False
  for flag in flags:
    updated = flag
    if expecting_path:
      # Previous flag was a bare path prefix; this flag is its argument.
      expecting_path = False
      if not flag.startswith( '/' ):
        updated = os.path.join( working_directory, flag )
    prefix = next( ( p for p in path_prefixes if flag.startswith( p ) ), None )
    if prefix is not None:
      if flag == prefix:
        # Bare prefix: the path arrives as the next flag.
        expecting_path = True
      else:
        # Joined form: rewrite the embedded path.
        updated = prefix + os.path.join( working_directory, flag[ len( prefix ): ] )
    if updated:
      absolute_flags.append( updated )
  return absolute_flags
def IsHeaderFile( filename ):
  """True if `filename` carries a C/C++ header extension."""
  _, extension = os.path.splitext( filename )
  return extension in ( '.h', '.hxx', '.hpp', '.hh' )
def GetCompilationInfoForFile( filename ):
  """Look up compile flags for `filename` in the compilation database.

  NOTE(review): this function references a module-level `database` object
  that is NOT defined anywhere in this file — calling it would raise
  NameError. It appears to be dead code left over from a database-backed
  ycm_extra_conf template (FlagsForFile below never calls it); confirm.
  """
  # The compilation_commands.json file generated by CMake does not have entries
  # for header files. So we do our best by asking the db for flags for a
  # corresponding source file, if any. If one exists, the flags for that file
  # should be good enough.
  if IsHeaderFile( filename ):
    basename = os.path.splitext( filename )[ 0 ]
    for extension in SOURCE_EXTENSIONS:
      replacement_file = basename + extension
      if os.path.exists( replacement_file ):
        compilation_info = database.GetCompilationInfoForFile(
          replacement_file )
        if compilation_info.compiler_flags_:
          return compilation_info
    # No sibling source file found: give up on this header.
    return None
  return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
  """YouCompleteMe entry point: return the compile flags for `filename`.

  Uses the static module-level `flags`, rewritten so relative paths are
  anchored at this script's directory; results are cached by ycmd.
  """
  root = DirectoryOfThisScript()
  absolute_flags = MakeRelativePathsInFlagsAbsolute( flags, root )
  return {
    'flags': absolute_flags,
    'do_cache': True
  }
| {
"content_hash": "29b6a1065b21d27b913452bb85026399",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 79,
"avg_line_length": 31.705263157894738,
"alnum_prop": 0.651394422310757,
"repo_name": "moskupols/image-labeling-benchmark",
"id": "a0ed5ee725dd8585e8ad08bd195fd128e5c646b4",
"size": "3035",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": ".ycm_extra_conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "44044"
},
{
"name": "Makefile",
"bytes": "2809"
},
{
"name": "Python",
"bytes": "13228"
},
{
"name": "TeX",
"bytes": "97214"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from __future__ import print_function
from ..compat import implements_to_string, text_type
class MoyaRuntimeError(Exception):
    """Base runtime error for Moya template execution.

    NOTE(review): __init__ takes no arguments, so this exception carries no
    message — instantiate as MoyaRuntimeError().
    """

    def __init__(self):
        pass
@implements_to_string
class MissingTemplateError(Exception):
    """Raised when a referenced template path cannot be found.

    Attributes:
        path: the template path that could not be resolved.
        diagnosis: markdown help text shown in error reports.
    """

    hide_py_traceback = True
    error_type = "Missing Template"

    def __init__(self, path, diagnosis=None):
        self.path = path
        # Bug fix: default diagnosis text read "doesn't exists" (grammar
        # error in a user-facing message); corrected to "doesn't exist".
        self.diagnosis = (
            diagnosis
            or """The referenced template doesn't exist in the templates filesystem. Run the following to see what templates are installed:\n\n **$ moya fs templates --tree**"""
        )

    def __str__(self):
        return 'Missing template "{}"'.format(self.path)

    __repr__ = __str__
@implements_to_string
class BadTemplateError(MissingTemplateError):
    """Raised when a template exists but could not be loaded."""

    # Unlike the missing-template case, a load failure is a bug worth a
    # full Python traceback.
    hide_py_traceback = False
    error_type = "Bad Template"

    def __str__(self):
        return 'Unable to load template "%s"' % self.path
@implements_to_string
class RecursiveTemplateError(Exception):
    """Raised when a template extends a template already in the extends chain."""

    def __init__(self, path):
        # path: the template path that appeared twice in the chain.
        self.path = path

    def __str__(self):
        return "Template '{}' has already been used in an extends directive".format(
            self.path
        )
@implements_to_string
class TemplateError(Exception):
    """General template failure carrying source-location information.

    Attributes:
        msg: human-readable description.
        path / lineno: template file and line where the error occurred.
        diagnosis: optional help text for error reports.
        original: the underlying exception, if any.
        trace_frames: Moya stack frames leading to the error.
    """

    hide_py_traceback = True

    def __init__(
        self, msg, path, lineno, diagnosis=None, original=None, trace_frames=None
    ):
        self.msg = msg
        self.path = path
        self.lineno = lineno
        self.diagnosis = diagnosis
        self.original = original
        # Avoid a shared mutable default by normalizing None to a new list.
        self.trace_frames = trace_frames or []
        super(TemplateError, self).__init__()

    def __str__(self):
        return self.msg

    def __repr__(self):
        return 'File "%s", line %s: %s' % (self.path, self.lineno, self.msg)

    def get_moya_error(self):
        # Formatted like a Python traceback header for report rendering.
        return 'File "%s", line %s: %s' % (self.path, self.lineno, self.msg)

    def get_moya_frames(self):
        # Return a copy so callers can't mutate the stored frames.
        return self.trace_frames[:]
@implements_to_string
class NodeError(Exception):
    """Error attached to a specific node in the parsed template.

    Attributes:
        msg: human-readable description.
        node: the template node the error refers to.
        lineno: line number of the node.
        start / end: character offsets of the node within the source.
        diagnosis: optional help text for error reports.
    """

    hide_py_traceback = True
    error_type = "Template Node Error"

    def __init__(self, msg, node, lineno, start, end, diagnosis=None):
        self.msg = msg
        self.node = node
        self.lineno = lineno
        self.start = start
        self.end = end
        self.diagnosis = diagnosis

    def __str__(self):
        return self.msg
class UnknownTag(NodeError):
    """An unrecognized tag was encountered."""

    pass


class UnmatchedTag(NodeError):
    """A tag has no matching counterpart."""

    pass


class TagSyntaxError(NodeError):
    """A tag's contents could not be parsed."""

    pass


class RecursiveExtends(NodeError):
    """An extends directive recurses."""

    pass
@implements_to_string
class TokenizerError(Exception):
    """Error raised while tokenizing template source.

    Attributes:
        msg: human-readable description.
        lineno: line number where tokenizing failed.
        start / end: character offsets of the offending span.
        diagnosis: optional help text for error reports.
    """

    def __init__(self, msg, lineno, start, end, diagnosis=None):
        self.msg = msg
        self.lineno = lineno
        self.start = start
        self.end = end
        self.diagnosis = diagnosis

    def __str__(self):
        return self.msg
class UnmatchedComment(TokenizerError):
    """A template comment was opened but never closed (or vice versa)."""

    pass
#
# class UnmatchedComment(TokenizeError):
# def __init__(self, msg, lineno, start, end, diagnosis=None):
# self.msg = msg
# self.lineno = lineno
# self.start = start
# self.end = end
# self.diagnosis = diagnosis
@implements_to_string
class TagError(Exception):
    """Error raised from a tag's implementation at render time.

    Attributes:
        msg: human-readable description.
        node: the tag node that raised.
        diagnosis: optional help text for error reports.
    """

    def __init__(self, msg, node, diagnosis=None):
        self.msg = msg
        self.node = node
        self.diagnosis = diagnosis
        # Include the node's repr in the base Exception message for logs.
        super(TagError, self).__init__("{} {}".format(msg, text_type(node)))

    def __str__(self):
        return self.msg
| {
"content_hash": "23e97288b1759a4beeed99caf360d7ce",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 178,
"avg_line_length": 23.687074829931973,
"alnum_prop": 0.6122917863296956,
"repo_name": "moyaproject/moya",
"id": "f46b6aa75b708fe8393e057c6f43d444acfe4936",
"size": "3482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moya/template/errors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "662"
},
{
"name": "CSS",
"bytes": "98490"
},
{
"name": "Genshi",
"bytes": "949"
},
{
"name": "HTML",
"bytes": "14279826"
},
{
"name": "JavaScript",
"bytes": "369773"
},
{
"name": "Myghty",
"bytes": "774"
},
{
"name": "Python",
"bytes": "1828220"
},
{
"name": "Shell",
"bytes": "165"
},
{
"name": "Smalltalk",
"bytes": "154"
}
],
"symlink_target": ""
} |
from os import path
import numpy as np
from vispy import app, gloo
from vispy.util.transforms import perspective, translate, rotate
from vispy.io import load_data_file, read_mesh, load_crate
VERT_COLOR_CODE = """
// Uniforms
// ------------------------------------
uniform mat4 u_model;
uniform mat4 u_view;
uniform mat4 u_projection;
uniform vec4 u_color;
// Attributes
// ------------------------------------
attribute vec3 a_position;
attribute vec4 a_color;
attribute vec3 a_normal;
// Varying
// ------------------------------------
varying vec4 v_color;
void main()
{
v_color = u_color;
gl_Position = u_projection * u_view * u_model * vec4(a_position,1.0);
}
"""
FRAG_COLOR_CODE = """
// Varying
// ------------------------------------
varying vec4 v_color;
void main()
{
gl_FragColor = v_color;
}
"""
VERT_TEX_CODE = """
uniform mat4 u_model;
uniform mat4 u_view;
uniform mat4 u_projection;
attribute vec3 a_position;
attribute vec2 a_texcoord;
varying vec2 v_texcoord;
void main()
{
v_texcoord = a_texcoord;
gl_Position = u_projection * u_view * u_model * vec4(a_position,1.0);
}
"""
FRAG_TEX_CODE = """
uniform sampler2D u_texture;
varying vec2 v_texcoord;
void main()
{
float ty = v_texcoord.y;
float tx = sin(ty*50.0)*0.01 + v_texcoord.x;
gl_FragColor = texture2D(u_texture, vec2(tx, ty));
}
"""
class Canvas(app.Canvas):
    """Spinning cube demo: loads cube.obj and renders it with either the
    flat-color or the textured shader program."""

    def __init__(self):
        app.Canvas.__init__(self, keys='interactive', size=(800, 600))
        # Mesh is expected at ./data/cube.obj relative to the CWD —
        # presumably the example is run from its own directory; confirm.
        dirname = path.join(path.abspath(path.curdir),'data')
        positions, faces, normals, texcoords = \
            read_mesh(load_data_file('cube.obj', directory=dirname))
        self.filled_buf = gloo.IndexBuffer(faces)
        # Hard-coded toggle between the textured and flat-color pipelines;
        # flip to True to exercise the texture shaders above.
        if False:
            self.program = gloo.Program(VERT_TEX_CODE, FRAG_TEX_CODE)
            self.program['a_position'] = gloo.VertexBuffer(positions)
            self.program['a_texcoord'] = gloo.VertexBuffer(texcoords)
            self.program['u_texture'] = gloo.Texture2D(load_crate())
        else:
            self.program = gloo.Program(VERT_COLOR_CODE, FRAG_COLOR_CODE)
            self.program['a_position'] = gloo.VertexBuffer(positions)
            self.program['u_color'] = 1, 0, 0, 1
        # Camera: pull back 5 units; model starts as identity.
        self.view = translate((0, 0, -5))
        self.model = np.eye(4, dtype=np.float32)
        gloo.set_viewport(0, 0, self.physical_size[0], self.physical_size[1])
        self.projection = perspective(45.0, self.size[0] /
                                      float(self.size[1]), 2.0, 10.0)
        self.program['u_projection'] = self.projection
        self.program['u_model'] = self.model
        self.program['u_view'] = self.view
        # Rotation angles (degrees) advanced by the timer.
        self.theta = 0
        self.phi = 0
        gloo.set_clear_color('gray')
        gloo.set_state('opaque')
        gloo.set_polygon_offset(1, 1)
        self._timer = app.Timer('auto', connect=self.on_timer, start=True)
        self.show()

    # ---------------------------------
    def on_timer(self, event):
        # Advance both rotation angles and rebuild the model matrix.
        self.theta += .5
        self.phi += .5
        self.model = np.dot(rotate(self.theta, (0, 1, 0)),
                            rotate(self.phi, (0, 0, 1)))
        self.program['u_model'] = self.model
        self.update()

    # ---------------------------------
    def on_resize(self, event):
        # Keep the viewport and projection in sync with the window size.
        gloo.set_viewport(0, 0, event.physical_size[0], event.physical_size[1])
        self.projection = perspective(45.0, event.size[0] /
                                      float(event.size[1]), 2.0, 10.0)
        self.program['u_projection'] = self.projection

    # ---------------------------------
    def on_draw(self, event):
        gloo.clear()
        # Filled cube
        gloo.set_state(blend=False, depth_test=True, polygon_offset_fill=True)
        self.program.draw('triangles', self.filled_buf)
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    # Create the canvas (which also starts the timer) and enter the event loop.
    c = Canvas()
    app.run()
| {
"content_hash": "9e7863ab5613222156a2ada4f28b7c1f",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 79,
"avg_line_length": 25.114093959731544,
"alnum_prop": 0.5758952431854624,
"repo_name": "jay3sh/vispy",
"id": "4110d2f784bc95d32ef58e7f05b39aefea565278",
"size": "3743",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/scratch/objloader.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "143081"
},
{
"name": "GLSL",
"bytes": "202285"
},
{
"name": "JavaScript",
"bytes": "5007"
},
{
"name": "Makefile",
"bytes": "1593"
},
{
"name": "PowerShell",
"bytes": "4078"
},
{
"name": "Python",
"bytes": "2981105"
}
],
"symlink_target": ""
} |
import lacuna.bc
import lacuna.building
import lacuna.ship
class miningministry(lacuna.building.MyBuilding):
    """Client wrapper for the Mining Ministry building's server API.

    All public methods are thin RPC wrappers: the decorators from
    lacuna.building perform the actual server call and supply the parsed
    response via kwargs['rslt'].
    """

    # Server-side endpoint path for this building type.
    path = 'miningministry'

    def __init__( self, client, body_id:int = 0, building_id:int = 0 ):
        super().__init__( client, body_id, building_id )

    @lacuna.building.MyBuilding.call_returning_meth
    def view_platforms( self, *args, **kwargs ):
        """ Views your current platform status
        Returns a tuple:
        - platforms -- List of
          lacuna.buildings.miningministry.MiningPlatform objects
        - max_platforms -- Integer max platforms this min min can support
        - :class:`lacuna.buildings.callable.miningministry.MiningPlatform`
        """
        mylist = []
        for i in kwargs['rslt']['platforms']:
            mylist.append( MiningPlatform(self.client, i) )
        return (
            mylist,
            # NOTE(review): get_type() comes from the base class; presumably
            # it coerces the raw max_platforms value to an int — confirm.
            self.get_type(kwargs['rslt']['max_platforms'])
        )

    @lacuna.building.MyBuilding.call_returning_meth
    def view_ships( self, *args, **kwargs ):
        """ View list of mining-capable ships.
        All ships that can be used for mining are returned.  If a given ship's
        task is 'Mining', it's currently shutting ore to and from your mining
        platforms.  If its task is listed as 'Docked', it's available to be
        added to your current mining fleet.
        Returns a list of lacuna.ship.ChainShip objects built from dicts like::
                {
                    "name" : "CS4",
                    "id" : "id-goes-here",
                    "task" : "Mining",
                    "speed" : 350,
                    "hold_size" : 5600
                },
        """
        mylist = []
        for i in kwargs['rslt']['ships']:
            mylist.append( lacuna.ship.ChainShip(self.client, i) )
        return mylist

    @lacuna.bc.LacunaObject.set_empire_status
    @lacuna.building.MyBuilding.call_building_meth
    def add_cargo_ship_to_fleet( self, ship_id:int, *args, **kwargs ):
        """ Adds a ship to the mining fleet.
        Arguments:
            - ship_id -- Integer ID of the ship to add.
        The ``cargo ship`` does not have to specifically be of type "cargo_ship",
        it just has to be capable of carrying cargo (eg hulk_mega,
        smuggler_ship, etc).
        """
        # Body intentionally empty: the decorators perform the server call.
        pass

    @lacuna.bc.LacunaObject.set_empire_status
    @lacuna.building.MyBuilding.call_building_meth
    def remove_cargo_ship_from_fleet( self, ship_id:int, *args, **kwargs ):
        """ Takes a single ship off mining duty; sends the ship a message to
        return to base to perform the ``Docked`` task.
        Arguments:
            - ship_id -- Integer ID of the ship to remove.
        After being removed from mining duty, the ship will need to travel
        from the mining location back to your space port's planet, so it will
        not be available for use immediately.
        """
        # Body intentionally empty: the decorators perform the server call.
        pass

    @lacuna.bc.LacunaObject.set_empire_status
    @lacuna.building.MyBuilding.call_building_meth
    def abandon_platform( self, platform_id:int, *args, **kwargs ):
        """ Abandon one of your mining platforms.
        Arguments:
            - platform_id -- Integer ID of the platform to abandon.
        Remember that you might have multiple platforms on a single asteroid.
        When you abandon, you are abandoning the platform, not the asteroid, so
        be careful to send the platform ID, not the ID of the asteroid itself.
        """
        # Body intentionally empty: the decorators perform the server call.
        pass
class MiningPlatform(lacuna.bc.SubClass):
    """
    Attributes::
        id                  "id-goes-here",
        asteroid            lacuna.body.SimpleBody object
        rutile_hour         10,
        chromite_hour       10,
        chalcopyrite_hour   10,
        galena_hour         10,
        gold_hour           10,
        uraninite_hour      10,
        bauxite_hour        10,
        goethite_hour       10,
        halite_hour         10,
        gypsum_hour         10,
        trona_hour          10,
        kerogen_hour        10,
        methane_hour        10,
        anthracite_hour     10,
        sulfur_hour         10,
        zircon_hour         10,
        monazite_hour       10,
        fluorite_hour       10,
        beryl_hour          10,
        magnetite_hour      10,
        shipping_capacity   51
    """
    def __init__(self, client, mydict:dict):
        # Promote the raw asteroid dict into a SimpleBody object before the
        # generic SubClass attribute mapping runs.
        # NOTE(review): this module imports lacuna.bc/building/ship but not
        # lacuna.body — this works only if the `lacuna` package itself makes
        # the `body` submodule available; confirm.
        mydict['asteroid'] = lacuna.body.SimpleBody(client, mydict['asteroid'])
        super().__init__(client, mydict)
| {
"content_hash": "2e3f67a605caad5ec4fd4aec4400d96b",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 82,
"avg_line_length": 35.21705426356589,
"alnum_prop": 0.5694475016508915,
"repo_name": "tmtowtdi/MontyLacuna",
"id": "00704aa20bc107b4810ae63e4e2e598e73a265a0",
"size": "4544",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/lacuna/buildings/callable/miningministry.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36255146"
},
{
"name": "Shell",
"bytes": "2766"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Replaces the plural-named Musical_Numbers model with a singular-named
    # Musical_Number model carrying the same fields.
    # NOTE(review): implemented as CreateModel + DeleteModel rather than
    # RenameModel, so rows in the old table are not carried over — confirm
    # that was intentional at this point in the project's history.

    dependencies = [
        ('ITDB_Main', '0015_musical_numbers'),
    ]

    operations = [
        migrations.CreateModel(
            name='Musical_Number',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=40)),
                ('composer', models.ForeignKey(blank=True, to='ITDB_Main.People', null=True)),
                ('play', models.ForeignKey(to='ITDB_Main.Play')),
            ],
        ),
        # Drop the old model's foreign keys first, then the model itself.
        migrations.RemoveField(
            model_name='musical_numbers',
            name='composer',
        ),
        migrations.RemoveField(
            model_name='musical_numbers',
            name='play',
        ),
        migrations.DeleteModel(
            name='Musical_Numbers',
        ),
    ]
| {
"content_hash": "4a3d81725367d0a03c49bff12c47f0a4",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 114,
"avg_line_length": 29.606060606060606,
"alnum_prop": 0.5445240532241555,
"repo_name": "Plaudenslager/ITDB",
"id": "c0bd1684e4e94fae35eb85f5bad6acff61d85fc7",
"size": "1001",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ITDB_Main/migrations/0016_auto_20150906_2315.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "42485"
},
{
"name": "HTML",
"bytes": "12875"
},
{
"name": "JavaScript",
"bytes": "77703"
},
{
"name": "Python",
"bytes": "34623"
}
],
"symlink_target": ""
} |
import os
import sys
def _run():
    """Point Django at the production settings and dispatch the CLI command."""
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "haas.settings.production")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    _run()
| {
"content_hash": "53bcc80216b0b5f47f557626deffad14",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 79,
"avg_line_length": 26.22222222222222,
"alnum_prop": 0.7161016949152542,
"repo_name": "uranusjr/bbshighlighter",
"id": "02a9c9675961e00ed03095bbba46c2c1c4dccfff",
"size": "283",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "haas/manage.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1893"
},
{
"name": "JavaScript",
"bytes": "1240"
},
{
"name": "Python",
"bytes": "10396"
}
],
"symlink_target": ""
} |
from globals import *
import life as lfe
import camps
import brain
import logging
def camp_has_raid(camp_id):
	"""Return True when the camp identified by `camp_id` has an active raid entry."""
	_camp = camps.get_camp(camp_id)
	return bool(_camp['raid'])
def create_raid(camp_id, raiders=None, join=None):
	"""Create an empty raid record on the camp if one doesn't already exist.

	Arguments:
		camp_id -- camp to raid.
		raiders -- accepted for interface compatibility but not used by this
			function (callers add raiders via add_raiders). Bug fix: the
			default was a mutable list (`raiders=[]`), a shared-state pitfall;
			replaced with None, which is backward-compatible since the value
			is never read.
		join -- optional life ID immediately registered as a defender.
	"""
	_camp = camps.get_camp(camp_id)

	if not camp_has_raid(camp_id):
		# Fresh raid bookkeeping, stamped with the current world tick.
		_camp['raid'] = {'started': WORLD_INFO['ticks'],
			'raiders': [],
			'defenders': [],
			'score': 0}

		logging.debug('Created raid: %s' % _camp['name'])

	if join:
		defend_camp(camp_id, join)
def add_raiders(camp_id, raiders):
	"""Register each new life ID in `raiders` as an attacker on the camp's
	raid, and introduce them to every current defender."""
	_camp = camps.get_camp(camp_id)
	# Only process raiders not already registered on this raid.
	for raider in [r for r in raiders if not r in _camp['raid']['raiders']]:
		_camp['raid']['raiders'].append(raider)
		logging.debug('%s added to raid of camp %s' % (' '.join(LIFE[raider]['name']), _camp['name']))
		for defender in get_defenders(camp_id):
			if not brain.knows_alife_by_id(LIFE[defender], raider):
				if defender == raider:
					# A camp member showing up on the attacking side is a data
					# inconsistency elsewhere; log it and skip the meet.
					logging.warning('FIXME: Raider is member of camp.')
					continue
				brain.meet_alife(LIFE[defender], LIFE[raider])
def defend_camp(camp_id, life_id):
	"""Register `life_id` as a defender of the camp's raid and introduce
	them to every raider they don't already know."""
	_camp = camps.get_camp(camp_id)
	if not life_id in _camp['raid']['defenders']:
		_camp['raid']['defenders'].append(life_id)
		logging.debug('%s is now defending camp %s' % (' '.join(LIFE[life_id]['name']), _camp['name']))
	# Introductions happen even if the life was already a defender.
	for raider in get_raiders(camp_id):
		if not brain.knows_alife_by_id(LIFE[life_id], raider):
			brain.meet_alife(LIFE[life_id], LIFE[raider])
def get_raiders(camp_id):
	"""Return the list of raider life IDs on the camp's raid."""
	_raid = camps.get_camp(camp_id)['raid']
	return _raid['raiders']
def get_defenders(camp_id):
	"""Return the list of defender life IDs on the camp's raid."""
	_raid = camps.get_camp(camp_id)['raid']
	return _raid['defenders']
def has_control(camp_id, group_id):
	"""True if `group_id` is the group currently controlling the camp."""
	return group_id == camps.get_controlling_group_global(camp_id)
"content_hash": "34a06993f1b643262a125e079e331f20",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 97,
"avg_line_length": 27.353846153846153,
"alnum_prop": 0.641169853768279,
"repo_name": "flags/Reactor-3",
"id": "691d718a5701170d98da90d4ad0d63375f3532ab",
"size": "1778",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alife/raids.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "415"
},
{
"name": "Python",
"bytes": "1042784"
}
],
"symlink_target": ""
} |
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Make the sapyens package importable so sphinx.ext.autodoc can load it.
sys.path.insert(0, os.path.abspath('../src/sapyens/.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.todo',
    'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'sapyens'
copyright = u'2015, fillest'
author = u'fillest'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# NOTE(review): version/release are hard-coded here; keep them in sync with
# the package version when cutting a release.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'pyramid'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'sapyensdoc'
# -- Options for LaTeX output ---------------------------------------------
# All LaTeX knobs left at their Sphinx defaults.
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
  (master_doc, 'sapyens.tex', u'sapyens Documentation',
   u'fillest', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'sapyens', u'sapyens Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'sapyens', u'sapyens Documentation',
author, 'sapyens', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "ef8a193208f276167f7813e9fc035271",
"timestamp": "",
"source": "github",
"line_count": 277,
"max_line_length": 79,
"avg_line_length": 31.949458483754512,
"alnum_prop": 0.703728813559322,
"repo_name": "fillest/sapyens",
"id": "f40bfe0372d051386a4943fd014721d49de2ba62",
"size": "9270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Mako",
"bytes": "11297"
},
{
"name": "Python",
"bytes": "61021"
},
{
"name": "Shell",
"bytes": "801"
}
],
"symlink_target": ""
} |
from normalization import minmax
import os as os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import PIL.Image as Image
import math as math
def create_font(fontname='Tahoma', fontsize=10):
    """Build a matplotlib font-properties dict for use with ``plt.title``."""
    return dict(fontname=fontname, fontsize=fontsize)
def plot_gray(X, sz=None, filename=None):
    """Render a single image in grayscale, on screen or to a PNG file.

    Args:
        X: image data as a numpy array (flattened or already 2-D).
        sz: optional shape tuple; when given, X is reshaped to it.
        filename: when None the figure is shown interactively, otherwise
            it is saved to this path as a PNG.
    """
    if sz is not None:
        X = X.reshape(sz)
    # BUG FIX: the original referenced undefined names `I` (in the minmax
    # call) and `Ig` (in the imshow call); both must operate on the
    # (possibly reshaped) input X.
    X = minmax(X, 0, 255)
    fig = plt.figure()
    plt.imshow(np.asarray(X), cmap=cm.gray)
    if filename is None:
        plt.show()
    else:
        fig.savefig(filename, format="png", transparent=False)
def plot_eigenvectors(eigenvectors, num_components, sz, filename=None, start_component=0, rows=None, cols=None, title="Subplot", color=True):
    """Plot a grid of eigenvector images (e.g. eigenfaces).

    Args:
        eigenvectors: 2-D array whose columns are the eigenvectors.
        num_components: number of components to plot (clamped to the number
            of available eigenvector columns).
        sz: shape tuple each eigenvector is reshaped to for display.
        filename: when None the figure is shown, otherwise saved as PNG.
        start_component: index of the first component to plot.
        rows, cols: subplot grid size; derived from num_components when omitted.
        title: per-subplot title prefix.
        color: plot with the default colormap when True, grayscale otherwise.
    """
    if (rows is None) or (cols is None):
        rows = cols = int(math.ceil(np.sqrt(num_components)))
    # BUG FIX: the original called np.min(num_components, shape[1]), which
    # passes shape[1] as the `axis` argument instead of comparing the two
    # values; the builtin min() performs the intended clamping.
    num_components = min(num_components, eigenvectors.shape[1])
    fig = plt.figure()
    for i in range(start_component, num_components):
        vi = eigenvectors[0:, i].copy()
        vi = minmax(np.asarray(vi), 0, 255, dtype=np.uint8)
        vi = vi.reshape(sz)
        ax0 = fig.add_subplot(rows, cols, (i - start_component) + 1)
        plt.setp(ax0.get_xticklabels(), visible=False)
        plt.setp(ax0.get_yticklabels(), visible=False)
        plt.title("%s #%d" % (title, i), create_font('Tahoma', 10))
        if color:
            plt.imshow(np.asarray(vi))
        else:
            # BUG FIX: matplotlib has no `cm.grey` attribute; the grayscale
            # colormap is `cm.gray` (as used elsewhere in this file).
            plt.imshow(np.asarray(vi), cmap=cm.gray)
    if filename is None:
        fig.show()
    else:
        fig.savefig(filename, format="png", transparent=False)
def subplot(title, images, rows, cols, sptitle="subplot", sptitles=[], colormap=cm.gray, ticks_visible=True, filename=None):
    """Lay out a list of images on a rows x cols subplot grid.

    Each cell is titled "<sptitle> #<n>"; when `sptitles` has one entry per
    image, those entries are used as the per-cell numbers instead. The
    figure is shown interactively unless `filename` is given, in which case
    it is saved there.
    """
    fig = plt.figure()
    # Figure-level title, centered above the grid.
    fig.text(.5, .95, title, horizontalalignment='center')
    use_custom_titles = len(sptitles) == len(images)
    for idx, image in enumerate(images):
        cell = fig.add_subplot(rows, cols, idx + 1)
        plt.setp(cell.get_xticklabels(), visible=False)
        plt.setp(cell.get_yticklabels(), visible=False)
        if use_custom_titles:
            plt.title("%s #%s" % (sptitle, str(sptitles[idx])), create_font('Tahoma', 10))
        else:
            plt.title("%s #%d" % (sptitle, idx + 1), create_font('Tahoma', 10))
        plt.imshow(np.asarray(image), cmap=colormap)
    if filename is None:
        plt.show()
    else:
        fig.savefig(filename)
# using plt plot:
#filename="/home/philipp/facerec/at_database_vs_accuracy_xy.png"
#t = np.arange(2., 10., 1.)
#fig = plt.figure()
#plt.plot(t, r0, 'k--', t, r1, 'k')
#plt.legend(("Eigenfaces", "Fisherfaces"), 'lower right', shadow=True, fancybox=True)
#plt.ylim(0,1)
#plt.ylabel('Recognition Rate')
#plt.xlabel('Database Size (Images per Person)')
#fig.savefig(filename, format="png", transparent=False)
#plt.show()
| {
"content_hash": "54641467ed2e46a9919ba02d68a95399",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 145,
"avg_line_length": 36.24691358024691,
"alnum_prop": 0.6055858310626703,
"repo_name": "revan/facerecserver",
"id": "70edfb144e5bed2a6f6f00e9f477b7a6313850b1",
"size": "2936",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "visual.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "70350"
}
],
"symlink_target": ""
} |
"""
Masked Linear module: A fully connected layer that computes an adaptive binary mask on the fly.
The mask (binary or not) is computed at each forward pass and multiplied against
the weight matrix to prune a portion of the weights.
The pruned weight matrix is then multiplied against the inputs (and if necessary, the bias is added).
"""
import math
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn import init
from .binarizer import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
class MaskedLinear(nn.Linear):
    """
    Fully Connected layer with on the fly adaptive mask.
    If needed, a score matrix is created to store the importance of each associated weight.
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        bias: bool = True,
        mask_init: str = "constant",
        mask_scale: float = 0.0,
        pruning_method: str = "topK",
    ):
        """
        Args:
            in_features (`int`)
                Size of each input sample
            out_features (`int`)
                Size of each output sample
            bias (`bool`)
                If set to ``False``, the layer will not learn an additive bias.
                Default: ``True``
            mask_init (`str`)
                The initialization method for the score matrix if a score matrix is needed.
                Choices: ["constant", "uniform", "kaiming"]
                Default: ``constant``
            mask_scale (`float`)
                The initialization parameter for the chosen initialization method `mask_init`.
                Default: ``0.``
            pruning_method (`str`)
                Method to compute the mask.
                Choices: ["topK", "threshold", "sigmoied_threshold", "magnitude", "l0"]
                Default: ``topK``
        """
        super(MaskedLinear, self).__init__(in_features=in_features, out_features=out_features, bias=bias)
        assert pruning_method in ["topK", "threshold", "sigmoied_threshold", "magnitude", "l0"]
        self.pruning_method = pruning_method
        # "magnitude" derives the mask directly from |weight|, so only the
        # score-based methods allocate a learnable score matrix.
        if self.pruning_method in ["topK", "threshold", "sigmoied_threshold", "l0"]:
            self.mask_scale = mask_scale
            self.mask_init = mask_init
            # One importance score per weight, same shape as the weight matrix.
            self.mask_scores = nn.Parameter(torch.Tensor(self.weight.size()))
            self.init_mask()

    def init_mask(self):
        """Initialize ``self.mask_scores`` according to ``self.mask_init``."""
        if self.mask_init == "constant":
            init.constant_(self.mask_scores, val=self.mask_scale)
        elif self.mask_init == "uniform":
            init.uniform_(self.mask_scores, a=-self.mask_scale, b=self.mask_scale)
        elif self.mask_init == "kaiming":
            init.kaiming_uniform_(self.mask_scores, a=math.sqrt(5))

    def forward(self, input: torch.Tensor, threshold: float):
        """Compute the pruning mask, apply it to the weights, then run the linear map.

        ``threshold`` is interpreted per pruning method (top-K fraction or
        score threshold, depending on the binarizer).
        """
        # Get the mask
        if self.pruning_method == "topK":
            mask = TopKBinarizer.apply(self.mask_scores, threshold)
        elif self.pruning_method in ["threshold", "sigmoied_threshold"]:
            sig = "sigmoied" in self.pruning_method
            mask = ThresholdBinarizer.apply(self.mask_scores, threshold, sig)
        elif self.pruning_method == "magnitude":
            mask = MagnitudeBinarizer.apply(self.weight, threshold)
        elif self.pruning_method == "l0":
            # Hard-concrete (L0) relaxation: stochastic gate during training,
            # deterministic sigmoid gate at eval time.
            l, r, b = -0.1, 1.1, 2 / 3
            if self.training:
                u = torch.zeros_like(self.mask_scores).uniform_().clamp(0.0001, 0.9999)
                s = torch.sigmoid((u.log() - (1 - u).log() + self.mask_scores) / b)
            else:
                s = torch.sigmoid(self.mask_scores)
            # Stretch (l, r) then clip to [0, 1] to obtain the gate values.
            s_bar = s * (r - l) + l
            mask = s_bar.clamp(min=0.0, max=1.0)
        # Mask weights with computed mask
        weight_thresholded = mask * self.weight
        # Compute output (linear layer) with masked weights
        return F.linear(input, weight_thresholded, self.bias)
| {
"content_hash": "d71f5b0d79384c9828620756b8522d2b",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 105,
"avg_line_length": 42.12903225806452,
"alnum_prop": 0.5921388463501787,
"repo_name": "huggingface/pytorch-transformers",
"id": "298c7e5e51de02b3538ff62f2580d2b9b4836bf6",
"size": "4532",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/research_projects/movement-pruning/emmental/modules/masked_nn.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "194"
},
{
"name": "Jupyter Notebook",
"bytes": "535623"
},
{
"name": "Python",
"bytes": "897445"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
import re
# Extract dunder metadata (e.g. __version__) from teuthology/__init__.py with a
# regex, so the package is not imported (and monkey-patching not triggered) at
# build time.
module_file = open("teuthology/__init__.py").read()
metadata = dict(re.findall(r"__([a-z]+)__\s*=\s*['\"]([^'\"]*)['\"]", module_file))
long_description = open('README.rst').read()

setup(
    name='teuthology',
    version=metadata['version'],
    packages=find_packages(),
    package_data={
        # BUG FIX: this key was listed twice with identical values; duplicate
        # dict keys silently overwrite each other, so the redundant first
        # entry has been removed.
        'teuthology.task': ['adjust-ulimits', 'edit_sudoers.sh', 'daemon-helper'],
        'teuthology.openstack': [
            'archive-key',
            'archive-key.pub',
            'openstack-centos-6.5-user-data.txt',
            'openstack-centos-7.0-user-data.txt',
            'openstack-centos-7.1-user-data.txt',
            'openstack-centos-7.2-user-data.txt',
            'openstack-debian-8.0-user-data.txt',
            'openstack-opensuse-42.1-user-data.txt',
            'openstack-teuthology.cron',
            'openstack-teuthology.init',
            'openstack-ubuntu-12.04-user-data.txt',
            'openstack-ubuntu-14.04-user-data.txt',
            'openstack-user-data.txt',
            'openstack.yaml',
            'setup-openstack.sh'
        ],
    },
    author='Inktank Storage, Inc.',
    author_email='ceph-qa@ceph.com',
    description='Ceph test framework',
    license='MIT',
    keywords='teuthology test ceph cluster',
    url='https://github.com/ceph/teuthology',
    long_description=long_description,
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 3.6',
        'Topic :: Software Development :: Quality Assurance',
        'Topic :: Software Development :: Testing',
        'Topic :: System :: Distributed Computing',
        'Topic :: System :: Filesystems',
    ],
    install_requires=['apache-libcloud',
                      'gevent',
                      'PyYAML',
                      'argparse >= 1.2.1',
                      'configobj',
                      'six >= 1.9',  # python-openstackclient won't work properly with less
                      'pexpect',
                      'docopt',
                      'netaddr',  # teuthology/misc.py
                      # only used by orchestra, but we monkey-patch it in
                      # teuthology/__init__.py
                      'paramiko',
                      'psutil >= 2.1.0',
                      'configparser',
                      'ansible>=2.0',
                      'prettytable',
                      'rocket-python >= 1.2.15',
                      'manhole',
                      'humanfriendly',
                      ],
    extras_require={
        'orchestra': [
            # For apache-libcloud when using python < 2.7.9
            'backports.ssl_match_hostname',
            'beanstalkc3 >= 0.4.0',
            'httplib2',
            'ndg-httpsclient',  # for requests, urllib3
            'pyasn1',  # for requests, urllib3
            'pyopenssl>=0.13',  # for requests, urllib3
            'python-dateutil',
            # python-novaclient is specified here, even though it is
            # redundant, because python-openstackclient requires
            # Babel, and installs 2.3.3, which is forbidden by
            # python-novaclient 4.0.0
            'python-novaclient',
            'python-openstackclient',
            # with openstacklient >= 2.1.0, neutronclient no longer is
            # a dependency but we need it anyway.
            'python-neutronclient',
            'raven',
            'requests != 2.13.0',
        ],
        'test': [
            'boto >= 2.0b4',  # for qa/tasks/radosgw_*.py
            'cryptography >= 2.7',  # for qa/tasks/mgr/dashboard/test_rgw.py
            'nose',  # for qa/tasks/rgw_multisite_tests.py',
            'pip-tools',
            'pytest',  # for tox.ini
            'requests',  # for qa/tasks/mgr/dashboard/helper.py
            'tox',
            # For bucket notification testing in multisite
            'xmltodict',
            'boto3',
            'PyJWT',  # for qa/tasks/mgr/dashboard/test_auth.py
            'ipy',  # for qa/tasks/cephfs/mount.py
            'toml',  # for qa/tasks/cephadm.py
        ]
    },
    # to find the code associated with entry point
    # A.B:foo first cd into directory A, open file B
    # and find sub foo
    entry_points={
        'console_scripts': [
            'teuthology = scripts.run:main',
            'teuthology-openstack = scripts.openstack:main',
            'teuthology-nuke = scripts.nuke:main',
            'teuthology-suite = scripts.suite:main',
            'teuthology-ls = scripts.ls:main',
            'teuthology-worker = scripts.worker:main',
            'teuthology-lock = scripts.lock:main',
            'teuthology-schedule = scripts.schedule:main',
            'teuthology-updatekeys = scripts.updatekeys:main',
            'teuthology-update-inventory = scripts.update_inventory:main',
            'teuthology-results = scripts.results:main',
            'teuthology-report = scripts.report:main',
            'teuthology-kill = scripts.kill:main',
            'teuthology-queue = scripts.queue:main',
            'teuthology-prune-logs = scripts.prune_logs:main',
            'teuthology-describe = scripts.describe:main',
            'teuthology-reimage = scripts.reimage:main'
        ],
    },
)
| {
"content_hash": "6bcf13a035f37d98e4df37e5e32745e6",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 90,
"avg_line_length": 40.63970588235294,
"alnum_prop": 0.5274108919848018,
"repo_name": "dmick/teuthology",
"id": "e595da4a646751bf285d20eb0f00f7cbd6fd29b8",
"size": "5527",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1096"
},
{
"name": "Makefile",
"bytes": "4194"
},
{
"name": "Python",
"bytes": "1413171"
},
{
"name": "Shell",
"bytes": "61271"
}
],
"symlink_target": ""
} |
"""Unit tests for cross-language parquet io read/write."""
# pytype: skip-file
from __future__ import absolute_import
from __future__ import print_function
import logging
import os
import re
import unittest
from nose.plugins.attrib import attr
import apache_beam as beam
from apache_beam import coders
from apache_beam.coders.avro_record import AvroRecord
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.transforms.external import ImplicitSchemaPayloadBuilder
PARQUET_WRITE_URN = "beam:transforms:xlang:test:parquet_write"
@attr('UsesCrossLanguageTransforms')
@unittest.skipUnless(
    os.environ.get('EXPANSION_JAR'),
    "EXPANSION_JAR environment variable is not set.")
@unittest.skipUnless(
    os.environ.get('EXPANSION_PORT'),
    "EXPANSION_PORT environment var is not provided.")
class XlangParquetIOTest(unittest.TestCase):
    """Cross-language test driving the Java parquet-write transform via an
    external expansion service (located by EXPANSION_JAR / EXPANSION_PORT)."""

    # TODO: add verification for the file written by external transform
    # after fixing BEAM-7612
    def test_write(self):
        """Pipe three AvroRecords through the external parquet-write transform.

        Skips gracefully (prints a notice) when the expansion service does not
        implement PARQUET_WRITE_URN; re-raises any other pipeline failure.
        """
        expansion_jar = os.environ.get('EXPANSION_JAR')
        port = os.environ.get('EXPANSION_PORT')
        address = 'localhost:%s' % port
        try:
            with TestPipeline() as p:
                # Ship the expansion jar to workers via the experiments flag.
                p.get_pipeline_options().view_as(DebugOptions).experiments.append(
                    'jar_packages=' + expansion_jar)
                p.not_use_test_runner_api = True
                _ = p \
                    | beam.Create([
                        AvroRecord({"name": "abc"}), AvroRecord({"name": "def"}),
                        AvroRecord({"name": "ghi"})]) \
                    | beam.ExternalTransform(
                        PARQUET_WRITE_URN,
                        ImplicitSchemaPayloadBuilder({'data': u'/tmp/test.parquet'}),
                        address)
        except RuntimeError as e:
            # Only swallow the error when it names the missing URN.
            if re.search(PARQUET_WRITE_URN, str(e)):
                print("looks like URN not implemented in expansion service, skipping.")
            else:
                raise e
class AvroTestCoder(coders.AvroGenericCoder):
    """Avro coder fixed to a one-field test record schema.

    The schema matches the ``{"name": ...}`` records built in
    ``XlangParquetIOTest.test_write``.
    """

    SCHEMA = """
  {
  "type": "record", "name": "testrecord",
  "fields": [ {"name": "name", "type": "string"} ]
  }
  """

    def __init__(self):
        super(AvroTestCoder, self).__init__(self.SCHEMA)
# Make AvroRecord instances en/decodable with the fixed test schema above.
coders.registry.register_coder(AvroRecord, AvroTestCoder)

if __name__ == '__main__':
    logging.getLogger().setLevel(logging.INFO)
    unittest.main()
| {
"content_hash": "4eca2880afa7c8e89f735be4da6c1cb6",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 79,
"avg_line_length": 30.68,
"alnum_prop": 0.675793133420252,
"repo_name": "iemejia/incubator-beam",
"id": "3c0b6dd3ef04fa44e110238c4b587da0b23ae694",
"size": "3086",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/io/external/xlang_parquetio_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "22216"
},
{
"name": "Java",
"bytes": "9687045"
},
{
"name": "Protocol Buffer",
"bytes": "1407"
},
{
"name": "Shell",
"bytes": "10104"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
import pure_interface
import unittest
import inspect
import types
from typing import Dict, Any
class IAnimal(pure_interface.PureInterface):
    # Interface fixture: a single one-required-argument method.
    def speak(self, volume):
        pass


class IPlant(pure_interface.PureInterface):
    # Interface fixture: a method whose argument has a default.
    def grow(self, height=10):
        pass


class ADescriptor(object):
    # Non-function class attribute; used to check that descriptors standing
    # in for interface methods are also signature-validated (issue #7).
    def __get__(self, instance, owner):
        return None
# Signature fixtures: each funcN below exists only for its argument spec
# (combinations of required, defaulted, *args and **kwargs parameters).
# The bodies are intentionally empty; the tests compare the signatures.
def func1(a, b, c):
    pass


def func2(a, b, c=None):
    pass


def func3(a=None, b=None):
    pass


def func4():
    pass


def func5(a, *args):
    pass


def func6(a, **kwargs):
    pass


def func7(a, b=3, *args):
    pass


def func8(*args):
    pass


def func9(**kwargs):
    pass


def func10(*args, **kwargs):
    pass


def func11(a=4, **kwargs):
    pass
def test_call(func, arg_spec):
    # type: (types.FunctionType, inspect.ArgSpec) -> bool
    """Empirically check whether *func* can be called with every call shape
    that *arg_spec* (an interface signature) permits.

    Tries: all args as keywords, all args positionally, extra positional
    arguments when the spec has *args, and extra keyword arguments when the
    spec has **kwargs. Returns False on the first TypeError, True otherwise.
    """
    if arg_spec.defaults:
        # Pass defaulted parameters by keyword (value == parameter name),
        # the required ones positionally.
        n_defaults = len(arg_spec.defaults)
        kwargs = {a: a for a in arg_spec.args[-n_defaults:]}  # type: Dict[str, Any]
        args = arg_spec.args[:-n_defaults]
    else:
        args = arg_spec.args
        kwargs = {}
    try:
        func(*args, **kwargs)
    except TypeError:
        return False
    # Defaulted parameters must also be omittable.
    try:
        func(*args)
    except TypeError:
        return False
    if arg_spec.varargs:
        # The spec allows extra positional arguments.
        try:
            func(*(args + ['more', 'random', 'arguments']), **kwargs)
        except TypeError:
            return False
        try:
            func(*(args + ['more', 'random', 'arguments']))
        except TypeError:
            return False
    if arg_spec.keywords:
        # The spec allows arbitrary extra keywords ('kewords' is just one
        # more arbitrary name here; the typo is harmless).
        kwargs['more'] = 1
        kwargs['random'] = 1
        kwargs['kewords'] = 1
        try:
            func(*args, **kwargs)
        except TypeError:
            return False
    return True
class TestFunctionSignatureChecks(unittest.TestCase):
    """Exercises pure_interface's signature-consistency validation.

    ``check_signatures`` cross-checks two things for every (interface,
    implementation) pair: what actually happens when the implementation is
    called with the interface's call shapes (via ``test_call``) and what
    ``pure_interface._signatures_are_consistent`` predicts; both must agree
    with the expected verdict.
    """

    @classmethod
    def setUpClass(cls):
        # Signature checking only runs in development mode.
        pure_interface.is_development = True

    def check_signatures(self, int_func, impl_func, expected_result):
        # Compare empirical callability against the library's static check.
        interface_sig = pure_interface.getargspec(int_func)
        concrete_sig = pure_interface.getargspec(impl_func)
        reality = test_call(impl_func, interface_sig)
        self.assertEqual(expected_result, reality, 'Reality does not match expectations')
        result = pure_interface._signatures_are_consistent(concrete_sig, interface_sig)
        self.assertEqual(expected_result, result, 'Signature test gave wrong answer')

    def test_tests(self):
        self.check_signatures(func1, func1, True)
        self.check_signatures(func2, func2, True)
        self.check_signatures(func2, func1, False)
        self.check_signatures(func1, func2, True)
        self.check_signatures(func2, func3, False)
        self.check_signatures(func3, func2, False)
        self.check_signatures(func3, func4, False)
        self.check_signatures(func4, func3, True)

    def test_varargs(self):
        self.check_signatures(func1, func8, True)
        self.check_signatures(func2, func8, False)
        self.check_signatures(func3, func8, False)
        self.check_signatures(func4, func8, True)
        self.check_signatures(func5, func8, True)
        self.check_signatures(func9, func8, False)

    def test_pos_varargs(self):
        self.check_signatures(func1, func5, True)
        self.check_signatures(func2, func5, False)
        self.check_signatures(func3, func5, False)
        self.check_signatures(func4, func5, False)
        self.check_signatures(func6, func5, False)
        self.check_signatures(func7, func5, False)
        self.check_signatures(func8, func5, False)

    def test_keywords(self):
        self.check_signatures(func1, func9, False)
        self.check_signatures(func2, func9, False)
        self.check_signatures(func3, func9, True)
        self.check_signatures(func4, func9, True)
        self.check_signatures(func6, func9, False)
        self.check_signatures(func8, func9, False)

    def test_def_keywords(self):
        def kwarg_keywords(c=4, **kwargs):
            pass
        self.check_signatures(func1, kwarg_keywords, False)
        self.check_signatures(func2, kwarg_keywords, False)
        self.check_signatures(func3, kwarg_keywords, True)
        self.check_signatures(func4, kwarg_keywords, True)
        self.check_signatures(func3, func11, True)
        self.check_signatures(func6, func11, True)

    def test_vararg_keywords(self):
        # func10 (*args, **kwargs) should satisfy every interface signature.
        self.check_signatures(func1, func10, True)
        self.check_signatures(func2, func10, True)
        self.check_signatures(func3, func10, True)
        self.check_signatures(func4, func10, True)
        self.check_signatures(func5, func10, True)
        self.check_signatures(func6, func10, True)
        self.check_signatures(func7, func10, True)
        self.check_signatures(func8, func10, True)
        self.check_signatures(func9, func10, True)
        self.check_signatures(func11, func10, True)

    def test_pos_kwarg_vararg(self):
        def pos_kwarg_vararg(a, c=4, *args):
            pass
        self.check_signatures(func1, pos_kwarg_vararg, True)
        self.check_signatures(func2, pos_kwarg_vararg, False)
        self.check_signatures(func3, pos_kwarg_vararg, False)
        self.check_signatures(func4, pos_kwarg_vararg, False)

    def test_all(self):
        def all(a, c=4, *args, **kwargs):
            pass
        self.check_signatures(func1, all, True)
        self.check_signatures(func2, all, False)
        self.check_signatures(func3, all, False)
        self.check_signatures(func4, all, False)
        self.check_signatures(func6, all, True)
        self.check_signatures(func7, all, True)
        self.check_signatures(func11, all, False)

    def test_some_more(self):
        self.check_signatures(func1, func5, True)
        self.check_signatures(func2, func7, False)
        self.check_signatures(func5, func5, True)
        self.check_signatures(func5, func8, True)
        self.check_signatures(func5, func11, False)
        self.check_signatures(func6, func11, True)
        self.check_signatures(func7, func11, False)
        self.check_signatures(func8, func5, False)
        self.check_signatures(func9, func11, True)
        self.check_signatures(func11, func9, True)

    def test_diff_names_fails(self):
        # Renaming a parameter breaks keyword-callers, so it must be rejected.
        # concrete subclass
        with self.assertRaises(pure_interface.InterfaceError):
            class Animal(object, IAnimal):
                def speak(self, loudness):
                    pass
        # abstract subclass
        with self.assertRaises(pure_interface.InterfaceError):
            class Animal2(IAnimal):
                def speak(self, loudness):
                    pass

    def test_too_few_fails(self):
        # concrete subclass
        with self.assertRaises(pure_interface.InterfaceError):
            class Animal(object, IAnimal):
                def speak(self):
                    pass
        # abstract subclass
        with self.assertRaises(pure_interface.InterfaceError):
            class Animal2(IAnimal):
                def speak(self):
                    pass

    def test_too_many_fails(self):
        # concrete subclass
        with self.assertRaises(pure_interface.InterfaceError):
            class Animal(object, IAnimal):
                def speak(self, volume, msg):
                    pass

    def test_all_functions_checked(self):  # issue #7
        class IWalkingAnimal(IAnimal):
            def walk(self, distance):
                pass

        # A descriptor standing in for one method must not mask checking of
        # the other methods.
        with self.assertRaises(pure_interface.InterfaceError):
            class Animal(object, IWalkingAnimal):
                speak = ADescriptor()

                def walk(self, volume):
                    pass
        # abstract subclass
        with self.assertRaises(pure_interface.InterfaceError):
            class Animal2(IAnimal):
                def speak(self, volume, msg):
                    pass

    def test_new_with_default_passes(self):
        # Adding a *defaulted* parameter is backward-compatible and allowed.
        class Animal(object, IAnimal):
            def speak(self, volume, msg='hello'):
                return '{} ({})'.format(msg, volume)

        # abstract subclass
        class IAnimal2(IAnimal):
            def speak(self, volume, msg='hello'):
                pass

        class Animal3(object, IAnimal2):
            def speak(self, volume, msg='hello'):
                return '{} ({})'.format(msg, volume)

        a = Animal()
        b = Animal3()
        self.assertEqual(a.speak('loud'), 'hello (loud)')
        self.assertEqual(b.speak('loud'), 'hello (loud)')

    def test_adding_default_passes(self):
        class Animal(object, IAnimal):
            def speak(self, volume='loud'):
                return 'hello ({})'.format(volume)

        a = Animal()
        self.assertEqual(a.speak(), 'hello (loud)')

    def test_increasing_required_params_fails(self):
        # Removing a default makes a parameter required — not allowed.
        # concrete subclass
        with self.assertRaises(pure_interface.InterfaceError):
            class Plant(object, IPlant):
                def grow(self, height):
                    return height + 5
        # abstract subclass
        with self.assertRaises(pure_interface.InterfaceError):
            class Plant2(IPlant):
                def grow(self, height):
                    pass
class TestDisableFunctionSignatureChecks(unittest.TestCase):
    """With development mode off, signature mismatches must NOT raise."""

    @classmethod
    def setUpClass(cls):
        pure_interface.is_development = False

    def test_too_many_passes(self):
        # The same mismatch rejected in TestFunctionSignatureChecks is
        # tolerated here because checking is disabled.
        try:
            class Animal(object, IAnimal):
                def speak(self, volume, msg):
                    pass
            a = Animal()
        except pure_interface.InterfaceError as exc:
            self.fail('Unexpected error {}'.format(exc))
| {
"content_hash": "fe2da01886a0215fbffb828d5e21a2b9",
"timestamp": "",
"source": "github",
"line_count": 318,
"max_line_length": 89,
"avg_line_length": 31.534591194968552,
"alnum_prop": 0.586158755484643,
"repo_name": "tim-mitchell/pure_interface",
"id": "98cbb83c6a6eb26af856fa3783391b4fb0ce1dc2",
"size": "10053",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_function_sigs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46630"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
    """Read and return the text of *fname*, resolved relative to this file's
    directory.

    Used to pull README.txt into ``long_description`` so the project
    description lives in one top-level file.

    Args:
        fname: path relative to the directory containing this setup script.

    Returns:
        The file's full contents as a string.
    """
    # Close the handle deterministically instead of leaking it (the original
    # relied on garbage collection to close the file).
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
setup(
    name="mredoc",
    version="0.1",
    author="Mike Hull",
    author_email="mikehulluk@gmail.com",
    description=("Tools for generation hierachical documents in HTML and PDF, primarily designed for building reports of computational models."),
    license="BSD",
    url="https://github.com/mikehulluk/mredoc",
    package_dir={'': 'src'},
    packages=['mredoc',
              'mredoc.util',
              'mredoc.visitors',
              'mredoc.writers',
              'mredoc.writers.html',
              'mredoc.writers.latex'
              ],
    # Could also have been done with 'scripts=':
    #entry_points = {
    #    'console_scripts': [
    #        'mreorg.curate = mreorg.curator.cmdline.mreorg_curate:main',
    #        ],
    #},
    package_data={
        'mredoc': [
            'resources/*',
            'testing/*',
        ]
    },
    #data_files=[('mreorg/etc', ['etc/configspec.ini']),
    #    #('config', ['cfg/data.cfg']),
    #    #('/etc/init.d', ['init-script'])
    #    ],
    # BUG FIX: the original install_requires listed 'mredoc' itself — a
    # package must not depend on itself; the self-dependency is removed.
    install_requires=['matplotlib', 'pygments'],
    long_description=read('README.txt'),
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Topic :: Utilities",
        "License :: OSI Approved :: BSD License",
    ],
)
| {
"content_hash": "c013521f3f8a801061ab2a81fcef618e",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 147,
"avg_line_length": 29.31578947368421,
"alnum_prop": 0.5553560742070617,
"repo_name": "mikehulluk/mredoc",
"id": "5eea13c87a6c5849712a92b70c6f387e5671df58",
"size": "1671",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "5030"
},
{
"name": "Python",
"bytes": "121764"
}
],
"symlink_target": ""
} |
'''
:maintainer: HubbleStack / madchills
:maturity: 2016.7.0
:platform: Windows
:requires: SaltStack
'''
from __future__ import absolute_import
import copy
import csv
import fnmatch
import logging
import salt.utils
import salt.utils.platform
log = logging.getLogger(__name__)
__virtualname__ = 'win_auditpol'
def __virtual__():
    '''Only load this audit module on Windows hosts.'''
    if salt.utils.platform.is_windows():
        return True
    return False, 'This audit module only runs on windows'
def apply_labels(__data__, labels):
    '''
    Filters out the tests whose label doesn't match the labels given when running audit and returns a new data structure with only labelled tests.
    '''
    # No label filter requested: pass the data through untouched.
    if not labels:
        return __data__
    wanted = set(labels)
    filtered = {__virtualname__: {}}
    for section in ('blacklist', 'whitelist'):
        if section not in __data__.get(__virtualname__, {}):
            continue
        kept = []
        for test_case in __data__[__virtualname__].get(section, []):
            # Each test case is a one-entry dict: {test_name: test_body}.
            if isinstance(test_case, dict) and test_case:
                body = test_case.get(next(iter(test_case)))
                # Keep the case only when it carries every requested label.
                if wanted.issubset(set(body.get('labels', []))):
                    kept.append(test_case)
        filtered[__virtualname__][section] = kept
    return filtered
def audit(data_list, tags, labels, debug=False, **kwargs):
    '''
    Runs auditpol on the local machine and audits the return data
    with the CIS yaml processed by __virtual__

    data_list
        list of (profile, yaml-data) tuples to merge and audit
    tags
        glob pattern selecting which tags to audit
    labels
        optional label filter applied via apply_labels()
    debug
        when True, log the merged data and tag map

    Returns a dict with 'Success', 'Failure' and 'Controlled' result lists.
    '''
    __data__ = {}
    __auditdata__ = _auditpol_import()
    for profile, data in data_list:
        _merge_yaml(__data__, data, profile)
    __data__ = apply_labels(__data__, labels)
    __tags__ = _get_tags(__data__)
    if debug:
        log.debug('auditpol audit __data__:')
        log.debug(__data__)
        log.debug('auditpol audit __tags__:')
        log.debug(__tags__)
    ret = {'Success': [], 'Failure': [], 'Controlled': []}
    for tag in __tags__:
        if fnmatch.fnmatch(tag, tags):
            for tag_data in __tags__[tag]:
                if 'control' in tag_data:
                    ret['Controlled'].append(tag_data)
                    continue
                name = tag_data['name']
                audit_type = tag_data['type']
                match_output = tag_data['match_output'].lower()
                # Blacklisted audit (do not include)
                if 'blacklist' in audit_type:
                    if name not in __auditdata__:
                        ret['Success'].append(tag_data)
                    else:
                        # BUG FIX: the message previously misspelled
                        # "blacklisted" as "balcklisted".
                        tag_data['failure_reason'] = "Value of blacklisted attribute '{0}' is " \
                                                     "configured on your system. It should not " \
                                                     "be configured".format(name)
                        ret['Failure'].append(tag_data)
                # Whitelisted audit (must include)
                if 'whitelist' in audit_type:
                    if name in __auditdata__:
                        audit_value = __auditdata__[name].lower()
                        tag_data['found_value'] = audit_value
                        secret = _translate_value_type(audit_value, tag_data['value_type'], match_output)
                        if secret:
                            ret['Success'].append(tag_data)
                        else:
                            tag_data['failure_reason'] = "Value of attribute '{0}' is currently" \
                                                         " set as '{1}'. Expected value is '{2}({3})'" \
                                                         .format(name,
                                                                 audit_value,
                                                                 match_output,
                                                                 tag_data['value_type'])
                            ret['Failure'].append(tag_data)
                    else:
                        log.debug('When trying to audit the advanced auditpol section,'
                                  ' the yaml contained incorrect data for the key')
    return ret
def _merge_yaml(ret, data, profile=None):
    '''
    Merge two yaml dicts together at the secedit:blacklist and
    secedit:whitelist level
    '''
    target = ret.setdefault(__virtualname__, {})
    for section in ('blacklist', 'whitelist'):
        if section not in data.get(__virtualname__, {}):
            continue
        bucket = target.setdefault(section, [])
        for name, body in data[__virtualname__][section].iteritems():
            # Record which profile each entry came from.
            if profile and isinstance(body, dict):
                body['nova_profile'] = profile
            bucket.append({name: body})
    return ret
def _get_tags(data):
    '''
    Retrieve all the tags for this distro from the yaml
    '''
    ret = {}
    distro = __grains__.get('osfullname')
    for toplist, toplevel in data.get(__virtualname__, {}).iteritems():
        # secedit:whitelist
        for audit_dict in toplevel:
            for audit_id, audit_data in audit_dict.iteritems():
                # secedit:whitelist:PasswordComplexity
                tags_dict = audit_data.get('data', {})
                # secedit:whitelist:PasswordComplexity:data
                tags = None
                # Find the osfinger entry matching this distro; keys may be
                # comma-separated lists of glob patterns.
                for osfinger in tags_dict:
                    if osfinger == '*':
                        continue
                    osfinger_list = [finger.strip() for finger in osfinger.split(',')]
                    for osfinger_glob in osfinger_list:
                        if fnmatch.fnmatch(distro, osfinger_glob):
                            tags = tags_dict.get(osfinger)
                            break
                    if tags is not None:
                        break
                # If we didn't find a match, check for a '*'
                if tags is None:
                    tags = tags_dict.get('*', [])
                # secedit:whitelist:PasswordComplexity:data:Windows 2012
                if isinstance(tags, dict):
                    # malformed yaml, convert to list of dicts
                    tmp = []
                    for name, tag in tags.iteritems():
                        tmp.append({name: tag})
                    tags = tmp
                for item in tags:
                    for name, tag in item.iteritems():
                        tag_data = {}
                        # Whitelist could have a dictionary, not a string
                        if isinstance(tag, dict):
                            tag_data = copy.deepcopy(tag)
                            tag = tag_data.pop('tag')
                        if tag not in ret:
                            ret[tag] = []
                        # Flatten everything into one record per tag entry;
                        # the raw per-distro 'data' payload is dropped.
                        formatted_data = {'name': name,
                                          'tag': tag,
                                          'module': 'win_auditpol',
                                          'type': toplist}
                        formatted_data.update(tag_data)
                        formatted_data.update(audit_data)
                        formatted_data.pop('data')
                        ret[tag].append(formatted_data)
    return ret
def _auditpol_export():
    '''
    Run ``auditpol /get /category:* /r`` via salt and return its CSV output
    split into a list of lines.

    Returns None (implicitly) when the command produced no output or raised;
    callers should be prepared for that.
    '''
    try:
        dump = __salt__['cmd.run']('auditpol /get /category:* /r')
        if dump:
            dump = dump.split('\n')
            return dump
        else:
            log.error('Nothing was returned from the auditpol command.')
    # StandardError is Python 2 only — this module targets Py2 (see iteritems
    # usage elsewhere in the file).
    except StandardError:
        log.error('An error occurred running the auditpol command.')
def _auditpol_import():
    '''Parse the auditpol CSV export into {Subcategory: Inclusion Setting}.'''
    settings = {}
    for row in csv.DictReader(_auditpol_export()):
        # Skip blank rows produced by trailing newlines in the export.
        if row:
            settings[row['Subcategory']] = row['Inclusion Setting']
    return settings
def _translate_value_type(current, value, evaluator):
if 'equal' in value:
if current == evaluator:
return True
else:
return False
| {
"content_hash": "5cee86863b96f217fa23a5ab4fddb48e",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 146,
"avg_line_length": 39.8421052631579,
"alnum_prop": 0.48252672030743365,
"repo_name": "hubblestack/hubble-salt",
"id": "2c8069979e7e713df6f3742a6c29953c25a74e9d",
"size": "8353",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "hubblestack_nova/win_auditpol.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "472126"
}
],
"symlink_target": ""
} |
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response, redirect
from django.template import Template, Context, RequestContext
from django.core.mail import send_mail
from starl.forms import *
from starl.models import *
from starl.views import *
from forms import *
import re,datetime,time
def assets_server(request):
    """Render the server-asset listing page with an empty add-asset form.

    Redirects to the login page when the session check fails.
    """
    # status() == 1 means the 'starl' login cookie is missing/expired
    # (see the other views in this module for the same guard).
    if status(request) == 1:
        return render_to_response('login.html',{'error':'链接超时,请重新登陆!!!','alert':'alert-error'})
    assets_server_form = assets_server_add_Form()
    # All server assets; no filter arguments means "everything".
    form = Assets_Server.objects.filter()
    a_path = u'设备资产'
    i_user = request.COOKIES.get('starl', '')
    return render_to_response('assets_server.html', {'form':form,'assets_server_form':assets_server_form,'a_path':a_path,'i_user':i_user})
def assets_server_add(request):
    """Create a new Assets_Server record from the submitted form.

    GET renders an empty form; POST validates, copies the cleaned data onto
    a new model instance and saves it, re-rendering the form with a
    success/failure banner. Redirects to login when the session timed out.
    """
    if status(request) == 1:
        return render_to_response('login.html',{'error':'链接超时,请重新登陆!!!','alert':'alert-error'})
    if request.method == 'POST':
        form = assets_server_add_Form(request.POST)
        if form.is_valid():
            assets_server_data = Assets_Server()
            # Copy every validated field onto the model instance instead of
            # 26 hand-written assignments (same fields, same order).
            for field_name in (
                    'Room', 'System', 'Type', 'Use', 'Brand',
                    'Equipment_Model', 'Weights', 'Private_IP', 'Public_IP',
                    'Admin_IP', 'SN', 'Response', 'expiration', 'CPU',
                    'CPU_NUM', 'MEM', 'MEM_NUM', 'MEM_SINGLE', 'HDD',
                    'HDD_NUM', 'HDD_SINGLE', 'RAID_Type', 'MAC_1', 'MAC_2',
                    'MAC_3', 'MAC_4'):
                setattr(assets_server_data, field_name,
                        form.cleaned_data[field_name])
            # Bug fix: Model.save() always returns None, so the original
            # ``if save() == None`` made the failure branch unreachable.
            # Detect failure by catching the exception instead.
            try:
                assets_server_data.save()
            except Exception:
                return render_to_response('assets_server_add.html',{'form':form,'error':'添加失败!!!','alert':'alert-error'})
            return render_to_response('assets_server_add.html',{'form':form,'error':'添加成功!!!','alert':'alert-success'})
        else:
            return render_to_response('assets_server_add.html',{'form':form,'error':'添加失败!!!','alert':'alert-error'})
    else:
        form = assets_server_add_Form()
        a_path = u'设备资产'
        i_user = request.COOKIES.get('starl', '')
        return render_to_response('assets_server_add.html', {'form':form,'a_path':a_path,'i_user':i_user})
def assets_server_view(request, id):
    """Placeholder detail view: echoes the asset id (login check first)."""
    if status(request) == 1:
        timeout_ctx = {'error': '链接超时,请重新登陆!!!', 'alert': 'alert-error'}
        return render_to_response('login.html', timeout_ctx)
    return HttpResponse('assets_server_view' + id)
def assets_serverr_edit(request, id):
    """Placeholder edit view: echoes the asset id (login check first).

    NOTE(review): the function name carries a double-r typo
    (``assets_serverr_edit``); kept as-is because URL configuration
    elsewhere references it by this name.
    """
    if status(request) == 1:
        timeout_ctx = {'error': '链接超时,请重新登陆!!!', 'alert': 'alert-error'}
        return render_to_response('login.html', timeout_ctx)
    return HttpResponse('assets_server_edit' + id)
def assets_server_delete(request, id):
    """Placeholder delete view: echoes the asset id (login check first)."""
    if status(request) == 1:
        timeout_ctx = {'error': '链接超时,请重新登陆!!!', 'alert': 'alert-error'}
        return render_to_response('login.html', timeout_ctx)
    return HttpResponse('assets_server_delete' + id)
"content_hash": "97d9490282bffa3e1cedb73e903f7c87",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 135,
"avg_line_length": 46.770114942528735,
"alnum_prop": 0.6647825018432048,
"repo_name": "selboo/starl-mangle",
"id": "da0958d13fddaf4e2239136c49e0a932b49a83f0",
"size": "4308",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "starl/views_assets.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "1463"
},
{
"name": "CSS",
"bytes": "197524"
},
{
"name": "HTML",
"bytes": "792119"
},
{
"name": "JavaScript",
"bytes": "517786"
},
{
"name": "PHP",
"bytes": "613053"
},
{
"name": "Python",
"bytes": "312293"
},
{
"name": "Shell",
"bytes": "4409"
}
],
"symlink_target": ""
} |
'''
------------------------------------------------------------------------------
Copyright (c) 2015 Microsoft Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
------------------------------------------------------------------------------
'''
try:
from http.server import HTTPServer, BaseHTTPRequestHandler
except ImportError:
from SimpleHTTPServer import SimpleHTTPRequestHandler as BaseHTTPRequestHandler
from SocketServer import TCPServer as HTTPServer
try:
from urllib.parse import urlparse, parse_qs, unquote
except ImportError:
from urlparse import urlparse, parse_qs
from urllib import unquote
import threading
import webbrowser
def get_auth_token(auth_url, redirect_uri):
    """Easy way to get the auth token. Wraps up all the threading
    and stuff. Does block main thread.

    Args:
        auth_url (str): URL of auth server, including query params
            needed to get access token.
        redirect_uri (str): Redirect URI, as set for the app. Should be
            something like "http://localhost:8080" for this to work.

    Returns:
        str: A string representing the auth code, sent back by the server
    """
    netloc = urlparse(redirect_uri).netloc
    host, _, port_text = netloc.partition(':')
    # Default to port 80 when the redirect URI carries no explicit port.
    port = int(port_text) if port_text else 80

    # Run the capture server on a background thread; the event fires once
    # the request handler has stored a token on the server instance.
    token_ready = threading.Event()
    server = GetAccessTokenServer((host, port), token_ready,
                                  GetAccessTokenRequestHandler)
    worker = threading.Thread(target=server.serve_forever)
    worker.start()

    # The browser performs the auth flow and redirects back to our server.
    webbrowser.open(auth_url)

    token_ready.wait()  # block until the redirect delivered a token
    token = server.authentication_token
    server.shutdown()
    worker.join()
    return token
class GetAccessTokenServer(HTTPServer, object):
    """HTTP server that waits for the OAuth redirect and captures tokens.

    The request handler stores tokens via the ``access_token`` /
    ``authentication_token`` properties; setting either to a non-None value
    fires ``stop_event`` so the waiting caller knows a token has arrived.
    """

    def __init__(self, server_address, stop_event, RequestHandlerClass):
        # Bug fix: the original called ``super(HTTPServer, self).init(...)``,
        # which (a) referenced a non-existent ``init`` method and (b) started
        # the MRO lookup *after* HTTPServer, so HTTPServer's own initializer
        # (which binds the socket) was skipped.
        super(GetAccessTokenServer, self).__init__(server_address,
                                                   RequestHandlerClass)
        self._stop_event = stop_event
        self._access_token = None
        self._authentication_token = None

    @property
    def access_token(self):
        return self._access_token

    @access_token.setter
    def access_token(self, value):
        self._access_token = value
        if value is not None:
            self._stop_event.set()  # wake up the waiting caller

    @property
    def authentication_token(self):
        return self._authentication_token

    @authentication_token.setter
    def authentication_token(self, value):
        self._authentication_token = value
        if value is not None:
            self._stop_event.set()  # wake up the waiting caller
class GetAccessTokenRequestHandler(BaseHTTPRequestHandler):
    """Handles the OAuth redirect request and stores tokens on the server."""

    def do_GET(self):
        query = parse_qs(urlparse(self.path).query)
        if "access_token" in query:
            # Hand the access token over to the owning server instance.
            self.server.access_token = query["access_token"][0]
        if "authentication_token" in query:
            # Hand the authentication token over to the owning server instance.
            self.server.authentication_token = query["authentication_token"][0]
        if "error" in query:
            message = unquote(query["error"][0])
            description = unquote(query["error_description"][0])
            raise RuntimeError("The server returned an error: {} - {}"
                               .format(message, description))
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.end_headers()
        # Close the popup window that the browser opened for the auth flow.
        self.wfile.write(
            '<script type="text/javascript">window.close()</script>'
            .encode("utf-8"))
| {
"content_hash": "338fd0b1947472cb228889bbb101f70c",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 96,
"avg_line_length": 38.57258064516129,
"alnum_prop": 0.6583734058122517,
"repo_name": "OneDrive/onedrive-sdk-python",
"id": "d432f721b272f5c2d29ed47ecb2e953f9e498260",
"size": "4785",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/onedrivesdk/helpers/get_access_token_server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1842"
},
{
"name": "PowerShell",
"bytes": "2786"
},
{
"name": "Python",
"bytes": "616271"
}
],
"symlink_target": ""
} |
from grafanalib.core import (
BarGauge,
ConstantInput,
DataSourceInput,
GaugePanel,
Heatmap,
HeatmapColor,
RowPanel,
Stat,
Threshold,
YAxis,
)
from grafanalib import formatunits as UNITS
from scalgrafanalib import layout, Tooltip, Target, TimeSeries, Dashboard
# HTTP status-code classes used as the ``status`` label value on the
# lifecycle_s3_operations Prometheus metric.
STATUS_CODE_2XX = '2xx'
STATUS_CODE_3XX = '3xx'
STATUS_CODE_4XX = '4xx'
STATUS_CODE_5XX = '5xx'
def s3_request_timeseries_expr(process, job, code):
    """Build the PromQL increase() expression for S3 operations with the
    given status class, optionally narrowed to a job and an origin process."""
    selectors = ['namespace="${namespace}"', f'status="{code}"']
    if job is not None:
        selectors.append(f'job="{job}"')
    if process is not None:
        selectors.append(f'origin="{process}"')
    selector = ','.join(selectors)
    return f'sum(increase(lifecycle_s3_operations{{{selector}}}[$__interval]))'
def s3_request_timeseries(title, process=None, job=None):
    """Time-series panel of S3 request counts, one target per status class.

    The original repeated the Target(...) construction four times; the
    comprehension produces the identical targets in the identical order.
    """
    return TimeSeries(
        title=title,
        dataSource="${DS_PROMETHEUS}",
        fillOpacity=5,
        legendDisplayMode='table',
        targets=[
            Target(
                expr=s3_request_timeseries_expr(process, job, code),
                legendFormat=f"HTTP {code}",
            )
            for code in (STATUS_CODE_2XX, STATUS_CODE_3XX,
                         STATUS_CODE_4XX, STATUS_CODE_5XX)
        ],
    )
def s3_request_error_rate_expr(process, job, code):
    """Build the PromQL ratio of erroring S3 requests over active requests.

    The numerator is narrowed to *code* (or every non-2xx status when *code*
    is None); both sides share the optional job/origin label filters.
    """
    num_labels = ['namespace="${namespace}"']
    den_labels = ['namespace="${namespace}"']
    # No explicit code means "any non-2xx response".
    num_labels.append(f'status="{code}"' if code is not None
                      else 'status!="2xx"')
    if job is not None:
        num_labels.append(f'job="{job}"')
        den_labels.append(f'job="{job}"')
    if process is not None:
        num_labels.append(f'origin="{process}"')
        den_labels.append(f'origin="{process}"')
    numerator = ('sum(rate(lifecycle_s3_operations{%s}[$__rate_interval]))'
                 % ','.join(num_labels))
    denominator = ('sum(rate(lifecycle_s3_operations{%s}[$__rate_interval]) > 0)'
                   % ','.join(den_labels))
    return f'{numerator}/{denominator}'
def s3_request_error_rate(title, process=None, job=None, code=None):
    """Stat panel showing the mean S3 error rate; turns red above 5%."""
    rate_expr = s3_request_error_rate_expr(process, job, code)
    return Stat(
        title=title,
        dataSource="${DS_PROMETHEUS}",
        format=UNITS.PERCENT_UNIT,
        reduceCalc="mean",
        targets=[Target(expr=rate_expr)],
        thresholds=[
            Threshold("green", 0, 0.0),
            Threshold("red", 1, 0.05),
        ],
    )
def s3_request_error_rates(process=None, job=None):
    """Return the standard row of S3 error-rate stats (all, 3xx, 4xx, 5xx)."""
    specs = (
        ("S3 All Errors", None),
        ("S3 3xx Errors", STATUS_CODE_3XX),
        ("S3 4xx Errors", STATUS_CODE_4XX),
        ("S3 5xx Errors", STATUS_CODE_5XX),
    )
    return [
        s3_request_error_rate(title, process=process, job=job, code=code)
        for title, code in specs
    ]
def s3_deletion_request_time_series(op):
    """Time series of success vs error request rates for a deletion op
    performed by the lifecycle object processor."""
    # Shared label tail; ${...} placeholders are escaped with doubled braces.
    common = (f'op="{op}",namespace="${{namespace}}",'
              f'job="${{job_lifecycle_object_processor}}"')
    return TimeSeries(
        title=f'{op} Request Rate',
        dataSource="${DS_PROMETHEUS}",
        fillOpacity=5,
        unit=UNITS.REQUESTS_PER_SEC,
        targets=[
            Target(
                expr=('sum(rate(lifecycle_s3_operations{status="2xx",'
                      + common + '}[$__rate_interval]))'),
                legendFormat="success",
            ),
            Target(
                expr=('sum(rate(lifecycle_s3_operations{status!="2xx",'
                      + common + '}[$__rate_interval]))'),
                legendFormat="error",
            ),
        ],
    )
def kafka_messages_time_series(title, expr):
    """Log-scale time series with a single 'messages' target."""
    messages_target = Target(expr=expr, legendFormat="messages")
    return TimeSeries(
        title=title,
        dataSource="${DS_PROMETHEUS}",
        fillOpacity=5,
        scaleDistributionType='log',
        scaleDistributionLog=10,
        legendDisplayMode='hidden',
        targets=[messages_target],
    )
def kafka_row(topic, op):
    """Return the pair of panels (published / failed messages) for a topic."""
    selector = f'op="{op}",namespace="${{namespace}}"'
    panels = []
    for title_suffix, metric in (
            ('Messages in Queue', 'lifecycle_kafka_publish_success'),
            ('Failed Messages', 'lifecycle_kafka_publish_error')):
        panels.append(kafka_messages_time_series(
            f'{topic} {title_suffix}',
            f'sum(increase({metric}{{{selector}}}[$__interval]))',
        ))
    return panels
# "Up" stat: number of running instances of each lifecycle component
# (conductor, bucket processor, expiration processor).
up = Stat(
    title="Up",
    dataSource="${DS_PROMETHEUS}",
    reduceCalc="last",
    noValue='0',
    targets=[
        Target(
            expr='sum(up{namespace="${namespace}",job="${job_lifecycle_producer}"})',
            legendFormat="Conductor",
        ),
        Target(
            expr='sum(up{namespace="${namespace}",job="${job_lifecycle_bucket_processor}"})',
            legendFormat="Bucket Processor",
        ),
        Target(
            expr='sum(up{namespace="${namespace}",job="${job_lifecycle_object_processor}"})',
            legendFormat="Expiration Processor",
        ),
    ],
    thresholds=[
        Threshold("green", 0, 0.0),
    ],
)
# Timestamp of the most recently started lifecycle batch, rendered as a
# local date/time.
lifecycle_batch = Stat(
    title="Latest Batch Start Time",
    dataSource="${DS_PROMETHEUS}",
    reduceCalc="lastNotNull",
    format='dateTimeAsLocal',
    targets=[
        Target(
            expr='lifecycle_latest_batch_start_time{job="${job_lifecycle_producer}",namespace="${namespace}"}',
            instant=True,
        ),
    ],
)
# Global (all components) S3 request and error-rate panels.
lifecycle_global_s3_requests = s3_request_timeseries("S3 Requests")
lifecycle_global_s3_error_rates = s3_request_error_rates()
# Panels scoped to the bucket processor job.
lifecycle_bucket_processor_s3_requests = s3_request_timeseries(
    "S3 Requests",
    process="bucket",
    job='${job_lifecycle_bucket_processor}',
)
lifecycle_bucket_processor_s3_error_rates = s3_request_error_rates(
    process="bucket",
    job='${job_lifecycle_bucket_processor}',
)
# Panels scoped to the expiration (object) processor job.
lifecycle_expiration_processor_s3_requests = s3_request_timeseries(
    "S3 Requests",
    process="expiration",
    job='${job_lifecycle_object_processor}',
)
lifecycle_expiration_processor_s3_error_rates = s3_request_error_rates(
    process="expiration",
    job='${job_lifecycle_object_processor}',
)
# Deletion-rate panels for object and multipart-upload cleanup.
lifecycle_expiration_processor_s3_delete_object_ops = s3_deletion_request_time_series("deleteObject")
lifecycle_expiration_processor_s3_delete_mpu_ops = s3_deletion_request_time_series("abortMultipartUpload")
# Assemble the Backbeat Lifecycle dashboard: declared inputs (datasource,
# namespace, and the three lifecycle job names) plus the panel layout.
dashboard = (
    Dashboard(
        title="Backbeat Lifecycle",
        editable=True,
        refresh="30s",
        tags=["backbeat", "lifecycle"],
        timezone="",
        inputs=[
            DataSourceInput(
                name="DS_PROMETHEUS",
                label="Prometheus",
                pluginId="prometheus",
                pluginName="Prometheus",
            ),
            ConstantInput(
                name="namespace",
                label="namespace",
                description="Namespace associated with the Zenko instance",
                value="default",
            ),
            ConstantInput(
                name="job_lifecycle_producer",
                label="job lifecycle producer",
                description="Name of the lifecycle conductor job, used to filter only lifecycle conductor instances",
                value="artesca-data-backbeat-lifecycle-producer-headless",
            ),
            ConstantInput(
                name="job_lifecycle_bucket_processor",
                label="job lifecycle bucket processor",
                description="Name of the lifecycle bucket processor job, used to filter only lifecycle bucket processor instances",
                value="artesca-data-backbeat-lifecycle-bucket-processor-headless",
            ),
            ConstantInput(
                name="job_lifecycle_object_processor",
                label="job lifecycle object processor",
                description="Name of the lifecycle object processor job, used to filter only lifecycle object processor instances",
                value="artesca-data-backbeat-lifecycle-object-processor-headless",
            ),
        ],
        # Row layout: overview, Kafka queues, then one section per processor.
        panels=layout.column([
            layout.row([lifecycle_batch, up], height=4),
            layout.row([lifecycle_global_s3_requests], height=10),
            layout.row(lifecycle_global_s3_error_rates, height=4),
            RowPanel(title="Kafka"),
            layout.row(kafka_row("Expiration Bucket Task", "BucketTopic"), height=10),
            layout.row(kafka_row("Expiration Object Task", "ObjectTopic"), height=10),
            RowPanel(title="Lifecycle Bucket Processors"),
            layout.row([lifecycle_bucket_processor_s3_requests], height=10),
            layout.row(lifecycle_bucket_processor_s3_error_rates, height=4),
            RowPanel(title="Lifecycle Expiration Processors"),
            layout.row([lifecycle_expiration_processor_s3_requests], height=10),
            layout.row(lifecycle_expiration_processor_s3_error_rates, height=4),
            layout.row([lifecycle_expiration_processor_s3_delete_object_ops], height=10),
            layout.row([lifecycle_expiration_processor_s3_delete_mpu_ops], height=10),
        ]),
    )
    .auto_panel_ids()
    .verify_datasources()
)
| {
"content_hash": "d0c20ad6516f61d87949ffe1c00a17c2",
"timestamp": "",
"source": "github",
"line_count": 288,
"max_line_length": 131,
"avg_line_length": 33.260416666666664,
"alnum_prop": 0.58795281344608,
"repo_name": "scality/backbeat",
"id": "aa65d91d790a2f50824b9641323ba5f1f748e4cb",
"size": "9579",
"binary": false,
"copies": "1",
"ref": "refs/heads/development/8.4",
"path": "monitoring/lifecycle/dashboard.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1500"
},
{
"name": "Go",
"bytes": "48137"
},
{
"name": "JavaScript",
"bytes": "2135867"
},
{
"name": "Python",
"bytes": "55658"
},
{
"name": "Shell",
"bytes": "16801"
}
],
"symlink_target": ""
} |
import os, json, inspect, sys
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.db import models
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, DeleteView, UpdateView
from django.views.generic.list import ListView
from .services import (
Config, DBSettings, MiddlewareSettings, ModelFactory,
StaticSettings, TemplateSettings
)
from .templates import base_apps
from .utils import generate_secret_key, generate_get_absolute_url, DJANGO_FIELDS
# Indentation unit used by the code generators below.
separator = ' '
# Header line emitted at the top of every generated file.
header = '# -*- coding:utf-8 -*-'
# Fix: raw string — the original non-raw '\d' is an invalid escape sequence
# (DeprecationWarning on Python 3.6+); the value is unchanged.
slug_regex = r'(?P<pk>\d+)'
# Django class-based views the generators know how to emit.
DJ_CLASSES = [ CreateView, DeleteView, DetailView, ListView, UpdateView ]
# Import statement to emit for each generated view class.
DJ_CLASSES_IMPORT = {
    'CreateView': 'from django.views.generic.edit import CreateView',
    'UpdateView': 'from django.views.generic.edit import UpdateView',
    'DeleteView': 'from django.views.generic.edit import DeleteView',
    'DetailView': 'from django.views.generic.detail import DetailView',
    'ListView': 'from django.views.generic.list import ListView'
}
class DjangoLite(object):
    """Single-file Django application helper.

    Wires settings, models, views and URLs together so a whole Django app can
    live in one module, and exposes ``make_*`` management commands that dump
    the equivalent conventional project files.

    NOTE(review): ``config``, ``_urlpatterns``, ``MODELS`` and ``VIEWS`` are
    mutable class attributes and therefore shared across instances; fine for
    the intended one-app-object-per-process usage, but state would leak
    between multiple DjangoLite instances.
    """
    # view suffix -> (class-name suffix, Django CBV base, needs ``fields``)
    extra_mapping = {
        'detail_view': ('Detail', DetailView, False),
        'list_view': ('List', ListView, False),
        'create_view': ('Create', CreateView, True),
        'delete_view': ('Delete', DeleteView, False),
        'update_view': ('Update', UpdateView, True)
    }
    # CLI sub-command -> generator method name
    commands = {
        'make_models': 'generate_models',
        'make_urls': 'generate_urls',
        'make_views': 'generate_views',
        'make_settings': 'generate_settings'
    }
    autoconfigure = True
    config = {}
    configuration = None
    _urlpatterns = []
    MODELS = {}
    VIEWS = {}
    def __init__(self, file_attr, autoconfigure=True, *args, **kwargs):
        # ``file_attr`` is the caller's ``__file__``; it anchors the app dir.
        self.base_dir = os.path.dirname(os.path.abspath(file_attr))
        sys.path[0] = os.path.dirname(self.base_dir)
        self.configuration = Config()
        if autoconfigure:
            self.configure()
    def set_url(self):
        # Reset URL patterns to just the admin site.
        self._urlpatterns = [url(r'^admin/', include(admin.site.urls))]
    @property
    def urlpatterns(self):
        # Imported lazily: staticfiles needs settings configured first.
        from django.contrib.staticfiles.urls import staticfiles_urlpatterns
        return [url(r'^admin/', include(admin.site.urls))] + self._urlpatterns + staticfiles_urlpatterns()
    @property
    def root_urlconf(self):
        # The instance itself serves as ROOT_URLCONF (it has ``urlpatterns``).
        return self
    def configure(self, secret_key=None, debug=True, **kwargs):
        """Assemble the settings dict and call ``settings.configure``."""
        if 'override' in kwargs:
            # Bug fix: the original read kwargs.get('overrides') here, so
            # passing the documented 'override' keyword always yielded None.
            self.config = kwargs.get('override')
        else:
            self.configuration.register(DBSettings(self.base_dir))
            self.configuration.register(TemplateSettings(self.base_dir))
            self.configuration.register(MiddlewareSettings())
            self.configuration.register(StaticSettings(self.base_dir))
            self.config['BASE_DIR'] = self.base_dir
            self.config['ROOT_URLCONF'] = self.root_urlconf
            self.config['DEBUG'] = debug
            self.config.update(self.installed_apps())
            self.config.update(self.configuration.settings)
            self.config['SECRET_KEY'] = generate_secret_key() if not secret_key else secret_key
            self.config['SESSION_ENGINE'] = 'django.contrib.sessions.backends.signed_cookies'
            if 'extra' in kwargs:
                self.config.update(kwargs.get('extra'))
        if not settings.configured:
            settings.configure(**self.config)
            import django
            django.setup()
    @property
    def app_label(self):
        # Derive the Django app label from the base directory name.
        base_dir = self.config.get('BASE_DIR')
        if base_dir:
            return os.path.basename(base_dir)
    def new_model(self, *args, **kwargs):
        """Create a model class via ModelFactory and register it."""
        model = ModelFactory.create(self.app_label, __name__, *args, **kwargs)
        setattr(model, 'get_absolute_url', generate_get_absolute_url(model.__name__.lower()))
        self.MODELS[model.__name__] = model
    def add_view(self, url_pattern, func, name=None):
        """Register *func* as a view under *url_pattern* (named after func)."""
        params = [url_pattern, func]
        if name is None:
            name = func.__name__
        self._urlpatterns.append(
            url(*params, name=name)
        )
        self.VIEWS[func.__name__] = func
    def installed_apps(self, **kwargs):
        """Return the INSTALLED_APPS setting, honouring overrides."""
        if 'override_apps' in kwargs:
            # Bug fix: the original read the misspelled key 'ovveride_apps',
            # so a supplied override was silently ignored (apps_list = None).
            apps_list = kwargs.get('override_apps')
        else:
            apps_list = base_apps + (
                self.app_label,
            ) + kwargs.get('extra_apps', ())
        return {
            'INSTALLED_APPS': apps_list
        }
    def query(self, model):
        """Return the manager for the registered model named *model*, if any."""
        model = self.MODELS.get(model)
        if model:
            return model.objects
    def start(self):
        """Entry point: run a management command or serve via WSGI."""
        from django.core.wsgi import get_wsgi_application
        if __name__ == "django_lite.django_lite":
            from django.core.management import execute_from_command_line
            try:
                command = sys.argv[1]
                if command in self.commands.keys():
                    # One of our make_* generators: print each emitted line.
                    cmd = getattr(self, self.commands.get(command))
                    for line in cmd():
                        sys.stdout.write("%s\n" % line)
                    return
            except IndexError:
                pass
            execute_from_command_line(sys.argv)
        else:
            get_wsgi_application()
    def route(self, url_pattern, name=None):
        """Decorator: register the wrapped function as a view."""
        def wrap(f):
            self.add_view(url_pattern, f, name)
            def wrapped_f(*args):
                f(*args)
            return wrapped_f
        return wrap
    def generate_view(self, cls, view_name):
        """Build a CBV subclass for *cls* from extra_mapping, or None."""
        try:
            view_name, view_parent, edit = self.extra_mapping[view_name]
            cls_name = cls.__name__
            view_class_name = '{0}{1}'.format(cls_name, view_name)
            data = { 'model': self.MODELS[cls_name]}
            if edit:
                data['fields'] = '__all__'
            return type(view_class_name, (view_parent, ), data)
        except KeyError:
            pass
    def model(self, admin=True, crud=False):
        """Class decorator: register the class as a model, optionally with
        admin registration and auto-generated CRUD views."""
        def wrap(cls):
            # Collect non-dunder, non-callable class attributes as fields.
            attributes = inspect.getmembers(cls, lambda attr:not(inspect.isroutine(attr)))
            attrs = dict([attr for attr in attributes if not(attr[0].startswith('__') and attr[0].endswith('__'))])
            self.new_model(
                **{
                    'name': cls.__name__,
                    'admin': admin,
                    'attrs': attrs
                }
            )
            setattr(cls, 'objects', self.query(cls.__name__))
            generated_views = []
            if hasattr(cls, 'Extra'):
                # Explicitly declared views on the nested Extra class.
                base_url = ''
                if hasattr(cls.Extra, 'base_url'):
                    base_url = cls.Extra.base_url
                else:
                    base_url = cls.__name__.lower()
                for extra in cls.Extra.__dict__.iteritems():
                    view = self.generate_view(cls, extra[0])
                    if view is not None:
                        generated_views.append(extra[0])
                        view_name = '{0}_{1}'.format(cls.__name__.lower(), extra[0])
                        url = '{0}{1}'.format(base_url, extra[1])
                        self.add_view(url, view.as_view(), view_name)
            else:
                base_url = cls.__name__.lower()
            if crud:
                # Fill in whatever CRUD views were not explicitly declared.
                crud_views = set(self.extra_mapping.keys())
                remaining = crud_views - set(generated_views)
                for new_view in remaining:
                    view = self.generate_view(cls, new_view)
                    view_name = '{0}_{1}'.format(cls.__name__.lower(), new_view)
                    view_info = self.extra_mapping[new_view]
                    url_suffix = view_info[0].lower()
                    url = '^{0}/{1}'.format(base_url, url_suffix)
                    if view_info[2] or new_view == 'delete_view':
                        # Edit and delete views need a pk capture group.
                        url = '{0}/{1}$'.format(url, slug_regex)
                    self.add_view(url, view.as_view(), view_name)
            return cls
        return wrap
    def generate_models(self):
        """Yield the lines of a conventional models.py for the registry."""
        yield header
        yield 'from django.db import models'
        yield 'from django.utils.translation import ugettext_lazy as _\n'
        for k, v in self.MODELS.iteritems():
            yield 'class {0}(models.Model):'.format(k)
            fields = v._meta.get_fields()
            for field in fields:
                if field.__class__.__name__ in DJANGO_FIELDS:
                    yield '{0}{1} = models.{2}()'.format(separator, field.name, field.__class__.__name__)
            yield '\n{0}class Meta:'.format(separator)
            yield '{0}{1}verbose_name = _(\'{2}\')'.format(separator, separator, k.lower())
            yield '{0}{1}verbose_name_plural = _(\'{2}s\')'.format(separator, separator, k.lower())
            yield '\n{0}def __str__(self):'.format(separator)
            yield '{0}{1}return self.pk'.format(separator, separator)
            yield '\n'
    def generate_urls(self):
        """Yield the lines of a conventional urls.py for registered views."""
        from django.core.urlresolvers import RegexURLResolver
        patterns = []
        for url in self.urlpatterns:
            if isinstance(url, RegexURLResolver):
                if url.app_name == 'admin':
                    str_pattern = '{0}url(r\'^admin/\', include(admin.site.urls)),'.format(separator)
                    patterns.append(str_pattern)
            else:
                if 'static' not in url.regex.pattern:
                    str_pattern = '{0}url(r\'{1}\', views.{2}),'.format(separator, url.regex.pattern, url.callback.__name__)
                    patterns.append(str_pattern)
        yield header
        yield 'from django.conf.urls import url'
        yield 'from django.contrib.staticfiles.urls import staticfiles_urlpatterns'
        yield ''
        yield 'from . import views\n'
        yield 'urlpatterns = ['
        for url in patterns:
            yield url
        yield '] + staticfiles_urlpatterns()\n'
    def generate_views(self):
        """Yield the lines of a conventional views.py for registered views."""
        yield header
        declarations = []
        counters = {}
        for k, f in self.VIEWS.iteritems():
            if hasattr(f, 'view_class'):
                # Class-based view: re-emit as a subclass declaration.
                cls = f.view_class
                cls_str = ''
                for dj_class in DJ_CLASSES:
                    if issubclass(cls, dj_class):
                        dj_class_name = dj_class.__name__
                        try:
                            counters[dj_class_name] += 1
                        except KeyError:
                            counters[dj_class_name] = 1
                        cls_str = 'class {0}({1}):'.format(cls.__name__, dj_class.__name__)
                        cls_str += '\n{0}model={1}'.format(separator, cls.model.__name__)
                        declarations.append(cls_str)
            else:
                # Function view: emit its original source.
                declarations.append(inspect.getsource(f))
        for import_str, count in counters.iteritems():
            if count > 0:
                yield DJ_CLASSES_IMPORT[import_str]
        for declaration in declarations:
            yield '\n'
            yield declaration
    def generate_settings(self):
        """Yield the lines of a settings.py; non-JSON-serializable values
        (e.g. ROOT_URLCONF, which is this object) are skipped."""
        yield header
        for k, v in settings._wrapped.__dict__.iteritems():
            try:
                yield '{0} = {1}'.format(k, json.dumps(settings._wrapped.__dict__[k]))
            except TypeError:
                pass
| {
"content_hash": "5629d08abb39358d9e4525d7f05559f3",
"timestamp": "",
"source": "github",
"line_count": 288,
"max_line_length": 124,
"avg_line_length": 39.30555555555556,
"alnum_prop": 0.543904593639576,
"repo_name": "fmarco/django-lite",
"id": "5250b7eec79c38a04d5cf5b5b43c494b192c2023",
"size": "11344",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_lite/django_lite.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "308"
},
{
"name": "Python",
"bytes": "20112"
}
],
"symlink_target": ""
} |
"""Public API of the ecs package: the Manager and SystemTemplate classes."""
__version__ = "1.0.0"
__author__ = "Akhier Dragonheart"
__license__ = "MIT"
# Bug fix: the original wrote ("Manager, SystemTemplate") — a single string
# wrapped in parentheses, not a tuple — so ``from ecs import *`` looked up a
# nonexistent name "Manager, SystemTemplate".
__all__ = ("Manager", "SystemTemplate")

from .Manager import Manager
from .SystemTemplate import SystemTemplate
| {
"content_hash": "5f4e58412596ec4c8436615c3d2cf513",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 42,
"avg_line_length": 26.714285714285715,
"alnum_prop": 0.6898395721925134,
"repo_name": "Akhier/JourneyIntoGostOnol",
"id": "23393c145743c28034d88af987f32bd3a1f7faaf",
"size": "187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ecs/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9385"
}
],
"symlink_target": ""
} |
"""Test for the sqlite3 data connector."""
from unittest import TestCase
from tests.dc.test import AbstractDCTest
from dc.sqlite3.connector import Sqlite3Connector
class DCTest(AbstractDCTest, TestCase):
    """Run the shared data-connector test suite against the sqlite3 backend."""
    # ``name``/``connector`` parameterize AbstractDCTest for this backend.
    name = "sqlite3"
    connector = Sqlite3Connector
| {
"content_hash": "2e614612fb6754bbad1b5369c25b79f1",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 49,
"avg_line_length": 24.181818181818183,
"alnum_prop": 0.7669172932330827,
"repo_name": "v-legoff/pa-poc1",
"id": "4372fd7ae842347f5b2c4f3e5d3f4ae467e67194",
"size": "1809",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/dc/sqlite3/test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "86362"
}
],
"symlink_target": ""
} |
"""This module contains the general information for FirmwareBootDefinition ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class FirmwareBootDefinitionConsts:
    """Allowed values of the ``type`` property of FirmwareBootDefinition."""
    TYPE_ADAPTOR = "adaptor"
    TYPE_BLADE_BIOS = "blade-bios"
    TYPE_BLADE_CONTROLLER = "blade-controller"
    TYPE_FEX = "fex"
    TYPE_SAS_EXPANDER = "sas-expander"
    TYPE_SIOC = "sioc"
    TYPE_STORAGE_CONTROLLER = "storage-controller"
    TYPE_SYSTEM = "system"
class FirmwareBootDefinition(ManagedObject):
    """This is FirmwareBootDefinition class.

    Auto-generated managed-object wrapper; the metadata tables below are
    consumed by the SDK core and should not be edited by hand.
    """
    consts = FirmwareBootDefinitionConsts()
    naming_props = set([])
    # Per-platform ("classic" rack vs "modular") managed-object metadata:
    # class id, naming, supported parents/children and access rights.
    mo_meta = {
        "classic": MoMeta("FirmwareBootDefinition", "firmwareBootDefinition", "fw-boot-def", VersionMeta.Version151f, "OutputOnly", 0xf, [], ["admin", "read-only", "user"], [u'biosUnit', u'mgmtController', u'storageController', u'systemIOController'], [u'firmwareBootUnit'], ["Get"]),
        "modular": MoMeta("FirmwareBootDefinition", "firmwareBootDefinition", "fw-boot-def", VersionMeta.Version2013e, "OutputOnly", 0xf, [], ["admin", "read-only", "user"], [u'biosUnit', u'mgmtController', u'storageController'], [u'firmwareBootUnit'], ["Get"])
    }
    # Per-platform property metadata (XML attribute name, type, access,
    # validation pattern and allowed values) for each MO property.
    prop_meta = {
        "classic": {
            "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version151f, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
            "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, 0x2, 0, 255, None, [], []),
            "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, 0x4, 0, 255, None, [], []),
            "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
            "type": MoPropertyMeta("type", "type", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, None, None, None, ["adaptor", "blade-bios", "blade-controller", "fex", "sas-expander", "sioc", "storage-controller", "system"], []),
        },
        "modular": {
            "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
            "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, 0x2, 0, 255, None, [], []),
            "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, 0x4, 0, 255, None, [], []),
            "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
            "type": MoPropertyMeta("type", "type", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["adaptor", "blade-bios", "blade-controller", "fex", "sas-expander", "sioc", "storage-controller", "system"], []),
        },
    }
    # XML attribute name -> Python attribute name mapping, per platform.
    prop_map = {
        "classic": {
            "childAction": "child_action",
            "dn": "dn",
            "rn": "rn",
            "status": "status",
            "type": "type",
        },
        "modular": {
            "childAction": "child_action",
            "dn": "dn",
            "rn": "rn",
            "status": "status",
            "type": "type",
        },
    }
    def __init__(self, parent_mo_or_dn, **kwargs):
        # Initialize readable properties to None before delegating to the
        # ManagedObject base, which applies **kwargs over them.
        self._dirty_mask = 0
        self.child_action = None
        self.status = None
        self.type = None
        ManagedObject.__init__(self, "FirmwareBootDefinition", parent_mo_or_dn, **kwargs)
| {
"content_hash": "9802aa4283551ed1865b43b3a8fc7f74",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 284,
"avg_line_length": 49.61538461538461,
"alnum_prop": 0.6142118863049095,
"repo_name": "ragupta-git/ImcSdk",
"id": "157b9fd62ddb89b2f085f663bf6af4a219605024",
"size": "3870",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imcsdk/mometa/firmware/FirmwareBootDefinition.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1042023"
}
],
"symlink_target": ""
} |
# Root URL configuration for the project.
# NOTE(review): ``patterns()`` and dotted-string view references were
# deprecated in Django 1.8 and removed in 1.10; fine for the version this
# project pins, but this file needs rewriting as a plain list on upgrade --
# confirm the pinned Django version.
from django.conf.urls import patterns, include, url
from about.views import MainView
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
# Import admin.py from every installed app so its models register.
admin.autodiscover()
urlpatterns = patterns('',
    url(r'^admin/', include(admin.site.urls)),
    url(r'^$', MainView.as_view(), name='home'),
    url(r'^about/', include('about.urls')),
    url(r'^web/', include('webdev.urls')),
    url(r'^enterprise/', include('enterprise.urls')),
    url(r'^contact/', include('contact.urls')),
    # Built-in auth views, addressed by dotted path (old style); the ``/?``
    # makes the trailing slash optional.
    url(r'^login/?$', 'django.contrib.auth.views.login', {'template_name':'auth/login.html'}, name='login'),
    url(r'^logout/?$', 'django.contrib.auth.views.logout_then_login', {'login_url':'/'}, name='logout'),
    url(r'^portal/', include('portal.urls')),
)
# Serve uploaded media straight from Django during development only;
# static() returns [] when DEBUG is off.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| {
"content_hash": "90903e7150897b702328a33858f5a65c",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 108,
"avg_line_length": 36.12,
"alnum_prop": 0.6699889258028793,
"repo_name": "m-clark/mclarkpw",
"id": "e7cf1bc288901769d3f4ee1a46d2c793d53abd76",
"size": "903",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "mclarkpw/mclarkpw/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "165494"
},
{
"name": "JavaScript",
"bytes": "1842"
},
{
"name": "Python",
"bytes": "43304"
},
{
"name": "Ruby",
"bytes": "874"
}
],
"symlink_target": ""
} |
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.transient_error import TransientError # noqa: F401,E501
from orcid_api_v3.models.transient_non_empty_string import TransientNonEmptyString # noqa: F401,E501
from orcid_api_v3.models.url_v30_rc2 import UrlV30Rc2 # noqa: F401,E501
class ExternalIDV30Rc2(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> swagger type; drives the recursion in to_dict().
    swagger_types = {
        'external_id_type': 'str',
        'external_id_value': 'str',
        'external_id_normalized': 'TransientNonEmptyString',
        'external_id_normalized_error': 'TransientError',
        'external_id_url': 'UrlV30Rc2',
        'external_id_relationship': 'str'
    }
    # Attribute name -> JSON key as it appears on the wire.
    attribute_map = {
        'external_id_type': 'external-id-type',
        'external_id_value': 'external-id-value',
        'external_id_normalized': 'external-id-normalized',
        'external_id_normalized_error': 'external-id-normalized-error',
        'external_id_url': 'external-id-url',
        'external_id_relationship': 'external-id-relationship'
    }
    def __init__(self, external_id_type=None, external_id_value=None, external_id_normalized=None, external_id_normalized_error=None, external_id_url=None, external_id_relationship=None):  # noqa: E501
        """ExternalIDV30Rc2 - a model defined in Swagger"""  # noqa: E501
        # Private backing fields; all public access goes through the
        # properties defined below.
        self._external_id_type = None
        self._external_id_value = None
        self._external_id_normalized = None
        self._external_id_normalized_error = None
        self._external_id_url = None
        self._external_id_relationship = None
        # Swagger polymorphism hook; appears unused for this model.
        self.discriminator = None
        # Required fields: their setters raise ValueError on None.
        self.external_id_type = external_id_type
        self.external_id_value = external_id_value
        # Optional fields: assigned only when provided, so unset stays None.
        if external_id_normalized is not None:
            self.external_id_normalized = external_id_normalized
        if external_id_normalized_error is not None:
            self.external_id_normalized_error = external_id_normalized_error
        if external_id_url is not None:
            self.external_id_url = external_id_url
        if external_id_relationship is not None:
            self.external_id_relationship = external_id_relationship
    @property
    def external_id_type(self):
        """Gets the external_id_type of this ExternalIDV30Rc2.  # noqa: E501
        :return: The external_id_type of this ExternalIDV30Rc2.  # noqa: E501
        :rtype: str
        """
        return self._external_id_type
    @external_id_type.setter
    def external_id_type(self, external_id_type):
        """Sets the external_id_type of this ExternalIDV30Rc2.
        :param external_id_type: The external_id_type of this ExternalIDV30Rc2.  # noqa: E501
        :type: str
        """
        # Required field: reject None explicitly.
        if external_id_type is None:
            raise ValueError("Invalid value for `external_id_type`, must not be `None`")  # noqa: E501
        self._external_id_type = external_id_type
    @property
    def external_id_value(self):
        """Gets the external_id_value of this ExternalIDV30Rc2.  # noqa: E501
        :return: The external_id_value of this ExternalIDV30Rc2.  # noqa: E501
        :rtype: str
        """
        return self._external_id_value
    @external_id_value.setter
    def external_id_value(self, external_id_value):
        """Sets the external_id_value of this ExternalIDV30Rc2.
        :param external_id_value: The external_id_value of this ExternalIDV30Rc2.  # noqa: E501
        :type: str
        """
        # Required field: reject None explicitly.
        if external_id_value is None:
            raise ValueError("Invalid value for `external_id_value`, must not be `None`")  # noqa: E501
        self._external_id_value = external_id_value
    @property
    def external_id_normalized(self):
        """Gets the external_id_normalized of this ExternalIDV30Rc2.  # noqa: E501
        :return: The external_id_normalized of this ExternalIDV30Rc2.  # noqa: E501
        :rtype: TransientNonEmptyString
        """
        return self._external_id_normalized
    @external_id_normalized.setter
    def external_id_normalized(self, external_id_normalized):
        """Sets the external_id_normalized of this ExternalIDV30Rc2.
        :param external_id_normalized: The external_id_normalized of this ExternalIDV30Rc2.  # noqa: E501
        :type: TransientNonEmptyString
        """
        self._external_id_normalized = external_id_normalized
    @property
    def external_id_normalized_error(self):
        """Gets the external_id_normalized_error of this ExternalIDV30Rc2.  # noqa: E501
        :return: The external_id_normalized_error of this ExternalIDV30Rc2.  # noqa: E501
        :rtype: TransientError
        """
        return self._external_id_normalized_error
    @external_id_normalized_error.setter
    def external_id_normalized_error(self, external_id_normalized_error):
        """Sets the external_id_normalized_error of this ExternalIDV30Rc2.
        :param external_id_normalized_error: The external_id_normalized_error of this ExternalIDV30Rc2.  # noqa: E501
        :type: TransientError
        """
        self._external_id_normalized_error = external_id_normalized_error
    @property
    def external_id_url(self):
        """Gets the external_id_url of this ExternalIDV30Rc2.  # noqa: E501
        :return: The external_id_url of this ExternalIDV30Rc2.  # noqa: E501
        :rtype: UrlV30Rc2
        """
        return self._external_id_url
    @external_id_url.setter
    def external_id_url(self, external_id_url):
        """Sets the external_id_url of this ExternalIDV30Rc2.
        :param external_id_url: The external_id_url of this ExternalIDV30Rc2.  # noqa: E501
        :type: UrlV30Rc2
        """
        self._external_id_url = external_id_url
    @property
    def external_id_relationship(self):
        """Gets the external_id_relationship of this ExternalIDV30Rc2.  # noqa: E501
        :return: The external_id_relationship of this ExternalIDV30Rc2.  # noqa: E501
        :rtype: str
        """
        return self._external_id_relationship
    @external_id_relationship.setter
    def external_id_relationship(self, external_id_relationship):
        """Sets the external_id_relationship of this ExternalIDV30Rc2.
        :param external_id_relationship: The external_id_relationship of this ExternalIDV30Rc2.  # noqa: E501
        :type: str
        """
        # Closed vocabulary; anything else (including None) is rejected.
        allowed_values = ["PART_OF", "SELF", "VERSION_OF"]  # noqa: E501
        if external_id_relationship not in allowed_values:
            raise ValueError(
                "Invalid value for `external_id_relationship` ({0}), must be one of {1}"  # noqa: E501
                .format(external_id_relationship, allowed_values)
            )
        self._external_id_relationship = external_id_relationship
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively convert nested models/lists/dicts via their to_dict().
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated boilerplate for models that subclass dict; a no-op here.
        if issubclass(ExternalIDV30Rc2, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ExternalIDV30Rc2):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| {
"content_hash": "5185f4a282d393dca62da1cfb7d05b21",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 201,
"avg_line_length": 35.452,
"alnum_prop": 0.6224754597765993,
"repo_name": "Royal-Society-of-New-Zealand/NZ-ORCID-Hub",
"id": "619a410a2485e1d4b415745c8b55c650cb1e18f1",
"size": "8880",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "orcid_api_v3/models/external_idv30_rc2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20266"
},
{
"name": "Dockerfile",
"bytes": "3303"
},
{
"name": "HTML",
"bytes": "239338"
},
{
"name": "JavaScript",
"bytes": "2240"
},
{
"name": "Makefile",
"bytes": "600"
},
{
"name": "PLpgSQL",
"bytes": "2581"
},
{
"name": "Python",
"bytes": "7935510"
},
{
"name": "Shell",
"bytes": "12088"
}
],
"symlink_target": ""
} |
def extractKtranslations980580331WordpressCom(item):
	'''
	Feed parser for releases posted on 'ktranslations980580331.wordpress.com'.
	Returns a release message for known series, None for non-chapter posts,
	and False when the title matches no known series prefix.
	'''
	title = item['title']
	vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)
	lowered = title.lower()
	# Skip posts with no chapter/volume info, and preview posts.
	if "preview" in lowered or not (chp or vol):
		return None
	series_map = (
		('Dragon’s Legacy – ', 'Dragon\'s Legacy', 'translated'),
		('Accumulate Experience by Reading Books – ', 'Accumulate Experience by Reading Books', 'translated'),
		('Cat ', 'Me and My Beloved Cat (Girlfriend)', 'translated'),
	)
	for prefix, series, tl_type in series_map:
		if lowered.startswith(prefix.lower()):
			return buildReleaseMessageWithType(item, series, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
	return False
"content_hash": "28ff635fa001c28cb7bb3efb8dbbeb33",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 120,
"avg_line_length": 38.86363636363637,
"alnum_prop": 0.6163742690058479,
"repo_name": "fake-name/ReadableWebProxy",
"id": "eac92a702fe7c8e86c315e5d06a28ea1976cb5a1",
"size": "861",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractKtranslations980580331WordpressCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
import asyncio
import json
import base64
from . import AbstractStorage, Session
from Crypto.Cipher import AES
from Crypto import Random
class EncryptedCookieStorage(AbstractStorage):
    """Encrypted JSON cookie storage.

    The session mapping is serialized to JSON, padded to the AES block
    size, encrypted with AES-CBC under a fresh random IV, and stored
    base64-encoded in a single cookie.

    NOTE(review): the ciphertext carries no MAC, so tampering is only
    detected indirectly (when JSON decoding fails); an authenticated
    mode would be safer.
    """
    def __init__(self, secret_key, *, cookie_name="AIOHTTP_SESSION",
                 domain=None, max_age=None, path='/',
                 secure=None, httponly=True):
        """Initialize the storage.

        :param secret_key: AES key; must be 16, 24 or 32 bytes long.
        :raises TypeError: if the key length is not a valid AES key size.
        """
        super().__init__(cookie_name=cookie_name, domain=domain,
                         max_age=max_age, path=path, secure=secure,
                         httponly=httponly)
        self._secret_key = secret_key
        # Bug fix: the previous check (len % AES.block_size == 0) wrongly
        # rejected valid 24-byte keys and wrongly accepted sizes such as
        # 48 bytes, which then failed later inside AES.new().  AES.key_size
        # lists exactly the key lengths the cipher supports.
        if len(self._secret_key) not in AES.key_size:
            raise TypeError(
                'Secret key must be one of {} bytes in length'.format(
                    AES.key_size))
    @asyncio.coroutine
    def load_session(self, request):
        """Decrypt the session cookie and return the Session."""
        cookie = self.load_cookie(request)
        if cookie is None:
            # No cookie yet: start a fresh, empty session.
            return Session(None, new=True)
        else:
            cookie = base64.b64decode(cookie)
            # Cookie layout: IV (one AES block) followed by the ciphertext.
            iv = cookie[:AES.block_size]
            data = cookie[AES.block_size:]
            cipher = AES.new(self._secret_key, AES.MODE_CBC, iv)
            decrypted = cipher.decrypt(data)
            # json.loads tolerates the trailing-space padding that
            # save_session() appends.
            data = json.loads(decrypted.decode('utf-8'))
            return Session(None, data=data, new=False)
    @asyncio.coroutine
    def save_session(self, request, response, session):
        """Serialize, encrypt and store the session in the response cookie."""
        if session.empty:
            # Empty session: store the raw mapping (effectively clears it).
            return self.save_cookie(response, session._mapping)
        cookie_data = json.dumps(self._get_session_data(session)).encode(
            'utf-8')
        if len(cookie_data) % AES.block_size != 0:
            # CBC needs whole blocks; pad with spaces, which the JSON
            # parser ignores on load.
            to_pad = AES.block_size - (len(cookie_data) % AES.block_size)
            cookie_data += b' ' * to_pad
        iv = Random.new().read(AES.block_size)
        cipher = AES.new(self._secret_key, AES.MODE_CBC, iv)
        encrypted = cipher.encrypt(cookie_data)
        # Prepend the IV so load_session() can rebuild the cipher.
        encrypted = iv + encrypted
        b64coded = base64.b64encode(encrypted).decode('utf-8')
        self.save_cookie(response, b64coded)
| {
"content_hash": "a2c45d74493a1ad1d23f40344d1ec268",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 73,
"avg_line_length": 36.6551724137931,
"alnum_prop": 0.587958607714017,
"repo_name": "graingert/aiohttp_session",
"id": "5a9f9b6f54d7d8aeee2ce1702eb0d617780261ff",
"size": "2126",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aiohttp_session/cookie_storage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "926"
},
{
"name": "Python",
"bytes": "39076"
}
],
"symlink_target": ""
} |
class HelloWorld(object):
    """
    Simple test of the basic architecture of the project.

    Stores a message at construction time and prints it on demand.
    """
    def __init__(self, text='Hello world!'):
        """
        Create a HelloWorld object.

        @param text: text to print. Set to 'Hello world!' if omitted.
        """
        self._text = text

    def print_text(self):
        """
        Print the text stored inside of the object to the terminal.
        """
        # Bug fix: the original used the Python-2-only statement form
        # `print self._text`, a SyntaxError under Python 3.  With a single
        # argument, print() behaves identically on Python 2 and 3.
        print(self._text)
| {
"content_hash": "2523ee8c2c74a754bde0f6422559d467",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 70,
"avg_line_length": 26.705882352941178,
"alnum_prop": 0.5572687224669604,
"repo_name": "MatthewPeterKelly/numerical-methods-in-python",
"id": "be7039e1d76b4d8842217d4269ce9d7746842c7b",
"size": "454",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numerical_methods/hello_world.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12364"
}
],
"symlink_target": ""
} |
"""Pickle field implementation for Django."""
from copy import deepcopy
from base64 import b64encode, b64decode
from zlib import compress, decompress
try:
from cPickle import loads, dumps
except ImportError:
from pickle import loads, dumps
from django.db import models
from django.utils.encoding import force_unicode
from virtenviro.picklefield import DEFAULT_PROTOCOL
class PickledObject(str):
    """
    A subclass of string so it can be told whether a string is a pickled
    object or not (if the object is an instance of this class then it must
    [well, should] be a pickled one).

    Only really useful for passing pre-encoded values to ``default``
    with ``dbsafe_encode``, not that doing so is necessary. If you
    remove PickledObject and its references, you won't be able to pass
    in pre-encoded values anymore, but you can always just pass in the
    python objects themselves.
    """
def dbsafe_encode(value, compress_object=False, pickle_protocol=DEFAULT_PROTOCOL):
    """Pickle *value* and return it base64-encoded as a PickledObject.

    The value is deepcopy()'d first: cPickle can emit different byte
    streams for the same value depending on how it is referenced, and the
    field's lookups are plain string comparisons, so the stream must be
    reproducible.  See tests.py for more information.
    """
    pickled = dumps(deepcopy(value), pickle_protocol)
    if compress_object:
        pickled = compress(pickled)
    return PickledObject(b64encode(pickled))
def dbsafe_decode(value, compress_object=False):
    """Reverse dbsafe_encode(): base64-decode, optionally decompress, unpickle."""
    raw = b64decode(value)
    if compress_object:
        raw = decompress(raw)
    return loads(raw)
class PickledObjectField(models.Field):
    """
    A field that will accept *any* python object and store it in the
    database. PickledObjectField will optionally compress its values if
    declared with the keyword argument ``compress=True``.

    Does not actually encode and compress ``None`` objects (although you
    can still do lookups using None). This way, it is still possible to
    use the ``isnull`` lookup type correctly.
    """
    __metaclass__ = models.SubfieldBase
    def __init__(self, *args, **kwargs):
        # Pop our custom options before handing the rest to models.Field.
        self.compress = kwargs.pop('compress', False)
        self.protocol = kwargs.pop('protocol', DEFAULT_PROTOCOL)
        # Pickled blobs are opaque to users, so hide the field from model
        # forms unless explicitly requested.
        kwargs.setdefault('editable', False)
        super(PickledObjectField, self).__init__(*args, **kwargs)
    def get_default(self):
        """
        Returns the default value for this field.

        The default implementation on models.Field calls force_unicode
        on the default, which means you can't set arbitrary Python
        objects as the default. To fix this, we just return the value
        without calling force_unicode on it. Note that if you set a
        callable as a default, the field will still call it. It will
        *not* try to pickle and encode it.
        """
        if self.has_default():
            if callable(self.default):
                return self.default()
            return self.default
        # If the field doesn't have a default, then we punt to models.Field.
        return super(PickledObjectField, self).get_default()
    def to_python(self, value):
        """
        B64decode and unpickle the object, optionally decompressing it.

        If an error is raised in de-pickling and we're sure the value is
        a definite pickle, the error is allowed to propagate. If we
        aren't sure if the value is a pickle or not, then we catch the
        error and return the original value instead.
        """
        if value is not None:
            try:
                value = dbsafe_decode(value, self.compress)
            # Bug fix: this was a bare ``except:``, which also swallowed
            # SystemExit/KeyboardInterrupt.  ``Exception`` keeps the
            # intended "maybe it wasn't a pickle" fallback without that.
            except Exception:
                # If the value is a definite pickle and an error is raised
                # in de-pickling, it should be allowed to propagate.
                if isinstance(value, PickledObject):
                    raise
        return value
    def get_db_prep_value(self, value):
        """
        Pickle and b64encode the object, optionally compressing it.

        The pickling protocol is specified explicitly (by default 2),
        rather than as -1 or HIGHEST_PROTOCOL, because we don't want the
        protocol to change over time. If it did, ``exact`` and ``in``
        lookups would likely fail, since pickle would now be generating
        a different string.
        """
        if value is not None and not isinstance(value, PickledObject):
            # We call force_unicode here explicitly, so that the encoded string
            # isn't rejected by the postgresql_psycopg2 backend. Alternatively,
            # we could have just registered PickledObject with the psycopg
            # marshaller (telling it to store it like it would a string), but
            # since both of these methods result in the same value being stored,
            # doing things this way is much easier.
            value = force_unicode(dbsafe_encode(value, self.compress, self.protocol))
        return value
    def value_to_string(self, obj):
        # Serialization hook (e.g. dumpdata): store the encoded blob.
        value = self._get_val_from_obj(obj)
        return self.get_db_prep_value(value)
    def get_internal_type(self):
        # The encoded pickle is stored as text.
        return 'TextField'
    def get_db_prep_lookup(self, lookup_type, value, connection=None, prepared=False):
        # Only whole-value comparisons make sense against an opaque blob.
        if lookup_type not in ['exact', 'in', 'isnull']:
            raise TypeError('Lookup type %s is not supported.' % lookup_type)
        # The Field model already calls get_db_prep_value before doing the
        # actual lookup, so all we need to do is limit the lookup types.
        try:
            return super(PickledObjectField, self).get_db_prep_lookup(
                lookup_type, value, connection=connection, prepared=prepared)
        except TypeError:
            # Try not to break on older versions of Django, where the
            # `connection` and `prepared` parameters are not available.
            return super(PickledObjectField, self).get_db_prep_lookup(
                lookup_type, value)
# South support; see http://south.aeracode.org/docs/tutorial/part4.html#simple-inheritance
# Register this field with South's model inspector so schema migrations can
# freeze it.  Silently skipped when South is not installed.
try:
    from south.modelsinspector import add_introspection_rules
except ImportError:
    pass
else:
    add_introspection_rules([], [r"^picklefield\.fields\.PickledObjectField"])
| {
"content_hash": "e010179a1b8aa58226f0a170758ff00e",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 90,
"avg_line_length": 42.473684210526315,
"alnum_prop": 0.6685254027261462,
"repo_name": "Haikson/virtenviro",
"id": "1ed9d2e6bbad6c52813f636f5a9c639273131b38",
"size": "6480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "virtenviro/picklefield/fields.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "48"
},
{
"name": "CSS",
"bytes": "237430"
},
{
"name": "HTML",
"bytes": "190190"
},
{
"name": "JavaScript",
"bytes": "204448"
},
{
"name": "PHP",
"bytes": "2199"
},
{
"name": "Python",
"bytes": "193113"
},
{
"name": "Ruby",
"bytes": "322"
}
],
"symlink_target": ""
} |
"""Generates pybind11 bindings code for functions."""
from typing import Generator, List, Optional
from clif.protos import ast_pb2
from clif.pybind11 import utils
I = utils.I
# Each table maps a Python magic-method name to a pair of
# (expected C++ operator name, pybind11 operator token).  The C++ name is
# what needs_operator_overloading() matches against; the token is spliced
# into the generated `.def(...)` call.
UNARY_OPS = {
    '__neg__': ('operator-', '-'),
    '__pos__': ('operator+', '+'),
    '__inv__': ('operator~', '~'),
    '__invert__': ('operator~', '~'),
    '__bool__': ('operator bool', '!'),
    '__int__': ('operator int', 'int_'),
    # pybind11 requires `operator double` for `__float__`. See
    # https://third_party/pybind11/include/pybind11/operators.h;l=191;rcl=480132896
    '__float__': ('operator double', 'float_'),
}
BINARY_OPS = {
    '__sub__': ('operator-', '-'),
    '__add__': ('operator+', '+'),
    '__mul__': ('operator*', '*'),
    '__div__': ('operator/', '/'),
    '__truediv__': ('operator/', '/'),
    '__mod__': ('operator%', '%'),
    '__lshift__': ('operator<<', '<<'),
    '__rshift__': ('operator>>', '>>'),
    '__and__': ('operator&', '&'),
    '__xor__': ('operator^', '^'),
    '__eq__': ('operator==', '=='),
    '__ne__': ('operator!=', '!='),
    '__or__': ('operator|', '|'),
    '__gt__': ('operator>', '>'),
    '__ge__': ('operator>=', '>='),
    '__lt__': ('operator<', '<'),
    '__le__': ('operator<=', '<='),
}
INPLACE_OPS = {
    '__iadd__': ('operator+=', '+='),
    '__isub__': ('operator-=', '-='),
    '__imul__': ('operator*=', '*='),
    '__idiv__': ('operator/=', '/='),
    '__itruediv__': ('operator/=', '/='),
    '__imod__': ('operator%=', '%='),
    '__ilshift__': ('operator<<=', '<<='),
    '__irshift__': ('operator>>=', '>>='),
    '__iand__': ('operator&=', '&='),
    '__ixor__': ('operator^=', '^='),
    '__ior__': ('operator|=', '|='),
}
REFLECTED_OPS = {
    '__radd__': ('operator+', '+'),
    '__rsub__': ('operator-', '-'),
    '__rmul__': ('operator*', '*'),
    '__rdiv__': ('operator/', '/'),
    '__rtruediv__': ('operator/', '/'),
    '__rmod__': ('operator%', '%'),
    '__rlshift__': ('operator<<', '<<'),
    '__rrshift__': ('operator>>', '>>'),
    '__rand__': ('operator&', '&'),
    # NOTE(review): '~' here differs from __xor__'s '^' above -- looks like a
    # possible typo for ('operator^', '^'); confirm against upstream before
    # relying on reflected-xor bindings.
    '__rxor__': ('operator~', '~'),
    '__ror__': ('operator|', '|'),
}
# Merged lookup table; the assert in needs_operator_overloading() checks the
# four source tables share no keys.
SUPPORTED_OPS = {**UNARY_OPS, **BINARY_OPS, **INPLACE_OPS, **REFLECTED_OPS}
# Operators that pybind11's py::self machinery cannot express; these get
# special argument handling in fix_py_args_for_unsupported_operators_in_place.
UNSUPPORTED_BINARY_OPS = frozenset([
    '__floordiv__',
])
UNSUPPORTED_INPLACE_OPS = frozenset([
    '__ifloordiv__',
])
UNSUPPORTED_REFLECTED_OPS = frozenset([
    '__rfloordiv__',
])
def fix_py_args_for_unsupported_operators_in_place(
    func_decl: ast_pb2.FuncDecl, py_args: List[str]) -> None:
  """Drops the `self` py::arg for operators declared as free functions.

  Users sometimes implement operators as free functions rather than C++
  member functions, in which case the PyCLIF AST carries an extra `self`
  parameter.  The generated pybind11 code always declares operators as
  member functions, so the corresponding py::arg must be removed.

  Args:
    func_decl: AST function declaration in proto format.
    py_args: A list of strings that are like `py::arg('a')`; edited in place.
  """
  if len(py_args) != 2:
    return
  native = func_decl.name.native
  if native in UNSUPPORTED_BINARY_OPS or native in UNSUPPORTED_INPLACE_OPS:
    # `self` is the left-hand operand, i.e. the first argument.
    del py_args[0]
  elif native in UNSUPPORTED_REFLECTED_OPS:
    # Reflected operators take `self` as the right-hand operand, i.e. last.
    del py_args[-1]
def needs_operator_overloading(func_decl: ast_pb2.FuncDecl) -> bool:
  """Returns whether operator overloading is needed for the function."""
  native = func_decl.name.native
  # Guard against key collisions when the four source tables are merged.
  assert len(SUPPORTED_OPS) == (
      len(UNARY_OPS) + len(BINARY_OPS) + len(INPLACE_OPS) + len(REFLECTED_OPS))
  if native not in SUPPORTED_OPS:
    return False
  # If the user does not use the pre-defined C++ operator for this magic
  # method, fall back to normal function generation.
  expected_cpp_operator = SUPPORTED_OPS[native][0]
  return func_decl.name.cpp_name.rsplit('::', 1)[-1] == expected_cpp_operator
def generate_operator(
    module_name: str,
    func_decl: ast_pb2.FuncDecl,
    class_decl: Optional[ast_pb2.ClassDecl] = None
) -> Generator[str, None, None]:
  """Generates operator overload functions.

  Args:
    module_name: String containing the outer module name.
    func_decl: AST function declaration in proto format.
    class_decl: Outer class declaration in proto format. None if this is not
      a member function.

  Yields:
    One line of pybind11 operator-overload bindings code ('' when the
    operator kind is not recognized).
  """
  native = func_decl.name.native
  class_py_name = _get_class_py_name(func_decl, class_decl)
  assert class_py_name, f'Invalid operator declaration: {func_decl}'
  if native in UNARY_OPS:
    line = _generate_unary_operator(module_name, func_decl)
  elif native in BINARY_OPS:
    line = _generate_binary_operator(module_name, func_decl, class_py_name)
  elif native in INPLACE_OPS:
    line = _generate_inplace_operator(module_name, func_decl, class_py_name)
  elif native in REFLECTED_OPS:
    line = _generate_reflected_operator(module_name, func_decl, class_py_name)
  else:
    line = ''
  yield line
def _get_class_py_name(func_decl: ast_pb2.FuncDecl,
                       class_decl: Optional[ast_pb2.ClassDecl] = None) -> str:
  """Finds the Python name of the class that defines the operator function."""
  if class_decl:
    return class_decl.name.native
  # For free functions, take it from the parameter that carries the
  # instance: the first one, except for reflected operators where the
  # instance is the right-hand (last) operand.
  native = func_decl.name.native
  if native in UNARY_OPS or native in BINARY_OPS or native in INPLACE_OPS:
    return func_decl.params[0].name.native
  if native in REFLECTED_OPS:
    return func_decl.params[-1].name.native
  return ''
def _generate_unary_operator(module_name: str,
                             func_decl: ast_pb2.FuncDecl) -> str:
  """Generates bindings code for unary operators."""
  native = func_decl.name.native
  assert native in UNARY_OPS, f'unsupported unary operator: {native}'
  _, op_token = UNARY_OPS[native]
  return f'{module_name}.def({op_token}(py::self));'
def _generate_binary_operator(module_name: str,
                              func_decl: ast_pb2.FuncDecl,
                              class_py_name: str) -> str:
  """Generates bindings code for binary operators."""
  native = func_decl.name.native
  assert native in BINARY_OPS, f'unsupported binary operator: {native}'
  assert func_decl.params, f'function {native} does not have any parameters'
  _, op_token = BINARY_OPS[native]
  # Member-style declarations carry only the rhs parameter; free-function
  # declarations carry (lhs, rhs).
  rhs_index = 0 if len(func_decl.params) == 1 else 1
  rhs = _convert_param_to_operand(func_decl.params[rhs_index], class_py_name)
  return f'{module_name}.def(py::self {op_token} {rhs});'
def _generate_inplace_operator(module_name: str,
                               func_decl: ast_pb2.FuncDecl,
                               class_py_name: str) -> str:
  """Generates bindings code for in-place (augmented assignment) operators."""
  native = func_decl.name.native
  assert native in INPLACE_OPS, f'unsupported inplace operator: {native}'
  assert func_decl.params, f'function {native} does not have any parameters'
  _, op_token = INPLACE_OPS[native]
  rhs = _convert_param_to_operand(func_decl.params[0], class_py_name)
  return f'{module_name}.def(py::self {op_token} {rhs});'
def _generate_reflected_operator(module_name: str,
                                 func_decl: ast_pb2.FuncDecl,
                                 class_py_name: str) -> str:
  """Generates bindings code for reflected (right-hand) operators."""
  native = func_decl.name.native
  assert native in REFLECTED_OPS, f'unsupported reflected operator: {native}'
  assert func_decl.params, f'function {native} does not have any parameters'
  _, op_token = REFLECTED_OPS[native]
  # The class instance is the right-hand operand, so py::self goes last.
  lhs = _convert_param_to_operand(func_decl.params[0], class_py_name)
  return f'{module_name}.def({lhs} {op_token} py::self);'
def _convert_param_to_operand(param: ast_pb2.ParamDecl,
                              class_py_name: str) -> str:
  """Renders a parameter as a pybind11 operand expression.

  A parameter of the class's own Python type becomes `py::self`; any other
  type becomes a default-constructed value of its C++ type.
  """
  if param.type.lang_type == class_py_name:
    return 'py::self'
  return f'({param.type.cpp_type}){{}}'
| {
"content_hash": "3f3a64284d49601fc1811166a2d73d23",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 83,
"avg_line_length": 35.663636363636364,
"alnum_prop": 0.600815702268672,
"repo_name": "google/clif",
"id": "9f61e3fed0d4169fe526c364104f8a18acd68534",
"size": "8421",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "clif/pybind11/operators.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "4035"
},
{
"name": "C++",
"bytes": "685973"
},
{
"name": "CMake",
"bytes": "29813"
},
{
"name": "Dockerfile",
"bytes": "4053"
},
{
"name": "Python",
"bytes": "742833"
},
{
"name": "Starlark",
"bytes": "28337"
}
],
"symlink_target": ""
} |
import sys
import os
import shlex
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode'
]
if on_rtd:
extensions.append('sphinxcontrib.napoleon')
else:
extensions.append('sphinx.ext.napoleon')
napoleon_use_ivar = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'py1090'
copyright = '2015, Jonas Lieb'
author = 'Jonas Lieb'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0a1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
html_theme_path = ['_themes']
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'classic'
html_theme_options = {
"sidebarbgcolor": "#f2f2f2",
"sidebartextcolor": "#000000",
"sidebarlinkcolor": "#101010",
"headbgcolor": "#f2f2f2",
"headtextcolor": "#101010",
"footerbgcolor": "#101010",
"footertextcolor": "#ffffff",
"relbarbgcolor": "#101010",
"relbartextcolor": "#ffffff",
"bodyfont": "Open Sans, sans-serif",
"headfont": "Open Sans, sans-serif",
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'py1090doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'py1090.tex', 'py1090 Documentation',
'Jonas Lieb', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'py1090', 'py1090 Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'py1090', 'py1090 Documentation',
author, 'py1090', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'https://docs.python.org/3.4': None,
'http://matplotlib.org/basemap': None
}
# Autoclass options
autoclass_content = 'both'
autodoc_default_flags = ['members', 'show-inheritance']
| {
"content_hash": "ed05c61b4636ada9dc18f11801230e8d",
"timestamp": "",
"source": "github",
"line_count": 318,
"max_line_length": 79,
"avg_line_length": 31.528301886792452,
"alnum_prop": 0.6817275084779573,
"repo_name": "jojonas/py1090",
"id": "00ddef18be148d14799252ae827be07fa67286c7",
"size": "10481",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/source/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20556"
}
],
"symlink_target": ""
} |
"""models Fastly API concepts
No dependencies on Terraform should be in this module"""
from collections import namedtuple
import re
import os
class FastlyVCL:
    """A line-oriented representation of a Fastly VCL file.

    Knows how to locate ``#FASTLY <section>`` macro marker lines and to
    produce new ``FastlyVCL`` instances with extra statements spliced in
    around them (instances are never mutated in place).
    """

    @classmethod
    def from_string(cls, content):
        """Build a FastlyVCL from a single VCL source string."""
        return cls(content.splitlines())

    def __init__(self, lines):
        # lines: list of str, one per VCL source line, without trailing newlines
        self._lines = lines

    def __eq__(self, another):
        # Fix: comparing against a non-FastlyVCL (e.g. a plain string) used to
        # raise AttributeError because ``another._lines`` was accessed
        # unconditionally; defer to the other operand's type instead.
        if not isinstance(another, FastlyVCL):
            return NotImplemented
        return self._lines == another._lines

    def __str__(self):
        return "\n".join(self._lines)

    def __repr__(self):
        return "FastlyVCL(%s)" % repr(self._lines)

    def insert(self, section, hook, statements):
        """Return a new FastlyVCL with *statements* spliced in near *section*.

        hook='after' inserts a blank line then the statements just below the
        ``#FASTLY section`` marker; hook='before' inserts the statements then a
        blank line just above it. Raises FastlyCustomVCLGenerationError when
        the section marker cannot be found.
        """
        section_start = self._find_section_start(section)
        lines = list(self._lines)
        if hook == 'after':
            lines[section_start + 1:section_start + 1] = [' %s' % s for s in statements]
            lines.insert(section_start + 1, '')
        if hook == 'before':
            lines.insert(section_start, '')
            lines[section_start:section_start] = [' %s' % s for s in statements]
        return FastlyVCL(lines)

    def _find_section_start(self, section):
        """Index of the ``#FASTLY <section>`` marker line, or raise."""
        lookup = r"^( *)#FASTLY %s" % section
        section_start = None
        for i, line in enumerate(self._lines):
            m = re.match(lookup, line)
            if m:
                section_start = i
                break
        if section_start is None:
            raise FastlyCustomVCLGenerationError("Cannot match %s into main VCL template:\n\n%s" % (lookup, str(self)))
        return section_start
class FastlyVCLInclusion(namedtuple('FastlyVCLInclusion', ['name', 'type', 'hook'])):
    """Directive to splice an ``include`` of a named VCL file into the main VCL."""

    def insert_include(self, main_vcl):
        """Return a copy of *main_vcl* with this inclusion's include block added."""
        wrapped = [
            '// BEGIN builder %s' % self.name,
            'include "%s"' % self.name,
            '// END builder %s' % self.name,
        ]
        return main_vcl.insert(self.type, self.hook, wrapped)
class FastlyVCLSnippet(namedtuple('FastlyVCLSnippet', ['name', 'content', 'type', 'hook'])):
    """A named VCL fragment used to augment Fastly's default VCL.

    Terraform limitations prevent handing these straight to the Fastly API,
    so a complete VCL is assembled by the builder instead. Field terminology
    follows https://docs.fastly.com/api/config#snippet
    """

    def as_inclusion(self):
        """Drop the content, keeping only the placement metadata."""
        snippet_name, _, vcl_type, vcl_hook = self
        return FastlyVCLInclusion(snippet_name, vcl_type, vcl_hook)
class FastlyVCLTemplate(namedtuple('FastlyVCLTemplate', ['name', 'content', 'type', 'hook'])):
    """A parameterisable VCL fragment; each instantiation is included under a caller-chosen name."""

    def as_inclusion(self, name):
        """Placement metadata for an instantiation of this template called *name*."""
        vcl_type, vcl_hook = self.type, self.hook
        return FastlyVCLInclusion(name, vcl_type, vcl_hook)
class FastlyCustomVCLGenerationError(Exception):
    """Raised when statements cannot be spliced into the main VCL template
    (e.g. a ``#FASTLY <section>`` marker line is missing)."""
    pass
# Directory holding the bundled VCL source files, next to this module.
_directory = os.path.join(os.path.dirname(__file__), 'fastly', 'vcl')
def _read_vcl_file(name):
    """Return the full text of the bundled VCL file *name* from ``_directory``."""
    # NOTE(review): no explicit encoding, so this reads with the locale's
    # preferred encoding; presumably the VCL files are ASCII/UTF-8 — confirm
    # before pinning encoding='utf-8'.
    with open(os.path.join(_directory, name)) as fp:
        return fp.read()
# taken from https://docs.fastly.com/guides/vcl/mixing-and-matching-fastly-vcl-with-custom-vcl#fastlys-vcl-boilerplate
# Fastly expands #FASTLY macros into generated VCL
MAIN_VCL_TEMPLATE = FastlyVCL.from_string(_read_vcl_file('main.vcl'))
# Registry of the reusable VCL snippets shipped with the builder, keyed by
# snippet name; each entry records its content plus where it is spliced
# (subroutine `type` and `hook` = before/after the #FASTLY marker).
VCL_SNIPPETS = {
    'original-host': FastlyVCLSnippet(
        name='original-host',
        content=_read_vcl_file('original-host.vcl'),
        type='recv',
        hook='before'
    ),
    'gzip-by-content-type-suffix': FastlyVCLSnippet(
        name='gzip-by-content-type-suffix',
        content=_read_vcl_file('gzip-by-content-type-suffix.vcl'),
        type='fetch',
        hook='after'
    ),
    'office-webdav-200': FastlyVCLSnippet(
        name='office-webdav-200',
        content=_read_vcl_file('office-webdav-200.vcl'),
        type='recv',
        hook='after'
    ),
    'ping-status': FastlyVCLSnippet(
        name='ping-status',
        content=_read_vcl_file('ping-status.vcl'),
        type='recv',
        hook='after'
    ),
    'strip-non-journal-cookies': FastlyVCLSnippet(
        name='strip-non-journal-cookies',
        content=_read_vcl_file('strip-non-journal-cookies.vcl'),
        type='recv',
        hook='after'
    ),
    'journal-google-scholar': FastlyVCLSnippet(
        name='journal-google-scholar',
        content=_read_vcl_file('journal-google-scholar.vcl'),
        type='recv',
        hook='after'
    ),
    'journal-google-scholar-vary': FastlyVCLSnippet(
        name='journal-google-scholar-vary',
        content=_read_vcl_file('journal-google-scholar-vary.vcl'),
        type='deliver',
        hook='after'
    ),
}
# Parameterisable VCL templates (.vcl.tpl files), keyed by template name.
VCL_TEMPLATES = {
    'error-page': FastlyVCLTemplate(
        name='error-page',
        content=_read_vcl_file('error-page.vcl.tpl'),
        type='error',
        hook='after'
    )
}
| {
"content_hash": "8bff8d3e93a1cb38fcade9ba98b727e3",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 129,
"avg_line_length": 33.37956204379562,
"alnum_prop": 0.6055106057292805,
"repo_name": "elifesciences/builder",
"id": "9363d059d450eac27b71400b1a06f585f687996b",
"size": "4573",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/buildercore/fastly.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HCL",
"bytes": "1182"
},
{
"name": "Python",
"bytes": "735556"
},
{
"name": "Shell",
"bytes": "33921"
},
{
"name": "Smarty",
"bytes": "142"
},
{
"name": "VCL",
"bytes": "4406"
}
],
"symlink_target": ""
} |
from django.conf.urls import url, include
from django.contrib import admin
from django.views.generic import TemplateView
urlpatterns = [
    # Landing page: render the static index template directly.
    url(r'^$', TemplateView.as_view(template_name="index.html"), name='Index'),
    # Version 1 REST API; the routes themselves live in the ``contacts`` app.
    url(r'^api/v1/', include('contacts.urls')),
    # Django admin interface.
    url(r'^admin/', admin.site.urls),
]
| {
"content_hash": "d311ce8632ce9083695cad06f2b83f72",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 79,
"avg_line_length": 30.7,
"alnum_prop": 0.7003257328990228,
"repo_name": "topix-hackademy/contact-tools",
"id": "95220854a6e9a509c64bf5d5ea216fc388fa60b7",
"size": "307",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contacttools/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "327"
},
{
"name": "HTML",
"bytes": "938"
},
{
"name": "Python",
"bytes": "61355"
}
],
"symlink_target": ""
} |
# Sphinx configuration for building the django-filer documentation.
import datetime
import os
import sys
# Make the repository root importable so ``filer`` (for __version__) resolves.
sys.path.append(os.path.abspath('../'))
from filer import __version__
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'django-filer'
copyright = '%s, Stefan Foulis' % (datetime.date.today().year,)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.'.join(__version__.split('.')[0:2])
# The full version, including alpha/beta/rc tags.
release = __version__
# ``tags`` is injected into conf.py's namespace by Sphinx itself.
# NOTE: this is a substring test — any of these letters anywhere in the
# release string (e.g. the 'r' in '1.0rc1') marks the build as 'develop'.
for c in ('a', 'b', 'dev', 'r'):
    if c in release:
        tags.add('develop')
        break
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-filerdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'django-filer.tex', 'django-filer Documentation',
     'Stefan Foulis', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'django-filer', u'django-filer Documentation',
     [u'Stefan Foulis'], 1)
]
# Example configuration for intersphinx: refer to the Python standard library.
# NOTE(review): this is the legacy unnamed-mapping form; newer Sphinx releases
# expect named keys like {'python': ('https://docs.python.org/3', None)}.
intersphinx_mapping = {'http://docs.python.org/': None}
| {
"content_hash": "df2445515c3a562bad8861a91c1a5458",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 84,
"avg_line_length": 32.37155963302752,
"alnum_prop": 0.70242312597421,
"repo_name": "webu/django-filer",
"id": "f8723efad2555c3051f012c4040199f5a9c9dc54",
"size": "7456",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1233"
},
{
"name": "HTML",
"bytes": "42603"
},
{
"name": "JavaScript",
"bytes": "49719"
},
{
"name": "Python",
"bytes": "447378"
},
{
"name": "Shell",
"bytes": "338"
}
],
"symlink_target": ""
} |
import asyncio
import logging
import sys
from datetime import datetime
from pathlib import Path
from shutil import rmtree, which
from tempfile import gettempdir
from typing import Any, Union
import click
from black_primer import lib
# Bundled default project config, shipped alongside this module.
DEFAULT_CONFIG = Path(__file__).parent / "primer.json"
# Timestamped scratch directory under the system temp dir, unique per run.
_timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
DEFAULT_WORKDIR = Path(gettempdir()) / f"primer.{_timestamp}"
LOG = logging.getLogger(__name__)
def _handle_debug(
    ctx: click.core.Context,
    param: Union[click.core.Option, click.core.Parameter],
    debug: Union[bool, int, str],
) -> Union[bool, int, str]:
    """Configure root logging: DEBUG when --debug was passed, INFO otherwise."""
    # ctx/param are required by the click option-callback protocol but unused.
    logging.basicConfig(
        format="[%(asctime)s] %(levelname)s: %(message)s (%(filename)s:%(lineno)d)",
        level=logging.DEBUG if debug else logging.INFO,
    )
    return debug
async def async_main(
    config: str,
    debug: bool,
    keep: bool,
    long_checkouts: bool,
    rebase: bool,
    workdir: str,
    workers: int,
) -> int:
    """Drive a primer run over the configured projects.

    Creates *workdir* if needed, refuses to run (-1) when no ``black``
    executable is on PATH, then awaits the project queue and returns its
    exit code. The workdir is removed afterwards unless *keep* is set.
    """
    work_path = Path(workdir)
    if not work_path.exists():
        LOG.debug(f"Creating {work_path}")
        work_path.mkdir()

    if not which("black"):
        LOG.error("Can not find 'black' executable in PATH. No point in running")
        return -1

    try:
        ret_val = await lib.process_queue(
            config, work_path, workers, keep, long_checkouts, rebase
        )
        return int(ret_val)
    finally:
        # Best-effort cleanup; runs on both return and exception paths.
        if not keep and work_path.exists():
            LOG.debug(f"Removing {work_path}")
            rmtree(work_path, onerror=lib.handle_PermissionError)
    # Fix: the original ended with an unreachable ``return -2`` here — the
    # try block always either returns or lets an exception propagate, so
    # control can never fall past the try/finally. Dead code removed.
# CLI entry point: every option below is collected into **kwargs and handed
# straight to async_main, so option names must match its parameters.
@click.command(context_settings={"help_option_names": ["-h", "--help"]})
@click.option(
    "-c",
    "--config",
    default=str(DEFAULT_CONFIG),
    type=click.Path(exists=True),
    show_default=True,
    help="JSON config file path",
)
@click.option(
    "--debug",
    is_flag=True,
    # Eager side effect: configures logging as soon as click parses the flag.
    callback=_handle_debug,
    show_default=True,
    help="Turn on debug logging",
)
@click.option(
    "-k",
    "--keep",
    is_flag=True,
    show_default=True,
    help="Keep workdir + repos post run",
)
@click.option(
    "-L",
    "--long-checkouts",
    is_flag=True,
    show_default=True,
    help="Pull big projects to test",
)
@click.option(
    "-R",
    "--rebase",
    is_flag=True,
    show_default=True,
    help="Rebase project if already checked out",
)
@click.option(
    "-w",
    "--workdir",
    default=str(DEFAULT_WORKDIR),
    type=click.Path(exists=False),
    show_default=True,
    help="Directory path for repo checkouts",
)
@click.option(
    "-W",
    "--workers",
    default=2,
    type=int,
    show_default=True,
    help="Number of parallel worker coroutines",
)
@click.pass_context
def main(ctx: click.core.Context, **kwargs: Any) -> None:
    """primer - prime projects for blackening... 🏴"""
    LOG.debug(f"Starting {sys.argv[0]}")
    # TODO: Change to asyncio.run when Black >= 3.7 only
    loop = asyncio.get_event_loop()
    try:
        # ctx.exit propagates async_main's return value as the process exit code.
        ctx.exit(loop.run_until_complete(async_main(**kwargs)))
    finally:
        loop.close()
if __name__ == "__main__":  # pragma: nocover
    main()
| {
"content_hash": "b16e10f31124ba16340214d62f018d8f",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 84,
"avg_line_length": 24.075757575757574,
"alnum_prop": 0.6214600377595972,
"repo_name": "TeamSPoon/logicmoo_workspace",
"id": "5903adc72d4867553c520ed1a9e05b5101c9bda4",
"size": "3220",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "packs_web/butterfly/lib/python3.7/site-packages/black_primer/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "342"
},
{
"name": "C",
"bytes": "1"
},
{
"name": "C++",
"bytes": "1"
},
{
"name": "CSS",
"bytes": "126627"
},
{
"name": "HTML",
"bytes": "839172"
},
{
"name": "Java",
"bytes": "11116"
},
{
"name": "JavaScript",
"bytes": "238700"
},
{
"name": "PHP",
"bytes": "42253"
},
{
"name": "Perl 6",
"bytes": "23"
},
{
"name": "Prolog",
"bytes": "440882"
},
{
"name": "PureBasic",
"bytes": "1334"
},
{
"name": "Rich Text Format",
"bytes": "3436542"
},
{
"name": "Roff",
"bytes": "42"
},
{
"name": "Shell",
"bytes": "61603"
},
{
"name": "TeX",
"bytes": "99504"
}
],
"symlink_target": ""
} |
import utaka.src.core.Object as Object
import utaka.src.accessControl.ObjectACP as ObjectACP
import utaka.src.accessControl.BucketACP as BucketACP
import utaka.src.logging.BucketLog as BucketLogging
import utaka.src.exceptions.ForbiddenException as ForbiddenException
def getObject(user, bucket, key, getMetadata, getData, byteRangeStart, byteRangeEnd, ifMatch, ifNotMatch, ifModifiedSince, ifNotModifiedSince, ifRange):
    """Fetch an object (and/or its metadata) after a 'read' ACL check; logs the access."""
    if not ObjectACP.checkUserPermission(user, bucket, key, 'read'):
        raise ForbiddenException.AccessDeniedException()
    result = Object.getObject(
        bucket=bucket,
        key=key,
        getMetadata=getMetadata,
        getData=getData,
        byteRangeStart=byteRangeStart,
        byteRangeEnd=byteRangeEnd,
        ifMatch=ifMatch,
        ifNotMatch=ifNotMatch,
        ifModifiedSince=ifModifiedSince,
        ifNotModifiedSince=ifNotModifiedSince,
        ifRange=ifRange,
    )
    BucketLogging.logKeyEvent(user, bucket, key, 'get')
    return result
def setObject(user, bucket, key, metadata, data, contentMd5, contentType, contentDisposition, contentEncoding, accessControlPolicy):
    """Store an object after a 'write' ACL check, apply its ACP, and log the write."""
    if not ObjectACP.checkUserPermission(user, bucket, key, 'write'):
        raise ForbiddenException.AccessDeniedException()
    result = Object.setObject(
        userid=user,
        bucket=bucket,
        key=key,
        metadata=metadata,
        data=data,
        content_md5=contentMd5,
        content_type=contentType,
        content_disposition=contentDisposition,
        content_encoding=contentEncoding,
    )
    ObjectACP.setObjectACP(bucket, key, accessControlPolicy)
    # result[2] — presumably the stored size; passed through to the log entry.
    BucketLogging.logKeyEvent(user, bucket, key, 'set', result[2])
    return result
def cloneObject(user, sourceBucket, sourceKey, destBucket, destKey, metadata, ifMatch, ifNotMatch, ifModifiedSince, ifNotModifiedSince, accessControlPolicy):
    """Copy an object; needs 'read' on the source and 'write' on the destination.

    Guard clauses preserve the original short-circuit: the destination
    permission is only checked once the source check has passed.
    """
    if not ObjectACP.checkUserPermission(user, sourceBucket, sourceKey, 'read'):
        raise ForbiddenException.AccessDeniedException()
    if not ObjectACP.checkUserPermission(user, destBucket, destKey, 'write'):
        raise ForbiddenException.AccessDeniedException()
    result = Object.cloneObject(user, sourceBucket, sourceKey, destBucket, destKey, metadata, ifMatch, ifNotMatch, ifModifiedSince, ifNotModifiedSince)
    ObjectACP.setObjectACP(destBucket, destKey, accessControlPolicy)
    # A clone is both a read of the source and a write of the destination.
    BucketLogging.logKeyEvent(user, sourceBucket, sourceKey, 'get')
    BucketLogging.logKeyEvent(user, destBucket, destKey, 'set', result[2])
    return result
def destroyObject(user, bucket, key):
    """Delete an object after a 'write' ACL check; logs the deletion."""
    if not ObjectACP.checkUserPermission(user, bucket, key, 'write'):
        raise ForbiddenException.AccessDeniedException()
    Object.destroyObject(bucket=bucket, key=key)
    BucketLogging.logKeyEvent(user, bucket, key, 'delete')
def getObjectACP(user, bucket, key):
    """Return the object's access-control policy; requires 'read_acp'."""
    if not ObjectACP.checkUserPermission(user, bucket, key, 'read_acp'):
        raise ForbiddenException.AccessDeniedException()
    policy = ObjectACP.getObjectACP(bucket, key)
    BucketLogging.logKeyEvent(user, bucket, key, 'get_acp')
    return policy
def setObjectACP(user, bucket, key, accessControlPolicy):
    """Replace the object's access-control policy; requires 'write_acp'."""
    if not ObjectACP.checkUserPermission(user, bucket, key, 'write_acp'):
        raise ForbiddenException.AccessDeniedException()
    ObjectACP.setObjectACP(bucket, key, accessControlPolicy)
    BucketLogging.logKeyEvent(user, bucket, key, 'set_acp') | {
"content_hash": "0dcfb98f806edd3770a9ffe8a492b3cb",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 278,
"avg_line_length": 63.166666666666664,
"alnum_prop": 0.8120052770448549,
"repo_name": "mattmillr/utaka",
"id": "ef88009201e990638d46ddf384486e1d79ba7fd2",
"size": "3033",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/core/ObjectWithACP.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "121737"
},
{
"name": "Shell",
"bytes": "44"
}
],
"symlink_target": ""
} |
import os
from modularodm import Q
from modularodm.exceptions import ModularOdmException
from framework.auth.core import User
from website import settings
from website.app import init_app
from website.conferences.model import Conference
def main():
    """Initialise the app (backends only, no routes) and run the population step."""
    init_app(set_backends=True, routes=False)
    # populate_conferences is defined later in this file; presumably it seeds
    # Conference records from MEETING_DATA below — confirm against full file.
    populate_conferences()
MEETING_DATA = {
'spsp2014': {
'name': 'Society for Personality and Social Psychology 2014',
'info_url': None,
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'asb2014': {
'name': 'Association of Southeastern Biologists 2014',
'info_url': 'http://www.sebiologists.org/meetings/talks_posters.html',
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'aps2014': {
'name': 'Association for Psychological Science 2014',
'info_url': 'http://centerforopenscience.org/aps/',
'logo_url': '/static/img/2014_Convention_banner-with-APS_700px.jpg',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'annopeer2014': {
'name': '#annopeer',
'info_url': None,
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'cpa2014': {
'name': 'Canadian Psychological Association 2014',
'info_url': None,
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'filaments2014': {
'name': 'National Radio Astronomy Observatory Filaments 2014',
'info_url': None,
'logo_url': 'https://science.nrao.edu/science/meetings/2014/'
'filamentary-structure/images/filaments2014_660x178.png',
'active': False,
'admins': [
'lvonschi@nrao.edu',
# 'Dkim@nrao.edu',
],
'public_projects': True,
'poster': True,
'talk': True,
},
'bitss2014': {
'name': 'Berkeley Initiative for Transparency in the Social Sciences Research Transparency Forum 2014',
'info_url': None,
'logo_url': os.path.join(
settings.STATIC_URL_PATH,
'img',
'conferences',
'bitss.jpg',
),
'active': False,
'admins': [
'gkroll@berkeley.edu',
'awais@berkeley.edu',
],
'public_projects': True,
'poster': False,
'talk': True,
},
'spsp2015': {
'name': 'Society for Personality and Social Psychology 2015',
'info_url': None,
'logo_url': None,
'active': False,
'admins': [
'meetings@spsp.org',
],
'poster': True,
'talk': True,
},
'aps2015': {
'name': 'Association for Psychological Science 2015',
'info_url': None,
'logo_url': 'http://www.psychologicalscience.org/images/APS_2015_Banner_990x157.jpg',
'active': True,
'admins': [
],
'public_projects': True,
'poster': True,
'talk': True,
},
'icps2015': {
'name': 'International Convention of Psychological Science 2015',
'info_url': None,
'logo_url': 'http://icps.psychologicalscience.org/wp-content/themes/deepblue/images/ICPS_Website-header_990px.jpg',
'active': False,
'admins': [
],
'public_projects': True,
'poster': True,
'talk': True,
},
'mpa2015': {
'name': 'Midwestern Psychological Association 2015',
'info_url': None,
'logo_url': 'http://www.midwesternpsych.org/resources/Pictures/MPA%20logo.jpg',
'active': True,
'admins': [
'mpa@kent.edu',
],
'public_projects': True,
'poster': True,
'talk': True,
},
'NCCC2015': {
'name': 'North Carolina Cognition Conference 2015',
'info_url': None,
'logo_url': None,
'active': False,
'admins': [
'aoverman@elon.edu',
],
'public_projects': True,
'poster': True,
'talk': True,
},
'VPRSF2015': {
'name': 'Virginia Piedmont Regional Science Fair 2015',
'info_url': None,
'logo_url': 'http://vprsf.org/wp-content/themes/VPRSF/images/logo.png',
'active': False,
'admins': [
'director@vprsf.org',
],
'public_projects': True,
'poster': True,
'talk': True,
},
'APRS2015': {
'name': 'UVA Annual Postdoctoral Research Symposium 2015',
'info_url': None,
'logo_url': 'http://s1.postimg.org/50qj9u6i7/GPA_Logo.jpg',
'active': False,
'admins': [
'mhurst@virginia.edu',
],
'public_projects': True,
'poster': True,
'talk': True,
},
'ASB2015': {
'name': 'Association of Southeastern Biologists 2015',
'info_url': None,
'logo_url': 'http://www.sebiologists.org/wp/wp-content/uploads/2014/09/banner_image_Large.png',
'active': False,
'admins': [
'amorris.mtsu@gmail.com',
],
'public_projects': True,
'poster': True,
'talk': True,
},
'TeaP2015': {
'name': 'Tagung experimentell arbeitender Psychologen 2015',
'info_url': None,
'logo_url': None,
'active': False,
'admins': [
],
'public_projects': True,
'poster': True,
'talk': True,
},
'VSSEF2015': {
'name': 'Virginia State Science and Engineering Fair 2015',
'info_url': 'http://www.vmi.edu/conferences/vssef/vssef_home/',
'logo_url': 'http://www.vmi.edu/uploadedImages/Images/Headers/vssef4.jpg',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'RMPA2015': {
'name': 'Rocky Mountain Psychological Association 2015',
'info_url': 'http://www.rockymountainpsych.org/uploads/7/4/2/6/7426961/85th_annual_rmpa_conference_program_hr.pdf',
'logo_url': 'http://www.rockymountainpsych.org/uploads/7/4/2/6/7426961/header_images/1397234084.jpg',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'ARP2015': {
'name': 'Association for Research in Personality 2015',
'info_url': 'http://www.personality-arp.org/conference/',
'logo_url': 'http://www.personality-arp.org/wp-content/uploads/conference/st-louis-arp.jpg',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'SEP2015': {
'name': 'Society of Experimental Psychologists Meeting 2015',
'info_url': 'http://faculty.virginia.edu/Society_of_Experimental_Psychologists/',
'logo_url': 'http://www.sepsych.org/nav/images/SEP-header.gif',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'Reid2015': {
'name': 'L. Starling Reid Undergraduate Psychology Conference 2015',
'info_url': 'http://avillage.web.virginia.edu/Psych/Conference',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'NEEPS2015': {
'name': 'Northeastern Evolutionary Psychology Conference 2015',
'info_url': 'http://neeps2015.weebly.com/',
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'VaACS2015': {
'name': 'Virginia Section American Chemical Society Student Poster Session 2015',
'info_url': 'http://virginia.sites.acs.org/',
'logo_url': 'http://virginia.sites.acs.org/Bulletin/15/UVA.jpg',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'MADSSCi2015': {
'name': 'Mid-Atlantic Directors and Staff of Scientific Cores & Southeastern Association of Shared Services 2015',
'info_url': 'http://madssci.abrf.org',
'logo_url': 'http://s24.postimg.org/qtc3baefp/2015madssci_seasr.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'NRAO2015': {
'name': 'National Radio Astronomy Observatory Accretion 2015',
'info_url': 'https://science.nrao.edu/science/meetings/2015/accretion2015/posters',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'ARCS2015': {
'name': 'Advancing Research Communication and Scholarship 2015',
'info_url': 'http://commons.pacificu.edu/arcs/',
'logo_url': 'http://commons.pacificu.edu/assets/md5images/4dfd167454e9f4745360a9550e189323.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'singlecasedesigns2015': {
'name': 'Single Case Designs in Clinical Psychology: Uniting Research and Practice',
'info_url': 'https://www.royalholloway.ac.uk/psychology/events/eventsarticles/singlecasedesignsinclinicalpsychologyunitingresearchandpractice.aspx',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'OSFM2015': {
'name': 'OSF for Meetings 2015',
'info_url': None,
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'JSSP2015': {
'name': 'Japanese Society of Social Psychology 2015',
'info_url': 'http://www.socialpsychology.jp/conf2015/index.html',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'4S2015': {
'name': 'Society for Social Studies of Science 2015',
'info_url': 'http://www.4sonline.org/meeting',
'logo_url': 'http://www.4sonline.org/ee/denver-skyline.jpg',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'IARR2016': {
'name': 'International Association for Relationship Research 2016',
'info_url': 'http://iarr.psych.utoronto.ca/',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'IA2015': {
'name': 'Inclusive Astronomy 2015',
'info_url': 'https://vanderbilt.irisregistration.com/Home/Site?code=InclusiveAstronomy2015',
'logo_url': 'https://vanderbilt.blob.core.windows.net/images/Inclusive%20Astronomy.jpg',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'R2RC': {
'name': 'Right to Research Coalition',
'info_url': None,
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'OpenCon2015': {
'name': 'OpenCon2015',
'info_url': 'http://opencon2015.org/',
'logo_url': 'http://s8.postimg.org/w9b30pxyd/Open_Con2015_new_logo.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'ESIP2015': {
'name': 'Earth Science Information Partners 2015',
'info_url': 'http://esipfed.org/',
'logo_url': 'http://s30.postimg.org/m2uz2g4pt/ESIP.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'SPSP2016': {
'name': 'Society for Personality and Social Psychology 2016 ',
'info_url': 'http://meeting.spsp.org',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'NACIII': {
'name': '2015 National Astronomy Consortium (NAC) III Workshop',
'info_url': 'https://info.nrao.edu/do/odi/meetings/2015/nac111/',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'CDS2015': {
'name': 'Cognitive Development Society 2015',
'info_url': 'http://meetings.cogdevsoc.org/',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'SEASR2016': {
'name': 'Southeastern Association of Shared Resources 2016',
'info_url': 'http://seasr.abrf.org',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'Accretion2015': {
'name': 'Observational Evidence of Gas Accretion onto Galaxies?',
'info_url': 'https://science.nrao.edu/science/meetings/2015/accretion2015',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'2020Futures': {
'name': 'U.S. Radio/Millimeter/Submillimeter Science Futures in the 2020s',
'info_url': 'https://science.nrao.edu/science/meetings/2015/2020futures/home',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'RMPA2016': {
'name': 'Rocky Mountain Psychological Association 2016',
'info_url': 'http://www.rockymountainpsych.org/convention-info.html',
'logo_url': 'http://www.rockymountainpsych.org/uploads/7/4/2/6/7426961/header_images/1397234084.jpg',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'CNI2015': {
'name': 'Coalition for Networked Information (CNI) Fall Membership Meeting 2015',
'info_url': 'https://wp.me/P1LncT-64s',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': False,
'talk': True,
},
'SWPA2016': {
'name': 'Southwestern Psychological Association Convention 2016',
'info_url': 'https://www.swpsych.org/conv_dates.php',
'logo_url': 'http://s28.postimg.org/xbwyqqvx9/SWPAlogo4.jpg',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'ESIP2016W': {
'name': 'Earth Science Information Partners Winter Meeting 2016',
'info_url': 'http://commons.esipfed.org/2016WinterMeeting',
'logo_url': 'http://s30.postimg.org/m2uz2g4pt/ESIP.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'MiamiBrainhack15': {
'name': 'University of Miami Brainhack 2015',
'info_url': 'http://brainhack.org/americas/',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'PsiChiRepository': {
'name': 'Psi Chi',
'info_url': 'http://www.psichi.org/?ResearchAdvisory#.VmBpeOMrI1g',
'logo_url': 'http://s11.postimg.org/4g2451vcz/Psi_Chi_Logo.png',
'admins': [
'research.director@psichi.org',
],
'field_names': {
'submission1': 'measures',
'submission2': 'materials',
'submission1_plural': 'measures/scales',
'submission2_plural': 'study materials',
'meeting_title_type': 'Repository',
'add_submission': 'materials',
'mail_subject': 'Title',
'mail_message_body': 'Measure or material short description',
'mail_attachment': 'Your measure/scale or material file(s)'
},
},
'GI2015': {
'name': 'Genome Informatics 2015',
'info_url': 'https://meetings.cshl.edu/meetings.aspx?meet=info&year=15',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'MADSSCi2016': {
'name': 'Mid-Atlantic Directors and Staff of Scientific Cores & Southeastern Association of Shared Services 2016',
'info_url': 'http://madssci.abrf.org',
'logo_url': 'http://madssci.abrf.org/sites/default/files/madssci-logo-bk.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'SMM2015': {
'name': 'The Society for Marine Mammalogy',
'info_url': 'https://www.marinemammalscience.org/conference/',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'TESS': {
'name': 'Time-sharing Experiments for the Social Sciences',
'info_url': 'http://www.tessexperiments.org',
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
'poster': False,
'talk': True,
'field_names': {
'submission1': 'poster',
'submission2': 'study',
'submission1_plural': 'posters',
'submission2_plural': 'studies',
'meeting_title_type': 'Studies',
'add_submission': 'studies',
}
},
'ASCERM2016': {
'name': 'ASCE Rocky Mountain Student Conference 2016',
'info_url': 'http://luninuxos.com/asce/',
'logo_url': 'http://s2.postimg.org/eaduh2ovt/2016_ASCE_Rocky_Mtn_banner.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': False,
'talk': True,
},
'ARCA2016': {
'name': '5th Applied Research Conference in Africa',
'info_url': 'http://www.arcaconference.org/',
'logo_url': 'http://www.arcaconference.org/images/ARCA_LOGO_NEW.JPG',
'active': True,
'admins': [],
'public_projects': True,
'poster': False,
'talk': True,
},
'CURCONF2016': {
'name': 'CUR Biennial Conference 2016',
'info_url': 'http://www.cur.org/conferences_and_events/biennial2016/',
'logo_url': 'http://s11.postimg.org/v8feuna4y/Conference_logo_eps.jpg',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'CATALISE2016': {
'name': 'Criteria and Terminology Applied to Language Impairments: Synthesising the Evidence (CATALISE) 2016',
'info_url': None,
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'Emergy2016': {
'name': '9th Biennial Emergy Research Conference',
'info_url': 'http://www.cep.ees.ufl.edu/emergy/conferences/ERC09_2016/index.shtml',
'logo_url': 'http://s12.postimg.org/uf9ioqmct/emergy.jpg',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'aps2016': {
'name': '28th APS Annual Convention',
'info_url': 'http://www.psychologicalscience.org/convention',
'logo_url': 'http://www.psychologicalscience.org/redesign/wp-content/uploads/2015/03/APS_2016_Banner_990x157.jpg',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'jssp2016': {
'name': 'Japanese Society of Social Psychology 2016',
'info_url': 'http://www.socialpsychology.jp/conf2016/',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'sepech2016': {
'name': 'XI SEPECH - Research Seminar in Human Sciences (Seminário de Pesquisa em Ciências Humanas)',
'info_url': 'http://www.uel.br/eventos/sepech/sepech2016/',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'etmaal2016': {
'name': 'Etmaal van de Communicatiewetenschap 2016 - Media Psychology',
'info_url': 'https://etmaal2016.wordpress.com',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
}
def populate_conferences():
    """Create or update a Conference record for every MEETING_DATA entry.

    Admin email addresses must belong to registered users; a missing
    user aborts the whole run with RuntimeError.  If a conference with
    the same endpoint already exists, its attributes are merged/updated
    instead of creating a duplicate.
    """
    for endpoint, attrs in MEETING_DATA.iteritems():
        endpoint = endpoint.strip()
        # Resolve admin email addresses to registered User objects.
        admins = []
        for email in attrs.pop('admins', []):
            try:
                admins.append(User.find_one(Q('username', 'iexact', email)))
            except ModularOdmException:
                raise RuntimeError('Username {0!r} is not registered.'.format(email))
        field_names = attrs.pop('field_names', {})
        conf = Conference(
            endpoint=endpoint, admins=admins, **attrs
        )
        conf.field_names.update(field_names)
        try:
            conf.save()
        except ModularOdmException:
            # Conference already exists: merge the new attributes into it.
            conf = Conference.find_one(Q('endpoint', 'eq', endpoint))
            for key, value in attrs.items():
                if isinstance(value, dict):
                    # Merge dict-valued attributes rather than replacing them.
                    merged = getattr(conf, key)
                    merged.update(value)
                    setattr(conf, key, merged)
                else:
                    setattr(conf, key, value)
            conf.admins = admins
            changed = conf.save()
            if changed:
                print('Updated {}: {}'.format(endpoint, changed))
        else:
            print('Added new Conference: {}'.format(endpoint))
# Script entry point: populate/update the conferences when run directly.
if __name__ == '__main__':
    main()
| {
"content_hash": "3019f508e20266f3454821b4c55731c9",
"timestamp": "",
"source": "github",
"line_count": 702,
"max_line_length": 156,
"avg_line_length": 32.686609686609685,
"alnum_prop": 0.5305935675063191,
"repo_name": "GageGaskins/osf.io",
"id": "de9c4f98b86c9129d8118bad2ebb6a6ed90b3ab5",
"size": "22989",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "scripts/populate_conferences.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "133911"
},
{
"name": "HTML",
"bytes": "58475"
},
{
"name": "JavaScript",
"bytes": "1393750"
},
{
"name": "Mako",
"bytes": "635929"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "4889695"
},
{
"name": "Shell",
"bytes": "2118"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import json
import pprint
import logging
import boto3
import requests
from snooze.constants import GITHUB_HEADERS
# Python 2/3 compatibility: Python 3 has no ``basestring`` builtin, so
# alias it to ``str`` for the isinstance() checks below.
try:
    basestring
except NameError:
    basestring = str
class RepositoryListener(object):
    """Sets up infrastructure for listening to a Github repository."""
    def __init__(self, repository_name,
                 github_username, github_token,
                 aws_key, aws_secret, aws_region,
                 events, callbacks=None, **kwargs):
        """Instantiates a RepositoryListener.
        Additionally:
        * Creates or connects to a AWS SQS queue named for the repository
        * Creates or connects to a AWS SNS topic named for the repository
        * Connects the AWS SNS topic to the AWS SQS queue
        * Configures the Github repository to push hooks to the SNS topic
        Args:
            repository_name (str): name of a Github repository, like
                "tdsmith/homebrew-pypi-poet"
            github_username (str): Github username
            github_token (str): Github authentication token from
                https://github.com/settings/tokens/new with admin:org_hook
                privileges
            aws_key (str): AWS key
            aws_secret (str): AWS secret
            aws_region (str): AWS region (e.g. 'us-west-2')
            events (list<str>): List of Github webhook events to monitor for
                activity, from https://developer.github.com/webhooks/#events.
            callbacks (list<function(str event_type, Object event_payload)>):
                functions to call with a decoded Github JSON payload when a
                webhook event lands. You can register these after instantiation
                with register_callback.
        """
        self.repository_name = repository_name
        self.github_username = github_username
        self.github_token = github_token
        self.aws_key = aws_key
        self.aws_secret = aws_secret
        self.aws_region = aws_region
        # create or reuse sqs queue
        # (create_queue is idempotent for an existing queue with the same
        # name; the "snooze__" prefix namespaces our queues)
        sqs_resource = boto3.resource("sqs", region_name=self.aws_region)
        self.sqs_queue = sqs_resource.create_queue(
            QueueName="snooze__{}".format(self._to_topic(repository_name))
        )
        # create or reuse sns topic
        sns_resource = boto3.resource("sns", region_name=self.aws_region)
        sns_topic = sns_resource.create_topic(
            Name=self._to_topic(repository_name)
        )
        # Route SNS notifications into the SQS queue so poll() can read them.
        sns_topic.subscribe(
            Protocol='sqs',
            Endpoint=self.sqs_queue.attributes["QueueArn"]
        )
        # configure repository to push to the sns topic
        connect_github_to_sns(aws_key, aws_secret, aws_region,
                              github_username, github_token, repository_name,
                              sns_topic.arn, events)
        # register callbacks
        self._callbacks = []
        if callbacks:
            [self.register_callback(f) for f in callbacks]
    def poll(self, wait=True):
        """Checks for messages from the Github repository.
        Args:
            wait (bool): Use SQS long polling, i.e. wait up to 20 seconds for a
                message to be received before returning an empty list.
        Returns: None
        """
        # bool arithmetic: wait=True -> 20 second long poll, wait=False -> 0.
        messages = self.sqs_queue.receive_messages(WaitTimeSeconds=20*wait)
        for message in messages:
            body = message.body
            logging.debug(
                "Queue {} received message: {}".format(
                    self.sqs_queue.url, body))
            try:
                # SNS wraps the Github payload: the outer JSON envelope
                # carries the event name in MessageAttributes and the
                # JSON-encoded Github payload in its "Message" field.
                decoded_full_body = json.loads(body)
                decoded_body = json.loads(decoded_full_body["Message"])
                event_type = decoded_full_body["MessageAttributes"]["X-Github-Event"]["Value"]
            except ValueError:
                # NOTE(review): a KeyError from a missing "Message" or
                # "MessageAttributes" key is not caught here and would
                # propagate -- confirm that is intended.
                logging.error("Queue {} received non-JSON message: {}".format(
                    self.sqs_queue.url, body))
            else:
                for callback in self._callbacks:
                    try:
                        callback(event_type, decoded_body)
                    except Exception as e:
                        # A failing callback is logged but does not stop
                        # the remaining callbacks from running.
                        logging.error(
                            "Queue {} encountered exception {} while "
                            "processing message {}: {}".format(
                                self.sqs_queue.url, e.__class__.__name__,
                                pprint.pformat(decoded_body), str(e)
                            ))
            finally:
                # The message is deleted even when decoding or a callback
                # failed: at-most-once processing, no redelivery.
                message.delete()
    def _to_topic(self, repository_name):
        """Converts a repository_name to a valid SNS topic name.
        Args:
            repository_name: Name of a Github repository
        Returns: str
        """
        # SNS topic names may not contain "/", so "owner/repo" becomes
        # "owner__repo".
        return repository_name.replace("/", "__")
    def register_callback(self, callback):
        """Registers a callback on a webhook received event.
        All callbacks are always called, in the order registered, for all events
        received.
        Args:
            callback (function(str, Object)): function accepting an event_type
            argument with the name of the triggered event and an event_payload
            object with the JSON-decoded payload body
        """
        self._callbacks.append(callback)
def connect_github_to_sns(aws_key, aws_secret, aws_region,
                          github_username, github_token, repository_name,
                          sns_topic_arn, events, **_):
    """Connects a Github repository to a SNS topic.
    Installs Github's "amazonsns" service hook on the repository so that
    the requested webhook events are published to the given SNS topic.
    Args:
        sns_topic_arn: ARN of an existing SNS topic
        events (list<str> | str): Github webhook events to monitor for
            activity, from https://developer.github.com/webhooks/#events.
    Returns: None
    """
    # Accept a single event name as a convenience; normalise to a list.
    if isinstance(events, basestring):
        events = [events]
    # The hook config hands Github the AWS credentials it needs to
    # publish to the topic on our behalf.
    hook_config = {
        "aws_key": aws_key,
        "aws_secret": aws_secret,
        "sns_topic": sns_topic_arn,
        "sns_region": aws_region,
    }
    response = requests.post(
        "https://api.github.com/repos/{}/hooks".format(repository_name),
        data=json.dumps({
            "name": "amazonsns",
            "config": hook_config,
            "events": events,
        }),
        headers=GITHUB_HEADERS,
        auth=requests.auth.HTTPBasicAuth(github_username, github_token))
    # Surface any Github API error (bad credentials, missing repo, ...).
    response.raise_for_status()
| {
"content_hash": "b1d877f555daa465cb337471b9f4ef74",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 94,
"avg_line_length": 37.18604651162791,
"alnum_prop": 0.5700437773608505,
"repo_name": "tdsmith/github-snooze-button",
"id": "c213cc874269fc61004ecd9938ae352486858dbf",
"size": "6396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "snooze/repository_listener.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "121340"
}
],
"symlink_target": ""
} |
from vumi.tests.helpers import VumiTestCase
from go.vumitools.tests.helpers import djangotest_imports
with djangotest_imports(globals()):
from django import forms
from go.apps.rapidsms.view_definition import (
EndpointsField, RapidSmsForm, AuthTokensForm, EditRapidSmsView,
ConversationViewDefinition)
class TestEndpointsField(VumiTestCase):
    """Tests for EndpointsField cleaning and serialisation."""
    def test_clean_none(self):
        field = EndpointsField()
        self.assertEqual(field.clean(None), [])
    def test_clean_empty_string(self):
        field = EndpointsField()
        self.assertEqual(field.clean(""), [])
    def test_clean_invalid_type(self):
        field = EndpointsField()
        self.assertRaises(forms.ValidationError, field.clean, 5)
    def test_clean_one_endpoint(self):
        field = EndpointsField()
        self.assertEqual(field.clean(u"foo"), [u"foo"])
    def test_clean_two_endpoints(self):
        field = EndpointsField()
        self.assertEqual(field.clean(u"foo, bar"), [u"foo", u"bar"])
    def test_clean_invalid_endpoint(self):
        field = EndpointsField()
        self.assertRaises(forms.ValidationError, field.clean, u"foo:bar")
    def test_from_endpoints(self):
        comma_field = EndpointsField()
        self.assertEqual(comma_field.from_endpoints([u"foo", u"bar"]), u"foo,bar")
        self.assertEqual(comma_field.from_endpoints([]), u"")
        plus_field = EndpointsField(separator=u"+")
        self.assertEqual(plus_field.from_endpoints([u"foo", u"bar"]), u"foo+bar")
class TestRapidSmsForm(VumiTestCase):
    """Tests for RapidSmsForm config round-tripping."""
    def test_initial_from_config(self):
        config = {
            "rapidsms_url": "http://www.example.com/",
            "rapidsms_username": "rapid-user",
            "rapidsms_password": "rapid-pass",
            "rapidsms_auth_method": "basic",
            "rapidsms_http_method": "POST",
        }
        expected = {
            'rapidsms_url': 'http://www.example.com/',
            'rapidsms_username': 'rapid-user',
            'rapidsms_password': 'rapid-pass',
            'rapidsms_auth_method': 'basic',
            'rapidsms_http_method': 'POST',
            # Endpoints default to just 'default' when absent from config.
            'allowed_endpoints': u'default',
        }
        self.assertEqual(RapidSmsForm.initial_from_config(config), expected)
    def test_initial_from_config_with_endpoints(self):
        initial = RapidSmsForm.initial_from_config(
            {'allowed_endpoints': ['default', 'extra']})
        # The endpoint list is flattened to a comma-separated string.
        self.assertEqual(initial, {'allowed_endpoints': u'default,extra'})
    def test_to_config(self):
        form = RapidSmsForm({
            'rapidsms_url': 'http://www.example.com/',
            'rapidsms_username': 'rapid-user',
            'rapidsms_password': 'rapid-pass',
            'rapidsms_auth_method': 'basic',
            'rapidsms_http_method': 'POST',
            'allowed_endpoints': 'default, extra',
        })
        form.is_valid()
        self.assertEqual(form.errors, {})
        expected = {
            'rapidsms_url': u'http://www.example.com/',
            'rapidsms_username': u'rapid-user',
            'rapidsms_password': u'rapid-pass',
            'rapidsms_auth_method': u'basic',
            'rapidsms_http_method': u'POST',
            'allowed_endpoints': ['default', 'extra'],
        }
        self.assertEqual(form.to_config(), expected)
class TestAuthTokensForm(VumiTestCase):
    """Tests for AuthTokensForm config round-tripping."""
    def test_initial_from_config_with_auth_token(self):
        # The first API token in config becomes the form's auth_token.
        initial = AuthTokensForm.initial_from_config(
            {'api_tokens': ["token-1"]})
        self.assertEqual(initial, {'auth_token': "token-1"})
    def test_to_config(self):
        form = AuthTokensForm({'auth_token': "token-1"})
        form.is_valid()
        self.assertEqual(form.errors, {})
        # The single auth_token is stored as a one-element token list.
        self.assertEqual(form.to_config(), {'api_tokens': ["token-1"]})
class TestEditRapidSmsView(VumiTestCase):
    """Tests for the EditRapidSmsView edit-form wiring."""
    def test_edit_forms(self):
        expected = (
            ('rapidsms', RapidSmsForm),
            ('auth_tokens', AuthTokensForm),
        )
        self.assertEqual(EditRapidSmsView().edit_forms, expected)
class TestConversationViewDefinition(VumiTestCase):
    """Tests for the conversation view definition."""
    def test_edit_view(self):
        definition = ConversationViewDefinition(None)
        self.assertEqual(definition.edit_view, EditRapidSmsView)
| {
"content_hash": "e9063ae81c33ebac0dfabe2d3d8d08fa",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 73,
"avg_line_length": 33.568,
"alnum_prop": 0.5900857959961868,
"repo_name": "praekelt/vumi-go",
"id": "0c315ae97c15994e827185467f1ef39fa969e3ea",
"size": "4196",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "go/apps/rapidsms/tests/test_view_definition.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "154585"
},
{
"name": "HTML",
"bytes": "158025"
},
{
"name": "JavaScript",
"bytes": "446112"
},
{
"name": "Python",
"bytes": "2738963"
},
{
"name": "Shell",
"bytes": "6799"
}
],
"symlink_target": ""
} |
import os
import re
import shutil
import logging
from migrate.versioning import exceptions, pathed, script
log = logging.getLogger(__name__)
class VerNum(object):
    """A version number that behaves like a string and int at the same time.

    Instances are interned per raw string value, so ``VerNum(1) is
    VerNum(1)``.  Arithmetic and comparisons coerce both operands
    through ``int()``.
    """
    _instances = dict()
    def __new__(cls, value):
        # Intern instances keyed on the raw string form of ``value``.
        val = str(value)
        if val not in cls._instances:
            cls._instances[val] = super(VerNum, cls).__new__(cls)
        ret = cls._instances[val]
        return ret
    def __init__(self, value):
        self.value = str(int(value))
        # Compare via int() directly instead of ``self < 0``: the original
        # relied on __cmp__, which Python 3 ignores, so construction
        # itself would raise TypeError there.
        if int(self.value) < 0:
            raise ValueError("Version number cannot be negative")
    def __add__(self, value):
        ret = int(self) + int(value)
        return VerNum(ret)
    def __sub__(self, value):
        return self + (int(value) * -1)
    def __cmp__(self, value):
        # Python 2 only; kept for backwards compatibility.
        return int(self) - int(value)
    # Rich comparison methods so ordering/equality also work on Python 3,
    # where __cmp__ is no longer consulted.
    def __eq__(self, other):
        return int(self) == int(other)
    def __ne__(self, other):
        return int(self) != int(other)
    def __lt__(self, other):
        return int(self) < int(other)
    def __le__(self, other):
        return int(self) <= int(other)
    def __gt__(self, other):
        return int(self) > int(other)
    def __ge__(self, other):
        return int(self) >= int(other)
    def __hash__(self):
        # Defining __eq__ would otherwise make instances unhashable on
        # Python 3.  Hash on the canonical value so equal versions collide
        # as dict keys (Collection.versions uses VerNum keys).
        return hash(self.value)
    def __repr__(self):
        return "<VerNum(%s)>" % self.value
    def __str__(self):
        return str(self.value)
    def __int__(self):
        return int(self.value)
class Collection(pathed.Pathed):
    """A collection of versioning scripts in a repository"""
    # Version scripts are named like "001_description.py": at least three
    # leading digits, everything after them is free-form.
    FILENAME_WITH_VERSION = re.compile(r'^(\d{3,}).*')
    def __init__(self, path):
        """Collect current version scripts in repository
        and store them in self.versions
        """
        super(Collection, self).__init__(path)
        # Create temporary list of files, allowing skipped version numbers.
        files = os.listdir(path)
        if '1' in files:
            # deprecation
            raise Exception('It looks like you have a repository in the old '
                            'format (with directories for each version). '
                            'Please convert repository before proceeding.')
        # Group filenames by their numeric version prefix; one version may
        # own several files (e.g. per-database upgrade/downgrade SQL).
        tempVersions = dict()
        for filename in files:
            match = self.FILENAME_WITH_VERSION.match(filename)
            if match:
                num = int(match.group(1))
                tempVersions.setdefault(num, []).append(filename)
            else:
                pass  # Must be a helper file or something, let's ignore it.
        # Create the versions member where the keys
        # are VerNum's and the values are Version's.
        self.versions = dict()
        for num, files in tempVersions.items():
            self.versions[VerNum(num)] = Version(num, path, files)
    @property
    def latest(self):
        """:returns: Latest version in Collection"""
        # VerNum(0) is the floor for an empty repository.
        # NOTE(review): ``self.versions.keys()`` must be a list for this
        # ``+`` concatenation; on Python 3 a dict view would raise
        # TypeError -- confirm only Python 2 is targeted here.
        return max([VerNum(0)] + self.versions.keys())
    def create_new_python_version(self, description, **k):
        """Create Python files for new version"""
        ver = self.latest + 1
        # Turn the free-text description into a filename-safe suffix.
        extra = str_to_filename(description)
        if extra:
            if extra == '_':
                extra = ''
            elif not extra.startswith('_'):
                extra = '_%s' % extra
        filename = '%03d%s.py' % (ver, extra)
        filepath = self._version_path(filename)
        script.PythonScript.create(filepath, **k)
        self.versions[ver] = Version(ver, self.path, [filename])
    def create_new_sql_version(self, database, **k):
        """Create SQL files for new version"""
        ver = self.latest + 1
        self.versions[ver] = Version(ver, self.path, [])
        # Create new files.
        # One SQL file per direction, named ###_database_operation.sql.
        for op in ('upgrade', 'downgrade'):
            filename = '%03d_%s_%s.sql' % (ver, database, op)
            filepath = self._version_path(filename)
            script.SqlScript.create(filepath, **k)
            self.versions[ver].add_script(filepath)
    def version(self, vernum=None):
        """Returns latest Version if vernum is not given.
        Otherwise, returns wanted version"""
        if vernum is None:
            vernum = self.latest
        return self.versions[VerNum(vernum)]
    @classmethod
    def clear(cls):
        # Delegates to pathed.Pathed.clear (resets the instance cache).
        super(Collection, cls).clear()
    def _version_path(self, ver):
        """Returns path of file in versions repository"""
        return os.path.join(self.path, str(ver))
class Version(object):
    """A single version in a collection
    :param vernum: Version Number
    :param path: Path to script files
    :param filelist: List of scripts
    :type vernum: int, VerNum
    :type path: string
    :type filelist: list
    """
    def __init__(self, vernum, path, filelist):
        self.version = VerNum(vernum)
        # Collect scripts in this folder
        self.sql = dict()
        self.python = None
        # Loop variable renamed from ``script`` to ``filename``: the old
        # name shadowed the imported ``migrate.versioning.script`` module.
        for filename in filelist:
            self.add_script(os.path.join(path, filename))
    def script(self, database=None, operation=None):
        """Return the SQL script for ``database``/``operation`` if one
        exists, falling back to the 'default' database and finally to
        this version's Python script.
        """
        for db in (database, 'default'):
            # Try to return a .sql script first
            try:
                return self.sql[db][operation]
            except KeyError:
                continue  # No .sql script exists
        # TODO: maybe add force Python parameter?
        ret = self.python
        # %s rather than %d: VerNum is not a real integer type, so %d is
        # not portable across Python versions.
        assert ret is not None, \
            "There is no script for %s version" % self.version
        return ret
    def add_script(self, path):
        """Add script to Collection/Version"""
        # Match the full ".py"/".sql" suffix (including the dot) so files
        # merely ending in the letters "py"/"sql" are not misclassified.
        if path.endswith('.' + Extensions.py):
            self._add_script_py(path)
        elif path.endswith('.' + Extensions.sql):
            self._add_script_sql(path)
    # Dot escaped (was unescaped, matching any character before "sql").
    SQL_FILENAME = re.compile(r'^(\d+)_([^_]+)_([^_]+)\.sql')
    def _add_script_sql(self, path):
        """Parse a SQL script filename and file it under
        ``self.sql[database][operation]``.

        :raises exceptions.ScriptError: if the filename does not follow
            the ###_database_operation.sql convention.
        """
        basename = os.path.basename(path)
        match = self.SQL_FILENAME.match(basename)
        if match:
            version, dbms, op = match.group(1), match.group(2), match.group(3)
        else:
            raise exceptions.ScriptError(
                "Invalid SQL script name %s " % basename + \
                "(needs to be ###_database_operation.sql)")
        # File the script into a dictionary
        self.sql.setdefault(dbms, {})[op] = script.SqlScript(path)
    def _add_script_py(self, path):
        """Attach the (single) Python script for this version.

        :raises exceptions.ScriptError: if a Python script was already
            registered for this version.
        """
        if self.python is not None:
            raise exceptions.ScriptError('You can only have one Python script '
                                         'per version, but you have: %s and %s' % (self.python, path))
        self.python = script.PythonScript(path)
class Extensions:
    """A namespace for file extensions"""
    # Bare extension strings (no leading dot), used by Version.add_script
    # to route files to the Python or SQL handler.
    py = 'py'
    sql = 'sql'
def str_to_filename(s):
    """Make *s* safe for use in a filename: spaces, double and single
    quotes and dots all become underscores, and runs of consecutive
    underscores are collapsed to a single one.
    """
    for forbidden in (' ', '"', "'", '.'):
        s = s.replace(forbidden, '_')
    while '__' in s:
        s = s.replace('__', '_')
    return s
| {
"content_hash": "1e70e49c503fb5c6af8e2296255e1991",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 81,
"avg_line_length": 31.03791469194313,
"alnum_prop": 0.5693999083829593,
"repo_name": "denny820909/builder",
"id": "04445a3b39f331f31bdc9c443ba3ab26345f324d",
"size": "6596",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/sqlalchemy_migrate-0.6-py2.6.egg/migrate/versioning/version.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "C",
"bytes": "68706"
},
{
"name": "CSS",
"bytes": "18630"
},
{
"name": "D",
"bytes": "532"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "HTML",
"bytes": "69377"
},
{
"name": "Makefile",
"bytes": "1220"
},
{
"name": "Objective-C",
"bytes": "1291"
},
{
"name": "Python",
"bytes": "21088388"
},
{
"name": "Shell",
"bytes": "2766"
},
{
"name": "Smarty",
"bytes": "393"
}
],
"symlink_target": ""
} |
try:
from . import generic as g
except BaseException:
import generic as g
class CreationTest(g.unittest.TestCase):
def setUp(self):
engines = []
if g.trimesh.util.has_module('triangle'):
engines.append('triangle')
if g.trimesh.util.has_module('mapbox_earcut'):
engines.append('earcut')
self.engines = engines
def test_cone(self):
c = g.trimesh.creation.cone(radius=0.5, height=1.0)
assert c.is_volume
assert c.body_count == 1
assert g.np.allclose(c.extents, 1.0, atol=0.03)
assert c.metadata['shape'] == 'cone'
def test_cylinder(self):
# tolerance for cylinders
atol = 0.03
c = g.trimesh.creation.cylinder(radius=0.5, height=1.0)
assert c.is_volume
assert c.body_count == 1
assert g.np.allclose(c.extents, 1.0, atol=atol)
assert c.metadata['shape'] == 'cylinder'
# check the "use a segment" feature
# passed height should be overridden
radius = 0.75
offset = 10.0
# true bounds
bounds = [[0, -radius, offset - radius],
[1, radius, offset + radius]],
# create with a height that gets overridden
c = g.trimesh.creation.cylinder(
radius=radius,
height=200,
segment=[[0, 0, offset],
[1, 0, offset]])
assert c.is_volume
assert c.body_count == 1
# make sure segment has been applied correctly
assert g.np.allclose(
c.bounds, bounds, atol=atol)
# try again with no height passed
c = g.trimesh.creation.cylinder(
radius=radius,
segment=[[0, 0, offset],
[1, 0, offset]])
assert c.is_volume
assert c.body_count == 1
# make sure segment has been applied correctly
assert g.np.allclose(
c.bounds, bounds, atol=atol)
def test_soup(self):
count = 100
mesh = g.trimesh.creation.random_soup(face_count=count)
assert len(mesh.faces) == count
assert len(mesh.face_adjacency) == 0
assert len(mesh.split(only_watertight=True)) == 0
assert len(mesh.split(only_watertight=False)) == count
def test_spheres(self):
# test generation of UV spheres and icospheres
for sphere in [g.trimesh.creation.uv_sphere(),
g.trimesh.creation.icosphere()]:
assert sphere.is_volume
assert sphere.is_convex
assert sphere.is_watertight
assert sphere.is_winding_consistent
assert sphere.metadata['shape'] == 'sphere'
# all vertices should have radius of exactly 1.0
radii = g.np.linalg.norm(
sphere.vertices - sphere.center_mass, axis=1)
assert g.np.allclose(radii, 1.0)
# test additional arguments
red_sphere = g.trimesh.creation.icosphere(color=(1., 0, 0))
expected = g.np.full((len(red_sphere.faces), 4), (255, 0, 0, 255))
g.np.testing.assert_allclose(red_sphere.visual.face_colors, expected)
def test_camera_marker(self):
"""
Create a marker including FOV for a camera object
"""
# camera transform (pose) is identity
camera = g.trimesh.scene.Camera(resolution=(320, 240), fov=(60, 45))
meshes = g.trimesh.creation.camera_marker(
camera=camera, marker_height=0.04)
assert isinstance(meshes, list)
# all meshes should be viewable type
for mesh in meshes:
assert isinstance(mesh, (g.trimesh.Trimesh,
g.trimesh.path.Path3D))
def test_axis(self):
# specify the size of the origin radius
origin_size = 0.04
# specify the length of the cylinders
axis_length = 0.4
# construct a visual axis
axis = g.trimesh.creation.axis(origin_size=origin_size,
axis_length=axis_length)
# AABB should be origin radius + cylinder length
assert g.np.allclose(origin_size + axis_length,
axis.bounding_box.primitive.extents,
rtol=.01)
def test_path_sweep(self):
if len(self.engines) == 0:
return
# Create base polygon
vec = g.np.array([0, 1]) * 0.2
n_comps = 100
angle = g.np.pi * 2.0 / n_comps
rotmat = g.np.array([
[g.np.cos(angle), -g.np.sin(angle)],
[g.np.sin(angle), g.np.cos(angle)]])
perim = []
for i in range(n_comps):
perim.append(vec)
vec = g.np.dot(rotmat, vec)
poly = g.Polygon(perim)
# Create 3D path
angles = g.np.linspace(0, 8 * g.np.pi, 1000)
x = angles / 10.0
y = g.np.cos(angles)
z = g.np.sin(angles)
path = g.np.c_[x, y, z]
# Extrude
for engine in self.engines:
mesh = g.trimesh.creation.sweep_polygon(
poly, path, engine=engine)
assert mesh.is_volume
    def test_annulus(self):
        """
        Basic tests of annular cylinder creation
        """
        # run through transforms, starting with the untransformed case
        transforms = [None]
        transforms.extend(g.transforms)
        for T in transforms:
            a = g.trimesh.creation.annulus(r_min=1.0,
                                           r_max=2.0,
                                           height=1.0,
                                           transform=T)
            # mesh should be well constructed
            assert a.is_volume
            assert a.is_watertight
            assert a.is_winding_consistent
            assert a.metadata['shape'] == 'annulus'
            # should be centered at origin
            assert g.np.allclose(a.center_mass, 0.0)
            # should be along Z
            axis = g.np.eye(3)
            if T is not None:
                # rotate the symmetry axis ground truth
                axis = g.trimesh.transform_points(axis, T)
            # should be along rotated Z (sign of the axis is not defined)
            assert (g.np.allclose(a.symmetry_axis, axis[2]) or
                    g.np.allclose(a.symmetry_axis, -axis[2]))
            # radial distance of each vertex from the (rotated) symmetry axis
            radii = [g.np.dot(a.vertices, i) for i in axis[:2]]
            radii = g.np.linalg.norm(radii, axis=0)
            # vertices should all be at r_min or r_max
            assert g.np.logical_or(g.np.isclose(radii, 1.0),
                                   g.np.isclose(radii, 2.0)).all()
            # all heights should be at +/- height/2.0
            assert g.np.allclose(g.np.abs(g.np.dot(a.vertices,
                                                   axis[2])), 0.5)
        # do some cylinder comparison checks
        a = g.trimesh.creation.annulus(r_min=0.0,
                                       r_max=1.0,
                                       height=1.0)
        cylinder = g.trimesh.creation.cylinder(radius=1, height=1)
        # should survive a zero-inner-radius and equal a solid cylinder
        assert g.np.isclose(a.volume, cylinder.volume)
        assert g.np.isclose(a.area, cylinder.area)
        # bounds should be the same as a cylinder
        a = g.trimesh.creation.annulus(r_min=.25,
                                       r_max=1.0,
                                       height=1.0)
        c = g.trimesh.creation.cylinder(radius=1, height=1)
        assert g.np.allclose(a.bounds, c.bounds)
        # the segment keyword should work the same for both shapes
        seg = [[1, 2, 3], [4, 5, 6]]
        a = g.trimesh.creation.annulus(r_min=.25,
                                       r_max=1.0,
                                       segment=seg)
        c = g.trimesh.creation.cylinder(radius=1, segment=seg)
        assert g.np.allclose(a.bounds, c.bounds)
    def test_triangulate(self):
        """
        Test triangulate using meshpy and triangle
        """
        # two circles of different radius at the same center
        bigger = g.Point([10, 0]).buffer(1.0)
        smaller = g.Point([10, 0]).buffer(.25)
        # circle with hole in center
        donut = bigger.difference(smaller)
        # make sure we have nonzero data
        assert bigger.area > 1.0
        # make sure difference did what we think it should
        assert g.np.isclose(donut.area,
                            bigger.area - smaller.area)
        # accumulated benchmark time per engine
        times = {'earcut': 0.0, 'triangle': 0.0}
        iterations = 50
        # get a polygon to benchmark times with including interiors
        bench = [bigger, smaller, donut]
        bench.extend(g.get_mesh(
            '2D/ChuteHolderPrint.DXF').polygons_full)
        bench.extend(g.get_mesh(
            '2D/wrench.dxf').polygons_full)
        # check triangulation of both meshpy and triangle engine
        # including an example that has interiors
        for engine in self.engines:
            # make sure all our polygons triangulate reasonably
            for poly in bench:
                v, f = g.trimesh.creation.triangulate_polygon(
                    poly, engine=engine)
                # run asserts on the resulting vertices and faces
                check_triangulation(v, f, poly.area)
                try:
                    # do a quick benchmark per engine;
                    # in general triangle appears to be roughly 2x
                    # faster than the alternatives
                    times[engine] += min(
                        g.timeit.repeat(
                            't(p, engine=e)',
                            repeat=3,
                            number=iterations,
                            globals={
                                't': g.trimesh.creation.triangulate_polygon,
                                'p': poly,
                                'e': engine})) / iterations
                except BaseException:
                    # benchmark failure shouldn't fail the test, just log it
                    g.log.error(
                        'failed to benchmark triangle', exc_info=True)
        g.log.info(
            'benchmarked triangulation on {} polygons: {}'.format(
                len(bench), str(times)))
def test_triangulate_plumbing(self):
"""
Check the plumbing of path triangulation
"""
if len(self.engines) == 0:
return
p = g.get_mesh('2D/ChuteHolderPrint.DXF')
for engine in self.engines:
v, f = p.triangulate(engine=engine)
check_triangulation(v, f, p.area)
def test_truncated(self, count=10):
# create some random triangles
tri = g.random((count, 3, 3))
m = g.trimesh.creation.truncated_prisms(tri)
split = m.split()
assert m.body_count == count
assert len(split) == count
assert all(s.volume > 0 for s in split)
def check_triangulation(v, f, true_area):
    """Assert a 2D triangulation is well-formed and has the expected area."""
    # vertices must be (n, 2) floats and faces (m, 3) integers
    assert g.trimesh.util.is_shape(v, (-1, 2))
    assert v.dtype.kind == 'f'
    assert g.trimesh.util.is_shape(f, (-1, 3))
    assert f.dtype.kind == 'i'
    # summed triangle area must match the source polygon area
    triangles = g.trimesh.util.stack_3D(v)[f]
    assert g.np.isclose(g.trimesh.triangles.area(triangles).sum(),
                        true_area)
# attach trimesh logging and run all tests when executed directly
if __name__ == '__main__':
    g.trimesh.util.attach_to_log()
    g.unittest.main()
| {
"content_hash": "fec97d34d5dc6905fd08fabfb0fc50ed",
"timestamp": "",
"source": "github",
"line_count": 304,
"max_line_length": 77,
"avg_line_length": 36.57236842105263,
"alnum_prop": 0.523295556754812,
"repo_name": "mikedh/trimesh",
"id": "f1e265da1a51d0d72a3489e7313d2df1339cfe0c",
"size": "11118",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_creation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2855"
},
{
"name": "HTML",
"bytes": "580"
},
{
"name": "JavaScript",
"bytes": "5887"
},
{
"name": "Makefile",
"bytes": "1862"
},
{
"name": "Python",
"bytes": "2142314"
},
{
"name": "Shell",
"bytes": "5161"
}
],
"symlink_target": ""
} |
import pytest
@pytest.fixture(scope="package")
def salt_eauth_account(salt_eauth_account_factory):
    # hand the factory-managed eauth account to tests; the context manager
    # controls its setup/teardown for the whole package scope
    with salt_eauth_account_factory as eauth_account:
        yield eauth_account
| {
"content_hash": "ea0f11955aa3453e5c9200af7d41d6de",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 51,
"avg_line_length": 24.428571428571427,
"alnum_prop": 0.7426900584795322,
"repo_name": "saltstack/salt",
"id": "48554dda4d8c632e0ba76e877bc9dd46cdf49488",
"size": "171",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/pytests/integration/cli/conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14911"
},
{
"name": "C",
"bytes": "1571"
},
{
"name": "Cython",
"bytes": "1458"
},
{
"name": "Dockerfile",
"bytes": "184"
},
{
"name": "Groovy",
"bytes": "12318"
},
{
"name": "HCL",
"bytes": "257"
},
{
"name": "HTML",
"bytes": "8031"
},
{
"name": "Jinja",
"bytes": "45598"
},
{
"name": "Makefile",
"bytes": "713"
},
{
"name": "NSIS",
"bytes": "76572"
},
{
"name": "PowerShell",
"bytes": "75891"
},
{
"name": "Python",
"bytes": "41444811"
},
{
"name": "Rich Text Format",
"bytes": "6242"
},
{
"name": "Roff",
"bytes": "191"
},
{
"name": "Ruby",
"bytes": "961"
},
{
"name": "SaltStack",
"bytes": "35856"
},
{
"name": "Scheme",
"bytes": "895"
},
{
"name": "Scilab",
"bytes": "1147"
},
{
"name": "Shell",
"bytes": "524917"
}
],
"symlink_target": ""
} |
import os
from django.core.wsgi import get_wsgi_application
# New Relic instrumentation is optional: initialize the agent and capture
# request parameters when the package is available, otherwise report and
# continue without monitoring.
try:
    import newrelic.agent
    newrelic.agent.initialize()
    newrelic.agent.capture_request_params()
except Exception as e:
    print("newrelic couldn't be initialized:", e)
# Django must know its settings module before the WSGI application is built.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "web.settings")
application = get_wsgi_application()
| {
"content_hash": "3c7b1a71ff2d7d1cd84e219f7fe11021",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 63,
"avg_line_length": 23.066666666666666,
"alnum_prop": 0.7485549132947977,
"repo_name": "openstates/openstates.org",
"id": "1511f85206009f70edb021e05f7976f0f071f4ce",
"size": "346",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "web/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2271"
},
{
"name": "Dockerfile",
"bytes": "2731"
},
{
"name": "HTML",
"bytes": "170085"
},
{
"name": "JavaScript",
"bytes": "58627"
},
{
"name": "Jinja",
"bytes": "4114"
},
{
"name": "Procfile",
"bytes": "173"
},
{
"name": "Python",
"bytes": "338617"
},
{
"name": "SCSS",
"bytes": "52406"
},
{
"name": "Shell",
"bytes": "2288"
}
],
"symlink_target": ""
} |
# startup breadcrumbs, apparently left in for debugging import problems
print("are you ok?? 11")
import requests
import redis
import os
import random
from instapush_notify import InstaPushNotify
from fav_zhuanlan import Fav
from logger_fun import logger
print("are you ok??")
# shared HTTP session and local Redis store tracking already-seen article ids
s = requests.session()
redis_obj = redis.Redis(host='localhost', port=6379, db=0)
class CheckZhuanLanFav(object):
    """Poll a Zhihu collection feed and push newly favorited articles.

    Already-pushed article ids are recorded in the Redis set
    ``zhihu_zhuanlan_id`` so each article is only handled once.
    """
    def __init__(self, url):
        # url: the collection-contents API endpoint to start polling from
        self.url = url
        # headers mimic the official iOS client; the API token is read from
        # the `Authorization` environment variable
        self.headers = {
            'User-Agent': 'osee2unifiedRelease/332 CFNetwork/711.3.18 Darwin/14.0.0',
            'Authorization': os.environ.get('Authorization'),
            'Content-Type': 'application/json',
            'x-api-version': "3.0.42",
            'accept-language': "zh-Hans-CN;q=1, en-US;q=0.9",
            'accept': "*/*",
            'accept-encoding': "gzip, deflate"
        }
        # ~20% of runs also walk older pages instead of only the first one
        self.force_check = True if random.randint(0, 9) > 7 else False
    def get_list(self, url):
        """Fetch one page of the feed, push unseen articles, maybe recurse."""
        r = s.get(url, headers=self.headers)
        res_json = r.json()
        data_info = res_json.get('data', [])
        next_url = None
        # only follow pagination when doing a full (forced) check
        if data_info and self.force_check:
            paging_dict = res_json.get('paging', {})
            next_url = paging_dict.get('next', None)
        for data in data_info:
            type_info = data.get('type', '')
            if type_info == 'article':
                data_url = data.get('url')
                data_id = (data.get('id'))
                data_title = data.get('title')
                # skip malformed entries
                if not data_url or not data_id or not data_title:
                    logger.error("%s error" % data)
                    continue
                # skip articles that were already pushed
                if redis_obj.sismember('zhihu_zhuanlan_id', data_id):
                    logger.warning("%s %s %s exits" % (data_url, data_id, data_title))
                    continue
                logger.info("+++++++++++++++++++++++++++++++++++++++++++")
                logger.info(data_url)
                logger.info(data_id)
                logger.info(data_title)
                self.push_fav(data)
                logger.info("+++++++++++++++++++++++++++++++++++++++++++")
                logger.info("\n")
                # return
        # recurse into the next page of the paginated feed, if any
        if next_url:
            logger.info("next url %s" % next_url)
            self.get_list(next_url)
    def push_fav(self, dict_info):
        """Save the article content, mark it seen in Redis, and notify."""
        url = dict_info.get('url', '')
        data_id = dict_info.get('id')
        title = dict_info.get('title')
        # f = Fav(url, '735b3e76-e7f5-462c-84d0-bb1109bcd7dd', '')
        f = Fav(url, 'f082258a-fd9a-4713-98a0-d85fa838f019', '')
        f.get_content()
        redis_obj.sadd('zhihu_zhuanlan_id', data_id)
        InstaPushNotify.notify(title, type_info=2)
if __name__ == '__main__':
try:
czlf = CheckZhuanLanFav('https://api.zhihu.com/collections/29469118/contents?excerpt_len=75')
czlf.get_list(czlf.url)
except Exception, e:
logger.error(Exception)
logger.error(e)
InstaPushNotify.notify("error e:%s" % e , type_info=2)
| {
"content_hash": "b417c3047422c69030182886c903a520",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 101,
"avg_line_length": 36.4320987654321,
"alnum_prop": 0.5218569976279227,
"repo_name": "youqingkui/zhihufav",
"id": "11873b804e82df4edeb8edb583f4b3fb668c88e3",
"size": "2988",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/check_zhuanglan.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24953"
}
],
"symlink_target": ""
} |
"""Data utils."""
from typing import Dict, List, Sequence, Tuple
from absl import logging
import numpy as np
import seqio
# A tuple of all possible intent label names to be used by NALUE task.
# NOTE(review): the tuple contains both 'Currency' and 'Currency ' (with a
# trailing space) — presumably mirroring the raw dataset labels; confirm the
# trailing-space variant is intentional before deduplicating.
_NALUE_INTENT_NAMES = (
    'AIName', 'APR', 'ATM', 'Accent', 'Accept', 'Account', 'Activate',
    'Activity', 'Affirm', 'Age', 'Alarm', 'Answers', 'Arrival', 'Audiobook',
    'AutomateHowto', 'Balance', 'Bank', 'BankTransferChargeInfo',
    'BasicService', 'Basics', 'Bill', 'Blocked', 'Book', 'BookTaxi', 'BookUber',
    'Boss', 'BotInfo', 'Business', 'ByAppHowto', 'ByCardHowTo', 'ByCheckHowTo',
    'Calculator', 'Calendar', 'Call', 'Calorie', 'Cancel', 'CarAssistant',
    'CardAboutExpire', 'CardChargeInfo', 'CardCompromised', 'CardDamaged',
    'CardLost', 'CardNotWorking', 'CardSwallowed', 'CardType', 'CarryOn',
    'CashWithdraw', 'Change', 'ChangePassword', 'ChangePersonalDetail',
    'ChargedTwice', 'Coffee', 'Confirm', 'ContactAdd', 'ContactQuery',
    'ContactlessNotWorking', 'Conversion', 'CookTime', 'Country', 'CreditCard',
    'CreditLimit', 'CreditScore', 'Currency', 'Currency ', 'CurrencyExchange',
    'CurrentLocation', 'DateTime', 'Declined', 'Definition', 'DeliveryEstimate',
    'DeliveryEstimateInfo', 'Deposit', 'Device', 'Dictionary', 'Dim',
    'Direction', 'DisposableCard', 'DisposableCardLimitInfo', 'Distance',
    'DontCare', 'DontKnow', 'Email', 'Event', 'Expiration', 'ExpirationDate',
    'Explain', 'ExtraCharge', 'Failed', 'FeeCharged', 'FeeInfo', 'FindPhone',
    'Flight', 'FlipCoin', 'Food', 'ForgottenPassword', 'Freeze', 'FunFact',
    'Function', 'Game', 'Gas', 'General', 'GetCard', 'Goodbye', 'Hello',
    'Hobby', 'Hotel', 'How', 'HowBusy', 'HowTo', 'Identity', 'ImproveInfo',
    'Income', 'Info', 'Ingredient', 'Insurance', 'InterestRateInfo',
    'InternationalFees', 'Issue', 'Joke', 'JumpStart', 'Language', 'Level',
    'Light', 'LimitInfo', 'Linking', 'List', 'Location', 'LostLuggage',
    'Maintenance', 'Manufacturer', 'Meal', 'MeaningOfLife', 'Measurement',
    'Media', 'Message', 'Meta', 'Mileage', 'Movie', 'Music', 'Name',
    'Navigation', 'Negate', 'NewCard', 'News', 'NextVacation', 'NotUpdated',
    'Nutrition', 'OOS', 'Off', 'OilChange', 'On', 'Order', 'OrderChecks',
    'Origin', 'PTO', 'Pay', 'Payday', 'Payment', 'PaymentApp', 'Pending',
    'Pets', 'Phone', 'PhysicalCard', 'Pin', 'Play', 'Playlist', 'PlugType',
    'Podcast', 'Post', 'Preference', 'Pressure', 'Productivity', 'Query',
    'Radio', 'RateInfo', 'ReceiveHowto', 'Recipe', 'Recommend', 'Redeem',
    'Refund', 'Reminder', 'Remove', 'RentalCar', 'Repeat', 'ReportFraud',
    'Request', 'Reservation', 'Reset', 'Restaurant', 'Reverted', 'Review',
    'Rewards', 'Ride', 'RollDice', 'RollOver', 'RoutingNumberInfo', 'Send',
    'Set', 'Setting', 'Settings', 'ShareLocation', 'Shopping', 'Skip',
    'SmallTalk', 'SmartHome', 'Social', 'SpareCard', 'Speaker', 'Speed',
    'Spelling', 'SpendingQuery', 'Statement', 'Status', 'Stock', 'StolenPhone',
    'Substitute', 'Support', 'Sync', 'TakeOut', 'Tax', 'Terminante', 'Text',
    'Thanks', 'TimeZone', 'Timer', 'Tire', 'TopUp', 'TopUpMethod', 'Traffic',
    'Train', 'TransactionQuery', 'Transfer', 'Translate', 'Transport',
    'TravelAlert', 'TravelNotification', 'TripPlanning', 'Type', 'Unable',
    'UnrecognizedTransaction', 'Up', 'Update', 'Used', 'UserName', 'Utility',
    'Vaccine', 'Vacuum', 'Verify', 'VeryIdentity', 'VirtualCard',
    'VirtualCardNotWorking', 'Visa', 'VolumeDown', 'VolumeMute', 'VolumeUp',
    'W2', 'Weather', 'Wemo', 'When', 'WhisperMode', 'Why', 'Work',
    'WrongAmount', 'WrongExchangeRate')
# A callable for other file to use to retrieve NaLUE intents (e.g., `task.py`)
def get_nalue_intent_names():
  """Returns the tuple of all possible NaLUE intent label names."""
  # A plain `def` instead of a lambda assignment (PEP 8 E731): same name and
  # call signature, but with a docstring and a useful name in tracebacks.
  return _NALUE_INTENT_NAMES
def make_intent_tokens(
    intent_names: Sequence[str], vocab: seqio.SentencePieceVocabulary,
    custom_tokens: Sequence[str]) -> Tuple[Dict[str, str], Dict[str, str]]:
  """Identify unique vocab tokens for intent label names.

  For each intent name, this function identifies a unique symbol from the
  SentencePieceVocabulary so every intent label can be decoded into a unique
  token. For example, for two intent names (`CardInfo`, `CardLimit`) that will
  be tokenized into [`Card`, `Info`] and [`Card`, `Limit`], we will use `Card`
  as the token for `CardInfo`, and `Limit` as the token for `CardLimit` so each
  intent correspond to a unique token. For an intent whose tokenized symbols
  are all already used by other intents, we will use a pre-reserved custom
  token from the vocabulary (e.g., extra ids `<extra_id_0>`) as its symbol.

  Args:
    intent_names: A sequence of names of the intent labels.
    vocab: A SentencePieceVocabulary that will be used for tokenizing the
      output sequence.
    custom_tokens: Custom tokens from the SentencePieceVocabulary. Usually
      these are the extra ids of the format "<extra_id_X>".

  Returns:
    intent_to_token: A mapping that converts intent names to tokens.
    token_to_intent: A mapping that converts tokens to intent names.

  Raises:
    ValueError: If `intent_names` contains duplicate names.
    ValueError: If the list of custom tokens are not part of vocab.
    ValueError: If the list of custom tokens are already exhausted when a new
      custom token is needed.
  """
  if len(intent_names) != len(np.unique(intent_names)):
    raise ValueError('`intent_names` contains duplicates. '
                     'Please make sure the intent names are unique.')
  # Creates a copy of the custom_tokens that is a mutable stack for later use.
  custom_tokens_available = list(custom_tokens)
  check_custom_token_validity(custom_tokens_available, vocab)
  # Assign each intent names an unique token.
  intent_to_token, token_to_intent = dict(), dict()
  for intent_name in intent_names:
    candidate_tokens = get_vocab_tokens(intent_name, vocab)
    # Initialize before the candidate loop so the custom-token fallback below
    # is well-defined even when `candidate_tokens` is empty (previously this
    # raised UnboundLocalError for an intent with no candidate tokens).
    suitable_token_found = False
    for token in candidate_tokens:
      # Assigns a token to the intent if it (1) is un-used, (2) corresponds
      # to a single id under vocab.encode(), and (3) does not belong to a
      # custom special token.
      is_unused = token_to_intent.get(token, None) is None
      is_singular = len(vocab.encode(token)) == 1
      not_custom_token = token not in custom_tokens_available
      suitable_token_found = is_unused and is_singular and not_custom_token
      if suitable_token_found:
        intent_to_token[intent_name] = token
        token_to_intent[token] = intent_name
        logging.info('%s: assign token %s from candidates %s', intent_name,
                     token, candidate_tokens)
        break
    # Otherwise, assign a custom token to this intent.
    if not suitable_token_found:
      if not custom_tokens_available:
        raise ValueError(f'{intent_name} needs a custom token, however all '
                         'custom_tokens are already used.')
      custom_token = custom_tokens_available.pop(0)
      intent_to_token[intent_name] = custom_token
      token_to_intent[custom_token] = intent_name
      logging.info(
          '%s: all candidate tokens %s are either already used or '
          'does not correspond to a unique id, assigning to it a '
          'custom token: %s', intent_name, candidate_tokens, custom_token)
  return intent_to_token, token_to_intent
def get_vocab_tokens(input_str: str,
                     vocab: seqio.SentencePieceVocabulary) -> List[str]:
  """Splits an input string into a list of its SentencePiece tokens."""
  token_ids = vocab.encode(input_str)
  # decode each id individually so every element is a single-token string
  return [vocab.decode([token_id]) for token_id in token_ids]
def check_custom_token_validity(custom_tokens: Sequence[str],
                                vocab: seqio.SentencePieceVocabulary):
  """Makes sure that all custom_tokens are part of the vocabulary.

  A custom token is valid only if it round-trips through the vocabulary as
  exactly one token that decodes back to itself.

  Args:
    custom_tokens: A list of special tokens that should correspond to unique
      tokens under the `vocab` object.
    vocab: A seqio.SentencePieceVocabulary object.

  Raises:
    ValueError: If any of the custom_tokens is tokenized into multiple tokens.
    ValueError: If any of the custom_tokens is tokenized into a different token.
  """
  for token in custom_tokens:
    decoded = get_vocab_tokens(token, vocab)
    if len(decoded) != 1:
      raise ValueError(
          f'custom token "{token}" cannot be tokenized into a single token: '
          f'{decoded}. It is not part of the valid vocabulary.')
    if token != decoded[0]:
      raise ValueError(
          f'custom token "{token}" is tokenized into a different token '
          f'"{decoded[0]}". It is not part of the valid vocabulary.')
| {
"content_hash": "2a5d0de852b947a894bcb95c5a157ad9",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 80,
"avg_line_length": 51.07738095238095,
"alnum_prop": 0.6670551217806783,
"repo_name": "google/uncertainty-baselines",
"id": "af657e2d8c85b6f51bbbd2d37eaf86e513687a04",
"size": "9195",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "baselines/t5/data/nalue/data_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "269621"
},
{
"name": "Python",
"bytes": "5741537"
}
],
"symlink_target": ""
} |
from scrapy.item import Item, Field
class Quote(Item):
    """Scrapy item carrying one scraped quote: its text, author, and book."""
    # define the fields for your item here like:
    # name = Field()
    text = Field()
    author = Field()
    book = Field()
| {
"content_hash": "b78bc96d895380063d219f40b0693011",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 48,
"avg_line_length": 23.125,
"alnum_prop": 0.6216216216216216,
"repo_name": "s1na/darkoob",
"id": "bfa4f77dff09ffbaab0817f941042271ced8bba8",
"size": "302",
"binary": false,
"copies": "1",
"ref": "refs/heads/alpha",
"path": "scraping/scraping/items.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "155888"
},
{
"name": "Python",
"bytes": "111600"
},
{
"name": "Shell",
"bytes": "364"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class OrientationValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the `isosurface.colorbar.orientation` property.

    Accepts only the values "h" or "v"; edit_type defaults to "calc".
    NOTE: this class follows plotly's auto-generated validator pattern.
    """
    def __init__(
        self, plotly_name="orientation", parent_name="isosurface.colorbar", **kwargs
    ):
        super(OrientationValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # kwargs.pop lets callers override the defaults below
            edit_type=kwargs.pop("edit_type", "calc"),
            values=kwargs.pop("values", ["h", "v"]),
            **kwargs,
        )
| {
"content_hash": "bf01f972ed6987af7a696d59c3670a2a",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 84,
"avg_line_length": 35.142857142857146,
"alnum_prop": 0.6036585365853658,
"repo_name": "plotly/plotly.py",
"id": "7ac5b2a5c7ce2e96efefd4eb5aed38a6773e0dcf",
"size": "492",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/isosurface/colorbar/_orientation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <gabriel.becedillas@gmail.com>
"""
import datetime
from pyalgotrade import strategy
from pyalgotrade.broker import backtesting
from pyalgotrade import bar
from pyalgotrade import logger
from pyalgotrade.barfeed import membf
class TestBarFeed(membf.BarFeed):
    """Minimal in-memory bar feed for this test."""
    def barsHaveAdjClose(self):
        # this feed does not provide adjusted-close data
        raise NotImplementedError()
class Strategy(strategy.BaseStrategy):
    """Strategy that logs one line through two different loggers per bar."""
    def __init__(self, barFeed, cash):
        strategy.BaseStrategy.__init__(self, barFeed, backtesting.Broker(cash, barFeed))
    def onBars(self, bars):
        # log via the strategy's own logger and via a custom named logger
        self.info("bla")
        logger.getLogger("custom").info("ble")
def main():
    """Run a one-bar backtest to exercise the two logging paths."""
    feed = TestBarFeed(bar.Frequency.DAY)
    day_bar = bar.BasicBar(
        datetime.datetime(2000, 1, 1),
        10, 10, 10, 10, 10, 10, bar.Frequency.DAY)
    feed.addBarsFromSequence("orcl", [day_bar])
    strat = Strategy(feed, 1000)
    strat.run()
| {
"content_hash": "05c4c5c46a6877fd128f152fd50eb622",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 95,
"avg_line_length": 25.416666666666668,
"alnum_prop": 0.6918032786885245,
"repo_name": "cgqyh/pyalgotrade-mod",
"id": "72652014fc6b8c26a68374f452e0b803d72f4a9a",
"size": "1529",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testcases/logger_test_3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1066824"
},
{
"name": "Shell",
"bytes": "504"
}
],
"symlink_target": ""
} |
import logging
import ConfigParser
class RequestResponder(object):
    """Base class for HTTP-verb responders configured from an INI file.

    The verb methods all just record the incoming request handler here;
    NOTE(review): presumably subclasses override them with real behavior.
    """
    def __init__(self, configFilePath):
        #logging.debug('config: '+configFilePath)
        # parse the INI-style configuration for use by subclasses
        self._configuration = ConfigParser.RawConfigParser()
        self._configuration.read(configFilePath)
        self.requestHandler = None
    def get(self, requestHandler):
        self.requestHandler = requestHandler
    def post(self, requestHandler):
        self.requestHandler = requestHandler
    def put(self, requestHandler):
        self.requestHandler = requestHandler
    def delete(self, requestHandler):
        self.requestHandler = requestHandler
    def options(self, requestHandler):
        self.requestHandler = requestHandler
| {
"content_hash": "78eee8d06b733aebca8cd604c8708eb3",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 60,
"avg_line_length": 29.833333333333332,
"alnum_prop": 0.7039106145251397,
"repo_name": "dataplumber/edge",
"id": "5d27c48b360b0e28adf661118d544c2c365d1f4c",
"size": "716",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/python/requestresponder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "279147"
},
{
"name": "SQLPL",
"bytes": "66101"
},
{
"name": "Shell",
"bytes": "279"
}
],
"symlink_target": ""
} |
import core
from core import *
class DMIDE_MenuBar(wx.MenuBar):
	"""Dockable menu bar whose contents are read from a menubar XML file."""
	def __init__(self, window=None):
		wx.MenuBar.__init__(self, wx.MB_DOCKABLE)
		self.installMenuService()
	def installMenuService(self):
		""" Build a MenuBar for the top-level window as read from an XML file """
		# prefer the app's settings copy, then a local menubar.xml, and fall
		# back to the built-in default_menu string
		if os.path.exists(os.path.join(wx.GetApp().get_dir(), 'settings', 'menubar.xml')):
			handler = MenuBarHandler(self)
			parser = xml.sax.parse(os.path.join(wx.GetApp().get_dir(), 'settings', 'menubar.xml'), handler)
		elif os.path.exists('menubar.xml'):
			handler = MenuBarHandler(self)
			parser = xml.sax.parse('menubar.xml', handler)
		else:
			handler = MenuBarHandler(self)
			parser = xml.sax.parseString(default_menu, handler)
class DMIDE_FancyMenuBar(wxFlatMenu.FlatMenuBar):
	"""FlatMenu-based menu bar; same XML loading scheme as DMIDE_MenuBar."""
	def __init__(self, window):
		wxFlatMenu.FlatMenuBar.__init__(self, window, ID_MENUBAR)
		self.installMenuService()
	def installMenuService(self):
		""" Build a MenuBar for the top-level window as read from an XML file """
		# prefer the app's settings copy, then a local menubar.xml, and fall
		# back to the built-in default_menu string
		if os.path.exists(os.path.join(wx.GetApp().get_dir(), 'settings', 'menubar.xml')):
			handler = FancyMenuBarHandler(self)
			parser = xml.sax.parse(os.path.join(wx.GetApp().get_dir(), 'settings', 'menubar.xml'), handler)
		elif os.path.exists('menubar.xml'):
			handler = FancyMenuBarHandler(self)
			parser = xml.sax.parse('menubar.xml', handler)
		else:
			handler = FancyMenuBarHandler(self)
			parser = xml.sax.parseString(default_menu, handler)
	def FindItemById(self, id):
		# adapt wx.MenuBar's lookup name to FlatMenuBar's FindMenuItem
		return self.FindMenuItem(id)
class MenuBarHandler(xml.sax.handler.ContentHandler):
	""" SAX handler that builds nested wx.Menu objects from the menubar XML.

	`self.current` is a stack of (wx.Menu, title) pairs; nested <menu_bar>
	elements become submenus, <menu> elements become items.
	NOTE: uses dict.has_key, so this file targets Python 2.
	"""
	def __init__(self, menubar):
		self.ordered_list = []
		self.current = []
		self.menubar = menubar
	def startElement(self, name, attributes):
		if name == 'menu_bar':
			# open a new (sub)menu and push it on the stack
			if not self.current:
				menu_title = attributes['title']
				self.current = [(wx.Menu(), menu_title)]
			else:
				menu_title = attributes['title']
				self.current.append((wx.Menu(), menu_title))
		elif name == 'menu':
			if not self.current:
				return
			if attributes.has_key('type'):
				if attributes['type'] == 'separator':
					self.current[-1][0].AppendSeparator()
					return
			# `id`/`flags` name global wx ID constants resolved via globals()
			id = attributes['id']
			title = attributes['title']
			macro = ''
			desc = ''
			flags = ''
			if attributes.has_key('macro'):
				macro = attributes['macro']
			if attributes.has_key('desc'):
				desc = attributes['desc']
			if attributes.has_key('flags'):
				flags = attributes['flags']
			if macro:
				# accelerator text is appended after a tab, per wx convention
				title = '%s\t%s' % (title, macro)
			if flags:
				self.current[-1][0].Append(globals()[id], title, desc, globals()[flags])
			else:
				self.current[-1][0].Append(globals()[id], title, desc)
	def endElement(self, name):
		if name == 'menu_bar':
			# pop the finished menu: the outermost one is attached to the
			# menubar itself, nested ones become submenus of their parent
			if self.current == [self.current[-1]]:
				self.menubar.Append(*self.current[-1])
				self.current = []
			else:
				self.current[-2][0].AppendMenu(wx.ID_ANY, self.current[-1][1], self.current[-1][0])
				self.current = self.current[:-1]
class FancyMenuBarHandler(xml.sax.handler.ContentHandler):
	""" SAX handler building wxFlatMenu menus from the menubar XML.

	Same stack-based scheme as MenuBarHandler, but creates FlatMenu items
	with normal/disabled bitmaps looked up through the app's art provider.
	NOTE: uses dict.has_key, so this file targets Python 2.
	"""
	def __init__(self, menubar):
		self.ordered_list = []
		self.current = []
		self.menubar = menubar
	def startElement(self, name, attributes):
		if name == 'menu_bar':
			# open a new (sub)menu and push it on the stack
			if not self.current:
				menu_title = attributes['title']
				self.current = [(wxFlatMenu.FlatMenu(), menu_title)]
			else:
				menu_title = attributes['title']
				self.current.append((wxFlatMenu.FlatMenu(), menu_title))
		elif name == 'menu':
			if not self.current:
				return
			if attributes.has_key('type'):
				if attributes['type'] == 'separator':
					self.current[-1][0].AppendSeparator()
					return
			# `id`/`flags` name global wx ID constants resolved via globals()
			id = attributes['id']
			title = attributes['title']
			macro = ''
			desc = ''
			flags = ''
			if attributes.has_key('macro'):
				macro = attributes['macro']
			if attributes.has_key('desc'):
				desc = attributes['desc']
			if attributes.has_key('flags'):
				flags = attributes['flags']
			if macro:
				# accelerator text is appended after a tab, per wx convention
				title = '%s\t%s' % (title, macro)
			'''
			if flags:
				self.current[-1][0].Append(globals()[id], title, desc, globals()[flags])
			else:
				self.current[-1][0].Append(globals()[id], title, desc)
			'''
			if not flags:
				flags = 'ID_ITEM_NORMAL'
			# use the art provider's bitmap for ids that have one, with a
			# greyscale copy for the disabled state
			if id in id_to_art:
				bmp = wx.GetApp().art.getFromWx(id_to_art[id], (dmide_menu_art_size, dmide_menu_art_size), wx.ART_MENU)
				disabled = wx.BitmapFromImage(wx.ImageFromBitmap(bmp).ConvertToGreyscale())
			else:
				bmp = wx.NullBitmap
				disabled = wx.NullBitmap
			item = wxFlatMenu.FlatMenuItem(self.current[-1][0], globals()[id], title, desc, globals()[flags], normalBmp=bmp, disabledBmp=disabled)
			self.current[-1][0].AppendItem(item)
	def endElement(self, name):
		if name == 'menu_bar':
			# pop the finished menu: the outermost one is attached to the
			# menubar itself, nested ones become submenus of their parent
			if self.current == [self.current[-1]]:
				self.menubar.Append(*self.current[-1])
				self.current = []
			else:
				self.current[-2][0].AppendMenu(wx.ID_ANY, self.current[-1][1], self.current[-1][0], wx.ITEM_NORMAL)
				self.current = self.current[:-1]
default_menu = '''
<menu_list>
<menu_bar title="File">
<menu id="ID_FILE_NEW" title="New" macro="Ctrl+N" desc="Create a new file." />
<menu id="ID_FILE_OPEN" title="Open" macro="Ctrl+O" desc="Open a file." />
<menu id="ID_FILE_CLOSE" title="Close" macro="Ctrl+Shift+C" desc="Close the current file." />
<menu id="ID_FILE_SAVE" title="Save" macro="Ctrl+S" desc="Save the current file." />
<menu id="ID_FILE_SAVEAS" title="Save As" macro="Ctrl+Shift+S" desc="Save the current file in a different title." />
<menu type="separator" />
<menu id="ID_FILE_NEWENVIRONMENT" title="New Environment" macro="Ctrl+Shift+N" desc="Create a new environment." />
<menu id="ID_FILE_OPENENVIRONMENT" title="Open Environment" macro="Ctrl+Shift+O" desc="Open an environment." />
<menu type="separator" />
<menu id="ID_EXIT" title="Exit" macro="Ctrl+Q" desc="Exit DMIDE." />
</menu_bar>
<menu_bar title="Edit">
<menu id="ID_EDIT_UNDO" title="Undo" macro="Ctrl+Z" desc="Undo last change." />
<menu id="ID_EDIT_REDO" title="Redo" macro="Ctrl+Y" desc="Redo last undo change." />
<menu type="separator" />
<menu id="ID_EDIT_CUT" title="Cut" macro="Ctrl+X" desc="Cut the selected text." />
<menu id="ID_EDIT_COPY" title="Copy" macro="Ctrl+C" desc="Copy the selected text." />
<menu id="ID_EDIT_PASTE" title="Paste" macro="Ctrl+V" desc="Paste the text in clipboard." />
<menu id="ID_EDIT_DELETE" title="Delete" macro="Del" desc="Delete the selected text." />
<menu type="separator" />
<menu id="ID_EDIT_FIND" title="Find" macro="Ctrl+F" desc="Find text in this document." />
<menu id="ID_EDIT_FINDNEXT" title="Find Next" macro="F3" desc="Find the next text in this document." />
<menu id="ID_EDIT_FINDPREV" title="Find Previous" macro="Shift+F3" desc="Find the previous text in this document." />
<menu id="ID_EDIT_REPLACE" title="Replace" macro="Ctrl+H" desc="Replace text in this document." />
<menu type="separator" />
<menu id="ID_EDIT_GOTOLINE" title="Goto Line" macro="Ctrl+G" desc="Go to specified line." />
<menu id="ID_EDIT_SELECTALL" title="Select All" macro="Ctrl+A" desc="Select all text in this document." />
</menu_bar>
<menu_bar title="View">
<menu id="ID_VIEW_FILETOOLBAR" title="File Toolbar" desc="Toggle view of the file toolbar." flags="ID_ITEM_CHECK" />
<menu type="separator" />
<menu id="ID_VIEW_FILETREE" title="File Tree" desc="Toggle view of the file tree." flags="ID_ITEM_CHECK" />
<menu id="ID_VIEW_EDITOR" title="Main Editor" desc="Toggle view of the main editor." flags="ID_ITEM_CHECK" />
<menu id="ID_VIEW_BUILDINFORMATION" title="Build Information" desc="Toggle view of the build information." flags="ID_ITEM_CHECK" />
<menu id="ID_VIEW_CONSOLE" title="Console" desc="Toggle view of the developer console." flags="ID_ITEM_CHECK" />
</menu_bar>
<menu_bar title="Perspective">
<menu id="ID_PERSPECTIVE_DEFAULT" title="Default" desc="Load default perspective." />
<menu id="ID_PERSPECTIVE_SAVE" title="Save" desc="Save perspective." />
<menu id="ID_PERSPECTIVE_LOAD" title="Load" desc="Load perspective." />
<menu type="separator" />
</menu_bar>
<menu_bar title="Options">
<menu id="ID_OPTIONS_PERSPECTIVE" title="Perspective" desc="Settings for the look and feel of DMIDE." />
</menu_bar>
<menu_bar title="Help">
<menu type="separator" />
<menu id="ID_HELP_ABOUT" title="About" desc="About DMIDE." />
</menu_bar>
</menu_list>
'''
| {
"content_hash": "52d5f51e2fca695231110ba8d56a3765",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 137,
"avg_line_length": 35.41975308641975,
"alnum_prop": 0.6451725339839666,
"repo_name": "nyov/dmide",
"id": "38d37bcd38fdcd92280fc816e57694e3f8e20827",
"size": "8607",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/panels/menubar.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "4390695"
}
],
"symlink_target": ""
} |
"""
Apply the style rules to the source.
"""
import argparse
import os
import sys
import time
from yapf.yapflib.yapf_api import FormatFile
try:
from .common import get_stash_dir
except (ImportError, ValueError):
from common import get_stash_dir
def apply_to_file(fp, sp, in_place=False):
    """
    Apply the style to a file.
    :param fp: path to file
    :type fp: str
    :param sp: path to style
    :type sp: str
    :param in_place: format code in-place
    :type in_place: bool
    :return: the reformatted code (None when in_place is True)
    :rtype: str or None
    """
    # FormatFile returns (reformatted_source, encoding, changed); only the
    # reformatted source is used here.  When in_place is True, yapf writes
    # the result back to the file and the first element is None.
    reformatted, _encoding, _changed = FormatFile(
        fp, style_config=sp, verify=True, in_place=in_place)
    return reformatted
def apply_to_dir(path, style, recursive=False, in_place=False, verbose=False, pyonly=True):
    """
    Apply the style to all files in a directory.
    :param path: path to directory
    :type path: str
    :param style: path to style file
    :type style: str
    :param recursive: also descend into subdirectories
    :type recursive: bool
    :param in_place: apply the changes directly to the file
    :type in_place: bool
    :param verbose: print additional information
    :type verbose: bool
    :param pyonly: only apply to .py files
    :type pyonly: bool
    """
    if verbose:
        print("Applying style to directory '{}'...".format(path))
    for name in os.listdir(path):
        full_path = os.path.join(path, name)
        # Subdirectories are only entered when recursion was requested.
        if os.path.isdir(full_path):
            if recursive:
                apply_to_dir(full_path, style, recursive=recursive, in_place=in_place, verbose=verbose, pyonly=pyonly)
            continue
        if not os.path.isfile(full_path):
            continue
        if pyonly and not name.endswith(".py"):
            if verbose:
                print("Skipping '{}' (non-py)...".format(full_path))
            continue
        if verbose:
            print("Applying style to file '{}'...".format(full_path))
        reformatted = apply_to_file(full_path, style, in_place=in_place)
        if not in_place:
            # Without in-place editing, dump each result with a header.
            print("# ======= {} =======".format(full_path))
            print(reformatted)
def main():
    """the main function"""
    parser = argparse.ArgumentParser(description="Reformat source to follow style rules")
    parser.add_argument("action", help="action to perform", choices=["apply"])
    parser.add_argument("-p", "--path", action="store", help="path to file/directory")
    parser.add_argument("-s", "--style", action="store", help="path to style file")
    parser.add_argument("-r", "--recursive", action="store_true", help="descend into subdirectories")
    parser.add_argument("-v", "--verbose", action="store_true", help="be more verbose")
    parser.add_argument("-i", "--inplace", action="store_true", help="apply the changes to the source")
    parser.add_argument("-a", "--all", action="store_true", help="apply to all files (not just *.py files)")
    ns = parser.parse_args()
    # Fall back to the StaSh installation when no explicit paths are given.
    path = ns.path if ns.path is not None else get_stash_dir()
    style = ns.style if ns.style is not None else os.path.join(get_stash_dir(), "tools", "yapf.ini")
    if ns.action == "apply":
        started = time.time()
        if not os.path.exists(path):
            print("Error: path '{}' does not exists!".format(path))
            sys.exit(1)
        elif os.path.isdir(path):
            apply_to_dir(path, style, in_place=ns.inplace, recursive=ns.recursive, pyonly=(not ns.all), verbose=ns.verbose)
        else:
            res = apply_to_file(path, style, in_place=ns.inplace)
            if not ns.inplace:
                print(res)
        finished = time.time()
        if ns.verbose:
            print("Done. Style applied in {}s".format(finished - started))


if __name__ == "__main__":
    main()
| {
"content_hash": "f8edc9f5892b6d60fdd8cf823a93a7ee",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 123,
"avg_line_length": 34.25,
"alnum_prop": 0.6004325493376588,
"repo_name": "ywangd/stash",
"id": "36d7882a55d96302161eff0b121ac3be70ba3917",
"size": "3723",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/apply_style.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "939583"
},
{
"name": "Shell",
"bytes": "1648"
}
],
"symlink_target": ""
} |
import setuptools
# Distribution metadata for the py-pool package.
setuptools.setup(
    name='py-pool',
    version="0.0.1",
    description='process and thread pools',
    license='mit',
    author='nathan todd-stone',
    author_email='me@nathants.com',
    url='http://github.com/nathants/py-pool',
    packages=['pool'],
    python_requires='>=3.6',
)
| {
"content_hash": "243986f7f846a6d18426a3c009e12db3",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 45,
"avg_line_length": 23.76923076923077,
"alnum_prop": 0.6343042071197411,
"repo_name": "nathants/pool",
"id": "db693a4dce853e657ad0d3d1bedd88c587ef33a6",
"size": "309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2758"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Replace ProductTypeIcon's `icon`/`large_icon` columns with `image`.

    Also drops the unique constraint on `product_type`, allowing a product
    type to have several icons.
    """

    def forwards(self, orm):
        """Apply the schema change."""
        # Removing unique constraint on 'ProductTypeIcon', fields ['product_type']
        db.delete_unique('cmsplugin_configurableproduct_producttypeicon', ['product_type_id'])
        # Deleting field 'ProductTypeIcon.large_icon'
        db.delete_column('cmsplugin_configurableproduct_producttypeicon', 'large_icon')
        # Deleting field 'ProductTypeIcon.icon'
        db.delete_column('cmsplugin_configurableproduct_producttypeicon', 'icon')
        # Adding field 'ProductTypeIcon.image'
        db.add_column('cmsplugin_configurableproduct_producttypeicon', 'image', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True), keep_default=False)

    def backwards(self, orm):
        """Revert the schema change where possible.

        Always raises RuntimeError: the dropped `icon` column cannot be
        restored, so this migration is irreversible.
        """
        # Adding field 'ProductTypeIcon.large_icon'
        db.add_column('cmsplugin_configurableproduct_producttypeicon', 'large_icon', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True), keep_default=False)
        # User chose to not deal with backwards NULL issues for 'ProductTypeIcon.icon'
        raise RuntimeError("Cannot reverse this migration. 'ProductTypeIcon.icon' and its values cannot be restored.")
        # Deleting field 'ProductTypeIcon.image'
        db.delete_column('cmsplugin_configurableproduct_producttypeicon', 'image')
        # Adding unique constraint on 'ProductTypeIcon', fields ['product_type']
        db.create_unique('cmsplugin_configurableproduct_producttypeicon', ['product_type_id'])

    # Frozen ORM state South uses when running this migration.
    models = {
        'cms.cmsplugin': {
            'Meta': {'object_name': 'CMSPlugin'},
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
            'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
            'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'cms.placeholder': {
            'Meta': {'object_name': 'Placeholder'},
            'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
        },
        'cmsplugin_configurableproduct.cproductsplugin': {
            'Meta': {'object_name': 'CProductsPlugin', 'db_table': "'cmsplugin_cproductsplugin'", '_ormbases': ['cms.CMSPlugin']},
            'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['configurableproduct.ProductType']", 'symmetrical': 'False'}),
            'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
            'filter_action': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
            'filter_product_attributes': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'hide_empty_categories': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'template': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'})
        },
        'cmsplugin_configurableproduct.cproducttypesplugin': {
            'Meta': {'object_name': 'CProductTypesPlugin', 'db_table': "'cmsplugin_cproducttypesplugin'", '_ormbases': ['cms.CMSPlugin']},
            'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['configurableproduct.ProductType']", 'null': 'True', 'blank': 'True'}),
            'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
            'hide_empty_categories': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'template': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'})
        },
        'cmsplugin_configurableproduct.producttypeicon': {
            'Meta': {'object_name': 'ProductTypeIcon'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'product_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'icons'", 'to': "orm['configurableproduct.ProductType']"})
        },
        'configurableproduct.productbooleanfield': {
            'Meta': {'object_name': 'ProductBooleanField'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'configurableproduct.productcharfield': {
            'Meta': {'object_name': 'ProductCharField'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'configurableproduct.productfloatfield': {
            'Meta': {'object_name': 'ProductFloatField'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'configurableproduct.productimagefield': {
            'Meta': {'object_name': 'ProductImageField'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'configurableproduct.producttype': {
            'Meta': {'object_name': 'ProductType'},
            'boolean_fields': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['configurableproduct.ProductBooleanField']", 'null': 'True', 'through': "orm['configurableproduct.TypeBoolean']", 'blank': 'True'}),
            'char_fields': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['configurableproduct.ProductCharField']", 'null': 'True', 'through': "orm['configurableproduct.TypeChar']", 'blank': 'True'}),
            'float_fields': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['configurableproduct.ProductFloatField']", 'null': 'True', 'through': "orm['configurableproduct.TypeFloat']", 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image_fields': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['configurableproduct.ProductImageField']", 'null': 'True', 'through': "orm['configurableproduct.TypeImage']", 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
        },
        'configurableproduct.typeboolean': {
            'Meta': {'ordering': "['order']", 'object_name': 'TypeBoolean'},
            'field': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['configurableproduct.ProductBooleanField']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['configurableproduct.ProductType']"})
        },
        'configurableproduct.typechar': {
            'Meta': {'ordering': "['order']", 'object_name': 'TypeChar'},
            'field': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['configurableproduct.ProductCharField']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['configurableproduct.ProductType']"})
        },
        'configurableproduct.typefloat': {
            'Meta': {'ordering': "['order']", 'object_name': 'TypeFloat'},
            'field': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['configurableproduct.ProductFloatField']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['configurableproduct.ProductType']"})
        },
        'configurableproduct.typeimage': {
            'Meta': {'ordering': "['order']", 'object_name': 'TypeImage'},
            'field': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['configurableproduct.ProductImageField']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['configurableproduct.ProductType']"})
        }
    }

    complete_apps = ['cmsplugin_configurableproduct']
| {
"content_hash": "bc73fa9d63350a85b5a05ec908b501a4",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 254,
"avg_line_length": 74.13571428571429,
"alnum_prop": 0.6011176413912709,
"repo_name": "airtonix/cmsplugin-configurableproduct",
"id": "37dcff9fa1f4534fd241032e978869d57678f9e1",
"size": "10397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cmsplugin_configurableproduct/migrations/0004_auto__del_field_producttypeicon_large_icon__del_field_producttypeicon_.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "89125"
}
],
"symlink_target": ""
} |
import hashlib
import pytest
from django.contrib.auth.models import User
from django.core import mail
from factories import SecretaryFactory, AdministratorFactory
from users.models import Employee
from users.models import Administrator
from users.models import Secretary
from django.core.exceptions import ObjectDoesNotExist
@pytest.mark.django_db
class TestRegisterUsers:
    """Integration tests for the user index and registration views."""

    def setup(self):
        # One secretary and one administrator; the administrator account is
        # used to log in, since registration requires an authenticated admin.
        self.user1 = SecretaryFactory()
        self.user2 = AdministratorFactory()

    def test_index_get(self, client):
        response = client.get('/users/')
        assert response.status_code == 200

    def test_register_user_get(self, client):
        client.login(username=self.user2.user.username, password='test_password')
        response = client.get('/users/register/')
        assert response.status_code == 200

    def test_register_user_secretary_post(self, client):
        client.login(username=self.user2.user.username, password='test_password')
        client.post('/users/register/', {'employee_type': 'secretary',
                                         'name': 'Marcelo',
                                         'phone_number': '32',
                                         'email': 'marcelo@gmail.com',
                                         'password': '123456789',
                                         'confirmPassword': '123456789'}, follow=True)
        # The view must have created a Secretary linked to the new user.
        # (A filter().exists() check fails the test cleanly instead of
        # erroring out of a bare User.objects.get() lookup.)
        assert Secretary.objects.filter(
            user__username='marcelo@gmail.com').exists()

    def test_register_user_admin_post(self, client):
        client.login(username=self.user2.user.username, password='test_password')
        client.post('/users/register/', {'employee_type': 'administrator',
                                         'name': 'Marco',
                                         'phone_number': '32',
                                         'email': 'marco@gmail.com',
                                         'password': '123456789',
                                         'confirmPassword': '123456789'}, follow=True)
        # Same check as above, for the Administrator employee type.
        assert Administrator.objects.filter(
            user__username='marco@gmail.com').exists()
| {
"content_hash": "272c3687038c5011b5a5b028ab49b448",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 100,
"avg_line_length": 43.26315789473684,
"alnum_prop": 0.5429845904298459,
"repo_name": "amigos-do-gesiel/iespv-administrativo",
"id": "41b18dc166a67108227498f471d345c775bac6c1",
"size": "2466",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "users/tests/test_register_users.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "619"
},
{
"name": "HTML",
"bytes": "27638"
},
{
"name": "Python",
"bytes": "53310"
},
{
"name": "Shell",
"bytes": "896"
}
],
"symlink_target": ""
} |
"""Compute a streaming estimation of the mean of submitted tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class StreamingMean(object):
  """Compute a streaming estimation of the mean of submitted tensors."""

  def __init__(self, shape, dtype):
    """Specify the shape and dtype of the mean to be estimated.

    Note that a float mean to zero submitted elements is NaN, while computing
    the integer mean of zero elements raises a division by zero error.

    Args:
      shape: Shape of the mean to compute.
      dtype: Data type of the mean to compute.
    """
    self._dtype = dtype
    # Running sum over all submitted values; created from an initializer
    # callable and excluded from training (second positional arg False).
    self._sum = tf.Variable(lambda: tf.zeros(shape, dtype), False)
    # Number of submitted samples (scalar int), also non-trainable.
    self._count = tf.Variable(lambda: 0, trainable=False)

  @property
  def value(self):
    """The current value of the mean."""
    return self._sum / tf.cast(self._count, self._dtype)

  @property
  def count(self):
    """The number of submitted samples."""
    return self._count

  def submit(self, value):
    """Submit a single or batch tensor to refine the streaming mean."""
    # Add a batch dimension if necessary.
    if value.shape.ndims == self._sum.shape.ndims:
      value = value[None, ...]
    # Accumulate the batch sum and the batch size atomically as one op.
    return tf.group(
        self._sum.assign_add(tf.reduce_sum(value, 0)),
        self._count.assign_add(tf.shape(value)[0]))

  def clear(self):
    """Return the mean estimate and reset the streaming statistics."""
    value = self._sum / tf.cast(self._count, self._dtype)
    # Order matters: the mean must be computed before the statistics are
    # zeroed, and the returned tensor must depend on both resets.
    with tf.control_dependencies([value]):
      reset_value = self._sum.assign(tf.zeros_like(self._sum))
      reset_count = self._count.assign(0)
    with tf.control_dependencies([reset_value, reset_count]):
      return tf.identity(value)
| {
"content_hash": "a56e24cc7ff1f0421317da1972493452",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 77,
"avg_line_length": 33.77358490566038,
"alnum_prop": 0.6726256983240223,
"repo_name": "google-research/batch-ppo",
"id": "3f620fe37a45dac8e9d980e638178ee5c5732871",
"size": "2386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "agents/tools/streaming_mean.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "176905"
}
],
"symlink_target": ""
} |
"""PyPI and direct package downloading"""
import sys
import os
import re
import shutil
import socket
import base64
import hashlib
from functools import wraps
from pkg_resources import (
CHECKOUT_DIST, Distribution, BINARY_DIST, normalize_path, SOURCE_DIST,
require, Environment, find_distributions, safe_name, safe_version,
to_filename, Requirement, DEVELOP_DIST,
)
from setuptools import ssl_support
from distutils import log
from distutils.errors import DistutilsError
from setuptools.compat import (urllib2, httplib, StringIO, HTTPError,
urlparse, urlunparse, unquote, splituser,
url2pathname, name2codepoint,
unichr, urljoin, urlsplit, urlunsplit,
ConfigParser)
from setuptools.compat import filterfalse
from fnmatch import translate
from setuptools.py26compat import strip_fragment
from setuptools.py27compat import get_all_headers
# Fragment pattern for explicit "#egg=name-version" download annotations.
EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.]+)$')
# Extracts the target of an href attribute from raw HTML.
HREF = re.compile("""href\\s*=\\s*['"]?([^'"> ]+)""", re.I)
# this is here to fix emacs' cruddy broken syntax highlighting
# Matches PyPI's legacy "<a>...</a> (md5)" download markup; groups are
# (file URL, link text, md5 digest).
PYPI_MD5 = re.compile(
    '<a href="([^"#]+)">([^<]+)</a>\n\s+\\(<a (?:title="MD5 hash"\n\s+)'
    'href="[^?]+\?:action=show_md5&digest=([0-9a-f]{32})">md5</a>\\)'
)
# Matcher for a URL scheme prefix such as "http:" or "file:".
URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):', re.I).match
# Recognized source-distribution archive suffixes, tried in this order.
EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split()
__all__ = [
    'PackageIndex', 'distros_for_url', 'parse_bdist_wininst',
    'interpret_distro_name',
]
# Default timeout (seconds) for network operations.
_SOCKET_TIMEOUT = 15
def parse_bdist_wininst(name):
    """Return (base,pyversion) or (None,None) for possible .exe name"""
    lowered = name.lower()
    base = py_ver = plat = None
    # bdist_wininst filenames look like NAME.win32.exe,
    # NAME.win32-pyX.Y.exe, NAME.win-amd64.exe or NAME.win-amd64-pyX.Y.exe.
    if lowered.endswith('.exe'):
        if lowered.endswith('.win32.exe'):
            base, plat = name[:-10], 'win32'
        elif lowered.startswith('.win32-py', -16):
            # '.win32-pyX.Y.exe' is 16 chars long; the version sits at [-7:-4].
            base, py_ver, plat = name[:-16], name[-7:-4], 'win32'
        elif lowered.endswith('.win-amd64.exe'):
            base, plat = name[:-14], 'win-amd64'
        elif lowered.startswith('.win-amd64-py', -20):
            base, py_ver, plat = name[:-20], name[-7:-4], 'win-amd64'
    return base, py_ver, plat
def egg_info_for_url(url):
    """Split `url` into the download's base filename and its fragment."""
    parsed = urlparse(url)
    segments = parsed[2].split('/')
    base = unquote(segments[-1])
    # SourceForge mirror links end in /download; the real filename is the
    # preceding path segment.  XXX Yuck
    if parsed[1] == 'sourceforge.net' and base == 'download':
        base = unquote(segments[-2])
    fragment = parsed[5]
    # A '#' may survive inside the basename when the URL was pre-unquoted.
    if '#' in base:
        base, fragment = base.split('#', 1)
    return base, fragment
def distros_for_url(url, metadata=None):
    """Yield egg or source distribution objects that might be found at a URL"""
    base, fragment = egg_info_for_url(url)
    for dist in distros_for_location(url, base, metadata):
        yield dist
    if not fragment:
        return
    match = EGG_FRAGMENT.match(fragment)
    if not match:
        return
    # An explicit "#egg=name-version" fragment marks a checkout link.
    for dist in interpret_distro_name(
            url, match.group(1), metadata, precedence=CHECKOUT_DIST):
        yield dist
def distros_for_location(location, basename, metadata=None):
    """Yield egg or source distribution objects based on basename"""
    if basename.endswith('.egg.zip'):
        basename = basename[:-4]  # strip the .zip
    if basename.endswith('.egg') and '-' in basename:
        # only one, unambiguous interpretation
        return [Distribution.from_location(location, basename, metadata)]
    if basename.endswith('.exe'):
        win_base, py_ver, platform = parse_bdist_wininst(basename)
        if win_base is not None:
            return interpret_distro_name(
                location, win_base, metadata, py_ver, BINARY_DIST, platform
            )
    # Try source distro extensions (.zip, .tgz, etc.), first match wins.
    matching = [ext for ext in EXTENSIONS if basename.endswith(ext)]
    if matching:
        stem = basename[:-len(matching[0])]
        return interpret_distro_name(location, stem, metadata)
    return []  # no extension matched
def distros_for_filename(filename, metadata=None):
    """Yield possible egg or source distribution objects based on a filename"""
    location = normalize_path(filename)
    basename = os.path.basename(filename)
    return distros_for_location(location, basename, metadata)
def interpret_distro_name(
        location, basename, metadata, py_version=None, precedence=SOURCE_DIST,
        platform=None
        ):
    """Generate alternative interpretations of a source distro name

    Note: if `location` is a filesystem filename, you should call
    ``pkg_resources.normalize_path()`` on it before passing it to this
    routine!
    """
    # Generate alternative interpretations of a source distro name
    # Because some packages are ambiguous as to name/versions split
    # e.g. "adns-python-1.1.0", "egenix-mx-commercial", etc.
    # So, we generate each possible interepretation (e.g. "adns, python-1.1.0"
    # "adns-python, 1.1.0", and "adns-python-1.1.0, no version"). In practice,
    # the spurious interpretations should be ignored, because in the event
    # there's also an "adns" package, the spurious "python-1.1.0" version will
    # compare lower than any numeric version number, and is therefore unlikely
    # to match a request for it. It's still a potential problem, though, and
    # in the long run PyPI and the distutils should go for "safe" names and
    # versions in distribution archive names (sdist and bdist).
    parts = basename.split('-')
    if not py_version:
        # A "py2.x" component means this is a bdist_dumb archive, not an
        # sdist -- bail out rather than yield bogus interpretations.
        for part in parts[2:]:
            if len(part) == 5 and part.startswith('py2.'):
                return
    for split_point in range(1, len(parts) + 1):
        yield Distribution(
            location, metadata, '-'.join(parts[:split_point]),
            '-'.join(parts[split_point:]),
            py_version=py_version, precedence=precedence,
            platform=platform
        )
# From Python 2.7 docs
def unique_everseen(iterable, key=None):
"List unique elements, preserving order. Remember all elements ever seen."
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
# unique_everseen('ABBCcAD', str.lower) --> A B C D
seen = set()
seen_add = seen.add
if key is None:
for element in filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
def unique_values(func):
    """
    Wrap a function returning an iterable such that the resulting iterable
    only ever yields unique items.
    """
    @wraps(func)
    def deduplicated(*args, **kwargs):
        return unique_everseen(func(*args, **kwargs))
    return deduplicated
# Matches an HTML tag carrying a rel="..." attribute; group 1 is the whole
# tag body, group 2 the rel value.
REL = re.compile("""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I)
# this line is here to fix emacs' cruddy broken syntax highlighting
@unique_values
def find_external_links(url, page):
    """Find rel="homepage" and rel="download" links in `page`, yielding URLs"""
    # Anchors whose rel attribute (possibly comma-separated) names a
    # homepage or download page.
    for match in REL.finditer(page):
        tag, rel = match.groups()
        rels = set(map(str.strip, rel.lower().split(',')))
        if 'homepage' in rels or 'download' in rels:
            for match in HREF.finditer(tag):
                yield urljoin(url, htmldecode(match.group(1)))
    # Older PyPI pages render these links in a table instead; pick the first
    # href after each header cell.
    for tag in ("<th>Home Page", "<th>Download URL"):
        pos = page.find(tag)
        if pos != -1:
            match = HREF.search(page, pos)
            if match:
                yield urljoin(url, htmldecode(match.group(1)))
# User-Agent sent with our requests: Python version plus the installed
# setuptools version.
user_agent = "Python-urllib/%s setuptools/%s" % (
    sys.version[:3], require('setuptools')[0].version
)
class ContentChecker(object):
    """
    A null content checker that defines the interface for checking content
    """

    def feed(self, block):
        """
        Feed a block of data to the hash.
        """
        # Null checker: the data is simply discarded.

    def is_valid(self):
        """
        Check the hash. Return False if validation fails.
        """
        # Nothing was hashed, so there is nothing that can fail.
        return True

    def report(self, reporter, template):
        """
        Call reporter with information about the checker (hash name)
        substituted into the template.
        """
        # Null checker: the reporter is never invoked.
class HashChecker(ContentChecker):
    """Validate downloaded content against a digest named in a URL fragment."""

    # Recognizes fragments such as "md5=abc123..." for any supported digest.
    pattern = re.compile(
        r'(?P<hash_name>sha1|sha224|sha384|sha256|sha512|md5)='
        r'(?P<expected>[a-f0-9]+)'
    )

    def __init__(self, hash_name, expected):
        self.hash_name = hash_name
        self.hash = hashlib.new(hash_name)
        self.expected = expected

    @classmethod
    def from_url(cls, url):
        "Construct a (possibly null) ContentChecker from a URL"
        fragment = urlparse(url)[-1]
        if fragment:
            match = cls.pattern.search(fragment)
            if match:
                return cls(**match.groupdict())
        # No usable digest in the URL: fall back to the null checker.
        return ContentChecker()

    def feed(self, block):
        self.hash.update(block)

    def is_valid(self):
        return self.hash.hexdigest() == self.expected

    def report(self, reporter, template):
        return reporter(template % self.hash_name)
class PackageIndex(Environment):
"""A distribution index that scans web pages for download URLs"""
    def __init__(
            self, index_url="https://pypi.python.org/simple", hosts=('*',),
            ca_bundle=None, verify_ssl=True, *args, **kw
    ):
        """Create an index scanner.

        :param index_url: base URL of the (PyPI-style) simple index
        :param hosts: glob patterns of hosts downloads are allowed from
        :param ca_bundle: path to a CA bundle for SSL verification
        :param verify_ssl: verify HTTPS certificates when support is available
        """
        Environment.__init__(self, *args, **kw)
        # Normalize the index URL to end with exactly one slash.
        self.index_url = index_url + "/"[:not index_url.endswith('/')]
        self.scanned_urls = {}   # URLs already evaluated (any outcome)
        self.fetched_urls = {}   # URLs whose pages were actually retrieved
        self.package_pages = {}  # lowercased project name -> {page URL: True}
        # Predicate testing a host name against the `hosts` glob patterns.
        self.allows = re.compile('|'.join(map(translate, hosts))).match
        self.to_scan = []        # find-links URLs deferred until prescan()
        # Use the SSL-verifying opener only when verification can succeed;
        # otherwise fall back to a plain urlopen.
        if verify_ssl and ssl_support.is_available and (ca_bundle or ssl_support.find_ca_bundle()):
            self.opener = ssl_support.opener_for(ca_bundle)
        else:
            self.opener = urllib2.urlopen
    def process_url(self, url, retrieve=False):
        """Evaluate a URL as a possible download, and maybe retrieve it"""
        if url in self.scanned_urls and not retrieve:
            return
        self.scanned_urls[url] = True
        # Schemeless URLs are treated as local filesystem paths.
        if not URL_SCHEME(url):
            self.process_filename(url)
            return
        else:
            dists = list(distros_for_url(url))
            if dists:
                if not self.url_ok(url):
                    return
                self.debug("Found link: %s", url)
        # Direct download links, non-retrieval scans, and already-fetched
        # pages only need their distributions registered.
        if dists or not retrieve or url in self.fetched_urls:
            list(map(self.add, dists))
            return  # don't need the actual page
        if not self.url_ok(url):
            self.fetched_urls[url] = True
            return
        self.info("Reading %s", url)
        self.fetched_urls[url] = True  # prevent multiple fetch attempts
        f = self.open_url(url, "Download error on %s: %%s -- Some packages may not be found!" % url)
        if f is None: return
        self.fetched_urls[f.url] = True
        if 'html' not in f.headers.get('content-type', '').lower():
            f.close()  # not html, we can't process it
            return
        base = f.url  # handle redirects
        page = f.read()
        if not isinstance(page, str):  # We are in Python 3 and got bytes. We want str.
            if isinstance(f, HTTPError):
                # Errors have no charset, assume latin1:
                charset = 'latin-1'
            else:
                charset = f.headers.get_param('charset') or 'latin-1'
            page = page.decode(charset, "ignore")
        f.close()
        # Recursively evaluate every link found on the page.
        for match in HREF.finditer(page):
            link = urljoin(base, htmldecode(match.group(1)))
            self.process_url(link)
        # Pages under the index itself get package-page indexing as well.
        if url.startswith(self.index_url) and getattr(f, 'code', None) != 404:
            page = self.process_index(url, page)
    def process_filename(self, fn, nested=False):
        """Register distributions found at a local file or directory path.

        Directories are scanned one level deep: `nested` marks recursive
        invocations, which do not descend further.
        """
        # process filenames or directories
        if not os.path.exists(fn):
            self.warn("Not found: %s", fn)
            return
        if os.path.isdir(fn) and not nested:
            path = os.path.realpath(fn)
            for item in os.listdir(path):
                self.process_filename(os.path.join(path, item), True)
        dists = distros_for_filename(fn)
        if dists:
            self.debug("Found: %s", fn)
            list(map(self.add, dists))
    def url_ok(self, url, fatal=False):
        """Return True if `url`'s host is allowed.

        For a disallowed host: raises DistutilsError when `fatal`, otherwise
        warns and returns None.
        """
        s = URL_SCHEME(url)
        # file: URLs are always trusted; any other scheme must have a host
        # matching the allowed-hosts patterns.
        if (s and s.group(1).lower() == 'file') or self.allows(urlparse(url)[1]):
            return True
        msg = ("\nNote: Bypassing %s (disallowed host; see "
               "http://bit.ly/1dg9ijs for details).\n")
        if fatal:
            raise DistutilsError(msg % url)
        else:
            self.warn(msg, url)
def scan_egg_links(self, search_path):
for item in search_path:
if os.path.isdir(item):
for entry in os.listdir(item):
if entry.endswith('.egg-link'):
self.scan_egg_link(item, entry)
def scan_egg_link(self, path, entry):
lines = [_f for _f in map(str.strip,
open(os.path.join(path, entry))) if _f]
if len(lines) == 2:
for dist in find_distributions(os.path.join(path, lines[0])):
dist.location = os.path.join(path, *lines)
dist.precedence = SOURCE_DIST
self.add(dist)
    def process_index(self, url, page):
        """Process the contents of a PyPI page"""
        def scan(link):
            # Process a URL to see if it's for a package page
            if link.startswith(self.index_url):
                parts = list(map(
                    unquote, link[len(self.index_url):].split('/')
                ))
                if len(parts) == 2 and '#' not in parts[1]:
                    # it's a package page, sanitize and index it
                    pkg = safe_name(parts[0])
                    ver = safe_version(parts[1])
                    self.package_pages.setdefault(pkg.lower(), {})[link] = True
                    return to_filename(pkg), to_filename(ver)
            return None, None
        # process an index page into the package-page index
        for match in HREF.finditer(page):
            try:
                scan(urljoin(url, htmldecode(match.group(1))))
            except ValueError:
                pass
        pkg, ver = scan(url)  # ensure this page is in the page index
        if pkg:
            # process individual package page
            for new_url in find_external_links(url, page):
                # Process the found URL
                base, frag = egg_info_for_url(new_url)
                if base.endswith('.py') and not frag:
                    # Bare .py downloads need a version; tag them with an
                    # #egg fragment, or force a full index scan to find one.
                    if ver:
                        new_url += '#egg=%s-%s' % (pkg, ver)
                    else:
                        self.need_version_info(url)
                self.scan_url(new_url)
            # Rewrite PyPI's legacy md5 markup into plain #md5= links.
            return PYPI_MD5.sub(
                lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1, 3, 2), page
            )
        else:
            return ""  # no sense double-scanning non-package pages
def need_version_info(self, url):
self.scan_all(
"Page at %s links to .py file(s) without version info; an index "
"scan is required.", url
)
def scan_all(self, msg=None, *args):
if self.index_url not in self.fetched_urls:
if msg: self.warn(msg, *args)
self.info(
"Scanning index of all packages (this may take a while)"
)
self.scan_url(self.index_url)
    def find_packages(self, requirement):
        """Scan every index page that might list distributions of `requirement`."""
        self.scan_url(self.index_url + requirement.unsafe_name + '/')
        if not self.package_pages.get(requirement.key):
            # Fall back to safe version of the name
            self.scan_url(self.index_url + requirement.project_name + '/')
        if not self.package_pages.get(requirement.key):
            # We couldn't find the target package, so search the index page too
            self.not_found_in_index(requirement)
        for url in list(self.package_pages.get(requirement.key, ())):
            # scan each page that might be related to the desired package
            self.scan_url(url)
def obtain(self, requirement, installer=None):
self.prescan()
self.find_packages(requirement)
for dist in self[requirement.key]:
if dist in requirement:
return dist
self.debug("%s does not match %s", requirement, dist)
return super(PackageIndex, self).obtain(requirement, installer)
def check_hash(self, checker, filename, tfp):
"""
checker is a ContentChecker
"""
checker.report(self.debug,
"Validating %%s checksum for %s" % filename)
if not checker.is_valid():
tfp.close()
os.unlink(filename)
raise DistutilsError(
"%s validation failed for %s; "
"possible download problem?" % (
checker.hash.name, os.path.basename(filename))
)
def add_find_links(self, urls):
"""Add `urls` to the list that will be prescanned for searches"""
for url in urls:
if (
self.to_scan is None # if we have already "gone online"
or not URL_SCHEME(url) # or it's a local file/directory
or url.startswith('file:')
or list(distros_for_url(url)) # or a direct package link
):
# then go ahead and process it now
self.scan_url(url)
else:
# otherwise, defer retrieval till later
self.to_scan.append(url)
def prescan(self):
"""Scan urls scheduled for prescanning (e.g. --find-links)"""
if self.to_scan:
list(map(self.scan_url, self.to_scan))
self.to_scan = None # from now on, go ahead and process immediately
def not_found_in_index(self, requirement):
if self[requirement.key]: # we've seen at least one distro
meth, msg = self.info, "Couldn't retrieve index page for %r"
else: # no distros seen for this name, might be misspelled
meth, msg = (self.warn,
"Couldn't find index page for %r (maybe misspelled?)")
meth(msg, requirement.unsafe_name)
self.scan_all()
    def download(self, spec, tmpdir):
        """Locate and/or download `spec` to `tmpdir`, returning a local path

        `spec` may be a ``Requirement`` object, or a string containing a URL,
        an existing local filename, or a project/version requirement spec
        (i.e. the string form of a ``Requirement`` object). If it is the URL
        of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one
        that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is
        automatically created alongside the downloaded file.

        If `spec` is a ``Requirement`` object or a string containing a
        project/version requirement spec, this method returns the location of
        a matching distribution (possibly after downloading it to `tmpdir`).
        If `spec` is a locally existing file or directory name, it is simply
        returned unchanged. If `spec` is a URL, it is downloaded to a subpath
        of `tmpdir`, and the local filename is returned. Various errors may be
        raised if a problem occurs during downloading.
        """
        if not isinstance(spec, Requirement):
            scheme = URL_SCHEME(spec)
            if scheme:
                # It's a url, download it to tmpdir
                found = self._download_url(scheme.group(1), spec, tmpdir)
                base, fragment = egg_info_for_url(spec)
                if base.endswith('.py'):
                    # Plain .py download: synthesize a setup.py next to it.
                    found = self.gen_setup(found, fragment, tmpdir)
                return found
            elif os.path.exists(spec):
                # Existing file or directory, just return it
                return spec
            else:
                # Neither URL nor local path: try to parse a requirement spec.
                try:
                    spec = Requirement.parse(spec)
                except ValueError:
                    raise DistutilsError(
                        "Not a URL, existing file, or requirement spec: %r" %
                        (spec,)
                    )
        # Requirement object: fetch a matching distribution; returns None
        # when no match was found (fetch_distribution returned None).
        return getattr(self.fetch_distribution(spec, tmpdir), 'location', None)
    def fetch_distribution(
            self, requirement, tmpdir, force_scan=False, source=False,
            develop_ok=False, local_index=None
    ):
        """Obtain a distribution suitable for fulfilling `requirement`

        `requirement` must be a ``pkg_resources.Requirement`` instance.
        If necessary, or if the `force_scan` flag is set, the requirement is
        searched for in the (online) package index as well as the locally
        installed packages. If a distribution matching `requirement` is found,
        the returned distribution's ``location`` is the value you would have
        gotten from calling the ``download()`` method with the matching
        distribution's URL or filename. If no matching distribution is found,
        ``None`` is returned.

        If the `source` flag is set, only source distributions and source
        checkout links will be considered. Unless the `develop_ok` flag is
        set, development and system eggs (i.e., those using the ``.egg-info``
        format) will be ignored.
        """
        # process a Requirement
        self.info("Searching for %s", requirement)
        skipped = {}
        dist = None

        def find(req, env=None):
            if env is None:
                env = self
            # Find a matching distribution; may be called more than once
            for dist in env[req.key]:
                if dist.precedence == DEVELOP_DIST and not develop_ok:
                    # Warn about each skipped develop/system egg only once.
                    if dist not in skipped:
                        self.warn("Skipping development or system egg: %s", dist)
                        skipped[dist] = 1
                    continue
                if dist in req and (dist.precedence <= SOURCE_DIST or not source):
                    return dist

        # Search order: forced scan, then the optional local index, then a
        # deferred prescan, and finally an on-demand package-page scan.
        if force_scan:
            self.prescan()
            self.find_packages(requirement)
            dist = find(requirement)
        if local_index is not None:
            dist = dist or find(requirement, local_index)
        if dist is None:
            if self.to_scan is not None:
                self.prescan()
            dist = find(requirement)
        if dist is None and not force_scan:
            self.find_packages(requirement)
            dist = find(requirement)
        if dist is None:
            self.warn(
                "No local packages or download links found for %s%s",
                (source and "a source distribution of " or ""),
                requirement,
            )
        else:
            self.info("Best match: %s", dist)
            # Return a clone pointing at the downloaded local location.
            return dist.clone(location=self.download(dist.location, tmpdir))
def fetch(self, requirement, tmpdir, force_scan=False, source=False):
"""Obtain a file suitable for fulfilling `requirement`
DEPRECATED; use the ``fetch_distribution()`` method now instead. For
backward compatibility, this routine is identical but returns the
``location`` of the downloaded distribution instead of a distribution
object.
"""
dist = self.fetch_distribution(requirement, tmpdir, force_scan, source)
if dist is not None:
return dist.location
return None
def gen_setup(self, filename, fragment, tmpdir):
match = EGG_FRAGMENT.match(fragment)
dists = match and [
d for d in
interpret_distro_name(filename, match.group(1), None) if d.version
] or []
if len(dists) == 1: # unambiguous ``#egg`` fragment
basename = os.path.basename(filename)
# Make sure the file has been downloaded to the temp dir.
if os.path.dirname(filename) != tmpdir:
dst = os.path.join(tmpdir, basename)
from setuptools.command.easy_install import samefile
if not samefile(filename, dst):
shutil.copy2(filename, dst)
filename = dst
file = open(os.path.join(tmpdir, 'setup.py'), 'w')
file.write(
"from setuptools import setup\n"
"setup(name=%r, version=%r, py_modules=[%r])\n"
% (
dists[0].project_name, dists[0].version,
os.path.splitext(basename)[0]
)
)
file.close()
return filename
elif match:
raise DistutilsError(
"Can't unambiguously interpret project/version identifier %r; "
"any dashes in the name or version should be escaped using "
"underscores. %r" % (fragment, dists)
)
else:
raise DistutilsError(
"Can't process plain .py files without an '#egg=name-version'"
" suffix to enable automatic setup script generation."
)
dl_blocksize = 8192
    def _download_to(self, url, filename):
        """Stream *url* into *filename*, verifying any ``#md5=``-style hash.

        Returns the response headers.  reporthook() is called once up front
        and once per block; check_hash() removes the file and raises on a
        checksum mismatch.
        """
        self.info("Downloading %s", url)
        # Download the file
        # (`info` is assigned but never used; kept as-is.)
        fp, tfp, info = None, None, None
        try:
            checker = HashChecker.from_url(url)
            fp = self.open_url(strip_fragment(url))
            if isinstance(fp, HTTPError):
                # open_url returns HTTPError objects rather than raising them.
                raise DistutilsError(
                    "Can't download %s: %s %s" % (url, fp.code, fp.msg)
                )
            headers = fp.info()
            blocknum = 0
            bs = self.dl_blocksize
            size = -1
            if "content-length" in headers:
                # Some servers return multiple Content-Length headers :(
                sizes = get_all_headers(headers, 'Content-Length')
                size = max(map(int, sizes))
            self.reporthook(url, filename, blocknum, bs, size)
            tfp = open(filename, 'wb')
            while True:
                block = fp.read(bs)
                if block:
                    checker.feed(block)
                    tfp.write(block)
                    blocknum += 1
                    self.reporthook(url, filename, blocknum, bs, size)
                else:
                    break
            self.check_hash(checker, filename, tfp)
            return headers
        finally:
            # Close both ends even on error; check_hash may already have
            # closed tfp, in which case close() is a harmless no-op.
            if fp: fp.close()
            if tfp: tfp.close()
def reporthook(self, url, filename, blocknum, blksize, size):
pass # no-op
    def open_url(self, url, warning=None):
        """Open *url* and return a file-like response.

        ``file:`` URLs are served via local_open().  HTTP errors are
        *returned* (as the HTTPError object) rather than raised.  For other
        failures: when *warning* (a %-format string) is given it is logged
        and the method falls through returning None; otherwise a
        DistutilsError is raised.
        """
        if url.startswith('file:'):
            return local_open(url)
        try:
            return open_with_auth(url, self.opener)
        except (ValueError, httplib.InvalidURL):
            v = sys.exc_info()[1]
            msg = ' '.join([str(arg) for arg in v.args])
            if warning:
                self.warn(warning, msg)
            else:
                raise DistutilsError('%s %s' % (url, msg))
        except urllib2.HTTPError:
            # HTTP-level errors are handed back to the caller for inspection.
            v = sys.exc_info()[1]
            return v
        except urllib2.URLError:
            v = sys.exc_info()[1]
            if warning:
                self.warn(warning, v.reason)
            else:
                raise DistutilsError("Download error for %s: %s"
                                     % (url, v.reason))
        except httplib.BadStatusLine:
            v = sys.exc_info()[1]
            if warning:
                self.warn(warning, v.line)
            else:
                raise DistutilsError(
                    '%s returned a bad status line. The server might be '
                    'down, %s' %
                    (url, v.line)
                )
        except httplib.HTTPException:
            # Catch-all for remaining HTTP protocol errors (must come after
            # the more specific BadStatusLine handler above).
            v = sys.exc_info()[1]
            if warning:
                self.warn(warning, v)
            else:
                raise DistutilsError("Download error for %s: %s"
                                     % (url, v))
def _download_url(self, scheme, url, tmpdir):
# Determine download filename
#
name, fragment = egg_info_for_url(url)
if name:
while '..' in name:
name = name.replace('..', '.').replace('\\', '_')
else:
name = "__downloaded__" # default if URL has no path contents
if name.endswith('.egg.zip'):
name = name[:-4] # strip the extra .zip before download
filename = os.path.join(tmpdir, name)
# Download the file
#
if scheme == 'svn' or scheme.startswith('svn+'):
return self._download_svn(url, filename)
elif scheme == 'git' or scheme.startswith('git+'):
return self._download_git(url, filename)
elif scheme.startswith('hg+'):
return self._download_hg(url, filename)
elif scheme == 'file':
return url2pathname(urlparse(url)[2])
else:
self.url_ok(url, True) # raises error if not allowed
return self._attempt_download(url, filename)
    def scan_url(self, url):
        # Process *url* with retries enabled (second arg of process_url).
        self.process_url(url, True)
def _attempt_download(self, url, filename):
headers = self._download_to(url, filename)
if 'html' in headers.get('content-type', '').lower():
return self._download_html(url, headers, filename)
else:
return filename
def _download_html(self, url, headers, filename):
file = open(filename)
for line in file:
if line.strip():
# Check for a subversion index page
if re.search(r'<title>([^- ]+ - )?Revision \d+:', line):
# it's a subversion index page:
file.close()
os.unlink(filename)
return self._download_svn(url, filename)
break # not an index page
file.close()
os.unlink(filename)
raise DistutilsError("Unexpected HTML page found at " + url)
def _download_svn(self, url, filename):
url = url.split('#', 1)[0] # remove any fragment for svn's sake
creds = ''
if url.lower().startswith('svn:') and '@' in url:
scheme, netloc, path, p, q, f = urlparse(url)
if not netloc and path.startswith('//') and '/' in path[2:]:
netloc, path = path[2:].split('/', 1)
auth, host = splituser(netloc)
if auth:
if ':' in auth:
user, pw = auth.split(':', 1)
creds = " --username=%s --password=%s" % (user, pw)
else:
creds = " --username=" + auth
netloc = host
url = urlunparse((scheme, netloc, url, p, q, f))
self.info("Doing subversion checkout from %s to %s", url, filename)
os.system("svn checkout%s -q %s %s" % (creds, url, filename))
return filename
@staticmethod
def _vcs_split_rev_from_url(url, pop_prefix=False):
scheme, netloc, path, query, frag = urlsplit(url)
scheme = scheme.split('+', 1)[-1]
# Some fragment identification fails
path = path.split('#', 1)[0]
rev = None
if '@' in path:
path, rev = path.rsplit('@', 1)
# Also, discard fragment
url = urlunsplit((scheme, netloc, path, query, ''))
return url, rev
def _download_git(self, url, filename):
filename = filename.split('#', 1)[0]
url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
self.info("Doing git clone from %s to %s", url, filename)
os.system("git clone --quiet %s %s" % (url, filename))
if rev is not None:
self.info("Checking out %s", rev)
os.system("(cd %s && git checkout --quiet %s)" % (
filename,
rev,
))
return filename
def _download_hg(self, url, filename):
filename = filename.split('#', 1)[0]
url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
self.info("Doing hg clone from %s to %s", url, filename)
os.system("hg clone --quiet %s %s" % (url, filename))
if rev is not None:
self.info("Updating to %s", rev)
os.system("(cd %s && hg up -C -r %s >&-)" % (
filename,
rev,
))
return filename
    # Thin logging wrappers; subclasses (e.g. easy_install's index) may
    # override these to redirect PackageIndex output.
    def debug(self, msg, *args):
        log.debug(msg, *args)
    def info(self, msg, *args):
        log.info(msg, *args)
    def warn(self, msg, *args):
        log.warn(msg, *args)
# Bound ``sub`` method of a pattern matching a character entity reference
# (a decimal numeric reference, a hexadecimal numeric reference, or a
# named reference); the trailing ';' is optional.
entity_sub = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?').sub
def uchr(c):
    """Return the character for codepoint *c*; non-ints pass through unchanged."""
    if not isinstance(c, int):
        return c
    # unichr handles wide codepoints on Python 2; chr suffices below 256.
    return unichr(c) if c > 255 else chr(c)
def decode_entity(match):
    """Convert one matched HTML entity reference into its character."""
    ref = match.group(1)
    if ref.startswith('#x'):
        codepoint = int(ref[2:], 16)
    elif ref.startswith('#'):
        codepoint = int(ref[1:])
    else:
        # Named entity; fall back to the raw matched text when unknown.
        codepoint = name2codepoint.get(ref, match.group(0))
    return uchr(codepoint)
def htmldecode(text):
    """Decode HTML entities in the given text."""
    decoded = entity_sub(decode_entity, text)
    return decoded
def socket_timeout(timeout=15):
    """Decorator factory: run the wrapped callable under a socket timeout.

    The previous default timeout is restored afterwards, even on error.
    """
    def decorator(func):
        def wrapper(*args, **kwargs):
            saved = socket.getdefaulttimeout()
            socket.setdefaulttimeout(timeout)
            try:
                return func(*args, **kwargs)
            finally:
                socket.setdefaulttimeout(saved)
        return wrapper
    return decorator
def _encode_auth(auth):
"""
A function compatible with Python 2.3-3.3 that will encode
auth from a URL suitable for an HTTP header.
>>> str(_encode_auth('username%3Apassword'))
'dXNlcm5hbWU6cGFzc3dvcmQ='
Long auth strings should not cause a newline to be inserted.
>>> long_auth = 'username:' + 'password'*10
>>> chr(10) in str(_encode_auth(long_auth))
False
"""
auth_s = unquote(auth)
# convert to bytes
auth_bytes = auth_s.encode()
# use the legacy interface for Python 2.3 support
encoded_bytes = base64.encodestring(auth_bytes)
# convert back to a string
encoded = encoded_bytes.decode()
# strip the trailing carriage return
return encoded.replace('\n', '')
class Credential(object):
    """
    A username/password pair. Use like a namedtuple: it iterates as a
    2-tuple and formats as ``username:password``.
    """

    def __init__(self, username, password):
        self.username = username
        self.password = password

    def __iter__(self):
        return iter((self.username, self.password))

    def __str__(self):
        return '%(username)s:%(password)s' % vars(self)
class PyPIConfig(ConfigParser.ConfigParser):
    """Parsed ~/.pypirc exposing per-repository credentials."""

    def __init__(self):
        """
        Load from ~/.pypirc
        """
        defaults = dict.fromkeys(['username', 'password', 'repository'], '')
        ConfigParser.ConfigParser.__init__(self, defaults)
        rc = os.path.join(os.path.expanduser('~'), '.pypirc')
        if os.path.exists(rc):
            self.read(rc)

    @property
    def creds_by_repository(self):
        """Mapping of repository URL -> Credential for sections that set one."""
        sections_with_repositories = [
            section for section in self.sections()
            if self.get(section, 'repository').strip()
        ]
        return dict(self._get_repo_cred(section)
                    for section in sections_with_repositories)

    def _get_repo_cred(self, section):
        # Build one (repository_url, Credential) pair from a config section.
        repo = self.get(section, 'repository').strip()
        cred = Credential(
            self.get(section, 'username').strip(),
            self.get(section, 'password').strip(),
        )
        return repo, cred

    def find_credential(self, url):
        """
        If the URL indicated appears to be a repository defined in this
        config, return the credential for that repository.
        """
        for repository, cred in self.creds_by_repository.items():
            if url.startswith(repository):
                return cred
def open_with_auth(url, opener=urllib2.urlopen):
    """Open a urllib2 request, handling HTTP authentication.

    Credentials are taken from the URL's ``user:pass@`` netloc when present,
    otherwise looked up in ~/.pypirc via PyPIConfig.
    """
    scheme, netloc, path, params, query, frag = urlparse(url)
    # Double scheme does not raise on Mac OS X as revealed by a
    # failing test. We would expect "nonnumeric port". Refs #20.
    if netloc.endswith(':'):
        raise httplib.InvalidURL("nonnumeric port: ''")
    if scheme in ('http', 'https'):
        auth, host = splituser(netloc)
    else:
        auth = None
    if not auth:
        # No credentials embedded in the URL; try the .pypirc config.
        cred = PyPIConfig().find_credential(url)
        if cred:
            auth = str(cred)
            info = cred.username, url
            log.info('Authenticating as %s for %s (from .pypirc)' % info)
    if auth:
        auth = "Basic " + _encode_auth(auth)
        # Re-issue the request against the URL with credentials stripped.
        new_url = urlunparse((scheme, host, path, params, query, frag))
        request = urllib2.Request(new_url)
        request.add_header("Authorization", auth)
    else:
        request = urllib2.Request(url)
    request.add_header('User-Agent', user_agent)
    fp = opener(request)
    if auth:
        # Put authentication info back into request URL if same host,
        # so that links found on the page will work
        s2, h2, path2, param2, query2, frag2 = urlparse(fp.url)
        if s2 == scheme and h2 == host:
            fp.url = urlunparse((s2, netloc, path2, param2, query2, frag2))
    return fp

# adding a timeout to avoid freezing package_index
open_with_auth = socket_timeout(_SOCKET_TIMEOUT)(open_with_auth)
def fix_sf_url(url):
    """No-op kept for backward compatibility with older callers."""
    return url
def local_open(url):
    """Read a local path, with special support for directories.

    Files are served via urlopen; a directory is rendered as an HTML page
    (its index.html verbatim if present, otherwise a synthesized listing).
    Always returns a file-like object (an HTTPError instance, status 200 on
    success, 404 when the path does not exist).
    """
    scheme, server, path, param, query, frag = urlparse(url)
    filename = url2pathname(path)
    if os.path.isfile(filename):
        return urllib2.urlopen(url)
    elif path.endswith('/') and os.path.isdir(filename):
        files = []
        for f in os.listdir(filename):
            if f == 'index.html':
                # A real index page exists: serve its contents verbatim.
                # Use a context manager so the handle is closed even if
                # read() raises (the original leaked it in that case).
                with open(os.path.join(filename, f), 'r') as fp:
                    body = fp.read()
                break
            elif os.path.isdir(os.path.join(filename, f)):
                f += '/'
            files.append("<a href=%r>%s</a>" % (f, f))
        else:
            # No index.html found: synthesize a listing page.
            body = ("<html><head><title>%s</title>" % url) + \
                   "</head><body>%s</body></html>" % '\n'.join(files)
        status, message = 200, "OK"
    else:
        status, message, body = 404, "Path not found", "Not found"
    headers = {'content-type': 'text/html'}
    return HTTPError(url, status, message, headers, StringIO(body))
| {
"content_hash": "bc03fdd35a174a4ed701b3ea0d7b6182",
"timestamp": "",
"source": "github",
"line_count": 1085,
"max_line_length": 100,
"avg_line_length": 36.02396313364055,
"alnum_prop": 0.5577188763240035,
"repo_name": "ppyordanov/HCI_4_Future_Cities",
"id": "d76c2c0e4384c8d851212192a340cd5589b8ed24",
"size": "39086",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Server/src/virtualenv/Lib/site-packages/setuptools/package_index.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "427445"
},
{
"name": "C++",
"bytes": "21783"
},
{
"name": "CSS",
"bytes": "280650"
},
{
"name": "D",
"bytes": "9679"
},
{
"name": "HTML",
"bytes": "37335"
},
{
"name": "Java",
"bytes": "740594"
},
{
"name": "JavaScript",
"bytes": "1801741"
},
{
"name": "PowerShell",
"bytes": "8104"
},
{
"name": "Python",
"bytes": "2631176"
},
{
"name": "Shell",
"bytes": "12283"
}
],
"symlink_target": ""
} |
import numpy as np
from pysb.integrate import Solver
from matplotlib import pyplot as plt
from indra import bel, biopax, trips
from indra.util import plot_formatting as pf
from indra.assemblers import PysbAssembler
# 1. TEXT
# User defines text:
text = ('MEK1 phosphorylates ERK2 on threonine 185 and tyrosine 187.')
# Show round trip going out to TRIPS/DRUM web service,
# return logical form to INDRA, which is queried by
# INDRA for relevant statements
tp = trips.process_text(text)
# Now generate PySB model
stmts = tp.statements # Don't show this one
pa = PysbAssembler()
pa.add_statements(stmts)
pa.make_model()
# Simulate the assembled ODE model from t=0 to t=25000 (model time units).
t = np.linspace(0, 25000)
sol = Solver(pa.model, t)
sol.run()
# Plot the time course of the MAPK1 phosphorylation states.
pf.set_fig_params()
plt.ion()
plt.figure(figsize=(1, 1), dpi=300)
species_names = [str(s) for s in pa.model.species]
# Unphosphorylated MAPK1.
plt.plot(t, sol.y[:, species_names.index("MAPK1(T185='u', Y187='u')")],
         'b', label='MAPK1.uu')
# Singly phosphorylated MAPK1 (either site).
plt.plot(t, sol.y[:, species_names.index("MAPK1(T185='p', Y187='u')")] +
         sol.y[:, species_names.index("MAPK1(T185='u', Y187='p')")],
         'g', label='MAPK1.p')
# Doubly phosphorylated MAPK1.
plt.plot(t, sol.y[:, species_names.index("MAPK1(T185='p', Y187='p')")],
         'r', label='MAPK1.pp')
plt.xlabel('Time')
plt.ylabel('Amount')
plt.legend(loc='upper right', fontsize=4)
plt.xticks([])
plt.yticks([])
pf.format_axis(plt.gca())
| {
"content_hash": "0df353bbf1c739b0e9f4c7e2e25feabb",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 72,
"avg_line_length": 30.790697674418606,
"alnum_prop": 0.6812688821752266,
"repo_name": "jmuhlich/indra",
"id": "87b21ab4d4656c6dc9ae5b7a38492ca2a117a149",
"size": "1324",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/hello_indra.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "619830"
},
{
"name": "Ruby",
"bytes": "433"
},
{
"name": "Shell",
"bytes": "1319"
}
],
"symlink_target": ""
} |
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class DeletedWebAppsOperations(object):
    """DeletedWebAppsOperations operations.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: API Version. Constant value: "2016-03-01".
    """

    models = models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # This operation group pins its own API version.
        self.api_version = "2016-03-01"
        self.config = config

    def list(
            self, custom_headers=None, raw=False, **operation_config):
        """Get all deleted apps for a subscription.

        Get all deleted apps for a subscription.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of DeletedSite
        :rtype:
         ~azure.mgmt.web.models.DeletedSitePaged[~azure.mgmt.web.models.DeletedSite]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # Fetch one page: build the URL from metadata on the first call,
            # then follow the service-provided next_link on later calls.
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            # Anything other than 200 is surfaced as a CloudError.
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response

        # Deserialize response: the paged object pulls pages lazily via
        # internal_paging as the caller iterates.
        deserialized = models.DeletedSitePaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.DeletedSitePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    # Static request metadata consumed by internal_paging above.
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Web/deletedSites'}
| {
"content_hash": "d16e794da118af964a228776919bcc14",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 144,
"avg_line_length": 39.234042553191486,
"alnum_prop": 0.6252711496746204,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "dadbb2822b872876fef8f4292e69c6116aaf3152",
"size": "4162",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-web/azure/mgmt/web/operations/deleted_web_apps_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
} |
"""
PyFilm
~~~~~~
"""
__version__ = "1.1"
__author__ = "JamieJackHerer"
__email__ = "jamiejackherer@gmail.com"
__source__ = "TBA"
__license__ = '''
New BSD License
Copyright (c) 2016, Jamie Lindsey aka JamieJackHerer
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* The names of its contributors may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
| {
"content_hash": "65bcb0039a91154d8f89b7e4cec0b52f",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 78,
"avg_line_length": 42.17948717948718,
"alnum_prop": 0.780547112462006,
"repo_name": "jamiejackherer/pyFilm",
"id": "07ee62b74587a4cbb42f405da34a343688894d9e",
"size": "1671",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyfilm/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "244369"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
import copy
from datetime import datetime
from distutils.version import LooseVersion
import time
import sys
import traceback
from ansible.module_utils.basic import missing_required_lib
from ansible.module_utils.k8s.common import AUTH_ARG_SPEC, COMMON_ARG_SPEC
from ansible.module_utils.six import string_types
from ansible.module_utils.k8s.common import KubernetesAnsibleModule
from ansible.module_utils.common.dict_transformations import dict_merge
from distutils.version import LooseVersion
# Optional-dependency probes: availability is recorded in HAS_* flags so the
# module can report a helpful error later instead of crashing at import time.
try:
    import yaml
    from openshift.dynamic.exceptions import DynamicApiError, NotFoundError, ConflictError, ForbiddenError, KubernetesValidateMissing
except ImportError:
    # Exceptions handled in common
    pass
try:
    import kubernetes_validate
    HAS_KUBERNETES_VALIDATE = True
except ImportError:
    HAS_KUBERNETES_VALIDATE = False
# Saved traceback for a missing openshift.helper.hashes dependency, used in
# the fail_json message when append_hash is requested without it.
K8S_CONFIG_HASH_IMP_ERR = None
try:
    from openshift.helper.hashes import generate_hash
    HAS_K8S_CONFIG_HASH = True
except ImportError:
    K8S_CONFIG_HASH_IMP_ERR = traceback.format_exc()
    HAS_K8S_CONFIG_HASH = False
class KubernetesRawModule(KubernetesAnsibleModule):
@property
def validate_spec(self):
return dict(
fail_on_error=dict(type='bool'),
version=dict(),
strict=dict(type='bool', default=True)
)
@property
def condition_spec(self):
return dict(
type=dict(),
status=dict(default=True, choices=[True, False, "Unknown"]),
reason=dict()
)
@property
def argspec(self):
argument_spec = copy.deepcopy(COMMON_ARG_SPEC)
argument_spec.update(copy.deepcopy(AUTH_ARG_SPEC))
argument_spec['merge_type'] = dict(type='list', choices=['json', 'merge', 'strategic-merge'])
argument_spec['wait'] = dict(type='bool', default=False)
argument_spec['wait_timeout'] = dict(type='int', default=120)
argument_spec['wait_condition'] = dict(type='dict', default=None, options=self.condition_spec)
argument_spec['validate'] = dict(type='dict', default=None, options=self.validate_spec)
argument_spec['append_hash'] = dict(type='bool', default=False)
return argument_spec
    def __init__(self, k8s_kind=None, *args, **kwargs):
        """Parse module parameters and build ``self.resource_definitions``.

        Definitions come from (in priority order) ``src`` (a file path),
        ``resource_definition`` (YAML string, list, or dict), or a minimal
        definition assembled from kind/api_version/name/namespace.
        """
        self.client = None
        self.warnings = []
        mutually_exclusive = [
            ('resource_definition', 'src'),
        ]
        KubernetesAnsibleModule.__init__(self, *args,
                                         mutually_exclusive=mutually_exclusive,
                                         supports_check_mode=True,
                                         **kwargs)
        self.kind = k8s_kind or self.params.get('kind')
        self.api_version = self.params.get('api_version')
        self.name = self.params.get('name')
        self.namespace = self.params.get('namespace')
        resource_definition = self.params.get('resource_definition')
        validate = self.params.get('validate')
        if validate:
            # Schema validation requires openshift >= 0.8.0.
            if LooseVersion(self.openshift_version) < LooseVersion("0.8.0"):
                self.fail_json(msg="openshift >= 0.8.0 is required for validate")
        self.append_hash = self.params.get('append_hash')
        if self.append_hash:
            if not HAS_K8S_CONFIG_HASH:
                self.fail_json(msg=missing_required_lib("openshift >= 0.7.2", reason="for append_hash"),
                               exception=K8S_CONFIG_HASH_IMP_ERR)
        if self.params['merge_type']:
            if LooseVersion(self.openshift_version) < LooseVersion("0.6.2"):
                self.fail_json(msg=missing_required_lib("openshift >= 0.6.2", reason="for merge_type"))
        if resource_definition:
            if isinstance(resource_definition, string_types):
                try:
                    # NOTE(review): safe_load_all returns a generator, so
                    # resource_definitions is single-pass in this branch —
                    # presumably only iterated once by execute_module; confirm.
                    self.resource_definitions = yaml.safe_load_all(resource_definition)
                except (IOError, yaml.YAMLError) as exc:
                    self.fail(msg="Error loading resource_definition: {0}".format(exc))
            elif isinstance(resource_definition, list):
                self.resource_definitions = resource_definition
            else:
                self.resource_definitions = [resource_definition]
        src = self.params.get('src')
        if src:
            # src wins over resource_definition (they are mutually exclusive).
            self.resource_definitions = self.load_resource_definitions(src)
        if not resource_definition and not src:
            # Fall back to a minimal definition from discrete parameters.
            self.resource_definitions = [{
                'kind': self.kind,
                'apiVersion': self.api_version,
                'metadata': {
                    'name': self.name,
                    'namespace': self.namespace
                }
            }]
def flatten_list_kind(self, list_resource, definitions):
flattened = []
parent_api_version = list_resource.group_version if list_resource else None
parent_kind = list_resource.kind[:-4] if list_resource else None
for definition in definitions.get('items', []):
resource = self.find_resource(definition.get('kind', parent_kind), definition.get('apiVersion', parent_api_version), fail=True)
flattened.append((resource, self.set_defaults(resource, definition)))
return flattened
    def execute_module(self):
        """Entry point: apply every resource definition and exit the module."""
        changed = False
        results = []
        self.client = self.get_api_client()
        flattened_definitions = []
        # First pass: expand any *List kinds into their component items.
        for definition in self.resource_definitions:
            kind = definition.get('kind', self.kind)
            api_version = definition.get('apiVersion', self.api_version)
            if kind.endswith('List'):
                resource = self.find_resource(kind, api_version, fail=False)
                flattened_definitions.extend(self.flatten_list_kind(resource, definition))
            else:
                resource = self.find_resource(kind, api_version, fail=True)
                flattened_definitions.append((resource, definition))
        # Second pass: validate (optionally) and apply each definition.
        for (resource, definition) in flattened_definitions:
            kind = definition.get('kind', self.kind)
            api_version = definition.get('apiVersion', self.api_version)
            definition = self.set_defaults(resource, definition)
            self.warnings = []
            if self.params['validate'] is not None:
                self.warnings = self.validate(definition)
            result = self.perform_action(resource, definition)
            result['warnings'] = self.warnings
            changed = changed or result['changed']
            results.append(result)
        # Single definition: exit with its result directly (compat shape).
        if len(results) == 1:
            self.exit_json(**results[0])
        self.exit_json(**{
            'changed': changed,
            'result': {
                'results': results
            }
        })
def validate(self, resource):
def _prepend_resource_info(resource, msg):
return "%s %s: %s" % (resource['kind'], resource['metadata']['name'], msg)
try:
warnings, errors = self.client.validate(resource, self.params['validate'].get('version'), self.params['validate'].get('strict'))
except KubernetesValidateMissing:
self.fail_json(msg="kubernetes-validate python library is required to validate resources")
if errors and self.params['validate']['fail_on_error']:
self.fail_json(msg="\n".join([_prepend_resource_info(resource, error) for error in errors]))
else:
return [_prepend_resource_info(resource, msg) for msg in warnings + errors]
def set_defaults(self, resource, definition):
definition['kind'] = resource.kind
definition['apiVersion'] = resource.group_version
metadata = definition.get('metadata', {})
if self.name and not metadata.get('name'):
metadata['name'] = self.name
if resource.namespaced and self.namespace and not metadata.get('namespace'):
metadata['namespace'] = self.namespace
definition['metadata'] = metadata
return definition
def perform_action(self, resource, definition):
result = {'changed': False, 'result': {}}
state = self.params.get('state', None)
force = self.params.get('force', False)
name = definition['metadata'].get('name')
namespace = definition['metadata'].get('namespace')
existing = None
wait = self.params.get('wait')
wait_timeout = self.params.get('wait_timeout')
wait_condition = None
if self.params.get('wait_condition') and self.params['wait_condition'].get('type'):
wait_condition = self.params['wait_condition']
self.remove_aliases()
try:
# ignore append_hash for resources other than ConfigMap and Secret
if self.append_hash and definition['kind'] in ['ConfigMap', 'Secret']:
name = '%s-%s' % (name, generate_hash(definition))
definition['metadata']['name'] = name
params = dict(name=name, namespace=namespace)
existing = resource.get(**params)
except NotFoundError:
# Remove traceback so that it doesn't show up in later failures
try:
sys.exc_clear()
except AttributeError:
# no sys.exc_clear on python3
pass
except ForbiddenError as exc:
if definition['kind'] in ['Project', 'ProjectRequest'] and state != 'absent':
return self.create_project_request(definition)
self.fail_json(msg='Failed to retrieve requested object: {0}'.format(exc.body),
error=exc.status, status=exc.status, reason=exc.reason)
except DynamicApiError as exc:
self.fail_json(msg='Failed to retrieve requested object: {0}'.format(exc.body),
error=exc.status, status=exc.status, reason=exc.reason)
if state == 'absent':
result['method'] = "delete"
if not existing:
# The object already does not exist
return result
else:
# Delete the object
result['changed'] = True
if not self.check_mode:
try:
k8s_obj = resource.delete(**params)
result['result'] = k8s_obj.to_dict()
except DynamicApiError as exc:
self.fail_json(msg="Failed to delete object: {0}".format(exc.body),
error=exc.status, status=exc.status, reason=exc.reason)
if wait:
success, resource, duration = self.wait(resource, definition, wait_timeout, 'absent')
result['duration'] = duration
if not success:
self.fail_json(msg="Resource deletion timed out", **result)
return result
else:
if not existing:
if self.check_mode:
k8s_obj = definition
else:
try:
k8s_obj = resource.create(definition, namespace=namespace).to_dict()
except ConflictError:
# Some resources, like ProjectRequests, can't be created multiple times,
# because the resources that they create don't match their kind
# In this case we'll mark it as unchanged and warn the user
self.warn("{0} was not found, but creating it returned a 409 Conflict error. This can happen \
if the resource you are creating does not directly create a resource of the same kind.".format(name))
return result
except DynamicApiError as exc:
msg = "Failed to create object: {0}".format(exc.body)
if self.warnings:
msg += "\n" + "\n ".join(self.warnings)
self.fail_json(msg=msg, error=exc.status, status=exc.status, reason=exc.reason)
success = True
result['result'] = k8s_obj
if wait and not self.check_mode:
success, result['result'], result['duration'] = self.wait(resource, definition, wait_timeout, condition=wait_condition)
result['changed'] = True
result['method'] = 'create'
if not success:
self.fail_json(msg="Resource creation timed out", **result)
return result
match = False
diffs = []
if existing and force:
if self.check_mode:
k8s_obj = definition
else:
try:
k8s_obj = resource.replace(definition, name=name, namespace=namespace, append_hash=self.append_hash).to_dict()
except DynamicApiError as exc:
msg = "Failed to replace object: {0}".format(exc.body)
if self.warnings:
msg += "\n" + "\n ".join(self.warnings)
self.fail_json(msg=msg, error=exc.status, status=exc.status, reason=exc.reason)
match, diffs = self.diff_objects(existing.to_dict(), k8s_obj)
success = True
result['result'] = k8s_obj
if wait:
success, result['result'], result['duration'] = self.wait(resource, definition, wait_timeout, condition=wait_condition)
match, diffs = self.diff_objects(existing.to_dict(), result['result'].to_dict())
result['changed'] = not match
result['method'] = 'replace'
result['diff'] = diffs
if not success:
self.fail_json(msg="Resource replacement timed out", **result)
return result
# Differences exist between the existing obj and requested params
if self.check_mode:
k8s_obj = dict_merge(existing.to_dict(), definition)
else:
if LooseVersion(self.openshift_version) < LooseVersion("0.6.2"):
k8s_obj, error = self.patch_resource(resource, definition, existing, name,
namespace)
else:
for merge_type in self.params['merge_type'] or ['strategic-merge', 'merge']:
k8s_obj, error = self.patch_resource(resource, definition, existing, name,
namespace, merge_type=merge_type)
if not error:
break
if error:
self.fail_json(**error)
success = True
result['result'] = k8s_obj
if wait:
success, result['result'], result['duration'] = self.wait(resource, definition, wait_timeout, condition=wait_condition)
match, diffs = self.diff_objects(existing.to_dict(), result['result'])
result['result'] = k8s_obj
result['changed'] = not match
result['method'] = 'patch'
result['diff'] = diffs
if not success:
self.fail_json(msg="Resource update timed out", **result)
return result
def patch_resource(self, resource, definition, existing, name, namespace, merge_type=None):
try:
params = dict(name=name, namespace=namespace)
if merge_type:
params['content_type'] = 'application/{0}-patch+json'.format(merge_type)
k8s_obj = resource.patch(definition, **params).to_dict()
match, diffs = self.diff_objects(existing.to_dict(), k8s_obj)
error = {}
return k8s_obj, {}
except DynamicApiError as exc:
msg = "Failed to patch object: {0}".format(exc.body)
if self.warnings:
msg += "\n" + "\n ".join(self.warnings)
error = dict(msg=msg, error=exc.status, status=exc.status, reason=exc.reason, warnings=self.warnings)
return None, error
    def create_project_request(self, definition):
        """Create an OpenShift ProjectRequest from ``definition``.

        Used when a Project cannot be read before creation (403); the kind is
        forced to ProjectRequest. Returns a result dict mirroring
        perform_action's create path.
        """
        # Projects must be created through the ProjectRequest endpoint.
        definition['kind'] = 'ProjectRequest'
        result = {'changed': False, 'result': {}}
        resource = self.find_resource('ProjectRequest', definition['apiVersion'], fail=True)
        if not self.check_mode:
            try:
                k8s_obj = resource.create(definition)
                result['result'] = k8s_obj.to_dict()
            except DynamicApiError as exc:
                self.fail_json(msg="Failed to create object: {0}".format(exc.body),
                               error=exc.status, status=exc.status, reason=exc.reason)
        # Creation is always reported as a change (even in check mode).
        result['changed'] = True
        result['method'] = 'create'
        return result
def _wait_for(self, resource, name, namespace, predicate, timeout, state):
start = datetime.now()
def _wait_for_elapsed():
return (datetime.now() - start).seconds
response = None
while _wait_for_elapsed() < timeout:
try:
response = resource.get(name=name, namespace=namespace)
if predicate(response):
if response:
return True, response.to_dict(), _wait_for_elapsed()
else:
return True, {}, _wait_for_elapsed()
time.sleep(timeout // 20)
except NotFoundError:
if state == 'absent':
return True, {}, _wait_for_elapsed()
if response:
response = response.to_dict()
return False, response, _wait_for_elapsed()
    def wait(self, resource, definition, timeout, state='present', condition=None):
        """Wait for the object described by ``definition`` to reach ``state``.

        :param state: 'present' waits for readiness, 'absent' for deletion.
        :param condition: optional dict with 'type'/'status'/'reason' keys,
            matched against the object's status.conditions instead of the
            built-in per-kind readiness checks.
        :returns: ``(success, resource_dict, elapsed_seconds)`` from _wait_for.
        """
        def _deployment_ready(deployment):
            # FIXME: frustratingly bool(deployment.status) is True even if status is empty
            # Furthermore deployment.status.availableReplicas == deployment.status.replicas == None if status is empty
            return (deployment.status and deployment.status.replicas is not None and
                    deployment.status.availableReplicas == deployment.status.replicas and
                    deployment.status.observedGeneration == deployment.metadata.generation)

        def _pod_ready(pod):
            # A pod is ready only when every container reports ready.
            return (pod.status and pod.status.containerStatuses is not None and
                    all([container.ready for container in pod.status.containerStatuses]))

        def _daemonset_ready(daemonset):
            return (daemonset.status and daemonset.status.desiredNumberScheduled is not None and
                    daemonset.status.numberReady == daemonset.status.desiredNumberScheduled and
                    daemonset.status.observedGeneration == daemonset.metadata.generation)

        def _custom_condition(resource):
            if not resource.status or not resource.status.conditions:
                return False
            match = [x for x in resource.status.conditions if x.type == condition['type']]
            if not match:
                return False
            # There should never be more than one condition of a specific type
            match = match[0]
            if match.status == 'Unknown':
                return False
            # Condition statuses are the strings 'True'/'False'/'Unknown'.
            status = True if match.status == 'True' else False
            if status == condition['status']:
                if condition.get('reason'):
                    return match.reason == condition['reason']
                return True
            return False

        def _resource_absent(resource):
            # Deletion succeeded once the poll yields nothing.
            return not resource

        waiter = dict(
            Deployment=_deployment_ready,
            DaemonSet=_daemonset_ready,
            Pod=_pod_ready
        )
        kind = definition['kind']
        if state == 'present' and not condition:
            # Kinds without a specific readiness check only wait until the
            # object can be fetched (identity predicate on the response).
            predicate = waiter.get(kind, lambda x: x)
        elif state == 'present' and condition:
            predicate = _custom_condition
        else:
            predicate = _resource_absent
        return self._wait_for(resource, definition['metadata']['name'], definition['metadata'].get('namespace'), predicate, timeout, state)
| {
"content_hash": "7e9ccb1f7ce446cadeec3db4bfa02ece",
"timestamp": "",
"source": "github",
"line_count": 445,
"max_line_length": 140,
"avg_line_length": 45.970786516853934,
"alnum_prop": 0.5673852471036809,
"repo_name": "SergeyCherepanov/ansible",
"id": "80659f23a646b9afe94eb156db9fa9c121a0a0c5",
"size": "21150",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "ansible/ansible/module_utils/k8s/raw.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
} |
"""Provide models for new modmail."""
from ...const import API_PATH
from .base import RedditBase
class ModmailConversation(RedditBase):
    """A class for modmail conversations.

    **Typical Attributes**

    This table describes attributes that typically belong to objects of this
    class. Since attributes are dynamically provided (see
    :ref:`determine-available-attributes-of-an-object`), there is not a
    guarantee that these attributes will always be present, nor is this list
    comprehensive in any way.

    ======================= ===================================================
    Attribute               Description
    ======================= ===================================================
    ``authors``             Provides an ordered list of :class:`.Redditor`
                            instances. The authors of each message in the
                            modmail conversation.
    ``id``                  The ID of the ModmailConversation.
    ``is_highlighted``      Whether or not the ModmailConversation is
                            highlighted.
    ``is_internal``         Whether or not the ModmailConversation is a private
                            mod conversation.
    ``last_mod_update``     Time of the last mod message reply, represented in
                            the `ISO 8601`_ standard with timezone.
    ``last_updated``        Time of the last message reply, represented in
                            the `ISO 8601`_ standard with timezone.
    ``last_user_update``    Time of the last user message reply, represented in
                            the `ISO 8601`_ standard with timezone.
    ``num_messages``        The number of messages in the ModmailConversation.
    ``obj_ids``             Provides a list of dictionaries representing
                            mod actions on the ModmailConversation. Each dict
                            contains attributes of 'key' and 'id'. The key can
                            be either 'messages' or 'ModAction'. ModAction
                            represents archiving/highlighting etc.
    ``owner``               Provides an instance of :class:`.Subreddit`. The
                            subreddit that the ModmailConversation belongs to.
    ``participant``         Provides an instance of :class:`.Redditor`. The
                            participating user in the ModmailConversation.
    ``subject``             The subject of the ModmailConversation.
    ======================= ===================================================

    .. _ISO 8601: https://en.wikipedia.org/wiki/ISO_8601
    """

    # Attribute used by RedditBase for str()/hash of instances.
    STR_FIELD = 'id'

    @staticmethod
    def _convert_conversation_objects(data, reddit):
        """Convert messages and mod actions to PRAW objects."""
        result = {'messages': [], 'modActions': []}
        for thing in data['conversation']['objIds']:
            key = thing['key']
            # Each objId entry points into the matching top-level section of
            # the response payload (e.g. data['messages'][id]).
            thing_data = data[key][thing['id']]
            result[key].append(reddit._objector.objectify(thing_data))
        return result

    @staticmethod
    def _convert_user_summary(data, reddit):
        """Convert dictionaries of recent user history to PRAW objects."""
        parsers = {'recentComments':
                   reddit._objector.parsers[reddit.config.kinds['comment']],
                   'recentConvos': ModmailConversation,
                   'recentPosts':
                   reddit._objector.parsers[reddit.config.kinds['submission']]}
        for kind, parser in parsers.items():
            objects = []
            for thing_id, summary in data[kind].items():
                # Fullnames look like "t1_abc123"; keep only the short id.
                thing = parser(reddit, id=thing_id.rsplit('_', 1)[-1])
                if parser is not ModmailConversation:
                    del summary['permalink']
                for key, value in summary.items():
                    setattr(thing, key, value)
                objects.append(thing)
            # Sort by id, oldest to newest
            data[kind] = sorted(
                objects,
                key=lambda x: int(x.id, base=36), reverse=True)

    @classmethod
    def parse(cls, data, reddit,  # pylint: disable=arguments-differ
              convert_objects=True):
        """Return an instance of ModmailConversation from ``data``.

        :param data: The structured data.
        :param reddit: An instance of :class:`.Reddit`.
        :param convert_objects: If True, convert message and mod action data
            into objects (default: True).
        """
        conversation = data['conversation']

        conversation['authors'] = [reddit._objector.objectify(author)
                                   for author in conversation['authors']]
        for entity in 'owner', 'participant':
            conversation[entity] = reddit._objector.objectify(
                conversation[entity])

        if data.get('user'):
            cls._convert_user_summary(data['user'], reddit)
            conversation['user'] = reddit._objector.objectify(data['user'])
        if convert_objects:
            conversation.update(cls._convert_conversation_objects(data,
                                                                  reddit))

        # API responses use camelCase keys; normalize to snake_case attributes.
        conversation = reddit._objector._snake_case_keys(conversation)

        return cls(reddit, _data=conversation)

    def __init__(self, reddit, id=None,  # pylint: disable=redefined-builtin
                 mark_read=False, _data=None):
        """Construct an instance of the ModmailConversation object.

        :param mark_read: If True, conversation is marked as read
            (default: False).
        """
        super(ModmailConversation, self).__init__(reddit, _data)
        # Exactly one of `id` (lazy fetch) or `_data` (parsed) must be given.
        if bool(id) == bool(_data):
            raise TypeError('Either `id` or `_data` must be provided.')

        if id:
            self.id = id  # pylint: disable=invalid-name

        if mark_read:
            # Extra query parameter used when the conversation is fetched.
            self._info_params = {'markRead': True}

    def _build_conversation_list(self, other_conversations):
        """Return a comma-separated list of conversation IDs."""
        conversations = [self] + (other_conversations or [])
        return ','.join(conversation.id for conversation in conversations)

    def _info_path(self):
        # Endpoint used by RedditBase lazy loading to fetch this conversation.
        return API_PATH['modmail_conversation'].format(id=self.id)

    def archive(self):
        """Archive the conversation.

        Example:

        .. code:: python

           reddit.subreddit('redditdev').modmail('2gmz').archive()

        """
        self._reddit.post(API_PATH['modmail_archive'].format(id=self.id))

    def highlight(self):
        """Highlight the conversation.

        Example:

        .. code:: python

           reddit.subreddit('redditdev').modmail('2gmz').highlight()

        """
        self._reddit.post(API_PATH['modmail_highlight'].format(id=self.id))

    def mute(self):
        """Mute the non-mod user associated with the conversation.

        Example:

        .. code:: python

           reddit.subreddit('redditdev').modmail('2gmz').mute()

        """
        self._reddit.request('POST',
                             API_PATH['modmail_mute'].format(id=self.id))

    def read(self, other_conversations=None):  # noqa: D207, D301
        """Mark the conversation(s) as read.

        :param other_conversations: A list of other conversations to mark
            (default: None).

        For example, to mark the conversation as read along with other recent
        conversations from the same user:

        .. code:: python

           subreddit = reddit.subreddit('redditdev')
           conversation = subreddit.modmail.conversation('2gmz')
           conversation.read(\
other_conversations=conversation.user.recent_convos)

        """
        data = {'conversationIds': self._build_conversation_list(
            other_conversations)}
        self._reddit.post(API_PATH['modmail_read'], data=data)

    def reply(self, body, author_hidden=False, internal=False):
        """Reply to the conversation.

        :param body: The markdown formatted content for a message.
        :param author_hidden: When True, author is hidden from non-moderators
            (default: False).
        :param internal: When True, message is a private moderator note,
            hidden from non-moderators (default: False).
        :returns: A :class:`~.ModmailMessage` object for the newly created
            message.

        For example, to reply to the non-mod user while hiding your username:

        .. code:: python

           conversation = reddit.subreddit('redditdev').modmail('2gmz')
           conversation.reply('Message body', author_hidden=True)

        To create a private moderator note on the conversation:

        .. code:: python

           conversation.reply('Message body', internal=True)

        """
        data = {'body': body, 'isAuthorHidden': author_hidden,
                'isInternal': internal}
        response = self._reddit.post(API_PATH['modmail_conversation']
                                     .format(id=self.id), data=data)
        # The newly created message is the last entry in objIds.
        message_id = response['conversation']['objIds'][-1]['id']
        message_data = response['messages'][message_id]
        return self._reddit._objector.objectify(message_data)

    def unarchive(self):
        """Unarchive the conversation.

        Example:

        .. code:: python

           reddit.subreddit('redditdev').modmail('2gmz').unarchive()

        """
        self._reddit.post(API_PATH['modmail_unarchive'].format(id=self.id))

    def unhighlight(self):
        """Un-highlight the conversation.

        Example:

        .. code:: python

           reddit.subreddit('redditdev').modmail('2gmz').unhighlight()

        """
        # Un-highlighting is a DELETE on the same endpoint highlight() POSTs to.
        self._reddit.request('DELETE',
                             API_PATH['modmail_highlight'].format(id=self.id))

    def unmute(self):
        """Unmute the non-mod user associated with the conversation.

        Example:

        .. code:: python

           reddit.subreddit('redditdev').modmail('2gmz').unmute()

        """
        self._reddit.request('POST',
                             API_PATH['modmail_unmute'].format(id=self.id))

    def unread(self, other_conversations=None):  # noqa: D207, D301
        """Mark the conversation(s) as unread.

        :param other_conversations: A list of other conversations to mark
            (default: None).

        For example, to mark the conversation as unread along with other recent
        conversations from the same user:

        .. code:: python

           subreddit = reddit.subreddit('redditdev')
           conversation = subreddit.modmail.conversation('2gmz')
           conversation.unread(\
other_conversations=conversation.user.recent_convos)

        """
        data = {'conversationIds': self._build_conversation_list(
            other_conversations)}
        self._reddit.post(API_PATH['modmail_unread'], data=data)
class ModmailObject(RedditBase):
    """A base class for objects within a modmail conversation."""

    # Name of the attribute whose raw payload should be converted to a PRAW
    # object on assignment; subclasses may override.
    AUTHOR_ATTRIBUTE = 'author'
    STR_FIELD = 'id'

    def __setattr__(self, attribute, value):
        """Objectify the AUTHOR_ATTRIBUTE attribute."""
        if attribute == self.AUTHOR_ATTRIBUTE:
            # Replace the raw author dict with the corresponding PRAW object.
            value = self._reddit._objector.objectify(value)
        super(ModmailObject, self).__setattr__(attribute, value)
# Marker subclass: mod actions (archive/highlight/mute/...) returned in a
# conversation's objIds; behavior is inherited from ModmailObject.
class ModmailAction(ModmailObject):
    """A class for moderator actions on modmail conversations."""
# Marker subclass: individual messages within a conversation; behavior is
# inherited from ModmailObject.
class ModmailMessage(ModmailObject):
    """A class for modmail messages."""
| {
"content_hash": "e7f39014717562eb72df5727c5f58fc2",
"timestamp": "",
"source": "github",
"line_count": 310,
"max_line_length": 79,
"avg_line_length": 37.096774193548384,
"alnum_prop": 0.5781739130434783,
"repo_name": "13steinj/praw",
"id": "2461efa305eafbdb3e922cd50d9464b0475e3159",
"size": "11500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "praw/models/reddit/modmail.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "667266"
},
{
"name": "Shell",
"bytes": "189"
}
],
"symlink_target": ""
} |
"""
debug.py - Functions to aid in debugging
Copyright 2010 Luke Campagnola
Distributed under MIT/X11 license. See license.txt for more infomation.
"""
from __future__ import print_function
import sys, traceback, time, gc, re, types, weakref, inspect, os, cProfile, threading
from . import ptime
from numpy import ndarray
from .Qt import QtCore, QtGui
from .util.mutex import Mutex
from .util import cprint
# Current nesting depth of ftrace-decorated calls; drives output indentation.
__ftraceDepth = 0


def ftrace(func):
    """Decorator that prints an indented "start"/"done" line around every
    call to *func*, nesting the indentation with call depth.
    """
    def wrapper(*args, **kwargs):
        global __ftraceDepth
        indent = " " * __ftraceDepth
        print(indent + func.__name__ + " start")
        __ftraceDepth += 1
        try:
            result = func(*args, **kwargs)
        finally:
            # Always restore depth and report exit, even on exception.
            __ftraceDepth -= 1
            print(indent + func.__name__ + " done")
        return result
    return wrapper
class Tracer(object):
    """
    Prints every function enter/exit. Useful for debugging crashes / lockups.
    """
    def __init__(self):
        # Events seen since the stack last emptied; used for periodic reminders.
        self.count = 0
        # Formatted lines for the currently-open calls.
        self.stack = []

    def trace(self, frame, event, arg):
        """Trace callback installed via sys.settrace(); handles 'call' and
        'return' events and prints an indented line for each."""
        self.count += 1
        # If it has been a long time since we saw the top of the stack,
        # print a reminder
        if self.count % 1000 == 0:
            print("----- current stack: -----")
            for line in self.stack:
                print(line)
        if event == 'call':
            line = " " * len(self.stack) + ">> " + self.frameInfo(frame)
            print(line)
            self.stack.append(line)
        elif event == 'return':
            self.stack.pop()
            line = " " * len(self.stack) + "<< " + self.frameInfo(frame)
            print(line)
            if len(self.stack) == 0:
                self.count = 0
        # Returning the callback keeps tracing active in the new scope
        # (sys.settrace protocol).
        return self.trace

    def stop(self):
        # Uninstall the trace hook.
        sys.settrace(None)

    def start(self):
        sys.settrace(self.trace)

    def frameInfo(self, fr):
        """Return a one-line description of frame *fr* and its call site."""
        filename = fr.f_code.co_filename
        funcname = fr.f_code.co_name
        lineno = fr.f_lineno
        # NOTE(review): climbs 3 frames to reach the traced code's caller,
        # past the trace machinery — confirm depth if output looks wrong.
        callfr = sys._getframe(3)
        callline = "%s %d" % (callfr.f_code.co_name, callfr.f_lineno)
        args, _, _, value_dict = inspect.getargvalues(fr)
        if len(args) and args[0] == 'self':
            # Qualify method names with their class where possible.
            instance = value_dict.get('self', None)
            if instance is not None:
                cls = getattr(instance, '__class__', None)
                if cls is not None:
                    funcname = cls.__name__ + "." + funcname
        return "%s: %s %s: %s" % (callline, filename, lineno, funcname)
def warnOnException(func):
    """Decorator that catches/ignores exceptions and prints a stack trace."""
    def w(*args, **kwds):
        try:
            func(*args, **kwds)
        except:
            # Bare except is deliberate: any failure is reported and swallowed
            # so the wrapped call can never propagate. Note the wrapper also
            # discards func's return value.
            printExc('Ignored exception:')
    return w
def getExc(indent=4, prefix='| ', skip=1):
    """Return the current exception's formatted trace (see formatException)
    as a single string, with every line indented and prefixed."""
    raw = formatException(*sys.exc_info(), skip=skip)
    # Each entry from formatException may span several lines; flatten first.
    flat = []
    for chunk in raw:
        flat.extend(chunk.strip('\n').split('\n'))
    return '\n'.join(" " * indent + prefix + line for line in flat)
def printExc(msg='', indent=4, prefix='|'):
    """Print an error message followed by an indented exception backtrace
    (This function is intended to be called within except: blocks)"""
    # skip=2 hides this function and getExc itself from the printed trace.
    exc = getExc(indent, prefix + ' ', skip=2)
    print("[%s] %s\n" % (time.strftime("%H:%M:%S"), msg))
    # Banner lines delimit the trace block.
    print(" "*indent + prefix + '='*30 + '>>')
    print(exc)
    print(" "*indent + prefix + '='*30 + '<<')
def printTrace(msg='', indent=4, prefix='|'):
    """Print an error message followed by an indented stack trace"""
    # skip=1 drops this function's own frame from the trace.
    trace = backtrace(1)
    #exc = getExc(indent, prefix + ' ')
    print("[%s] %s\n" % (time.strftime("%H:%M:%S"), msg))
    print(" "*indent + prefix + '='*30 + '>>')
    for line in trace.split('\n'):
        print(" "*indent + prefix + " " + line)
    print(" "*indent + prefix + '='*30 + '<<')
def backtrace(skip=0):
    """Return the current call stack as one string, excluding this function's
    own frame plus *skip* additional innermost frames."""
    frames = traceback.format_stack()
    return ''.join(frames[:-(skip + 1)])
def formatException(exctype, value, tb, skip=0):
    """Return a list of formatted exception strings.

    Similar to traceback.format_exception, but displays the entire stack
    trace rather than just the portion downstream of the point where the
    exception is caught — useful for exceptions raised inside Qt signal
    handlers. *skip* innermost stack frames are omitted.
    """
    exc_lines = traceback.format_exception(exctype, value, tb)
    stack_lines = traceback.format_stack()[:-(skip + 1)]
    marker = [' --- exception caught here ---\n']
    # Keep the "Traceback..." header first, then the full stack, then the
    # marker and the exception's own frames/message.
    return exc_lines[:1] + stack_lines + marker + exc_lines[1:]
def printException(exctype, value, traceback):
    """Print an exception with its full traceback.

    Set `sys.excepthook = printException` to ensure that exceptions caught
    inside Qt signal handlers are printed with their full stack trace.
    """
    # NOTE: the parameter named `traceback` shadows the traceback module
    # inside this function; formatException handles the module-level work.
    print(''.join(formatException(exctype, value, traceback, skip=1)))
def listObjs(regex='Q', typ=None):
    """List all objects managed by python gc with class name matching regex
    (default: classes starting with 'Q'); when *typ* is given, match by
    isinstance instead."""
    tracked = gc.get_objects()
    if typ is not None:
        return [obj for obj in tracked if isinstance(obj, typ)]
    return [obj for obj in tracked if re.match(regex, type(obj).__name__)]
def findRefPath(startObj, endObj, maxLen=8, restart=True, seen={}, path=None, ignore=None):
    """Determine all paths of object references from startObj to endObj

    Walks gc.get_referrers backwards from endObj, printing and returning
    every chain of at most *maxLen* hops that reaches startObj.

    NOTE: the mutable default ``seen={}`` is deliberate — it acts as a memo
    cache shared by the recursive calls (which do not pass `seen` explicitly)
    and is cleared on each top-level call via ``restart=True``.
    """
    refs = []
    if path is None:
        path = [endObj]
    if ignore is None:
        ignore = {}
    # Exclude this function's own bookkeeping objects from the referrer scan.
    ignore[id(sys._getframe())] = None
    ignore[id(path)] = None
    ignore[id(seen)] = None
    prefix = " "*(8-maxLen)
    #print prefix + str(map(type, path))
    prefix += " "
    if restart:
        #gc.collect()
        seen.clear()
    gc.collect()
    newRefs = [r for r in gc.get_referrers(endObj) if id(r) not in ignore]
    ignore[id(newRefs)] = None
    #fo = allFrameObjs()
    #newRefs = []
    #for r in gc.get_referrers(endObj):
        #try:
            #if r not in fo:
                #newRefs.append(r)
        #except:
            #newRefs.append(r)

    for r in newRefs:
        #print prefix+"->"+str(type(r))
        # Frames/functions generate spurious reference paths; skip them.
        if type(r).__name__ in ['frame', 'function', 'listiterator']:
            #print prefix+" FRAME"
            continue

        try:
            # Identity check against the current path prevents cycles.
            if any([r is x for x in path]):
                #print prefix+" LOOP", objChainString([r]+path)
                continue
        except:
            print(r)
            print(path)
            raise

        if r is startObj:
            # Found a complete path; report it immediately.
            refs.append([r])
            print(refPathString([startObj]+path))
            continue

        if maxLen == 0:
            #print prefix+" END:", objChainString([r]+path)
            continue

        ## See if we have already searched this node.
        ## If not, recurse.
        tree = None
        try:
            cache = seen[id(r)]
            # Only reuse cached results computed with an equal or deeper limit.
            if cache[0] >= maxLen:
                tree = cache[1]
                for p in tree:
                    print(refPathString(p+path))
        except KeyError:
            pass

        ignore[id(tree)] = None
        if tree is None:
            tree = findRefPath(startObj, r, maxLen-1, restart=False, path=[r]+path, ignore=ignore)
            seen[id(r)] = [maxLen, tree]
        ## integrate any returned results
        if len(tree) == 0:
            #print prefix+" EMPTY TREE"
            continue
        else:
            for p in tree:
                refs.append(p+[r])
        #seen[id(r)] = [maxLen, refs]
    return refs
def objString(obj):
    """Return a short but descriptive string for any object"""
    try:
        if type(obj) in [int, float]:
            return str(obj)
        if isinstance(obj, dict):
            keys = list(obj.keys())
            if len(obj) > 5:
                return "<dict {%s,...}>" % (",".join(keys[:5]))
            return "<dict {%s}>" % (",".join(keys))
        if isinstance(obj, str):
            return '"%s..."' % obj[:50] if len(obj) > 50 else obj[:]
        if isinstance(obj, ndarray):
            return "<ndarray %s %s>" % (str(obj.dtype), str(obj.shape))
        if hasattr(obj, '__len__'):
            # Slice only in the >5 case: unsliceable sized containers (sets)
            # must still take the plain-iteration path below.
            if len(obj) > 5:
                inner = ",".join([type(member).__name__ for member in obj[:5]])
                return "<%s [%s,...]>" % (type(obj).__name__, inner)
            inner = ",".join([type(member).__name__ for member in obj])
            return "<%s [%s]>" % (type(obj).__name__, inner)
        return "<%s %s>" % (type(obj).__name__, obj.__class__.__name__)
    except:
        # Fall back to the bare type for anything that resists description.
        return str(type(obj))
def refPathString(chain):
    """Given a list of adjacent objects in a reference path, print the 'natural' path
    names (ie, attribute names, keys, and indexes) that follow from one object to the next ."""
    s = objString(chain[0])
    i = 0
    while i < len(chain)-1:
        #print " -> ", i
        i += 1
        o1 = chain[i-1]
        o2 = chain[i]
        cont = False
        if isinstance(o1, list) or isinstance(o1, tuple):
            if any([o2 is x for x in o1]):
                s += "[%d]" % o1.index(o2)
                # `continue` here advances the outer while loop directly.
                continue
            #print "  not list"
        if isinstance(o2, dict) and hasattr(o1, '__dict__') and o2 == o1.__dict__:
            # o2 is o1's attribute dict; jump an extra link to the attribute
            # value itself so the path reads ".attr" instead of ".__dict__[...]".
            i += 1
            if i >= len(chain):
                s += ".__dict__"
                continue
            o3 = chain[i]
            for k in o2:
                if o2[k] is o3:
                    s += '.%s' % k
                    cont = True
                    # NOTE(review): this `continue` only advances the inner
                    # `for`; the `cont` flag below is what resumes the outer
                    # while loop — verify this is the intended flow.
                    continue
            #print "  not __dict__"
        if isinstance(o1, dict):
            try:
                if o2 in o1:
                    s += "[key:%s]" % objString(o2)
                    continue
            except TypeError:
                # Unhashable o2 cannot be a dict key; fall through to values.
                pass
            for k in o1:
                if o1[k] is o2:
                    s += "[%s]" % objString(k)
                    cont = True
                    continue
            #print "  not dict"
        #for k in dir(o1):  ## Not safe to request attributes like this.
            #if getattr(o1, k) is o2:
                #s += ".%s" % k
                #cont = True
                #continue
        #print "  not attr"
        if cont:
            continue
        # No natural link was found between o1 and o2.
        s += " ? "
        sys.stdout.flush()
    return s
def objectSize(obj, ignore=None, verbose=False, depth=0, recursive=False):
    """Guess how much memory an object is using

    Returns sys.getsizeof(obj) plus, for ndarrays, the data buffer size;
    when *recursive* is set, sizes of contained/attribute objects are added
    (each object counted at most once via the *ignore* dict).
    """
    # Method/function wrapper types contribute shared machinery, not data.
    ignoreTypes = ['MethodType', 'UnboundMethodType', 'BuiltinMethodType', 'FunctionType', 'BuiltinFunctionType']
    ignoreTypes = [getattr(types, key) for key in ignoreTypes if hasattr(types, key)]
    ignoreRegex = re.compile('(method-wrapper|Flag|ItemChange|Option|Mode)')

    if ignore is None:
        ignore = {}

    indent = ' '*depth

    try:
        hash(obj)
        hsh = obj
    except:
        # Unhashable objects are tracked by a type:id surrogate key.
        hsh = "%s:%d" % (str(type(obj)), id(obj))

    if hsh in ignore:
        # Already counted elsewhere in this traversal.
        return 0
    ignore[hsh] = 1

    try:
        size = sys.getsizeof(obj)
    except TypeError:
        size = 0

    if isinstance(obj, ndarray):
        try:
            # getsizeof does not include the array's data buffer.
            size += len(obj.data)
        except:
            pass

    if recursive:
        if type(obj) in [list, tuple]:
            if verbose:
                print(indent+"list:")
            for o in obj:
                s = objectSize(o, ignore=ignore, verbose=verbose, depth=depth+1)
                if verbose:
                    print(indent+' +', s)
                size += s
        elif isinstance(obj, dict):
            if verbose:
                print(indent+"list:")
            for k in obj:
                s = objectSize(obj[k], ignore=ignore, verbose=verbose, depth=depth+1)
                if verbose:
                    print(indent+' +', k, s)
                size += s
        #elif isinstance(obj, QtCore.QObject):
            #try:
                #childs = obj.children()
                #if verbose:
                    #print indent+"Qt children:"
                #for ch in childs:
                    #s = objectSize(obj, ignore=ignore, verbose=verbose, depth=depth+1)
                    #size += s
                    #if verbose:
                        #print indent + ' +', ch.objectName(), s
            #except:
                #pass
    #if isinstance(obj, types.InstanceType):
        gc.collect()
        if verbose:
            print(indent+'attrs:')
        for k in dir(obj):
            if k in ['__dict__']:
                continue
            o = getattr(obj, k)
            if type(o) in ignoreTypes:
                continue
            strtyp = str(type(o))
            if ignoreRegex.search(strtyp):
                continue
            #if isinstance(o, types.ObjectType) and strtyp == "<type 'method-wrapper'>":
                #continue
            #if verbose:
                #print indent, k, '?'
            # Only count attributes whose sole (non-frame) referrer is obj,
            # i.e. values that would die with this object.
            refs = [r for r in gc.get_referrers(o) if type(r) != types.FrameType]
            if len(refs) == 1:
                s = objectSize(o, ignore=ignore, verbose=verbose, depth=depth+1)
                size += s
                if verbose:
                    print(indent + " +", k, s)
            #else:
                #if verbose:
                    #print indent + ' -', k, len(refs)
    return size
class GarbageWatcher(object):
    """
    Convenient dictionary for holding weak references to objects.
    Mainly used to check whether the objects have been collected yet or not.

    Example::

        gw = GarbageWatcher()
        gw['objName'] = obj
        gw['objName2'] = obj2
        gw.check()
    """
    def __init__(self):
        # Weak references only: watching an object must not keep it alive.
        self.objs = weakref.WeakValueDictionary()
        # Every name ever registered (survives collection of the object).
        self.allNames = []

    def add(self, obj, name):
        """Watch *obj* under *name*."""
        self.objs[name] = obj
        self.allNames.append(name)

    def __setitem__(self, name, obj):
        # Dict-style registration: gw[name] = obj
        self.add(obj, name)

    def check(self):
        """Print a list of all watched objects and whether they have been collected."""
        gc.collect()
        dead = self.allNames[:]
        alive = []
        # Names still present in the weak dict are alive; whatever remains
        # in `dead` has been collected.
        for name in self.objs:
            dead.remove(name)
            alive.append(name)
        print("Deleted objects:", dead)
        print("Live objects:", alive)

    def __getitem__(self, item):
        # Raises KeyError once the object has been collected.
        return self.objs[item]
class Profiler(object):
"""Simple profiler allowing measurement of multiple time intervals.
By default, profilers are disabled. To enable profiling, set the
environment variable `PYQTGRAPHPROFILE` to a comma-separated list of
fully-qualified names of profiled functions.
Calling a profiler registers a message (defaulting to an increasing
counter) that contains the time elapsed since the last call. When the
profiler is about to be garbage-collected, the messages are passed to the
outer profiler if one is running, or printed to stdout otherwise.
If `delayed` is set to False, messages are immediately printed instead.
Example:
def function(...):
profiler = Profiler()
... do stuff ...
profiler('did stuff')
... do other stuff ...
profiler('did other stuff')
# profiler is garbage-collected and flushed at function end
If this function is a method of class C, setting `PYQTGRAPHPROFILE` to
"C.function" (without the module name) will enable this profiler.
For regular functions, use the qualified name of the function, stripping
only the initial "pyqtgraph." prefix from the module.
"""
_profilers = os.environ.get("PYQTGRAPHPROFILE", None)
_profilers = _profilers.split(",") if _profilers is not None else []
_depth = 0
_msgs = []
disable = False # set this flag to disable all or individual profilers at runtime
class DisabledProfiler(object):
def __init__(self, *args, **kwds):
pass
def __call__(self, *args):
pass
def finish(self):
pass
def mark(self, msg=None):
pass
_disabledProfiler = DisabledProfiler()
def __new__(cls, msg=None, disabled='env', delayed=True):
"""Optionally create a new profiler based on caller's qualname.
"""
if disabled is True or (disabled == 'env' and len(cls._profilers) == 0):
return cls._disabledProfiler
# determine the qualified name of the caller function
caller_frame = sys._getframe(1)
try:
caller_object_type = type(caller_frame.f_locals["self"])
except KeyError: # we are in a regular function
qualifier = caller_frame.f_globals["__name__"].split(".", 1)[1]
else: # we are in a method
qualifier = caller_object_type.__name__
func_qualname = qualifier + "." + caller_frame.f_code.co_name
if disabled == 'env' and func_qualname not in cls._profilers: # don't do anything
return cls._disabledProfiler
# create an actual profiling object
cls._depth += 1
obj = super(Profiler, cls).__new__(cls)
obj._name = msg or func_qualname
obj._delayed = delayed
obj._markCount = 0
obj._finished = False
obj._firstTime = obj._lastTime = ptime.time()
obj._newMsg("> Entering " + obj._name)
return obj
def __call__(self, msg=None):
"""Register or print a new message with timing information.
"""
if self.disable:
return
if msg is None:
msg = str(self._markCount)
self._markCount += 1
newTime = ptime.time()
self._newMsg(" %s: %0.4f ms",
msg, (newTime - self._lastTime) * 1000)
self._lastTime = newTime
    def mark(self, msg=None):
        # Alias for calling the profiler directly; keeps the interface in sync
        # with DisabledProfiler.mark().
        self(msg)
def _newMsg(self, msg, *args):
msg = " " * (self._depth - 1) + msg
if self._delayed:
self._msgs.append((msg, args))
else:
self.flush()
print(msg % args)
    def __del__(self):
        # Garbage collection of the profiler flushes any pending messages.
        self.finish()
    def finish(self, msg=None):
        """Add a final message; flush the message list if no parent profiler.
        """
        # Idempotent: repeated finish() calls (e.g. explicit + __del__) no-op.
        if self._finished or self.disable:
            return

        self._finished = True
        if msg is not None:
            self(msg)
        self._newMsg("< Exiting %s, total time: %0.4f ms",
                     self._name, (ptime.time() - self._firstTime) * 1000)
        # Decrement class-wide depth; only the outermost profiler flushes.
        type(self)._depth -= 1
        if self._depth < 1:
            self.flush()
def flush(self):
if self._msgs:
print("\n".join([m[0]%m[1] for m in self._msgs]))
type(self)._msgs = []
def profile(code, name='profile_run', sort='cumulative', num=30):
    """Profile ``code`` with cProfile and print the top ``num`` entries.

    The raw stats are written to the file ``name``; the loaded
    ``pstats.Stats`` object (sorted by ``sort``) is returned.
    """
    cProfile.run(code, name)
    result = pstats.Stats(name)
    result.sort_stats(sort)
    result.print_stats(num)
    return result
#### Code for listing (nearly) all objects in the known universe
#### http://utcc.utoronto.ca/~cks/space/blog/python/GetAllObjects
# Recursively expand slist's objects
# into olist, using seen to track
# already processed objects.
def _getr(slist, olist, first=True):
    """Recursively expand the objects in ``slist`` into the dict ``olist``
    (id -> object), following gc referents.  ``first`` marks the top-level
    call, which periodically runs gc.collect() to keep memory in check.
    """
    i = 0
    for e in slist:
        oid = id(e)
        typ = type(e)
        if oid in olist or typ is int:  ## or e in olist: ## since we're excluding all ints, there is no longer a need to check for olist keys
            continue
        olist[oid] = e
        if first and (i%1000) == 0:
            gc.collect()
        # Recurse into everything this object refers to.
        tl = gc.get_referents(e)
        if tl:
            _getr(tl, olist, first=False)
        i += 1
# The public function.
def get_all_objects():
    """Return a dict {id: object} of all live Python objects (excluding ints),
    not including the dict itself or this function's bookkeeping objects."""
    gc.collect()
    gcl = gc.get_objects()
    olist = {}
    _getr(gcl, olist)

    # Remove the objects created by this function itself from the result.
    del olist[id(olist)]
    del olist[id(gcl)]
    del olist[id(sys._getframe())]
    return olist
def lookup(oid, objects=None):
    """Return an object given its ID, if it exists.

    ``objects`` may supply a pre-built {id: object} table; otherwise all live
    objects are collected (expensive).
    """
    table = objects if objects is not None else get_all_objects()
    return table[oid]
class ObjTracker(object):
    """
    Tracks all objects under the sun, reporting the changes between snapshots: what objects are created, deleted, and persistent.
    This class is very useful for tracking memory leaks. The class goes to great (but not heroic) lengths to avoid tracking
    its own internal objects.

    Example:
        ot = ObjTracker()   # takes snapshot of currently existing objects
        ... do stuff ...
        ot.diff()           # prints lists of objects created and deleted since ot was initialized
        ... do stuff ...
        ot.diff()           # prints lists of objects created and deleted since last call to ot.diff()
                            # also prints list of items that were created since initialization AND have not been deleted yet
                            # (if done correctly, this list can tell you about objects that were leaked)

        arrays = ot.findPersistent('ndarray')  ## returns all objects matching 'ndarray' (string match, not instance checking)
                                               ## that were considered persistent when the last diff() was run

        describeObj(arrays[0])    ## See if we can determine who has references to this array
    """

    allObjs = {}  ## keep track of all objects created and stored within class instances
    allObjs[id(allObjs)] = None

    def __init__(self):
        self.startRefs = {}        ## objects that exist when the tracker is initialized {oid: weakref}
                                   ## (If it is not possible to weakref the object, then the value is None)
        self.startCount = {}
        self.newRefs = {}          ## objects that have been created since initialization
        self.persistentRefs = {}   ## objects considered 'persistent' when the last diff() was called
        self.objTypes = {}

        # Register this tracker's own containers so they are excluded from
        # future object counts.
        ObjTracker.allObjs[id(self)] = None
        self.objs = [self.__dict__, self.startRefs, self.startCount,
                     self.newRefs, self.persistentRefs, self.objTypes]
        self.objs.append(self.objs)
        for v in self.objs:
            ObjTracker.allObjs[id(v)] = None

        self.start()

    def findNew(self, regex):
        """Return all objects matching regex that were considered 'new' when the last diff() was run."""
        return self.findTypes(self.newRefs, regex)

    def findPersistent(self, regex):
        """Return all objects matching regex that were considered 'persistent' when the last diff() was run."""
        return self.findTypes(self.persistentRefs, regex)

    def start(self):
        """
        Remember the current set of objects as the comparison for all future calls to diff()
        Called automatically on init, but can be called manually as well.
        """
        refs, count, objs = self.collect()
        for r in self.startRefs:
            self.forgetRef(self.startRefs[r])
        self.startRefs.clear()
        self.startRefs.update(refs)
        for r in refs:
            # BUG FIX: previously called self.rememberRef(r), passing the oid
            # key instead of the weakref value, so the weakrefs themselves were
            # never excluded from future counts.
            self.rememberRef(refs[r])
        self.startCount.clear()
        self.startCount.update(count)

    def diff(self, **kargs):
        """
        Compute all differences between the current object set and the reference set.
        Print a set of reports for created, deleted, and persistent objects
        """
        refs, count, objs = self.collect()   ## refs contains the list of ALL objects

        ## Which refs have disappeared since call to start() (these are only displayed once, then forgotten.)
        delRefs = {}
        for i in list(self.startRefs.keys()):
            if i not in refs:
                delRefs[i] = self.startRefs[i]
                del self.startRefs[i]
                self.forgetRef(delRefs[i])
        for i in list(self.newRefs.keys()):
            if i not in refs:
                delRefs[i] = self.newRefs[i]
                del self.newRefs[i]
                self.forgetRef(delRefs[i])

        ## Which refs have appeared since call to start() or diff()
        persistentRefs = {}   ## created since start(), but before last diff()
        createRefs = {}       ## created since last diff()
        for o in refs:
            if o not in self.startRefs:
                if o not in self.newRefs:
                    createRefs[o] = refs[o]       ## object has been created since last diff()
                else:
                    persistentRefs[o] = refs[o]   ## object has been created since start(), but before last diff() (persistent)

        ## self.newRefs holds the entire set of objects created since start()
        for r in self.newRefs:
            self.forgetRef(self.newRefs[r])
        self.newRefs.clear()
        self.newRefs.update(persistentRefs)
        self.newRefs.update(createRefs)
        for r in self.newRefs:
            self.rememberRef(self.newRefs[r])

        ## self.persistentRefs holds all objects considered persistent.
        self.persistentRefs.clear()
        self.persistentRefs.update(persistentRefs)

        print("----------- Count changes since start: ----------")
        c1 = count.copy()
        for k in self.startCount:
            c1[k] = c1.get(k, 0) - self.startCount[k]
        typs = list(c1.keys())
        typs.sort(key=lambda a: c1[a])
        for t in typs:
            if c1[t] == 0:
                continue
            num = "%d" % c1[t]
            print(" " + num + " "*(10-len(num)) + str(t))

        print("----------- %d Deleted since last diff: ------------" % len(delRefs))
        self.report(delRefs, objs, **kargs)
        print("----------- %d Created since last diff: ------------" % len(createRefs))
        self.report(createRefs, objs, **kargs)
        print("----------- %d Created since start (persistent): ------------" % len(persistentRefs))
        self.report(persistentRefs, objs, **kargs)

    def __del__(self):
        self.startRefs.clear()
        self.startCount.clear()
        self.newRefs.clear()
        self.persistentRefs.clear()

        # Unregister this tracker's internal objects.
        del ObjTracker.allObjs[id(self)]
        for v in self.objs:
            del ObjTracker.allObjs[id(v)]

    @classmethod
    def isObjVar(cls, o):
        # True for trackers themselves and any of their registered internals.
        return type(o) is cls or id(o) in cls.allObjs

    def collect(self):
        """Take a snapshot of all live objects.

        Returns (refs, count, objs): a {oid: weakref-or-None} map of tracked
        objects, per-type instance counts, and the raw {oid: object} table.
        """
        print("Collecting list of all objects...")
        gc.collect()
        objs = get_all_objects()
        frame = sys._getframe()
        del objs[id(frame)]  ## ignore the current frame
        del objs[id(frame.f_code)]

        ignoreTypes = [int]
        refs = {}
        count = {}
        for k in objs:
            o = objs[k]
            typ = type(o)
            oid = id(o)
            if ObjTracker.isObjVar(o) or typ in ignoreTypes:
                continue

            try:
                # BUG FIX: was weakref.ref(obj) -- 'obj' is undefined in this
                # scope, so the bare except always fired and no object was
                # ever actually weak-referenced.
                ref = weakref.ref(o)
            except TypeError:
                # Not all types are weak-referenceable.
                ref = None
            refs[oid] = ref
            typStr = typeStr(o)
            self.objTypes[oid] = typStr
            ObjTracker.allObjs[id(typStr)] = None
            count[typ] = count.get(typ, 0) + 1

        print("All objects: %d Tracked objects: %d" % (len(objs), len(refs)))
        return refs, count, objs

    def forgetRef(self, ref):
        """Stop excluding a weakref's own id from future object counts."""
        if ref is not None:
            del ObjTracker.allObjs[id(ref)]

    def rememberRef(self, ref):
        ## Record the address of the weakref object so it is not included in future object counts.
        if ref is not None:
            ObjTracker.allObjs[id(ref)] = None

    def lookup(self, oid, ref, objs=None):
        """Dereference a weakref, falling back to a scan of all live objects."""
        if ref is None or ref() is None:
            try:
                obj = lookup(oid, objects=objs)
            except KeyError:
                obj = None
        else:
            obj = ref()

        return obj

    def report(self, refs, allobjs=None, showIDs=False):
        """Print per-type count and total size for the objects in ``refs``."""
        if allobjs is None:
            allobjs = get_all_objects()

        count = {}
        rev = {}
        for oid in refs:
            obj = self.lookup(oid, refs[oid], allobjs)
            if obj is None:
                typ = "[del] " + self.objTypes[oid]
            else:
                typ = typeStr(obj)
            if typ not in rev:
                rev[typ] = []
            rev[typ].append(oid)
            c = count.get(typ, [0, 0])
            count[typ] = [c[0] + 1, c[1] + objectSize(obj)]
        typs = list(count.keys())
        typs.sort(key=lambda a: count[a][1])

        for t in typs:
            line = " %d\t%d\t%s" % (count[t][0], count[t][1], t)
            if showIDs:
                line += "\t" + ",".join(map(str, rev[t]))
            print(line)

    def findTypes(self, refs, regex):
        """Return the objects in ``refs`` whose recorded type string matches regex."""
        allObjs = get_all_objects()
        objs = []
        r = re.compile(regex)
        for k in refs:
            if r.search(self.objTypes[k]):
                objs.append(self.lookup(k, refs[k], allObjs))
        return objs
def describeObj(obj, depth=4, path=None, ignore=None):
    """
    Trace all reference paths backward, printing a list of different ways this object can be accessed.
    Attempts to answer the question "who has a reference to this object"
    """
    if path is None:
        path = [obj]
    if ignore is None:
        ignore = {}   ## holds IDs of objects used within the function.
    # Exclude this call's own frame and bookkeeping containers from the search.
    ignore[id(sys._getframe())] = None
    ignore[id(path)] = None
    gc.collect()

    refs = gc.get_referrers(obj)
    ignore[id(refs)] = None
    printed=False
    for ref in refs:
        if id(ref) in ignore:
            continue
        if id(ref) in list(map(id, path)):
            print("Cyclic reference: " + refPathString([ref]+path))
            printed = True
            continue
        newPath = [ref]+path
        if len(newPath) >= depth:
            # Reached maximum trace depth; print the accumulated path.
            refStr = refPathString(newPath)
            if '[_]' not in refStr:   ## ignore '_' references generated by the interactive shell
                print(refStr)
                printed = True
        else:
            # Keep walking referrers one level further back.
            describeObj(ref, depth, newPath, ignore)
            printed = True
    if not printed:
        print("Dead end: " + refPathString(path))
def typeStr(obj):
    """Create a more useful type string by making <instance> types report their class."""
    obj_type = type(obj)
    # types.InstanceType only exists on Python 2 (old-style class instances).
    if obj_type == getattr(types, 'InstanceType', None):
        return "<instance of %s>" % obj.__class__.__name__
    return str(obj_type)
def searchRefs(obj, *args):
    """Pseudo-interactive function for tracing references backward.

    **Arguments:**

        obj:   The initial object from which to start searching
        args:  A set of string or int arguments.
               each integer selects one of obj's referrers to be the new 'obj'
               each string indicates an action to take on the current 'obj':
                  t:  print the types of obj's referrers
                  l:  print the lengths of obj's referrers (if they have __len__)
                  i:  print the IDs of obj's referrers
                  o:  print obj
                  ro: return obj
                  rr: return list of obj's referrers

    Examples::

       searchRefs(obj, 't')                    ## Print types of all objects referring to obj
       searchRefs(obj, 't', 0, 't')            ##   ..then select the first referrer and print the types of its referrers
       searchRefs(obj, 't', 0, 't', 'l')       ##   ..also print lengths of the last set of referrers
       searchRefs(obj, 0, 1, 'ro')             ## Select index 0 from obj's referrers, then index 1 from the next set, then return that object
    """
    # Track this function's own frame/containers so they are never reported
    # as referrers.
    ignore = {id(sys._getframe()): None}
    gc.collect()
    refs = gc.get_referrers(obj)
    ignore[id(refs)] = None
    refs = [r for r in refs if id(r) not in ignore]
    for a in args:
        #fo = allFrameObjs()
        #refs = [r for r in refs if r not in fo]
        if type(a) is int:
            # Integer argument: descend into the selected referrer.
            obj = refs[a]
            gc.collect()
            refs = gc.get_referrers(obj)
            ignore[id(refs)] = None
            refs = [r for r in refs if id(r) not in ignore]
        elif a == 't':
            print(list(map(typeStr, refs)))
        elif a == 'i':
            print(list(map(id, refs)))
        elif a == 'l':
            def slen(o):
                if hasattr(o, '__len__'):
                    return len(o)
                else:
                    return None
            print(list(map(slen, refs)))
        elif a == 'o':
            print(obj)
        elif a == 'ro':
            return obj
        elif a == 'rr':
            return refs
def allFrameObjs():
    """Return the frame and code objects of every frame on the current call
    stack.  Useful for ignoring these objects in reference searches."""
    frame = sys._getframe()
    collected = []
    while frame is not None:
        collected.append(frame)
        collected.append(frame.f_code)
        frame = frame.f_back
    return collected
def findObj(regex):
    """Return a list of objects whose typeStr matches regex"""
    pattern = re.compile(regex)
    matches = []
    for candidate in get_all_objects().values():
        if pattern.search(typeStr(candidate)):
            matches.append(candidate)
    return matches
def listRedundantModules():
    """List modules that have been imported more than once via different paths.

    Prints one line per duplicated module file; returns None.
    """
    mods = {}
    for name, mod in sys.modules.items():
        # BUG FIX: hasattr(mod, '__file__') passes for modules whose __file__
        # is None (e.g. namespace packages), which crashed os.path.abspath().
        mfile = getattr(mod, '__file__', None)
        if mfile is None:
            continue
        mfile = os.path.abspath(mfile)
        if mfile[-1] == 'c':
            # Normalize compiled .pyc paths back to the .py source path.
            mfile = mfile[:-1]
        if mfile in mods:
            print("module at %s has 2 names: %s, %s" % (mfile, name, mods[mfile]))
        else:
            mods[mfile] = name
def walkQObjectTree(obj, counts=None, verbose=False, depth=0):
    """
    Walk through a tree of QObjects, doing nothing to them.
    The purpose of this function is to find dead objects and generate a crash
    immediately rather than stumbling upon them later.
    Prints a count of the objects encountered, for fun. (or is it?)
    """
    if verbose:
        print(" "*depth + typeStr(obj))
    if counts is None:
        counts = {}
    type_name = str(type(obj))
    # Tally this node, then recurse into its children.
    counts[type_name] = counts.get(type_name, 0) + 1
    for child in obj.children():
        walkQObjectTree(child, counts, verbose, depth + 1)
    return counts
# id(QObject) -> descriptive string; cached across reports.
QObjCache = {}


def qObjectReport(verbose=False):
    """Generate a report counting all QObjects and their types.

    Walks every live object matching 'PyQt', caches a description of each
    QObject seen, and prints a per-type count of all QObjects reachable from
    parentless roots.
    """
    # BUG FIX: the original declared ``global qObjCache`` -- a case-typo of
    # QObjCache. The statement was dead (the cache is only mutated in place,
    # never rebound), so it has been removed.
    count = {}
    for obj in findObj('PyQt'):
        if isinstance(obj, QtCore.QObject):
            oid = id(obj)
            if oid not in QObjCache:
                QObjCache[oid] = typeStr(obj) + " " + obj.objectName()
                try:
                    QObjCache[oid] += " " + obj.parent().objectName()
                    QObjCache[oid] += " " + obj.text()
                except Exception:
                    # parent() may be None and text() may not exist; either
                    # way the basic description is kept.
                    pass
            print("check obj", oid, str(QObjCache[oid]))
            if obj.parent() is None:
                # Only walk from roots; children are visited recursively.
                walkQObjectTree(obj, count, verbose)

    for t in sorted(count.keys()):
        print(count[t], "\t", t)
class PrintDetector(object):
    """Find code locations that print to stdout.

    Installing an instance replaces sys.stdout; every write is forwarded to
    the real stdout along with a stack trace showing where the print came
    from.  Call remove() (or drop the instance) to restore stdout.
    """
    def __init__(self):
        self.stdout = sys.stdout
        sys.stdout = self

    def remove(self):
        """Restore the original stdout stream."""
        sys.stdout = self.stdout

    def __del__(self):
        self.remove()

    def write(self, x):
        # Forward the text, then report where it was printed from.
        self.stdout.write(x)
        traceback.print_stack()

    def flush(self):
        self.stdout.flush()
def listQThreads():
    """Prints Thread IDs (Qt's, not OS's) for all QThreads."""
    # findObj scans every live object, so this can be slow in large sessions.
    thr = findObj('[Tt]hread')
    thr = [t for t in thr if isinstance(t, QtCore.QThread)]
    import sip
    for t in thr:
        print("--> ", t)
        print(" Qt ID: 0x%x" % sip.unwrapinstance(t))
def pretty(data, indent=''):
    """Format nested dict/list/tuple structures into a more human-readable string
    This function is a bit better than pprint for displaying OrderedDicts.
    """
    ret = ""
    ind2 = indent + " "
    if isinstance(data, dict):
        ret = indent+"{\n"
        # BUG FIX: dict.iteritems() is Python 2 only; items() works on both.
        for k, v in data.items():
            ret += ind2 + repr(k) + ": " + pretty(v, ind2).strip() + "\n"
        ret += indent+"}\n"
    elif isinstance(data, (list, tuple)):
        s = repr(data)
        if len(s) < 40:
            # Short sequences are rendered on a single line.
            ret += indent + s
        else:
            d = '[]' if isinstance(data, list) else '()'
            ret = indent+d[0]+"\n"
            for i, v in enumerate(data):
                ret += ind2 + str(i) + ": " + pretty(v, ind2).strip() + "\n"
            ret += indent+d[1]+"\n"
    else:
        ret += indent + repr(data)
    return ret
class ThreadTrace(object):
    """
    Used to debug freezing by starting a new thread that reports on the
    location of other threads periodically.
    """
    def __init__(self, interval=10.0):
        # interval: seconds between stack dumps.
        self.interval = interval
        self.lock = Mutex()
        self._stop = False
        self.start()

    def stop(self):
        """Ask the reporting thread to exit at its next wakeup."""
        with self.lock:
            self._stop = True

    def start(self, interval=None):
        """(Re)start the reporting thread (daemonized so it won't block exit)."""
        if interval is not None:
            self.interval = interval
        self._stop = False
        self.thread = threading.Thread(target=self.run)
        self.thread.daemon = True
        self.thread.start()

    def run(self):
        # Loop until stop() is requested, dumping every other thread's stack.
        while True:
            with self.lock:
                if self._stop is True:
                    return

            print("\n============= THREAD FRAMES: ================")
            for id, frame in sys._current_frames().items():
                if id == threading.current_thread().ident:
                    # Skip this reporting thread itself.
                    continue
                print("<< thread %d >>" % id)
                traceback.print_stack(frame)
            print("===============================================\n")

            time.sleep(self.interval)
class ThreadColor(object):
    """
    Wrapper on stdout/stderr that colors text by the current thread ID.

    *stream* must be 'stdout' or 'stderr'.
    """
    # Shared across all instances: thread -> color index, guarded by ``lock``.
    colors = {}
    lock = Mutex()

    def __init__(self, stream):
        # Capture the real stream, then install self in its place on sys.
        self.stream = getattr(sys, stream)
        self.err = stream == 'stderr'
        setattr(sys, stream, self)

    def write(self, msg):
        with self.lock:
            cprint.cprint(self.stream, self.color(), msg, -1, stderr=self.err)

    def flush(self):
        with self.lock:
            self.stream.flush()

    def color(self):
        """Return (assigning on first use) the color index for this thread."""
        tid = threading.current_thread()
        if tid not in self.colors:
            # Cycle through 15 colors, skipping 0.
            c = (len(self.colors) % 15) + 1
            self.colors[tid] = c
        return self.colors[tid]
def enableFaulthandler():
    """ Enable faulthandler for all threads.

    If the faulthandler package is available, this function disables and then
    re-enables fault handling for all threads (this is necessary to ensure any
    new threads are handled correctly), and returns True.

    If faulthandler is not available, then returns False.
    """
    try:
        import faulthandler
    except ImportError:
        return False

    # Must disable first or else new threads may not be handled.
    faulthandler.disable()
    faulthandler.enable(all_threads=True)
    return True
| {
"content_hash": "07f08c75b23a4014f305dae5c4bbe55f",
"timestamp": "",
"source": "github",
"line_count": 1207,
"max_line_length": 161,
"avg_line_length": 33.70091135045568,
"alnum_prop": 0.5272758561349166,
"repo_name": "pmaunz/pyqtgraph",
"id": "61ae9fd51e5366677d369731aa6595b9154f9cc0",
"size": "40701",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "pyqtgraph/debug.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "1752"
},
{
"name": "Python",
"bytes": "2125387"
}
],
"symlink_target": ""
} |
from __future__ import print_function, unicode_literals
import fnmatch
import logging
import marshal
import os
import re
import six
import socket
import stat
import string
import subprocess
from rbtools.clients import SCMClient, RepositoryInfo
from rbtools.clients.errors import (AmendError,
EmptyChangeError,
InvalidRevisionSpecError,
SCMError,
TooManyRevisionsError)
from rbtools.utils.checks import check_gnu_diff, check_install
from rbtools.utils.filesystem import make_empty_files, make_tempfile
from rbtools.utils.process import die, execute
class P4Wrapper(object):
    """A wrapper around p4 commands.

    All calls out to p4 go through an instance of this class. It keeps a
    separation between all the standard SCMClient logic and any parsing
    and handling of p4 invocation and results.
    """
    KEYVAL_RE = re.compile('^([^:]+): (.+)$')
    COUNTERS_RE = re.compile('^([^ ]+) = (.+)$')

    def __init__(self, options):
        self.options = options

    def is_supported(self):
        """Return whether the p4 command-line tool is installed."""
        return check_install(['p4', 'help'])

    def counters(self):
        """Return all Perforce counters as a {name: value} dict."""
        lines = self.run_p4(['counters'], split_lines=True)
        return self._parse_keyval_lines(lines, self.COUNTERS_RE)

    def change(self, changenum, marshalled=True, password=None):
        """Return the change spec for the given changelist number."""
        return self.run_p4(['change', '-o', str(changenum)],
                           password=password, ignore_errors=True,
                           none_on_ignored_error=True,
                           marshalled=marshalled)

    def modify_change(self, new_change_spec):
        """new_change_spec must contain the changelist number."""
        return self.run_p4(['change', '-i'], input_string=new_change_spec)

    def files(self, path):
        """Return marshalled 'p4 files' records for the given path."""
        return self.run_p4(['files', path], marshalled=True)

    def filelog(self, path):
        """Return marshalled 'p4 filelog' records for the given path."""
        return self.run_p4(['filelog', path], marshalled=True)

    def fstat(self, depot_path, fields=None):
        """Return 'p4 fstat' output for ``depot_path`` as a {field: value} dict.

        ``fields`` optionally restricts the output to the named fstat fields.
        (BUG FIX: the default was a shared mutable list literal; ``None`` is
        backward-compatible and avoids the mutable-default pitfall.)
        """
        args = ['fstat']

        if fields:
            args += ['-T', ','.join(fields)]

        args.append(depot_path)

        lines = self.run_p4(args, split_lines=True)
        stat_info = {}

        for line in lines:
            line = line.strip()

            # fstat output lines look like "... <field> <value>".
            if line.startswith('... '):
                parts = line.split(' ', 2)
                stat_info[parts[1]] = parts[2]

        return stat_info

    def info(self):
        """Return 'p4 info' output parsed into a {key: value} dict."""
        lines = self.run_p4(['info'],
                            ignore_errors=True,
                            split_lines=True)

        return self._parse_keyval_lines(lines)

    def opened(self, changenum):
        """Return marshalled records for files opened in the changelist."""
        return self.run_p4(['opened', '-c', str(changenum)],
                           marshalled=True)

    def print_file(self, depot_path, out_file=None):
        """Print a depot file's contents, optionally into ``out_file``."""
        cmd = ['print']

        if out_file:
            cmd += ['-o', out_file]

        cmd += ['-q', depot_path]

        return self.run_p4(cmd)

    def where(self, depot_path):
        """Return marshalled 'p4 where' mapping records for a depot path."""
        return self.run_p4(['where', depot_path], marshalled=True)

    def run_p4(self, p4_args, marshalled=False, password=None,
               ignore_errors=False, input_string=None, *args, **kwargs):
        """Invoke p4.

        In the current implementation, the arguments 'marshalled' and
        'input_string' cannot be used together, i.e. this command doesn't
        allow inputting and outputting at the same time.

        Raises SCMError when the command fails and ``ignore_errors`` is False.
        """
        cmd = ['p4']

        if marshalled:
            cmd += ['-G']

        # Connection settings from the command-line options, if provided.
        if getattr(self.options, 'p4_client', None):
            cmd += ['-c', self.options.p4_client]

        if getattr(self.options, 'p4_port', None):
            cmd += ['-p', self.options.p4_port]

        if getattr(self.options, 'p4_passwd', None):
            cmd += ['-P', self.options.p4_passwd]

        cmd += p4_args

        if password is not None:
            cmd += ['-P', password]

        if marshalled:
            # 'p4 -G' streams a sequence of marshalled Python dicts on stdout.
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
            result = []
            has_error = False

            while True:
                try:
                    data = marshal.load(p.stdout)
                except EOFError:
                    break
                else:
                    result.append(data)
                    if data.get('code', None) == 'error':
                        has_error = True

            rc = p.wait()

            if not ignore_errors and (rc or has_error):
                for record in result:
                    if 'data' in record:
                        print(record['data'])
                raise SCMError('Failed to execute command: %s\n' % cmd)

            return result
        elif input_string is not None:
            p = subprocess.Popen(cmd, stdin=subprocess.PIPE)
            p.communicate(input_string)  # Send input, wait, set returncode

            if not ignore_errors and p.returncode:
                raise SCMError('Failed to execute command: %s\n' % cmd)

            return None
        else:
            result = execute(cmd, ignore_errors=ignore_errors, *args, **kwargs)

        return result

    def _parse_keyval_lines(self, lines, regex=KEYVAL_RE):
        """Parse 'key: value'-style lines into a dict using ``regex``."""
        keyvals = {}

        for line in lines:
            m = regex.match(line)

            if m:
                key = m.groups()[0]
                value = m.groups()[1]
                keyvals[key] = value.strip()

        return keyvals
class PerforceClient(SCMClient):
"""
A wrapper around the p4 Perforce tool that fetches repository information
and generates compatible diffs.
"""
name = 'Perforce'
can_amend_commit = True
supports_diff_exclude_patterns = True
supports_diff_extra_args = True
supports_patch_revert = True
DATE_RE = re.compile(br'(\w+)\s+(\w+)\s+(\d+)\s+(\d\d:\d\d:\d\d)\s+'
br'(\d\d\d\d)')
ENCODED_COUNTER_URL_RE = re.compile('reviewboard.url\.(\S+)')
REVISION_CURRENT_SYNC = '--rbtools-current-sync'
REVISION_PENDING_CLN_PREFIX = '--rbtools-pending-cln:'
REVISION_DEFAULT_CLN = 'default'
ADDED_FILES_RE = re.compile(r'^==== //depot/(\S+)#\d+ ==A== \S+ ====$',
re.M)
DELETED_FILES_RE = re.compile(r'^==== //depot/(\S+)#\d+ ==D== \S+ ====$',
re.M)
    def __init__(self, p4_class=P4Wrapper, **kwargs):
        """Initialize the client.

        ``p4_class`` is injectable so tests can substitute a fake wrapper.
        """
        super(PerforceClient, self).__init__(**kwargs)
        self.p4 = p4_class(self.options)
    def get_repository_info(self):
        """Determine and return RepositoryInfo for the local Perforce client.

        Returns None when p4 is not installed, when the current directory is
        outside the client root, or when required server info is missing.
        As a side effect, stores the server version in ``self.p4d_version``.
        """
        if not self.p4.is_supported():
            logging.debug('Unable to execute "p4 help": skipping Perforce')
            return None

        p4_info = self.p4.info()

        # For the repository path, we first prefer p4 brokers, then the
        # upstream p4 server. If neither of those are found, just return None.
        repository_path = (p4_info.get('Broker address') or
                           p4_info.get('Server address'))

        if repository_path is None:
            return None

        client_root = p4_info.get('Client root')

        if client_root is None:
            return None

        norm_cwd = os.path.normcase(os.path.realpath(os.getcwd()) +
                                    os.path.sep)
        norm_client_root = os.path.normcase(os.path.realpath(client_root) +
                                            os.path.sep)

        # Don't accept the repository if the current directory is outside the
        # root of the Perforce client.
        if not norm_cwd.startswith(norm_client_root):
            return None

        try:
            # P4PORT may be "host:port" or "ssl:host:port".
            parts = repository_path.split(':')
            hostname = None

            if len(parts) == 3 and parts[0] == 'ssl':
                hostname = parts[1]
                port = parts[2]
            elif len(parts) == 2:
                hostname, port = parts

            if not hostname:
                die('Path %s is not a valid Perforce P4PORT' % repository_path)

            info = socket.gethostbyaddr(hostname)

            # Build the list of repository paths we want to try to look up.
            servers = [hostname]

            if info[0] != hostname:
                servers.append(info[0])

            # If aliases exist for hostname, create a list of alias:port
            # strings for repository_path.
            if info[1]:
                servers += info[1]

            repository_path = ['%s:%s' % (server, port)
                               for server in servers]

            # If there's only one repository path found, then we don't
            # need to do a more expensive lookup of all registered
            # paths. We can look up just this path directly.
            if len(repository_path) == 1:
                repository_path = repository_path[0]
        except (socket.gaierror, socket.herror):
            pass

        server_version = p4_info.get('Server version', None)

        if not server_version:
            return None

        m = re.search(r'[^ ]*/([0-9]+)\.([0-9]+)/[0-9]+ .*$',
                      server_version, re.M)
        if m:
            self.p4d_version = int(m.group(1)), int(m.group(2))
        else:
            # Gracefully bail if we don't get a match
            return None

        # Now that we know it's Perforce, make sure we have GNU diff
        # installed, and error out if we don't.
        check_gnu_diff()

        return RepositoryInfo(path=repository_path, supports_changesets=True)
    def parse_revision_spec(self, revisions=[]):
        """Parses the given revision spec.

        The 'revisions' argument is a list of revisions as specified by the
        user. Items in the list do not necessarily represent a single revision,
        since the user can use SCM-native syntaxes such as "r1..r2" or "r1:r2".
        SCMTool-specific overrides of this method are expected to deal with
        such syntaxes.

        This will return a dictionary with the following keys:
            'base': A revision to use as the base of the resulting diff.
            'tip':  A revision to use as the tip of the resulting diff.

        These will be used to generate the diffs to upload to Review Board (or
        print). The diff for review will include the changes in (base, tip].

        If zero revisions are passed in, this will return the 'default'
        changelist.

        If a single revision is passed in, this will return the parent of that
        revision for 'base' and the passed-in revision for 'tip'. The result
        may have special internal revisions or prefixes based on whether the
        changeset is submitted, pending, or shelved.

        If two revisions are passed in, they need to both be submitted
        changesets.
        """
        n_revs = len(revisions)

        if n_revs == 0:
            # No revisions: diff the default (pending) changelist against the
            # currently synced state.
            return {
                'base': self.REVISION_CURRENT_SYNC,
                'tip': (self.REVISION_PENDING_CLN_PREFIX +
                        self.REVISION_DEFAULT_CLN)
            }
        elif n_revs == 1:
            # A single specified CLN can be any of submitted, pending, or
            # shelved. These are stored with special prefixes and/or names
            # because the way that we get the contents of the files changes
            # based on which of these is in effect.
            status = self._get_changelist_status(revisions[0])

            # Both pending and shelved changes are treated as "pending",
            # through the same code path. This is because the documentation for
            # 'p4 change' tells a filthy lie, saying that shelved changes will
            # have their status listed as shelved. In fact, when you shelve
            # changes, it sticks the data up on the server, but leaves your
            # working copy intact, and the change is still marked as pending.
            # Even after reverting the working copy, the change won't have its
            # status as "shelved". That said, there's perhaps a way that it
            # could (perhaps from other clients?), so it's still handled in
            # this conditional.
            #
            # The diff routine will first look for opened files in the client,
            # and if that fails, it will then do the diff against the shelved
            # copy.
            if status in ('pending', 'shelved'):
                return {
                    'base': self.REVISION_CURRENT_SYNC,
                    'tip': self.REVISION_PENDING_CLN_PREFIX + revisions[0],
                }
            elif status == 'submitted':
                try:
                    cln = int(revisions[0])

                    return {
                        'base': str(cln - 1),
                        'tip': str(cln),
                    }
                except ValueError:
                    raise InvalidRevisionSpecError(
                        '%s does not appear to be a valid changelist' %
                        revisions[0])
            else:
                raise InvalidRevisionSpecError(
                    '%s does not appear to be a valid changelist' %
                    revisions[0])
        elif n_revs == 2:
            result = {}

            # The base revision must be a submitted CLN
            status = self._get_changelist_status(revisions[0])
            if status == 'submitted':
                result['base'] = revisions[0]
            elif status in ('pending', 'shelved'):
                raise InvalidRevisionSpecError(
                    '%s cannot be used as the base CLN for a diff because '
                    'it is %s.' % (revisions[0], status))
            else:
                raise InvalidRevisionSpecError(
                    '%s does not appear to be a valid changelist' %
                    revisions[0])

            # Tip revision can be any of submitted, pending, or shelved CLNs
            status = self._get_changelist_status(revisions[1])
            if status == 'submitted':
                result['tip'] = revisions[1]
            elif status in ('pending', 'shelved'):
                raise InvalidRevisionSpecError(
                    '%s cannot be used for a revision range diff because it '
                    'is %s' % (revisions[1], status))
            else:
                raise InvalidRevisionSpecError(
                    '%s does not appear to be a valid changelist' %
                    revisions[1])

            return result
        else:
            raise TooManyRevisionsError
def _get_changelist_status(self, changelist):
if changelist == self.REVISION_DEFAULT_CLN:
return 'pending'
else:
change = self.p4.change(changelist)
if len(change) == 1 and 'Status' in change[0]:
return change[0]['Status']
return None
    def scan_for_server(self, repository_info):
        """Find the Review Board server URL for this repository.

        Returns the URL from configuration dot files if present, otherwise
        falls back to Perforce counters.
        """
        # Scan first for dot files, since it's faster and will cover the
        # user's $HOME/.reviewboardrc
        server_url = \
            super(PerforceClient, self).scan_for_server(repository_info)

        if server_url:
            return server_url

        return self.scan_for_server_counter(repository_info)
def scan_for_server_counter(self, repository_info):
"""
Checks the Perforce counters to see if the Review Board server's url
is specified. Since Perforce only started supporting non-numeric
counter values in server version 2008.1, we support both a normal
counter 'reviewboard.url' with a string value and embedding the url in
a counter name like 'reviewboard.url.http:||reviewboard.example.com'.
Note that forward slashes aren't allowed in counter names, so
pipe ('|') characters should be used. These should be safe because they
should not be used unencoded in urls.
"""
counters = self.p4.counters()
# Try for a "reviewboard.url" counter first.
url = counters.get('reviewboard.url', None)
if url:
return url
# Next try for a counter of the form:
# reviewboard_url.http:||reviewboard.example.com
for key, value in six.iteritems(counters):
m = self.ENCODED_COUNTER_URL_RE.match(key)
if m:
return m.group(1).replace('|', '/')
return None
def diff(self, revisions, include_files=None, exclude_patterns=None,
         extra_args=None):
    """Generate a diff for the given revisions.

    This goes through the hard work of generating a diff on Perforce
    in order to take into account adds/deletes and to provide the
    necessary revision information.

    Args:
        revisions (dict):
            A revisions dictionary as returned by
            ``parse_revision_spec``. If falsy, a "path posting" diff
            is generated from ``extra_args`` instead.

        include_files (list):
            Optional files to restrict the diff to. Both //depot/...
            paths and local filenames are supported.

        exclude_patterns (list):
            Optional patterns for files to leave out of the diff.

        extra_args (list):
            Path arguments, used only for path postings.

    Returns:
        dict:
        A dictionary with ``diff`` (bytes) and ``changenum`` keys.

    Raises:
        EmptyChangeError:
            The changeset contained no files.
    """
    # Bug fix: these parameters previously used mutable ([]) default
    # arguments, which are shared across calls; normalize None here
    # instead. Callers passing lists behave exactly as before.
    if include_files is None:
        include_files = []

    if exclude_patterns is None:
        exclude_patterns = []

    if extra_args is None:
        extra_args = []

    exclude_patterns = self.normalize_exclude_patterns(exclude_patterns)

    if not revisions:
        # The "path posting" is still interesting enough to keep
        # around. If the given arguments don't parse as valid
        # changelists, fall back on that behavior.
        return self._path_diff(extra_args, exclude_patterns)

    # Support both //depot/... paths and local filenames. For the
    # moment, this does *not* support any of perforce's traversal
    # literals like ...
    depot_include_files = []
    local_include_files = []

    for filename in include_files:
        if filename.startswith('//'):
            depot_include_files.append(filename)
        else:
            # The way we determine files to include or not is via
            # 'p4 where', which gives us absolute paths.
            local_include_files.append(
                os.path.realpath(os.path.abspath(filename)))

    base = revisions['base']
    tip = revisions['tip']

    cl_is_pending = tip.startswith(self.REVISION_PENDING_CLN_PREFIX)
    cl_is_shelved = False

    if not cl_is_pending:
        # Submitted changes are handled by a different method.
        logging.info('Generating diff for range of submitted changes: '
                     '%s to %s', base, tip)
        return self._compute_range_changes(
            base, tip, depot_include_files, local_include_files,
            exclude_patterns)

    # Strip off the pending prefix to get the raw changelist number.
    tip = tip.split(':', 1)[1]

    # Try to get the files out of the working directory first. If that
    # doesn't work, look at shelved files.
    opened_files = self.p4.opened(tip)

    if not opened_files:
        opened_files = self.p4.files('//...@=%s' % tip)
        cl_is_shelved = True

    if not opened_files:
        raise EmptyChangeError

    if cl_is_shelved:
        logging.info('Generating diff for shelved changeset %s' % tip)
    else:
        logging.info('Generating diff for pending changeset %s' % tip)

    diff_lines = []

    # Map Perforce actions onto the short change types _do_diff
    # understands.
    action_mapping = {
        'edit': 'M',
        'integrate': 'M',
        'add': 'A',
        'branch': 'A',
        'import': 'A',
        'delete': 'D',
    }

    # XXX: Theoretically, shelved files should handle moves just
    # fine--you can shelve and unshelve changes containing moves.
    # Unfortunately, there doesn't seem to be any way to match up the
    # added and removed files when the changeset is shelved, because
    # none of the usual methods (fstat, filelog) provide the source
    # move information when the changeset is shelved.
    if self._supports_moves() and not cl_is_shelved:
        action_mapping['move/add'] = 'MV-a'
        action_mapping['move/delete'] = 'MV'
    else:
        # The Review Board server doesn't support moved files for
        # perforce--create a diff that shows moved files as adds and
        # deletes.
        action_mapping['move/add'] = 'A'
        action_mapping['move/delete'] = 'D'

    for f in opened_files:
        depot_file = f['depotFile']
        local_file = self._depot_to_local(depot_file)
        new_depot_file = ''

        try:
            base_revision = int(f['rev'])
        except ValueError:
            # For actions like deletes, there won't be any "current
            # revision". Just pass through whatever was there before.
            base_revision = f['rev']

        action = f['action']

        if ((depot_include_files and
             depot_file not in depot_include_files) or
            (local_include_files and
             local_file not in local_include_files) or
            self._should_exclude_file(local_file, depot_file,
                                      exclude_patterns)):
            continue

        old_file = ''
        new_file = ''

        logging.debug('Processing %s of %s', action, depot_file)

        try:
            changetype_short = action_mapping[action]
        except KeyError:
            die('Unsupported action type "%s" for %s'
                % (action, depot_file))

        if changetype_short == 'M':
            try:
                old_file, new_file = self._extract_edit_files(
                    depot_file, local_file, base_revision, tip,
                    cl_is_shelved, False)
            except ValueError as e:
                logging.warning('Skipping file %s: %s', depot_file, e)
                continue
        elif changetype_short == 'A':
            # Perforce has a charming quirk where the revision listed
            # for a file is '1' in both the first submitted revision,
            # as well as before it's added. On the Review Board side,
            # when we parse the diff, we'll check to see if that
            # revision exists, but that only works for pending
            # changes. If the change is shelved or submitted, revision
            # 1 will exist, which causes the displayed diff to contain
            # revision 1 twice.
            #
            # Setting the revision in the diff file to be '0' will
            # avoid problems with patches that add files.
            base_revision = 0

            try:
                old_file, new_file = self._extract_add_files(
                    depot_file, local_file, tip, cl_is_shelved,
                    cl_is_pending)
            except ValueError as e:
                logging.warning('Skipping file %s: %s', depot_file, e)
                continue

            if os.path.islink(new_file):
                logging.warning('Skipping symlink %s', new_file)
                continue
        elif changetype_short == 'D':
            try:
                old_file, new_file = self._extract_delete_files(
                    depot_file, base_revision)
            except ValueError as e:
                # Bug fix: this log call previously had three format
                # placeholders but only two arguments, so the warning
                # itself would fail to format.
                logging.warning('Skipping file %s#%s: %s', depot_file,
                                base_revision, e)
                continue
        elif changetype_short == 'MV-a':
            # The server supports move information. We ignore this
            # particular entry, and handle the moves within the
            # equivalent 'move/delete' entry.
            continue
        elif changetype_short == 'MV':
            try:
                old_file, new_file, new_depot_file = \
                    self._extract_move_files(
                        depot_file, tip, base_revision, cl_is_shelved)
            except ValueError as e:
                logging.warning('Skipping file %s: %s', depot_file, e)
                continue

        dl = self._do_diff(old_file, new_file, depot_file, base_revision,
                           new_depot_file, changetype_short,
                           ignore_unmodified=True)
        diff_lines += dl

    return {
        'diff': b''.join(diff_lines),
        'changenum': self.get_changenum(revisions),
    }
def get_changenum(self, revisions):
    """Return the change number for the given revisions.

    This is only used when the client is supposed to send a change
    number to the server (such as with Perforce).

    Args:
        revisions (dict):
            A revisions dictionary as returned by
            ``parse_revision_spec``.

    Returns:
        unicode:
        The change number to send to the Review Board server, or
        ``None`` when the tip is not a numbered pending changelist.
    """
    # Reporting the change number lets the server extract the
    # changeset description server-side. Ideally we'd remove that
    # server-side implementation and just implement --guess-summary
    # and --guess-description, but that would likely create a lot of
    # unhappy users.
    prefix = self.REVISION_PENDING_CLN_PREFIX
    tip = revisions['tip']

    if tip.startswith(prefix):
        changenum = tip[len(prefix):]

        if changenum != self.REVISION_DEFAULT_CLN:
            return changenum

    return None
def _compute_range_changes(self, base, tip, depot_include_files,
                           local_include_files, exclude_patterns):
    """Compute the changes across files given a revision range.

    This will look at the history of all changes within the given
    range and compute the full set of changes contained therein. Just
    looking at the two trees isn't enough, since files may have moved
    around and we want to include that information.

    Returns a dict with a ``diff`` key holding the diff as bytes.
    Raises EmptyChangeError when no files changed in the range.
    """
    # Start by looking at the filelog to get a history of all the
    # changes within the changeset range. This processing step is done
    # because in marshalled mode, the filelog doesn't sort its entries
    # at all, and can also include duplicate information, especially
    # when files have moved around.
    changesets = {}

    # We expect to generate a diff for (base, tip], but filelog gives
    # us [base, tip]. Increment the base to avoid this.
    real_base = str(int(base) + 1)

    for file_entry in self.p4.filelog('//...@%s,%s' % (real_base, tip)):
        cid = 0

        # Each filelog entry packs multiple changes using numbered
        # keys (change0, action0, rev0, ...); walk them until the
        # numbering runs out.
        while True:
            change_key = 'change%d' % cid

            if change_key not in file_entry:
                break

            action = file_entry['action%d' % cid]
            depot_file = file_entry['depotFile']

            try:
                cln = int(file_entry[change_key])
            except ValueError:
                logging.warning('Skipping file %s: unable to parse '
                                'change number "%s"',
                                depot_file, file_entry[change_key])
                break

            # Normalize actions that behave like simpler ones.
            if action == 'integrate':
                action = 'edit'
            elif action == 'branch':
                action = 'add'

            if action not in ('edit', 'add', 'delete',
                              'move/add', 'move/delete'):
                raise Exception('Unsupported action type "%s" for %s' %
                                (action, depot_file))

            try:
                rev_key = 'rev%d' % cid
                rev = int(file_entry[rev_key])
            except ValueError:
                logging.warning('Skipping file %s: unable to parse '
                                'revision number "%s"',
                                depot_file, file_entry[rev_key])
                break

            change = {
                'rev': rev,
                'action': action,
            }

            if action == 'move/add':
                change['oldFilename'] = file_entry['file0,%d' % cid]
            elif action == 'move/delete':
                change['newFilename'] = file_entry['file1,%d' % cid]

            cid += 1

            changesets.setdefault(cln, {})[depot_file] = change

    # Now run through the changesets in order and compute a change
    # journal for each file.
    files = []

    for cln in sorted(changesets.keys()):
        changeset = changesets[cln]

        for depot_file, change in six.iteritems(changeset):
            action = change['action']

            # Moves will be handled in the 'move/delete' entry.
            if action == 'move/add':
                continue

            file_entry = None

            for f in files:
                if f['depotFile'] == depot_file:
                    file_entry = f
                    break

            if file_entry is None:
                file_entry = {
                    'initialDepotFile': depot_file,
                    'initialRev': change['rev'],
                    'newFile': action == 'add',
                    'rev': change['rev'],
                    'action': 'none',
                }
                files.append(file_entry)

            self._accumulate_range_change(file_entry, change)

    if not files:
        raise EmptyChangeError

    # Now generate the diff.
    supports_moves = self._supports_moves()
    diff_lines = []

    for f in files:
        action = f['action']
        depot_file = f['depotFile']

        try:
            local_file = self._depot_to_local(depot_file)
        except SCMError:
            logging.warning('Could not find local filename for "%s"',
                            depot_file)
            local_file = None

        rev = f['rev']
        initial_depot_file = f['initialDepotFile']
        initial_rev = f['initialRev']

        if ((depot_include_files and
             depot_file not in depot_include_files) or
            (local_include_files and local_file and
             local_file not in local_include_files) or
            self._should_exclude_file(local_file, depot_file,
                                      exclude_patterns)):
            continue

        # Bug fix: the 'delete', 'edit', and 'move' handlers below
        # previously used bare 'except ValueError:' clauses while
        # still logging 'e', raising a NameError instead of the
        # intended warning. They now bind the exception with 'as e'.
        if action == 'add':
            try:
                old_file, new_file = self._extract_add_files(
                    depot_file, local_file, rev, False, False)
            except ValueError as e:
                logging.warning('Skipping file %s: %s', depot_file, e)
                continue

            diff_lines += self._do_diff(
                old_file, new_file, depot_file, 0, '', 'A',
                ignore_unmodified=True)
        elif action == 'delete':
            try:
                old_file, new_file = self._extract_delete_files(
                    initial_depot_file, initial_rev)
            except ValueError as e:
                logging.warning('Skipping file %s: %s', depot_file, e)
                continue

            diff_lines += self._do_diff(
                old_file, new_file, initial_depot_file, initial_rev,
                depot_file, 'D', ignore_unmodified=True)
        elif action == 'edit':
            try:
                old_file, new_file = self._extract_edit_files(
                    depot_file, local_file, initial_rev, rev, False, True)
            except ValueError as e:
                logging.warning('Skipping file %s: %s', depot_file, e)
                continue

            diff_lines += self._do_diff(
                old_file, new_file, initial_depot_file, initial_rev,
                depot_file, 'M', ignore_unmodified=True)
        elif action == 'move':
            try:
                old_file_a, new_file_a = self._extract_add_files(
                    depot_file, local_file, rev, False, False)
                old_file_b, new_file_b = self._extract_delete_files(
                    initial_depot_file, initial_rev)
            except ValueError as e:
                logging.warning('Skipping file %s: %s', depot_file, e)
                continue

            if supports_moves:
                # Show the change as a move.
                diff_lines += self._do_diff(
                    old_file_a, new_file_b, initial_depot_file,
                    initial_rev, depot_file, 'MV',
                    ignore_unmodified=True)
            else:
                # Show the change as an add and a delete.
                diff_lines += self._do_diff(
                    old_file_a, new_file_a, depot_file, 0, '', 'A',
                    ignore_unmodified=True)
                diff_lines += self._do_diff(
                    old_file_b, new_file_b, initial_depot_file,
                    initial_rev, depot_file, 'D', ignore_unmodified=True)
        elif action == 'skip':
            continue
        else:
            # We should never get here. The results of
            # self._accumulate_range_change should never be anything
            # other than add, delete, move, or edit.
            assert False

    return {
        'diff': b''.join(diff_lines)
    }
def _accumulate_range_change(self, file_entry, change):
"""Compute the effects of a given change on a given file"""
old_action = file_entry['action']
current_action = change['action']
if old_action == 'none':
# This is the first entry for this file.
new_action = current_action
file_entry['depotFile'] = file_entry['initialDepotFile']
# If the first action was an edit or a delete, then the initial
# revision (that we'll use to generate the diff) is n-1
if current_action in ('edit', 'delete'):
file_entry['initialRev'] -= 1
elif current_action == 'add':
# If we're adding a file that existed in the base changeset, it
# means it was previously deleted and then added back. We
# therefore want the operation to look like an edit. If it
# didn't exist, then we added, deleted, and are now adding
# again.
if old_action == 'skip':
new_action = 'add'
else:
new_action = 'edit'
elif current_action == 'edit':
# Edits don't affect the previous type of change
# (edit+edit=edit, move+edit=move, add+edit=add).
new_action = old_action
elif current_action == 'delete':
# If we're deleting a file which did not exist in the base
# changeset, then we want to just skip it entirely (since it
# means it's been added and then deleted). Otherwise, it's a
# real delete.
if file_entry['newFile']:
new_action = 'skip'
else:
new_action = 'delete'
elif current_action == 'move/delete':
new_action = 'move'
file_entry['depotFile'] = change['newFilename']
file_entry['rev'] = change['rev']
file_entry['action'] = new_action
def _extract_edit_files(self, depot_file, local_file, rev_a, rev_b,
                        cl_is_shelved, cl_is_submitted):
    """Fetch the 'old' and 'new' copies of an edited file.

    Returns a (old filename, new filename) tuple. The old side is
    always printed from the depot at ``rev_a``; the new side comes
    from the shelf, the depot, or the client workspace depending on
    the state of the changelist. This can raise a ValueError if the
    extraction fails.
    """
    # Old side: always the depot revision being edited against.
    old_path = make_tempfile()
    self._write_file('%s#%s' % (depot_file, rev_a), old_path)

    if cl_is_shelved:
        # The new side lives on the shelf (@=<changelist>).
        new_path = make_tempfile()
        self._write_file('%s@=%s' % (depot_file, rev_b), new_path)
    elif cl_is_submitted:
        # The new side is a submitted depot revision.
        new_path = make_tempfile()
        self._write_file('%s#%s' % (depot_file, rev_b), new_path)
    else:
        # Pending change: just reference the file within the client
        # view.
        new_path = local_file

    return old_path, new_path
def _extract_add_files(self, depot_file, local_file, revision,
                       cl_is_shelved, cl_is_pending):
    """Fetch the 'old' and 'new' copies of an added file.

    Returns a (old filename, new filename) tuple, where the old side
    is always an empty temp file. This can raise a ValueError if the
    extraction fails.
    """
    # The old side of an add is always an empty temp file.
    old_path = make_tempfile()

    if cl_is_shelved:
        # Print the shelved copy (@=<changelist>).
        new_path = make_tempfile()
        self._write_file('%s@=%s' % (depot_file, revision), new_path)
    elif cl_is_pending:
        # Pending change: just reference the file within the client
        # view.
        new_path = local_file
    else:
        # Submitted change: print the depot revision.
        new_path = make_tempfile()
        self._write_file('%s#%s' % (depot_file, revision), new_path)

    return old_path, new_path
def _extract_delete_files(self, depot_file, revision):
    """Fetch the 'old' and 'new' copies of a deleted file.

    Returns a (old filename, new filename) tuple, where the new side
    is always an empty temp file. This can raise a ValueError if the
    extraction fails.
    """
    # Old side: the revision that existed before the delete.
    old_path = make_tempfile()
    self._write_file('%s#%s' % (depot_file, revision), old_path)

    # New side: an empty temp file representing the deletion.
    return old_path, make_tempfile()
def _extract_move_files(self, old_depot_file, tip, base_revision,
                        cl_is_shelved):
    """Fetch the 'old' and 'new' files for a move operation.

    Returns a tuple of (old filename, new filename, new depot path).
    This can raise a ValueError if extraction fails.
    """
    # XXX: fstat *ought* to work, but perforce doesn't supply the
    # movedFile field in fstat (or apparently anywhere else) when a
    # change is shelved. For now, _diff_pending avoids calling this
    # method at all for shelved changes and instead treats them as
    # deletes and adds. (With shelf support, the fstat paths below
    # would need to be '%s@=%s' % (path, tip) instead.)
    assert not cl_is_shelved

    source_stat = self.p4.fstat(old_depot_file,
                                ['clientFile', 'movedFile'])

    if 'clientFile' not in source_stat or 'movedFile' not in source_stat:
        raise ValueError('Unable to get moved file information')

    # Old side: the source file at its base revision.
    old_filename = make_tempfile()
    self._write_file('%s#%s' % (old_depot_file, base_revision),
                     old_filename)

    dest_stat = self.p4.fstat(source_stat['movedFile'],
                              ['clientFile', 'depotFile'])

    if 'clientFile' not in dest_stat or 'depotFile' not in dest_stat:
        raise ValueError('Unable to get moved file information')

    # The new file is referenced directly in the client view; the new
    # depot path is included in the diff index.
    return old_filename, dest_stat['clientFile'], dest_stat['depotFile']
def _path_diff(self, args, exclude_patterns):
    """
    Process a path-style diff. This allows people to post individual files
    in various ways.

    Multiple paths may be specified in `args`. The path styles supported
    are:

    //path/to/file
        Upload file as a "new" file.

    //path/to/dir/...
        Upload all files as "new" files.

    //path/to/file[@#]rev
        Upload file from that rev as a "new" file.

    //path/to/file[@#]rev,[@#]rev
        Upload a diff between revs.

    //path/to/dir/...[@#]rev,[@#]rev
        Upload a diff of all files between revs in that directory.

    Returns a dict with a 'diff' key holding the diff as bytes.
    """
    # Matches a depot path plus an optional '[#@]rev' first revision
    # and an optional ',[#@]rev' second revision.
    r_revision_range = re.compile(r'^(?P<path>//[^@#]+)' +
                                  r'(?P<revision1>[#@][^,]+)?' +
                                  r'(?P<revision2>,[#@][^,]+)?$')

    # Scratch files reused for every path: an always-empty file for
    # the missing side of adds/deletes, and two buffers for printed
    # depot revisions. All three are removed at the end.
    empty_filename = make_tempfile()
    tmp_diff_from_filename = make_tempfile()
    tmp_diff_to_filename = make_tempfile()
    diff_lines = []

    for path in args:
        m = r_revision_range.match(path)

        if not m:
            die('Path %r does not match a valid Perforce path.' % (path,))
        revision1 = m.group('revision1')
        revision2 = m.group('revision2')
        first_rev_path = m.group('path')

        if revision1:
            first_rev_path += revision1
        records = self.p4.files(first_rev_path)

        # Make a map for convenience.
        files = {}

        # Records are:
        # 'rev': '1'
        # 'func': '...'
        # 'time': '1214418871'
        # 'action': 'edit'
        # 'type': 'ktext'
        # 'depotFile': '...'
        # 'change': '123456'
        for record in records:
            if record['action'] not in ('delete', 'move/delete'):
                if revision2:
                    # Two revisions given: this record is the "old"
                    # side; the "new" side gets filled in below.
                    files[record['depotFile']] = [record, None]
                else:
                    # One revision: post the file as a "new" file.
                    files[record['depotFile']] = [None, record]

        if revision2:
            # [1:] to skip the comma.
            second_rev_path = m.group('path') + revision2[1:]
            records = self.p4.files(second_rev_path)

            for record in records:
                if record['action'] not in ('delete', 'move/delete'):
                    try:
                        # Pair this record up as the "new" side of an
                        # entry seen at the first revision.
                        m = files[record['depotFile']]
                        m[1] = record
                    except KeyError:
                        # The file only exists at the second revision,
                        # so treat it as an add.
                        files[record['depotFile']] = [None, record]

        old_file = new_file = empty_filename
        changetype_short = None

        for depot_path, (first_record, second_record) in \
            six.iteritems(files):
            old_file = new_file = empty_filename

            if first_record is None:
                # Add: diff the new revision against the empty file.
                new_path = '%s#%s' % (depot_path, second_record['rev'])
                self._write_file(new_path, tmp_diff_to_filename)
                new_file = tmp_diff_to_filename
                changetype_short = 'A'
                base_revision = 0
            elif second_record is None:
                # Delete: diff the old revision against the empty file.
                old_path = '%s#%s' % (depot_path, first_record['rev'])
                self._write_file(old_path, tmp_diff_from_filename)
                old_file = tmp_diff_from_filename
                changetype_short = 'D'
                base_revision = int(first_record['rev'])
            elif first_record['rev'] == second_record['rev']:
                # We when we know the revisions are the same, we don't need
                # to do any diffing. This speeds up large revision-range
                # diffs quite a bit.
                continue
            else:
                # Modification: print both revisions and diff them.
                old_path = '%s#%s' % (depot_path, first_record['rev'])
                new_path = '%s#%s' % (depot_path, second_record['rev'])
                self._write_file(old_path, tmp_diff_from_filename)
                self._write_file(new_path, tmp_diff_to_filename)
                new_file = tmp_diff_to_filename
                old_file = tmp_diff_from_filename
                changetype_short = 'M'
                base_revision = int(first_record['rev'])

            local_path = self._depot_to_local(depot_path)

            if self._should_exclude_file(local_path, depot_path,
                                         exclude_patterns):
                continue

            # TODO: We're passing new_depot_file='' here just to make
            # things work like they did before the moved file change was
            # added (58ccae27). This section of code needs to be updated
            # to properly work with moved files.
            dl = self._do_diff(old_file, new_file, depot_path,
                               base_revision, '', changetype_short,
                               ignore_unmodified=True)
            diff_lines += dl

    os.unlink(empty_filename)
    os.unlink(tmp_diff_from_filename)
    os.unlink(tmp_diff_to_filename)

    return {
        'diff': b''.join(diff_lines),
    }
def _do_diff(self, old_file, new_file, depot_file, base_revision,
             new_depot_file, changetype_short, ignore_unmodified=False):
    """Do the work of producing a diff for Perforce.

    Args:
        old_file: The absolute path to the "old" file.
        new_file: The absolute path to the "new" file.
        depot_file: The depot path in Perforce for this file.
        base_revision: The base perforce revision number of the old
            file as an integer.
        new_depot_file: Location of the new file. Only used for moved
            files.
        changetype_short: The change type as a short string.
        ignore_unmodified: If True, will return an empty list if the
            file is not changed.

    Returns a list of bytes diff lines.
    """
    if hasattr(os, 'uname') and os.uname()[0] == 'SunOS':
        diff_cmd = ['gdiff', '-urNp', old_file, new_file]
    else:
        diff_cmd = ['diff', '-urNp', old_file, new_file]

    # Diff returns "1" if differences were found.
    dl = execute(diff_cmd, extra_ignore_errors=(1, 2),
                 log_output_on_error=False, translate_newlines=False,
                 results_unicode=False)

    # If the input file has ^M characters at end of line, lets ignore
    # them.
    dl = dl.replace(b'\r\r\n', b'\r\n')
    dl = dl.splitlines(True)

    cwd = os.getcwd()

    if depot_file.startswith(cwd):
        local_path = depot_file[len(cwd) + 1:]
    else:
        local_path = depot_file

    if changetype_short == 'MV':
        is_move = True

        if new_depot_file.startswith(cwd):
            new_local_path = new_depot_file[len(cwd) + 1:]
        else:
            new_local_path = new_depot_file
    else:
        is_move = False
        new_local_path = local_path

    # Special handling for the output of the diff tool on binary
    # files: diff outputs "Files a and b differ", and the code below
    # expects the output to start with "Binary files ".
    #
    # Bug fix: these lines previously %-formatted str paths directly
    # into bytes literals (b'Files %s ...' % (old_file, new_file)),
    # which raises TypeError on Python 3 because bytes interpolation
    # (PEP 461) only accepts bytes-like %s arguments. Build the text
    # as str and encode it instead.
    if (len(dl) == 1 and
        dl[0].startswith(('Files %s and %s differ'
                          % (old_file, new_file)).encode('utf-8'))):
        dl = [('Binary files %s and %s differ\n'
               % (old_file, new_file)).encode('utf-8')]

    if dl == [] or dl[0].startswith(b'Binary files '):
        is_empty_and_changed = (self.supports_empty_files() and
                                changetype_short in ('A', 'D'))

        if dl == [] and (is_move or is_empty_and_changed):
            line = ('==== %s#%s ==%s== %s ====\n'
                    % (depot_file, base_revision, changetype_short,
                       new_local_path)).encode('utf-8')
            dl.insert(0, line)
            dl.append(b'\n')
        else:
            if ignore_unmodified:
                return []
            else:
                print('Warning: %s in your changeset is unmodified' %
                      local_path)
    elif len(dl) > 1:
        m = re.search(br'(\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d)', dl[1])

        if m:
            timestamp = m.group(1).decode('utf-8')
        else:
            # Thu Sep  3 11:24:48 2007
            m = self.DATE_RE.search(dl[1])

            if not m:
                die('Unable to parse diff header: %s' % dl[1])

            month_map = {
                b'Jan': b'01',
                b'Feb': b'02',
                b'Mar': b'03',
                b'Apr': b'04',
                b'May': b'05',
                b'Jun': b'06',
                b'Jul': b'07',
                b'Aug': b'08',
                b'Sep': b'09',
                b'Oct': b'10',
                b'Nov': b'11',
                b'Dec': b'12',
            }
            month = month_map[m.group(2)]
            day = m.group(3)
            time_str = m.group(4)
            year = m.group(5)

            # Bug fix: the groups are bytes (the pattern matched
            # against the raw diff output), and str %-formatting of
            # bytes produces "b'...'" reprs on Python 3. Decode them
            # before building the str timestamp.
            timestamp = ('%s-%s-%s %s'
                         % (year.decode('utf-8'),
                            month.decode('utf-8'),
                            day.decode('utf-8'),
                            time_str.decode('utf-8')))

        dl[0] = ('--- %s\t%s#%s\n'
                 % (local_path, depot_file, base_revision)).encode('utf-8')
        dl[1] = ('+++ %s\t%s\n'
                 % (new_local_path, timestamp)).encode('utf-8')

        if is_move:
            dl.insert(0,
                      ('Moved to: %s\n' % new_depot_file).encode('utf-8'))
            dl.insert(0,
                      ('Moved from: %s\n' % depot_file).encode('utf-8'))

        # Not everybody has files that end in a newline (ugh). This
        # ensures that the resulting diff file isn't broken.
        if not dl[-1].endswith(b'\n'):
            dl.append(b'\n')
    else:
        die('ERROR, no valid diffs: %s' % dl[0].decode('utf-8'))

    return dl
def _write_file(self, depot_path, tmpfile):
    """Write the contents of a depot path into a temp file.

    ``p4 print`` marks its output file read-only, which would make a
    later unlink() call fail, so the file is made read/write
    afterwards.

    Raises ValueError if the printed file turns out to be a symlink.
    """
    logging.debug('Writing "%s" to "%s"' % (depot_path, tmpfile))
    self.p4.print_file(depot_path, out_file=tmpfile)

    # The output of 'p4 print' will be a symlink if that's what
    # version control contains. There's a few reasons to skip these
    # files...
    #
    # * Relative symlinks will likely be broken, causing an unexpected
    #   OSError.
    # * File that's symlinked to isn't necessarily in version control.
    # * Users expect that this will only process files under version
    #   control. If I can replace a file they opened with a symlink to
    #   private keys in '~/.ssh', then they'd probably be none too
    #   happy when rbt uses their credentials to publish its contents.
    if os.path.islink(tmpfile):
        raise ValueError('"%s" is a symlink' % depot_path)

    os.chmod(tmpfile, stat.S_IREAD | stat.S_IWRITE)
def _depot_to_local(self, depot_path):
"""
Given a path in the depot return the path on the local filesystem to
the same file. If there are multiple results, take only the last
result from the where command.
"""
where_output = self.p4.where(depot_path)
try:
return where_output[-1]['path']
except:
# XXX: This breaks on filenames with spaces.
return where_output[-1]['data'].split(' ')[2].strip()
def get_raw_commit_message(self, revisions):
    """Return the commit message for the tip changelist.

    Since local changelists in perforce are not ordered with respect
    to one another, this implementation looks at only the tip
    revision. The default changelist has no description (and couldn't
    be accessed from p4 change anyway), so an empty string is
    returned for it.
    """
    # The parsed revision spec may include a colon-delimited prefix
    # indicating that the change is pending; strip it to get the raw
    # changelist number for p4 change.
    changelist = revisions['tip'].split(':', 1)[-1]

    if changelist == self.REVISION_DEFAULT_CLN:
        return ''

    logging.debug('Fetching description for changelist %s', changelist)
    change = self.p4.change(changelist)

    if len(change) == 1 and 'Description' in change[0]:
        return change[0]['Description']

    return ''
def apply_patch_for_empty_files(self, patch, p_num, revert=False):
    """Apply any added or deleted empty files found in the patch.

    Returns True if at least one empty file was applied via 'p4 add'
    or 'p4 delete'; False if the patch contains no empty files or an
    error occurs while applying them.
    """
    applied = False

    # When reverting, an added file must be deleted and vice versa.
    if revert:
        added_files = self.DELETED_FILES_RE.findall(patch)
        deleted_files = self.ADDED_FILES_RE.findall(patch)
    else:
        added_files = self.ADDED_FILES_RE.findall(patch)
        deleted_files = self.DELETED_FILES_RE.findall(patch)

    # The patch carries client-relative names; prepend the root of
    # the Perforce client to each file name.
    client_root = self.p4.info().get('Client root')
    added_files = ['%s/%s' % (client_root, f) for f in added_files]
    deleted_files = ['%s/%s' % (client_root, f) for f in deleted_files]

    if added_files:
        make_empty_files(added_files)
        result = execute(['p4', 'add'] + added_files, ignore_errors=True,
                         none_on_ignored_error=True)

        if result is None:
            logging.error('Unable to execute "p4 add" on: %s',
                          ', '.join(added_files))
        else:
            applied = True

    if deleted_files:
        result = execute(['p4', 'delete'] + deleted_files,
                         ignore_errors=True, none_on_ignored_error=True)

        if result is None:
            logging.error('Unable to execute "p4 delete" on: %s',
                          ', '.join(deleted_files))
        else:
            applied = True

    return applied
def _supports_moves(self):
return (self.capabilities and
self.capabilities.has_capability('scmtools', 'perforce',
'moved_files'))
def _supports_empty_files(self):
"""Checks if the RB server supports added/deleted empty files."""
return (self.capabilities and
self.capabilities.has_capability('scmtools', 'perforce',
'empty_files'))
def _should_exclude_file(self, local_file, depot_file, exclude_patterns):
"""Determine if a file should be excluded from a diff.
Check if the file identified by (local_file, depot_file) should be
excluded from the diff. If a pattern beings with '//', then it will be
matched against the depot_file. Otherwise, it will be matched against
the local file.
This function expects `exclude_patterns` to be normalized.
"""
for pattern in exclude_patterns:
if pattern.startswith('//'):
if fnmatch.fnmatch(depot_file, pattern):
return True
elif local_file and fnmatch.fnmatch(local_file, pattern):
return True
return False
def normalize_exclude_patterns(self, patterns):
    """Normalize the set of patterns so all non-depot paths are absolute.

    A path with a leading // is interpreted as a depot pattern and
    remains unchanged. A path with a leading path separator is
    interpreted as being relative to the Perforce client root. All
    other paths are interpreted as being relative to the current
    working directory. Non-depot paths are transformed into absolute
    paths.
    """
    cwd = os.getcwd()
    base_dir = self.p4.info().get('Client root')

    def normalize(p):
        if p.startswith('//'):
            # Absolute depot patterns remain unchanged.
            return p
        elif p.startswith(os.path.sep):
            # Bug fix: this branch previously tested `pattern` — the
            # list comprehension variable below — which is not in
            # scope inside this function on Python 3 and raised a
            # NameError. Test the parameter `p` instead.
            #
            # Patterns beginning with the operating system's path
            # separator are relative to the repository root.
            assert base_dir is not None
            p = os.path.join(base_dir, p[1:])
        else:
            # All other patterns are considered to be relative to the
            # current working directory.
            p = os.path.join(cwd, p)

        return os.path.normpath(p)

    return [normalize(pattern) for pattern in patterns]
def _replace_description_in_changelist_spec(self, old_spec,
new_description):
"""Replace the description in the given changelist spec.
old_spec is a formatted p4 changelist spec string (the raw output from
p4 change). This method replaces the existing description with
new_description, and returns the new changelist spec.
"""
new_spec = ''
whitespace = tuple(string.whitespace)
description_key = 'Description:'
skipping_old_description = False
for line in old_spec.splitlines(True):
if not skipping_old_description:
if not line.startswith(description_key):
new_spec += line
else:
# Insert the new description. Don't include the first line
# of the old one if it happens to be on the same line as
# the key.
skipping_old_description = True
new_spec += description_key
for desc_line in new_description.splitlines():
new_spec += '\t%s\n' % desc_line
else:
# Ignore the description from the original file (all lines
# that start with whitespace until the next key is
# encountered).
if line.startswith(whitespace):
continue
else:
skipping_old_description = False
new_spec += '\n%s' % line
return new_spec
def amend_commit_description(self, message, revisions):
    """Update a pending changelist's description to the given string.

    Since local changelists on perforce have no ordering with respect
    to each other, the revisions argument is mandatory; its tip
    identifies the changelist to amend.

    Raises AmendError for submitted, default, or malformed changelist
    IDs.
    """
    changelist_id = revisions['tip']
    logging.debug('Preparing to amend change %s' % changelist_id)

    # Only pending, numbered changelists may be amended.
    if not changelist_id.startswith(self.REVISION_PENDING_CLN_PREFIX):
        raise AmendError('Cannot modify submitted changelist %s'
                         % changelist_id)

    changelist_num = changelist_id.split(':', 1)[1]

    if changelist_num == self.REVISION_DEFAULT_CLN:
        raise AmendError('Cannot modify the default changelist')

    if not changelist_num.isdigit():
        raise AmendError('%s is an invalid changelist ID' % changelist_num)

    # 'p4 change -i' doesn't take in marshalled objects, so fetch the
    # spec as raw text and edit the description in manually.
    spec = self.p4.change(changelist_num, marshalled=False)
    self.p4.modify_change(
        self._replace_description_in_changelist_spec(spec, message))
| {
"content_hash": "2ad6919a88bb38d0cb7760100a7b59e9",
"timestamp": "",
"source": "github",
"line_count": 1514,
"max_line_length": 79,
"avg_line_length": 39.47886393659181,
"alnum_prop": 0.5279817971926185,
"repo_name": "beol/rbtools",
"id": "80e497456af12737826d853725e3f7c7e4793ab5",
"size": "59771",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "rbtools/clients/perforce.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9410"
},
{
"name": "HTML",
"bytes": "1000"
},
{
"name": "Python",
"bytes": "724313"
},
{
"name": "Shell",
"bytes": "39731"
}
],
"symlink_target": ""
} |
"""Support for UPC ConnectBox router."""
import asyncio
import logging
import aiohttp
from aiohttp.hdrs import REFERER, USER_AGENT
import async_timeout
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import CONF_HOST, HTTP_HEADER_X_REQUESTED_WITH
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)

# Function code passed as 'fun' to the router's getter.xml webservice;
# its response is parsed for MACAddr entries (see async_scan_devices).
CMD_DEVICES = 123

# Default LAN address of the UPC ConnectBox router.
DEFAULT_IP = "192.168.0.1"

# Allow overriding the router address via the 'host' config option.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {vol.Optional(CONF_HOST, default=DEFAULT_IP): cv.string}
)
async def async_get_scanner(hass, config):
    """Return the UPC device scanner, or None if the login token
    could not be obtained."""
    scanner = UPCDeviceScanner(hass, config[DOMAIN])
    if await scanner.async_initialize_token():
        return scanner
    return None
class UPCDeviceScanner(DeviceScanner):
    """This class queries a router running UPC ConnectBox firmware."""

    def __init__(self, hass, config):
        """Initialize the scanner."""
        self.hass = hass
        self.host = config[CONF_HOST]
        self.data = {}
        # Session token issued by the router firmware; None means "not
        # logged in" and triggers a fresh login on the next scan.
        self.token = None
        # Headers mimicking a browser XHR request to the router's web UI.
        self.headers = {
            HTTP_HEADER_X_REQUESTED_WITH: "XMLHttpRequest",
            REFERER: "http://{}/index.html".format(self.host),
            USER_AGENT: (
                "Mozilla/5.0 (Windows NT 10.0; WOW64) "
                "AppleWebKit/537.36 (KHTML, like Gecko) "
                "Chrome/47.0.2526.106 Safari/537.36"
            ),
        }
        self.websession = async_get_clientsession(hass)

    async def async_scan_devices(self):
        """Scan for new devices and return a list with found device IDs."""
        import defusedxml.ElementTree as ET

        if self.token is None:
            token_initialized = await self.async_initialize_token()
            if not token_initialized:
                _LOGGER.error("Not connected to %s", self.host)
                return []

        raw = await self._async_ws_function(CMD_DEVICES)
        try:
            # raw is None when the webservice call failed; ET.fromstring
            # then raises TypeError, handled below.
            xml_root = ET.fromstring(raw)
            return [mac.text for mac in xml_root.iter("MACAddr")]
        except (ET.ParseError, TypeError):
            _LOGGER.warning("Can't read device from %s", self.host)
            # Drop the token so the next scan performs a fresh login.
            self.token = None
            return []

    async def async_get_device_name(self, device):
        """Get the device name (the name of the wireless device not used)."""
        return None

    async def async_initialize_token(self):
        """Get first token.

        Returns True on success, False when the login page could not be
        loaded within the timeout.
        """
        try:
            # get first token
            with async_timeout.timeout(10):
                response = await self.websession.get(
                    "http://{}/common_page/login.html".format(self.host),
                    headers=self.headers,
                )
                await response.text()
                # The token is delivered as a session cookie.
                self.token = response.cookies["sessionToken"].value
                return True
        except (asyncio.TimeoutError, aiohttp.ClientError):
            _LOGGER.error("Can not load login page from %s", self.host)
            return False

    async def _async_ws_function(self, function):
        """Execute a command on UPC firmware webservice.

        Returns the response body as text, or None on any failure
        (non-200 status, timeout, or client error). The stored token is
        reset on failure so the next scan re-authenticates.
        """
        try:
            with async_timeout.timeout(10):
                # The 'token' parameter has to be first, and 'fun' second
                # or the UPC firmware will return an error
                response = await self.websession.post(
                    "http://{}/xml/getter.xml".format(self.host),
                    data="token={}&fun={}".format(self.token, function),
                    headers=self.headers,
                    allow_redirects=False,
                )

                # Error?
                if response.status != 200:
                    _LOGGER.warning("Receive http code %d", response.status)
                    self.token = None
                    return

                # Load data, store token for next request
                self.token = response.cookies["sessionToken"].value
                return await response.text()

        except (asyncio.TimeoutError, aiohttp.ClientError):
            _LOGGER.error("Error on %s", function)
            self.token = None
| {
"content_hash": "52b1fc273feb8bccc3edd1d9a3660e23",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 77,
"avg_line_length": 32.99230769230769,
"alnum_prop": 0.5868500816041036,
"repo_name": "fbradyirl/home-assistant",
"id": "3355c33ab2a182b2d4e40cdb6150e049ad5d74dd",
"size": "4289",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/upc_connect/device_tracker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1829"
},
{
"name": "Python",
"bytes": "16494727"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17784"
}
],
"symlink_target": ""
} |
# Demo: rotate the debug-visualizer light source around an R2D2 model
# while stepping the simulation in real time.
import pybullet as p
import math
import time

# Fixed simulation timestep: 240 Hz.
dt = 1./240.

p.connect(p.GUI)#SHARED_MEMORY_GUI)
p.loadURDF("r2d2.urdf",[0,0,1])
p.loadURDF("plane.urdf")
p.setGravity(0,0,-10)

# Radius of the circle the light traces around the scene.
radius=5
t = 0

# Configure high-resolution shadows before entering the render loop.
p.configureDebugVisualizer(shadowMapWorldSize=5)
p.configureDebugVisualizer(shadowMapResolution=8192)

# Runs forever; close the GUI window to stop.
while (1):
	t+=dt
	p.configureDebugVisualizer(lightPosition=[radius*math.sin(t),radius*math.cos(t),3])
	p.stepSimulation()
	time.sleep(dt)
"content_hash": "f1856e26cfadc4304674fd7210cf6343",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 85,
"avg_line_length": 20.857142857142858,
"alnum_prop": 0.7442922374429224,
"repo_name": "MTASZTAKI/ApertusVR",
"id": "e1d755708d9d600cc0b4700a5b3ca77065e48d1c",
"size": "438",
"binary": false,
"copies": "2",
"ref": "refs/heads/0.9",
"path": "plugins/physics/bulletPhysics/3rdParty/bullet3/examples/pybullet/examples/configureDebugVisualizer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "7599"
},
{
"name": "C++",
"bytes": "1207412"
},
{
"name": "CMake",
"bytes": "165066"
},
{
"name": "CSS",
"bytes": "1816"
},
{
"name": "GLSL",
"bytes": "223507"
},
{
"name": "HLSL",
"bytes": "141879"
},
{
"name": "HTML",
"bytes": "34827"
},
{
"name": "JavaScript",
"bytes": "140550"
},
{
"name": "Python",
"bytes": "1370"
}
],
"symlink_target": ""
} |
from django.test import SimpleTestCase
from cbmail.mixins import MailingListMixin
class MixinsTest(SimpleTestCase):
    """Tests for cbmail.mixins.MailingListMixin."""

    # Minimal subclass that does not override get_mailing_list(), used to
    # exercise the mixin's unimplemented default.
    class NokObject(MailingListMixin):
        """ """

    def test_mixin_nok(self):
        """get_mailing_list() must raise NotImplementedError when a
        subclass does not override it."""
        result = self.NokObject()
        self.assertRaises(NotImplementedError, result.get_mailing_list)
| {
"content_hash": "ecd755c7ed8e5baa18e3f5986b80f8a2",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 71,
"avg_line_length": 24,
"alnum_prop": 0.717948717948718,
"repo_name": "dipcode-software/django-mailings",
"id": "6ce602bec8bce139a0df7e4634e5b3f8785393fa",
"size": "312",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cbmail/tests/test_mixins.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "299"
},
{
"name": "Python",
"bytes": "17130"
}
],
"symlink_target": ""
} |
from datetime import datetime
from kitsune.announcements.models import Announcement
from kitsune.announcements.tests import AnnouncementFactory
from kitsune.sumo.tests import TestCase
from kitsune.sumo.urlresolvers import reverse
from kitsune.users.tests import UserFactory, add_permission
from kitsune.wiki.tests import LocaleFactory
class TestCreateLocaleAnnouncement(TestCase):
    """Tests for the per-locale announcement creation view.

    Creation is allowed for superusers, locale leaders, and users with the
    add_announcement permission; other users are rejected.
    """

    def setUp(self):
        self.locale = LocaleFactory(locale="es")

    def _create_test(self, status, count):
        """Login, or other setup, then call this.

        Posts a new announcement for the "es" locale and checks both the
        response status code and the resulting announcement count.
        """
        url = reverse("announcements.create_for_locale", locale="es")
        resp = self.client.post(
            url,
            {
                "content": "Look at me!",
                "show_after": "2012-01-01",
            },
        )
        self.assertEqual(resp.status_code, status)
        self.assertEqual(Announcement.objects.count(), count)

    def test_create(self):
        # Superusers may create announcements.
        u = UserFactory(is_superuser=1)
        self.client.login(username=u.username, password="testpass")
        self._create_test(200, 1)

    def test_leader(self):
        # Locale leaders may create announcements for their locale.
        u = UserFactory()
        self.locale.leaders.add(u)
        self.locale.save()
        self.client.login(username=u.username, password="testpass")
        self._create_test(200, 1)

    def test_has_permission(self):
        # The explicit add_announcement permission also grants access.
        u = UserFactory()
        add_permission(u, Announcement, "add_announcement")
        self.client.login(username=u.username, password="testpass")
        self._create_test(200, 1)

    def test_no_perms(self):
        # An ordinary user is forbidden (403) and nothing is created.
        u = UserFactory()
        self.client.login(username=u.username, password="testpass")
        self._create_test(403, 0)

    def test_anon(self):
        # Anonymous users get a redirect (302), presumably to login.
        self._create_test(302, 0)
class TestDeleteAnnouncement(TestCase):
    """Tests for the announcement deletion view.

    Deletion is allowed for superusers, locale leaders, and users with the
    add_announcement permission; other users are rejected.
    """

    def setUp(self):
        self.locale = LocaleFactory(locale="es")

        # The locale leader who owns the announcement under test.
        self.u = UserFactory()
        self.locale.leaders.add(self.u)
        self.locale.save()

        self.announcement = AnnouncementFactory(
            creator=self.u,
            locale=self.locale,
            content="Look at me!",
            show_after=datetime(2012, 1, 1, 0, 0, 0),
        )

    def _delete_test(self, id, status, count):
        """Login, or other setup, then call this.

        Posts to the delete view and checks both the response status code
        and the number of announcements left afterwards.
        """
        url = reverse("announcements.delete", locale="es", args=(id,))
        resp = self.client.post(url)
        self.assertEqual(resp.status_code, status)
        self.assertEqual(Announcement.objects.count(), count)

    def test_delete(self):
        # Superusers may delete announcements (204, none left).
        u = UserFactory(is_superuser=1)
        self.client.login(username=u.username, password="testpass")
        self._delete_test(self.announcement.id, 204, 0)

    def test_leader(self):
        # Use the user that was created in setUp.
        self.client.login(username=self.u.username, password="testpass")
        self._delete_test(self.announcement.id, 204, 0)

    def test_has_permission(self):
        # The explicit add_announcement permission also grants deletion.
        u = UserFactory()
        add_permission(u, Announcement, "add_announcement")
        self.client.login(username=u.username, password="testpass")
        self._delete_test(self.announcement.id, 204, 0)

    def test_no_perms(self):
        # An ordinary user is forbidden (403); the announcement survives.
        u = UserFactory()
        self.client.login(username=u.username, password="testpass")
        self._delete_test(self.announcement.id, 403, 1)

    def test_anon(self):
        # Anonymous users get a redirect (302); the announcement survives.
        self._delete_test(self.announcement.id, 302, 1)
| {
"content_hash": "f9e420ee63e0ccec940f88cba403cde4",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 72,
"avg_line_length": 33.89,
"alnum_prop": 0.6332251401593391,
"repo_name": "mozilla/kitsune",
"id": "e1b0a10766eb19bf7b6e8f4bd2c09e89bae2df62",
"size": "3389",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "kitsune/announcements/tests/test_views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1156"
},
{
"name": "Dockerfile",
"bytes": "3027"
},
{
"name": "HTML",
"bytes": "535448"
},
{
"name": "JavaScript",
"bytes": "658477"
},
{
"name": "Jinja",
"bytes": "4837"
},
{
"name": "Makefile",
"bytes": "2193"
},
{
"name": "Nunjucks",
"bytes": "68656"
},
{
"name": "Python",
"bytes": "2827116"
},
{
"name": "SCSS",
"bytes": "240092"
},
{
"name": "Shell",
"bytes": "10759"
},
{
"name": "Svelte",
"bytes": "26864"
}
],
"symlink_target": ""
} |
# Allowed "find mode" values for CMakeDeps dependency resolution.
# NOTE(review): these presumably select CMake find_package() module mode,
# config mode, both, or neither -- confirm against the CMakeDeps generator.
FIND_MODE_MODULE = "module"
FIND_MODE_CONFIG = "config"
FIND_MODE_NONE = "none"
FIND_MODE_BOTH = "both"
| {
"content_hash": "2f900058955e8ff5f70ffa3bfb233b36",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 27,
"avg_line_length": 26,
"alnum_prop": 0.6923076923076923,
"repo_name": "conan-io/conan",
"id": "8b6329c26ed8fd7a49ef6d9c4c5f23a8fe977351",
"size": "104",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "conan/tools/cmake/cmakedeps/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "264"
},
{
"name": "C++",
"bytes": "425"
},
{
"name": "CMake",
"bytes": "447"
},
{
"name": "Python",
"bytes": "8209945"
}
],
"symlink_target": ""
} |
import json
class BuilderDict:
    """Fluent builder that accumulates key/value pairs and serializes
    them to a JSON object string."""

    def __init__(self):
        self._dict = {}

    def add(self, key, val):
        """Store *val* under *key* and return self to allow chaining."""
        self._dict[key] = val
        return self

    def to_string(self):
        """Serialize the accumulated pairs as a JSON object string."""
        return json.dumps(self._dict)

    @staticmethod
    def create_update_lease():
        """Build the canned 'not valid token' update-lease response."""
        return BuilderDict().add('status', 'not valid token').to_string()
| {
"content_hash": "77acfffceae28b0711752886975d7867",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 49,
"avg_line_length": 20.94736842105263,
"alnum_prop": 0.5778894472361809,
"repo_name": "aazizyan/playlist-core",
"id": "3a7634190606ea99692cd5e8d84783e8f902dd93",
"size": "398",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "objects/builder_dict.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21379"
}
],
"symlink_target": ""
} |
"""
Routes and views for the bottle application.
"""
import os
import json
from bottle import route, view, static_file
from datetime import datetime
# Development fallback configuration; overridden below when deployed.
config = { "secret_key" : "my developer secret value" }

if os.getenv("MY_CONFIG"): # you can define the setting in your Azure Web App
                           # by setting "MY_CONFIG" in the Appsettings.
    # MY_CONFIG must contain a JSON object; json.loads raises ValueError
    # on malformed content, failing fast at import time.
    config = json.loads(os.getenv("MY_CONFIG"))
@route('/static/<filepath:path>')
def server_static(filepath):
    """Handler for static files, used with the development server.

    When running under a production server such as IIS or Apache,
    the server should be configured to serve the static files.
    bottle's static_file() rejects paths escaping the root directory."""
    return static_file(filepath, root="static/")
@route('/')
@route('/home')
@view('index')
def home():
    """Renders the home page."""
    current_year = datetime.now().year
    return {
        'year': current_year,
        'secret': config.get("secret_key"),
    }
@route('/contact')
@view('contact')
def contact():
    """Renders the contact page."""
    return {
        'title': 'Contact',
        'message': 'Your contact page.',
        'year': datetime.now().year,
    }
@route('/about')
@view('about')
def about():
    """Renders the about page."""
    return {
        'title': 'About',
        'message': 'Your application description page.',
        'year': datetime.now().year,
    }
| {
"content_hash": "9ed2f3c7ea449f1c7bcc4ccc8520e3dd",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 77,
"avg_line_length": 25.358490566037737,
"alnum_prop": 0.6257440476190477,
"repo_name": "ernstbaslerpartner/bottle-azure-boilerplate",
"id": "b7291431a8ef4f084271d1e8523e7473b34c0efb",
"size": "1344",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "routes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "685"
},
{
"name": "JavaScript",
"bytes": "10714"
},
{
"name": "Python",
"bytes": "6408"
},
{
"name": "Smarty",
"bytes": "4105"
}
],
"symlink_target": ""
} |
from behave import *
from behave_webdriver.transformers import matcher_mapping
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
# Use behave_webdriver's transform-aware regex matcher when its transform
# matchers are registered; otherwise fall back to plain regex matching.
# Restored to 'parse' at the bottom of this module.
if 'transform-parse' not in matcher_mapping:
    use_step_matcher('re')
else:
    use_step_matcher('transform-re')
@given('the element "([^"]*)?" is( not)* visible')
@then('I expect that element "([^"]*)?" becomes( not)* visible')
@then('I expect that element "([^"]*)?" is( not)* visible')
def check_element_visibility(context, element, negative):
    """Assert that the element is (or, with 'not', is not) visible."""
    element_is_visible = context.behave_driver.element_visible(element)
    if negative:
        assert not element_is_visible, 'Expected element to not be visible, but it was'
    else:
        assert element_is_visible, 'Expected element to be visible, but it was not visible'
@given('the title is( not)* "([^"]*)?"')
@then('I expect that the title is( not)* "([^"]*)?"')
def title(context, negative, value):
    """Assert that the page title equals (or differs from) *value*."""
    if negative:
        assert context.behave_driver.title != value, 'Title was "{}"'.format(context.behave_driver.title)
    else:
        assert context.behave_driver.title == value, 'Title was "{}"'.format(context.behave_driver.title)
@then('I expect that element "([^"]*)?" is( not)* within the viewport')
def check_element_within_viewport(context, element, negative):
    """Assert that the element is (or is not) completely inside the viewport."""
    element_in_viewport = context.behave_driver.element_in_viewport(element)
    if negative:
        assert not element_in_viewport, 'Element was completely within the viewport'
    else:
        assert element_in_viewport, 'Element was not completely within viewport'
@given('the element "([^"]*)?" is( not)* enabled')
@then('I expect that element "([^"]*)?" is( not)* enabled')
def element_enabled(context, element, negative):
    """Assert that the element is (or is not) enabled."""
    enabled = context.behave_driver.element_enabled(element)
    if negative:
        assert not enabled
    else:
        assert enabled
@given('the element "([^"]*)?" is( not)* selected')
@then('I expect that element "([^"]*)?" is( not)* selected')
def element_selected(context, element, negative):
    """Assert that the element is (or is not) selected."""
    selected = context.behave_driver.element_selected(element)
    if negative:
        assert not selected
    else:
        assert selected
@given('the checkbox "([^"]*)?" is( not)* checked')
@then('I expect that checkbox "([^"]*)?" is( not)* checked')
def element_checked(context, element, negative):
    """Assert that the checkbox is (or is not) checked.

    Checkbox state is queried via the same 'selected' driver call used
    by element_selected.
    """
    checked = context.behave_driver.element_selected(element)
    if negative:
        assert not checked
    else:
        assert checked
@given('there is (an|no) element "([^"]*)?" on the page')
def element_exists(context, an_no, element):
    """Assert element existence; the captured 'an'/'no' word selects
    positive or negative expectation."""
    negative = an_no == 'no'
    exists = context.behave_driver.element_exists(element)
    if negative:
        assert not exists
    else:
        assert exists
@then('I expect that element "([^"]*)?" does( not)* exist')
def check_element_exists(context, element, negative):
    """Assert that the element does (or does not) exist on the page."""
    exists = context.behave_driver.element_exists(element)
    if negative:
        assert not exists, 'Expected the element does not exist, but element "{}" was located'.format(element)
    else:
        assert exists, 'Expected element to exist, but no element "{}" was located'.format(element)
@given('the element "([^"]*)?" contains( not)* the same text as element "([^"]*)?"')
@then('I expect that element "([^"]*)?"( not)* contains the same text as element "([^"]*)?"')
def elements_same_text(context, first_element, negative, second_element):
    """Assert that two elements' text contents are (or are not) identical."""
    first_elem_text = context.behave_driver.get_element_text(first_element)
    second_elem_text = context.behave_driver.get_element_text(second_element)
    same = first_elem_text == second_elem_text
    if negative:
        assert not same, 'Element "{}" text "{}" is same as element "{}"'.format(first_element,
                                                                                 first_elem_text,
                                                                                 second_element)
    else:
        assert same, 'Element "{}" text "{}" is not same as element "{}" text "{}"'.format(first_element,
                                                                                           first_elem_text,
                                                                                           second_element,
                                                                                           second_elem_text)
@given('the element "([^"]*)?"( not)* matches the text "([^"]*)?"')
@then('I expect that element "([^"]*)?"( not)* matches the text "([^"]*)?"')
def element_matches_text(context, element, negative, text):
    """Assert that the element's text equals (or differs from) *text* exactly."""
    elem_text = context.behave_driver.get_element_text(element)
    matches = elem_text == text
    if negative:
        assert not matches, 'Element "{}" text matches "{}"'.format(element,
                                                                    text)
    else:
        assert matches, 'The text "{}" did not match the element text "{}"'.format(text, elem_text)
@given('the element "([^"]*)?"( not)* contains the text "([^"]*)?"')
@then('I expect that element "([^"]*)?"( not)* contains the text "([^"]*)?"')
def check_element_contains_text(context, element, negative, text):
    """Assert that the element's text does (or does not) contain *text*."""
    contains = context.behave_driver.element_contains(element, text)
    if negative:
        assert not contains, 'Element text does contain "{}"'.format(text)
    else:
        assert contains, 'Element text does not contain "{}"'.format(text)
@given('the element "([^"]*)?"( not)* contains any text')
@then('I expect that element "([^"]*)?"( not)* contains any text')
def element_any_text(context, element, negative):
    """Assert that the element has (or has no) non-empty text content."""
    any_text = bool(context.behave_driver.get_element_text(element))
    if negative:
        assert not any_text
    else:
        assert any_text
@given('the element "([^"]*)?" is( not)* empty')
@then('I expect that element "([^"]*)?" is( not)* empty')
def check_element_empty(context, element, negative):
    """Assert that the element's text is empty (or, with 'not', non-empty)."""
    elem_text = context.behave_driver.get_element_text(element)
    any_text = bool(elem_text)
    if negative:
        # "not empty" means the element must contain some text.
        assert any_text is True
    else:
        assert any_text is False
@given('the page url is( not)* "([^"]*)?"')
@then('I expect that the url is( not)* "([^"]*)?"')
def check_url(context, negative, value):
    """Assert that the current URL equals (or differs from) *value*."""
    current_url = context.behave_driver.current_url
    if negative:
        assert current_url != value, 'The url was "{}"'.format(current_url)
    else:
        assert current_url == value, 'Expected url to be "{}", but saw the url was "{}"'.format(value, current_url)
@then('I expect the url to( not)* contain "([^"]*)?"')
def check_url_contains(context, negative, value):
    """Assert that the current URL does (or does not) contain *value*."""
    current_url = context.behave_driver.current_url
    if negative:
        assert value not in current_url, 'url was "{}"'.format(current_url)
    else:
        assert value in current_url, 'url was "{}"'.format(current_url)
@given('the( css)* attribute "([^"]*)?" from element "([^"]*)?" is( not)* "([^"]*)?"')
@then('I expect that the( css)* attribute "([^"]*)?" from element "([^"]*)?" is( not)* "([^"]*)?"')
def check_element_attribute(context, is_css, attr, element, negative, value):
    """Assert an element's HTML (or CSS) attribute equals (or differs from)
    the expected value.

    NOTE(review): in the CSS branch the driver returns a pair and rebinds
    *value* as well -- presumably to normalize CSS values (e.g. colors)
    before comparing; confirm against get_element_attribute.
    """
    if is_css:
        attribute_value, value = context.behave_driver.get_element_attribute(element, attr, is_css, value)
    else:
        attribute_value = context.behave_driver.get_element_attribute(element, attr)
    if negative:
        assert attribute_value != value, 'Attribute value was "{}"'.format(attribute_value)
    else:
        assert attribute_value == value, 'Attribute value was "{}"'.format(attribute_value)
@given('the cookie "([^"]*)?" contains( not)* the value "([^"]*)?"')
@then('I expect that cookie "([^"]*)?"( not)* contains "([^"]*)?"')
def check_cookie_value(context, cookie_key, negative, value):
    """Assert that the named cookie's value equals (or differs from) *value*."""
    cookie = context.behave_driver.get_cookie(cookie_key)
    cookie_value = cookie.get('value')
    if negative:
        assert cookie_value != value, 'Cookie value was "{}"'.format(cookie_value)
    else:
        assert cookie_value == value, 'Cookie value was "{}"'.format(cookie_value)
@given('the cookie "([^"]*)?" does( not)* exist')
def cookie_exists(context, cookie_key, negative):
    """Assert that the named cookie does (or does not) exist."""
    cookie = context.behave_driver.get_cookie(cookie_key)
    if negative:
        assert cookie is None, 'Cookie exists: {}'.format(cookie)
    else:
        assert cookie is not None
@then('I expect that cookie "([^"]*)?"( not)* exists')
def check_cookie_exists(context, cookie_key, negative):
    """Assert that the named cookie does (or does not) exist."""
    cookie = context.behave_driver.get_cookie(cookie_key)
    if negative:
        assert cookie is None, u'Cookie was present: "{}"'.format(cookie)
    else:
        assert cookie is not None, 'Cookie was not found'
@given('the element "([^"]*)?" is( not)* ([\d]+)px (broad|tall)')
@then('I expect that element "([^"]*)?" is( not)* ([\d]+)px (broad|tall)')
def check_element_size(context, element, negative, pixels, how):
    """Assert the element's width ('broad') or height ('tall') in pixels."""
    elem_size = context.behave_driver.get_element_size(element)
    if how == 'tall':
        axis = 'height'
    else:
        axis = 'width'
    if negative:
        assert elem_size[axis] != int(pixels), 'Element size was "{}"'.format(elem_size)
    else:
        assert elem_size[axis] == int(pixels), 'Element size was "{}"'.format(elem_size)
@given('the element "([^"]*)?" is( not)* positioned at ([\d]+)px on the (x|y) axis')
@then('I expect that element "([^"]*)?" is( not)* positioned at ([\d]+)px on the (x|y) axis')
def check_element_position(context, element, negative, pos, axis):
    """Assert the element's pixel position on the given axis."""
    element_position = context.behave_driver.get_element_location(element)
    if negative:
        assert element_position[axis] != int(pos), 'Position was {} on the {} axis'.format(element_position[axis], axis)
    else:
        assert element_position[axis] == int(pos), 'Position was {} on the {} axis'.format(element_position[axis], axis)
@given('a (alertbox|confirmbox|prompt) is( not)* opened')
@then('I expect that a (alertbox|confirmbox|prompt) is( not)* opened')
def check_modal(context, modal, negative):
    """Assert that a JavaScript modal is (or is not) currently open.

    The captured modal type is not used: any alert/confirm/prompt counts.
    """
    if negative:
        assert context.behave_driver.has_alert is False
    else:
        assert context.behave_driver.has_alert is True
@then('I expect that the path is( not)* "([^"]*)?"')
def check_path(context, negative, value):
    """Assert the path component of the current URL equals (or differs
    from) *value*."""
    current_url = context.behave_driver.current_url
    path = urlparse(current_url).path
    if negative:
        assert path != value, 'The path was "{}"'.format(path)
    else:
        assert path == value, 'Expected the path to be "{}", but saw the path "{}"'.format(value, path)
@then('I expect that element "([^"]*)?" (has|does not have) the class "([^"]*)?"')
def check_element_has_class(context, element, has, classname):
    """Assert that the element does (or does not) carry the given CSS class."""
    # The captured group is either "has" or "does not have".
    negative = 'not' in has
    has_class = context.behave_driver.element_has_class(element, classname)
    if negative:
        assert not has_class, 'Classes were {}'.format(context.behave_driver.get_element_attribute(element, 'class'))
    else:
        assert has_class, 'Classes were {}'.format(context.behave_driver.get_element_attribute(element, 'class'))
@then('I expect a new (window|tab) has( not)* been opened')
def check_window_opened(context, _, negative):
    """Assert whether any secondary window/tab handle exists."""
    if negative:
        assert not context.behave_driver.secondary_handles
    else:
        assert bool(context.behave_driver.secondary_handles)
@then('I expect the url "([^"]*)?" is opened in a new (tab|window)')
def check_url_new_window(context, url, _):
    """Assert that *url* is open in one of the secondary windows/tabs.

    Switches through every secondary handle looking for the URL, and
    always restores the primary handle before returning or raising.
    """
    current_handle = context.behave_driver.primary_handle
    for handle in context.behave_driver.secondary_handles:
        context.behave_driver.switch_to_window(handle)
        if context.behave_driver.current_url == url:
            context.behave_driver.switch_to_window(current_handle)
            break
    else:
        # No handle matched; restore focus before failing.
        context.behave_driver.switch_to_window(current_handle)
        if len(context.behave_driver.secondary_handles) < 1:
            raise AssertionError('No secondary handles found!')
        # Bug fix: the '{}' placeholder was never filled in because
        # .format(url) was missing from the original message.
        raise AssertionError(
            "The url '{}' was not found in any handle".format(url))
@then('I expect that element "([^"]*)?" is( not)* focused')
def check_element_focused(context, element, negative):
    """Assert that the element does (or does not) have keyboard focus."""
    element_focused = context.behave_driver.element_focused(element)
    if negative:
        assert not element_focused
    else:
        assert element_focused
@then('I expect that a (alertbox|confirmbox|prompt)( not)* contains the text "([^"]*)?"')
def check_modal_text_contains(context, modal_type, negative, text):
    """Assert that the open modal's text does (or does not) contain *text*."""
    alert_text = context.behave_driver.alert.text
    if negative:
        assert text not in alert_text
    else:
        assert text in alert_text
@then('I wait on element "([^"]*)?"(?: for (\d+)ms)*(?: to( not)* (be checked|be enabled|be selected|be visible|contain a text|contain a value|exist))*')
def wait_for_element_condition(context, element, milliseconds, negative, condition):
    """Wait for the element to (not) satisfy *condition*, optionally
    bounded by a millisecond timeout, and assert the wait succeeded."""
    if milliseconds:
        # The capture may include surrounding text; keep only the digits.
        digits = ''.join(char for char in milliseconds if char.isdigit())
        milliseconds = int(digits)
    result = context.behave_driver.wait_for_element_condition(element, milliseconds, negative, condition)
    if not negative:
        # Normalize None to '' so the failure message reads naturally.
        negative = ''
    assert result, 'was expecting element "{element}" to {negative} {condition}, but the result was {result}'.format(
        element=element,
        negative=negative,
        condition=condition,
        result=result)
@then("I expect the screen is ([\d]+) by ([\d]+) pixels")
def check_screen_size(context, x, y):
    """Assert that the screen size is exactly *x* by *y* pixels.

    Bug fix: the original read the screen size but asserted nothing, so
    this step passed regardless of the actual size.
    """
    screen_x, screen_y = context.behave_driver.screen_size
    assert (screen_x, screen_y) == (int(x), int(y)), \
        'Expected a {}x{} screen, but got {}x{}'.format(
            x, y, screen_x, screen_y)
# Restore behave's default 'parse' matcher so step modules loaded after
# this one are not affected by the regex matcher selected at the top of
# this file.
use_step_matcher('parse')
| {
"content_hash": "31e40b126d8bb0c4f3a8cf75ba94bc65",
"timestamp": "",
"source": "github",
"line_count": 333,
"max_line_length": 153,
"avg_line_length": 40.74174174174174,
"alnum_prop": 0.6220977371563352,
"repo_name": "spyoungtech/behave-webdriver",
"id": "7ab1d1ba38c44395f9a365d73e053cd70bef40b8",
"size": "13567",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "behave_webdriver/steps/expectations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "498"
},
{
"name": "Gherkin",
"bytes": "2435"
},
{
"name": "HTML",
"bytes": "9430"
},
{
"name": "JavaScript",
"bytes": "5659"
},
{
"name": "Python",
"bytes": "75920"
}
],
"symlink_target": ""
} |
from fractions import Fraction
def parse_sexp(string):
    """
    Parses an S-Expression into a list-based tree.

    parse_sexp("(+ 5 (+ 3 5))") results in [['+', '5', ['+', '3', '5']]]

    Double-quoted segments are treated as single atoms (the quotes are
    stripped); there is no escape handling inside quotes.
    """
    stack = [[]]  # stack[-1] is the list currently being filled
    token = ''
    quoted = False
    for ch in string:
        if ch == '"':
            quoted = not quoted
        elif quoted:
            # Inside quotes every character, including parens and
            # whitespace, is part of the current atom.
            token += ch
        elif ch == '(':
            stack.append([])
        elif ch == ')':
            if token:
                stack[-1].append(token)
                token = ''
            finished = stack.pop()
            stack[-1].append(finished)
        elif ch in (' ', '\n', '\t'):
            if token != '':
                stack[-1].append(token)
                token = ''
        else:
            token += ch
    return stack[0]
def make_str(ppddl_tree, level=0):
    """
    Creates a string representation of a PPDDL tree.

    *level* controls the two-space indentation of the opening paren;
    nested lists are emitted on their own lines one level deeper.
    """
    if not ppddl_tree:
        return ''
    outer_indent = ' ' * (2 * level)
    # Continuation lines (for ':'-keywords) are indented one extra space.
    inner_indent = outer_indent + ' '
    pieces = [outer_indent, '(']
    last = len(ppddl_tree) - 1
    for i, element in enumerate(ppddl_tree):
        if isinstance(element, list):
            pieces.append('\n' + make_str(element, level + 1))
        else:
            # Each ':' keyword after the head starts a new line.
            if element.startswith(':') and i != 0:
                pieces.append('\n' + inner_indent)
            pieces.append(element)
            if i != last:
                pieces.append(' ')
            # An empty parameter list would otherwise render as nothing.
            if element == ':parameters' and ppddl_tree[i + 1] == []:
                pieces.append('()')
    pieces.append(') ')
    return ''.join(pieces)
def get_all_probabilistic_effects(ppddl_tree,
                                  all_prob_effects_list,
                                  action=None):
    """
    Adds all probabilistic effects in this PPDDL tree to the given list.

    The effects are added in pre-order traversal.
    We assume no nesting of probabilistic effect.
    A probabilistic effect is represented as a tuple

        (action, ppddl_tree)

    where "action" is the name of the action to which the effect belongs
    and "ppddl_tree" is the tree representation of the effect.
    """
    if not isinstance(ppddl_tree, list) or not ppddl_tree:
        return
    head = ppddl_tree[0]
    if head == ':action':
        # Remember which action we are inside; strip stray carriage
        # returns from the action name.
        action = ppddl_tree[1].rstrip('\r')
    if head == 'probabilistic':
        all_prob_effects_list.append((action, ppddl_tree))
        return
    for child in ppddl_tree:
        get_all_probabilistic_effects(child, all_prob_effects_list, action)
def get_all_determinizations_effect(probabilistic_effect_info):
    """
    Generates all possible determinizations of the given probabilistic
    effect.

    A determinization is stored as a tuple (index, effect), where index
    is the effect's order among the outcomes of the probabilistic effect
    and effect is the specific outcome.

    Returns a tuple (action_name, all_determinizations) where
    "action_name" is the action owning the probabilistic effect and
    "all_determinizations" lists every determinization of the effect.
    """
    action_name, prob_effect = probabilistic_effect_info
    determinizations = []
    explicit_prob = Fraction(0)
    outcome_idx = 0
    # Outcomes live at even offsets (2, 4, ...); the preceding odd offset
    # holds the outcome's probability.
    for i in range(2, len(prob_effect), 2):
        explicit_prob += Fraction(prob_effect[i - 1])
        determinizations.append((outcome_idx, prob_effect[i]))
        outcome_idx += 1
    if explicit_prob != Fraction(1):
        # Remaining probability mass is an implicit no-op outcome.
        determinizations.append((outcome_idx, ['and']))
    return (action_name, determinizations)
def get_mlo_determinization_effect(probabilistic_effect_info):
    """
    Generates the most likely outcome determinization of the given
    probabilistic effect.

    A determinization is stored as a tuple (index, effect), where index
    is the effect's order among the outcomes of the probabilistic effect
    and effect is the specific outcome.

    Returns a tuple (action_name, mlo_determinization) where
    "action_name" is the action owning the probabilistic effect and
    "mlo_determinization" is the most likely effect.
    """
    action_name, prob_effect = probabilistic_effect_info
    best_prob = Fraction(-1)
    implicit_prob = Fraction(1)  # mass not assigned to any explicit outcome
    outcome_idx = 0
    mlo_determinization = []
    for i in range(2, len(prob_effect), 2):
        outcome_prob = Fraction(prob_effect[i - 1])
        implicit_prob -= outcome_prob
        # Strict '>' keeps the earliest outcome on ties.
        if outcome_prob > best_prob:
            best_prob = outcome_prob
            mlo_determinization = (outcome_idx, prob_effect[i])
        outcome_idx += 1
    if implicit_prob > best_prob:
        # The implicit no-op outcome is the most likely one.
        mlo_determinization = (outcome_idx, ['and'])
    return (action_name, mlo_determinization)
def get_all_determinizations_comb(determinizations_of_all_effects):
    """
    Generates all possible combinations of the given list of lists of
    probabilistic effects determinizations.

    Each input element is a tuple with all determinizations of one
    probabilistic effect, as returned by
    "get_all_determinizations_effect". Returns a list with every
    combination of these determinizations, in the same order they are
    given.

    For example, given:
      ((action_0, (0, det_0_0), ..., (k_0, det_0_2)),
       (action_1, (0, det_1_0), ..., (k_0, det_1_1)))
    the result is the 6 combinations:
      ( ((action_0, (0, det_0_0)), (action_1, (0, det_1_0))),
        ((action_0, (0, det_0_0)), (action_1, (0, det_1_1))),
        ...,
        ((action_0, (2, det_0_2)), (action_1, (0, det_1_0))),
        ((action_0, (2, det_0_2)), (action_1, (0, det_1_1))) )
    """
    first_action, first_choices = determinizations_of_all_effects[0]
    # Base case: a single effect yields one singleton combination per
    # determinization.
    if len(determinizations_of_all_effects) == 1:
        return [[(first_action, choice)] for choice in first_choices]
    # Recurse on the remaining effects, then prepend each choice of the
    # first effect to every combination of the rest.
    tail_combinations = get_all_determinizations_comb(
        determinizations_of_all_effects[1:])
    combinations = []
    for choice in first_choices:
        for tail in tail_combinations:
            combinations.append([(first_action, choice)] + tail)
    return combinations
def determinize_tree(determinization, ppddl_tree, index=0):
    """Replace, in place, each probabilistic effect in ``ppddl_tree`` with
    its deterministic counterpart from ``determinization``.

    ``determinization`` is a list of determinizations, as created by
    ``get_all_determinizations_effect``. The tree is visited in pre-order;
    each 'probabilistic' node found is overwritten with the effect stored
    at ``determinization[index][1][1]``, after which ``index`` is advanced.
    The caller must therefore ensure the determinization entries line up
    with the probabilistic effects of the tree. Returns the updated index.
    """
    # Leaves (non-lists) and empty nodes consume no determinization entries.
    if not isinstance(ppddl_tree, list) or not ppddl_tree:
        return index
    if ppddl_tree[0] == 'probabilistic':
        # Splice the chosen deterministic effect over this node in place.
        ppddl_tree[:] = determinization[index][1][1]
        return index + 1
    for child in ppddl_tree:
        index = determinize_tree(determinization, child, index)
    return index
def clean_up_tree(ppddl_tree):
    """Strip fluent effects (increase/decrease) and the :requirements list
    from the tree, in place, and collapse conjunctions that the removal
    left empty.
    """
    if not ppddl_tree:
        return
    # Fluent-related effects and the requirements list are dropped wholesale.
    if ppddl_tree[0] in ('increase', 'decrease', ':requirements'):
        ppddl_tree[:] = []
        return
    for child in ppddl_tree:
        if isinstance(child, list):
            clean_up_tree(child)
        # Collapse an 'and' whose operands were all emptied above.
        if child and child[0] == 'and' and len(child) > 1:
            survivors = [item for item in child[1:]
                         if item != ['and'] and item != []]
            if not survivors:
                child[:] = ['and']
| {
"content_hash": "472aa31fd29daa5e9ff6fd4e1519be41",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 81,
"avg_line_length": 36.19183673469388,
"alnum_prop": 0.6605390774782903,
"repo_name": "luisenp/mdp-lib",
"id": "95848127737bf75fd485239db909b58129decd2c",
"size": "8867",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/reduced/ppddl_util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "26608"
},
{
"name": "C++",
"bytes": "1394882"
},
{
"name": "Common Lisp",
"bytes": "11809"
},
{
"name": "Lex",
"bytes": "4103"
},
{
"name": "Makefile",
"bytes": "16623"
},
{
"name": "Python",
"bytes": "32477"
},
{
"name": "Shell",
"bytes": "48306"
},
{
"name": "Yacc",
"bytes": "52546"
}
],
"symlink_target": ""
} |
# Keep Python 2/3 behaviour consistent for this module (Ansible convention).
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible metadata: module maturity and the channel that supports it.
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vmware_local_role_info
short_description: Gather info about local roles on an ESXi host
description:
- This module can be used to gather information about local role info on an ESXi host
version_added: '2.9'
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on ESXi 6.5
- Be sure that the ESXi user used for login, has the appropriate rights to view roles
- The module returns a list of dict in version 2.8 and above.
requirements:
- "python >= 2.6"
- PyVmomi
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Gather info about local role from an ESXi
vmware_local_role_info:
hostname: '{{ esxi_hostname }}'
username: '{{ esxi_username }}'
password: '{{ esxi_password }}'
register: fact_details
delegate_to: localhost
- name: Get Admin privileges
set_fact:
admin_priv: "{{ fact_details.local_role_info['Admin']['privileges'] }}"
- debug:
msg: "{{ admin_priv }}"
'''
RETURN = r'''
local_role_info:
description: Info about role present on ESXi host
returned: always
type: dict
sample: [
{
"privileges": [
"Alarm.Acknowledge",
"Alarm.Create",
"Alarm.Delete",
"Alarm.DisableActions",
],
"role_id": -12,
"role_info_label": "Ansible User",
"role_info_summary": "Ansible Automation user",
"role_name": "AnsiUser1",
"role_system": true
},
{
"privileges": [],
"role_id": -5,
"role_info_label": "No access",
"role_info_summary": "Used for restricting granted access",
"role_name": "NoAccess",
"role_system": true
},
{
"privileges": [
"System.Anonymous",
"System.View"
],
"role_id": -3,
"role_info_label": "View",
"role_info_summary": "Visibility access (cannot be granted)",
"role_name": "View",
"role_system": true
}
]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec
class VMwareLocalRoleInfo(PyVmomi):
    """Gather information about local roles defined on an ESXi host."""

    def __init__(self, module):
        """Connect via PyVmomi and verify an authorization manager exists.

        Fails the module when the endpoint exposes no authorization
        manager (i.e. it is a vCenter server rather than an ESXi host).
        """
        super(VMwareLocalRoleInfo, self).__init__(module)
        self.module = module
        self.params = module.params
        if self.content.authorizationManager is None:
            self.module.fail_json(
                msg="Failed to get local authorization manager settings.",
                details="It seems that '%s' is a vCenter server instead of an ESXi server" % self.params['hostname']
            )

    def gather_local_role_info(self):
        """Collect every local role and exit the module with the results."""
        results = [
            dict(
                role_name=role.name,
                role_id=role.roleId,
                privileges=list(role.privilege),
                role_system=role.system,
                role_info_label=role.info.label,
                role_info_summary=role.info.summary,
            )
            for role in self.content.authorizationManager.roleList
        ]
        self.module.exit_json(changed=False, local_role_info=results)
def main():
    """Module entry point: build the AnsibleModule and emit role info."""
    module = AnsibleModule(
        argument_spec=vmware_argument_spec(),
        supports_check_mode=True,
    )
    VMwareLocalRoleInfo(module).gather_local_role_info()
# Run the module only when executed directly (not on import).
if __name__ == '__main__':
    main()
| {
"content_hash": "c599f23f76aca242050cc8f964d18a68",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 116,
"avg_line_length": 29.81954887218045,
"alnum_prop": 0.5718608169440242,
"repo_name": "thaim/ansible",
"id": "6918297f6fd032c2c556d934b2f8ddc6422d707c",
"size": "4166",
"binary": false,
"copies": "20",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/modules/cloud/vmware/vmware_local_role_info.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
} |
import sys
import time
# For Python 3 runbook execution in a Python 2 runtime of Sandbox we need to
# use the httpclient and jrdsclient of the Python 3 automation worker, so the
# worker path is prepended before the configuration import below.
if sys.version_info[0] == 3:
    AUTOMATIONWORKER_PATH = "/opt/microsoft/omsconfig/modules/nxOMSAutomationWorker/DSCResources/MSFT_nxOMSAutomationWorkerResource/automationworker/3.x/worker"
    sys.path.insert(1,AUTOMATIONWORKER_PATH)
    import configuration3 as configuration
else:
    import configuration
import binascii
import serializerfactory
from httpclientfactory import HttpClientFactory
from jrdsclient import JRDSClient
# do not remove, allows clients to use the exceptions types
from workerexception import AutomationAssetException, AutomationAssetNotFound
# Keys used to index the asset payloads returned by the JRDS service.
KEY_VALUE = "value"
KEY_USERNAME = "userName"
KEY_CONNECTION_FIELDS = "connectionFields"
def log_warning(message):
    """Print a WARNING-prefixed message to stdout."""
    print("WARNING: %s" % message)
def log_error(message):
    """Print an ERROR-prefixed message to stdout."""
    print("ERROR: %s" % message)
def test_client_override(mock_jrds_client):
global jrds_client
jrds_client = mock_jrds_client
def get_automation_variable_with_retry(name, retry_count=2):
    """Retry retrieval of an automation variable with capped backoff.

    Called after an initial lookup failed; each attempt is preceded by a
    warning log. Returns the JSON-decoded value on success, or None
    (after logging an error) once every retry is exhausted.
    """
    attempt = 1
    while attempt <= retry_count:
        log_warning("Retrieval of automation variable failed for " + str(name) + ". Retrying ..")
        variable = jrds_client.get_variable_asset(name)
        if variable is not None:
            return json.loads(variable[KEY_VALUE])
        # Back off 6s, 12s, ... capped at one minute between attempts.
        time.sleep(min(3 * (2 ** attempt), 60))
        attempt += 1
    log_error("Retrieval of automation variable failed for " + str(name) + ".")
def get_automation_variable(name):
    """Return the JSON-decoded value of an automation variable, retrying
    when the first lookup fails."""
    asset = jrds_client.get_variable_asset(name)
    if asset is not None:
        return json.loads(asset[KEY_VALUE])
    return get_automation_variable_with_retry(name, 2)
def set_automation_variable(name, value):
    """Persist ``value`` (JSON-serialized) into automation variable ``name``."""
    serialized = json.dumps(value)
    # NOTE(review): third argument presumably flags the variable as not
    # encrypted — confirm against JRDSClient.set_variable_asset.
    jrds_client.set_variable_asset(name, serialized, False)
def get_automation_credential_with_retry(name, retry_count=2):
    """Retry retrieval of an automation credential with capped backoff.

    Returns a {"username", "password"} dict on success, or None (after
    logging an error) once every retry is exhausted.
    """
    attempt = 1
    while attempt <= retry_count:
        log_warning("Retrieval of automation credential failed for " + str(name) + ". Retrying ..")
        credential = jrds_client.get_credential_asset(name)
        if credential is not None:
            return {"username": credential[KEY_USERNAME], "password": credential[KEY_VALUE]}
        # Back off 6s, 12s, ... capped at one minute between attempts.
        time.sleep(min(3 * (2 ** attempt), 60))
        attempt += 1
    log_error("Retrieval of automation credential failed for " + str(name) + ".")
def get_automation_credential(name):
    """Return a {"username", "password"} dict for an automation credential,
    retrying when the first lookup fails."""
    credential = jrds_client.get_credential_asset(name)
    if credential is not None:
        return {"username": credential[KEY_USERNAME], "password": credential[KEY_VALUE]}
    return get_automation_credential_with_retry(name, 2)
def get_automation_connection_with_retry(name, retry_count=2):
    """Retry retrieval of an automation connection with capped backoff.

    Returns the connection's field dictionary on success, or None (after
    logging an error) once every retry is exhausted.
    """
    attempt = 1
    while attempt <= retry_count:
        log_warning("Retrieval of automation connection failed for " + str(name) + ". Retrying ..")
        connection = jrds_client.get_connection_asset(name)
        if connection is not None:
            return connection[KEY_CONNECTION_FIELDS]
        # Back off 6s, 12s, ... capped at one minute between attempts.
        time.sleep(min(3 * (2 ** attempt), 60))
        attempt += 1
    log_error("Retrieval of automation connection failed for " + str(name) + ".")
def get_automation_connection(name):
    """Return the field dictionary of an automation connection, retrying
    when the first lookup fails."""
    connection = jrds_client.get_connection_asset(name)
    if connection is not None:
        return connection[KEY_CONNECTION_FIELDS]
    return get_automation_connection_with_retry(name, 2)
def get_automation_certificate_with_retry(name, retry_count=2):
    """Retry retrieval of an automation certificate with capped backoff.

    Returns the base64-decoded certificate bytes on success, or None
    (after logging an error) once every retry is exhausted.
    """
    attempt = 1
    while attempt <= retry_count:
        log_warning("Retrieval of automation certificate failed for " + str(name) + ". Retrying ..")
        certificate = jrds_client.get_certificate_asset(name)
        if certificate is not None:
            return binascii.a2b_base64(certificate[KEY_VALUE])
        # Back off 6s, 12s, ... capped at one minute between attempts.
        time.sleep(min(3 * (2 ** attempt), 60))
        attempt += 1
    log_error("Retrieval of automation certificate failed for " + str(name) + ".")
def get_automation_certificate(name):
    """Return the base64-decoded bytes of an automation certificate,
    retrying when the first lookup fails."""
    certificate = jrds_client.get_certificate_asset(name)
    if certificate is not None:
        return binascii.a2b_base64(certificate[KEY_VALUE])
    return get_automation_certificate_with_retry(name, 2)
# Module-level initialization: register this component as "assets", pick the
# serializer matching the running interpreter, and build the JRDS client used
# by all asset getters above (tests may replace it via test_client_override).
configuration.set_config({configuration.COMPONENT: "assets"})
json = serializerfactory.get_serializer(sys.version_info)
http_client_factory = HttpClientFactory(configuration.get_jrds_cert_path(), configuration.get_jrds_key_path(),
                                        configuration.get_verify_certificates())
http_client = http_client_factory.create_http_client(sys.version_info)
jrds_client = JRDSClient(http_client)
| {
"content_hash": "a79b76704acee4e9f19237035ece2264",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 160,
"avg_line_length": 39.31060606060606,
"alnum_prop": 0.6781653497783774,
"repo_name": "MSFTOSSMgmt/WPSDSCLinux",
"id": "8f9185765faf26cc07188ee073b629b67b1df9f6",
"size": "5275",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Providers/nxOMSAutomationWorker/automationworker/worker/automationassets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5870322"
},
{
"name": "C#",
"bytes": "98943"
},
{
"name": "C++",
"bytes": "670183"
},
{
"name": "CMake",
"bytes": "13826"
},
{
"name": "HTML",
"bytes": "166861"
},
{
"name": "Makefile",
"bytes": "164013"
},
{
"name": "Objective-C",
"bytes": "61644"
},
{
"name": "PowerShell",
"bytes": "40239"
},
{
"name": "Python",
"bytes": "1858427"
},
{
"name": "Shell",
"bytes": "8136"
},
{
"name": "SourcePawn",
"bytes": "60242"
},
{
"name": "Yacc",
"bytes": "35814"
}
],
"symlink_target": ""
} |
from msrest import Serializer, Deserializer
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
class EdgeOrderManagementClientOperationsMixin(object):
async def begin_create_address(
self,
address_name: str,
resource_group_name: str,
address_resource: "_models.AddressResource",
**kwargs: Any
) -> AsyncLROPoller["_models.AddressResource"]:
"""Creates a new address with the specified parameters. Existing address can be updated with this
API.
:param address_name: The name of the address Resource within the specified resource group.
address names must be between 3 and 24 characters in length and use any alphanumeric and
underscore only.
:type address_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param address_resource: Address details from request body.
:type address_resource: ~azure.mgmt.edgeorder.v2021_12_01.models.AddressResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either AddressResource or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.edgeorder.v2021_12_01.models.AddressResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
api_version = self._get_api_version('begin_create_address')
if api_version == '2020-12-01-preview':
from ..v2020_12_01_preview.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
elif api_version == '2021-12-01':
from ..v2021_12_01.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
else:
raise ValueError("API version {} does not have operation 'begin_create_address'".format(api_version))
mixin_instance = OperationClass()
mixin_instance._client = self._client
mixin_instance._config = self._config
mixin_instance._serialize = Serializer(self._models_dict(api_version))
mixin_instance._serialize.client_side_validation = False
mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
return await mixin_instance.begin_create_address(address_name, resource_group_name, address_resource, **kwargs)
async def begin_create_order_item(
self,
order_item_name: str,
resource_group_name: str,
order_item_resource: "_models.OrderItemResource",
**kwargs: Any
) -> AsyncLROPoller["_models.OrderItemResource"]:
"""Creates an order item. Existing order item cannot be updated with this api and should instead
be updated with the Update order item API.
:param order_item_name: The name of the order item.
:type order_item_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param order_item_resource: Order item details from request body.
:type order_item_resource: ~azure.mgmt.edgeorder.v2021_12_01.models.OrderItemResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either OrderItemResource or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.edgeorder.v2021_12_01.models.OrderItemResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
api_version = self._get_api_version('begin_create_order_item')
if api_version == '2020-12-01-preview':
from ..v2020_12_01_preview.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
elif api_version == '2021-12-01':
from ..v2021_12_01.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
else:
raise ValueError("API version {} does not have operation 'begin_create_order_item'".format(api_version))
mixin_instance = OperationClass()
mixin_instance._client = self._client
mixin_instance._config = self._config
mixin_instance._serialize = Serializer(self._models_dict(api_version))
mixin_instance._serialize.client_side_validation = False
mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
return await mixin_instance.begin_create_order_item(order_item_name, resource_group_name, order_item_resource, **kwargs)
async def begin_delete_address_by_name(
self,
address_name: str,
resource_group_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes an address.
:param address_name: The name of the address Resource within the specified resource group.
address names must be between 3 and 24 characters in length and use any alphanumeric and
underscore only.
:type address_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
api_version = self._get_api_version('begin_delete_address_by_name')
if api_version == '2020-12-01-preview':
from ..v2020_12_01_preview.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
elif api_version == '2021-12-01':
from ..v2021_12_01.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
else:
raise ValueError("API version {} does not have operation 'begin_delete_address_by_name'".format(api_version))
mixin_instance = OperationClass()
mixin_instance._client = self._client
mixin_instance._config = self._config
mixin_instance._serialize = Serializer(self._models_dict(api_version))
mixin_instance._serialize.client_side_validation = False
mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
return await mixin_instance.begin_delete_address_by_name(address_name, resource_group_name, **kwargs)
async def begin_delete_order_item_by_name(
self,
order_item_name: str,
resource_group_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes an order item.
:param order_item_name: The name of the order item.
:type order_item_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
api_version = self._get_api_version('begin_delete_order_item_by_name')
if api_version == '2020-12-01-preview':
from ..v2020_12_01_preview.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
elif api_version == '2021-12-01':
from ..v2021_12_01.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
else:
raise ValueError("API version {} does not have operation 'begin_delete_order_item_by_name'".format(api_version))
mixin_instance = OperationClass()
mixin_instance._client = self._client
mixin_instance._config = self._config
mixin_instance._serialize = Serializer(self._models_dict(api_version))
mixin_instance._serialize.client_side_validation = False
mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
return await mixin_instance.begin_delete_order_item_by_name(order_item_name, resource_group_name, **kwargs)
async def begin_return_order_item(
self,
order_item_name: str,
resource_group_name: str,
return_order_item_details: "_models.ReturnOrderItemDetails",
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Return order item.
:param order_item_name: The name of the order item.
:type order_item_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param return_order_item_details: Return order item CurrentStatus.
:type return_order_item_details: ~azure.mgmt.edgeorder.v2021_12_01.models.ReturnOrderItemDetails
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
api_version = self._get_api_version('begin_return_order_item')
if api_version == '2020-12-01-preview':
from ..v2020_12_01_preview.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
elif api_version == '2021-12-01':
from ..v2021_12_01.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
else:
raise ValueError("API version {} does not have operation 'begin_return_order_item'".format(api_version))
mixin_instance = OperationClass()
mixin_instance._client = self._client
mixin_instance._config = self._config
mixin_instance._serialize = Serializer(self._models_dict(api_version))
mixin_instance._serialize.client_side_validation = False
mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
return await mixin_instance.begin_return_order_item(order_item_name, resource_group_name, return_order_item_details, **kwargs)
async def begin_update_address(
self,
address_name: str,
resource_group_name: str,
address_update_parameter: "_models.AddressUpdateParameter",
if_match: Optional[str] = None,
**kwargs: Any
) -> AsyncLROPoller["_models.AddressResource"]:
"""Updates the properties of an existing address.
:param address_name: The name of the address Resource within the specified resource group.
address names must be between 3 and 24 characters in length and use any alphanumeric and
underscore only.
:type address_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param address_update_parameter: Address update parameters from request body.
:type address_update_parameter: ~azure.mgmt.edgeorder.v2021_12_01.models.AddressUpdateParameter
:param if_match: Defines the If-Match condition. The patch will be performed only if the ETag
of the job on the server matches this value.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either AddressResource or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.edgeorder.v2021_12_01.models.AddressResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
api_version = self._get_api_version('begin_update_address')
if api_version == '2020-12-01-preview':
from ..v2020_12_01_preview.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
elif api_version == '2021-12-01':
from ..v2021_12_01.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
else:
raise ValueError("API version {} does not have operation 'begin_update_address'".format(api_version))
mixin_instance = OperationClass()
mixin_instance._client = self._client
mixin_instance._config = self._config
mixin_instance._serialize = Serializer(self._models_dict(api_version))
mixin_instance._serialize.client_side_validation = False
mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
return await mixin_instance.begin_update_address(address_name, resource_group_name, address_update_parameter, if_match, **kwargs)
async def begin_update_order_item(
self,
order_item_name: str,
resource_group_name: str,
order_item_update_parameter: "_models.OrderItemUpdateParameter",
if_match: Optional[str] = None,
**kwargs: Any
) -> AsyncLROPoller["_models.OrderItemResource"]:
"""Updates the properties of an existing order item.
:param order_item_name: The name of the order item.
:type order_item_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param order_item_update_parameter: order item update parameters from request body.
:type order_item_update_parameter: ~azure.mgmt.edgeorder.v2021_12_01.models.OrderItemUpdateParameter
:param if_match: Defines the If-Match condition. The patch will be performed only if the ETag
of the order on the server matches this value.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either OrderItemResource or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.edgeorder.v2021_12_01.models.OrderItemResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
api_version = self._get_api_version('begin_update_order_item')
if api_version == '2020-12-01-preview':
from ..v2020_12_01_preview.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
elif api_version == '2021-12-01':
from ..v2021_12_01.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
else:
raise ValueError("API version {} does not have operation 'begin_update_order_item'".format(api_version))
mixin_instance = OperationClass()
mixin_instance._client = self._client
mixin_instance._config = self._config
mixin_instance._serialize = Serializer(self._models_dict(api_version))
mixin_instance._serialize.client_side_validation = False
mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
return await mixin_instance.begin_update_order_item(order_item_name, resource_group_name, order_item_update_parameter, if_match, **kwargs)
async def cancel_order_item(
self,
order_item_name: str,
resource_group_name: str,
cancellation_reason: "_models.CancellationReason",
**kwargs: Any
) -> None:
"""Cancel order item.
:param order_item_name: The name of the order item.
:type order_item_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param cancellation_reason: Reason for cancellation.
:type cancellation_reason: ~azure.mgmt.edgeorder.v2021_12_01.models.CancellationReason
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = self._get_api_version('cancel_order_item')
if api_version == '2020-12-01-preview':
from ..v2020_12_01_preview.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
elif api_version == '2021-12-01':
from ..v2021_12_01.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
else:
raise ValueError("API version {} does not have operation 'cancel_order_item'".format(api_version))
mixin_instance = OperationClass()
mixin_instance._client = self._client
mixin_instance._config = self._config
mixin_instance._serialize = Serializer(self._models_dict(api_version))
mixin_instance._serialize.client_side_validation = False
mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
return await mixin_instance.cancel_order_item(order_item_name, resource_group_name, cancellation_reason, **kwargs)
async def get_address_by_name(
self,
address_name: str,
resource_group_name: str,
**kwargs: Any
) -> "_models.AddressResource":
"""Gets information about the specified address.
:param address_name: The name of the address Resource within the specified resource group.
address names must be between 3 and 24 characters in length and use any alphanumeric and
underscore only.
:type address_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AddressResource, or the result of cls(response)
:rtype: ~azure.mgmt.edgeorder.v2021_12_01.models.AddressResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = self._get_api_version('get_address_by_name')
if api_version == '2020-12-01-preview':
from ..v2020_12_01_preview.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
elif api_version == '2021-12-01':
from ..v2021_12_01.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
else:
raise ValueError("API version {} does not have operation 'get_address_by_name'".format(api_version))
mixin_instance = OperationClass()
mixin_instance._client = self._client
mixin_instance._config = self._config
mixin_instance._serialize = Serializer(self._models_dict(api_version))
mixin_instance._serialize.client_side_validation = False
mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
return await mixin_instance.get_address_by_name(address_name, resource_group_name, **kwargs)
async def get_order_by_name(
    self,
    order_name: str,
    resource_group_name: str,
    location: str,
    **kwargs: Any
) -> "_models.OrderResource":
    """Gets an order.

    :param order_name: The name of the order.
    :type order_name: str
    :param resource_group_name: The name of the resource group. The name is case insensitive.
    :type resource_group_name: str
    :param location: The name of Azure region.
    :type location: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: OrderResource, or the result of cls(response)
    :rtype: ~azure.mgmt.edgeorder.v2021_12_01.models.OrderResource
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Resolve the API version the current profile maps this operation to, then
    # lazily import only the matching generated mixin.
    api_version = self._get_api_version('get_order_by_name')
    if api_version == '2020-12-01-preview':
        from ..v2020_12_01_preview.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
    elif api_version == '2021-12-01':
        from ..v2021_12_01.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
    else:
        raise ValueError(f"API version {api_version} does not have operation 'get_order_by_name'")
    # Wire a throwaway mixin to this client's transport and version-specific serializers.
    impl = OperationClass()
    impl._client = self._client
    impl._config = self._config
    impl._serialize = Serializer(self._models_dict(api_version))
    impl._serialize.client_side_validation = False
    impl._deserialize = Deserializer(self._models_dict(api_version))
    return await impl.get_order_by_name(order_name, resource_group_name, location, **kwargs)
async def get_order_item_by_name(
    self,
    order_item_name: str,
    resource_group_name: str,
    expand: Optional[str] = None,
    **kwargs: Any
) -> "_models.OrderItemResource":
    """Gets an order item.

    :param order_item_name: The name of the order item.
    :type order_item_name: str
    :param resource_group_name: The name of the resource group. The name is case insensitive.
    :type resource_group_name: str
    :param expand: $expand is supported on device details, forward shipping details and reverse
     shipping details parameters. Each of these can be provided as a comma separated list. Device
     Details for order item provides details on the devices of the product, Forward and Reverse
     Shipping details provide forward and reverse shipping details respectively.
    :type expand: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: OrderItemResource, or the result of cls(response)
    :rtype: ~azure.mgmt.edgeorder.v2021_12_01.models.OrderItemResource
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Resolve the API version the current profile maps this operation to, then
    # lazily import only the matching generated mixin.
    api_version = self._get_api_version('get_order_item_by_name')
    if api_version == '2020-12-01-preview':
        from ..v2020_12_01_preview.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
    elif api_version == '2021-12-01':
        from ..v2021_12_01.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
    else:
        raise ValueError(f"API version {api_version} does not have operation 'get_order_item_by_name'")
    # Wire a throwaway mixin to this client's transport and version-specific serializers.
    impl = OperationClass()
    impl._client = self._client
    impl._config = self._config
    impl._serialize = Serializer(self._models_dict(api_version))
    impl._serialize.client_side_validation = False
    impl._deserialize = Deserializer(self._models_dict(api_version))
    return await impl.get_order_item_by_name(order_item_name, resource_group_name, expand, **kwargs)
def list_addresses_at_resource_group_level(
    self,
    resource_group_name: str,
    filter: Optional[str] = None,
    skip_token: Optional[str] = None,
    **kwargs: Any
) -> AsyncItemPaged["_models.AddressResourceList"]:
    """Lists all the addresses available under the given resource group.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
    :type resource_group_name: str
    :param filter: $filter is supported to filter based on shipping address properties. Filter
     supports only equals operation.
    :type filter: str
    :param skip_token: $skipToken is supported on Get list of addresses, which provides the next
     page in the list of address.
    :type skip_token: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either AddressResourceList or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.edgeorder.v2021_12_01.models.AddressResourceList]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Resolve the API version the current profile maps this operation to, then
    # lazily import only the matching generated mixin.
    api_version = self._get_api_version('list_addresses_at_resource_group_level')
    if api_version == '2020-12-01-preview':
        from ..v2020_12_01_preview.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
    elif api_version == '2021-12-01':
        from ..v2021_12_01.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
    else:
        raise ValueError(f"API version {api_version} does not have operation 'list_addresses_at_resource_group_level'")
    # Wire a throwaway mixin to this client's transport and version-specific serializers.
    # Paged operations return the pager directly; no await here.
    impl = OperationClass()
    impl._client = self._client
    impl._config = self._config
    impl._serialize = Serializer(self._models_dict(api_version))
    impl._serialize.client_side_validation = False
    impl._deserialize = Deserializer(self._models_dict(api_version))
    return impl.list_addresses_at_resource_group_level(resource_group_name, filter, skip_token, **kwargs)
def list_addresses_at_subscription_level(
    self,
    filter: Optional[str] = None,
    skip_token: Optional[str] = None,
    **kwargs: Any
) -> AsyncItemPaged["_models.AddressResourceList"]:
    """Lists all the addresses available under the subscription.

    :param filter: $filter is supported to filter based on shipping address properties. Filter
     supports only equals operation.
    :type filter: str
    :param skip_token: $skipToken is supported on Get list of addresses, which provides the next
     page in the list of addresses.
    :type skip_token: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either AddressResourceList or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.edgeorder.v2021_12_01.models.AddressResourceList]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Resolve the API version the current profile maps this operation to, then
    # lazily import only the matching generated mixin.
    api_version = self._get_api_version('list_addresses_at_subscription_level')
    if api_version == '2020-12-01-preview':
        from ..v2020_12_01_preview.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
    elif api_version == '2021-12-01':
        from ..v2021_12_01.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
    else:
        raise ValueError(f"API version {api_version} does not have operation 'list_addresses_at_subscription_level'")
    # Wire a throwaway mixin to this client's transport and version-specific serializers.
    # Paged operations return the pager directly; no await here.
    impl = OperationClass()
    impl._client = self._client
    impl._config = self._config
    impl._serialize = Serializer(self._models_dict(api_version))
    impl._serialize.client_side_validation = False
    impl._deserialize = Deserializer(self._models_dict(api_version))
    return impl.list_addresses_at_subscription_level(filter, skip_token, **kwargs)
def list_configurations(
    self,
    configurations_request: "_models.ConfigurationsRequest",
    skip_token: Optional[str] = None,
    **kwargs: Any
) -> AsyncItemPaged["_models.Configurations"]:
    """This method provides the list of configurations for the given product family, product line and
    product under subscription.

    :param configurations_request: Filters for showing the configurations.
    :type configurations_request: ~azure.mgmt.edgeorder.v2021_12_01.models.ConfigurationsRequest
    :param skip_token: $skipToken is supported on list of configurations, which provides the next
     page in the list of configurations.
    :type skip_token: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either Configurations or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.edgeorder.v2021_12_01.models.Configurations]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Resolve the API version the current profile maps this operation to, then
    # lazily import only the matching generated mixin.
    api_version = self._get_api_version('list_configurations')
    if api_version == '2020-12-01-preview':
        from ..v2020_12_01_preview.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
    elif api_version == '2021-12-01':
        from ..v2021_12_01.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
    else:
        raise ValueError(f"API version {api_version} does not have operation 'list_configurations'")
    # Wire a throwaway mixin to this client's transport and version-specific serializers.
    # Paged operations return the pager directly; no await here.
    impl = OperationClass()
    impl._client = self._client
    impl._config = self._config
    impl._serialize = Serializer(self._models_dict(api_version))
    impl._serialize.client_side_validation = False
    impl._deserialize = Deserializer(self._models_dict(api_version))
    return impl.list_configurations(configurations_request, skip_token, **kwargs)
def list_operations(
    self,
    **kwargs: Any
) -> AsyncItemPaged["_models.OperationListResult"]:
    """This method gets all the operations that are exposed for customer.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either OperationListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.edgeorder.v2021_12_01.models.OperationListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Resolve the API version the current profile maps this operation to, then
    # lazily import only the matching generated mixin.
    api_version = self._get_api_version('list_operations')
    if api_version == '2020-12-01-preview':
        from ..v2020_12_01_preview.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
    elif api_version == '2021-12-01':
        from ..v2021_12_01.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
    else:
        raise ValueError(f"API version {api_version} does not have operation 'list_operations'")
    # Wire a throwaway mixin to this client's transport and version-specific serializers.
    # Paged operations return the pager directly; no await here.
    impl = OperationClass()
    impl._client = self._client
    impl._config = self._config
    impl._serialize = Serializer(self._models_dict(api_version))
    impl._serialize.client_side_validation = False
    impl._deserialize = Deserializer(self._models_dict(api_version))
    return impl.list_operations(**kwargs)
def list_order_at_resource_group_level(
    self,
    resource_group_name: str,
    skip_token: Optional[str] = None,
    **kwargs: Any
) -> AsyncItemPaged["_models.OrderResourceList"]:
    """Lists order at resource group level.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
    :type resource_group_name: str
    :param skip_token: $skipToken is supported on Get list of order, which provides the next page
     in the list of order.
    :type skip_token: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either OrderResourceList or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.edgeorder.v2021_12_01.models.OrderResourceList]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Resolve the API version the current profile maps this operation to, then
    # lazily import only the matching generated mixin.
    api_version = self._get_api_version('list_order_at_resource_group_level')
    if api_version == '2020-12-01-preview':
        from ..v2020_12_01_preview.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
    elif api_version == '2021-12-01':
        from ..v2021_12_01.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
    else:
        raise ValueError(f"API version {api_version} does not have operation 'list_order_at_resource_group_level'")
    # Wire a throwaway mixin to this client's transport and version-specific serializers.
    # Paged operations return the pager directly; no await here.
    impl = OperationClass()
    impl._client = self._client
    impl._config = self._config
    impl._serialize = Serializer(self._models_dict(api_version))
    impl._serialize.client_side_validation = False
    impl._deserialize = Deserializer(self._models_dict(api_version))
    return impl.list_order_at_resource_group_level(resource_group_name, skip_token, **kwargs)
def list_order_at_subscription_level(
    self,
    skip_token: Optional[str] = None,
    **kwargs: Any
) -> AsyncItemPaged["_models.OrderResourceList"]:
    """Lists order at subscription level.

    :param skip_token: $skipToken is supported on Get list of order, which provides the next page
     in the list of order.
    :type skip_token: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either OrderResourceList or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.edgeorder.v2021_12_01.models.OrderResourceList]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Resolve the API version the current profile maps this operation to, then
    # lazily import only the matching generated mixin.
    api_version = self._get_api_version('list_order_at_subscription_level')
    if api_version == '2020-12-01-preview':
        from ..v2020_12_01_preview.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
    elif api_version == '2021-12-01':
        from ..v2021_12_01.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
    else:
        raise ValueError(f"API version {api_version} does not have operation 'list_order_at_subscription_level'")
    # Wire a throwaway mixin to this client's transport and version-specific serializers.
    # Paged operations return the pager directly; no await here.
    impl = OperationClass()
    impl._client = self._client
    impl._config = self._config
    impl._serialize = Serializer(self._models_dict(api_version))
    impl._serialize.client_side_validation = False
    impl._deserialize = Deserializer(self._models_dict(api_version))
    return impl.list_order_at_subscription_level(skip_token, **kwargs)
def list_order_items_at_resource_group_level(
    self,
    resource_group_name: str,
    filter: Optional[str] = None,
    expand: Optional[str] = None,
    skip_token: Optional[str] = None,
    **kwargs: Any
) -> AsyncItemPaged["_models.OrderItemResourceList"]:
    """Lists order item at resource group level.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
    :type resource_group_name: str
    :param filter: $filter is supported to filter based on order id. Filter supports only equals
     operation.
    :type filter: str
    :param expand: $expand is supported on device details, forward shipping details and reverse
     shipping details parameters. Each of these can be provided as a comma separated list. Device
     Details for order item provides details on the devices of the product, Forward and Reverse
     Shipping details provide forward and reverse shipping details respectively.
    :type expand: str
    :param skip_token: $skipToken is supported on Get list of order items, which provides the next
     page in the list of order items.
    :type skip_token: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either OrderItemResourceList or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.edgeorder.v2021_12_01.models.OrderItemResourceList]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Resolve the API version the current profile maps this operation to, then
    # lazily import only the matching generated mixin.
    api_version = self._get_api_version('list_order_items_at_resource_group_level')
    if api_version == '2020-12-01-preview':
        from ..v2020_12_01_preview.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
    elif api_version == '2021-12-01':
        from ..v2021_12_01.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
    else:
        raise ValueError(f"API version {api_version} does not have operation 'list_order_items_at_resource_group_level'")
    # Wire a throwaway mixin to this client's transport and version-specific serializers.
    # Paged operations return the pager directly; no await here.
    impl = OperationClass()
    impl._client = self._client
    impl._config = self._config
    impl._serialize = Serializer(self._models_dict(api_version))
    impl._serialize.client_side_validation = False
    impl._deserialize = Deserializer(self._models_dict(api_version))
    return impl.list_order_items_at_resource_group_level(resource_group_name, filter, expand, skip_token, **kwargs)
def list_order_items_at_subscription_level(
    self,
    filter: Optional[str] = None,
    expand: Optional[str] = None,
    skip_token: Optional[str] = None,
    **kwargs: Any
) -> AsyncItemPaged["_models.OrderItemResourceList"]:
    """Lists order item at subscription level.

    :param filter: $filter is supported to filter based on order id. Filter supports only equals
     operation.
    :type filter: str
    :param expand: $expand is supported on device details, forward shipping details and reverse
     shipping details parameters. Each of these can be provided as a comma separated list. Device
     Details for order item provides details on the devices of the product, Forward and Reverse
     Shipping details provide forward and reverse shipping details respectively.
    :type expand: str
    :param skip_token: $skipToken is supported on Get list of order items, which provides the next
     page in the list of order items.
    :type skip_token: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either OrderItemResourceList or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.edgeorder.v2021_12_01.models.OrderItemResourceList]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Resolve the API version the current profile maps this operation to, then
    # lazily import only the matching generated mixin.
    api_version = self._get_api_version('list_order_items_at_subscription_level')
    if api_version == '2020-12-01-preview':
        from ..v2020_12_01_preview.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
    elif api_version == '2021-12-01':
        from ..v2021_12_01.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
    else:
        raise ValueError(f"API version {api_version} does not have operation 'list_order_items_at_subscription_level'")
    # Wire a throwaway mixin to this client's transport and version-specific serializers.
    # Paged operations return the pager directly; no await here.
    impl = OperationClass()
    impl._client = self._client
    impl._config = self._config
    impl._serialize = Serializer(self._models_dict(api_version))
    impl._serialize.client_side_validation = False
    impl._deserialize = Deserializer(self._models_dict(api_version))
    return impl.list_order_items_at_subscription_level(filter, expand, skip_token, **kwargs)
def list_product_families(
    self,
    product_families_request: "_models.ProductFamiliesRequest",
    expand: Optional[str] = None,
    skip_token: Optional[str] = None,
    **kwargs: Any
) -> AsyncItemPaged["_models.ProductFamilies"]:
    """This method provides the list of product families for the given subscription.

    :param product_families_request: Filters for showing the product families.
    :type product_families_request: ~azure.mgmt.edgeorder.v2021_12_01.models.ProductFamiliesRequest
    :param expand: $expand is supported on configurations parameter for product, which provides
     details on the configurations for the product.
    :type expand: str
    :param skip_token: $skipToken is supported on list of product families, which provides the next
     page in the list of product families.
    :type skip_token: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ProductFamilies or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.edgeorder.v2021_12_01.models.ProductFamilies]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Resolve the API version the current profile maps this operation to, then
    # lazily import only the matching generated mixin.
    api_version = self._get_api_version('list_product_families')
    if api_version == '2020-12-01-preview':
        from ..v2020_12_01_preview.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
    elif api_version == '2021-12-01':
        from ..v2021_12_01.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
    else:
        raise ValueError(f"API version {api_version} does not have operation 'list_product_families'")
    # Wire a throwaway mixin to this client's transport and version-specific serializers.
    # Paged operations return the pager directly; no await here.
    impl = OperationClass()
    impl._client = self._client
    impl._config = self._config
    impl._serialize = Serializer(self._models_dict(api_version))
    impl._serialize.client_side_validation = False
    impl._deserialize = Deserializer(self._models_dict(api_version))
    return impl.list_product_families(product_families_request, expand, skip_token, **kwargs)
def list_product_families_metadata(
    self,
    skip_token: Optional[str] = None,
    **kwargs: Any
) -> AsyncItemPaged["_models.ProductFamiliesMetadata"]:
    """This method provides the list of product families metadata for the given subscription.

    :param skip_token: $skipToken is supported on list of product families metadata, which provides
     the next page in the list of product families metadata.
    :type skip_token: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ProductFamiliesMetadata or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.edgeorder.v2021_12_01.models.ProductFamiliesMetadata]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Resolve the API version the current profile maps this operation to, then
    # lazily import only the matching generated mixin.
    api_version = self._get_api_version('list_product_families_metadata')
    if api_version == '2020-12-01-preview':
        from ..v2020_12_01_preview.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
    elif api_version == '2021-12-01':
        from ..v2021_12_01.aio.operations import EdgeOrderManagementClientOperationsMixin as OperationClass
    else:
        raise ValueError(f"API version {api_version} does not have operation 'list_product_families_metadata'")
    # Wire a throwaway mixin to this client's transport and version-specific serializers.
    # Paged operations return the pager directly; no await here.
    impl = OperationClass()
    impl._client = self._client
    impl._config = self._config
    impl._serialize = Serializer(self._models_dict(api_version))
    impl._serialize.client_side_validation = False
    impl._deserialize = Deserializer(self._models_dict(api_version))
    return impl.list_product_families_metadata(skip_token, **kwargs)
| {
"content_hash": "00736314d52c4f045d73d29210002deb",
"timestamp": "",
"source": "github",
"line_count": 790,
"max_line_length": 146,
"avg_line_length": 61.029113924050634,
"alnum_prop": 0.6971563686142742,
"repo_name": "Azure/azure-sdk-for-python",
"id": "b703caa3df4af5cc164a9354204b2287da4251e6",
"size": "48686",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/edgeorder/azure-mgmt-edgeorder/azure/mgmt/edgeorder/aio/_operations_mixin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.