import collections
from abc import ABC, ABCMeta, abstractmethod
from prompt_toolkit import prompt, PromptSession
from prompt_toolkit.completion import Completer, Completion
from prompt_toolkit.shortcuts import CompleteStyle
from prompt_toolkit.filters import completion_is_selected, has_completions
from prompt_toolkit.key_binding import KeyBindings
from .session import SessionStarter, Session, Record, SessionError, SessionException
from .pathtree import TreeFormatterFancy
from .confirm_yes_no import confirm_yes_no
from .read_passphrase import read_set_passphrase, read_passphrase
from .busy_spinner import BusySpinner
from .generate import PasswordGenerator
class PromptCompleter(Completer):
def __init__(self, shell):
super().__init__()
self.shell = shell
#self.hier = PathHierarchy(shell.db)
def get_completions(self, document, complete_event):
# Complete only at end of line:
if document.cursor_position!=len(document.text):
return
text = document.text
# Complete command names
if not " " in text:
for cmd in self.shell.commands():
if cmd.name.startswith(text):
yield Completion(cmd.name, start_position=-len(text), style='fg:ansired')
# Call command handlers if applicable
for cmd in self.shell.commands():
if text.startswith(cmd.name+" "):
yield from cmd.completion_handler(text[len(cmd.name)+1:])
return
# Call default handler else
default_cmd = self.shell.default_command()
if default_cmd:
yield from default_cmd.completion_handler(text)
class Command(metaclass=ABCMeta):
is_default = False
def __init__(self, shell):
self.shell = shell
@abstractmethod
def handle(self, args):
"""
Return True to exit.
"""
pass
def completion_handler(self, text):
return iter(())
@property
@abstractmethod
def name(self):
pass
@abstractmethod
def context_check(self) -> bool:
"""
Returns True if command is available in current context.
"""
pass
@property
def session(self) -> Session:
return self.shell.session
def completion_handler_path(self, text):
start_idx=text.rfind("/")
var=text[start_idx+1:]
cur_dir = self.session.tree.root
if start_idx>=0:
for dirname in text.split("/")[:-1]:
try:
cur_dir = cur_dir.subdirs[dirname]
except KeyError:
return
for subdir in cur_dir.subdirs.keys():
subdir=subdir+"/"
if subdir.startswith(var):
yield Completion(subdir, start_position=-len(var), style='fg:ansiblue')
for record in cur_dir.records.keys():
if record.startswith(var):
yield Completion(record, start_position=-len(var))
def completion_handler_field_name(self, text):
rec = self.session[self.shell.cur_path]
for key in iter(rec):
if key.startswith(text):
yield Completion(key, start_position=-len(text))
# class CmdSave(Command):
# name = "save"
#
# def context_check(self):
# return True
#
# def handle(self, args):
# if len(args)>0:
# print("?")
# return
# if self.session.save_required:
# with BusySpinner():
# self.session.save()
# print("Changes saved.")
# else:
# print("No unsaved changes.")
class CmdExit(Command):
name = "exit"
def context_check(self):
return True
def handle(self, args):
if args not in ["", "-f"]:
print("?")
return
if self.session.save_required and args != "-f":
print("Database has unsaved changes. Use 'save' to save changes or 'exit -f' to exit discarding changes.")
return False
else:
return True
class CmdList(Command):
name = "ls"
def context_check(self):
return True
def handle(self, args):
search_term = args
print(self.session.tree.tree_str(search_term, TreeFormatterFancy()))
class CmdNew(Command):
name = "new"
completion_handler = Command.completion_handler_path
def context_check(self):
return True
def handle(self, args):
path = args
self.session[path] = Record()
self.shell.cur_path = path
with BusySpinner():
self.session.save()
print(f"Record \"{path}\" created.")
class CmdRename(Command):
name = "rename"
completion_handler = Command.completion_handler_path
def context_check(self):
return self.shell.cur_path is not None
def handle(self, args):
old_path = self.shell.cur_path
new_path = args
self.session[new_path] = self.session[old_path]
self.shell.cur_path = new_path
with BusySpinner():
self.session.save()
print(f"Record \"{old_path}\" renamed to \"{new_path}\".")
class CmdOpen(Command):
name = "open"
is_default = True
completion_handler = Command.completion_handler_path
def context_check(self):
return self.shell.cur_path is None
def handle(self, args):
path = args
if len(path) == 0:
return
if path in self.session:
self.shell.cur_path = path
self.shell.print_current_record()
else:
print(f"Record \"{path}\" not found.")
class CmdDelete(Command):
name = "del"
def context_check(self):
return self.shell.cur_path is not None
def handle(self, args):
if len(args)>0:
print("?")
return
path = self.shell.cur_path
if not confirm_yes_no(f"Do you want to delete record \"{path}\" (y/n)?"):
return
del self.session[path]
self.shell.cur_path = None
with BusySpinner():
self.session.save()
print(f"Record \"{path}\" deleted.")
class CmdClose(Command):
name = "close"
is_default = True
def context_check(self):
return self.shell.cur_path is not None
def handle(self, args):
if len(args) > 0:
print("?")
else:
self.shell.cur_path = None
class CmdShow(Command):
name = "show"
def context_check(self):
return self.shell.cur_path is not None
def handle(self, args):
if len(args) > 0:
print("?")
else:
self.shell.print_current_record()
class CmdSet(Command):
name = "set"
completion_handler = Command.completion_handler_field_name
def context_check(self):
return self.shell.cur_path is not None
def handle(self, args):
rec = self.session[self.shell.cur_path]
field_name = args
if len(field_name)==0:
print("?")
return
try:
old_value = rec[field_name]
except KeyError:
old_value = ""
new_value = prompt("Value: ", default=old_value)
rec[field_name] = new_value
with BusySpinner():
self.session.save()
class CmdGen(Command):
name = "gen"
completion_handler = Command.completion_handler_field_name
def context_check(self):
return self.shell.cur_path is not None
def handle(self, args):
rec = self.session[self.shell.cur_path]
field_name = args
if len(field_name)==0:
print("?")
return
try:
template_preset = rec[field_name]
except KeyError:
template_preset = "Aaaaaaaaaaaaaa5"
template = prompt("Template: ", default=template_preset)
g = PasswordGenerator.from_template(template)
new_value = g.generate()
print(f"Settings: {g.spec()}")
print(f"Generated {field_name}: {new_value}")
rec[field_name] = new_value
with BusySpinner():
self.session.save()
class CmdUnset(Command):
name = "unset"
completion_handler = Command.completion_handler_field_name
def context_check(self):
return self.shell.cur_path is not None
def handle(self, args):
rec = self.session[self.shell.cur_path]
field_name = args
if len(field_name)==0:
print("?")
return
try:
del rec[field_name]
except KeyError:
print(f"Field \"{field_name}\" not found.")
else:
with BusySpinner():
self.session.save()
print(f"Field \"{field_name}\" deleted.")
class CmdChangePassphrase(Command):
name = "change_passphrase"
def context_check(self):
return self.shell.cur_path is None
def handle(self, args):
if len(args) > 0:
print("?")
return
db_filename = self.session.config.primary_db
if read_passphrase(db_filename, open=False) != self.session.passphrase:
print("Wrong passphrase.")
return
new_passphrase = read_set_passphrase(db_filename, initial=False)
self.session.set_passphrase(new_passphrase)
with BusySpinner():
self.session.save()
print("Passphrase updated.")
class CmdSync(Command):
name = "sync"
def context_check(self):
return self.shell.cur_path is None
def handle(self, args):
if len(args) > 0:
print("?")
return
with BusySpinner():
summary = self.session.sync()
self.session.save()
for m in summary.messages():
print(m)
class Shell:
"""
The Shell class provides a shell-like interface for accessing a
passmate database.
"""
command_classes = [
# CmdSave,
CmdExit,
CmdList,
CmdNew,
CmdRename,
CmdOpen,
CmdClose,
CmdDelete,
CmdShow,
CmdSet,
CmdUnset,
CmdChangePassphrase,
CmdSync,
CmdGen
]
def __init__(self, session: Session):
self.session = session
self.cur_path = None
self.all_commands = [cls(self) for cls in self.command_classes]
def print_current_record(self):
rec = self.session[self.cur_path]
if len(rec)==0:
print(f"Record \"{self.cur_path}\" is empty.")
else:
maxlen = max(map(len, iter(rec)))
for field_name in rec:
value = rec[field_name]
value_multiline = value.split("\n")
print(f"{field_name:>{maxlen}}: {value_multiline[0]}")
for v in value_multiline[1:]:
nothing = ""
print(f"{nothing:>{maxlen}}> {v}")
def commands(self):
"""
Generator for all currently available commands.
"""
for cmd in self.all_commands:
if cmd.context_check():
yield cmd
def default_command(self) -> Command:
default_cmd = None
for cmd in self.commands():
if cmd.is_default:
assert (not default_cmd)
default_cmd = cmd
return default_cmd
@property
def cur_rec(self):
return self.session[self.cur_path]
def key_bindings(self):
key_bindings = KeyBindings()
@key_bindings.add("enter", filter=has_completions & ~completion_is_selected)
def _(event):
event.current_buffer.go_to_completion(0)
event.current_buffer.complete_state = None
@key_bindings.add("enter", filter=completion_is_selected)
def _(event):
event.current_buffer.complete_state = None
return key_bindings
def handle_cmd_named(self, text):
text_s = text.split(" ", 1)
cmd_name = text_s[0]
try:
args = text_s[1]
except IndexError:
args = ""
for cmd in self.commands():
if cmd.name == cmd_name:
return cmd.handle(args)
raise KeyError("Unknown command.")
def handle_cmd(self, text):
try:
try:
return self.handle_cmd_named(text)
except KeyError:
default_cmd = self.default_command()
if default_cmd:
return default_cmd.handle(text)
else:
print("?")
except SessionException as exc:
print(f"Error: {exc}")
def run(self):
"""starts interactive shell-like session."""
running = True
session = PromptSession(key_bindings=self.key_bindings(), complete_style=CompleteStyle.READLINE_LIKE)
while running:
my_completer=PromptCompleter(self)
pathinfo=""
if self.cur_path:
pathinfo=":"+self.cur_path
try:
text = session.prompt(f'passmate{pathinfo}> ', completer=my_completer, complete_while_typing=True)
except (EOFError, KeyboardInterrupt):
# Exit on Ctrl+C or Ctrl+D.
text = "exit"
if self.handle_cmd(text):
running=False
def start_shell(config, init) -> Shell:
"""
Args:
config: Config object read from user's config.toml
init: --init command line flag
"""
sync_on_start = True
while True: # loop to allow repeated entry in case of a wrong passphrase.
if init:
if config.primary_db.exists():
print("--init specified with database already present.")
return
passphrase = read_set_passphrase(config.primary_db, initial=True)
else:
if not config.primary_db.exists():
print("Database not found. Pass --init to create new database.")
return
passphrase = read_passphrase(config.primary_db, open=True)
sp = BusySpinner()
sp.start()
try:
with SessionStarter(config, passphrase, init) as session:
if sync_on_start:
summary = session.sync()
for m in summary.messages():
print(m)
sp.end()
shell = Shell(session)
shell.run()
except SessionException as e:
sp.end()
if e.error == SessionError.WRONG_PASSPHRASE:
print("Wrong passphrase, try again.")
continue # Wrong passphrase -> re-run loop
else:
raise e
else:
break # Passphrase was presumably correct -> exit loop.
finally:
sp.end() | {
"content_hash": "418bc802ff23acc4fceec26e0e421e3d",
"timestamp": "",
"source": "github",
"line_count": 531,
"max_line_length": 118,
"avg_line_length": 28.14312617702448,
"alnum_prop": 0.5551391862955032,
"repo_name": "TobiasKaiser/passmate",
"id": "0839eef3bf50ad32f5b02fa6193841ff66c56dd1",
"size": "14944",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "passmate/shell.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "85529"
}
],
"symlink_target": ""
} |
from Get_Data import *
from Get_Tour import *
from gurobipy import *
import http.server
import json
import numpy as np
import os.path
import geojson
import random
import math
# Start setting up a simple local server
def start_server(port=8000, bind="", cgi=False):
if cgi:
http.server.test(HandlerClass=http.server.CGIHTTPRequestHandler, port=port, bind=bind)
else:
http.server.test(HandlerClass=http.server.SimpleHTTPRequestHandler,port=port,bind=bind)
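# Usage sketch: start_server(port=8000) serves the current working directory at
# http://localhost:8000 via http.server.test(), which blocks until interrupted.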
def make_sure(what=''):
if what == 'city_names':
city_names = {'San_Francisco':{'N':37.806025,'E':-122.386719,'W':-122.512437,'S':37.704589},'New_York':{'N':40.870625,'E':-73.704185,'W':-74.042530,'S':40.568426},
'Los_Angeles':{'N':34.109175,'E':-118.176155,'W':-118.353310,'S':34.002226}, 'Chicago':{'N':41.975189,'E':-87.532883,'W':-87.851486,'S':41.709189}}
return city_names
elif what == 'buss_types':
buss_types = {'Restaurants' : {'durations':random.randint(50,70), 'open_hours' : [660,960], 'categories' : []},
'Shopping': {'durations':random.randint(20,30), 'open_hours' : [480,1020], 'categories' : []},'Hotels & Travel': {'durations':random.randint(540,660), 'open_hours' : [1200,1440], 'categories' : []},
'Arts & Entertainment': {'durations':random.randint(60,120), 'open_hours' : [540,1620], 'categories' : []},'Active Life': {'durations':random.randint(40,60), 'open_hours' : [540,1200], 'categories' : []}}
return buss_types
def writejson(object = ''):
''' This method writes data into the corresponding json file '''
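# Usage sketch: writejson(tour) for a Tour instance appends tour.geo_interface()
# to Data/Tour_Data.json; any other object is assumed to expose .city_name and
# .geo_interface() and is appended to '<city_name>_Data.json' instead.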
if type(object) == Tour:
if not os.path.isfile('Data/Tour_Data.json'):
with open('Data/Tour_Data.json','w') as f:
geojson.dump({"type": "FeatureCollection","features": []}, f)
with open('Data/Tour_Data.json') as f:
data = json.load(f)
with open('Data/Tour_Data.json','w') as f:
data['features'].append(object.geo_interface())
geojson.dump(data, f)
else:
if not os.path.isfile('{}_Data.json'.format(object.city_name)):
with open('{}_Data.json'.format(object.city_name),'w') as f:
geojson.dump({"type": "FeatureCollection","features": []}, f)
with open('{}_Data.json'.format(object.city_name)) as f:
data = json.load(f)
with open('{}_Data.json'.format(object.city_name),'w') as f:
data['features'].append(object.geo_interface())
geojson.dump(data, f)
| {
"content_hash": "d434e61d7f3df80a6929c5d86d1a592d",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 226,
"avg_line_length": 49.46296296296296,
"alnum_prop": 0.5803070011231748,
"repo_name": "alegde/OSTSP-Project",
"id": "2c7ce3fc633d07e6e4be788f4822ed5520683c8c",
"size": "2671",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sample/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2431"
},
{
"name": "JavaScript",
"bytes": "3354"
},
{
"name": "Python",
"bytes": "28007"
}
],
"symlink_target": ""
} |
import json
import os
import re
import pytest
import yaml
from Pegasus.api.writable import _CustomEncoder
def _tojson(obj):
"""Returns dict representation of obj using writable._CustomEncoder"""
return json.loads(json.dumps(obj, cls=_CustomEncoder))
@pytest.fixture(scope="module")
def convert_yaml_schemas_to_json():
"""
Convert all the yaml schemas into json files.
These files will be used whenever schema validation is
needed for the tests. The json files will be cleaned up
at the end of the test module.
"""
# get the path of the schema file with the given name
path = os.path.dirname(os.path.realpath(__file__))
path = path.replace("packages/pegasus-api/test/api", "share/pegasus/schema/yaml")
json_schemas = {
os.path.join(path, filename): os.path.join(
path, filename.replace(".yml", ".json")
)
for filename in [
"common.yml",
"rc-5.0.yml",
"tc-5.0.yml",
"sc-5.0.yml",
"wf-5.0.yml",
]
}
# convert each of the yml schemas to json
for yml_filename, json_filename in json_schemas.items():
with open(yml_filename) as yml_file, open(json_filename, "w") as json_file:
json_str = json.dumps(yaml.safe_load(yml_file))
# for references pointing to '*.yml' files, convert them to point
# to '.json' files instead
json_str = re.sub(
r"([a-z0-9\-\.]+)(.yml)",
os.path.join("file://" + path, r"\1.json"),
json_str,
)
json_obj = json.loads(json_str)
json_obj["$id"] = "file://" + json_filename
json.dump(json_obj, json_file, indent=4)
yield
# cleanup
for _, json_schema in json_schemas.items():
os.remove(json_schema)
@pytest.fixture(scope="function")
def load_schema():
"""
Load the given schema into memory.
"""
def _load_schema(name):
# get the path of the schema file with the given name
path = os.path.dirname(os.path.realpath(__file__))
path = path.replace(
"packages/pegasus-api/test/api", "share/pegasus/schema/yaml"
)
path = os.path.join(path, name)
with open(path) as f:
return json.load(f)
return _load_schema
| {
"content_hash": "2e069030e2c664bda22776ae1f5523c4",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 85,
"avg_line_length": 28.13095238095238,
"alnum_prop": 0.5801946677951756,
"repo_name": "pegasus-isi/pegasus",
"id": "e0ca1d5bfd5731fccf99edefb2100efea3335291",
"size": "2363",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/pegasus-api/test/api/conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "451637"
},
{
"name": "C++",
"bytes": "241564"
},
{
"name": "CSS",
"bytes": "3270"
},
{
"name": "Common Workflow Language",
"bytes": "2464"
},
{
"name": "Dockerfile",
"bytes": "3830"
},
{
"name": "HTML",
"bytes": "95902"
},
{
"name": "Java",
"bytes": "8737551"
},
{
"name": "JavaScript",
"bytes": "25592"
},
{
"name": "Jupyter Notebook",
"bytes": "2576298"
},
{
"name": "Makefile",
"bytes": "9884"
},
{
"name": "PHP",
"bytes": "32852"
},
{
"name": "Perl",
"bytes": "90905"
},
{
"name": "Python",
"bytes": "3039866"
},
{
"name": "R",
"bytes": "105082"
},
{
"name": "Roff",
"bytes": "36"
},
{
"name": "Shell",
"bytes": "420738"
},
{
"name": "Singularity",
"bytes": "446"
}
],
"symlink_target": ""
} |
from rest_framework import routers
from .views import RefreshTokenViewSet
router = routers.SimpleRouter()
router.register(r'refresh-token', RefreshTokenViewSet)
| {
"content_hash": "8597dc662fd281b70201cdfa4e55760a",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 54,
"avg_line_length": 27.166666666666668,
"alnum_prop": 0.8282208588957055,
"repo_name": "lock8/django-rest-framework-jwt-refresh-token",
"id": "bef0127377397f963096aa99b11ee3320b31e10b",
"size": "163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "refreshtoken/routers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "828"
},
{
"name": "Python",
"bytes": "25369"
}
],
"symlink_target": ""
} |
def extractFillertranslationsWordpressCom(item):
'''
Parser for 'fillertranslations.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('vermillion', 'Vermillion', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | {
"content_hash": "5850753a9fbe6e6117d48d6c7977cb18",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 104,
"avg_line_length": 30.761904761904763,
"alnum_prop": 0.6207430340557275,
"repo_name": "fake-name/ReadableWebProxy",
"id": "5444508a5e52146deb6be6b4e919174f38a28c6f",
"size": "646",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractFillertranslationsWordpressCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
import gzip
import numpy
import pytest
from deep_qa.data.embeddings import PretrainedEmbeddings
from deep_qa.data.data_indexer import DataIndexer
from deep_qa.models.text_classification import ClassificationModel
from deep_qa.common.checks import ConfigurationError
from ..common.test_case import DeepQaTestCase
class TestPretrainedEmbeddings(DeepQaTestCase):
# pylint: disable=protected-access
def test_get_embedding_layer_uses_correct_embedding_dim(self):
data_indexer = DataIndexer()
embeddings_filename = self.TEST_DIR + "embeddings.gz"
with gzip.open(embeddings_filename, 'wb') as embeddings_file:
embeddings_file.write("word1 1.0 2.3 -1.0\n".encode('utf-8'))
embeddings_file.write("word2 0.1 0.4 -4.0\n".encode('utf-8'))
embedding_layer = PretrainedEmbeddings.get_embedding_layer(embeddings_filename, data_indexer)
assert embedding_layer.output_dim == 3
with gzip.open(embeddings_filename, 'wb') as embeddings_file:
embeddings_file.write("word1 1.0 2.3 -1.0 3.1\n".encode('utf-8'))
embeddings_file.write("word2 0.1 0.4 -4.0 -1.2\n".encode('utf-8'))
embedding_layer = PretrainedEmbeddings.get_embedding_layer(embeddings_filename, data_indexer)
assert embedding_layer.output_dim == 4
def test_get_embedding_layer_crashes_when_embedding_dim_is_one(self):
data_indexer = DataIndexer()
embeddings_filename = self.TEST_DIR + "embeddings.gz"
with gzip.open(embeddings_filename, 'wb') as embeddings_file:
embeddings_file.write("dimensionality 3\n".encode('utf-8'))
embeddings_file.write("word1 1.0 2.3 -1.0\n".encode('utf-8'))
embeddings_file.write("word2 0.1 0.4 -4.0\n".encode('utf-8'))
with pytest.raises(Exception):
PretrainedEmbeddings.get_embedding_layer(embeddings_filename, data_indexer)
def test_get_embedding_layer_skips_inconsistent_lines(self):
data_indexer = DataIndexer()
data_indexer.add_word_to_index("word1")
data_indexer.add_word_to_index("word2")
embeddings_filename = self.TEST_DIR + "embeddings.gz"
with gzip.open(embeddings_filename, 'wb') as embeddings_file:
embeddings_file.write("word1 1.0 2.3 -1.0\n".encode('utf-8'))
embeddings_file.write("word2 0.1 0.4 \n".encode('utf-8'))
embedding_layer = PretrainedEmbeddings.get_embedding_layer(embeddings_filename, data_indexer)
print(embedding_layer.weights)
word_vector = embedding_layer._initial_weights[0][data_indexer.get_word_index("word2")]
assert not numpy.allclose(word_vector[:2], numpy.asarray([0.1, 0.4]))
def test_get_embedding_layer_actually_initializes_word_vectors_correctly(self):
data_indexer = DataIndexer()
data_indexer.add_word_to_index("word")
embeddings_filename = self.TEST_DIR + "embeddings.gz"
with gzip.open(embeddings_filename, 'wb') as embeddings_file:
embeddings_file.write("word 1.0 2.3 -1.0\n".encode('utf-8'))
embedding_layer = PretrainedEmbeddings.get_embedding_layer(embeddings_filename, data_indexer)
word_vector = embedding_layer._initial_weights[0][data_indexer.get_word_index("word")]
assert numpy.allclose(word_vector, numpy.asarray([1.0, 2.3, -1.0]))
def test_get_embedding_layer_initializes_unseen_words_randomly_not_zero(self):
data_indexer = DataIndexer()
data_indexer.add_word_to_index("word2")
embeddings_filename = self.TEST_DIR + "embeddings.gz"
with gzip.open(embeddings_filename, 'wb') as embeddings_file:
embeddings_file.write("word 1.0 2.3 -1.0\n".encode('utf-8'))
embedding_layer = PretrainedEmbeddings.get_embedding_layer(embeddings_filename, data_indexer)
word_vector = embedding_layer._initial_weights[0][data_indexer.get_word_index("word2")]
assert not numpy.allclose(word_vector, numpy.asarray([0.0, 0.0, 0.0]))
def test_embedding_will_not_project_random_embeddings(self):
self.write_pretrained_vector_files()
self.write_true_false_model_files()
with pytest.raises(ConfigurationError):
args = {
"embeddings": {
"words": {
"dimension": 5,
"project": True,
"fine_tune": True,
"dropout": 0.2
}
}
}
model = self.get_model(ClassificationModel, args)
model.train()
def test_projection_dim_not_equal_to_pretrained_dim_with_no_projection_flag_raises_error(self):
self.write_pretrained_vector_files()
self.write_true_false_model_files()
with pytest.raises(ConfigurationError):
args = {
"embeddings": {
"words": {
"dimension": 13,
"pretrained_file": self.PRETRAINED_VECTORS_GZIP,
"project": False,
"fine_tune": False,
"dropout": 0.2
}
}
}
model = self.get_model(ClassificationModel, args)
model.train()
| {
"content_hash": "048fceb8dfbfd4402ea8bcdacad4dc6c",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 101,
"avg_line_length": 52.31730769230769,
"alnum_prop": 0.6072413159345709,
"repo_name": "matt-gardner/deep_qa",
"id": "6b5344ecefa1aef6d7d37f613ec0a632d6c2f7d2",
"size": "5484",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/data/embeddings_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1236066"
},
{
"name": "Shell",
"bytes": "5494"
}
],
"symlink_target": ""
} |
from participantCollection import ParticipantCollection
import re
import datetime
import pyperclip
# Edit Me!
# This script gets run on the first day of the following month, and that month's URL is
# what goes here. E.g. If this directory is the directory for February, this script gets
# run on March 1, and this URL is the URL for the March challenge page.
nextMonthURL = "https://www.reddit.com/r/pornfree/comments/81667l/stay_clean_march_this_thread_updated_daily_check/"
# If this directory is the directory for November, this script gets run on December 1,
# and currentMonthIndex gets the index of November, i.e. 11.
currentMonthIndex = datetime.date.today().month - 1
if currentMonthIndex == 0:
currentMonthIndex = 12
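# Worked example: when this runs on March 1, today().month is 3, so
# currentMonthIndex becomes 2 (February, the month this directory covers) and
# nextMonthIndex below becomes 3 (March).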
currentMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[currentMonthIndex]
nextMonthIndex = currentMonthIndex % 12 + 1
nextMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[nextMonthIndex]
participants = ParticipantCollection()
numberStillIn = participants.sizeOfParticipantsWhoAreStillIn()
initialNumber = participants.size()
percentStillIn = int(round(100 * numberStillIn / initialNumber, 0))
def templateForParticipants():
answer = ""
for participant in participants.participantsWhoAreStillInAndHaveCheckedIn():
answer += "/u/" + participant.name
answer += "\n\n"
return answer
def templateToUse():
answer = ""
answer += "The Stay Clean CURRENT_MONTH_NAME challenge is now over. Join us for **[the NEXT_MONTH_NAME challenge](NEXT_MONTH_URL)**.\n"
answer += "\n"
answer += "**NUMBER_STILL_IN** out of INITIAL_NUMBER participants made it all the way through the challenge. That's **PERCENT_STILL_IN%**.\n"
answer += "\n"
answer += "Congratulations to these participants, all of whom were victorious:\n\n"
answer += templateForParticipants()
return answer
def stringToPrint():
answer = templateToUse()
answer = re.sub('NUMBER_STILL_IN', str(numberStillIn), answer)
answer = re.sub('INITIAL_NUMBER', str(initialNumber), answer)
answer = re.sub('PERCENT_STILL_IN', str(percentStillIn), answer)
answer = re.sub('CURRENT_MONTH_INDEX', str(currentMonthIndex), answer)
answer = re.sub('CURRENT_MONTH_NAME', currentMonthName, answer)
answer = re.sub('NEXT_MONTH_INDEX', str(nextMonthIndex), answer)
answer = re.sub('NEXT_MONTH_NAME', nextMonthName, answer)
answer = re.sub('NEXT_MONTH_URL', nextMonthURL, answer)
return answer
outputString = stringToPrint()
print "============================================================="
print outputString
print "============================================================="
pyperclip.copy(outputString)
| {
"content_hash": "54f15d0bef5a8fc895fdb22be28b03fc",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 199,
"avg_line_length": 45.12307692307692,
"alnum_prop": 0.680872826457552,
"repo_name": "foobarbazblarg/stayclean",
"id": "fe2ee1530ef7102e5378f0c7fa2f22efac971f84",
"size": "3059",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stayclean-2018-february/display-final-after-month-is-over.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4232161"
},
{
"name": "Shell",
"bytes": "52056"
}
],
"symlink_target": ""
} |
import logging
import time
import pykka
logger = logging.getLogger(__name__)
class Future(pykka.ThreadingFuture):
Timeout = pykka.Timeout
@classmethod
def exception(cls, exc=None):
future = cls()
future.set_exception((type(exc), exc, None))
return future
@classmethod
def fromdbus(cls, func, *args, **kwargs):
method = getattr(func, "_method_name", "<unknown>")
logger.debug("Calling D-Bus method %s%s", method, args)
future = cls()
start = time.time()
def reply(*args):
logger.debug("%s reply after %.3fs", method, time.time() - start)
future.set(args[0] if len(args) == 1 else args)
def error(e):
logger.debug("%s error after %.3fs", method, time.time() - start)
future.set_exception(exc_info=(type(e), e, None))
func(*args, reply_handler=reply, error_handler=error, **kwargs)
return future
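# Usage sketch (hypothetical D-Bus proxy method obj.ListItems): the asynchronous
# call is wrapped in a Future and resolved with pykka's blocking get():
#   items = Future.fromdbus(obj.ListItems, 0, 10).get(timeout=5)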
@classmethod
def fromvalue(cls, value):
future = cls()
future.set(value)
return future
| {
"content_hash": "40fcbd2716a71a6524b154a3ecd5f644",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 77,
"avg_line_length": 26.317073170731707,
"alnum_prop": 0.5866543095458758,
"repo_name": "tkem/mopidy-dleyna",
"id": "b1ac8564c9e4abdb2b0c7bad1562af18cceeb506",
"size": "1079",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mopidy_dleyna/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "38607"
}
],
"symlink_target": ""
} |
"""Tests for tf_agents.bandits.environments.classification_environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from absl.testing.absltest import mock
import numpy as np
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
import tensorflow_probability as tfp
from tf_agents.bandits.environments import classification_environment as ce
tfd = tfp.distributions
def deterministic_reward_distribution(reward_table):
"""Returns a deterministic distribution centered at `reward_table`."""
return tfd.Independent(tfd.Deterministic(loc=reward_table),
reinterpreted_batch_ndims=2)
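# For example, deterministic_reward_distribution(tf.eye(3)) always pays a reward
# of 1 when action == label and 0 otherwise, which is how the tests below define
# a "correct" action.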
class ClassificationEnvironmentTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
dict(testcase_name='_3x2x3',
tbl=[[[0, 1, 2],
[3, 4, 5]],
[[6, 7, 8],
[9, 10, 11]],
[[12, 13, 14],
[15, 16, 17]]],
row=[0, 1, 1],
col=[0, 2, 0],
expected=[0, 11, 15]),
)
def testBatchedTableLookup(self, tbl, row, col, expected):
actual = ce._batched_table_lookup(tbl, row, col)
np.testing.assert_almost_equal(expected, self.evaluate(actual))
@parameterized.named_parameters(
dict(
testcase_name='_scalar_batch_1',
context=np.array([[0], [1]]),
labels=np.array([0, 1]),
batch_size=1),
dict(
testcase_name='_multi_dim_batch_23',
context=np.arange(100).reshape(10, 10),
labels=np.arange(10),
batch_size=23),
)
def testObservationShapeAndValue(self, context, labels, batch_size):
"""Test that observations have correct shape and values from `context`."""
dataset = (
tf.data.Dataset.from_tensor_slices(
(context, labels)).repeat().shuffle(4 * batch_size))
# A reward of 1 is given when action == label
reward_distribution = deterministic_reward_distribution(
tf.eye(len(set(labels))))
env = ce.ClassificationBanditEnvironment(
dataset, reward_distribution, batch_size)
expected_observation_shape = [batch_size] + list(context.shape[1:])
self.evaluate(tf.compat.v1.global_variables_initializer())
for _ in range(100):
observation = self.evaluate(env.reset().observation)
np.testing.assert_array_equal(observation.shape,
expected_observation_shape)
for o in observation:
self.assertIn(o, context)
def testReturnsCorrectRewards(self):
"""Test that rewards are being returned correctly for a simple case."""
# Reward of 1 is given if action == (context % 3)
context = tf.reshape(tf.range(128), shape=[128, 1])
labels = tf.math.mod(context, 3)
batch_size = 32
dataset = (
tf.data.Dataset.from_tensor_slices(
(context, labels)).repeat().shuffle(4 * batch_size))
reward_distribution = deterministic_reward_distribution(tf.eye(3))
env = ce.ClassificationBanditEnvironment(
dataset, reward_distribution, batch_size)
self.evaluate(tf.compat.v1.global_variables_initializer())
for _ in range(10):
# Take the 'correct' action
observation = env.reset().observation
action = tf.math.mod(observation, 3)
reward = env.step(action).reward
np.testing.assert_almost_equal(self.evaluate(reward),
self.evaluate(tf.ones_like(reward)))
for _ in range(10):
# Take the 'incorrect' action
observation = env.reset().observation
action = tf.math.mod(observation + 1, 3)
reward = env.step(action).reward
np.testing.assert_almost_equal(self.evaluate(reward),
self.evaluate(tf.zeros_like(reward)))
def testPreviousLabelIsSetCorrectly(self):
"""Test that the previous label is set correctly for a simple case."""
# Reward of 1 is given if action == (context % 3)
context = tf.reshape(tf.range(128), shape=[128, 1])
labels = tf.math.mod(context, 3)
batch_size = 4
dataset = (
tf.data.Dataset.from_tensor_slices(
(context, labels)).repeat().shuffle(4 * batch_size))
reward_distribution = deterministic_reward_distribution(tf.eye(3))
env = ce.ClassificationBanditEnvironment(
dataset, reward_distribution, batch_size)
self.evaluate(tf.compat.v1.global_variables_initializer())
time_step = env.reset()
time_step_label = tf.squeeze(tf.math.mod(time_step.observation, 3))
action = tf.math.mod(time_step.observation, 3)
next_time_step = env.step(action)
next_time_step_label = tf.squeeze(
tf.math.mod(next_time_step.observation, 3))
if tf.executing_eagerly():
np.testing.assert_almost_equal(
self.evaluate(time_step_label),
self.evaluate(env._previous_label))
np.testing.assert_almost_equal(
self.evaluate(next_time_step_label),
self.evaluate(env._current_label))
else:
with self.cached_session() as sess:
time_step_label_value, next_time_step_label_value = (
sess.run([time_step_label, next_time_step_label]))
previous_label_value = self.evaluate(env._previous_label)
np.testing.assert_almost_equal(
time_step_label_value, previous_label_value)
current_label_value = self.evaluate(env._current_label)
np.testing.assert_almost_equal(
next_time_step_label_value,
current_label_value)
def testShuffle(self):
"""Test that dataset is being shuffled when asked."""
# Reward of 1 is given if action == (context % 3)
context = tf.reshape(tf.range(128), shape=[128, 1])
labels = tf.math.mod(context, 3)
batch_size = 32
dataset = (
tf.data.Dataset.from_tensor_slices(
(context, labels)).repeat().shuffle(4 * batch_size))
reward_distribution = deterministic_reward_distribution(tf.eye(3))
# Note - shuffle should happen *first* in the call chain, so this
# test will fail if shuffle is called e.g. after batch or prefetch.
dataset.shuffle = mock.Mock(spec=dataset.shuffle,
side_effect=dataset.shuffle)
ce.ClassificationBanditEnvironment(
dataset, reward_distribution, batch_size)
dataset.shuffle.assert_not_called()
ce.ClassificationBanditEnvironment(
dataset, reward_distribution, batch_size, shuffle_buffer_size=3, seed=7)
dataset.shuffle.assert_called_with(
buffer_size=3, reshuffle_each_iteration=True, seed=7)
@mock.patch('tf_agents.bandits.environments.classification_environment'+
'.eager_utils.dataset_iterator')
def testPrefetch(self, mock_dataset_iterator):
"""Test that dataset is being prefetched when asked."""
mock_dataset_iterator.return_value = 'mock_iterator_result'
# Reward of 1 is given if action == (context % 3)
context = tf.reshape(tf.range(128), shape=[128, 1])
labels = tf.math.mod(context, 3)
batch_size = 32
dataset = tf.data.Dataset.from_tensor_slices((context, labels))
reward_distribution = deterministic_reward_distribution(tf.eye(3))
# Operation order should be batch() then prefetch(), have to jump
# through a couple hoops to get this sequence tested correctly.
# Save dataset.prefetch in temp mock_prefetch, return batched dataset to
# make down-stream logic work correctly with batch dimensions.
batched_dataset = dataset.batch(batch_size)
mock_prefetch = mock.Mock(spec=dataset.prefetch,
return_value=batched_dataset)
# Replace dataset.batch with mock batch that returns original dataset,
# in order to make mocking out its prefetch call easier.
dataset.batch = mock.Mock(spec=batched_dataset,
return_value=batched_dataset)
# Replace dataset.prefetch with mock_prefetch.
batched_dataset.prefetch = mock_prefetch
env = ce.ClassificationBanditEnvironment(
dataset, reward_distribution, batch_size, repeat_dataset=False)
dataset.batch.assert_called_with(batch_size, drop_remainder=True)
batched_dataset.prefetch.assert_not_called()
mock_dataset_iterator.assert_called_with(batched_dataset)
self.assertEqual(env._data_iterator, 'mock_iterator_result')
env = ce.ClassificationBanditEnvironment(
dataset, reward_distribution, batch_size, repeat_dataset=False,
prefetch_size=3)
dataset.batch.assert_called_with(batch_size, drop_remainder=True)
batched_dataset.prefetch.assert_called_with(3)
mock_dataset_iterator.assert_called_with(batched_dataset)
self.assertEqual(env._data_iterator, 'mock_iterator_result')
if __name__ == '__main__':
tf.test.main()
| {
"content_hash": "3b5f84d44076489da0ee90779a461ccf",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 80,
"avg_line_length": 42.73557692307692,
"alnum_prop": 0.6598042524468444,
"repo_name": "tensorflow/agents",
"id": "a7591e7e8cb89c049b8952c14cb8d64cf6e1b9dc",
"size": "9492",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tf_agents/bandits/environments/classification_environment_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4930266"
},
{
"name": "Shell",
"bytes": "10950"
}
],
"symlink_target": ""
} |
"""Contains internal functions for interacting with the Batfish service."""
from typing import TYPE_CHECKING, Any, Dict, Optional, Union # noqa: F401
from pybatfish.client import restv2helper
from pybatfish.client.consts import CoordConsts
from pybatfish.datamodel.answer import Answer # noqa: F401
from pybatfish.util import get_uuid
from . import resthelper, workhelper
from .options import Options
if TYPE_CHECKING:
from pybatfish.client.session import Session # noqa: F401
def _bf_answer_obj(
session: "Session",
question_str: str,
question_name: str,
background: bool,
snapshot: str,
reference_snapshot: Optional[str],
extra_args: Optional[Dict[str, Any]],
) -> Union[Answer, str]:
if not question_name:
question_name = Options.default_question_prefix + "_" + get_uuid()
# Upload the question
if session.use_deprecated_workmgr_v1():
json_data = workhelper.get_data_upload_question(
session,
question_name,
question_str,
)
resthelper.get_json_response(
session, CoordConsts.SVC_RSC_UPLOAD_QUESTION, json_data
)
else:
restv2helper.upload_question(session, question_name, question_str)
# Answer the question
work_item = workhelper.get_workitem_answer(
session, question_name, snapshot, reference_snapshot
)
workhelper.execute(work_item, session, background, extra_args)
if background:
return work_item.id
# get the answer
return session.get_answer(question_name, snapshot, reference_snapshot)
def _bf_get_question_templates(session: "Session", verbose: bool = False) -> Dict:
return restv2helper.get_question_templates(session, verbose)
| {
"content_hash": "1e6d721f0180932d880162be1a258a95",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 82,
"avg_line_length": 31.178571428571427,
"alnum_prop": 0.6918671248568156,
"repo_name": "batfish/pybatfish",
"id": "3cd5846f55b2ac105cd0eafb61e9259e66082bdb",
"size": "2376",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pybatfish/client/internal.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1219681"
},
{
"name": "Python",
"bytes": "684750"
}
],
"symlink_target": ""
} |
import numpy as np
import torch
from typing import Dict
from fairseq.data.monolingual_dataset import MonolingualDataset
from . import FairseqDataset
class LMContextWindowDataset(FairseqDataset):
"""
Wraps a MonolingualDataset and provides more context for evaluation.
Each item in the new dataset will have a maximum size of
``tokens_per_sample + context_window``.
Args:
dataset: dataset to wrap
tokens_per_sample (int): the max number of tokens in each dataset item
context_window (int): the number of accumulated tokens to add to each
dataset item
pad_idx (int): padding symbol
"""
def __init__(
self,
dataset: MonolingualDataset,
tokens_per_sample: int,
context_window: int,
pad_idx: int,
):
assert context_window > 0
self.dataset = dataset
self.tokens_per_sample = tokens_per_sample
self.context_window = context_window
self.pad_idx = pad_idx
self.prev_tokens = np.empty([0])
def __getitem__(self, index):
return self.dataset[index]
def __len__(self):
return len(self.dataset)
def collater(self, samples) -> Dict:
sample = self.dataset.collater(samples)
pad = self.pad_idx
max_sample_len = self.tokens_per_sample + self.context_window
bsz, tsz = sample["net_input"]["src_tokens"].shape
start_idxs = [0] * bsz
toks = sample["net_input"]["src_tokens"]
lengths = sample["net_input"]["src_lengths"]
tgt = sample["target"]
new_toks = np.empty([bsz, tsz + self.context_window], dtype=np.int64)
new_tgt = np.full([bsz, tsz + self.context_window], pad, dtype=np.int64)
sample_lens = toks.ne(pad).long().sum(dim=1).cpu()
for i in range(bsz):
sample_len = sample_lens[i]
extra = len(self.prev_tokens) + sample_len - max_sample_len
if extra > 0:
self.prev_tokens = self.prev_tokens[extra:]
pads = np.full(self.context_window - len(self.prev_tokens), pad)
new_toks[i] = np.concatenate([self.prev_tokens, toks[i].numpy(), pads])
new_tgt[
i, len(self.prev_tokens) : len(self.prev_tokens) + len(tgt[i])
] = tgt[i]
start_idxs[i] = len(self.prev_tokens)
lengths[i] += len(self.prev_tokens)
self.prev_tokens = new_toks[i][new_toks[i] != pad][-self.context_window :]
sample["net_input"]["src_tokens"] = torch.from_numpy(new_toks)
sample["target"] = torch.from_numpy(new_tgt)
sample["start_indices"] = start_idxs
return sample
def num_tokens(self, index):
return self.dataset.num_tokens(index)
def size(self, index):
return self.dataset.size(index)
def ordered_indices(self):
# NOTE we don't shuffle the data to retain access to the previous dataset elements
return np.arange(len(self.dataset))
@property
def supports_prefetch(self):
return getattr(self.dataset, "supports_prefetch", False)
def prefetch(self, indices):
return self.dataset.prefetch(indices)
| {
"content_hash": "15a29e1741041036511d76e755d087d9",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 90,
"avg_line_length": 34.81521739130435,
"alnum_prop": 0.6088042460193569,
"repo_name": "pytorch/fairseq",
"id": "1a945927cf0d96719003685676a990737a3762b2",
"size": "3381",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "fairseq/data/lm_context_window_dataset.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "21106"
},
{
"name": "Cuda",
"bytes": "38166"
},
{
"name": "Cython",
"bytes": "13294"
},
{
"name": "Lua",
"bytes": "4210"
},
{
"name": "Python",
"bytes": "3699357"
},
{
"name": "Shell",
"bytes": "2182"
}
],
"symlink_target": ""
} |
import sys
from sys import stderr
import json
from os.path import basename, getsize
from safe import extract_metadata
from safe.index import write_metadata
from util.cli import error
def usage():
""" Print short command usage. """
exename = basename(sys.argv[0])
print >>stderr, "USAGE: %s <md-schema> <safe-file>" % exename
print >>stderr, ""
print >>stderr, (
"This command extracts SAFE file metadata defined by the schema and "
"writes them to the standard output as one index file line."
)
if __name__ == "__main__":
# pylint: disable=invalid-name
try:
schema = sys.argv[1]
input_ = sys.argv[2]
except IndexError:
error("Not enough input arguments.")
usage()
sys.exit(1)
# load JSON schema
with open(schema) as fobj:
schema = json.load(fobj)
# extract and print metadata
with open(input_) as fobj:
write_metadata(extract_metadata(
fobj, schema, safe_name=basename(input_), safe_size=getsize(input_)
), schema)
| {
"content_hash": "50f897a51dc10f5c64c8dd5480ebd25c",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 79,
"avg_line_length": 28.72972972972973,
"alnum_prop": 0.6302916274694261,
"repo_name": "DREAM-ODA-OS/tools",
"id": "1450d061a2cff4f7a8f65b77b56f39480e2aa3cf",
"size": "2524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "safe/safe_to_index.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "629815"
}
],
"symlink_target": ""
} |
import json
class bag_of_words(dict):
def __missing__(self, word):
return 0
def __len__(self):
return sum(self.values())
def __iadd__(self, d):
for word, count in d.items():
self[word] += count
return self
def read(self, tokenizer, string):
for tpl in tokenizer.tokenize_smartly(string):
self[tpl[0]] += 1
return self
class bag_dict(dict):
def __missing__(self, field_name):
self[field_name] = bag_of_words()
return self[field_name]
def __iadd__(self, d):
for fn, bow in d.items():
if fn[0] == '~': # Continuous
assert 1 == len(bow)
k = list(bow.keys())[0]
bow = {float(k): bow[k]}
self[fn] += bow
return self
def read(self, tokenizer, d):
for (field_name, string) in d.items():
if field_name[0] in ('_', '~'): # Protect from tokenizer
self[field_name][string] += 1
else:
self[field_name].read(tokenizer, string)
return self
def reduce(self):
result = bag_of_words()
for bow in self.values():
for (word, count) in bow.items():
result[word] += count
return result
class bag_jag(object):
def __init__(self, l=[]):
self.body = [] + l
self.df = bag_of_words()
self.total_len = bag_of_words()
def __len__(self):
return len(self.body)
def __iadd__(self, other):
self.body += other.body
self.df += other.df
self.total_len += other.total_len
return self
def append(self, bd):
self.body.append(bd)
for word in bd.reduce().keys():
if isinstance(word, str):
self.df[word] += 1
for (field_name, bow) in bd.items():
self.total_len[field_name] += len(bow)
return self
def read(self, path):
with open(path, 'r', encoding='utf-8') as f:
self.df += json.loads(f.readline())
self.total_len += json.loads(f.readline())
for l in f.readlines():
bd = bag_dict()
bd += json.loads(l)
self.body.append(bd)
return self
def write(self, path):
with open(path, 'w', encoding='utf-8') as f:
f.write(json.dumps(self.df, ensure_ascii=False))
f.write('\n')
f.write(json.dumps(self.total_len, ensure_ascii=False))
f.write('\n')
for bd in self.body:
f.write(json.dumps(bd, ensure_ascii=False))
f.write('\n')
return self
| {
"content_hash": "776618d5898cc6a48dc1e5446b70f2ba",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 69,
"avg_line_length": 28.46315789473684,
"alnum_prop": 0.4940828402366864,
"repo_name": "rschaniel/BM25F",
"id": "2347653a125fb74759d4966e0ab012e5f69aafb6",
"size": "2704",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "BM25F/exp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27523"
}
],
"symlink_target": ""
} |
from datetime import datetime
import os
import argparse # argument parser library
from prettytable import PrettyTable
from prettytable import ALL
import pwd
from shutil import copyfile
import getpass
Dir = os.path.dirname(os.path.realpath(__file__))
def exitLoop(ncycle):
limit = 6
return True if ncycle == limit else False
def findNEvents(path, dataset):
ncycle = 0
notFind = True
nEvents = -99
while not exitLoop(ncycle) and notFind:
findNEventsCommand = 'python ' + path + '/das_client.py --limit=1000 --format=plain --query="dataset dataset=' + dataset + ' | grep dataset.nevents" > tmp.txt ; tail -1 tmp.txt > pdEvents.txt'
os.system(findNEventsCommand)
fileInput = open('pdEvents.txt','r')
for line in fileInput:
number = line.rstrip()
if not number.strip() :
return 0
try:
nEvents = int(number)
notFind = False
except ValueError:
print "Oops! Got an invalid output for findNEvents. Let me try again..."
ncycle += 1
fileInput.close()
return nEvents
def findNFiles(path, dataset):
ncycle = 0
notFind = True
nFiles = -99
while not exitLoop(ncycle) and notFind:
findNFilesCommand = 'python ' + path + '/das_client.py --limit=1000 --format=plain --query="dataset dataset=' + dataset + ' | grep dataset.nfiles" > tmp.txt ; tail -1 tmp.txt > pdNfiles.txt'
os.system(findNFilesCommand)
fileInput = open('pdNfiles.txt','r')
for line in fileInput:
number = line.rstrip()
if not number.strip() :
return 0
try:
nFiles = int(number)
notFind = False
except ValueError:
print "Oops! Got an invalid output for findNFiles. Let me try again..."
ncycle += 1
fileInput.close()
return nFiles
def findPDVolume(path, dataset):
ncycle = 0
notFind = True
pdVolume = -99
while not exitLoop(ncycle) and notFind:
findPDVolumeCommand = 'python ' + path + '/das_client.py --limit=1000 --format=plain --query="dataset dataset=' + dataset + ' | grep dataset.size" > tmp.txt ; tail -1 tmp.txt > pdSize.txt'
os.system(findPDVolumeCommand)
fileInput = open('pdSize.txt', 'r')
for line in fileInput:
number = line.rstrip()
if not number.strip() :
return 0
try:
pdVolume = int(number)
notFind = False
except ValueError:
print "Oops! Got an invalid output for findPDVolume. Let me try again..."
ncycle += 1
fileInput.close()
return pdVolume
def findDataSetFractions(path, dataset):
ncycle = 0
notFind = True
dataSetSites = []
dataSetFractions = []
while not exitLoop(ncycle) and notFind:
findSitesCommand = 'python ' + path + '/das_client.py --limit=1000 --format=plain --query="site dataset=' + dataset + ' | grep site.name, site.dataset_fraction" | grep _ > tmpSites.txt'
os.system(findSitesCommand)
bytes = os.path.getsize('tmpSites.txt')
if(bytes <= 0):
return dataSetSites, dataSetFractions
fileInput = open('tmpSites.txt','r')
for line in fileInput:
lineStrip = line.rstrip('\n')
lineStripSplit = lineStrip.split(' ')
splitLength = len(lineStripSplit)
try:
dataSetFraction = ((lineStripSplit[splitLength - 1].rstrip('"')).rstrip('%')).lstrip('"')
dataSetFraction = float(dataSetFraction)/100.
dataSetFractions.append(dataSetFraction)
frontStrip = lineStripSplit[0].lstrip('[\'"')
backStrip = frontStrip.rstrip('"\',')
dataSetSites.append(backStrip)
notFind = False
except ValueError:
print "Oops! Got an invalid output in findDataSetFractions. Let me try again..."
ncycle += 1
fileInput.close()
return dataSetSites, dataSetFractions
def main():
if(os.path.isfile('./das_client.py') == False):
print "\n Cannot find das_client.py in this working directory"
exit()
if(os.path.isfile('./listOfRAWs.txt') == False):
print "\n Cannot find listOfRAWs.txt in this working directory"
exit()
parser = argparse.ArgumentParser()
parser.add_argument('--title', default='HI RAW File Set', help="Name for the table title; default is HI RAW File Set")
parser.add_argument('--nPDs', default=-1, type=int, help="Number of PDs to be processed; default is -1 for all PDs")
args = parser.parse_args()
#
# Get the input parameters from the command line
#
title = args.title
nPDs = args.nPDs
limitPDs = True
if(nPDs < 1): limitPDs = False
x = PrettyTable(["PDs","File Size(GB)", "Files", "#Evts(K)", "# of T1 sites", "# of T2 sites", "First T2", "Second T2", "At VU", "On FNAL Tape", "On T0 Tape"])
x.align["PD Name"] = "l"
x.padding_width = 1
x.float_format = .3;
x.hrules = ALL
now = datetime.now()
mm = str(now.month)
dd = str(now.day)
yyyy = str(now.year)
hour = str(now.hour)
mi = str(now.minute)
ss = str(now.second)
tableTitle = title + " Information at %s/%s/%s %s:%s:%s (Nashville time)" % (mm, dd, yyyy, hour, mi, ss)
print tableTitle
totFiles = 0
totNEvents = 0
totPDVolume = 0 # Byte
onFNALTapeNumber = 0
onT0TapeNumber = 0
firstT2Number = 0
secondT2Number = 0
atVUNumber = 0
fileInput = open("listOfRAWs.txt", "r");
numberOfPDs = 0
totalNumberOfT1Sites = 0
totalNumberOfT2Sites = 0
volumeOnlyAtVU = 0.0
volumeAtVanderbilt = 0.0
volumeAtOtherT2 = 0.0
volumeAtNoT2 = 0.0
volumeOnTapeAtFNAL = 0.0
volumeOnTapeAtT0 = 0.0
for dataset in fileInput:
#
# Determine the PD name from the input list
#
pdName = dataset.rstrip('\n')
numberOfPDs += 1
listOfSites, listOfFractions = findDataSetFractions(Dir, pdName)
numberOfT1Sites = 0
numberOfT2Sites = 0
onT0Tape = 'No'
onFNALTape = 'No'
firstT2 = 'None'
secondT2 = 'None'
atVU = 'No'
vuFraction = 0.0
pdVolume = findPDVolume(Dir, pdName)
for kSite in range(len(listOfSites)):
siteFullName = listOfSites[kSite]
siteNameSplit = siteFullName.split('_')
if(siteNameSplit[0] == 'T1' and siteNameSplit[3] == 'MSS'): numberOfT1Sites += 1
if(siteNameSplit[0] == 'T2'):
numberOfT2Sites += 1
siteName = siteNameSplit[2]
if(siteNameSplit[2] == 'Vanderbilt'):
atVU = 'Yes' + ' (' + str(listOfFractions[kSite]) + ')'
atVUNumber += 1
siteName = 'VU'
vuFraction = listOfFractions[kSite]
volumeAtVanderbilt += pdVolume*vuFraction
else:
volumeAtOtherT2 += pdVolume*listOfFractions[kSite]
if(numberOfT2Sites == 1): firstT2 = siteName + ' (' + str(listOfFractions[kSite]) + ')'; firstT2Number += 1
if(numberOfT2Sites == 2): secondT2 = siteName + ' (' + str(listOfFractions[kSite]) + ')' ; secondT2Number += 1
if(siteFullName == 'T1_US_FNAL_MSS'):
onFNALTape = 'Yes' + ' (' + str(listOfFractions[kSite]) + ')'
onFNALTapeNumber += 1
volumeOnTapeAtFNAL += pdVolume*listOfFractions[kSite]
if(siteFullName == 'T0_CH_CERN_MSS'):
onT0Tape = 'Yes' + ' (' + str(listOfFractions[kSite]) + ')'
onT0TapeNumber += 1
volumeOnTapeAtT0 += pdVolume*listOfFractions[kSite]
totalNumberOfT1Sites += numberOfT1Sites
totalNumberOfT2Sites += numberOfT2Sites
if(numberOfT2Sites == 0):
volumeAtNoT2 += pdVolume
nEvents = findNEvents(Dir, pdName)
nFiles = findNFiles(Dir, pdName)
if(numberOfT2Sites == 1 and atVU != 'No'):
volumeOnlyAtVU += pdVolume*vuFraction
if nEvents < 0 or pdVolume < 0:
print "Something is wrong! Please check!"
if nEvents > 0 :
eventSize = (pdVolume/1.0e6) /nEvents #MB
else :
eventSize = 0
totFiles += nFiles
totNEvents += nEvents
totPDVolume += pdVolume
x.add_row([pdName, int(pdVolume/1.0e9), nFiles, int(nEvents/1.0e3), numberOfT1Sites, numberOfT2Sites, firstT2, secondT2, atVU, onFNALTape, onT0Tape])
if(limitPDs and numberOfPDs >= nPDs):
break
sumText = 'Sum for ' + str(numberOfPDs) + ' PDs'
averageT1Sites = float(totalNumberOfT1Sites)/float(numberOfPDs)
averageT2Sites = float(totalNumberOfT2Sites)/float(numberOfPDs)
averageT2SitesInt = int(100*averageT2Sites)
averageT2SitesFloat = averageT2SitesInt/100.
averageT2SitesText = 'Average = ' + str(averageT2SitesFloat)
averageT1SitesInt = int(100*averageT1Sites)
averageT1SitesFloat = averageT1SitesInt/100.
averageT1SitesText = 'Average = ' + str(averageT1SitesFloat)
x.add_row([sumText, int(totPDVolume/1.0e9), totFiles, int(totNEvents/1.0e3), averageT1SitesText, averageT2SitesText, firstT2Number, secondT2Number, atVUNumber, onFNALTapeNumber, onT0TapeNumber])
print x
print "\n"
volumeOnlyAtVUGB = volumeOnlyAtVU/1.0e9
volumeOnlyAtVUPerCent = 100*volumeOnlyAtVU/totPDVolume
volumeOnlyAtVUPerCentString = 'GB, corresponding to ' + str(int(volumeOnlyAtVUPerCent+0.5)) + '% of the total PD volume'
volumeAtVanderbiltGB = volumeAtVanderbilt/1.0e9
volumeAtOtherT2GB = volumeAtOtherT2/1.0e9
volumeAtNoT2GB = volumeAtNoT2/1.0e9
volumeNonUniqueAtVanderbiltT2GB = (totPDVolume - volumeAtNoT2 - volumeOnlyAtVU)/1.0e9
volumeOnTapeAtFNALGB = volumeOnTapeAtFNAL/1.0e9
volumeOnTapeAtT0GB = volumeOnTapeAtT0/1.0e9
print '%s %d %s' % (' PD volume amount which exists uniquely at Vanderbilt = ', volumeOnlyAtVUGB, volumeOnlyAtVUPerCentString)
print '%s %d %s' % (' PD volume amount total which exists at Vanderbilt = ', volumeAtVanderbiltGB, 'GB')
print '%s %d %s' % (' PD volume amount total replicated other T2 sites = ', volumeAtOtherT2GB, 'GB')
print '%s %d %s' % (' PD volume amount which exists at no T2 site = ', volumeAtNoT2GB, 'GB')
print '%s %d %s' % (' PD volume amount not uniquely at Vanderbilt T2 site = ', volumeNonUniqueAtVanderbiltT2GB, 'GB')
print '%s %d %s' % (' PD volume on tape at FNAL = ', volumeOnTapeAtFNALGB, 'GB')
print '%s %d %s' % (' PD volume on tape at T0 = ', volumeOnTapeAtT0GB, 'GB')
exit()
if __name__ == '__main__':
main()
exit()
| {
"content_hash": "17a8130313be4344bea385ef4e9ad515",
"timestamp": "",
"source": "github",
"line_count": 286,
"max_line_length": 200,
"avg_line_length": 38.67832167832168,
"alnum_prop": 0.5956427409148436,
"repo_name": "tuos/dasMonitorAOD2015",
"id": "86429ba9006665b934886aaeb4b8cee1a73db062",
"size": "12709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "XeXe2017/RAW/printHIRAWSites.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1105"
},
{
"name": "Python",
"bytes": "116281"
},
{
"name": "Roff",
"bytes": "35083"
},
{
"name": "Shell",
"bytes": "5682"
}
],
"symlink_target": ""
} |
import sys
from day14_part1 import *
# Calculate the accumulated points gained along the race
def points_gained(reindeers, time):
points = [0] * len(reindeers)
for second in range(1, time + 1):
positions = distances_traveled(reindeers, second)
leading_position = max(positions)
# Each reindeer in the leading position gets a point
for i in range(len(positions)):
if positions[i] == leading_position:
points[i] += 1
return points
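# Illustrative check (not part of the original solution): for the puzzle's
# sample reindeer, Comet (14 km/s for 10 s, then 127 s rest) and Dancer
# (16 km/s for 11 s, then 162 s rest), points_gained(reindeers, 1000) should
# give Dancer 689 points and Comet 312, matching the day 14 example.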
# Main
print(max(points_gained(parse_reindeers(sys.stdin.readlines()), 2503)))
| {
"content_hash": "71847ba4d50cbbb1443d98c629b3dca4",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 71,
"avg_line_length": 34.705882352941174,
"alnum_prop": 0.6508474576271186,
"repo_name": "jkbockstael/adventofcode-2015",
"id": "0e3ccbc12c52f02e7f0e29412aa3bccac54542dc",
"size": "689",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "day14_part2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36470"
}
],
"symlink_target": ""
} |
'''Base Module for Sensor Collection Management'''
import functools
import collections
import logging
import multiprocessing
import os
import subprocess
logger = logging.getLogger(__name__)
__all__ = ['RA_DELIMITER', 'RA_FIELDS']
intconv = functools.partial(int, base=0)
# Ra Options and Fields
RA_DELIMITER = ','
RA_FIELDS = collections.OrderedDict([
('seq', intconv),
('dir', str),
('stime', float),
('ltime', float),
('dur', float),
('flgs', str),
('state', str),
('proto', str),
('saddr', str),
('sport', intconv),
('daddr', str),
('dport', intconv),
('pkts', intconv),
('spkts', intconv),
('dpkts', intconv),
('bytes', intconv),
('sbytes', intconv),
('dbytes', intconv),
('sappbytes', intconv),
('dappbytes', intconv),
('rate', float),
('srate', float),
('sintpkt', float),
('sjit', float),
('drate', float),
('dintpkt', float),
('djit', float),
('smeansz', float),
('dmeansz', float),
('smaxsz', intconv),
('dmaxsz', intconv),
('sminsz', intconv),
('dminsz', intconv),
])
class ArgusStream(object):
'''Class that launches argus and ra to generate a stream of
netflow data for consumption.
'''
FNULL = open(os.devnull, 'w')
FIELDS = {
u'stime': u'start_time_epoch',
u'ltime': u'stop_time_epoch',
u'saddr': u'src',
u'daddr': u'dst',
}
def _construct_commands(self, interfaces):
'''Initialize netflow collection'''
argus_cmd = ['argus', '-XCAJRZ', '-S', '5', '-w', '-', ]
for eth in interfaces:
argus_cmd.extend(['-i', eth])
ra_cmd = ['ra', '-L', '-1', '-n', '-u',
'-c{}'.format(RA_DELIMITER), '-s', ]
ra_cmd.extend([str(x) for x in list(RA_FIELDS.keys())])
ra_cmd.extend(['-', 'ip'])
self.input_cmd = argus_cmd
self.output_cmd = ra_cmd
self.enabled = True
def ra_to_dict(self, flow):
'''Convert a raw ra record into a dict'''
def _normalize(f):
return self.FIELDS.get(f, f)
values = flow.split(RA_DELIMITER)
flow_dict = {}
        fields = list(RA_FIELDS.keys())
for i, value in enumerate(values):
if value:
try:
k = fields[i]
flow_dict[_normalize(k)] = RA_FIELDS[k](value)
except ValueError:
logger.exception('Failed to convert field %s!', k)
# Stamp telemetry identifier
flow_dict['telemetry'] = 'flow'
return flow_dict
def _handle_output(self):
'''Process output lines'''
try:
for line in iter(self.output_proc.stdout.readline, b''):
record = self.ra_to_dict(line.rstrip())
self.out_queue.put(record)
except Exception:
logger.exception('Error processing output!')
def __init__(self, interfaces): # pragma: no cover
# Parse settings
self.input_cmd = None
self.output_cmd = None
self.input_proc = None
self.output_proc = None
self.out_queue = multiprocessing.Queue()
self._construct_commands(interfaces)
def start(self):
'''Start argus and ra to generate netflow records'''
self.input_proc = subprocess.Popen(self.input_cmd,
close_fds=True,
stdout=subprocess.PIPE,
stderr=self.FNULL)
self.output_proc = subprocess.Popen(self.output_cmd,
close_fds=True,
stdin=self.input_proc.stdout,
stdout=subprocess.PIPE,
stderr=self.FNULL)
self.processor = multiprocessing.Process(target=self._handle_output)
self.processor.daemon = True
self.processor.start()
def get(self):
'''Retrieve a netflow record'''
return self.out_queue.get()
def __del__(self):
if self.input_proc is not None:
self.input_proc.kill()
self.input_proc = None
if self.output_proc is not None:
self.output_proc.kill()
self.output_proc = None
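# Minimal usage sketch (illustrative, not part of the original module). It
# assumes the 'argus' and 'ra' binaries are installed and that the process may
# capture on the named interface; the interface name is a placeholder.
if __name__ == '__main__':  # pragma: no cover
    stream = ArgusStream(['eth0'])
    stream.start()
    flow = stream.get()  # blocks until ra emits a flow record
    # 'saddr'/'daddr' are renamed to 'src'/'dst' by ra_to_dict via FIELDS
    print('%s -> %s (%s bytes)' % (flow.get('src'), flow.get('dst'), flow.get('bytes')))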
| {
"content_hash": "a8ada664387c5fa4627e57114fbcb871",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 76,
"avg_line_length": 29.079470198675498,
"alnum_prop": 0.5167387838761103,
"repo_name": "effluxsystems/pyefflux",
"id": "f1062167e451d2f7cf146baa526e9886cd8d5d46",
"size": "4391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "efflux/helpers/argus.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47024"
},
{
"name": "Shell",
"bytes": "614"
}
],
"symlink_target": ""
} |
"""
The coupling module describes features of coupling functions.
NB bug/feature wrt. TVB here: we assume pre_syn is observables, not state vars.
If a single expression is given, it is applied to all observables. Otherwise,
empty expressions stop evaluation of coupling on one or more observables.
A connection is specified node to node, so applies to all cvars.
"""
import enum
import numpy as np
import pymbolic as pm
from pymbolic.mapper.dependency import DependencyMapper
from .utils import exprs
from .model import BaseModel
from .utils import getLogger
class PostSumStat(enum.Enum):
sum = 'sum'
mean = 'mean'
class BaseCoupling:
"""
A coupling function describe pre- and post-summation expressions
which compute the coupling from model observables to mode inputs,
weighted by connectivity.
This class is only concerned with describing those functions, not
producing a kernel which employs these expressions. For this, please see
the network classes.
"""
param = {}
pre_sum = ''
post_sum = ''
def __init__(self, model: BaseModel):
self.model = model
self.param_sym = np.array([pm.var(name) for name in self.param.keys()])
self.pre_sum_sym = exprs(self.pre_sum)
self.post_sum_sym = exprs(self.post_sum)
lname = '%s<%s>'
lname %= self.__class__.__name__, model.__class__.__name__
self.logger = getLogger(lname)
self._check_io()
def _check_io(self):
obsrv_sym = self.model.obsrv_sym
if self.model.input_sym.size < obsrv_sym.size:
msg = 'input shorter than obsrv, truncating obsrv used for cfun.'
self.logger.debug(msg)
obsrv_sym = obsrv_sym[:self.model.input_sym.size]
terms = (
obsrv_sym,
self.pre_sum_sym,
self.post_sum_sym,
self.model.input_sym
)
bcast = np.broadcast(*terms)
self.io = list(bcast)
fmt = 'io[%d] (%s) -> (%s) -> (%s) -> (%s)'
for i, parts in enumerate(self.io):
self.logger.debug(fmt, i, *parts)
def post_stat(self, i: int) -> PostSumStat:
"""
Post-summation expressions can refer to 'sum' or 'mean' special
names referring to the summation or its average.
This function looks at variables used in the i'th post-summation
expression and returns 'sum' if found, 'mean' if found, otherwise
raises an exception.
"""
mapper = DependencyMapper(include_calls=False)
dep_names = [dep.name for dep in mapper(self.post_sum_sym[i])]
if 'mean' in dep_names:
return PostSumStat('mean')
if 'sum' in dep_names:
return PostSumStat('sum')
raise ValueError('unknown stat in %r' % (self.post_sum, ))
class Linear(BaseCoupling):
param = {'a': 1e-3, 'b': 0}
pre_sum = 'pre_syn',
post_sum = 'a * sum + b',
class Sigmoidal(BaseCoupling):
param = {'cmin': 0, 'cmax': 0.005, 'midpoint': 6, 'r': 1, 'a': 0.56}
pre_sum = 'cmax / (1 + exp(r * (midpoint - pre_syn)))',
post_sum = 'a * sum',
class Diff(Linear):
pre_sum = 'pre_syn - post_syn',
class Kuramoto(Diff):
pre_sum = 'sin(pre_syn - post_syn)',
post_sum = 'a * mean',
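# Illustrative sketch (not part of the original module): a hypothetical
# coupling written in the same style as the classes above. 'TanhDiff' and its
# 'scale' parameter are invented for illustration; because its post-summation
# expression refers to 'mean', post_stat(0) would return PostSumStat.mean.
class TanhDiff(Diff):
    param = {'a': 1e-3, 'scale': 2.0}
    pre_sum = 'tanh(scale * (pre_syn - post_syn))',
    post_sum = 'a * mean',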
| {
"content_hash": "f45035f3143d2f3ce2eee76738a4d5e8",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 79,
"avg_line_length": 30.89622641509434,
"alnum_prop": 0.6137404580152672,
"repo_name": "the-virtual-brain/tvb-hpc",
"id": "4163bf4ae16e849bd1e2be33219598be0693d3cb",
"size": "3900",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tvb_hpc/coupling.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "77071"
},
{
"name": "Cuda",
"bytes": "379"
},
{
"name": "Dockerfile",
"bytes": "916"
},
{
"name": "Python",
"bytes": "321186"
},
{
"name": "Shell",
"bytes": "602"
}
],
"symlink_target": ""
} |
import numpy as np
import random
import tensorflow as tf
import maddpg.common.tf_util as U
from maddpg.common.distributions import make_pdtype
from maddpg import AgentTrainer
from maddpg.trainer.replay_buffer import ReplayBuffer
def discount_with_dones(rewards, dones, gamma):
discounted = []
r = 0
for reward, done in zip(rewards[::-1], dones[::-1]):
r = reward + gamma*r
r = r*(1.-done)
discounted.append(r)
return discounted[::-1]
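# Worked example (illustrative, not part of the original file): with gamma=0.9,
# discount_with_dones([1.0, 0.0, 2.0], [0, 0, 0], 0.9) walks the rewards
# backwards and returns [2.62, 1.8, 2.0]; a done flag of 1 at a step zeroes the
# value stored for that step and stops the return from flowing to earlier steps.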
def make_update_exp(vals, target_vals):
polyak = 1.0 - 1e-2
expression = []
for var, var_target in zip(sorted(vals, key=lambda v: v.name), sorted(target_vals, key=lambda v: v.name)):
expression.append(var_target.assign(polyak * var_target + (1.0-polyak) * var))
expression = tf.group(*expression)
return U.function([], [], updates=[expression])
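# Note (illustrative): with polyak = 0.99 above, each call to the returned
# update function moves every target variable 1% of the way toward its source,
# i.e. target <- 0.99 * target + 0.01 * source.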
def p_train(make_obs_ph_n, act_space_n, p_index, p_func, q_func, optimizer, grad_norm_clipping=None, local_q_func=False, num_units=64, scope="trainer", reuse=None):
with tf.variable_scope(scope, reuse=reuse):
        # create distributions
act_pdtype_n = [make_pdtype(act_space) for act_space in act_space_n]
# set up placeholders
obs_ph_n = make_obs_ph_n
act_ph_n = [act_pdtype_n[i].sample_placeholder([None], name="action"+str(i)) for i in range(len(act_space_n))]
p_input = obs_ph_n[p_index]
p = p_func(p_input, int(act_pdtype_n[p_index].param_shape()[0]), scope="p_func", num_units=num_units)
p_func_vars = U.scope_vars(U.absolute_scope_name("p_func"))
# wrap parameters in distribution
act_pd = act_pdtype_n[p_index].pdfromflat(p)
act_sample = act_pd.sample()
p_reg = tf.reduce_mean(tf.square(act_pd.flatparam()))
act_input_n = act_ph_n + []
act_input_n[p_index] = act_pd.sample()
q_input = tf.concat(obs_ph_n + act_input_n, 1)
if local_q_func:
q_input = tf.concat([obs_ph_n[p_index], act_input_n[p_index]], 1)
q = q_func(q_input, 1, scope="q_func", reuse=True, num_units=num_units)[:,0]
pg_loss = -tf.reduce_mean(q)
loss = pg_loss + p_reg * 1e-3
optimize_expr = U.minimize_and_clip(optimizer, loss, p_func_vars, grad_norm_clipping)
# Create callable functions
train = U.function(inputs=obs_ph_n + act_ph_n, outputs=loss, updates=[optimize_expr])
act = U.function(inputs=[obs_ph_n[p_index]], outputs=act_sample)
p_values = U.function([obs_ph_n[p_index]], p)
# target network
target_p = p_func(p_input, int(act_pdtype_n[p_index].param_shape()[0]), scope="target_p_func", num_units=num_units)
target_p_func_vars = U.scope_vars(U.absolute_scope_name("target_p_func"))
update_target_p = make_update_exp(p_func_vars, target_p_func_vars)
target_act_sample = act_pdtype_n[p_index].pdfromflat(target_p).sample()
target_act = U.function(inputs=[obs_ph_n[p_index]], outputs=target_act_sample)
return act, train, update_target_p, {'p_values': p_values, 'target_act': target_act}
def q_train(make_obs_ph_n, act_space_n, q_index, q_func, optimizer, grad_norm_clipping=None, local_q_func=False, scope="trainer", reuse=None, num_units=64):
with tf.variable_scope(scope, reuse=reuse):
        # create distributions
act_pdtype_n = [make_pdtype(act_space) for act_space in act_space_n]
# set up placeholders
obs_ph_n = make_obs_ph_n
act_ph_n = [act_pdtype_n[i].sample_placeholder([None], name="action"+str(i)) for i in range(len(act_space_n))]
target_ph = tf.placeholder(tf.float32, [None], name="target")
q_input = tf.concat(obs_ph_n + act_ph_n, 1)
if local_q_func:
q_input = tf.concat([obs_ph_n[q_index], act_ph_n[q_index]], 1)
q = q_func(q_input, 1, scope="q_func", num_units=num_units)[:,0]
q_func_vars = U.scope_vars(U.absolute_scope_name("q_func"))
q_loss = tf.reduce_mean(tf.square(q - target_ph))
# viscosity solution to Bellman differential equation in place of an initial condition
q_reg = tf.reduce_mean(tf.square(q))
loss = q_loss #+ 1e-3 * q_reg
optimize_expr = U.minimize_and_clip(optimizer, loss, q_func_vars, grad_norm_clipping)
# Create callable functions
train = U.function(inputs=obs_ph_n + act_ph_n + [target_ph], outputs=loss, updates=[optimize_expr])
q_values = U.function(obs_ph_n + act_ph_n, q)
# target network
target_q = q_func(q_input, 1, scope="target_q_func", num_units=num_units)[:,0]
target_q_func_vars = U.scope_vars(U.absolute_scope_name("target_q_func"))
update_target_q = make_update_exp(q_func_vars, target_q_func_vars)
target_q_values = U.function(obs_ph_n + act_ph_n, target_q)
return train, update_target_q, {'q_values': q_values, 'target_q_values': target_q_values}
class MADDPGAgentTrainer(AgentTrainer):
def __init__(self, name, model, obs_shape_n, act_space_n, agent_index, args, local_q_func=False):
self.name = name
self.n = len(obs_shape_n)
self.agent_index = agent_index
self.args = args
obs_ph_n = []
for i in range(self.n):
obs_ph_n.append(U.BatchInput(obs_shape_n[i], name="observation"+str(i)).get())
# Create all the functions necessary to train the model
self.q_train, self.q_update, self.q_debug = q_train(
scope=self.name,
make_obs_ph_n=obs_ph_n,
act_space_n=act_space_n,
q_index=agent_index,
q_func=model,
optimizer=tf.train.AdamOptimizer(learning_rate=args.lr),
grad_norm_clipping=0.5,
local_q_func=local_q_func,
num_units=args.num_units
)
self.act, self.p_train, self.p_update, self.p_debug = p_train(
scope=self.name,
make_obs_ph_n=obs_ph_n,
act_space_n=act_space_n,
p_index=agent_index,
p_func=model,
q_func=model,
optimizer=tf.train.AdamOptimizer(learning_rate=args.lr),
grad_norm_clipping=0.5,
local_q_func=local_q_func,
num_units=args.num_units
)
# Create experience buffer
self.replay_buffer = ReplayBuffer(1e6)
self.max_replay_buffer_len = args.batch_size * args.max_episode_len
self.replay_sample_index = None
def action(self, obs):
return self.act(obs[None])[0]
def experience(self, obs, act, rew, new_obs, done, terminal):
# Store transition in the replay buffer.
self.replay_buffer.add(obs, act, rew, new_obs, float(done))
def preupdate(self):
self.replay_sample_index = None
def update(self, agents, t):
if len(self.replay_buffer) < self.max_replay_buffer_len: # replay buffer is not large enough
return
if not t % 100 == 0: # only update every 100 steps
return
self.replay_sample_index = self.replay_buffer.make_index(self.args.batch_size)
# collect replay sample from all agents
obs_n = []
obs_next_n = []
act_n = []
index = self.replay_sample_index
for i in range(self.n):
obs, act, rew, obs_next, done = agents[i].replay_buffer.sample_index(index)
obs_n.append(obs)
obs_next_n.append(obs_next)
act_n.append(act)
obs, act, rew, obs_next, done = self.replay_buffer.sample_index(index)
# train q network
num_sample = 1
target_q = 0.0
for i in range(num_sample):
target_act_next_n = [agents[i].p_debug['target_act'](obs_next_n[i]) for i in range(self.n)]
target_q_next = self.q_debug['target_q_values'](*(obs_next_n + target_act_next_n))
target_q += rew + self.args.gamma * (1.0 - done) * target_q_next
target_q /= num_sample
q_loss = self.q_train(*(obs_n + act_n + [target_q]))
# train p network
p_loss = self.p_train(*(obs_n + act_n))
self.p_update()
self.q_update()
return [q_loss, p_loss, np.mean(target_q), np.mean(rew), np.mean(target_q_next), np.std(target_q)]
| {
"content_hash": "d477ec3369479bb2559b2cae08293eeb",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 164,
"avg_line_length": 42.12244897959184,
"alnum_prop": 0.6028343023255814,
"repo_name": "openai/maddpg",
"id": "a3f5de65aadfc2fffe6793d5749be5be520b5e74",
"size": "8256",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "maddpg/trainer/maddpg.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44954"
}
],
"symlink_target": ""
} |
"""
Very simple script to replace a template with another one.
It also converts the old MediaWiki boilerplate format to the new template format.
Syntax: python template.py [-remove] [xml[:filename]] oldTemplate [newTemplate]
Specify the template on the command line. The program will pick up the template
page, and look for all pages using it. It will then automatically loop over
them, and replace the template.
Command line options:
-remove Remove every occurrence of the template from every article
-subst Resolves the template by putting its text directly into the
article. This is done by changing {{...}} or {{msg:...}} into
{{subst:...}}
-assubst Replaces the first argument as old template with the second
argument as new template but substitutes it like -subst does.
Using both options -remove and -subst in the same command line has
the same effect.
-xml retrieve information from a local dump
(https://download.wikimedia.org). If this argument isn't given,
info will be loaded from the maintenance page of the live wiki.
argument can also be given as "-xml:filename.xml".
-user: Only process pages edited by a given user
-skipuser: Only process pages not edited by a given user
-timestamp: (With -user or -skipuser). Only check edits by the given user that
            are not older than the given timestamp. The timestamp must be
            written in MediaWiki timestamp format, which is "%Y%m%d%H%M%S".
            If this parameter is omitted, all edits are checked, but this is
            restricted to the last 100 edits.
-summary: Lets you pick a custom edit summary. Use quotes if edit summary
contains spaces.
-always Don't bother asking to confirm any of the changes, Just Do It.
-category: Appends the given category to every page that is edited. This is
useful when a category is being broken out from a template
parameter or when templates are being upmerged but more information
must be preserved.
other: First argument is the old template name, second one is the new
name.
If you want to address a template which has spaces, put quotation
marks around it, or use underscores.
Examples:
If you have a template called [[Template:Cities in Washington]] and want to
change it to [[Template:Cities in Washington state]], start
python template.py "Cities in Washington" "Cities in Washington state"
Move the page [[Template:Cities in Washington]] manually afterwards.
If you have a template called [[Template:test]] and want to substitute it only
on pages in the User: and User talk: namespaces, do:
python template.py test -subst -namespace:2 -namespace:3
Note that -namespace: is a global Pywikibot parameter
This next example substitutes the template lived with a supplied edit summary.
It only performs substitutions in main article namespace and doesn't prompt to
start replacing. Note that -putthrottle: is a global Pywikibot parameter.
python template.py -putthrottle:30 -namespace:0 lived -subst -always
-summary:"BOT: Substituting {{lived}}, see [[WP:SUBST]]."
This next example removes the templates {{cfr}}, {{cfru}}, and {{cfr-speedy}}
from five category pages as given:
python template.py cfr cfru cfr-speedy -remove -always
-page:"Category:Mountain monuments and memorials" -page:"Category:Indian family names"
-page:"Category:Tennis tournaments in Belgium" -page:"Category:Tennis tournaments in Germany"
-page:"Category:Episcopal cathedrals in the United States"
-summary:"Removing Cfd templates from category pages that survived."
This next example substitutes templates test1, test2, and space test on all
pages:
python template.py test1 test2 "space test" -subst -always
"""
#
# (C) Daniel Herding, 2004
# (C) Rob W.W. Hooft, 2003-2005
# (C) xqt, 2009-2015
# (C) Pywikibot team, 2004-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id: 8b0931414751d7bb6753392d7923e192bfa08b2c $'
#
import re
import pywikibot
from pywikibot import i18n, pagegenerators, xmlreader, Bot
from scripts.replace import ReplaceRobot as ReplaceBot
def UserEditFilterGenerator(generator, username, timestamp=None, skip=False,
max_revision_depth=None):
"""
Generator which will yield Pages modified by username.
It only looks at the last editors given by max_revision_depth.
If timestamp is set in MediaWiki format JJJJMMDDhhmmss, older edits are
ignored.
If skip is set, pages edited by the given user are ignored otherwise only
pages edited by this user are given back.
"""
if timestamp:
ts = pywikibot.Timestamp.fromtimestampformat(timestamp)
else:
ts = pywikibot.Timestamp.min
for page in generator:
found = False
for ed in page.revisions(total=max_revision_depth):
if ed.timestamp >= ts:
if username == ed.user:
found = True
break
else:
break
if found != bool(skip): # xor operation
yield page
else:
pywikibot.output(u'Skipping %s' % page.title(asLink=True))
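# Illustrative use (hypothetical values): keep only pages that 'ExampleUser'
# edited on or after 1 January 2015, looking at most 100 revisions back:
# gen = UserEditFilterGenerator(gen, u'ExampleUser',
#                               timestamp='20150101000000',
#                               max_revision_depth=100)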
class XmlDumpTemplatePageGenerator(object):
"""
Generator which yields Pages that transclude a template.
These pages will be retrieved from a local XML dump file
(cur table), and may not still transclude the template.
"""
def __init__(self, templates, xmlfilename):
"""
Constructor.
Arguments:
        * templates - A list of Page objects representing the searched
templates
* xmlfilename - The dump's path, either absolute or relative
"""
self.templates = templates
self.xmlfilename = xmlfilename
def __iter__(self):
"""Yield page objects until the entire XML dump has been read."""
mysite = pywikibot.Site()
dump = xmlreader.XmlDump(self.xmlfilename)
# regular expression to find the original template.
# {{vfd}} does the same thing as {{Vfd}}, so both will be found.
# The old syntax, {{msg:vfd}}, will also be found.
templatePatterns = []
for template in self.templates:
templatePattern = template.title(withNamespace=False)
if mysite.namespaces[10].case == 'first-letter':
templatePattern = '[%s%s]%s' % (templatePattern[0].upper(),
templatePattern[0].lower(),
templatePattern[1:])
templatePattern = re.sub(' ', '[_ ]', templatePattern)
templatePatterns.append(templatePattern)
templateRegex = re.compile(
r'\{\{ *([mM][sS][gG]:)?(?:%s) *(?P<parameters>\|[^}]+|) *}}'
% '|'.join(templatePatterns))
for entry in dump.parse():
if templateRegex.search(entry.text):
page = pywikibot.Page(mysite, entry.title)
yield page
class TemplateRobot(ReplaceBot):
"""This bot will replace, remove or subst all occurrences of a template."""
def __init__(self, generator, templates, **kwargs):
"""
Constructor.
@param generator: the pages to work on
@type generator: iterable
@param templates: a dictionary which maps old template names to
their replacements. If remove or subst is True, it maps the
names of the templates that should be removed/resolved to None.
@type templates: dict
"""
self.availableOptions.update({
'subst': False,
'remove': False,
'summary': None,
'addedCat': None,
})
Bot.__init__(self, generator=generator, **kwargs)
self.templates = templates
# get edit summary message if it's empty
if not self.getOption('summary'):
comma = self.site.mediawiki_message('comma-separator')
params = {'list': comma.join(self.templates.keys()),
'num': len(self.templates)}
site = self.site
if self.getOption('remove'):
self.options['summary'] = i18n.twntranslate(
site, 'template-removing', params)
elif self.getOption('subst'):
self.options['summary'] = i18n.twntranslate(
site, 'template-substituting', params)
else:
self.options['summary'] = i18n.twntranslate(
site, 'template-changing', params)
# regular expression to find the original template.
# {{vfd}} does the same thing as {{Vfd}}, so both will be found.
# The old syntax, {{msg:vfd}}, will also be found.
# The group 'parameters' will either match the parameters, or an
# empty string if there are none.
replacements = []
exceptions = {}
namespace = self.site.namespaces[10]
for old, new in self.templates.items():
if namespace.case == 'first-letter':
pattern = '[' + \
re.escape(old[0].upper()) + \
re.escape(old[0].lower()) + \
']' + re.escape(old[1:])
else:
pattern = re.escape(old)
pattern = re.sub(r'_|\\ ', r'[_ ]', pattern)
templateRegex = re.compile(r'\{\{ *(' + ':|'.join(namespace) +
r':|[mM][sS][gG]:)?' + pattern +
r'(?P<parameters>\s*\|.+?|) *}}',
re.DOTALL)
if self.getOption('subst') and self.getOption('remove'):
replacements.append((templateRegex,
r'{{subst:%s\g<parameters>}}' % new))
exceptions['inside-tags'] = ['ref', 'gallery']
elif self.getOption('subst'):
replacements.append((templateRegex,
r'{{subst:%s\g<parameters>}}' % old))
exceptions['inside-tags'] = ['ref', 'gallery']
elif self.getOption('remove'):
replacements.append((templateRegex, ''))
else:
template = pywikibot.Page(self.site, new, ns=10)
if not template.exists():
pywikibot.warning(u'Template "%s" does not exist.' % new)
if not pywikibot.input_yn('Do you want to proceed anyway?',
default=False, automatic_quit=False):
continue
replacements.append((templateRegex,
r'{{%s\g<parameters>}}' % new))
super(TemplateRobot, self).__init__(
generator, replacements, exceptions,
always=self.getOption('always'),
addedCat=self.getOption('addedCat'),
summary=self.getOption('summary'))
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
templateNames = []
templates = {}
options = {}
# If xmlfilename is None, references will be loaded from the live wiki.
xmlfilename = None
user = None
skip = False
timestamp = None
# read command line parameters
local_args = pywikibot.handle_args(args)
site = pywikibot.Site()
genFactory = pagegenerators.GeneratorFactory()
for arg in local_args:
if arg == '-remove':
options['remove'] = True
elif arg == '-subst':
options['subst'] = True
elif arg == '-assubst':
options['subst'] = options['remove'] = True
elif arg == '-always':
options['always'] = True
elif arg.startswith('-xml'):
if len(arg) == 4:
xmlfilename = pywikibot.input(
u'Please enter the XML dump\'s filename: ')
else:
xmlfilename = arg[5:]
elif arg.startswith('-category:'):
options['addedCat'] = arg[len('-category:'):]
elif arg.startswith('-summary:'):
options['summary'] = arg[len('-summary:'):]
elif arg.startswith('-user:'):
user = arg[len('-user:'):]
elif arg.startswith('-skipuser:'):
user = arg[len('-skipuser:'):]
skip = True
elif arg.startswith('-timestamp:'):
timestamp = arg[len('-timestamp:'):]
else:
if not genFactory.handleArg(arg):
templateName = pywikibot.Page(site, arg, ns=10)
templateNames.append(templateName.title(withNamespace=False))
if not templateNames:
pywikibot.showHelp()
return
if options.get('subst', False) ^ options.get('remove', False):
for templateName in templateNames:
templates[templateName] = None
else:
try:
for i in range(0, len(templateNames), 2):
templates[templateNames[i]] = templateNames[i + 1]
except IndexError:
pywikibot.output('Unless using solely -subst or -remove, '
'you must give an even number of template names.')
return
oldTemplates = []
for templateName in templates.keys():
oldTemplate = pywikibot.Page(site, templateName, ns=10)
oldTemplates.append(oldTemplate)
if xmlfilename:
gen = XmlDumpTemplatePageGenerator(oldTemplates, xmlfilename)
else:
gen = genFactory.getCombinedGenerator()
if not gen:
gens = [
pagegenerators.ReferringPageGenerator(t, onlyTemplateInclusion=True)
for t in oldTemplates
]
gen = pagegenerators.CombinedPageGenerator(gens)
gen = pagegenerators.DuplicateFilterPageGenerator(gen)
if user:
gen = UserEditFilterGenerator(gen, user, timestamp, skip,
max_revision_depth=100)
if not genFactory.gens:
# make sure that proper namespace filtering etc. is handled
gen = genFactory.getCombinedGenerator(gen)
preloadingGen = pagegenerators.PreloadingGenerator(gen)
bot = TemplateRobot(preloadingGen, templates, **options)
bot.run()
if __name__ == "__main__":
try:
main()
except Exception:
pywikibot.error("Fatal error:", exc_info=True)
| {
"content_hash": "a3c1dd220cf5380143b2dac97b8b3db6",
"timestamp": "",
"source": "github",
"line_count": 393,
"max_line_length": 101,
"avg_line_length": 37.70992366412214,
"alnum_prop": 0.5936572199730095,
"repo_name": "hperala/kontuwikibot",
"id": "f7c520c4b1e30615c4c753aafad6f1e6445cf859",
"size": "14862",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/template.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "97"
},
{
"name": "C",
"bytes": "137889"
},
{
"name": "C++",
"bytes": "4113"
},
{
"name": "Python",
"bytes": "3758566"
}
],
"symlink_target": ""
} |
from pentagon.component import ComponentBase
import os
class Component(ComponentBase):
_path = os.path.dirname(__file__)
| {
"content_hash": "3c11a37a95cb542fe1e20c9f1aa3e800",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 44,
"avg_line_length": 21.166666666666668,
"alnum_prop": 0.7559055118110236,
"repo_name": "reactiveops/pentagon",
"id": "ec618c1aebf2a65c482dead1c5c3ebfccf0b7c40",
"size": "127",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example-component/pentagon_component/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "684"
},
{
"name": "HCL",
"bytes": "523"
},
{
"name": "HTML",
"bytes": "20948"
},
{
"name": "Python",
"bytes": "185329"
},
{
"name": "Shell",
"bytes": "2098"
}
],
"symlink_target": ""
} |
import urllib
from pymongo import MongoClient
from pymongo import ReadPreference
from pymongo import monitoring
from heisen.config import settings
from heisen.core.log import logger
class CommandLogger(monitoring.CommandListener):
def __init__(self, *args, **kwargs):
self.log_query = kwargs.pop('log_query', True)
self.log_query_data = kwargs.pop('log_query_data', False)
self.log_results = kwargs.pop('log_results', False)
super(CommandLogger, self).__init__(*args, **kwargs)
def started(self, event):
try:
command = event.command.to_dict()
except AttributeError:
command = event.command
collection = command.pop(event.command_name, '')
command_string = ''
if self.log_query_data:
command_string = ', Command: {}'.format(command)
string = '[Running] ID {} {}:{} {}.{}.{}{}'.format(
event.operation_id,
event.connection_id[0], event.connection_id[1],
event.database_name, collection,
event.command_name, command_string
)
logger.db(string)
def succeeded(self, event):
replay_string = ''
if self.log_results:
replay_string = ', Reply: {}'.format(event.reply)
string = '[Success] ID {} {}:{} {}, Time: {}{}'.format(
event.operation_id,
event.connection_id[0], event.connection_id[1],
event.command_name, event.duration_micros, replay_string
)
logger.db(string)
def failed(self, event):
string = '[Failed] ID {} {}:{} {}, Time: {}, Failure: {}'.format(
event.operation_id,
event.connection_id[0], event.connection_id[1],
event.command_name, event.duration_micros, event.failure
)
logger.db(string)
class MongoConnection(object):
__db = None
def __init__(self, config_name):
logger.db('Creating cursor instance for {} db'.format(config_name))
self.db_settings = settings.DATABASES[config_name]
self.get_connection()
self.ensure_indexes()
def get_connection(self):
if self.db_settings.get('log_query', False):
monitoring.register(CommandLogger(
log_query_data=self.db_settings.pop('log_query_data', False),
log_results=self.db_settings.pop('log_results', False)
))
self.__db = MongoClient(
self.connection_string,
serverSelectionTimeoutMS=6000, maxPoolSize=None,
read_preference=ReadPreference.NEAREST, connect=False
)[self.db_settings['db']]
return self.__db
def get_cursor(self):
return self.__db
@property
def connection_string(self):
try:
password = urllib.quote_plus(self.db_settings['password'])
auth = '{0}:{1}@'.format(
self.db_settings['user'], password
)
except KeyError:
auth = ''
try:
address = self.db_settings['balancing']
except KeyError:
address = '{0}:{1}'.format(self.db_settings['host'], self.db_settings['port'])
connection_string = 'mongodb://{}{}'.format(auth, address)
return connection_string
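    # Illustrative result (hypothetical settings): with user 'app', password
    # 'p@ss:w', host 'db1' and port 27017, the property yields
    # 'mongodb://app:p%40ss%3Aw@db1:27017', since the password is
    # percent-encoded by urllib.quote_plus before being embedded.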
def ensure_indexes(self):
pass
class MongoDatabases(object):
def __init__(self):
for database in settings.DATABASES.keys():
setattr(self, database, MongoConnection(database).get_cursor())
| {
"content_hash": "2efa3cab1d812e210e51c5abedfeedc5",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 90,
"avg_line_length": 30.973684210526315,
"alnum_prop": 0.5817048994619088,
"repo_name": "HeisenCore/heisen",
"id": "20435789708ec671ebacb55f18f3b21dc7c32d7c",
"size": "3531",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heisen/core/db/mongodb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2291"
},
{
"name": "Python",
"bytes": "69423"
}
],
"symlink_target": ""
} |
import os
import logging
from pyramid.config import Configurator
from pyramid.session import SignedCookieSessionFactory
from pyramid.view import view_config
from waitress import serve
import psycopg2
from contextlib import closing
from pyramid.events import NewRequest, subscriber
import datetime
from pyramid.httpexceptions import HTTPFound, HTTPForbidden, HTTPInternalServerError
from pyramid.authentication import AuthTktAuthenticationPolicy
from pyramid.authorization import ACLAuthorizationPolicy
from cryptacular.bcrypt import BCRYPTPasswordManager
from pyramid.security import remember, forget
import markdown
import pygments
here = os.path.dirname(os.path.abspath(__file__))
DB_SCHEMA = """
CREATE TABLE IF NOT EXISTS entries (
id serial PRIMARY KEY,
title VARCHAR (127) NOT NULL,
text TEXT NOT NULL,
created TIMESTAMP NOT NULL
)
"""
INSERT_ENTRY = """INSERT INTO entries (title, text, created) VALUES (%s, %s, %s)
"""
DB_ENTRIES_LIST = """SELECT id, title, text, created FROM entries ORDER BY created DESC
"""
DB_ENTRY = """SELECT * FROM entries WHERE id=%s
"""
UPDATE_ENTRY = """UPDATE entries SET title=%s, text=%s WHERE id=%s
"""
logging.basicConfig()
log = logging.getLogger(__file__)
def connect_db(settings):
"""Return a connection to the configured database"""
return psycopg2.connect(settings['db'])
def init_db():
"""Create database dables defined by DB_SCHEMA
Warning: This function will not update existing table definitions
"""
settings = {}
settings['db'] = os.environ.get(
'DATABASE_URL', 'dbname=learning_journal user=aabulota'
)
with closing(connect_db(settings)) as db:
db.cursor().execute(DB_SCHEMA)
db.commit()
@subscriber(NewRequest)
def open_connection(event):
request = event.request
settings = request.registry.settings
request.db = connect_db(settings)
request.add_finished_callback(close_connection)
def close_connection(request):
"""close the database connection for this request
If there has been an error in the processing of the request, abort any
open transactions.
"""
db = getattr(request, 'db', None)
if db is not None:
if request.exception is not None:
db.rollback()
else:
db.commit()
request.db.close()
def main():
"""Create a configured wsgi app"""
settings = {}
settings['reload_all'] = os.environ.get('DEBUG', True)
settings['debug_all'] = os.environ.get('DEBUG', True)
settings['db'] = os.environ.get(
'DATABASE_URL', 'dbname=learning_journal user=aabulota'
)
settings['auth.username'] = os.environ.get('AUTH_USERNAME', 'admin')
manager = BCRYPTPasswordManager()
settings['auth.password'] = os.environ.get(
'AUTH_PASSWORD', manager.encode('secret'))
# secret value for session signing:
secret = os.environ.get('JOURNAL_SESSION_SECRET', 'itsaseekrit')
session_factory = SignedCookieSessionFactory(secret)
# add a secret value for auth tkt signing
auth_secret = os.environ.get('JOURNAL_AUTH_SECRET', 'anotherseekrit')
# configuration setup
config = Configurator(
settings=settings,
session_factory=session_factory,
authentication_policy=AuthTktAuthenticationPolicy(
secret=auth_secret,
hashalg='sha512'
),
authorization_policy=ACLAuthorizationPolicy(),
)
config.include('pyramid_jinja2')
config.add_static_view('static', os.path.join(here, 'static'))
config.add_route('home', '/')
config.add_route('add', '/add')
config.add_route('login', '/login')
config.add_route('logout', '/logout')
config.add_route('detail', '/detail/{id}')
config.add_route('editview', '/editview/{id}')
config.scan()
app = config.make_wsgi_app()
return app
def write_entry(request):
"""write a single entry to the database"""
title = request.params.get('title', None)
text = request.params.get('text', None)
created = datetime.datetime.utcnow()
request.db.cursor().execute(INSERT_ENTRY, [title, text, created])
def edit_entry(request):
"""write a single entry to the database"""
title = request.params.get('title', None)
text = request.params.get('text', None)
id = request.matchdict['id']
# created = datetime.datetime.utcnow()
print(request.matchdict['id'])
request.db.cursor().execute(UPDATE_ENTRY, [title, text, id])
@view_config(route_name='home', renderer='templates/list.jinja2')
def read_entries(request):
"""return a list of all entries as dicts"""
cursor = request.db.cursor()
cursor.execute(DB_ENTRIES_LIST)
keys = ('id', 'title', 'text', 'created')
entries = [dict(zip(keys, row)) for row in cursor.fetchall()]
# import pdb; pdb.set_trace()
for item in entries:
item['text'] = markdown.markdown(
item['text'], extensions=['codehilite', 'fenced_code'])
return {'entries': entries}
@view_config(route_name='detail', renderer='templates/detail.jinja2')
def read_entry(request):
"""return a list of one entry as a dict"""
cursor = request.db.cursor()
cursor.execute(DB_ENTRY, (request.matchdict['id'], ))
keys = ('id', 'title', 'text', 'created')
row = cursor.fetchone()
entry = dict(zip(keys, row))
entry['text'] = markdown.markdown(
entry['text'], extensions=['codehilite', 'fenced_code'])
return {'entry': entry}
@view_config(route_name='editview', renderer='templates/edit.jinja2')
def editview_entry(request):
"""return a list of all entries as dicts"""
if request.authenticated_userid:
cursor = request.db.cursor()
cursor.execute(DB_ENTRY, (request.matchdict['id'], ))
keys = ('id', 'title', 'text', 'created')
entries = [dict(zip(keys, row)) for row in cursor.fetchall()]
if request.method == 'POST':
try:
edit_entry(request)
except psycopg2.Error:
# this will catch any errors generated by the database
return HTTPInternalServerError()
return HTTPFound(request.route_url('home'))
else:
return HTTPForbidden()
return {'entries': entries}
@view_config(route_name='add', request_method='POST')
def add_entry(request):
if request.authenticated_userid:
try:
write_entry(request)
except psycopg2.Error:
# this will catch any errors generated by the database
return HTTPInternalServerError()
else:
return HTTPForbidden()
return HTTPFound(request.route_url('home'))
def do_login(request):
username = request.params.get('username', None)
password = request.params.get('password', None)
if not (username and password):
raise ValueError('both username and password are required')
settings = request.registry.settings
manager = BCRYPTPasswordManager()
if username == settings.get('auth.username', ''):
hashed = settings.get('auth.password', '')
return manager.check(hashed, password)
@view_config(route_name='login', renderer="templates/login.jinja2")
def login(request):
"""authenticate a user by username/password"""
username = request.params.get('username', '')
error = ''
if request.method == 'POST':
error = "Login Failed"
authenticated = False
try:
authenticated = do_login(request)
except ValueError as e:
error = str(e)
if authenticated:
headers = remember(request, username)
return HTTPFound(request.route_url('home'), headers=headers)
return {'error': error, 'username': username}
@view_config(route_name='logout')
def logout(request):
headers = forget(request)
return HTTPFound(request.route_url('home'), headers=headers)
if __name__ == '__main__':
app = main()
port = os.environ.get('PORT', 5000)
serve(app, host='0.0.0.0', port=port)
| {
"content_hash": "61dadf7b51404c8a14704cac8b45c0a7",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 87,
"avg_line_length": 31.968,
"alnum_prop": 0.6584084084084084,
"repo_name": "alibulota/learning_journal",
"id": "8ee4bf8c28295ca63804dabc7d431daa3265f5d4",
"size": "8016",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "journal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4629"
},
{
"name": "HTML",
"bytes": "6143"
},
{
"name": "JavaScript",
"bytes": "2713"
},
{
"name": "Python",
"bytes": "24571"
}
],
"symlink_target": ""
} |
from models import burgers
from bcs import periodic
from simulation import simulation
from methods import weno3_lf
from rk import rk3
from grid import grid
from matplotlib import pyplot
Ngz = 3
Npoints = 400
interval = grid([-1, 1], Npoints, Ngz)
model = burgers.burgers(initial_data = burgers.initial_square())
sim = simulation(model, interval, weno3_lf, rk3, periodic)
sim.evolve(0.5)
sim.plot_scalar_vs_initial()
pyplot.show()
| {
"content_hash": "f559a935fa78621e81b9a734ecc4f944",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 64,
"avg_line_length": 24.22222222222222,
"alnum_prop": 0.7614678899082569,
"repo_name": "IanHawke/toy-evolve",
"id": "83c7daa5b9cfae0aa97d75766f00f0b039ff2cff",
"size": "472",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toy-evolve/burgers_square_weno3_upwind_rk3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39557"
}
],
"symlink_target": ""
} |
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['None'] , ['ConstantTrend'] , ['Seasonal_DayOfWeek'] , ['AR'] ); | {
"content_hash": "9717c29fc84ad19c71a44d3f86cf99a9",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 86,
"avg_line_length": 39.75,
"alnum_prop": 0.710691823899371,
"repo_name": "antoinecarme/pyaf",
"id": "b6a28ce7306e5b26e28a3e353b008b3d21cf434c",
"size": "159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_None/model_control_one_enabled_None_ConstantTrend_Seasonal_DayOfWeek_AR.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
import unittest
import numpy as np
from parameterized import parameterized
from monai.apps.pathology.transforms.post.dictionary import GenerateInstanceBorderd
from tests.utils import TEST_NDARRAYS
EXCEPTION_TESTS = []
TESTS = []
np.random.seed(123)
for p in TEST_NDARRAYS:
EXCEPTION_TESTS.append(
[
{"keys": "mask", "kernel_size": 3, "remove_small_objects": True, "min_size": 10},
p(np.random.rand(1, 5, 5, 5)),
p(np.random.rand(2, 5, 5)),
ValueError,
]
)
EXCEPTION_TESTS.append(
[
{"keys": "mask", "kernel_size": 3, "remove_small_objects": True, "min_size": 10},
p(np.random.rand(1, 5, 5)),
p(np.random.rand(1, 5, 5)),
ValueError,
]
)
EXCEPTION_TESTS.append(
[
{"keys": "mask", "kernel_size": 3, "remove_small_objects": True, "min_size": 10},
p(np.random.rand(2, 5, 5)),
p(np.random.rand(2, 5, 5)),
ValueError,
]
)
for p in TEST_NDARRAYS:
TESTS.append(
[
{"keys": "mask", "kernel_size": 3, "remove_small_objects": False, "min_size": 10},
p(np.random.rand(1, 5, 5)),
p(np.random.rand(2, 5, 5)),
(1, 5, 5),
]
)
TESTS.append(
[
{"keys": "mask", "kernel_size": 3, "remove_small_objects": True, "min_size": 10},
p(np.random.rand(1, 5, 5)),
p(np.random.rand(2, 5, 5)),
(1, 5, 5),
]
)
class TestGenerateInstanceBorderd(unittest.TestCase):
@parameterized.expand(EXCEPTION_TESTS)
def test_value(self, argments, mask, hover_map, exception_type):
with self.assertRaises(exception_type):
GenerateInstanceBorderd(**argments)({"mask": mask, "hover_map": hover_map})
@parameterized.expand(TESTS)
def test_value2(self, argments, mask, hover_map, expected_shape):
result = GenerateInstanceBorderd(**argments)({"mask": mask, "hover_map": hover_map})
self.assertEqual(result["border"].shape, expected_shape)
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "63b3af69a324f8d935103cee1932b755",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 94,
"avg_line_length": 29.31081081081081,
"alnum_prop": 0.5527893038266483,
"repo_name": "Project-MONAI/MONAI",
"id": "a4ee5221a64835d2ba7e5e09f92d5db97db9d8ea",
"size": "2743",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/test_generate_instance_borderd.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "15956"
},
{
"name": "C++",
"bytes": "189648"
},
{
"name": "Cuda",
"bytes": "154905"
},
{
"name": "Dockerfile",
"bytes": "2454"
},
{
"name": "Python",
"bytes": "7209898"
},
{
"name": "Shell",
"bytes": "20587"
}
],
"symlink_target": ""
} |
from rest_framework import serializers
class SudoStatusSerializer(serializers.Serializer):
valid = serializers.BooleanField()
expires_at = serializers.DateTimeField(default=None)
class SudoRenewSerializer(serializers.Serializer):
password = serializers.CharField(required=True)
| {
"content_hash": "3b8d4c74998d79fb0aa024d7fef0d771",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 56,
"avg_line_length": 29.4,
"alnum_prop": 0.8061224489795918,
"repo_name": "sparcs-kaist/sparcssso",
"id": "cab07b0b77e0191f2df22a4bd4ab83259583dcc0",
"size": "294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/web/serializers/sudo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4786"
},
{
"name": "Dockerfile",
"bytes": "1252"
},
{
"name": "Go",
"bytes": "3020"
},
{
"name": "HTML",
"bytes": "83809"
},
{
"name": "JavaScript",
"bytes": "22361"
},
{
"name": "Makefile",
"bytes": "77"
},
{
"name": "Python",
"bytes": "172445"
},
{
"name": "Shell",
"bytes": "326"
},
{
"name": "TypeScript",
"bytes": "7039"
}
],
"symlink_target": ""
} |
import typing
from flask import json
FlaskHeaders = typing.Union[typing.List[typing.Tuple[str, str]], typing.Dict[str, str]]
FlaskResponse = typing.Tuple[str, int, FlaskHeaders]
def success(data, status=200) -> FlaskResponse:
return json.dumps(data, indent=2), status, [("Content-Type", "application/json")]
def failure(message, status=400) -> FlaskResponse:
return json.dumps({"errors": ([message]\
if hasattr(message, 'strip') else message if hasattr(message, 'split')\
else repr(message))}, indent=2),\
status,\
[("Content-Type", "application/json")]
| {
"content_hash": "a0c6ecd39c91951468113526d2e32ebb",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 87,
"avg_line_length": 34,
"alnum_prop": 0.6650326797385621,
"repo_name": "nanobox-io/nanobox-adapter-libcloud",
"id": "2ae19d9f700aec11b84d59742166f45a0dbe4d79",
"size": "612",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nanobox_libcloud/utils/output.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "580"
},
{
"name": "HTML",
"bytes": "5287"
},
{
"name": "Python",
"bytes": "107450"
},
{
"name": "Shell",
"bytes": "67"
}
],
"symlink_target": ""
} |
'''Convenient data files for use in other modules.'''
from . import molecular_bio
from . import genbank
from .restriction_sites import fallback_enzymes
| {
"content_hash": "f22761d04a47116c6cf4156f3c4f1d32",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 53,
"avg_line_length": 38,
"alnum_prop": 0.7828947368421053,
"repo_name": "klavinslab/coral",
"id": "949873b908204e931bbd6a9df283a6d34bd8fb58",
"size": "152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "coral/constants/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "424580"
},
{
"name": "Shell",
"bytes": "1681"
}
],
"symlink_target": ""
} |
import graphene
from ...page import models
from ..utils import filter_by_query_param, sort_queryset
from .sorters import PageSortField
PAGE_SEARCH_FIELDS = ("content", "slug", "title")
def resolve_page(info, global_page_id=None, slug=None):
assert global_page_id or slug, "No page ID or slug provided."
user = info.context.user
if slug is not None:
page = models.Page.objects.visible_to_user(user).filter(slug=slug).first()
else:
_type, page_pk = graphene.Node.from_global_id(global_page_id)
page = models.Page.objects.visible_to_user(user).filter(pk=page_pk).first()
return page
def resolve_pages(info, query, sort_by=None, **_kwargs):
user = info.context.user
qs = models.Page.objects.visible_to_user(user)
qs = sort_queryset(qs, sort_by, PageSortField)
return filter_by_query_param(qs, query, PAGE_SEARCH_FIELDS)
| {
"content_hash": "2aea7e2a408dd2bfb578a38e05ed1152",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 83,
"avg_line_length": 33.96153846153846,
"alnum_prop": 0.695356738391846,
"repo_name": "maferelo/saleor",
"id": "42cde649091c078498ef5ea8379a57f1152190a6",
"size": "883",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saleor/graphql/page/resolvers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "64217"
},
{
"name": "HTML",
"bytes": "394723"
},
{
"name": "JavaScript",
"bytes": "61157"
},
{
"name": "Python",
"bytes": "585270"
}
],
"symlink_target": ""
} |
import contextlib
import uuid
import mock
import webob
from nova.api.openstack.compute.contrib import floating_ips as fips_v2
from nova.api.openstack.compute.plugins.v3 import floating_ips as fips_v21
from nova.api.openstack import extensions
from nova import compute
from nova.compute import utils as compute_utils
from nova import context
from nova import db
from nova import exception
from nova import network
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_network
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
TEST_INST = 1
WRONG_INST = 9999
def network_api_get_floating_ip(self, context, id):
return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
'fixed_ip_id': None}
def network_api_get_floating_ip_by_address(self, context, address):
return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
'fixed_ip_id': 10}
def network_api_get_floating_ips_by_project(self, context):
return [{'id': 1,
'address': '10.10.10.10',
'pool': 'nova',
'fixed_ip': {'address': '10.0.0.1',
'instance_uuid': FAKE_UUID,
'instance': objects.Instance(
**{'uuid': FAKE_UUID})}},
{'id': 2,
'pool': 'nova', 'interface': 'eth0',
'address': '10.10.10.11',
'fixed_ip': None}]
def compute_api_get(self, context, instance_id, expected_attrs=None,
want_objects=False):
return objects.Instance(uuid=FAKE_UUID, id=instance_id,
instance_type_id=1, host='bob')
def network_api_allocate(self, context):
return '10.10.10.10'
def network_api_release(self, context, address):
pass
def compute_api_associate(self, context, instance_id, address):
pass
def network_api_associate(self, context, floating_address, fixed_address):
pass
def network_api_disassociate(self, context, instance, floating_address):
pass
def fake_instance_get(context, instance_id):
return objects.Instance(**{
"id": 1,
"uuid": uuid.uuid4(),
"name": 'fake',
"user_id": 'fakeuser',
"project_id": '123'})
def stub_nw_info(stubs):
def get_nw_info_for_instance(instance):
return fake_network.fake_get_instance_nw_info(stubs)
return get_nw_info_for_instance
def get_instance_by_floating_ip_addr(self, context, address):
return None
class FloatingIpTestNeutronV21(test.NoDBTestCase):
floating_ips = fips_v21
def setUp(self):
super(FloatingIpTestNeutronV21, self).setUp()
self.flags(network_api_class='nova.network.neutronv2.api.API')
self.controller = self.floating_ips.FloatingIPController()
def test_floatingip_delete(self):
req = fakes.HTTPRequest.blank('')
fip_val = {'address': '1.1.1.1', 'fixed_ip_id': '192.168.1.2'}
with contextlib.nested(
mock.patch.object(self.controller.network_api,
'disassociate_floating_ip'),
mock.patch.object(self.controller.network_api,
'disassociate_and_release_floating_ip'),
mock.patch.object(self.controller.network_api,
'release_floating_ip'),
mock.patch.object(self.controller.network_api,
'get_instance_id_by_floating_address',
return_value=None),
mock.patch.object(self.controller.network_api,
'get_floating_ip',
return_value=fip_val)) as (
disoc_fip, dis_and_del, rel_fip, _, _):
self.controller.delete(req, 1)
self.assertFalse(disoc_fip.called)
self.assertFalse(rel_fip.called)
# Only disassociate_and_release_floating_ip is
# called if using neutron
self.assertTrue(dis_and_del.called)
def _test_floatingip_delete_not_found(self, ex,
expect_ex=webob.exc.HTTPNotFound):
req = fakes.HTTPRequest.blank('')
with contextlib.nested(
mock.patch.object(self.controller.network_api,
'get_floating_ip',
side_effect=ex)
):
self.assertRaises(expect_ex,
self.controller.delete, req, 1)
def test_floatingip_delete_not_found_ip(self):
ex = exception.FloatingIpNotFound(id=1)
self._test_floatingip_delete_not_found(ex)
def test_floatingip_delete_not_found(self):
ex = exception.NotFound
self._test_floatingip_delete_not_found(ex)
def test_floatingip_delete_invalid_id(self):
ex = exception.InvalidID(id=1)
self._test_floatingip_delete_not_found(ex, webob.exc.HTTPBadRequest)
class FloatingIpTestNeutronV2(FloatingIpTestNeutronV21):
floating_ips = fips_v2
def test_floatingip_delete_invalid_id(self):
ex = exception.InvalidID(id=1)
self._test_floatingip_delete_not_found(ex, webob.exc.HTTPNotFound)
class FloatingIpTestV21(test.TestCase):
floating_ip = "10.10.10.10"
floating_ip_2 = "10.10.10.11"
floating_ips = fips_v21
validation_error = exception.ValidationError
def _create_floating_ips(self, floating_ips=None):
"""Create a floating ip object."""
if floating_ips is None:
floating_ips = [self.floating_ip]
elif not isinstance(floating_ips, (list, tuple)):
floating_ips = [floating_ips]
        dict_ = {'pool': 'nova', 'host': 'fake_host'}

        def make_ip_dict(ip):
            """Shortcut for creating a floating ip dict."""
            return dict(address=ip, **dict_)

        return db.floating_ip_bulk_create(
            self.context, [make_ip_dict(ip) for ip in floating_ips],
        )
def _delete_floating_ip(self):
db.floating_ip_destroy(self.context, self.floating_ip)
def setUp(self):
super(FloatingIpTestV21, self).setUp()
self.stubs.Set(compute.api.API, "get",
compute_api_get)
self.stubs.Set(network.api.API, "get_floating_ip",
network_api_get_floating_ip)
self.stubs.Set(network.api.API, "get_floating_ip_by_address",
network_api_get_floating_ip_by_address)
self.stubs.Set(network.api.API, "get_floating_ips_by_project",
network_api_get_floating_ips_by_project)
self.stubs.Set(network.api.API, "release_floating_ip",
network_api_release)
self.stubs.Set(network.api.API, "disassociate_floating_ip",
network_api_disassociate)
self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
get_instance_by_floating_ip_addr)
self.stubs.Set(compute_utils, "get_nw_info_for_instance",
stub_nw_info(self.stubs))
fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
self.stubs.Set(db, 'instance_get',
fake_instance_get)
self.context = context.get_admin_context()
self._create_floating_ips()
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
self.controller = self.floating_ips.FloatingIPController()
self.manager = self.floating_ips.\
FloatingIPActionController(self.ext_mgr)
self.fake_req = fakes.HTTPRequest.blank('')
def tearDown(self):
self._delete_floating_ip()
super(FloatingIpTestV21, self).tearDown()
def test_floatingip_delete(self):
fip_val = {'address': '1.1.1.1', 'fixed_ip_id': '192.168.1.2'}
with contextlib.nested(
mock.patch.object(self.controller.network_api,
'disassociate_floating_ip'),
mock.patch.object(self.controller.network_api,
'release_floating_ip'),
mock.patch.object(self.controller.network_api,
'get_instance_id_by_floating_address',
return_value=None),
mock.patch.object(self.controller.network_api,
'get_floating_ip',
return_value=fip_val)) as (
disoc_fip, rel_fip, _, _):
self.controller.delete(self.fake_req, 1)
self.assertTrue(disoc_fip.called)
self.assertTrue(rel_fip.called)
def _test_floatingip_delete_not_found(self, ex,
expect_ex=webob.exc.HTTPNotFound):
with contextlib.nested(
mock.patch.object(self.controller.network_api,
'get_floating_ip',
side_effect=ex)
):
self.assertRaises(expect_ex,
self.controller.delete, self.fake_req, 1)
def test_floatingip_delete_not_found_ip(self):
ex = exception.FloatingIpNotFound(id=1)
self._test_floatingip_delete_not_found(ex)
def test_floatingip_delete_not_found(self):
ex = exception.NotFound
self._test_floatingip_delete_not_found(ex)
def test_floatingip_delete_invalid_id(self):
ex = exception.InvalidID(id=1)
self._test_floatingip_delete_not_found(ex, webob.exc.HTTPBadRequest)
def test_translate_floating_ip_view(self):
floating_ip_address = self.floating_ip
floating_ip = db.floating_ip_get_by_address(self.context,
floating_ip_address)
# NOTE(vish): network_get uses the id not the address
floating_ip = db.floating_ip_get(self.context, floating_ip['id'])
view = self.floating_ips._translate_floating_ip_view(floating_ip)
self.assertIn('floating_ip', view)
self.assertTrue(view['floating_ip']['id'])
self.assertEqual(view['floating_ip']['ip'], self.floating_ip)
self.assertIsNone(view['floating_ip']['fixed_ip'])
self.assertIsNone(view['floating_ip']['instance_id'])
def test_translate_floating_ip_view_dict(self):
floating_ip = {'id': 0, 'address': '10.0.0.10', 'pool': 'nova',
'fixed_ip': None}
view = self.floating_ips._translate_floating_ip_view(floating_ip)
self.assertIn('floating_ip', view)
def test_floating_ips_list(self):
res_dict = self.controller.index(self.fake_req)
response = {'floating_ips': [{'instance_id': FAKE_UUID,
'ip': '10.10.10.10',
'pool': 'nova',
'fixed_ip': '10.0.0.1',
'id': 1},
{'instance_id': None,
'ip': '10.10.10.11',
'pool': 'nova',
'fixed_ip': None,
'id': 2}]}
self.assertEqual(res_dict, response)
def test_floating_ip_release_nonexisting(self):
def fake_get_floating_ip(*args, **kwargs):
raise exception.FloatingIpNotFound(id=id)
self.stubs.Set(network.api.API, "get_floating_ip",
fake_get_floating_ip)
ex = self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, self.fake_req, '9876')
self.assertIn("Floating ip not found for id 9876", ex.explanation)
def test_floating_ip_release_race_cond(self):
def fake_get_floating_ip(*args, **kwargs):
return {'fixed_ip_id': 1, 'address': self.floating_ip}
def fake_get_instance_by_floating_ip_addr(*args, **kwargs):
return 'test-inst'
def fake_disassociate_floating_ip(*args, **kwargs):
raise exception.FloatingIpNotAssociated(args[3])
self.stubs.Set(network.api.API, "get_floating_ip",
fake_get_floating_ip)
self.stubs.Set(self.floating_ips, "get_instance_by_floating_ip_addr",
fake_get_instance_by_floating_ip_addr)
self.stubs.Set(self.floating_ips, "disassociate_floating_ip",
fake_disassociate_floating_ip)
res = self.controller.delete(self.fake_req, '9876')
# NOTE: on v2.1, http status code is set as wsgi_code of API
# method instead of status_int in a response object.
if isinstance(self.controller,
fips_v21.FloatingIPController):
status_int = self.controller.delete.wsgi_code
else:
status_int = res.status_int
self.assertEqual(status_int, 202)
def test_floating_ip_show(self):
res_dict = self.controller.show(self.fake_req, 1)
self.assertEqual(res_dict['floating_ip']['id'], 1)
self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10')
self.assertIsNone(res_dict['floating_ip']['instance_id'])
def test_floating_ip_show_not_found(self):
def fake_get_floating_ip(*args, **kwargs):
raise exception.FloatingIpNotFound(id='fake')
self.stubs.Set(network.api.API, "get_floating_ip",
fake_get_floating_ip)
ex = self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, self.fake_req, '9876')
self.assertIn("Floating ip not found for id 9876", ex.explanation)
def test_show_associated_floating_ip(self):
def get_floating_ip(self, context, id):
return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
'fixed_ip': {'address': '10.0.0.1',
'instance_uuid': FAKE_UUID,
'instance': {'uuid': FAKE_UUID}}}
self.stubs.Set(network.api.API, "get_floating_ip", get_floating_ip)
res_dict = self.controller.show(self.fake_req, 1)
self.assertEqual(res_dict['floating_ip']['id'], 1)
self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10')
self.assertEqual(res_dict['floating_ip']['fixed_ip'], '10.0.0.1')
self.assertEqual(res_dict['floating_ip']['instance_id'], FAKE_UUID)
def test_recreation_of_floating_ip(self):
self._delete_floating_ip()
self._create_floating_ips()
def test_floating_ip_in_bulk_creation(self):
self._delete_floating_ip()
self._create_floating_ips([self.floating_ip, self.floating_ip_2])
all_ips = db.floating_ip_get_all(self.context)
ip_list = [ip['address'] for ip in all_ips]
self.assertIn(self.floating_ip, ip_list)
self.assertIn(self.floating_ip_2, ip_list)
def test_fail_floating_ip_in_bulk_creation(self):
self.assertRaises(exception.FloatingIpExists,
self._create_floating_ips,
[self.floating_ip, self.floating_ip_2])
all_ips = db.floating_ip_get_all(self.context)
ip_list = [ip['address'] for ip in all_ips]
self.assertIn(self.floating_ip, ip_list)
self.assertNotIn(self.floating_ip_2, ip_list)
def test_floating_ip_allocate_no_free_ips(self):
def fake_allocate(*args, **kwargs):
raise exception.NoMoreFloatingIps()
self.stubs.Set(network.api.API, "allocate_floating_ip", fake_allocate)
ex = self.assertRaises(webob.exc.HTTPNotFound,
self.controller.create, self.fake_req)
self.assertIn('No more floating ips', ex.explanation)
def test_floating_ip_allocate_no_free_ips_pool(self):
def fake_allocate(*args, **kwargs):
raise exception.NoMoreFloatingIps()
self.stubs.Set(network.api.API, "allocate_floating_ip", fake_allocate)
ex = self.assertRaises(webob.exc.HTTPNotFound,
self.controller.create, self.fake_req,
{'pool': 'non_existent_pool'})
self.assertIn('No more floating ips in pool non_existent_pool',
ex.explanation)
@mock.patch('nova.network.api.API.allocate_floating_ip',
side_effect=exception.FloatingIpLimitExceeded())
def test_floating_ip_allocate_over_quota(self, allocate_mock):
ex = self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, self.fake_req)
self.assertIn('IP allocation over quota', ex.explanation)
@mock.patch('nova.network.api.API.allocate_floating_ip',
side_effect=exception.FloatingIpLimitExceeded())
def test_floating_ip_allocate_quota_exceed_in_pool(self, allocate_mock):
ex = self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, self.fake_req,
{'pool': 'non_existent_pool'})
self.assertIn('IP allocation over quota in pool non_existent_pool.',
ex.explanation)
@mock.patch('nova.network.api.API.allocate_floating_ip',
side_effect=exception.FloatingIpPoolNotFound())
def test_floating_ip_create_with_unknown_pool(self, allocate_mock):
ex = self.assertRaises(webob.exc.HTTPNotFound,
self.controller.create, self.fake_req,
{'pool': 'non_existent_pool'})
self.assertIn('Floating ip pool not found.', ex.explanation)
def test_floating_ip_allocate(self):
def fake1(*args, **kwargs):
pass
def fake2(*args, **kwargs):
return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova'}
self.stubs.Set(network.api.API, "allocate_floating_ip",
fake1)
self.stubs.Set(network.api.API, "get_floating_ip_by_address",
fake2)
res_dict = self.controller.create(self.fake_req)
ip = res_dict['floating_ip']
expected = {
"id": 1,
"instance_id": None,
"ip": "10.10.10.10",
"fixed_ip": None,
"pool": 'nova'}
self.assertEqual(ip, expected)
def test_floating_ip_release(self):
self.controller.delete(self.fake_req, 1)
def test_floating_ip_associate(self):
fixed_address = '192.168.1.100'
def fake_associate_floating_ip(*args, **kwargs):
self.assertEqual(fixed_address, kwargs['fixed_address'])
self.stubs.Set(network.api.API, "associate_floating_ip",
fake_associate_floating_ip)
body = dict(addFloatingIp=dict(address=self.floating_ip))
rsp = self.manager._add_floating_ip(self.fake_req, TEST_INST,
body=body)
self.assertEqual(202, rsp.status_int)
def test_floating_ip_associate_invalid_instance(self):
def fake_get(self, context, id, expected_attrs=None,
want_objects=False):
raise exception.InstanceNotFound(instance_id=id)
self.stubs.Set(compute.api.API, "get", fake_get)
body = dict(addFloatingIp=dict(address=self.floating_ip))
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._add_floating_ip, self.fake_req,
'test_inst', body=body)
def test_associate_not_allocated_floating_ip_to_instance(self):
def fake_associate_floating_ip(self, context, instance,
floating_address, fixed_address,
affect_auto_assigned=False):
raise exception.FloatingIpNotFoundForAddress(
address=floating_address)
self.stubs.Set(network.api.API, "associate_floating_ip",
fake_associate_floating_ip)
floating_ip = '10.10.10.11'
body = dict(addFloatingIp=dict(address=floating_ip))
ex = self.assertRaises(webob.exc.HTTPNotFound,
self.manager._add_floating_ip,
self.fake_req, TEST_INST, body=body)
self.assertIn("floating ip not found", ex.explanation)
@mock.patch.object(network.api.API, 'associate_floating_ip',
side_effect=exception.Forbidden)
def test_associate_floating_ip_forbidden(self, associate_mock):
body = dict(addFloatingIp=dict(address='10.10.10.11'))
self.assertRaises(webob.exc.HTTPForbidden,
self.manager._add_floating_ip, self.fake_req,
TEST_INST, body=body)
def test_associate_floating_ip_bad_address_key(self):
body = dict(addFloatingIp=dict(bad_address='10.10.10.11'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
self.assertRaises(self.validation_error,
self.manager._add_floating_ip, req, 'test_inst',
body=body)
def test_associate_floating_ip_bad_addfloatingip_key(self):
body = dict(bad_addFloatingIp=dict(address='10.10.10.11'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
self.assertRaises(self.validation_error,
self.manager._add_floating_ip, req, 'test_inst',
body=body)
def test_floating_ip_disassociate(self):
def get_instance_by_floating_ip_addr(self, context, address):
if address == '10.10.10.10':
return TEST_INST
self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
get_instance_by_floating_ip_addr)
body = dict(removeFloatingIp=dict(address='10.10.10.10'))
rsp = self.manager._remove_floating_ip(self.fake_req, TEST_INST,
body=body)
self.assertEqual(202, rsp.status_int)
def test_floating_ip_disassociate_missing(self):
body = dict(removeFloatingIp=dict(address='10.10.10.10'))
self.assertRaises(webob.exc.HTTPConflict,
self.manager._remove_floating_ip,
self.fake_req, 'test_inst', body=body)
def test_floating_ip_associate_non_existent_ip(self):
def fake_network_api_associate(self, context, instance,
floating_address=None,
fixed_address=None):
floating_ips = ["10.10.10.10", "10.10.10.11"]
if floating_address not in floating_ips:
raise exception.FloatingIpNotFoundForAddress(
address=floating_address)
self.stubs.Set(network.api.API, "associate_floating_ip",
fake_network_api_associate)
body = dict(addFloatingIp=dict(address='1.1.1.1'))
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._add_floating_ip,
self.fake_req, TEST_INST, body=body)
def test_floating_ip_disassociate_non_existent_ip(self):
def network_api_get_floating_ip_by_address(self, context,
floating_address):
floating_ips = ["10.10.10.10", "10.10.10.11"]
if floating_address not in floating_ips:
raise exception.FloatingIpNotFoundForAddress(
address=floating_address)
self.stubs.Set(network.api.API, "get_floating_ip_by_address",
network_api_get_floating_ip_by_address)
body = dict(removeFloatingIp=dict(address='1.1.1.1'))
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._remove_floating_ip,
self.fake_req, TEST_INST, body=body)
def test_floating_ip_disassociate_wrong_instance_uuid(self):
def get_instance_by_floating_ip_addr(self, context, address):
if address == '10.10.10.10':
return TEST_INST
self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
get_instance_by_floating_ip_addr)
wrong_uuid = 'aaaaaaaa-ffff-ffff-ffff-aaaaaaaaaaaa'
body = dict(removeFloatingIp=dict(address='10.10.10.10'))
self.assertRaises(webob.exc.HTTPConflict,
self.manager._remove_floating_ip,
self.fake_req, wrong_uuid, body=body)
def test_floating_ip_disassociate_wrong_instance_id(self):
def get_instance_by_floating_ip_addr(self, context, address):
if address == '10.10.10.10':
return WRONG_INST
self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
get_instance_by_floating_ip_addr)
body = dict(removeFloatingIp=dict(address='10.10.10.10'))
self.assertRaises(webob.exc.HTTPConflict,
self.manager._remove_floating_ip,
self.fake_req, TEST_INST, body=body)
def test_floating_ip_disassociate_auto_assigned(self):
def fake_get_floating_ip_addr_auto_assigned(self, context, address):
return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
'fixed_ip_id': 10, 'auto_assigned': 1}
def get_instance_by_floating_ip_addr(self, context, address):
if address == '10.10.10.10':
return TEST_INST
def network_api_disassociate(self, context, instance,
floating_address):
raise exception.CannotDisassociateAutoAssignedFloatingIP()
self.stubs.Set(network.api.API, "get_floating_ip_by_address",
fake_get_floating_ip_addr_auto_assigned)
self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
get_instance_by_floating_ip_addr)
self.stubs.Set(network.api.API, "disassociate_floating_ip",
network_api_disassociate)
body = dict(removeFloatingIp=dict(address='10.10.10.10'))
self.assertRaises(webob.exc.HTTPForbidden,
self.manager._remove_floating_ip,
self.fake_req, TEST_INST, body=body)
def test_floating_ip_disassociate_map_authorization_exc(self):
def fake_get_floating_ip_addr_auto_assigned(self, context, address):
return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
'fixed_ip_id': 10, 'auto_assigned': 1}
def get_instance_by_floating_ip_addr(self, context, address):
if address == '10.10.10.10':
return TEST_INST
def network_api_disassociate(self, context, instance, address):
raise exception.Forbidden()
self.stubs.Set(network.api.API, "get_floating_ip_by_address",
fake_get_floating_ip_addr_auto_assigned)
self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
get_instance_by_floating_ip_addr)
self.stubs.Set(network.api.API, "disassociate_floating_ip",
network_api_disassociate)
body = dict(removeFloatingIp=dict(address='10.10.10.10'))
self.assertRaises(webob.exc.HTTPForbidden,
self.manager._remove_floating_ip,
self.fake_req, TEST_INST, body=body)
# these are a few bad param tests
def test_bad_address_param_in_remove_floating_ip(self):
body = dict(removeFloatingIp=dict(badparam='11.0.0.1'))
self.assertRaises(self.validation_error,
self.manager._remove_floating_ip, self.fake_req,
TEST_INST, body=body)
def test_missing_dict_param_in_remove_floating_ip(self):
body = dict(removeFloatingIp='11.0.0.1')
self.assertRaises(self.validation_error,
self.manager._remove_floating_ip, self.fake_req,
TEST_INST, body=body)
def test_missing_dict_param_in_add_floating_ip(self):
body = dict(addFloatingIp='11.0.0.1')
self.assertRaises(self.validation_error,
self.manager._add_floating_ip, self.fake_req,
TEST_INST, body=body)
class FloatingIpTestV2(FloatingIpTestV21):
floating_ips = fips_v2
validation_error = webob.exc.HTTPBadRequest
def test_not_extended_floating_ip_associate_fixed(self):
# Check that fixed_address is ignored if os-extended-floating-ips
# is not loaded
fixed_address_requested = '192.168.1.101'
fixed_address_allocated = '192.168.1.100'
def fake_associate_floating_ip(*args, **kwargs):
self.assertEqual(fixed_address_allocated,
kwargs['fixed_address'])
self.stubs.Set(network.api.API, "associate_floating_ip",
fake_associate_floating_ip)
body = dict(addFloatingIp=dict(address=self.floating_ip,
fixed_address=fixed_address_requested))
rsp = self.manager._add_floating_ip(self.fake_req, TEST_INST, body)
self.assertEqual(202, rsp.status_int)
def test_floatingip_delete_invalid_id(self):
ex = exception.InvalidID(id=1)
self._test_floatingip_delete_not_found(ex, webob.exc.HTTPNotFound)
class ExtendedFloatingIpTestV21(test.TestCase):
floating_ip = "10.10.10.10"
floating_ip_2 = "10.10.10.11"
floating_ips = fips_v21
def _create_floating_ips(self, floating_ips=None):
"""Create a floating ip object."""
if floating_ips is None:
floating_ips = [self.floating_ip]
elif not isinstance(floating_ips, (list, tuple)):
floating_ips = [floating_ips]
def make_ip_dict(ip):
"""Shortcut for creating a floating ip dict."""
return {'address': ip, 'pool': 'nova', 'host': 'fake_host'}
return db.floating_ip_bulk_create(
self.context, [make_ip_dict(ip) for ip in floating_ips],
)
def _delete_floating_ip(self):
db.floating_ip_destroy(self.context, self.floating_ip)
def setUp(self):
super(ExtendedFloatingIpTestV21, self).setUp()
self.stubs.Set(compute.api.API, "get",
compute_api_get)
self.stubs.Set(network.api.API, "get_floating_ip",
network_api_get_floating_ip)
self.stubs.Set(network.api.API, "get_floating_ip_by_address",
network_api_get_floating_ip_by_address)
self.stubs.Set(network.api.API, "get_floating_ips_by_project",
network_api_get_floating_ips_by_project)
self.stubs.Set(network.api.API, "release_floating_ip",
network_api_release)
self.stubs.Set(network.api.API, "disassociate_floating_ip",
network_api_disassociate)
self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
get_instance_by_floating_ip_addr)
self.stubs.Set(compute_utils, "get_nw_info_for_instance",
stub_nw_info(self.stubs))
fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
self.stubs.Set(db, 'instance_get',
fake_instance_get)
self.context = context.get_admin_context()
self._create_floating_ips()
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
self.ext_mgr.extensions['os-floating-ips'] = True
self.ext_mgr.extensions['os-extended-floating-ips'] = True
self.controller = self.floating_ips.FloatingIPController()
self.manager = self.floating_ips.\
FloatingIPActionController(self.ext_mgr)
self.fake_req = fakes.HTTPRequest.blank('')
def tearDown(self):
self._delete_floating_ip()
super(ExtendedFloatingIpTestV21, self).tearDown()
def test_extended_floating_ip_associate_fixed(self):
fixed_address = '192.168.1.101'
def fake_associate_floating_ip(*args, **kwargs):
self.assertEqual(fixed_address, kwargs['fixed_address'])
self.stubs.Set(network.api.API, "associate_floating_ip",
fake_associate_floating_ip)
body = dict(addFloatingIp=dict(address=self.floating_ip,
fixed_address=fixed_address))
rsp = self.manager._add_floating_ip(self.fake_req, TEST_INST,
body=body)
self.assertEqual(202, rsp.status_int)
def test_extended_floating_ip_associate_fixed_not_allocated(self):
def fake_associate_floating_ip(*args, **kwargs):
pass
self.stubs.Set(network.api.API, "associate_floating_ip",
fake_associate_floating_ip)
body = dict(addFloatingIp=dict(address=self.floating_ip,
fixed_address='11.11.11.11'))
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._add_floating_ip,
self.fake_req, TEST_INST, body=body)
self.assertIn("Specified fixed address not assigned to instance",
ex.explanation)
class ExtendedFloatingIpTestV2(ExtendedFloatingIpTestV21):
floating_ips = fips_v2
class FloatingIPPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(FloatingIPPolicyEnforcementV21, self).setUp()
self.controller = fips_v21.FloatingIPController()
self.req = fakes.HTTPRequest.blank('')
def _common_policy_check(self, func, *arg, **kwarg):
rule_name = "compute_extension:v3:os-floating-ips"
rule = {rule_name: "project:non_fake"}
self.policy.set_rules(rule)
exc = self.assertRaises(
exception.PolicyNotAuthorized, func, *arg, **kwarg)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_index_policy_failed(self):
self._common_policy_check(self.controller.index, self.req)
def test_show_policy_failed(self):
self._common_policy_check(self.controller.show, self.req, FAKE_UUID)
def test_create_policy_failed(self):
self._common_policy_check(self.controller.create, self.req)
def test_delete_policy_failed(self):
self._common_policy_check(self.controller.delete, self.req, FAKE_UUID)
class FloatingIPActionPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(FloatingIPActionPolicyEnforcementV21, self).setUp()
self.controller = fips_v21.FloatingIPActionController()
self.req = fakes.HTTPRequest.blank('')
def _common_policy_check(self, func, *arg, **kwarg):
rule_name = "compute_extension:v3:os-floating-ips"
rule = {rule_name: "project:non_fake"}
self.policy.set_rules(rule)
exc = self.assertRaises(
exception.PolicyNotAuthorized, func, *arg, **kwarg)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_add_policy_failed(self):
body = dict(addFloatingIp=dict(address='10.10.10.11'))
self._common_policy_check(
self.controller._add_floating_ip, self.req, FAKE_UUID, body=body)
def test_remove_policy_failed(self):
body = dict(removeFloatingIp=dict(address='10.10.10.10'))
self._common_policy_check(
self.controller._remove_floating_ip, self.req,
FAKE_UUID, body=body)
| {
"content_hash": "4f008e3f67c81b236eecef8934892884",
"timestamp": "",
"source": "github",
"line_count": 870,
"max_line_length": 78,
"avg_line_length": 41.33103448275862,
"alnum_prop": 0.5850992824962457,
"repo_name": "cloudbase/nova-virtualbox",
"id": "f1368f36c3e05748987f76ff9bb99e09f0189c39",
"size": "36648",
"binary": false,
"copies": "1",
"ref": "refs/heads/virtualbox_driver",
"path": "nova/tests/unit/api/openstack/compute/contrib/test_floating_ips.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16016453"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "497954"
}
],
"symlink_target": ""
} |
from wishlist_app.models import Item, WishlistGroup, GroupItem
from django.forms import ModelForm, ModelMultipleChoiceField, CheckboxSelectMultiple
class ItemForm(ModelForm):
# Representing the many to many related field in item groups
groups = ModelMultipleChoiceField(queryset=WishlistGroup.objects.all(), widget=CheckboxSelectMultiple, required=False)
# Overriding __init__ here allows us to provide initial
# data for 'groups' field
def __init__(self, *args, **kwargs):
# Only in case we build the form from an instance
# (otherwise, 'groups' list should be empty)
if 'instance' in kwargs:
# We get the 'initial' keyword argument or initialize it
# as a dict if it didn't exist.
initial = kwargs.setdefault('initial', {})
# The widget for a ModelMultipleChoiceField expects
# a list of primary key for the selected data.
initial['groups'] = [grp.pk for grp in kwargs['instance'].wishlistgroup_set.all()]
if 'group' in kwargs:
initial = kwargs.setdefault('initial', {})
groups = initial.setdefault('groups', [])
groups.append(kwargs['group'].pk)
del kwargs['group']
user = kwargs.pop('user')
ModelForm.__init__(self, *args, **kwargs)
self.fields['groups'].queryset = WishlistGroup.get_groups_by_user(user=user)
# Overriding save allows us to process the value of 'groups' field
def save(self, commit=True):
# Get the unsaved Item instance
instance = ModelForm.save(self, False)
# Prepare a custom 'save_m2m' method for the form.
old_save_m2m = self.save_m2m
def new_save_m2m():
old_save_m2m()
# This is where we actually link the item with groups
instance.wishlistgroup_set.clear()
for group in self.cleaned_data['groups']:
print "creating intermediate groupitems for %s" % group
GroupItem(item=instance, group=group).save()
# instance.wishlistgroup_set.add(group)
self.save_m2m = new_save_m2m
# Do we need to save all changes now?
if commit:
instance.save()
self.save_m2m()
return instance
class Meta:
model = Item
fields = ["name", "description", "link", "quantity"]
| {
"content_hash": "518e0569b7f4386cfa8a8a3fdde29e4a",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 122,
"avg_line_length": 39.88333333333333,
"alnum_prop": 0.6188884245716674,
"repo_name": "pclements12/PyWishlist",
"id": "28a363b40d6194713b6d41d52f9b4e83295c3c63",
"size": "2393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wishlist_app/forms/ItemForm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "953"
},
{
"name": "HTML",
"bytes": "65001"
},
{
"name": "JavaScript",
"bytes": "55206"
},
{
"name": "Python",
"bytes": "110685"
}
],
"symlink_target": ""
} |
"""Tests for dicom_web.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import posixpath
from typing import Any, Dict, Optional, Text, Tuple
from absl.testing import absltest
from absl.testing import parameterized
import google.auth.credentials
import google_auth_httplib2
import httplib2
import mock
from six.moves import http_client
from hcls_imaging_ml_toolkit import dicom_json
from hcls_imaging_ml_toolkit import dicom_path
from hcls_imaging_ml_toolkit import dicom_web
from hcls_imaging_ml_toolkit import tags
from hcls_imaging_ml_toolkit import test_dicom_path_util as tdpu
_URI = 'http://healthcareapi.com/test'
_GET = 'GET'
_BODY = 'body'
_TEST_INSTANCES = 10
def _CreateMockInstanceMetadata() -> Dict[Text, Any]:
instance_metadata = {}
dicom_json.Insert(instance_metadata, tags.STUDY_INSTANCE_UID, 1)
dicom_json.Insert(instance_metadata, tags.SERIES_INSTANCE_UID, 2)
dicom_json.Insert(instance_metadata, tags.SOP_INSTANCE_UID, 3)
return instance_metadata
_MOCK_CT_INSTANCE_METADATA = _CreateMockInstanceMetadata()
def FakeHttpResponse(
status_code: int,
body: Optional[Text] = _BODY) -> Tuple[httplib2.Response, Text]:
return httplib2.Response({'status': status_code}), body
class DicomWebTest(parameterized.TestCase):
@mock.patch.object(
google.auth, 'default', return_value=(mock.MagicMock(), mock.MagicMock()))
def setUp(self, *_):
super(DicomWebTest, self).setUp()
self._dwc = dicom_web.DicomWebClientImpl(credentials=mock.MagicMock())
@mock.patch.object(httplib2, 'Http')
@mock.patch.object(google_auth_httplib2, 'AuthorizedHttp')
def testInvokeHttpRequest(self, *_):
http_mock = mock.MagicMock()
httplib2.Http.return_value = http_mock
http_mock.request.return_value = FakeHttpResponse(http_client.OK)
google_auth_httplib2.AuthorizedHttp.return_value = http_mock
resp, content = self._dwc._InvokeHttpRequest(_URI, _GET)
self.assertEqual(resp.status, 200)
self.assertEqual(content, _BODY)
@parameterized.parameters(dicom_web._TOO_MANY_REQUESTS_ERROR,
http_client.REQUEST_TIMEOUT,
http_client.SERVICE_UNAVAILABLE,
http_client.GATEWAY_TIMEOUT)
@mock.patch.object(httplib2, 'Http')
@mock.patch.object(google_auth_httplib2, 'AuthorizedHttp')
def testInvokeHttpRequestWithRetriedErrors(self, error_code, *_):
http_mock = mock.MagicMock()
httplib2.Http.return_value = http_mock
http_mock.request.side_effect = [
FakeHttpResponse(error_code),
FakeHttpResponse(http_client.OK)
]
google_auth_httplib2.AuthorizedHttp.return_value = http_mock
resp, content = self._dwc._InvokeHttpRequest(_URI, _GET)
self.assertEqual(resp.status, 200)
self.assertEqual(content, _BODY)
def testGetAllMetaData(self):
expected_url = (
'http://test/studies/1/instances/?includefield=%s&'
'includefield=%s&includefield=%s&limit=%d' %
(tags.STUDY_INSTANCE_UID.number, tags.SERIES_INSTANCE_UID.number,
tags.SOP_INSTANCE_UID.number, _TEST_INSTANCES))
mock_client = mock.create_autospec(dicom_web.DicomWebClientImpl)
mock_client.QidoRs.return_value = [_MOCK_CT_INSTANCE_METADATA]
dicomweb_url = 'http://test'
study_uid = '1'
tag_list = [
tags.STUDY_INSTANCE_UID, tags.SERIES_INSTANCE_UID, tags.SOP_INSTANCE_UID
]
all_meta_data = dicom_web.GetInstancesMetadata(mock_client, dicomweb_url,
study_uid, tag_list,
_TEST_INSTANCES)
self.assertLen(all_meta_data, 1)
self.assertEqual(all_meta_data[0], _MOCK_CT_INSTANCE_METADATA)
mock_client.QidoRs.assert_called_once()
call_args, _ = mock_client.QidoRs.call_args
self.assertEqual(call_args[0], expected_url)
def testStowRsJsonError(self):
bulk_data = dicom_json.DicomBulkData(uri='', data=b'', content_type='a/b/c')
with self.assertRaises(Exception):
self._dwc.StowRsJson('', [{}], [bulk_data])
@parameterized.parameters(199, 300, 403)
def testStowRsHttpErrors(self, error_value):
bulk_data = ['0x12', '0x13']
expected_response = (f'StowRs error. Response Status: {error_value},\nURL: '
'https://some_stow_url,\nContent: body.')
with mock.patch.object(
dicom_web.DicomWebClientImpl,
'_InvokeHttpRequest',
return_value=FakeHttpResponse(error_value),
autospec=True):
with self.assertRaisesRegex(dicom_web.UnexpectedResponseError,
expected_response):
self._dwc.StowRs('https://some_stow_url', bulk_data)
@mock.patch.object(httplib2, 'Http')
@mock.patch.object(google_auth_httplib2, 'AuthorizedHttp')
def testQidoSuccess(self, *_):
http_mock = mock.MagicMock()
httplib2.Http.return_value = http_mock
http_mock.request.side_effect = [
FakeHttpResponse(http_client.OK,
json.dumps([_MOCK_CT_INSTANCE_METADATA])),
FakeHttpResponse(http_client.NO_CONTENT, b'')
]
google_auth_httplib2.AuthorizedHttp.return_value = http_mock
resp = self._dwc.QidoRs(_URI)
self.assertEqual(resp, [_MOCK_CT_INSTANCE_METADATA])
resp = self._dwc.QidoRs(_URI)
self.assertEqual(resp, [])
def testPathToUrl(self):
dicom_path_str = tdpu.STUDY_PATH_STR
url = dicom_web.PathToUrl(dicom_path.FromString(dicom_path_str))
expected_url = posixpath.join(dicom_web.CLOUD_HEALTHCARE_API_URL,
dicom_path_str)
self.assertEqual(url, expected_url)
def testPathStrToUrl(self):
dicom_query_path_str = posixpath.join(
tdpu.DICOMWEB_PATH_STR,
'instances?00080060=SR&includefield=all&limit=10000')
url = dicom_web.PathStrToUrl(dicom_query_path_str)
expected_url = posixpath.join(dicom_web.CLOUD_HEALTHCARE_API_URL,
dicom_query_path_str)
self.assertEqual(url, expected_url)
if __name__ == '__main__':
absltest.main()
| {
"content_hash": "ce3dd7898593a287eb278ac5b181d6c8",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 80,
"avg_line_length": 37.888888888888886,
"alnum_prop": 0.6748126425545781,
"repo_name": "GoogleCloudPlatform/healthcare",
"id": "e8f4aed07507f1748d2bdd7fff67a6a2f4e61190",
"size": "6713",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imaging/ml/toolkit/hcls_imaging_ml_toolkit/dicom_web_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2719"
},
{
"name": "Go",
"bytes": "265118"
},
{
"name": "HTML",
"bytes": "25188"
},
{
"name": "Java",
"bytes": "74521"
},
{
"name": "JavaScript",
"bytes": "13143"
},
{
"name": "Jupyter Notebook",
"bytes": "394814"
},
{
"name": "MATLAB",
"bytes": "3008"
},
{
"name": "Python",
"bytes": "390233"
},
{
"name": "SCSS",
"bytes": "14206"
},
{
"name": "Shell",
"bytes": "16533"
},
{
"name": "Starlark",
"bytes": "20808"
},
{
"name": "TeX",
"bytes": "5986"
},
{
"name": "TypeScript",
"bytes": "179680"
}
],
"symlink_target": ""
} |
import operator
from .local_time import get_now, TZ
from copy import deepcopy
from datetime import timedelta
from dateutil.parser import parse
from dpath.util import delete as xpathdelete, get as xpathget, new as xpathnew
from haversine import haversine
from json import load, loads
from jsonpath_rw import parse as parse_path
from munch import Munch, munchify
from robot.errors import ExecutionFailed
from robot.libraries.BuiltIn import BuiltIn
from robot.output import LOGGER
from robot.output.loggerhelper import Message
# These imports are not pointless. Robot's resource and testsuite files
# can access them by simply importing library "service_keywords".
# Please ignore the warning given by Flake8 or other linter.
from .initial_data import (
create_fake_doc,
create_fake_sentence,
create_fake_amount,
create_fake_number,
create_fake_date,
create_fake_funder,
get_fake_funder_scheme,
fake,
subtraction,
field_with_id,
test_bid_data,
test_bid_value,
test_change_data,
test_claim_answer_data,
test_claim_data,
test_complaint_data,
test_complaint_reply_data,
test_confirm_data,
test_feature_data,
test_invalid_features_data,
test_item_data,
test_lot_data,
test_lot_document_data,
test_related_question,
test_question_answer_data,
test_question_data,
test_supplier_data,
test_tender_data,
test_tender_data_competitive_dialogue,
test_tender_data_limited,
test_tender_data_openeu,
test_tender_data_openua,
test_tender_data_planning,
test_tender_data_openua_defense,
test_bid_competitive_data,
tets_monitoring_data,
test_party,
test_dialogue,
test_conclusion,
test_status_data,
test_elimination_report,
create_fake_title,
create_fake_value_amount,
test_change_document_data,
convert_amount,
get_number_of_minutes,
get_hash,
)
from barbecue import chef
from restkit import request
# End of non-pointless import
import os
import re
NUM_TYPES = (int, long, float)
STR_TYPES = (str, unicode)
def get_current_tzdate():
return get_now().strftime('%Y-%m-%d %H:%M:%S.%f')
def add_minutes_to_date(date, minutes):
return (parse(date) + timedelta(minutes=float(minutes))).isoformat()
def compare_date(left, right, accuracy="minute", absolute_delta=True):
'''Compares dates with specified accuracy
Before comparison dates are parsed into datetime.datetime format
and localized.
:param left: First date
:param right: Second date
:param accuracy: Max difference between dates to consider them equal
Default value - "minute"
Possible values - "day", "hour", "minute" or float value
of seconds
:param absolute_delta: Type of comparison. If set to True, the order of
the dates does not matter (the absolute difference is used).
If set to False, right must not be earlier than left
by more than the accuracy value.
Default value - True
Possible values - True and False or anything that can be cast to them
:returns: Boolean value
:error: ValueError when the accuracy cannot be converted into a float
value. If that happens, a warning is logged and the accuracy
is set to 60 seconds.
'''
left = parse(left)
right = parse(right)
if left.tzinfo is None:
left = TZ.localize(left)
if right.tzinfo is None:
right = TZ.localize(right)
delta = (left - right).total_seconds()
if accuracy == "day":
accuracy = 24 * 60 * 60 - 1
elif accuracy == "hour":
accuracy = 60 * 60 - 1
elif accuracy == "minute":
accuracy = 60 - 1
else:
try:
accuracy = float(accuracy)
except ValueError:
LOGGER.log_message(Message("Could not convert from {} to float. Accuracy is set to 60 seconds.".format(accuracy), "WARN"))
accuracy = 60
if absolute_delta:
delta = abs(delta)
if delta > accuracy:
return False
return True
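# Illustrative sketch (not part of the original module): the dates below are
# made-up values showing how the accuracy parameter changes the result.
def _example_compare_date():
    # 30 minutes apart: equal at "hour" accuracy, different at "minute".
    assert compare_date('2017-01-01 10:00', '2017-01-01 10:30', accuracy='hour')
    assert not compare_date('2017-01-01 10:00', '2017-01-01 10:30', accuracy='minute')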
def compare_coordinates(left_lat, left_lon, right_lat, right_lon, accuracy=0.1):
'''Compares coordinates with specified accuracy
:param left_lat: First coordinate latitude
:param left_lon: First coordinate longitude
:param right_lat: Second coordinate latitude
:param right_lon: Second coordinate longitude
:param accuracy: Max difference between coordinates to consider them equal
Default value - 0.1
Possible values - float or integer value of kilometers
:returns: Boolean value
:error: TypeError when any of the coordinates is not a numeric value
(see NUM_TYPES above).
'''
for key, value in {'left_lat': left_lat, 'left_lon': left_lon, 'right_lat': right_lat, 'right_lon': right_lon}.iteritems():
if not isinstance(value, NUM_TYPES):
raise TypeError("Invalid type for coordinate '{0}'. "
"Expected one of {1}, got {2}".format(
key, str(NUM_TYPES), str(type(value))))
distance = haversine((left_lat, left_lon), (right_lat, right_lon))
if distance > accuracy:
return False
return True
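# Illustrative sketch (not part of the original module): the coordinates are
# made-up; the first pair of points is roughly 1.3 km apart, the second pair
# about 111 km apart.
def _example_compare_coordinates():
    assert compare_coordinates(50.45, 30.52, 50.46, 30.53, accuracy=5)
    assert not compare_coordinates(50.45, 30.52, 51.45, 30.52, accuracy=5)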
def log_object_data(data, file_name=None, format="yaml", update=False, artifact=False):
"""Log object data in pretty format (JSON or YAML)
Two output formats are supported: "yaml" and "json".
If a file name is specified, the output is written into that file.
If you would like to get similar output everywhere,
use the following snippet somewhere in your code
before actually using Munch. For instance,
put it into your __init__.py, or, if you use zc.buildout,
specify it in "initialization" setting of zc.recipe.egg.
from munch import Munch
Munch.__str__ = lambda self: Munch.toYAML(self, allow_unicode=True,
default_flow_style=False)
Munch.__repr__ = Munch.__str__
"""
if not isinstance(data, Munch):
data = munchify(data)
if file_name:
if artifact:
file_path = os.path.join(os.path.dirname(__file__), 'data', file_name + '.' + format)
else:
output_dir = BuiltIn().get_variable_value("${OUTPUT_DIR}")
file_path = os.path.join(output_dir, file_name + '.' + format)
if update:
try:
with open(file_path, "r+") as file_obj:
new_data = data.copy()
data = munch_from_object(file_obj.read(), format)
data.update(new_data)
file_obj.seek(0)
file_obj.truncate()
except IOError as e:
LOGGER.log_message(Message(e, "INFO"))
LOGGER.log_message(Message("Nothing to update, "
"creating new file.", "INFO"))
data_obj = munch_to_object(data, format)
with open(file_path, "w") as file_obj:
file_obj.write(data_obj)
data_obj = munch_to_object(data, format)
LOGGER.log_message(Message(data_obj.decode('utf-8'), "INFO"))
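# Illustrative sketch (not part of the original module): logging a small
# made-up dict as YAML to the Robot Framework log without writing a file.
def _example_log_object_data():
    log_object_data({'tender': {'status': 'active.enquiries'}})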
def munch_from_object(data, format="yaml"):
if format.lower() == 'json':
return Munch.fromJSON(data)
else:
return Munch.fromYAML(data)
def munch_to_object(data, format="yaml"):
if format.lower() == 'json':
return data.toJSON(indent=2)
else:
return data.toYAML(allow_unicode=True, default_flow_style=False)
def load_data_from(file_name, mode=None, external_params_name=None):
"""We assume that 'external_params' is a a valid json if passed
"""
external_params = BuiltIn().\
get_variable_value('${{{name}}}'.format(name=external_params_name))
if not os.path.exists(file_name):
file_name = os.path.join(os.path.dirname(__file__), 'data', file_name)
with open(file_name) as file_obj:
if file_name.endswith('.json'):
file_data = Munch.fromDict(load(file_obj))
elif file_name.endswith('.yaml'):
file_data = Munch.fromYAML(file_obj)
if mode == 'brokers':
default = file_data.pop('Default')
brokers = {}
for k, v in file_data.iteritems():
brokers[k] = merge_dicts(default, v)
file_data = brokers
try:
ext_params_munch \
= Munch.fromDict(loads(external_params)) \
if external_params else Munch()
except ValueError:
raise ValueError(
'Value {param} of command line parameter {name} is invalid'.
format(name=external_params_name, param=str(external_params))
)
return merge_dicts(file_data, ext_params_munch)
def compute_intrs(brokers_data, used_brokers):
"""Compute optimal values for period intervals.
Notice: This function is maximally effective if ``brokers_data``
does not contain a ``Default`` entry.
Using `load_data_from` with ``mode='brokers'`` is recommended.
"""
keys_to_prefer_lesser = ('accelerator',)
def recur(l, r, prefer_greater_numbers=True):
l, r = deepcopy(l), deepcopy(r)
if isinstance(l, list) and isinstance(r, list) and len(l) == len(r):
lst = []
for ll, rr in zip(l, r):
lst.append(recur(ll, rr))
return lst
elif isinstance(l, NUM_TYPES) and isinstance(r, NUM_TYPES):
if l == r:
return l
if l > r:
return l if prefer_greater_numbers else r
if l < r:
return r if prefer_greater_numbers else l
elif isinstance(l, dict) and isinstance(r, dict):
for k, v in r.iteritems():
if k not in l.keys():
l[k] = v
elif k in keys_to_prefer_lesser:
l[k] = recur(l[k], v, prefer_greater_numbers=False)
else:
l[k] = recur(l[k], v)
return l
else:
raise TypeError("Couldn't recur({0}, {1})".format(
str(type(l)), str(type(r))))
intrs = []
for i in used_brokers:
intrs.append(brokers_data[i]['intervals'])
result = intrs.pop(0)
for i in intrs:
result = recur(result, i)
return result
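# Illustrative sketch (not part of the original module): made-up broker data
# showing that numeric values prefer the greater one, except for keys listed
# in keys_to_prefer_lesser ('accelerator'), which prefer the lesser one.
def _example_compute_intrs():
    brokers_data = {
        'broker_a': {'intervals': {'accelerator': 720, 'enquiry': [1, 2]}},
        'broker_b': {'intervals': {'accelerator': 360, 'enquiry': [2, 1]}},
    }
    result = compute_intrs(brokers_data, ['broker_a', 'broker_b'])
    assert result == {'accelerator': 360, 'enquiry': [2, 2]}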
def prepare_test_tender_data(procedure_intervals,
tender_parameters,
submissionMethodDetails,
accelerator,
funders):
# Get actual intervals by mode name
mode = tender_parameters['mode']
if mode in procedure_intervals:
intervals = procedure_intervals[mode]
else:
intervals = procedure_intervals['default']
LOGGER.log_message(Message(intervals))
tender_parameters['intervals'] = intervals
# Set acceleration value for certain modes
assert isinstance(intervals['accelerator'], int), \
"Accelerator should be an 'int', " \
"not '{}'".format(type(intervals['accelerator']).__name__)
assert intervals['accelerator'] >= 0, \
"Accelerator should not be less than 0"
if mode == 'negotiation':
return munchify({'data': test_tender_data_limited(tender_parameters)})
elif mode == 'negotiation.quick':
return munchify({'data': test_tender_data_limited(tender_parameters)})
elif mode == 'openeu':
return munchify({'data': test_tender_data_openeu(
tender_parameters, submissionMethodDetails)})
elif mode == 'openua':
return munchify({'data': test_tender_data_openua(
tender_parameters, submissionMethodDetails)})
elif mode == 'openua_defense':
return munchify({'data': test_tender_data_openua_defense(
tender_parameters, submissionMethodDetails)})
elif mode == 'open_competitive_dialogue':
return munchify({'data': test_tender_data_competitive_dialogue(
tender_parameters, submissionMethodDetails)})
elif mode == 'reporting':
return munchify({'data': test_tender_data_limited(tender_parameters)})
elif mode == 'belowThreshold':
return munchify({'data': test_tender_data(
tender_parameters,
submissionMethodDetails=submissionMethodDetails,
funders=funders,
accelerator=accelerator)})
# The previous line needs an explicit keyword argument because,
# unlike previous functions, this one has three arguments.
raise ValueError("Invalid mode for prepare_test_tender_data")
def run_keyword_and_ignore_keyword_definitions(name, *args, **kwargs):
"""This keyword is pretty similar to `Run Keyword And Ignore Error`,
which, unfortunately, does not suppress the error when you try
to use it to run a keyword which is not defined.
As a result, the execution of its parent keyword / test case is aborted.
How this works:
This is a simple wrapper for `Run Keyword And Ignore Error`.
It handles the error mentioned above and additionally provides
a meaningful error message.
"""
try:
status, _ = BuiltIn().run_keyword_and_ignore_error(name, *args, **kwargs)
except ExecutionFailed as e:
status, _ = "FAIL", e.message
return status, _
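# Illustrative sketch (not part of the original module): only meaningful when
# executed inside a Robot Framework run, where BuiltIn keywords are available.
# The keyword name below is deliberately undefined to show the FAIL path.
def _example_run_keyword_safely():
    status, message = run_keyword_and_ignore_keyword_definitions('No Such Keyword')
    return status == 'FAIL', message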
def set_access_key(tender, access_token):
tender.access = munchify({"token": access_token})
return tender
def get_from_object(obj, path):
"""Gets data from a dictionary using a dotted accessor-string"""
jsonpath_expr = parse_path(path)
return_list = [i.value for i in jsonpath_expr.find(obj)]
if return_list:
return return_list[0]
else:
raise AttributeError('Attribute not found: {0}'.format(path))
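# Illustrative sketch (not part of the original module): reading a value from
# a made-up nested structure with a dotted path.
def _example_get_from_object():
    tender = {'data': {'tenderID': 'example-id'}}
    assert get_from_object(tender, 'data.tenderID') == 'example-id'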
def set_to_object(obj, path, value):
def recur(obj, path, value):
if not isinstance(obj, dict):
raise TypeError('expected %s, got %s' %
(dict.__name__, type(obj)))
# Search the list index in path to value
groups = re.search(r'^(?P<key>[0-9a-zA-Z_]+)(?:\[(?P<index>-?\d+)\])?'
'(?:\.(?P<suffix>.+))?$', path)
err = RuntimeError('could not parse the path: ' + path)
if not groups:
raise err
gd = {k: v for k, v in groups.groupdict().items() if v is not None}
is_list = False
suffix = None
if 'key' not in gd:
raise err
key = gd['key']
if 'index' in gd:
is_list = True
index = int(gd['index'])
if 'suffix' in gd:
suffix = gd['suffix']
if is_list:
if key not in obj:
obj[key] = []
elif not isinstance(obj[key], list):
raise TypeError('expected %s, got %s' %
(list.__name__, type(obj[key])))
plusone = 1 if index >= 0 else 0
if len(obj[key]) < abs(index) + plusone:
while not len(obj[key]) == abs(index) + plusone:
extension = [None] * (abs(index) + plusone - len(obj[key]))
if index < 0:
obj[key] = extension + obj[key]
else:
obj[key].extend(extension)
if suffix:
obj[key][index] = {}
if suffix:
obj[key][index] = recur(obj[key][index], suffix, value)
else:
obj[key][index] = value
else:
if key not in obj:
obj[key] = {}
if suffix:
obj[key] = recur(obj[key], suffix, value)
else:
obj[key] = value
return obj
if not isinstance(path, STR_TYPES):
raise TypeError('Path must be one of ' + str(STR_TYPES))
return munchify(recur(obj, path, value))
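# Illustrative sketch (not part of the original module): building nested
# structures, including list items, from a dotted path with an index.
def _example_set_to_object():
    obj = set_to_object({}, 'data.items[0].quantity', 5)
    assert obj == {'data': {'items': [{'quantity': 5}]}}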
def wait_to_date(date_stamp):
date = parse(date_stamp)
LOGGER.log_message(Message("date: {}".format(date.isoformat()), "INFO"))
now = get_now()
LOGGER.log_message(Message("now: {}".format(now.isoformat()), "INFO"))
wait_seconds = (date - now).total_seconds()
wait_seconds += 2
if wait_seconds < 0:
return 0
return wait_seconds
def merge_dicts(a, b):
"""Merge dicts recursively.
Origin: https://www.xormedia.com/recursively-merge-dictionaries-in-python/
"""
if not isinstance(b, dict):
return b
result = deepcopy(a)
for k, v in b.iteritems():
if k in result and isinstance(result[k], dict):
result[k] = merge_dicts(result[k], v)
else:
result[k] = deepcopy(v)
return munchify(result)
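# Illustrative sketch (not part of the original module): nested keys from the
# second dict are merged into (and override) the first one.
def _example_merge_dicts():
    merged = merge_dicts({'value': {'amount': 100}, 'title': 'old'},
                         {'value': {'currency': 'UAH'}, 'title': 'new'})
    assert merged == {'value': {'amount': 100, 'currency': 'UAH'}, 'title': 'new'}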
def create_data_dict(path_to_value=None, value=None):
"""Create a dictionary with one key, 'data'.
If `path_to_value` is not given, set the key's value
to an empty dictionary.
If `path_to_value` is given, set the key's value to `value`.
In case it's the latter and if `value` is not set,
the key's value is set to `None`.
Please note that `path_to_value` is relative to the parent dictionary,
thus, you may need to prepend `data.` to your path string.
To better understand how `path_to_value` is handled,
please refer to the `set_to_object()` function.
"""
data_dict = {'data': {}}
if path_to_value:
data_dict = set_to_object(data_dict, path_to_value, value)
return data_dict
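# Illustrative sketch (not part of the original module): note that the path is
# relative to the returned dict, so it starts with 'data.'.
def _example_create_data_dict():
    assert create_data_dict() == {'data': {}}
    assert create_data_dict('data.title', 'foo') == {'data': {'title': 'foo'}}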
def munch_dict(arg=None, data=False):
if arg is None:
arg = {}
if data:
arg['data'] = {}
return munchify(arg)
def get_id_from_object(obj):
regex = r'(^[filq]-[0-9a-fA-F]{8}): '
title = obj.get('title', '')
if title:
if not isinstance(title, STR_TYPES):
raise TypeError('title must be one of %s' % str(STR_TYPES))
obj_id = re.match(regex, title)
if obj_id and len(obj_id.groups()) >= 1:
return obj_id.group(1)
description = obj.get('description', '')
if description:
if not isinstance(description, STR_TYPES):
raise TypeError('description must be one of %s' % str(STR_TYPES))
obj_id = re.match(regex, description)
if obj_id and len(obj_id.groups()) >= 1:
return obj_id.group(1)
raise ValueError('could not find object ID in "title": "%s", '
'"description": "%s"' % (title, description))
def get_id_from_string(string):
return re.match(r'[dc]\-[0-9a-fA-F]{8}', string).group(0)
def get_object_type_by_id(object_id):
prefixes = {'q': 'questions', 'f': 'features', 'i': 'items', 'l': 'lots'}
return prefixes.get(object_id[0])
def get_complaint_index_by_complaintID(data, complaintID):
if not data:
return 0
for index, element in enumerate(data):
if element['complaintID'] == complaintID:
break
else:
index += 1
return index
def get_object_index_by_id(data, object_id):
if not data:
return 0
for index, element in enumerate(data):
element_id = get_id_from_object(element)
if element_id == object_id:
break
else:
index += 1
return index
def get_object_by_id(data, given_object_id, slice_element, object_id):
"""
data: object to slice
given_object_id: with what id we should compare
slice_element: what path should be extracted (e.g. from { key: val } extract key )
object_id: what property is id (e.g. from { id: 1, name: 2 } extract id)
"""
# Slice the given object, e.g. slice bid object to lotValues object
try:
sliced_object = data[slice_element]
except KeyError:
return data
# If there is one sliced object, get the 1st element
if len(sliced_object) == 1:
return sliced_object[0]
# Compare given object id and id from sliced object
for index, element in enumerate(sliced_object):
element_id = element[object_id]
if element_id == given_object_id:
return element
return sliced_object[0]
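# Illustrative sketch (not part of the original module): slicing a made-up bid
# down to the lotValue that matches a given lot id.
def _example_get_object_by_id():
    bid = {'lotValues': [{'relatedLot': 'lot-1', 'value': {'amount': 100}},
                         {'relatedLot': 'lot-2', 'value': {'amount': 200}}]}
    lot_value = get_object_by_id(bid, 'lot-2', 'lotValues', 'relatedLot')
    assert lot_value['value']['amount'] == 200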
def generate_test_bid_data(tender_data):
if tender_data.get('procurementMethodType', '') in (
'aboveThresholdUA',
'aboveThresholdUA.defense',
'aboveThresholdEU',
'competitiveDialogueUA',
'competitiveDialogueEU'
):
bid = test_bid_competitive_data()
bid.data.selfEligible = True
bid.data.selfQualified = True
else:
bid = test_bid_data()
if 'lots' in tender_data:
bid.data.lotValues = []
for lot in tender_data['lots']:
value = test_bid_value(lot['value']['amount'])
value['relatedLot'] = lot.get('id', '')
bid.data.lotValues.append(value)
else:
bid.data.update(test_bid_value(tender_data['value']['amount']))
if 'features' in tender_data:
bid.data.parameters = []
for feature in tender_data['features']:
parameter = {"value": fake.random_element(elements=(0.05, 0.01, 0)), "code": feature.get('code', '')}
bid.data.parameters.append(parameter)
return bid
def mult_and_round(*args, **kwargs):
return round(reduce(operator.mul, args), kwargs.get('precision', 2))
def generate_test_bid_data_second_stage(tender_data, index='0'):
bid = test_bid_data()
if index.isdigit():
index = int(index)
else:
index = 0
bid['data']['tenderers'][0]['identifier']['id'] = tender_data['shortlistedFirms'][index]['identifier']['id']
bid['data']['tenderers'][0]['identifier']['scheme'] = tender_data['shortlistedFirms'][index]['identifier']['scheme']
bid['data']['tenderers'][0]['identifier']['legalName'] = tender_data['shortlistedFirms'][index]['identifier']['legalName']
bid['data']['tenderers'][0]['name'] = tender_data['shortlistedFirms'][index]['name']
if tender_data.get('procurementMethodType', '') in ('competitiveDialogueEU.stage2', 'competitiveDialogueUA.stage2'):
bid.data.selfEligible = True
bid.data.selfQualified = True
if 'lots' in tender_data:
bid.data.lotValues = []
for lot in tender_data['lots']:
value = test_bid_value(lot['value']['amount'])
value['relatedLot'] = lot.get('id', '')
bid.data.lotValues.append(value)
else:
bid.data.update(test_bid_value(tender_data['value']['amount']))
if 'features' in tender_data:
bid.data.parameters = []
for feature in tender_data['features']:
parameter = {"value": fake.random_element(elements=(0.05, 0.01, 0)), "code": feature.get('code', '')}
bid.data.parameters.append(parameter)
return bid
def convert_amount_string_to_float(amount_string):
return float(amount_string.replace(' ', '').replace(',', '.'))
def compare_rationale_types(type1, type2):
return set(type1) == set(type2)
def delete_from_dictionary(variable, path):
if not type(path) in STR_TYPES:
raise TypeError('path must be one of: ' +
str(STR_TYPES))
return xpathdelete(variable, path, separator='.')
def dictionary_should_not_contain_path(dictionary, path):
try:
xpathget(dictionary, path, separator='.')
except KeyError:
return
raise RuntimeError("Dictionary contains path '%s'." % path)
| {
"content_hash": "6d48619353b063f66d3257d675fefa00",
"timestamp": "",
"source": "github",
"line_count": 673,
"max_line_length": 134,
"avg_line_length": 35.187221396731054,
"alnum_prop": 0.5939782948355221,
"repo_name": "kosaniak/robot_tests",
"id": "bdeb22cf828c56c115f307ee0bfc9e980ee5a5a8",
"size": "23703",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "op_robot_tests/tests_files/service_keywords.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "84517"
},
{
"name": "RobotFramework",
"bytes": "672122"
}
],
"symlink_target": ""
} |
import pyxon.decode as pd
def unobjectify(obj):
"""
Turns a python object (must be a class instance)
into the corresponding JSON data.
Example:
>>> @sprop.a # sprop annotations are needed to tell the
>>> @sprop.b # unobjectify function what parameters need
>>> @sprop.c # to be written out.
>>> class Baz(object):
>>> def __init__(self, a, b, c):
>>> self.a = a
>>> self.b = b
>>> self.c = c
>>>
>>> baz = Baz(a=1, b=2, c='three')
>>> unobjectify(baz)
{ 'a':1, 'b':2, 'c':'three' }
"""
cls = obj.__class__
# Create empty data
data = {}
sprops,cprops = _get_registered_props(cls)
# Add simple properties
for p in sprops:
data[p]=getattr(obj,p)
# Add calculated data
for p in cprops:
f2 = cprops[p][1]
data[p]=f2(getattr(obj,p))
data = pd.add_type_property(data, cls)
return data
def _get_registered_props(cls):
"""
Returns all of the registered properties for a given class.
Recursively calls up to parent classes that are inherited from.
"""
sprops = pd.class_sprops.get(cls,{}) # [name]
cprops = pd.class_cprops.get(cls,{}) # {name:(fn, inv_fn)}
if cls in pd.conc_to_abstract: # {ConcreteClass: (AbstractClass, _)}
parent_cls = pd.conc_to_abstract[cls][0]
parent_sprops, parent_cprops = _get_registered_props(parent_cls)
sprops = list(set(sprops).union(set(parent_sprops)))
cprops2 = parent_cprops.copy()
cprops2.update(cprops)
cprops = cprops2
return sprops,cprops
def obj(cls):
"""
Helper function returns a closure turning objectify into a
single argument function. This cuts down the amount of code
needed in class annotations by removing the need to write
lambda functions.
"""
return lambda d: objectify(d, cls)
def objectify(data, cls):
"""
Function takes JSON data and a target class as arguments
and returns an instance of the class created using the
JSON data.
I'm not sure whether it is a great idea to keep (un)objectify
separate from the decode module, since they need to access
some of the module-level parameters.
"""
# Create empty class
concrete_cls = pd.conc2(data, cls)
obj = concrete_cls()
sprops,cprops = _get_registered_props(cls)
# Add simple properties from data
for p in sprops:
setattr(obj, p, data[p])
# Add calculated properties from data
for p in cprops:
f1 = cprops[p][0]
setattr(obj, p, f1(data[p]))
return obj
def transform_map(kfun=lambda x: x, vfun=lambda x: x):
"""
Function that takes two functions as arguments and returns
a function that applies those functions over all of the
keys and values in a map and returns the transformed version
of the map.
kfun: function applied to all keys (default identity)
vfun: function applied to all values (default identity)
(k -> k') -> (v -> v') -> ((k, v) -> (k', v'))
"""
return lambda dct: dict([(kfun(k),vfun(v)) for k,v in dct.items()])
def transform_list(item_decoder=lambda x: x):
return lambda lst: map(item_decoder, lst)
def identity(x):
"""
Identity function is needed when performing transformations
on maps where some operation is needed on either the keys
or values, but not both.
"""
return x
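# Illustrative sketch (not part of the original module): building small
# converters for maps and lists out of per-key/per-value functions.
def _example_transforms():
    to_int_values = transform_map(vfun=int)
    assert to_int_values({'a': '1', 'b': '2'}) == {'a': 1, 'b': 2}
    assert list(transform_list(str)([1, 2])) == ['1', '2']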
| {
"content_hash": "b0f01714027a08ba3f12e4daf69a2087",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 72,
"avg_line_length": 28.516393442622952,
"alnum_prop": 0.61540672607071,
"repo_name": "k-j-m/Pyxon",
"id": "6aff4d7639431aa38a4d3a68b963afee4300b218",
"size": "3479",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyxon/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "45369"
}
],
"symlink_target": ""
} |
import base_filters
COPY_GOOGLE_DOC_KEY = '1Dr-ORDEr6iCs-i7arcBI2h6lFWdMbBFB7J8JZCdcUFQ'
USE_ASSETS = False
# Use these variables to override the default cache timeouts for this graphic
# DEFAULT_MAX_AGE = 20
# ASSETS_MAX_AGE = 300
JINJA_FILTER_FUNCTIONS = base_filters.FILTERS
| {
"content_hash": "b108d4b47d4f117e43b8748c7273bd65",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 77,
"avg_line_length": 25.636363636363637,
"alnum_prop": 0.7836879432624113,
"repo_name": "stlpublicradio/dailygraphics",
"id": "f9936de1e3f500d01be75a32df3426b8795a5191",
"size": "305",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graphic_templates/locator_map/graphic_config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AppleScript",
"bytes": "1022"
},
{
"name": "CSS",
"bytes": "24947"
},
{
"name": "HTML",
"bytes": "72135"
},
{
"name": "JavaScript",
"bytes": "585694"
},
{
"name": "Python",
"bytes": "83889"
},
{
"name": "Shell",
"bytes": "810"
}
],
"symlink_target": ""
} |
from google.cloud import aiplatform_v1beta1
def sample_cancel_batch_prediction_job():
# Create a client
client = aiplatform_v1beta1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CancelBatchPredictionJobRequest(
name="name_value",
)
# Make the request
client.cancel_batch_prediction_job(request=request)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_CancelBatchPredictionJob_sync]
| {
"content_hash": "2553b1460b656172c0bb5e00704ae7b5",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 88,
"avg_line_length": 27.58823529411765,
"alnum_prop": 0.7505330490405118,
"repo_name": "googleapis/python-aiplatform",
"id": "a80c7d6b1cef514ac624aa209bcc03732623ef69",
"size": "1509",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_batch_prediction_job_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "23977004"
},
{
"name": "Shell",
"bytes": "30668"
}
],
"symlink_target": ""
} |
import logging
import requests
from botologist.util import parse_dt, time_until
import botologist.plugin
log = logging.getLogger(__name__)
def get_next_episode_info(show, tz="UTC"):
query = {"q": show, "embed": "nextepisode"}
try:
response = requests.get(
"http://api.tvmaze.com/singlesearch/shows", query, timeout=4
)
response.raise_for_status()
except requests.exceptions.RequestException:
log.warning("TVMaze request caused an exception", exc_info=True)
return None
try:
data = response.json()
except ValueError:
log.warning("TVMaze returned invalid JSON: %r", response.text, exc_info=True)
return None
info = data["name"]
nextepisode = data.get("_embedded", {}).get("nextepisode")
if nextepisode:
log.debug("next episode data: %r", nextepisode)
dt = parse_dt(nextepisode["airstamp"], tz)
info += " - season %d, episode %d airs at %s" % (
nextepisode["season"],
nextepisode["number"],
dt.strftime("%Y-%m-%d %H:%M %z"),
)
time_until_str = time_until(dt)
if time_until_str:
info += " (in %s)" % time_until_str
else:
status = data["status"]
if status == "Ended":
info += " - cancelled :("
else:
info += " - next episode not announced yet"
return info
class TvseriesPlugin(botologist.plugin.Plugin):
def __init__(self, bot, channel):
super().__init__(bot, channel)
self.tz = self.bot.config.get("output_timezone", "UTC")
@botologist.plugin.command("nextepisode")
def nextepisode(self, msg):
info = get_next_episode_info(" ".join(msg.args), self.tz)
return info or "No show with that name found!"
| {
"content_hash": "000399888a25ee91b857019cb926b1f2",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 85,
"avg_line_length": 31.719298245614034,
"alnum_prop": 0.588495575221239,
"repo_name": "anlutro/botologist",
"id": "5d912e4cbd400dd97486df2467281d071f5b04b2",
"size": "1808",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/tvseries.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "176376"
}
],
"symlink_target": ""
} |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from tenant_schemas.models import TenantMixin
from protocolle.auxiliar.models import Instituicao
class Client(TenantMixin):
institute = models.ForeignKey(Instituicao, verbose_name=_(u'Instituição'),
blank=True, null=True)
name = models.CharField(max_length=100)
paid_until = models.DateField()
on_trial = models.BooleanField()
created_on = models.DateField(auto_now_add=True)
# default true, schema will be automatically created
# and synced when it is saved
auto_create_schema = True
def __unicode__(self):
return unicode(self.name)
class Meta:
verbose_name = u'Cliente'
verbose_name_plural = u'Clientes'
| {
"content_hash": "6af920546c52f6f4bb179cae8851fc16",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 78,
"avg_line_length": 30.615384615384617,
"alnum_prop": 0.6846733668341709,
"repo_name": "klebercode/protocolle",
"id": "d23e53f4b854119ce8bbd1719b1d6a74185da2b3",
"size": "815",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "customers/models.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "511"
},
{
"name": "HTML",
"bytes": "25408"
},
{
"name": "JavaScript",
"bytes": "3379"
},
{
"name": "Python",
"bytes": "70100"
}
],
"symlink_target": ""
} |
'''Qeez statistics queue module
* queue monitors:
$ rq-dashboard --redis_url=unix:///tmp/redis.sock?db=1 \
--bind=127.0.0.1 --port=9181 --interval=5000
or
$ rqinfo --url unix:///tmp/redis.sock?db=1
* queue worker:
$ rqworker --url unix:///tmp/redis.sock?db=1 --name my-worker-nr-x --verbose
# or
# python manage.py rqworker --name=my-worker-nr-x queue-of-db-1
'''
import logging
from time import gmtime
from rq import Queue
from qeez_stats.config import CFG
from qeez_stats.stats import stat_collector
from qeez_stats.utils import get_method_by_path, get_redis, to_str
LOG = logging.getLogger(__name__)
COLL_ID_FMT = 'stat:%s'
STAT_ID_FMT = 'stat:%s:%s'
def direct_stat_save(qeez_token, res_dc, atime=None, **kwargs):
'''Saves stat using write method
'''
if atime is None:
atime = gmtime()
try:
function = get_method_by_path(CFG['STAT_SAVE_FN'])
if function:
return function(qeez_token, atime, res_dc, **kwargs)
except Exception as exc:
if CFG['RAVEN_CLI']:
CFG['RAVEN_CLI'].user_context({
'res_dc': res_dc,
})
CFG['RAVEN_CLI'].captureException()
LOG.exception('%s @ %s', repr(exc), repr(res_dc))
return False
def enqueue_stat_save(qeez_token, res_dc, atime=None, redis_conn=None):
'''Enqueues stat for save
'''
if atime is None:
atime = gmtime()
if redis_conn is None:
redis_conn = get_redis(CFG['SAVE_REDIS'])
queue = Queue('save', connection=redis_conn)
return queue.enqueue(
CFG['STAT_SAVE_FN'], args=(qeez_token, atime, res_dc),
timeout=30, result_ttl=30, ttl=7200)
def enqueue_stat_calc(stat, qeez_token, redis_conn=None):
'''Enqueues stat for calc
'''
if redis_conn is None:
redis_conn = get_redis(CFG['QUEUE_REDIS'])
stat_token = STAT_ID_FMT % (stat, qeez_token)
queue = Queue('calc', connection=redis_conn)
stat_append = queue.enqueue(
stat_collector, stat, stat_token, timeout=30, result_ttl=7200,
ttl=7200, job_id=COLL_ID_FMT % stat)
_ = stat_append.id
return queue.enqueue(
stat, qeez_token, timeout=30, result_ttl=7200,
ttl=7200, job_id=stat_token, depends_on=stat_append)
def pull_stat_res(stat, qeez_token, redis_conn=None):
'''Pulls one stat's result
'''
if redis_conn is None:
redis_conn = get_redis(CFG['QUEUE_REDIS'])
queue = Queue('calc', connection=redis_conn)
job = queue.fetch_job(STAT_ID_FMT % (stat, qeez_token))
res = None
if job is not None:
res = job.result
if res is not None:
job.ttl = job.result_ttl = 24 * 3600
job.save()
return res
def pull_all_stat_res(stat, redis_conn=None):
'''Pulls all stat results
'''
if redis_conn is None:
redis_conn = get_redis(CFG['QUEUE_REDIS'])
queue = Queue('calc', connection=redis_conn)
job = queue.fetch_job(COLL_ID_FMT % stat)
res = None
if job is None:
return
res = job.result
if res is None:
return
out = []
for stat_token in res:
_job = queue.fetch_job(to_str(stat_token))
_res = None
if _job is not None:
_res = _job.result
if _res is not None:
out.append(_res)
return out
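# Illustrative sketch of the enqueue/pull round trip (added for
# illustration, not part of the original module; the token and the stat
# path below are hypothetical):
#
#   enqueue_stat_save('player-token-1', {'answer': 42})
#   enqueue_stat_calc('qeez_stats.stats.some_stat', 'player-token-1')
#   result = pull_stat_res('qeez_stats.stats.some_stat', 'player-token-1')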
| {
"content_hash": "e3b7423e3326b6ce3126fb1dd6207a83",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 76,
"avg_line_length": 27.683333333333334,
"alnum_prop": 0.6053582179409994,
"repo_name": "soutys/qeez_stats",
"id": "76c57083b0487623edb6def29ad2f7975de66222",
"size": "3347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qeez_stats/queues.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37427"
}
],
"symlink_target": ""
} |
from django.conf.urls.defaults import *
urlpatterns = patterns('emailstubs',
# Example:
# (r'^server/', include('server.foo.urls')),
# Uncomment the admin/doc line below and add 'django.contrib.admindocs'
# to INSTALLED_APPS to enable admin documentation:
# (r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# (r'^admin/(.*)', admin.site.root),
(r'^inbox$', 'emailqueue.inbox'),
)
| {
"content_hash": "a24ff67fae391f8e406fe053bef2a999",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 76,
"avg_line_length": 31.666666666666668,
"alnum_prop": 0.6547368421052632,
"repo_name": "eob/synckit-research",
"id": "54e82d2e2d792ba48fbc1331c1c7fbb3704c212b",
"size": "475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/emailstubs/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "65239505"
},
{
"name": "PHP",
"bytes": "15712"
},
{
"name": "Python",
"bytes": "125913"
},
{
"name": "R",
"bytes": "21637"
},
{
"name": "Shell",
"bytes": "2697"
}
],
"symlink_target": ""
} |
import subprocess
def reboot():
subprocess.call(['sudo', 'reboot', 'now'])
| {
"content_hash": "081b4e02fac4e16c5a42c0ab07860e01",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 43,
"avg_line_length": 19.25,
"alnum_prop": 0.6753246753246753,
"repo_name": "akd001/RPi",
"id": "65ce39740bc1f3b5d831e573daf6a074e832989c",
"size": "77",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "boot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4693"
}
],
"symlink_target": ""
} |
import os.path
from cStringIO import StringIO
from flask import Flask, render_template, make_response, redirect, request,\
url_for
from werkzeug import secure_filename
from fixer import fix_sms
PROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))
OUTPUT_PATH = os.path.join(PROJECT_ROOT, '../output/')
app = Flask(__name__)
@app.route('/')
def home():
return render_template('index.html')
@app.route('/fix/', methods=['POST'])
def fix():
filename = secure_filename(request.form.get('output-filename')) or 'output'
files = request.files.getlist('input-files[]')
contacts = request.form.get('contacts')
if files[0].filename == '':
error = "You need to upload some files."
return render_template(
'index.html', output=filename, contacts=contacts, error=error)
try:
address_book = dict([line.split(':') for line in contacts.split('\n')
if ':' in line])
with open(os.path.join(OUTPUT_PATH, filename + '.xml'), 'w') as output_file:
_, missing = fix_sms(
input=files,
output=output_file,
logger=app.logger,
address_book=address_book)
except Exception as ex:
error = "An unknown error occurred."
        if app.debug:
error += "\n" + str(ex)
return render_template(
'index.html', output=filename, contacts=contacts, error=error)
return render_template(
'index.html',
output=filename,
missing=missing,
contacts=contacts,
link=url_for('get_file', filename=filename))
@app.route('/fix/<filename>.xml')
def get_file(filename):
filename = os.path.join(OUTPUT_PATH, secure_filename(filename) + '.xml')
try:
with open(filename, 'r') as output_file:
content = output_file.read()
return make_response(
content, 200, {'Content-Type': 'application/xml'})
except IOError:
return "invalid filename", 404
@app.route('/fix/sms.xsl')
def xsl():
return render_template('sms.xsl')
if __name__ == '__main__':
app.run(debug=True) | {
"content_hash": "242613946f264b6d6f864f2423839878",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 84,
"avg_line_length": 31.695652173913043,
"alnum_prop": 0.594421582075903,
"repo_name": "ianonavy/sms-fixer",
"id": "bfa336aea4a0da019e0c35dd7df512d96f5b19e5",
"size": "2210",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "191"
},
{
"name": "Python",
"bytes": "8992"
},
{
"name": "XSLT",
"bytes": "1500"
}
],
"symlink_target": ""
} |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from aldryn_newsblog.utils.migration import rename_tables_old_to_new, rename_tables_new_to_old
class Migration(SchemaMigration):
def forwards(self, orm):
rename_tables_old_to_new(db)
# Adding model 'FeaturedArticlesPlugin'
db.create_table(u'aldryn_newsblog_featuredarticlesplugin', (
('cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(related_name=u'+', unique=True, primary_key=True, to=orm['cms.CMSPlugin'])),
('app_config', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['aldryn_newsblog.NewsBlogConfig'])),
('entry_count', self.gf('django.db.models.fields.PositiveIntegerField')(default=1)),
))
db.send_create_signal(u'aldryn_newsblog', ['FeaturedArticlesPlugin'])
def backwards(self, orm):
rename_tables_new_to_old(db)
# Deleting model 'FeaturedArticlesPlugin'
db.delete_table(u'aldryn_newsblog_featuredarticlesplugin')
models = {
u'aldryn_categories.category': {
'Meta': {'object_name': 'Category'},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'rgt': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
u'aldryn_newsblog.archiveplugin': {
'Meta': {'object_name': 'ArchivePlugin'},
'app_config': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aldryn_newsblog.NewsBlogConfig']"}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"})
},
u'aldryn_newsblog.article': {
'Meta': {'ordering': "[u'-publishing_date']", 'object_name': 'Article'},
'app_config': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aldryn_newsblog.NewsBlogConfig']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aldryn_people.Person']", 'null': 'True', 'blank': 'True'}),
'categories': ('aldryn_categories.fields.CategoryManyToManyField', [], {'to': u"orm['aldryn_categories.Category']", 'symmetrical': 'False', 'blank': 'True'}),
'content': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'aldryn_newsblog_articles'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'featured_image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_featured': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'publishing_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'related': ('sortedm2m.fields.SortedManyToManyField', [], {'related_name': "'related_rel_+'", 'blank': 'True', 'to': u"orm['aldryn_newsblog.Article']"})
},
u'aldryn_newsblog.articletranslation': {
'Meta': {'unique_together': "[(u'language_code', u'slug'), (u'language_code', u'master')]", 'object_name': 'ArticleTranslation', 'db_table': "u'aldryn_newsblog_article_translation'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'lead_in': ('djangocms_text_ckeditor.fields.HTMLField', [], {'default': "u''", 'blank': 'True'}),
u'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': u"orm['aldryn_newsblog.Article']"}),
'meta_description': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'meta_title': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '234'})
},
u'aldryn_newsblog.authorsplugin': {
'Meta': {'object_name': 'AuthorsPlugin'},
'app_config': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aldryn_newsblog.NewsBlogConfig']"}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"})
},
u'aldryn_newsblog.categoriesplugin': {
'Meta': {'object_name': 'CategoriesPlugin'},
'app_config': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aldryn_newsblog.NewsBlogConfig']"}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"})
},
u'aldryn_newsblog.featuredarticlesplugin': {
'Meta': {'object_name': 'FeaturedArticlesPlugin'},
'app_config': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aldryn_newsblog.NewsBlogConfig']"}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"}),
'entry_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'aldryn_newsblog.latestentriesplugin': {
'Meta': {'object_name': 'LatestEntriesPlugin'},
'app_config': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aldryn_newsblog.NewsBlogConfig']"}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"}),
'latest_entries': ('django.db.models.fields.IntegerField', [], {'default': '5'})
},
u'aldryn_newsblog.newsblogconfig': {
'Meta': {'object_name': 'NewsBlogConfig'},
'app_data': ('app_data.fields.AppDataField', [], {'default': "'{}'"}),
'create_authors': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'list_view_placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'namespace': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '100'}),
'paginate_by': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5'}),
'search_indexed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'aldryn_newsblog.newsblogconfigtranslation': {
'Meta': {'unique_together': "[(u'language_code', u'master')]", 'object_name': 'NewsBlogConfigTranslation', 'db_table': "u'aldryn_newsblog_newsblogconfig_translation'"},
'app_title': ('django.db.models.fields.CharField', [], {'max_length': '234'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
u'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': u"orm['aldryn_newsblog.NewsBlogConfig']"})
},
u'aldryn_newsblog.relatedplugin': {
'Meta': {'object_name': 'RelatedPlugin', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'})
},
u'aldryn_newsblog.tagsplugin': {
'Meta': {'object_name': 'TagsPlugin'},
'app_config': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aldryn_newsblog.NewsBlogConfig']"}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"})
},
u'aldryn_people.group': {
'Meta': {'object_name': 'Group'},
'address': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'default': "u''", 'max_length': '75', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'aldryn_people.person': {
'Meta': {'object_name': 'Person'},
'email': ('django.db.models.fields.EmailField', [], {'default': "u''", 'max_length': '75', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aldryn_people.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'vcard_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'visual': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['filer.Image']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'all_files'", 'null': 'True', 'to': u"orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'owned_files'", 'null': 'True', 'to': u"orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_filer.file_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'filer.folder': {
'Meta': {'ordering': "(u'name',)", 'unique_together': "((u'parent', u'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'filer_owned_folders'", 'null': 'True', 'to': u"orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'children'", 'null': 'True', 'to': u"orm['filer.Folder']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'Meta': {'object_name': 'Image'},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['aldryn_newsblog'] | {
"content_hash": "dcec1707c78ef06c4d81f03f0fb3f5ef",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 195,
"avg_line_length": 86.7479674796748,
"alnum_prop": 0.5642455482661668,
"repo_name": "czpython/aldryn-newsblog",
"id": "66da86c8bb85c9b2e59145fe6fc42f9097feeba3",
"size": "21364",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "aldryn_newsblog/south_migrations/0024_auto__add_featuredarticlesplugin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "27023"
},
{
"name": "JavaScript",
"bytes": "32640"
},
{
"name": "Python",
"bytes": "947522"
},
{
"name": "Shell",
"bytes": "216"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('Localizr', '0002_auto_20171123_0213'),
]
operations = [
migrations.AlterField(
model_name='appinfo',
name='base_locale',
field=models.ForeignKey(
blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='Localizr.Locale'),
),
]
| {
"content_hash": "5574fedbc929a395afc7f809cd2be6f7",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 106,
"avg_line_length": 26.11111111111111,
"alnum_prop": 0.6085106382978723,
"repo_name": "michaelhenry/Localizr",
"id": "9057b785f951d06495f1c807edda7849089431f7",
"size": "519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/Localizr/migrations/0003_auto_20171123_0224.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "310"
},
{
"name": "HTML",
"bytes": "10878"
},
{
"name": "Procfile",
"bytes": "28"
},
{
"name": "Python",
"bytes": "78491"
},
{
"name": "Ruby",
"bytes": "6581"
},
{
"name": "Shell",
"bytes": "837"
}
],
"symlink_target": ""
} |
import matplotlib.pyplot as plt
import numpy as np
from scipy.constants import mu_0
from ...utils import omega
from ....utils import validate_type, validate_list_of_types
# Define the default component dictionaries
DEFAULT_COMP_DICT = {
"xx": {"color": "green", "label": "Imp_xx", "marker": "_", "ls": "None"},
"xy": {"color": "blue", "label": "Imp_xy", "marker": "_", "ls": "None"},
"yx": {"color": "red", "label": "Imp_yx", "marker": "_", "ls": "None"},
"yy": {"color": "yellow", "label": "Imp_yy", "marker": "_", "ls": "None"},
"zx": {"color": "brown", "label": "Tip_zx", "marker": "_", "ls": "None"},
"zy": {"color": "purple", "label": "Tip_zy", "marker": "_", "ls": "None"},
}
# Define some hidden attributes
_imp_comps = ["xx", "xy", "yx", "yy"]
def _validate_kwargs(input_dict, compare_dict):
"""
Function to deal with keyword arguments.
:param input_dict: matplotlib kwargs dictionary with custom arguments
:type input_dict: :class: `dict`
:param compare_dict: matplotlib kwargs of default to use arguments
:type compare_dict: :class: `dict`
"""
# Set the default plot kwargs
for key, val in compare_dict.items():
# Make sure they aren't already assigned
if input_dict is None:
input_dict = compare_dict.copy()
else:
if key not in input_dict:
input_dict[key] = val
# Return
return input_dict
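# A small sketch of the merge behaviour above (assumed values, not part of
# the original module): user-supplied keys win, missing keys fall back to
# the defaults.
#
#   >>> merged = _validate_kwargs({"color": "black"}, DEFAULT_COMP_DICT["xy"])
#   >>> merged["color"], merged["marker"]
#   ('black', '_')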
class BaseDataNSEMPlots:
"""
A class container of matplotlib panels for plotting
NSEM data.
"""
def __init__(self, fig=None, axes=None, **kwargs):
super().__init__(**kwargs)
self.fig = fig
self.axes = axes
@property
def fig(self):
"""Figure for plotting.
Returns
-------
matplotlib.figure.Figure
"""
return self._fig
@fig.setter
def fig(self, value):
if value is not None:
value = validate_type("fig", value, plt.Figure, cast=False)
self._fig = value
@property
def axes(self):
"""Figure for plotting.
Returns
-------
list of matplotlib.axes.Axes
"""
return self._axes
@axes.setter
def axes(self, value):
if value is not None:
value = validate_list_of_types("axes", value, plt.Axes)
self._axes = value
def setup(self):
"""
Setup up the plot window.
Should populate the
self.fig and self.axes properties
"""
raise NotImplementedError(
"Is required in subclasses of {}".format(self.__class__)
)
def draw(self):
raise NotImplementedError(
"Is required in subclasses of {}".format(self.__class__)
)
def clear_axes(self):
"""
Function to clear all of the axes
"""
for ax in self.axes:
            # Iterate over copies of the artist lists so we do not mutate
            # them while removing their items.
            for line in list(ax.lines):
                line.remove()
            for item in list(ax.collections):
                item.remove()
class TipperAmplitudeStationPlot(BaseDataNSEMPlots):
"""
Class for setting up 2 axes figure with:
tipper amplitudes | tipper phase
setup.
"""
def __init__(self):
super(TipperAmplitudeStationPlot, self).__init__()
def setup(self):
"""
Setup a station data plot figure.
"""
self.fig, axes_temp = plt.subplots(1, 2, sharex=True)
self.axes = axes_temp.ravel().tolist()
self.fig.set_size_inches((13.5, 4.0))
for ax in self.axes:
ax.set_xscale("log")
self.axes[0].invert_xaxis()
self.axes[0].set_yscale("log")
# Set labels
self.axes[0].set_xlabel("Frequency [Hz]")
self.axes[1].set_xlabel("Frequency [Hz]")
self.axes[0].set_ylabel("Tipper amplitude [V/A]")
self.axes[1].set_ylim(-180, 180)
self.axes[1].set_ylabel("Tipper angle [degrees]")
def draw(self, data_list, location):
"""
Function to draw on the axes
:param data_list: List of NSEM data objects to plot.
            Has to be of length >= 1. The first item is treated as
            observed data (it has to have relative_error and noise_floor
            assigned) and the others are plotted on top.
:param location: Location of the station to plot
"""
axes = self.axes
# Set keyword arguments
st_kwargs = {"marker": "_", "ls": "None"}
eb_kwargs = {"ls": "None"}
# Pop the data from the list
data = data_list[0]
# Apparent resistivity
data.plot_tip_amp(location, ["zx", "zy"], ax=axes[0], errorbars=True)
# Apparent phase
data.plot_app_phs(location, ["zx", "zy"], ax=axes[1], errorbars=True)
# Plot the additional data
for other_data in data_list[1::]:
# Need add symbol generation
dd_kwargs = {
"zx": {"marker": ".", "ls": "--"},
"zy": {"marker": ".", "ls": "--"},
}
# Apparent resistivity
other_data.plot_tip_amp(
location,
["zx", "zy"],
ax=axes[0],
errorbars=False,
comp_plot_dict=dd_kwargs,
)
# Apparent phase
other_data.plot_app_phs(
location,
["zx", "zy"],
ax=axes[1],
errorbars=False,
comp_plot_dict=dd_kwargs,
)
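# Illustrative call sequence for the station figures above (assumed data
# objects, not part of the original module):
#
#   plotter = TipperAmplitudeStationPlot()
#   plotter.setup()
#   plotter.draw([observed_data, predicted_data], station_location)
#   plotter.fig.savefig("station.png")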
class ApparentResPhsStationPlot(BaseDataNSEMPlots):
"""
Class for setting up 4 axes figure with:
+---------------------+----------------+
|apparent resistivity | phase |
+---------------------+----------------+
|impedance amplitudes | impedance phase|
+---------------------+----------------+
setup.
"""
def __init__(self):
super(ApparentResPhsStationPlot, self).__init__()
def setup(self):
"""
Setup a station data plot figure.
"""
self.fig, axes_temp = plt.subplots(2, 2, sharex=True)
self.axes = axes_temp.ravel().tolist()
self.fig.set_size_inches((13.5, 7.0))
# Have to deal with axes
# Set log
for ax in self.axes:
ax.set_xscale("log")
self.axes[0].invert_xaxis()
self.axes[0].set_yscale("log")
self.axes[2].set_yscale("log")
# Set labels
self.axes[2].set_xlabel("Frequency [Hz]")
self.axes[3].set_xlabel("Frequency [Hz]")
self.axes[0].set_ylabel("Apperent resistivity [Ohm m]")
self.axes[1].set_ylabel("Apperent phase [degrees]")
self.axes[1].set_ylim(-180, 180)
self.axes[2].set_ylabel("Impedance amplitude [V/A]")
self.axes[3].set_ylim(-180, 180)
self.axes[3].set_ylabel("Impedance angle [degrees]")
def draw(self, data_list, location):
"""
Function to draw on the axes
:param data_list: List of NSEM data objects to plot.
            Has to be of length >= 1. The first item is treated as
            observed data (it has to have relative_error and noise_floor
            assigned) and the others are plotted on top.
:param location: Location of the station to plot
"""
axes = self.axes
# Set keyword arguments
st_kwargs = {"marker": "_", "ls": "None"}
eb_kwargs = {"ls": "None"}
# Pop the data from the list
data = data_list[0]
# Apparent resistivity
data.plot_app_res(location, ["xy", "yx"], ax=axes[0], errorbars=True)
# Apparent phase
data.plot_app_phs(location, ["xy", "yx"], ax=axes[1], errorbars=True)
        # Impedance amplitude
data.plot_imp_amp(
location, ["xx", "xy", "yx", "yy"], ax=axes[2], errorbars=True
)
# Impedance phase
data.plot_app_phs(
location, ["xx", "xy", "yx", "yy"], ax=axes[3], errorbars=True
)
# Plot the additional data
for other_data in data_list[1::]:
# Need add symbol generation
dd_kwargs = {
"xx": {"marker": ".", "ls": "--"},
"xy": {"marker": ".", "ls": "--"},
"yx": {"marker": ".", "ls": "--"},
"yy": {"marker": ".", "ls": "--"},
}
# Apparent resistivity
other_data.plot_app_res(
location,
["xy", "yx"],
ax=axes[0],
errorbars=False,
comp_plot_dict=dd_kwargs,
)
# Apparent phase
other_data.plot_app_phs(
location,
["xy", "yx"],
ax=axes[1],
errorbars=False,
comp_plot_dict=dd_kwargs,
)
            # Impedance amplitude
other_data.plot_imp_amp(
location,
["xx", "xy", "yx", "yy"],
ax=axes[2],
errorbars=False,
comp_plot_dict=dd_kwargs,
)
# Impedance phase
other_data.plot_app_phs(
location,
["xx", "xy", "yx", "yy"],
ax=axes[3],
errorbars=False,
comp_plot_dict=dd_kwargs,
)
class DataNSEMPlotMethods(object):
"""
Class container for properties and methods for
plotting of NSEM data.
"""
def __init__(self):
"""
Just for reference
"""
pass
def plot_app_res(
self,
location,
components=["xy", "yx"],
ax=None,
errorbars=False,
comp_plot_dict=DEFAULT_COMP_DICT,
):
"""
        Plot apparent resistivity curves at a given location
        :param location: Location of the data point
        :type location: numpy.ndarray
        :param components: List of the components to plot.
            Default = ['xy','yx']
        :type components: list
        :param ax: The ax object to add the plot to. Default: None
        :type ax: :class:`axes <matplotlib.axes.Axes>`
        :param errorbars: Controls if errorbars are plotted.
            Default = False
:type errorbars: bool
:param comp_plot_dict: Dictionary with additional kwargs
for matplotlib.plot
:type comp_plot_dict: dict
"""
if ax is None:
fig, ax = plt.subplots(1, 1)
ax.invert_xaxis()
ax.set_xscale("log")
ax.set_yscale("log")
ax.set_xlabel("Frequency [Hz]")
ax.set_ylabel("Apperent resistivity [Ohm m]")
else:
fig = ax.get_figure()
for comp in components:
st_kwargs = _validate_kwargs(comp_plot_dict[comp], DEFAULT_COMP_DICT[comp])
self.station_component(location, comp, "app_res", ax=ax, **st_kwargs)
if errorbars:
eb_kwargs = _validate_kwargs(
comp_plot_dict[comp], DEFAULT_COMP_DICT[comp]
)
self.station_errorbars(location, comp, "app_res", ax=ax, **eb_kwargs)
return ax
def plot_app_phs(
self,
location,
components=["xy", "yx"],
ax=None,
errorbars=False,
comp_plot_dict=DEFAULT_COMP_DICT,
):
"""
        Plot apparent phase curves at a given location
        :param location: Location of the data point
        :type location: numpy.ndarray
        :param components: List of the components to plot.
            Default = ['xy','yx']
        :type components: list
        :param ax: The ax object to add the plot to. Default: None
        :type ax: :class:`axes <matplotlib.axes.Axes>`
        :param errorbars: Controls if errorbars are plotted.
            Default = False
:type errorbars: bool
:param comp_plot_dict: Dictionary with additional kwargs
for matplotlib.plot settings
:type comp_plot_dict: dict
"""
if ax is None:
fig, ax = plt.subplots(1, 1)
ax.invert_xaxis()
ax.set_xscale("log")
ax.set_xlabel("Frequency [Hz]")
ax.set_ylabel("Phase angle [Degrees]")
else:
fig = ax.get_figure()
for comp in components:
st_kwargs = _validate_kwargs(comp_plot_dict[comp], DEFAULT_COMP_DICT[comp])
self.station_component(location, comp, "phase", ax=ax, **st_kwargs)
if errorbars:
eb_kwargs = _validate_kwargs(
comp_plot_dict[comp], DEFAULT_COMP_DICT[comp]
)
self.station_errorbars(location, comp, "phase", ax=ax, **eb_kwargs)
return ax
def plot_imp_amp(
self,
location,
components=["xy", "yx"],
ax=None,
errorbars=False,
comp_plot_dict=DEFAULT_COMP_DICT,
):
"""
Plot impedance amplitude curves at a given location
:param location: Location of the data point
        :type location: numpy.ndarray
        :param components: List of the components to plot.
            Default = ['xy','yx']
        :type components: list
        :param ax: The ax object to add the plot to. Default: None
        :type ax: :class:`axes <matplotlib.axes.Axes>`
        :param errorbars: Controls if errorbars are plotted.
            Default = False
:type errorbars: bool
:param comp_plot_dict: Dictionary with additional kwargs
for matplotlib.plot
:type comp_plot_dict: dict
"""
if ax is None:
fig, ax = plt.subplots(1, 1)
ax.invert_xaxis()
ax.set_xscale("log")
ax.set_yscale("log")
ax.set_xlabel("Frequency [Hz]")
ax.set_ylabel("Impedance amplitude [V/A]")
else:
fig = ax.get_figure()
for comp in components:
st_kwargs = _validate_kwargs(comp_plot_dict[comp], DEFAULT_COMP_DICT[comp])
self.station_component(location, comp, "amplitude", ax=ax, **st_kwargs)
if errorbars:
eb_kwargs = _validate_kwargs(
comp_plot_dict[comp], DEFAULT_COMP_DICT[comp]
)
self.station_errorbars(location, comp, "amplitude", ax=ax, **eb_kwargs)
return ax
def plot_tip_amp(
self,
location,
components=["zx", "zy"],
ax=None,
errorbars=False,
comp_plot_dict=DEFAULT_COMP_DICT,
):
"""
Plot tipper amplitude curves at a given location
:param location: Location of the data point
        :type location: numpy.ndarray
        :param components: List of the components to plot.
            Default = ['zx','zy']
        :type components: list
        :param ax: The ax object to add the plot to. Default: None
        :type ax: :class:`axes <matplotlib.axes.Axes>`
        :param errorbars: Controls if errorbars are plotted.
            Default = False
:type errorbars: bool
:param comp_plot_dict: Dictionary with additional kwargs
for matplotlib.plot
:type comp_plot_dict: dict
"""
if ax is None:
fig, ax = plt.subplots(1, 1)
ax.invert_xaxis()
ax.set_xscale("log")
ax.set_yscale("log")
ax.set_xlabel("Frequency [Hz]")
ax.set_ylabel("Tipper magnitude [unitless]")
else:
fig = ax.get_figure()
for comp in components:
st_kwargs = _validate_kwargs(comp_plot_dict[comp], DEFAULT_COMP_DICT[comp])
self.station_component(location, comp, "amplitude", ax=ax, **st_kwargs)
if errorbars:
eb_kwargs = _validate_kwargs(
comp_plot_dict[comp], DEFAULT_COMP_DICT[comp]
)
self.station_errorbars(location, comp, "amplitude", ax=ax, **eb_kwargs)
return ax
def map_data_locations(self, ax=None, **plot_kwargs):
"""
Function that plots all receiver locations of the data
        (all discrete data locations).
:param ax: The ax object for mapping to. Default: None
:type ax: :class:`axes <matplotlib.axes.Axes>`
"""
# Default plot dict
default_dict = {"marker": "+", "c": "k", "ms": 10, "ls": "None", "zorder": 4}
# Set the default plot kwargs
for key, val in default_dict.items():
# Make sure they aren't already assigned
if key not in plot_kwargs:
plot_kwargs[key] = val
# Get unique locations
unique_locations = _unique_rows(
np.concatenate(
[
rx.locations
for src in self.survey.source_list
for rx in src.receiver_list
]
)
)
# Make the figure and the axes
if ax is None:
fig, ax = plt.subplots(1, 1)
else:
fig = ax.get_figure()
# Plot the locations
ax.plot(unique_locations[:, 0], unique_locations[:, 1], **plot_kwargs)
return (fig, ax)
def station_component(
self, location, orientation, component, ax=None, **plot_kwargs
):
"""
        :param numpy.ndarray location: Coordinates of the station to plot
:param str orientation: The orientation of the data
:param str component: The data component to plot
:param matplotlib.axes.Axes ax: Axes (optional):
:param matplotlib.lines.Line2D keyword_arguments plot_kwargs:
"""
# Sort the axes
if ax is None:
fig, ax = plt.subplots(1, 1)
else:
fig = ax.get_figure()
# Plot the data
freqs, plot_data = _get_plot_data(self, location, orientation, component)
# Plot
plot_obj = ax.plot(freqs, plot_data, **plot_kwargs)
return (fig, ax, plot_obj)
def station_errorbars(
self, location, orientation, component, ax=None, **plot_kwargs
):
"""
        :param numpy.ndarray location: Coordinates of the station to plot
:param str orientation: The orientation of the data
:param str component: The data component to plot
:param matplotlib.axes.Axes ax: Axes (optional)
:param matplotlib.lines.Line2D keyword_arguments plot_kwargs:
"""
# Sort the axes
if ax is None:
fig, ax = plt.subplots(1, 1)
else:
fig = ax.get_figure()
# Plot the data
freqs, plot_data, errorbars = _get_station_data(
self, location, orientation, component, plot_error=True
)
plot_obj = ax.errorbar(freqs, plot_data, yerr=errorbars, **plot_kwargs)
return (fig, ax, plot_obj)
def frequency_map(
self, frequency, orientation, component, ax=None, plot_error=True, **plot_kwargs
):
"""
        Function to generate an iso-frequency map
:param numpy.ndarray frequency: Frequency to be mapped
:param str orientation: The orientation of the data
:param str component: The data component to plot
:param matplotlib.axes.Axes ax: Axes (optional)
:param matplotlib.lines.Line2D keyword_arguments plot_kwargs:
"""
# Sort the axes
if ax is None:
fig, ax = plt.subplots(1, 1)
else:
fig = ax.get_figure()
# Plot the data
locs, plot_data, errorbars = _get_map_data(
self, frequency, orientation, component, plot_error
)
        # tricontourf expects x, y, z; split the station coordinates
        plot_obj = ax.tricontourf(locs[:, 0], locs[:, 1], plot_data, **plot_kwargs)
return (fig, ax, plot_obj)
# Hidden utils functions
def _get_map_data(data, frequency, orientation, component, plot_error=False):
"""
Function for getting frequency map data
"""
# Get the components
if component in ["app_res", "phase", "amplitude"]:
real_tuple = _extract_frequency_data(
data, frequency, orientation, "real", plot_error
)
imag_tuple = _extract_frequency_data(
data, frequency, orientation, "imag", plot_error
)
if plot_error:
freqs, real_data, real_std, real_floor = real_tuple
freqs, imag_data, imag_std, imag_floor = imag_tuple
# Add up the uncertainties
real_uncert = real_std * np.abs(real_data) + real_floor
imag_uncert = imag_std * np.abs(imag_data) + imag_floor
else:
freqs, real_data = real_tuple
freqs, imag_data = imag_tuple
if "app_res" in component:
comp_data = real_data + 1j * imag_data
plot_data = (1.0 / (mu_0 * omega(freqs))) * np.abs(comp_data) ** 2
if plot_error:
res_uncert = (2.0 / (mu_0 * omega(freqs))) * (
real_data * real_uncert + imag_data * imag_uncert
)
errorbars = [res_uncert, res_uncert]
elif "phase" in component:
plot_data = np.arctan2(imag_data, real_data) * (180.0 / np.pi)
if plot_error:
phs_uncert = (
(1.0 / (real_data ** 2 + imag_data ** 2))
* ((real_data * real_uncert - imag_data * imag_uncert))
) * (180.0 / np.pi)
# Scale back the errorbars
errorbars = [phs_uncert, phs_uncert]
elif "amplitude" in component:
comp_data = real_data + 1j * imag_data
plot_data = np.abs(comp_data)
if plot_error:
amp_uncert = (1.0 / plot_data) * (
(np.abs(real_data) * real_uncert)
+ (np.abs(imag_data) * imag_uncert)
)
errorbars = [amp_uncert, amp_uncert] # [low_unsert, up_unsert]
else:
if plot_error:
freqs, plot_data, std_data, floor_data = _extract_frequency_data(
                data, frequency, orientation, component, return_uncert=plot_error
)
attr_uncert = std_data * np.abs(plot_data) + floor_data
errorbars = [attr_uncert, attr_uncert]
else:
freqs, plot_data = _extract_frequency_data(
data, frequency, orientation, component, return_uncert=False
)
return (freqs, plot_data, errorbars)
def _get_station_data(data, location, orientation, component, plot_error=False):
# Get the components
if component in ["app_res", "phase", "amplitude"]:
real_tuple = _extract_location_data(
data, location, orientation, "real", plot_error
)
imag_tuple = _extract_location_data(
data, location, orientation, "imag", plot_error
)
if plot_error:
freqs, real_data, real_std, real_floor = real_tuple
freqs, imag_data, imag_std, imag_floor = imag_tuple
# Add up the uncertainties
real_uncert = real_std * np.abs(real_data) + real_floor
imag_uncert = imag_std * np.abs(imag_data) + imag_floor
else:
freqs, real_data = real_tuple
freqs, imag_data = imag_tuple
if "app_res" in component:
comp_data = real_data + 1j * imag_data
plot_data = (1.0 / (mu_0 * omega(freqs))) * np.abs(comp_data) ** 2
if plot_error:
res_uncert = (2.0 / (mu_0 * omega(freqs))) * (
real_data * real_uncert + imag_data * imag_uncert
)
errorbars = [res_uncert, res_uncert]
elif "phase" in component:
plot_data = np.arctan2(imag_data, real_data) * (180.0 / np.pi)
if plot_error:
phs_uncert = (
(1.0 / (real_data ** 2 + imag_data ** 2))
* ((real_data * real_uncert - imag_data * imag_uncert))
) * (180.0 / np.pi)
# Scale back the errorbars
errorbars = [phs_uncert, phs_uncert]
elif "amplitude" in component:
comp_data = real_data + 1j * imag_data
plot_data = np.abs(comp_data)
if plot_error:
amp_uncert = (1.0 / plot_data) * (
(np.abs(real_data) * real_uncert)
+ (np.abs(imag_data) * imag_uncert)
)
errorbars = [amp_uncert, amp_uncert] # [low_unsert, up_unsert]
else:
if plot_error:
freqs, plot_data, std_data, floor_data = _extract_location_data(
data, location, orientation, component, return_uncert=plot_error
)
attr_uncert = std_data * np.abs(plot_data) + floor_data
errorbars = [attr_uncert, attr_uncert]
else:
freqs, plot_data = _extract_location_data(
data, location, orientation, component, return_uncert=plot_error
)
if plot_error:
return (freqs, plot_data, errorbars)
else:
return (freqs, plot_data)
def _get_plot_data(data, location, orientation, component):
if "app_res" in component:
freqs, dat_r = _extract_location_data(data, location, orientation, "real")
freqs, dat_i = _extract_location_data(data, location, orientation, "imag")
dat = dat_r + 1j * dat_i
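        # Apparent resistivity from the impedance: rho_a = |Z|^2 / (mu_0 * omega)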
plot_data = 1.0 / (mu_0 * omega(freqs)) * np.abs(dat) ** 2
elif "phase" in component:
freqs, dat_r = _extract_location_data(data, location, orientation, "real")
freqs, dat_i = _extract_location_data(data, location, orientation, "imag")
plot_data = np.arctan2(dat_i, dat_r) * (180.0 / np.pi)
elif "amplitude" in component:
freqs, dat_r = _extract_location_data(data, location, orientation, "real")
freqs, dat_i = _extract_location_data(data, location, orientation, "imag")
dat_complex = dat_r + 1j * dat_i
plot_data = np.abs(dat_complex)
else:
freqs, plot_data = _extract_location_data(
data, location, orientation, component
)
return (freqs, plot_data)
def _extract_frequency_data(
data, frequency, orientation, component, return_uncert=False
):
"""
Function to extract data at given frequency
"""
src = data.survey.get_sources_by_frequency(frequency)
rx_list = [
rx
for rx in src.receiver_list
if rx.orientation == orientation and rx.component == component
]
# Check the number of the rx
if len(rx_list) == 1:
rx = rx_list[0]
elif len(rx_list) == 0:
        # Should add a warning that the rx doesn't exist
if return_uncert:
# Returning all empty arrays
return (np.array([]), np.array([]), np.array([]), np.array([]))
return (np.array([]), np.array([]))
else:
        # Should be a more specific exception
        raise Exception("Too many receivers of the same type, orientation and component")
loc_arr = rx.locations
data_arr = data[src, rx]
if return_uncert:
        std_arr = data.relative_error[src, rx]
        floor_arr = data.noise_floor[src, rx]
if return_uncert:
return (loc_arr, data_arr, std_arr, floor_arr)
return (loc_arr, data_arr)
def _extract_location_data(data, location, orientation, component, return_uncert=False):
"""
Function to extract data at given location
"""
freq_list = []
data_list = []
std_list = []
floor_list = []
for src in data.survey.source_list:
rx_list = [
rx
for rx in src.receiver_list
if rx.orientation == orientation and rx.component == component
]
if len(rx_list) == 0:
if return_uncert:
return (np.array([]), np.array([]), np.array([]), np.array([]))
return (np.array([]), np.array([]))
else:
rx = rx_list[0]
ind_loc = np.sqrt(np.sum((rx.locations[:, :2] - location) ** 2, axis=1)) < 0.1
if np.any(ind_loc):
freq_list.append(src.frequency)
data_list.append(data[src, rx][ind_loc])
if return_uncert:
index = data.index_dictionary[src][rx]
std_list.append(data.relative_error[index][ind_loc])
floor_list.append(data.noise_floor[index][ind_loc])
if return_uncert:
return (
np.array(freq_list),
np.concatenate(data_list),
np.concatenate(std_list),
np.concatenate(floor_list),
)
return (np.array(freq_list), np.concatenate(data_list))
## Hidden utility functions
# Unique row function-should be moved to utils
def _unique_rows(array):
"""
Finds and returns unique rows in an array
"""
array = np.ascontiguousarray(array)
unique_array = np.unique(array.view([("", array.dtype)] * array.shape[1]))
return unique_array.view(array.dtype).reshape(
(unique_array.shape[0], array.shape[1])
)
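# Illustrative sketch (assumed values, not part of the original module):
# duplicate station locations collapse to a single row.
#
#   >>> _unique_rows(np.array([[0.0, 0.0], [1.0, 2.0], [0.0, 0.0]]))
#   array([[0., 0.],
#          [1., 2.]])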
| {
"content_hash": "f5aa22ee98bbc52d435706ef828e3344",
"timestamp": "",
"source": "github",
"line_count": 885,
"max_line_length": 88,
"avg_line_length": 33.159322033898306,
"alnum_prop": 0.5323383084577115,
"repo_name": "simpeg/simpeg",
"id": "701aa106f60b06c2eef9bfc0e26751fd3e6b7a6b",
"size": "29346",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "SimPEG/electromagnetics/natural_source/utils/plot_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "685"
},
{
"name": "Python",
"bytes": "3476002"
}
],
"symlink_target": ""
} |
"""
WSGI config for autoscaling project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "autoscaling.settings")
application = get_wsgi_application()
| {
"content_hash": "51262c59544e78ee44177a9acb9fa1de",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 24.9375,
"alnum_prop": 0.7744360902255639,
"repo_name": "cuongnb14/autoscaling-paas",
"id": "5a7920a8da21dba6db29d87d903b9f52731ee4b3",
"size": "399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/dashboard/config/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "79177"
},
{
"name": "Dockerfile",
"bytes": "875"
},
{
"name": "HTML",
"bytes": "56037"
},
{
"name": "JavaScript",
"bytes": "1367411"
},
{
"name": "Python",
"bytes": "52850"
},
{
"name": "Shell",
"bytes": "3457"
}
],
"symlink_target": ""
} |
import code
import traceback
import cgi
import sys
import json
import gc
from StringIO import StringIO
import clastic
def make_eval_app():
resources = {"global_contexts": {}}
routes = [
('/', get_console_html),
('/console/<eval_context>', get_console_html),
('/eval/<eval_context>', eval_command),
]
return clastic.Application(routes, resources=resources)
def get_console_html(request, global_contexts, eval_context=None):
if eval_context is None:
return clastic.redirect(
request.path + 'console/{0}'.format(len(global_contexts)))
path, _, _ = request.path.rsplit('/', 2)
callback = path + '/eval/{0}'.format(eval_context)
return clastic.Response(
CONSOLE_HTML.replace("CALLBACK_URL", callback), mimetype="text/html")
def eval_command(request, eval_context, global_contexts):
if eval_context not in global_contexts:
global_contexts[eval_context] = EvalContext()
ctx = global_contexts[eval_context]
resp = ctx.eval_line(request.values['command'])
complete = resp is not None
    if gc.is_tracked(ctx.last_object):
href = '/meta/object/' + str(id(ctx.last_object))
else:
href = ''
if complete:
resp = {'complete': True, 'data': cgi.escape(resp), 'href': href}
else:
resp = {'complete': False, 'data': '', 'href': ''}
return clastic.Response(json.dumps(resp), mimetype="application/json")
_sys_displayhook = sys.displayhook
class EvalContext(object):
def __init__(self):
self.last_object = None
self.locals = {}
self.sofar = []
self._sys_displayhook = sys.displayhook
self.last_cmd = 0
def _displayhook(self, value):
self.last_object = value
return _sys_displayhook(value)
def eval_line(self, line):
'''
evaluate a single line of code; returns None if more lines required to compile,
a string if input is complete
'''
try:
cmd = code.compile_command("\n".join(self.sofar + [line]))
if cmd: # complete command
self.sofar = []
buff = StringIO()
sys.stdout = buff
sys.stderr = buff
try:
sys.displayhook = self._displayhook
exec cmd in self.locals
sys.displayhook = _sys_displayhook
return buff.getvalue()
except:
return traceback.format_exc()
else: # incomplete command
self.sofar.append(line)
return
except (OverflowError, ValueError, SyntaxError) as e:
self.sofar = []
return repr(e)
finally: # ensure sys.stdout / sys.stderr back to normal
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
sys.displayhook = _sys_displayhook
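# Illustrative sketch of the incremental compile loop above (Python 2,
# added for illustration, not part of the original module):
#
#   >>> ctx = EvalContext()
#   >>> ctx.eval_line('for i in range(2):')   # incomplete block -> None
#   >>> ctx.eval_line('    print i')          # still incomplete -> None
#   >>> ctx.eval_line('')                     # blank line closes the block
#   '0\n1\n'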
# TODO: use a two column table for better cut + paste
# <tr> <td> >>> </td> <td> OUTPUT </td> </tr>
CONSOLE_HTML = '''
<!doctype html>
<html>
<head>
<meta charset="utf-8" />
<title>Console</title>
<script src="//ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js"></script>
<link rel="stylesheet" href="//cdnjs.cloudflare.com/ajax/libs/highlight.js/8.4/styles/default.min.css">
<script src="//cdnjs.cloudflare.com/ajax/libs/highlight.js/8.4/highlight.min.js"></script>
<style>
.cli_output {
bottom: 0;
}
#cli_input, #console, #prompt, code {
font-size: 15px;
font-family: "Lucida Console", Monaco, "Bitstream Vera Sans Mono", monospace;
white-space: pre;
}
.error {
background: #FEE;
}
.output {
background: #EFF;
}
</style>
</head>
<body>
<div style="position:absolute; bottom:0; width: 100%">
<div id="console" style="overflow:scroll; height:400px; width: 100%">
<table id="console_out"></table>
</div>
<span id="prompt" style="width: 3em">>>></span>
<input type="text" id="cli_input" style="width: 50%"></input>
</div>
<script>
$('#cli_input').keyup(function(event) {
if(event.keyCode == 13) {
process_input();
}
});
function console_append(prompt, val, href) {
if(href) {
val = '<a href="' + href +'">' + val + '</a>';
}
if(prompt == '') {
if(val.indexOf("Traceback") === 0) {
var code_class = "error";
} else {
var code_class = "output";
}
var newrow = '<td colspan=2 class="' + code_class + '"><code>' +
val + '</code></td>';
} else {
var newrow = '<td style="width: 3em">' + prompt + '</td>' +
'<td><code class="python">' + val + '</code></td>';
}
newrow = '<tr>' + newrow + '</tr>';
$('#console_out').append(newrow);
$('#console tr:last td:last code.python').each(
function(i, block) {
hljs.highlightBlock(block);
});
$('#console').scrollTop($('#console')[0].scrollHeight);
}
function process_input() {
var val = $('#cli_input').val();
console_append($("#prompt").text(), val.replace(/ /g, '&nbsp;'));
$('#cli_input').val('');
$.ajax({
type: "POST",
url: "CALLBACK_URL",
data: {"command": val},
success: function(data) {
if(data.complete) {
var prompt = ">>>";
} else {
var prompt = "...";
}
$("#prompt").text(prompt);
if(data.data != '') {
console_append('', data.data, data.href);
}
}
});
}
</script>
</body>
</html>
'''
| {
"content_hash": "b18182e2b4f9ab0e75b02accba7f9f6f",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 107,
"avg_line_length": 29.247474747474747,
"alnum_prop": 0.5294422379554481,
"repo_name": "paypal/support",
"id": "afe899300b246db98ac4db3c832ce0d9a15f8a57",
"size": "5791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "support/meta_service/eval_server.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1332"
},
{
"name": "Jupyter Notebook",
"bytes": "62314"
},
{
"name": "Python",
"bytes": "200095"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^', include('newsfeed.urls')),
url(r'^', include('premises.urls')),
url(r'^', include('profiles.urls')),
url(r'^blog/', include('blog.urls')),
url('', include('social.apps.django_app.urls', namespace='social')),
url(r'^admin/', include(admin.site.urls)),
)
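# Hedged illustration (not part of the original urls.py): the three r'^' includes are
# tried first, in the order listed, so a path falls through to later entries only when
# the earlier apps do not resolve it. A quick check in a shell (pre-2.0 Django API):
#
#     from django.core.urlresolvers import resolve
#     resolve('/admin/')   # -> ResolverMatch for the admin index view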
| {
"content_hash": "b294912490ada3e791489148fa1bee91",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 72,
"avg_line_length": 32.46153846153846,
"alnum_prop": 0.6516587677725119,
"repo_name": "beratdogan/arguman.org",
"id": "b311ce5dc1c676d09702d145d4eabd19c212fa06",
"size": "422",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/main/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "28834"
},
{
"name": "JavaScript",
"bytes": "15095"
},
{
"name": "Python",
"bytes": "107663"
}
],
"symlink_target": ""
} |
"""
This script is used for checking the correctness of statistics reported
by the gem5 simulator. It can exercise certain components in the memory
subsystem. The reported values can be compared against a validated set
of statistics.
"""
import m5
import argparse
import importlib
from m5.objects import Root, MemorySize
from gem5.components.boards.test_board import TestBoard
def generator_factory(
generator_class: str, generator_cores: int, mem_size: MemorySize
):
if generator_class == "LinearGenerator":
from gem5.components.processors.linear_generator import LinearGenerator
return LinearGenerator(
duration="250us",
rate="40GB/s",
num_cores=generator_cores,
max_addr=mem_size,
)
elif generator_class == "RandomGenerator":
from gem5.components.processors.random_generator import RandomGenerator
return RandomGenerator(
duration="250us",
rate="40GB/s",
num_cores=generator_cores,
max_addr=mem_size,
)
elif generator_class == "GUPSGenerator":
if generator_cores != 1:
raise ValueError(
"Only one core should be used with GUPSGenerator. "
"In order to use multiple cores of GUPS generator, use either "
"GUPSGeneratorEP or GUPSGeneratorPAR."
)
from gem5.components.processors.gups_generator import GUPSGenerator
table_size = f"{int(mem_size / 2)}B"
return GUPSGenerator(
0, table_size, update_limit=1000, clk_freq="2GHz"
)
elif generator_class == "GUPSGeneratorEP":
from gem5.components.processors.gups_generator_ep import (
GUPSGeneratorEP,
)
table_size = f"{int(mem_size / 2)}B"
return GUPSGeneratorEP(
generator_cores, 0, table_size, update_limit=1000, clk_freq="2GHz"
)
elif generator_class == "GUPSGeneratorPAR":
from gem5.components.processors.gups_generator_par import (
GUPSGeneratorPAR,
)
table_size = f"{int(mem_size / 2)}B"
return GUPSGeneratorPAR(
generator_cores, 0, table_size, update_limit=1000, clk_freq="2GHz"
)
else:
raise ValueError(f"Unknown generator class {generator_class}")
def cache_factory(cache_class: str):
if cache_class == "NoCache":
from gem5.components.cachehierarchies.classic.no_cache import NoCache
return NoCache()
elif cache_class == "PrivateL1":
from gem5.components.cachehierarchies\
.classic.private_l1_cache_hierarchy import (
PrivateL1CacheHierarchy,
)
return PrivateL1CacheHierarchy(l1d_size="32KiB", l1i_size="32KiB")
elif cache_class == "PrivateL1PrivateL2":
from gem5.components.cachehierarchies\
.classic.private_l1_private_l2_cache_hierarchy import (
PrivateL1PrivateL2CacheHierarchy,
)
return PrivateL1PrivateL2CacheHierarchy(
l1d_size="32KiB", l1i_size="32KiB", l2_size="256KiB"
)
elif cache_class == "MESITwoLevel":
from gem5.components.cachehierarchies\
.ruby.mesi_two_level_cache_hierarchy import (
MESITwoLevelCacheHierarchy,
)
return MESITwoLevelCacheHierarchy(
l1i_size="32KiB",
l1i_assoc="8",
l1d_size="32KiB",
l1d_assoc="8",
l2_size="256KiB",
l2_assoc="4",
num_l2_banks=1,
)
else:
raise ValueError(f"The cache class {cache_class} is not supported.")
parser = argparse.ArgumentParser(
description="A traffic generator that can be used to test a gem5 "
"memory component."
)
parser.add_argument(
"generator_class",
type=str,
help="The class of generator to use.",
choices=[
"LinearGenerator",
"RandomGenerator",
"GUPSGenerator",
"GUPSGeneratorEP",
"GUPSGeneratorPAR",
],
)
parser.add_argument(
"generator_cores", type=int, help="The number of generator cores to use."
)
parser.add_argument(
"cache_class",
type=str,
help="The cache class to import and instantiate.",
choices=["NoCache", "PrivateL1", "PrivateL1PrivateL2", "MESITwoLevel"],
)
parser.add_argument(
"mem_module",
type=str,
help="The python module to import for memory.",
)
parser.add_argument(
"mem_class", type=str, help="The memory class to import and instantiate."
)
parser.add_argument(
"mem_args",
nargs="*",
help="The arguments needed to instantiate the memory class.",
)
args = parser.parse_args()
cache_hierarchy = cache_factory(args.cache_class)
memory_class = getattr(
importlib.import_module(args.mem_module), args.mem_class
)
memory = memory_class(*args.mem_args)
generator = generator_factory(
args.generator_class, args.generator_cores, memory.get_size()
)
# We use the Test Board. This is a special board to run traffic generation
# tasks.
motherboard = TestBoard(
clk_freq="3GHz",
processor=generator, # We pass the traffic generator as the processor.
memory=memory,
cache_hierarchy=cache_hierarchy,
)
root = Root(full_system=False, system=motherboard)
m5.instantiate()
generator.start_traffic()
print("Beginning simulation!")
exit_event = m5.simulate()
print(
"Exiting @ tick {} because {}.".format(m5.curTick(), exit_event.getCause())
)
| {
"content_hash": "69de0c0579bc6f6ea0397f926b0b6d37",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 79,
"avg_line_length": 28.931578947368422,
"alnum_prop": 0.643441877387666,
"repo_name": "gem5/gem5",
"id": "ede49937ac99511a0cbab163338e700af556ede6",
"size": "7047",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable",
"path": "tests/gem5/traffic_gen/simple_traffic_run.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "145626"
},
{
"name": "Awk",
"bytes": "3386"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "C",
"bytes": "3927153"
},
{
"name": "C++",
"bytes": "42960484"
},
{
"name": "CMake",
"bytes": "133888"
},
{
"name": "Dockerfile",
"bytes": "34102"
},
{
"name": "Emacs Lisp",
"bytes": "1914"
},
{
"name": "Forth",
"bytes": "354"
},
{
"name": "Fortran",
"bytes": "15436"
},
{
"name": "HTML",
"bytes": "146414"
},
{
"name": "Hack",
"bytes": "139769"
},
{
"name": "Java",
"bytes": "6966"
},
{
"name": "M4",
"bytes": "42624"
},
{
"name": "Makefile",
"bytes": "39573"
},
{
"name": "Perl",
"bytes": "23784"
},
{
"name": "Python",
"bytes": "8079781"
},
{
"name": "Roff",
"bytes": "8754"
},
{
"name": "SCSS",
"bytes": "2971"
},
{
"name": "SWIG",
"bytes": "173"
},
{
"name": "Scala",
"bytes": "5328"
},
{
"name": "Shell",
"bytes": "95638"
},
{
"name": "Starlark",
"bytes": "25668"
},
{
"name": "SuperCollider",
"bytes": "8869"
},
{
"name": "Vim Script",
"bytes": "4343"
},
{
"name": "sed",
"bytes": "3897"
}
],
"symlink_target": ""
} |
import logging
from typing import Dict, Any
from rflambda.sentinel2.new_sentinel2_event import NewSentinel2Event
from rflambda.model import handler
logger = logging.getLogger(__name__)
def handle(event: Dict[str, Any], context: Dict[str, Any]):
logger.info('Parsing s3 information from SNS event')
logger.debug('Event: %s', event)
parsed_event = NewSentinel2Event.parse(event)
return handler(parsed_event, context)
| {
"content_hash": "31ad82f9234fb887d1f66d3d34ad76aa",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 68,
"avg_line_length": 31.071428571428573,
"alnum_prop": 0.7517241379310344,
"repo_name": "aaronxsu/raster-foundry",
"id": "f1443bc4a0b4779d46dc3c1a34d49f603a4b0d5a",
"size": "435",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "app-lambda/rflambda/sentinel2/handle.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1711183"
},
{
"name": "Dockerfile",
"bytes": "1678"
},
{
"name": "Groovy",
"bytes": "7315"
},
{
"name": "HTML",
"bytes": "637187"
},
{
"name": "JavaScript",
"bytes": "1393081"
},
{
"name": "Python",
"bytes": "141967"
},
{
"name": "Scala",
"bytes": "1728005"
},
{
"name": "Shell",
"bytes": "43620"
}
],
"symlink_target": ""
} |
import math
import types
import ctypes
import numpy as np
from numpy import ctypeslib
# from numpy.ctypeslib import _typecodes
from numba import llvm_types, _ext
from numba.minivect.minitypes import *
from numba.minivect import miniast, minitypes
__all__ = minitypes.__all__ + [
'O', 'b', 'i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8',
'f4', 'f8', 'f16', 'c8', 'c16', 'c32', 'd', 'f', 'i',
]
class NumbaType(minitypes.Type):
is_numba_type = True
class TupleType(NumbaType, minitypes.ObjectType):
is_tuple = True
name = "tuple"
size = 0
class ListType(NumbaType, minitypes.ObjectType):
is_list = True
name = "list"
size = 0
class IteratorType(NumbaType, minitypes.ObjectType):
is_iterator = True
subtypes = ['base_type']
def __init__(self, base_type, **kwds):
super(IteratorType, self).__init__(**kwds)
self.base_type = base_type
def __repr__(self):
return "iterator<%s>" % (self.base_type,)
class PHIType(NumbaType):
"""
Type for phi() values.
"""
is_phi = True
class ModuleType(NumbaType, minitypes.ObjectType):
"""
Represents a type for modules.
Attributes:
is_numpy_module: whether the module is the numpy module
module: in case of numpy, the numpy module or a submodule
"""
is_module = True
is_numpy_module = False
module = None
def __repr__(self):
if self.is_numpy_module:
return 'numpy'
else:
return 'ModuleType'
class NumpyAttributeType(NumbaType, minitypes.ObjectType):
"""
Type for attributes of a numpy (sub)module.
Attributes:
module: the numpy (sub)module
attr: the attribute name (str)
"""
is_numpy_attribute = True
module = None
attr = None
def __repr__(self):
return "%s.%s" % (self.module.__name__, self.attr)
class NumpyDtypeType(NumbaType, minitypes.ObjectType):
is_numpy_dtype = True
dtype = None
def resolve(self):
return _map_dtype(self.dtype)
class EllipsisType(NumbaType, minitypes.ObjectType):
is_ellipsis = True
def __eq__(self, other):
return other.is_ellipsis
def __repr__(self):
return "..."
class SliceType(NumbaType, minitypes.ObjectType):
is_slice = True
def __eq__(self, other):
return other.is_slice
def __repr__(self):
return ":"
class NewAxisType(NumbaType, minitypes.ObjectType):
is_newaxis = True
def __eq__(self, other):
return other.is_newaxis
def __repr__(self):
return "newaxis"
class GlobalType(NumbaType):
is_global = True
class BuiltinType(NumbaType):
is_builtin = True
class ModuleAttributeType(NumbaType):
is_module_attr = True
class RangeType(NumbaType):
is_range = True
tuple_ = TupleType()
phi = PHIType()
module_type = ModuleType()
#
### Type shorthands
#
O = object_
b = bool_
i1 = int8
i2 = int16
i = i4 = int32
i8 = int64
u1 = uint8
u2 = uint16
u4 = uint32
u8 = uint64
f = f4 = float_
d = f8 = double
f16 = float128
c8 = complex64
c16 = complex128
c32 = complex256
class NumbaTypeMapper(minitypes.TypeMapper):
def to_llvm(self, type):
if type.is_array:
return _numpy_array
elif type.is_complex:
return lc.Type.struct([type.base_type, type.base_type])
elif type.is_py_ssize_t:
return llvm_types._llvm_py_ssize_t
elif type.is_object:
return llvm_types._pyobject_head_struct_p
return super(NumbaTypeMapper, self).to_llvm(type)
def from_python(self, value):
if isinstance(value, np.ndarray):
dtype = _map_dtype(value.dtype)
return minitypes.ArrayType(dtype, value.ndim,
is_c_contig=value.flags['C_CONTIGUOUS'],
is_f_contig=value.flags['F_CONTIGUOUS'])
elif isinstance(value, tuple):
return tuple_
elif isinstance(value, types.ModuleType):
return module_type
else:
return super(NumbaTypeMapper, self).from_python(value)
def _map_dtype(dtype):
"""
>>> _map_dtype(np.dtype(np.int32))
int32
>>> _map_dtype(np.dtype(np.int64))
int64
>>> _map_dtype(np.dtype(np.object))
PyObject *
>>> _map_dtype(np.dtype(np.float64))
double
>>> _map_dtype(np.dtype(np.complex128))
complex128
"""
item_idx = int(math.log(dtype.itemsize, 2))
if dtype.kind == 'i':
return [i1, i2, i4, i8][item_idx]
elif dtype.kind == 'u':
return [u1, u2, u4, u8][item_idx]
elif dtype.kind == 'f':
if dtype.itemsize == 2:
pass # half floats not supported yet
elif dtype.itemsize == 4:
return f4
elif dtype.itemsize == 8:
return f8
elif dtype.itemsize == 16:
return f16
elif dtype.kind == 'b':
return i1
elif dtype.kind == 'c':
if dtype.itemsize == 8:
return c8
elif dtype.itemsize == 16:
return c16
elif dtype.itemsize == 32:
return c32
elif dtype.kind == 'O':
return O
raise NotImplementedError("dtype %s not supported" % (dtype,))
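# Illustrative note (not part of the original doctest): the same lookup also covers the
# unsigned and boolean kinds, e.g. _map_dtype(np.dtype(np.uint16)) -> u2 and
# _map_dtype(np.dtype(np.bool_)) -> i1, per the branches above.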
if __name__ == '__main__':
import doctest
doctest.testmod()
| {
"content_hash": "c71524b95ae4104607cc92b94a249996",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 79,
"avg_line_length": 23.67111111111111,
"alnum_prop": 0.5903116785580172,
"repo_name": "teoliphant/numba",
"id": "3dcc4a985d096b19bb1e5a5344920ae6e90b44ce",
"size": "5326",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numba/_numba_types.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [],
"symlink_target": ""
} |
import argparse
import json
import os.path
from joblib import Parallel, delayed
import tweepy
from tflda.timelines import download_timelines
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = 'Twitter Friends LDA',
epilog = 'lol moar tw33tz', add_help = 'How to use',
prog = 'python 1_tflda.py <args>')
parser.add_argument("--api-key", required = True,
help = "OAuth API key.")
parser.add_argument("--api-secret", required = True,
help = "OAuth API secret.")
parser.add_argument("--access-key", required = True,
help = "OAuth access key.")
parser.add_argument("--access-secret", required = True,
help = "OAuth access secret.")
# Optional arguments.
parser.add_argument("-o", "--output", default = "data",
help = "Path to directory where intermediate data will be stored. [DEFAULT: ./data]")
args = vars(parser.parse_args())
outdir = os.path.join(".", args['output'])
if not os.path.exists(outdir):
os.mkdir(outdir)
auth = tweepy.OAuthHandler(args['api_key'], args['api_secret'])
auth.set_access_token(args['access_key'], args['access_secret'])
api = tweepy.API(auth)
# First step: gather the list of friends.
friends = api.friends_ids('magsol')
out = Parallel(n_jobs = -1, verbose = 10)(
delayed(download_timelines)(
f, args['api_key'], args['api_secret'],
args['access_key'], args['access_secret']) for f in friends)
# Create the output dictionary to be json-serialized.
final = {'{}'.format(fid): d for fid, d in out}
fp = open(os.path.join(outdir, "raw_data.json"), "w")
json.dump(final, fp)
fp.close()
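# Hedged example invocation (the credentials below are placeholders, not real values):
#
#     python 1_tflda.py --api-key KEY --api-secret SECRET \
#         --access-key TOKEN --access-secret TOKEN_SECRET -o data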
| {
"content_hash": "f79e33c17036a1c28b584582b3bb8839",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 93,
"avg_line_length": 36.53191489361702,
"alnum_prop": 0.6249271986022131,
"repo_name": "magsol/twitter-friends-lda",
"id": "8d2bbc827f2e3de43b718c503a40ac17201ec0fd",
"size": "1717",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "1_tflda.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8693"
}
],
"symlink_target": ""
} |
"""
__MT_post__TransitionType.py_____________________________________________________
Automatically generated AToM3 syntactic object (DO NOT MODIFY DIRECTLY)
Author: gehan
Modified: Sun Feb 15 10:31:26 2015
_________________________________________________________________________________
"""
from ASGNode import *
from ATOM3Type import *
from ATOM3Text import *
from ATOM3String import *
from graph_MT_post__TransitionType import *
class MT_post__TransitionType(ASGNode, ATOM3Type):
def __init__(self, parent = None):
ASGNode.__init__(self)
ATOM3Type.__init__(self)
self.superTypes = ['MT_post__MetaModelElement_S']
self.graphClass_ = graph_MT_post__TransitionType
self.isGraphObjectVisual = True
if(hasattr(self, '_setHierarchicalLink')):
self._setHierarchicalLink(False)
if(hasattr(self, '_setHierarchicalNode')):
self._setHierarchicalNode(False)
self.parent = parent
self.MT_post__cardinality=ATOM3Text('\n#===============================================================================\n# You can access the value of the current node\'s attribute value by: attr_value.\n# If the current node shall be created you MUST initialize it here!\n# You can access a node labelled n by: PreNode(\'n\').\n# To access attribute x of node n, use: PreNode(\'n\')[\'x\'].\n# Note that the attribute values are those before the match is rewritten.\n# The order in which this code is executed depends on the label value\n# of the encapsulating node.\n# The given action must return the new value of the attribute.\n#===============================================================================\n\nreturn attr_value\n', 80,15 )
self.MT_post__classtype=ATOM3Text('\n#===============================================================================\n# You can access the value of the current node\'s attribute value by: attr_value.\n# If the current node shall be created you MUST initialize it here!\n# You can access a node labelled n by: PreNode(\'n\').\n# To access attribute x of node n, use: PreNode(\'n\')[\'x\'].\n# Note that the attribute values are those before the match is rewritten.\n# The order in which this code is executed depends on the label value\n# of the encapsulating node.\n# The given action must return the new value of the attribute.\n#===============================================================================\n\nreturn attr_value\n', 80,15 )
self.MT_post__name=ATOM3Text('\n#===============================================================================\n# You can access the value of the current node\'s attribute value by: attr_value.\n# If the current node shall be created you MUST initialize it here!\n# You can access a node labelled n by: PreNode(\'n\').\n# To access attribute x of node n, use: PreNode(\'n\')[\'x\'].\n# Note that the attribute values are those before the match is rewritten.\n# The order in which this code is executed depends on the label value\n# of the encapsulating node.\n# The given action must return the new value of the attribute.\n#===============================================================================\n\nreturn attr_value\n', 80,15 )
self.MT_label__=ATOM3String('', 20)
self.MT_pivotOut__=ATOM3String('', 20)
self.generatedAttributes = {'MT_post__cardinality': ('ATOM3Text', ),
'MT_post__classtype': ('ATOM3Text', ),
'MT_post__name': ('ATOM3Text', ),
'MT_label__': ('ATOM3String', ),
'MT_pivotOut__': ('ATOM3String', ) }
self.realOrder = ['MT_post__cardinality','MT_post__classtype','MT_post__name','MT_label__','MT_pivotOut__']
self.directEditing = [0,0,0,1,1]
def clone(self):
cloneObject = MT_post__TransitionType( self.parent )
for atr in self.realOrder:
cloneObject.setAttrValue(atr, self.getAttrValue(atr).clone() )
ASGNode.cloneActions(self, cloneObject)
return cloneObject
def copy(self, other):
ATOM3Type.copy(self, other)
for atr in self.realOrder:
self.setAttrValue(atr, other.getAttrValue(atr) )
ASGNode.copy(self, other)
def preCondition (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.preCondition(actionID, params)
else: return None
def postCondition (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.postCondition(actionID, params)
else: return None
def preAction (self, actionID, * params):
if actionID == self.CREATE:
self.autoIncrLabel(params)
if self.graphObject_:
return self.graphObject_.preAction(actionID, params)
else: return None
def postAction (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.postAction(actionID, params)
else: return None
def QOCA(self, params):
"""
QOCA Constraint Template
NOTE: DO NOT select a POST/PRE action trigger
Constraints will be added/removed in a logical manner by other mechanisms.
"""
return # <---- Remove this to use QOCA
""" Get the high level constraint helper and solver """
from Qoca.atom3constraints.OffsetConstraints import OffsetConstraints
oc = OffsetConstraints(self.parent.qocaSolver)
"""
Example constraint, see Kernel/QOCA/atom3constraints/OffsetConstraints.py
For more types of constraints
"""
oc.fixedWidth(self.graphObject_, self.graphObject_.sizeX)
oc.fixedHeight(self.graphObject_, self.graphObject_.sizeY)
def autoIncrLabel(self, params):
#===============================================================================
# Auto increment the label
#===============================================================================
# If there is already one, ignore
if not self.MT_label__.isNone(): return
# Get the maximum label of all MT_post__ elements
label = 0
for nt in self.parent.ASGroot.listNodes:
if nt.startswith('MT_post__'):
for node in self.parent.ASGroot.listNodes[nt]:
currLabel = 0
try:
currLabel = int(node.MT_label__.getValue())
except:
pass
if currLabel > label:
label = currLabel
# The label of this instance will be the max label + 1
self.MT_label__.setValue(str(label + 1))
| {
"content_hash": "b9cb223c8191d2cf159c50ca9b0eba83",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 752,
"avg_line_length": 55.78151260504202,
"alnum_prop": 0.5697499246761073,
"repo_name": "levilucio/SyVOLT",
"id": "8400366e0e278837c367a488c8979449388f6311",
"size": "6638",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UMLRT2Kiltera_MM/MT_post__TransitionType.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
from fabric.api import run, task, sudo, put
from fabric.utils import puts
from fabric.operations import open_shell
from fabric.context_managers import settings
from maestro.config import default_settings
from maestro.decorators import hosts_required
@task
@hosts_required
def memory():
"""
Shows free memory
"""
with default_settings():
run('free -m')
@task
@hosts_required
def run_command(command):
"""
Shows the output from the specified command
:param command: Command to execute
"""
with default_settings():
try:
run(command)
except: # ignore python traceback output from command that returns non-zero
pass
@task
@hosts_required
def upload_file(src, dest, mode=None):
"""
Uploads a local file
:param src: Path to the local file
:param dest: Destination path on the host
:param mode: Mode to set the remote file
"""
if mode:
mode = int(mode)
with default_settings():
put(src, dest, use_sudo=True, mode=mode)
@task
@hosts_required
def shell():
"""
Spawns a shell on the remote instance
"""
with settings(parallel=False):
open_shell()
@task
@hosts_required
def update_check():
"""
Shows update status
"""
with default_settings():
sudo('apt-get update > /dev/null 2>&1')
run('if [ -e /var/lib/update-notifier/updates-available ]; then cat '\
'/var/lib/update-notifier/updates-available; else '\
'echo "Unavailable"; fi')
@task
@hosts_required
def update_system(dist_upgrade=False):
"""
Updates system
"""
upgrade = "apt-get -y upgrade"
if dist_upgrade or (isinstance(dist_upgrade, str) and dist_upgrade.lower() == 'true'):
upgrade = "apt-get -y dist-upgrade"
sudo('apt-get update && {0}'.format(upgrade))
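# Hedged usage sketch (the host name is a placeholder; assumes the Fabric 1.x CLI this
# module is written against):
#
#     fab -H web1.example.com update_system:dist_upgrade=True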
@task
@hosts_required
def uptime():
"""
Shows system uptime
"""
with default_settings():
run('uptime')
| {
"content_hash": "3e57cf09f15e1c5271db3965709ebf63",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 90,
"avg_line_length": 21.67391304347826,
"alnum_prop": 0.6283851554663992,
"repo_name": "ehazlett/fabric-maestro",
"id": "87d965ef5f471cde3a450d8d54237bebb5c3206a",
"size": "2594",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "maestro/system.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "50079"
},
{
"name": "Ruby",
"bytes": "883"
},
{
"name": "Shell",
"bytes": "625"
}
],
"symlink_target": ""
} |
import vim
def plugin_enabled_modules():
v_enabled = vim.eval('g:pandoc#modules#enabled')
v_disabled = vim.eval('g:pandoc#modules#disabled')
return [m for m in v_enabled if m not in v_disabled]
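# Illustrative example (the values are assumptions, not the plugin's defaults): with
#   let g:pandoc#modules#enabled = ['folding', 'command']
#   let g:pandoc#modules#disabled = ['command']
# plugin_enabled_modules() would return ['folding'].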
| {
"content_hash": "32345b64848e573d2d7783a0d3b432a9",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 56,
"avg_line_length": 34.5,
"alnum_prop": 0.6956521739130435,
"repo_name": "pathing/my_exvim",
"id": "f9aec7939423d58f0cb787efb8a66ecbd78e3f3a",
"size": "207",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vimfiles/bundle/vim-pandoc/pythonx/vim_pandoc/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "816"
},
{
"name": "Batchfile",
"bytes": "5122"
},
{
"name": "CSS",
"bytes": "14771"
},
{
"name": "CoffeeScript",
"bytes": "1402"
},
{
"name": "Erlang",
"bytes": "2864"
},
{
"name": "HTML",
"bytes": "14608"
},
{
"name": "Inno Setup",
"bytes": "10016"
},
{
"name": "JavaScript",
"bytes": "1064"
},
{
"name": "Makefile",
"bytes": "1239"
},
{
"name": "Python",
"bytes": "41957"
},
{
"name": "Ruby",
"bytes": "17194"
},
{
"name": "Shell",
"bytes": "10104"
},
{
"name": "VimL",
"bytes": "3752274"
}
],
"symlink_target": ""
} |
import os
import hashlib
import constants
import sqlite3
HERE = os.path.dirname(__file__)
def get_path(key):
return os.path.join(HERE, "data", key)
def get_hash(string):
h = hashlib.new('md5')
h.update(string)
return h.hexdigest()
def put(files):
db_entries = []
for filename, contents in files.items():
filekey = get_hash(contents)
with open(get_path(filekey), 'w') as file_pointer:
file_pointer.write(contents)
entry = (filename, filekey)
db_entries.append(entry)
conn = sqlite3.connect(constants.DBFILE)
c = conn.cursor()
all_hashes = "ZZZ".join(entry[1] for entry in db_entries)
all_filenames = repr(sorted(files.keys()))
set_id = get_hash(all_hashes + all_filenames)
for entry in db_entries:
c.execute('''
INSERT INTO files VALUES
(?, ?, ?)''', (set_id,) + entry) # entry is a (filename, filekey) pair.
conn.commit()
conn.close()
return set_id
def get(set_id):
if not set_id:
return {}
conn = sqlite3.connect(constants.DBFILE)
c = conn.cursor()
key_list = list(c.execute('''SELECT filename, filekey FROM files WHERE fileset=?''', (set_id,)))
conn.commit()
conn.close()
files = {}
for filename, filekey in key_list:
with open(get_path(filekey), 'r') as datafile:
files[filename] = datafile.read()
return files
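# Hedged round-trip sketch (assumes the `files` table and the data/ directory referenced
# above already exist):
#
#     set_id = put({'draft.txt': 'Hello, world.'})
#     assert get(set_id) == {'draft.txt': 'Hello, world.'}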
| {
"content_hash": "6f29a9bc2a3c316ec661dc04e3083b4e",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 100,
"avg_line_length": 28.816326530612244,
"alnum_prop": 0.6111898016997167,
"repo_name": "wnavarre/email-dictator",
"id": "bd850ec55259c729972621b54204a8d7db147ae4",
"size": "1412",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/files/io.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3619"
},
{
"name": "Python",
"bytes": "18972"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class ZminValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="zmin", parent_name="densitymapbox", **kwargs):
super(ZminValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {"zauto": False}),
role=kwargs.pop("role", "info"),
**kwargs
)
| {
"content_hash": "9a8c0c55dce064978e58541a4f7f454a",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 82,
"avg_line_length": 39.30769230769231,
"alnum_prop": 0.6027397260273972,
"repo_name": "plotly/python-api",
"id": "0ed2c47cb365e5447f636664370c67bf2526a346",
"size": "511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/densitymapbox/_zmin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
"""Shortcuts
.. module:: core.shortcuts
:platform: Linux, Unix
:synopsis: Shortcuts for core app
.. moduleauthor:: Nickolas Fox <tarvitz@blacklibrary.ru>
"""
# coding: utf-8
from django.shortcuts import render_to_response
from django.template import RequestContext
def direct_to_template(request, template, context=None, processors=None):
"""return response object
:param request: Django ``HttpRequest`` instance
:param template: template file place on filesystem and stored in
template directory ex. ``'accounts/profile.html'``
:param context: ``dict`` instance with render context
.. code-block:: python
{'context': True, 'time': datetime.now()}
:param processors: context processors
:returns: ``HttpResponse`` object instance
"""
context = context or {}
processors = processors or []
return render_to_response(
template, context,
context_instance=RequestContext(request, processors=processors))
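# Hedged usage sketch (the template path and context are illustrative):
#
#     def profile(request):
#         return direct_to_template(request, 'accounts/profile.html',
#                                   {'context': True})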
| {
"content_hash": "e2b2047f9e739165ab8a787776bd5887",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 73,
"avg_line_length": 30.181818181818183,
"alnum_prop": 0.6927710843373494,
"repo_name": "tarvitz/djtp",
"id": "b223457181231b0f3c2cdb04d362a9f1659796a1",
"size": "996",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/core/shortcuts.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "86715"
},
{
"name": "HTML",
"bytes": "12887"
},
{
"name": "JavaScript",
"bytes": "13426"
},
{
"name": "Python",
"bytes": "99550"
},
{
"name": "Shell",
"bytes": "555"
}
],
"symlink_target": ""
} |
from datetime import datetime, timedelta
import hashlib
import os
import random
import tempfile
import time
import unittest
from glob import glob
from py4j.protocol import Py4JJavaError
from pyspark import shuffle, RDD
from pyspark.resource import ExecutorResourceRequests, ResourceProfileBuilder,\
TaskResourceRequests
from pyspark.serializers import CloudPickleSerializer, BatchedSerializer, PickleSerializer,\
MarshalSerializer, UTF8Deserializer, NoOpSerializer
from pyspark.sql import SparkSession
from pyspark.testing.utils import ReusedPySparkTestCase, SPARK_HOME, QuietTest, have_numpy
from pyspark.testing.sqlutils import have_pandas
global_func = lambda: "Hi"
class RDDTests(ReusedPySparkTestCase):
def test_range(self):
self.assertEqual(self.sc.range(1, 1).count(), 0)
self.assertEqual(self.sc.range(1, 0, -1).count(), 1)
self.assertEqual(self.sc.range(0, 1 << 40, 1 << 39).count(), 2)
def test_id(self):
rdd = self.sc.parallelize(range(10))
id = rdd.id()
self.assertEqual(id, rdd.id())
rdd2 = rdd.map(str).filter(bool)
id2 = rdd2.id()
self.assertEqual(id + 1, id2)
self.assertEqual(id2, rdd2.id())
def test_empty_rdd(self):
rdd = self.sc.emptyRDD()
self.assertTrue(rdd.isEmpty())
def test_sum(self):
self.assertEqual(0, self.sc.emptyRDD().sum())
self.assertEqual(6, self.sc.parallelize([1, 2, 3]).sum())
def test_to_localiterator(self):
rdd = self.sc.parallelize([1, 2, 3])
it = rdd.toLocalIterator()
self.assertEqual([1, 2, 3], sorted(it))
rdd2 = rdd.repartition(1000)
it2 = rdd2.toLocalIterator()
self.assertEqual([1, 2, 3], sorted(it2))
def test_to_localiterator_prefetch(self):
# Test that we fetch the next partition in parallel
# We do this by returning the current time and:
# reading the first elem, waiting, and reading the second elem
# If not in parallel then these would be at different times
# But since they are being computed in parallel we see the time
# is "close enough" to the same.
rdd = self.sc.parallelize(range(2), 2)
times1 = rdd.map(lambda x: datetime.now())
times2 = rdd.map(lambda x: datetime.now())
times_iter_prefetch = times1.toLocalIterator(prefetchPartitions=True)
times_iter = times2.toLocalIterator(prefetchPartitions=False)
times_prefetch_head = next(times_iter_prefetch)
times_head = next(times_iter)
time.sleep(2)
times_next = next(times_iter)
times_prefetch_next = next(times_iter_prefetch)
self.assertTrue(times_next - times_head >= timedelta(seconds=2))
self.assertTrue(times_prefetch_next - times_prefetch_head < timedelta(seconds=1))
def test_save_as_textfile_with_unicode(self):
# Regression test for SPARK-970
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = b''.join(open(p, 'rb').read()
for p in glob(tempFile.name + "/part-0000*"))
self.assertEqual(x, raw_contents.strip().decode("utf-8"))
def test_save_as_textfile_with_utf8(self):
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x.encode("utf-8")])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = b''.join(open(p, 'rb').read()
for p in glob(tempFile.name + "/part-0000*"))
self.assertEqual(x, raw_contents.strip().decode('utf8'))
def test_transforming_cartesian_result(self):
# Regression test for SPARK-1034
rdd1 = self.sc.parallelize([1, 2])
rdd2 = self.sc.parallelize([3, 4])
cart = rdd1.cartesian(rdd2)
result = cart.map(lambda x_y3: x_y3[0] + x_y3[1]).collect()
def test_transforming_pickle_file(self):
# Regression test for SPARK-2601
data = self.sc.parallelize([u"Hello", u"World!"])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsPickleFile(tempFile.name)
pickled_file = self.sc.pickleFile(tempFile.name)
pickled_file.map(lambda x: x).collect()
def test_cartesian_on_textfile(self):
# Regression test for
path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
a = self.sc.textFile(path)
result = a.cartesian(a).collect()
(x, y) = result[0]
self.assertEqual(u"Hello World!", x.strip())
self.assertEqual(u"Hello World!", y.strip())
def test_cartesian_chaining(self):
# Tests for SPARK-16589
rdd = self.sc.parallelize(range(10), 2)
self.assertSetEqual(
set(rdd.cartesian(rdd).cartesian(rdd).collect()),
set([((x, y), z) for x in range(10) for y in range(10) for z in range(10)])
)
self.assertSetEqual(
set(rdd.cartesian(rdd.cartesian(rdd)).collect()),
set([(x, (y, z)) for x in range(10) for y in range(10) for z in range(10)])
)
self.assertSetEqual(
set(rdd.cartesian(rdd.zip(rdd)).collect()),
set([(x, (y, y)) for x in range(10) for y in range(10)])
)
def test_zip_chaining(self):
# Tests for SPARK-21985
rdd = self.sc.parallelize('abc', 2)
self.assertSetEqual(
set(rdd.zip(rdd).zip(rdd).collect()),
set([((x, x), x) for x in 'abc'])
)
self.assertSetEqual(
set(rdd.zip(rdd.zip(rdd)).collect()),
set([(x, (x, x)) for x in 'abc'])
)
def test_union_pair_rdd(self):
# SPARK-31788: test if pair RDDs can be combined by union.
rdd = self.sc.parallelize([1, 2])
pair_rdd = rdd.zip(rdd)
unionRDD = self.sc.union([pair_rdd, pair_rdd])
self.assertEqual(
set(unionRDD.collect()),
set([(1, 1), (2, 2), (1, 1), (2, 2)])
)
self.assertEqual(unionRDD.count(), 4)
def test_deleting_input_files(self):
# Regression test for SPARK-1025
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write(b"Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: filtered_data.count())
def test_sampling_default_seed(self):
# Test for SPARK-3995 (default seed setting)
data = self.sc.parallelize(range(1000), 1)
subset = data.takeSample(False, 10)
self.assertEqual(len(subset), 10)
def test_aggregate_mutable_zero_value(self):
# Test for SPARK-9021; uses aggregate and treeAggregate to build dict
# representing a counter of ints
from collections import defaultdict
# Show that single or multiple partitions work
data1 = self.sc.range(10, numSlices=1)
data2 = self.sc.range(10, numSlices=2)
def seqOp(x, y):
x[y] += 1
return x
def comboOp(x, y):
for key, val in y.items():
x[key] += val
return x
counts1 = data1.aggregate(defaultdict(int), seqOp, comboOp)
counts2 = data2.aggregate(defaultdict(int), seqOp, comboOp)
counts3 = data1.treeAggregate(defaultdict(int), seqOp, comboOp, 2)
counts4 = data2.treeAggregate(defaultdict(int), seqOp, comboOp, 2)
ground_truth = defaultdict(int, dict((i, 1) for i in range(10)))
self.assertEqual(counts1, ground_truth)
self.assertEqual(counts2, ground_truth)
self.assertEqual(counts3, ground_truth)
self.assertEqual(counts4, ground_truth)
def test_aggregate_by_key_mutable_zero_value(self):
# Test for SPARK-9021; uses aggregateByKey to make a pair RDD that
# contains lists of all values for each key in the original RDD
# list(range(...)) for Python 3.x compatibility (can't use * operator
# on a range object)
# list(zip(...)) for Python 3.x compatibility (want to parallelize a
# collection, not a zip object)
tuples = list(zip(list(range(10))*2, [1]*20))
# Show that single or multiple partitions work
data1 = self.sc.parallelize(tuples, 1)
data2 = self.sc.parallelize(tuples, 2)
def seqOp(x, y):
x.append(y)
return x
def comboOp(x, y):
x.extend(y)
return x
values1 = data1.aggregateByKey([], seqOp, comboOp).collect()
values2 = data2.aggregateByKey([], seqOp, comboOp).collect()
# Sort lists to ensure clean comparison with ground_truth
values1.sort()
values2.sort()
ground_truth = [(i, [1]*2) for i in range(10)]
self.assertEqual(values1, ground_truth)
self.assertEqual(values2, ground_truth)
def test_fold_mutable_zero_value(self):
# Test for SPARK-9021; uses fold to merge an RDD of dict counters into
# a single dict
from collections import defaultdict
counts1 = defaultdict(int, dict((i, 1) for i in range(10)))
counts2 = defaultdict(int, dict((i, 1) for i in range(3, 8)))
counts3 = defaultdict(int, dict((i, 1) for i in range(4, 7)))
counts4 = defaultdict(int, dict((i, 1) for i in range(5, 6)))
all_counts = [counts1, counts2, counts3, counts4]
# Show that single or multiple partitions work
data1 = self.sc.parallelize(all_counts, 1)
data2 = self.sc.parallelize(all_counts, 2)
def comboOp(x, y):
for key, val in y.items():
x[key] += val
return x
fold1 = data1.fold(defaultdict(int), comboOp)
fold2 = data2.fold(defaultdict(int), comboOp)
ground_truth = defaultdict(int)
for counts in all_counts:
for key, val in counts.items():
ground_truth[key] += val
self.assertEqual(fold1, ground_truth)
self.assertEqual(fold2, ground_truth)
def test_fold_by_key_mutable_zero_value(self):
# Test for SPARK-9021; uses foldByKey to make a pair RDD that contains
# lists of all values for each key in the original RDD
tuples = [(i, range(i)) for i in range(10)]*2
# Show that single or multiple partitions work
data1 = self.sc.parallelize(tuples, 1)
data2 = self.sc.parallelize(tuples, 2)
def comboOp(x, y):
x.extend(y)
return x
values1 = data1.foldByKey([], comboOp).collect()
values2 = data2.foldByKey([], comboOp).collect()
# Sort lists to ensure clean comparison with ground_truth
values1.sort()
values2.sort()
# list(range(...)) for Python 3.x compatibility
ground_truth = [(i, list(range(i))*2) for i in range(10)]
self.assertEqual(values1, ground_truth)
self.assertEqual(values2, ground_truth)
def test_aggregate_by_key(self):
data = self.sc.parallelize([(1, 1), (1, 1), (3, 2), (5, 1), (5, 3)], 2)
def seqOp(x, y):
x.add(y)
return x
def combOp(x, y):
x |= y
return x
sets = dict(data.aggregateByKey(set(), seqOp, combOp).collect())
self.assertEqual(3, len(sets))
self.assertEqual(set([1]), sets[1])
self.assertEqual(set([2]), sets[3])
self.assertEqual(set([1, 3]), sets[5])
def test_itemgetter(self):
rdd = self.sc.parallelize([range(10)])
from operator import itemgetter
self.assertEqual([1], rdd.map(itemgetter(1)).collect())
self.assertEqual([(2, 3)], rdd.map(itemgetter(2, 3)).collect())
def test_namedtuple_in_rdd(self):
from collections import namedtuple
Person = namedtuple("Person", "id firstName lastName")
jon = Person(1, "Jon", "Doe")
jane = Person(2, "Jane", "Doe")
theDoes = self.sc.parallelize([jon, jane])
self.assertEqual([jon, jane], theDoes.collect())
def test_large_broadcast(self):
N = 10000
data = [[float(i) for i in range(300)] for i in range(N)]
bdata = self.sc.broadcast(data) # 27MB
m = self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
self.assertEqual(N, m)
def test_unpersist(self):
N = 1000
data = [[float(i) for i in range(300)] for i in range(N)]
bdata = self.sc.broadcast(data) # 3MB
bdata.unpersist()
m = self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
self.assertEqual(N, m)
bdata.destroy(blocking=True)
try:
self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
except Exception:
pass
else:
raise AssertionError("job should fail after destroy the broadcast")
def test_multiple_broadcasts(self):
N = 1 << 21
b1 = self.sc.broadcast(set(range(N))) # multiple blocks in JVM
r = list(range(1 << 15))
random.shuffle(r)
s = str(r).encode()
checksum = hashlib.md5(s).hexdigest()
b2 = self.sc.broadcast(s)
r = list(set(self.sc.parallelize(range(10), 10).map(
lambda x: (len(b1.value), hashlib.md5(b2.value).hexdigest())).collect()))
self.assertEqual(1, len(r))
size, csum = r[0]
self.assertEqual(N, size)
self.assertEqual(checksum, csum)
random.shuffle(r)
s = str(r).encode()
checksum = hashlib.md5(s).hexdigest()
b2 = self.sc.broadcast(s)
r = list(set(self.sc.parallelize(range(10), 10).map(
lambda x: (len(b1.value), hashlib.md5(b2.value).hexdigest())).collect()))
self.assertEqual(1, len(r))
size, csum = r[0]
self.assertEqual(N, size)
self.assertEqual(checksum, csum)
def test_multithread_broadcast_pickle(self):
import threading
b1 = self.sc.broadcast(list(range(3)))
b2 = self.sc.broadcast(list(range(3)))
def f1():
return b1.value
def f2():
return b2.value
funcs_num_pickled = {f1: None, f2: None}
def do_pickle(f, sc):
command = (f, None, sc.serializer, sc.serializer)
ser = CloudPickleSerializer()
ser.dumps(command)
def process_vars(sc):
broadcast_vars = list(sc._pickled_broadcast_vars)
num_pickled = len(broadcast_vars)
sc._pickled_broadcast_vars.clear()
return num_pickled
def run(f, sc):
do_pickle(f, sc)
funcs_num_pickled[f] = process_vars(sc)
# pickle f1, adds b1 to sc._pickled_broadcast_vars in main thread local storage
do_pickle(f1, self.sc)
# run all for f2, should only add/count/clear b2 from worker thread local storage
t = threading.Thread(target=run, args=(f2, self.sc))
t.start()
t.join()
# count number of vars pickled in main thread, only b1 should be counted and cleared
funcs_num_pickled[f1] = process_vars(self.sc)
self.assertEqual(funcs_num_pickled[f1], 1)
self.assertEqual(funcs_num_pickled[f2], 1)
self.assertEqual(len(list(self.sc._pickled_broadcast_vars)), 0)
def test_large_closure(self):
N = 200000
data = [float(i) for i in range(N)]
rdd = self.sc.parallelize(range(1), 1).map(lambda x: len(data))
self.assertEqual(N, rdd.first())
# regression test for SPARK-6886
self.assertEqual(1, rdd.map(lambda x: (x, 1)).groupByKey().count())
def test_zip_with_different_serializers(self):
a = self.sc.parallelize(range(5))
b = self.sc.parallelize(range(100, 105))
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
a = a._reserialize(BatchedSerializer(PickleSerializer(), 2))
b = b._reserialize(MarshalSerializer())
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
# regression test for SPARK-4841
path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
t = self.sc.textFile(path)
cnt = t.count()
self.assertEqual(cnt, t.zip(t).count())
rdd = t.map(str)
self.assertEqual(cnt, t.zip(rdd).count())
# regression test for bug in _reserializer()
self.assertEqual(cnt, t.zip(rdd).count())
def test_zip_with_different_object_sizes(self):
# regression test for SPARK-5973
a = self.sc.parallelize(range(10000)).map(lambda i: '*' * i)
b = self.sc.parallelize(range(10000, 20000)).map(lambda i: '*' * i)
self.assertEqual(10000, a.zip(b).count())
def test_zip_with_different_number_of_items(self):
a = self.sc.parallelize(range(5), 2)
# different number of partitions
b = self.sc.parallelize(range(100, 106), 3)
self.assertRaises(ValueError, lambda: a.zip(b))
with QuietTest(self.sc):
# different number of batched items in JVM
b = self.sc.parallelize(range(100, 104), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# different number of items in one pair
b = self.sc.parallelize(range(100, 106), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# same total number of items, but different distributions
a = self.sc.parallelize([2, 3], 2).flatMap(range)
b = self.sc.parallelize([3, 2], 2).flatMap(range)
self.assertEqual(a.count(), b.count())
self.assertRaises(Exception, lambda: a.zip(b).count())
def test_count_approx_distinct(self):
rdd = self.sc.parallelize(range(1000))
self.assertTrue(950 < rdd.countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(float).countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(str).countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(lambda x: (x, -x)).countApproxDistinct(0.03) < 1050)
rdd = self.sc.parallelize([i % 20 for i in range(1000)], 7)
self.assertTrue(18 < rdd.countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(float).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(str).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(lambda x: (x, -x)).countApproxDistinct() < 22)
self.assertRaises(ValueError, lambda: rdd.countApproxDistinct(0.00000001))
def test_histogram(self):
# empty
rdd = self.sc.parallelize([])
self.assertEqual([0], rdd.histogram([0, 10])[1])
self.assertEqual([0, 0], rdd.histogram([0, 4, 10])[1])
self.assertRaises(ValueError, lambda: rdd.histogram(1))
# out of range
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0], rdd.histogram([0, 10])[1])
self.assertEqual([0, 0], rdd.histogram((0, 4, 10))[1])
# in range with one bucket
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual([4], rdd.histogram([0, 10])[1])
self.assertEqual([3, 1], rdd.histogram([0, 4, 10])[1])
# in range with one bucket exact match
self.assertEqual([4], rdd.histogram([1, 4])[1])
# out of range with two buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0, 0], rdd.histogram([0, 5, 10])[1])
# out of range with two uneven buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0, 0], rdd.histogram([0, 4, 10])[1])
# in range with two buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEqual([3, 2], rdd.histogram([0, 5, 10])[1])
# in range with two bucket and None
rdd = self.sc.parallelize([1, 2, 3, 5, 6, None, float('nan')])
self.assertEqual([3, 2], rdd.histogram([0, 5, 10])[1])
# in range with two uneven buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEqual([3, 2], rdd.histogram([0, 5, 11])[1])
# mixed range with two uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.0, 11.01])
self.assertEqual([4, 3], rdd.histogram([0, 5, 11])[1])
# mixed range with four uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0, 199.0, 200.0, 200.1])
self.assertEqual([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# mixed range with uneven buckets and NaN
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0,
199.0, 200.0, 200.1, None, float('nan')])
self.assertEqual([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# out of range with infinite buckets
rdd = self.sc.parallelize([10.01, -0.01, float('nan'), float("inf")])
self.assertEqual([1, 2], rdd.histogram([float('-inf'), 0, float('inf')])[1])
# invalid buckets
self.assertRaises(ValueError, lambda: rdd.histogram([]))
self.assertRaises(ValueError, lambda: rdd.histogram([1]))
self.assertRaises(ValueError, lambda: rdd.histogram(0))
self.assertRaises(TypeError, lambda: rdd.histogram({}))
# without buckets
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual(([1, 4], [4]), rdd.histogram(1))
# without buckets single element
rdd = self.sc.parallelize([1])
self.assertEqual(([1, 1], [1]), rdd.histogram(1))
# without bucket no range
rdd = self.sc.parallelize([1] * 4)
self.assertEqual(([1, 1], [4]), rdd.histogram(1))
# without buckets basic two
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual(([1, 2.5, 4], [2, 2]), rdd.histogram(2))
# without buckets with more requested than elements
rdd = self.sc.parallelize([1, 2])
buckets = [1 + 0.2 * i for i in range(6)]
hist = [1, 0, 0, 0, 1]
self.assertEqual((buckets, hist), rdd.histogram(5))
# invalid RDDs
rdd = self.sc.parallelize([1, float('inf')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
rdd = self.sc.parallelize([float('nan')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
# string
rdd = self.sc.parallelize(["ab", "ac", "b", "bd", "ef"], 2)
self.assertEqual([2, 2], rdd.histogram(["a", "b", "c"])[1])
self.assertEqual((["ab", "ef"], [5]), rdd.histogram(1))
self.assertRaises(TypeError, lambda: rdd.histogram(2))
def test_repartitionAndSortWithinPartitions_asc(self):
rdd = self.sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)], 2)
repartitioned = rdd.repartitionAndSortWithinPartitions(2, lambda key: key % 2, True)
partitions = repartitioned.glom().collect()
self.assertEqual(partitions[0], [(0, 5), (0, 8), (2, 6)])
self.assertEqual(partitions[1], [(1, 3), (3, 8), (3, 8)])
def test_repartitionAndSortWithinPartitions_desc(self):
rdd = self.sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)], 2)
repartitioned = rdd.repartitionAndSortWithinPartitions(2, lambda key: key % 2, False)
partitions = repartitioned.glom().collect()
self.assertEqual(partitions[0], [(2, 6), (0, 5), (0, 8)])
self.assertEqual(partitions[1], [(3, 8), (3, 8), (1, 3)])
def test_repartition_no_skewed(self):
num_partitions = 20
a = self.sc.parallelize(range(int(1000)), 2)
l = a.repartition(num_partitions).glom().map(len).collect()
zeros = len([x for x in l if x == 0])
self.assertTrue(zeros == 0)
l = a.coalesce(num_partitions, True).glom().map(len).collect()
zeros = len([x for x in l if x == 0])
self.assertTrue(zeros == 0)
def test_repartition_on_textfile(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
rdd = self.sc.textFile(path)
result = rdd.repartition(1).collect()
self.assertEqual(u"Hello World!", result[0])
def test_distinct(self):
rdd = self.sc.parallelize((1, 2, 3)*10, 10)
self.assertEqual(rdd.getNumPartitions(), 10)
self.assertEqual(rdd.distinct().count(), 3)
result = rdd.distinct(5)
self.assertEqual(result.getNumPartitions(), 5)
self.assertEqual(result.count(), 3)
def test_external_group_by_key(self):
self.sc._conf.set("spark.python.worker.memory", "1m")
N = 2000001
kv = self.sc.parallelize(range(N)).map(lambda x: (x % 3, x))
gkv = kv.groupByKey().cache()
self.assertEqual(3, gkv.count())
filtered = gkv.filter(lambda kv: kv[0] == 1)
self.assertEqual(1, filtered.count())
self.assertEqual([(1, N // 3)], filtered.mapValues(len).collect())
self.assertEqual([(N // 3, N // 3)],
filtered.values().map(lambda x: (len(x), len(list(x)))).collect())
result = filtered.collect()[0][1]
self.assertEqual(N // 3, len(result))
self.assertTrue(isinstance(result.data, shuffle.ExternalListOfList))
def test_sort_on_empty_rdd(self):
self.assertEqual([], self.sc.parallelize(zip([], [])).sortByKey().collect())
def test_sample(self):
rdd = self.sc.parallelize(range(0, 100), 4)
wo = rdd.sample(False, 0.1, 2).collect()
wo_dup = rdd.sample(False, 0.1, 2).collect()
self.assertSetEqual(set(wo), set(wo_dup))
wr = rdd.sample(True, 0.2, 5).collect()
wr_dup = rdd.sample(True, 0.2, 5).collect()
self.assertSetEqual(set(wr), set(wr_dup))
wo_s10 = rdd.sample(False, 0.3, 10).collect()
wo_s20 = rdd.sample(False, 0.3, 20).collect()
self.assertNotEqual(set(wo_s10), set(wo_s20))
wr_s11 = rdd.sample(True, 0.4, 11).collect()
wr_s21 = rdd.sample(True, 0.4, 21).collect()
self.assertNotEqual(set(wr_s11), set(wr_s21))
def test_null_in_rdd(self):
jrdd = self.sc._jvm.PythonUtils.generateRDDWithNull(self.sc._jsc)
rdd = RDD(jrdd, self.sc, UTF8Deserializer())
self.assertEqual([u"a", None, u"b"], rdd.collect())
rdd = RDD(jrdd, self.sc, NoOpSerializer())
self.assertEqual([b"a", None, b"b"], rdd.collect())
def test_multiple_python_java_RDD_conversions(self):
# Regression test for SPARK-5361
data = [
(u'1', {u'director': u'David Lean'}),
(u'2', {u'director': u'Andrew Dominik'})
]
data_rdd = self.sc.parallelize(data)
data_java_rdd = data_rdd._to_java_object_rdd()
data_python_rdd = self.sc._jvm.SerDeUtil.javaToPython(data_java_rdd)
converted_rdd = RDD(data_python_rdd, self.sc)
self.assertEqual(2, converted_rdd.count())
# conversion between python and java RDD threw exceptions
data_java_rdd = converted_rdd._to_java_object_rdd()
data_python_rdd = self.sc._jvm.SerDeUtil.javaToPython(data_java_rdd)
converted_rdd = RDD(data_python_rdd, self.sc)
self.assertEqual(2, converted_rdd.count())
# Regression test for SPARK-6294
def test_take_on_jrdd(self):
rdd = self.sc.parallelize(range(1 << 20)).map(lambda x: str(x))
rdd._jrdd.first()
@unittest.skipIf(not have_numpy or not have_pandas, "NumPy or Pandas not installed")
def test_take_on_jrdd_with_large_rows_should_not_cause_deadlock(self):
# Regression test for SPARK-38677.
#
# Create a DataFrame with many columns, call a Python function on each row, and take only
# the first result row.
#
# This produces large rows that trigger a deadlock involving the following three threads:
#
# 1. The Scala task executor thread. During task execution, this is responsible for reading
# output produced by the Python process. However, in this case the task has finished
# early, and this thread is no longer reading output produced by the Python process.
# Instead, it is waiting for the Scala WriterThread to exit so that it can finish the
# task.
#
# 2. The Scala WriterThread. This is trying to send a large row to the Python process, and
# is waiting for the Python process to read that row.
#
# 3. The Python process. This is trying to send a large output to the Scala task executor
# thread, and is waiting for that thread to read that output.
#
# For this test to succeed rather than hanging, the Scala MonitorThread must detect this
# deadlock and kill the Python worker.
import numpy as np
import pandas as pd
num_rows = 100000
num_columns = 134
data = np.zeros((num_rows, num_columns))
columns = map(str, range(num_columns))
df = SparkSession(self.sc).createDataFrame(pd.DataFrame(data, columns=columns))
actual = PickleSerializer().loads(df.rdd.map(list)._jrdd.first())
expected = [list(data[0])]
self.assertEqual(expected, actual)
def test_sortByKey_uses_all_partitions_not_only_first_and_last(self):
# Regression test for SPARK-5969
seq = [(i * 59 % 101, i) for i in range(101)] # unsorted sequence
rdd = self.sc.parallelize(seq)
for ascending in [True, False]:
sort = rdd.sortByKey(ascending=ascending, numPartitions=5)
self.assertEqual(sort.collect(), sorted(seq, reverse=not ascending))
sizes = sort.glom().map(len).collect()
for size in sizes:
self.assertGreater(size, 0)
def test_pipe_functions(self):
data = ['1', '2', '3']
rdd = self.sc.parallelize(data)
with QuietTest(self.sc):
self.assertEqual([], rdd.pipe('java').collect())
self.assertRaises(Py4JJavaError, rdd.pipe('java', checkCode=True).collect)
result = rdd.pipe('cat').collect()
result.sort()
for x, y in zip(data, result):
self.assertEqual(x, y)
self.assertRaises(Py4JJavaError, rdd.pipe('grep 4', checkCode=True).collect)
self.assertEqual([], rdd.pipe('grep 4').collect())
def test_pipe_unicode(self):
# Regression test for SPARK-20947
data = [u'\u6d4b\u8bd5', '1']
rdd = self.sc.parallelize(data)
result = rdd.pipe('cat').collect()
self.assertEqual(data, result)
def test_stopiteration_in_user_code(self):
def stopit(*x):
raise StopIteration()
seq_rdd = self.sc.parallelize(range(10))
keyed_rdd = self.sc.parallelize((x % 2, x) for x in range(10))
msg = "Caught StopIteration thrown from user's code; failing the task"
self.assertRaisesRegex(Py4JJavaError, msg, seq_rdd.map(stopit).collect)
self.assertRaisesRegex(Py4JJavaError, msg, seq_rdd.filter(stopit).collect)
self.assertRaisesRegex(Py4JJavaError, msg, seq_rdd.foreach, stopit)
self.assertRaisesRegex(Py4JJavaError, msg, seq_rdd.reduce, stopit)
self.assertRaisesRegex(Py4JJavaError, msg, seq_rdd.fold, 0, stopit)
self.assertRaisesRegex(Py4JJavaError, msg, seq_rdd.foreach, stopit)
self.assertRaisesRegex(Py4JJavaError, msg,
seq_rdd.cartesian(seq_rdd).flatMap(stopit).collect)
# these methods call the user function both in the driver and in the executor
# the exception raised is different according to where the StopIteration happens
# RuntimeError is raised if in the driver
# Py4JJavaError is raised if in the executor (wraps the RuntimeError raised in the worker)
self.assertRaisesRegex((Py4JJavaError, RuntimeError), msg,
keyed_rdd.reduceByKeyLocally, stopit)
self.assertRaisesRegex((Py4JJavaError, RuntimeError), msg,
seq_rdd.aggregate, 0, stopit, lambda *x: 1)
self.assertRaisesRegex((Py4JJavaError, RuntimeError), msg,
seq_rdd.aggregate, 0, lambda *x: 1, stopit)
def test_overwritten_global_func(self):
# Regression test for SPARK-27000
global global_func
self.assertEqual(self.sc.parallelize([1]).map(lambda _: global_func()).first(), "Hi")
global_func = lambda: "Yeah"
self.assertEqual(self.sc.parallelize([1]).map(lambda _: global_func()).first(), "Yeah")
def test_to_local_iterator_failure(self):
# SPARK-27548 toLocalIterator task failure not propagated to Python driver
def fail(_):
raise RuntimeError("local iterator error")
rdd = self.sc.range(10).map(fail)
with self.assertRaisesRegex(Exception, "local iterator error"):
for _ in rdd.toLocalIterator():
pass
def test_to_local_iterator_collects_single_partition(self):
# Test that partitions are not computed until requested by iteration
def fail_last(x):
if x == 9:
raise RuntimeError("This should not be hit")
return x
rdd = self.sc.range(12, numSlices=4).map(fail_last)
it = rdd.toLocalIterator()
# Only consume first 4 elements from partitions 1 and 2, this should not collect the last
# partition which would trigger the error
for i in range(4):
self.assertEqual(i, next(it))
def test_resourceprofile(self):
rp_builder = ResourceProfileBuilder()
ereqs = ExecutorResourceRequests().cores(2).memory("6g").memoryOverhead("1g")
ereqs.pysparkMemory("2g").resource("gpu", 2, "testGpus", "nvidia.com")
treqs = TaskResourceRequests().cpus(2).resource("gpu", 2)
def assert_request_contents(exec_reqs, task_reqs):
self.assertEqual(len(exec_reqs), 5)
self.assertEqual(exec_reqs["cores"].amount, 2)
self.assertEqual(exec_reqs["memory"].amount, 6144)
self.assertEqual(exec_reqs["memoryOverhead"].amount, 1024)
self.assertEqual(exec_reqs["pyspark.memory"].amount, 2048)
self.assertEqual(exec_reqs["gpu"].amount, 2)
self.assertEqual(exec_reqs["gpu"].discoveryScript, "testGpus")
self.assertEqual(exec_reqs["gpu"].resourceName, "gpu")
self.assertEqual(exec_reqs["gpu"].vendor, "nvidia.com")
self.assertEqual(len(task_reqs), 2)
self.assertEqual(task_reqs["cpus"].amount, 2.0)
self.assertEqual(task_reqs["gpu"].amount, 2.0)
assert_request_contents(ereqs.requests, treqs.requests)
rp = rp_builder.require(ereqs).require(treqs).build
assert_request_contents(rp.executorResources, rp.taskResources)
rdd = self.sc.parallelize(range(10)).withResources(rp)
return_rp = rdd.getResourceProfile()
assert_request_contents(return_rp.executorResources, return_rp.taskResources)
rddWithoutRp = self.sc.parallelize(range(10))
self.assertEqual(rddWithoutRp.getResourceProfile(), None)
def test_multiple_group_jobs(self):
import threading
group_a = "job_ids_to_cancel"
group_b = "job_ids_to_run"
threads = []
thread_ids = range(4)
thread_ids_to_cancel = [i for i in thread_ids if i % 2 == 0]
thread_ids_to_run = [i for i in thread_ids if i % 2 != 0]
# A list which records whether each job was cancelled.
# The index of the list is the index of the thread in which the job ran.
is_job_cancelled = [False for _ in thread_ids]
def run_job(job_group, index):
"""
Executes a job with the group ``job_group``. Each job sleeps for 15 seconds
and then exits.
"""
try:
self.sc.parallelize([15]).map(lambda x: time.sleep(x)) \
.collectWithJobGroup(job_group, "test rdd collect with setting job group")
is_job_cancelled[index] = False
except Exception:
# Assume that exception means job cancellation.
is_job_cancelled[index] = True
# Test if job succeeded when not cancelled.
run_job(group_a, 0)
self.assertFalse(is_job_cancelled[0])
# Run jobs
for i in thread_ids_to_cancel:
t = threading.Thread(target=run_job, args=(group_a, i))
t.start()
threads.append(t)
for i in thread_ids_to_run:
t = threading.Thread(target=run_job, args=(group_b, i))
t.start()
threads.append(t)
# Wait to make sure all jobs are executed.
time.sleep(3)
# And then, cancel one job group.
self.sc.cancelJobGroup(group_a)
# Wait until all threads launching jobs are finished.
for t in threads:
t.join()
for i in thread_ids_to_cancel:
self.assertTrue(
is_job_cancelled[i],
"Thread {i}: Job in group A was not cancelled.".format(i=i))
for i in thread_ids_to_run:
self.assertFalse(
is_job_cancelled[i],
"Thread {i}: Job in group B did not succeeded.".format(i=i))
if __name__ == "__main__":
import unittest
from pyspark.tests.test_rdd import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| {
"content_hash": "951f565c0db11697116b3d802f9007da",
"timestamp": "",
"source": "github",
"line_count": 913,
"max_line_length": 99,
"avg_line_length": 41.801752464403066,
"alnum_prop": 0.5997117778068911,
"repo_name": "milliman/spark",
"id": "60c000f6d90437484d5e0d071da3c75226121392",
"size": "38949",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/pyspark/tests/test_rdd.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "52464"
},
{
"name": "Batchfile",
"bytes": "27405"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "24622"
},
{
"name": "Dockerfile",
"bytes": "9429"
},
{
"name": "HTML",
"bytes": "41560"
},
{
"name": "HiveQL",
"bytes": "1859465"
},
{
"name": "Java",
"bytes": "4316296"
},
{
"name": "JavaScript",
"bytes": "221431"
},
{
"name": "Jupyter Notebook",
"bytes": "4310524"
},
{
"name": "Makefile",
"bytes": "2374"
},
{
"name": "PLpgSQL",
"bytes": "352905"
},
{
"name": "PowerShell",
"bytes": "3882"
},
{
"name": "Python",
"bytes": "7191174"
},
{
"name": "R",
"bytes": "1265563"
},
{
"name": "ReScript",
"bytes": "240"
},
{
"name": "Roff",
"bytes": "27389"
},
{
"name": "Scala",
"bytes": "39048900"
},
{
"name": "Shell",
"bytes": "229968"
},
{
"name": "Thrift",
"bytes": "2016"
},
{
"name": "q",
"bytes": "111129"
}
],
"symlink_target": ""
} |
import redis
class RedisConnectionPingable:
"""Checker for ensuring that redis is up and a connection is possible."""
def __init__(self, host, port=6379, db=0, auth=None):
"""Initialise a new RedisConnectionPingable."""
self.host = host
self.port = port
self.db = db
self.auth = auth
def __call__(self):
"""Connect to redis using the details from the initialiser."""
try:
r = redis.StrictRedis(
host=self.host,
port=self.port,
db=self.db,
password=self.auth,
)
return r.ping()
except redis.ConnectionError:
return False, 'cannot connect to redis'
except Exception as e:
return False, 'an uncaught exception caused RedisConnectionPingable to fail: {}'.format(e)
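# Usage sketch (illustrative only; assumes a redis server reachable on
# localhost at the default port with no auth):
if __name__ == '__main__':
    checker = RedisConnectionPingable('localhost', port=6379, db=0)
    # True when redis answers PING, otherwise (False, reason).
    print(checker())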
| {
"content_hash": "d11ea1cc63b128eb383ad7f6c6c3b52d",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 88,
"avg_line_length": 31.814814814814813,
"alnum_prop": 0.5657741559953434,
"repo_name": "dammitjim/healthysnake",
"id": "9e4f33e70ab3051953067fef59f4dfea9f0c0f1e",
"size": "859",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "healthysnake/checkers/redis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31803"
}
],
"symlink_target": ""
} |
import os
import sys
import json
BUILD_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.dirname(os.path.abspath(__file__)))))
BUILD_INTERNAL_ROOT = os.path.join(
os.path.dirname(BUILD_ROOT), 'build_internal')
sys.path.append(os.path.join(BUILD_ROOT, 'third_party'))
from recipe_engine import loader
def main(argv):
roots = [BUILD_ROOT, BUILD_INTERNAL_ROOT]
universe = loader.RecipeUniverse(
module_dirs=[os.path.join(root, 'scripts', 'slave', 'recipe_modules')
for root in roots],
recipe_dirs=[os.path.join(root, 'scripts', 'slave', 'recipes')
for root in roots])
recipes = list(universe.loop_over_recipes())
paths = []
for _, name in recipes:
recipe = universe.load_recipe(name)
recipe_file = os.path.relpath(recipe.__file__)
paths.append(recipe_file)
# Strip off the .py
expected_dir = recipe_file[:-3] + '.expected/'
if os.path.exists(expected_dir):
paths.append(expected_dir)
cmd = [sys.executable, '../unittests/recipe_simulation_test.py'] + argv[1:]
out = {
'includes': [
'recipes_test.isolate',
],
'variables': {
'command': cmd,
},
}
out['variables']['files'] = paths
print json.dumps(out, indent=2, sort_keys=True)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| {
"content_hash": "87551ec40498505bcd355647925b2ceb",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 77,
"avg_line_length": 26.54901960784314,
"alnum_prop": 0.6255539143279173,
"repo_name": "eunchong/build",
"id": "035f854c4b3ec40900a8757108355513ab8969ad",
"size": "1543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/slave/isolation/isolate_recipes.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3128"
},
{
"name": "CSS",
"bytes": "211818"
},
{
"name": "HTML",
"bytes": "429981"
},
{
"name": "JavaScript",
"bytes": "75624"
},
{
"name": "Makefile",
"bytes": "21204"
},
{
"name": "Python",
"bytes": "6143109"
},
{
"name": "Shell",
"bytes": "23512"
}
],
"symlink_target": ""
} |
import pytest
import sys
import openpnm as op
from numpy.testing import assert_approx_equal, assert_allclose
from openpnm.utils import get_mixture_model_args
import chemicals
from thermo import Chemical
is_python_38 = sys.version_info[:2] == (3, 8)
is_linux = sys.platform.startswith('linux')
condition = is_python_38 and is_linux
@pytest.mark.skipif(condition, reason="Strange error coming from numba/chemicals")
class DensityTest:
def setup_class(self):
self.net = op.network.Cubic(shape=[3, 3, 3])
self.phase = op.phase.Species(network=self.net, species='h2o')
self.phase['pore.salinity'] = 0.0 # ppt
def test_standard(self):
# Liquid water
self.phase.add_model(propname='pore.density',
model=op.models.phase.density.liquid_pure_COSTALD)
assert_approx_equal(self.phase['pore.density'].mean(), 992.345519756)
def test_ideal_gas(self):
# Water vapor
self.phase.add_model(propname='pore.density',
model=op.models.phase.density.ideal_gas)
self.phase.regenerate_models()
assert_approx_equal(self.phase['pore.density'].mean(), 0.7367280065145)
def test_water(self):
# Liquid water
self.phase.add_model(propname='pore.density',
model=op.models.phase.density.water_correlation)
self.phase.regenerate_models()
assert_approx_equal(self.phase['pore.density'].mean(), 996.9522)
def test_chemicals_for_pure_gas_molar_volume(self):
mods = [
# numba version not working for any
chemicals.virial.BVirial_Pitzer_Curl,
chemicals.virial.BVirial_Abbott,
chemicals.virial.BVirial_Tsonopoulos,
chemicals.virial.BVirial_Tsonopoulos_extended,
]
n2 = op.phase.Species(network=self.net, species='nitrogen')
n2['pore.temperature'] = 400
Vm = []
for f in mods:
Vm.append(op.models.phase.chemicals_wrapper(n2, f=f).mean())
assert_allclose(Vm, 8.795e-6, rtol=.3)
def test_chemicals_wrapper_for_pure_liq_molar_volume(self):
mods = [
chemicals.volume.Yen_Woods_saturation,
chemicals.volume.Rackett,
chemicals.volume.Yamada_Gunn,
chemicals.volume.Townsend_Hales,
chemicals.volume.Bhirud_normal,
chemicals.volume.COSTALD,
# chemicals.volume.Campbell_Thodos, # numba version not working
# chemicals.volume.SNM0, # numba version not working
# chemicals.volume.CRC_inorganic, # requires rho
# chemicals.volume.COSTALD_compressed, # requires Psat
]
h2o = op.phase.Species(network=self.net, species='water')
Vm = []
for f in mods:
Vm.append(op.models.phase.chemicals_wrapper(h2o, f=f).mean())
assert_allclose(Vm, 1.88e-5, rtol=0.2)
def test_chemicals_wrapper_for_pure_liq_with_args(self):
h2o = op.phase.Species(network=self.net, species='water')
# Using kwargs to map args to custom propnames
temp = Chemical('h2o')
h2o['pore.density'] = temp.rhol
Vm = op.models.phase.chemicals_wrapper(
phase=h2o,
f=chemicals.volume.CRC_inorganic,
rho0='pore.density',
k=1,
)
assert_allclose(Vm, 1.85309071e-05, rtol=1e-4)
# Put args directly in target
h2o['pore.Psat'] = temp.Psat
h2o['pore.Vs'] = temp.Vms
Vm = op.models.phase.chemicals_wrapper(
phase=h2o,
f=chemicals.volume.COSTALD_compressed,
rho='pore.density',
)
assert_allclose(Vm, 1.61982081e-05, rtol=1e-4)
def test_chemicals_wrapper_for_liquid_mixture(self):
h2o = op.phase.Species(network=self.net, species='h2o')
etoh = op.phase.Species(network=self.net, species='ethanol')
vodka = op.phase.LiquidMixture(network=self.net, components=[h2o, etoh])
vodka.x(h2o.name, 0.60)
vodka.x(etoh.name, 0.40)
Vm = op.models.phase.chemicals_wrapper(
phase=vodka,
f=chemicals.volume.COSTALD_mixture,
)
h2o['param.Zr'] = 0.001
etoh['param.Zr'] = 0.001
Vm = op.models.phase.chemicals_wrapper(
phase=vodka,
f=chemicals.volume.Rackett_mixture,
Zrs='param.Zr',
)
def test_liquid_pure_and_mixture(self):
pn = op.network.Demo()
h2o = op.phase.Species(network=pn, species='water')
h2o.add_model(propname='pore.density',
model=op.models.phase.density.liquid_pure_COSTALD)
Vm = chemicals.COSTALD(
T=h2o['pore.temperature'][0],
Tc=h2o['param.critical_temperature'],
Vc=h2o['param.critical_volume'],
omega=h2o['param.acentric_factor'],
)
rho_ref = chemicals.Vm_to_rho(Vm, h2o['param.molecular_weight'])
rho_calc = h2o['pore.density'][0]
assert_allclose(rho_ref, rho_calc, rtol=1e-10, atol=0)
etoh = op.phase.Species(network=pn, species='ethanol')
etoh.add_model(propname='pore.density',
model=op.models.phase.density.liquid_pure_COSTALD)
vodka = op.phase.LiquidMixture(network=pn, components=[h2o, etoh])
vodka.x(h2o.name, 0.5)
vodka.x(etoh.name, 0.5)
vodka.add_model(propname='pore.density',
model=op.models.phase.density.liquid_mixture_COSTALD)
args = get_mixture_model_args(
phase=vodka,
composition='xs',
args={
'Tcs': 'param.critical_temperature',
'Vcs': 'param.critical_volume',
'omegas': 'param.acentric_factor',
})
Vm = chemicals.COSTALD_mixture(T=vodka['pore.temperature'][0], **args)
rho_ref = chemicals.Vm_to_rho(Vm, vodka.get_mix_vals('param.molecular_weight')[0])
rho_calc = vodka['pore.density'][0]
assert_allclose(rho_ref, rho_calc, rtol=1e-10, atol=0)
if __name__ == '__main__':
t = DensityTest()
self = t
t.setup_class()
for item in t.__dir__():
if item.startswith('test'):
print('running test: '+item)
t.__getattribute__(item)()
| {
"content_hash": "390350ac42e25d69764b38c2d0a1cede",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 90,
"avg_line_length": 39.17901234567901,
"alnum_prop": 0.593666299038916,
"repo_name": "PMEAL/OpenPNM",
"id": "b617c3ba88b03dac2e9a557dc2a923804dd3b73b",
"size": "6347",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/unit/models/phase/DensityTest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "375"
},
{
"name": "Python",
"bytes": "1437146"
}
],
"symlink_target": ""
} |
from cloudify import non_existent # NOQA
from cloudify.decorators import operation
@operation
def run(**_):
pass
| {
"content_hash": "5454c4b69ba9ec65bf6d99636174f490",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 41,
"avg_line_length": 15.125,
"alnum_prop": 0.7355371900826446,
"repo_name": "geokala/cloudify-agent",
"id": "10a63f64272ad0aed85544452b3b31685a1c599d",
"size": "826",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudify_agent/tests/resources/plugins/mock-plugin-error/mock_plugin_error/tasks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Inno Setup",
"bytes": "16035"
},
{
"name": "Makefile",
"bytes": "1033"
},
{
"name": "Python",
"bytes": "1759583"
},
{
"name": "Ruby",
"bytes": "10052"
},
{
"name": "Shell",
"bytes": "20353"
}
],
"symlink_target": ""
} |
"""Adds a note type "Default".
"""
from django.utils.translation import gettext_lazy as _
from django.conf import settings
from lino.api import rt, dd
def objects():
NoteType = rt.models.notes.NoteType
yield NoteType(**dd.str2kw('name', _("Default")))
# yield noteType(
# _("Default"), build_method='appyodt', template='Default.odt')
| {
"content_hash": "70be74761aacf6a62722aa193a1116d9",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 71,
"avg_line_length": 20.11111111111111,
"alnum_prop": 0.6657458563535912,
"repo_name": "lino-framework/xl",
"id": "a0b1cac26ad93b50998a4038188dd4aa7415ce2d",
"size": "504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lino_xl/lib/notes/fixtures/std.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "186625"
},
{
"name": "HTML",
"bytes": "1417287"
},
{
"name": "JavaScript",
"bytes": "1630929"
},
{
"name": "PHP",
"bytes": "40437"
},
{
"name": "Python",
"bytes": "2395471"
}
],
"symlink_target": ""
} |
import pika
import requests
import logging
import time
import json
defaults = {'logging': {'levels': {'': 'INFO'}}}
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s+0000 - %(name)s - %(levelname)s - %(message)s')
logging.Formatter.converter = time.gmtime
logging.getLogger('pika').setLevel(logging.WARNING)
log = logging.getLogger("API_Caller")
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.queue_declare(queue='api_calls_queue', durable=True)
connection2 = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel2 = connection2.channel()
channel2.queue_declare(queue='response_queue', durable=True)
# r = requests.post("http://www.example.com", data=None, headers={})
# print r.status_code
# print r.text
log.info(' [*] Waiting for messages. To exit press CTRL+C')
def _parse_call(call):
try:
call = json.loads(call)
except Exception:
log.error("Cloudn't parse json: %s" % (call,))
return None
return (call["url"], call["body"], call["headers"])
def handle_call(ch, method, properties, call):
call = _parse_call(call)
ch.basic_ack(delivery_tag=method.delivery_tag)
if call is None:
return
url, body, headers = call
log.info("got request url=%s, body=%s, headers=%s" % (url, body, headers,))
try:
response = requests.post(url, data=body, headers=headers)
except Exception:
log.error("Could not get response. Skipping...")
return
log.info("Got response: %s, %s" % (response.status_code, response.text,))
try:
channel2.basic_publish(exchange='',
routing_key='response_queue',
body=json.dumps((response.status_code, response.text)),
properties=pika.BasicProperties(delivery_mode=2,))
except Exception as e:
log.error("Got exception while trying to publish repsponse: %s" % (e,))
channel.basic_qos(prefetch_count=1)
channel.basic_consume(handle_call,
queue='api_calls_queue')
channel.start_consuming()
| {
"content_hash": "ce5b5ee331ea1f5eba1967a3f3c7654c",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 87,
"avg_line_length": 32.621212121212125,
"alnum_prop": 0.6470041802136554,
"repo_name": "Shaywei/rabbitmq",
"id": "b9c4d62acec3dfde4f96d0da20b14f5d41a4df71",
"size": "2176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api_caller.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5112"
}
],
"symlink_target": ""
} |
import functools
import six
from .constants import MESSAGES
from .logger import logger
from .common import StateError
def inherit_docstrings(cls):
'''
Take a base class and apply its docstrings to functions
that do NOT have docstrings.
'''
bases = cls.__bases__
for base_class in bases:
for member_name in dir(cls):
member = getattr(cls, member_name)
if not member_name.startswith('_') and six.callable(member) \
and hasattr(base_class, member_name) and not member.__doc__:
if hasattr(member, '__func__'):
member.__func__.__doc__ = \
getattr(base_class, member_name).__doc__
return cls
def override(func):
'''Call the inherited function first'''
@functools.wraps(func)
def wrapped(self, *args, **kwargs):
getattr(super(self.__class__, self), func.__name__)(*args, **kwargs)
return func(self, *args, **kwargs)
return wrapped
def order_call_once(state_type, required_state, new_state=None):
'''
Provide a primitive state tracker so that call ordering can be
declared without extra boilerplate.
'''
def decorator(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
set_state = True
if self.state[state_type] == required_state:
try:
return func(self, *args, **kwargs)
except Exception:
set_state = False
raise
finally:
if set_state and new_state is not None:
self.state[state_type] = new_state
raise StateError(
MESSAGES['order_call_once_failure'].format(
state_type,
required_state, self.state[state_type]))
wrapper.orig_func = func
return wrapper
return decorator
def requires(*aerospike_versions):
def decorator(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if self.version in aerospike_versions:
return func(self, *args, **kwargs)
raise NotImplementedError(
MESSAGES['requires_failure'].format(
func.__name__, self.version, aerospike_versions))
wrapper.min_version = min(aerospike_versions)
wrapper.orig_func = func
return wrapper
return decorator
def warning(version, message):
'''Log a warning with ``message`` if ``version`` matches at call time'''
def decorator(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if self.version == version:
logger.warn(' :: '.join((func.__name__, version, message,)))
return func(self, *args, **kwargs)
wrapper.orig_func = func
return wrapper
return decorator
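# Usage sketch (illustrative only; Client, its version string and its state
# keys are hypothetical, not part of this module):
#
#     class Client(object):
#         def __init__(self):
#             self.version = '3.5'
#             self.state = {'connection': 'closed'}
#
#         @requires('3.5', '3.6')
#         def info(self):
#             return 'ok'
#
#         @order_call_once('connection', 'closed', new_state='open')
#         def connect(self):
#             return True
#
# Client().info() runs because '3.5' is listed; calling connect() a second
# time raises StateError, since the tracked state moves to 'open' after the
# first successful call.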
| {
"content_hash": "35537b7be013a7c0f0ce7d3891236a21",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 80,
"avg_line_length": 33.83720930232558,
"alnum_prop": 0.5646048109965636,
"repo_name": "benjolitz/aerospike",
"id": "92d62122b4ecd46cc6bf6eb094bd7843ca2ca8f4",
"size": "2934",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aerospike/decorators.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "19640"
},
{
"name": "Python",
"bytes": "77991"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from django.contrib.auth import get_user_model
import datetime
from treatment_sheets.models import TxSheet, TxItem
from common.models import Prescription
User = get_user_model()
class TxSheetTest(TestCase):
def setUp(self):
self.owner = User.objects.create(username='Marfalo')
def test_get_absolute_url(self):
sheet = TxSheet.objects.create(owner=self.owner)
self.assertEqual(sheet.get_absolute_url(), '/tx_sheet/{}/'.format(sheet.id))
def test_tx_sheet_saves_owner(self):
# Should not raise
TxSheet(owner=User())
def test_tx_sheet_saves_date_on_creation(self):
date = datetime.date.today()
sheet = TxSheet.objects.create(owner=self.owner, name='Poochy', comment='Euthanasia')
self.assertEqual(date, sheet.date)
class TxItemTest(TestCase):
def setUp(self):
self.sheet = TxSheet.objects.create(owner=User.objects.create())
self.drug = Prescription.objects.create(name='Drug')
def test_item_related_to_tx_sheet(self):
item = TxItem()
item.med = self.drug
item.sheet = self.sheet
item.save()
self.assertEqual(self.sheet.id, item.sheet_id)
def test_get_absolute_url(self):
item = TxItem.objects.create(sheet=self.sheet, med=self.drug, dose=11, unit='mL', freq='BID')
self.assertEqual(item.get_absolute_url(), '/tx_sheet/{}/'.format(self.sheet.id))
def test_output_instructions(self):
item = TxItem.objects.create(sheet=self.sheet, med=self.drug, dose=11, unit='mL', freq='BID')
instruction = 'Take 11 mLs of Drug twice a day.'
self.assertEqual(instruction, item.instruction)
| {
"content_hash": "db9c1bec6436b261f2a6b6b291a012c4",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 101,
"avg_line_length": 32.92307692307692,
"alnum_prop": 0.6699766355140186,
"repo_name": "onnudilol/vetcalc",
"id": "34a89aa3a57e4f1b84fc561aaaa905a89a21c761",
"size": "1712",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "treatment_sheets/tests/test_models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2989"
},
{
"name": "HTML",
"bytes": "34854"
},
{
"name": "JavaScript",
"bytes": "225144"
},
{
"name": "Python",
"bytes": "129623"
}
],
"symlink_target": ""
} |
from django.db import models
from django.contrib.auth.models import User
class UserToken(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
tokens = models.PositiveSmallIntegerField()
def deduct(self):
self.tokens -= 1
self.save()
def __str__(self):
return "%s - %s" % (self.user.username, self.tokens)
| {
"content_hash": "835c52598ed41a9f379bbb1a537c2666",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 63,
"avg_line_length": 26.5,
"alnum_prop": 0.6657681940700808,
"repo_name": "wfhio/tramcar",
"id": "aa73aa2a0dc77264ee7391785c7410d34146f056",
"size": "371",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "job_board/models/user_token.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "260"
},
{
"name": "HTML",
"bytes": "29392"
},
{
"name": "Python",
"bytes": "112378"
},
{
"name": "Ruby",
"bytes": "3313"
}
],
"symlink_target": ""
} |
import pandas as pd
import argparse
from collections import defaultdict
import os
import math
def fix_strange_metaxa_vals(vals):
for i, val in enumerate(vals):
if i == 2 and val == 'Flavobacteria':
vals[2] = 'Flavobacteriia'
if i == 3 and val == 'Micrococcales':
vals[3] = "Actinomycetales"
return vals
def main(args):
clustering = pd.read_table(args.clustering_file, sep=',', names=['contig_id', 'cluster_id'], index_col=0)
taxonomy_df = pd.read_table(args.taxonomy_file, header=None, index_col=0, names=["contig_id", "taxonomy", "bla", "bla1", "bla2"])
all_approved = pd.read_table(args.all_approved_file, header=None, names=["contig_id"], index_col=0)
checkm_taxonomy = pd.read_table(args.checkm_taxonomy_file, index_col=0)
all_approved_set = set(all_approved.index.values)
unapproved_rrna = defaultdict(int)
approved_rrna = {}
levels = ['kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species']
taxonomy_df['taxonomy'].fillna('', inplace=True)
for rrna_contig in taxonomy_df.index.values:
if rrna_contig in clustering.index:
cluster_id = clustering.loc[rrna_contig]['cluster_id']
metaxa_val = taxonomy_df.loc[rrna_contig]['taxonomy'].split(';')
metaxa_has_val = metaxa_val != ['']
if cluster_id in all_approved_set and metaxa_has_val:
checkm_val = checkm_taxonomy.loc[cluster_id]['Taxonomy'].split(';')
metaxa_val = fix_strange_metaxa_vals(metaxa_val)
matched_level = None
for i, level in enumerate(levels):
checkm_level_val, metaxa_level_val = None, None
if len(checkm_val) > i and len(metaxa_val) > i:
checkm_level_val = checkm_val[i][3:]
metaxa_level_val = metaxa_val[i]
if level == 'species':
metaxa_level_val = metaxa_val[i].replace(' ', '_')
if checkm_level_val == metaxa_level_val:
matched_level = i
else:
break
else:
matched_level = i-1
break
if cluster_id not in approved_rrna:
approved_rrna[cluster_id] = {'matching': 0, 'not matching': 0}
if matched_level >= args.level:
approved_rrna[cluster_id]['matching'] += 1
else:
approved_rrna[cluster_id]['not matching'] += 1
#print(most_detailed_level_checkm, most_detailed_level_metaxa)
#print(most_detailed_matched_level)
#print(taxonomy_df.loc[rrna_contig]['taxonomy'], checkm_taxonomy.loc[cluster_id]['Taxonomy'])
else:
unapproved_rrna[cluster_id] += 1
for cluster_id in all_approved_set:
if cluster_id not in approved_rrna:
approved_rrna[cluster_id] = {'matching': 0, 'not matching': 0}
approved_stats_df = pd.DataFrame.from_dict(approved_rrna, orient='index')
unapproved_stats_df = pd.DataFrame.from_dict(unapproved_rrna, orient='index')
unapproved_stats_df.columns = ['nr_rrna']
with open(os.path.join(args.outdir, 'stats_per_approved.tsv'), 'w') as ofh:
approved_stats_df.to_csv(ofh, sep='\t')
with open(os.path.join(args.outdir, 'stats_per_unapproved.tsv'), 'w') as ofh:
unapproved_stats_df.to_csv(ofh, sep='\t')
with open(os.path.join(args.outdir, 'summary_nr_matches.tsv'), 'w') as ofh:
print(len(approved_stats_df[approved_stats_df['matching'] != 0]), len(approved_stats_df), file=ofh)
with open(os.path.join(args.outdir, 'summary_nr_mismatches.tsv'), 'w') as ofh:
print(len(approved_stats_df[approved_stats_df['not matching'] != 0]), len(approved_stats_df), file=ofh)
# Things to output:
#
# Number of approved genomes with matching rrna
# Number of approved genomes with unmatching rrna
# Number of rrna genes per bin
#
# Number of approved genomes with > 0 matching rrna and < 2 unmatching rrna
# Matching is counted at order level
#
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--clustering_file', help="e.g. ../../Data/test_binning_and_16s_combo/clustering_nocutup.csv")
parser.add_argument('--taxonomy_file', help="e.g. ../../Data/test_binning_and_16s_combo/all_contigs.taxonomy.txt")
parser.add_argument('--all_approved_file', help="e.g. ../../Data/test_binning_and_16s_combo/list_of_all_approved_bins_nocutup.tsv")
parser.add_argument('--checkm_taxonomy_file', help="e.g. ../../Data/test_binning_and_16s_combo/checkm_tree_qa.tsv")
parser.add_argument('--level', type=int, help='Taxonomic level to run comparison on kingdom: 0, phylum: 1, class: 2, order: 3, family: 4, genus: 5, species: 6')
parser.add_argument('--outdir', help="A directory for output files")
args = parser.parse_args()
main(args)
| {
"content_hash": "b821731f7adb8daa574ba9bda634caa1",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 164,
"avg_line_length": 46.64545454545455,
"alnum_prop": 0.5928668875462872,
"repo_name": "EnvGen/toolbox",
"id": "48e6ed16d9d5a09524470b0500f32b743df1de03",
"size": "5131",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/concoct/correctly_placed_16S.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "10510"
},
{
"name": "Python",
"bytes": "173548"
},
{
"name": "Shell",
"bytes": "4027"
}
],
"symlink_target": ""
} |
import logging
import json
from colorlog import ColoredFormatter
from relay import log
def configure_logging(add_handler, log=log):
"""
Configure log records. If adding a handler, make the formatter print all
passed in key:value data.
e.g. log.info('msg', extra=dict(a=1))
generates 'msg a=1'
`add_handler` (True, False, None, or Handler instance)
if True, add a logging.StreamHandler() instance
if False, do not add any handlers.
if given a handler instance, add that to the logger
"""
_ignore_log_keys = set(logging.makeLogRecord({}).__dict__)
def _json_format(record):
extras = ' '.join(
"%s=%s" % (k, record.__dict__[k])
for k in set(record.__dict__).difference(_ignore_log_keys))
if extras:
record.msg = "%s %s" % (record.msg, extras)
return record
class ColoredJsonFormatter(ColoredFormatter):
def format(self, record):
record = _json_format(record)
return super(ColoredJsonFormatter, self).format(record)
if isinstance(add_handler, logging.Handler):
log.addHandler(add_handler)
elif add_handler is True:
if not any(isinstance(h, logging.StreamHandler) for h in log.handlers):
_h = logging.StreamHandler()
_h.setFormatter(ColoredJsonFormatter(
"%(log_color)s%(levelname)-8s %(message)s %(reset)s %(cyan)s",
reset=True
))
log.addHandler(_h)
elif not log.handlers:
log.addHandler(logging.NullHandler())
log.setLevel(logging.DEBUG)
log.propagate = False
return log
def add_zmq_log_handler(address):
import zmq.log.handlers
class JSONPubHandler(zmq.log.handlers.PUBHandler):
def format(self, record):
return json.dumps(record.__dict__)
sock = zmq.Context().socket(zmq.PUB)
sock.connect(address)
handler = JSONPubHandler(sock)
return configure_logging(handler)
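# Usage sketch (illustrative only): with a StreamHandler attached, any extra
# key/value pairs are appended to the rendered message, e.g. "deploy started a=1".
if __name__ == '__main__':
    configure_logging(True)
    log.info('deploy started', extra=dict(a=1))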
| {
"content_hash": "6c5c90ae705e61b7b0413a20f12f4302",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 79,
"avg_line_length": 33.55,
"alnum_prop": 0.6224540486835569,
"repo_name": "sailthru/relay",
"id": "3cc73335459e360aa739c65ce584d6a234c72543",
"size": "2013",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "relay/relay_logging.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2180"
},
{
"name": "JavaScript",
"bytes": "1394"
},
{
"name": "Python",
"bytes": "28908"
},
{
"name": "Shell",
"bytes": "469"
}
],
"symlink_target": ""
} |
"""
States to manage git repositories and git configuration
.. important::
Before using git over ssh, make sure your remote host fingerprint exists in
your ``~/.ssh/known_hosts`` file.
.. versionchanged:: 2015.8.8
This state module now requires git 1.6.5 (released 10 October 2009) or
newer.
"""
import errno
import logging
import os
import re
import string
import salt.utils.args
import salt.utils.files
import salt.utils.url
import salt.utils.versions
from salt.exceptions import CommandExecutionError
from salt.utils.versions import LooseVersion as _LooseVersion
log = logging.getLogger(__name__)
def __virtual__():
"""
Only load if git is available
"""
if "git.version" not in __salt__:
return (False, "git module could not be loaded")
git_ver = _LooseVersion(__salt__["git.version"](versioninfo=False))
return git_ver >= _LooseVersion("1.6.5")
def _revs_equal(rev1, rev2, rev_type):
"""
Shorthand helper function for comparing SHA1s. If rev_type == 'sha1' then
the comparison will be done using str.startswith() to allow short SHA1s to
compare successfully.
NOTE: This means that rev2 must be the short rev.
"""
if (rev1 is None and rev2 is not None) or (rev2 is None and rev1 is not None):
return False
elif rev1 is rev2 is None:
return True
elif rev_type == "sha1":
return rev1.startswith(rev2)
else:
return rev1 == rev2
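# Illustrative behaviour (sketch, hypothetical revs):
#   _revs_equal('0123456789abcdef0123456789abcdef01234567', '0123456', 'sha1')  -> True
#   _revs_equal('v1.0', 'v1.0', 'tag')   -> True
#   _revs_equal(None, None, 'sha1')      -> True
#   _revs_equal('abc1234', None, 'sha1') -> False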
def _short_sha(sha1):
return sha1[:7] if sha1 is not None else None
def _format_comments(comments):
"""
Return a joined list
"""
ret = ". ".join(comments)
if len(comments) > 1:
ret += "."
return ret
def _need_branch_change(branch, local_branch):
"""
Shorthand for telling when a new branch is needed
"""
return branch is not None and branch != local_branch
def _get_branch_opts(
branch, local_branch, all_local_branches, desired_upstream, git_ver=None
):
"""
DRY helper to build list of opts for git.branch, for the purposes of
setting upstream tracking branch
"""
if branch is not None and branch not in all_local_branches:
# We won't be setting upstream because the act of checking out a new
# branch will set upstream for us
return None
if git_ver is None:
git_ver = _LooseVersion(__salt__["git.version"](versioninfo=False))
ret = []
if git_ver >= _LooseVersion("1.8.0"):
ret.extend(["--set-upstream-to", desired_upstream])
else:
ret.append("--set-upstream")
# --set-upstream does not assume the current branch, so we have to
# tell it which branch we'll be using
ret.append(local_branch if branch is None else branch)
ret.append(desired_upstream)
return ret
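# Illustrative result (sketch): for an existing local branch 'dev' tracking
# 'origin/dev', git >= 1.8.0 yields ['--set-upstream-to', 'origin/dev'];
# older git yields ['--set-upstream', 'dev', 'origin/dev'].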
def _get_local_rev_and_branch(target, user, password, output_encoding=None):
"""
Return the local revision for before/after comparisons
"""
log.info("Checking local revision for %s", target)
try:
local_rev = __salt__["git.revision"](
target,
user=user,
password=password,
ignore_retcode=True,
output_encoding=output_encoding,
)
except CommandExecutionError:
log.info("No local revision for %s", target)
local_rev = None
log.info("Checking local branch for %s", target)
try:
local_branch = __salt__["git.current_branch"](
target,
user=user,
password=password,
ignore_retcode=True,
output_encoding=output_encoding,
)
except CommandExecutionError:
log.info("No local branch for %s", target)
local_branch = None
return local_rev, local_branch
def _strip_exc(exc):
"""
Strip the actual command that was run from exc.strerror to leave just the
error message
"""
return re.sub(r'^Command [\'"].+[\'"] failed: ', "", exc.strerror)
def _uptodate(ret, target, comments=None, local_changes=False):
ret["comment"] = "Repository {} is up-to-date".format(target)
if local_changes:
ret["comment"] += (
", but with uncommitted changes. Set 'force_reset' to True to "
"purge uncommitted changes."
)
if comments:
# Shouldn't be making any changes if the repo was up to date, but
# report on them so we are alerted to potential problems with our
# logic.
ret["comment"] += "\n\nChanges {}made: {}".format(
"that would be " if __opts__["test"] else "", _format_comments(comments)
)
return ret
def _neutral_test(ret, comment):
ret["result"] = None
ret["comment"] = comment
return ret
def _fail(ret, msg, comments=None):
ret["result"] = False
if comments:
msg += "\n\nChanges already made: " + _format_comments(comments)
ret["comment"] = msg
return ret
def _already_cloned(ret, target, branch=None, comments=None):
ret["result"] = True
ret["comment"] = "Repository already exists at {}{}".format(
target, " and is checked out to branch '{}'".format(branch) if branch else ""
)
if comments:
ret["comment"] += "\n\nChanges {}made: {}".format(
"that would be " if __opts__["test"] else "", _format_comments(comments)
)
return ret
def _failed_fetch(ret, exc, comments=None):
msg = (
"Fetch failed. Set 'force_fetch' to True to force the fetch if the "
"failure was due to not being able to fast-forward. Output of the fetch "
"command follows:\n\n{}".format(_strip_exc(exc))
)
return _fail(ret, msg, comments)
def _failed_submodule_update(ret, exc, comments=None):
msg = "Failed to update submodules: " + _strip_exc(exc)
return _fail(ret, msg, comments)
def _not_fast_forward(
ret, rev, pre, post, branch, local_branch, default_branch, local_changes, comments
):
branch_msg = ""
if branch is None:
if rev != "HEAD":
if local_branch != rev:
branch_msg = (
" The desired rev ({0}) differs from the name of the "
"local branch ({1}), if the desired rev is a branch name "
"then a forced update could possibly be avoided by "
"setting the 'branch' argument to '{0}' instead.".format(
rev, local_branch
)
)
else:
if default_branch is not None and local_branch != default_branch:
branch_msg = (
" The default remote branch ({0}) differs from the "
"local branch ({1}). This could be caused by changing the "
"default remote branch, or if the local branch was "
"manually changed. Rather than forcing an update, it "
"may be advisable to set the 'branch' argument to "
"'{0}' instead. To ensure that this state follows the "
"'{0}' branch instead of the remote HEAD, set the "
"'rev' argument to '{0}'.".format(default_branch, local_branch)
)
pre = _short_sha(pre)
post = _short_sha(post)
return _fail(
ret,
"Repository would be updated {}{}, but {}. Set 'force_reset' to "
"True{} to force this update{}.{}".format(
"from {} to {}".format(pre, post)
if local_changes and pre != post
else "to {}".format(post),
" (after checking out local branch '{}')".format(branch)
if _need_branch_change(branch, local_branch)
else "",
"this is not a fast-forward merge"
if not local_changes
else "there are uncommitted changes",
" (or 'remote-changes')" if local_changes else "",
" and discard these changes" if local_changes else "",
branch_msg,
),
comments,
)
def latest(
name,
target,
rev="HEAD",
branch=None,
user=None,
password=None,
update_head=True,
force_checkout=False,
force_clone=False,
force_fetch=False,
force_reset=False,
submodules=False,
bare=False,
mirror=False,
remote="origin",
fetch_tags=True,
sync_tags=True,
depth=None,
identity=None,
https_user=None,
https_pass=None,
refspec_branch="*",
refspec_tag="*",
output_encoding=None,
**kwargs
):
"""
Make sure the repository is cloned to the given directory and is
up-to-date.
name
Address of the remote repository, as passed to ``git clone``
.. note::
From the `Git documentation`_, there are two URL formats
supported for SSH authentication. The below two examples are
equivalent:
.. code-block:: text
# ssh:// URL
ssh://user@server/project.git
# SCP-like syntax
user@server:project.git
A common mistake is to use an ``ssh://`` URL, but with a colon
after the domain instead of a slash. This is invalid syntax in
Git, and will therefore not work in Salt. When in doubt, confirm
that a ``git clone`` works for the URL before using it in Salt.
It has been reported by some users that SCP-like syntax is
incompatible with git repos hosted on `Atlassian Stash/BitBucket
Server`_. In these cases, it may be necessary to use ``ssh://``
URLs for SSH authentication.
.. _`Git documentation`: https://git-scm.com/book/en/v2/Git-on-the-Server-The-Protocols#The-SSH-Protocol
.. _`Atlassian Stash/BitBucket Server`: https://www.atlassian.com/software/bitbucket/server
rev : HEAD
The remote branch, tag, or revision ID to checkout after clone / before
update. If specified, then Salt will also ensure that the tracking
branch is set to ``<remote>/<rev>``, unless ``rev`` refers to a tag or
SHA1, in which case Salt will ensure that the tracking branch is unset.
If ``rev`` is not specified, it will be assumed to be ``HEAD``, and
Salt will not manage the tracking branch at all.
.. versionchanged:: 2015.8.0
If not specified, ``rev`` now defaults to the remote repository's
HEAD.
target
Name of the target directory where repository is about to be cloned
branch
Name of the local branch into which to checkout the specified rev. If
not specified, then Salt will not care what branch is being used
locally and will just use whatever branch is currently there.
.. versionadded:: 2015.8.0
.. note::
If this argument is not specified, this means that Salt will not
change the local branch if the repository is reset to another
branch/tag/SHA1. For example, assume that the following state was
run initially:
.. code-block:: yaml
foo_app:
git.latest:
- name: https://mydomain.tld/apps/foo.git
- target: /var/www/foo
- user: www
This would have cloned the HEAD of that repo (since a ``rev``
wasn't specified), and because ``branch`` is not specified, the
branch in the local clone at ``/var/www/foo`` would be whatever the
default branch is on the remote repository (usually ``master``, but
not always). Now, assume that it becomes necessary to switch this
checkout to the ``dev`` branch. This would require ``rev`` to be
set, and probably would also require ``force_reset`` to be enabled:
.. code-block:: yaml
foo_app:
git.latest:
- name: https://mydomain.tld/apps/foo.git
- target: /var/www/foo
- user: www
- rev: dev
- force_reset: True
The result of this state would be to perform a hard-reset to
``origin/dev``. Since ``branch`` was not specified though, while
``/var/www/foo`` would reflect the contents of the remote repo's
``dev`` branch, the local branch would still remain whatever it was
when it was cloned. To make the local branch match the remote one,
set ``branch`` as well, like so:
.. code-block:: yaml
foo_app:
git.latest:
- name: https://mydomain.tld/apps/foo.git
- target: /var/www/foo
- user: www
- rev: dev
- branch: dev
- force_reset: True
This may seem redundant, but Salt tries to support a wide variety
of use cases, and doing it this way allows for the use case where
the local branch doesn't need to be strictly managed.
user
Local system user under which to run git commands. By default, commands
are run by the user under which the minion is running.
.. note::
This is not to be confused with the username for http(s)/SSH
authentication.
.. versionadded:: 0.17.0
password
Windows only. Required when specifying ``user``. This parameter will be
ignored on non-Windows platforms.
.. versionadded:: 2016.3.4
update_head : True
If set to ``False``, then the remote repository will be fetched (if
necessary) to ensure that the commit to which ``rev`` points exists in
the local checkout, but no changes will be made to the local HEAD.
.. versionadded:: 2015.8.3
force_checkout : False
When checking out the local branch, the state will fail if there are
unwritten changes. Set this argument to ``True`` to discard unwritten
changes when checking out.
force_clone : False
If the ``target`` directory exists and is not a git repository, then
this state will fail. Set this argument to ``True`` to remove the
contents of the target directory and clone the repo into it.
force_fetch : False
If a fetch needs to be performed, non-fast-forward fetches will cause
this state to fail. Set this argument to ``True`` to force the fetch
even if it is a non-fast-forward update.
.. versionadded:: 2015.8.0
force_reset : False
If the update is not a fast-forward, this state will fail. Set this
argument to ``True`` to force a hard-reset to the remote revision in
these cases.
.. versionchanged:: 2019.2.0
This option can now be set to ``remote-changes``, which will
instruct Salt not to discard local changes if the repo is
up-to-date with the remote repository.
submodules : False
Update submodules on clone or branch change
bare : False
Set to ``True`` if the repository is to be a bare clone of the remote
repository.
.. note::
Setting this option to ``True`` is incompatible with the ``rev``
argument.
mirror
Set to ``True`` if the repository is to be a mirror of the remote
repository. This implies that ``bare`` set to ``True``, and thus is
incompatible with ``rev``.
remote : origin
Git remote to use. If this state needs to clone the repo, it will clone
it using this value as the initial remote name. If the repository
already exists, and a remote by this name is not present, one will be
added.
fetch_tags : True
If ``True``, then when a fetch is performed all tags will be fetched,
even those which are not reachable by any branch on the remote.
sync_tags : True
If ``True``, then Salt will delete tags which exist in the local clone
but are not found on the remote repository.
.. versionadded:: 2018.3.4
depth
Defines depth in history when a git clone is needed in order to ensure
latest. E.g. ``depth: 1`` is useful when deploying from a repository
with a long history. Use rev to specify branch or tag. This is not
compatible with revision IDs.
.. versionchanged:: 2019.2.0
This option now supports tags as well as branches, on Git 1.8.0 and
newer.
identity
Path to a private key to use for ssh URLs. This can be either a single
string, or a list of strings. For example:
.. code-block:: yaml
# Single key
git@github.com:user/repo.git:
git.latest:
- user: deployer
- identity: /home/deployer/.ssh/id_rsa
# Two keys
git@github.com:user/repo.git:
git.latest:
- user: deployer
- identity:
- /home/deployer/.ssh/id_rsa
- /home/deployer/.ssh/id_rsa_alternate
If multiple keys are specified, they will be tried one-by-one in order
for each git command which needs to authenticate.
.. warning::
Unless Salt is invoked from the minion using ``salt-call``, the
key(s) must be passphraseless. For greater security with
passphraseless private keys, see the `sshd(8)`_ manpage for
information on securing the keypair from the remote side in the
``authorized_keys`` file.
.. _`sshd(8)`: http://www.man7.org/linux/man-pages/man8/sshd.8.html#AUTHORIZED_KEYS_FILE%20FORMAT
.. versionchanged:: 2015.8.7
Salt will no longer attempt to use passphrase-protected keys unless
invoked from the minion using ``salt-call``, to prevent blocking
waiting for user input.
.. versionchanged:: 2016.3.0
Key can now be specified as a SaltStack fileserver URL (e.g.
``salt://path/to/identity_file``).
https_user
HTTP Basic Auth username for HTTPS (only) clones
.. versionadded:: 2015.5.0
https_pass
HTTP Basic Auth password for HTTPS (only) clones
.. versionadded:: 2015.5.0
refspec_branch : *
A glob expression defining which branches to retrieve when fetching.
See `git-fetch(1)`_ for more information on how refspecs work.
.. versionadded:: 2017.7.0
refspec_tag : *
A glob expression defining which tags to retrieve when fetching. See
`git-fetch(1)`_ for more information on how refspecs work.
.. versionadded:: 2017.7.0
output_encoding
Use this option to specify which encoding to use to decode the output
from any git commands which are run. This should not be needed in most
cases.
.. note::
This should only be needed if the files in the repository were
created with filenames using an encoding other than UTF-8 to handle
Unicode characters.
.. versionadded:: 2018.3.1
.. _`git-fetch(1)`: http://git-scm.com/docs/git-fetch
.. note::
Clashing ID declarations can be avoided when including different
branches from the same git repository in the same SLS file by using the
``name`` argument. The example below checks out the ``gh-pages`` and
``gh-pages-prod`` branches from the same repository into separate
directories. The example also sets up the ``ssh_known_hosts`` ssh key
required to perform the git checkout.
.. code-block:: yaml
gitlab.example.com:
ssh_known_hosts:
- present
- user: root
- enc: ecdsa
- fingerprint: 4e:94:b0:54:c1:5b:29:a2:70:0e:e1:a3:51:ee:ee:e3
git-website-staging:
git.latest:
- name: git@gitlab.example.com:user/website.git
- rev: gh-pages
- target: /usr/share/nginx/staging
- identity: /root/.ssh/website_id_rsa
- require:
- pkg: git
- ssh_known_hosts: gitlab.example.com
git-website-staging:
git.latest:
- name: git@gitlab.example.com:user/website.git
- rev: gh-pages
- target: /usr/share/nginx/staging
- identity: salt://website/id_rsa
- require:
- pkg: git
- ssh_known_hosts: gitlab.example.com
git-website-prod:
git.latest:
- name: git@gitlab.example.com:user/website.git
- rev: gh-pages-prod
- target: /usr/share/nginx/prod
- identity: /root/.ssh/website_id_rsa
- require:
- pkg: git
- ssh_known_hosts: gitlab.example.com
"""
ret = {"name": name, "result": True, "comment": "", "changes": {}}
kwargs = salt.utils.args.clean_kwargs(**kwargs)
if kwargs:
return _fail(ret, salt.utils.args.invalid_kwargs(kwargs, raise_exc=False))
if not remote:
return _fail(ret, "'remote' argument is required")
if not target:
return _fail(ret, "'target' argument is required")
if not rev:
return _fail(
ret, "'{}' is not a valid value for the 'rev' argument".format(rev)
)
if force_reset not in (True, False, "remote-changes"):
return _fail(
ret, "'force_reset' must be one of True, False, or 'remote-changes'"
)
# Ensure that certain arguments are strings to ensure that comparisons work
if not isinstance(rev, str):
rev = str(rev)
if target is not None:
if not isinstance(target, str):
target = str(target)
if not os.path.isabs(target):
return _fail(ret, "target '{}' is not an absolute path".format(target))
if branch is not None and not isinstance(branch, str):
branch = str(branch)
if user is not None and not isinstance(user, str):
user = str(user)
if password is not None and not isinstance(password, str):
password = str(password)
if remote is not None and not isinstance(remote, str):
remote = str(remote)
if identity is not None:
if isinstance(identity, str):
identity = [identity]
elif not isinstance(identity, list):
return _fail(ret, "identity must be either a list or a string")
identity = [os.path.expanduser(x) for x in identity]
for ident_path in identity:
if "salt://" in ident_path:
try:
ident_path = __salt__["cp.cache_file"](ident_path, __env__)
except OSError as exc:
log.exception("Failed to cache %s", ident_path)
return _fail(
ret, "identity '{}' does not exist.".format(ident_path)
)
if not os.path.isabs(ident_path):
return _fail(
ret, "identity '{}' is not an absolute path".format(ident_path)
)
if https_user is not None and not isinstance(https_user, str):
https_user = str(https_user)
if https_pass is not None and not isinstance(https_pass, str):
https_pass = str(https_pass)
# Check for lfs filter settings, and setup lfs_opts accordingly. These opts
# will be passed where appropriate to ensure that these commands are
# authenticated and that the git LFS plugin can download files.
use_lfs = bool(
__salt__["git.config_get_regexp"](
r"filter\.lfs\.", **{"global": True, "ignore_retcode": True}
)
)
lfs_opts = {"identity": identity} if use_lfs else {}
if os.path.isfile(target):
return _fail(
ret,
"Target '{}' exists and is a regular file, cannot proceed".format(target),
)
try:
desired_fetch_url = salt.utils.url.add_http_basic_auth(
name, https_user, https_pass, https_only=True
)
except ValueError as exc:
return _fail(ret, exc.__str__())
redacted_fetch_url = salt.utils.url.redact_http_basic_auth(desired_fetch_url)
if mirror:
bare = True
# Check to make sure rev and mirror/bare are not both in use
if rev != "HEAD" and bare:
return _fail(
ret, "'rev' is not compatible with the 'mirror' and 'bare' arguments"
)
run_check_cmd_kwargs = {"runas": user, "password": password}
if "shell" in __grains__:
run_check_cmd_kwargs["shell"] = __grains__["shell"]
refspecs = (
[
"refs/heads/{0}:refs/remotes/{1}/{0}".format(refspec_branch, remote),
"+refs/tags/{0}:refs/tags/{0}".format(refspec_tag),
]
if fetch_tags
else []
)
log.info("Checking remote revision for %s", name)
try:
all_remote_refs = __salt__["git.remote_refs"](
name,
heads=False,
tags=False,
user=user,
password=password,
identity=identity,
https_user=https_user,
https_pass=https_pass,
ignore_retcode=False,
saltenv=__env__,
output_encoding=output_encoding,
)
except CommandExecutionError as exc:
return _fail(ret, "Failed to check remote refs: {}".format(_strip_exc(exc)))
if "HEAD" in all_remote_refs:
head_rev = all_remote_refs["HEAD"]
for refname, refsha in all_remote_refs.items():
if refname.startswith("refs/heads/"):
if refsha == head_rev:
default_branch = refname.partition("refs/heads/")[-1]
break
else:
default_branch = None
else:
head_rev = None
default_branch = None
desired_upstream = False
if bare:
remote_rev = None
remote_rev_type = None
else:
if rev == "HEAD":
if head_rev is not None:
remote_rev = head_rev
# Just go with whatever the upstream currently is
desired_upstream = None
remote_rev_type = "sha1"
else:
# Empty remote repo
remote_rev = None
remote_rev_type = None
elif "refs/heads/" + rev in all_remote_refs:
remote_rev = all_remote_refs["refs/heads/" + rev]
desired_upstream = "/".join((remote, rev))
remote_rev_type = "branch"
elif "refs/tags/" + rev + "^{}" in all_remote_refs:
# Annotated tag
remote_rev = all_remote_refs["refs/tags/" + rev + "^{}"]
remote_rev_type = "tag"
elif "refs/tags/" + rev in all_remote_refs:
# Non-annotated tag
remote_rev = all_remote_refs["refs/tags/" + rev]
remote_rev_type = "tag"
else:
if len(rev) <= 40 and all(x in string.hexdigits for x in rev):
# git ls-remote did not find the rev, and because it's a
# hex string <= 40 chars we're going to assume that the
# desired rev is a SHA1
rev = rev.lower()
remote_rev = rev
remote_rev_type = "sha1"
else:
remote_rev = None
remote_rev_type = None
# For the comment field of the state return dict, the remote location
# (and short-sha1, if rev is not a sha1) is referenced several times,
# determine it once here and reuse the value below.
if remote_rev_type == "sha1":
if rev == "HEAD":
remote_loc = "remote HEAD (" + remote_rev[:7] + ")"
else:
remote_loc = remote_rev[:7]
elif remote_rev is not None:
remote_loc = "{} ({})".format(
desired_upstream if remote_rev_type == "branch" else rev, remote_rev[:7]
)
else:
# Shouldn't happen but log a warning here for future
# troubleshooting purposes in the event we find a corner case.
log.warning(
"Unable to determine remote_loc. rev is %s, remote_rev is "
"%s, remove_rev_type is %s, desired_upstream is %s, and bare "
"is%s set",
rev,
remote_rev,
remote_rev_type,
desired_upstream,
" not" if not bare else "",
)
remote_loc = None
if depth is not None and remote_rev_type not in ("branch", "tag"):
return _fail(
ret,
"When 'depth' is used, 'rev' must be set to the name of a "
"branch or tag on the remote repository",
)
if remote_rev is None and not bare:
if rev != "HEAD":
# A specific rev is desired, but that rev doesn't exist on the
# remote repo.
return _fail(
ret,
"No revision matching '{}' exists in the remote repository".format(rev),
)
git_ver = _LooseVersion(__salt__["git.version"](versioninfo=False))
check = "refs" if bare else ".git"
gitdir = os.path.join(target, check)
comments = []
if os.path.isdir(gitdir) or __salt__["git.is_worktree"](
target, user=user, password=password, output_encoding=output_encoding
):
# Target directory is a git repository or git worktree
try:
all_local_branches = __salt__["git.list_branches"](
target, user=user, password=password, output_encoding=output_encoding
)
all_local_tags = set(
__salt__["git.list_tags"](
target,
user=user,
password=password,
output_encoding=output_encoding,
)
)
local_rev, local_branch = _get_local_rev_and_branch(
target, user, password, output_encoding
)
if not bare and remote_rev is None and local_rev is not None:
return _fail(
ret,
"Remote repository is empty, cannot update from a "
"non-empty to an empty repository",
)
# Base rev and branch are the ones from which any reset or merge
# will take place. If the branch is not being specified, the base
# will be the "local" rev and branch, i.e. those we began with
# before this state was run. If a branch is being specified and it
# both exists and is not the one with which we started, then we'll
# be checking that branch out first, and it instead becomes our
# base. The base branch and rev will be used below in comparisons
# to determine what changes to make.
base_rev = local_rev
base_branch = local_branch
if _need_branch_change(branch, local_branch):
if branch not in all_local_branches:
# We're checking out a new branch, so the base_rev and
# remote_rev will be identical.
base_rev = remote_rev
else:
base_branch = branch
# Desired branch exists locally and is not the current
# branch. We'll be performing a checkout to that branch
# eventually, but before we do that we need to find the
# current SHA1.
try:
base_rev = __salt__["git.rev_parse"](
target,
branch + "^{commit}",
user=user,
password=password,
ignore_retcode=True,
output_encoding=output_encoding,
)
except CommandExecutionError as exc:
return _fail(
ret,
"Unable to get position of local branch '{}': {}".format(
branch, _strip_exc(exc)
),
comments,
)
remotes = __salt__["git.remotes"](
target,
user=user,
password=password,
redact_auth=False,
output_encoding=output_encoding,
)
revs_match = _revs_equal(local_rev, remote_rev, remote_rev_type)
try:
# If not a bare repo, check `git diff HEAD` to determine if
# there are local changes.
local_changes = bool(
not bare
and __salt__["git.diff"](
target,
"HEAD",
user=user,
password=password,
output_encoding=output_encoding,
)
)
except CommandExecutionError:
# No need to capture the error and log it, the _git_run()
# helper in the git execution module will have already logged
# the output from the command.
log.warning(
"git.latest: Unable to determine if %s has local changes", target
)
local_changes = False
if local_changes and revs_match:
if force_reset is True:
msg = (
"{} is up-to-date, but with uncommitted changes. "
"Since 'force_reset' is set to True, these local "
"changes would be reset. To only reset when there are "
"changes in the remote repository, set "
"'force_reset' to 'remote-changes'.".format(target)
)
if __opts__["test"]:
ret["changes"]["forced update"] = True
if comments:
msg += _format_comments(comments)
return _neutral_test(ret, msg)
log.debug(msg.replace("would", "will"))
else:
log.debug(
"%s up-to-date, but with uncommitted changes. Since "
"'force_reset' is set to %s, no changes will be "
"made.",
target,
force_reset,
)
return _uptodate(
ret, target, _format_comments(comments), local_changes
)
if (
remote_rev_type == "sha1"
and base_rev is not None
and base_rev.startswith(remote_rev)
):
# Either we're already checked out to the branch we need and it
# is up-to-date, or the branch to which we need to switch is
# on the same SHA1 as the desired remote revision. Either way,
# we know we have the remote rev present already and no fetch
# will be needed.
has_remote_rev = True
else:
has_remote_rev = False
if remote_rev is not None:
try:
__salt__["git.rev_parse"](
target,
remote_rev + "^{commit}",
user=user,
password=password,
ignore_retcode=True,
output_encoding=output_encoding,
)
except CommandExecutionError:
# Local checkout doesn't have the remote_rev
pass
else:
# The object might exist enough to get a rev-parse to
# work, while the local ref could have been
# deleted/changed/force updated. Do some further sanity
# checks to determine if we really do have the
# remote_rev.
if remote_rev_type == "branch":
if remote in remotes:
try:
# Do a rev-parse on <remote>/<rev> to get
# the local SHA1 for it, so we can compare
# it to the remote_rev SHA1.
local_copy = __salt__["git.rev_parse"](
target,
desired_upstream,
user=user,
password=password,
ignore_retcode=True,
output_encoding=output_encoding,
)
except CommandExecutionError:
pass
else:
# If the SHA1s don't match, then the remote
# branch was force-updated, and we need to
# fetch to update our local copy the ref
# for the remote branch. If they do match,
# then we have the remote_rev and don't
# need to fetch.
if local_copy == remote_rev:
has_remote_rev = True
elif remote_rev_type == "tag":
if rev in all_local_tags:
try:
local_tag_sha1 = __salt__["git.rev_parse"](
target,
rev + "^{commit}",
user=user,
password=password,
ignore_retcode=True,
output_encoding=output_encoding,
)
except CommandExecutionError:
# Shouldn't happen if the tag exists
# locally but account for this just in
# case.
local_tag_sha1 = None
if local_tag_sha1 == remote_rev:
has_remote_rev = True
else:
if not force_reset:
# SHA1 of tag on remote repo is
# different than local tag. Unless
# we're doing a hard reset then we
# don't need to proceed as we know that
# the fetch will update the tag and the
# only way to make the state succeed is
# to reset the branch to point at the
# tag's new location.
return _fail(
ret,
"'{}' is a tag, but the remote "
"SHA1 for this tag ({}) doesn't "
"match the local SHA1 ({}). Set "
"'force_reset' to True to force "
"this update.".format(
rev,
_short_sha(remote_rev),
_short_sha(local_tag_sha1),
),
)
elif remote_rev_type == "sha1":
has_remote_rev = True
# If fast_forward is not boolean, then we don't yet know if this
# will be a fast forward or not, because a fetch is required.
fast_forward = (
False if (local_changes and force_reset != "remote-changes") else None
)
if has_remote_rev:
if (not revs_match and not update_head) and (
branch is None or branch == local_branch
):
ret["comment"] = (
"{} is already present and local HEAD ({}) does not "
"match, but update_head=False. HEAD has not been "
"updated locally.".format(
remote_loc.capitalize() if rev == "HEAD" else remote_loc,
local_rev[:7],
)
)
return ret
# No need to check if this is a fast_forward if we already know
# that it won't be (due to local changes).
if fast_forward is not False:
if base_rev is None:
# If we're here, the remote_rev exists in the local
# checkout but there is still no HEAD locally. A
# possible reason for this is that an empty repository
# existed there and a remote was added and fetched, but
# the repository was not fast-forwarded. Regardless,
# going from no HEAD to a locally-present rev is
# considered a fast-forward update.
fast_forward = True
else:
fast_forward = __salt__["git.merge_base"](
target,
refs=[base_rev, remote_rev],
is_ancestor=True,
user=user,
password=password,
ignore_retcode=True,
output_encoding=output_encoding,
)
if fast_forward is False:
if force_reset is False:
return _not_fast_forward(
ret,
rev,
base_rev,
remote_rev,
branch,
local_branch,
default_branch,
local_changes,
comments,
)
merge_action = "hard-reset"
elif fast_forward is True:
merge_action = "fast-forwarded"
else:
merge_action = "updated"
if base_branch is None:
# No local branch, no upstream tracking branch
upstream = None
else:
try:
upstream = __salt__["git.rev_parse"](
target,
base_branch + "@{upstream}",
opts=["--abbrev-ref"],
user=user,
password=password,
ignore_retcode=True,
output_encoding=output_encoding,
)
except CommandExecutionError:
# There is a local branch but the rev-parse command
# failed, so that means there is no upstream tracking
# branch. This could be because it is just not set, or
# because the branch was checked out to a SHA1 or tag
# instead of a branch. Set upstream to False to make a
# distinction between the case above where there is no
# local_branch (when the local checkout is an empty
# repository).
upstream = False
if remote in remotes:
fetch_url = remotes[remote]["fetch"]
else:
log.debug("Remote '%s' not found in git checkout at %s", remote, target)
fetch_url = None
if remote_rev is not None and desired_fetch_url != fetch_url:
if __opts__["test"]:
actions = [
"Remote '{}' would be changed from {} to {}".format(
remote,
salt.utils.url.redact_http_basic_auth(fetch_url),
redacted_fetch_url,
)
]
if not has_remote_rev:
actions.append("Remote would be fetched")
if not revs_match:
if update_head:
ret["changes"]["revision"] = {
"old": local_rev,
"new": remote_rev,
}
if fast_forward is False:
ret["changes"]["forced update"] = True
actions.append(
"Repository would be {} to {}".format(
merge_action, _short_sha(remote_rev)
)
)
if ret["changes"]:
return _neutral_test(ret, _format_comments(actions))
else:
if not revs_match and not update_head:
# Repo content would not be modified but the remote
# URL would be modified, so we can't just say that
# the repo is up-to-date, we need to inform the
# user of the actions taken.
ret["comment"] = _format_comments(actions)
return ret
return _uptodate(ret, target, _format_comments(actions))
# The fetch_url for the desired remote does not match the
# specified URL (or the remote does not exist), so set the
# remote URL.
__salt__["git.remote_set"](
target,
url=name,
remote=remote,
user=user,
password=password,
https_user=https_user,
https_pass=https_pass,
output_encoding=output_encoding,
)
if fetch_url is None:
comments.append(
"Remote '{}' set to {}".format(remote, redacted_fetch_url)
)
ret["changes"]["new"] = name + " => " + remote
else:
comments.append(
"Remote '{}' changed from {} to {}".format(
remote,
salt.utils.url.redact_http_basic_auth(fetch_url),
redacted_fetch_url,
)
)
if remote_rev is not None:
if __opts__["test"]:
actions = []
if not has_remote_rev:
actions.append("Remote '{}' would be fetched".format(remote))
if (not revs_match) and (
update_head or (branch is not None and branch != local_branch)
):
ret["changes"]["revision"] = {
"old": local_rev,
"new": remote_rev,
}
if _need_branch_change(branch, local_branch):
if branch not in all_local_branches:
actions.append(
"New branch '{}' would be checked "
"out, with {} as a starting "
"point".format(branch, remote_loc)
)
if desired_upstream:
actions.append(
"Tracking branch would be set to {}".format(
desired_upstream
)
)
else:
actions.append(
"Branch '{}' would be checked out and {} to {}".format(
branch, merge_action, _short_sha(remote_rev)
)
)
else:
if not revs_match:
if update_head:
if fast_forward is True:
actions.append(
"Repository would be fast-forwarded from "
"{} to {}".format(
_short_sha(local_rev),
_short_sha(remote_rev),
)
)
else:
actions.append(
"Repository would be {} from {} to {}".format(
"hard-reset"
if force_reset and has_remote_rev
else "updated",
_short_sha(local_rev),
_short_sha(remote_rev),
)
)
else:
actions.append(
"Local HEAD ({}) does not match {} but "
"update_head=False, HEAD would not be "
"updated locally".format(local_rev[:7], remote_loc)
)
# Check if upstream needs changing
if not upstream and desired_upstream:
actions.append(
"Tracking branch would be set to {}".format(
desired_upstream
)
)
elif upstream and desired_upstream is False:
actions.append("Tracking branch would be unset")
elif desired_upstream and upstream != desired_upstream:
actions.append(
"Tracking branch would be updated to {}".format(
desired_upstream
)
)
if ret["changes"]:
return _neutral_test(ret, _format_comments(actions))
else:
formatted_actions = _format_comments(actions)
if not revs_match and not update_head and formatted_actions:
ret["comment"] = formatted_actions
return ret
return _uptodate(ret, target, _format_comments(actions))
if not upstream and desired_upstream:
upstream_action = "Tracking branch was set to {}".format(
desired_upstream
)
branch_opts = _get_branch_opts(
branch,
local_branch,
all_local_branches,
desired_upstream,
git_ver,
)
elif upstream and desired_upstream is False:
# If the remote_rev is a tag or SHA1, and there is an
# upstream tracking branch, we will unset it. However, we
# can only do this if the git version is 1.8.0 or newer, as
# the --unset-upstream option was not added until that
# version.
if git_ver >= _LooseVersion("1.8.0"):
upstream_action = "Tracking branch was unset"
branch_opts = ["--unset-upstream"]
else:
branch_opts = None
elif desired_upstream and upstream != desired_upstream:
upstream_action = "Tracking branch was updated to {}".format(
desired_upstream
)
branch_opts = _get_branch_opts(
branch,
local_branch,
all_local_branches,
desired_upstream,
git_ver,
)
else:
branch_opts = None
if branch_opts is not None and local_branch is None:
return _fail(
ret,
"Cannot set/unset upstream tracking branch, local "
"HEAD refers to nonexistent branch. This may have "
"been caused by cloning a remote repository for which "
"the default branch was renamed or deleted. If you "
"are unable to fix the remote repository, you can "
"work around this by setting the 'branch' argument "
"(which will ensure that the named branch is created "
"if it does not already exist).",
comments,
)
remote_tags = {
x.replace("refs/tags/", "")
for x in __salt__["git.ls_remote"](
cwd=target,
remote=remote,
opts="--tags",
user=user,
password=password,
identity=identity,
saltenv=__env__,
ignore_retcode=True,
output_encoding=output_encoding,
)
if "^{}" not in x
}
if all_local_tags != remote_tags:
has_remote_rev = False
new_tags = remote_tags - all_local_tags
deleted_tags = all_local_tags - remote_tags
if new_tags:
ret["changes"]["new_tags"] = new_tags
if sync_tags and deleted_tags:
# Delete the local copy of the tags to keep up with the
# remote repository.
for tag_name in deleted_tags:
try:
if not __opts__["test"]:
__salt__["git.tag"](
target,
tag_name,
opts="-d",
user=user,
password=password,
output_encoding=output_encoding,
)
except CommandExecutionError as exc:
ret.setdefault("warnings", []).append(
"Failed to remove local tag '{}':\n\n{}\n\n".format(
tag_name, exc
)
)
else:
ret["changes"].setdefault("deleted_tags", []).append(
tag_name
)
if ret["changes"].get("deleted_tags"):
comments.append(
"The following tags {} removed from the local "
"checkout: {}".format(
"would be" if __opts__["test"] else "were",
", ".join(ret["changes"]["deleted_tags"]),
)
)
if not has_remote_rev:
try:
fetch_changes = __salt__["git.fetch"](
target,
remote=remote,
force=force_fetch,
refspecs=refspecs,
user=user,
password=password,
identity=identity,
saltenv=__env__,
output_encoding=output_encoding,
)
except CommandExecutionError as exc:
return _failed_fetch(ret, exc, comments)
else:
if fetch_changes:
comments.append(
"{} was fetched, resulting in updated refs".format(name)
)
try:
__salt__["git.rev_parse"](
target,
remote_rev + "^{commit}",
user=user,
password=password,
ignore_retcode=True,
output_encoding=output_encoding,
)
except CommandExecutionError as exc:
return _fail(
ret,
"Fetch did not successfully retrieve rev '{}' "
"from {}: {}".format(rev, name, exc),
)
if (not revs_match and not update_head) and (
branch is None or branch == local_branch
):
# Rev now exists locally (was fetched), and since we're
# not updating HEAD we'll just exit here.
ret["comment"] = (
remote_loc.capitalize() if rev == "HEAD" else remote_loc
)
ret["comment"] += (
" is already present and local HEAD ({}) does not "
"match, but update_head=False. HEAD has not been "
"updated locally.".format(local_rev[:7])
)
return ret
# Now that we've fetched, check again whether or not
# the update is a fast-forward.
if base_rev is None:
fast_forward = True
else:
fast_forward = __salt__["git.merge_base"](
target,
refs=[base_rev, remote_rev],
is_ancestor=True,
user=user,
password=password,
output_encoding=output_encoding,
)
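# Note: the chained 'is' comparison below is true only when both
# fast_forward and force_reset are False, i.e. a non-fast-forward
# update with force_reset disabled.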
if fast_forward is force_reset is False or (
fast_forward is True and local_changes and force_reset is False
):
return _not_fast_forward(
ret,
rev,
base_rev,
remote_rev,
branch,
local_branch,
default_branch,
local_changes,
comments,
)
if _need_branch_change(branch, local_branch):
if local_changes and not force_checkout:
return _fail(
ret,
"Local branch '{}' has uncommitted "
"changes. Set 'force_checkout' to True to "
"discard them and proceed.".format(local_branch),
)
# TODO: Maybe re-retrieve all_local_branches to handle
# the corner case where the destination branch was
# added to the local checkout during a fetch that takes
# a long time to complete.
if branch not in all_local_branches:
if rev == "HEAD":
checkout_rev = remote_rev
else:
checkout_rev = desired_upstream if desired_upstream else rev
checkout_opts = ["-b", branch]
else:
checkout_rev = branch
checkout_opts = []
__salt__["git.checkout"](
target,
checkout_rev,
force=force_checkout,
opts=checkout_opts,
user=user,
password=password,
output_encoding=output_encoding,
)
if "-b" in checkout_opts:
comments.append(
"New branch '{}' was checked out, with {} "
"as a starting point".format(branch, remote_loc)
)
else:
comments.append("'{}' was checked out".format(checkout_rev))
if fast_forward is False:
__salt__["git.reset"](
target,
opts=["--hard", remote_rev],
user=user,
password=password,
output_encoding=output_encoding,
**lfs_opts
)
ret["changes"]["forced update"] = True
if local_changes:
comments.append("Uncommitted changes were discarded")
comments.append(
"Repository was hard-reset to {}".format(remote_loc)
)
elif (
fast_forward is True and local_changes and force_reset is not False
):
__salt__["git.discard_local_changes"](
target,
user=user,
password=password,
output_encoding=output_encoding,
)
comments.append("Uncommitted changes were discarded")
if branch_opts is not None:
__salt__["git.branch"](
target,
opts=branch_opts,
user=user,
password=password,
output_encoding=output_encoding,
)
comments.append(upstream_action)
# Fast-forward to the desired revision
if fast_forward is True and not _revs_equal(
base_rev, remote_rev, remote_rev_type
):
if desired_upstream or rev == "HEAD":
# Check first to see if we are on a branch before
# trying to merge changes. (The call to
# git.symbolic_ref will only return output if HEAD
# points to a branch.)
if __salt__["git.symbolic_ref"](
target,
"HEAD",
opts=["--quiet"],
user=user,
password=password,
ignore_retcode=True,
output_encoding=output_encoding,
):
if git_ver >= _LooseVersion("1.8.1.6"):
# --ff-only added in version 1.8.1.6. It's not
# 100% necessary, but if we can use it, we'll
# ensure that the merge doesn't go through if
# not a fast-forward. Granted, the logic that
# gets us to this point shouldn't allow us to
# attempt this merge if it's not a
# fast-forward, but it's an extra layer of
# protection.
merge_opts = ["--ff-only"]
else:
merge_opts = []
__salt__["git.merge"](
target,
rev=remote_rev,
opts=merge_opts,
user=user,
password=password,
output_encoding=output_encoding,
**lfs_opts
)
comments.append(
"Repository was fast-forwarded to {}".format(remote_loc)
)
else:
return _fail(
ret,
"Unable to fast-forward, HEAD is detached",
comments,
)
else:
# Update is a fast forward, but we cannot merge to that
# commit so we'll reset to it.
__salt__["git.reset"](
target,
opts=["--hard", remote_rev if rev == "HEAD" else rev],
user=user,
password=password,
output_encoding=output_encoding,
**lfs_opts
)
comments.append(
"Repository was reset to {} (fast-forward)".format(rev)
)
# TODO: Figure out how to add submodule update info to
# test=True return data, and changes dict.
if submodules:
try:
__salt__["git.submodule"](
target,
"update",
opts=["--init", "--recursive"],
user=user,
password=password,
identity=identity,
saltenv=__env__,
output_encoding=output_encoding,
)
except CommandExecutionError as exc:
return _failed_submodule_update(ret, exc, comments)
elif bare:
if __opts__["test"]:
msg = "Bare repository at {} would be fetched".format(target)
if ret["changes"]:
return _neutral_test(ret, msg)
else:
return _uptodate(ret, target, msg)
try:
fetch_changes = __salt__["git.fetch"](
target,
remote=remote,
force=force_fetch,
refspecs=refspecs,
user=user,
password=password,
identity=identity,
saltenv=__env__,
output_encoding=output_encoding,
)
except CommandExecutionError as exc:
return _failed_fetch(ret, exc, comments)
else:
comments.append(
"Bare repository at {} was fetched{}".format(
target,
", resulting in updated refs" if fetch_changes else "",
)
)
try:
new_rev = __salt__["git.revision"](
cwd=target,
user=user,
password=password,
ignore_retcode=True,
output_encoding=output_encoding,
)
except CommandExecutionError:
new_rev = None
except Exception as exc: # pylint: disable=broad-except
log.error("Unexpected exception in git.latest state", exc_info=True)
if isinstance(exc, CommandExecutionError):
msg = _strip_exc(exc)
else:
msg = str(exc)
return _fail(ret, msg, comments)
if not bare and not _revs_equal(new_rev, remote_rev, remote_rev_type):
return _fail(ret, "Failed to update repository", comments)
if local_rev != new_rev:
log.info("Repository %s updated: %s => %s", target, local_rev, new_rev)
ret["comment"] = _format_comments(comments)
ret["changes"]["revision"] = {"old": local_rev, "new": new_rev}
else:
return _uptodate(ret, target, _format_comments(comments))
else:
if os.path.isdir(target):
target_contents = os.listdir(target)
if force_clone:
# Clone is required, and target directory exists, but the
# ``force`` option is enabled, so we need to clear out its
# contents to proceed.
if __opts__["test"]:
ret["changes"]["forced clone"] = True
ret["changes"]["new"] = name + " => " + target
return _neutral_test(
ret,
"Target directory {0} exists. Since force_clone=True, "
"the contents of {0} would be deleted, and {1} would "
"be cloned into this directory.".format(target, name),
)
log.debug(
"Removing contents of %s to clone repository %s in its "
"place (force_clone=True set in git.latest state)",
target,
name,
)
removal_errors = {}
for target_object in target_contents:
target_path = os.path.join(target, target_object)
try:
salt.utils.files.rm_rf(target_path)
except OSError as exc:
if exc.errno != errno.ENOENT:
removal_errors[target_path] = exc
if removal_errors:
err_strings = [
" {}\n {}".format(k, v) for k, v in removal_errors.items()
]
return _fail(
ret,
"Unable to remove\n{}".format("\n".join(err_strings)),
comments,
)
ret["changes"]["forced clone"] = True
# Clone is required, but target dir exists and is non-empty. We
# can't proceed.
elif target_contents:
return _fail(
ret,
"Target '{}' exists, is non-empty and is not a git "
"repository. Set the 'force_clone' option to True to "
"remove this directory's contents and proceed with "
"cloning the remote repository".format(target),
)
log.debug("Target %s is not found, 'git clone' is required", target)
if __opts__["test"]:
ret["changes"]["new"] = name + " => " + target
return _neutral_test(
ret, "Repository {} would be cloned to {}".format(name, target)
)
try:
clone_opts = ["--mirror"] if mirror else ["--bare"] if bare else []
if remote != "origin":
clone_opts.extend(["--origin", remote])
if depth is not None:
clone_opts.extend(["--depth", str(depth), "--branch", rev])
# We're cloning a fresh repo, there is no local branch or revision
local_branch = local_rev = None
try:
__salt__["git.clone"](
target,
name,
user=user,
password=password,
opts=clone_opts,
identity=identity,
https_user=https_user,
https_pass=https_pass,
saltenv=__env__,
output_encoding=output_encoding,
)
except CommandExecutionError as exc:
msg = "Clone failed: {}".format(_strip_exc(exc))
return _fail(ret, msg, comments)
ret["changes"]["new"] = name + " => " + target
comments.append(
"{} cloned to {}{}".format(
name,
target,
" as mirror" if mirror else " as bare repository" if bare else "",
)
)
if not bare:
if not remote_rev:
if rev != "HEAD":
# No HEAD means the remote repo is empty, which means
# our new clone will also be empty. This state has
# failed, since a rev was specified but no matching rev
# exists on the remote host.
msg = (
"%s was cloned but is empty, so {}/{} "
"cannot be checked out".format(remote, rev)
)
log.error(msg, name)
# Disable check for string substitution
return _fail(
ret, msg % "Repository", comments
) # pylint: disable=E1321
else:
if remote_rev_type == "tag" and rev not in __salt__[
"git.list_tags"
](
target,
user=user,
password=password,
output_encoding=output_encoding,
):
return _fail(
ret,
"Revision '{}' does not exist in clone".format(rev),
comments,
)
if branch is not None:
if branch not in __salt__["git.list_branches"](
target,
user=user,
password=password,
output_encoding=output_encoding,
):
if rev == "HEAD":
checkout_rev = remote_rev
else:
checkout_rev = (
desired_upstream if desired_upstream else rev
)
__salt__["git.checkout"](
target,
checkout_rev,
opts=["-b", branch],
user=user,
password=password,
output_encoding=output_encoding,
)
comments.append(
"Branch '{}' checked out, with {} "
"as a starting point".format(branch, remote_loc)
)
local_rev, local_branch = _get_local_rev_and_branch(
target, user, password, output_encoding=output_encoding
)
if (
local_branch is None
and remote_rev is not None
and "HEAD" not in all_remote_refs
):
return _fail(
ret,
"Remote HEAD refers to a ref that does not exist. "
"This can happen when the default branch on the "
"remote repository is renamed or deleted. If you "
"are unable to fix the remote repository, you can "
"work around this by setting the 'branch' argument "
"(which will ensure that the named branch is created "
"if it does not already exist).",
comments,
)
if not _revs_equal(local_rev, remote_rev, remote_rev_type):
__salt__["git.reset"](
target,
opts=["--hard", remote_rev],
user=user,
password=password,
output_encoding=output_encoding,
)
comments.append("Repository was reset to {}".format(remote_loc))
try:
upstream = __salt__["git.rev_parse"](
target,
local_branch + "@{upstream}",
opts=["--abbrev-ref"],
user=user,
password=password,
ignore_retcode=True,
output_encoding=output_encoding,
)
except CommandExecutionError:
upstream = False
if not upstream and desired_upstream:
upstream_action = "Tracking branch was set to {}".format(
desired_upstream
)
branch_opts = _get_branch_opts(
branch,
local_branch,
__salt__["git.list_branches"](
target,
user=user,
password=password,
output_encoding=output_encoding,
),
desired_upstream,
git_ver,
)
elif upstream and desired_upstream is False:
# If the remote_rev is a tag or SHA1, and there is an
# upstream tracking branch, we will unset it. However,
# we can only do this if the git version is 1.8.0 or
# newer, as the --unset-upstream option was not added
# until that version.
if git_ver >= _LooseVersion("1.8.0"):
upstream_action = "Tracking branch was unset"
branch_opts = ["--unset-upstream"]
else:
branch_opts = None
elif desired_upstream and upstream != desired_upstream:
upstream_action = "Tracking branch was updated to {}".format(
desired_upstream
)
branch_opts = _get_branch_opts(
branch,
local_branch,
__salt__["git.list_branches"](
target,
user=user,
password=password,
output_encoding=output_encoding,
),
desired_upstream,
git_ver,
)
else:
branch_opts = None
if branch_opts is not None:
__salt__["git.branch"](
target,
opts=branch_opts,
user=user,
password=password,
output_encoding=output_encoding,
)
comments.append(upstream_action)
if submodules and remote_rev:
try:
__salt__["git.submodule"](
target,
"update",
opts=["--init", "--recursive"],
user=user,
password=password,
identity=identity,
output_encoding=output_encoding,
)
except CommandExecutionError as exc:
return _failed_submodule_update(ret, exc, comments)
try:
new_rev = __salt__["git.revision"](
cwd=target,
user=user,
password=password,
ignore_retcode=True,
output_encoding=output_encoding,
)
except CommandExecutionError:
new_rev = None
except Exception as exc: # pylint: disable=broad-except
log.error("Unexpected exception in git.latest state", exc_info=True)
if isinstance(exc, CommandExecutionError):
msg = _strip_exc(exc)
else:
msg = str(exc)
return _fail(ret, msg, comments)
msg = _format_comments(comments)
log.info(msg)
ret["comment"] = msg
if new_rev is not None:
ret["changes"]["revision"] = {"old": None, "new": new_rev}
return ret
def present(
name,
force=False,
bare=True,
template=None,
separate_git_dir=None,
shared=None,
user=None,
password=None,
output_encoding=None,
):
"""
Ensure that a repository exists in the given directory
.. warning::
If the minion has Git 2.5 or later installed, ``name`` points to a
worktree_, and ``force`` is set to ``True``, then the worktree will be
deleted. This has been corrected in Salt 2015.8.0.
name
Path to the directory
.. versionchanged:: 2015.8.0
This path must now be absolute
force : False
If ``True``, and if ``name`` points to an existing directory which does
not contain a git repository, then the contents of that directory will
be recursively removed and a new repository will be initialized in its
place.
bare : True
If ``True``, and a repository must be initialized, then the repository
will be a bare repository.
.. note::
This differs from the default behavior of :py:func:`git.init
<salt.modules.git.init>`, make sure to set this value to ``False``
if a bare repo is not desired.
template
If a new repository is initialized, this argument will specify an
alternate template directory.
.. versionadded:: 2015.8.0
separate_git_dir
If a new repository is initialized, this argument will specify an
alternate ``$GIT_DIR``
.. versionadded:: 2015.8.0
shared
Set sharing permissions on git repo. See `git-init(1)`_ for more
details.
.. versionadded:: 2015.5.0
user
User under which to run git commands. By default, commands are run by
the user under which the minion is running.
.. versionadded:: 0.17.0
password
Windows only. Required when specifying ``user``. This parameter will be
ignored on non-Windows platforms.
.. versionadded:: 2016.3.4
output_encoding
Use this option to specify which encoding to use to decode the output
from any git commands which are run. This should not be needed in most
cases.
.. note::
This should only be needed if the files in the repository were
created with filenames using an encoding other than UTF-8 to handle
Unicode characters.
.. versionadded:: 2018.3.1
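**Example** (an illustrative sketch only; the path shown below is a
placeholder, not a default): initialize a bare repository at the named path.
.. code-block:: yaml

    /srv/git/myrepo.git:
      git.present:
        - bare: True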
.. _`git-init(1)`: http://git-scm.com/docs/git-init
.. _`worktree`: http://git-scm.com/docs/git-worktree
"""
ret = {"name": name, "result": True, "comment": "", "changes": {}}
# If the named directory is a git repo return True
if os.path.isdir(name):
if bare and os.path.isfile(os.path.join(name, "HEAD")):
return ret
elif not bare and (
os.path.isdir(os.path.join(name, ".git"))
or __salt__["git.is_worktree"](
name, user=user, password=password, output_encoding=output_encoding
)
):
return ret
# Directory exists and is not a git repo, if force is set destroy the
# directory and recreate, otherwise throw an error
elif force:
# Directory exists, and the ``force`` option is enabled, so we need
# to clear out its contents to proceed.
if __opts__["test"]:
ret["changes"]["new"] = name
ret["changes"]["forced init"] = True
return _neutral_test(
ret,
"Target directory {0} exists. Since force=True, the "
"contents of {0} would be deleted, and a {1}repository "
"would be initialized in its place.".format(
name, "bare " if bare else ""
),
)
log.debug(
"Removing contents of %s to initialize %srepository in its "
"place (force=True set in git.present state)",
name,
"bare " if bare else "",
)
try:
if os.path.islink(name):
os.unlink(name)
else:
salt.utils.files.rm_rf(name)
except OSError as exc:
return _fail(ret, "Unable to remove {}: {}".format(name, exc))
else:
ret["changes"]["forced init"] = True
elif os.listdir(name):
return _fail(
ret,
"Target '{}' exists, is non-empty, and is not a git "
"repository. Set the 'force' option to True to remove "
"this directory's contents and proceed with initializing a "
"repository".format(name),
)
# If test mode is enabled, report what would be done
if __opts__["test"]:
ret["changes"]["new"] = name
return _neutral_test(
ret, "New {}repository would be created".format("bare " if bare else "")
)
__salt__["git.init"](
cwd=name,
bare=bare,
template=template,
separate_git_dir=separate_git_dir,
shared=shared,
user=user,
password=password,
output_encoding=output_encoding,
)
actions = ["Initialized {}repository in {}".format("bare " if bare else "", name)]
if template:
actions.append("Template directory set to {}".format(template))
if separate_git_dir:
actions.append("Gitdir set to {}".format(separate_git_dir))
message = ". ".join(actions)
if len(actions) > 1:
message += "."
log.info(message)
ret["changes"]["new"] = name
ret["comment"] = message
return ret
def detached(
name,
rev,
target,
remote="origin",
user=None,
password=None,
force_clone=False,
force_checkout=False,
fetch_remote=True,
hard_reset=False,
submodules=False,
identity=None,
https_user=None,
https_pass=None,
output_encoding=None,
**kwargs
):
"""
.. versionadded:: 2016.3.0
Make sure a repository is cloned to the given target directory and is
a detached HEAD checkout of the commit ID resolved from ``rev``.
name
Address of the remote repository.
rev
The branch, tag, or commit ID to checkout after clone.
If a branch or tag is specified it will be resolved to a commit ID
and checked out.
target
Name of the target directory where the repository will be cloned.
remote : origin
Git remote to use. If this state needs to clone the repo, it will clone
it using this value as the initial remote name. If the repository
already exists, and a remote by this name is not present, one will be
added.
user
User under which to run git commands. By default, commands are run by
the user under which the minion is running.
password
Windows only. Required when specifying ``user``. This parameter will be
ignored on non-Windows platforms.
.. versionadded:: 2016.3.4
force_clone : False
If the ``target`` directory exists and is not a git repository, then
this state will fail. Set this argument to ``True`` to remove the
contents of the target directory and clone the repo into it.
force_checkout : False
When checking out the revision ID, the state will fail if there are
uncommitted changes. Set this argument to ``True`` to discard uncommitted
changes when checking out.
fetch_remote : True
If ``False`` a fetch will not be performed and only local refs
will be reachable.
hard_reset : False
If ``True`` a hard reset will be performed before the checkout and any
uncommitted modifications to the working directory will be discarded.
Untracked files will remain in place.
.. note::
Changes resulting from a hard reset will not trigger requisites.
submodules : False
Update submodules
identity
A path on the minion (or a SaltStack fileserver URL, e.g.
``salt://path/to/identity_file``) to a private key to use for SSH
authentication.
https_user
HTTP Basic Auth username for HTTPS (only) clones
https_pass
HTTP Basic Auth password for HTTPS (only) clones
output_encoding
Use this option to specify which encoding to use to decode the output
from any git commands which are run. This should not be needed in most
cases.
.. note::
This should only be needed if the files in the repository were
created with filenames using an encoding other than UTF-8 to handle
Unicode characters.
.. versionadded:: 2018.3.1
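**Example** (an illustrative sketch only; the URL, tag, and path below are
placeholders): check out a fixed tag as a detached HEAD.
.. code-block:: yaml

    https://example.com/myproject.git:
      git.detached:
        - rev: v1.2.3
        - target: /srv/myproject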
"""
ret = {"name": name, "result": True, "comment": "", "changes": {}}
kwargs = salt.utils.args.clean_kwargs(**kwargs)
if kwargs:
return _fail(ret, salt.utils.args.invalid_kwargs(kwargs, raise_exc=False))
if not rev:
return _fail(
ret, "'{}' is not a valid value for the 'rev' argument".format(rev)
)
if not target:
return _fail(
ret, "'{}' is not a valid value for the 'target' argument".format(rev)
)
# Ensure that certain arguments are strings to ensure that comparisons work
if not isinstance(rev, str):
rev = str(rev)
if target is not None:
if not isinstance(target, str):
target = str(target)
if not os.path.isabs(target):
return _fail(ret, "Target '{}' is not an absolute path".format(target))
if user is not None and not isinstance(user, str):
user = str(user)
if remote is not None and not isinstance(remote, str):
remote = str(remote)
if identity is not None:
if isinstance(identity, str):
identity = [identity]
elif not isinstance(identity, list):
return _fail(ret, "Identity must be either a list or a string")
identity = [os.path.expanduser(x) for x in identity]
for ident_path in identity:
if "salt://" in ident_path:
try:
ident_path = __salt__["cp.cache_file"](ident_path)
except OSError as exc:
log.error("Failed to cache %s: %s", ident_path, exc)
return _fail(
ret, "Identity '{}' does not exist.".format(ident_path)
)
if not os.path.isabs(ident_path):
return _fail(
ret, "Identity '{}' is not an absolute path".format(ident_path)
)
if https_user is not None and not isinstance(https_user, str):
https_user = str(https_user)
if https_pass is not None and not isinstance(https_pass, str):
https_pass = str(https_pass)
if os.path.isfile(target):
return _fail(
ret,
"Target '{}' exists and is a regular file, cannot proceed".format(target),
)
try:
desired_fetch_url = salt.utils.url.add_http_basic_auth(
name, https_user, https_pass, https_only=True
)
except ValueError as exc:
return _fail(ret, exc.__str__())
redacted_fetch_url = salt.utils.url.redact_http_basic_auth(desired_fetch_url)
# Determine if supplied ref is a hash
remote_rev_type = "ref"
if len(rev) <= 40 and all(x in string.hexdigits for x in rev):
rev = rev.lower()
remote_rev_type = "hash"
comments = []
hash_exists_locally = False
local_commit_id = None
gitdir = os.path.join(target, ".git")
if os.path.isdir(gitdir) or __salt__["git.is_worktree"](
target, user=user, password=password, output_encoding=output_encoding
):
# Target directory is a git repository or git worktree
local_commit_id = _get_local_rev_and_branch(
target, user, password, output_encoding=output_encoding
)[0]
if remote_rev_type == "hash":
try:
__salt__["git.describe"](
target,
rev,
user=user,
password=password,
ignore_retcode=True,
output_encoding=output_encoding,
)
except CommandExecutionError:
hash_exists_locally = False
else:
# The rev is a hash and it exists locally so skip to checkout
hash_exists_locally = True
else:
# Check that remote is present and set to correct url
remotes = __salt__["git.remotes"](
target,
user=user,
password=password,
redact_auth=False,
output_encoding=output_encoding,
)
if remote in remotes and name in remotes[remote]["fetch"]:
pass
else:
# The fetch_url for the desired remote does not match the
# specified URL (or the remote does not exist), so set the
# remote URL.
current_fetch_url = None
if remote in remotes:
current_fetch_url = remotes[remote]["fetch"]
if __opts__["test"]:
return _neutral_test(
ret, "Remote {} would be set to {}".format(remote, name)
)
__salt__["git.remote_set"](
target,
url=name,
remote=remote,
user=user,
password=password,
https_user=https_user,
https_pass=https_pass,
output_encoding=output_encoding,
)
comments.append(
"Remote {} updated from '{}' to '{}'".format(
remote, current_fetch_url, name
)
)
else:
# Clone repository
if os.path.isdir(target):
target_contents = os.listdir(target)
if force_clone:
# Clone is required, and target directory exists, but the
# ``force`` option is enabled, so we need to clear out its
# contents to proceed.
if __opts__["test"]:
return _neutral_test(
ret,
"Target directory {0} exists. Since force_clone=True, "
"the contents of {0} would be deleted, and {1} would "
"be cloned into this directory.".format(target, name),
)
log.debug(
"Removing contents of %s to clone repository %s in its "
"place (force_clone=True set in git.detached state)",
target,
name,
)
removal_errors = {}
for target_object in target_contents:
target_path = os.path.join(target, target_object)
try:
salt.utils.files.rm_rf(target_path)
except OSError as exc:
if exc.errno != errno.ENOENT:
removal_errors[target_path] = exc
if removal_errors:
err_strings = [
" {}\n {}".format(k, v) for k, v in removal_errors.items()
]
return _fail(
ret,
"Unable to remove\n{}".format("\n".join(err_strings)),
comments,
)
ret["changes"]["forced clone"] = True
elif target_contents:
# Clone is required, but target dir exists and is non-empty. We
# can't proceed.
return _fail(
ret,
"Target '{}' exists, is non-empty and is not a git "
"repository. Set the 'force_clone' option to True to "
"remove this directory's contents and proceed with "
"cloning the remote repository".format(target),
)
log.debug("Target %s is not found, 'git clone' is required", target)
if __opts__["test"]:
return _neutral_test(
ret, "Repository {} would be cloned to {}".format(name, target)
)
try:
clone_opts = ["--no-checkout"]
if remote != "origin":
clone_opts.extend(["--origin", remote])
__salt__["git.clone"](
target,
name,
user=user,
password=password,
opts=clone_opts,
identity=identity,
https_user=https_user,
https_pass=https_pass,
saltenv=__env__,
output_encoding=output_encoding,
)
comments.append("{} cloned to {}".format(name, target))
except Exception as exc: # pylint: disable=broad-except
log.error("Unexpected exception in git.detached state", exc_info=True)
if isinstance(exc, CommandExecutionError):
msg = _strip_exc(exc)
else:
msg = str(exc)
return _fail(ret, msg, comments)
# Repository exists and is ready for fetch/checkout
refspecs = [
"refs/heads/*:refs/remotes/{}/*".format(remote),
"+refs/tags/*:refs/tags/*",
]
if hash_exists_locally or fetch_remote is False:
pass
else:
# Fetch refs from remote
if __opts__["test"]:
return _neutral_test(
ret, "Repository remote {} would be fetched".format(remote)
)
try:
fetch_changes = __salt__["git.fetch"](
target,
remote=remote,
force=True,
refspecs=refspecs,
user=user,
password=password,
identity=identity,
saltenv=__env__,
output_encoding=output_encoding,
)
except CommandExecutionError as exc:
msg = "Fetch failed"
msg += ":\n\n" + str(exc)
return _fail(ret, msg, comments)
else:
if fetch_changes:
comments.append(
"Remote {} was fetched, resulting in updated refs".format(remote)
)
# get refs and checkout
checkout_commit_id = ""
if remote_rev_type == "hash":
if __salt__["git.describe"](
target, rev, user=user, password=password, output_encoding=output_encoding
):
checkout_commit_id = rev
else:
return _fail(ret, "Revision '{}' does not exist".format(rev))
else:
try:
all_remote_refs = __salt__["git.remote_refs"](
target,
user=user,
password=password,
identity=identity,
https_user=https_user,
https_pass=https_pass,
ignore_retcode=False,
output_encoding=output_encoding,
)
if "refs/remotes/" + remote + "/" + rev in all_remote_refs:
checkout_commit_id = all_remote_refs[
"refs/remotes/" + remote + "/" + rev
]
elif "refs/tags/" + rev in all_remote_refs:
checkout_commit_id = all_remote_refs["refs/tags/" + rev]
else:
return _fail(ret, "Revision '{}' does not exist".format(rev))
except CommandExecutionError as exc:
return _fail(
ret, "Failed to list refs for {}: {}".format(remote, _strip_exc(exc))
)
if hard_reset:
if __opts__["test"]:
return _neutral_test(
ret, "Hard reset to HEAD would be performed on {}".format(target)
)
__salt__["git.reset"](
target,
opts=["--hard", "HEAD"],
user=user,
password=password,
output_encoding=output_encoding,
)
comments.append("Repository was reset to HEAD before checking out revision")
# TODO: implement clean function for git module and add clean flag
if checkout_commit_id == local_commit_id:
new_rev = None
else:
if __opts__["test"]:
ret["changes"]["HEAD"] = {"old": local_commit_id, "new": checkout_commit_id}
return _neutral_test(
ret,
"Commit ID {} would be checked out at {}".format(
checkout_commit_id, target
),
)
__salt__["git.checkout"](
target,
checkout_commit_id,
force=force_checkout,
user=user,
password=password,
output_encoding=output_encoding,
)
comments.append(
"Commit ID {} was checked out at {}".format(checkout_commit_id, target)
)
try:
new_rev = __salt__["git.revision"](
cwd=target,
user=user,
password=password,
ignore_retcode=True,
output_encoding=output_encoding,
)
except CommandExecutionError:
new_rev = None
if submodules:
__salt__["git.submodule"](
target,
"update",
opts=["--init", "--recursive"],
user=user,
password=password,
identity=identity,
output_encoding=output_encoding,
)
comments.append("Submodules were updated")
if new_rev is not None:
ret["changes"]["HEAD"] = {"old": local_commit_id, "new": new_rev}
else:
comments.append("Already checked out at correct revision")
msg = _format_comments(comments)
log.info(msg)
ret["comment"] = msg
return ret
def cloned(
name,
target,
branch=None,
user=None,
password=None,
identity=None,
https_user=None,
https_pass=None,
output_encoding=None,
):
"""
.. versionadded:: 2018.3.3,2019.2.0
Ensure that a repository has been cloned to the specified target directory.
If not, clone that repository. No fetches will be performed once cloned.
name
Address of the remote repository
target
Name of the target directory where the repository should be cloned
branch
Remote branch to check out. If unspecified, the default branch (i.e.
the one to which the remote HEAD points) will be checked out.
.. note::
The local branch name will match the remote branch name. If the
branch name is changed, then that branch will be checked out
locally, but keep in mind that remote repository will not be
fetched. If your use case requires that you keep the clone up to
date with the remote repository, then consider using
:py:func:`git.latest <salt.states.git.latest>`.
user
User under which to run git commands. By default, commands are run by
the user under which the minion is running.
password
Windows only. Required when specifying ``user``. This parameter will be
ignored on non-Windows platforms.
identity
Path to a private key to use for ssh URLs. Works the same way as in
:py:func:`git.latest <salt.states.git.latest>`, see that state's
documentation for more information.
https_user
HTTP Basic Auth username for HTTPS (only) clones
https_pass
HTTP Basic Auth password for HTTPS (only) clones
output_encoding
Use this option to specify which encoding to use to decode the output
from any git commands which are run. This should not be needed in most
cases.
.. note::
This should only be needed if the files in the repository were
created with filenames using an encoding other than UTF-8 to handle
Unicode characters.
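**Example** (an illustrative sketch only; the URL, branch, and path below are
placeholders): clone once and keep the ``develop`` branch checked out without
any further fetches.
.. code-block:: yaml

    https://example.com/myproject.git:
      git.cloned:
        - target: /srv/myproject
        - branch: develop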
"""
ret = {"name": name, "result": False, "comment": "", "changes": {}}
if target is None:
ret["comment"] = "'target' argument is required"
return ret
elif not isinstance(target, str):
target = str(target)
if not os.path.isabs(target):
ret["comment"] = "'target' path must be absolute"
return ret
if branch is not None:
if not isinstance(branch, str):
branch = str(branch)
if not branch:
ret["comment"] = "Invalid 'branch' argument"
return ret
if not os.path.exists(target):
need_clone = True
else:
try:
__salt__["git.status"](
target, user=user, password=password, output_encoding=output_encoding
)
except Exception as exc: # pylint: disable=broad-except
ret["comment"] = str(exc)
return ret
else:
need_clone = False
comments = []
def _clone_changes(ret):
ret["changes"]["new"] = name + " => " + target
def _branch_changes(ret, old, new):
ret["changes"]["branch"] = {"old": old, "new": new}
if need_clone:
if __opts__["test"]:
_clone_changes(ret)
comment = "{} would be cloned to {}{}".format(
name,
target,
" with branch '{}'".format(branch) if branch is not None else "",
)
return _neutral_test(ret, comment)
clone_opts = ["--branch", branch] if branch is not None else None
try:
__salt__["git.clone"](
target,
name,
opts=clone_opts,
user=user,
password=password,
identity=identity,
https_user=https_user,
https_pass=https_pass,
output_encoding=output_encoding,
)
except CommandExecutionError as exc:
msg = "Clone failed: {}".format(_strip_exc(exc))
return _fail(ret, msg, comments)
comments.append(
"{} cloned to {}{}".format(
name,
target,
" with branch '{}'".format(branch) if branch is not None else "",
)
)
_clone_changes(ret)
ret["comment"] = _format_comments(comments)
ret["result"] = True
return ret
else:
if branch is None:
return _already_cloned(ret, target, branch, comments)
else:
current_branch = __salt__["git.current_branch"](
target, user=user, password=password, output_encoding=output_encoding
)
if current_branch == branch:
return _already_cloned(ret, target, branch, comments)
else:
if __opts__["test"]:
_branch_changes(ret, current_branch, branch)
return _neutral_test(
ret, "Branch would be changed to '{}'".format(branch)
)
try:
__salt__["git.rev_parse"](
target,
rev=branch,
user=user,
password=password,
ignore_retcode=True,
output_encoding=output_encoding,
)
except CommandExecutionError:
# Local head does not exist, so we need to check out a new
# branch at the remote rev
checkout_rev = "/".join(("origin", branch))
checkout_opts = ["-b", branch]
else:
# Local head exists, so we just need to check it out
checkout_rev = branch
checkout_opts = None
try:
__salt__["git.checkout"](
target,
rev=checkout_rev,
opts=checkout_opts,
user=user,
password=password,
output_encoding=output_encoding,
)
except CommandExecutionError as exc:
msg = "Failed to change branch to '{}': {}".format(branch, exc)
return _fail(ret, msg, comments)
else:
comments.append("Branch changed to '{}'".format(branch))
_branch_changes(ret, current_branch, branch)
ret["comment"] = _format_comments(comments)
ret["result"] = True
return ret
def config_unset(
name,
value_regex=None,
repo=None,
user=None,
password=None,
output_encoding=None,
**kwargs
):
r"""
.. versionadded:: 2015.8.0
Ensure that the named config key is not present
name
The name of the configuration key to unset. This value can be a regex,
but the regex must match the entire key name. For example, ``foo\.``
would not match all keys in the ``foo`` section; it would be necessary
to use ``foo\..+`` to do so.
value_regex
Regex indicating the values to unset for the matching key(s)
.. note::
This option behaves differently depending on whether or not ``all``
is set to ``True``. If it is, then all values matching the regex
will be deleted (this is the only way to delete multiple values
from a multivar). If ``all`` is set to ``False``, then this state
will fail if the regex matches more than one value in a multivar.
all : False
If ``True``, unset all matches
repo
Location of the git repository for which the config value should be
set. Required unless ``global`` is set to ``True``.
user
User under which to run git commands. By default, commands are run by
the user under which the minion is running.
password
Windows only. Required when specifying ``user``. This parameter will be
ignored on non-Windows platforms.
.. versionadded:: 2016.3.4
global : False
If ``True``, this will set a global git config option
output_encoding
Use this option to specify which encoding to use to decode the output
from any git commands which are run. This should not be needed in most
cases.
.. note::
This should only be needed if the files in the repository were
created with filenames using an encoding other than UTF-8 to handle
Unicode characters.
.. versionadded:: 2018.3.1
**Examples:**
.. code-block:: yaml
# Value matching 'baz'
mylocalrepo:
git.config_unset:
- name: foo.bar
- value_regex: 'baz'
- repo: /path/to/repo
# Ensure entire multivar is unset
mylocalrepo:
git.config_unset:
- name: foo.bar
- all: True
# Ensure all variables in 'foo' section are unset, including multivars
mylocalrepo:
git.config_unset:
- name: 'foo\..+'
- all: True
# Ensure that global config value is unset
mylocalrepo:
git.config_unset:
- name: foo.bar
- global: True
"""
ret = {
"name": name,
"changes": {},
"result": True,
"comment": "No matching keys are set",
}
# Sanitize kwargs and make sure that no invalid ones were passed. This
# allows us to accept 'global' as an argument to this function without
# shadowing global(), while also not allowing unwanted arguments to be
# passed.
kwargs = salt.utils.args.clean_kwargs(**kwargs)
global_ = kwargs.pop("global", False)
all_ = kwargs.pop("all", False)
if kwargs:
return _fail(ret, salt.utils.args.invalid_kwargs(kwargs, raise_exc=False))
if not global_ and not repo:
return _fail(
ret, "Non-global config options require the 'repo' argument to be set"
)
if not isinstance(name, str):
name = str(name)
if value_regex is not None:
if not isinstance(value_regex, str):
value_regex = str(value_regex)
# Ensure that the key regex matches the full key name
key = "^" + name.lstrip("^").rstrip("$") + "$"
# Get matching keys/values
pre_matches = __salt__["git.config_get_regexp"](
cwd=repo,
key=key,
value_regex=value_regex,
user=user,
password=password,
ignore_retcode=True,
output_encoding=output_encoding,
**{"global": global_}
)
if not pre_matches:
# No changes need to be made
return ret
# Perform sanity check on the matches. We can't proceed if the value_regex
# matches more than one value in a given key, and 'all' is not set to True
if not all_:
greedy_matches = [
"{} ({})".format(x, ", ".join(y))
for x, y in pre_matches.items()
if len(y) > 1
]
if greedy_matches:
if value_regex is not None:
return _fail(
ret,
"Multiple values are matched by value_regex for the "
"following keys (set 'all' to True to force removal): "
"{}".format("; ".join(greedy_matches)),
)
else:
return _fail(
ret,
"Multivar(s) matched by the key expression (set 'all' "
"to True to force removal): {}".format("; ".join(greedy_matches)),
)
if __opts__["test"]:
ret["changes"] = pre_matches
return _neutral_test(
ret, "{} key(s) would have value(s) unset".format(len(pre_matches))
)
if value_regex is None:
pre = pre_matches
else:
# Get all keys matching the key expression, so we can accurately report
# on changes made.
pre = __salt__["git.config_get_regexp"](
cwd=repo,
key=key,
value_regex=None,
user=user,
password=password,
ignore_retcode=True,
output_encoding=output_encoding,
**{"global": global_}
)
failed = []
# Unset the specified value(s). There is no unset for regexes so loop
# through the pre_matches dict and unset each matching key individually.
for key_name in pre_matches:
try:
__salt__["git.config_unset"](
cwd=repo,
key=name,
value_regex=value_regex,
all=all_,
user=user,
password=password,
output_encoding=output_encoding,
**{"global": global_}
)
except CommandExecutionError as exc:
msg = "Failed to unset '{}'".format(key_name)
if value_regex is not None:
msg += " using value_regex '{1}'"
msg += ": " + _strip_exc(exc)
log.error(msg)
failed.append(key_name)
if failed:
return _fail(
ret,
"Error(s) occurred unsetting values for the following keys (see "
"the minion log for details): {}".format(", ".join(failed)),
)
post = __salt__["git.config_get_regexp"](
cwd=repo,
key=key,
value_regex=None,
user=user,
password=password,
ignore_retcode=True,
output_encoding=output_encoding,
**{"global": global_}
)
for key_name in pre:
if key_name not in post:
ret["changes"][key_name] = pre[key_name]
unset = [x for x in pre[key_name] if x not in post[key_name]]
if unset:
ret["changes"][key_name] = unset
if value_regex is None:
post_matches = post
else:
post_matches = __salt__["git.config_get_regexp"](
cwd=repo,
key=key,
value_regex=value_regex,
user=user,
password=password,
ignore_retcode=True,
output_encoding=output_encoding,
**{"global": global_}
)
if post_matches:
failed = ["{} ({})".format(x, ", ".join(y)) for x, y in post_matches.items()]
return _fail(ret, "Failed to unset value(s): {}".format("; ".join(failed)))
ret["comment"] = "Value(s) successfully unset"
return ret
def config_set(
name,
value=None,
multivar=None,
repo=None,
user=None,
password=None,
output_encoding=None,
**kwargs
):
"""
.. versionadded:: 2014.7.0
.. versionchanged:: 2015.8.0
Renamed from ``git.config`` to ``git.config_set``. For earlier
versions, use ``git.config``.
Ensure that a config value is set to the desired value(s)
name
Name of the git config value to set
value
Set a single value for the config item
multivar
Set multiple values for the config item
.. note::
The order matters here; if the same parameters are set but in a
different order, they will be removed and replaced in the order
specified.
.. versionadded:: 2015.8.0
repo
Location of the git repository for which the config value should be
set. Required unless ``global`` is set to ``True``.
user
User under which to run git commands. By default, the commands are run
by the user under which the minion is running.
password
Windows only. Required when specifying ``user``. This parameter will be
ignored on non-Windows platforms.
.. versionadded:: 2016.3.4
global : False
If ``True``, this will set a global git config option
output_encoding
Use this option to specify which encoding to use to decode the output
from any git commands which are run. This should not be needed in most
cases.
.. note::
This should only be needed if the files in the repository were
created with filenames using an encoding other than UTF-8 to handle
Unicode characters.
.. versionadded:: 2018.3.1
**Local Config Example:**
.. code-block:: yaml
# Single value
mylocalrepo:
git.config_set:
- name: user.email
- value: foo@bar.net
- repo: /path/to/repo
# Multiple values
mylocalrepo:
git.config_set:
- name: mysection.myattribute
- multivar:
- foo
- bar
- baz
- repo: /path/to/repo
**Global Config Example (User ``foo``):**
.. code-block:: yaml
mylocalrepo:
git.config_set:
- name: user.name
- value: Foo Bar
- user: foo
- global: True
"""
ret = {"name": name, "changes": {}, "result": True, "comment": ""}
if value is not None and multivar is not None:
return _fail(ret, "Only one of 'value' and 'multivar' is permitted")
# Sanitize kwargs and make sure that no invalid ones were passed. This
# allows us to accept 'global' as an argument to this function without
# shadowing global(), while also not allowing unwanted arguments to be
# passed.
kwargs = salt.utils.args.clean_kwargs(**kwargs)
global_ = kwargs.pop("global", False)
if kwargs:
return _fail(ret, salt.utils.args.invalid_kwargs(kwargs, raise_exc=False))
if not global_ and not repo:
return _fail(
ret, "Non-global config options require the 'repo' argument to be set"
)
if not isinstance(name, str):
name = str(name)
if value is not None:
if not isinstance(value, str):
value = str(value)
value_comment = "'" + value + "'"
desired = [value]
if multivar is not None:
if not isinstance(multivar, list):
try:
multivar = multivar.split(",")
except AttributeError:
multivar = str(multivar).split(",")
else:
new_multivar = []
for item in multivar:
if isinstance(item, str):
new_multivar.append(item)
else:
new_multivar.append(str(item))
multivar = new_multivar
value_comment = multivar
desired = multivar
# Get current value
pre = __salt__["git.config_get"](
cwd=repo,
key=name,
user=user,
password=password,
ignore_retcode=True,
output_encoding=output_encoding,
**{"all": True, "global": global_}
)
if desired == pre:
ret["comment"] = "{}'{}' is already set to {}".format(
"Global key " if global_ else "", name, value_comment
)
return ret
if __opts__["test"]:
ret["changes"] = {"old": pre, "new": desired}
msg = "{}'{}' would be {} {}".format(
"Global key " if global_ else "",
name,
"added as" if pre is None else "set to",
value_comment,
)
return _neutral_test(ret, msg)
try:
# Set/update config value
post = __salt__["git.config_set"](
cwd=repo,
key=name,
value=value,
multivar=multivar,
user=user,
password=password,
output_encoding=output_encoding,
**{"global": global_}
)
except CommandExecutionError as exc:
return _fail(
ret,
"Failed to set {}'{}' to {}: {}".format(
"global key " if global_ else "", name, value_comment, _strip_exc(exc)
),
)
if pre != post:
ret["changes"][name] = {"old": pre, "new": post}
if post != desired:
return _fail(
ret,
"Failed to set {}'{}' to {}".format(
"global key " if global_ else "", name, value_comment
),
)
ret["comment"] = "{}'{}' was {} {}".format(
"Global key " if global_ else "",
name,
"added as" if pre is None else "set to",
value_comment,
)
return ret
| {
"content_hash": "87b740d2084289bf182b61455475fc6c",
"timestamp": "",
"source": "github",
"line_count": 3278,
"max_line_length": 112,
"avg_line_length": 38.711409395973156,
"alnum_prop": 0.47386048417601817,
"repo_name": "saltstack/salt",
"id": "307c2651f89f1230c028c97ae169c43f248d8951",
"size": "126896",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "salt/states/git.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14911"
},
{
"name": "C",
"bytes": "1571"
},
{
"name": "Cython",
"bytes": "1458"
},
{
"name": "Dockerfile",
"bytes": "184"
},
{
"name": "Groovy",
"bytes": "12318"
},
{
"name": "HCL",
"bytes": "257"
},
{
"name": "HTML",
"bytes": "8031"
},
{
"name": "Jinja",
"bytes": "45598"
},
{
"name": "Makefile",
"bytes": "713"
},
{
"name": "NSIS",
"bytes": "76572"
},
{
"name": "PowerShell",
"bytes": "75891"
},
{
"name": "Python",
"bytes": "41444811"
},
{
"name": "Rich Text Format",
"bytes": "6242"
},
{
"name": "Roff",
"bytes": "191"
},
{
"name": "Ruby",
"bytes": "961"
},
{
"name": "SaltStack",
"bytes": "35856"
},
{
"name": "Scheme",
"bytes": "895"
},
{
"name": "Scilab",
"bytes": "1147"
},
{
"name": "Shell",
"bytes": "524917"
}
],
"symlink_target": ""
} |
import os
import sys
from pathlib import Path
class Setup:
CONFIGURATION_FILE = os.path.join(Path(__file__).parents[1], "config", "server.cfg")
VLC_DEFAULT_COMMAND = "vlc -f"
POSIX = 'posix' in sys.builtin_module_names
VLC_PLAYLIST_END = "vlc://quit"
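# Usage sketch (the caller below is hypothetical; it only illustrates that the
# values above are plain class attributes read directly by the server code):
#
#   if Setup.POSIX:
#       command = Setup.VLC_DEFAULT_COMMAND.split() + [Setup.VLC_PLAYLIST_END]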
| {
"content_hash": "d45754dfc7f24a0ebdd77a976315e755",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 88,
"avg_line_length": 26.9,
"alnum_prop": 0.6728624535315985,
"repo_name": "danfr/RemoteTV",
"id": "185cbf721bcd3c3eddb2d6a3d61438b9d90e0363",
"size": "269",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Server/bin/Setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24050"
}
],
"symlink_target": ""
} |
from collections import defaultdict
import heapq
def top_k_frequent(nums, k):
counter = defaultdict(int)
for n in nums:
counter[n] += 1
return heapq.nlargest(k, counter, key=lambda x: counter[x])
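# A minimal usage sketch (not part of the original solution): counts are built
# with defaultdict and heapq.nlargest returns the k keys with the highest
# counts, in descending order of frequency.
if __name__ == "__main__":
    sample = [1, 1, 1, 2, 2, 3]
    assert top_k_frequent(sample, 2) == [1, 2]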
| {
"content_hash": "59c647b683f057e76bc70e746104fd53",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 63,
"avg_line_length": 24.22222222222222,
"alnum_prop": 0.6788990825688074,
"repo_name": "xq5he/leetcodepy",
"id": "d564a53ab0ba0229f87be3ce23d2ca77cf54e8af",
"size": "218",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "algorithms/top_k_freq_elements.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4488"
}
],
"symlink_target": ""
} |
"""The version information for this release of PyCLibrary.
"""
from __future__ import (division, unicode_literals, print_function,
absolute_import)
from collections import namedtuple
# The major release number. Differences in the major number indicate
# possibly large differences in API.
MAJOR = 0
# The minor release number. Differences in the minor number indicate
# possibly small differences in the API, but these changes will come with
# backwards compatibility support when possible. Minor releases are
# typically used for large feature additions.
MINOR = 1
# The micro release number. The micro release number is incremented
# for bug fix releases and small feature additions.
MICRO = 0
# The status indicates whether this is a development or pre-release version
STATUS = ''
#: A namedtuple of the version info for the current release.
version_info = namedtuple('version_info', 'major minor micro status')
version_info = version_info(MAJOR, MINOR, MICRO, STATUS)
# Remove everything but the 'version_info' from this module.
del namedtuple, MAJOR, MINOR, MICRO, STATUS
__version__ = ('{0}.{1}.{2}'.format(*version_info) if not version_info.status
else '{0}.{1}.{2}.{3}'.format(*version_info))
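# For example, with MAJOR=0, MINOR=1, MICRO=0 and an empty STATUS as above,
# __version__ evaluates to '0.1.0'; a non-empty STATUS such as 'dev' would
# instead yield '0.1.0.dev'.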
| {
"content_hash": "1c0579add1c5c68b92db762debe0aa97",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 77,
"avg_line_length": 38.625,
"alnum_prop": 0.7346278317152104,
"repo_name": "mrh1997/pyclibrary",
"id": "dad8f0e626e529bac7ad4adf5db8d7202cf6b943",
"size": "1623",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyclibrary/version.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "12031"
},
{
"name": "C++",
"bytes": "641"
},
{
"name": "Python",
"bytes": "152522"
}
],
"symlink_target": ""
} |
from django.apps import apps
from django.core.management.base import BaseCommand, CommandError
from ... import models, utils
from ...exceptions import NotHistoricalModelError
get_model = apps.get_model
class Command(BaseCommand):
args = "<app.model app.model ...>"
help = (
"Populates the corresponding HistoricalRecords field with "
"the current state of all instances in a model"
)
COMMAND_HINT = "Please specify a model or use the --auto option"
MODEL_NOT_FOUND = "Unable to find model"
MODEL_NOT_HISTORICAL = "No history model found"
NO_REGISTERED_MODELS = "No registered models were found\n"
START_SAVING_FOR_MODEL = "Saving historical records for {model}\n"
DONE_SAVING_FOR_MODEL = "Finished saving historical records for {model}\n"
EXISTING_HISTORY_FOUND = "Existing history found, skipping model"
INVALID_MODEL_ARG = "An invalid model was specified"
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument("models", nargs="*", type=str)
parser.add_argument(
"--auto",
action="store_true",
dest="auto",
default=False,
help="Automatically search for models with the HistoricalRecords field "
"type",
)
parser.add_argument(
"--batchsize",
action="store",
dest="batchsize",
default=200,
type=int,
help="Set a custom batch size when bulk inserting historical records.",
)
def handle(self, *args, **options):
self.verbosity = options["verbosity"]
to_process = set()
model_strings = options.get("models", []) or args
if model_strings:
for model_pair in self._handle_model_list(*model_strings):
to_process.add(model_pair)
elif options["auto"]:
to_process = self._auto_models()
else:
if self.verbosity >= 1:
self.stdout.write(self.COMMAND_HINT)
self._process(to_process, batch_size=options["batchsize"])
def _auto_models(self):
to_process = set()
for model in models.registered_models.values():
try: # avoid issues with multi-table inheritance
history_model = utils.get_history_model_for_model(model)
except NotHistoricalModelError:
continue
to_process.add((model, history_model))
if not to_process:
if self.verbosity >= 1:
self.stdout.write(self.NO_REGISTERED_MODELS)
return to_process
def _handle_model_list(self, *args):
failing = False
for natural_key in args:
try:
model, history = self._model_from_natural_key(natural_key)
except ValueError as e:
failing = True
self.stderr.write("{error}\n".format(error=e))
else:
if not failing:
yield (model, history)
if failing:
raise CommandError(self.INVALID_MODEL_ARG)
def _model_from_natural_key(self, natural_key):
try:
app_label, model = natural_key.split(".", 1)
except ValueError:
model = None
else:
try:
model = get_model(app_label, model)
except LookupError:
model = None
if not model:
msg = self.MODEL_NOT_FOUND + " < {model} >\n".format(model=natural_key)
raise ValueError(msg)
try:
history_model = utils.get_history_model_for_model(model)
except NotHistoricalModelError:
msg = self.MODEL_NOT_HISTORICAL + " < {model} >\n".format(model=natural_key)
raise ValueError(msg)
return model, history_model
def _bulk_history_create(self, model, batch_size):
"""Save a copy of all instances to the historical model.
:param model: Model you want to bulk create
:param batch_size: number of models to create at once.
:return:
"""
instances = []
history = utils.get_history_manager_for_model(model)
if self.verbosity >= 1:
self.stdout.write(
"Starting bulk creating history models for {} instances {}-{}".format(
model, 0, batch_size
)
)
iterator_kwargs = {"chunk_size": batch_size}
for index, instance in enumerate(
model._default_manager.iterator(**iterator_kwargs)
):
# Can't Just pass batch_size to bulk_create as this can lead to
# Out of Memory Errors as we load too many models into memory after
# creating them. So we only keep batch_size worth of models in
# historical_instances and clear them after we hit batch_size
if index % batch_size == 0:
history.bulk_history_create(instances, batch_size=batch_size)
instances = []
if self.verbosity >= 1:
self.stdout.write(
"Finished bulk creating history models for {} "
"instances {}-{}, starting next {}".format(
model, index - batch_size, index, batch_size
)
)
instances.append(instance)
# create any we didn't get in the last loop
if instances:
history.bulk_history_create(instances, batch_size=batch_size)
def _process(self, to_process, batch_size):
for model, history_model in to_process:
if history_model.objects.count():
self.stderr.write(
"{msg} {model}\n".format(
msg=self.EXISTING_HISTORY_FOUND, model=model
)
)
continue
if self.verbosity >= 1:
self.stdout.write(self.START_SAVING_FOR_MODEL.format(model=model))
self._bulk_history_create(model, batch_size)
if self.verbosity >= 1:
self.stdout.write(self.DONE_SAVING_FOR_MODEL.format(model=model))
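# Usage sketch (assumes a Django project with django-simple-history installed
# and at least one model registered for history tracking); the flags mirror
# the arguments declared in add_arguments() above:
#
#   python manage.py populate_history --auto --batchsize 500
#   python manage.py populate_history myapp.MyModel otherapp.OtherModel
#
# The same command can also be driven from code:
#
#   from django.core.management import call_command
#   call_command("populate_history", auto=True, batchsize=500)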
| {
"content_hash": "e8eea7c6d50a8d9769643a159d1169d9",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 88,
"avg_line_length": 36.86470588235294,
"alnum_prop": 0.562629647359183,
"repo_name": "treyhunner/django-simple-history",
"id": "3fc2c5807d955f3361913cdb6d4f7e4b9f0c7977",
"size": "6267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simple_history/management/commands/populate_history.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "3476"
},
{
"name": "Makefile",
"bytes": "860"
},
{
"name": "Python",
"bytes": "218249"
}
],
"symlink_target": ""
} |
from .exception import TinEyeServiceException, TinEyeServiceError, TinEyeServiceWarning
from .image import Image
from .matchengine_request import MatchEngineRequest
from .mobileengine_request import MobileEngineRequest
from .multicolorengine_request import MulticolorEngineRequest
from .wineengine_request import WineEngineRequest
| {
"content_hash": "efdc2bf561124532a98306e18ac4a583",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 87,
"avg_line_length": 55.166666666666664,
"alnum_prop": 0.8851963746223565,
"repo_name": "TinEye/tineyeservices_python",
"id": "5b1c6238a77291fe71bd78ad49b9f1d99aabc663",
"size": "416",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tineyeservices/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "13"
},
{
"name": "Python",
"bytes": "90116"
}
],
"symlink_target": ""
} |
import time
from twisted.internet import protocol, defer
from twisted.python import failure
import twisted.internet.reactor
import constants
import encoding
import msgtypes
import msgformat
from contact import Contact
reactor = twisted.internet.reactor
class TimeoutError(Exception):
""" Raised when a RPC times out """
class KademliaProtocol(protocol.DatagramProtocol):
""" Implements all low-level network-related functions of a Kademlia node """
msgSizeLimit = constants.udpDatagramMaxSize-26
maxToSendDelay = 10**-3#0.05
minToSendDelay = 10**-5#0.01
def __init__(self, node, msgEncoder=encoding.Bencode(), msgTranslator=msgformat.DefaultFormat()):
self._node = node
self._encoder = msgEncoder
self._translator = msgTranslator
self._sentMessages = {}
self._partialMessages = {}
self._partialMessagesProgress = {}
self._next = 0
self._callLaterList = {}
def sendRPC(self, contact, method, args, rawResponse=False):
""" Sends an RPC to the specified contact
@param contact: The contact (remote node) to send the RPC to
@type contact: kademlia.contacts.Contact
@param method: The name of remote method to invoke
@type method: str
@param args: A list of (non-keyword) arguments to pass to the remote
method, in the correct order
@type args: tuple
@param rawResponse: If this is set to C{True}, the caller of this RPC
will receive a tuple containing the actual response
message object and the originating address tuple as
a result; in other words, it will not be
interpreted by this class. Unless something special
needs to be done with the metadata associated with
the message, this should remain C{False}.
@type rawResponse: bool
@return: This immediately returns a deferred object, which will return
the result of the RPC call, or raise the relevant exception
if the remote node raised one. If C{rawResponse} is set to
C{True}, however, it will always return the actual response
message (which may be a C{ResponseMessage} or an
C{ErrorMessage}).
@rtype: twisted.internet.defer.Deferred
"""
msg = msgtypes.RequestMessage(self._node.id, method, args)
msgPrimitive = self._translator.toPrimitive(msg)
encodedMsg = self._encoder.encode(msgPrimitive)
df = defer.Deferred()
if rawResponse:
df._rpcRawResponse = True
# Set the RPC timeout timer
timeoutCall = reactor.callLater(constants.rpcTimeout, self._msgTimeout, msg.id) #IGNORE:E1101
# Transmit the data
self._send(encodedMsg, msg.id, (contact.address, contact.port))
self._sentMessages[msg.id] = (contact.id, df, timeoutCall)
return df
def datagramReceived(self, datagram, address):
""" Handles and parses incoming RPC messages (and responses)
@note: This is automatically called by Twisted when the protocol
receives a UDP datagram
"""
if datagram[0] == '\x00' and datagram[25] == '\x00':
totalPackets = (ord(datagram[1]) << 8) | ord(datagram[2])
msgID = datagram[5:25]
seqNumber = (ord(datagram[3]) << 8) | ord(datagram[4])
if msgID not in self._partialMessages:
self._partialMessages[msgID] = {}
self._partialMessages[msgID][seqNumber] = datagram[26:]
if len(self._partialMessages[msgID]) == totalPackets:
keys = self._partialMessages[msgID].keys()
keys.sort()
data = ''
for key in keys:
data += self._partialMessages[msgID][key]
datagram = data
del self._partialMessages[msgID]
else:
return
try:
msgPrimitive = self._encoder.decode(datagram)
except encoding.DecodeError:
# We received some rubbish here
return
message = self._translator.fromPrimitive(msgPrimitive)
remoteContact = Contact(message.nodeID, address[0], address[1], self)
# Refresh the remote node's details in the local node's k-buckets
self._node.addContact(remoteContact)
if isinstance(message, msgtypes.RequestMessage):
# This is an RPC method request
self._handleRPC(remoteContact, message.id, message.request, message.args)
elif isinstance(message, msgtypes.ResponseMessage):
# Find the message that triggered this response
if self._sentMessages.has_key(message.id):
# Cancel timeout timer for this RPC
df, timeoutCall = self._sentMessages[message.id][1:3]
timeoutCall.cancel()
del self._sentMessages[message.id]
if hasattr(df, '_rpcRawResponse'):
# The RPC requested that the raw response message and originating address be returned; do not interpret it
df.callback((message, address))
elif isinstance(message, msgtypes.ErrorMessage):
# The RPC request raised a remote exception; raise it locally
if message.exceptionType.startswith('exceptions.'):
exceptionClassName = message.exceptionType[11:]
else:
localModuleHierarchy = self.__module__.split('.')
remoteHierarchy = message.exceptionType.split('.')
#strip the remote hierarchy
while remoteHierarchy[0] == localModuleHierarchy[0]:
remoteHierarchy.pop(0)
localModuleHierarchy.pop(0)
exceptionClassName = '.'.join(remoteHierarchy)
remoteException = None
try:
exec 'remoteException = %s("%s")' % (exceptionClassName, message.response)
except Exception:
# We could not recreate the exception; create a generic one
remoteException = Exception(message.response)
df.errback(remoteException)
else:
# We got a result from the RPC
df.callback(message.response)
else:
# If the original message isn't found, it must have timed out
#TODO: we should probably do something with this...
pass
def _send(self, data, rpcID, address):
""" Transmit the specified data over UDP, breaking it up into several
packets if necessary
If the data is spread over multiple UDP datagrams, the packets have the
following structure::
| Transmission | Total number | Sequence number |   RPC ID   | Header end      |
|   type ID    |  of packets  |  of this packet |            | indicator, 0x00 |
|   (1 byte)   |   (2 bytes)  |     (2 bytes)   | (20 bytes) |    (1 byte)     |
@note: The header used for breaking up large data segments will
possibly be moved out of the KademliaProtocol class in the
future, into something similar to a message translator/encoder
class (see C{kademlia.msgformat} and C{kademlia.encoding}).
"""
if len(data) > self.msgSizeLimit:
# We have to spread the data over multiple UDP datagrams, and provide sequencing information
# 1st byte is transmission type id, bytes 2 & 3 are the total number of packets in this transmission, bytes 4 & 5 are the sequence number for this specific packet
totalPackets = len(data) / self.msgSizeLimit
if len(data) % self.msgSizeLimit > 0:
totalPackets += 1
encTotalPackets = chr(totalPackets >> 8) + chr(totalPackets & 0xff)
seqNumber = 0
startPos = 0
while seqNumber < totalPackets:
#reactor.iterate() #IGNORE:E1101
packetData = data[startPos:startPos+self.msgSizeLimit]
encSeqNumber = chr(seqNumber >> 8) + chr(seqNumber & 0xff)
txData = '\x00%s%s%s\x00%s' % (encTotalPackets, encSeqNumber, rpcID, packetData)
self._sendNext(txData, address)
startPos += self.msgSizeLimit
seqNumber += 1
else:
self._sendNext(data, address)
def _sendNext(self, txData, address):
""" Send the next UDP packet """
ts = time.time()
delay = 0
if ts >= self._next:
delay = self.minToSendDelay
self._next = ts + self.minToSendDelay
else:
delay = (self._next-ts) + self.maxToSendDelay
self._next += self.maxToSendDelay
if self.transport:
laterCall = reactor.callLater(delay, self.transport.write, txData, address)
for key in self._callLaterList.keys():
if key <= ts:
del self._callLaterList[key]
self._callLaterList[self._next] = laterCall
def _sendResponse(self, contact, rpcID, response):
""" Send a RPC response to the specified contact
"""
msg = msgtypes.ResponseMessage(rpcID, self._node.id, response)
msgPrimitive = self._translator.toPrimitive(msg)
encodedMsg = self._encoder.encode(msgPrimitive)
self._send(encodedMsg, rpcID, (contact.address, contact.port))
def _sendError(self, contact, rpcID, exceptionType, exceptionMessage):
""" Send an RPC error message to the specified contact
"""
msg = msgtypes.ErrorMessage(rpcID, self._node.id, exceptionType, exceptionMessage)
msgPrimitive = self._translator.toPrimitive(msg)
encodedMsg = self._encoder.encode(msgPrimitive)
self._send(encodedMsg, rpcID, (contact.address, contact.port))
def _handleRPC(self, senderContact, rpcID, method, args):
""" Executes a local function in response to an RPC request """
# Set up the deferred callchain
def handleError(f):
self._sendError(senderContact, rpcID, f.type, f.getErrorMessage())
def handleResult(result):
self._sendResponse(senderContact, rpcID, result)
df = defer.Deferred()
df.addCallback(handleResult)
df.addErrback(handleError)
# Execute the RPC
func = getattr(self._node, method, None)
if callable(func) and hasattr(func, 'rpcmethod'):
# Call the exposed Node method and return the result to the deferred callback chain
try:
try:
# Try to pass the sender's node id to the function...
result = func(*args, **{'_rpcNodeID': senderContact.id, '_rpcNodeContact': senderContact})
except TypeError:
# ...or simply call it if that fails
result = func(*args)
except Exception, e:
df.errback(failure.Failure(e))
else:
df.callback(result)
else:
# No such exposed method
df.errback( failure.Failure( AttributeError('Invalid method: %s' % method) ) )
def _msgTimeout(self, messageID):
""" Called when an RPC request message times out """
# Find the message that timed out
if self._sentMessages.has_key(messageID):
remoteContactID, df = self._sentMessages[messageID][0:2]
if self._partialMessages.has_key(messageID):
# We are still receiving this message
# See if any progress has been made; if not, kill the message
if self._partialMessagesProgress.has_key(messageID):
if len(self._partialMessagesProgress[messageID]) == len(self._partialMessages[messageID]):
# No progress has been made
del self._partialMessagesProgress[messageID]
del self._partialMessages[messageID]
df.errback(failure.Failure(TimeoutError(remoteContactID)))
return
# Reset the RPC timeout timer
timeoutCall = reactor.callLater(constants.rpcTimeout, self._msgTimeout, messageID) #IGNORE:E1101
self._sentMessages[messageID] = (remoteContactID, df, timeoutCall)
return
del self._sentMessages[messageID]
# The message's destination node is now considered to be dead;
# raise an (asynchronous) TimeoutError exception and update the host node
self._node.removeContact(remoteContactID)
df.errback(failure.Failure(TimeoutError(remoteContactID)))
else:
# This should never be reached
print "ERROR: deferred timed out, but is not present in sent messages list!"
def stopProtocol(self):
""" Called when the transport is disconnected.
Will only be called once, after all ports are disconnected.
"""
for key in self._callLaterList.keys():
try:
if key > time.time():
self._callLaterList[key].cancel()
except Exception, e:
print e
del self._callLaterList[key]
#TODO: test: do we really need the reactor.iterate() call?
reactor.iterate()
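# Illustrative sketch (not part of the original module) of the 26-byte
# fragment header documented in KademliaProtocol._send: 1 byte transmission
# type, 2 bytes total packet count, 2 bytes sequence number, 20 bytes RPC ID
# and a 1-byte header-end indicator. The struct format string is only an
# assumption used for illustration; the class itself assembles the header
# manually with chr()/ord().
#
#   import struct
#   header = struct.pack('!BHH20sB', 0x00, total_packets, seq_number, rpc_id, 0x00)
#   assert len(header) == 26  # matches msgSizeLimit = udpDatagramMaxSize - 26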
| {
"content_hash": "269c12bab19a9fd0e08fb2a5f12a2739",
"timestamp": "",
"source": "github",
"line_count": 296,
"max_line_length": 174,
"avg_line_length": 47.111486486486484,
"alnum_prop": 0.5806382215847974,
"repo_name": "anupcshan/buddyfs",
"id": "54f440cb31c77c065ad93252175ccc09d6bf3ce0",
"size": "14306",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "entangled/kademlia/protocol.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "218378"
},
{
"name": "Shell",
"bytes": "54"
}
],
"symlink_target": ""
} |
"""This examples show how to use some features of the LED wrapper of the
LetMeCreate library.
It blinks all of the LEDs present on the Ci40 ten times, switching them on
for 100ms and then off for 400ms on each iteration.
"""
from letmecreate.core import led
from time import sleep
led.init()
for i in range(10):
led.switch_on(led.ALL_LEDS)
sleep(0.1) # Wait 100ms
led.switch_off(led.ALL_LEDS)
sleep(0.4) # Wait 400ms
led.release()
| {
"content_hash": "67e476810470b91e896e3cb4f519baf8",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 77,
"avg_line_length": 22.8,
"alnum_prop": 0.7192982456140351,
"repo_name": "francois-berder/PyLetMeCreate",
"id": "3da885694207b69bb7e0adbdfb311e0edf1b4e3d",
"size": "479",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/led_example.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "85404"
}
],
"symlink_target": ""
} |
class Forex_Tick(object):
def __init__(self, line):
split = line.split('\t')
if len(split) != 7:
raise Exception("Error: CSV file not formatted as expected for OLHC")
self.Ticker = split[0]
self.Date = split[1]
self.Time = split[2]
self.Open = float(split[3])
self.Low = float(split[4])
self.High = float(split[5])
self.Close = float(split[6])
def val(self):
return self.Close
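# Minimal usage sketch (the sample line is made up): the input files are
# tab-separated with Ticker, Date, Time, Open, Low, High, Close columns,
# matching the parsing order above.
if __name__ == "__main__":
    sample_line = "EURUSD\t2014-01-02\t09:00\t1.3662\t1.3650\t1.3670\t1.3658"
    tick = Forex_Tick(sample_line)
    print(tick.val())  # -> 1.3658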
| {
"content_hash": "52797951a0d36479c5e3ad65e85ad0e5",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 80,
"avg_line_length": 29.875,
"alnum_prop": 0.5439330543933054,
"repo_name": "mikesligo/FYP-fuzzy-time-series",
"id": "6e76611b04bf08cbda994a4384fb5787a90e0b8a",
"size": "478",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "time_series/forex_tick.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27740"
}
],
"symlink_target": ""
} |
import pandas as pd
def go():
df = pd.read_csv("../data/allegations_clean.csv")
cat_df = pd.read_csv("../data/categories.csv")
ids = cat_df.cat_id.tolist()
physical = cat_df.physical.tolist()
df.set_index(['crid', 'officer_id'], inplace = True)
df.replace(to_replace=ids, value=physical, inplace=True)
cat_dum = pd.get_dummies(df.cat_id, prefix = "physical", prefix_sep = ":", dummy_na = True)
cat_dum.to_csv("catPhysDummyFeatures.csv")
if __name__ == '__main__':
go()
| {
"content_hash": "2af64f7f5411495a83d0ec18c970eadf",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 95,
"avg_line_length": 28.38888888888889,
"alnum_prop": 0.6183953033268101,
"repo_name": "ladyson/police-complaints",
"id": "48fa1e8f09f4e28b329da8b80bf7feae2af130bf",
"size": "511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "features/cat-phys-DummyVariables.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "940"
},
{
"name": "Shell",
"bytes": "1105"
}
],
"symlink_target": ""
} |
"""
This is a dumb wrapper to netCDF4 Python binding. The purpose of this module
is to provide a compatibility layer to an old wrapper to netCDF C library,
which is removed.
For more information about netCDF, please refer to
http://www.unidata.ucar.edu/software/netcdf/index.html
"""
import numpy as np
import solvcon as sc
sc.import_module_may_fail('netCDF4')
class NetCDF(object):
"""
Wrapper around the netCDF library, built on the netCDF4 Python binding.
Mainly designed for reading. The native nc_* functions and NC_* constants
exposed by the old ctypes-based wrapper are no longer available.
"""
def __init__(self, path=None, omode="r"):
"""
:keyword path: the file to open.
:type path: str
:keyword omode: opening mode.
:type omode: int
"""
self.root_group = None
if path is not None:
self.open_file(path, omode)
def open_file(self, path, omode="r"):
"""
Open a NetCDF file.
:keyword path: the file to open.
:type path: str
:keyword omode: opening mode.
:type omode: str
:return: Root group from the opened data set.
:rtype: netCDF4.Dataset
"""
self.root_group = netCDF4.Dataset(path, omode)
return self.root_group
def close_file(self):
"""
Close the associated NetCDF file.
:return: Nothing
"""
self.root_group.close()
def get_dim(self, name):
"""
Get the dimension of the given name.
:param name: the name of the dimension.
:type name: str
:return: the dimension (length).
:rtype: int
"""
return len(self.root_group.dimensions[name])
def get_array(self, name, shape, dtype):
"""
Load ndarray from netCDF file.
:param name: the data to be loaded.
:type name: str
:param shape: the shape of ndarray.
:type shape: tuple
:param dtype: the dtype of ndarray.
:type dtype: str
:return: the loaded ndarray.
:rtype: numpy.ndarray
"""
try:
var = self.root_group[name]
arr = var[...]
except IndexError:
sc.helper.info("Could not find the elem_map variable in the mesh file\n")
sc.helper.info("Setting elem_map Array to 0\n")
arr = np.zeros(shape, dtype=dtype)
assert isinstance(arr, np.ndarray)
assert str(arr.dtype) == str(dtype)
return arr
def get_lines(self, name, shape):
"""
Load string from netCDF file.
:param name: the data to be loaded.
:type name: str
:param shape: the shape of ndarray. Must be 1 or 2.
:type shape: tuple
:return: The loaded strings.
:rtype: list of str
"""
# Load variable.
var = self.root_group[name]
arr = var[...]
if isinstance(arr, np.ma.MaskedArray): # issue #204
arr = arr.filled()
assert isinstance(arr, np.ndarray)
if len(shape) > 2:
raise IndexError('array should have no more than two dimension')
assert arr.shape == shape
# Convert to list of strings.
lines = list()
for line in arr:
idx = np.argwhere(line == b'').min()
line = line[:idx].tobytes()
lines.append(line.decode())
return lines
def get_attr(self, name, varname=None):
"""
Get the attribute attached to an variable. If *varname* is None
(default), get the global attribute.
:param name: name of the attribute.
:type name: str
:param varname: name of the variable.
:type varname: str
:return: the attribute.
"""
if varname is None:
return self.root_group.getncattr(name)
else:
return self.root_group[varname].getncattr(name)
get_attr_int = get_attr
get_attr_text = get_attr
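# Usage sketch (the file and variable names below are hypothetical; they only
# illustrate the reader API defined above):
#
#   nc = NetCDF('mesh.g')                # or NetCDF(); nc.open_file('mesh.g')
#   ncell = nc.get_dim('num_elem')
#   emap = nc.get_array('elem_map', (ncell,), 'int32')
#   title = nc.get_attr('title')
#   nc.close_file()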
| {
"content_hash": "48757df8a000c75c9052c264f82e4ea0",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 85,
"avg_line_length": 29.477941176470587,
"alnum_prop": 0.5702170117236218,
"repo_name": "yungyuc/solvcon",
"id": "b0ddd9bbd68ac0e896d07568d1ce71df5b64ba7c",
"size": "5593",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "solvcon/io/netcdf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "439220"
},
{
"name": "C++",
"bytes": "1593473"
},
{
"name": "CMake",
"bytes": "11957"
},
{
"name": "Cuda",
"bytes": "56913"
},
{
"name": "Dockerfile",
"bytes": "3816"
},
{
"name": "GLSL",
"bytes": "1793"
},
{
"name": "HTML",
"bytes": "2659"
},
{
"name": "Jupyter Notebook",
"bytes": "278804"
},
{
"name": "Makefile",
"bytes": "9869"
},
{
"name": "Python",
"bytes": "1641991"
},
{
"name": "Ruby",
"bytes": "1181"
},
{
"name": "Shell",
"bytes": "132795"
}
],
"symlink_target": ""
} |
from pyvows import Vows, expect
from thumbor.context import Context
from thumbor.config import Config
from fixtures.storage_fixture import IMAGE_URL, IMAGE_BYTES, get_server
from boto.s3.connection import S3Connection
from moto import mock_s3
from tc_aws.storages.s3_storage import Storage
import logging
logging.getLogger('botocore').setLevel(logging.CRITICAL)
s3_bucket = 'thumbor-images-test'
@Vows.batch
class S3StorageVows(Vows.Context):
class CanStoreImage(Vows.Context):
@Vows.async_topic
@mock_s3
def topic(self, callback):
self.conn = S3Connection()
self.conn.create_bucket(s3_bucket)
thumborId = IMAGE_URL % '1'
config = Config(TC_AWS_STORAGE_BUCKET=s3_bucket)
storage = Storage(Context(config=config, server=get_server('ACME-SEC')))
storage.put(thumborId, IMAGE_BYTES)
storage.get(thumborId, callback=callback)
def should_be_in_catalog(self, topic):
expect(topic.args[0]).not_to_be_null()
expect(topic.args[0]).not_to_be_an_error()
expect(topic.args[0]).to_equal(IMAGE_BYTES)
class CanGetImage(Vows.Context):
@Vows.async_topic
@mock_s3
def topic(self, callback):
self.conn = S3Connection()
self.conn.create_bucket(s3_bucket)
config = Config(TC_AWS_STORAGE_BUCKET=s3_bucket)
storage = Storage(Context(config=config, server=get_server('ACME-SEC')))
storage.put(IMAGE_URL % '2', IMAGE_BYTES)
storage.get(IMAGE_URL % '2', callback=callback)
def should_not_be_null(self, topic):
expect(topic.args[0]).not_to_be_null()
expect(topic.args[0]).not_to_be_an_error()
def should_have_proper_bytes(self, topic):
expect(topic.args[0]).to_equal(IMAGE_BYTES)
class CanGetImageExistance(Vows.Context):
@Vows.async_topic
@mock_s3
def topic(self, callback):
self.conn = S3Connection()
self.conn.create_bucket(s3_bucket)
config = Config(TC_AWS_STORAGE_BUCKET=s3_bucket)
storage = Storage(Context(config=config, server=get_server('ACME-SEC')))
storage.put(IMAGE_URL % '3', IMAGE_BYTES)
storage.exists(IMAGE_URL % '3', callback=callback)
def should_exists(self, topic):
expect(topic.args[0]).to_equal(True)
class CanGetImageInexistance(Vows.Context):
@Vows.async_topic
@mock_s3
def topic(self, callback):
self.conn = S3Connection()
self.conn.create_bucket(s3_bucket)
config = Config(TC_AWS_STORAGE_BUCKET=s3_bucket)
storage = Storage(Context(config=config, server=get_server('ACME-SEC')))
storage.exists(IMAGE_URL % '9999', callback)
def should_not_exists(self, topic):
expect(topic.args[0]).to_equal(False)
class CanRemoveImage(Vows.Context):
@Vows.async_topic
@mock_s3
def topic(self, callback):
self.conn = S3Connection()
self.conn.create_bucket(s3_bucket)
config = Config(TC_AWS_STORAGE_BUCKET=s3_bucket)
storage = Storage(Context(config=config, server=get_server('ACME-SEC')))
storage.put(IMAGE_URL % '4', IMAGE_BYTES) # 1: we put the image
def check_created(created):
expect(created).to_equal(True) # 2.1: assertion...
def once_removed(rm):
storage.exists(IMAGE_URL % '4', callback=callback) #4: we check if the image exists
storage.remove(IMAGE_URL % '4', callback=once_removed) # 3: we delete it
storage.exists(IMAGE_URL % '4', callback=check_created) # 2: we check it exists
def should_be_put_and_removed(self, topic):
expect(topic.args[0]).to_equal(False) # 4.1: assertion...
class CanRemovethenPutImage(Vows.Context):
@Vows.async_topic
@mock_s3
def topic(self, callback):
self.conn = S3Connection()
self.conn.create_bucket(s3_bucket)
config = Config(TC_AWS_STORAGE_BUCKET=s3_bucket)
storage = Storage(Context(config=config, server=get_server('ACME-SEC')))
storage.put(IMAGE_URL % '5', IMAGE_BYTES) # 1: we put the image
def check_created(created):
expect(created).to_equal(True) # 2.1: assertion...
def once_removed(rm):
def check_created_2(exists):
expect(exists).to_equal(True) # 4.1: assertion...
storage.put(IMAGE_URL % '5', IMAGE_BYTES) # 5: we re-put it
storage.exists(IMAGE_URL % '5', callback=callback) #6: we check its existence again
storage.exists(IMAGE_URL % '5', callback=check_created_2) #4: we check if the image exists
storage.remove(IMAGE_URL % '5', callback=once_removed) # 3: we delete it
storage.exists(IMAGE_URL % '5', callback=check_created) # 2: we check it exists
def should_be_put_and_removed(self, topic):
expect(topic.args[0]).to_equal(True)
class CanReturnPath(Vows.Context):
@mock_s3
def topic(self):
self.conn = S3Connection()
self.conn.create_bucket(s3_bucket)
config = Config(TC_AWS_STORAGE_BUCKET=s3_bucket)
storage = Storage(Context(config=config, server=get_server('ACME-SEC')))
return storage.resolve_original_photo_path("toto")
def should_return_the_same(self, topic):
expect(topic).to_equal("toto")
class HandlesStoragePrefix(Vows.Context):
@mock_s3
def topic(self):
self.conn = S3Connection()
self.conn.create_bucket(s3_bucket)
config = Config(TC_AWS_STORAGE_BUCKET=s3_bucket, TC_AWS_STORAGE_ROOT_PATH='tata')
storage = Storage(Context(config=config, server=get_server('ACME-SEC')))
return storage._normalize_path('toto')
def should_return_the_same(self, topic):
expect(topic).to_equal("tata/toto")
class ShouldNormalize(Vows.Context):
def topic(self):
config = Config(TC_AWS_STORAGE_ROOT_PATH='')
return Storage(Context(config=config))
def should_normalize_slash(self, topic):
expect(topic._normalize_path('/test')).to_equal('test')
expect(topic._normalize_path('/test/image.png')).to_equal('test/image.png')
class CryptoVows(Vows.Context):
class RaisesIfInvalidConfig(Vows.Context):
@Vows.capture_error
@mock_s3
def topic(self):
self.conn = S3Connection()
self.conn.create_bucket(s3_bucket)
config = Config(TC_AWS_STORAGE_BUCKET=s3_bucket, STORES_CRYPTO_KEY_FOR_EACH_IMAGE=True)
storage = Storage(Context(config=config, server=get_server('')))
storage.put(IMAGE_URL % '9999', IMAGE_BYTES)
storage.put_crypto(IMAGE_URL % '9999')
def should_be_an_error(self, topic):
expect(topic).to_be_an_error_like(RuntimeError)
expect(topic).to_have_an_error_message_of("STORES_CRYPTO_KEY_FOR_EACH_IMAGE can't be True if no SECURITY_KEY specified")
class GettingCryptoForANewImageReturnsNone(Vows.Context):
@Vows.async_topic
@mock_s3
def topic(self, callback):
self.conn = S3Connection()
self.conn.create_bucket(s3_bucket)
config = Config(TC_AWS_STORAGE_BUCKET=s3_bucket, STORES_CRYPTO_KEY_FOR_EACH_IMAGE=True)
storage = Storage(Context(config=config, server=get_server('ACME-SEC')))
storage.get_crypto(IMAGE_URL % '9999', callback=callback)
def should_be_null(self, topic):
expect(topic.args[0]).to_be_null()
class DoesNotStoreIfConfigSaysNotTo(Vows.Context):
@Vows.async_topic
@mock_s3
def topic(self, callback):
self.conn = S3Connection()
self.conn.create_bucket(s3_bucket)
config = Config(TC_AWS_STORAGE_BUCKET=s3_bucket)
storage = Storage(Context(config=config, server=get_server('ACME-SEC')))
storage.put(IMAGE_URL % '9998', IMAGE_BYTES)
storage.put_crypto(IMAGE_URL % '9998')
storage.get_crypto(IMAGE_URL % '9998', callback=callback)
def should_be_null(self, topic):
expect(topic.args[0]).to_be_null()
class CanStoreCrypto(Vows.Context):
@Vows.async_topic
@mock_s3
def topic(self, callback):
self.conn = S3Connection()
self.conn.create_bucket(s3_bucket)
config = Config(TC_AWS_STORAGE_BUCKET=s3_bucket, STORES_CRYPTO_KEY_FOR_EACH_IMAGE=True)
storage = Storage(Context(config=config, server=get_server('ACME-SEC')))
storage.put(IMAGE_URL % '6', IMAGE_BYTES)
storage.put_crypto(IMAGE_URL % '6')
storage.get_crypto(IMAGE_URL % '6', callback=callback)
def should_not_be_null(self, topic):
expect(topic.args[0]).not_to_be_null()
expect(topic.args[0]).not_to_be_an_error()
expect(topic.args[0]).to_equal('ACME-SEC')
class DetectorVows(Vows.Context):
class CanStoreDetectorData(Vows.Context):
@Vows.async_topic
@mock_s3
def topic(self, callback):
self.conn = S3Connection()
self.conn.create_bucket(s3_bucket)
config = Config(TC_AWS_STORAGE_BUCKET=s3_bucket)
storage = Storage(Context(config=config, server=get_server('ACME-SEC')))
storage.put(IMAGE_URL % '7', IMAGE_BYTES)
storage.put_detector_data(IMAGE_URL % '7', 'some-data')
storage.get_detector_data(IMAGE_URL % '7', callback=callback)
def should_not_be_null(self, topic):
expect(topic.args[0]).not_to_be_null()
expect(topic.args[0]).not_to_be_an_error()
expect(topic.args[0]).to_equal('some-data')
class ReturnsNoneIfNoDetectorData(Vows.Context):
@Vows.async_topic
@mock_s3
def topic(self, callback):
self.conn = S3Connection()
self.conn.create_bucket(s3_bucket)
config = Config(TC_AWS_STORAGE_BUCKET=s3_bucket)
storage = Storage(Context(config=config, server=get_server('ACME-SEC')))
storage.get_detector_data(IMAGE_URL % '9999', callback=callback)
def should_not_be_null(self, topic):
expect(topic.args[0]).to_be_null()
| {
"content_hash": "63a64fb0afced99b1cc4633ae0a8bc0e",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 136,
"avg_line_length": 40.18681318681319,
"alnum_prop": 0.5849056603773585,
"repo_name": "voxmedia/aws",
"id": "5be742c4b5b4590b0eee799f10464a70aee8aba7",
"size": "11016",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vows/storage_vows.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "207"
},
{
"name": "Python",
"bytes": "47047"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.oep import OpenEnergyPlatformManagementServiceAPIs
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-oep
# USAGE
python operations_list.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = OpenEnergyPlatformManagementServiceAPIs(
credential=DefaultAzureCredential(),
subscription_id="SUBSCRIPTION_ID",
)
response = client.operations.list()
print(response)
# x-ms-original-file: specification/oep/resource-manager/Microsoft.OpenEnergyPlatform/preview/2022-04-04-preview/examples/Operations_List.json
if __name__ == "__main__":
main()
| {
"content_hash": "53131b129e5cab084c7e88db771b1f6d",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 142,
"avg_line_length": 33.666666666666664,
"alnum_prop": 0.7495049504950495,
"repo_name": "Azure/azure-sdk-for-python",
"id": "95d6ad1af1e4f3ff47a1d142497bccf905734002",
"size": "1478",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/oep/azure-mgmt-oep/generated_samples/operations_list.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import sys
import django
def main():
print(
"\n\n\n%s\n Python: %s, Django: %s\n%s\n"
% (
"=" * 120,
".".join(map(str, sys.version_info[:3])),
".".join(map(str, django.VERSION[:3])),
"-" * 120,
)
)
import logging
logging.basicConfig(level=logging.ERROR)
from conftest import pytest_configure
settings = pytest_configure()
from django.test.utils import get_runner
test_runner = get_runner(settings)(verbosity=2, interactive=True)
original_suite_result = test_runner.suite_result
def patched_suite_result(suite, result, **kwargs):
from formapi.tests import TOTAL_TESTS
assert (
result.testsRun == TOTAL_TESTS
), "Run {} tests, expected to run {}".format(
result.testsRun,
TOTAL_TESTS,
)
return original_suite_result(suite, result, **kwargs)
test_runner.suite_result = patched_suite_result
failures = test_runner.run_tests(["formapi"])
sys.exit(failures)
if __name__ == "__main__":
main()
| {
"content_hash": "658c0f9d63a1d38790e1e6082aebfda5",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 69,
"avg_line_length": 23.020833333333332,
"alnum_prop": 0.5737556561085972,
"repo_name": "5monkeys/django-formapi",
"id": "24f74e04f3de5b6cfb49d147e946f4c01fce23e3",
"size": "1127",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3946"
},
{
"name": "Makefile",
"bytes": "189"
},
{
"name": "Python",
"bytes": "37357"
}
],
"symlink_target": ""
} |
import asyncio
from .base import REDIS_LOCATION
MSG_PACK_CACHES = {
"default": {
"BACKEND": "django_asyncio_redis.cache.AsyncRedisCache",
"POOLSIZE": None,
"LOCATION": REDIS_LOCATION,
"LOOP": asyncio.get_event_loop(),
"SERIALIZER": "django_asyncio_redis.serializers.msgpack.MSGPackSerializer"
}
}
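# Sketch of how this dict is typically consumed (assumption: the test settings
# assign it to Django's CACHES setting so the msgpack serializer is exercised):
#
#   CACHES = MSG_PACK_CACHES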
| {
"content_hash": "4807548a05c842d391dba39ad22aa7ae",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 82,
"avg_line_length": 26.692307692307693,
"alnum_prop": 0.6455331412103746,
"repo_name": "mackeyja92/django-asyncio-redis",
"id": "87b5a14fcec05ee1cf42353d4fad2160ef3157e8",
"size": "347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/settings/msgpack.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24134"
}
],
"symlink_target": ""
} |
import urllib2
import re
import logging
import csv
import datetime
from django.core.management.base import BaseCommand
from django.contrib.contenttypes.models import ContentType
from knesset.mks.models import Member
from knesset.committees.models import Committee
from knesset.events.models import Event
logger = logging.getLogger("open-knesset.parse_future_committee_meetings")
spamWriter = csv.writer(open('eggs.csv', 'wb'))
class Command(BaseCommand):
args = ''
help = 'Parses future committee meetings from the Knesset website'
committee_ct = ContentType.objects.get_for_model(Committee)
def parse_future_committee_meetings(self):
retval = []
url = 'http://knesset.gov.il/agenda/heb/CommitteesByDate.asp'
data = urllib2.urlopen(url).read()
committee_re = re.compile('<td class="Day" bgcolor="#990000" >\s+\xf1\xe3\xf8 \xe4\xe9\xe5\xed \xec.+, <span style=color:#c0c0c0>')
committee_name = re.compile('<td class="Day" bgcolor="#990000" >\s+\xf1\xe3\xf8 \xe4\xe9\xe5\xed \xec(.+), <span style=color:#c0c0c0>')
date_re = re.compile("<nobr>\((\d+)/(\d+)/(\d+)\)</nobr>")
time_re = re.compile('\xe1\xf9\xf2\xe4 (\d\d):(\d\d)')
meeting_title_re = re.compile('TitleCommittee')
meeting_agenda_re = re.compile('class="Agenda"')
meeting_agenda_text_re = re.compile('<[Tt]d class=AgendaText>([^<]+)</[Tt]d>')
datas = committee_re.split( data )[1:]
committee_names = committee_name.findall( data )
committee_names = [ name.decode('cp1255') for name in committee_names ]
committee_data = zip( committee_names, datas )
for name, data in committee_data:
date = date_re.findall(data)[0]
year, month, day = int(date[2]), int(date[1]), int(date[0])
meeting_datas = meeting_title_re.split(data)[1:]
for meeting_data in meeting_datas:
meeting_time = time_re.findall(meeting_data)[0]
hour, minute = int(meeting_time[0]),int(meeting_time[1])
meeting_agenda_data = meeting_agenda_re.split(meeting_data)[1]
titles = meeting_agenda_text_re.findall( meeting_agenda_data )
titles = [ title.decode('cp1255').strip() for title in titles ]
title = " ".join( titles )
retval.append( [ name, year, month, day, hour, minute, title ] )
spamWriter.writerow( [ name.encode('utf8'), year, month, day, hour, minute, title.encode('utf8') ] )
return retval
def update_future_committee_meetings_db(self,r):
for row in r:
try:
committee = Committee.objects.get(name=row[0])
ev, created = Event.objects.get_or_create( when = datetime.datetime( year=row[1], month=row[2], day=row[3], hour=row[4], minute=row[5], second=0 ),
what = row[6],
which_pk = committee.id,
which_type = self.committee_ct,
)
print "new event at %s: %s" % (ev.when, ev.what)
except Committee.DoesNotExist:
logger.debug("couldn't find committee %s" % row[0])
try:
ev, created = Event.objects.get_or_create(when = datetime.datetime( year=row[1], month=row[2], day=row[3], hour=row[4], minute=row[5], second=0 ),
what = row[6],
)
except Event.MultipleObjectsReturned:
created = False
if created:
logger.debug("created %s" % ev)
def handle(self, *args, **options):
r = self.parse_future_committee_meetings()
logger.debug(r)
self.update_future_committee_meetings_db(r)
| {
"content_hash": "d641fcc35f77e57221098c202b4c5f54",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 166,
"avg_line_length": 47.0919540229885,
"alnum_prop": 0.5443007078350012,
"repo_name": "livni/old-OK",
"id": "bb250a4ffa0297c300b2df321f281e27a685bfcd",
"size": "4125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/knesset/simple/management/commands/parse_future_committee_meetings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "31938"
},
{
"name": "JavaScript",
"bytes": "84209"
},
{
"name": "Python",
"bytes": "1179397"
}
],
"symlink_target": ""
} |
import warnings
from . import ops
from .groupby import DataArrayGroupBy, DatasetGroupBy
RESAMPLE_DIM = "__resample_dim__"
class Resample:
"""An object that extends the `GroupBy` object with additional logic
for handling specialized re-sampling operations.
You should create a `Resample` object by using the `DataArray.resample` or
`Dataset.resample` methods. The dimension along which the re-sampling is
performed is specified when those methods are called.
See Also
--------
DataArray.resample
Dataset.resample
"""
def _upsample(self, method, *args, **kwargs):
"""Dispatch function to call appropriate up-sampling methods on
data.
This method should not be called directly; instead, use one of the
wrapper functions supplied by `Resample`.
Parameters
----------
method : {"asfreq", "pad", "ffill", "backfill", "bfill", "nearest", \
"interpolate"}
Method to use for up-sampling
See Also
--------
Resample.asfreq
Resample.pad
Resample.backfill
Resample.interpolate
"""
upsampled_index = self._full_index
# Drop non-dimension coordinates along the resampled dimension
for k, v in self._obj.coords.items():
if k == self._dim:
continue
if self._dim in v.dims:
self._obj = self._obj.drop_vars(k)
if method == "asfreq":
return self.mean(self._dim)
elif method in ["pad", "ffill", "backfill", "bfill", "nearest"]:
kwargs = kwargs.copy()
kwargs.update(**{self._dim: upsampled_index})
return self._obj.reindex(method=method, *args, **kwargs)
elif method == "interpolate":
return self._interpolate(*args, **kwargs)
else:
raise ValueError(
'Specified method was "{}" but must be one of '
'"asfreq", "pad", "ffill", "backfill", "bfill", "nearest", '
'or "interpolate"'.format(method)
)
def asfreq(self):
"""Return values of original object at the new up-sampling frequency;
essentially a re-index with new times set to NaN.
"""
return self._upsample("asfreq")
def pad(self, tolerance=None):
"""Forward fill new values at up-sampled frequency.
Parameters
----------
tolerance : optional
Maximum distance between original and new labels to limit
the up-sampling method.
Up-sampled data with indices that satisfy the equation
``abs(index[indexer] - target) <= tolerance`` are filled by
new values. Data with indices that are outside the given
tolerance are filled with ``NaN`` s
"""
return self._upsample("pad", tolerance=tolerance)
ffill = pad
def backfill(self, tolerance=None):
"""Backward fill new values at up-sampled frequency.
Parameters
----------
tolerance : optional
Maximum distance between original and new labels to limit
the up-sampling method.
Up-sampled data with indices that satisfy the equation
``abs(index[indexer] - target) <= tolerance`` are filled by
new values. Data with indices that are outside the given
tolerance are filled with ``NaN`` s
"""
return self._upsample("backfill", tolerance=tolerance)
bfill = backfill
def nearest(self, tolerance=None):
"""Take new values from nearest original coordinate to up-sampled
frequency coordinates.
Parameters
----------
tolerance : optional
Maximum distance between original and new labels to limit
the up-sampling method.
Up-sampled data with indices that satisfy the equation
``abs(index[indexer] - target) <= tolerance`` are filled by
new values. Data with indices that are outside the given
tolerance are filled with ``NaN`` s
"""
return self._upsample("nearest", tolerance=tolerance)
def interpolate(self, kind="linear"):
"""Interpolate up-sampled data using the original data
as knots.
Parameters
----------
kind : {"linear", "nearest", "zero", "slinear", \
"quadratic", "cubic"}, default: "linear"
Interpolation scheme to use
See Also
--------
scipy.interpolate.interp1d
"""
return self._interpolate(kind=kind)
def _interpolate(self, kind="linear"):
"""Apply scipy.interpolate.interp1d along resampling dimension."""
# drop any existing non-dimension coordinates along the resampling
# dimension
dummy = self._obj.copy()
for k, v in self._obj.coords.items():
if k != self._dim and self._dim in v.dims:
dummy = dummy.drop_vars(k)
return dummy.interp(
assume_sorted=True,
method=kind,
kwargs={"bounds_error": False},
**{self._dim: self._full_index},
)
class DataArrayResample(DataArrayGroupBy, Resample):
"""DataArrayGroupBy object specialized to time resampling operations over a
specified dimension
"""
def __init__(self, *args, dim=None, resample_dim=None, **kwargs):
if dim == resample_dim:
raise ValueError(
"Proxy resampling dimension ('{}') "
"cannot have the same name as actual dimension "
"('{}')! ".format(resample_dim, dim)
)
self._dim = dim
self._resample_dim = resample_dim
super().__init__(*args, **kwargs)
def map(self, func, shortcut=False, args=(), **kwargs):
"""Apply a function to each array in the group and concatenate them
together into a new array.
`func` is called like `func(ar, *args, **kwargs)` for each array `ar`
in this group.
Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how
to stack together the array. The rule is:
1. If the dimension along which the group coordinate is defined is
still in the first grouped array after applying `func`, then stack
over this dimension.
2. Otherwise, stack over the new dimension given by name of this
grouping (the argument to the `groupby` function).
Parameters
----------
func : callable
Callable to apply to each array.
shortcut : bool, optional
Whether or not to shortcut evaluation under the assumptions that:
(1) The action of `func` does not depend on any of the array
metadata (attributes or coordinates) but only on the data and
dimensions.
(2) The action of `func` creates arrays with homogeneous metadata,
that is, with the same dimensions and attributes.
If these conditions are satisfied `shortcut` provides significant
speedup. This should be the case for many common groupby operations
(e.g., applying numpy ufuncs).
args : tuple, optional
Positional arguments passed on to `func`.
**kwargs
Used to call `func(ar, **kwargs)` for each array `ar`.
Returns
-------
applied : DataArray or DataArray
The result of splitting, applying and combining this array.
"""
# TODO: the argument order for Resample doesn't match that for its parent,
# GroupBy
combined = super().map(func, shortcut=shortcut, args=args, **kwargs)
# If the aggregation function didn't drop the original resampling
# dimension, then we need to do so before we can rename the proxy
# dimension we used.
if self._dim in combined.coords:
combined = combined.drop_vars(self._dim)
if self._resample_dim in combined.dims:
combined = combined.rename({self._resample_dim: self._dim})
return combined
def apply(self, func, args=(), shortcut=None, **kwargs):
"""
Backward compatible implementation of ``map``
See Also
--------
DataArrayResample.map
"""
warnings.warn(
"Resample.apply may be deprecated in the future. Using Resample.map is encouraged",
PendingDeprecationWarning,
stacklevel=2,
)
return self.map(func=func, shortcut=shortcut, args=args, **kwargs)
ops.inject_reduce_methods(DataArrayResample)
ops.inject_binary_ops(DataArrayResample)
class DatasetResample(DatasetGroupBy, Resample):
"""DatasetGroupBy object specialized to resampling a specified dimension"""
def __init__(self, *args, dim=None, resample_dim=None, **kwargs):
if dim == resample_dim:
raise ValueError(
"Proxy resampling dimension ('{}') "
"cannot have the same name as actual dimension "
"('{}')! ".format(resample_dim, dim)
)
self._dim = dim
self._resample_dim = resample_dim
super().__init__(*args, **kwargs)
def map(self, func, args=(), shortcut=None, **kwargs):
"""Apply a function over each Dataset in the groups generated for
resampling and concatenate them together into a new Dataset.
`func` is called like `func(ds, *args, **kwargs)` for each dataset `ds`
in this group.
Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how
to stack together the datasets. The rule is:
1. If the dimension along which the group coordinate is defined is
still in the first grouped item after applying `func`, then stack
over this dimension.
2. Otherwise, stack over the new dimension given by name of this
grouping (the argument to the `groupby` function).
Parameters
----------
func : callable
Callable to apply to each sub-dataset.
args : tuple, optional
Positional arguments passed on to `func`.
**kwargs
Used to call `func(ds, **kwargs)` for each sub-dataset `ar`.
Returns
-------
applied : Dataset or DataArray
The result of splitting, applying and combining this dataset.
"""
# ignore shortcut if set (for now)
applied = (func(ds, *args, **kwargs) for ds in self._iter_grouped())
combined = self._combine(applied)
return combined.rename({self._resample_dim: self._dim})
def apply(self, func, args=(), shortcut=None, **kwargs):
"""
Backward compatible implementation of ``map``
See Also
--------
DataSetResample.map
"""
warnings.warn(
"Resample.apply may be deprecated in the future. Using Resample.map is encouraged",
PendingDeprecationWarning,
stacklevel=2,
)
return self.map(func=func, shortcut=shortcut, args=args, **kwargs)
def reduce(self, func, dim=None, keep_attrs=None, **kwargs):
"""Reduce the items in this group by applying `func` along the
pre-defined resampling dimension.
Parameters
----------
func : callable
Function which can be called in the form
`func(x, axis=axis, **kwargs)` to return the result of collapsing
an np.ndarray over an integer valued axis.
dim : str or sequence of str, optional
Dimension(s) over which to apply `func`.
keep_attrs : bool, optional
If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : Dataset
Dataset with this group's summarized data and the indicated
dimension(s) removed.
"""
return super().reduce(func, dim, keep_attrs, **kwargs)
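# Illustrative sketch (not part of the original module): reducing each
# resampled group with a NumPy function, assuming a hypothetical Dataset
# ``ds`` that has a "time" coordinate.
#   >>> import numpy as np
#   >>> daily_totals = ds.resample(time="1D").reduce(np.sum)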
ops.inject_reduce_methods(DatasetResample)
ops.inject_binary_ops(DatasetResample)
| {
"content_hash": "d72246559614192e24d4fed28c046c5a",
"timestamp": "",
"source": "github",
"line_count": 352,
"max_line_length": 95,
"avg_line_length": 34.96306818181818,
"alnum_prop": 0.5915332737466482,
"repo_name": "xray/xray",
"id": "a00dedc8d05bde669c5d0c8958dc2b3720f65511",
"size": "12307",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xarray/core/resample.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "2986"
},
{
"name": "Python",
"bytes": "862869"
}
],
"symlink_target": ""
} |
"""
RSA Key management
"""
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.asymmetric.padding import PSS, MGF1
from cryptography.hazmat.primitives.hashes import SHA256
from .general import KeyClass
# Sizes that bootutil will recognize
RSA_KEY_SIZES = [2048, 3072]
class RSAUsageError(Exception):
pass
class RSAPublic(KeyClass):
"""The public key can only do a few operations"""
def __init__(self, key):
self.key = key
def key_size(self):
return self.key.key_size
def shortname(self):
return "rsa"
def _unsupported(self, name):
raise RSAUsageError("Operation {} requires private key".format(name))
def _get_public(self):
return self.key
def get_public_bytes(self):
# The key embedded into MCUboot is in PKCS1 format.
return self._get_public().public_bytes(
encoding=serialization.Encoding.DER,
format=serialization.PublicFormat.PKCS1)
def get_private_bytes(self, minimal):
self._unsupported('get_private_bytes')
def export_private(self, path, passwd=None):
self._unsupported('export_private')
def export_public(self, path):
"""Write the public key to the given file."""
pem = self._get_public().public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo)
with open(path, 'wb') as f:
f.write(pem)
def sig_type(self):
return "PKCS1_PSS_RSA{}_SHA256".format(self.key_size())
def sig_tlv(self):
return"RSA{}".format(self.key_size())
def sig_len(self):
# signature length in bytes (integer division keeps this an int)
return self.key_size() // 8
def verify(self, signature, payload):
k = self.key
if isinstance(self.key, rsa.RSAPrivateKey):
k = self.key.public_key()
return k.verify(signature=signature, data=payload,
padding=PSS(mgf=MGF1(SHA256()), salt_length=32),
algorithm=SHA256())
class RSA(RSAPublic):
"""
Wrapper around an RSA key, with imgtool support.
"""
def __init__(self, key):
"""The key should be a private key from cryptography"""
self.key = key
@staticmethod
def generate(key_size=2048):
if key_size not in RSA_KEY_SIZES:
raise RSAUsageError("Key size {} is not supported by MCUboot"
.format(key_size))
pk = rsa.generate_private_key(
public_exponent=65537,
key_size=key_size,
backend=default_backend())
return RSA(pk)
def _get_public(self):
return self.key.public_key()
def _build_minimal_rsa_privkey(self, der):
'''
Builds a new DER that only includes N/E/D/P/Q RSA parameters;
standard DER private bytes provided by OpenSSL also include
CRT params (DP/DQ/QP) which can be removed.
'''
OFFSET_N = 7 # N is always located at this offset
b = bytearray(der)
off = OFFSET_N
if b[off + 1] != 0x82:
raise RSAUsageError("Error parsing N while minimizing")
len_N = (b[off + 2] << 8) + b[off + 3] + 4
off += len_N
if b[off + 1] != 0x03:
raise RSAUsageError("Error parsing E while minimizing")
len_E = b[off + 2] + 4
off += len_E
if b[off + 1] != 0x82:
raise RSAUsageError("Error parsing D while minimizing")
len_D = (b[off + 2] << 8) + b[off + 3] + 4
off += len_D
if b[off + 1] != 0x81:
raise RSAUsageError("Error parsing P while minimizing")
len_P = b[off + 2] + 3
off += len_P
if b[off + 1] != 0x81:
raise RSAUsageError("Error parsing Q while minimizing")
len_Q = b[off + 2] + 3
off += len_Q
# adjust DER size for removed elements
b[2] = (off - 4) >> 8
b[3] = (off - 4) & 0xff
return b[:off]
def get_private_bytes(self, minimal):
priv = self.key.private_bytes(
encoding=serialization.Encoding.DER,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption())
if minimal:
priv = self._build_minimal_rsa_privkey(priv)
return priv
def export_private(self, path, passwd=None):
"""Write the private key to the given file, protecting it with the
optional password."""
if passwd is None:
enc = serialization.NoEncryption()
else:
enc = serialization.BestAvailableEncryption(passwd)
pem = self.key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=enc)
with open(path, 'wb') as f:
f.write(pem)
def sign(self, payload):
# The verification code only allows the salt length to be the
# same as the hash length, 32.
return self.key.sign(
data=payload,
padding=PSS(mgf=MGF1(SHA256()), salt_length=32),
algorithm=SHA256())
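# Illustrative usage sketch (not part of the original module), using only the
# methods defined above; the output path is hypothetical.
#   >>> key = RSA.generate(key_size=2048)
#   >>> sig = key.sign(b"firmware image")
#   >>> key.verify(sig, b"firmware image")   # raises on a bad signature
#   >>> key.export_private("example-rsa-2048.pem", passwd=None)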
| {
"content_hash": "d34f20fe748e655a565ce595960ea47c",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 77,
"avg_line_length": 33.02453987730061,
"alnum_prop": 0.5875905628831507,
"repo_name": "mbedmicro/mbed",
"id": "a7230e30eb368aa53a8ed8175a594c34f67cf82f",
"size": "6131",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "tools/psa/tfm/bin_utils/imgtool/keys/rsa.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5511149"
},
{
"name": "C",
"bytes": "152405192"
},
{
"name": "C++",
"bytes": "7777242"
},
{
"name": "CMake",
"bytes": "27635"
},
{
"name": "HTML",
"bytes": "1531100"
},
{
"name": "Makefile",
"bytes": "131050"
},
{
"name": "Objective-C",
"bytes": "169382"
},
{
"name": "Python",
"bytes": "7913"
},
{
"name": "Shell",
"bytes": "24790"
},
{
"name": "XSLT",
"bytes": "11192"
}
],
"symlink_target": ""
} |
"""Auth providers for Home Assistant."""
from __future__ import annotations
from collections.abc import Mapping
import importlib
import logging
import types
from typing import Any
import voluptuous as vol
from voluptuous.humanize import humanize_error
from homeassistant import data_entry_flow, requirements
from homeassistant.const import CONF_ID, CONF_NAME, CONF_TYPE
from homeassistant.core import HomeAssistant, callback
from homeassistant.data_entry_flow import FlowResult
from homeassistant.exceptions import HomeAssistantError
from homeassistant.util import dt as dt_util
from homeassistant.util.decorator import Registry
from ..auth_store import AuthStore
from ..const import MFA_SESSION_EXPIRATION
from ..models import Credentials, RefreshToken, User, UserMeta
_LOGGER = logging.getLogger(__name__)
DATA_REQS = "auth_prov_reqs_processed"
AUTH_PROVIDERS = Registry()
AUTH_PROVIDER_SCHEMA = vol.Schema(
{
vol.Required(CONF_TYPE): str,
vol.Optional(CONF_NAME): str,
# Specify ID if you have two auth providers for the same type.
vol.Optional(CONF_ID): str,
},
extra=vol.ALLOW_EXTRA,
)
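# Illustrative sketch (not part of the original module): minimal provider
# configurations that validate against AUTH_PROVIDER_SCHEMA; the values are
# hypothetical.
#   >>> AUTH_PROVIDER_SCHEMA({"type": "homeassistant"})
#   {'type': 'homeassistant'}
#   >>> AUTH_PROVIDER_SCHEMA({"type": "command_line", "id": "backup", "name": "CLI"})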
class AuthProvider:
"""Provider of user authentication."""
DEFAULT_TITLE = "Unnamed auth provider"
def __init__(
self, hass: HomeAssistant, store: AuthStore, config: dict[str, Any]
) -> None:
"""Initialize an auth provider."""
self.hass = hass
self.store = store
self.config = config
@property
def id(self) -> str | None:
"""Return id of the auth provider.
Optional, can be None.
"""
return self.config.get(CONF_ID)
@property
def type(self) -> str:
"""Return type of the provider."""
return self.config[CONF_TYPE] # type: ignore
@property
def name(self) -> str:
"""Return the name of the auth provider."""
return self.config.get(CONF_NAME, self.DEFAULT_TITLE)
@property
def support_mfa(self) -> bool:
"""Return whether multi-factor auth supported by the auth provider."""
return True
async def async_credentials(self) -> list[Credentials]:
"""Return all credentials of this provider."""
users = await self.store.async_get_users()
return [
credentials
for user in users
for credentials in user.credentials
if (
credentials.auth_provider_type == self.type
and credentials.auth_provider_id == self.id
)
]
@callback
def async_create_credentials(self, data: dict[str, str]) -> Credentials:
"""Create credentials."""
return Credentials(
auth_provider_type=self.type, auth_provider_id=self.id, data=data
)
# Implement by extending class
async def async_login_flow(self, context: dict | None) -> LoginFlow:
"""Return the data flow for logging in with auth provider.
Auth provider should extend LoginFlow and return an instance.
"""
raise NotImplementedError
async def async_get_or_create_credentials(
self, flow_result: Mapping[str, str]
) -> Credentials:
"""Get credentials based on the flow result."""
raise NotImplementedError
async def async_user_meta_for_credentials(
self, credentials: Credentials
) -> UserMeta:
"""Return extra user metadata for credentials.
Will be used to populate info when creating a new user.
"""
raise NotImplementedError
async def async_initialize(self) -> None:
"""Initialize the auth provider."""
@callback
def async_validate_refresh_token(
self, refresh_token: RefreshToken, remote_ip: str | None = None
) -> None:
"""Verify a refresh token is still valid.
Optional hook for an auth provider to verify validity of a refresh token.
Should raise InvalidAuthError on errors.
"""
async def auth_provider_from_config(
hass: HomeAssistant, store: AuthStore, config: dict[str, Any]
) -> AuthProvider:
"""Initialize an auth provider from a config."""
provider_name = config[CONF_TYPE]
module = await load_auth_provider_module(hass, provider_name)
try:
config = module.CONFIG_SCHEMA(config) # type: ignore
except vol.Invalid as err:
_LOGGER.error(
"Invalid configuration for auth provider %s: %s",
provider_name,
humanize_error(config, err),
)
raise
return AUTH_PROVIDERS[provider_name](hass, store, config) # type: ignore
async def load_auth_provider_module(
hass: HomeAssistant, provider: str
) -> types.ModuleType:
"""Load an auth provider."""
try:
module = importlib.import_module(f"homeassistant.auth.providers.{provider}")
except ImportError as err:
_LOGGER.error("Unable to load auth provider %s: %s", provider, err)
raise HomeAssistantError(
f"Unable to load auth provider {provider}: {err}"
) from err
if hass.config.skip_pip or not hasattr(module, "REQUIREMENTS"):
return module
processed = hass.data.get(DATA_REQS)
if processed is None:
processed = hass.data[DATA_REQS] = set()
elif provider in processed:
return module
# https://github.com/python/mypy/issues/1424
reqs = module.REQUIREMENTS # type: ignore
await requirements.async_process_requirements(
hass, f"auth provider {provider}", reqs
)
processed.add(provider)
return module
class LoginFlow(data_entry_flow.FlowHandler):
"""Handler for the login flow."""
def __init__(self, auth_provider: AuthProvider) -> None:
"""Initialize the login flow."""
self._auth_provider = auth_provider
self._auth_module_id: str | None = None
self._auth_manager = auth_provider.hass.auth
self.available_mfa_modules: dict[str, str] = {}
self.created_at = dt_util.utcnow()
self.invalid_mfa_times = 0
self.user: User | None = None
self.credential: Credentials | None = None
async def async_step_init(
self, user_input: dict[str, str] | None = None
) -> FlowResult:
"""Handle the first step of login flow.
Return self.async_show_form(step_id='init') if user_input is None.
Return await self.async_finish(flow_result) if the login init step passes.
"""
raise NotImplementedError
async def async_step_select_mfa_module(
self, user_input: dict[str, str] | None = None
) -> FlowResult:
"""Handle the step of select mfa module."""
errors = {}
if user_input is not None:
auth_module = user_input.get("multi_factor_auth_module")
if auth_module in self.available_mfa_modules:
self._auth_module_id = auth_module
return await self.async_step_mfa()
errors["base"] = "invalid_auth_module"
if len(self.available_mfa_modules) == 1:
self._auth_module_id = list(self.available_mfa_modules)[0]
return await self.async_step_mfa()
return self.async_show_form(
step_id="select_mfa_module",
data_schema=vol.Schema(
{"multi_factor_auth_module": vol.In(self.available_mfa_modules)}
),
errors=errors,
)
async def async_step_mfa(
self, user_input: dict[str, str] | None = None
) -> FlowResult:
"""Handle the step of mfa validation."""
assert self.credential
assert self.user
errors = {}
assert self._auth_module_id is not None
auth_module = self._auth_manager.get_auth_mfa_module(self._auth_module_id)
if auth_module is None:
# Given invalid input, async_step_select_mfa_module
# will show an invalid_auth_module error
return await self.async_step_select_mfa_module(user_input={})
if user_input is None and hasattr(
auth_module, "async_initialize_login_mfa_step"
):
try:
await auth_module.async_initialize_login_mfa_step( # type: ignore
self.user.id
)
except HomeAssistantError:
_LOGGER.exception("Error initializing MFA step")
return self.async_abort(reason="unknown_error")
if user_input is not None:
expires = self.created_at + MFA_SESSION_EXPIRATION
if dt_util.utcnow() > expires:
return self.async_abort(reason="login_expired")
result = await auth_module.async_validate(self.user.id, user_input)
if not result:
errors["base"] = "invalid_code"
self.invalid_mfa_times += 1
if self.invalid_mfa_times >= auth_module.MAX_RETRY_TIME > 0:
return self.async_abort(reason="too_many_retry")
if not errors:
return await self.async_finish(self.credential)
description_placeholders: dict[str, str | None] = {
"mfa_module_name": auth_module.name,
"mfa_module_id": auth_module.id,
}
return self.async_show_form(
step_id="mfa",
data_schema=auth_module.input_schema,
description_placeholders=description_placeholders,
errors=errors,
)
async def async_finish(self, flow_result: Any) -> FlowResult:
"""Handle the pass of login flow."""
return self.async_create_entry(title=self._auth_provider.name, data=flow_result)
| {
"content_hash": "b1a1ed1ccffbeca300cfd814462e63ee",
"timestamp": "",
"source": "github",
"line_count": 292,
"max_line_length": 88,
"avg_line_length": 33.12328767123287,
"alnum_prop": 0.6198304383788255,
"repo_name": "kennedyshead/home-assistant",
"id": "d2dfa0e1c6d8bfd0ddf1e711864767c1bdd53222",
"size": "9672",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/auth/providers/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "33970989"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('boletin', '0003_auto_20151211_0441'),
]
operations = [
migrations.AlterField(
model_name='registrado',
name='codigo_postal',
field=models.IntegerField(),
),
]
| {
"content_hash": "9f2c12d2efb1430be64c391c67177069",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 47,
"avg_line_length": 21.055555555555557,
"alnum_prop": 0.5936675461741425,
"repo_name": "probardjango/Probar-Django-1.9",
"id": "402a13098871eff14fc1136a5b5055c5e89faf38",
"size": "449",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/boletin/migrations/0004_auto_20151211_0452.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "45809"
},
{
"name": "HTML",
"bytes": "917"
},
{
"name": "JavaScript",
"bytes": "88987"
},
{
"name": "Python",
"bytes": "12473"
}
],
"symlink_target": ""
} |
from os import path
from setuptools import setup
README = path.join(path.dirname(path.abspath(__file__)), "README.rst")
setup(
name="fforms",
version="1.1.1",
description=("Standalone HTML form validation library"),
long_description=open(README).read(),
author="Felipe Ochoa",
author_email="felipeochoa@find-me-on-github.com",
url="https://github.com/felipeochoa/fforms",
license="MIT",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3 :: Only',
'License :: OSI Approved :: MIT License',
],
keywords="forms form html",
packages=["fforms"],
test_suite="tests",
)
| {
"content_hash": "c0cb77c85db1ac0eb0c1c3ca755cb0dc",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 70,
"avg_line_length": 31.12,
"alnum_prop": 0.6375321336760925,
"repo_name": "felipeochoa/fforms",
"id": "71d86e7a008353b939239338f16650119be1b06f",
"size": "778",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "74469"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
import re
import warnings
import numpy as np
from .. import registry as io_registry
from ... import units as u
from ...extern import six
from ...extern.six import string_types
from ...table import Table
from ...utils import OrderedDict
from ...utils.exceptions import AstropyUserWarning
from astropy.units.format.fits import UnitScaleError
from . import HDUList, TableHDU, BinTableHDU, GroupsHDU
from . import FITS_rec
from .hdu.hdulist import fitsopen as fits_open
from .util import first
# FITS file signature as per RFC 4047
FITS_SIGNATURE = (b"\x53\x49\x4d\x50\x4c\x45\x20\x20\x3d\x20\x20\x20\x20\x20"
b"\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20"
b"\x20\x54")
# Keywords to remove for all tables that are read in
REMOVE_KEYWORDS = ['XTENSION', 'BITPIX', 'NAXIS', 'NAXIS1', 'NAXIS2',
'PCOUNT', 'GCOUNT', 'TFIELDS']
# Column-specific keywords
COLUMN_KEYWORDS = ['TFORM[0-9]+',
'TBCOL[0-9]+',
'TSCAL[0-9]+',
'TZERO[0-9]+',
'TNULL[0-9]+',
'TTYPE[0-9]+',
'TUNIT[0-9]+',
'TDISP[0-9]+',
'TDIM[0-9]+',
'THEAP']
def is_column_keyword(keyword):
for c in COLUMN_KEYWORDS:
if re.match(c, keyword) is not None:
return True
return False
def is_fits(origin, filepath, fileobj, *args, **kwargs):
"""
Determine whether `origin` is a FITS file.
Parameters
----------
origin : str or readable file-like object
Path or file object containing a potential FITS file.
Returns
-------
is_fits : bool
Returns `True` if the given file is a FITS file.
"""
if fileobj is not None:
pos = fileobj.tell()
sig = fileobj.read(30)
fileobj.seek(pos)
return sig == FITS_SIGNATURE
elif filepath is not None:
if filepath.lower().endswith(('.fits', '.fits.gz', '.fit', '.fit.gz')):
return True
elif isinstance(args[0], (HDUList, TableHDU, BinTableHDU, GroupsHDU)):
return True
else:
return False
def read_table_fits(input, hdu=None):
"""
Read a Table object from a FITS file
Parameters
----------
input : str or file-like object or compatible `astropy.io.fits` HDU object
If a string, the filename to read the table from. If a file object, or
a compatible HDU object, the object to extract the table from. The
following `astropy.io.fits` HDU objects can be used as input:
- :class:`~astropy.io.fits.hdu.table.TableHDU`
- :class:`~astropy.io.fits.hdu.table.BinTableHDU`
- :class:`~astropy.io.fits.hdu.table.GroupsHDU`
- :class:`~astropy.io.fits.hdu.hdulist.HDUList`
hdu : int or str, optional
The HDU to read the table from.
"""
if isinstance(input, HDUList):
# Parse all table objects
tables = OrderedDict()
for ihdu, hdu_item in enumerate(input):
if isinstance(hdu_item, (TableHDU, BinTableHDU, GroupsHDU)):
tables[ihdu] = hdu_item
if len(tables) > 1:
if hdu is None:
warnings.warn("hdu= was not specified but multiple tables"
" are present, reading in first available"
" table (hdu={0})".format(first(tables)),
AstropyUserWarning)
hdu = first(tables)
# hdu might not be an integer, so we first need to convert it
# to the correct HDU index
hdu = input.index_of(hdu)
if hdu in tables:
table = tables[hdu]
else:
raise ValueError("No table found in hdu={0}".format(hdu))
elif len(tables) == 1:
table = tables[first(tables)]
else:
raise ValueError("No table found")
elif isinstance(input, (TableHDU, BinTableHDU, GroupsHDU)):
table = input
else:
hdulist = fits_open(input)
try:
return read_table_fits(hdulist, hdu=hdu)
finally:
hdulist.close()
# Check if table is masked
masked = False
for col in table.columns:
if col.null is not None:
masked = True
break
# Convert to an astropy.table.Table object
t = Table(table.data, masked=masked)
# Copy over null values if needed
if masked:
for col in table.columns:
if col.null is not None:
t[col.name].set_fill_value(col.null)
t[col.name].mask[t[col.name] == col.null] = True
# Copy over units
for col in table.columns:
if col.unit is not None:
t[col.name].unit = u.Unit(
col.unit, format='fits', parse_strict='warn')
# TODO: deal properly with unsigned integers
for key, value, comment in table.header.cards:
if key in ['COMMENT', 'HISTORY']:
if key in t.meta:
t.meta[key].append(value)
else:
t.meta[key] = [value]
elif key in t.meta: # key is duplicate
if isinstance(t.meta[key], list):
t.meta[key].append(value)
else:
t.meta[key] = [t.meta[key], value]
elif (is_column_keyword(key.upper()) or
key.upper() in REMOVE_KEYWORDS):
pass
else:
t.meta[key] = value
# TODO: implement masking
return t
def write_table_fits(input, output, overwrite=False):
"""
Write a Table object to a FITS file
Parameters
----------
input : Table
The table to write out.
output : str
The filename to write the table to.
overwrite : bool
Whether to overwrite any existing file without warning.
"""
# Tables with mixin columns are not supported
if input.has_mixin_columns:
mixin_names = [name for name, col in input.columns.items()
if not isinstance(col, input.ColumnClass)]
raise ValueError('cannot write table with mixin column(s) {0} to FITS'
.format(mixin_names))
# Check if output file already exists
if isinstance(output, string_types) and os.path.exists(output):
if overwrite:
os.remove(output)
else:
raise IOError("File exists: {0}".format(output))
# Create a new HDU object
if input.masked:
# float columns' default fill value needs to be NaN
for column in six.itervalues(input.columns):
fill_value = column.get_fill_value()
if column.dtype.kind == 'f' and np.allclose(fill_value, 1e20):
column.set_fill_value(np.nan)
fits_rec = FITS_rec.from_columns(np.array(input.filled()))
table_hdu = BinTableHDU(fits_rec)
for col in table_hdu.columns:
# Binary FITS tables support TNULL *only* for integer data columns
# TODO: Determine a schema for handling non-integer masked columns
# in FITS (if at all possible)
int_formats = ('B', 'I', 'J', 'K')
if not (col.format in int_formats or
col.format.p_format in int_formats):
continue
# The astype is necessary because if the string column is less
# than one character, the fill value will be N/A by default which
# is too long, and so no values will get masked.
fill_value = input[col.name].get_fill_value()
col.null = fill_value.astype(input[col.name].dtype)
else:
fits_rec = FITS_rec.from_columns(np.array(input.filled()))
table_hdu = BinTableHDU(fits_rec)
# Set units for output HDU
for col in table_hdu.columns:
unit = input[col.name].unit
if unit is not None:
try:
col.unit = unit.to_string(format='fits')
except UnitScaleError:
scale = unit.scale
raise UnitScaleError(
"The column '{0}' could not be stored in FITS format "
"because it has a scale '({1})' that "
"is not recognized by the FITS standard. Either scale "
"the data or change the units.".format(col.name, str(scale)))
except ValueError:
warnings.warn(
"The unit '{0}' could not be saved to FITS format".format(
unit.to_string()), AstropyUserWarning)
for key, value in input.meta.items():
if is_column_keyword(key.upper()) or key.upper() in REMOVE_KEYWORDS:
warnings.warn(
"Meta-data keyword {0} will be ignored since it conflicts "
"with a FITS reserved keyword".format(key), AstropyUserWarning)
if isinstance(value, list):
for item in value:
try:
table_hdu.header.append((key, item))
except ValueError:
warnings.warn(
"Attribute `{0}` of type {1} cannot be written to "
"FITS files - skipping".format(key, type(value)),
AstropyUserWarning)
else:
try:
table_hdu.header[key] = value
except ValueError:
warnings.warn(
"Attribute `{0}` of type {1} cannot be written to FITS "
"files - skipping".format(key, type(value)),
AstropyUserWarning)
# Write out file
table_hdu.writeto(output)
io_registry.register_reader('fits', Table, read_table_fits)
io_registry.register_writer('fits', Table, write_table_fits)
io_registry.register_identifier('fits', Table, is_fits)
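# Illustrative sketch (not part of the original module): with the reader,
# writer and identifier registered above, FITS tables go through the unified
# Table I/O interface. The file names are hypothetical.
#   >>> from astropy.table import Table
#   >>> t = Table.read("observations.fits", hdu=1)
#   >>> t.write("observations_copy.fits", overwrite=True)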
| {
"content_hash": "33056e6d9715ef1e8da9b1484837de93",
"timestamp": "",
"source": "github",
"line_count": 302,
"max_line_length": 81,
"avg_line_length": 32.950331125827816,
"alnum_prop": 0.5611496332026932,
"repo_name": "piotroxp/scibibscan",
"id": "a1a2349f33f509200d123f9ec8904c763b5ba751",
"size": "10016",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scib/lib/python3.5/site-packages/astropy/io/fits/connect.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "568253"
},
{
"name": "C++",
"bytes": "8204"
},
{
"name": "CSS",
"bytes": "10578"
},
{
"name": "Fortran",
"bytes": "3707"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Python",
"bytes": "13727486"
},
{
"name": "Shell",
"bytes": "4887"
},
{
"name": "TeX",
"bytes": "678"
}
],
"symlink_target": ""
} |
import os
from twisted.trial import unittest
from scrapy.utils.iterators import csviter, xmliter, _body_or_str, xmliter_lxml
from scrapy.http import XmlResponse, TextResponse, Response
from tests import get_testdata
class XmliterTestCase(unittest.TestCase):
xmliter = staticmethod(xmliter)
def test_xmliter(self):
body = b"""
<?xml version="1.0" encoding="UTF-8"?>
<products xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:noNamespaceSchemaLocation="someschmea.xsd">
<product id="001">
<type>Type 1</type>
<name>Name 1</name>
</product>
<product id="002">
<type>Type 2</type>
<name>Name 2</name>
</product>
</products>
"""
response = XmlResponse(url="http://example.com", body=body)
attrs = []
for x in self.xmliter(response, 'product'):
attrs.append((
x.attrib['id'],
x.xpath("name/text()").getall(),
x.xpath("./type/text()").getall()))
self.assertEqual(attrs,
[('001', ['Name 1'], ['Type 1']), ('002', ['Name 2'], ['Type 2'])])
def test_xmliter_unusual_node(self):
body = b"""<?xml version="1.0" encoding="UTF-8"?>
<root>
<matchme...></matchme...>
<matchmenot></matchmenot>
</root>
"""
response = XmlResponse(url="http://example.com", body=body)
nodenames = [e.xpath('name()').getall() for e in self.xmliter(response, 'matchme...')]
self.assertEqual(nodenames, [['matchme...']])
def test_xmliter_unicode(self):
# example taken from https://github.com/scrapy/scrapy/issues/1665
body = """<?xml version="1.0" encoding="UTF-8"?>
<þingflokkar>
<þingflokkur id="26">
<heiti />
<skammstafanir>
<stuttskammstöfun>-</stuttskammstöfun>
<löngskammstöfun />
</skammstafanir>
<tímabil>
<fyrstaþing>80</fyrstaþing>
</tímabil>
</þingflokkur>
<þingflokkur id="21">
<heiti>Alþýðubandalag</heiti>
<skammstafanir>
<stuttskammstöfun>Ab</stuttskammstöfun>
<löngskammstöfun>Alþb.</löngskammstöfun>
</skammstafanir>
<tímabil>
<fyrstaþing>76</fyrstaþing>
<síðastaþing>123</síðastaþing>
</tímabil>
</þingflokkur>
<þingflokkur id="27">
<heiti>Alþýðuflokkur</heiti>
<skammstafanir>
<stuttskammstöfun>A</stuttskammstöfun>
<löngskammstöfun>Alþfl.</löngskammstöfun>
</skammstafanir>
<tímabil>
<fyrstaþing>27</fyrstaþing>
<síðastaþing>120</síðastaþing>
</tímabil>
</þingflokkur>
</þingflokkar>"""
for r in (
# with bytes
XmlResponse(url="http://example.com", body=body.encode('utf-8')),
# Unicode body needs encoding information
XmlResponse(url="http://example.com", body=body, encoding='utf-8'),
):
attrs = []
for x in self.xmliter(r, 'þingflokkur'):
attrs.append((x.attrib['id'],
x.xpath('./skammstafanir/stuttskammstöfun/text()').getall(),
x.xpath('./tímabil/fyrstaþing/text()').getall()))
self.assertEqual(attrs,
[('26', ['-'], ['80']),
('21', ['Ab'], ['76']),
('27', ['A'], ['27'])])
def test_xmliter_text(self):
body = (
'<?xml version="1.0" encoding="UTF-8"?>'
'<products><product>one</product><product>two</product></products>'
)
self.assertEqual([x.xpath("text()").getall() for x in self.xmliter(body, 'product')],
[['one'], ['two']])
def test_xmliter_namespaces(self):
body = b"""
<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0" xmlns:g="http://base.google.com/ns/1.0">
<channel>
<title>My Dummy Company</title>
<link>http://www.mydummycompany.com</link>
<description>This is a dummy company. We do nothing.</description>
<item>
<title>Item 1</title>
<description>This is item 1</description>
<link>http://www.mydummycompany.com/items/1</link>
<g:image_link>http://www.mydummycompany.com/images/item1.jpg</g:image_link>
<g:id>ITEM_1</g:id>
<g:price>400</g:price>
</item>
</channel>
</rss>
"""
response = XmlResponse(url='http://mydummycompany.com', body=body)
my_iter = self.xmliter(response, 'item')
node = next(my_iter)
node.register_namespace('g', 'http://base.google.com/ns/1.0')
self.assertEqual(node.xpath('title/text()').getall(), ['Item 1'])
self.assertEqual(node.xpath('description/text()').getall(), ['This is item 1'])
self.assertEqual(node.xpath('link/text()').getall(), ['http://www.mydummycompany.com/items/1'])
self.assertEqual(
node.xpath('g:image_link/text()').getall(),
['http://www.mydummycompany.com/images/item1.jpg']
)
self.assertEqual(node.xpath('g:id/text()').getall(), ['ITEM_1'])
self.assertEqual(node.xpath('g:price/text()').getall(), ['400'])
self.assertEqual(node.xpath('image_link/text()').getall(), [])
self.assertEqual(node.xpath('id/text()').getall(), [])
self.assertEqual(node.xpath('price/text()').getall(), [])
def test_xmliter_exception(self):
body = (
'<?xml version="1.0" encoding="UTF-8"?>'
'<products><product>one</product><product>two</product></products>'
)
iter = self.xmliter(body, 'product')
next(iter)
next(iter)
self.assertRaises(StopIteration, next, iter)
def test_xmliter_objtype_exception(self):
i = self.xmliter(42, 'product')
self.assertRaises(TypeError, next, i)
def test_xmliter_encoding(self):
body = (
b'<?xml version="1.0" encoding="ISO-8859-9"?>\n'
b'<xml>\n'
b' <item>Some Turkish Characters \xd6\xc7\xde\xdd\xd0\xdc \xfc\xf0\xfd\xfe\xe7\xf6</item>\n'
b'</xml>\n\n'
)
response = XmlResponse('http://www.example.com', body=body)
self.assertEqual(
next(self.xmliter(response, 'item')).get(),
'<item>Some Turkish Characters \xd6\xc7\u015e\u0130\u011e\xdc \xfc\u011f\u0131\u015f\xe7\xf6</item>'
)
class LxmlXmliterTestCase(XmliterTestCase):
xmliter = staticmethod(xmliter_lxml)
def test_xmliter_iterate_namespace(self):
body = b"""
<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0" xmlns="http://base.google.com/ns/1.0">
<channel>
<title>My Dummy Company</title>
<link>http://www.mydummycompany.com</link>
<description>This is a dummy company. We do nothing.</description>
<item>
<title>Item 1</title>
<description>This is item 1</description>
<link>http://www.mydummycompany.com/items/1</link>
<image_link>http://www.mydummycompany.com/images/item1.jpg</image_link>
<image_link>http://www.mydummycompany.com/images/item2.jpg</image_link>
</item>
</channel>
</rss>
"""
response = XmlResponse(url='http://mydummycompany.com', body=body)
no_namespace_iter = self.xmliter(response, 'image_link')
self.assertEqual(len(list(no_namespace_iter)), 0)
namespace_iter = self.xmliter(response, 'image_link', 'http://base.google.com/ns/1.0')
node = next(namespace_iter)
self.assertEqual(node.xpath('text()').getall(), ['http://www.mydummycompany.com/images/item1.jpg'])
node = next(namespace_iter)
self.assertEqual(node.xpath('text()').getall(), ['http://www.mydummycompany.com/images/item2.jpg'])
def test_xmliter_namespaces_prefix(self):
body = b"""
<?xml version="1.0" encoding="UTF-8"?>
<root>
<h:table xmlns:h="http://www.w3.org/TR/html4/">
<h:tr>
<h:td>Apples</h:td>
<h:td>Bananas</h:td>
</h:tr>
</h:table>
<f:table xmlns:f="http://www.w3schools.com/furniture">
<f:name>African Coffee Table</f:name>
<f:width>80</f:width>
<f:length>120</f:length>
</f:table>
</root>
"""
response = XmlResponse(url='http://mydummycompany.com', body=body)
my_iter = self.xmliter(response, 'table', 'http://www.w3.org/TR/html4/', 'h')
node = next(my_iter)
self.assertEqual(len(node.xpath('h:tr/h:td').getall()), 2)
self.assertEqual(node.xpath('h:tr/h:td[1]/text()').getall(), ['Apples'])
self.assertEqual(node.xpath('h:tr/h:td[2]/text()').getall(), ['Bananas'])
my_iter = self.xmliter(response, 'table', 'http://www.w3schools.com/furniture', 'f')
node = next(my_iter)
self.assertEqual(node.xpath('f:name/text()').getall(), ['African Coffee Table'])
def test_xmliter_objtype_exception(self):
i = self.xmliter(42, 'product')
self.assertRaises(TypeError, next, i)
class UtilsCsvTestCase(unittest.TestCase):
sample_feeds_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'sample_data', 'feeds')
sample_feed_path = os.path.join(sample_feeds_dir, 'feed-sample3.csv')
sample_feed2_path = os.path.join(sample_feeds_dir, 'feed-sample4.csv')
sample_feed3_path = os.path.join(sample_feeds_dir, 'feed-sample5.csv')
def test_csviter_defaults(self):
body = get_testdata('feeds', 'feed-sample3.csv')
response = TextResponse(url="http://example.com/", body=body)
csv = csviter(response)
result = [row for row in csv]
self.assertEqual(result,
[{'id': '1', 'name': 'alpha', 'value': 'foobar'},
{'id': '2', 'name': 'unicode', 'value': '\xfan\xedc\xf3d\xe9\u203d'},
{'id': '3', 'name': 'multi', 'value': "foo\nbar"},
{'id': '4', 'name': 'empty', 'value': ''}])
# explicit type check cuz' we no like stinkin' autocasting! yarrr
for result_row in result:
self.assertTrue(all((isinstance(k, str) for k in result_row.keys())))
self.assertTrue(all((isinstance(v, str) for v in result_row.values())))
def test_csviter_delimiter(self):
body = get_testdata('feeds', 'feed-sample3.csv').replace(b',', b'\t')
response = TextResponse(url="http://example.com/", body=body)
csv = csviter(response, delimiter='\t')
self.assertEqual([row for row in csv],
[{'id': '1', 'name': 'alpha', 'value': 'foobar'},
{'id': '2', 'name': 'unicode', 'value': '\xfan\xedc\xf3d\xe9\u203d'},
{'id': '3', 'name': 'multi', 'value': "foo\nbar"},
{'id': '4', 'name': 'empty', 'value': ''}])
def test_csviter_quotechar(self):
body1 = get_testdata('feeds', 'feed-sample6.csv')
body2 = get_testdata('feeds', 'feed-sample6.csv').replace(b',', b'|')
response1 = TextResponse(url="http://example.com/", body=body1)
csv1 = csviter(response1, quotechar="'")
self.assertEqual([row for row in csv1],
[{'id': '1', 'name': 'alpha', 'value': 'foobar'},
{'id': '2', 'name': 'unicode', 'value': '\xfan\xedc\xf3d\xe9\u203d'},
{'id': '3', 'name': 'multi', 'value': "foo\nbar"},
{'id': '4', 'name': 'empty', 'value': ''}])
response2 = TextResponse(url="http://example.com/", body=body2)
csv2 = csviter(response2, delimiter="|", quotechar="'")
self.assertEqual([row for row in csv2],
[{'id': '1', 'name': 'alpha', 'value': 'foobar'},
{'id': '2', 'name': 'unicode', 'value': '\xfan\xedc\xf3d\xe9\u203d'},
{'id': '3', 'name': 'multi', 'value': "foo\nbar"},
{'id': '4', 'name': 'empty', 'value': ''}])
def test_csviter_wrong_quotechar(self):
body = get_testdata('feeds', 'feed-sample6.csv')
response = TextResponse(url="http://example.com/", body=body)
csv = csviter(response)
self.assertEqual([row for row in csv],
[{"'id'": "1", "'name'": "'alpha'", "'value'": "'foobar'"},
{"'id'": "2", "'name'": "'unicode'", "'value'": "'\xfan\xedc\xf3d\xe9\u203d'"},
{"'id'": "'3'", "'name'": "'multi'", "'value'": "'foo"},
{"'id'": "4", "'name'": "'empty'", "'value'": ""}])
def test_csviter_delimiter_binary_response_assume_utf8_encoding(self):
body = get_testdata('feeds', 'feed-sample3.csv').replace(b',', b'\t')
response = Response(url="http://example.com/", body=body)
csv = csviter(response, delimiter='\t')
self.assertEqual([row for row in csv],
[{'id': '1', 'name': 'alpha', 'value': 'foobar'},
{'id': '2', 'name': 'unicode', 'value': '\xfan\xedc\xf3d\xe9\u203d'},
{'id': '3', 'name': 'multi', 'value': "foo\nbar"},
{'id': '4', 'name': 'empty', 'value': ''}])
def test_csviter_headers(self):
sample = get_testdata('feeds', 'feed-sample3.csv').splitlines()
headers, body = sample[0].split(b','), b'\n'.join(sample[1:])
response = TextResponse(url="http://example.com/", body=body)
csv = csviter(response, headers=[h.decode('utf-8') for h in headers])
self.assertEqual([row for row in csv],
[{'id': '1', 'name': 'alpha', 'value': 'foobar'},
{'id': '2', 'name': 'unicode', 'value': '\xfan\xedc\xf3d\xe9\u203d'},
{'id': '3', 'name': 'multi', 'value': 'foo\nbar'},
{'id': '4', 'name': 'empty', 'value': ''}])
def test_csviter_falserow(self):
body = get_testdata('feeds', 'feed-sample3.csv')
body = b'\n'.join((body, b'a,b', b'a,b,c,d'))
response = TextResponse(url="http://example.com/", body=body)
csv = csviter(response)
self.assertEqual([row for row in csv],
[{'id': '1', 'name': 'alpha', 'value': 'foobar'},
{'id': '2', 'name': 'unicode', 'value': '\xfan\xedc\xf3d\xe9\u203d'},
{'id': '3', 'name': 'multi', 'value': "foo\nbar"},
{'id': '4', 'name': 'empty', 'value': ''}])
def test_csviter_exception(self):
body = get_testdata('feeds', 'feed-sample3.csv')
response = TextResponse(url="http://example.com/", body=body)
iter = csviter(response)
next(iter)
next(iter)
next(iter)
next(iter)
self.assertRaises(StopIteration, next, iter)
def test_csviter_encoding(self):
body1 = get_testdata('feeds', 'feed-sample4.csv')
body2 = get_testdata('feeds', 'feed-sample5.csv')
response = TextResponse(url="http://example.com/", body=body1, encoding='latin1')
csv = csviter(response)
self.assertEqual(
list(csv),
[
{'id': '1', 'name': 'latin1', 'value': 'test'},
{'id': '2', 'name': 'something', 'value': '\xf1\xe1\xe9\xf3'},
]
)
response = TextResponse(url="http://example.com/", body=body2, encoding='cp852')
csv = csviter(response)
self.assertEqual(
list(csv),
[
{'id': '1', 'name': 'cp852', 'value': 'test'},
{'id': '2', 'name': 'something', 'value': '\u255a\u2569\u2569\u2569\u2550\u2550\u2557'},
]
)
class TestHelper(unittest.TestCase):
bbody = b'utf8-body'
ubody = bbody.decode('utf8')
txtresponse = TextResponse(url='http://example.org/', body=bbody, encoding='utf-8')
response = Response(url='http://example.org/', body=bbody)
def test_body_or_str(self):
for obj in (self.bbody, self.ubody, self.txtresponse, self.response):
r1 = _body_or_str(obj)
self._assert_type_and_value(r1, self.ubody, obj)
r2 = _body_or_str(obj, unicode=True)
self._assert_type_and_value(r2, self.ubody, obj)
r3 = _body_or_str(obj, unicode=False)
self._assert_type_and_value(r3, self.bbody, obj)
self.assertTrue(type(r1) is type(r2))
self.assertTrue(type(r1) is not type(r3))
def _assert_type_and_value(self, a, b, obj):
self.assertTrue(type(a) is type(b),
f'Got {type(a)}, expected {type(b)} for {obj!r}')
self.assertEqual(a, b)
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "d32c582bfe930267d7d8bd0f8d8dabd6",
"timestamp": "",
"source": "github",
"line_count": 417,
"max_line_length": 112,
"avg_line_length": 42.8968824940048,
"alnum_prop": 0.5088327370304114,
"repo_name": "starrify/scrapy",
"id": "79f5a2bbebaec69e014b5b8fbeefd499dce6413c",
"size": "17948",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_utils_iterators.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2076"
},
{
"name": "Python",
"bytes": "1466538"
},
{
"name": "Roff",
"bytes": "2010"
},
{
"name": "Shell",
"bytes": "259"
}
],
"symlink_target": ""
} |
import unittest
from io import StringIO
from ...worksheet import Worksheet
class TestWriteFilterColumn(unittest.TestCase):
"""
Test the Worksheet _write_filter_column() method.
"""
def setUp(self):
self.fh = StringIO()
self.worksheet = Worksheet()
self.worksheet._set_filehandle(self.fh)
def test_write_filter_column(self):
"""Test the _write_filter_column() method"""
self.worksheet._write_filter_column(0, 1, ['East'])
exp = """<filterColumn colId="0"><filters><filter val="East"/></filters></filterColumn>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
| {
"content_hash": "bed95289a0757cb441a1245d1c233483",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 98,
"avg_line_length": 26.24,
"alnum_prop": 0.6326219512195121,
"repo_name": "jmcnamara/XlsxWriter",
"id": "3b5088615365b91e56bf9e3f18b28f790151a266",
"size": "869",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "xlsxwriter/test/worksheet/test_write_filter_column.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5113"
},
{
"name": "CSS",
"bytes": "16544"
},
{
"name": "HTML",
"bytes": "13100"
},
{
"name": "Makefile",
"bytes": "7748"
},
{
"name": "Perl",
"bytes": "3503"
},
{
"name": "Python",
"bytes": "2807230"
},
{
"name": "Shell",
"bytes": "7964"
}
],
"symlink_target": ""
} |
"""
Display all redis keys and values
"""
import time
import sys
import os
if __name__ == '__main__':
# add parent directory to python path such that we can import modules
HERE = os.path.dirname(os.path.realpath(__file__))
PROJ_PATH = os.path.abspath(os.path.join(HERE, '../'))
sys.path.insert(0, PROJ_PATH)
from h5pyswmr.locking import redis_conn as r
while(True):
sys.stderr.write("\x1b[2J\x1b[H")
print("Redis server keys and values:")
print("=============================")
for key in sorted(r.keys()):
try:
print('{0}\t{1}'.format(key, r[key]))
except KeyError:
pass
time.sleep(0.1)
| {
"content_hash": "d7125083793276bb90d3850977dd789d",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 73,
"avg_line_length": 25.5,
"alnum_prop": 0.5434173669467787,
"repo_name": "meteotest/h5pySWMR",
"id": "db038343e0421173a3b202dfa5b6884a1ed8b6d3",
"size": "739",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util/redis_showkeys.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43123"
}
],
"symlink_target": ""
} |
import os
import sys
import psycopg2
import psycopg2.extras
import ujson
import json
import argparse
def pg_connect(**cargs):
pg_conn = psycopg2.connect(**cargs)
pg_conn.reset()
pg_conn.set_session(
isolation_level=psycopg2.extensions.ISOLATION_LEVEL_REPEATABLE_READ,
readonly=True,
deferrable=True,
autocommit=True)
cursor = pg_conn.cursor()
cursor.execute("set timezone='UTC'")
cursor.close()
return pg_conn
def server_side_cursor_fetchall(pg_conn, sql_query, sql_args=None, chunk_size=5000, using='default'):
# Use a named server-side cursor so large result sets are streamed from the
# server in chunks of `chunk_size` rows instead of being fetched all at once.
# NOTE: sql_args and using are accepted for API compatibility but are not used here.
sql = 'DECLARE ssc CURSOR FOR {}'.format(sql_query)
sql_fetch_chunk = 'FETCH {} FROM ssc'.format(chunk_size)
cursor = pg_conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
try:
cursor.execute('BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED READ ONLY')
cursor.execute(sql)
try:
cursor.execute(sql_fetch_chunk)
while True:
rows = cursor.fetchall()
if not rows:
break
for row in rows:
yield row
cursor.execute(sql_fetch_chunk)
finally:
cursor.execute('CLOSE ssc')
finally:
cursor.close()
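# Illustrative usage sketch (not part of the original script): stream rows
# from a hypothetical table without loading the whole result set into memory.
#   >>> conn = pg_connect(database="mydb", user="postgres",
#   ...                   password="password", host="localhost")
#   >>> for row in server_side_cursor_fetchall(conn, "select * from some_table"):
#   ...     print(row["id"])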
def get_pg_tables(pg_conn):
cursor = pg_conn.cursor()
cursor.execute("select relname from pg_class where relkind='r' and relname !~ '^(pg_|sql_)';")
tables = [x[0] for x in cursor.fetchall()]
return tables
def table2json(pg_conn, output_dir, table):
path = os.path.join(output_dir, '{}.txt'.format(table))
print('exporting table {} to json file {}'.format(table, path))
sql = "select * from \"{}\"".format(table)
f = open(path, 'w')
i = 0
for i, row in enumerate(server_side_cursor_fetchall(pg_conn, sql)):
try:
f.write(ujson.dumps(row) + '\n')
except Exception:
print('BAD ROW JSON')
# if i > 0 and i % 1000 == 0:
# print('wrote {} records'.format(i))
print('wrote {} records'.format(i))
f.close()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--host', help='database hostname', type=str, default='localhost')
parser.add_argument('--port', help='database port', type=int, default=5432)
parser.add_argument('--password', help='database password', type=str, default='password')
parser.add_argument('--user', help='database user', type=str, default='postgres')
parser.add_argument('name', help='database name', type=str)
parser.add_argument('output_dir', help='output directory for JSON files', type=str)
args = parser.parse_args()
db_source = {
'database': args.name,
'host': args.host,
'password': args.password,
'user': args.user
}
if args.port:
db_source['port'] = args.port
def connect():
_pg_conn = pg_connect(**db_source)
try:
psycopg2.extras.register_hstore(_pg_conn)
except:
pass
return _pg_conn
pg_conn = connect()
tables = get_pg_tables(pg_conn)
pg_conn.close()
for table in tables:
try:
pg_conn = connect()
table2json(pg_conn, args.output_dir, table)
except psycopg2.ProgrammingError as e:
print('EXCEPTION: {}'.format(e))
finally:
pg_conn.close()
if __name__ == '__main__':
main()
| {
"content_hash": "ebe91c961b24a0e590dc2af69021eb38",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 101,
"avg_line_length": 30.017391304347825,
"alnum_prop": 0.593279258400927,
"repo_name": "jpictor/dskit",
"id": "8e6e2f13aab5112d0a2596a9895f50015f821144",
"size": "3452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/pg2json.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16497"
},
{
"name": "Shell",
"bytes": "2642"
}
],
"symlink_target": ""
} |
def escape_double_quote(string):
return string.replace('\\', '\\\\').replace('"', '\\"')
def unescape_double_quote(string):
quoting = False
chars = []
for c in string:
if not quoting:
if c == '\\':
quoting = True
else:
chars.append(c)
if quoting:
chars.append(c)
quoting = False
return ''.join(chars)
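# Illustrative doctest-style sketch (not part of the original module) of the
# escaping helper above:
#   >>> escape_double_quote('a "b"')
#   'a \\"b\\"'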
class FlatPathType(object):
"""
Types of path:
."hello" :: prefix path
."hello". :: type path <- indicates the type of an entry
."hello"[ :: type path <- indicates the type of an entry
."hello"= :: value path
"""
class PrefixPath(FlatPathType):
"""A path like that just indicates the name of something, these do not exist in our store. e.g
."hello" or
."hello"[0]
"""
class DictPrefixPath(PrefixPath):
"""A prefix path whose parent is a dictionary. e.g.
."hello"
"""
class ListPrefixPath(PrefixPath):
"""A prefix path whose parent is a list. e.g.
."hello"[0]
"""
class TypePath(FlatPathType):
"""A path that indicates data type. e.g.
."hello"[ or
."hello". or
"""
class DictPath(TypePath):
"""A path that indicates this key maps to dict. e.g.
."hello".
"""
class ListPath(TypePath):
"""A path that indicates this key maps to a list. e.g.
."hello"[
"""
class ValuePath(TypePath):
"""A path that indicates this key maps to a simple value (boolean, string or float) e.g.
."hello"."world"=
"""
class LengthPath(TypePath):
"""A path that tells us how long an iterable is
."hello"."world"#
"""
class IncorrectType(Exception):
def __init__(self, prefix, got_type, wanted_type):
Exception.__init__(self)
self.prefix = prefix
self.got_type = got_type
self.wanted_type = wanted_type
def __str__(self):
return '{!r}: Wanted {} got {}'.format(self.prefix, self.wanted_type, self.got_type)
class RootNode(Exception):
"Operation is not supported for the root node"
class PathCorrupt(Exception):
"This path looks broken"
def __init__(self, path):
Exception.__init__(self)
self.path = path
def __str__(self):
return 'Path is corrupt {!r}'.format(self.path)
DICT_PATH, LIST_PATH, VALUE_PATH, LENGTH_PATH, LIST_PREFIX_PATH, DICT_PREFIX_PATH = (
DictPath(), ListPath(), ValuePath(), LengthPath(), ListPrefixPath(), DictPrefixPath())
class FlatPath(object):
# Nope: I'm not fully parsing this
# this would have the advantage of effectively adding assertions everywhere
# and potentially making code a lot easier to understand
def __init__(self, prefix):
self._prefix = prefix
self._path_type = None
self._key_string = None
def __repr__(self):
return '<FlatPath prefix={}>'.format(self._prefix)
def __eq__(self, other):
return self.key() == other.key()
def list_key(self):
return self._prefix + "["
def path_type(self):
if self._path_type is None:
self._path_type = self._get_path_type()
return self._path_type
def _get_path_type(self):
if self._prefix == '':
return DICT_PREFIX_PATH
elif self._prefix[-1] == '.':
return DICT_PATH
elif self._prefix[-1] == '[':
return LIST_PATH
elif self._prefix[-1] == '=':
return VALUE_PATH
elif self._prefix[-1] == '#':
return LENGTH_PATH
elif self._prefix[-1] == ']':
return LIST_PREFIX_PATH
elif self._prefix[-1] == '"':
return DICT_PREFIX_PATH
else:
raise PathCorrupt(self._prefix)
def depth(self):
depth = 0
path = self
while True:
path = path.prefix()
if path.key() == '':
return depth
else:
depth += 1
path = path.parent()
def ensure_type(self, Type):
if not isinstance(self.path_type(), Type):
raise IncorrectType(self._prefix, self.path_type(), Type)
def dict(self):
# We could do this through having different
# types, but I think it might make debugging
# slightly difficult
self.ensure_type(PrefixPath)
return FlatPath(self._prefix + '.')
def list(self):
self.ensure_type(PrefixPath)
return FlatPath(self._prefix + '[')
def key_string(self):
if self._key_string is None:
self._key_string = self._get_key_string()
return self._key_string
def _get_key_string(self):
self.ensure_type(DictPrefixPath)
_, string = self._remove_terminal_string(self.prefix().key())
return string
def index_number(self):
self.ensure_type(ListPrefixPath)
prefix = self._remove(self._prefix, ']')
prefix, integer_string = self._remove_terminal_integer(prefix)
self._remove(prefix, '[')
return int(integer_string)
def parent(self):
# Yes we could do this with a parser
if isinstance(self.path_type(), ListPrefixPath):
prefix = self._remove(self._prefix, ']')
prefix, _ = self._remove_terminal_integer(prefix)
prefix = self._remove(prefix, '[')
return FlatPath(prefix)
elif isinstance(self.path_type(), DictPrefixPath):
if self._prefix == '':
raise RootNode()
prefix, _ = self._remove_terminal_string(self._prefix)
prefix = self._remove(prefix, '.')
return FlatPath(prefix)
else:
raise IncorrectType(self._prefix, self.path_type(), TypePath)
@staticmethod
def _remove_terminal_integer(string):
integer_string = []
while True:
if string[-1] in "0123456789":
integer_string.append(string[-1])
string = string[:-1]
else:
break
return string, ''.join(reversed(integer_string))
@classmethod
def _remove_terminal_string(cls, string):
initial_string = string
if string[-1] != '"':
raise ValueError(string)
string = string[:-1]
state = None
QUOTE = 'quote'
while True:
# We may hit escaped sequences like \\ " while scanning backwards
if state == None:
if string[-1] == '"':
state = QUOTE
string = string[:-1]
else:
string = string[:-1]
elif state == QUOTE:
if string[-1] == '\\':
state = None
string = string[:-1]
else:
break
else:
raise ValueError(state)
end = unescape_double_quote(initial_string[len(string) + 1:-1])
return string, end
@staticmethod
def _remove(string, char):
if string[-1] == char:
return string[:-1]
else:
raise ValueError(string[-1])
def lookup(self, key):
self.ensure_type(DictPath)
return FlatPath(self._prefix + '"{}"'.format(escape_double_quote(key)))
def length(self):
self.ensure_type(PrefixPath)
return FlatPath(self._prefix + '#')
def index(self, index):
self.ensure_type(ListPath)
if not isinstance(index, int):
raise ValueError(index)
return FlatPath(self._prefix + '{}]'.format(index))
def value(self):
"""A path representing the value of a particular prefix"""
# e.g ."hello"[0] -> ."hello"[0]=
self.ensure_type(PrefixPath)
return FlatPath(self._prefix + '=')
def key(self):
return self._prefix
def prefix(self):
if isinstance(self.path_type(), (ValuePath, TypePath)):
return FlatPath(self._prefix[:-1])
elif isinstance(self.path_type(), PrefixPath):
return self
else:
raise ValueError(self._prefix)
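# Illustrative sketch (not part of the original module), following the path
# grammar documented in the classes above:
#   >>> FlatPath('').dict().lookup('hello').key()
#   '."hello"'
#   >>> FlatPath('').dict().lookup('hello').list().index(0).value().key()
#   '."hello"[0]='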
| {
"content_hash": "c6069e0da07e0b363a23ace42fb41ac5",
"timestamp": "",
"source": "github",
"line_count": 292,
"max_line_length": 98,
"avg_line_length": 27.815068493150687,
"alnum_prop": 0.5456784043339079,
"repo_name": "talwrii/jsdb",
"id": "d0a785b79adef8f13c56b61c8b6fb4f2d5ad3a99",
"size": "8175",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jsdb/flatpath.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "64136"
}
],
"symlink_target": ""
} |
import os
import sys
import rts2
# from astropy.io import ascii
class filter_set:
"""Class to simplify look up of filter by name and number
uses the python [] operator to lookup filter number
or name. If you give it the name it will return the
number and vice versa. it also uses aliases for the
lookup. RTS2 and the Galil like to use long names
like "Harris-U" observers like short names like "U"
either format is acceptable as long as the alias
is in the dictionary below.
"""
# Filter Name Aliases.
# The key is the name of the filter as reported by
# the Galil and RTS2; the value in the dict is a tuple
# of possible aliases for each filter.
alias = {
"Harris-U": ("U",),
"Harris-R": ("R",),
"Harris-V": ("V",),
"Arizona-I": ("I",),
"Harris-B": ("B",),
"Schott-8612": ("Schott",) }
def __init__(self, filters = None):
if filters is None:
self._filter_list = []
elif type(filters) == list:
self._filter_list = filters
elif type(filters) == dict:
# This assumes that the keys of the dictionary are
# the filter names and the values are the filter numbers.
# Sort by filter number and build the list by reverse lookup.
self._filter_list = []
for key, value in sorted(filters.iteritems(), key=lambda (k,v): (v,k)):
self._filter_list.append( key )
elif type(filters) == str or type(filters) == unicode:
self._filter_list = str(filters).split()
else:
raise TypeError("Unexpected filter type {}, type must be string, unicode, list or dict".format(type(filters)))
def check_alias( self, alias ):
for name, aliases in self.alias.iteritems():
if alias.lower() == name.lower():
return alias
else:
for al in aliases:
if al.lower() == alias.lower():
return name
# we didn't find the alias
return None
def __str__(self):
return "<filter_set: "+str(self._filter_list)+">"
def __repr__(self):
return self.__str__()
def __getitem__(self, key):
if type(key) == int:
return self._filter_list[key]
elif type(key) == str or type(key) == unicode:
realname = self.check_alias(key)
if realname is not None:
return self._filter_list.index(realname)
raise ValueError( "cannot find filter {}".format(key) )
# This sets a default filter order, but it may be different between runs,
# so be careful. Current default order is U,R,B,V,I,Schott.
# It returns a dictionary with key=filter name string, value = filter number
# In the future we should read the order from a file.
def set_filter_dict():
filter_dict = {}
filter_dict['U'] = 0
filter_dict['B'] = 2
filter_dict['V'] = 3
filter_dict['R'] = 1
filter_dict['I'] = 4
filter_dict['Schott'] = 5
# Also put the numbers into the dictionary so specifying a filter by
# number-string is legal, ie you can use either 'R' or '1' to mean filter 1.
for i in range(6):
filter_str = '%1i' % (i)
filter_dict[filter_str] = i
return filter_dict
# This reads the filter order from the RTS2 proxy
# and returns a dictionary similar to what
# set_filter_dict does:
def get_filter_dict(prx_name, prx_passwd):
proxy = rts2.rtsapi.createProxy( "http://bigpop:8889", prx_name, prx_passwd )
filters_str = proxy.getValue("W0", "filter_names", True)
return filter_set( filters_str )
# Write the rts2-newtarget script. This is run taking input rather than
# command line arguments, so I use the shell "<< EOF" syntax.
def make_newtarget_prompt(fname):
radec_str = raw_input('Enter RA and Dec as hh:mm:ss dd:mm:ss, separated by a space: ')
name_str = raw_input('Enter object name, eg Landolt_92_249: ')
queue_id_str = raw_input('Enter numeric queue id, eg 1200: ')
f = open(fname,'a')
f.write('rts2-newtarget << EOF\n')
f.write(radec_str+'\n')
f.write('s\n')
f.write(queue_id_str+'\n')
f.write(name_str+'\n')
f.write('EOF\n')
f.close()
print "New target script written to ",fname
queue_id_num = int(queue_id_str)
return queue_id_num
# Write the rts2-newtarget script using args passed in rather than
# interactive. Rather than writing to a file, pass back a string
# that has newlines in it.
def make_newtarget_args(name,ra,dec,queue_id_num):
cmd = 'rts2-newtarget << EOF\n'
radec_line = '%s %s\n' % (ra,dec)
id_line = '%s\n' % (queue_id_num)
name_line = '%s\n' % (name)
cmd = cmd + radec_line
cmd = cmd + 's\n'
cmd = cmd + id_line
cmd = cmd + name_line
cmd = cmd + 'EOF\n'
return cmd
def set_target_defaults():
lunardist = '20:'
airmass = ':2.2'
camera = 'C0'
offset = 'BIG61.OFFS=(1m,0)'
cmd = 'rts2-target'
cmd = cmd + ' -c ' + camera
cmd = cmd + ' --lunarDistance ' + lunardist
cmd = cmd + ' --airmass ' + airmass
cmd = cmd + ' -s "' + offset
return cmd
# Write an observing command, prompting for filter, exptime, n_exp.
# Lunar-distance and airmass limits are set by default; the initial
# offset is always 1m with no dithering, which is not ideal.
def make_obs_script_prompt(queue_id_num):
# Don't use filter names yet, until order is verified.
# filter_names = set_filter_dict()
filter_names = None
cmd = set_target_defaults()
print 'Filter number is its order in the filter wheel GUI starting at 0; possibly U,R,B,V,I,Schott'
exp_string = ''
while True:
obs_str = raw_input('Enter filter number, exptime, num. of exp [hit return to stop]: ')
if obs_str.strip() == '':
break
fields = obs_str.split()
if filter_names is not None:
try:
filtnum = filter_names[fields[0]]
except (KeyError, ValueError):
filtnum = int(fields[0])
else:
filtnum = int(fields[0])
exptime = float(fields[1])
nexp = int(fields[2])
# Truncating exptime to an integer for now
one_exp = ' filter=%1i E %i' % (filtnum, exptime)
for i in range(nexp):
exp_string = exp_string + one_exp
# end loop
cmd = cmd + exp_string
queue_id_str = '%i' % (queue_id_num)
cmd = cmd + '" ' + queue_id_str
print "Command with script is:"
print cmd
return cmd
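# With the defaults and a single entry "1 30 3" at queue id 1200, the
# printed command looks like (illustrative):
# rts2-target -c C0 --lunarDistance 20: --airmass :2.2 -s "BIG61.OFFS=(1m,0) filter=1 E 30 filter=1 E 30 filter=1 E 30" 1200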
# Make the obs script given the queue id and a list of
# [filtnum1, exptime1, n_exp1, ...]
def make_obs_script(queue_id_num, explist, filter_names=None):
cmd = set_target_defaults()
exp_string = ''
# Need 3 entries for each filter
nfilts = int(len(explist) / 3)
for i in range(nfilts):
j = i * 3
if filter_names is not None:
# here explist[j] is a string with the filter name, eg 'R' or '1';
# try the name lookup and fall back to treating it as a filter
# number (the fallback still fails if it is not an integer).
try:
filtnum = filter_names[explist[j]]
except (KeyError, ValueError):
filtnum = int(explist[j])
else:
filtnum = int(explist[j])
exptime = float(explist[j+1])
nexp = int(explist[j+2])
# Truncating exptime to an integer for now
one_exp = ' filter=%1i E %i' % (filtnum, exptime)
for k in range(nexp):
exp_string = exp_string + one_exp
# end loop
cmd = cmd + exp_string
queue_id_str = '%i' % (queue_id_num)
cmd = cmd + '" ' + queue_id_str
return cmd
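# Example (filter numbers given directly, no name lookup; values illustrative):
# cmd = make_obs_script(1200, ['1', '30', '3', '4', '60', '2'])
# -> 3 x 30s exposures in filter 1 and 2 x 60s in filter 4 under queue id 1200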
# Add object number N to a queue
def add_object_queue(queue_name, queue_id_num):
cmd = 'rts2-queue --queue %s %i' % (queue_name, queue_id_num)
print cmd
return cmd
# Run all commands: make the target, make its script, add to queue
# write the commands to a file
def make_obs_interactive(fname):
queue_id_num = make_newtarget_prompt(fname)
cmd = make_obs_script_prompt(queue_id_num)
f = open(fname,'a')
f.write(cmd+'\n')
queue_name = 'plan'
cmd2 = add_object_queue(queue_name, queue_id_num)
f.write(cmd2+'\n')
f.write('\n')
f.close()
print "File ",fname," now has rts2 commands appended"
return
# Read a line of
# name ra dec queue-id filter_num1 exptime1 n_exp1 filter_num2 exptime2 n_exp2 ...
# and write commands
def make_commands_fromfile(cline,filter_names=None):
fields = cline.split()
if len(fields) < 7:
print "Line for ", fields[0], " doesn't have enough data"
return 0, '', '', ''
name = fields[0]
ra = fields[1]
dec = fields[2]
queue_id_num = int(fields[3])
expstuff = fields[4:]
cmd1 = make_newtarget_args(name,ra,dec,queue_id_num)
cmd2 = make_obs_script(queue_id_num,expstuff,filter_names=filter_names)
queue_name = 'plan'
cmd3 = add_object_queue(queue_name, queue_id_num)
return queue_id_num, cmd1, cmd2, cmd3
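# A target line like (values illustrative):
# Landolt_92_249 12:34:56 +12:34:56 1200 1 30 3 4 60 2
# yields the newtarget here-document, the rts2-target script command and
# the rts2-queue command.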
# Read the targets and info from a file
def make_obs_fromfile(inputname,fname):
filters = set_filter_dict()
infile = open(inputname,'r')
f = open(fname,'a')
for line in infile:
# skip empty lines and comment lines
if (line.strip() != '' and line[0] != '#'):
# Don't use the filter dict until we confirm filter order.
# queue_id_num, cmd1, cmd2, cmd3 = make_commands_fromfile(line,filter_names=filters)
queue_id_num, cmd1, cmd2, cmd3 = make_commands_fromfile(line)
f.write(cmd1+'\n')
f.write(cmd2+'\n')
# queue_name = 'plan'
f.write(cmd3+'\n')
f.write('\n')
# end for loop
infile.close()
f.close()
print "File ",fname," now has rts2 commands appended"
return
def main():
inputname = raw_input('Enter file with targets, or hit return to enter interactively: ')
fname = raw_input('Enter filename to write rts2 commands: ')
if inputname.strip() == '':
make_obs_interactive(fname)
else:
print 'Target file format is: name ra dec queue-id filter_num1 exptime1 n_exp1 filter_num2 exptime2 n_exp2 ...'
make_obs_fromfile(inputname,fname)
print 'Execute the file with: sh', fname, 'or similar'
return
# This is the standard boilerplate that calls the main() function.
if __name__ == '__main__':
main()
| {
"content_hash": "2b0368e7e825bf63f7aca3f0e2366233",
"timestamp": "",
"source": "github",
"line_count": 317,
"max_line_length": 122,
"avg_line_length": 33.29652996845426,
"alnum_prop": 0.5894836570345807,
"repo_name": "bjweiner/ARTN",
"id": "eb13411b01592cde4b82323d802753e127474b87",
"size": "11368",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/make_newobs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Common Lisp",
"bytes": "10318"
},
{
"name": "HTML",
"bytes": "4619"
},
{
"name": "Jupyter Notebook",
"bytes": "55937"
},
{
"name": "Python",
"bytes": "82986"
},
{
"name": "TeX",
"bytes": "99492"
}
],
"symlink_target": ""
} |
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from .test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = BertTokenizer
rust_tokenizer_class = BertTokenizerFast
test_rust_tokenizer = True
space_between_special_tokens = True
from_pretrained_filter = filter_non_english
def setUp(self):
super().setUp()
vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def get_input_output_texts(self, tokenizer):
input_text = "UNwant\u00E9d,running"
output_text = "unwanted, running"
return input_text, output_text
def test_full_tokenizer(self):
tokenizer = self.tokenizer_class(self.vocab_file)
tokens = tokenizer.tokenize("UNwant\u00E9d,running")
self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
def test_rust_and_python_full_tokenizers(self):
if not self.test_rust_tokenizer:
return
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
sequence = "UNwant\u00E9d,running"
tokens = tokenizer.tokenize(sequence)
rust_tokens = rust_tokenizer.tokenize(sequence)
self.assertListEqual(tokens, rust_tokens)
ids = tokenizer.encode(sequence, add_special_tokens=False)
rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
self.assertListEqual(ids, rust_ids)
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence)
rust_ids = rust_tokenizer.encode(sequence)
self.assertListEqual(ids, rust_ids)
# With lower casing
tokenizer = self.get_tokenizer(do_lower_case=True)
rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)
sequence = "UNwant\u00E9d,running"
tokens = tokenizer.tokenize(sequence)
rust_tokens = rust_tokenizer.tokenize(sequence)
self.assertListEqual(tokens, rust_tokens)
ids = tokenizer.encode(sequence, add_special_tokens=False)
rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
self.assertListEqual(ids, rust_ids)
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence)
rust_ids = rust_tokenizer.encode(sequence)
self.assertListEqual(ids, rust_ids)
def test_chinese(self):
tokenizer = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
def test_basic_tokenizer_lower(self):
tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_lower_strip_accents_false(self):
tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
def test_basic_tokenizer_lower_strip_accents_true(self):
tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_lower_strip_accents_default(self):
tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_no_lower(self):
tokenizer = BasicTokenizer(do_lower_case=False)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_no_lower_strip_accents_false(self):
tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_no_lower_strip_accents_true(self):
tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_respects_never_split_tokens(self):
tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
)
def test_wordpiece_tokenizer(self):
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
vocab = {}
for (i, token) in enumerate(vocab_tokens):
vocab[token] = i
tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize(""), [])
self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
def test_is_whitespace(self):
self.assertTrue(_is_whitespace(" "))
self.assertTrue(_is_whitespace("\t"))
self.assertTrue(_is_whitespace("\r"))
self.assertTrue(_is_whitespace("\n"))
self.assertTrue(_is_whitespace("\u00A0"))
self.assertFalse(_is_whitespace("A"))
self.assertFalse(_is_whitespace("-"))
def test_is_control(self):
self.assertTrue(_is_control("\u0005"))
self.assertFalse(_is_control("A"))
self.assertFalse(_is_control(" "))
self.assertFalse(_is_control("\t"))
self.assertFalse(_is_control("\r"))
def test_is_punctuation(self):
self.assertTrue(_is_punctuation("-"))
self.assertTrue(_is_punctuation("$"))
self.assertTrue(_is_punctuation("`"))
self.assertTrue(_is_punctuation("."))
self.assertFalse(_is_punctuation("A"))
self.assertFalse(_is_punctuation(" "))
def test_clean_text(self):
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
# Example taken from https://github.com/huggingface/tokenizers/issues/340:
# the soft hyphen "\xad" should be cleaned away and produce no tokens
self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
self.assertListEqual(
[rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
)
@slow
def test_sequence_builders(self):
tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")
text = tokenizer.encode("sequence builders", add_special_tokens=False)
text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_2 + [102]
def test_offsets_with_special_characters(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
tokens = tokenizer_r.encode_plus(
sentence,
return_attention_mask=False,
return_token_type_ids=False,
return_offsets_mapping=True,
add_special_tokens=True,
)
do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
)
self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
| {
"content_hash": "6f0c05d8afbfa8aea02649f5379655d4",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 116,
"avg_line_length": 38.382456140350875,
"alnum_prop": 0.5537983362281744,
"repo_name": "huggingface/pytorch-transformers",
"id": "3b8dced0ab4a98af38fba75b9ad8eab1b49010d5",
"size": "11571",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_tokenization_bert.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "194"
},
{
"name": "Jupyter Notebook",
"bytes": "535623"
},
{
"name": "Python",
"bytes": "897445"
}
],
"symlink_target": ""
} |
import sys
import time
import logging
from lbrynet.core import log_support
from lbrynet.core.client.ClientRequest import ClientRequest
from lbrynet.core.server.ServerProtocol import ServerProtocol
from lbrynet.core.client.ClientProtocol import ClientProtocol
from lbrynet.core.RateLimiter import RateLimiter
from lbrynet.core.Peer import Peer
from lbrynet.core.PeerManager import PeerManager
from lbrynet.core.Error import ConnectionClosedBeforeResponseError, NoResponseError
from twisted.trial import unittest
from twisted.internet import defer, reactor, task
from twisted.internet.task import deferLater
from twisted.internet.protocol import Protocol, ServerFactory
from lbrynet import conf
from lbrynet.core import utils
from lbrynet.interfaces import IQueryHandlerFactory, IQueryHandler, IRequestCreator
from zope.interface import implements
PEER_PORT = 5551
LOCAL_HOST = '127.0.0.1'
class MocDownloader(object):
def insufficient_funds(self):
pass
class MocRequestCreator(object):
implements(IRequestCreator)
def __init__(self, peers_to_return, peers_to_return_head_blob=[]):
self.peers_to_return = peers_to_return
self.peers_to_return_head_blob = peers_to_return_head_blob
self.sent_request = False
def send_next_request(self, peer, protocol):
if self.sent_request is True:
return defer.succeed(False)
response_identifier = 'moc_request'
r_dict = {'moc_request':0}
request = ClientRequest(r_dict, response_identifier)
d = protocol.add_request(request) # ClientRequest here
d.addErrback(self.request_err, peer)
d.addCallback(self.request_success)
self.sent_request = True
return defer.succeed(True)
def request_success(self, suc):
pass
def request_err(self, err, peer):
if isinstance(err.value, NoResponseError):
return err
def get_new_peers_for_next_unavailable(self):
return self.peers_to_return
def get_new_peers_for_head_blob(self):
return self.peers_to_return_head_blob
class MocFunctionalQueryHandler(object):
implements(IQueryHandler)
def __init__(self, clock, is_good=True, is_delayed=False):
self.query_identifiers = ['moc_request']
self.is_good = is_good
self.is_delayed = is_delayed
self.clock = clock
def register_with_request_handler(self, request_handler, peer):
request_handler.register_query_handler(self, self.query_identifiers)
def handle_queries(self, queries):
if self.query_identifiers[0] in queries:
if self.is_delayed:
delay = ClientProtocol.PROTOCOL_TIMEOUT+1
out = deferLater(self.clock, delay, lambda: {'moc_request':0})
self.clock.advance(delay)
return out
if self.is_good:
return defer.succeed({'moc_request':0})
else:
return defer.succeed({'bad_request':0})
else:
return defer.succeed({})
class MocQueryHandlerFactory(object):
implements(IQueryHandlerFactory)
# if is_good, the query handler works as expected;
# if is_delayed, the query handler will delay its response
def __init__(self, clock, is_good=True, is_delayed=False):
self.is_good = is_good
self.is_delayed = is_delayed
self.clock = clock
def build_query_handler(self):
return MocFunctionalQueryHandler(self.clock, self.is_good, self.is_delayed)
def get_primary_query_identifier(self):
return 'moc_query'
def get_description(self):
return "This is a Moc Query"
class MocServerProtocolFactory(ServerFactory):
protocol = ServerProtocol
def __init__(self, clock, is_good=True, is_delayed=False, has_moc_query_handler=True):
self.rate_limiter = RateLimiter()
query_handler_factory = MocQueryHandlerFactory(clock, is_good, is_delayed)
if has_moc_query_handler:
self.query_handler_factories = {
query_handler_factory.get_primary_query_identifier():query_handler_factory
}
else:
self.query_handler_factories = {}
self.peer_manager = PeerManager()
class TestIntegrationConnectionManager(unittest.TestCase):
def setUp(self):
conf.initialize_settings()
self.TEST_PEER = Peer(LOCAL_HOST, PEER_PORT)
self.downloader = MocDownloader()
self.rate_limiter = RateLimiter()
self.primary_request_creator = MocRequestCreator([self.TEST_PEER])
self.clock = task.Clock()
utils.call_later = self.clock.callLater
self.server_port = None
def _init_connection_manager(self, seek_head_blob_first=False):
# this import is required here so utils.call_later is replaced by self.clock.callLater
from lbrynet.core.client.ConnectionManager import ConnectionManager
self.connection_manager = ConnectionManager(self.downloader, self.rate_limiter,
[self.primary_request_creator], [])
self.connection_manager.seek_head_blob_first = seek_head_blob_first
self.connection_manager._start()
def tearDown(self):
if self.server_port is not None:
self.server_port.stopListening()
self.connection_manager.stop()
conf.settings = None
@defer.inlineCallbacks
def test_success(self):
self._init_connection_manager()
# test to see that if we setup a server, we get a connection
self.server = MocServerProtocolFactory(self.clock)
self.server_port = reactor.listenTCP(PEER_PORT, self.server, interface=LOCAL_HOST)
yield self.connection_manager.manage(schedule_next_call=False)
self.assertEqual(1, self.connection_manager.num_peer_connections())
connection_made = yield self.connection_manager._peer_connections[self.TEST_PEER].factory.connection_was_made_deferred
self.assertEqual(0, self.connection_manager.num_peer_connections())
self.assertTrue(connection_made)
self.assertEqual(1, self.TEST_PEER.success_count)
self.assertEqual(0, self.TEST_PEER.down_count)
@defer.inlineCallbacks
def test_server_with_improper_reply(self):
self._init_connection_manager()
self.server = MocServerProtocolFactory(self.clock, is_good=False)
self.server_port = reactor.listenTCP(PEER_PORT, self.server, interface=LOCAL_HOST)
yield self.connection_manager.manage(schedule_next_call=False)
self.assertEqual(1, self.connection_manager.num_peer_connections())
connection_made = yield self.connection_manager._peer_connections[self.TEST_PEER].factory.connection_was_made_deferred
self.assertEqual(0, self.connection_manager.num_peer_connections())
self.assertTrue(connection_made)
self.assertEqual(0, self.TEST_PEER.success_count)
self.assertEqual(1, self.TEST_PEER.down_count)
@defer.inlineCallbacks
def test_non_existing_server(self):
# Test to see that if we don't setup a server, we don't get a connection
self._init_connection_manager()
yield self.connection_manager.manage(schedule_next_call=False)
self.assertEqual(1, self.connection_manager.num_peer_connections())
connection_made = yield self.connection_manager._peer_connections[self.TEST_PEER].factory.connection_was_made_deferred
self.assertEqual(0, self.connection_manager.num_peer_connections())
self.assertFalse(connection_made)
self.assertEqual(0, self.connection_manager.num_peer_connections())
self.assertEqual(0, self.TEST_PEER.success_count)
self.assertEqual(1, self.TEST_PEER.down_count)
@unittest.SkipTest
@defer.inlineCallbacks
def test_parallel_connections(self):
# Test to see that we make two new connections at a manage call,
# without it waiting for the connection to complete
self._init_connection_manager()
test_peer2 = Peer(LOCAL_HOST, PEER_PORT+1)
self.primary_request_creator.peers_to_return = [self.TEST_PEER, test_peer2]
yield self.connection_manager.manage(schedule_next_call=False)
self.assertEqual(2, self.connection_manager.num_peer_connections())
self.assertIn(self.TEST_PEER, self.connection_manager._peer_connections)
self.assertIn(test_peer2, self.connection_manager._peer_connections)
connection_made = yield self.connection_manager._peer_connections[self.TEST_PEER].factory.connection_was_made_deferred
self.assertFalse(connection_made)
self.assertEqual(1, self.connection_manager.num_peer_connections())
self.assertEqual(0, self.TEST_PEER.success_count)
self.assertEqual(1, self.TEST_PEER.down_count)
connection_made = yield self.connection_manager._peer_connections[test_peer2].factory.connection_was_made_deferred
self.assertFalse(connection_made)
self.assertEqual(0, self.connection_manager.num_peer_connections())
self.assertEqual(0, test_peer2.success_count)
self.assertEqual(1, test_peer2.down_count)
@defer.inlineCallbacks
def test_stop(self):
# test to see that when we call stop, the ConnectionManager waits for the
# current manage call to finish, closes connections,
# and removes scheduled manage calls
self._init_connection_manager()
self.connection_manager.manage(schedule_next_call=True)
yield self.connection_manager.stop()
self.assertEqual(0, self.TEST_PEER.success_count)
self.assertEqual(1, self.TEST_PEER.down_count)
self.assertEqual(0, self.connection_manager.num_peer_connections())
self.assertEqual(None, self.connection_manager._next_manage_call)
@defer.inlineCallbacks
def test_closed_connection_when_server_is_slow(self):
self._init_connection_manager()
self.server = MocServerProtocolFactory(self.clock, has_moc_query_handler=True, is_delayed=True)
self.server_port = reactor.listenTCP(PEER_PORT, self.server, interface=LOCAL_HOST)
yield self.connection_manager.manage(schedule_next_call=False)
self.assertEqual(1, self.connection_manager.num_peer_connections())
connection_made = yield self.connection_manager._peer_connections[self.TEST_PEER].factory.connection_was_made_deferred
self.assertEqual(0, self.connection_manager.num_peer_connections())
self.assertEqual(True, connection_made)
self.assertEqual(0, self.TEST_PEER.success_count)
self.assertEqual(1, self.TEST_PEER.down_count)
""" test header first seeks """
@defer.inlineCallbacks
def test_no_peer_for_head_blob(self):
# test that if we can't find blobs for the head blob,
# it looks at the next unavailable and makes connection
self._init_connection_manager(seek_head_blob_first=True)
self.server = MocServerProtocolFactory(self.clock)
self.server_port = reactor.listenTCP(PEER_PORT, self.server, interface=LOCAL_HOST)
self.primary_request_creator.peers_to_return_head_blob = []
self.primary_request_creator.peers_to_return = [self.TEST_PEER]
yield self.connection_manager.manage(schedule_next_call=False)
self.assertEqual(1, self.connection_manager.num_peer_connections())
connection_made = yield self.connection_manager._peer_connections[self.TEST_PEER].factory.connection_was_made_deferred
self.assertEqual(0, self.connection_manager.num_peer_connections())
self.assertTrue(connection_made)
self.assertEqual(1, self.TEST_PEER.success_count)
self.assertEqual(0, self.TEST_PEER.down_count)
| {
"content_hash": "9b5eb4db457562ec966ebb029ea393e6",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 126,
"avg_line_length": 44.88931297709924,
"alnum_prop": 0.6957741688631919,
"repo_name": "zestyr/lbry",
"id": "4fa508d194a4b878cc0cea45452a16e8996e9f64",
"size": "11761",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/core/client/test_ConnectionManager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PowerShell",
"bytes": "1080"
},
{
"name": "Python",
"bytes": "1034464"
},
{
"name": "Ruby",
"bytes": "309"
},
{
"name": "Shell",
"bytes": "2881"
}
],
"symlink_target": ""
} |