sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def get_members_in_state(self, state):
"""return all members of replica set in specific state"""
members = self.run_command(command='replSetGetStatus', is_eval=False)['members']
return [member['name'] for member in members if member['state'] == state] | return all members of replica set in specific state | entailment |
def _authenticate_client(self, client):
"""Authenticate the client if necessary."""
if self.login and not self.restart_required:
try:
db = client[self.auth_source]
if self.x509_extra_user:
db.authenticate(
DEFAULT_SUBJECT,
mechanism='MONGODB-X509'
)
else:
db.authenticate(
self.login, self.password)
except Exception:
logger.exception(
"Could not authenticate to %r as %s/%s"
% (client, self.login, self.password))
raise | Authenticate the client if necessary. | entailment |
def connection(self, hostname=None, read_preference=pymongo.ReadPreference.PRIMARY, timeout=300):
"""return MongoReplicaSetClient object if hostname specified
return MongoClient object if hostname doesn't specified
Args:
hostname - connection uri
read_preference - default PRIMARY
timeout - specify how long, in seconds, a command can take before server times out.
"""
logger.debug("connection({hostname}, {read_preference}, {timeout})".format(**locals()))
t_start = time.time()
servers = hostname or ",".join(self.server_map.values())
while True:
try:
if hostname is None:
c = pymongo.MongoReplicaSetClient(
servers, replicaSet=self.repl_id,
read_preference=read_preference,
socketTimeoutMS=self.socket_timeout,
w=self._write_concern, fsync=True, **self.kwargs)
connected(c)
if c.primary:
self._authenticate_client(c)
return c
raise pymongo.errors.AutoReconnect("No replica set primary available")
else:
logger.debug("connection to the {servers}".format(**locals()))
c = pymongo.MongoClient(
servers, socketTimeoutMS=self.socket_timeout,
w=self._write_concern, fsync=True, **self.kwargs)
connected(c)
self._authenticate_client(c)
return c
except (pymongo.errors.PyMongoError):
exc_type, exc_value, exc_tb = sys.exc_info()
err_message = traceback.format_exception(exc_type, exc_value, exc_tb)
logger.error("Exception {exc_type} {exc_value}".format(**locals()))
logger.error(err_message)
if time.time() - t_start > timeout:
raise pymongo.errors.AutoReconnect("Couldn't connect while timeout {timeout} second".format(**locals()))
time.sleep(1) | return MongoReplicaSetClient object if hostname specified
return MongoClient object if hostname doesn't specified
Args:
hostname - connection uri
read_preference - default PRIMARY
timeout - specify how long, in seconds, a command can take before server times out. | entailment |
def secondaries(self):
"""return list of secondaries members"""
return [
{
"_id": self.host2id(member),
"host": member,
"server_id": self._servers.host_to_server_id(member)
}
for member in self.get_members_in_state(2)
] | return list of secondaries members | entailment |
def arbiters(self):
"""return list of arbiters"""
return [
{
"_id": self.host2id(member),
"host": member,
"server_id": self._servers.host_to_server_id(member)
}
for member in self.get_members_in_state(7)
] | return list of arbiters | entailment |
def hidden(self):
"""return list of hidden members"""
members = [self.member_info(item["_id"]) for item in self.members()]
result = []
for member in members:
if member['rsInfo'].get('hidden'):
server_id = member['server_id']
result.append({
'_id': member['_id'],
'host': self._servers.hostname(server_id),
'server_id': server_id})
return result | return list of hidden members | entailment |
def passives(self):
"""return list of passive servers"""
servers = self.run_command('ismaster').get('passives', [])
return [member for member in self.members() if member['host'] in servers] | return list of passive servers | entailment |
def wait_while_reachable(self, servers, timeout=60):
"""wait while all servers be reachable
Args:
servers - list of servers
"""
t_start = time.time()
while True:
try:
for server in servers:
# TODO: use state code to check if server is reachable
server_info = self.connection(
hostname=server, timeout=5).admin.command('ismaster')
logger.debug("server_info: {server_info}".format(server_info=server_info))
if int(server_info['ok']) != 1:
raise pymongo.errors.OperationFailure("{server} is not reachable".format(**locals))
return True
except (KeyError, AttributeError, pymongo.errors.AutoReconnect, pymongo.errors.OperationFailure):
if time.time() - t_start > timeout:
return False
time.sleep(0.1) | wait while all servers be reachable
Args:
servers - list of servers | entailment |
def waiting_member_state(self, timeout=300):
"""Wait for all RS members to be in an acceptable state."""
t_start = time.time()
while not self.check_member_state():
if time.time() - t_start > timeout:
return False
time.sleep(0.1)
return True | Wait for all RS members to be in an acceptable state. | entailment |
def waiting_config_state(self, timeout=300):
"""waiting while real state equal config state
Args:
timeout - specify how long, in seconds, a command can take before server times out.
return True if operation success otherwise False
"""
t_start = time.time()
while not self.check_config_state():
if time.time() - t_start > timeout:
return False
time.sleep(0.1)
return True | waiting while real state equal config state
Args:
timeout - specify how long, in seconds, a command can take before server times out.
return True if operation success otherwise False | entailment |
def check_member_state(self):
"""Verify that all RS members have an acceptable state."""
bad_states = (0, 3, 4, 5, 6, 9)
try:
rs_status = self.run_command('replSetGetStatus')
bad_members = [member for member in rs_status['members']
if member['state'] in bad_states]
if bad_members:
return False
except pymongo.errors.AutoReconnect:
# catch 'No replica set primary available' Exception
return False
logger.debug("all members in correct state")
return True | Verify that all RS members have an acceptable state. | entailment |
def check_config_state(self):
"""Return True if real state equal config state otherwise False."""
config = self.config
self.update_server_map(config)
for member in config['members']:
cfg_member_info = self.default_params.copy()
cfg_member_info.update(member)
# Remove attributes we can't check.
for attr in ('priority', 'votes', 'tags', 'buildIndexes'):
cfg_member_info.pop(attr, None)
cfg_member_info['host'] = cfg_member_info['host'].lower()
real_member_info = self.default_params.copy()
info = self.member_info(member["_id"])
real_member_info["_id"] = info['_id']
member_hostname = self._servers.hostname(info['server_id'])
real_member_info["host"] = member_hostname.lower()
real_member_info.update(info['rsInfo'])
logger.debug("real_member_info({member_id}): {info}".format(member_id=member['_id'], info=info))
for key in cfg_member_info:
if cfg_member_info[key] != real_member_info.get(key, None):
logger.debug("{key}: {value1} ! = {value2}".format(key=key, value1=cfg_member_info[key], value2=real_member_info.get(key, None)))
return False
return True | Return True if real state equal config state otherwise False. | entailment |
def restart(self, timeout=300, config_callback=None):
"""Restart each member of the replica set."""
for member_id in self.server_map:
host = self.server_map[member_id]
server_id = self._servers.host_to_server_id(host)
server = self._servers._storage[server_id]
server.restart(timeout, config_callback)
self.waiting_member_state() | Restart each member of the replica set. | entailment |
def set_settings(self, releases=None, default_release=None):
"""set path to storage"""
super(ReplicaSets, self).set_settings(releases, default_release)
Servers().set_settings(releases, default_release) | set path to storage | entailment |
def create(self, rs_params):
"""create new replica set
Args:
rs_params - replica set configuration
Return repl_id which can use to take the replica set
"""
repl_id = rs_params.get('id', None)
if repl_id is not None and repl_id in self:
raise ReplicaSetError(
"replica set with id={id} already exists".format(id=repl_id))
repl = ReplicaSet(rs_params)
self[repl.repl_id] = repl
return repl.repl_id | create new replica set
Args:
rs_params - replica set configuration
Return repl_id which can use to take the replica set | entailment |
def primary(self, repl_id):
"""find and return primary hostname
Args:
repl_id - replica set identity
"""
repl = self[repl_id]
primary = repl.primary()
return repl.member_info(repl.host2id(primary)) | find and return primary hostname
Args:
repl_id - replica set identity | entailment |
def remove(self, repl_id):
"""remove replica set with kill members
Args:
repl_id - replica set identity
return True if operation success otherwise False
"""
repl = self._storage.pop(repl_id)
repl.cleanup()
del(repl) | remove replica set with kill members
Args:
repl_id - replica set identity
return True if operation success otherwise False | entailment |
def command(self, rs_id, command, *args):
"""Call a ReplicaSet method."""
rs = self._storage[rs_id]
try:
return getattr(rs, command)(*args)
except AttributeError:
raise ValueError("Cannot issue the command %r to ReplicaSet %s"
% (command, rs_id)) | Call a ReplicaSet method. | entailment |
def member_del(self, repl_id, member_id):
"""remove member from replica set (reconfig replica)
Args:
repl_id - replica set identity
member_id - member index
"""
repl = self[repl_id]
result = repl.member_del(member_id)
self[repl_id] = repl
return result | remove member from replica set (reconfig replica)
Args:
repl_id - replica set identity
member_id - member index | entailment |
def member_add(self, repl_id, params):
"""create instance and add it to existing replcia
Args:
repl_id - replica set identity
params - member params
return True if operation success otherwise False
"""
repl = self[repl_id]
member_id = repl.repl_member_add(params)
self[repl_id] = repl
return member_id | create instance and add it to existing replcia
Args:
repl_id - replica set identity
params - member params
return True if operation success otherwise False | entailment |
def member_command(self, repl_id, member_id, command):
"""apply command(start, stop, restart) to the member of replica set
Args:
repl_id - replica set identity
member_id - member index
command - command: start, stop, restart
return True if operation success otherwise False
"""
repl = self[repl_id]
result = repl.member_command(member_id, command)
self[repl_id] = repl
return result | apply command(start, stop, restart) to the member of replica set
Args:
repl_id - replica set identity
member_id - member index
command - command: start, stop, restart
return True if operation success otherwise False | entailment |
def member_update(self, repl_id, member_id, params):
"""apply new params to replica set member
Args:
repl_id - replica set identity
member_id - member index
params - new member's params
return True if operation success otherwise False
"""
repl = self[repl_id]
result = repl.member_update(member_id, params)
self[repl_id] = repl
return result | apply new params to replica set member
Args:
repl_id - replica set identity
member_id - member index
params - new member's params
return True if operation success otherwise False | entailment |
def key_file(self):
"""Get the path to the key file containig our auth key, or None."""
if self.auth_key:
key_file_path = os.path.join(orchestration_mkdtemp(), 'key')
with open(key_file_path, 'w') as fd:
fd.write(self.auth_key)
os.chmod(key_file_path, stat.S_IRUSR)
return key_file_path | Get the path to the key file containig our auth key, or None. | entailment |
def _strip_auth(self, proc_params):
"""Remove options from parameters that cause auth to be enabled."""
params = proc_params.copy()
params.pop("auth", None)
params.pop("clusterAuthMode", None)
return params | Remove options from parameters that cause auth to be enabled. | entailment |
def mongodb_auth_uri(self, hosts):
"""Get a connection string with all info necessary to authenticate."""
parts = ['mongodb://']
if self.login:
parts.append(self.login)
if self.password:
parts.append(':' + self.password)
parts.append('@')
parts.append(hosts + '/')
if self.login:
parts.append('?authSource=' + self.auth_source)
if self.x509_extra_user:
parts.append('&authMechanism=MONGODB-X509')
return ''.join(parts) | Get a connection string with all info necessary to authenticate. | entailment |
def _add_users(self, db, mongo_version):
"""Add given user, and extra x509 user if necessary."""
if self.x509_extra_user:
# Build dict of kwargs to pass to add_user.
auth_dict = {
'name': DEFAULT_SUBJECT,
'roles': self._user_roles(db.client)
}
db.add_user(**auth_dict)
# Fix kwargs to MongoClient.
self.kwargs['ssl_certfile'] = DEFAULT_CLIENT_CERT
# Add secondary user given from request.
secondary_login = {
'name': self.login,
'roles': self._user_roles(db.client)
}
if self.password:
secondary_login['password'] = self.password
if mongo_version >= (3, 7, 2):
# Use SCRAM_SHA-1 so that pymongo < 3.7 can authenticate.
secondary_login['mechanisms'] = ['SCRAM-SHA-1']
db.add_user(**secondary_login) | Add given user, and extra x509 user if necessary. | entailment |
def base_link(rel, self_rel=False):
"""Helper for getting a link document under the API root, given a rel."""
link = _BASE_LINKS[rel].copy()
link['rel'] = 'self' if self_rel else rel
return link | Helper for getting a link document under the API root, given a rel. | entailment |
def all_base_links(rel_to=None):
"""Get a list of all links to be included to base (/) API requests."""
links = [
base_link('get-releases'),
base_link('service'),
server_link('get-servers'),
server_link('add-server'),
replica_set_link('add-replica-set'),
replica_set_link('get-replica-sets'),
sharded_cluster_link('add-sharded-cluster'),
sharded_cluster_link('get-sharded-clusters')
]
for link in links:
if link['rel'] == rel_to:
link['rel'] = 'self'
return links | Get a list of all links to be included to base (/) API requests. | entailment |
def server_link(rel, server_id=None, self_rel=False):
"""Helper for getting a Server link document, given a rel."""
servers_href = '/v1/servers'
link = _SERVER_LINKS[rel].copy()
link['href'] = link['href'].format(**locals())
link['rel'] = 'self' if self_rel else rel
return link | Helper for getting a Server link document, given a rel. | entailment |
def all_server_links(server_id, rel_to=None):
"""Get a list of all links to be included with Servers."""
return [
server_link(rel, server_id, self_rel=(rel == rel_to))
for rel in ('delete-server', 'get-server-info', 'server-command')
] | Get a list of all links to be included with Servers. | entailment |
def replica_set_link(rel, repl_id=None, member_id=None, self_rel=False):
"""Helper for getting a ReplicaSet link document, given a rel."""
repls_href = '/v1/replica_sets'
link = _REPLICA_SET_LINKS[rel].copy()
link['href'] = link['href'].format(**locals())
link['rel'] = 'self' if self_rel else rel
return link | Helper for getting a ReplicaSet link document, given a rel. | entailment |
def all_replica_set_links(rs_id, rel_to=None):
"""Get a list of all links to be included with replica sets."""
return [
replica_set_link(rel, rs_id, self_rel=(rel == rel_to))
for rel in (
'get-replica-set-info',
'delete-replica-set', 'replica-set-command',
'get-replica-set-members', 'add-replica-set-member',
'get-replica-set-secondaries', 'get-replica-set-primary',
'get-replica-set-arbiters', 'get-replica-set-hidden-members',
'get-replica-set-passive-members', 'get-replica-set-servers'
)
] | Get a list of all links to be included with replica sets. | entailment |
def sharded_cluster_link(rel, cluster_id=None,
shard_id=None, router_id=None, self_rel=False):
"""Helper for getting a ShardedCluster link document, given a rel."""
clusters_href = '/v1/sharded_clusters'
link = _SHARDED_CLUSTER_LINKS[rel].copy()
link['href'] = link['href'].format(**locals())
link['rel'] = 'self' if self_rel else rel
return link | Helper for getting a ShardedCluster link document, given a rel. | entailment |
def all_sharded_cluster_links(cluster_id, shard_id=None,
router_id=None, rel_to=None):
"""Get a list of all links to be included with ShardedClusters."""
return [
sharded_cluster_link(rel, cluster_id, shard_id, router_id,
self_rel=(rel == rel_to))
for rel in (
'get-sharded-clusters', 'get-sharded-cluster-info',
'sharded-cluster-command', 'delete-sharded-cluster',
'add-shard', 'get-shards', 'get-configsvrs',
'get-routers', 'add-router'
)
] | Get a list of all links to be included with ShardedClusters. | entailment |
def cleanup_storage(*args):
"""Clean up processes after SIGTERM or SIGINT is received."""
ShardedClusters().cleanup()
ReplicaSets().cleanup()
Servers().cleanup()
sys.exit(0) | Clean up processes after SIGTERM or SIGINT is received. | entailment |
def read_env():
"""return command-line arguments"""
parser = argparse.ArgumentParser(description='mongo-orchestration server')
parser.add_argument('-f', '--config',
action='store', default=None, type=str, dest='config')
parser.add_argument('-e', '--env',
action='store', type=str, dest='env', default=None)
parser.add_argument(action='store', type=str, dest='command',
default='start', choices=('start', 'stop', 'restart'))
parser.add_argument('--no-fork',
action='store_true', dest='no_fork', default=False)
parser.add_argument('-b', '--bind',
action='store', dest='bind', type=str,
default=DEFAULT_BIND)
parser.add_argument('-p', '--port',
action='store', dest='port', type=int,
default=DEFAULT_PORT)
parser.add_argument('--enable-majority-read-concern', action='store_true',
default=False)
parser.add_argument('-s', '--server',
action='store', dest='server', type=str,
default=DEFAULT_SERVER, choices=('cherrypy', 'wsgiref'))
parser.add_argument('--version', action='version',
version='Mongo Orchestration v' + __version__)
parser.add_argument('--socket-timeout-ms', action='store',
dest='socket_timeout',
type=int, default=DEFAULT_SOCKET_TIMEOUT)
parser.add_argument('--pidfile', action='store', type=str, dest='pidfile',
default=PID_FILE)
cli_args = parser.parse_args()
if cli_args.env and not cli_args.config:
print("Specified release '%s' without a config file" % cli_args.env)
sys.exit(1)
if cli_args.command == 'stop' or not cli_args.config:
return cli_args
try:
# read config
with open(cli_args.config, 'r') as fd:
config = json.loads(fd.read(), object_pairs_hook=SON)
if not 'releases' in config:
print("No releases defined in %s" % cli_args.config)
sys.exit(1)
releases = config['releases']
if cli_args.env is not None and cli_args.env not in releases:
print("Release '%s' is not defined in %s"
% (cli_args.env, cli_args.config))
sys.exit(1)
cli_args.releases = releases
return cli_args
except (IOError):
print("config file not found")
sys.exit(1)
except (ValueError):
print("config file is corrupted")
sys.exit(1) | return command-line arguments | entailment |
def setup(releases, default_release):
"""setup storages"""
from mongo_orchestration import set_releases, cleanup_storage
set_releases(releases, default_release)
signal.signal(signal.SIGTERM, cleanup_storage)
signal.signal(signal.SIGINT, cleanup_storage) | setup storages | entailment |
def get_app():
"""return bottle app that includes all sub-apps"""
from bottle import default_app
default_app.push()
for module in ("mongo_orchestration.apps.servers",
"mongo_orchestration.apps.replica_sets",
"mongo_orchestration.apps.sharded_clusters"):
__import__(module)
app = default_app.pop()
return app | return bottle app that includes all sub-apps | entailment |
def await_connection(host, port):
"""Wait for the mongo-orchestration server to accept connections."""
for i in range(CONNECT_ATTEMPTS):
try:
conn = socket.create_connection((host, port), CONNECT_TIMEOUT)
conn.close()
return True
except (IOError, socket.error):
time.sleep(1)
return False | Wait for the mongo-orchestration server to accept connections. | entailment |
def __init_config_params(self, config):
"""Conditionally enable options in the Server's config file."""
if self.version >= (2, 4):
params = config.get('setParameter', {})
# Set enableTestCommands by default but allow enableTestCommands:0.
params.setdefault('enableTestCommands', 1)
# Reduce transactionLifetimeLimitSeconds for faster driver testing.
if self.version >= (4, 1) and not self.is_mongos:
params.setdefault('transactionLifetimeLimitSeconds', 3)
# Increase transaction lock timeout to reduce the chance that tests
# fail with LockTimeout: "Unable to acquire lock {...} within 5ms".
if self.version >= (4, 0) and not self.is_mongos:
params.setdefault('maxTransactionLockRequestTimeoutMillis', 25)
config['setParameter'] = params
compressors = config.get('networkMessageCompressors')
if compressors is None:
if self.version >= (4, 1, 7):
# SERVER-38168 added zstd support in 4.1.7.
config['networkMessageCompressors'] = 'zstd,zlib,snappy,noop'
elif self.version >= (3, 5, 9):
# SERVER-27310 added zlib support in 3.5.9.
config['networkMessageCompressors'] = 'zlib,snappy,noop'
elif self.version >= (3, 4):
config['networkMessageCompressors'] = 'snappy,noop' | Conditionally enable options in the Server's config file. | entailment |
def connection(self):
"""return authenticated connection"""
c = pymongo.MongoClient(
self.hostname, fsync=True,
socketTimeoutMS=self.socket_timeout, **self.kwargs)
connected(c)
if not self.is_mongos and self.login and not self.restart_required:
db = c[self.auth_source]
if self.x509_extra_user:
auth_dict = {
'name': DEFAULT_SUBJECT, 'mechanism': 'MONGODB-X509'}
else:
auth_dict = {'name': self.login, 'password': self.password}
try:
db.authenticate(**auth_dict)
except:
logger.exception("Could not authenticate to %s with %r"
% (self.hostname, auth_dict))
raise
return c | return authenticated connection | entailment |
def version(self):
"""Get the version of MongoDB that this Server runs as a tuple."""
if not self.__version:
command = (self.name, '--version')
logger.debug(command)
stdout, _ = subprocess.Popen(
command, stdout=subprocess.PIPE).communicate()
version_output = str(stdout)
match = re.search(self.version_patt, version_output)
if match is None:
raise ServersError(
'Could not determine version of %s from string: %s'
% (self.name, version_output))
version_string = match.group('version')
self.__version = tuple(map(int, version_string.split('.')))
return self.__version | Get the version of MongoDB that this Server runs as a tuple. | entailment |
def run_command(self, command, arg=None, is_eval=False):
"""run command on the server
Args:
command - command string
arg - command argument
is_eval - if True execute command as eval
return command's result
"""
mode = is_eval and 'eval' or 'command'
if isinstance(arg, tuple):
name, d = arg
else:
name, d = arg, {}
result = getattr(self.connection.admin, mode)(command, name, **d)
return result | run command on the server
Args:
command - command string
arg - command argument
is_eval - if True execute command as eval
return command's result | entailment |
def info(self):
"""return info about server as dict object"""
proc_info = {"name": self.name,
"params": self.cfg,
"alive": self.is_alive,
"optfile": self.config_path}
if self.is_alive:
proc_info['pid'] = self.proc.pid
logger.debug("proc_info: {proc_info}".format(**locals()))
mongodb_uri = ''
server_info = {}
status_info = {}
if self.hostname and self.cfg.get('port', None):
try:
c = self.connection
server_info = c.server_info()
logger.debug("server_info: {server_info}".format(**locals()))
mongodb_uri = 'mongodb://' + self.hostname
status_info = {"primary": c.is_primary, "mongos": c.is_mongos}
logger.debug("status_info: {status_info}".format(**locals()))
except (pymongo.errors.AutoReconnect, pymongo.errors.OperationFailure, pymongo.errors.ConnectionFailure):
server_info = {}
status_info = {}
result = {"mongodb_uri": mongodb_uri, "statuses": status_info,
"serverInfo": server_info, "procInfo": proc_info,
"orchestration": 'servers'}
if self.login:
result['mongodb_auth_uri'] = self.mongodb_auth_uri(self.hostname)
logger.debug("return {result}".format(result=result))
return result | return info about server as dict object | entailment |
def start(self, timeout=300):
"""start server
return True of False"""
if self.is_alive:
return True
try:
dbpath = self.cfg.get('dbpath')
if dbpath and self._is_locked:
# repair if needed
logger.info("Performing repair on locked dbpath %s", dbpath)
process.repair_mongo(self.name, self.cfg['dbpath'])
self.proc, self.hostname = process.mprocess(
self.name, self.config_path, self.cfg.get('port', None),
timeout, self.silence_stdout)
self.pid = self.proc.pid
logger.debug("pid={pid}, hostname={hostname}".format(pid=self.pid, hostname=self.hostname))
self.host = self.hostname.split(':')[0]
self.port = int(self.hostname.split(':')[1])
# Wait for Server to respond to isMaster.
# Only try 6 times, each ConnectionFailure is 30 seconds.
max_attempts = 6
for i in range(max_attempts):
try:
self.run_command('isMaster')
break
except pymongo.errors.ConnectionFailure:
logger.exception('isMaster command failed:')
else:
raise TimeoutError(
"Server did not respond to 'isMaster' after %d attempts."
% max_attempts)
except (OSError, TimeoutError):
logpath = self.cfg.get('logpath')
if logpath:
# Copy the server logs into the mongo-orchestration logs.
logger.error(
"Could not start Server. Please find server log below.\n"
"=====================================================")
with open(logpath) as lp:
logger.error(lp.read())
else:
logger.exception(
'Could not start Server, and no logpath was provided!')
reraise(TimeoutError,
'Could not start Server. '
'Please check server log located in ' +
self.cfg.get('logpath', '<no logpath given>') +
' or the mongo-orchestration log in ' +
LOG_FILE + ' for more details.')
if self.restart_required:
if self.login:
# Add users to the appropriate database.
self._add_users()
self.stop()
# Restart with keyfile and auth.
if self.is_mongos:
self.config_path, self.cfg = self.__init_mongos(self.cfg)
else:
# Add auth options to this Server's config file.
self.config_path, self.cfg = self.__init_mongod(
self.cfg, add_auth=True)
self.restart_required = False
self.start()
return True | start server
return True of False | entailment |
def shutdown(self):
"""Send shutdown command and wait for the process to exit."""
# Return early if this server has already exited.
if not process.proc_alive(self.proc):
return
logger.info("Attempting to connect to %s", self.hostname)
client = self.connection
# Attempt the shutdown command twice, the first attempt might fail due
# to an election.
attempts = 2
for i in range(attempts):
logger.info("Attempting to send shutdown command to %s",
self.hostname)
try:
client.admin.command("shutdown", force=True)
except ConnectionFailure:
# A shutdown succeeds by closing the connection but a
# connection error does not necessarily mean that the shutdown
# has succeeded.
pass
# Wait for the server to exit otherwise rerun the shutdown command.
try:
return process.wait_mprocess(self.proc, 5)
except TimeoutError as exc:
logger.info("Timed out waiting on process: %s", exc)
continue
raise ServersError("Server %s failed to shutdown after %s attempts" %
(self.hostname, attempts)) | Send shutdown command and wait for the process to exit. | entailment |
def stop(self):
"""stop server"""
try:
self.shutdown()
except (PyMongoError, ServersError) as exc:
logger.info("Killing %s with signal, shutdown command failed: %r",
self.name, exc)
return process.kill_mprocess(self.proc) | stop server | entailment |
def restart(self, timeout=300, config_callback=None):
"""restart server: stop() and start()
return status of start command
"""
self.stop()
if config_callback:
self.cfg = config_callback(self.cfg.copy())
self.config_path = process.write_config(self.cfg)
return self.start(timeout) | restart server: stop() and start()
return status of start command | entailment |
def create(self, name, procParams, sslParams={},
auth_key=None, login=None, password=None,
auth_source='admin', timeout=300, autostart=True,
server_id=None, version=None):
"""create new server
Args:
name - process name or path
procParams - dictionary with specific params for instance
auth_key - authorization key
login - username for the admin collection
password - password
timeout - specify how long, in seconds, a command can take before times out.
autostart - (default: True), autostart instance
Return server_id
where server_id - id which can use to take the server from servers collection
"""
name = os.path.split(name)[1]
if server_id is None:
server_id = str(uuid4())
if server_id in self:
raise ServersError("Server with id %s already exists." % server_id)
bin_path = self.bin_path(version)
server = Server(os.path.join(bin_path, name), procParams, sslParams,
auth_key, login, password, auth_source)
if autostart:
server.start(timeout)
self[server_id] = server
return server_id | create new server
Args:
name - process name or path
procParams - dictionary with specific params for instance
auth_key - authorization key
login - username for the admin collection
password - password
timeout - specify how long, in seconds, a command can take before times out.
autostart - (default: True), autostart instance
Return server_id
where server_id - id which can use to take the server from servers collection | entailment |
def remove(self, server_id):
"""remove server and data stuff
Args:
server_id - server identity
"""
server = self._storage.pop(server_id)
server.stop()
server.cleanup() | remove server and data stuff
Args:
server_id - server identity | entailment |
def command(self, server_id, command, *args):
"""run command
Args:
server_id - server identity
command - command which apply to server
"""
server = self._storage[server_id]
try:
if args:
result = getattr(server, command)(*args)
else:
result = getattr(server, command)()
except AttributeError:
raise ValueError("Cannot issue the command %r to server %s"
% (command, server_id))
self._storage[server_id] = server
return result | run command
Args:
server_id - server identity
command - command which apply to server | entailment |
def info(self, server_id):
"""return dicionary object with info about server
Args:
server_id - server identity
"""
result = self._storage[server_id].info()
result['id'] = server_id
return result | return dicionary object with info about server
Args:
server_id - server identity | entailment |
def set_settings(self, releases=None, default_release=None):
"""set path to storage"""
if (self._storage is None or
getattr(self, 'releases', {}) != releases or
getattr(self, 'default_release', '') != default_release):
self._storage = {}
self.releases = releases or {}
self.default_release = default_release | set path to storage | entailment |
def bin_path(self, release=None):
"""Get the bin path for a particular release."""
if release:
for r in self.releases:
if release in r:
return self.releases[r]
raise MongoOrchestrationError("No such release '%s' in %r"
% (release, self.releases))
if self.default_release:
return self.releases[self.default_release]
if self.releases:
return list(self.releases.values())[0]
return '' | Get the bin path for a particular release. | entailment |
def __init_configrs(self, rs_cfg):
"""Create and start a config replica set."""
# Use 'rs_id' to set the id for consistency, but need to rename
# to 'id' to use with ReplicaSets.create()
rs_cfg['id'] = rs_cfg.pop('rs_id', None)
for member in rs_cfg.setdefault('members', [{}]):
member['procParams'] = self._strip_auth(
member.get('procParams', {}))
member['procParams']['configsvr'] = True
if self.enable_ipv6:
common.enable_ipv6_single(member['procParams'])
rs_cfg['sslParams'] = self.sslParams
self._configsvrs.append(ReplicaSets().create(rs_cfg)) | Create and start a config replica set. | entailment |
def __init_configsvrs(self, params):
"""create and start config servers"""
self._configsvrs = []
for cfg in params:
# Remove flags that turn on auth.
cfg = self._strip_auth(cfg)
server_id = cfg.pop('server_id', None)
version = cfg.pop('version', self._version)
cfg.update({'configsvr': True})
if self.enable_ipv6:
common.enable_ipv6_single(cfg)
self._configsvrs.append(Servers().create(
'mongod', cfg, sslParams=self.sslParams, autostart=True,
version=version, server_id=server_id)) | create and start config servers | entailment |
def configsvrs(self):
"""return list of config servers"""
if self.uses_rs_configdb:
rs_id = self._configsvrs[0]
mongodb_uri = ReplicaSets().info(rs_id)['mongodb_uri']
return [{'id': rs_id, 'mongodb_uri': mongodb_uri}]
return [{'id': h_id, 'hostname': Servers().hostname(h_id)}
for h_id in self._configsvrs] | return list of config servers | entailment |
def router(self):
"""return first available router"""
for server in self._routers:
info = Servers().info(server)
if info['procInfo'].get('alive', False):
return {'id': server, 'hostname': Servers().hostname(server)} | return first available router | entailment |
def router_add(self, params):
"""add new router (mongos) into existing configuration"""
if self.uses_rs_configdb:
# Replica set configdb.
rs_id = self._configsvrs[0]
config_members = ReplicaSets().members(rs_id)
configdb = '%s/%s' % (
rs_id, ','.join(m['host'] for m in config_members))
else:
configdb = ','.join(Servers().hostname(item)
for item in self._configsvrs)
server_id = params.pop('server_id', None)
version = params.pop('version', self._version)
params.update({'configdb': configdb})
if self.enable_ipv6:
common.enable_ipv6_single(params)
# Remove flags that turn auth on.
params = self._strip_auth(params)
self._routers.append(Servers().create(
'mongos', params, sslParams=self.sslParams, autostart=True,
version=version, server_id=server_id))
return {'id': self._routers[-1], 'hostname': Servers().hostname(self._routers[-1])} | add new router (mongos) into existing configuration | entailment |
def router_connections(self):
"""Return a list of MongoClients, one for each mongos."""
clients = []
for server in self._routers:
if Servers().is_alive(server):
client = self.create_connection(Servers().hostname(server))
clients.append(client)
return clients | Return a list of MongoClients, one for each mongos. | entailment |
def router_remove(self, router_id):
"""remove """
result = Servers().remove(router_id)
del self._routers[ self._routers.index(router_id) ]
return { "ok": 1, "routers": self._routers } | remove | entailment |
def _add(self, shard_uri, name):
"""execute addShard command"""
return self.router_command("addShard", (shard_uri, {"name": name}), is_eval=False) | execute addShard command | entailment |
def member_add(self, member_id=None, params=None):
"""add new member into existing configuration"""
member_id = member_id or str(uuid4())
if self.enable_ipv6:
common.enable_ipv6_repl(params)
if 'members' in params:
# is replica set
for member in params['members']:
if not member.get('rsParams', {}).get('arbiterOnly', False):
member.setdefault('procParams', {})['shardsvr'] = True
rs_params = params.copy()
# Turn 'rs_id' -> 'id', to be consistent with 'server_id' below.
rs_params['id'] = rs_params.pop('rs_id', None)
rs_params.update({'sslParams': self.sslParams})
rs_params['version'] = params.pop('version', self._version)
rs_params['members'] = [
self._strip_auth(params) for params in rs_params['members']]
rs_id = ReplicaSets().create(rs_params)
members = ReplicaSets().members(rs_id)
cfgs = rs_id + r"/" + ','.join([item['host'] for item in members])
result = self._add(cfgs, member_id)
if result.get('ok', 0) == 1:
self._shards[result['shardAdded']] = {'isReplicaSet': True, '_id': rs_id}
# return self._shards[result['shardAdded']].copy()
return self.member_info(member_id)
else:
# is single server
params.setdefault('procParams', {})['shardsvr'] = True
params.update({'autostart': True, 'sslParams': self.sslParams})
params = params.copy()
params['procParams'] = self._strip_auth(
params.get('procParams', {}))
params.setdefault('version', self._version)
logger.debug("servers create params: {params}".format(**locals()))
server_id = Servers().create('mongod', **params)
result = self._add(Servers().hostname(server_id), member_id)
if result.get('ok', 0) == 1:
self._shards[result['shardAdded']] = {'isServer': True, '_id': server_id}
return self.member_info(member_id) | add new member into existing configuration | entailment |
def member_info(self, member_id):
"""return info about member"""
info = self._shards[member_id].copy()
info['id'] = member_id
info['tags'] = self.tags.get(member_id, list())
return info | return info about member | entailment |
def _remove(self, shard_name):
"""remove member from configuration"""
result = self.router_command("removeShard", shard_name, is_eval=False)
if result['ok'] == 1 and result['state'] == 'completed':
shard = self._shards.pop(shard_name)
if shard.get('isServer', False):
Servers().remove(shard['_id'])
if shard.get('isReplicaSet', False):
ReplicaSets().remove(shard['_id'])
return result | remove member from configuration | entailment |
def reset(self):
"""Ensure all shards, configs, and routers are running and available."""
# Ensure all shards by calling "reset" on each.
for shard_id in self._shards:
if self._shards[shard_id].get('isReplicaSet'):
singleton = ReplicaSets()
elif self._shards[shard_id].get('isServer'):
singleton = Servers()
singleton.command(self._shards[shard_id]['_id'], 'reset')
# Ensure all config servers by calling "reset" on each.
for config_id in self._configsvrs:
self.configdb_singleton.command(config_id, 'reset')
# Ensure all routers by calling "reset" on each.
for router_id in self._routers:
Servers().command(router_id, 'reset')
return self.info() | Ensure all shards, configs, and routers are running and available. | entailment |
def info(self):
"""return info about configuration"""
uri = ','.join(x['hostname'] for x in self.routers)
mongodb_uri = 'mongodb://' + uri
result = {'id': self.id,
'shards': self.members,
'configsvrs': self.configsvrs,
'routers': self.routers,
'mongodb_uri': mongodb_uri,
'orchestration': 'sharded_clusters'}
if self.login:
result['mongodb_auth_uri'] = self.mongodb_auth_uri(uri)
return result | return info about configuration | entailment |
def cleanup(self):
"""cleanup configuration: stop and remove all servers"""
for _id, shard in self._shards.items():
if shard.get('isServer', False):
Servers().remove(shard['_id'])
if shard.get('isReplicaSet', False):
ReplicaSets().remove(shard['_id'])
for mongos in self._routers:
Servers().remove(mongos)
for config_id in self._configsvrs:
self.configdb_singleton.remove(config_id)
self._configsvrs = []
self._routers = []
self._shards = {} | cleanup configuration: stop and remove all servers | entailment |
def set_settings(self, releases=None, default_release=None):
"""set path to storage"""
super(ShardedClusters, self).set_settings(releases, default_release)
ReplicaSets().set_settings(releases, default_release) | set path to storage | entailment |
def create(self, params):
"""create new ShardedCluster
Args:
params - dictionary with specific params for instance
Return cluster_id
where cluster_id - id which can use to take the cluster from servers collection
"""
sh_id = params.get('id', str(uuid4()))
if sh_id in self:
raise ShardedClusterError(
"Sharded cluster with id %s already exists." % sh_id)
params['id'] = sh_id
cluster = ShardedCluster(params)
self[cluster.id] = cluster
return cluster.id | create new ShardedCluster
Args:
params - dictionary with specific params for instance
Return cluster_id
where cluster_id - id which can use to take the cluster from servers collection | entailment |
def remove(self, cluster_id):
"""remove cluster and data stuff
Args:
cluster_id - cluster identity
"""
cluster = self._storage.pop(cluster_id)
cluster.cleanup() | remove cluster and data stuff
Args:
cluster_id - cluster identity | entailment |
def router_add(self, cluster_id, params):
"""add new router"""
cluster = self._storage[cluster_id]
result = cluster.router_add(params)
self._storage[cluster_id] = cluster
return result | add new router | entailment |
def router_del(self, cluster_id, router_id):
"""remove router from the ShardedCluster"""
cluster = self._storage[cluster_id]
result = cluster.router_remove(router_id)
self._storage[cluster_id] = cluster
return result | remove router from the ShardedCluster | entailment |
def member_info(self, cluster_id, member_id):
"""return info about member"""
cluster = self._storage[cluster_id]
return cluster.member_info(member_id) | return info about member | entailment |
def command(self, cluster_id, command, *args):
"""Call a ShardedCluster method."""
cluster = self._storage[cluster_id]
try:
return getattr(cluster, command)(*args)
except AttributeError:
raise ValueError("Cannot issue the command %r to ShardedCluster %s"
% (command, cluster_id)) | Call a ShardedCluster method. | entailment |
def member_del(self, cluster_id, member_id):
"""remove member from cluster cluster"""
cluster = self._storage[cluster_id]
result = cluster.member_remove(member_id)
self._storage[cluster_id] = cluster
return result | remove member from cluster cluster | entailment |
def member_add(self, cluster_id, params):
"""add new member into configuration"""
cluster = self._storage[cluster_id]
result = cluster.member_add(params.get('id', None), params.get('shardParams', {}))
self._storage[cluster_id] = cluster
return result | add new member into configuration | entailment |
def expand_dir(_dir, cwd=os.getcwd()):
"""Return path with environmental variables and tilde ~ expanded.
:param _dir:
:type _dir: str
:param cwd: current working dir (for deciphering relative _dir paths)
:type cwd: str
:rtype; str
"""
_dir = os.path.expanduser(os.path.expandvars(_dir))
if not os.path.isabs(_dir):
_dir = os.path.normpath(os.path.join(cwd, _dir))
return _dir | Return path with environmental variables and tilde ~ expanded.
:param _dir:
:type _dir: str
:param cwd: current working dir (for deciphering relative _dir paths)
:type cwd: str
:rtype; str | entailment |
def extract_repos(config, cwd=os.getcwd()):
"""Return expanded configuration.
end-user configuration permit inline configuration shortcuts, expand to
identical format for parsing.
:param config: the repo config in :py:class:`dict` format.
:type config: dict
:param cwd: current working dir (for deciphering relative paths)
:type cwd: str
:rtype: list
"""
configs = []
for directory, repos in config.items():
for repo, repo_data in repos.items():
conf = {}
'''
repo_name: http://myrepo.com/repo.git
to
repo_name: { url: 'http://myrepo.com/repo.git' }
also assures the repo is a :py:class:`dict`.
'''
if isinstance(repo_data, string_types):
conf['url'] = repo_data
else:
conf = update_dict(conf, repo_data)
if 'repo' in conf:
if 'url' not in conf:
conf['url'] = conf.pop('repo')
else:
conf.pop('repo', None)
'''
``shell_command_after``: if str, turn to list.
'''
if 'shell_command_after' in conf:
if isinstance(conf['shell_command_after'], string_types):
conf['shell_command_after'] = [conf['shell_command_after']]
if 'name' not in conf:
conf['name'] = repo
if 'parent_dir' not in conf:
conf['parent_dir'] = expand_dir(directory, cwd)
if 'repo_dir' not in conf:
conf['repo_dir'] = expand_dir(
os.path.join(conf['parent_dir'], conf['name']), cwd
)
if 'remotes' in conf:
remotes = []
for remote_name, url in conf['remotes'].items():
remotes.append({'remote_name': remote_name, 'url': url})
conf['remotes'] = sorted(
remotes, key=lambda x: sorted(x.get('remote_name'))
)
configs.append(conf)
return configs | Return expanded configuration.
end-user configuration permit inline configuration shortcuts, expand to
identical format for parsing.
:param config: the repo config in :py:class:`dict` format.
:type config: dict
:param cwd: current working dir (for deciphering relative paths)
:type cwd: str
:rtype: list | entailment |
def find_home_config_files(filetype=['json', 'yaml']):
"""Return configs of ``.vcspull.{yaml,json}`` in user's home directory."""
configs = []
yaml_config = os.path.expanduser('~/.vcspull.yaml')
has_yaml_config = os.path.exists(yaml_config)
json_config = os.path.expanduser('~/.vcspull.json')
has_json_config = os.path.exists(json_config)
if not has_yaml_config and not has_json_config:
log.debug(
'No config file found. Create a .vcspull.yaml or .vcspull.json'
' in your $HOME directory. http://vcspull.git-pull.com for a'
' quickstart.'
)
else:
if sum(filter(None, [has_json_config, has_yaml_config])) > int(1):
raise exc.MultipleConfigWarning()
if has_yaml_config:
configs.append(yaml_config)
if has_json_config:
configs.append(json_config)
return configs | Return configs of ``.vcspull.{yaml,json}`` in user's home directory. | entailment |
def find_config_files(
path=['~/.vcspull'], match=['*'], filetype=['json', 'yaml'], include_home=False
):
"""Return repos from a directory and match. Not recursive.
:param path: list of paths to search
:type path: list
:param match: list of globs to search against
:type match: list
:param filetype: list of filetypes to search against
:type filetype: list
:param include_home: Include home configuration files
:type include_home: bool
:raises:
- LoadConfigRepoConflict: There are two configs that have same path
and name with different repo urls.
:returns: list of absolute paths to config files.
:rtype: list
"""
configs = []
if include_home is True:
configs.extend(find_home_config_files())
if isinstance(path, list):
for p in path:
configs.extend(find_config_files(p, match, filetype))
return configs
else:
path = os.path.expanduser(path)
if isinstance(match, list):
for m in match:
configs.extend(find_config_files(path, m, filetype))
else:
if isinstance(filetype, list):
for f in filetype:
configs.extend(find_config_files(path, match, f))
else:
match = os.path.join(path, match)
match += ".{filetype}".format(filetype=filetype)
configs = glob.glob(match)
return configs | Return repos from a directory and match. Not recursive.
:param path: list of paths to search
:type path: list
:param match: list of globs to search against
:type match: list
:param filetype: list of filetypes to search against
:type filetype: list
:param include_home: Include home configuration files
:type include_home: bool
:raises:
- LoadConfigRepoConflict: There are two configs that have same path
and name with different repo urls.
:returns: list of absolute paths to config files.
:rtype: list | entailment |
def load_configs(files, cwd=os.getcwd()):
"""Return repos from a list of files.
:todo: Validate scheme, check for duplciate destinations, VCS urls
:param files: paths to config file
:type files: list
:param cwd: current path (pass down for :func:`extract_repos`
:type cwd: str
:returns: expanded config dict item
:rtype: list of dict
"""
repos = []
for f in files:
_, ext = os.path.splitext(f)
conf = kaptan.Kaptan(handler=ext.lstrip('.')).import_config(f)
newrepos = extract_repos(conf.export('dict'), cwd)
if not repos:
repos.extend(newrepos)
continue
dupes = detect_duplicate_repos(repos, newrepos)
if dupes:
msg = ('repos with same path + different VCS detected!', dupes)
raise exc.VCSPullException(msg)
repos.extend(newrepos)
return repos | Return repos from a list of files.
:todo: Validate scheme, check for duplciate destinations, VCS urls
:param files: paths to config file
:type files: list
:param cwd: current path (pass down for :func:`extract_repos`
:type cwd: str
:returns: expanded config dict item
:rtype: list of dict | entailment |
def detect_duplicate_repos(repos1, repos2):
"""Return duplicate repos dict if repo_dir same and vcs different.
:param repos1: list of repo expanded dicts
:type repos1: list of :py:dict
:param repos2: list of repo expanded dicts
:type repos2: list of :py:dict
:rtype: list of dicts or None
:returns: Duplicate lists
"""
dupes = []
path_dupe_repos = []
curpaths = [r['repo_dir'] for r in repos1]
newpaths = [r['repo_dir'] for r in repos2]
path_duplicates = list(set(curpaths).intersection(newpaths))
if not path_duplicates:
return None
path_dupe_repos.extend(
[r for r in repos2 if any(r['repo_dir'] == p for p in path_duplicates)]
)
if not path_dupe_repos:
return None
for n in path_dupe_repos:
currepo = next((r for r in repos1 if r['repo_dir'] == n['repo_dir']), None)
if n['url'] != currepo['url']:
dupes += (n, currepo)
return dupes | Return duplicate repos dict if repo_dir same and vcs different.
:param repos1: list of repo expanded dicts
:type repos1: list of :py:dict
:param repos2: list of repo expanded dicts
:type repos2: list of :py:dict
:rtype: list of dicts or None
:returns: Duplicate lists | entailment |
def in_dir(config_dir=CONFIG_DIR, extensions=['.yml', '.yaml', '.json']):
"""Return a list of configs in ``config_dir``.
:param config_dir: directory to search
:type config_dir: str
:param extensions: filetypes to check (e.g. ``['.yaml', '.json']``).
:type extensions: list
:rtype: list
"""
configs = []
for filename in os.listdir(config_dir):
if is_config_file(filename, extensions) and not filename.startswith('.'):
configs.append(filename)
return configs | Return a list of configs in ``config_dir``.
:param config_dir: directory to search
:type config_dir: str
:param extensions: filetypes to check (e.g. ``['.yaml', '.json']``).
:type extensions: list
:rtype: list | entailment |
def filter_repos(config, repo_dir=None, vcs_url=None, name=None):
"""Return a :py:obj:`list` list of repos from (expanded) config file.
repo_dir, vcs_url and name all support fnmatch.
:param config: the expanded repo config in :py:class:`dict` format.
:type config: dict
:param repo_dir: directory of checkout location, fnmatch pattern supported
:type repo_dir: str or None
:param vcs_url: url of vcs remote, fn match pattern supported
:type vcs_url: str or None
:param name: project name, fnmatch pattern supported
:type name: str or None
:rtype: list
"""
repo_list = []
if repo_dir:
repo_list.extend(
[r for r in config if fnmatch.fnmatch(r['parent_dir'], repo_dir)]
)
if vcs_url:
repo_list.extend(
r for r in config if fnmatch.fnmatch(r.get('url', r.get('repo')), vcs_url)
)
if name:
repo_list.extend([r for r in config if fnmatch.fnmatch(r.get('name'), name)])
return repo_list | Return a :py:obj:`list` list of repos from (expanded) config file.
repo_dir, vcs_url and name all support fnmatch.
:param config: the expanded repo config in :py:class:`dict` format.
:type config: dict
:param repo_dir: directory of checkout location, fnmatch pattern supported
:type repo_dir: str or None
:param vcs_url: url of vcs remote, fn match pattern supported
:type vcs_url: str or None
:param name: project name, fnmatch pattern supported
:type name: str or None
:rtype: list | entailment |
def setup_logger(log=None, level='INFO'):
"""Setup logging for CLI use.
:param log: instance of logger
:type log: :py:class:`Logger`
"""
if not log:
log = logging.getLogger()
if not log.handlers:
channel = logging.StreamHandler()
channel.setFormatter(DebugLogFormatter())
log.setLevel(level)
log.addHandler(channel)
# setup styling for repo loggers
repo_logger = logging.getLogger('libvcs')
channel = logging.StreamHandler()
channel.setFormatter(RepoLogFormatter())
channel.addFilter(RepoFilter())
repo_logger.setLevel(level)
repo_logger.addHandler(channel) | Setup logging for CLI use.
:param log: instance of logger
:type log: :py:class:`Logger` | entailment |
def copy_node_info(src, dest):
"""Copy information from src to dest
Every node in the AST has to have line number information. Get
the information from the old stmt."""
for attr in ['lineno', 'fromlineno', 'tolineno',
'col_offset', 'parent']:
if hasattr(src, attr):
setattr(dest, attr, getattr(src, attr)) | Copy information from src to dest
Every node in the AST has to have line number information. Get
the information from the old stmt. | entailment |
def make_non_magical_flask_import(flask_ext_name):
'''Convert a flask.ext.admin into flask_admin.'''
match = re.match(r'flask\.ext\.(.*)', flask_ext_name)
if match is None:
raise LookupError("Module name `{}` doesn't match"
"`flask.ext` style import.")
from_name = match.group(1)
actual_module_name = 'flask_{}'.format(from_name)
return actual_module_name | Convert a flask.ext.admin into flask_admin. | entailment |
def transform_flask_from_import(node):
'''Translates a flask.ext from-style import into a non-magical import.
Translates:
from flask.ext import wtf, bcrypt as fcrypt
Into:
import flask_wtf as wtf, flask_bcrypt as fcrypt
'''
new_names = []
# node.names is a list of 2-tuples. Each tuple consists of (name, as_name).
# So, the import would be represented as:
#
# from flask.ext import wtf as ftw, admin
#
# node.names = [('wtf', 'ftw'), ('admin', None)]
for (name, as_name) in node.names:
actual_module_name = 'flask_{}'.format(name)
new_names.append((actual_module_name, as_name or name))
new_node = nodes.Import()
copy_node_info(node, new_node)
new_node.names = new_names
mark_transformed(new_node)
return new_node | Translates a flask.ext from-style import into a non-magical import.
Translates:
from flask.ext import wtf, bcrypt as fcrypt
Into:
import flask_wtf as wtf, flask_bcrypt as fcrypt | entailment |
def transform_flask_from_long(node):
'''Translates a flask.ext.wtf from-style import into a non-magical import.
Translates:
from flask.ext.wtf import Form
from flask.ext.admin.model import InlineFormAdmin
Into:
from flask_wtf import Form
from flask_admin.model import InlineFormAdmin
'''
actual_module_name = make_non_magical_flask_import(node.modname)
new_node = nodes.ImportFrom(actual_module_name, node.names, node.level)
copy_node_info(node, new_node)
mark_transformed(new_node)
return new_node | Translates a flask.ext.wtf from-style import into a non-magical import.
Translates:
from flask.ext.wtf import Form
from flask.ext.admin.model import InlineFormAdmin
Into:
from flask_wtf import Form
from flask_admin.model import InlineFormAdmin | entailment |
def transform_flask_bare_import(node):
'''Translates a flask.ext.wtf bare import into a non-magical import.
Translates:
import flask.ext.admin as admin
Into:
import flask_admin as admin
'''
new_names = []
for (name, as_name) in node.names:
match = re.match(r'flask\.ext\.(.*)', name)
from_name = match.group(1)
actual_module_name = 'flask_{}'.format(from_name)
new_names.append((actual_module_name, as_name))
new_node = nodes.Import()
copy_node_info(node, new_node)
new_node.names = new_names
mark_transformed(new_node)
return new_node | Translates a flask.ext.wtf bare import into a non-magical import.
Translates:
import flask.ext.admin as admin
Into:
import flask_admin as admin | entailment |
def _write(self, data):
"""
_write: binary data -> None
Packages the given binary data in an API frame and writes the
result to the serial port
"""
frame = APIFrame(data, self._escaped).output()
self.serial.write(frame) | _write: binary data -> None
Packages the given binary data in an API frame and writes the
result to the serial port | entailment |
def _build_command(self, cmd, **kwargs):
"""
_build_command: string (binary data) ... -> binary data
_build_command will construct a command packet according to the
specified command's specification in api_commands. It will expect
named arguments for all fields other than those with a default
value or a length of 'None'.
Each field will be written out in the order they are defined
in the command definition.
"""
try:
cmd_spec = self.api_commands[cmd]
except AttributeError:
raise NotImplementedError("API command specifications could not be "
"found; use a derived class which defines"
" 'api_commands'.")
packet = b''
for field in cmd_spec:
try:
# Read this field's name from the function arguments dict
data = kwargs[field['name']]
if isinstance(data, str):
data = stringToBytes(data)
except KeyError:
# Data wasn't given
# Only a problem if the field has a specific length
if field['len'] is not None:
# Was a default value specified?
default_value = field['default']
if default_value:
# If so, use it
data = default_value
else:
# Otherwise, fail
raise KeyError(
"The expected field {} of length {} "
"was not provided".format(
field['name'], field['len']
)
)
else:
# No specific length, ignore it
data = None
# Ensure that the proper number of elements will be written
if field['len'] and len(data) != field['len']:
raise ValueError(
"The data provided for '{}' was not {} "
"bytes long".format(field['name'], field['len'])
)
# Add the data to the packet, if it has been specified.
# Otherwise, the parameter was of variable length, and not given.
if data:
packet += data
return packet | _build_command: string (binary data) ... -> binary data
_build_command will construct a command packet according to the
specified command's specification in api_commands. It will expect
named arguments for all fields other than those with a default
value or a length of 'None'.
Each field will be written out in the order they are defined
in the command definition. | entailment |
def _split_response(self, data):
"""
_split_response: binary data -> {'id':str,
'param':binary data,
...}
_split_response takes a data packet received from an XBee device
and converts it into a dictionary. This dictionary provides
names for each segment of binary data as specified in the
api_responses spec.
"""
# Fetch the first byte, identify the packet
# If the spec doesn't exist, raise exception
packet_id = data[0:1]
try:
packet = self.api_responses[packet_id]
except AttributeError:
raise NotImplementedError("API response specifications could not "
"be found; use a derived class which "
"defines 'api_responses'.")
except KeyError:
# Check to see if this ID can be found among transmittable packets
for cmd_name, cmd in list(self.api_commands.items()):
if cmd[0]['default'] == data[0:1]:
raise CommandFrameException("Incoming frame with id {} "
"looks like a command frame of "
"type '{}' (these should not be"
" received). Are you sure your "
"devices are in "
"API mode?".format(
data[0], cmd_name)
)
raise KeyError(
"Unrecognized response packet with id byte {0}".format(data[0]))
# Current byte index in the data stream
index = 1
# Result info
info = {'id': packet['name']}
packet_spec = packet['structure']
# Parse the packet in the order specified
for field in packet_spec:
if field['len'] == 'null_terminated':
field_data = b''
while data[index:index+1] != b'\x00':
field_data += data[index:index+1]
index += 1
index += 1
info[field['name']] = field_data
elif field['len'] is not None:
# Store the number of bytes specified
# Are we trying to read beyond the last data element?
expected_len = index + field['len']
if expected_len > len(data):
raise ValueError("Response packet was shorter than "
"expected; expected: {}, got: {} "
"bytes".format(expected_len, len(data))
)
field_data = data[index:index + field['len']]
info[field['name']] = field_data
index += field['len']
# If the data field has no length specified, store any
# leftover bytes and quit
else:
field_data = data[index:]
# Were there any remaining bytes?
if field_data:
# If so, store them
info[field['name']] = field_data
index += len(field_data)
break
# If there are more bytes than expected, raise an exception
if index < len(data):
raise ValueError("Response packet was longer than expected; "
"expected: {}, got: {} bytes".format(
index, len(data))
)
# Apply parsing rules if any exist
if 'parsing' in packet:
for parse_rule in packet['parsing']:
# Only apply a rule if it is relevant (raw data is available)
if parse_rule[0] in info:
# Apply the parse function to the indicated field and
# replace the raw data with the result
info[parse_rule[0]] = parse_rule[1](self, info)
return info | _split_response: binary data -> {'id':str,
'param':binary data,
...}
_split_response takes a data packet received from an XBee device
and converts it into a dictionary. This dictionary provides
names for each segment of binary data as specified in the
api_responses spec. | entailment |
def _parse_samples_header(self, io_bytes):
"""
_parse_samples_header: binary data in XBee IO data format ->
(int, [int ...], [int ...], int, int)
_parse_samples_header will read the first three bytes of the
binary data given and will return the number of samples which
follow, a list of enabled digital inputs, a list of enabled
analog inputs, the dio_mask, and the size of the header in bytes
"""
header_size = 3
# number of samples (always 1?) is the first byte
sample_count = byteToInt(io_bytes[0])
# part of byte 1 and byte 2 are the DIO mask ( 9 bits )
dio_mask = (byteToInt(io_bytes[1]) << 8 | byteToInt(io_bytes[2])) \
& 0x01FF
# upper 7 bits of byte 1 is the AIO mask
aio_mask = (byteToInt(io_bytes[1]) & 0xFE) >> 1
# sorted lists of enabled channels; value is position of bit in mask
dio_chans = []
aio_chans = []
for i in range(0, 9):
if dio_mask & (1 << i):
dio_chans.append(i)
dio_chans.sort()
for i in range(0, 7):
if aio_mask & (1 << i):
aio_chans.append(i)
aio_chans.sort()
return (sample_count, dio_chans, aio_chans, dio_mask, header_size) | _parse_samples_header: binary data in XBee IO data format ->
(int, [int ...], [int ...], int, int)
_parse_samples_header will read the first three bytes of the
binary data given and will return the number of samples which
follow, a list of enabled digital inputs, a list of enabled
analog inputs, the dio_mask, and the size of the header in bytes | entailment |
def _parse_samples(self, io_bytes):
"""
_parse_samples: binary data in XBee IO data format ->
[ {"dio-0":True,
"dio-1":False,
"adc-0":100"}, ...]
_parse_samples reads binary data from an XBee device in the IO
data format specified by the API. It will then return a
dictionary indicating the status of each enabled IO port.
"""
sample_count, dio_chans, aio_chans, dio_mask, header_size = \
self._parse_samples_header(io_bytes)
samples = []
# split the sample data into a list, so it can be pop()'d
sample_bytes = [byteToInt(c) for c in io_bytes[header_size:]]
# repeat for every sample provided
for sample_ind in range(0, sample_count):
tmp_samples = {}
if dio_chans:
# we have digital data
digital_data_set = (sample_bytes.pop(0) << 8 |
sample_bytes.pop(0))
digital_values = dio_mask & digital_data_set
for i in dio_chans:
tmp_samples['dio-{0}'.format(i)] = True \
if (digital_values >> i) & 1 else False
for i in aio_chans:
analog_sample = (sample_bytes.pop(0) << 8 | sample_bytes.pop(0))
tmp_samples['adc-{0}'.format(i)] = analog_sample
samples.append(tmp_samples)
return samples | _parse_samples: binary data in XBee IO data format ->
[ {"dio-0":True,
"dio-1":False,
"adc-0":100"}, ...]
_parse_samples reads binary data from an XBee device in the IO
data format specified by the API. It will then return a
dictionary indicating the status of each enabled IO port. | entailment |
def send(self, cmd, **kwargs):
"""
send: string param=binary data ... -> None
When send is called with the proper arguments, an API command
will be written to the serial port for this XBee device
containing the proper instructions and data.
This method must be called with named arguments in accordance
with the api_command specification. Arguments matching all
field names other than those in reserved_names (like 'id' and
'order') should be given, unless they are of variable length
(of 'None' in the specification. Those are optional).
"""
# Pass through the keyword arguments
self._write(self._build_command(cmd, **kwargs)) | send: string param=binary data ... -> None
When send is called with the proper arguments, an API command
will be written to the serial port for this XBee device
containing the proper instructions and data.
This method must be called with named arguments in accordance
with the api_command specification. Arguments matching all
field names other than those in reserved_names (like 'id' and
'order') should be given, unless they are of variable length
(of 'None' in the specification. Those are optional). | entailment |
def main():
"""
Run through simple demonstration of alarm concept
"""
alarm = XBeeAlarm('/dev/ttyUSB0', '\x56\x78')
routine = SimpleWakeupRoutine(alarm)
from time import sleep
while True:
"""
Run the routine with 10 second delays
"""
try:
print "Waiting 5 seconds..."
sleep(5)
print "Firing"
routine.trigger()
except KeyboardInterrupt:
break | Run through simple demonstration of alarm concept | entailment |
def _reset(self):
"""
reset: None -> None
Resets the remote XBee device to a standard configuration
"""
# Analog pin 0
self.hw.remote_at(
dest_addr=self.remote_addr,
command='D0',
parameter='\x02')
# Disengage remote LED, buzzer
self.deactivate()
self._set_send_samples(False) | reset: None -> None
Resets the remote XBee device to a standard configuration | entailment |
def _set_LED(self, status):
"""
_set_LED: boolean -> None
Sets the status of the remote LED
"""
# DIO pin 1 (LED), active low
self.hw.remote_at(
dest_addr=self.remote_addr,
command='D1',
parameter='\x04' if status else '\x05') | _set_LED: boolean -> None
Sets the status of the remote LED | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.