sentence1
stringlengths 52
3.87M
| sentence2
stringlengths 1
47.2k
| label
stringclasses 1
value |
|---|---|---|
def get_age(dt):
    """Return a human-readable age string for *dt* relative to now.

    The result is 'Nd Nh Nm' when the delta spans at least one day,
    otherwise 'Nh Nm Ns'.
    """
    delta = datetime.now() - dt
    secs_today = delta.seconds
    hours = secs_today // 3600
    minutes = (secs_today % 3600) // 60
    seconds = secs_today % 60
    if delta.days:
        return '%dd %dh %dm' % (delta.days, hours, minutes)
    return '%dh %dm %ds' % (hours, minutes, seconds)
|
Calculate delta between current time and datetime and return a human readable form of the delta object
|
entailment
|
def json_obj(self, method, params=None, auth=True):
    """Build the JSON-RPC 2.0 request body expected by the Zabbix API.

    When *auth* is false (e.g. for user.login) the 'auth' member is
    explicitly None; otherwise the stored auth token is used.
    Returns the body serialized as a JSON string.
    """
    payload = {
        'jsonrpc': '2.0',
        'method': method,
        'params': {} if params is None else params,
        'auth': self.__auth if auth else None,
        'id': self.id,
    }
    return json.dumps(payload)
|
Return JSON object expected by the Zabbix API
|
entailment
|
def do_request(self, json_obj):
    """Perform one HTTP POST request to the Zabbix API and return its result.

    @param json_obj: JSON-encoded request body string, typically produced
        by json_obj().
    @return: the value of the 'result' member of the decoded response.
    @raise ZabbixAPIException: on connection failure, non-200 status,
        empty body, undecodable JSON, or a missing 'result' member.
    @raise ZabbixAPIError: when the API returns a structured error dict.
    """
    self.debug('Request: url="%s" headers=%s', self._api_url, self._http_headers)
    self.debug('Request: body=%s', json_obj)
    # Keep a record of every request body sent.
    self.r_query.append(json_obj)
    request = urllib2.Request(url=self._api_url, data=json_obj.encode('utf-8'), headers=self._http_headers)
    opener = urllib2.build_opener(self._http_handler)
    urllib2.install_opener(opener)
    try:
        response = opener.open(request, timeout=self.timeout)
    except Exception as e:
        # Wrap any transport-level failure in our own exception type.
        raise ZabbixAPIException('HTTP connection problem: %s' % e)
    self.debug('Response: code=%s', response.code)
    # NOTE: Getting a 412 response code means the headers are not in the list of allowed headers.
    if response.code != 200:
        # NOTE(review): urllib normally raises HTTPError from open() for
        # non-2xx codes, so this branch may rarely run; also
        # response.status/response.reason may not exist on every urllib
        # response object — confirm against the urllib2 alias in use.
        raise ZabbixAPIException('HTTP error %s: %s' % (response.status, response.reason))
    reads = response.read()
    if len(reads) == 0:
        raise ZabbixAPIException('Received zero answer')
    try:
        jobj = json.loads(reads.decode('utf-8'))
    except ValueError as e:
        self.log(ERROR, 'Unable to decode. returned string: %s', reads)
        raise ZabbixAPIException('Unable to decode response: %s' % e)
    self.debug('Response: body=%s', jobj)
    # Bump the JSON-RPC request id for the next call.
    self.id += 1
    if 'error' in jobj:  # zabbix API error
        error = jobj['error']
        if isinstance(error, dict):
            raise ZabbixAPIError(**error)
    try:
        return jobj['result']
    except KeyError:
        raise ZabbixAPIException('Missing result in API response')
|
Perform one HTTP request to Zabbix API
|
entailment
|
def login(self, user=None, password=None, save=True):
    """Perform a user.login API request.

    Uses the supplied credentials (remembering them when *save* is true)
    or falls back to previously stored ones; on success the returned
    auth token is stored for subsequent requests.
    """
    if user and password:
        if save:
            # Remember the credentials for later automatic re-login.
            self.__username = user
            self.__password = password
    elif self.__username and self.__password:
        user, password = self.__username, self.__password
    else:
        raise ZabbixAPIException('No authentication information available.')

    self.last_login = time()
    # Never log the raw password - only an md5 fingerprint of it.
    pw_fingerprint = 'md5(%s)' % md5(password.encode('utf-8')).hexdigest()
    self.debug('Trying to login with %r:%r', user, pw_fingerprint)
    request_body = self.json_obj('user.login', params={'user': user, 'password': password}, auth=False)
    self.__auth = self.do_request(request_body)
|
Perform a user.login API request
|
entailment
|
def relogin(self):
    """Repeat the login using previously stored credentials."""
    try:
        # Clear the stale token so the login request is unauthenticated.
        self.__auth = None
        self.login()
    except ZabbixAPIException as exc:
        self.log(ERROR, 'Zabbix API relogin error (%s)', exc)
        self.__auth = None  # logged_in() will always return False
        raise
|
Perform a re-login
|
entailment
|
def check_auth(self):
    """Ensure we are signed in: relogin when allowed, otherwise raise."""
    if self.logged_in:
        return
    relogin_allowed = (self.relogin_interval and self.last_login
                       and (time() - self.last_login) > self.relogin_interval)
    if not relogin_allowed:
        raise ZabbixAPIException('Not logged in.')
    self.log(WARNING, 'Zabbix API not logged in. Performing Zabbix API relogin after %d seconds',
             self.relogin_interval)
    self.relogin()  # Will raise exception in case of login error
|
Perform a re-login if not signed in or raise an exception
|
entailment
|
def call(self, method, params=None):
    """Check authentication, perform the API request, and relogin if needed.

    @param method: Zabbix API method name, e.g. 'host.get'.
    @param params: optional parameters mapping for the method.
    @return: the API call result (the 'result' member of the response).
    """
    start_time = time()
    self.check_auth()
    self.log(INFO, '[%s-%05d] Calling Zabbix API method "%s"', start_time, self.id, method)
    self.log(DEBUG, '\twith parameters: %s', params)
    try:
        return self.do_request(self.json_obj(method, params=params))
    except ZabbixAPIError as ex:
        # If the error text matches a known login failure, perform exactly
        # one relogin followed by a single retry of the original request.
        if self.relogin_interval and any(i in ex.error['data'] for i in self.LOGIN_ERRORS):
            self.log(WARNING, 'Zabbix API not logged in (%s). Performing Zabbix API relogin', ex)
            self.relogin()  # Will raise exception in case of login error
            return self.do_request(self.json_obj(method, params=params))
        raise  # Re-raise the exception
    finally:
        self.log(INFO, '[%s-%05d] Zabbix API method "%s" finished in %g seconds',
                 start_time, self.id, method, (time() - start_time))
|
Check authentication and perform actual API request and relogin if needed
|
entailment
|
def download_parallel(url, directory, idx, min_file_size = 0, max_file_size = -1,
                      no_redirects = False, pos = 0, mode = 's'):
    """
    Download a single *url* into *directory*, updating the shared progress
    state (file_name, total_chunks, i_max) slot *idx* used by the GUI.

    Runs in a worker thread during parallel downloads; increments the
    global exit_flag when finished or skipped so the GUI knows when all
    workers are done.

    NOTE(review): min_file_size/max_file_size are compared against the
    chunk count (content-length / chunk_size), not raw bytes — confirm
    the intended unit with callers.
    """
    global main_it
    global exit_flag
    global total_chunks
    global file_name
    global i_max
    # Derive the local file name from the last path segment of the URL.
    file_name[idx]= url.split('/')[-1]
    file_address = directory + '/' + file_name[idx]
    is_redirects = not no_redirects
    # 's' is presumably a module-level requests.Session — verify.
    resp = s.get(url, stream = True, allow_redirects = is_redirects)
    if not resp.status_code == 200:
        # ignore this file since server returns invalid response
        exit_flag += 1
        return
    try:
        total_size = int(resp.headers['content-length'])
    except KeyError:
        # No content-length header; fall back to the full body size.
        total_size = len(resp.content)
    # NOTE(review): true division — on Python 3 this stores a float;
    # read_bytes() appears to tolerate that (it casts with int()). Verify.
    total_chunks[idx] = total_size / chunk_size
    if total_chunks[idx] < min_file_size:
        # ignore this file since file size is lesser than min_file_size
        exit_flag += 1
        return
    elif max_file_size != -1 and total_chunks[idx] > max_file_size:
        # ignore this file since file size is greater than max_file_size
        exit_flag += 1
        return
    file_iterable = resp.iter_content(chunk_size = chunk_size)
    with open(file_address, 'wb') as f:
        for sno, data in enumerate(file_iterable):
            # i_max[idx] drives the progress bar for this download.
            i_max[idx] = sno + 1
            f.write(data)
    exit_flag += 1
|
download function used to download files in parallel
|
entailment
|
def download_parallel_gui(root, urls, directory, min_file_size, max_file_size, no_redirects):
    """
    GUI entry point for parallel downloading: ensures the target
    directory exists, sets the global parallel flag and opens the
    progress window.
    """
    global parallel
    parallel = True
    # Make sure the destination directory exists before workers write to it.
    if not os.path.exists(directory):
        os.makedirs(directory)
    app = progress_class(root, urls, directory, min_file_size, max_file_size, no_redirects)
|
called when parallel downloading is true
|
entailment
|
def download_series_gui(frame, urls, directory, min_file_size, max_file_size, no_redirects):
    """
    GUI entry point for serial downloading: ensures the target directory
    exists and opens the progress window.
    """
    # Make sure the destination directory exists before downloads start.
    if not os.path.exists(directory):
        os.makedirs(directory)
    app = progress_class(frame, urls, directory, min_file_size, max_file_size, no_redirects)
|
called when user wants serial downloading
|
entailment
|
def run(self):
    """
    Thread entry point: dispatch to the parallel or serial download routine.
    """
    global parallel
    common_args = (self.url, self.directory, self.idx,
                   self.min_file_size, self.max_file_size, self.no_redirects)
    if parallel:
        download_parallel(*common_args)
    else:
        download(*common_args)
|
function called when thread is started
|
entailment
|
def start(self):
    """
    Create one downloader thread per progress row, start them all, and
    kick off the periodic progress-polling loop.
    """
    global parallel
    # NOTE(review): the loop variable is deliberately an instance
    # attribute (self.i); read_bytes() uses the same idiom.
    for self.i in range(0, self.length):
        if parallel:
            # One URL per worker thread.
            self.thread.append(myThread(self.url[ self.i ], self.directory, self.i,
                               self.min_file_size, self.max_file_size, self.no_redirects))
        else:
            # if not parallel whole url list is passed
            self.thread.append(myThread(self.url, self.directory, self.i , self.min_file_size,
                               self.max_file_size, self.no_redirects))
        # Reset the progress bar and byte counter for this row.
        self.progress[self.i]["value"] = 0
        self.bytes[self.i] = 0
        self.thread[self.i].start()
    self.read_bytes()
|
function to initialize thread for downloading
|
entailment
|
def read_bytes(self):
    """
    Poll the shared download counters and refresh every progress bar,
    rescheduling itself every 10 ms until all downloads have finished.
    """
    global exit_flag
    for self.i in range(0, self.length) :
        # i_max / total_chunks / file_name are filled in by worker threads.
        self.bytes[self.i] = i_max[self.i]
        self.maxbytes[self.i] = total_chunks[self.i]
        self.progress[self.i]["maximum"] = total_chunks[self.i]
        self.progress[self.i]["value"] = self.bytes[self.i]
        # NOTE(review): the label reports chunk counts as "KB" — only
        # accurate if chunk_size is 1024 bytes; confirm.
        self.str[self.i].set(file_name[self.i]+ " " + str(self.bytes[self.i])
            + "KB / " + str(int(self.maxbytes[self.i] + 1)) + " KB")
    if exit_flag == self.length:
        # Every worker reported completion; reset the counter and close.
        exit_flag = 0
        self.frame.destroy()
    else:
        # Poll again in 10 milliseconds.
        self.frame.after(10, self.read_bytes)
|
reading bytes; update progress bar every 10 ms
|
entailment
|
def declare_type(self, declared_type):  # type: (TypeDef) -> TypeDef
    """Add this type to our collection, if needed.

    Registers *declared_type* under its name the first time that name is
    seen; later declarations of the same name keep the original entry.

    @param declared_type: the TypeDef to register.
    @return: the TypeDef that was passed in.
    """
    # BUG FIX: collected_types is keyed by name, so membership must be
    # tested with declared_type.name.  Testing the TypeDef object itself
    # against the name keys was effectively always False, which
    # re-registered (and overwrote) every declaration.
    if declared_type.name not in self.collected_types:
        self.collected_types[declared_type.name] = declared_type
    return declared_type
|
Add this type to our collection, if needed.
|
entailment
|
def get_metaschema():  # type: () -> Tuple[Names, List[Dict[Text, Any]], Loader]
    """Instantiate the metaschema.

    Builds a Loader primed with the salad vocabulary/JSON-LD context,
    pre-caches the bundled metaschema source files, resolves and converts
    the metaschema to Avro, and validates the metaschema against itself.

    @return: (avro Names for the metaschema, resolved metaschema document,
        the metaschema Loader).
    """
    # The mapping below is the JSON-LD context for the salad metaschema:
    # vocabulary terms to their full URIs, plus per-predicate behavior
    # (identity, refScope, mapSubject/mapPredicate, typeDSL, ...).
    loader = ref_resolver.Loader({
        "Any": "https://w3id.org/cwl/salad#Any",
        "ArraySchema": "https://w3id.org/cwl/salad#ArraySchema",
        "Array_symbol": "https://w3id.org/cwl/salad#ArraySchema/type/Array_symbol",
        "DocType": "https://w3id.org/cwl/salad#DocType",
        "Documentation": "https://w3id.org/cwl/salad#Documentation",
        "Documentation_symbol":
            "https://w3id.org/cwl/salad#Documentation/type/Documentation_symbol",
        "Documented": "https://w3id.org/cwl/salad#Documented",
        "EnumSchema": "https://w3id.org/cwl/salad#EnumSchema",
        "Enum_symbol": "https://w3id.org/cwl/salad#EnumSchema/type/Enum_symbol",
        "JsonldPredicate": "https://w3id.org/cwl/salad#JsonldPredicate",
        "NamedType": "https://w3id.org/cwl/salad#NamedType",
        "PrimitiveType": "https://w3id.org/cwl/salad#PrimitiveType",
        "RecordField": "https://w3id.org/cwl/salad#RecordField",
        "RecordSchema": "https://w3id.org/cwl/salad#RecordSchema",
        "Record_symbol": "https://w3id.org/cwl/salad#RecordSchema/type/Record_symbol",
        "SaladEnumSchema": "https://w3id.org/cwl/salad#SaladEnumSchema",
        "SaladRecordField": "https://w3id.org/cwl/salad#SaladRecordField",
        "SaladRecordSchema": "https://w3id.org/cwl/salad#SaladRecordSchema",
        "SchemaDefinedType": "https://w3id.org/cwl/salad#SchemaDefinedType",
        "SpecializeDef": "https://w3id.org/cwl/salad#SpecializeDef",
        "_container": "https://w3id.org/cwl/salad#JsonldPredicate/_container",
        "_id": {
            "@id": "https://w3id.org/cwl/salad#_id",
            "@type": "@id",
            "identity": True
        },
        "_type": "https://w3id.org/cwl/salad#JsonldPredicate/_type",
        "abstract": "https://w3id.org/cwl/salad#SaladRecordSchema/abstract",
        "array": "https://w3id.org/cwl/salad#array",
        "boolean": "http://www.w3.org/2001/XMLSchema#boolean",
        "dct": "http://purl.org/dc/terms/",
        "default": {
            "@id": "https://w3id.org/cwl/salad#default",
            "noLinkCheck": True
        },
        "doc": "rdfs:comment",
        "docAfter": {
            "@id": "https://w3id.org/cwl/salad#docAfter",
            "@type": "@id"
        },
        "docChild": {
            "@id": "https://w3id.org/cwl/salad#docChild",
            "@type": "@id"
        },
        "docParent": {
            "@id": "https://w3id.org/cwl/salad#docParent",
            "@type": "@id"
        },
        "documentRoot": "https://w3id.org/cwl/salad#SchemaDefinedType/documentRoot",
        "documentation": "https://w3id.org/cwl/salad#documentation",
        "double": "http://www.w3.org/2001/XMLSchema#double",
        "enum": "https://w3id.org/cwl/salad#enum",
        "extends": {
            "@id": "https://w3id.org/cwl/salad#extends",
            "@type": "@id",
            "refScope": 1
        },
        "fields": {
            "@id": "https://w3id.org/cwl/salad#fields",
            "mapPredicate": "type",
            "mapSubject": "name"
        },
        "float": "http://www.w3.org/2001/XMLSchema#float",
        "identity": "https://w3id.org/cwl/salad#JsonldPredicate/identity",
        "inVocab": "https://w3id.org/cwl/salad#NamedType/inVocab",
        "int": "http://www.w3.org/2001/XMLSchema#int",
        "items": {
            "@id": "https://w3id.org/cwl/salad#items",
            "@type": "@vocab",
            "refScope": 2
        },
        "jsonldPredicate": "sld:jsonldPredicate",
        "long": "http://www.w3.org/2001/XMLSchema#long",
        "mapPredicate": "https://w3id.org/cwl/salad#JsonldPredicate/mapPredicate",
        "mapSubject": "https://w3id.org/cwl/salad#JsonldPredicate/mapSubject",
        "name": "@id",
        "noLinkCheck": "https://w3id.org/cwl/salad#JsonldPredicate/noLinkCheck",
        "null": "https://w3id.org/cwl/salad#null",
        "rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
        "rdfs": "http://www.w3.org/2000/01/rdf-schema#",
        "record": "https://w3id.org/cwl/salad#record",
        "refScope": "https://w3id.org/cwl/salad#JsonldPredicate/refScope",
        "sld": "https://w3id.org/cwl/salad#",
        "specialize": {
            "@id": "https://w3id.org/cwl/salad#specialize",
            "mapPredicate": "specializeTo",
            "mapSubject": "specializeFrom"
        },
        "specializeFrom": {
            "@id": "https://w3id.org/cwl/salad#specializeFrom",
            "@type": "@id",
            "refScope": 1
        },
        "specializeTo": {
            "@id": "https://w3id.org/cwl/salad#specializeTo",
            "@type": "@id",
            "refScope": 1
        },
        "string": "http://www.w3.org/2001/XMLSchema#string",
        "subscope": "https://w3id.org/cwl/salad#JsonldPredicate/subscope",
        "symbols": {
            "@id": "https://w3id.org/cwl/salad#symbols",
            "@type": "@id",
            "identity": True
        },
        "type": {
            "@id": "https://w3id.org/cwl/salad#type",
            "@type": "@vocab",
            "refScope": 2,
            "typeDSL": True
        },
        "typeDSL": "https://w3id.org/cwl/salad#JsonldPredicate/typeDSL",
        "xsd": "http://www.w3.org/2001/XMLSchema#"
    })
    # Pre-populate the loader cache with the bundled metaschema sources so
    # no network access is needed to resolve the w3id.org URLs.
    for salad in SALAD_FILES:
        with resource_stream(__name__, 'metaschema/' + salad) as stream:
            loader.cache["https://w3id.org/cwl/" + salad] = stream.read()
    with resource_stream(__name__, 'metaschema/metaschema.yml') as stream:
        loader.cache["https://w3id.org/cwl/salad"] = stream.read()
    # Parse with ruamel round-trip loading to keep line/column info.
    j = yaml.round_trip_load(loader.cache["https://w3id.org/cwl/salad"])
    add_lc_filename(j, "metaschema.yml")
    j, _ = loader.resolve_all(j, "https://w3id.org/cwl/salad#")
    sch_obj = make_avro(j, loader)
    try:
        sch_names = make_avro_schema_from_avro(sch_obj)
    except SchemaParseException:
        # Dump the generated Avro to aid debugging a broken metaschema.
        _logger.error("Metaschema error, avro was:\n%s",
                      json_dumps(sch_obj, indent=4))
        raise
    # The metaschema must validate against itself.
    validate_doc(sch_names, j, loader, strict=True)
    return (sch_names, j, loader)
|
Instantiate the metaschema.
|
entailment
|
def add_namespaces(metadata, namespaces):
    # type: (Mapping[Text, Any], MutableMapping[Text, Text]) -> None
    """Merge the namespace prefixes from *metadata* into *namespaces*.

    A prefix already present must map to the same value; otherwise a
    validation error is raised.
    """
    for prefix, uri in metadata.items():
        if prefix not in namespaces:
            namespaces[prefix] = uri
        elif namespaces[prefix] != uri:
            raise validate.ValidationException(
                "Namespace prefix '{}' has conflicting definitions '{}'"
                " and '{}'.".format(prefix, namespaces[prefix], uri))
|
Collect the provided namespaces, checking for conflicts.
|
entailment
|
def collect_namespaces(metadata):
    # type: (Mapping[Text, Any]) -> Dict[Text, Text]
    """Gather every namespace declaration reachable from *metadata*.

    Recurses through "$import_metadata" entries before merging this
    document's own "$namespaces" block.
    """
    namespaces = {}  # type: Dict[Text, Text]
    if "$import_metadata" in metadata:
        for child in metadata["$import_metadata"].values():
            add_namespaces(collect_namespaces(child), namespaces)
    if "$namespaces" in metadata:
        add_namespaces(metadata["$namespaces"], namespaces)
    return namespaces
|
Walk through the metadata object, collecting namespace declarations.
|
entailment
|
def load_schema(schema_ref,  # type: Union[CommentedMap, CommentedSeq, Text]
                cache=None  # type: Dict
                ):
    # type: (...) -> Tuple[Loader, Union[Names, SchemaParseException], Dict[Text, Any], Loader]
    """
    Load a schema that can be used to validate documents using load_and_validate.

    @param schema_ref: reference to the schema document to load.
    @param cache: optional mapping of pre-resolved references; merged into
        the metaschema loader's cache and passed to the document loader.
    return: document_loader, avsc_names, schema_metadata, metaschema_loader
    """
    metaschema_names, _metaschema_doc, metaschema_loader = get_metaschema()
    if cache is not None:
        metaschema_loader.cache.update(cache)
    # Resolve the schema and validate it against the metaschema.
    schema_doc, schema_metadata = metaschema_loader.resolve_ref(schema_ref, "")
    if not isinstance(schema_doc, MutableSequence):
        raise ValueError("Schema reference must resolve to a list.")
    validate_doc(metaschema_names, schema_doc, metaschema_loader, True)
    # Build the JSON-LD context for target documents from the schema.
    metactx = schema_metadata.get("@context", {})
    metactx.update(collect_namespaces(schema_metadata))
    schema_ctx = jsonld_context.salad_to_jsonld_context(schema_doc, metactx)[0]
    # Create the loader that will be used to load the target document.
    document_loader = Loader(schema_ctx, cache=cache)
    # Make the Avro validation that will be used to validate the target
    # document
    avsc_names = make_avro_schema(schema_doc, document_loader)
    return document_loader, avsc_names, schema_metadata, metaschema_loader
|
Load a schema that can be used to validate documents using load_and_validate.
return: document_loader, avsc_names, schema_metadata, metaschema_loader
|
entailment
|
def load_and_validate(document_loader,  # type: Loader
                      avsc_names,  # type: Names
                      document,  # type: Union[CommentedMap, Text]
                      strict,  # type: bool
                      strict_foreign_properties=False  # type: bool
                      ):
    # type: (...) -> Tuple[Any, Dict[Text, Any]]
    """Load a document and validate it with the provided schema.
    return data, metadata
    """
    try:
        # An in-memory map is resolved in place; anything else is treated
        # as a reference to be fetched.
        if isinstance(document, CommentedMap):
            resolved = document_loader.resolve_all(
                document, document["id"], checklinks=True,
                strict_foreign_properties=strict_foreign_properties)
        else:
            resolved = document_loader.resolve_ref(
                document, checklinks=True,
                strict_foreign_properties=strict_foreign_properties)
        data, metadata = resolved
        validate_doc(avsc_names, data, document_loader, strict,
                     strict_foreign_properties=strict_foreign_properties)
        return data, metadata
    except validate.ValidationException as exc:
        # Re-raise with duplicate line-number noise stripped from the text.
        raise validate.ValidationException(strip_dup_lineno(str(exc)))
|
Load a document and validate it with the provided schema.
return data, metadata
|
entailment
|
def validate_doc(schema_names,  # type: Names
                 doc,  # type: Union[Dict[Text, Any], List[Dict[Text, Any]], Text, None]
                 loader,  # type: Loader
                 strict,  # type: bool
                 strict_foreign_properties=False  # type: bool
                 ):
    # type: (...) -> None
    """Validate a document using the provided schema.

    Each item of *doc* must validate against at least one schema type
    marked documentRoot; otherwise a ValidationException aggregating the
    per-root errors is raised.
    """
    # First pass: make sure the schema declares at least one document root.
    has_root = False
    for root in schema_names.names.values():
        if ((hasattr(root, 'get_prop') and root.get_prop(u"documentRoot")) or (
                u"documentRoot" in root.props)):
            has_root = True
            break
    if not has_root:
        raise validate.ValidationException(
            "No document roots defined in the schema")
    # Normalize the document to a sequence, preserving line/column info.
    if isinstance(doc, MutableSequence):
        vdoc = doc
    elif isinstance(doc, CommentedMap):
        vdoc = CommentedSeq([doc])
        vdoc.lc.add_kv_line_col(0, [doc.lc.line, doc.lc.col])
        vdoc.lc.filename = doc.lc.filename
    else:
        raise validate.ValidationException("Document must be dict or list")
    # Collect the candidate root types.
    roots = []
    for root in schema_names.names.values():
        if ((hasattr(root, "get_prop") and root.get_prop(u"documentRoot")) or (
                root.props.get(u"documentRoot"))):
            roots.append(root)
    anyerrors = []
    for pos, item in enumerate(vdoc):
        sourceline = SourceLine(vdoc, pos, Text)
        success = False
        # Quiet pass: accept the item if any root validates it.
        for root in roots:
            success = validate.validate_ex(
                root, item, loader.identifiers, strict,
                foreign_properties=loader.foreign_properties,
                raise_ex=False, skip_foreign_properties=loader.skip_schemas,
                strict_foreign_properties=strict_foreign_properties)
            if success:
                break
        if not success:
            # Second, raising pass: re-validate per root to collect
            # detailed error messages for the failure report.
            errors = []  # type: List[Text]
            for root in roots:
                # NOTE(review): `name` stays unbound if a root has neither
                # get_prop nor a name attribute — confirm that cannot occur.
                if hasattr(root, "get_prop"):
                    name = root.get_prop(u"name")
                elif hasattr(root, "name"):
                    name = root.name
                try:
                    validate.validate_ex(
                        root, item, loader.identifiers, strict,
                        foreign_properties=loader.foreign_properties,
                        raise_ex=True, skip_foreign_properties=loader.skip_schemas,
                        strict_foreign_properties=strict_foreign_properties)
                except validate.ClassValidationException as exc:
                    # A class-level mismatch is definitive: report only it.
                    errors = [sourceline.makeError(u"tried `%s` but\n%s" % (
                        name, validate.indent(str(exc), nolead=False)))]
                    break
                except validate.ValidationException as exc:
                    errors.append(sourceline.makeError(u"tried `%s` but\n%s" % (
                        name, validate.indent(str(exc), nolead=False))))
            objerr = sourceline.makeError(u"Invalid")
            for ident in loader.identifiers:
                if ident in item:
                    objerr = sourceline.makeError(
                        u"Object `%s` is not valid because"
                        % (relname(item[ident])))
                    break
            anyerrors.append(u"%s\n%s" %
                             (objerr, validate.indent(bullets(errors, "- "))))
    if anyerrors:
        raise validate.ValidationException(
            strip_dup_lineno(bullets(anyerrors, "* ")))
|
Validate a document using the provided schema.
|
entailment
|
def get_anon_name(rec):
    # type: (MutableMapping[Text, Any]) -> Text
    """Calculate a reproducible name for anonymous types.

    Named types pass their name straight through; enums and records are
    fingerprinted by hashing the concatenation of their symbols or field
    names; arrays get an empty name.

    @param rec: mapping describing the type.
    @raise validate.ValidationException: for any other type.
    """
    if "name" in rec:
        return rec["name"]
    atype = rec['type']
    if atype in ('enum', 'https://w3id.org/cwl/salad#enum'):
        # "".join avoids the quadratic cost of += concatenation in a loop.
        anon_name = "".join(rec["symbols"])
        return "enum_" + hashlib.sha1(anon_name.encode("UTF-8")).hexdigest()
    if atype in ('record', 'https://w3id.org/cwl/salad#record'):
        anon_name = "".join(field["name"] for field in rec["fields"])
        return "record_" + hashlib.sha1(anon_name.encode("UTF-8")).hexdigest()
    if atype in ('array', 'https://w3id.org/cwl/salad#array'):
        return ""
    raise validate.ValidationException("Expected enum or record, was %s" % rec['type'])
|
Calculate a reproducible name for anonymous types.
|
entailment
|
def replace_type(items, spec, loader, found, find_embeds=True, deepen=True):
    # type: (Any, Dict[Text, Any], Loader, Set[Text], bool, bool) -> Any
    """Recursively replace type references in *items* using the 'spec' mapping.

    @param items: schema fragment (mapping, sequence, or type-name string).
    @param spec: specialization map from source type name/URI to replacement.
    @param loader: used to expand vocabulary terms to fully qualified URIs.
    @param found: accumulator of type names already visited; also guards
        against infinite recursion on self-referencing record types.
    @param find_embeds: passed down as *deepen* when recursing into fields.
    @param deepen: when False, named records/enums are not descended into.
    """
    if isinstance(items, MutableMapping):
        # recursively check these fields for types to replace
        if items.get("type") in ("record", "enum") and items.get("name"):
            if items["name"] in found:
                # Already processed this named type; emit just its name.
                return items["name"]
            found.add(items["name"])
        if not deepen:
            return items
        # Shallow-copy so the caller's mapping is never mutated.
        items = copy.copy(items)
        if not items.get("name"):
            items["name"] = get_anon_name(items)
        for name in ("type", "items", "fields"):
            if name in items:
                items[name] = replace_type(
                    items[name], spec, loader, found, find_embeds=find_embeds,
                    deepen=find_embeds)
                if isinstance(items[name], MutableSequence):
                    items[name] = flatten(items[name])
        return items
    if isinstance(items, MutableSequence):
        # recursively transform list
        return [replace_type(i, spec, loader, found, find_embeds=find_embeds,
                             deepen=deepen) for i in items]
    if isinstance(items, string_types):
        # found a string which is a symbol corresponding to a type.
        replace_with = None
        if items in loader.vocab:
            # If it's a vocabulary term, first expand it to its fully qualified
            # URI
            items = loader.vocab[items]
        if items in spec:
            # Look up in specialization map
            replace_with = spec[items]
        if replace_with:
            # The replacement itself may need further specialization.
            return replace_type(replace_with, spec, loader, found, find_embeds=find_embeds)
        found.add(items)
        return items
|
Go through and replace types in the 'spec' mapping
|
entailment
|
def avro_name(url):  # type: (AnyStr) -> AnyStr
    """
    Turn a URL into an Avro-safe name.

    If the URL has no fragment, return the URL unchanged.  Otherwise
    return the part of the fragment after the last slash, or the whole
    fragment when it contains no slash.
    """
    fragment = urllib.parse.urldefrag(url)[1]
    if not fragment:
        return url
    return fragment.rsplit('/', 1)[-1]
|
Turn a URL into an Avro-safe name.
If the URL has no fragment, return this plain URL.
Extract either the last part of the URL fragment past the slash, otherwise
the whole fragment.
|
entailment
|
def make_valid_avro(items,  # type: Avro
                    alltypes,  # type: Dict[Text, Dict[Text, Any]]
                    found,  # type: Set[Text]
                    union=False  # type: bool
                    ):
    # type: (...) -> Union[Avro, Dict, Text]
    """Convert our schema to be more avro like.

    @param items: schema fragment (mapping, sequence, or type-name string).
    @param alltypes: every known type definition, keyed by name.
    @param found: names already emitted, so later references use the name
        only instead of re-embedding the full definition.
    @param union: when True, bare type-name strings may be expanded into
        their full definitions.
    """
    # Possibly could be integrated into our fork of avro/schema.py?
    if isinstance(items, MutableMapping):
        # Shallow-copy so the caller's mapping is never mutated.
        items = copy.copy(items)
        if items.get("name") and items.get("inVocab", True):
            items["name"] = avro_name(items["name"])
        if "type" in items and items["type"] in (
                "https://w3id.org/cwl/salad#record",
                "https://w3id.org/cwl/salad#enum", "record", "enum"):
            if (hasattr(items, "get") and items.get("abstract")) or ("abstract"
                                                                     in items):
                # Abstract types are kept as-is; they are never emitted twice.
                return items
            if items["name"] in found:
                # Already emitted: refer to the type by name only.
                return cast(Text, items["name"])
            found.add(items["name"])
        for field in ("type", "items", "values", "fields"):
            if field in items:
                items[field] = make_valid_avro(
                    items[field], alltypes, found, union=True)
        if "symbols" in items:
            items["symbols"] = [avro_name(sym) for sym in items["symbols"]]
        return items
    if isinstance(items, MutableSequence):
        ret = []
        for i in items:
            ret.append(make_valid_avro(i, alltypes, found, union=union))  # type: ignore
        return ret
    if union and isinstance(items, string_types):
        # A bare name inside a union: expand it if we know the definition
        # and have not emitted it yet.
        if items in alltypes and avro_name(items) not in found:
            return cast(Dict, make_valid_avro(alltypes[items], alltypes, found,
                                              union=union))
        items = avro_name(items)
    return items
|
Convert our schema to be more avro like.
|
entailment
|
def deepcopy_strip(item):  # type: (Any) -> Any
    """
    Make a deep copy of list and dict objects.

    Intentionally do not copy attributes.  This is to discard CommentedMap
    and CommentedSeq metadata which is very expensive with regular
    copy.deepcopy.
    """
    if isinstance(item, MutableMapping):
        # .items() works on any Mapping on Python 3; drops the six/past
        # `iteritems` compatibility helper the original relied on.
        return {k: deepcopy_strip(v) for k, v in item.items()}
    if isinstance(item, MutableSequence):
        return [deepcopy_strip(entry) for entry in item]
    # Scalars (and anything non-container) are returned as-is.
    return item
|
Make a deep copy of list and dict objects.
Intentionally do not copy attributes. This is to discard CommentedMap and
CommentedSeq metadata which is very expensive with regular copy.deepcopy.
|
entailment
|
def extend_and_specialize(items, loader):
    # type: (List[Dict[Text, Any]], Loader) -> List[Dict[Text, Any]]
    """
    Apply 'extend' and 'specialize' to fully materialize derived record types.

    @param items: list of schema type definitions (plain dicts).
    @param loader: Loader used when rewriting specialized type references.
    @return: a new list of fully materialized type definitions.
    """
    items = deepcopy_strip(items)
    types = {i["name"]: i for i in items}  # type: Dict[Text, Any]
    results = []
    for stype in items:
        if "extends" in stype:
            # Build the specializeFrom -> specializeTo substitution map.
            specs = {}  # type: Dict[Text, Text]
            if "specialize" in stype:
                for spec in aslist(stype["specialize"]):
                    specs[spec["specializeFrom"]] = spec["specializeTo"]
            # Accumulate inherited fields (records) / symbols (enums).
            exfields = []  # type: List[Text]
            exsym = []  # type: List[Text]
            for ex in aslist(stype["extends"]):
                if ex not in types:
                    raise Exception(
                        "Extends {} in {} refers to invalid base type.".format(
                            stype["extends"], stype["name"]))
                basetype = copy.copy(types[ex])
                if stype["type"] == "record":
                    if specs:
                        basetype["fields"] = replace_type(
                            basetype.get("fields", []), specs, loader, set())
                    # Tag inherited fields with their origin type.
                    for field in basetype.get("fields", []):
                        if "inherited_from" not in field:
                            field["inherited_from"] = ex
                    exfields.extend(basetype.get("fields", []))
                elif stype["type"] == "enum":
                    exsym.extend(basetype.get("symbols", []))
            if stype["type"] == "record":
                stype = copy.copy(stype)
                exfields.extend(stype.get("fields", []))
                stype["fields"] = exfields
                # Reject duplicate field names across the merged hierarchy.
                fieldnames = set()  # type: Set[Text]
                for field in stype["fields"]:
                    if field["name"] in fieldnames:
                        raise validate.ValidationException(
                            "Field name {} appears twice in {}".format(
                                field["name"], stype["name"]))
                    else:
                        fieldnames.add(field["name"])
            elif stype["type"] == "enum":
                stype = copy.copy(stype)
                exsym.extend(stype.get("symbols", []))
                # BUG FIX: store the merged list under "symbols" — the key
                # every reader uses; the original wrote to "symbol", which
                # silently dropped the inherited enum symbols.
                stype["symbols"] = exsym
            types[stype["name"]] = stype
        results.append(stype)
    ex_types = {}
    for result in results:
        ex_types[result["name"]] = result
    # Map each abstract base to the concrete types that extend it.
    extended_by = {}  # type: Dict[Text, Text]
    for result in results:
        if "extends" in result:
            for ex in aslist(result["extends"]):
                if ex_types[ex].get("abstract"):
                    add_dictlist(extended_by, ex, ex_types[result["name"]])
                add_dictlist(extended_by, avro_name(ex), ex_types[ex])
    for result in results:
        if result.get("abstract") and result["name"] not in extended_by:
            raise validate.ValidationException(
                "{} is abstract but missing a concrete subtype".format(
                    result["name"]))
    # Replace references to abstract bases with their concrete subtypes.
    for result in results:
        if "fields" in result:
            result["fields"] = replace_type(
                result["fields"], extended_by, loader, set())
    return results
|
Apply 'extend' and 'specialize' to fully materialize derived record types.
|
entailment
|
def make_avro_schema(i,  # type: List[Any]
                     loader  # type: Loader
                     ):  # type: (...) -> Names
    """
    All in one convenience function.

    Call make_avro() and make_avro_schema_from_avro() separately if you
    need the intermediate result for diagnostic output.
    """
    name_registry = Names()
    make_avsc_object(convert_to_dict(make_avro(i, loader)), name_registry)
    return name_registry
|
All in one convenience function.
Call make_avro() and make_avro_schema_from_avro() separately if you need
the intermediate result for diagnostic output.
|
entailment
|
def shortname(inputid):  # type: (Text) -> Text
    """Return the last segment of the provided fragment or path."""
    parsed = urllib.parse.urlparse(inputid)
    # Prefer the fragment when present, otherwise fall back to the path.
    source = parsed.fragment if parsed.fragment else parsed.path
    return source.split(u"/")[-1]
|
Returns the last segment of the provided fragment or path.
|
entailment
|
def print_inheritance(doc, stream):
    # type: (List[Dict[Text, Any]], IO) -> None
    """Write a GraphViz (DOT) inheritance graph for the supplied document."""
    stream.write("digraph {\n")
    for entry in doc:
        if entry["type"] != "record":
            continue
        label = name = shortname(entry["name"])
        fields = entry.get("fields", [])
        if fields:
            field_list = "\\l* ".join(
                shortname(field["name"]) for field in fields)
            label += "\\n* %s\\l" % field_list
        # Abstract types are drawn as ellipses, concrete ones as boxes.
        shape = "ellipse" if entry.get("abstract") else "box"
        stream.write("\"%s\" [shape=%s label=\"%s\"];\n"
                     % (name, shape, label))
        if "extends" in entry:
            for target in aslist(entry["extends"]):
                stream.write("\"%s\" -> \"%s\";\n"
                             % (shortname(target), name))
    stream.write("}\n")
|
Write a GraphViz inheritance graph for the supplied document.
|
entailment
|
def print_fieldrefs(doc, loader, stream):
    # type: (List[Dict[Text, Any]], Loader, IO) -> None
    """Write a GraphViz graph of the relationships between the fields."""
    obj = extend_and_specialize(doc, loader)
    # Primitive/builtin types are omitted from the graph.
    primitives = {
        "http://www.w3.org/2001/XMLSchema#string",
        "http://www.w3.org/2001/XMLSchema#boolean",
        "http://www.w3.org/2001/XMLSchema#int",
        "http://www.w3.org/2001/XMLSchema#long",
        "https://w3id.org/cwl/salad#null",
        "https://w3id.org/cwl/salad#enum",
        "https://w3id.org/cwl/salad#array",
        "https://w3id.org/cwl/salad#record",
        "https://w3id.org/cwl/salad#Any",
    }
    stream.write("digraph {\n")
    for entry in obj:
        if entry.get("abstract") or entry["type"] != "record":
            continue
        label = shortname(entry["name"])
        for field in entry.get("fields", []):
            found = set()  # type: Set[Text]
            field_name = shortname(field["name"])
            # Collect (without embedding) every type this field refers to.
            replace_type(field["type"], {}, loader, found, find_embeds=False)
            for each_type in found:
                if each_type not in primitives:
                    stream.write(
                        "\"%s\" -> \"%s\" [label=\"%s\"];\n"
                        % (label, shortname(each_type), field_name))
    stream.write("}\n")
|
Write a GraphViz graph of the relationships between the fields.
|
entailment
|
def get_other_props(all_props, reserved_props):
    # type: (Dict, Tuple) -> Optional[Dict]
    """
    Retrieve the non-reserved properties from a dictionary of properties.

    @args reserved_props: The set of reserved properties to exclude
    @return: a new dict without the reserved keys, or None when
        *all_props* does not behave like a mapping.
    """
    if not (hasattr(all_props, 'items') and callable(all_props.items)):
        return None
    return {k: v for k, v in all_props.items() if k not in reserved_props}
|
Retrieve the non-reserved properties from a dictionary of properties
@args reserved_props: The set of reserved properties to exclude
|
entailment
|
def make_avsc_object(json_data, names=None):
    # type: (Union[Dict[Text, Text], List[Any], Text], Optional[Names]) -> Schema
    """
    Build Avro Schema from data parsed out of JSON string.

    @arg json_data: mapping (single schema), list (union), or bare
        type-name string (primitive).
    @arg names: A Name object (tracks seen names and default space)
    @return: the corresponding Schema object.
    @raise SchemaParseException: for unknown or malformed type data.
    """
    if names is None:
        names = Names()
    assert isinstance(names, Names)
    # JSON object (non-union)
    if hasattr(json_data, 'get') and callable(json_data.get):  # type: ignore
        assert isinstance(json_data, Dict)
        atype = cast(Text, json_data.get('type'))
        # Everything that is not a reserved schema key is carried along.
        other_props = get_other_props(json_data, SCHEMA_RESERVED_PROPS)
        if atype in PRIMITIVE_TYPES:
            return PrimitiveSchema(atype, other_props)
        if atype in NAMED_TYPES:
            name = cast(Text, json_data.get('name'))
            namespace = cast(Text, json_data.get('namespace',
                                                 names.default_namespace))
            if atype == 'enum':
                symbols = cast(List[Text], json_data.get('symbols'))
                doc = json_data.get('doc')
                return EnumSchema(name, namespace, symbols, names, doc, other_props)
            if atype in ['record', 'error']:
                fields = cast(List, json_data.get('fields'))
                doc = json_data.get('doc')
                return RecordSchema(name, namespace, fields, names, atype, doc, other_props)
            raise SchemaParseException('Unknown Named Type: %s' % atype)
        if atype in VALID_TYPES:
            if atype == 'array':
                items = cast(List, json_data.get('items'))
                return ArraySchema(items, names, other_props)
        if atype is None:
            raise SchemaParseException('No "type" property: %s' % json_data)
        raise SchemaParseException('Undefined type: %s' % atype)
    # JSON array (union)
    if isinstance(json_data, list):
        return UnionSchema(json_data, names)
    # JSON string (primitive)
    if json_data in PRIMITIVE_TYPES:
        return PrimitiveSchema(cast(Text, json_data))
    # not for us!
    fail_msg = "Could not make an Avro Schema object from %s." % json_data
    raise SchemaParseException(fail_msg)
|
Build Avro Schema from data parsed out of JSON string.
@arg names: A Name object (tracks seen names and default space)
|
entailment
|
def get_space(self):
    # type: () -> Optional[Text]
    """Derive the namespace portion of the stored full name.

    Returns None when no full name is set, the dotted prefix when the
    name is qualified, and the empty string for an unqualified name.
    """
    full = self._full
    if full is None:
        return None
    return full.rsplit(".", 1)[0] if full.find('.') > 0 else ""
|
Back out a namespace from full name.
|
entailment
|
def add_name(self, name_attr, space_attr, new_schema):
    # type: (Text, Optional[Text], NamedSchema) -> Name
    """
    Register a new named schema in this name set.
    @arg name_attr: name value read in schema
    @arg space_attr: namespace value read in schema.
    @return: the Name that was just added.
    @raises SchemaParseException: when the name is reserved or duplicated.
    """
    candidate = Name(name_attr, space_attr, self.default_namespace)
    if candidate.fullname in VALID_TYPES:
        raise SchemaParseException(
            '%s is a reserved type name.' % candidate.fullname)
    if candidate.fullname in self.names:
        raise SchemaParseException(
            'The name "%s" is already in use.' % candidate.fullname)
    self.names[candidate.fullname] = new_schema
    return candidate
|
Add a new schema object to the name set.
@arg name_attr: name value read in schema
@arg space_attr: namespace value read in schema.
@return: the Name that was just added.
|
entailment
|
def make_field_objects(field_data, names):
    # type: (List[Dict[Text, Text]], Names) -> List[Field]
    """Build Field objects from parsed field dicts (also used for message parameters)."""
    field_objects = []
    seen_names = []  # type: List[Text]
    for field in field_data:
        # guard clause: every field must be dict-like
        if not (hasattr(field, 'get') and callable(field.get)):
            raise SchemaParseException('Not a valid field: %s' % field)
        atype = cast(Text, field.get('type'))
        name = cast(Text, field.get('name'))
        # null values can have a default value of None
        has_default = 'default' in field
        default = field.get('default') if has_default else None
        order = field.get('order')
        doc = field.get('doc')
        other_props = get_other_props(field, FIELD_RESERVED_PROPS)
        new_field = Field(atype, name, has_default, default, order, names, doc,
                          other_props)
        # make sure field name has not been used yet
        if new_field.name in seen_names:
            raise SchemaParseException(
                'Field name %s already in use.' % new_field.name)
        seen_names.append(new_field.name)
        field_objects.append(new_field)
    return field_objects
|
We're going to need to make message parameters too.
|
entailment
|
def search_function(root1, q, s, f, l, o='g'):
    """
    function to get links

    Runs search() with the given query/site/filetype/limit/engine,
    stores the result in the module-level `links`, then tears down the
    temporary progress-bar window so task()'s mainloop returns.
    """
    global links
    links = search(q, o, s, f, l)
    root1.destroy()
    root1.quit()
|
function to get links
|
entailment
|
def task(ft):
    """
    to create loading progress bar

    Packs an indeterminate ttk progress bar into *ft* and enters its
    mainloop; returns once the frame is destroyed by the worker thread.
    """
    ft.pack(expand = True, fill = BOTH, side = TOP)
    pb_hD = ttk.Progressbar(ft, orient = 'horizontal', mode = 'indeterminate')
    pb_hD.pack(expand = True, fill = BOTH, side = TOP)
    pb_hD.start(50)
    ft.mainloop()
|
to create loading progress bar
|
entailment
|
def download_content_gui(**args):
    """
    function to fetch links and download them

    Runs the search in a background thread while the Tk progress bar
    spins, then downloads the found links either in parallel or serially.
    """
    global row
    # default download directory is derived from the query text
    if not args ['directory']:
        args ['directory'] = args ['query'].replace(' ', '-')
    root1 = Frame(root)
    # search in a worker thread so the progress bar stays responsive
    t1 = threading.Thread(target = search_function, args = (root1,
    args['query'], args['website'], args['file_type'], args['limit'],args['option']))
    t1.start()
    task(root1)
    t1.join()
    #new frame for progress bar
    row = Frame(root)
    row.pack()
    if args['parallel']:
        download_parallel_gui(row, links, args['directory'], args['min_file_size'],
        args['max_file_size'], args['no_redirects'])
    else:
        download_series_gui(row, links, args['directory'], args['min_file_size'],
        args['max_file_size'], args['no_redirects'])
|
function to fetch links and download them
|
entailment
|
def main():
    """
    main function: apply the ttk 'clam' theme, build the input form and
    enter the Tk event loop.
    """
    style = ttk.Style()
    style.theme_use('clam')
    # makeform() builds all widgets onto the root window; its return
    # value was never used, so the unused local binding is dropped
    makeform(root)
    root.mainloop()
|
main function
|
entailment
|
def click_download(self, event):
    """
    event for download button

    Collects every widget value into the module-level args dict, runs
    the threat-extension check, then starts the download flow.
    """
    args ['parallel'] = self.p.get()
    args ['file_type'] = self.optionmenu.get()
    args ['no_redirects'] = self.t.get()
    args ['query'] = self.entry_query.get()
    # numeric entries arrive as strings; a non-numeric value raises here
    args ['min_file_size'] = int( self.entry_min.get())
    args ['max_file_size'] = int( self.entry_max.get())
    args ['limit'] = int( self.entry_limit.get())
    args ['website']= self.entry_website.get()
    args ['option']= self.engine.get()
    print(args)
    # NOTE(review): check_threat()'s return value is ignored, so the
    # download proceeds even when the user cancels — confirm intent.
    self.check_threat()
    download_content_gui( **args )
|
event for download button
|
entailment
|
def on_entry_click(self, event):
    """
    function that gets called whenever entry is clicked

    Clears the grey placeholder text on first click so the user can
    type real (black) input.
    """
    # placeholder text is styled grey; real user input is black
    if event.widget.config('fg') [4] == 'grey':
        event.widget.delete(0, "end" ) # delete all the text in the entry
        event.widget.insert(0, '') #Insert blank for user input
        event.widget.config(fg = 'black')
|
function that gets called whenever entry is clicked
|
entailment
|
def on_focusout(self, event, a):
    """
    function that gets called whenever anywhere except entry is clicked

    Restores the grey placeholder text (default_text[a]) when the entry
    loses focus while empty.
    """
    if event.widget.get() == '':
        event.widget.insert(0, default_text[a])
        event.widget.config(fg = 'grey')
|
function that gets called whenever anywhere except entry is clicked
|
entailment
|
def check_threat(self):
    """
    function to check input filetype against threat extensions list

    Returns True when it is OK to proceed: either the selected file type
    is not on the high-threat list, or the user confirmed the warning
    dialog.
    """
    is_high_threat = False
    # THREAT_EXTENSIONS maps descriptions to an extension or a list of them
    for val in THREAT_EXTENSIONS.values():
        if type(val) == list:
            for el in val:
                if self.optionmenu.get() == el:
                    is_high_threat = True
                    break
        else:
            if self.optionmenu.get() == val:
                is_high_threat = True
                break
    if is_high_threat == True:
        # askokcancel returns True on "OK", which clears the flag
        is_high_threat = not askokcancel('FILE TYPE', 'WARNING: Downloading this \
file type may expose you to a heightened security risk.\nPress\
"OK" to proceed or "CANCEL" to exit')
    return not is_high_threat
|
function to check input filetype against threat extensions list
|
entailment
|
def ask_dir(self):
    """
    dialogue box for choosing directory

    Stores the chosen path in args['directory'] and mirrors it in the
    directory label variable.
    """
    args ['directory'] = askdirectory(**self.dir_opt)
    self.dir_text.set(args ['directory'])
|
dialogue box for choosing directory
|
entailment
|
def get_google_links(limit, params, headers):
    """
    Collect up to *limit* result links from Google.

    Google paginates results ten per page via the 'start' query
    parameter, so one request is issued per page. Note: *params* is
    mutated in place ('start' key).
    """
    collected = []
    for offset in range(0, limit, 10):
        params['start'] = offset
        resp = s.get("https://www.google.com/search", params=params, headers=headers)
        collected.extend(scrape_links(resp.content, engine='g'))
    return collected[:limit]
|
function to fetch links equal to limit
every Google search result page has a start index.
every page contains 10 search results.
|
entailment
|
def get_duckduckgo_links(limit, params, headers):
    """
    function to fetch links equal to limit
    duckduckgo pagination is not static, so there is a limit on
    maximum number of links that can be scraped
    """
    # a single results page is fetched; scrape_links extracts the hrefs
    resp = s.get('https://duckduckgo.com/html', params = params, headers = headers)
    links = scrape_links(resp.content, engine = 'd')
    return links[:limit]
|
function to fetch links equal to limit
duckduckgo pagination is not static, so there is a limit on
maximum number of links that can be scraped
|
entailment
|
def scrape_links(html, engine):
    """
    Extract file links from a search-results HTML page.

    engine 'd' parses DuckDuckGo result anchors; engine 'g' parses
    Google result headings. GitHub '/blob/' URLs are rewritten to their
    '/raw/' equivalents in both cases.
    """
    soup = BeautifulSoup(html, 'lxml')
    links = []
    if engine == 'd':
        for anchor in soup.findAll('a', {'class': 'result__a'}):
            # strip the DuckDuckGo redirect prefix (first 15 chars)
            links.append(anchor.get('href')[15:].replace('/blob/', '/raw/'))
    elif engine == 'g':
        for heading in soup.findAll('h3', {'class': 'r'}):
            # strip Google's '/url?q=' prefix and trailing tracking params
            target = heading.a['href'][7:].split('&')[0]
            links.append(target.replace('/blob/', '/raw/'))
    return links
|
function to scrape file links from html response
|
entailment
|
def get_url_nofollow(url):
    """
    Return the HTTP status code of *url*, or 0 on any failure.

    Credits: http://blog.jasonantman.com/2013/06/python-script-to-check-a-list-of-urls-for-return-code-and-final-return-code-if-redirected/
    """
    try:
        return urlopen(url).getcode()
    except HTTPError as e:
        # the server answered with an error status; report it as-is
        return e.code
    except Exception:
        # BUG FIX: the original bare "except:" also swallowed
        # KeyboardInterrupt/SystemExit; unreachable hosts, malformed
        # URLs, timeouts etc. are still reported as a dead link (0)
        return 0
|
function to get return code of a url
Credits: http://blog.jasonantman.com/2013/06/python-script-to-check-a-list-of-urls-for-return-code-and-final-return-code-if-redirected/
|
entailment
|
def validate_links(links):
    """
    Keep only http(s) links that respond, printing each status code.

    Exits the process when no http(s) link is present at all.
    """
    # BUG FIX: the original used `link[:7] in "http://"`, a *substring*
    # test that also accepted short junk like "http"; use startswith
    valid_links = [link for link in links
                   if link.startswith(("http://", "https://"))]
    if not valid_links:
        print("No files found.")
        sys.exit(0)
    # checking valid urls for return code
    urls = {}
    for link in valid_links:
        # BUG FIX: `'github.com' and '/blob/' in link` tested only the
        # second condition ('github.com' is a truthy constant)
        if 'github.com' in link and '/blob/' in link:
            link = link.replace('/blob/', '/raw/')
        urls[link] = {'code': get_url_nofollow(link)}
    # printing valid urls with return code 200
    available_urls = []
    for url in urls:
        print("code: %d\turl: %s" % (urls[url]['code'], url))
        if urls[url]['code'] != 0:
            available_urls.append(url)
    return available_urls
|
function to validate urls based on http(s) prefix and return code
|
entailment
|
def search(query, engine='g', site="", file_type = 'pdf', limit = 10):
    """
    main function to search for links and return valid ones

    :param query: free-text search terms
    :param engine: 'g' for Google, 'd' for DuckDuckGo
    :param site: optional site restriction for the query
    :param file_type: extension passed to the filetype: operator
    :param limit: maximum number of links to fetch
    :return: list of validated links
    """
    if site == "":
        search_query = "filetype:{0} {1}".format(file_type, query)
    else:
        search_query = "site:{0} filetype:{1} {2}".format(site, file_type, query)
    # BUG FIX: the header must be named 'User-Agent' (with a hyphen) —
    # the original 'User Agent' key was not recognized as the UA header;
    # the backslash-continued literal also embedded stray whitespace
    headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:53.0) '
                      'Gecko/20100101 Firefox/53.0'
    }
    if engine == "g":
        params = {
            'q': search_query,
            'start': 0,
        }
        links = get_google_links(limit, params, headers)
    elif engine == "d":
        params = {
            'q': search_query,
        }
        links = get_duckduckgo_links(limit, params, headers)
    else:
        print("Wrong search engine selected!")
        sys.exit()
    valid_links = validate_links(links)
    return valid_links
|
main function to search for links and return valid ones
|
entailment
|
def check_threats(**args):
    """
    Return True when the requested file type is on the high-threat list.
    """
    wanted = args['file_type']
    # THREAT_EXTENSIONS maps descriptions to an extension or a list of them
    for val in THREAT_EXTENSIONS.values():
        if type(val) == list:
            if wanted in val:
                return True
        elif wanted == val:
            return True
    return False
|
function to check input filetype against threat extensions list
|
entailment
|
def validate_args(**args):
    """
    Ensure a query was supplied and fill missing arguments with defaults.

    Exits the process with a friendly message when no query is present.
    :return: the completed args dict
    """
    # BUG FIX: use .get() so a completely missing 'query' key prints the
    # friendly message instead of raising KeyError
    if not args.get('query'):
        print("\nMissing required query argument.")
        sys.exit()
    for key in DEFAULTS:
        if key not in args:
            args[key] = DEFAULTS[key]
    return args
|
function to check if input query is not None
and set missing arguments to default value
|
entailment
|
def download_content(**args):
    """
    Entry point: validate arguments, search for links and download them.
    """
    args = validate_args(**args)
    if not args['directory']:
        args['directory'] = args['query'].replace(' ', '-')
    print("Downloading {0} {1} files on topic {2} from {3} and saving to directory: {4}"
          .format(args['limit'], args['file_type'], args['query'], args['website'], args['directory']))
    links = search(args['query'], args['engine'], args['website'], args['file_type'], args['limit'])
    # pick the downloader once instead of duplicating the argument list
    downloader = download_parallel if args['parallel'] else download_series
    downloader(links, args['directory'], args['min_file_size'], args['max_file_size'], args['no_redirects'])
|
main function to fetch links and download them
|
entailment
|
def show_filetypes(extensions):
    """
    Print every valid file extension together with its description.

    *extensions* maps a description to either one extension string or a
    list of extension strings.
    """
    for descr, exts in extensions.items():
        shown = ", ".join(str(e) for e in exts) if type(exts) == list else exts
        print("{0:4}: {1}".format(shown, descr))
|
function to show valid file extensions
|
entailment
|
def validate_ex(expected_schema, # type: Schema
                datum, # type: Any
                identifiers=None, # type: List[Text]
                strict=False, # type: bool
                foreign_properties=None, # type: Set[Text]
                raise_ex=True, # type: bool
                strict_foreign_properties=False, # type: bool
                logger=_logger, # type: logging.Logger
                skip_foreign_properties=False # type: bool
                ):
    # type: (...) -> bool
    """Determine if a python datum is an instance of a schema.

    When raise_ex is True, mismatches raise a ValidationException with a
    human-readable message; when False, the function returns False
    instead. Dispatches first on the primitive schema_type string, then
    on the concrete avro schema class.
    """
    if not identifiers:
        identifiers = []
    if not foreign_properties:
        foreign_properties = set()
    schema_type = expected_schema.type
    # --- primitive types, dispatched on the type string ---
    if schema_type == 'null':
        if datum is None:
            return True
        else:
            if raise_ex:
                raise ValidationException(u"the value is not null")
            else:
                return False
    elif schema_type == 'boolean':
        if isinstance(datum, bool):
            return True
        else:
            if raise_ex:
                raise ValidationException(u"the value is not boolean")
            else:
                return False
    elif schema_type == 'string':
        if isinstance(datum, six.string_types):
            return True
        elif isinstance(datum, bytes):
            # bytes are accepted if they decode as UTF-8
            datum = datum.decode(u"utf-8")
            return True
        else:
            if raise_ex:
                raise ValidationException(u"the value is not string")
            else:
                return False
    elif schema_type == 'int':
        if (isinstance(datum, six.integer_types)
                and INT_MIN_VALUE <= datum <= INT_MAX_VALUE):
            return True
        else:
            if raise_ex:
                raise ValidationException(u"`%s` is not int" % vpformat(datum))
            else:
                return False
    elif schema_type == 'long':
        if ((isinstance(datum, six.integer_types))
                and LONG_MIN_VALUE <= datum <= LONG_MAX_VALUE):
            return True
        else:
            if raise_ex:
                raise ValidationException(
                    u"the value `%s` is not long" % vpformat(datum))
            else:
                return False
    elif schema_type in ['float', 'double']:
        if (isinstance(datum, six.integer_types)
                or isinstance(datum, float)):
            return True
        else:
            if raise_ex:
                raise ValidationException(
                    u"the value `%s` is not float or double" % vpformat(datum))
            else:
                return False
    # --- enums, including the special 'Any' and 'Expression' names ---
    elif isinstance(expected_schema, avro.schema.EnumSchema):
        if expected_schema.name == "Any":
            if datum is not None:
                return True
            else:
                if raise_ex:
                    raise ValidationException(u"'Any' type must be non-null")
                else:
                    return False
        if not isinstance(datum, six.string_types):
            if raise_ex:
                raise ValidationException(
                    u"value is a %s but expected a string" % (type(datum).__name__))
            else:
                return False
        if expected_schema.name == "Expression":
            # an Expression must contain at least one $() or ${} block
            if "$(" in datum or "${" in datum:
                return True
            if raise_ex:
                raise ValidationException(u"value `%s` does not contain an expression in the form $() or ${}" % datum)
            else:
                return False
        if datum in expected_schema.symbols:
            return True
        else:
            if raise_ex:
                raise ValidationException(u"the value %s is not a valid %s, expected %s%s" % (vpformat(datum), expected_schema.name,
                                          "one of " if len(expected_schema.symbols) > 1 else "",
                                          "'" + "', '".join(expected_schema.symbols) + "'"))
            else:
                return False
    # --- arrays: validate each item, reporting the failing index ---
    elif isinstance(expected_schema, avro.schema.ArraySchema):
        if isinstance(datum, MutableSequence):
            for i, d in enumerate(datum):
                try:
                    sl = SourceLine(datum, i, ValidationException)
                    if not validate_ex(expected_schema.items, d, identifiers,
                                       strict=strict,
                                       foreign_properties=foreign_properties,
                                       raise_ex=raise_ex,
                                       strict_foreign_properties=strict_foreign_properties,
                                       logger=logger,
                                       skip_foreign_properties=skip_foreign_properties):
                        return False
                except ValidationException as v:
                    if raise_ex:
                        raise sl.makeError(
                            six.text_type("item is invalid because\n%s" % (indent(str(v)))))
                    else:
                        return False
            return True
        else:
            if raise_ex:
                raise ValidationException(u"the value %s is not a list, expected list of %s" % (
                    vpformat(datum), friendly(expected_schema.items)))
            else:
                return False
    # --- unions: first a cheap pass, then a second pass to build errors ---
    elif isinstance(expected_schema, avro.schema.UnionSchema):
        for s in expected_schema.schemas:
            if validate_ex(s, datum, identifiers, strict=strict, raise_ex=False,
                           strict_foreign_properties=strict_foreign_properties,
                           logger=logger, skip_foreign_properties=skip_foreign_properties):
                return True
        if not raise_ex:
            return False
        errors = []  # type: List[Text]
        checked = []
        for s in expected_schema.schemas:
            # skip alternatives that cannot match the datum's basic shape
            if isinstance(datum, MutableSequence) and not isinstance(s, avro.schema.ArraySchema):
                continue
            elif isinstance(datum, MutableMapping) and not isinstance(s, avro.schema.RecordSchema):
                continue
            elif (isinstance(  # type: ignore
                    datum, (bool, six.integer_types, float, six.string_types))
                    and isinstance(s, (avro.schema.ArraySchema,
                                       avro.schema.RecordSchema))):
                continue
            elif datum is not None and s.type == "null":
                continue
            checked.append(s)
            try:
                validate_ex(s, datum, identifiers, strict=strict,
                            foreign_properties=foreign_properties,
                            raise_ex=True,
                            strict_foreign_properties=strict_foreign_properties,
                            logger=logger, skip_foreign_properties=skip_foreign_properties)
            except ClassValidationException as e:
                # a class matched but its fields failed: propagate directly
                raise
            except ValidationException as e:
                errors.append(six.text_type(e))
        if bool(errors):
            raise ValidationException(bullets(["tried %s but\n%s" % (friendly(
                checked[i]), indent(errors[i])) for i in range(0, len(errors))], "- "))
        else:
            raise ValidationException("value is a %s, expected %s" % (
                type(datum).__name__, friendly(expected_schema)))
    # --- records: match the 'class' discriminator, then each field ---
    elif isinstance(expected_schema, avro.schema.RecordSchema):
        if not isinstance(datum, MutableMapping):
            if raise_ex:
                raise ValidationException(u"is not a dict")
            else:
                return False
        classmatch = None
        for f in expected_schema.fields:
            if f.name in ("class",):
                d = datum.get(f.name)
                if not d:
                    if raise_ex:
                        raise ValidationException(
                            u"Missing '%s' field" % (f.name))
                    else:
                        return False
                if expected_schema.name != d:
                    if raise_ex:
                        raise ValidationException(
                            u"Expected class '%s' but this is '%s'" % (expected_schema.name, d))
                    else:
                        return False
                classmatch = d
                break
        errors = []
        for f in expected_schema.fields:
            if f.name in ("class",):
                continue
            if f.name in datum:
                fieldval = datum[f.name]
            else:
                try:
                    fieldval = f.default
                except KeyError:
                    fieldval = None
            try:
                sl = SourceLine(datum, f.name, six.text_type)
                if not validate_ex(f.type, fieldval, identifiers, strict=strict,
                                   foreign_properties=foreign_properties,
                                   raise_ex=raise_ex,
                                   strict_foreign_properties=strict_foreign_properties,
                                   logger=logger, skip_foreign_properties=skip_foreign_properties):
                    return False
            except ValidationException as v:
                if f.name not in datum:
                    errors.append(u"missing required field `%s`" % f.name)
                else:
                    errors.append(sl.makeError(u"the `%s` field is not valid because\n%s" % (
                        f.name, indent(str(v)))))
        # report keys in the datum that no schema field accounts for
        for d in datum:
            found = False
            for f in expected_schema.fields:
                if d == f.name:
                    found = True
            if not found:
                sl = SourceLine(datum, d, six.text_type)
                # keys starting with @ or $ and declared identifiers are exempt
                if d not in identifiers and d not in foreign_properties and d[0] not in ("@", "$"):
                    if (d not in identifiers and strict) and (
                            d not in foreign_properties and strict_foreign_properties and not skip_foreign_properties) and not raise_ex:
                        return False
                    split = urllib.parse.urlsplit(d)
                    if split.scheme:
                        # absolute-URI keys are extension (foreign) properties
                        if not skip_foreign_properties:
                            err = sl.makeError(u"unrecognized extension field `%s`%s.%s"
                                               % (d,
                                                  " and strict_foreign_properties checking is enabled"
                                                  if strict_foreign_properties else "",
                                                  "\nForeign properties from $schemas:\n  %s" % "\n  ".join(sorted(foreign_properties))
                                                  if len(foreign_properties) > 0 else ""))
                            if strict_foreign_properties:
                                errors.append(err)
                            elif len(foreign_properties) > 0:
                                logger.warning(strip_dup_lineno(err))
                    else:
                        err = sl.makeError(u"invalid field `%s`, expected one of: %s" % (
                            d, ", ".join("'%s'" % fn.name for fn in expected_schema.fields)))
                        if strict:
                            errors.append(err)
                        else:
                            logger.warning(err)
        if bool(errors):
            if raise_ex:
                if classmatch:
                    raise ClassValidationException(bullets(errors, "* "))
                else:
                    raise ValidationException(bullets(errors, "* "))
            else:
                return False
        else:
            return True
    if raise_ex:
        raise ValidationException(u"Unrecognized schema_type %s" % schema_type)
    else:
        return False
|
Determine if a python datum is an instance of a schema.
|
entailment
|
def json_dump(obj, # type: Any
              fp, # type: IO[str]
              **kwargs # type: Any
              ): # type: (...) -> None
    """ Force use of unicode.

    Converts *obj* via convert_to_dict() and writes it to *fp* with
    json.dump, adding the utf-8 encoding keyword on Python 2.
    """
    if six.PY2:
        # Python 2's json.dump needs an explicit encoding to emit unicode
        kwargs['encoding'] = 'utf-8'
    json.dump(convert_to_dict(obj), fp, **kwargs)
|
Force use of unicode.
|
entailment
|
def json_dumps(obj, # type: Any
               **kwargs # type: Any
               ): # type: (...) -> str
    """ Force use of unicode.

    Serializes convert_to_dict(obj) with json.dumps, adding the utf-8
    encoding keyword on Python 2.
    """
    if six.PY2:
        # Python 2's json.dumps needs an explicit encoding to emit unicode
        kwargs['encoding'] = 'utf-8'
    return json.dumps(convert_to_dict(obj), **kwargs)
|
Force use of unicode.
|
entailment
|
def codegen(lang, # type: str
            i, # type: List[Dict[Text, Any]]
            schema_metadata, # type: Dict[Text, Any]
            loader # type: Loader
            ): # type: (...) -> None
    """Generate classes with loaders for the given Schema Salad description.

    @arg lang: target language, 'python' or 'java'
    @arg i: list of schema records as parsed from the salad document
    @arg schema_metadata: document metadata (used for the $base/id)
    @raises Exception: for an unsupported target language
    """
    j = schema.extend_and_specialize(i, loader)
    gen = None  # type: Optional[CodeGenBase]
    if lang == "python":
        gen = PythonCodeGen(sys.stdout)
    elif lang == "java":
        gen = JavaCodeGen(schema_metadata.get("$base", schema_metadata.get("id")))
    else:
        raise Exception("Unsupported code generation language '%s'" % lang)
    assert gen is not None
    gen.prologue()
    document_roots = []
    # first pass: register a loader + vocab entry for every named type
    for rec in j:
        if rec["type"] in ("enum", "record"):
            gen.type_loader(rec)
            gen.add_vocab(shortname(rec["name"]), rec["name"])
    # second pass: emit enum symbols and full record classes
    for rec in j:
        if rec["type"] == "enum":
            for symbol in rec["symbols"]:
                gen.add_vocab(shortname(symbol), symbol)
        if rec["type"] == "record":
            if rec.get("documentRoot"):
                document_roots.append(rec["name"])
            field_names = []
            for field in rec.get("fields", []):
                field_names.append(shortname(field["name"]))
            idfield = ""
            # the field whose jsonldPredicate is "@id" names the record
            for field in rec.get("fields", []):
                if field.get("jsonldPredicate") == "@id":
                    idfield = field.get("name")
            gen.begin_class(rec["name"], aslist(rec.get("extends", [])), rec.get("doc", ""),
                            rec.get("abstract", False), field_names, idfield)
            gen.add_vocab(shortname(rec["name"]), rec["name"])
            # declare the id field first (only one is expected)
            for field in rec.get("fields", []):
                if field.get("jsonldPredicate") == "@id":
                    fieldpred = field["name"]
                    optional = bool("https://w3id.org/cwl/salad#null" in field["type"])
                    uri_loader = gen.uri_loader(gen.type_loader(field["type"]), True, False, None)
                    gen.declare_id_field(fieldpred, uri_loader, field.get("doc"), optional)
                    break
            # then wrap each remaining field's loader per its jsonldPredicate
            for field in rec.get("fields", []):
                optional = bool("https://w3id.org/cwl/salad#null" in field["type"])
                type_loader = gen.type_loader(field["type"])
                jld = field.get("jsonldPredicate")
                fieldpred = field["name"]
                if isinstance(jld, MutableMapping):
                    ref_scope = jld.get("refScope")
                    if jld.get("typeDSL"):
                        type_loader = gen.typedsl_loader(type_loader, ref_scope)
                    elif jld.get("_type") == "@id":
                        type_loader = gen.uri_loader(type_loader, jld.get("identity", False),
                                                     False, ref_scope)
                    elif jld.get("_type") == "@vocab":
                        type_loader = gen.uri_loader(type_loader, False, True, ref_scope)
                    map_subject = jld.get("mapSubject")
                    if map_subject:
                        type_loader = gen.idmap_loader(
                            field["name"], type_loader, map_subject, jld.get("mapPredicate"))
                    if "_id" in jld and jld["_id"][0] != "@":
                        fieldpred = jld["_id"]
                if jld == "@id":
                    # the id field was already declared above
                    continue
                gen.declare_field(fieldpred, type_loader, field.get("doc"), optional)
            gen.end_class(rec["name"], field_names)
    # the document root accepts any root record or an array of them
    root_type = list(document_roots)
    root_type.append({
        "type": "array",
        "items": document_roots
    })
    gen.epilogue(gen.type_loader(root_type))
|
Generate classes with loaders for the given Schema Salad description.
|
entailment
|
def find(self, name):
    """
    Return a list of subset of VM that match the pattern name
    @param name (str): the vm name of the virtual machine
    @param name (Obj): the vm object that represent the virtual
        machine (can be Pro or Smart)
    @return (list): the subset containing the search result.
    """
    # BUG FIX: the original compared name.__class__ against a *string*
    # with "is" (always False), so VM objects were treated as plain
    # strings; detect VM objects structurally instead
    if hasattr(name, 'vm_name'):
        pattern = name.vm_name
    else:
        pattern = name
    # NOTE(review): the result is also cached on the instance for
    # backward compatibility with callers that read last_search_result
    self.last_search_result = [vm for vm in self if pattern in vm.vm_name]
    return self.last_search_result
|
Return a list of subset of VM that match the pattern name
@param name (str): the vm name of the virtual machine
@param name (Obj): the vm object that represent the virtual
machine (can be Pro or Smart)
@return (list): the subset containing the serach result.
|
entailment
|
def reinitialize(self, admin_password=None, debug=False, ConfigureIPv6=False, OSTemplateID=None):
    """
    Reinitialize a VM.
    :param admin_password: Administrator password.
    :param debug: Flag to enable debug output.
    :param ConfigureIPv6: Flag to enable IPv6 on the VM.
    :param OSTemplateID: TemplateID to reinitialize the VM with.
    :return: True in case of success, otherwise False
    :type admin_password: str
    :type debug: bool
    :type ConfigureIPv6: bool
    :type OSTemplateID: int
    """
    data = dict(
        AdministratorPassword=admin_password,
        ServerId=self.sid,
        ConfigureIPv6=ConfigureIPv6
    )
    if OSTemplateID is not None:
        data.update(OSTemplateID=OSTemplateID)
    assert data['AdministratorPassword'] is not None, 'Error reinitializing VM: no admin password specified.'
    assert data['ServerId'] is not None, 'Error reinitializing VM: no Server Id specified.'
    json_scheme = self.interface.gen_def_json_scheme('SetEnqueueReinitializeServer', method_fields=data)
    json_obj = self.interface.call_method_post('SetEnqueueReinitializeServer', json_scheme=json_scheme, debug=debug)
    # BUG FIX: the original tested json_obj['Success'] is 'True' (string
    # identity), which is always False for a boolean API response
    return json_obj['Success'] is True
|
Reinitialize a VM.
:param admin_password: Administrator password.
:param debug: Flag to enable debug output.
:param ConfigureIPv6: Flag to enable IPv6 on the VM.
:param OSTemplateID: TemplateID to reinitialize the VM with.
:return: True in case of success, otherwise False
:type admin_password: str
:type debug: bool
:type ConfigureIPv6: bool
:type OSTemplateID: int
|
entailment
|
def login(self, username, password, load=True):
    """
    Set the authentication data in the object, and if load is True
    (default is True) it also retrieve the ip list and the vm list
    in order to build the internal objects list.
    @param (str) username: username of the cloud
    @param (str) password: password of the cloud
    @param (bool) load: define if pre cache the objects.
    @return: None
    """
    self.auth = Auth(username, password)
    if load is True:
        # fetch IPs first: get_servers() reads self.iplist
        self.get_ip()
        self.get_servers()
|
Set the authentication data in the object, and if load is True
(default is True) it also retrieve the ip list and the vm list
in order to build the internal objects list.
@param (str) username: username of the cloud
@param (str) password: password of the cloud
@param (bool) load: define if pre cache the objects.
@return: None
|
entailment
|
def poweroff_server(self, server=None, server_id=None):
    """
    Poweroff a VM. If possible to pass the VM object or simply the ID
    of the VM that we want to turn on.
    Args:
        server: VM Object that represent the VM to power off,
        server_id: Int or Str representing the ID of the VM to power off.
    Returns:
        True if the API call reports success, else False
    Raises:
        Exception: when neither server nor server_id is given.
    """
    # BUG FIX: guard *before* dereferencing — the original evaluated
    # server.sid when both arguments were None, raising AttributeError
    # instead of the intended Exception
    if server is None and server_id is None:
        raise Exception('No Server Specified.')
    sid = server_id if server_id is not None else server.sid
    json_scheme = self.gen_def_json_scheme('SetEnqueueServerPowerOff', dict(ServerId=sid))
    json_obj = self.call_method_post('SetEnqueueServerPowerOff', json_scheme=json_scheme)
    # BUG FIX: compare against the boolean True, not the string 'True'
    return json_obj['Success'] is True
|
Poweroff a VM. If possible to pass the VM object or simply the ID
of the VM that we want to turn on.
Args:
server: VM Object that represent the VM to power off,
server_id: Int or Str representing the ID of the VM to power off.
Returns:
return True if json_obj['Success'] is 'True' else False
|
entailment
|
def get_hypervisors(self):
    """
    Initialize the internal list containing each template available for each
    hypervisor.
    :return: [bool] True in case of success, otherwise False
    """
    json_scheme = self.gen_def_json_scheme('GetHypervisors')
    json_obj = self.call_method_post(method='GetHypervisors', json_scheme=json_scheme)
    self.json_templates = json_obj
    # map the API's ResourceType codes onto ResourceBounds attributes
    bound_attrs = {1: 'max_cpu', 2: 'max_memory', 3: 'hdd0',
                   7: 'hdd1', 8: 'hdd2', 9: 'hdd3'}
    for elem in dict(json_obj)['Value']:
        hv = self.hypervisors[elem['HypervisorType']]
        for inner_elem in elem['Templates']:
            o = Template(hv)
            o.template_id = inner_elem['Id']
            o.descr = inner_elem['Description']
            o.id_code = inner_elem['IdentificationCode']
            o.name = inner_elem['Name']
            o.enabled = inner_elem['Enabled']
            # smart templates have fixed sizes, hence no resource bounds
            if hv != 'SMART':
                for rb in inner_elem['ResourceBounds']:
                    attr = bound_attrs.get(rb['ResourceType'])
                    if attr is not None:
                        setattr(o.resource_bounds, attr, rb['Max'])
            self.templates.append(o)
    # BUG FIX: the original tested json_obj['Success'] is 'True' (string
    # identity), which is always False for a boolean API response
    return json_obj['Success'] is True
|
Initialize the internal list containing each template available for each
hypervisor.
:return: [bool] True in case of success, otherwise False
|
entailment
|
def get_servers(self):
    """
    Create the list of Server object inside the Datacenter objects.
    Build an internal list of VM Objects (pro or smart) as iterator.
    :return: bool - True when the API call reports success
    """
    json_scheme = self.gen_def_json_scheme('GetServers')
    json_obj = self.call_method_post(method='GetServers', json_scheme=json_scheme)
    self.json_servers = json_obj
    # if this method is called I assume that i must re-read the data
    # so i reinitialize the vmlist
    self.vmlist = VMList()
    # getting all instanced IP in case the list is empty
    if len(self.iplist) <= 0:
        self.get_ip()
    for elem in dict(json_obj)["Value"]:
        # BUG FIX: compare the hypervisor type with ==, not "is" —
        # identity of int literals is a CPython implementation detail
        if elem['HypervisorType'] == 4:
            s = Smart(interface=self, sid=elem['ServerId'])
        else:
            s = Pro(interface=self, sid=elem['ServerId'])
        s.vm_name = elem['Name']
        s.cpu_qty = elem['CPUQuantity']
        s.ram_qty = elem['RAMQuantity']
        s.status = elem['ServerStatus']
        s.datacenter_id = elem['DatacenterId']
        s.wcf_baseurl = self.wcf_baseurl
        s.auth = self.auth
        s.hd_qty = elem['HDQuantity']
        s.hd_total_size = elem['HDTotalSize']
        if elem['HypervisorType'] == 4:
            # smart VMs expose a single IP via the server-detail call
            ssd = self.get_server_detail(elem['ServerId'])
            try:
                s.ip_addr = str(ssd['EasyCloudIPAddress']['Value'])
            except TypeError:
                s.ip_addr = 'Not retrieved.'
        else:
            # pro VMs: collect every purchased IP assigned to this server
            s.ip_addr = []
            for ip in self.iplist:
                if ip.serverid == s.sid:
                    s.ip_addr.append(ip)
        self.vmlist.append(s)
    return json_obj['Success'] is True
|
Create the list of Server object inside the Datacenter objects.
Build an internal list of VM Objects (pro or smart) as iterator.
:return: bool
|
entailment
|
def find_template(self, name=None, hv=None):
    """
    Return a list of templates that could have one or more elements.
    Args:
        name: name of the template to find (substring match on descr).
        hv: the ID of the hypervisor to search the template in.
    Returns:
        A list of Template objects. If hv is None, all templates whose
        description matches name are returned regardless of hypervisor;
        if name is None, all templates of the given hypervisor.
    Raises:
        Exception: if name and hv are both None.
    """
    # lazy-load the template catalogue on first use
    if len(self.templates) <= 0:
        self.get_hypervisors()
    if name is None and hv is None:
        raise Exception('Error, no pattern defined')
    # apply each filter independently; both may be active at once
    matches = self.templates
    if name is not None:
        matches = [t for t in matches if name in t.descr]
    if hv is not None:
        matches = [t for t in matches if t.hypervisor == self.hypervisors[hv]]
    # CLEANUP: the old sys.version_info branch was dead — Python 2's
    # filter() already returned a list, so a list is always returned
    return matches
|
Return a list of templates that could have one or more elements.
Args:
name: name of the template to find.
hv: the ID of the hypervisor to search the template in
Returns:
A list of templates object. If hv is None will return all the
templates matching the name if every hypervisor type. Otherwise
if name is None will return all templates of an hypervisor.
Raises:
ValidationError: if name and hv are None
|
entailment
|
def purchase_ip(self, debug=False):
    """
    Return an ip object representing a new bought IP
    @param debug [Boolean] if true, request and response will be printed
    @return (Ip): Ip object
    @raises Exception: when the API response misses the expected fields
    """
    json_scheme = self.gen_def_json_scheme('SetPurchaseIpAddress')
    json_obj = self.call_method_post(method='SetPurchaseIpAddress', json_scheme=json_scheme, debug=debug)
    try:
        ip = Ip()
        ip.ip_addr = json_obj['Value']['Value']
        ip.resid = json_obj['Value']['ResourceId']
        return ip
    except (KeyError, TypeError):
        # BUG FIX: narrowed the bare "except:" (which also swallowed
        # KeyboardInterrupt/SystemExit) to the lookup errors a malformed
        # response can actually produce
        raise Exception('Unknown error retrieving IP.')
|
Return an ip object representing a new bought IP
@param debug [Boolean] if true, request and response will be printed
@return (Ip): Ip object
|
entailment
|
def purchase_vlan(self, vlan_name, debug=False):
    """
    Buy a new VLAN (virtual switch) with the given name.

    :param vlan_name: String representing the name of the vlan (virtual switch)
    :param debug: Log the json response if True
    :return: a Vlan Object representing the vlan created
    """
    payload = {'VLanName': vlan_name}
    scheme = self.gen_def_json_scheme('SetPurchaseVLan', payload)
    result = self.call_method_post(method="SetPurchaseVLan", json_scheme=scheme)
    if debug is True:
        self.logger.debug(result)
    if result['Success'] is False:
        raise Exception("Cannot purchase new vlan.")
    purchased = Vlan()
    purchased.name = result['Value']['Name']
    purchased.resource_id = result['Value']['ResourceId']
    purchased.vlan_code = result['Value']['VlanCode']
    return purchased
|
Purchase a new VLAN.
:param debug: Log the json response if True
:param vlan_name: String representing the name of the vlan (virtual switch)
:return: a Vlan Object representing the vlan created
|
entailment
|
def remove_vlan(self, vlan_resource_id):
    """
    Delete the VLAN identified by the given resource id.

    :param vlan_resource_id: resource id of the VLAN to remove
    :return: True when the API call succeeded, False otherwise
    """
    payload = {'VLanResourceId': vlan_resource_id}
    scheme = self.gen_def_json_scheme('SetRemoveVLan', payload)
    result = self.call_method_post(method='SetRemoveVLan', json_scheme=scheme)
    return result['Success'] is True
|
Remove a VLAN
:param vlan_resource_id:
:return:
|
entailment
|
def remove_ip(self, ip_id):
    """
    Release a previously purchased IP address.

    @param (str) ip_id: a string representing the resource id of the IP
    @return: True if the API call succeeded, else False
    """
    # Pass the field as a dict (as remove_vlan does) so gen_def_json_scheme
    # can merge it via dict.update(); the old code passed a raw string
    # fragment, which dict.update() cannot consume.
    payload = {'IpAddressResourceId': ip_id}
    json_scheme = self.gen_def_json_scheme('SetRemoveIpAddress', payload)
    json_obj = self.call_method_post(method='SetRemoveIpAddress', json_scheme=json_scheme)
    # Dropped a leftover debug pprint() of the raw response.
    return True if json_obj['Success'] is True else False
|
Delete an Ip from the boughs ip list
@param (str) ip_id: a string representing the resource id of the IP
@return: True if json method had success else False
|
entailment
|
def get_package_id(self, name):
    """
    Retrieve the smart package id given its English name.

    @param (str) name: the Aruba Smart package size name, ie: "small", "medium", "large", "extra large".
    @return: The package id, which depends on the data center and the size
        chosen; returns None implicitly when no package matches.
    """
    json_scheme = self.gen_def_json_scheme('GetPreConfiguredPackages', dict(HypervisorType=4))
    # NOTE(review): the trailing space in the method name below looks like a
    # typo but may be what the remote endpoint expects -- confirm before changing.
    json_obj = self.call_method_post(method='GetPreConfiguredPackages ', json_scheme=json_scheme)
    for package in json_obj['Value']:
        packageId = package['PackageID']
        for description in package['Descriptions']:
            languageID = description['LanguageID']
            packageName = description['Text']
            # LanguageID 2 selects the English description; the name match
            # is case-insensitive.
            if languageID == 2 and packageName.lower() == name.lower():
                return packageId
|
Retrieve the smart package id given is English name
@param (str) name: the Aruba Smart package size name, ie: "small", "medium", "large", "extra large".
@return: The package id that depends on the Data center and the size choosen.
|
entailment
|
def get_ip(self):
    """
    Retrieve a complete list of bought ip address related only to PRO Servers.

    Populates ``self.iplist`` (an IpList) with one Ip object per address
    returned by the web service; nothing is returned.
    """
    json_scheme = self.gen_def_json_scheme('GetPurchasedIpAddresses')
    # NOTE(review): trailing space in the method name below -- looks like a
    # typo but may be required by the endpoint; confirm before changing.
    json_obj = self.call_method_post(method='GetPurchasedIpAddresses ', json_scheme=json_scheme)
    self.iplist = IpList()
    for ip in json_obj['Value']:
        r = Ip()
        r.ip_addr = ip['Value']
        r.resid = ip['ResourceId']
        # ServerId is a None-like value when the IP is not assigned to a server.
        r.serverid = ip['ServerId'] if 'None' not in str(ip['ServerId']) else None
        self.iplist.append(r)
|
Retrieve a complete list of bought ip address related only to PRO Servers.
It create an internal object (Iplist) representing all of the ips object
iterated form the WS.
@param: None
@return: None
|
entailment
|
def gen_def_json_scheme(self, req, method_fields=None):
    """
    Build the JSON payload for an API method call.

    :param req: String representing the name of the method to call
    :param method_fields: A dictionary containing the method-specified fields
    :rtype : json object representing the method call
    """
    payload = {
        'ApplicationId': req,
        'RequestId': req,
        'SessionId': req,
        'Password': self.auth.password,
        'Username': self.auth.username,
    }
    if method_fields is not None:
        payload.update(method_fields)
    self.logger.debug(json.dumps(payload))
    return json.dumps(payload)
|
Generate the scheme for the json request.
:param req: String representing the name of the method to call
:param method_fields: A dictionary containing the method-specified fields
:rtype : json object representing the method call
|
entailment
|
def _commit(self):
    """
    POST this request object (jsonpickle-serialized) to ``<uri>/<ClassName>``.

    :return: (dict) Response object content
    :raises MalformedJsonRequest: on any non-200 HTTP status
    :raises OperationAlreadyEnqueued: when the API reports ResultCode 17
    :raises RequestFailed: when the API reports Success == False
    """
    assert self.uri is not None, Exception("BadArgument: uri property cannot be None")
    # The remote endpoint is named after the concrete request class.
    url = '{}/{}'.format(self.uri, self.__class__.__name__)
    serialized_json = jsonpickle.encode(self, unpicklable=False, )
    headers = {'Content-Type': 'application/json', 'Content-Length': str(len(serialized_json))}
    response = Http.post(url=url, data=serialized_json, headers=headers)
    if response.status_code != 200:
        from ArubaCloud.base.Errors import MalformedJsonRequest
        raise MalformedJsonRequest("Request: {}, Status Code: {}".format(serialized_json, response.status_code))
    content = jsonpickle.decode(response.content.decode("utf-8"))
    # ResultCode 17 means the same operation is already queued server-side.
    if content['ResultCode'] == 17:
        from ArubaCloud.base.Errors import OperationAlreadyEnqueued
        raise OperationAlreadyEnqueued("{} already enqueued".format(self.__class__.__name__))
    if content['Success'] is False:
        from ArubaCloud.base.Errors import RequestFailed
        raise RequestFailed("Request: {}, Response: {}".format(serialized_json, response.content))
    return content
|
:return: (dict) Response object content
|
entailment
|
def get(self):
    """
    Fetch the currently configured SharedStorages entries.

    :return: [list] List containing the current SharedStorages entries
    """
    response = self._call(GetSharedStorages).commit()
    return response['Value']
|
Retrieve the current configured SharedStorages entries
:return: [list] List containing the current SharedStorages entries
|
entailment
|
def purchase_iscsi(self, quantity, iqn, name, protocol=SharedStorageProtocolType.ISCSI):
    """
    Purchase an iSCSI shared storage resource.

    :type quantity: int
    :type iqn: list[str]
    :type name: str
    :type protocol: SharedStorageProtocols
    :param quantity: Amount of GB
    :param iqn: List of IQN represented in string format
    :param name: Name of the resource
    :param protocol: Protocol to use
    :return: the 'Value' field of the API response
    """
    iqn_objects = [SharedStorageIQN(Value=value) for value in iqn]
    request = self._call(SetEnqueuePurchaseSharedStorage, Quantity=quantity, SharedStorageName=name,
                         SharedStorageIQNs=iqn_objects, SharedStorageProtocolType=protocol)
    return request.commit()['Value']
|
:type quantity: int
:type iqn: list[str]
:type name: str
:type protocol: SharedStorageProtocols
:param quantity: Amount of GB
:param iqn: List of IQN represented in string format
:param name: Name of the resource
:param protocol: Protocol to use
:return:
|
entailment
|
def _get(self, *args, **kwargs):
"""Wrapper around Requests for GET requests
Returns:
Response:
A Requests Response object
"""
if 'timeout' not in kwargs:
kwargs['timeout'] = self.timeout
req = self.session.get(*args, **kwargs)
return req
|
Wrapper around Requests for GET requests
Returns:
Response:
A Requests Response object
|
entailment
|
def _get_xml(self, *args, **kwargs):
"""Wrapper around Requests for GET XML requests
Returns:
Response:
A Requests Response object
"""
req = self.session_xml.get(*args, **kwargs)
return req
|
Wrapper around Requests for GET XML requests
Returns:
Response:
A Requests Response object
|
entailment
|
def _post(self, *args, **kwargs):
"""Wrapper around Requests for POST requests
Returns:
Response:
A Requests Response object
"""
if 'timeout' not in kwargs:
kwargs['timeout'] = self.timeout
req = self.session.post(*args, **kwargs)
return req
|
Wrapper around Requests for POST requests
Returns:
Response:
A Requests Response object
|
entailment
|
def _post_xml(self, *args, **kwargs):
"""Wrapper around Requests for POST requests
Returns:
Response:
A Requests Response object
"""
if 'timeout' not in kwargs:
kwargs['timeout'] = self.timeout
req = self.session_xml.post(*args, **kwargs)
return req
|
Wrapper around Requests for POST requests
Returns:
Response:
A Requests Response object
|
entailment
|
def _put(self, *args, **kwargs):
"""Wrapper around Requests for PUT requests
Returns:
Response:
A Requests Response object
"""
if 'timeout' not in kwargs:
kwargs['timeout'] = self.timeout
req = self.session.put(*args, **kwargs)
return req
|
Wrapper around Requests for PUT requests
Returns:
Response:
A Requests Response object
|
entailment
|
def _delete(self, *args, **kwargs):
"""Wrapper around Requests for DELETE requests
Returns:
Response:
A Requests Response object
"""
if 'timeout' not in kwargs:
kwargs['timeout'] = self.timeout
req = self.session.delete(*args, **kwargs)
return req
|
Wrapper around Requests for DELETE requests
Returns:
Response:
A Requests Response object
|
entailment
|
def auth_ping(self):
    """Test that the application can authenticate to Crowd.

    Probes a non-existent resource: a 404 means the application's
    credentials were accepted; a 401 (or any other status) means not.

    Returns:
        bool:
            True if the application authentication succeeded.
    """
    response = self._get(self.rest_url + "/non-existent/location")
    # Only a 404 proves we got past authentication; 401 means bad
    # credentials and any other status indicates a server-side problem.
    return response.status_code == 404
|
Test that application can authenticate to Crowd.
Attempts to authenticate the application user against
the Crowd server. In order for user authentication to
work, an application must be able to authenticate.
Returns:
bool:
True if the application authentication succeeded.
|
entailment
|
def auth_user(self, username, password):
    """Authenticate a user account against the Crowd server.

    Args:
        username: The account username.
        password: The account password.

    Returns:
        dict:
            A dict mapping of user attributes if the application
            authentication was successful. See the Crowd documentation
            for the authoritative list of attributes.
        None: If authentication failed.
    """
    response = self._post(self.rest_url + "/authentication",
                          data=json.dumps({"value": password}),
                          params={"username": username})
    # A failed login (or any other error) yields a non-OK response.
    if response.ok:
        return response.json()
    return None
|
Authenticate a user account against the Crowd server.
Attempts to authenticate the user against the Crowd server.
Args:
username: The account username.
password: The account password.
Returns:
dict:
A dict mapping of user attributes if the application
authentication was successful. See the Crowd documentation
for the authoritative list of attributes.
None: If authentication failed.
|
entailment
|
def get_session(self, username, password, remote="127.0.0.1", proxy=None):
    """Create a Crowd session for a user.

    Args:
        username: The account username.
        password: The account password.
        remote:
            The remote address of the user. Distinct values allow
            multiple concurrent sessions for a user; the host running
            this code may need to be a trusted proxy in Crowd.
        proxy: Value of X-Forwarded-For server header.

    Returns:
        dict:
            A dict mapping of user attributes if the application
            authentication was successful. See the Crowd
            documentation for the authoritative list of attributes.
        None: If authentication failed.
    """
    validation_factors = [{"name": "remote_address", "value": remote, }]
    if proxy:
        validation_factors.append({"name": "X-Forwarded-For", "value": proxy, })
    params = {
        "username": username,
        "password": password,
        "validation-factors": {"validationFactors": validation_factors},
    }
    response = self._post(self.rest_url + "/session",
                          data=json.dumps(params),
                          params={"expand": "user"})
    if response.ok:
        return response.json()
    return None
|
Create a session for a user.
Attempts to create a user session on the Crowd server.
Args:
username: The account username.
password: The account password.
remote:
The remote address of the user. This can be used
to create multiple concurrent sessions for a user.
The host you run this program on may need to be configured
in Crowd as a trusted proxy for this to work.
proxy: Value of X-Forwarded-For server header.
Returns:
dict:
A dict mapping of user attributes if the application
authentication was successful. See the Crowd
documentation for the authoritative list of attributes.
None: If authentication failed.
|
entailment
|
def validate_session(self, token, remote="127.0.0.1", proxy=None):
    """Validate a session token.

    Validate a previously acquired session token against the
    Crowd server. This may be a token provided by a user from
    a http cookie or by some other means.

    Args:
        token: The session token.
        remote: The remote address of the user.
        proxy: Value of X-Forwarded-For server header.

    Returns:
        dict:
            A dict mapping of user attributes if the token was
            valid. See the Crowd documentation for the
            authoritative list of attributes.
        None: If validation failed.
    """
    params = {
        "validationFactors": [
            {"name": "remote_address", "value": remote, },
        ]
    }
    if proxy:
        # Bug fix: the factor list lives directly under "validationFactors"
        # here; the previous code indexed a non-existent
        # "validation-factors" key and raised KeyError whenever a proxy
        # was supplied.
        params["validationFactors"].append({"name": "X-Forwarded-For", "value": proxy, })
    url = self.rest_url + "/session/%s" % token
    response = self._post(url, data=json.dumps(params), params={"expand": "user"})
    # For consistency between methods use None rather than False
    # if token validation failed for any reason.
    if not response.ok:
        return None
    return response.json()
|
Validate a session token.
Validate a previously acquired session token against the
Crowd server. This may be a token provided by a user from
a http cookie or by some other means.
Args:
token: The session token.
remote: The remote address of the user.
proxy: Value of X-Forwarded-For server header
Returns:
dict:
A dict mapping of user attributes if the application
authentication was successful. See the Crowd
documentation for the authoritative list of attributes.
None: If authentication failed.
|
entailment
|
def terminate_session(self, token):
    """Terminates the session token, effectively logging out the user
    from all crowd-enabled services.

    Args:
        token: The session token.

    Returns:
        True: If session terminated
        None: If session termination failed
    """
    response = self._delete(self.rest_url + "/session/%s" % token)
    # For consistency between methods use None rather than False.
    return True if response.ok else None
|
Terminates the session token, effectively logging out the user
from all crowd-enabled services.
Args:
token: The session token.
Returns:
True: If session terminated
None: If session termination failed
|
entailment
|
def add_user(self, username, raise_on_error=False, **kwargs):
    """Add a user to the directory.

    Args:
        username: The account username
        raise_on_error: optional (default: False)
        **kwargs: key-value pairs:
            password: mandatory
            email: mandatory
            first_name: optional
            last_name: optional
            display_name: optional
            active: optional (default True)

    Returns:
        True: Succeeded
        False: If unsuccessful

    Raises:
        ValueError: if a mandatory field is missing or an unknown
            keyword argument is supplied.
        RuntimeError: on API failure when raise_on_error is True.
    """
    # Check that mandatory elements have been provided.
    if 'password' not in kwargs:
        raise ValueError("missing password")
    if 'email' not in kwargs:
        raise ValueError("missing email")
    # Populate data with defaults and the mandatory values. (The old
    # try/except here was unreachable -- both keys are verified above --
    # and its handler returned the ValueError class instead of raising.)
    data = {
        "name": username,
        "first-name": username,
        "last-name": username,
        "display-name": username,
        "email": kwargs["email"],
        "password": {"value": kwargs["password"]},
        "active": True
    }
    # Remove special case 'password' so the loop below only sees plain
    # attribute overrides.
    del kwargs["password"]
    # Put values from kwargs into data, mapping snake_case to kebab-case.
    for k, v in kwargs.items():
        new_k = k.replace("_", "-")
        if new_k not in data:
            raise ValueError("invalid argument %s" % k)
        data[new_k] = v
    response = self._post(self.rest_url + "/user",
                          data=json.dumps(data))
    if response.status_code == 201:
        return True
    if raise_on_error:
        raise RuntimeError(response.json()['message'])
    return False
|
Add a user to the directory
Args:
username: The account username
raise_on_error: optional (default: False)
**kwargs: key-value pairs:
password: mandatory
email: mandatory
first_name: optional
last_name: optional
display_name: optional
active: optional (default True)
Returns:
True: Succeeded
False: If unsuccessful
|
entailment
|
def get_user(self, username):
    """Retrieve information about a user.

    Returns:
        dict: User information
        None: If no user or failure occurred
    """
    response = self._get(self.rest_url + "/user",
                         params={"username": username, "expand": "attributes"})
    return response.json() if response.ok else None
|
Retrieve information about a user
Returns:
dict: User information
None: If no user or failure occurred
|
entailment
|
def set_active(self, username, active_state):
    """Set the active state of a user.

    Args:
        username: The account username
        active_state: True or False

    Returns:
        True: If successful (including when already in the desired state)
        None: If no user or failure occurred
    """
    if active_state not in (True, False):
        raise ValueError("active_state must be True or False")
    user = self.get_user(username)
    if user is None:
        return None
    # Skip the round-trip when the user is already in the desired state.
    if user['active'] is active_state:
        return True
    user['active'] = active_state
    response = self._put(self.rest_url + "/user",
                         params={"username": username},
                         data=json.dumps(user))
    return True if response.status_code == 204 else None
|
Set the active state of a user
Args:
username: The account username
active_state: True or False
Returns:
True: If successful
None: If no user or failure occurred
|
entailment
|
def set_user_attribute(self, username, attribute, value, raise_on_error=False):
    """Set an attribute on a user.

    :param username: The username on which to set the attribute
    :param attribute: The name of the attribute to set
    :param value: The value of the attribute to set
    :param raise_on_error: raise RuntimeError on API failure
    :return: True on success, False on failure.
    """
    payload = {'attributes': [{'name': attribute, 'values': [value]}]}
    response = self._post(self.rest_url + "/user/attribute",
                          params={"username": username,},
                          data=json.dumps(payload))
    if response.status_code == 204:
        return True
    if raise_on_error:
        raise RuntimeError(response.json()['message'])
    return False
|
Set an attribute on a user
:param username: The username on which to set the attribute
:param attribute: The name of the attribute to set
:param value: The value of the attribute to set
:return: True on success, False on failure.
|
entailment
|
def add_user_to_group(self, username, groupname, raise_on_error=False):
    """Add a user to a group.

    :param username: The username to assign to the group
    :param groupname: The group name into which to assign the user
    :param raise_on_error: raise RuntimeError on API failure
    :return: True on success, False on failure.
    """
    response = self._post(self.rest_url + "/user/group/direct",
                          params={"username": username,},
                          data=json.dumps({'name': groupname}))
    if response.status_code == 201:
        return True
    if raise_on_error:
        raise RuntimeError(response.json()['message'])
    return False
|
Add a user to a group
:param username: The username to assign to the group
:param groupname: The group name into which to assign the user
:return: True on success, False on failure.
|
entailment
|
def remove_user_from_group(self, username, groupname, raise_on_error=False):
    """Remove a user from a group.

    Args:
        username: The username to remove from the group.
        groupname: The group name to be removed from the user.

    Returns:
        True: Succeeded
        False: If unsuccessful
    """
    response = self._delete(self.rest_url + "/group/user/direct",
                            params={"username": username, "groupname": groupname})
    if response.status_code == 204:
        return True
    if raise_on_error:
        raise RuntimeError(response.json()['message'])
    return False
|
Remove a user from a group
Attempts to remove a user from a group
Args
username: The username to remove from the group.
groupname: The group name to be removed from the user.
Returns:
True: Succeeded
False: If unsuccessful
|
entailment
|
def change_password(self, username, newpassword, raise_on_error=False):
    """Change new password for a user.

    Args:
        username: The account username.
        newpassword: The account new password.
        raise_on_error: optional (default: False)

    Returns:
        True: Succeeded
        False: If unsuccessful
    """
    response = self._put(self.rest_url + "/user/password",
                         data=json.dumps({"value": newpassword}),
                         params={"username": username})
    if not response.ok:
        if raise_on_error:
            raise RuntimeError(response.json()['message'])
        return False
    return True
|
Change new password for a user
Args:
username: The account username.
newpassword: The account new password.
raise_on_error: optional (default: False)
Returns:
True: Succeeded
False: If unsuccessful
|
entailment
|
def send_password_reset_link(self, username):
    """Sends the user a password reset link (by email).

    Args:
        username: The account username.

    Returns:
        True: Succeeded
        False: If unsuccessful
    """
    response = self._post(self.rest_url + "/user/mail/password",
                          params={"username": username})
    return bool(response.ok)
|
Sends the user a password reset link (by email)
Args:
username: The account username.
Returns:
True: Succeeded
False: If unsuccessful
|
entailment
|
def get_nested_groups(self, username):
    """Retrieve a list of all group names that have <username> as a direct
    or indirect member.

    Args:
        username: The account username.

    Returns:
        list: A list of strings of group names, or None on failure.
    """
    response = self._get(self.rest_url + "/user/group/nested",
                         params={"username": username})
    if not response.ok:
        return None
    return [group['name'] for group in response.json()['groups']]
|
Retrieve a list of all group names that have <username> as a direct or indirect member.
Args:
username: The account username.
Returns:
list:
A list of strings of group names.
|
entailment
|
def get_nested_group_users(self, groupname):
    """Retrieves a list of all users that directly or indirectly belong
    to the given groupname.

    Args:
        groupname: The group name.

    Returns:
        list: A list of strings of user names, or None on failure.
    """
    response = self._get(self.rest_url + "/group/user/nested",
                         params={"groupname": groupname,
                                 "start-index": 0,
                                 "max-results": 99999})
    if not response.ok:
        return None
    return [member['name'] for member in response.json()['users']]
|
Retrieves a list of all users that directly or indirectly belong to the given groupname.
Args:
groupname: The group name.
Returns:
list:
A list of strings of user names.
|
entailment
|
def user_exists(self, username):
    """Determines if the user exists.

    Args:
        username: The user name.

    Returns:
        True when the user exists in the Crowd application,
        None otherwise.
    """
    response = self._get(self.rest_url + "/user",
                         params={"username": username})
    return True if response.ok else None
|
Determines if the user exists.
Args:
username: The user name.
Returns:
bool:
True if the user exists in the Crowd application.
|
entailment
|
def get_memberships(self):
    """Fetches all group memberships in a single call.

    Returns:
        dict:
            key: group name
            value: dict with 'users' (list of user names) and
            'groups' (list of child group names); None on failure.
    """
    response = self._get_xml(self.rest_url + "/group/membership")
    if not response.ok:
        return None
    tree = etree.fromstring(response.content)
    memberships = {}
    for node in tree.findall('membership'):
        # Coerce values to unicode in a python 2 and 3 compatible way.
        group = u'{}'.format(node.get('group'))
        user_names = [u'{}'.format(child.get('name'))
                      for child in node.find('users').findall('user')]
        group_names = [u'{}'.format(child.get('name'))
                       for child in node.find('groups').findall('group')]
        memberships[group] = {u'users': user_names, u'groups': group_names}
    return memberships
|
Fetches all group memberships.
Returns:
dict:
key: group name
value: (array of users, array of groups)
|
entailment
|
def search(self, entity_type, property_name, search_string, start_index=0, max_results=99999):
    """Performs a user search using the Crowd search API.

    https://developer.atlassian.com/display/CROWDDEV/Crowd+REST+Resources#CrowdRESTResources-SearchResource

    Args:
        entity_type: 'user' or 'group'
        property_name: eg. 'email', 'name'
        search_string: the string to search for.
        start_index: starting index of the results (default: 0)
        max_results: maximum number of results returned (default: 99999)

    Returns:
        json results:
            Returns search results, or None on failure.
    """
    # (Removed a dead 'params' dict that was built and then immediately
    # overwritten by the assignment below.)
    params = {
        'entity-type': entity_type,
        'expand': entity_type,
        'start-index': start_index,
        'max-results': max_results
    }
    # Construct XML payload of the form:
    # <property-search-restriction>
    #   <property>
    #     <name>email</name>
    #     <type>STRING</type>
    #   </property>
    #   <match-mode>CONTAINS</match-mode>
    #   <value>bob@example.net</value>
    # </property-search-restriction>
    root = etree.Element('property-search-restriction')
    property_ = etree.Element('property')
    prop_name = etree.Element('name')
    prop_name.text = property_name
    property_.append(prop_name)
    prop_type = etree.Element('type')
    prop_type.text = 'STRING'
    property_.append(prop_type)
    root.append(property_)
    match_mode = etree.Element('match-mode')
    match_mode.text = 'CONTAINS'
    root.append(match_mode)
    value = etree.Element('value')
    value.text = search_string
    root.append(value)
    # Construct the XML payload expected by the search API.
    payload = '<?xml version="1.0" encoding="UTF-8"?>\n' + etree.tostring(root).decode('utf-8')
    # We're sending XML but would like a JSON response.
    session = self._build_session(content_type='xml')
    session.headers.update({'Accept': 'application/json'})
    response = session.post(self.rest_url + "/search", params=params, data=payload, timeout=self.timeout)
    if not response.ok:
        return None
    return response.json()
|
Performs a user search using the Crowd search API.
https://developer.atlassian.com/display/CROWDDEV/Crowd+REST+Resources#CrowdRESTResources-SearchResource
Args:
entity_type: 'user' or 'group'
property_name: eg. 'email', 'name'
search_string: the string to search for.
start_index: starting index of the results (default: 0)
max_results: maximum number of results returned (default: 99999)
Returns:
json results:
Returns search results.
|
entailment
|
def versions(self) -> List[BlenderVersion]:
    """
    The versions associated with Blender: one per git tag, plus the
    master placeholder version.
    """
    # Fixed annotation: typing.List must be subscripted (List[...]);
    # calling it as List(BlenderVersion) raises TypeError at def time.
    return [BlenderVersion(tag) for tag in self.git_repo.tags] + [BlenderVersion(BLENDER_VERSION_MASTER)]
|
The versions associated with Blender
|
entailment
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.