sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def validator(self):
"""
VCF Validator
"""
tstart = datetime.now()
v = validator.Validator(self.vcf_file)
std = v.run()
if std == 0:
self.is_validated = True
tend = datetime.now()
execution_time = tend - tstart | VCF Validator | entailment |
def sanitycheck(self):
"""
Search and Remove variants with [0/0, ./.]
Search and Replace chr from the beggining of the chromossomes to get positionning.
Sort VCF by 1...22, X, Y, MT and nothing else
#Discard other variants
"""
# logging.info('Starting Sanity Check...')
tstart = datetime.now()
# command = 'python %s/sanity_check.py -i %s' % (scripts_dir, self.vcf_file)
# self.shell(command)
sc = sanity_check.Sanity_check(self.vcf_file)
std = sc.run()
tend = datetime.now()
execution_time = tend - tstart | Search and Remove variants with [0/0, ./.]
Search and Replace chr from the beggining of the chromossomes to get positionning.
Sort VCF by 1...22, X, Y, MT and nothing else
#Discard other variants | entailment |
def snpeff(self):
"""
Annotation with snpEff
"""
# calculate time thread took to finish
# logging.info('Starting snpEff')
tstart = datetime.now()
se = snpeff.Snpeff(self.vcf_file)
std = se.run()
tend = datetime.now()
execution_time = tend - tstart | Annotation with snpEff | entailment |
def vep(self):
"""VEP"""
# calculate time thread took to finish
# logging.info('Starting VEP ')
tstart = datetime.now()
vep_obj = vep.Vep(self.vcf_file)
std = vep_obj.run()
# command = 'python %s/vep.py -i sanity_check/checked.vcf' % (scripts_dir)
# self.shell(command)
# command = 'mv vep/vep.log log/'
# os.system(command)
# command = 'mv vep/vep.output.vcf_summary.html reports/vep_summary.html'
# os.system(command)
tend = datetime.now()
execution_time = tend - tstart | VEP | entailment |
def decipher(self):
"""Decipher """
# calculate time thread took to finish
# logging.info('Starting HI score')
tstart = datetime.now()
decipher_obj = decipher.Decipher(self.vcf_file)
decipher_obj.run()
tend = datetime.now()
execution_time = tend - tstart | Decipher | entailment |
def hgmd(self):
"""Hi Index """
# calculate time thread took to finish
# logging.info('Starting HI score')
tstart = datetime.now()
if os.path.isfile(settings.hgmd_file):
hgmd_obj = hgmd.HGMD(self.vcf_file)
hgmd_obj.run()
tend = datetime.now()
execution_time = tend - tstart | Hi Index | entailment |
def snpsift(self):
"""SnpSift"""
tstart = datetime.now()
# command = 'python %s/snpsift.py -i sanity_check/checked.vcf 2>log/snpsift.log' % (scripts_dir)
# self.shell(command)
ss = snpsift.SnpSift(self.vcf_file)
ss.run()
tend = datetime.now()
execution_time = tend - tstart | SnpSift | entailment |
def vcf_annotator(self):
"""Vcf annotator"""
tstart = datetime.now()
# python ../scripts/annotate_vcfs.py -i mm13173_14.ug.target1.vcf -r 1000genomes dbsnp138 clinvar esp6500 -a ../data/1000genomes/ALL.wgs.integrated_phase1_v3.20101123.snps_indels_sv.sites.vcf.gz ../data/dbsnp138/00-All.vcf.gz ../data/dbsnp138/clinvar_00-latest.vcf.gz ../data/ESP6500/ESP6500.vcf.gz
# command = 'python %s/vcf_annotator_parallel.py -n %s -i sanity_check/checked.vcf -r 1000genomes dbsnp clinvar esp6500 -a %s %s %s %s 2>log/pynnotator.log' % (scripts_dir, pynnotator_cores, genomes1k, dbsnp, clinvar, esp)
# self.shell(command)
resources = "genomes1k dbsnp clinvar esp6500 ensembl_phen ensembl_clin hgmd" #
resources = resources.split(' ')
annfiles = [
"%s/1000genomes/%s" % (settings.data_dir, settings.genomes1k_file),
"%s/dbsnp/%s" % (settings.data_dir, settings.dbsnp_file),
"%s/dbsnp/%s" % (settings.data_dir, settings.clinvar_file),
"%s/esp6500/%s" % (settings.data_dir, settings.esp_final_file),
"%s/ensembl/%s" % (settings.data_dir, settings.ensembl_phenotype_file),
"%s/ensembl/%s" % (settings.data_dir, settings.ensembl_clinically_file),
]
if os.path.exists(settings.hgmd):
annfiles.append("%s/hgmd/%s" % (settings.data_dir, settings.hgmd),)
#
# annfiles = " ".join(annfiles)
# annfiles = ["%s/1000genomes/%s" % (settings.data_dir, settings.genomes1k_file)]
annotator_obj = vcf_annotator.VCF_Annotator(self.vcf_file, annfiles, resources, settings.vcf_annotator_cores)
annotator_obj.run()
tend = datetime.now()
execution_time = tend - tstart | Vcf annotator | entailment |
def dbnsfp(self):
"""dbnsfp"""
tstart = datetime.now()
# python ../scripts/annotate_vcfs.py -i mm13173_14.ug.target1.vcf -r 1000genomes dbsnp138 clinvar esp6500 -a ../data/1000genomes/ALL.wgs.integrated_phase1_v3.20101123.snps_indels_sv.sites.vcf.gz ../data/dbsnp138/00-All.vcf.gz ../data/dbsnp138/clinvar_00-latest.vcf.gz ../data/ESP6500/ESP6500.vcf.gz
# command = 'python %s/cadd_dann.py -n %s -i sanity_check/checked.vcf 2>log/cadd_dann.log' % (scripts_dir, cadd_vest_cores)
# self.shell(command)
db = dbnsfp.Dbnsfp(self.vcf_file, settings.dbnsfp_cores)
db.run()
tend = datetime.now()
execution_time = tend - tstart | dbnsfp | entailment |
def cli(ctx, settings, app):
"""Manage Morp application services"""
if app is None and settings is None:
print('Either --app or --settings must be supplied')
ctx.ensure_object(dict)
ctx.obj['app'] = app
ctx.obj['settings'] = settings | Manage Morp application services | entailment |
def _settings_checker(self, required_settings=None, accept_none=True):
"""
Take a list of required _settings dictionary keys
and make sure they are set. This can be added to a custom
constructor in a subclass and tested to see if it returns ``True``.
:arg list required_settings: A list of required keys to look for.
:arg bool accept_none: Boolean set to True if None is an acceptable
setting. Set to False if None is not an
acceptable setting.
:returns: * bool ``True`` if all required settings exist, OR
* str <key name> for the first key missing from _settings.
"""
if required_settings is not None:
for keyname in required_settings:
if keyname not in self._settings:
return keyname
if accept_none is False and self._settings[keyname] is None:
return keyname
return True | Take a list of required _settings dictionary keys
and make sure they are set. This can be added to a custom
constructor in a subclass and tested to see if it returns ``True``.
:arg list required_settings: A list of required keys to look for.
:arg bool accept_none: Boolean set to True if None is an acceptable
setting. Set to False if None is not an
acceptable setting.
:returns: * bool ``True`` if all required settings exist, OR
* str <key name> for the first key missing from _settings. | entailment |
def _get_response(self, endpoint, query, is_post=False):
"""Returns response or False in event of failure"""
timeout_secs = self._settings.get('timeout', 10)
headers = self._settings.get('request_headers', {})
try:
if is_post:
response = requests.post(
endpoint, data=query, headers=headers, timeout=timeout_secs)
else:
response = requests.get(
endpoint, params=query, headers=headers, timeout=timeout_secs)
except requests.exceptions.Timeout as e:
raise Exception(
'API request timed out after %s seconds.' % timeout_secs)
except Exception as e:
raise e
if response.status_code != 200:
raise Exception('Received status code %s from %s. Content is:\n%s'
% (response.status_code,
self.get_service_name(),
response.text))
return response | Returns response or False in event of failure | entailment |
def _get_json_obj(self, endpoint, query, is_post=False):
"""
Return False if connection could not be made.
Otherwise, return a response object from JSON.
"""
response = self._get_response(endpoint, query, is_post=is_post)
content = response.text
try:
return loads(content)
except ValueError:
raise Exception('Could not decode content to JSON:\n%s'
% self.__class__.__name__, content) | Return False if connection could not be made.
Otherwise, return a response object from JSON. | entailment |
def _get_xml_doc(self, endpoint, query, is_post=False):
"""
Return False if connection could not be made.
Otherwise, return a minidom Document.
"""
response = self._get_response(endpoint, query, is_post=is_post)
return minidom.parse(response.text) | Return False if connection could not be made.
Otherwise, return a minidom Document. | entailment |
def geocode(self, pq):
"""
:arg PlaceQuery pq: PlaceQuery instance
:rtype: tuple
:returns: post-processed list of Candidate objects and
and UpstreamResponseInfo object if an API call was made.
Examples:
Preprocessor throws out request::
([], None)
Postprocessor throws out some candidates::
([<Candidate obj>, <Candidate obj>, ...], <UpstreamResponseInfo obj>)
Postprocessor throws out all candidates::
([], <UpstreamResponseInfo obj>)
An exception occurs while making the API call::
([], <UpstreamResponseInfo obj>)
"""
processed_pq = copy.copy(pq)
for p in self._preprocessors:
processed_pq = p.process(processed_pq)
if not processed_pq:
return [], None
upstream_response_info = UpstreamResponseInfo(self.get_service_name(),
processed_pq)
try:
start = datetime.now()
candidates = self._geocode(processed_pq)
end = datetime.now()
response_time_sec = (end - start).total_seconds()
upstream_response_info.set_response_time(1000 * response_time_sec)
except Exception:
upstream_response_info.set_success(False)
upstream_response_info.errors.append(format_exc())
return [], upstream_response_info
if len(candidates) > 0:
for p in self._postprocessors: # apply universal candidate postprocessing
candidates = p.process(candidates) # merge lists
return candidates, upstream_response_info | :arg PlaceQuery pq: PlaceQuery instance
:rtype: tuple
:returns: post-processed list of Candidate objects and
and UpstreamResponseInfo object if an API call was made.
Examples:
Preprocessor throws out request::
([], None)
Postprocessor throws out some candidates::
([<Candidate obj>, <Candidate obj>, ...], <UpstreamResponseInfo obj>)
Postprocessor throws out all candidates::
([], <UpstreamResponseInfo obj>)
An exception occurs while making the API call::
([], <UpstreamResponseInfo obj>) | entailment |
def process(self, candidates):
"""
:arg list candidates: list of Candidate instances
"""
for c in candidates[:]:
if c.locator not in self.good_locators:
# TODO: search string, i.e. find "EU_Street_Name" in "EU_Street_Name.GBR_StreetName"
candidates.remove(c)
return candidates | :arg list candidates: list of Candidate instances | entailment |
def process(self, unordered_candidates):
"""
:arg list candidates: list of Candidate instances
"""
ordered_candidates = []
# make a new list of candidates in order of ordered_locators
for locator in self.ordered_locators:
for uc in unordered_candidates[:]:
if uc.locator == locator:
ordered_candidates.append(uc)
unordered_candidates.remove(uc)
# add all the candidates that are still left
# (whose locator values are not in ordered_locators)
# and return the new list
return ordered_candidates + unordered_candidates | :arg list candidates: list of Candidate instances | entailment |
def process(self, candidates):
"""
:arg list candidates: list of Candidates
:returns: list of Candidates where score is at least min_score,
if and only if one or more Candidates have at least min_score.
Otherwise, returns original list of Candidates.
"""
high_score_candidates = [c for c in candidates if c.score >= self.min_score]
if high_score_candidates != []:
return high_score_candidates
return candidates | :arg list candidates: list of Candidates
:returns: list of Candidates where score is at least min_score,
if and only if one or more Candidates have at least min_score.
Otherwise, returns original list of Candidates. | entailment |
def process(self, candidates):
"""
:arg list candidates: list of Candidates
:returns: score-sorted list of Candidates
"""
return sorted(candidates, key=attrgetter('score'), reverse=self.reverse) | :arg list candidates: list of Candidates
:returns: score-sorted list of Candidates | entailment |
def _get_distance(self, pnt1, pnt2):
"""Get distance in meters between two lat/long points"""
lat1, lon1 = pnt1
lat2, lon2 = pnt2
radius = 6356752 # km
dlat = math.radians(lat2 - lat1)
dlon = math.radians(lon2 - lon1)
a = math.sin(dlat / 2) * math.sin(dlat / 2) + math.cos(math.radians(lat1)) \
* math.cos(math.radians(lat2)) * math.sin(dlon / 2) * math.sin(dlon / 2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
d = radius * c
return d | Get distance in meters between two lat/long points | entailment |
def _points_within_distance(self, pnt1, pnt2):
"""Returns true if lat/lon points are within given distance in metres."""
if self._get_distance(pnt1, pnt2) <= self.distance:
return True
return False | Returns true if lat/lon points are within given distance in metres. | entailment |
def _make_candidate_from_result(self, result):
""" Make a Candidate from a Google geocoder results dictionary. """
candidate = Candidate()
candidate.match_addr = result['formatted_address']
candidate.x = result['geometry']['location']['lng']
candidate.y = result['geometry']['location']['lat']
candidate.locator = self.LOCATOR_MAPPING.get(result['geometry']['location_type'], '')
candidate.entity_types = result['types']
candidate.partial_match = result.get('partial_match', False)
component_lookups = {
'city': {'type': 'locality', 'key': 'long_name'},
'subregion': {'type': 'administrative_area_level_2', 'key': 'long_name'},
'region': {'type': 'administrative_area_level_1', 'key': 'short_name'},
'postal': {'type': 'postal_code', 'key': 'long_name'},
'country': {'type': 'country', 'key': 'short_name'},
}
for (field, lookup) in component_lookups.items():
setattr(candidate, 'match_' + field, self._get_component_from_result(result, lookup))
candidate.geoservice = self.__class__.__name__
return candidate | Make a Candidate from a Google geocoder results dictionary. | entailment |
def _get_component_from_result(self, result, lookup):
"""
Helper function to get a particular address component from a Google result.
Since the address components in results are an array of objects containing a types array,
we have to search for a particular component rather than being able to look it up directly.
Returns the first match, so this should be used for unique component types (e.g.
'locality'), not for categories (e.g. 'political') that can describe multiple components.
:arg dict result: A results dict with an 'address_components' key, as returned by the
Google geocoder.
:arg dict lookup: The type (e.g. 'street_number') and key ('short_name' or 'long_name') of
the desired address component value.
:returns: address component or empty string
"""
for component in result['address_components']:
if lookup['type'] in component['types']:
return component.get(lookup['key'], '')
return '' | Helper function to get a particular address component from a Google result.
Since the address components in results are an array of objects containing a types array,
we have to search for a particular component rather than being able to look it up directly.
Returns the first match, so this should be used for unique component types (e.g.
'locality'), not for categories (e.g. 'political') that can describe multiple components.
:arg dict result: A results dict with an 'address_components' key, as returned by the
Google geocoder.
:arg dict lookup: The type (e.g. 'street_number') and key ('short_name' or 'long_name') of
the desired address component value.
:returns: address component or empty string | entailment |
def install_requirements(self):
"""Install Ubuntu Requirements"""
print('Installing Requirements')
print(platform.dist())
if platform.dist()[0] in ['Ubuntu', 'LinuxMint']:
command = 'sudo apt-get install -y gcc git python3-dev zlib1g-dev make zip libssl-dev libbz2-dev liblzma-dev libcurl4-openssl-dev build-essential libxml2-dev apache2 zlib1g-dev bcftools build-essential cpanminus curl git libbz2-dev libcurl4-openssl-dev liblocal-lib-perl liblzma-dev libmysqlclient-dev libpng-dev libpq-dev libssl-dev manpages mysql-client openssl perl perl-base pkg-config python3-dev python3-pip python3-setuptools sed tabix unzip vcftools vim wget zlib1g-dev apache2 build-essential cpanminus curl git libmysqlclient-dev libpng-dev libssl-dev locales manpages mysql-client openssl perl perl-base unzip vim wget libgd-dev' # lamp-server^
sts = call(command, shell=True)
try:
subprocess.call(['java', '-version'])
except:
command = """sudo apt install -y software-properties-common
sudo add-apt-repository -y ppa:webupd8team/java
sudo apt-get update
echo "oracle-java8-installer shared/accepted-oracle-license-v1-1 select true" | sudo debconf-set-selections
sudo apt-get -y install oracle-java8-installer"""
sts = call(command, shell=True)
elif platform.dist()[0] in ['debian']:
command = 'sudo apt-get update'
sts = call(command, shell=True)
command = 'sudo apt-get install -y libmodule-install-perl apache2 bcftools build-essential cpanminus curl git libbz2-dev libcurl4-openssl-dev liblocal-lib-perl liblzma-dev default-libmysqlclient-dev libpng-dev libpq-dev libssl-dev manpages mysql-client openssl perl perl-base pkg-config python3-dev python3-pip python3-setuptools sed tabix unzip vcftools vim wget zlib1g-dev apache2 build-essential cpanminus curl git libpng-dev libssl-dev locales manpages mysql-client openssl perl perl-base unzip vim wget libgd-dev libxml-libxml-perl libgd-dev' # lamp-server^
sts = call(command, shell=True)
command = 'sudo apt-get install -y default-jre default-jdk'
sts = call(command, shell=True)
elif platform.dist()[0] in ['redhat', 'centos']:
command = 'sudo yum install libcurl-devel sed vcftools bcftools tabix zlib-devel postgresql96-libs perl-local-lib perl-App-cpanminus curl unzip wget'
sts = call(command, shell=True)
command = """sudo yum groupinstall 'Development Tools'"""
sts = call(command, shell=True)
command = """sudo yum install gcc gcc-c++ make openssl-devel"""
sts = call(command, shell=True)
try:
subprocess.call(['java', '-version'])
except:
command = "sudo yum install -y java-1.8.0-openjdk"
sts = call(command, shell=True)
# Perl Requirements
command = "sudo cpanm DBI DBD::mysql File::Copy::Recursive Archive::Extract Archive::Zip LWP::Simple Bio::Root::Version LWP::Protocol::https Bio::DB::Fasta CGI Test::utf8 Test::File inc::Module::Install XML::DOM::XPath XML::LibXML"
sts = call(command, shell=True)
command = "sudo cpanm --local-lib=~/perl5 local::lib && eval $(perl -I ~/perl5/lib/perl5/ -Mlocal::lib)"
sts = call(command, shell=True) | Install Ubuntu Requirements | entailment |
def search(self, query: Optional[dict] = None, offset: Optional[int] = None,
limit: Optional[int] = None,
order_by: Union[None, list, tuple] = None) -> Sequence['IModel']:
"""return search result based on specified rulez query"""
raise NotImplementedError | return search result based on specified rulez query | entailment |
def aggregate(self, query: Optional[dict] = None,
group: Optional[dict] = None,
order_by: Union[None, list, tuple] = None) -> list:
"""return aggregation result based on specified rulez query and group"""
raise NotImplementedError | return aggregation result based on specified rulez query and group | entailment |
def put_blob(self, field: str, fileobj: BinaryIO,
filename: str,
mimetype: Optional[str] = None,
size: Optional[int] = None,
encoding: Optional[str] = None) -> IBlob:
"""Receive and store blob object"""
raise NotImplementedError | Receive and store blob object | entailment |
def before_blobput(self, field: str, fileobj: BinaryIO,
filename: str,
mimetype: Optional[str] = None,
size: Optional[int] = None,
encoding: Optional[str] = None) -> None:
"""Triggered before BLOB is stored""" | Triggered before BLOB is stored | entailment |
def search(self, query: Optional[dict] = None,
offset: int = 0,
limit: Optional[int] = None,
order_by: Optional[tuple] = None,
secure: bool = False) -> List[IModel]:
"""Search for models
Filtering is done through ``rulez`` based JSON/dict query, which
defines boolean statements in JSON/dict structure.
: param query: Rulez based query
: param offset: Result offset
: param limit: Maximum number of result
: param order_by: Tuple of ``(field, order)`` where ``order`` is
``'asc'`` or ``'desc'``
: param secure: When set to True, this will filter out any object which
current logged in user is not allowed to see
: todo: ``order_by`` need to allow multiple field ordering
"""
raise NotImplementedError | Search for models
Filtering is done through ``rulez`` based JSON/dict query, which
defines boolean statements in JSON/dict structure.
: param query: Rulez based query
: param offset: Result offset
: param limit: Maximum number of result
: param order_by: Tuple of ``(field, order)`` where ``order`` is
``'asc'`` or ``'desc'``
: param secure: When set to True, this will filter out any object which
current logged in user is not allowed to see
: todo: ``order_by`` need to allow multiple field ordering | entailment |
def aggregate(self, query: Optional[dict] = None,
group: Optional[dict] = None,
order_by: Optional[tuple] = None) -> List[IModel]:
"""Get aggregated results
: param query: Rulez based query
: param group: Grouping structure
: param order_by: Tuple of ``(field, order)`` where ``order`` is
``'asc'`` or ``'desc'``
: todo: Grouping structure need to be documented
"""
raise NotImplementedError | Get aggregated results
: param query: Rulez based query
: param group: Grouping structure
: param order_by: Tuple of ``(field, order)`` where ``order`` is
``'asc'`` or ``'desc'``
: todo: Grouping structure need to be documented | entailment |
def fetch_state_data(self):
"""Fetch the raw JSON game state data from EchoVR's ``/session`` API
This method could be useful if you want to retrieve some API data not
directly exposed by this Python wrapper. Otherwise, you should probably
use :meth:`fetch_state` instead.
:returns:
An object (probably a :class:`dict`) representing the raw JSON
response returned by the EchoVR client.
:raises requests.exceptions.ConnectionError:
This exception will be thrown if the API is unavaible. This might
indicate that the user is not currently in a match, or that they
didn't launch Echo VR with the `-http` option.
:raises json.decoder.JSONDecodeError:
This exception will be thrown if the data returned by the API is not
valid JSON. Likely indicates a bug in Echo VR or in this library.
"""
response = requests.get(self._gamestate_url)
response_text = response.text.rstrip('\0')
return json.loads(response_text) | Fetch the raw JSON game state data from EchoVR's ``/session`` API
This method could be useful if you want to retrieve some API data not
directly exposed by this Python wrapper. Otherwise, you should probably
use :meth:`fetch_state` instead.
:returns:
An object (probably a :class:`dict`) representing the raw JSON
response returned by the EchoVR client.
:raises requests.exceptions.ConnectionError:
This exception will be thrown if the API is unavaible. This might
indicate that the user is not currently in a match, or that they
didn't launch Echo VR with the `-http` option.
:raises json.decoder.JSONDecodeError:
This exception will be thrown if the data returned by the API is not
valid JSON. Likely indicates a bug in Echo VR or in this library. | entailment |
def to_bing_str(self):
"""
Convert Viewbox object to a string that can be used by Bing
as a query parameter.
"""
vb = self.convert_srs(4326)
return '%s,%s,%s,%s' % (vb.bottom, vb.left, vb.top, vb.right) | Convert Viewbox object to a string that can be used by Bing
as a query parameter. | entailment |
def to_pelias_dict(self):
"""
Convert Viewbox object to a string that can be used by Pelias
as a query parameter.
"""
vb = self.convert_srs(4326)
return {
'boundary.rect.min_lat': vb.bottom,
'boundary.rect.min_lon': vb.left,
'boundary.rect.max_lat': vb.top,
'boundary.rect.max_lon': vb.right
} | Convert Viewbox object to a string that can be used by Pelias
as a query parameter. | entailment |
def to_google_str(self):
""" Convert to Google's bounds format: 'latMin,lonMin|latMax,lonMax' """
vb = self.convert_srs(4326)
return '%s,%s|%s,%s' % (vb.bottom, vb.left, vb.top, vb.right) | Convert to Google's bounds format: 'latMin,lonMin|latMax,lonMax' | entailment |
def to_mapquest_str(self):
"""
Convert Viewbox object to a string that can be used by
`MapQuest <http://www.mapquestapi.com/geocoding/#options>`_
as a query parameter.
"""
vb = self.convert_srs(4326)
return '%s,%s,%s,%s' % (vb.left, vb.top, vb.right, vb.bottom) | Convert Viewbox object to a string that can be used by
`MapQuest <http://www.mapquestapi.com/geocoding/#options>`_
as a query parameter. | entailment |
def to_esri_wgs_json(self):
"""
Convert Viewbox object to a JSON string that can be used
by the ESRI World Geocoding Service as a parameter.
"""
try:
return ('{ "xmin" : %s, '
'"ymin" : %s, '
'"xmax" : %s, '
'"ymax" : %s, '
'"spatialReference" : {"wkid" : %d} }'
% (self.left,
self.bottom,
self.right,
self.top,
self.wkid))
except ValueError:
raise Exception('One or more values could not be cast to a number. '
'Four bounding points must be real numbers. '
'WKID must be an integer.') | Convert Viewbox object to a JSON string that can be used
by the ESRI World Geocoding Service as a parameter. | entailment |
def register(context, request, load):
"""Validate the username and password and create the user."""
data = request.json
res = validate(data, dataclass_to_jsl(
RegistrationSchema).get_schema())
if res:
@request.after
def set_error(response):
response.status = 422
return {
'status': 'error',
'field_errors': [{'message': res[x]} for x in res.keys()]
}
if data['password'] != data['password_validate']:
@request.after
def adjust_response(response):
response.status = 422
return {'status': 'error',
'message': 'Password confirmation does not match'}
if 'state' not in data.keys() or not data['state']:
data['state'] = request.app.settings.application.new_user_state
del data['password_validate']
obj = context.create(data)
return {'status': 'success'} | Validate the username and password and create the user. | entailment |
def process_login(context, request):
"""Authenticate username and password and log in user"""
username = request.json['username']
password = request.json['password']
# Do the password validation.
user = context.authenticate(username, password)
if not user:
@request.after
def adjust_status(response):
response.status = 401
return {
'status': 'error',
'error': {
'code': 401,
'message': 'Invalid Username / Password'
}
}
@request.after
def remember(response):
"""Remember the identity of the user logged in."""
# We pass the extra info to the identity object.
response.headers.add('Access-Control-Expose-Headers', 'Authorization')
identity = user.identity
request.app.remember_identity(response, request, identity)
return {
'status': 'success'
} | Authenticate username and password and log in user | entailment |
def logout(context, request):
"""Log out the user."""
@request.after
def forget(response):
request.app.forget_identity(response, request)
return {
'status': 'success'
} | Log out the user. | entailment |
def request(self, url, method, data=None):
"""
The requester shortcut to submit a http request to CloutFlare
:param url:
:param method:
:param data:
:return:
"""
method = getattr(requests, method)
response = method(
url,
headers=self.headers,
data=self.process_json_for_cloudflare(data) if data else None
)
content = response.json()
if response.status_code != 200:
print(content)
raise requests.HTTPError(content['message'])
return content | The requester shortcut to submit a http request to CloutFlare
:param url:
:param method:
:param data:
:return: | entailment |
def setup_zone(self):
"""
Setup zone for current domain.
It will also setup the dns records of the zone
:return:
"""
# Initialize current zone
zones_content = self.request(self.api_url, 'get')
try:
if len(self.domain.split('.')) == 3:
domain = self.domain.split('.', 1)[1]
else:
domain = self.domain
zone = [zone for zone in zones_content['result'] if zone['name'] == domain][0]
except IndexError:
raise ZoneNotFound('Cannot find zone information for the domain {domain}.'
.format(domain=self.domain))
self.zone = zone
# Initialize dns_records of current zone
dns_content = self.request(self.api_url + zone['id'] + '/dns_records', 'get')
self.dns_records = dns_content['result'] | Setup zone for current domain.
It will also setup the dns records of the zone
:return: | entailment |
def get_record(self, dns_type, name):
"""
Get a dns record
:param dns_type:
:param name:
:return:
"""
try:
record = [record for record in self.dns_records
if record['type'] == dns_type and record['name'] == name][0]
except IndexError:
raise RecordNotFound(
'Cannot find the specified dns record in domain {domain}'
.format(domain=name))
return record | Get a dns record
:param dns_type:
:param name:
:return: | entailment |
def create_record(self, dns_type, name, content, **kwargs):
"""
Create a dns record
:param dns_type:
:param name:
:param content:
:param kwargs:
:return:
"""
data = {
'type': dns_type,
'name': name,
'content': content
}
if kwargs.get('ttl') and kwargs['ttl'] != 1:
data['ttl'] = kwargs['ttl']
if kwargs.get('proxied') is True:
data['proxied'] = True
else:
data['proxied'] = False
content = self.request(
self.api_url + self.zone['id'] + '/dns_records',
'post',
data=data
)
print('DNS record successfully created')
return content['result'] | Create a dns record
:param dns_type:
:param name:
:param content:
:param kwargs:
:return: | entailment |
def update_record(self, dns_type, name, content, **kwargs):
"""
Update dns record
:param dns_type:
:param name:
:param content:
:param kwargs:
:return:
"""
record = self.get_record(dns_type, name)
data = {
'type': dns_type,
'name': name,
'content': content
}
if kwargs.get('ttl') and kwargs['ttl'] != 1:
data['ttl'] = kwargs['ttl']
if kwargs.get('proxied') is True:
data['proxied'] = True
else:
data['proxied'] = False
content = self.request(
urllib.parse.urljoin(self.api_url, self.zone['id'] + '/dns_records/' + record['id']),
'put',
data=data
)
print('DNS record successfully updated')
return content['result'] | Update dns record
:param dns_type:
:param name:
:param content:
:param kwargs:
:return: | entailment |
def create_or_update_record(self, dns_type, name, content, **kwargs):
"""
Create a dns record. Update it if the record already exists.
:param dns_type:
:param name:
:param content:
:param kwargs:
:return:
"""
try:
return self.update_record(dns_type, name, content, **kwargs)
except RecordNotFound:
return self.create_record(dns_type, name, content, **kwargs) | Create a dns record. Update it if the record already exists.
:param dns_type:
:param name:
:param content:
:param kwargs:
:return: | entailment |
def delete_record(self, dns_type, name):
"""
Delete a dns record
:param dns_type:
:param name:
:return:
"""
record = self.get_record(dns_type, name)
content = self.request(
urllib.parse.urljoin(self.api_url, self.zone['id'] + '/dns_records/' + record['id']),
'delete'
)
return content['result']['id'] | Delete a dns record
:param dns_type:
:param name:
:return: | entailment |
def sync_dns_from_my_ip(self, dns_type='A'):
"""
Sync dns from my public ip address.
It will not do update if ip address in dns record is already same as
current public ip address.
:param dns_type:
:return:
"""
ip_address = ''
for finder in self.public_ip_finder:
try:
result = requests.get(finder)
except requests.RequestException:
continue
if result.status_code == 200:
try:
socket.inet_aton(result.text)
ip_address = result.text
break
except socket.error:
try:
socket.inet_aton(result.json().get('ip'))
ip_address = result.json()['ip']
break
except socket.error:
continue
if ip_address == '':
print('None of public ip finder is working. Please try later')
sys.exit(1)
try:
record = self.get_record(dns_type, self.domain) \
if len(self.domain.split('.')) == 3 \
else self.get_record(dns_type, self.domain)
except RecordNotFound:
if self.proxied:
self.create_record(dns_type, self.domain, ip_address, proxied=True)
else:
self.create_record(dns_type, self.domain, ip_address)
print('Successfully created new record with IP address {new_ip}'
.format(new_ip=ip_address))
else:
if record['content'] != ip_address:
if self.proxied:
self.update_record(dns_type, self.domain, ip_address, proxied=True)
else:
self.update_record(dns_type, self.domain, ip_address)
print('Successfully updated IP address from {old_ip} to {new_ip}'
.format(old_ip=record['content'], new_ip=ip_address))
else:
print('IP address on CloudFlare is same as your current address') | Sync dns from my public ip address.
It will not do update if ip address in dns record is already same as
current public ip address.
:param dns_type:
:return: | entailment |
def _init_helper(self, vars_):
"""Overwrite defaults (if they exist) with arguments passed to constructor"""
for k in vars_:
if k == 'kwargs':
for kwarg in vars_[k]:
setattr(self, kwarg, vars_[k][kwarg])
elif k != 'self':
setattr(self, k, vars_[k]) | Overwrite defaults (if they exist) with arguments passed to constructor | entailment |
def list_members(context, request):
"""Return the list of users in the group."""
members = context.members()
return {
'users': [{
'username': m.identifier,
'userid': m.userid,
'roles': context.get_member_roles(m.userid),
'links': [rellink(m, request)]
} for m in members]
} | Return the list of users in the group. | entailment |
def grant_member(context, request):
"""Grant member roles in the group."""
mapping = request.json['mapping']
for entry in mapping:
user = entry['user']
roles = entry['roles']
username = user.get('username', None)
userid = user.get('userid', None)
if userid:
u = context.get_user_by_userid(userid)
elif username:
u = context.get_user_by_username(username)
else:
u = None
if u is None:
raise UnprocessableError(
'User %s does not exists' % (userid or username))
for rolename in roles:
context.grant_member_role(u.userid, rolename)
return {'status': 'success'} | Grant member roles in the group. | entailment |
def revoke_member(context, request):
"""Revoke member roles in the group."""
mapping = request.json['mapping']
for entry in mapping:
user = entry['user']
roles = entry['roles']
username = user.get('username', None)
userid = user.get('userid', None)
if userid:
u = context.get_user_by_userid(userid)
elif username:
u = context.get_user_by_username(username)
else:
u = None
if u is None:
raise UnprocessableError(
'User %s does not exists' % (userid or username))
for rolename in roles:
context.revoke_member_role(u.userid, rolename)
return {'status': 'success'} | Revoke member roles in the group. | entailment |
def call_MediaInfo(file_name, mediainfo_path=None):
"""Returns a dictionary of dictionaries with the output of
MediaInfo -f file_name"""
if mediainfo_path is None:
mediainfo_path = find_MediaInfo()
result = subprocess.check_output(
[mediainfo_path, "-f", file_name], universal_newlines=True
)
D = collections.defaultdict(dict)
for line in result.splitlines():
line = line.split(':', 1)
# Skip separators
if line[0] == '':
continue
# Start a new section
elif len(line) == 1:
section = line[0].strip()
# Record section key, value pairs
else:
k = line[0].strip()
v = line[1].strip()
if k not in D[section]:
D[section][k] = v
return D | Returns a dictionary of dictionaries with the output of
MediaInfo -f file_name | entailment |
def check_video(file_name, mediainfo_path=None):
"""
Scans the given file with MediaInfo and returns the video and audio codec
information if all the required parameters were found.
"""
D = call_MediaInfo(file_name, mediainfo_path)
err_msg = "Could not determine all video paramters"
if ("General" not in D) or ("Video" not in D):
raise MediaInfoError(err_msg)
general_keys = ("Count of audio streams", "File size", "Overall bit rate")
if any(k not in D["General"] for k in general_keys):
raise MediaInfoError(err_msg)
video_keys = (
"Format profile",
"Commercial name",
"Frame rate",
"Height",
"Scan type",
)
if any(k not in D["Video"] for k in video_keys):
raise MediaInfoError(err_msg)
return D | Scans the given file with MediaInfo and returns the video and audio codec
information if all the required parameters were found. | entailment |
def check_picture(file_name, mediainfo_path=None):
"""
Scans the given file with MediaInfo and returns the picture
information if all the required parameters were found.
"""
D = call_MediaInfo(file_name, mediainfo_path)
# Check that the file analyzed was a valid movie
if (
("Image" not in D) or
("Width" not in D["Image"]) or
("Height" not in D["Image"])
):
raise MediaInfoError("Could not determine all picture paramters")
return D | Scans the given file with MediaInfo and returns the picture
information if all the required parameters were found. | entailment |
def md5_checksum(file_path, chunk_bytes=4194304):
"""Return the MD5 checksum (hex digest) of the file"""
with open(file_path, "rb") as infile:
checksum = hashlib.md5()
while 1:
data = infile.read(chunk_bytes)
if not data:
break
checksum.update(data)
return checksum.hexdigest() | Return the MD5 checksum (hex digest) of the file | entailment |
def trim(docstring):
"""
Remove the tabs to spaces, and remove the extra spaces / tabs that are in
front of the text in docstrings.
Implementation taken from http://www.python.org/dev/peps/pep-0257/
"""
if not docstring:
return ''
# Convert tabs to spaces (following the normal Python rules)
# and split into a list of lines:
lines = docstring.expandtabs().splitlines()
# Determine minimum indentation (first line doesn't count):
indent = sys.maxsize
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
# Remove indentation (first line is special):
trimmed = [lines[0].strip()]
if indent < sys.maxsize:
for line in lines[1:]:
trimmed.append(line[indent:].rstrip())
# Strip off trailing and leading blank lines:
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
# Return a single string:
res = '\n'.join(trimmed)
if not PY3 and not isinstance(res, unicode):
res = res.decode('utf8')
return res | Remove the tabs to spaces, and remove the extra spaces / tabs that are in
front of the text in docstrings.
Implementation taken from http://www.python.org/dev/peps/pep-0257/ | entailment |
def _get_attributes(schema, location):
"""Return the schema's children, filtered by location."""
schema = DottedNameResolver(__name__).maybe_resolve(schema)
def _filter(attr):
if not hasattr(attr, "location"):
valid_location = 'body' in location
else:
valid_location = attr.location in to_list(location)
return valid_location
return list(filter(_filter, schema().children)) | Return the schema's children, filtered by location. | entailment |
def _main_ctxmgr(func):
'''
A decorator wrapper for :class:`ServerMainContextManager`
Usage example:
.. code:: python
@aiotools.main
def mymain():
server_args = do_init()
stop_sig = yield server_args
if stop_sig == signal.SIGINT:
do_graceful_shutdown()
else:
do_forced_shutdown()
aiotools.start_server(..., main_ctxmgr=mymain, ...)
'''
@functools.wraps(func)
def helper(*args, **kwargs):
return ServerMainContextManager(func, args, kwargs)
return helper | A decorator wrapper for :class:`ServerMainContextManager`
Usage example:
.. code:: python
@aiotools.main
def mymain():
server_args = do_init()
stop_sig = yield server_args
if stop_sig == signal.SIGINT:
do_graceful_shutdown()
else:
do_forced_shutdown()
aiotools.start_server(..., main_ctxmgr=mymain, ...) | entailment |
def start_server(worker_actxmgr: AsyncServerContextManager,
main_ctxmgr: Optional[ServerMainContextManager] = None,
extra_procs: Iterable[Callable] = tuple(),
stop_signals: Iterable[signal.Signals] = (
signal.SIGINT,
signal.SIGTERM),
num_workers: int = 1,
use_threading: bool = False,
args: Iterable[Any] = tuple()):
'''
Starts a multi-process server where each process has their own individual
asyncio event loop. Their lifecycles are automantically managed -- if the
main program receives one of the signals specified in ``stop_signals`` it
will initiate the shutdown routines on each worker that stops the event
loop gracefully.
Args:
worker_actxmgr: An asynchronous context manager that dicates the
initialization and shutdown steps of each worker.
It should accept the following three arguments:
* **loop**: the asyncio event loop created and set
by aiotools
* **pidx**: the 0-based index of the worker
(use this for per-worker logging)
* **args**: a concatenated tuple of values yielded by
**main_ctxmgr** and the user-defined arguments in
**args**.
aiotools automatically installs an interruption handler
that calls ``loop.stop()`` to the given event loop,
regardless of using either threading or
multiprocessing.
main_ctxmgr: An optional context manager that performs global
initialization and shutdown steps of the whole program.
It may yield one or more values to be passed to worker
processes along with **args** passed to this function.
There is no arguments passed to those functions since
you can directly access ``sys.argv`` to parse command
line arguments and/or read user configurations.
extra_procs: An iterable of functions that consist of extra processes
whose lifecycles are synchronized with other workers.
You should write the shutdown steps of them differently
depending on the value of **use_threading** argument.
If it is ``False`` (default), they will get
a :class:`BaseException` depending on the received stop signal
number, either :class:`KeyboardInterrupt` (for SIGINT),
:class:`SystemExit` (for SIGTERM), or
:class:`InterruptedBySignal` (otherwise).
If it is ``True``, they should check their **intr_event**
argument periodically because there is no way to install
signal handlers in Python threads (only the main thread
can install signal handlers).
It should accept the following three arguments:
* **intr_event**: :class:`threading.Event` object that
signals the interruption of the main thread (only
available when **use_threading** is ``True``; otherwise
it is set to ``None``)
* **pidx**: same to **worker_actxmgr** argument
* **args**: same to **worker_actxmgr** argument
stop_signals: A list of UNIX signals that the main program to
recognize as termination signals.
num_workers: The number of children workers.
use_threading: Use :mod:`threading` instead of :mod:`multiprocessing`.
In this case, the GIL may become the performance
bottleneck. Set this ``True`` only when you know what
you are going to do. Note that this changes the way
to write user-defined functions passed as **extra_procs**.
args: The user-defined arguments passed to workers and extra
processes. If **main_ctxmgr** yields one or more values,
they are *prepended* to this user arguments when passed to
workers and extra processes.
Returns:
None
.. versionchanged:: 0.3.2
The name of argument **num_proc** is changed to **num_workers**.
Even if **num_workers** is 1, a child is created instead of
doing everything at the main thread.
.. versionadded:: 0.3.2
The argument ``extra_procs`` and ``main_ctxmgr``.
.. versionadded:: 0.4.0
Now supports use of threading instead of multiprocessing via
**use_threading** option.
.. versionchanged:: 0.8.0
Now **worker_actxmgr** must be an instance of
:class:`AsyncServerContextManager` or async generators decorated by
``@aiotools.server``.
Now **main_ctxmgr** must be an instance of :class:`ServerMainContextManager`
or plain generators decorated by ``@aiotools.main``.
The usage is same to asynchronous context managers, but optionally you can
distinguish the received stop signal by retrieving the return value of the
``yield`` statement.
In **extra_procs** in non-threaded mode, stop signals are converted into
either one of :class:`KeyboardInterrupt`, :class:`SystemExit`, or
:class:`InterruptedBySignal` exception.
'''
@_main_ctxmgr
def noop_main_ctxmgr():
yield
def create_child(*args, **kwargs):
if use_threading:
return threading.Thread(*args, **kwargs)
else:
return mp.Process(*args, **kwargs)
assert stop_signals
if main_ctxmgr is None:
main_ctxmgr = noop_main_ctxmgr
children = []
_children_ctxs.clear()
_children_loops.clear()
intr_event = threading.Event()
sigblock_mask = frozenset(stop_signals)
main_ctx = main_ctxmgr()
# temporarily block signals and register signal handlers to mainloop
signal.pthread_sigmask(signal.SIG_BLOCK, sigblock_mask)
old_loop = asyncio.get_event_loop()
mainloop = asyncio.new_event_loop()
asyncio.set_event_loop(mainloop)
# to make subprocess working in child threads
try:
asyncio.get_child_watcher()
except NotImplementedError:
pass # for uvloop
# build a main-to-worker interrupt channel using signals
def handle_stop_signal(signum):
main_ctx.yield_return = signum
if use_threading:
with _children_lock:
for c in _children_ctxs:
c.yield_return = signum
for l in _children_loops:
l.call_soon_threadsafe(l.stop)
intr_event.set()
else:
os.killpg(0, signum)
mainloop.stop()
for signum in stop_signals:
mainloop.add_signal_handler(
signum,
functools.partial(handle_stop_signal, signum))
# build a reliable worker-to-main interrupt channel using a pipe
# (workers have no idea whether the main interrupt is enabled/disabled)
def handle_child_interrupt(fd):
child_idx = struct.unpack('i', os.read(fd, 4))[0] # noqa
log.debug(f'Child {child_idx} has interrupted the main program.')
# self-interrupt to initiate the main-to-worker interrupts
signal.pthread_sigmask(signal.SIG_UNBLOCK, {signal.SIGINT})
os.kill(0, signal.SIGINT)
if use_threading:
child_intr_pipe = os.pipe()
rfd = child_intr_pipe[0]
else:
child_intr_pipe = mp.Pipe()
rfd = child_intr_pipe[0].fileno()
mainloop.add_reader(rfd, handle_child_interrupt, rfd)
# start
with main_ctx as main_args:
# retrieve args generated by the user-defined main
if main_args is None:
main_args = tuple()
if not isinstance(main_args, tuple):
main_args = (main_args, )
# spawn managed async workers
for i in range(num_workers):
p = create_child(target=_worker_main, daemon=True,
args=(worker_actxmgr, use_threading, stop_signals,
child_intr_pipe[1], i,
main_args + args))
p.start()
children.append(p)
# spawn extra workers
for i, f in enumerate(extra_procs):
p = create_child(target=_extra_main, daemon=True,
args=(f, use_threading, stop_signals,
intr_event, num_workers + i,
main_args + args))
p.start()
children.append(p)
try:
# unblock the stop signals for user/external interrupts.
signal.pthread_sigmask(signal.SIG_UNBLOCK, sigblock_mask)
# run!
mainloop.run_forever()
# if interrupted, wait for workers to finish.
for child in children:
child.join()
finally:
mainloop.close()
asyncio.set_event_loop(old_loop) | Starts a multi-process server where each process has their own individual
asyncio event loop. Their lifecycles are automantically managed -- if the
main program receives one of the signals specified in ``stop_signals`` it
will initiate the shutdown routines on each worker that stops the event
loop gracefully.
Args:
worker_actxmgr: An asynchronous context manager that dicates the
initialization and shutdown steps of each worker.
It should accept the following three arguments:
* **loop**: the asyncio event loop created and set
by aiotools
* **pidx**: the 0-based index of the worker
(use this for per-worker logging)
* **args**: a concatenated tuple of values yielded by
**main_ctxmgr** and the user-defined arguments in
**args**.
aiotools automatically installs an interruption handler
that calls ``loop.stop()`` to the given event loop,
regardless of using either threading or
multiprocessing.
main_ctxmgr: An optional context manager that performs global
initialization and shutdown steps of the whole program.
It may yield one or more values to be passed to worker
processes along with **args** passed to this function.
There is no arguments passed to those functions since
you can directly access ``sys.argv`` to parse command
line arguments and/or read user configurations.
extra_procs: An iterable of functions that consist of extra processes
whose lifecycles are synchronized with other workers.
You should write the shutdown steps of them differently
depending on the value of **use_threading** argument.
If it is ``False`` (default), they will get
a :class:`BaseException` depending on the received stop signal
number, either :class:`KeyboardInterrupt` (for SIGINT),
:class:`SystemExit` (for SIGTERM), or
:class:`InterruptedBySignal` (otherwise).
If it is ``True``, they should check their **intr_event**
argument periodically because there is no way to install
signal handlers in Python threads (only the main thread
can install signal handlers).
It should accept the following three arguments:
* **intr_event**: :class:`threading.Event` object that
signals the interruption of the main thread (only
available when **use_threading** is ``True``; otherwise
it is set to ``None``)
* **pidx**: same to **worker_actxmgr** argument
* **args**: same to **worker_actxmgr** argument
stop_signals: A list of UNIX signals that the main program to
recognize as termination signals.
num_workers: The number of children workers.
use_threading: Use :mod:`threading` instead of :mod:`multiprocessing`.
In this case, the GIL may become the performance
bottleneck. Set this ``True`` only when you know what
you are going to do. Note that this changes the way
to write user-defined functions passed as **extra_procs**.
args: The user-defined arguments passed to workers and extra
processes. If **main_ctxmgr** yields one or more values,
they are *prepended* to this user arguments when passed to
workers and extra processes.
Returns:
None
.. versionchanged:: 0.3.2
The name of argument **num_proc** is changed to **num_workers**.
Even if **num_workers** is 1, a child is created instead of
doing everything at the main thread.
.. versionadded:: 0.3.2
The argument ``extra_procs`` and ``main_ctxmgr``.
.. versionadded:: 0.4.0
Now supports use of threading instead of multiprocessing via
**use_threading** option.
.. versionchanged:: 0.8.0
Now **worker_actxmgr** must be an instance of
:class:`AsyncServerContextManager` or async generators decorated by
``@aiotools.server``.
Now **main_ctxmgr** must be an instance of :class:`ServerMainContextManager`
or plain generators decorated by ``@aiotools.main``.
The usage is same to asynchronous context managers, but optionally you can
distinguish the received stop signal by retrieving the return value of the
``yield`` statement.
In **extra_procs** in non-threaded mode, stop signals are converted into
either one of :class:`KeyboardInterrupt`, :class:`SystemExit`, or
:class:`InterruptedBySignal` exception. | entailment |
def acquire(self):
"""Acquire the lock."""
self.lease = self.client.lease(self.ttl)
base64_key = _encode(self.key)
base64_value = _encode(self._uuid)
txn = {
'compare': [{
'key': base64_key,
'result': 'EQUAL',
'target': 'CREATE',
'create_revision': 0
}],
'success': [{
'request_put': {
'key': base64_key,
'value': base64_value,
'lease': self.lease.id
}
}],
'failure': [{
'request_range': {
'key': base64_key
}
}]
}
result = self.client.transaction(txn)
if 'succeeded' in result:
return result['succeeded']
return False | Acquire the lock. | entailment |
def release(self):
"""Release the lock"""
base64_key = _encode(self.key)
base64_value = _encode(self._uuid)
txn = {
'compare': [{
'key': base64_key,
'result': 'EQUAL',
'target': 'VALUE',
'value': base64_value
}],
'success': [{
'request_delete_range': {
'key': base64_key
}
}]
}
result = self.client.transaction(txn)
if 'succeeded' in result:
return result['succeeded']
return False | Release the lock | entailment |
def is_acquired(self):
"""Check if the lock is acquired"""
values = self.client.get(self.key)
return six.b(self._uuid) in values | Check if the lock is acquired | entailment |
async def aiter(obj, sentinel=_sentinel):
'''
Analogous to the builtin :func:`iter()`.
'''
if sentinel is _sentinel:
# Since we cannot directly return the return value of obj.__aiter__()
# as being an async-generator, we do the async-iteration here.
async for item in obj:
yield item
else:
while True:
item = await obj()
if item == sentinel:
break
yield item | Analogous to the builtin :func:`iter()`. | entailment |
def client(host='localhost', port=2379,
ca_cert=None, cert_key=None, cert_cert=None,
timeout=None, protocol="http"):
"""Return an instance of an Etcd3Client."""
return Etcd3Client(host=host,
port=port,
ca_cert=ca_cert,
cert_key=cert_key,
cert_cert=cert_cert,
timeout=timeout,
protocol=protocol) | Return an instance of an Etcd3Client. | entailment |
def get_url(self, path):
"""Construct a full url to the v3alpha API given a specific path
:param path:
:return: url
"""
host = ('[' + self.host + ']' if (self.host.find(':') != -1)
else self.host)
base_url = self.protocol + '://' + host + ':' + str(self.port)
return base_url + '/v3alpha/' + path.lstrip("/") | Construct a full url to the v3alpha API given a specific path
:param path:
:return: url | entailment |
def post(self, *args, **kwargs):
"""helper method for HTTP POST
:param args:
:param kwargs:
:return: json response
"""
try:
resp = self.session.post(*args, **kwargs)
if resp.status_code in _EXCEPTIONS_BY_CODE:
raise _EXCEPTIONS_BY_CODE[resp.status_code](resp.reason)
if resp.status_code != requests.codes['ok']:
raise exceptions.Etcd3Exception(resp.reason)
except requests.exceptions.Timeout as ex:
raise exceptions.ConnectionTimeoutError(six.text_type(ex))
except requests.exceptions.ConnectionError as ex:
raise exceptions.ConnectionFailedError(six.text_type(ex))
return resp.json() | helper method for HTTP POST
:param args:
:param kwargs:
:return: json response | entailment |
def lease(self, ttl=DEFAULT_TIMEOUT):
"""Create a Lease object given a timeout
:param ttl: timeout
:return: Lease object
"""
result = self.post(self.get_url("/lease/grant"),
json={"TTL": ttl, "ID": 0})
return Lease(int(result['ID']), client=self) | Create a Lease object given a timeout
:param ttl: timeout
:return: Lease object | entailment |
def lock(self, id=str(uuid.uuid4()), ttl=DEFAULT_TIMEOUT):
"""Create a Lock object given an ID and timeout
:param id: ID for the lock, creates a new uuid if not provided
:param ttl: timeout
:return: Lock object
"""
return Lock(id, ttl=ttl, client=self) | Create a Lock object given an ID and timeout
:param id: ID for the lock, creates a new uuid if not provided
:param ttl: timeout
:return: Lock object | entailment |
def create(self, key, value):
"""Atomically create the given key only if the key doesn't exist.
This verifies that the create_revision of a key equales to 0, then
creates the key with the value.
This operation takes place in a transaction.
:param key: key in etcd to create
:param value: value of the key
:type value: bytes or string
:returns: status of transaction, ``True`` if the create was
successful, ``False`` otherwise
:rtype: bool
"""
base64_key = _encode(key)
base64_value = _encode(value)
txn = {
'compare': [{
'key': base64_key,
'result': 'EQUAL',
'target': 'CREATE',
'create_revision': 0
}],
'success': [{
'request_put': {
'key': base64_key,
'value': base64_value,
}
}],
'failure': []
}
result = self.transaction(txn)
if 'succeeded' in result:
return result['succeeded']
return False | Atomically create the given key only if the key doesn't exist.
This verifies that the create_revision of a key equales to 0, then
creates the key with the value.
This operation takes place in a transaction.
:param key: key in etcd to create
:param value: value of the key
:type value: bytes or string
:returns: status of transaction, ``True`` if the create was
successful, ``False`` otherwise
:rtype: bool | entailment |
def put(self, key, value, lease=None):
"""Put puts the given key into the key-value store.
A put request increments the revision of the key-value store
and generates one event in the event history.
:param key:
:param value:
:param lease:
:return: boolean
"""
payload = {
"key": _encode(key),
"value": _encode(value)
}
if lease:
payload['lease'] = lease.id
self.post(self.get_url("/kv/put"), json=payload)
return True | Put puts the given key into the key-value store.
A put request increments the revision of the key-value store
and generates one event in the event history.
:param key:
:param value:
:param lease:
:return: boolean | entailment |
def get(self, key, metadata=False, sort_order=None,
sort_target=None, **kwargs):
"""Range gets the keys in the range from the key-value store.
:param key:
:param metadata:
:param sort_order: 'ascend' or 'descend' or None
:param sort_target: 'key' or 'version' or 'create' or 'mod' or 'value'
:param kwargs:
:return:
"""
try:
order = 0
if sort_order:
order = _SORT_ORDER.index(sort_order)
except ValueError:
raise ValueError('sort_order must be one of "ascend" or "descend"')
try:
target = 0
if sort_target:
target = _SORT_TARGET.index(sort_target)
except ValueError:
raise ValueError('sort_target must be one of "key", '
'"version", "create", "mod" or "value"')
payload = {
"key": _encode(key),
"sort_order": order,
"sort_target": target,
}
payload.update(kwargs)
result = self.post(self.get_url("/kv/range"),
json=payload)
if 'kvs' not in result:
return []
if metadata:
def value_with_metadata(item):
item['key'] = _decode(item['key'])
value = _decode(item.pop('value'))
return value, item
return [value_with_metadata(item) for item in result['kvs']]
else:
return [_decode(item['value']) for item in result['kvs']] | Range gets the keys in the range from the key-value store.
:param key:
:param metadata:
:param sort_order: 'ascend' or 'descend' or None
:param sort_target: 'key' or 'version' or 'create' or 'mod' or 'value'
:param kwargs:
:return: | entailment |
def get_all(self, sort_order=None, sort_target='key'):
"""Get all keys currently stored in etcd.
:returns: sequence of (value, metadata) tuples
"""
return self.get(
key=_encode(b'\0'),
metadata=True,
sort_order=sort_order,
sort_target=sort_target,
range_end=_encode(b'\0'),
) | Get all keys currently stored in etcd.
:returns: sequence of (value, metadata) tuples | entailment |
def get_prefix(self, key_prefix, sort_order=None, sort_target=None):
"""Get a range of keys with a prefix.
:param sort_order: 'ascend' or 'descend' or None
:param key_prefix: first key in range
:returns: sequence of (value, metadata) tuples
"""
return self.get(key_prefix,
metadata=True,
range_end=_encode(_increment_last_byte(key_prefix)),
sort_order=sort_order,
sort_target=sort_target) | Get a range of keys with a prefix.
:param sort_order: 'ascend' or 'descend' or None
:param key_prefix: first key in range
:returns: sequence of (value, metadata) tuples | entailment |
def replace(self, key, initial_value, new_value):
"""Atomically replace the value of a key with a new value.
This compares the current value of a key, then replaces it with a new
value if it is equal to a specified value. This operation takes place
in a transaction.
:param key: key in etcd to replace
:param initial_value: old value to replace
:type initial_value: bytes or string
:param new_value: new value of the key
:type new_value: bytes or string
:returns: status of transaction, ``True`` if the replace was
successful, ``False`` otherwise
:rtype: bool
"""
base64_key = _encode(key)
base64_initial_value = _encode(initial_value)
base64_new_value = _encode(new_value)
txn = {
'compare': [{
'key': base64_key,
'result': 'EQUAL',
'target': 'VALUE',
'value': base64_initial_value
}],
'success': [{
'request_put': {
'key': base64_key,
'value': base64_new_value,
}
}],
'failure': []
}
result = self.transaction(txn)
if 'succeeded' in result:
return result['succeeded']
return False | Atomically replace the value of a key with a new value.
This compares the current value of a key, then replaces it with a new
value if it is equal to a specified value. This operation takes place
in a transaction.
:param key: key in etcd to replace
:param initial_value: old value to replace
:type initial_value: bytes or string
:param new_value: new value of the key
:type new_value: bytes or string
:returns: status of transaction, ``True`` if the replace was
successful, ``False`` otherwise
:rtype: bool | entailment |
def delete(self, key, **kwargs):
"""DeleteRange deletes the given range from the key-value store.
A delete request increments the revision of the key-value store and
generates a delete event in the event history for every deleted key.
:param key:
:param kwargs:
:return:
"""
payload = {
"key": _encode(key),
}
payload.update(kwargs)
result = self.post(self.get_url("/kv/deleterange"),
json=payload)
if 'deleted' in result:
return True
return False | DeleteRange deletes the given range from the key-value store.
A delete request increments the revision of the key-value store and
generates a delete event in the event history for every deleted key.
:param key:
:param kwargs:
:return: | entailment |
def delete_prefix(self, key_prefix):
"""Delete a range of keys with a prefix in etcd."""
return self.delete(
key_prefix, range_end=_encode(_increment_last_byte(key_prefix))) | Delete a range of keys with a prefix in etcd. | entailment |
def transaction(self, txn):
"""Txn processes multiple requests in a single transaction.
A txn request increments the revision of the key-value store and
generates events with the same revision for every completed request.
It is not allowed to modify the same key several times within one txn.
:param txn:
:return:
"""
return self.post(self.get_url("/kv/txn"),
data=json.dumps(txn)) | Txn processes multiple requests in a single transaction.
A txn request increments the revision of the key-value store and
generates events with the same revision for every completed request.
It is not allowed to modify the same key several times within one txn.
:param txn:
:return: | entailment |
def watch(self, key, **kwargs):
"""Watch a key.
:param key: key to watch
:returns: tuple of ``events_iterator`` and ``cancel``.
Use ``events_iterator`` to get the events of key changes
and ``cancel`` to cancel the watch request
"""
event_queue = queue.Queue()
def callback(event):
event_queue.put(event)
w = watch.Watcher(self, key, callback, **kwargs)
canceled = threading.Event()
def cancel():
canceled.set()
event_queue.put(None)
w.stop()
def iterator():
while not canceled.is_set():
event = event_queue.get()
if event is None:
canceled.set()
if not canceled.is_set():
yield event
return iterator(), cancel | Watch a key.
:param key: key to watch
:returns: tuple of ``events_iterator`` and ``cancel``.
Use ``events_iterator`` to get the events of key changes
and ``cancel`` to cancel the watch request | entailment |
def watch_prefix(self, key_prefix, **kwargs):
"""The same as ``watch``, but watches a range of keys with a prefix."""
kwargs['range_end'] = \
_increment_last_byte(key_prefix)
return self.watch(key_prefix, **kwargs) | The same as ``watch``, but watches a range of keys with a prefix. | entailment |
def watch_once(self, key, timeout=None, **kwargs):
"""Watch a key and stops after the first event.
:param key: key to watch
:param timeout: (optional) timeout in seconds.
:returns: event
"""
event_queue = queue.Queue()
def callback(event):
event_queue.put(event)
w = watch.Watcher(self, key, callback, **kwargs)
try:
return event_queue.get(timeout=timeout)
except queue.Empty:
raise exceptions.WatchTimedOut()
finally:
w.stop() | Watch a key and stops after the first event.
:param key: key to watch
:param timeout: (optional) timeout in seconds.
:returns: event | entailment |
def watch_prefix_once(self, key_prefix, timeout=None, **kwargs):
"""Watches a range of keys with a prefix, similar to watch_once"""
kwargs['range_end'] = \
_increment_last_byte(key_prefix)
return self.watch_once(key_prefix, timeout=timeout, **kwargs) | Watches a range of keys with a prefix, similar to watch_once | entailment |
def create_timer(cb: Callable[[float], None], interval: float,
delay_policy: TimerDelayPolicy = TimerDelayPolicy.DEFAULT,
loop: Optional[asyncio.BaseEventLoop] = None) -> asyncio.Task:
'''
Schedule a timer with the given callable and the interval in seconds.
The interval value is also passed to the callable.
If the callable takes longer than the timer interval, all accumulated
callable's tasks will be cancelled when the timer is cancelled.
Args:
cb: TODO - fill argument descriptions
Returns:
You can stop the timer by cancelling the returned task.
'''
if not loop:
loop = asyncio.get_event_loop()
async def _timer():
fired_tasks = []
try:
while True:
if delay_policy == TimerDelayPolicy.CANCEL:
for t in fired_tasks:
if not t.done():
t.cancel()
await t
fired_tasks.clear()
else:
fired_tasks[:] = [t for t in fired_tasks if not t.done()]
t = loop.create_task(cb(interval=interval))
fired_tasks.append(t)
await asyncio.sleep(interval)
except asyncio.CancelledError:
for t in fired_tasks:
t.cancel()
await asyncio.gather(*fired_tasks)
return loop.create_task(_timer()) | Schedule a timer with the given callable and the interval in seconds.
The interval value is also passed to the callable.
If the callable takes longer than the timer interval, all accumulated
callable's tasks will be cancelled when the timer is cancelled.
Args:
cb: TODO - fill argument descriptions
Returns:
You can stop the timer by cancelling the returned task. | entailment |
def revoke(self):
"""LeaseRevoke revokes a lease.
All keys attached to the lease will expire and be deleted.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
:return:
"""
self.client.post(self.client.get_url("/kv/lease/revoke"),
json={"ID": self.id})
return True | LeaseRevoke revokes a lease.
All keys attached to the lease will expire and be deleted.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
:return: | entailment |
def ttl(self):
"""LeaseTimeToLive retrieves lease information.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
:return:
"""
result = self.client.post(self.client.get_url("/kv/lease/timetolive"),
json={"ID": self.id})
return int(result['TTL']) | LeaseTimeToLive retrieves lease information.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
:return: | entailment |
def keys(self):
"""Get the keys associated with this lease.
:return:
"""
result = self.client.post(self.client.get_url("/kv/lease/timetolive"),
json={"ID": self.id,
"keys": True})
keys = result['keys'] if 'keys' in result else []
return [_decode(key) for key in keys] | Get the keys associated with this lease.
:return: | entailment |
def apartial(coro, *args, **kwargs):
'''
Wraps a coroutine function with pre-defined arguments (including keyword
arguments). It is an asynchronous version of :func:`functools.partial`.
'''
@functools.wraps(coro)
async def wrapped(*cargs, **ckwargs):
return await coro(*args, *cargs, **kwargs, **ckwargs)
return wrapped | Wraps a coroutine function with pre-defined arguments (including keyword
arguments). It is an asynchronous version of :func:`functools.partial`. | entailment |
def lru_cache(maxsize: int = 128,
typed: bool = False,
expire_after: float = None):
'''
A simple LRU cache just like :func:`functools.lru_cache`, but it works for
coroutines. This is not as heavily optimized as :func:`functools.lru_cache`
which uses an internal C implementation, as it targets async operations
that take a long time.
It follows the same API that the standard functools provides. The wrapped
function has ``cache_clear()`` method to flush the cache manually, but
leaves ``cache_info()`` for statistics unimplemented.
Note that calling the coroutine multiple times with the same arguments
before the first call returns may incur duplicate exectuions.
This function is not thread-safe.
Args:
maxsize: The maximum number of cached entries.
typed: Cache keys in different types separately (e.g., ``3`` and ``3.0`` will
be different keys).
expire_after: Re-calculate the value if the configured time has passed even
when the cache is hit. When re-calculation happens the
expiration timer is also reset.
'''
if maxsize is not None and not isinstance(maxsize, int):
raise TypeError('Expected maxsize to be an integer or None')
def wrapper(coro):
sentinel = object() # unique object to distinguish None as result
cache = collections.OrderedDict()
cache_get = cache.get
cache_del = cache.__delitem__
cache_set = cache.__setitem__
cache_len = cache.__len__
cache_move = cache.move_to_end
make_key = functools._make_key
# We don't use explicit locks like the standard functools,
# because this lru_cache is intended for use in asyncio coroutines.
# The only context interleaving happens when calling the user-defined
# coroutine, so there is no need to add extra synchronization guards.
@functools.wraps(coro)
async def wrapped(*args, **kwargs):
now = get_running_loop().time()
k = make_key(args, kwargs, typed)
entry = cache_get(k, sentinel)
if entry is not sentinel:
if entry.expire_at is None:
return entry.value
if entry.expire_at >= now:
return entry.value
cache_del(k)
result = await coro(*args, **kwargs)
if maxsize is not None and cache_len() >= maxsize:
cache.popitem(last=False)
if expire_after is not None:
expire_at = now + expire_after
else:
expire_at = None
cache_set(k, _CacheEntry(result, expire_at))
cache_move(k, last=True)
return result
def cache_clear():
cache.clear()
def cache_info():
raise NotImplementedError
wrapped.cache_clear = cache_clear
wrapped.cache_info = cache_info
return wrapped
return wrapper | A simple LRU cache just like :func:`functools.lru_cache`, but it works for
coroutines. This is not as heavily optimized as :func:`functools.lru_cache`
which uses an internal C implementation, as it targets async operations
that take a long time.
It follows the same API that the standard functools provides. The wrapped
function has ``cache_clear()`` method to flush the cache manually, but
leaves ``cache_info()`` for statistics unimplemented.
Note that calling the coroutine multiple times with the same arguments
before the first call returns may incur duplicate exectuions.
This function is not thread-safe.
Args:
maxsize: The maximum number of cached entries.
typed: Cache keys in different types separately (e.g., ``3`` and ``3.0`` will
be different keys).
expire_after: Re-calculate the value if the configured time has passed even
when the cache is hit. When re-calculation happens the
expiration timer is also reset. | entailment |
def get_services(self):
"""
get_services makes call to services end point of api.embed.ly to fetch
the list of supported providers and their regexes
"""
if self.services:
return self.services
url = 'http://api.embed.ly/1/services/python'
http = httplib2.Http(timeout=self.timeout)
headers = {'User-Agent': self.user_agent,
'Connection': 'close'}
resp, content = http.request(url, headers=headers)
if resp['status'] == '200':
resp_data = json.loads(content.decode('utf-8'))
self.services = resp_data
# build the regex that we can use later
_regex = []
for each in self.services:
_regex.append('|'.join(each.get('regex', [])))
self._regex = re.compile('|'.join(_regex))
return self.services | get_services makes call to services end point of api.embed.ly to fetch
the list of supported providers and their regexes | entailment |
def _get(self, version, method, url_or_urls, **kwargs):
"""
_get makes the actual call to api.embed.ly
"""
if not url_or_urls:
raise ValueError('%s requires a url or a list of urls given: %s' %
(method.title(), url_or_urls))
# a flag we can use instead of calling isinstance() all the time
multi = isinstance(url_or_urls, list)
# throw an error early for too many URLs
if multi and len(url_or_urls) > 20:
raise ValueError('Embedly accepts only 20 urls at a time. Url '
'Count:%s' % len(url_or_urls))
query = ''
key = kwargs.get('key', self.key)
# make sure that a key was set on the client or passed in
if not key:
raise ValueError('Requires a key. None given: %s' % key)
kwargs['key'] = key
query += urlencode(kwargs)
if multi:
query += '&urls=%s&' % ','.join([quote(url) for url in url_or_urls])
else:
query += '&url=%s' % quote(url_or_urls)
url = 'http://api.embed.ly/%s/%s?%s' % (version, method, query)
http = httplib2.Http(timeout=self.timeout)
headers = {'User-Agent': self.user_agent,
'Connection': 'close'}
resp, content = http.request(url, headers=headers)
if resp['status'] == '200':
data = json.loads(content.decode('utf-8'))
if kwargs.get('raw', False):
data['raw'] = content
else:
data = {'type': 'error',
'error': True,
'error_code': int(resp['status'])}
if multi:
return map(lambda url, data: Url(data, method, url),
url_or_urls, data)
return Url(data, method, url_or_urls) | _get makes the actual call to api.embed.ly | entailment |
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
From django.utils.encoding.py in 1.4.2+, minus the dependency on Six.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if sys.version_info[0] == 2:
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." %
klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass | A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
From django.utils.encoding.py in 1.4.2+, minus the dependency on Six.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class. | entailment |
def _encode(data):
"""Encode the given data using base-64
:param data:
:return: base-64 encoded string
"""
if not isinstance(data, bytes_types):
data = six.b(str(data))
return base64.b64encode(data).decode("utf-8") | Encode the given data using base-64
:param data:
:return: base-64 encoded string | entailment |
def _decode(data):
"""Decode the base-64 encoded string
:param data:
:return: decoded data
"""
if not isinstance(data, bytes_types):
data = six.b(str(data))
return base64.b64decode(data.decode("utf-8")) | Decode the base-64 encoded string
:param data:
:return: decoded data | entailment |
def _increment_last_byte(data):
"""Get the last byte in the array and increment it
:param bytes_string:
:return:
"""
if not isinstance(data, bytes_types):
if isinstance(data, six.string_types):
data = data.encode('utf-8')
else:
data = six.b(str(data))
s = bytearray(data)
s[-1] = s[-1] + 1
return bytes(s) | Get the last byte in the array and increment it
:param bytes_string:
:return: | entailment |
def metadata(self):
"""Return dict representation of this cookbook's metadata.rb ."""
self.metadata_path = os.path.join(self.path, 'metadata.rb')
if not os.path.isfile(self.metadata_path):
raise ValueError("Cookbook needs metadata.rb, %s"
% self.metadata_path)
if not self._metadata:
self._metadata = MetadataRb(open(self.metadata_path, 'r+'))
return self._metadata | Return dict representation of this cookbook's metadata.rb . | entailment |
def berksfile(self):
"""Return this cookbook's Berksfile instance."""
self.berks_path = os.path.join(self.path, 'Berksfile')
if not self._berksfile:
if not os.path.isfile(self.berks_path):
raise ValueError("No Berksfile found at %s"
% self.berks_path)
self._berksfile = Berksfile(open(self.berks_path, 'r+'))
return self._berksfile | Return this cookbook's Berksfile instance. | entailment |
def from_dict(cls, dictionary):
"""Create a MetadataRb instance from a dict."""
cookbooks = set()
# put these in order
groups = [cookbooks]
for key, val in dictionary.items():
if key == 'depends':
cookbooks.update({cls.depends_statement(cbn, meta)
for cbn, meta in val.items()})
body = ''
for group in groups:
if group:
body += '\n'
body += '\n'.join(group)
return cls.from_string(body) | Create a MetadataRb instance from a dict. | entailment |
def depends_statement(cookbook_name, metadata=None):
"""Return a valid Ruby 'depends' statement for the metadata.rb file."""
line = "depends '%s'" % cookbook_name
if metadata:
if not isinstance(metadata, dict):
raise TypeError("Stencil dependency options for %s "
"should be a dict of options, not %s."
% (cookbook_name, metadata))
if metadata:
line = "%s '%s'" % (line, "', '".join(metadata))
return line | Return a valid Ruby 'depends' statement for the metadata.rb file. | entailment |
def parse(self):
"""Parse the metadata.rb into a dict."""
data = utils.ruby_lines(self.readlines())
data = [tuple(j.strip() for j in line.split(None, 1))
for line in data]
depends = {}
for line in data:
if not len(line) == 2:
continue
key, value = line
if key == 'depends':
value = value.split(',')
lib = utils.ruby_strip(value[0])
detail = [utils.ruby_strip(j) for j in value[1:]]
depends[lib] = detail
datamap = {key: utils.ruby_strip(val) for key, val in data}
if depends:
datamap['depends'] = depends
self.seek(0)
return datamap | Parse the metadata.rb into a dict. | entailment |
def merge(self, other):
"""Add requirements from 'other' metadata.rb into this one."""
if not isinstance(other, MetadataRb):
raise TypeError("MetadataRb to merge should be a 'MetadataRb' "
"instance, not %s.", type(other))
current = self.to_dict()
new = other.to_dict()
# compare and gather cookbook dependencies
meta_writelines = ['%s\n' % self.depends_statement(cbn, meta)
for cbn, meta in new.get('depends', {}).items()
if cbn not in current.get('depends', {})]
self.write_statements(meta_writelines)
return self.to_dict() | Add requirements from 'other' metadata.rb into this one. | entailment |
def parse(self):
"""Parse this Berksfile into a dict."""
self.flush()
self.seek(0)
data = utils.ruby_lines(self.readlines())
data = [tuple(j.strip() for j in line.split(None, 1))
for line in data]
datamap = {}
for line in data:
if len(line) == 1:
datamap[line[0]] = True
elif len(line) == 2:
key, value = line
if key == 'cookbook':
datamap.setdefault('cookbook', {})
value = [utils.ruby_strip(v) for v in value.split(',')]
lib, detail = value[0], value[1:]
datamap['cookbook'].setdefault(lib, {})
# if there is additional dependency data but its
# not the ruby hash, its the version constraint
if detail and not any("".join(detail).startswith(o)
for o in self.berks_options):
constraint, detail = detail[0], detail[1:]
datamap['cookbook'][lib]['constraint'] = constraint
if detail:
for deet in detail:
opt, val = [
utils.ruby_strip(i)
for i in deet.split(':', 1)
]
if not any(opt == o for o in self.berks_options):
raise ValueError(
"Cookbook detail '%s' does not specify "
"one of '%s'" % (opt, self.berks_options))
else:
datamap['cookbook'][lib][opt.strip(':')] = (
utils.ruby_strip(val))
elif key == 'source':
datamap.setdefault(key, [])
datamap[key].append(utils.ruby_strip(value))
elif key:
datamap[key] = utils.ruby_strip(value)
self.seek(0)
return datamap | Parse this Berksfile into a dict. | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.