_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
q41500 | ValidateS3UploadForm._generate_processed_key_name | train | def _generate_processed_key_name(process_to, upload_name):
"""Returns a key name to use after processing based on timestamp and
upload key name."""
timestamp = datetime.now().strftime('%Y%m%d%H%M%S%f')
name, extension = os.path.splitext(upload_name)
digest = md5(''.join([timestamp, upload_name])).hexdigest()
return os.path.join(process_to, '{0}.{1}'.format(digest, extension)) | python | {
"resource": ""
} |
def clean_bucket_name(self):
    """Check the submitted bucket name against the storage backend's.

    :returns: the validated bucket name.
    :raises: ``forms.ValidationError`` on mismatch.
    """
    bucket_name = self.cleaned_data['bucket_name']
    if bucket_name != self.get_bucket_name():
        raise forms.ValidationError('Bucket name does not validate.')
    return bucket_name
def clean_key_name(self):
    """Validate the submitted key name.

    The key must carry the configured prefix and must actually exist in
    the bucket.

    :returns: the validated key name.
    :raises: ``forms.ValidationError`` if either check fails.
    """
    key = self.cleaned_data['key_name']
    # Guard 1: prefix requirement.
    if not key.startswith(self.get_key_prefix()):
        raise forms.ValidationError('Key does not have required prefix.')
    # Guard 2: the key must exist in the bucket.
    if not self.get_upload_key():
        raise forms.ValidationError('Key does not exist.')
    return key
def get_processed_key_name(self):
    """Return the full path to use for the processed file.

    The computed name is cached on the instance so repeated calls (e.g.
    from ``get_processed_path``) do not regenerate a new timestamped
    digest each time.
    """
    if not hasattr(self, '_processed_key_name'):
        path, upload_name = os.path.split(self.get_upload_key().name)
        key_name = self._generate_processed_key_name(
            self.process_to, upload_name)
        # Prefix with the storage backend's location so the key is
        # addressed relative to the bucket root.
        self._processed_key_name = os.path.join(
            self.get_storage().location, key_name)
    return self._processed_key_name
def get_processed_path(self):
    """Returns the processed file path from the storage backend.

    Strips the storage location prefix off the processed key name.

    :returns: File path from the storage backend.
    :rtype: :py:class:`unicode`
    """
    prefix_length = len(self.get_storage().location)
    return self.get_processed_key_name()[prefix_length:]
def process_upload(self, set_content_type=True):
    """Process the uploaded file.

    Copies the upload key to its processed location (carrying over the
    key's metadata), applies the processed ACL, then deletes the
    original upload key.

    :param set_content_type: when True, sniff the real content type of
        the upload and store it in the copied key's metadata.
    :returns: the new (processed) S3 key.
    """
    metadata = self.get_upload_key_metadata()
    if set_content_type:
        content_type = self.get_upload_content_type()
        # NOTE(review): b'{0}'.format(...) is Python 2 only -- bytes has
        # no .format() on Python 3; confirm the supported version.
        metadata.update({b'Content-Type': b'{0}'.format(content_type)})
    upload_key = self.get_upload_key()
    processed_key_name = self.get_processed_key_name()
    # Key.copy returns the new key object at the destination name.
    processed_key = upload_key.copy(upload_key.bucket.name,
                                    processed_key_name, metadata)
    processed_key.set_acl(self.get_processed_acl())
    # Remove the original upload now that the processed copy exists.
    upload_key.delete()
    return processed_key
def get_upload_content_type(self):
    """Determine the actual content type of the upload.

    Sniffs the start of the uploaded file with libmagic rather than
    trusting any client-supplied content type. The result is cached on
    the instance.
    """
    if not hasattr(self, '_upload_content_type'):
        with self.get_storage().open(self.get_upload_path()) as upload:
            # The first 1024 bytes are enough for MIME detection.
            content_type = Magic(mime=True).from_buffer(upload.read(1024))
        self._upload_content_type = content_type
    return self._upload_content_type
def get_upload_key(self):
    """Get the `Key` from the S3 bucket for the uploaded file.

    The lookup result (even a miss) is cached on the instance.

    :returns: Key (object) of the uploaded file.
    :rtype: :py:class:`boto.s3.key.Key`
    """
    try:
        return self._upload_key
    except AttributeError:
        pass
    key_name = self.cleaned_data['key_name']
    self._upload_key = self.get_storage().bucket.get_key(key_name)
    return self._upload_key
def get_upload_key_metadata(self):
    """Generate metadata dictionary from a bucket key.

    Returns a copy of the key's user metadata, augmented with the
    standard HTTP header attributes that S3 stores on the key object
    itself (these must be re-supplied as metadata when copying a key).
    """
    key = self.get_upload_key()
    metadata = key.metadata.copy()
    # Some http header properties which are stored on the key need to be
    # copied to the metadata when updating
    headers = {
        # http header name, key attribute name
        'Cache-Control': 'cache_control',
        'Content-Type': 'content_type',
        'Content-Disposition': 'content_disposition',
        'Content-Encoding': 'content_encoding',
    }
    for header_name, attribute_name in headers.items():
        # Missing or empty attributes are skipped entirely.
        attribute_value = getattr(key, attribute_name, False)
        if attribute_value:
            # NOTE(review): b'{0}'.format(...) only works on Python 2;
            # bytes has no .format() on Python 3 -- confirm version.
            metadata.update({b'{0}'.format(header_name):
                             b'{0}'.format(attribute_value)})
    return metadata
def get_upload_path(self):
    """Returns the uploaded file path from the storage backend.

    Strips the storage location prefix off the submitted key name.

    :returns: File path from the storage backend.
    :rtype: :py:class:`unicode`
    """
    offset = len(self.get_storage().location)
    return self.cleaned_data['key_name'][offset:]
def ask(question, escape=True):
    """Prompt the user and return their decoded answer.

    :param question: prompt shown to the user.
    :param escape: when True, backslash-escape double quotes in the
        answer so it can be embedded in a quoted string.
    :returns: the answer as unicode.
    """
    answer = raw_input(question)
    if escape:
        # str.replace returns a new string; the original code discarded
        # the result, so escaping silently never took effect.
        answer = answer.replace('"', '\\"')
    return answer.decode('utf')
def update_state(self, cache=True):
    """Update the internal state of the Sesame.

    Fetches the device's current state from the Sesame API and stores
    nickname, lock status, API-enabled flag and battery level on the
    instance. Silently returns (leaving state unchanged) if the request
    fails or returns a non-200 status.

    :param cache: stored on ``self.use_cached_state``; presumably
        controls whether later reads may use cached data -- confirm.
    """
    self.use_cached_state = cache
    endpoint = API_SESAME_ENDPOINT.format(self._device_id)
    response = self.account.request('GET', endpoint)
    if response is None or response.status_code != 200:
        return
    state = json.loads(response.text)
    self._nickname = state['nickname']
    self._is_unlocked = state['is_unlocked']
    self._api_enabled = state['api_enabled']
    self._battery = state['battery']
def lock(self):
    """Lock the Sesame. Return True on success, else False.

    Sends a 'lock' control command to the Sesame API; success is a
    200 or 204 response.
    """
    endpoint = API_SESAME_CONTROL_ENDPOINT.format(self.device_id)
    response = self.account.request(
        'POST', endpoint, payload={'type': 'lock'})
    if response is None:
        return False
    return response.status_code in (200, 204)
def save(self):
    """Saves pypirc file with new configuration information.

    Writes every configured server section (plus the distutils
    index-servers list) back to ``self.rc_file`` and re-reads it so the
    in-memory parser matches what is on disk.
    """
    # The index-servers option only needs rebuilding once; the original
    # re-called it on every loop iteration with an identical result.
    self._add_index_server()
    # .items() (not the Python-2-only .iteritems()) works on both 2 & 3.
    for server, conf in self.servers.items():
        if not self.conf.has_section(server):
            self.conf.add_section(server)
        for conf_k, conf_v in conf.items():
            self.conf.set(server, conf_k, conf_v)
    with open(self.rc_file, 'wb') as configfile:
        # NOTE(review): 'wb' is the Python 2 convention; ConfigParser on
        # Python 3 expects a text-mode file -- confirm target version.
        self.conf.write(configfile)
    self.conf.read(self.rc_file)
q41514 | PyPiRC._get_index_servers | train | def _get_index_servers(self):
"""Gets index-servers current configured in pypirc."""
idx_srvs = []
if 'index-servers' in self.conf.options('distutils'):
idx = self.conf.get('distutils', 'index-servers')
idx_srvs = [srv.strip() for srv in idx.split('\n') if srv.strip()]
return idx_srvs | python | {
"resource": ""
} |
q41515 | PyPiRC._add_index_server | train | def _add_index_server(self):
"""Adds index-server to 'distutil's 'index-servers' param."""
index_servers = '\n\t'.join(self.servers.keys())
self.conf.set('distutils', 'index-servers', index_servers) | python | {
"resource": ""
} |
def outputmap(self, data):
    """Recursively map API output values onto Python-friendly objects,
    in place.

    Walks lists and dicts; whenever a dict key appears in
    ``self.output_map``, its value is replaced by the result of the
    converter method named there. Uses recursion, so try not to pass in
    anything that's over 255 objects deep.

    :param data: data structure returned by the API
    :type data: any
    :returns: Nothing, edits inplace
    """
    if isinstance(data, list):
        for item in data:
            self.outputmap(item)
    elif isinstance(data, dict):
        for map_target in self.output_map:
            if map_target in data:
                # output_map maps key name -> name of a converter method
                # on this instance.
                data[map_target] = getattr(self, self.output_map[map_target])(data[map_target])
        # Recurse into nested containers after converting this level.
        for item in data.values():
            self.outputmap(item)
def get_lux_count(lux_byte):
    """Decode a TSL2550D lux-sensor byte into an ADC count.

    The byte packs a validity flag (bit 7), a 3-bit chord number
    (bits 4-6) and a 4-bit step number (bits 0-3).

    :raises SensorError: if the validity bit is not set.
    """
    VALID_BIT = 0b10000000
    CHORD_BITS = 0b01110000
    STEP_BITS = 0b00001111
    # Guard clause: reject readings without the valid bit.
    if not (lux_byte & VALID_BIT):
        raise SensorError("Invalid lux sensor data.")
    step_num = lux_byte & STEP_BITS
    # Shift to normalize value
    chord_num = (lux_byte & CHORD_BITS) >> 4
    step_val = 2 ** chord_num
    chord_val = int(16.5 * (step_val - 1))
    return chord_val + step_val * step_num
def update_lux(self, extend=0):
    """ Communicates with the TSL2550D light sensor and returns a
    lux value.
    Note that this method contains approximately 1 second of total delay.
    This delay is necessary in order to obtain full resolution
    compensated lux values.
    Alternatively, the device could be put in extended mode,
    which drops some resolution in favor of shorter delays.
    """
    DEVICE_REG_OUT = 0x1d
    LUX_PWR_ON = 0x03
    # Extended mode trades resolution for a much shorter conversion
    # delay; readings are scaled 5x to compensate.
    if extend == 1:
        LUX_MODE = 0x1d
        delay = .08
        scale = 5
    else:
        LUX_MODE = 0x18
        delay = .4
        scale = 1
    LUX_READ_CH0 = 0x43
    LUX_READ_CH1 = 0x83
    # Select correct I2C mux channel on TCA module
    TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.lux_chan)
    # Make sure lux sensor is powered up.
    SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_PWR_ON)
    lux_on = SensorCluster.bus.read_byte_data(SensorCluster.lux_addr, LUX_PWR_ON)
    # Check for successful powerup
    if (lux_on == LUX_PWR_ON):
        # Send command to initiate ADC on each channel
        # Read each channel after the new data is ready
        SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_MODE)
        SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_READ_CH0)
        sleep(delay)
        adc_ch0 = SensorCluster.bus.read_byte(SensorCluster.lux_addr)
        count0 = get_lux_count(adc_ch0) * scale  # 5x for extended mode
        SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_READ_CH1)
        sleep(delay)
        adc_ch1 = SensorCluster.bus.read_byte(SensorCluster.lux_addr)
        count1 = get_lux_count(adc_ch1) * scale  # 5x for extended mode
        # NOTE(review): raises ZeroDivisionError when count0 == count1
        # -- confirm whether that state can occur in practice.
        ratio = count1 / (count0 - count1)
        # Empirical TSL2550 lux approximation from the two channels.
        lux = (count0 - count1) * .39 * e**(-.181 * (ratio**2))
        self.light_ratio = float(count1)/float(count0)
        print("Light ratio Ch1/Ch0: ", self.light_ratio)
        self.lux = round(lux, 3)
        # Deselect the mux channel before returning.
        return TCA_select(SensorCluster.bus, self.mux_addr, "off")
    else:
        raise SensorError("The lux sensor is powered down.")
def update_humidity_temp(self):
    """ This method utilizes the HIH7xxx sensor to read
    humidity and temperature in one call.

    Updates ``self.humidity`` (percent RH) and ``self.temp``
    (converted to Fahrenheit) from one 4-byte I2C read.
    """
    # Create mask for STATUS (first two bits of 64 bit wide result)
    STATUS = 0b11 << 6
    TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.humidity_chan)
    SensorCluster.bus.write_quick(SensorCluster.humidity_addr)  # Begin conversion
    sleep(.25)
    # wait to make sure the conversion takes place.
    data = SensorCluster.bus.read_i2c_block_data(SensorCluster.humidity_addr, 0, 4)
    status = (data[0] & STATUS) >> 6
    # Status 0 = fresh data, 1 = stale data; both accepted here.
    if status == 0 or status == 1:  # will always pass for now.
        # Humidity: lower 6 bits of byte 0 plus byte 1, scaled per the
        # HIH7xxx datasheet (14-bit value).
        humidity = round((((data[0] & 0x3f) << 8) |
                          data[1]) * 100.0 / (2**14 - 2), 3)
        self.humidity = humidity
        # Temperature: bytes 2-3 (14 bits), datasheet Celsius formula,
        # then converted to Fahrenheit.
        self.temp = (round((((data[2] << 6) + ((data[3] & 0xfc) >> 2))
                            * 165.0 / 16382.0 - 40.0), 3) * 9/5) + 32
        return TCA_select(SensorCluster.bus, self.mux_addr, "off")
    else:
        raise I2CBusError("Unable to retrieve humidity")
def sensor_values(self):
    """
    Returns the values of all sensors for this cluster

    Refreshes every sensor first, then reports the cached readings.
    """
    self.update_instance_sensors(opt="all")
    readings = {}
    readings["light"] = self.lux
    readings["water"] = self.soil_moisture
    readings["humidity"] = self.humidity
    readings["temperature"] = self.temp
    return readings
def get_water_level(cls):
    """ This method uses the ADC on the control module to measure
    the current water tank level and returns the water volume
    remaining in the tank.
    For this method, it is assumed that a simple voltage divider
    is used to interface the sensor to the ADC module.
    Testing shows that the sensor response is not completely linear,
    though it is quite close. To make the results more accurate,
    a mapping method approximated by a linear fit to data is used.

    :returns: fraction of the tank that is full (depth / tank height).
    """
    # ----------
    # These values should be updated based on the real system parameters
    vref = 4.95
    tank_height = 17.5  # in centimeters (height of container)
    rref = 2668  # Reference resistor
    # ----------
    val = 0
    for i in range(5):
        # Take five readings and do an average
        # Fetch value from ADC (0x69 - ch1)
        val = get_ADC_value(cls.bus, 0x6c, 1) + val
    avg = val / 5
    # Invert the voltage divider to recover the sensor resistance.
    water_sensor_res = rref * avg/(vref - avg)
    depth_cm = water_sensor_res * \
        (-.0163) + 28.127  # measured transfer adjusted offset
    if depth_cm < 1.0:  # Below 1cm, the values should not be trusted.
        depth_cm = 0
    cls.water_remaining = depth_cm / tank_height
    # Return the current depth in case the user is interested in
    # that parameter alone. (IE for automatic shut-off)
    return depth_cm/tank_height
q41522 | _imported_symbol | train | def _imported_symbol(import_path):
"""Resolve a dotted path into a symbol, and return that.
For example...
>>> _imported_symbol('django.db.models.Model')
<class 'django.db.models.base.Model'>
Raise ImportError if there's no such module, AttributeError if no
such symbol.
"""
module_name, symbol_name = import_path.rsplit('.', 1)
module = import_module(module_name)
return getattr(module, symbol_name) | python | {
"resource": ""
} |
def Param(name, value=None, unit=None, ucd=None, dataType=None, utype=None,
          ac=True):
    """
    'Parameter', used as a general purpose key-value entry in the 'What'
    section. May be assembled into a :class:`Group`.

    NB ``name`` is not mandated by schema, but *is* mandated in full spec.

    Args:
        value(str): String representing parameter value. Or, if ``ac`` is
            true, 'autoconversion' is attempted, in which case ``value``
            may also be a bool, int, float or datetime.datetime -- the
            value is then stored as its string representation, per VO
            spec.
        unit(str): Units of value. See :class:`.definitions.units`
        ucd(str): unified content descriptor; see
            http://vocabularies.referata.com/wiki/Category:IVOA_UCD.
        dataType(str): Denotes type of ``value``; restricted to
            ``string`` (default), ``int`` or ``float``.
        utype(str): See http://wiki.ivoa.net/twiki/bin/view/IVOA/Utypes
        ac(bool): Attempt automatic conversion of passed ``value`` to
            string, setting ``dataType`` accordingly (only attempted if
            ``dataType`` is None; only supports the types in
            _datatypes_autoconversion).
    """
    # Build the attribute dict explicitly (the original used locals())
    # and drop any attribute the caller left as None.
    candidates = {'name': name, 'value': value, 'unit': unit, 'ucd': ucd,
                  'dataType': dataType, 'utype': utype}
    atts = {k: v for k, v in candidates.items() if v is not None}
    should_convert = (ac
                      and value is not None
                      and not isinstance(value, string_types)
                      and dataType is None)
    if should_convert and type(value) in _datatypes_autoconversion:
        datatype, converter = _datatypes_autoconversion[type(value)]
        atts['dataType'] = datatype
        atts['value'] = converter(value)
    return objectify.Element('Param', attrib=atts)
def Group(params, name=None, type=None):
    """Groups together Params for adding under the 'What' section.

    Args:
        params(list of :func:`Param`): Parameter elements for the group.
        name(str): Group name. NB ``None`` is valid, since the group may
            be best identified by its type.
        type(str): Type of group, e.g. 'complex' (for real and
            imaginary).
    """
    attrib = {}
    if name:
        attrib['name'] = name
    if type:
        attrib['type'] = type
    group = objectify.Element('Group', attrib=attrib)
    for param in params:
        group.append(param)
    return group
def Reference(uri, meaning=None):
    """
    Represents external information, typically original obs data and
    metadata.

    Args:
        uri(str): Uniform resource identifier for external data, e.g.
            FITS file.
        meaning(str): The nature of the document referenced, e.g. what
            instrument and filter was used to create the data?
    """
    attrib = dict(uri=uri)
    if meaning is not None:
        attrib['meaning'] = meaning
    return objectify.Element('Reference', attrib)
def EventIvorn(ivorn, cite_type):
    """
    Used to cite earlier VOEvents.

    Use in conjunction with :func:`.add_citations`

    Args:
        ivorn(str): It is assumed this will be copied verbatim from
            elsewhere, and so these should have any prefix (e.g.
            'ivo://','http://') already in place - the function will not
            alter the value.
        cite_type (:class:`.definitions.cite_types`): String conforming
            to one of the standard citation types.
    """
    # This is an ugly hack around the limitations of the lxml.objectify
    # API: build a string element carrying the 'cite' attribute, set its
    # text, then rename the tag to EventIVORN.
    c = objectify.StringElement(cite=cite_type)
    c._setText(ivorn)
    c.tag = "EventIVORN"
    return c
def batch(self, source_id, data):
    """ Upload data to the given source

    :param source_id: The ID of the source to upload to
    :type source_id: str
    :param data: The data to upload to the source
    :type data: list
    :return: dict of REST API output with headers attached
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
        :class:`requests.exceptions.HTTPError`,
        :class:`~datasift.exceptions.BadRequest`
    """
    # Guard against an empty list too: the original indexed data[0]
    # unconditionally, so [] raised IndexError instead of BadRequest.
    if not isinstance(data, list) or not data or not isinstance(data[0], dict):
        raise BadRequest("Ingestion data must be a list of dicts")
    # Newline-delimited JSON, one record per line.
    payload = "\r\n".join(json.dumps(item) for item in data)
    return self.request.post(source_id, payload,
                             {'Accept-Encoding': 'application/text'})
def create_package_version(requirement):
    """Create a new PackageVersion from a requirement. Handles errors.

    Saves a PackageVersion row for the requirement; if one already
    exists (IntegrityError from the unique constraint), logs and carries
    on rather than raising.
    """
    try:
        PackageVersion(requirement=requirement).save()
        logger.info("Package '%s' added.", requirement.name)  # noqa
    except IntegrityError:
        logger.info("Package '%s' already exists.", requirement.name)
def local():
    """Load local requirements file.

    Parses REQUIREMENTS_FILE and creates a PackageVersion record for
    each requirement found (duplicates are handled inside
    create_package_version).
    """
    logger.info("Loading requirements from local file.")
    with open(REQUIREMENTS_FILE, 'r') as f:
        requirements = parse(f)
        # Iterate inside the ``with``: parse() presumably yields lazily
        # from the open file handle -- confirm against its docs.
        for r in requirements:
            logger.debug("Creating new package: %r", r)
            create_package_version(r)
def remote():
    """Update package info from PyPI.

    Refreshes every non-editable PackageVersion from PyPI and groups
    them by their diff status.

    :returns: defaultdict mapping diff_status -> list of packages, plus
        a 'refreshed_at' timestamp entry.
    """
    logger.info("Fetching latest data from PyPI.")
    results = defaultdict(list)
    # Editable (e.g. VCS) installs have no PyPI release to compare.
    packages = PackageVersion.objects.exclude(is_editable=True)
    for pv in packages:
        pv.update_from_pypi()
        results[pv.diff_status].append(pv)
        logger.debug("Updated package from PyPI: %r", pv)
    results['refreshed_at'] = tz_now()
    return results
def handle(self, *args, **options):
    """Run the management command.

    Optionally cleans stored package data, reloads the local
    requirements file, refreshes from PyPI, and emails a summary.
    """
    if options['clean']:
        clean()
    if options['local']:
        local()
    if options['remote']:
        results = remote()
    # NOTE(review): if --notify is given without --remote, ``results``
    # is unbound and rendering below raises NameError -- confirm that
    # notify is only ever used together with remote.
    render = lambda t: render_to_string(t, results)
    if options['notify']:
        send_mail(
            options['subject'],
            render('summary.txt'),
            options['from'],
            [options['notify']],
            html_message=render('summary.html'),
            fail_silently=False,
        )
def create(self, stream, start, parameters, sources, end=None):
    """ Create a historics preview job.

    Uses the /preview/create endpoint.

    :param stream: hash of the CSDL filter to create the job for
    :param start: Unix timestamp for the start of the period
    :param parameters: list of historics preview parameters
    :param sources: list of sources to include, eg. ['tumblr','facebook']
    :param end: (optional) Unix timestamp for the end of the period,
        defaults to min(start+24h, now-1h)
    :return: dict of REST API output with headers attached
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.HistoricSourcesRequired`,
        :class:`~datasift.exceptions.DataSiftApiException`,
        :class:`requests.exceptions.HTTPError`
    """
    if len(sources) == 0:
        raise HistoricSourcesRequired()
    # A bare string is treated as a single-source list.
    if isinstance(sources, six.string_types):
        sources = [sources]
    request_params = {
        'hash': stream,
        'start': start,
        'sources': ','.join(sources),
        'parameters': ','.join(parameters),
    }
    if end:
        request_params['end'] = end
    return self.request.post('create', request_params)
def get(self, preview_id):
    """ Retrieve a Historics preview job.

    Warning: previews expire after 24 hours.

    :param preview_id: historics preview job hash of the job to retrieve
    :type preview_id: str
    :return: dict of REST API output with headers attached
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
        :class:`requests.exceptions.HTTPError`
    """
    return self.request.get('get', params={'id': preview_id})
def prepare(self, hash, start, end, name, sources, sample=None):
    """ Prepare a historics query which can later be started.

    :param hash: The hash of a CSDL to create the query for
    :param start: when to start querying data from - unix timestamp
    :param end: when the query should end - unix timestamp
    :param name: the name of the query
    :param sources: list of sources e.g. ['facebook','bitly','tumblr']
    :param sample: percentage to sample, either 10 or 100
    :return: dict of REST API output with headers attached
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.HistoricSourcesRequired`,
        :class:`~datasift.exceptions.DataSiftApiException`,
        :class:`requests.exceptions.HTTPError`
    """
    if len(sources) == 0:
        raise HistoricSourcesRequired()
    # Normalise a single source into a list before joining.
    if not isinstance(sources, list):
        sources = [sources]
    payload = {'hash': hash, 'start': start, 'end': end, 'name': name,
               'sources': ','.join(sources)}
    if sample:
        payload['sample'] = sample
    return self.request.post('prepare', payload)
def start(self, historics_id):
    """ Start the historics job with the given ID.

    :param historics_id: hash of the job to start
    :type historics_id: str
    :return: dict of REST API output with headers attached
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
        :class:`requests.exceptions.HTTPError`
    """
    return self.request.post('start', data={'id': historics_id})
def update(self, historics_id, name):
    """ Update the name of the given Historics query.

    :param historics_id: playback id of the job to rename
    :type historics_id: str
    :param name: new name of the stream
    :type name: str
    :return: dict of REST API output with headers attached
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
        :class:`requests.exceptions.HTTPError`
    """
    payload = {'id': historics_id, 'name': name}
    return self.request.post('update', data=payload)
def stop(self, historics_id, reason=''):
    """ Stop an existing Historics query.

    :param historics_id: playback id of the job to stop
    :type historics_id: str
    :param reason: optional reason for stopping the job
    :type reason: str
    :return: dict of REST API output with headers attached
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
        :class:`requests.exceptions.HTTPError`
    """
    payload = {'id': historics_id, 'reason': reason}
    return self.request.post('stop', data=payload)
def status(self, start, end, sources=None):
    """ Check the data coverage in the Historics archive for an interval.

    :param start: Unix timestamp for the start time
    :type start: int
    :param end: Unix timestamp for the end time
    :type end: int
    :param sources: list of data sources to include.
    :type sources: list
    :return: dict of REST API output with headers attached
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
        :class:`requests.exceptions.HTTPError`
    """
    query = {'start': start, 'end': end}
    if sources:
        query['sources'] = ','.join(sources)
    return self.request.get('status', params=query)
def delete(self, historics_id):
    """ Delete one specified playback query. If the query is currently
    running, stop it.

    status_code is set to 204 on success.

    :param historics_id: playback id of the query to delete
    :type historics_id: str
    :return: dict of REST API output with headers attached
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
        :class:`requests.exceptions.HTTPError`
    """
    return self.request.post('delete', data={'id': historics_id})
def get_for(self, historics_id, with_estimate=None):
    """ Get the historic query for the given ID.

    Convenience wrapper around :meth:`get` for a single query (no
    pagination parameters).

    :param historics_id: playback id of the query
    :type historics_id: str
    :return: dict of REST API output with headers attached
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
        :class:`requests.exceptions.HTTPError`
    """
    return self.get(historics_id, maximum=None, page=None,
                    with_estimate=with_estimate)
def get(self, historics_id=None, maximum=None, page=None, with_estimate=None):
    """ Get the historics query with the given ID, or a list of queries
    when no ID is provided.

    :param historics_id: (optional) ID of the query to retrieve
    :param maximum: (optional) maximum number of queries (default 20)
    :param page: (optional) page to retrieve for paginated queries
    :param with_estimate: include estimate of completion time in output
    :return: dict of REST API output with headers attached
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
        :class:`requests.exceptions.HTTPError`
    """
    # with_estimate is sent as a 0/1 flag, never omitted.
    query = {'id': historics_id,
             'with_estimate': 1 if with_estimate else 0}
    if maximum:
        query['max'] = maximum
    if page:
        query['page'] = page
    return self.request.get('get', params=query)
def pause(self, historics_id, reason=""):
    """ Pause an existing Historics query.

    :param historics_id: id of the job to pause
    :type historics_id: str
    :param reason: optional reason for pausing it
    :type reason: str
    :return: dict of REST API output with headers attached
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
        :class:`requests.exceptions.HTTPError`
    """
    payload = {"id": historics_id}
    # The reason field is only sent when non-empty.
    if reason != "":
        payload["reason"] = reason
    return self.request.post('pause', data=payload)
def resume(self, historics_id):
    """ Resume a paused Historics query.

    :param historics_id: id of the job to resume
    :type historics_id: str
    :return: dict of REST API output with headers attached
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
        :class:`requests.exceptions.HTTPError`
    """
    return self.request.post('resume', data={'id': historics_id})
def remove(self, source_id, resource_ids):
    """ Remove one or more resources from a Managed Source.

    :param source_id: target Source ID
    :type source_id: str
    :param resource_ids: resource IDs to remove.
    :type resource_ids: array of str
    :return: dict of REST API output with headers attached
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
        :class:`requests.exceptions.HTTPError`
    """
    return self.request.post(
        'remove', {'id': source_id, 'resource_ids': resource_ids})
def add(self, source_id, auth, validate=True):
    """ Add one or more sets of authorization credentials to a Managed
    Source.

    :param source_id: target Source ID
    :type source_id: str
    :param auth: source-specific authorization credential sets to add.
    :type auth: array of strings
    :param validate: suppress credential validation when False
        (defaults to True).
    :type validate: bool
    :return: dict of REST API output with headers attached
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
        :class:`requests.exceptions.HTTPError`
    """
    payload = {'id': source_id, 'auth': auth, 'validate': validate}
    return self.request.post('add', payload)
def remove(self, source_id, auth_ids):
    """ Remove one or more sets of authorization credentials from a
    Managed Source.

    :param source_id: target Source ID
    :type source_id: str
    :param auth_ids: authorization credential set IDs to remove.
    :type auth_ids: array of str
    :return: dict of REST API output with headers attached
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
        :class:`requests.exceptions.HTTPError`
    """
    return self.request.post(
        'remove', {'id': source_id, 'auth_ids': auth_ids})
def create(self, source_type, name, resources, auth=None, parameters=None,
           validate=True):
    """ Create a managed source.

    :param source_type: data source name e.g. facebook_page, googleplus,
        instagram, yammer
    :param name: name to use to identify the managed source being created
    :param resources: list of source-specific config dicts
    :param auth: list of source-specific authentication dicts
    :param parameters: (optional) dict describing how to treat each
        resource
    :param validate: whether validation should be performed on the source
    :return: dict of REST API output with headers attached
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
        :class:`requests.exceptions.HTTPError`
    """
    assert resources, "Need at least one resource"
    payload = {
        'source_type': source_type,
        'name': name,
        'resources': resources,
        'validate': validate,
    }
    # Optional fields are only sent when supplied.
    if auth:
        payload['auth'] = auth
    if parameters:
        payload['parameters'] = parameters
    return self.request.post('create', payload)
def update(self, source_id, source_type, name, resources, auth, parameters=None, validate=True):
    """Update an existing managed source.

    Wraps http://dev.datasift.com/docs/api/rest-api/endpoints/sourceupdate

    :param source_id: target Source ID
    :type source_id: str
    :param source_type: data source name e.g. facebook_page, googleplus, instagram, yammer
    :type source_type: str
    :param name: name used to identify the managed source
    :type name: str
    :param resources: list of source-specific config dicts (must be non-empty)
    :type resources: list
    :param auth: list of source-specific authentication dicts (must be non-empty)
    :type auth: list
    :param parameters: optional dict describing how to treat each resource
    :type parameters: dict
    :param validate: whether the API should validate the source
    :type validate: bool
    :return: dict of REST API output with headers attached
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
    """
    assert resources, "Need at least one resource"
    assert auth, "Need at least one authentication token"
    payload = {
        'id': source_id,
        'source_type': source_type,
        'name': name,
        'resources': resources,
        'auth': auth,
        'validate': validate,
    }
    if parameters:
        payload['parameters'] = parameters
    return self.request.post('update', payload)
def log(self, source_id, page=None, per_page=None):
    """Fetch the log for a specific managed source.

    Wraps http://dev.datasift.com/docs/api/rest-api/endpoints/sourcelog

    :param source_id: target Source ID
    :type source_id: str
    :param page: optional page number for pagination
    :type page: int
    :param per_page: optional number of items per page (API default 20)
    :type per_page: int
    :return: dict of REST API output with headers attached
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
    """
    params = {'id': source_id}
    pagination = (('page', page), ('per_page', per_page))
    params.update({key: value for key, value in pagination if value})
    return self.request.get('log', params=params)
def get(self, source_id=None, source_type=None, page=None, per_page=None):
    """Fetch a specific managed source or a page of them.

    Wraps http://dev.datasift.com/docs/api/rest-api/endpoints/sourceget

    :param source_id: optional target Source ID
    :type source_id: str
    :param source_type: optional data source name e.g. facebook_page, googleplus, instagram, yammer
    :type source_type: str
    :param page: optional page number for pagination (API default 1)
    :type page: int
    :param per_page: optional number of items per page (API default 20)
    :type per_page: int
    :return: dict of REST API output with headers attached
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
    """
    candidates = (
        ('source_type', source_type),
        ('id', source_id),
        ('page', page),
        ('per_page', per_page),
    )
    # Only forward the filters the caller actually provided.
    params = {key: value for key, value in candidates if value}
    return self.request.get('get', params=params)
def apply(self, localArray):
    """
    Apply the Laplacian stencil to data.

    @param localArray numpy array holding this process's local data
    @return new numpy array (same shape/dtype) on the local process

    The zero-displacement stencil weight is applied to the whole local
    array; each non-zero displacement contributes an interior (purely
    local) term plus a halo term fetched from the neighbouring process
    via one-sided communication (``inp.getData``).
    """
    # Input ghosted distributed array: exposes a one-cell halo so
    # neighbours can read our edge values remotely.
    inp = gdaZeros(localArray.shape, localArray.dtype, numGhosts=1)
    # Output is a plain local array; no communication needed to write it.
    out = numpy.zeros(localArray.shape, localArray.dtype)
    # Zero-displacement term: weight applied to every local element.
    weight = self.stencil[self.zeros]
    out[...] += weight * localArray
    for disp in self.srcLocalDomains:
        weight = self.stencil[disp]
        # Interior contribution: shifted source/destination slices both
        # live on this process, so no communication is required here.
        srcDom = self.srcLocalDomains[disp]
        dstDom = self.dstLocalDomains[disp]
        out[dstDom] += weight * localArray[srcDom]
        #
        # Now the part that requires communication.
        #
        # Publish our edge values in the ghosted array's exposed slab...
        srcSlab = self.srcSlab[disp]
        # copy
        inp[srcSlab] = localArray[srcSlab]
        # ...then fetch the corresponding slab from the neighbouring
        # rank's window and accumulate it on our side.
        dstSlab = self.dstSlab[disp]
        winId = self.winIds[disp]
        rk = self.neighRk[disp]
        # remote fetch
        out[dstSlab] += weight * inp.getData(rk, winId)
    # Some MPI implementations require explicitly freeing the windows.
    inp.free()
    return out
def login(self, user_id, password, svctype = "Android NDrive App ver", auth = 0):
    """Log in to Naver and register this session with NDrive.

    (Python 2 code: note the ``print`` statements.)

    Args:
        user_id: Naver account's login id
        password: Naver account's login password
        svctype: service-type string forwarded to getRegisterUserInfo
        auth: auth flag forwarded to getRegisterUserInfo
    Returns:
        True: login and registration succeeded
        False: login or registration failed
    Remarks:
        On success the session cookies NID_AUT and NID_SES are populated
        from the cookie dict returned by naver_login.
    """
    self.user_id = user_id
    self.password = password
    # NOTE(review): `== None` should be `is None`; left untouched in this
    # documentation-only pass.
    if self.user_id == None or self.password == None:
        print "[*] Error __init__: user_id and password is needed"
        return False
    # NOTE(review): the bare except converts *any* naver_login failure
    # (network error, bad credentials, parse error) into a False return —
    # presumably intentional best-effort behaviour, but worth confirming.
    try:
        cookie = naver_login(user_id, password)
    except:
        return False
    self.session.cookies.set('NID_AUT', cookie["NID_AUT"])
    self.session.cookies.set('NID_SES', cookie["NID_SES"])
    s = self.getRegisterUserInfo(svctype, auth)
    if s is True:
        return True
    else:
        print "[*] Error getRegisterUserInfo: failed"
        return False
def set_mysql(host, user, password, db, charset):
    """Configure the SQLAlchemy connection string on the global manager
    using the given MySQL settings."""
    connection_settings = dict(
        host=host,
        user=user,
        password=password,
        db=db,
        charset=charset,
    )
    manager.database.set_mysql_connection(**connection_settings)
def quantile_gaussianize(x):
    """Normalize a sequence of values via rank and Normal c.d.f.

    Non-finite entries are left untouched; finite entries are replaced by
    the standard-normal quantiles of their ranks.

    Args:
        x (array_like): sequence of values.
    Returns:
        Gaussian-normalized values.
    Example:
        .. doctest::

            >>> from scipy_sugar.stats import quantile_gaussianize
            >>> print(quantile_gaussianize([-1, 0, 2]))
            [-0.67448975  0.          0.67448975]
    """
    from scipy.stats import norm, rankdata

    values = asarray(x, float).copy()
    finite = isfinite(values)
    # Negate so that ranking + isf yields an increasing mapping.
    values[finite] *= -1
    out = empty_like(values)
    out[finite] = rankdata(values[finite])
    out[finite] = norm.isf(out[finite] / (sum(finite) + 1))
    out[~finite] = values[~finite]
    return out
def apply_noise(self, noise_generator, split_idx, ndigits=6):
    """Perturb each dimension's chemical shift with generated noise.

    :param noise_generator: Noise generator object.
    :param int split_idx: Index selecting which peak list split parameters to use.
    :param int ndigits: Decimal places the perturbed shift is rounded to.
    :return: None
    :rtype: :py:obj:`None`
    """
    noise_values = noise_generator.generate(self.labels, split_idx)
    for dimension, delta in zip(self, noise_values):
        dimension.chemshift = round(dimension.chemshift + delta, ndigits)
def is_sequential(self):
    """Check whether the residues of this sequence site are consecutive.

    :return: True when the residue Seq_IDs form an unbroken ascending run,
        False otherwise.
    :rtype: :py:obj:`True` or :py:obj:`False`
    """
    seq_ids = tuple(int(residue["Seq_ID"]) for residue in self)
    expected = tuple(range(seq_ids[0], seq_ids[-1] + 1))
    return seq_ids == expected
def create_dimension_groups(dimension_positions):
    """Build the list of dimension groups for a peak description.

    :param zip dimension_positions: Tuples of (dimension group label,
        position within the sequence site).
    :return: List of dimension groups, each populated with the concrete
        dimensions registered for its label in RESONANCE_CLASSES.
    :rtype: :py:class:`list`
    """
    groups = []
    for group_label, position in dimension_positions:
        group = DimensionGroup(group_label, position)
        group.dimensions.extend(
            Dimension(dim_label, position)
            for dim_label in nmrstarlib.RESONANCE_CLASSES[group_label])
        groups.append(group)
    return groups
def peak_templates(self):
    """Expand the general peak descriptions into concrete peak templates.

    Every combination (Cartesian product) of the dimensions in a peak
    description's dimension groups yields one template.

    :return: List of peak templates.
    :rtype: :py:class:`list`
    """
    return [
        PeakTemplate(combination)
        for peak_descr in self
        for combination in product(
            *[dim_group.dimensions for dim_group in peak_descr])
    ]
def seq_site_length(self):
    """Length of a single sequence site, i.e. the number of distinct
    relative positions used across all peak descriptions.

    :return: Length of sequence site.
    :rtype: :py:class:`int`
    """
    positions = set()
    for peak_descr in self:
        positions |= set(peak_descr.relative_positions)
    return len(positions)
def create_spectrum(spectrum_name):
    """Initialize a spectrum and its peak descriptions from the registry.

    :param str spectrum_name: Name of the spectrum to simulate a peak list for.
    :return: Spectrum object.
    :rtype: :class:`~nmrstarlib.plsimulator.Spectrum`
    :raises NotImplementedError: when the experiment type is not registered.
    """
    try:
        descr = nmrstarlib.SPECTRUM_DESCRIPTIONS[spectrum_name]
    except KeyError:
        raise NotImplementedError("Experiment type is not defined.")
    spectrum = plsimulator.Spectrum(spectrum_name,
                                    descr["Labels"],
                                    descr["MinNumberPeaksPerSpinSystem"],
                                    descr.get("ResonanceLimit", None))
    for peak_descr in descr["PeakDescriptions"]:
        spectrum.append(plsimulator.PeakDescription(peak_descr["fraction"],
                                                    peak_descr["dimensions"]))
    return spectrum
def create_sequence_sites(chain, seq_site_length):
    """Build candidate sequence sites by sliding a window over the chain.

    :param dict chain: Chain object mapping sequence ids to residues
        (chemical shifts and assignment information).
    :param int seq_site_length: Length of a single sequence site.
    :return: List of sequence sites whose residues are truly consecutive.
    :rtype: :py:class:`list`
    """
    # Sort by the numeric value of the sequence id, not lexically.
    seq_ids = sorted(chain, key=int)
    windows = zip(*(itertools.islice(seq_ids, offset, None)
                    for offset in range(seq_site_length)))
    sites = []
    for window in windows:
        candidate = plsimulator.SequenceSite(chain[seq_id] for seq_id in window)
        # Gaps in the numbering make a window non-sequential; drop it.
        if candidate.is_sequential():
            sites.append(candidate)
    return sites
def calculate_intervals(chunk_sizes):
    """Turn a list of chunk sizes into (start, end) index intervals.

    :param list chunk_sizes: List of chunk sizes.
    :return: Tuple of (start, end) pairs, one per chunk.
    :rtype: :py:class:`tuple`
    """
    ends = list(itertools.accumulate(chunk_sizes))
    starts = [0] + ends[:-1]
    return tuple(zip(starts, ends))
def split_by_percent(self, spin_systems_list):
    """Split a list of spin systems according to ``self.plsplit`` percentages.

    :param list spin_systems_list: List of spin systems.
    :return: List of iterators (one per percentage) over consecutive
        sub-ranges of the input list.
    :rtype: :py:class:`list`
    """
    total = len(spin_systems_list)
    chunk_sizes = [int((pct * total) / 100) for pct in self.plsplit]
    # Integer truncation can leave items unassigned; give the remainder
    # to the smallest chunk so the sizes cover the whole list.
    shortfall = total - sum(chunk_sizes)
    if shortfall > 0:
        chunk_sizes[chunk_sizes.index(min(chunk_sizes))] += shortfall
    assert sum(chunk_sizes) == total, \
        "sum of chunk sizes must be equal to spin systems list length."
    return [itertools.islice(spin_systems_list, *interval)
            for interval in self.calculate_intervals(chunk_sizes)]
def create_peaklist(self, spectrum, chain, chain_idx, source):
    """Create a peak list for one protein chain.

    :param spectrum: Spectrum object instance.
    :type spectrum: :class:`~nmrstarlib.plsimulator.Spectrum`
    :param dict chain: Chain object that contains chemical shift values
        and assignment information.
    :param int chain_idx: Protein chain index.
    :param str source: :class:`~nmrstarlib.nmrstarlib.StarFile` source.
    :return: Peak list object, or None when no spin system reaches the
        spectrum's minimum peak count.
    :rtype: :class:`~nmrstarlib.plsimulator.PeakList`
    """
    sequence_sites = self.create_sequence_sites(chain, spectrum.seq_site_length)
    spin_systems = []
    peaklist = plsimulator.PeakList(spectrum.name, spectrum.labels, source, chain_idx)
    for seq_site in sequence_sites:
        spin_system = plsimulator.SpinSystem()
        for template in spectrum.peak_templates:
            peak = plsimulator.Peak(template.dimension_labels)
            for dim in template:
                # A dimension only contributes when the residue actually
                # has a shift assigned for this atom label.
                chemshift = seq_site[dim.position].get(dim.label, None)
                assignment = "{}{}{}".format(seq_site[dim.position]["AA3Code"],
                                             seq_site[dim.position]["Seq_ID"],
                                             dim.label)
                if chemshift and assignment:
                    peak_dim = plsimulator.Dimension(dim.label, dim.position, assignment, float(chemshift))
                    peak.append(peak_dim)
                else:
                    continue
            # Keep the peak only when every template dimension was filled.
            if len(peak) == len(template):
                spin_system.append(peak)
                peaklist.append(peak)
            else:
                continue
        spin_systems.append(spin_system)
    # Discard chains where no spin system reaches the required peak count.
    if all(len(i) < spectrum.min_spin_system_peaks for i in spin_systems):
        return None
    if self.noise_generator is not None:
        # Perturb shifts per split: each percentage chunk gets the noise
        # parameters registered for its split index.
        spin_systems_chunks = self.split_by_percent(spin_systems)
        for split_idx, chunk in enumerate(spin_systems_chunks):
            for spin_system in chunk:
                for peak in spin_system:
                    peak.apply_noise(self.noise_generator, split_idx)
    return peaklist
def daArray(arry, dtype=float):
    """
    Array constructor for numpy distributed array
    @param arry numpy-like array
    @param dtype numpy data type (defaults to builtin float)
    @return new DistArray holding a copy of arry
    """
    # The default used to be numpy.float — an alias of builtin float that
    # was removed in NumPy 1.24. Builtin float is the identical value, so
    # this is fully backward compatible.
    a = numpy.array(arry, dtype)
    res = DistArray(a.shape, a.dtype)
    res[:] = a
    return res
def daZeros(shap, dtype=float):
    """
    Zero constructor for numpy distributed array
    @param shap the shape of the array
    @param dtype numpy data type (defaults to builtin float)
    @return new DistArray filled with zeros
    """
    # numpy.float (the former default) was removed in NumPy 1.24; builtin
    # float is the identical, backward-compatible replacement.
    res = DistArray(shap, dtype)
    res[:] = 0
    return res
def daOnes(shap, dtype=float):
    """
    One constructor for numpy distributed array
    @param shap the shape of the array
    @param dtype numpy data type (defaults to builtin float)
    @return new DistArray filled with ones
    """
    # numpy.float (the former default) was removed in NumPy 1.24; builtin
    # float is the identical, backward-compatible replacement.
    res = DistArray(shap, dtype)
    res[:] = 1
    return res
def mdaArray(arry, dtype=float, mask=None):
    """
    Array constructor for masked distributed array
    @param arry numpy-like array
    @param dtype numpy data type (defaults to builtin float)
    @param mask mask array (or None if all data elements are valid)
    @return new MaskedDistArray holding a copy of arry
    """
    # numpy.float (the former default) was removed in NumPy 1.24; builtin
    # float is the identical, backward-compatible replacement.
    a = numpy.array(arry, dtype)
    res = MaskedDistArray(a.shape, a.dtype)
    res[:] = a
    res.mask = mask
    return res
def mdaZeros(shap, dtype=float, mask=None):
    """
    Zero constructor for masked distributed array
    @param shap the shape of the array
    @param dtype numpy data type (defaults to builtin float)
    @param mask mask array (or None if all data elements are valid)
    @return new MaskedDistArray filled with zeros
    """
    # numpy.float (the former default) was removed in NumPy 1.24; builtin
    # float is the identical, backward-compatible replacement.
    res = MaskedDistArray(shap, dtype)
    res[:] = 0
    res.mask = mask
    return res
def mdaOnes(shap, dtype=float, mask=None):
    """
    One constructor for masked distributed array
    @param shap the shape of the array
    @param dtype numpy data type (defaults to builtin float)
    @param mask mask array (or None if all data elements are valid)
    @return new MaskedDistArray filled with ones
    """
    # numpy.float (the former default) was removed in NumPy 1.24; builtin
    # float is the identical, backward-compatible replacement.
    res = MaskedDistArray(shap, dtype)
    res[:] = 1
    res.mask = mask
    return res
def stash(self, storage, url):
    """Store a validated upload in temporary storage and describe it.

    :param storage: storage backend used to save the uploaded file
    :param url: URL recorded when serializing the stored upload
    :return: dict with ``filename``, ``url`` and ``stored`` keys when the
        form validates, otherwise an empty dict
    """
    if not self.is_valid():
        return {}
    upload = self.cleaned_data['upload']
    saved_name = storage.save(upload.name, upload)
    try:
        stored_url = storage.url(saved_name)
    except NotImplementedError:
        # Some backends cannot produce a public URL for the file.
        stored_url = None
    return {
        'filename': os.path.basename(saved_name),
        'url': stored_url,
        'stored': serialize_upload(saved_name, storage, url),
    }
def login(self, email=None, password=None, timeout=5):
    """Log in to CANDY HOUSE account. Return True on success.

    :param email: account email; when given, replaces ``self.email``
    :param password: account password; when given, replaces ``self.password``
    :param timeout: per-request timeout in seconds
    :return: True when the API returned 200 and an auth token was stored,
        False on connection error, timeout, or any non-200 response
    """
    if email is not None:
        self.email = email
    if password is not None:
        self.password = password
    url = self.api_url + API_LOGIN_ENDPOINT
    data = json.dumps({'email': self.email, 'password': self.password})
    headers = {'Content-Type': 'application/json'}
    response = None
    # Connection and timeout failures are logged and fall through to the
    # generic failure path below (response stays None).
    try:
        response = self.session.post(url, data=data, headers=headers,
                                     timeout=timeout)
    except requests.exceptions.ConnectionError:
        _LOGGER.warning("Unable to connect to %s", url)
    except requests.exceptions.Timeout:
        _LOGGER.warning("No response from %s", url)
    if response is not None:
        if response.status_code == 200:
            # Successful login: remember the token for subsequent requests.
            self.auth_token = json.loads(response.text)['authorization']
            return True
        else:
            _LOGGER.warning("Login failed for %s: %s", self.email,
                            response.text)
    else:
        _LOGGER.warning("Login failed for %s", self.email)
    return False
def request(self, method, endpoint, payload=None, timeout=5):
    """Send an authenticated request to the API.

    :param method: HTTP method name (e.g. 'GET', 'POST')
    :param endpoint: API endpoint path appended to ``self.api_url``
    :param payload: optional object JSON-encoded into the request body
    :param timeout: per-request timeout in seconds
    :return: the :mod:`requests` response, or None on connection error,
        timeout, or when re-login after a 401 fails
    """
    url = self.api_url + endpoint
    data = None
    headers = {}
    if payload is not None:
        data = json.dumps(payload)
        headers['Content-Type'] = 'application/json'
    try:
        if self.auth_token is not None:
            headers[API_AUTH_HEADER] = self.auth_token
        response = self.session.request(method, url, data=data,
                                        headers=headers,
                                        timeout=timeout)
        # Anything but 401 (expired/invalid token) is returned as-is.
        if response.status_code != 401:
            return response
        _LOGGER.debug("Renewing auth token")
        if not self.login(timeout=timeout):
            return None
        # Retry request once with the freshly renewed token.
        headers[API_AUTH_HEADER] = self.auth_token
        return self.session.request(method, url, data=data,
                                    headers=headers,
                                    timeout=timeout)
    except requests.exceptions.ConnectionError:
        _LOGGER.warning("Unable to connect to %s", url)
    except requests.exceptions.Timeout:
        _LOGGER.warning("No response from %s", url)
    return None
def sesames(self):
    """Return the list of Sesame devices registered to this account.

    Returns an empty list (after logging a warning) when the API request
    fails or answers with a non-200 status.
    """
    response = self.request('GET', API_SESAME_LIST_ENDPOINT)
    if response is None or response.status_code != 200:
        _LOGGER.warning("Unable to list Sesames")
        return []
    return json.loads(response.text)['sesames']
def get_api(
        profile=None,
        config_file=None,
        requirements=None):
    '''
    Generate a datafs.DataAPI object from a config profile

    ``get_api`` generates a DataAPI object based on a
    pre-configured datafs profile specified in your datafs
    config file.

    To create a datafs config file, use the command line
    tool ``datafs configure --helper`` or export an existing
    DataAPI object with
    :py:meth:`datafs.ConfigFile.write_config_from_api`

    Parameters
    ----------
    profile : str
        (optional) name of a profile in your datafs config
        file. If profile is not provided, the default
        profile specified in the file will be used.

    config_file : str or file
        (optional) path to your datafs configuration file.
        By default, get_api uses your OS's default datafs
        application directory.

    requirements : str
        (optional) a requirements specification: either a
        path to a requirements file, or a string of
        ``archive==version`` pairs separated by newlines or
        semicolons. Falls back to the config file's
        ``requirements`` entry, then to
        ``requirements_data.txt``.

    Examples
    --------

    The following specifies a simple API with a MongoDB
    manager and a temporary storage service:

    .. code-block:: python

        >>> try:
        ...     from StringIO import StringIO
        ... except ImportError:
        ...     from io import StringIO
        ...
        >>> import tempfile
        >>> tempdir = tempfile.mkdtemp()
        >>>
        >>> config_file = StringIO("""
        ... default-profile: my-data
        ... profiles:
        ...     my-data:
        ...         manager:
        ...             class: MongoDBManager
        ...             kwargs:
        ...                 database_name: 'MyDatabase'
        ...                 table_name: 'DataFiles'
        ...
        ...         authorities:
        ...             local:
        ...                 service: OSFS
        ...                 args: ['{}']
        ... """.format(tempdir))
        >>>
        >>> # This file can be read in using the datafs.get_api helper function
        ...
        >>>
        >>> api = get_api(profile='my-data', config_file=config_file)
        >>> api.manager.create_archive_table(
        ...     'DataFiles',
        ...     raise_on_err=False)
        >>>
        >>> archive = api.create(
        ...     'my_first_archive',
        ...     metadata = dict(description = 'My test data archive'),
        ...     raise_on_err=False)
        >>>
        >>> with archive.open('w+') as f:
        ...     res = f.write(u'hello!')
        ...
        >>> with archive.open('r') as f:
        ...     print(f.read())
        ...
        hello!
        >>>
        >>> # clean up
        ...
        >>> archive.delete()
        >>> import shutil
        >>> shutil.rmtree(tempdir)

    '''

    config = ConfigFile(config_file=config_file)
    config.read_config()
    if profile is None:
        profile = config.config['default-profile']
    profile_config = config.get_profile_config(profile)

    default_versions = {}

    # Requirements resolution: explicit argument wins, then the config
    # file's `requirements` entry, then the default file name below.
    if requirements is None:
        requirements = config.config.get('requirements', None)

    if requirements is not None and not os.path.isfile(requirements):
        # Not a file on disk: treat the string itself as an inline spec of
        # requirement lines separated by newlines or semicolons.
        for reqline in re.split(r'[\r\n;]+', requirements):
            if re.search(r'^\s*$', reqline):
                continue

            archive, version = _parse_requirement(reqline)
            default_versions[archive] = version

    else:
        if requirements is None:
            requirements = 'requirements_data.txt'

        # Read requirement lines from the file, skipping blank lines.
        if os.path.isfile(requirements):
            with open_filelike(requirements, 'r') as reqfile:
                for reqline in reqfile.readlines():
                    if re.search(r'^\s*$', reqline):
                        continue

                    archive, version = _parse_requirement(reqline)
                    default_versions[archive] = version

    api = APIConstructor.generate_api_from_config(profile_config)

    api.default_versions = default_versions

    APIConstructor.attach_manager_from_config(api, profile_config)
    APIConstructor.attach_services_from_config(api, profile_config)
    APIConstructor.attach_cache_from_config(api, profile_config)

    return api
def check_requirements(to_populate, prompts, helper=False):
    '''
    Iterate over required values, ensuring each key in ``prompts`` is
    present in ``to_populate``.

    When a key is missing and ``helper`` is True, the user is prompted
    (via click) with the corresponding prompt text; otherwise an
    AssertionError is raised.

    Parameters
    ----------
    to_populate : dict
        Data dictionary to fill. Prompts given to the user are taken from
        ``prompts``.

    prompts : dict
        Keys and prompts to use when filling ``to_populate``
    '''

    for key, prompt in prompts.items():
        if helper:
            if key not in to_populate:
                to_populate[key] = click.prompt(prompt)
            continue
        msg = (
            'Required value "{}" not found. '
            'Use helper=True or the --helper '
            'flag for assistance.'.format(key))
        assert key in to_populate, msg
def post(self, *args, **kwargs):
    """Handle an upload POST: save the file and report the outcome as JSON.

    Returns 403 when uploads are not allowed; otherwise a JSON body with
    ``is_valid`` plus either the stored-file info or the form errors.
    """
    if not self.upload_allowed():
        return HttpResponseForbidden()
    form = self.get_upload_form()
    if form.is_valid():
        payload = {'is_valid': True}
        payload.update(form.stash(self.get_storage(), self.request.path))
    else:
        payload = {'is_valid': False, 'errors': form.errors}
    return HttpResponse(json.dumps(payload), content_type='application/json')
def get_upload_form(self):
    """Build the upload form bound to this request's POST data and files."""
    request = self.request
    return self.form_class(request.POST, request.FILES)
def from_url(location):
    """Return the body of the HTTP resource at *location* as a string.

    Error behaviour (unchanged):
    * malformed url raises ValueError
    * nonexistent IP / wrong-subnet IP / no HTTP server raises URLError
    * reachable HTTP server but wrong page raises HTTPError
    """
    request = urllib.request.Request(location)
    with urllib.request.urlopen(request) as response:
        return response.read().decode()
def parse_description_xml(location):
    """Extract serial number and base URL from a bridge's description.xml.

    :param location: URL of the description.xml document
    :return: ``(serial, baseip)`` for a Philips Hue bridge;
        ``(None, error)`` when fetching failed;
        ``(None, None)`` when the device is not a Hue bridge.
        Missing data in the XML raises AttributeError; malformed XML
        raises ParseError. Refer to the included example for the URLBase
        and serialNumber elements.
    """
    class _URLBase(str):
        """ Convenient access to hostname (ip) portion of the URL """
        @property
        def hostname(self):
            return urlsplit(self).hostname
    # """TODO: review error handling on xml"""
    # may want to suppress ParseError in the event that it was caused
    # by a none bridge device although this seems unlikely
    try:
        xml_str = from_url(location)
    except urllib.request.HTTPError as error:
        logger.info("No description for %s: %s", location, error)
        return None, error
    except urllib.request.URLError as error:
        logger.info("No HTTP server for %s: %s", location, error)
        return None, error
    else:
        root = ET.fromstring(xml_str)
        # UPnP description documents carry a default namespace; recover it
        # from the root tag so the root: prefix below resolves.
        rootname = {'root': root.tag[root.tag.find('{')+1:root.tag.find('}')]}
        baseip = root.find('root:URLBase', rootname).text
        device = root.find('root:device', rootname)
        serial = device.find('root:serialNumber', rootname).text
        # anicon = device.find('root:iconList', rootname).find('root:icon', rootname)
        # imgurl = anicon.find('root:url', rootname).text
        # Alternatively, could look directly in the modelDescription field
        if all(x in xml_str.lower() for x in ['philips', 'hue']):
            return serial, _URLBase(baseip)
        else:
            return None, None
q41581 | _build_from | train | def _build_from(baseip):
""" Build URL for description.xml from ip """
from ipaddress import ip_address
try:
ip_address(baseip)
except ValueError:
# """attempt to construct url but the ip format has changed"""
# logger.warning("Format of internalipaddress changed: %s", baseip)
if 'http' not in baseip[0:4].lower():
baseip = urlunsplit(['http', baseip, '', '', ''])
spl = urlsplit(baseip)
if '.xml' not in spl.path:
sep = '' if spl.path.endswith('/') else '/'
spl = spl._replace(path=spl.path+sep+'description.xml')
return spl.geturl()
else:
# construct url knowing baseip is a pure ip
return urlunsplit(('http', baseip, '/description.xml', '', '')) | python | {
"resource": ""
} |
def via_upnp():
    """Discover Hue bridges via SSDP (method 1 of the Philips guide).

    :raises DiscoveryError: when no bridge could be confirmed.
    """
    responses = ssdp_discover("ssdp:all", timeout=5)
    ip_bridges = [r for r in responses if 'IpBridge' in r.server]
    logger.info('SSDP returned %d items with %d Hue bridges(s).',
                len(responses), len(ip_bridges))
    # Confirm each candidate by fetching its advertised location, which
    # should look like: http://192.168.0.1:80/description.xml
    found = {}
    for candidate in ip_bridges:
        serial, info = parse_description_xml(candidate.location)
        if serial:
            found[serial] = info
    logger.debug('%s', found)
    if not found:
        raise DiscoveryError('SSDP returned nothing')
    return found
def via_nupnp():
    """Discover Hue bridges via the Philips portal (method 2 of the guide).

    :raises DiscoveryError: when no bridge could be confirmed.
    """
    portal_entries = parse_portal_json()
    logger.info('Portal returned %d Hue bridges(s).',
                len(portal_entries))
    # Confirm each candidate by fetching and parsing its description.xml,
    # e.g. http://192.168.0.1/description.xml
    found = {}
    for entry in portal_entries:
        serial, info = parse_description_xml(entry[1])
        if serial:
            found[serial] = info
    logger.debug('%s', found)
    if not found:
        raise DiscoveryError('Portal returned nothing')
    return found
def via_scan():
    """Discover Hue bridges by HTTP-scanning the local subnet(s).

    :raises DiscoveryError: when no bridge could be confirmed.
    """
    import socket
    import ipaddress
    import httpfind
    candidates = []
    for host in socket.gethostbyname_ex(socket.gethostname())[2]:
        # TODO: how do we determine subnet configuration?
        network = ipaddress.ip_interface(host + '/24').network
        candidates += httpfind.survey(network,
                                      path='description.xml',
                                      pattern='(P|p)hilips')
        logger.info('Scan on %s', host)
    logger.info('Scan returned %d Hue bridges(s).', len(candidates))
    # Confirm each candidate by parsing its description.xml,
    # e.g. http://192.168.0.1/description.xml
    found = {}
    for candidate in candidates:
        serial, info = parse_description_xml(candidate)
        if serial:
            found[serial] = info
    logger.debug('%s', found)
    if not found:
        raise DiscoveryError('Scan returned nothing')
    return found
def find_bridges(prior_bridges=None):
    """ Confirm or locate IP addresses of Philips Hue bridges.

    `prior_bridges` -- optional list of bridge serial numbers

    * omitted - all discovered bridges returned as dictionary
    * single string - returns IP as string or None
    * dictionary - validate provided ip's before attempting discovery
    * collection or sequence - return dictionary of filtered sn:ip pairs
    * if mutable then found bridges are removed from argument
    """
    found_bridges = {}
    # Validate the caller's provided mapping of serial -> ip (if any).
    try:
        prior_bridges_list = prior_bridges.items()
    except AttributeError:
        # Caller didn't provide a dict: assume a single serial string or
        # None. In either case, discovery must be executed.
        run_discovery = True
    else:
        for prior_sn, prior_ip in prior_bridges_list:
            if prior_ip:
                serial, baseip = parse_description_xml(_build_from(prior_ip))
                if serial:
                    # There is a bridge at the provided IP; record it.
                    found_bridges[serial] = baseip
                else:
                    # Nothing usable at that ip.
                    logger.info('%s not found at %s', prior_sn, prior_ip)
        # Discovery is only needed when some provided serial was not
        # confirmed at its recorded IP.
        run_discovery = found_bridges.keys() != prior_bridges.keys()
    # At this point: prior_bridges is None, unknown, a dict of unfound
    # SNs, or an empty dict; found_bridges holds confirmed SNs (or is {}).
    if run_discovery:
        # Fall through the discovery methods in order of preference:
        # SSDP, then the Philips portal, then a subnet scan.
        try:
            found_bridges.update(via_upnp())
        except DiscoveryError:
            try:
                found_bridges.update(via_nupnp())
            except DiscoveryError:
                try:
                    found_bridges.update(via_scan())
                except DiscoveryError:
                    logger.warning("All discovery methods returned nothing")
    if prior_bridges:
        # prior_bridges is either a single SN string or a dict of unfound
        # SNs; first assume a single serial string.
        try:
            ip_address = found_bridges[prior_bridges]
        except TypeError:
            # Caller passed an unhashable type as key; presumably it's a
            # dict meant for the alternate (filtering) mode below.
            logger.debug('Assuming alternate mode, prior_bridges is type %s.',
                         type(prior_bridges))
        except KeyError:
            # The provided serial number was not found.
            # TODO: dropping tuples here if return none executed
            # return None
            pass  # let it turn the string into a set, eww
        else:
            # The provided serial number was found: return its IP.
            return ip_address
        # Filter the found list down to the subset the caller asked about.
        prior_bridges_keys = set(prior_bridges)
        keys_to_remove = prior_bridges_keys ^ found_bridges.keys()
        logger.debug('Removing %s from found_bridges', keys_to_remove)
        for key in keys_to_remove:
            found_bridges.pop(key, None)
        # Filter the caller's container down to the still-unfound entries.
        keys_to_remove = prior_bridges_keys & found_bridges.keys()
        logger.debug('Removing %s from prior_bridges', keys_to_remove)
        for key in keys_to_remove:
            try:
                prior_bridges.pop(key, None)
            except TypeError:
                # Not a dict: try it as a set or list.
                prior_bridges.remove(key)
            except AttributeError:
                # Likely not mutable; stop trying to update it.
                break
        keys_to_report = prior_bridges_keys - found_bridges.keys()
        for serial in keys_to_report:
            logger.warning('Could not locate bridge with Serial ID %s', serial)
    else:
        # prior_bridges is None or an empty dict: return all found.
        pass
    return found_bridges
def matches(self, a, b, **config):
    """Return True when both messages refer to the same package."""
    extract = self.processor._u2p
    package_a = extract(a['msg']['update']['title'])[0]
    package_b = extract(b['msg']['update']['title'])[0]
    return package_a == package_b
def update_from_pypi(self):
    """Refresh this record from PyPI and save it.

    Fetches the package's licence always; when the current version is
    parseable, also fetches latest/next versions, the version diff
    status, and Python/Django support flags. Stamps ``checked_pypi_at``
    and persists via ``self.save()``.

    :return: self (the freshly saved instance)
    """
    package = pypi.Package(self.package_name)
    self.licence = package.licence()
    # Only version-comparison fields require a parseable current version.
    if self.is_parseable:
        self.latest_version = package.latest_version()
        self.next_version = package.next_version(self.current_version)
        self.diff_status = pypi.version_diff(self.current_version, self.latest_version)
        self.python_support = package.python_support()
        self.django_support = package.django_support()
        self.supports_py3 = package.supports_py3()
    # NOTE(review): indentation was lost in this copy; the timestamp/save
    # are assumed to run unconditionally — confirm against upstream.
    self.checked_pypi_at = tz_now()
    self.save()
    return self
def sendfile(self, data, zlib_compress=None, compress_level=6):
    """Send data from a file object over this connection.

    :param data: file-like object; rewound first if it supports seek()
    :param zlib_compress: when truthy, stream-compress the data with zlib
    :param compress_level: zlib compression level (1-9, default 6)
    """
    if hasattr(data, 'seek'):
        data.seek(0)
    chunk_size = CHUNK_SIZE
    compressor = None
    if zlib_compress:
        chunk_size = BLOCK_SIZE
        compressor = compressobj(compress_level)
    while True:
        binarydata = data.read(chunk_size)
        # BUG FIX: the original tested `binarydata == ''`, which never
        # matches b'' from a binary-mode file and loops forever; an empty
        # read (truthiness test) signals EOF for both str and bytes.
        if not binarydata:
            break
        if compressor is not None:
            binarydata = compressor.compress(binarydata)
            if not binarydata:
                # compressor buffered the input; nothing to send yet
                continue
        self.send(binarydata)
    if compressor is not None:
        # flush whatever the compressor still holds, in BLOCK_SIZE pieces
        remaining = compressor.flush()
        while remaining:
            self.send(remaining[:BLOCK_SIZE])
            remaining = remaining[BLOCK_SIZE:]
"resource": ""
} |
def ghostedDistArrayFactory(BaseClass):
    """
    Build a ghosted distributed array class deriving from BaseClass
    @param BaseClass base class, e.g. DistArray or MaskedDistArray
    @return ghosted dist array class
    """
    class GhostedDistArrayAny(BaseClass):
        """
        Ghosted distributed array. Each process owns data and exposes
        halo regions to other processes. Halo sides are addressed with
        offset tuples such as (1, 0) for north and (-1, 0) for south.
        """
        def __init__(self, shape, dtype):
            """
            Constructor
            @param shape shape of the array
            @param dtype numpy data type
            """
            # defer entirely to the parent constructor
            BaseClass.__init__(self, shape, dtype)

        def setNumberOfGhosts(self, numGhosts):
            """
            Set the width of the ghost halo and expose each halo slab
            @param numGhosts halo thickness
            """
            ndim = len(self.shape)
            for dim in range(ndim):
                for direction in (-1, 1):
                    # A window id is all zeros except one +/-1 entry:
                    # -1 marks a slab of thickness numGhosts on the
                    # low-index side of that axis, +1 the high-index
                    # side; 0 spans the whole axis.
                    window_id = [0] * ndim
                    window_id[dim] = direction
                    if direction == 1:
                        halo = slice(self.shape[dim] - numGhosts,
                                     self.shape[dim])
                    else:
                        halo = slice(0, numGhosts)
                    # expose the MPI window for this halo slab
                    self.expose(self.getSlab(dim, halo), tuple(window_id))

        def getSlab(self, dim, slce):
            """
            Get slab. A slab is a multi-dimensional slice extending in
            all directions except along dim where slce applies
            @param dim dimension (0=first index, 1=2nd index...)
            @param slce python slice object along dimension dim
            @return slab
            """
            slab = []
            for axis in range(len(self.shape)):
                if axis == dim:
                    slab.append(slce)
                else:
                    slab.append(slice(0, self.shape[axis]))
            return slab

        def getEllipsis(self, winID):
            """
            Get the ellipsis for a given halo side
            @param winID a tuple of zeros and one +1 or -1. To access
                         the "north" side for instance, set winID=(1, 0),
                         (-1, 0) for the south side, (0, 1) the east
                         side, etc. No communication is involved.
            @return None if the halo was not exposed (bad winID)
            """
            if winID not in self.windows:
                return None
            return self.windows[winID]['slice']
    return GhostedDistArrayAny
"resource": ""
} |
def serialize_upload(name, storage, url):
    """
    Serialize uploaded file by name and storage. Namespaced by the upload url.
    """
    if isinstance(storage, LazyObject):
        # Force evaluation so the concrete backend class can be inspected.
        storage._setup()
        storage_cls = storage._wrapped.__class__
    else:
        storage_cls = storage.__class__
    payload = {
        'name': name,
        'storage': '%s.%s' % (storage_cls.__module__, storage_cls.__name__),
    }
    return signing.dumps(payload, salt=url)
"resource": ""
} |
def deserialize_upload(value, url):
    """
    Restore file name and storage class from a serialized value and the
    upload url. Returns {'name': None, 'storage': None} on any failure.
    """
    empty = {'name': None, 'storage': None}
    try:
        payload = signing.loads(value, salt=url)
    except signing.BadSignature:
        # TODO: Log invalid signature
        return empty
    try:
        payload['storage'] = get_storage_class(payload['storage'])
    except (ImproperlyConfigured, ImportError):
        # TODO: Log invalid class
        return empty
    return payload
"resource": ""
} |
def open_stored_file(value, url):
    """
    Deserialize value for a given upload url and return the open file.
    Returns None if deserialization fails or the file does not exist.
    """
    info = deserialize_upload(value, url)
    storage_cls = info['storage']
    filename = info['name']
    if not (storage_cls and filename):
        return None
    storage = storage_cls()
    if not storage.exists(filename):
        return None
    upload = storage.open(filename)
    # expose only the basename, mirroring a fresh upload's name
    upload.name = os.path.basename(filename)
    return upload
"resource": ""
} |
def _check_action(action):
    """Validate a TELL action, lower-casing string input first.

    Raises SpamCError when the action is not one of learn, forget,
    report or revoke.
    """
    if isinstance(action, types.StringTypes):
        action = action.lower()
    if action not in ('learn', 'forget', 'report', 'revoke'):
        raise SpamCError('The action option is invalid')
    return action
"resource": ""
} |
def get_response(cmd, conn):
    """Read and parse a spamd response from *conn* for command *cmd*.

    Returns a dict with: code, message, isspam, score, basescore,
    report (list of rule dicts), symbols (list), headers (dict), and —
    for TELL — didset / didremove booleans. Raises SpamCResponseError
    when the first line does not match the expected status format.
    """
    # Wrap the raw socket in a buffered file object and slurp everything.
    resp = conn.socket().makefile('rb', -1)
    resp_dict = dict(
        code=0,
        message='',
        isspam=False,
        score=0.0,
        basescore=0.0,
        report=[],
        symbols=[],
        headers={},
    )
    if cmd == 'TELL':
        # TELL responses additionally report whether spamd set/removed state.
        resp_dict['didset'] = False
        resp_dict['didremove'] = False
    data = resp.read()
    lines = data.split('\r\n')
    for index, line in enumerate(lines):
        if index == 0:
            # First line is the status line, e.g. protocol/code/message.
            match = RESPONSE_RE.match(line)
            if not match:
                raise SpamCResponseError(
                    'spamd unrecognized response: %s' % data)
            resp_dict.update(match.groupdict())
            resp_dict['code'] = int(resp_dict['code'])
        else:
            if not line.strip():
                continue
            # Spam status header: isspam flag plus score/basescore.
            match = SPAM_RE.match(line)
            if match:
                tmp = match.groupdict()
                resp_dict['score'] = float(tmp['score'])
                resp_dict['basescore'] = float(tmp['basescore'])
                resp_dict['isspam'] = tmp['isspam'] in ['True', 'Yes']
            if not match:
                if cmd == 'SYMBOLS':
                    # SYMBOLS body is a comma-separated list of rule names.
                    match = PART_RE.findall(line)
                    for part in match:
                        resp_dict['symbols'].append(part)
                if not match and cmd != 'PROCESS':
                    # Fall back to parsing report table rows:
                    # score, rule name, free-text description.
                    match = RULE_RE.findall(line)
                    if match:
                        resp_dict['report'] = []
                        for part in match:
                            score = part[0] + part[1]
                            score = score.strip()
                            resp_dict['report'].append(
                                dict(score=score,
                                     name=part[2],
                                     description=SPACE_RE.sub(" ", part[3])))
            # TELL acknowledgements appear as dedicated header lines.
            if line.startswith('DidSet:'):
                resp_dict['didset'] = True
            if line.startswith('DidRemove:'):
                resp_dict['didremove'] = True
    if cmd == 'PROCESS':
        # PROCESS returns the (possibly modified) message after the headers;
        # lines[4:] skips status + headers + blank separator.
        resp_dict['message'] = ''.join(lines[4:]) + '\r\n'
    if cmd == 'HEADERS':
        # Parse returned message headers into a plain dict.
        parser = Parser()
        headers = parser.parsestr('\r\n'.join(lines[4:]), headersonly=True)
        for key in headers.keys():
            resp_dict['headers'][key] = headers[key]
    return resp_dict
"resource": ""
} |
def get_headers(self, cmd, msg_length, extra_headers):
    """Build the spamd request header block for *cmd*.

    Includes Content-length, optional User and Compress headers, any
    caller-supplied extras (except a conflicting content-length), and
    the blank line terminating the header section.
    """
    headers = [
        "%s %s" % (cmd, PROTOCOL_VERSION),
        "Content-length: %s" % msg_length,
    ]
    if self.user:
        headers.append("User: %s" % self.user)
    if self.gzip:
        headers.append("Compress: zlib")
    if extra_headers is not None:
        for name in extra_headers:
            # never let callers override the computed content length
            if name.lower() != 'content-length':
                headers.append("%s: %s" % (name, extra_headers[name]))
    # two trailing CRLFs close the header section
    headers.extend(['', ''])
    return '\r\n'.join(headers)
"resource": ""
} |
def perform(self, cmd, msg='', extra_headers=None):
    """Send *cmd* (with optional message body) to spamd and return the
    parsed response.

    *msg* may be a string or a file-like object; real files are sized
    via fstat, other file-likes via seek/tell. Retries on recoverable
    socket errors up to self.max_tries, sleeping self.wait_tries
    between attempts. Raises SpamCError / SpamCTimeOutError on failure.
    """
    tries = 0
    while 1:
        conn = None
        try:
            conn = self.get_connection()
            # Work out Content-length up front; +2 accounts for the
            # trailing CRLF appended to string bodies.
            if hasattr(msg, 'read') and hasattr(msg, 'fileno'):
                # real file on disk: size it without reading
                msg_length = str(os.fstat(msg.fileno()).st_size)
            elif hasattr(msg, 'read'):
                # file-like without fileno: measure by seeking to the end
                msg.seek(0, 2)
                msg_length = str(msg.tell() + 2)
            else:
                if msg:
                    try:
                        msg_length = str(len(msg) + 2)
                    except TypeError:
                        # neither string nor file handle — unusable input
                        conn.close()
                        raise ValueError(
                            'msg param should be a string or file handle')
                else:
                    # empty body is just the terminating CRLF
                    msg_length = '2'
            headers = self.get_headers(cmd, msg_length, extra_headers)
            if isinstance(msg, types.StringTypes):
                if self.gzip and msg:
                    msg = compress(msg + '\r\n', self.compress_level)
                else:
                    msg = msg + '\r\n'
                conn.send(headers + msg)
            else:
                conn.send(headers)
                if hasattr(msg, 'read'):
                    if hasattr(msg, 'seek'):
                        # rewind in case the length probe moved the cursor
                        msg.seek(0)
                    conn.sendfile(msg, self.gzip, self.compress_level)
                    conn.send('\r\n')
            try:
                # half-close: tell spamd we are done writing
                conn.socket().shutdown(socket.SHUT_WR)
            except socket.error:
                pass
            return get_response(cmd, conn)
        except socket.gaierror as err:
            # DNS failure: not recoverable by retrying here
            if conn is not None:
                conn.release()
            raise SpamCError(str(err))
        except socket.timeout as err:
            if conn is not None:
                conn.release()
            raise SpamCTimeOutError(str(err))
        except socket.error as err:
            # close (not release) — the connection may be unusable
            if conn is not None:
                conn.close()
            errors = (errno.EAGAIN, errno.EPIPE, errno.EBADF,
                      errno.ECONNRESET)
            # only retry transient errors, and only while tries remain
            if err[0] not in errors or tries >= self.max_tries:
                raise SpamCError("socket.error: %s" % str(err))
        except BaseException:
            if conn is not None:
                conn.release()
            raise
        tries += 1
        self.backend_mod.sleep(self.wait_tries)
"resource": ""
} |
q41597 | generate_key | train | def generate_key(filepath):
''' generates a new, random secret key at the given location on the
filesystem and returns its path
'''
fs = path.abspath(path.expanduser(filepath))
with open(fs, 'wb') as outfile:
outfile.write(Fernet.generate_key())
chmod(fs, 0o400)
return fs | python | {
"resource": ""
} |
q41598 | get_key | train | def get_key(key=None, keyfile=None):
""" returns a key given either its value, a path to it on the filesystem
or as last resort it checks the environment variable CRYPTOYAML_SECRET
"""
if key is None:
if keyfile is None:
key = environ.get('CRYPTOYAML_SECRET')
if key is None:
raise MissingKeyException(
'''You must either provide a key value,'''
''' a path to a key or its value via the environment variable '''
''' CRYPTOYAML_SECRET'''
)
else:
key = key.encode('utf-8')
else:
key = open(keyfile, 'rb').read()
return key | python | {
"resource": ""
} |
def read(self):
    """ Reads and decrypts data from the filesystem """
    if not path.exists(self.filepath):
        # no stored file yet — start from an empty mapping
        self.data = dict()
        return
    with open(self.filepath, 'rb') as infile:
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input; here the payload is our own encrypted file, so
        # presumably trusted — confirm before loosening that assumption.
        self.data = yaml.load(
            self.fernet.decrypt(infile.read()))
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.