_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q41600
CryptoYAML.write
train
def write(self):
    """Serialize the current state to YAML, encrypt it, and persist it.

    Overwrites ``self.filepath`` with the Fernet-encrypted YAML dump of
    ``self.data``.
    """
    with open(self.filepath, 'wb') as outfile:
        payload = self.fernet.encrypt(yaml.dump(self.data, encoding='utf-8'))
        outfile.write(payload)
python
{ "resource": "" }
q41601
ConditionSet.has_active_condition
train
def has_active_condition(self, condition, instances):
    """
    Evaluate ``condition`` against each instance in ``instances`` plus the
    non-instance default (``None``) and report whether it is met.

    Returns False immediately if any executable instance evaluates to
    False; True if at least one evaluated to True; otherwise None.
    """
    met = None
    for candidate in instances + [None]:
        if not self.can_execute(candidate):
            continue
        outcome = self.is_active(candidate, condition)
        if outcome is False:
            return False
        if outcome is True:
            met = True
    return met
python
{ "resource": "" }
q41602
Version.get_pypi_version
train
async def get_pypi_version(self):
    """Fetch the version published to PyPI.

    Sets ``self._version_data`` (beta flag, source) and ``self._version``.
    In beta mode the newest pre-release is preferred unless the stable
    version already covers it. All errors are logged and swallowed; on
    failure ``self._version`` is left unchanged.
    """
    self._version_data["beta"] = self.beta
    self._version_data["source"] = "PyPi"
    info_version = None
    last_release = None
    try:
        async with async_timeout.timeout(5, loop=self.loop):
            response = await self.session.get(URL["pypi"])
            data = await response.json()
            info_version = data["info"]["version"]
            releases = data["releases"]
            for version in sorted(releases, reverse=True):
                # Skip plain release numbers; the first non-matching tag is
                # the newest pre-release. FIX: the pattern previously used
                # doubled backslashes inside a raw string (r"\\d"), which
                # matches a literal backslash character and therefore never
                # matched any version string.
                if re.search(r"^(\d+\.)?(\d\.)?(\*|\d+)$", version):
                    continue
                else:
                    last_release = version
                    break
            self._version = info_version
            if self.beta:
                # Prefer the stable version if the newest pre-release tag
                # already contains it as a substring.
                if info_version in last_release:
                    self._version = info_version
                else:
                    self._version = last_release
        _LOGGER.debug("Version: %s", self.version)
        _LOGGER.debug("Version data: %s", self.version_data)
    except asyncio.TimeoutError:
        _LOGGER.error("Timeouterror fetching version information from PyPi")
    except KeyError as error:
        _LOGGER.error("Error parsing version information from PyPi, %s", error)
    except TypeError as error:
        _LOGGER.error("Error parsing version information from PyPi, %s", error)
    except aiohttp.ClientError as error:
        _LOGGER.error("Error fetching version information from PyPi, %s", error)
    except socket.gaierror as error:
        _LOGGER.error("Error fetching version information from PyPi, %s", error)
    except Exception as error:  # pylint: disable=broad-except
        _LOGGER.critical("Something really wrong happend! - %s", error)
python
{ "resource": "" }
q41603
Version.get_hassio_version
train
async def get_hassio_version(self):
    """Fetch the Home Assistant version published for Hass.io.

    Populates ``self._version_data`` (source, beta flag, board, image and
    the hassos / supervisor / hassos-cli versions) and sets
    ``self._version`` from the beta or stable Hass.io feed. All errors are
    logged and swallowed; on failure ``self._version`` is left unchanged.
    """
    # Fall back to the default image when an unknown one was configured.
    if self.image not in IMAGES:
        _LOGGER.warning("%s is not a valid image using default", self.image)
        self.image = "default"
    board = BOARDS.get(self.image, BOARDS["default"])
    self._version_data["source"] = "Hassio"
    self._version_data["beta"] = self.beta
    self._version_data["board"] = board
    self._version_data["image"] = IMAGES[self.image]["hassio"]
    try:
        async with async_timeout.timeout(5, loop=self.loop):
            # The beta flag selects which version feed is queried.
            response = await self.session.get(
                URL["hassio"]["beta" if self.beta else "stable"]
            )
            data = await response.json()
            self._version = data["homeassistant"][IMAGES[self.image]["hassio"]]
            self._version_data["hassos"] = data["hassos"][board]
            self._version_data["supervisor"] = data["supervisor"]
            self._version_data["hassos-cli"] = data["hassos-cli"]
        _LOGGER.debug("Version: %s", self.version)
        _LOGGER.debug("Version data: %s", self.version_data)
    except asyncio.TimeoutError as error:
        # NOTE(review): bound exception is unused here, unlike the handlers
        # below.
        _LOGGER.error("Timeouterror fetching version information for hassio")
    except KeyError as error:
        _LOGGER.error("Error parsing version information for hassio, %s", error)
    except TypeError as error:
        _LOGGER.error("Error parsing version information for hassio, %s", error)
    except aiohttp.ClientError as error:
        _LOGGER.error("Error fetching version information for hassio, %s", error)
    except socket.gaierror as error:
        _LOGGER.error("Error fetching version information for hassio, %s", error)
    except Exception as error:  # pylint: disable=broad-except
        _LOGGER.critical("Something really wrong happend! - %s", error)
python
{ "resource": "" }
q41604
Version.get_docker_version
train
async def get_docker_version(self):
    """Fetch the newest Home Assistant version published to Docker Hub.

    Walks the tag list for the configured image, skipping non-version tags
    ("latest", "landingpage", "rc", "dev"); beta-style tags are only
    accepted when ``self.beta`` is set. Errors are logged and swallowed.
    """
    # Fall back to the default image when an unknown one was configured.
    if self.image not in IMAGES:
        _LOGGER.warning("%s is not a valid image using default", self.image)
        self.image = "default"
    self._version_data["beta"] = self.beta
    self._version_data["source"] = "Docker"
    self._version_data["image"] = IMAGES[self.image]["docker"]
    try:
        async with async_timeout.timeout(5, loop=self.loop):
            response = await self.session.get(
                URL["docker"].format(IMAGES[self.image]["docker"])
            )
            data = await response.json()
            for version in data["results"]:
                if version["name"] in ["latest", "landingpage", "rc", "dev"]:
                    # Not a version number at all -- skip.
                    continue
                elif re.search(r"\b.+b\d", version["name"]):
                    # Beta-style tag (e.g. "0.100.0b3"): only usable in
                    # beta mode.
                    if self.beta:
                        self._version = version["name"]
                        break
                    else:
                        continue
                else:
                    # Stable tag: take the first one encountered.
                    self._version = version["name"]
                    # NOTE(review): _version was assigned on the previous
                    # line, so this check always breaks here.
                    if self._version is not None:
                        break
                    else:
                        continue
        _LOGGER.debug("Version: %s", self.version)
        _LOGGER.debug("Version data: %s", self.version_data)
    except asyncio.TimeoutError as error:
        _LOGGER.error("Timeouterror fetching version information for docker")
    except KeyError as error:
        _LOGGER.error("Error parsing version information for docker, %s", error)
    except TypeError as error:
        _LOGGER.error("Error parsing version information for docker, %s", error)
    except aiohttp.ClientError as error:
        _LOGGER.error("Error fetching version information for docker, %s", error)
    except socket.gaierror as error:
        _LOGGER.error("Error fetching version information for docker, %s", error)
    except Exception as error:  # pylint: disable=broad-except
        _LOGGER.critical("Something really wrong happend! - %s", error)
python
{ "resource": "" }
q41605
ByUserTag.matches
train
def matches(self, a, b, **config):
    """The two messages must have been submitted by the same username."""
    name_a = a['msg']['override']['submitter']['name']
    name_b = b['msg']['override']['submitter']['name']
    return name_a == name_b
python
{ "resource": "" }
q41606
Client.start_stream_subscriber
train
def start_stream_subscriber(self):
    """
    Starts the stream consumer's main loop.

    Called when the stream consumer has been set up with the correct
    callbacks.
    """
    if not self._stream_process_started:  # pragma: no cover
        if sys.platform.startswith("win"):
            # if we're on windows we can't expect multiprocessing to work,
            # so run the consumer inline instead of in a subprocess.
            # NOTE(review): _stream() appears to block (it runs a reactor
            # loop), so the process start below is presumably never reached
            # on Windows -- confirm against the stream setup code.
            self._stream_process_started = True
            self._stream()
        self._stream_process_started = True
        self._stream_process.start()
python
{ "resource": "" }
q41607
Client.subscribe
train
def subscribe(self, stream):
    """
    Subscribe to a stream.

    :param stream: stream to subscribe to
    :type stream: str
    :returns: decorator that registers the wrapped function as the
        message callback for ``stream`` and returns it unchanged
    :raises: :class:`~datasift.exceptions.StreamSubscriberNotStarted`,
        :class:`~datasift.exceptions.DeleteRequired`,
        :class:`~datasift.exceptions.StreamNotConnected`

    Used as a decorator, eg.::

        @client.subscribe(stream)
        def subscribe_to_hash(msg):
            print(msg)
    """
    if not self._stream_process_started:
        raise StreamSubscriberNotStarted()

    def real_decorator(func):
        if not self._on_delete:
            raise DeleteRequired("""An on_delete function is required. You must process delete messages and remove them from your system (if stored) in order to remain compliant with the ToS""")
        if hasattr(self.factory, 'datasift') and 'send_message' in self.factory.datasift:  # pragma: no cover
            self.subscriptions[stream] = func
            self.factory.datasift['send_message'](json.dumps({"action": "subscribe", "hash": stream}).encode("utf8"))
        else:  # pragma: no cover
            raise StreamNotConnected('The client is not connected to DataSift, unable to subscribe to stream')
        # FIX: a decorator must return the wrapped function; previously
        # nothing was returned, so the decorated name was rebound to None.
        return func
    return real_decorator
python
{ "resource": "" }
q41608
Client.on_open
train
def on_open(self, func):
    """
    Register ``func`` as the callback fired when a stream opens.

    Works as a plain call or as a decorator::

        @client.on_open
        def open_callback():
            setup_stream()

    If the stream is already open, the callback fires immediately with the
    client as its argument. Returns ``func`` so decorator use keeps the
    original function bound.
    """
    self._on_open = func
    if self.opened:  # pragma: no cover
        self._on_open(self)
    return func
python
{ "resource": "" }
q41609
Client._stream
train
def _stream(self):  # pragma: no cover
    """Runs in a sub-process to perform stream consumption"""
    self.factory.protocol = LiveStream
    # Callback table handed to the websocket protocol; 'send_message' is
    # populated later, once the connection is established.
    self.factory.datasift = {
        'on_open': self._on_open,
        'on_close': self._on_close,
        'on_message': self._on_message,
        'send_message': None
    }
    if self.config.ssl:
        # Imported lazily so non-SSL configurations do not require pyOpenSSL.
        from twisted.internet import ssl
        options = ssl.optionsForClientTLS(hostname=WEBSOCKET_HOST)
        connectWS(self.factory, options)
    else:
        connectWS(self.factory)
    # Blocks for the lifetime of the stream connection.
    reactor.run()
python
{ "resource": "" }
q41610
Client.compile
train
def compile(self, csdl):
    """ Compile the given CSDL.

        Uses API documented at
        http://dev.datasift.com/docs/api/rest-api/endpoints/compile

        Raises a DataSiftApiException for any error given by the REST API,
        including CSDL compilation.

        :param csdl: CSDL to compile
        :type csdl: str
        :returns: dict with extra response data
        :rtype: :class:`~datasift.request.DictResponse`
        :raises: :class:`~datasift.exceptions.DataSiftApiException`,
            :class:`requests.exceptions.HTTPError`
    """
    payload = {'csdl': csdl}
    return self.request.post('compile', data=payload)
python
{ "resource": "" }
q41611
Client.is_valid
train
def is_valid(self, csdl):
    """ Checks if the given CSDL is valid.

        Uses API documented at
        http://dev.datasift.com/docs/api/rest-api/endpoints/validate

        :param csdl: CSDL to validate
        :type csdl: str
        :returns: Boolean indicating the validity of the CSDL
        :rtype: bool
        :raises: :class:`~datasift.exceptions.DataSiftApiException`,
            :class:`requests.exceptions.HTTPError`
    """
    try:
        self.validate(csdl)
    except DataSiftApiException as e:
        # A 400 means "invalid CSDL"; anything else is a real API error.
        if e.response.status_code != 400:
            raise e
        return False
    return True
python
{ "resource": "" }
q41612
Client.usage
train
def usage(self, period='hour'):
    """ Check the number of objects processed and delivered for a given
        time period.

        Uses API documented at
        http://dev.datasift.com/docs/api/rest-api/endpoints/usage

        :param period: (optional) time period to measure usage for, one of
            "day", "hour" or "current" (5 minutes); default "hour"
        :type period: str
        :returns: dict with extra response data
        :rtype: :class:`~datasift.request.DictResponse`
        :raises: :class:`~datasift.exceptions.DataSiftApiException`,
            :class:`requests.exceptions.HTTPError`
    """
    query = {'period': period}
    return self.request.get('usage', params=query)
python
{ "resource": "" }
q41613
Client.dpu
train
def dpu(self, hash=None, historics_id=None):
    """ Calculate the DPU cost of consuming a stream.

        Uses API documented at
        http://dev.datasift.com/docs/api/rest-api/endpoints/dpu

        :param hash: target CSDL filter hash
        :type hash: str
        :param historics_id: target historics query id (used when no hash
            is given)
        :returns: dict with extra response data, or None when neither
            identifier is supplied
        :rtype: :class:`~datasift.request.DictResponse`
        :raises: :class:`~datasift.exceptions.DataSiftApiException`,
            :class:`requests.exceptions.HTTPError`
    """
    # hash takes precedence; truthiness (not "is None") matches the
    # original contract.
    params = None
    if hash:
        params = {'hash': hash}
    elif historics_id:
        params = {'historics_id': historics_id}
    if params is not None:
        return self.request.get('dpu', params=params)
python
{ "resource": "" }
q41614
Client.pull
train
def pull(self, subscription_id, size=None, cursor=None):
    """ Pulls a series of interactions from the queue for the given
        subscription ID.

        Uses API documented at
        http://dev.datasift.com/docs/api/rest-api/endpoints/pull

        :param subscription_id: The ID of the subscription to pull
            interactions for
        :type subscription_id: str
        :param size: the max amount of data to pull in bytes
        :type size: int
        :param cursor: an ID to use as the point in the queue from which to
            start fetching data
        :type cursor: str
        :returns: dict with extra response data
        :rtype: :class:`~datasift.request.ResponseList`
        :raises: :class:`~datasift.exceptions.DataSiftApiException`,
            :class:`requests.exceptions.HTTPError`
    """
    params = {'id': subscription_id}
    if size:
        params['size'] = size
    if cursor:
        params['cursor'] = cursor
    raw = self.request('get', 'pull', params=params)

    def pull_parser(headers, data):
        # The response format header decides whether the body is a single
        # JSON document or newline-delimited JSON records.
        response_format = headers.get("X-DataSift-Format")
        if response_format in ("json_meta", "json_array"):
            return json.loads(data)
        return [json.loads(line) for line in data.strip().split("\n")]

    return self.request.build_response(raw, parser=pull_parser)
python
{ "resource": "" }
q41615
PUBGAPI.player_skill
train
def player_skill(self, player_handle, game_mode='solo'):
    """Return the player's current skill rating for a game mode.

    :param player_handle: handle of the player to look up
    :param game_mode: one of the modes in ``constants.GAME_MODES``
        (default ``'solo'``)
    :returns: dict mapping region name to the 'Rating' stat value
    :raises APIException: if ``game_mode`` is not a known mode
    """
    if game_mode not in constants.GAME_MODES:
        raise APIException("game_mode must be one of: solo, duo, squad, all")
    try:
        data = self._get_player_profile(player_handle)
        player_stats = {}
        # FIX: removed the unused local ``return_data`` present in the
        # original implementation.
        for stat in data['Stats']:
            if stat['Match'] == game_mode:
                for entry in stat['Stats']:
                    if entry['label'] == 'Rating':
                        player_stats[stat['Region']] = entry['value']
        return player_stats
    except BaseException as error:
        # NOTE(review): BaseException also traps KeyboardInterrupt /
        # SystemExit; kept for backward compatibility, but ``Exception``
        # is usually what is intended here.
        print('Unhandled exception: ' + str(error))
        raise
python
{ "resource": "" }
q41616
plot
train
def plot(nxG, nyG, iBeg, iEnd, jBeg, jEnd, data, title=''):
    """
    Plot distributed array

    Gathers every rank's sub-domain onto rank 0, stitches them into the
    full global array, and displays it with matplotlib (rank 0 only).

    @param nxG number of global cells in x
    @param nyG number of global cells in y
    @param iBeg global starting index in x
    @param iEnd global ending index in x
    @param jBeg global starting index in y
    @param jEnd global ending index in y
    @param data local array
    @param title plot title
    """
    sz = MPI.COMM_WORLD.Get_size()
    rk = MPI.COMM_WORLD.Get_rank()
    # Collect each rank's sub-domain bounds and data on rank 0.
    iBegs = MPI.COMM_WORLD.gather(iBeg, root=0)
    iEnds = MPI.COMM_WORLD.gather(iEnd, root=0)
    jBegs = MPI.COMM_WORLD.gather(jBeg, root=0)
    jEnds = MPI.COMM_WORLD.gather(jEnd, root=0)
    arrays = MPI.COMM_WORLD.gather(numpy.array(data), root=0)
    if rk == 0:
        # Assemble the global array from the gathered local pieces.
        bigArray = numpy.zeros((nxG, nyG), data.dtype)
        for pe in range(sz):
            bigArray[iBegs[pe]:iEnds[pe], jBegs[pe]:jEnds[pe]] = arrays[pe]
        # Imported lazily so non-plotting ranks never need matplotlib.
        from matplotlib import pylab
        pylab.pcolor(bigArray.transpose())
        # add the decomp domains
        for pe in range(sz):
            pylab.plot([iBegs[pe], iBegs[pe]], [0, nyG - 1], 'w--')
            pylab.plot([0, nxG - 1], [jBegs[pe], jBegs[pe]], 'w--')
        pylab.title(title)
        pylab.show()
python
{ "resource": "" }
q41617
NdriveTerm.do_ls
train
def do_ls(self, nothing=''):
    """list files in current remote directory"""
    # Directories are printed in blue via ANSI escape codes, files plainly.
    for directory in self.dirs:
        self.stdout.write("\033[0;34m" + ('%s\n' % directory) + "\033[0m")
    for filename in self.files:
        self.stdout.write('%s\n' % filename)
python
{ "resource": "" }
q41618
NdriveTerm.do_cd
train
def do_cd(self, path='/'):
    """change current working directory"""
    # The command arguments arrive as a sequence; only the first entry is
    # the target path.
    target = path[0]
    if target == "..":
        # Drop the last path component, keeping the trailing slash.
        parent = "/".join(self.current_path[:-1].split("/")[0:-1])
        self.current_path = parent + '/'
    elif target == '/':
        self.current_path = "/"
    elif target[-1] == '/':
        self.current_path += target
    else:
        self.current_path += target + '/'
    resp = self.n.getList(self.current_path, type=3)
    if resp:
        # Rebuild the cached directory/file listings from the response.
        self.dirs = []
        self.files = []
        for entry in resp:
            name = entry['href'].encode('utf-8')
            if name[-1] == '/':
                self.dirs.append(os.path.basename(name[:-1]))
            else:
                self.files.append(os.path.basename(name))
    self.prompt = "> %s@Ndrive:%s " % (self.id, self.current_path)
python
{ "resource": "" }
q41619
NdriveTerm.do_cat
train
def do_cat(self, path):
    """display the contents of a file"""
    path = path[0]
    tmp_file_path = self.TMP_PATH + 'tmp'
    if not os.path.exists(self.TMP_PATH):
        os.makedirs(self.TMP_PATH)
    # downloadFile writes the remote file to tmp_file_path; its return
    # value was previously assigned and immediately discarded.
    self.n.downloadFile(self.current_path + path, tmp_file_path)
    # FIX: close the temp file handle -- the original opened it and never
    # closed it.
    with open(tmp_file_path, 'r') as tmp_file:
        self.stdout.write(tmp_file.read())
    self.stdout.write("\n")
python
{ "resource": "" }
q41620
NdriveTerm.do_mkdir
train
def do_mkdir(self, path):
    """create a new directory"""
    target = path[0]
    self.n.makeDirectory(self.current_path + target)
    # Refresh the cached directory listing used for tab completion.
    self.dirs = self.dir_complete()
python
{ "resource": "" }
q41621
NdriveTerm.do_rm
train
def do_rm(self, path):
    """delete a file or directory"""
    # FIX: the docstring previously appeared after the first statement, so
    # it was a dead string expression rather than the method's docstring
    # (and invisible to the cmd help system).
    path = path[0]
    self.n.delete(self.current_path + path)
    self.dirs = self.dir_complete()
    self.files = self.file_complete()
python
{ "resource": "" }
q41622
NdriveTerm.do_account_info
train
def do_account_info(self, line=''):
    """display account information"""
    # FIX: cmd.Cmd always passes the remainder of the command line to do_*
    # handlers; without this parameter dispatch raised TypeError. The
    # default keeps direct calls backward compatible.
    s, metadata = self.n.getRegisterUserInfo()
    pprint.PrettyPrinter(indent=2).pprint(metadata)
python
{ "resource": "" }
q41623
NdriveTerm.do_get
train
def do_get(self, from_path, to_path):
    """
    Copy file from Ndrive to a local file.

    Examples:
    Ndrive> get file.txt ~/ndrive-file.txt
    """
    # FIX: download directly to the user-expanded destination. The
    # original opened ``to_path`` for writing (leaking the handle, never
    # using it) and then passed the unexpanded path to downloadFile, so
    # "~" destinations were written literally.
    dest = os.path.expanduser(to_path)
    self.n.downloadFile(self.current_path + "/" + from_path, dest)
python
{ "resource": "" }
q41624
NdriveTerm.do_put
train
def do_put(self, from_path, to_path):
    """
    Copy a local file to Ndrive.

    Examples:
    Ndrive> put ~/test.txt ndrive-copy-test.txt
    """
    # FIX: upload the opened local file to the remote destination.
    # Previously the handle was opened, leaked, and never passed to put(),
    # and the remote path was built from from_path instead of to_path.
    # NOTE(review): assumes Ndrive.put(file_obj, remote_path) -- confirm
    # against the ndrive client API.
    with open(os.path.expanduser(from_path), "rb") as from_file:
        self.n.put(from_file, self.current_path + "/" + to_path)
python
{ "resource": "" }
q41625
NdriveTerm.do_search
train
def do_search(self, string):
    """Search Ndrive for filenames containing the given string."""
    hits = self.n.doSearch(string, full_path=self.current_path)
    if hits:
        for hit in hits:
            self.stdout.write("%s\n" % hit['path'])
python
{ "resource": "" }
q41626
tr
train
def tr(string1, string2, source, option=''):
    """Replace or remove specific characters.

    If no option is given, replace all characters in string1 with the
    character in the same position in string2.

    Following options are available:
        c   Replace all complemented characters in string1 with the
            character in the same position in string2.
        d   Delete all characters in string1.
        s   Squeeze all characters in string1.
        cs  Squeeze all the characters in string2 besides "c" replacement.
        ds  Delete all characters in string1. Squeeze all characters in
            string2.
        cd  Delete all complemented characters in string1.

    Params:
        <unicode> string1
        <unicode> string2
        <unicode> source
        <basestring> option
    Return:
        <unicode> translated_source
    """
    if not is_valid_type(source):
        raise TypeError('source must be unicode')
    from_list = make_char_list(string1)
    if option == 's':
        # Squeeze-only: collapse runs of the string1 characters.
        from_list = to_unichr(from_list)
        return squeeze(from_list, source)
    elif 'c' in option:
        # Complement mode: operate on every source character NOT in string1.
        from_list = to_unichr(from_list)
        from_list = [ord(c) for c in set(source) - set(from_list)]
        if 'd' in option:
            # 'cd': delete the complemented characters.
            to_list = [None for i in from_list]
        else:
            # 'c'/'cs': map every complemented character to string2's last char.
            to_list = [string2[-1] for i in from_list]
        source = translate(from_list, to_list, source)
        if 's' in option:
            source = squeeze(to_list, source)
        return source
    elif 'd' in option:
        # Delete mode: remove all string1 characters.
        to_list = [None for i in from_list]
        source = translate(from_list, to_list, source)
        if 's' in option:
            # 'ds': additionally squeeze the string2 characters.
            to_list = make_char_list(string2)
            to_list = to_unichr(to_list)
            source = squeeze(to_list, source)
        return source
    else:
        # Plain positional replacement of string1 chars by string2 chars.
        to_list = make_char_list(string2)
        length_diff = (len(from_list) - len(to_list))
        if length_diff:
            # Pad string2's mapping with its last character when string1 is
            # longer. NOTE(review): a negative difference (string2 longer)
            # is also truthy but the repeat then extends by nothing.
            to_list += [to_list[-1]] * length_diff
        to_list = to_unichr(to_list)
        return translate(from_list, to_list, source)
python
{ "resource": "" }
q41627
DataService.upload
train
def upload(self, filepath, service_path, remove=False):
    '''
    "Upload" a file to a service

    This copies a file from the local filesystem into the
    ``DataService``'s filesystem. If ``remove==True``, the file is moved
    rather than copied. If ``filepath`` and ``service_path`` paths are the
    same, ``upload`` deletes the file if ``remove==True`` and returns.

    Parameters
    ----------
    filepath : str
        Relative or absolute path to the file to be uploaded on the
        user's filesystem
    service_path: str
        Path to the destination for the file on the ``DataService``'s
        filesystem
    remove : bool
        If true, the file is moved rather than copied
    '''
    # Source directory as a pyfilesystem object; the file itself is
    # addressed by its basename within it.
    local = OSFS(os.path.dirname(filepath))

    # Skip if source and dest are the same
    if self.fs.hassyspath(service_path) and (
            self.fs.getsyspath(service_path) == local.getsyspath(
                os.path.basename(filepath))):
        if remove:
            os.remove(filepath)
        return

    # Ensure the destination directory exists before copying/moving.
    if not self.fs.isdir(fs.path.dirname(service_path)):
        self.fs.makedir(
            fs.path.dirname(service_path),
            recursive=True,
            allow_recreate=True)

    if remove:
        fs.utils.movefile(
            local, os.path.basename(filepath), self.fs, service_path)
    else:
        fs.utils.copyfile(
            local, os.path.basename(filepath), self.fs, service_path)
python
{ "resource": "" }
q41628
DataArchive.get_version_path
train
def get_version_path(self, version=None):
    '''
    Returns a storage path for the archive and version

    Versioned archives store each version as a file named after the
    resolved version number inside the archive directory; unversioned
    archives use the archive path itself as the file path.

    Parameters
    ----------
    version : str or object
        Version number to use as file name on versioned archives
        (default latest unless ``default_version`` set)
    '''
    resolved = _process_version(self, version)
    if not self.versioned:
        return self.archive_path
    return fs.path.join(self.archive_path, str(resolved))
python
{ "resource": "" }
q41629
DataArchive.update
train
def update(
        self, filepath, cache=False, remove=False, bumpversion=None,
        prerelease=None, dependencies=None, metadata=None, message=None):
    '''
    Enter a new version to a DataArchive

    Parameters
    ----------
    filepath : str
        The path to the file on your local file system
    cache : bool
        Turn on caching for this archive if not already on before update
    remove : bool
        removes a file from your local directory
    bumpversion : str
        Version component to update on write if archive is versioned.
        Valid bumpversion values are 'major', 'minor', and 'patch',
        representing the three components of the strict version numbering
        system (e.g. "1.2.3"). If bumpversion is None the version number
        is not updated on write. Either bumpversion or prerelease (or
        both) must be a non-None value. If the archive is not versioned,
        bumpversion is ignored.
    prerelease : str
        Prerelease component of archive version to update on write if
        archive is versioned. Valid prerelease values are 'alpha' and
        'beta'. Either bumpversion or prerelease (or both) must be a
        non-None value. If the archive is not versioned, prerelease is
        ignored.
    dependencies : dict
        Dependencies to record with the new version.
    metadata : dict
        Updates to archive metadata. Pass {key: None} to remove a key
        from the archive's metadata.
    message : str
        Commit message to record with the new version.
    '''
    if metadata is None:
        metadata = {}
    latest_version = self.get_latest_version()
    hashval = self.api.hash_file(filepath)
    checksum = hashval['checksum']
    algorithm = hashval['algorithm']
    # Short-circuit: if the file content is unchanged, only update the
    # metadata (and optionally remove the local copy) -- no new version.
    if checksum == self.get_latest_hash():
        self.update_metadata(metadata)
        if remove and os.path.isfile(filepath):
            os.remove(filepath)
        return
    if self.versioned:
        if latest_version is None:
            latest_version = BumpableVersion()
        next_version = latest_version.bump(
            kind=bumpversion, prerelease=prerelease, inplace=False)
    else:
        next_version = None
    next_path = self.get_version_path(next_version)
    if cache:
        self.cache(next_version)
    if self.is_cached(next_version):
        # Upload to the authority first, then refresh the cache; the local
        # file is removed only after the cache upload when requested.
        self.authority.upload(filepath, next_path)
        self.api.cache.upload(filepath, next_path, remove=remove)
    else:
        self.authority.upload(filepath, next_path, remove=remove)
    # Record the new version with the archive manager.
    self._update_manager(
        archive_metadata=metadata,
        version_metadata=dict(
            checksum=checksum,
            algorithm=algorithm,
            version=next_version,
            dependencies=dependencies,
            message=message))
python
{ "resource": "" }
q41630
DataArchive._get_default_dependencies
train
def _get_default_dependencies(self): ''' Get default dependencies for archive Get default dependencies from requirements file or (if no requirements file) from previous version ''' # Get default dependencies from requirements file default_dependencies = { k: v for k, v in self.api.default_versions.items() if k != self.archive_name} # If no requirements file or is empty: if len(default_dependencies) == 0: # Retrieve dependencies from last archive record history = self.get_history() if len(history) > 0: default_dependencies = history[-1].get('dependencies', {}) return default_dependencies
python
{ "resource": "" }
q41631
DataArchive.download
train
def download(self, filepath, version=None):
    '''
    Downloads a file from authority to local path

    1. First checks in cache to check if file is there and if it is, is
       it up to date
    2. If it is not up to date, it will download the file to cache
    '''
    version = _process_version(self, version)
    dirname, filename = os.path.split(
        os.path.abspath(os.path.expanduser(filepath)))
    # NOTE(review): assert is stripped under "python -O"; an explicit
    # raise would be more robust for input validation.
    assert os.path.isdir(dirname), 'Directory not found: "{}"'.format(
        dirname)
    local = OSFS(dirname)
    version_hash = self.get_version_hash(version)

    # version_check returns true if fp's hash is current as of read
    def version_check(chk):
        return chk['checksum'] == version_hash

    # Skip the download entirely when the local file already matches the
    # requested version's hash.
    if os.path.exists(filepath):
        if version_check(self.api.hash_file(filepath)):
            return
    read_path = self.get_version_path(version)
    # _choose_read_fs prefers the cache when it holds an up-to-date copy,
    # otherwise falls back to the authority.
    with data_file._choose_read_fs(
            self.authority, self.api.cache, read_path,
            version_check, self.api.hash_file) as read_fs:
        fs.utils.copyfile(read_fs, read_path, local, filename)
python
{ "resource": "" }
q41632
DataArchive.delete
train
def delete(self):
    '''
    Delete the archive

    .. warning::

        Deleting an archive will erase all data and metadata permanently.
        For help setting user permissions, see
        :ref:`Administrative Tools <admin>`
    '''
    versions = self.get_versions()
    # Drop the manager record first, then remove every stored version
    # from the authority and (if configured) the cache.
    self.api.manager.delete_archive_record(self.archive_name)
    for version in versions:
        if self.authority.fs.exists(self.get_version_path(version)):
            self.authority.fs.remove(self.get_version_path(version))
        if self.api.cache:
            if self.api.cache.fs.exists(self.get_version_path(version)):
                self.api.cache.fs.remove(self.get_version_path(version))
    # Finally remove the archive directory itself from both stores.
    if self.authority.fs.exists(self.archive_name):
        self.authority.fs.removedir(self.archive_name)
    if self.api.cache:
        if self.api.cache.fs.exists(self.archive_name):
            self.api.cache.fs.removedir(self.archive_name)
python
{ "resource": "" }
q41633
DataArchive.isfile
train
def isfile(self, version=None, *args, **kwargs):
    '''
    Check whether the path exists and is a file

    Parameters
    ----------
    version : str or object
        Version to resolve the storage path for (default latest)

    Returns
    -------
    bool
        True if the resolved version path exists and is a file
    '''
    version = _process_version(self, version)
    path = self.get_version_path(version)
    # FIX: the result was previously computed and discarded, so this
    # method always returned None.
    return self.authority.fs.isfile(path, *args, **kwargs)
python
{ "resource": "" }
q41634
DataArchive.add_tags
train
def add_tags(self, *tags):
    '''
    Set tags for a given archive

    Tags are normalized by the manager before being attached to the
    archive record.
    '''
    normalized = self.api.manager._normalize_tags(tags)
    self.api.manager.add_tags(self.archive_name, normalized)
python
{ "resource": "" }
q41635
open
train
def open(
    file,
    mode="r",
    buffering=-1,
    encoding=None,
    errors=None,
    newline=None,
    closefd=True,
    opener=None,
):
    r"""Open *file* and return an async-wrapped stream. Raise OSError upon failure.

    All parameters carry exactly the same meaning as in the built-in
    ``open()`` / ``io.open()``; see the standard library documentation
    for the full description of *mode*, *buffering*, *encoding*,
    *errors*, *newline*, *closefd* and *opener*.

    The underlying synchronous stream is created with ``io.open()`` and
    then wrapped via :func:`wrap_file`, so the concrete type of the
    returned object depends on the mode exactly as it does for the
    built-in ``open()`` (text modes yield a wrapped TextIOWrapper,
    binary modes a wrapped BufferedReader/BufferedWriter/BufferedRandom,
    unbuffered binary a wrapped FileIO).
    """
    stream = sync_io.open(
        file,
        mode,
        buffering,
        encoding,
        errors,
        newline,
        closefd,
        opener,
    )
    return wrap_file(stream)
python
{ "resource": "" }
q41636
wrap_file
train
def wrap_file(file_like_obj):
    """Wrap a file like object in an async stream wrapper.

    Files generated with `open()` may be one of several types.  This
    convenience function returns the stream wrapped in the most
    appropriate wrapper for the type.  If the stream is already wrapped
    it is returned unaltered.
    """
    if isinstance(file_like_obj, AsyncIOBaseWrapper):
        return file_like_obj

    # Order matters: BufferedRandom subclasses both BufferedReader and
    # BufferedWriter, so it must be matched before either of them.
    dispatch = (
        (sync_io.FileIO, AsyncFileIOWrapper),
        (sync_io.BufferedRandom, AsyncBufferedRandomWrapper),
        (sync_io.BufferedReader, AsyncBufferedReaderWrapper),
        (sync_io.BufferedWriter, AsyncBufferedWriterWrapper),
        (sync_io.TextIOWrapper, AsyncTextIOWrapperWrapper),
    )
    for stream_type, wrapper in dispatch:
        if isinstance(file_like_obj, stream_type):
            return wrapper(file_like_obj)

    raise TypeError(
        'Unrecognized file stream type {}.'.format(file_like_obj.__class__),
    )
python
{ "resource": "" }
q41637
StringIO
train
def StringIO(*args, **kwargs):
    """Create an in-memory text stream wrapped in the async interface.

    Arguments are passed straight through to ``io.StringIO``.
    """
    return AsyncStringIOWrapper(sync_io.StringIO(*args, **kwargs))
python
{ "resource": "" }
q41638
BytesIO
train
def BytesIO(*args, **kwargs):
    """Create an in-memory bytes stream wrapped in the async interface.

    Arguments are passed straight through to ``io.BytesIO``.
    """
    return AsyncBytesIOWrapper(sync_io.BytesIO(*args, **kwargs))
python
{ "resource": "" }
q41639
AsyncFileIOWrapper.seek
train
async def seek(self, pos, whence=sync_io.SEEK_SET):
    """Move to new file position.

    Argument offset is a byte count.  Optional argument whence defaults
    to SEEK_SET or 0 (offset from start of file, offset
    should be >= 0); other values are SEEK_CUR or 1 (move
    relative to current position, positive or negative), and SEEK_END
    or 2 (move relative to end of file, usually negative, although
    many platforms allow seeking beyond the end of a file).

    Note that not all file objects are seekable.
    """
    # Delegates synchronously to the wrapped stream; declared ``async``
    # only so the call site matches the coroutine-based wrapper API.
    return self._stream.seek(pos, whence)
python
{ "resource": "" }
q41640
QueryManager._join_gene
train
def _join_gene(query, gene_name, gene_symbol, gene_id):
    """helper function to add a query join to Gene model

    :param `sqlalchemy.orm.query.Query` query: SQL Alchemy query
    :param str gene_name: gene name
    :param str gene_symbol: gene symbol
    :param int gene_id: NCBI Gene identifier
    :return: `sqlalchemy.orm.query.Query` object
    """
    # Join the Gene table whenever *any* Gene column is filtered on.
    # Previously a gene_id-only query filtered on models.Gene without
    # joining it, producing an implicit cross join in SQLAlchemy.
    if gene_name or gene_symbol or gene_id:
        query = query.join(models.Gene)

    if gene_symbol:
        query = query.filter(models.Gene.gene_symbol.like(gene_symbol))

    if gene_name:
        query = query.filter(models.Gene.gene_name.like(gene_name))

    if gene_id:
        query = query.filter(models.Gene.gene_id.like(gene_id))

    return query
python
{ "resource": "" }
q41641
QueryManager.actions
train
def actions(self):
    """Return the list of allowed action type names.

    :rtype: list[str]
    """
    rows = self.session.query(models.Action).all()
    return [row.type_name for row in rows]
python
{ "resource": "" }
q41642
S3UploadFormView.get_validate_upload_form_kwargs
train
def get_validate_upload_form_kwargs(self):
    """
    Build the keyword arguments for instantiating the form that
    validates the upload.

    The storage/key configuration comes from the view's accessor
    methods.  ``data`` mirrors what is provided either by a POST from
    the JavaScript (DropZone form) or by querystrings on a redirect
    GET request from Amazon.
    """
    return {
        'storage': self.get_storage(),
        'upload_to': self.get_upload_to(),
        'content_type_prefix': self.get_content_type_prefix(),
        'process_to': self.get_process_to(),
        'processed_key_generator': self.get_processed_key_generator(),
        'data': {
            'bucket_name': self._get_bucket_name(),
            'key_name': self._get_key_name(),
            'etag': self._get_etag(),
        },
    }
python
{ "resource": "" }
q41643
SoundService.play
train
def play(state):
    """
    Play sound for a given state.

    :param state: a State value.  Unknown states play nothing.
    """
    sound_files = {
        SoundService.State.welcome: "pad_glow_welcome1.wav",
        SoundService.State.goodbye: "pad_glow_power_off.wav",
        SoundService.State.hotword_detected: "pad_soft_on.wav",
        SoundService.State.asr_text_captured: "pad_soft_off.wav",
        SoundService.State.error: "music_marimba_error_chord_2x.wav",
    }
    filename = sound_files.get(state)
    if filename is not None:
        AudioPlayer.play_async("{}/{}".format(ABS_SOUND_DIR, filename))
python
{ "resource": "" }
q41644
get_readme
train
def get_readme():
    """Generate the long description for packaging.

    Prefers converting README.md to reStructuredText with pandoc
    (searched for on PATH); falls back to the raw Markdown contents when
    pandoc is unavailable or the conversion fails.

    :returns: the long description text
    :rtype: str
    """
    pandoc = None
    for path in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(path.strip('"'), 'pandoc')
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            pandoc = candidate
            break
    try:
        if not pandoc:
            # Preserved control flow: jump to the Markdown fallback.
            raise ValueError('pandoc not found on PATH')
        # Close the pipe deterministically (the original leaked it).
        with os.popen('%s -t rst README.md' % pandoc) as pipe:
            long_description = pipe.read()
    except Exception:
        # Narrowed from BaseException so Ctrl-C still interrupts.
        with open("README.md") as readme:
            long_description = readme.read()
    return long_description
python
{ "resource": "" }
q41645
_casual_timedelta_string
train
def _casual_timedelta_string(meeting):
    """ Return a casual timedelta string.

    Only the largest non-zero unit is reported: a meeting starting in
    2 hours, 15 minutes and 32 seconds is "in 2 hours"; 7 minutes and
    40 seconds away is "in 7 minutes"; anything under a minute is
    "right now".
    """
    starts_at = datetime.datetime.strptime(
        "%s %s" % (meeting['meeting_date'], meeting['meeting_time_start']),
        "%Y-%m-%d %H:%M:%S")
    delta = dateutil.relativedelta.relativedelta(
        starts_at, datetime.datetime.utcnow())

    for unit in ('years', 'months', 'days', 'hours', 'minutes'):
        amount = getattr(delta, unit)
        if not amount:
            continue
        # Strip the plural 's' when the amount is exactly one.
        label = unit[:-1] if amount == 1 else unit
        return "in %i %s" % (amount, label)

    return "right now"
python
{ "resource": "" }
q41646
repo_name
train
def repo_name(msg):
    """
    Compat util to get the repo name from a message.

    Supports both the current layout (a ``path`` on the commit) and the
    legacy layout (a plain ``repo`` name).
    """
    commit = msg['msg']['commit']
    try:
        # Current layout: derive the project from the on-disk repo path.
        return commit['path'].split('.git')[0][9:]
    except KeyError:
        # Legacy layout carried the repo name directly.
        return commit['repo']
python
{ "resource": "" }
q41647
Account.usage
train
def usage(self, period=None, start=None, end=None):
    """ Get account usage information

    :param period: one of ``hourly``, ``daily`` or ``monthly``
    :type period: str
    :param start: start of the usage window
    :type start: int
    :param end: end of the usage window
    :type end: int
    :return: dict of REST API output with headers attached
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
             :class:`requests.exceptions.HTTPError`
    """
    options = (('period', period), ('start', start), ('end', end))
    params = {key: value for key, value in options if value}
    return self.request.get('usage', params)
python
{ "resource": "" }
q41648
bmrblex
train
def bmrblex(text):
    """A lexical analyzer for the BMRB NMR-STAR format syntax.

    Implemented as a generator running a character-at-a-time state
    machine with one character of lookahead (``nextnextchar``).

    :param text: Input text.
    :type text: :py:class:`str` or :py:class:`bytes`
    :return: Current token.
    :rtype: :py:class:`str`
    """
    stream = transform_text(text)

    wordchars = (u"abcdfeghijklmnopqrstuvwxyz"
                 u"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_"
                 u"ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ"
                 u"ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ"
                 u"!@$%^&*()_+:;?/>.<,~`|\{[}]-=")
    whitespace = u" \t\v\r\n"
    comment = u"#"
    # States: " " between tokens, "a" inside a word, "'"/'"' inside a
    # quoted token, "#" inside a comment, None at end of input.
    state = u" "
    token = u""
    single_line_comment = u""

    while len(stream) > 0:
        nextnextchar = stream.popleft()

        while True:
            nextchar = nextnextchar
            if len(stream) > 0:
                nextnextchar = stream.popleft()
            else:
                nextnextchar = u""

            # Process multiline string, comment, or single line comment
            # (transform_text pre-collapses those into multi-char items,
            # so any entry longer than one char is already a token).
            if len(nextchar) > 1:
                state = u" "
                token = nextchar
                break  # emit current token

            elif nextchar in whitespace and nextnextchar in comment and state not in (u"'", u'"'):
                single_line_comment = u""
                state = u"#"

            if state is None:
                token = u""  # past end of file
                break

            elif state == u" ":
                if not nextchar:
                    state = None
                    break
                elif nextchar in whitespace:
                    if token:
                        state = u" "
                        break  # emit current token
                    else:
                        continue
                elif nextchar in wordchars:
                    token = nextchar
                    state = u"a"
                elif nextchar == u"'" or nextchar == u'"':
                    token = nextchar
                    state = nextchar
                else:
                    token = nextchar
                    if token:
                        state = u" "
                        break  # emit current token
                    else:
                        continue

            # Process single-quoted or double-quoted token
            elif state == u"'" or state == u'"':
                token += nextchar
                # A closing quote only terminates the token when it is
                # followed by whitespace.
                if nextchar == state:
                    if nextnextchar in whitespace:
                        state = u" "
                        token = token[1:-1]  # remove single or double quotes from the ends
                        break

            # Process single line comment
            elif state == u"#":
                single_line_comment += nextchar
                if nextchar == u"\n":
                    state = u" "
                    break

            # Process regular (unquoted) token
            elif state == u"a":
                if not nextchar:
                    state = None
                    break
                elif nextchar in whitespace:
                    state = u" "
                    if token:
                        break  # emit current token
                    else:
                        continue
                else:
                    token += nextchar

        # Put the unconsumed lookahead character back before yielding.
        if nextnextchar:
            stream.appendleft(nextnextchar)

        yield token
        token = u""
python
{ "resource": "" }
q41649
check_dependencies
train
def check_dependencies():
    """Check external dependencies.

    Probes for the ``ebook-convert`` (Calibre) and ``pandoc`` commands
    and returns the list of available generator names.  Exits the
    program when neither is installed.
    """
    available = []
    try:
        shell('ebook-convert')
        available.append('calibre')
    except OSError:
        # Command not found: Calibre is simply unavailable.
        pass
    try:
        shell('pandoc --help')
        available.append('pandoc')
    except OSError:
        pass
    if not available:
        sys.exit(error('No generator found, you cannot use md2ebook.'))
    # epubcheck is optional and handled (warned about) separately.
    check_dependency_epubcheck()
    return available
python
{ "resource": "" }
q41650
MongoLock.lock
train
def lock(self, key, owner, timeout=None, expire=None):
    """Lock given `key` to `owner`.

    :Parameters:
      - `key` - lock name
      - `owner` - name of application/component/whatever which asks for lock
      - `timeout` (optional) - how long to wait if `key` is locked
      - `expire` (optional) - when given, lock will be released after
        that number of seconds.

    Returns ``True`` when the lock is acquired, ``False`` when it could
    not be acquired within `timeout` (or immediately, when no timeout
    is given).
    """
    expire = datetime.utcnow() + timedelta(seconds=expire) if expire else None
    try:
        # Atomic acquisition: _id is unique, so inserting the lock
        # document succeeds for exactly one contender.
        self.collection.insert({
            '_id': key,
            'locked': True,
            'owner': owner,
            'created': datetime.utcnow(),
            'expire': expire
        })
        return True
    except DuplicateKeyError:
        # Someone else holds the lock: poll until we can steal an
        # expired lock or the timeout elapses.
        start_time = datetime.utcnow()
        while True:
            if self._try_get_lock(key, owner, expire):
                return True

            if not timeout or datetime.utcnow() >= start_time + timedelta(seconds=timeout):
                return False
            time.sleep(self.acquire_retry_step)
python
{ "resource": "" }
q41651
MongoLock.touch
train
def touch(self, key, owner, expire=None):
    """Renew a held lock's expiry so it is not released early.

    Locks created without an expiry are left untouched.  Raises
    ``MongoLockException`` when the lock cannot be found, or when an
    expiring lock is touched without a new ``expire`` value.
    """
    lock = self.collection.find_one({'_id': key, 'owner': owner})

    if not lock:
        raise MongoLockException(u'Can\'t find lock for {key}: {owner}'.format(key=key, owner=owner))
    if not lock['expire']:
        return
    if not expire:
        raise MongoLockException(u'Can\'t touch lock without expire for {0}: {1}'.format(key, owner))

    new_expire = datetime.utcnow() + timedelta(seconds=expire)
    self.collection.update(
        {'_id': key, 'owner': owner},
        {'$set': {'expire': new_expire}},
    )
python
{ "resource": "" }
q41652
PartialRequest.build_response
train
def build_response(self, response, path=None, parser=json_decode_wrapper, async=False):
    """ Builds a List or Dict response object.

    Wrapper for a response from the DataSift REST API, can be
    accessed as a list.

    NOTE: the ``async`` parameter name is a reserved keyword from
    Python 3.7 onwards; this code targets older interpreters.

    :param response: HTTP response to wrap
    :type response: :class:`~datasift.requests.DictResponse`
    :param parser: optional parser to overload how the data is loaded
    :type parser: func
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
        :class:`~datasift.exceptions.DataSiftApiFailure`,
        :class:`~datasift.exceptions.AuthException`,
        :class:`requests.exceptions.HTTPError`,
        :class:`~datasift.exceptions.RateLimitException`
    """
    if async:
        # Defer processing: attach a callable that re-enters this method
        # synchronously once the future's result is available.
        response.process = lambda: self.build_response(response.result(), path=path, parser=parser, async=False)
        return response
    if response.status_code != 204:
        try:
            data = parser(response.headers, response.text)
        except ValueError as e:
            raise DataSiftApiFailure(u"Unable to decode returned data: %s" % e)
        if "error" in data:
            if response.status_code == 401:
                raise AuthException(data)
            if response.status_code == 403 or response.status_code == 429:
                # Distinguish a genuine rate-limit hit (cost exceeds the
                # remaining allowance) from other 403/429 errors.
                if not response.headers.get("X-RateLimit-Cost"):
                    raise DataSiftApiException(DictResponse(response, data))
                if int(response.headers.get("X-RateLimit-Cost")) > int(response.headers.get("X-RateLimit-Remaining")):
                    raise RateLimitException(DictResponse(response, data))
            raise DataSiftApiException(DictResponse(response, data))
        response.raise_for_status()
        if isinstance(data, dict):
            r = DictResponse(response, data)
        elif isinstance(data, (list, map)):
            r = ListResponse(response, data)
        self.outputmapper.outputmap(r)
        return r
    else:
        # empty dict
        return DictResponse(response, {})
python
{ "resource": "" }
q41653
ControlCluster.compile_instance_masks
train
def compile_instance_masks(cls):
    """
    Compiles instance masks into a master mask that is usable by the
    IO expander. Also determines whether or not the pump should be on.
    Method is generalized to support multiple IO expanders for possible
    future expansion.
    """
    # Compute required # of IO expanders needed, clear mask variable.
    # Use integer (floor) division: under Python 3 ``/`` yields a float
    # and ``[0, 0] * float`` raises TypeError.
    number_IO_expanders = ((len(cls._list) - 1) // 4) + 1
    cls.master_mask = [0, 0] * number_IO_expanders

    for ctrlobj in cls:
        # Or masks together bank-by-bank.
        cls.master_mask[ctrlobj.bank] |= ctrlobj.mask
        # Handle the pump request seperately: any instance asking for
        # water turns the shared pump bit on.
        if ctrlobj.pump_request == 1:
            cls.master_mask[cls.pump_bank] |= 1 << cls.pump_pin
python
{ "resource": "" }
q41654
ControlCluster.update
train
def update(self):
    """
    This method exposes a more simple interface to the IO module

    Regardless of what the control instance contains, this method will
    transmit the queued IO commands to the IO expander

    Usage:
        plant1Control.update(bus)
    """
    # Recompute the class-wide master masks from all instances first.
    ControlCluster.compile_instance_masks()
    IO_expander_output(
        ControlCluster.bus,
        self.IOexpander,
        self.bank,
        ControlCluster.master_mask[self.bank])
    # The pump lives on a shared bank; when this instance's bank is a
    # different one, push the pump bank's mask as well.
    if self.bank != ControlCluster.pump_bank:
        IO_expander_output(
            ControlCluster.bus,
            self.IOexpander,
            ControlCluster.pump_bank,
            ControlCluster.master_mask[ControlCluster.pump_bank])
python
{ "resource": "" }
q41655
ControlCluster.form_GPIO_map
train
def form_GPIO_map(self):
    """
    Build the dictionary mapping this plant ID to its GPIO pins.

    IDs are associated in triples: each ID gets a light, a fan, and a
    mist nozzle (valve) on a specific bank of a specific IO expander.
    Raises InvalidIOMap for unknown IDs or reserved-pin conflicts.
    """
    # ID -> (IO expander address, bank, fan pin, light pin, valve pin)
    pin_table = {
        1: (0x20, 0, 2, 3, 4),
        2: (0x20, 0, 5, 6, 7),
        3: (0x20, 1, 0, 1, 2),
        4: (0x20, 1, 3, 5, 6),
    }
    if self.ID not in pin_table:
        raise InvalidIOMap(
            "Mapping not available for ID: " + str(self.ID))
    (self.IOexpander, self.bank,
     self.fan, self.light, self.valve) = pin_table[self.ID]

    # Pins A0 and A1 on bank 0 are reserved for other functions.
    if (self.bank == 0) and (min(self.fan, self.light, self.valve) < 2):
        raise InvalidIOMap(
            "Pins A0 and A1 are reserved for other functions")

    self.GPIO_dict = [{'ID': self.ID,
                       'bank': self.bank,
                       'fan': self.fan,
                       'valve': self.valve,
                       'light': self.light}]

    # Register this instance's mapping on the class-wide dictionary.
    ControlCluster.GPIOdict.append(self.GPIO_dict)
python
{ "resource": "" }
q41656
ControlCluster.manage_pump
train
def manage_pump(self, operation):
    """
    Updates control module knowledge of pump requests. If any sensor
    module requests water, the pump will turn on.

    :param operation: "on" or "off"; any other value leaves the
        recorded pump state unchanged.
    :returns: True (always)
    """
    if operation == "on":
        self.controls["pump"] = "on"
    elif operation == "off":
        self.controls["pump"] = "off"
    return True
python
{ "resource": "" }
q41657
ControlCluster.control
train
def control(self, on=[], off=[]):
    """
    Primary interaction point for the controls interface.

    ``on`` and ``off`` each accept a single device name, the string
    "all", or a list of names; unknown names are silently ignored.
    Known devices are "light", "valve", "fan" and "pump".  Both
    arguments are optional.

    Usage::

        ctrlobj.control(off="all")
        ctrlobj.control(on="all")
        ctrlobj.control(on=["light", "fan"])
        ctrlobj.control(on="light", off="fan")
    """
    valid = {"light", "valve", "fan", "pump"}

    def normalize(arg):
        # A bare string is either the "all" shortcut or a single device.
        if type(arg) is str:
            return valid if arg == "all" else {arg} & valid
        return set(arg) & valid

    for device in normalize(on):
        self.manage(device, "on")
    for device in normalize(off):
        self.manage(device, "off")

    sleep(.01)  # Force delay to throttle requests
    return self.update()
python
{ "resource": "" }
q41658
ControlCluster.restore_state
train
def restore_state(self):
    """
    Method should be called on obj. initialization

    When called, the method will attempt to restore IO expander and
    RPi coherence and restore local knowledge across a possible
    power failure
    """
    # Read back the expander's current output register for our bank and
    # re-record which devices are already on.
    current_mask = get_IO_reg(ControlCluster.bus, self.IOexpander, self.bank)
    if current_mask & (1 << ControlCluster.pump_pin):
        self.manage_pump("on")
    if current_mask & (1 << self.fan):
        self.manage_fan("on")
    if current_mask & (1 << self.light):
        # NOTE(review): this branch tests the *light* bit yet calls
        # manage_fan — looks like a copy-paste bug; should this be
        # manage_light (or self.manage("light", "on"))? Confirm.
        self.manage_fan("on")
python
{ "resource": "" }
q41659
_key
train
def _key(key=''):
    '''
    Returns a Datastore key object, prefixed with the NAMESPACE.
    '''
    if isinstance(key, datastore.Key):
        return key
    # Switchboard uses ':' to denote one thing (parent-child) and the
    # datastore uses it for another, so store '|' in its place.
    safe_key = key.replace(':', '|')
    return datastore.Key(os.path.join(NAMESPACE, safe_key))
python
{ "resource": "" }
q41660
Model.update
train
def update(cls, spec, updates, upsert=False):
    '''
    Find-and-modify helper: ``spec`` locates the record, ``updates``
    holds the new field values, and ``upsert`` creates the record from
    ``spec`` when no existing record is found.

    Returns the updated (or newly created) instance, or None when
    nothing matched and ``upsert`` is False.
    '''
    previous = cls.get(spec['key']) if 'key' in spec else None

    if previous:
        # Update existing data.
        current = cls(**previous.__dict__)
    elif upsert:
        # Create new data.
        current = cls(**spec)
    else:
        # XXX Should there be any error thrown if this is a noop?
        current = None

    if current:
        current.__dict__.update(updates)
        current.save()
    return current
python
{ "resource": "" }
q41661
Model._queryless_all
train
def _queryless_all(cls):
    '''
    This is a hack because some datastore implementations don't support
    querying. Right now the solution is to drop down to the underlying
    native client and query all, which means that this section is ugly.
    If it were architected properly, you might be able to do something
    like inject an implementation of a NativeClient interface, which
    would let Switchboard users write their own NativeClient wrappers
    that implement all. However, at this point I'm just happy getting
    datastore to work, so quick-and-dirty will suffice.
    '''
    # Only the redis-backed datastore is supported: detect it by the
    # private _redis attribute and read every key directly.
    if hasattr(cls.ds, '_redis'):
        r = cls.ds._redis
        keys = r.keys()
        serializer = cls.ds.child_datastore.serializer

        def get_value(k):
            # Missing keys stay None; everything else is deserialized
            # with the datastore's own serializer.
            value = r.get(k)
            return value if value is None else serializer.loads(value)

        return [get_value(k) for k in keys]
    else:
        raise NotImplementedError
python
{ "resource": "" }
q41662
Switch.remove_condition
train
def remove_condition(self, manager, condition_set, field_name, condition, commit=True):
    """
    Removes a condition and updates the global ``operator`` switch
    manager.

    If ``commit`` is ``False``, the data will not be written to the
    database.

    >>> switch = operator['my_switch'] #doctest: +SKIP
    >>> cs_id = condition_set.get_id() #doctest: +SKIP
    >>> switch.remove_condition(cs_id, 'percent', [0, 50]) #doctest: +SKIP
    """
    # Resolve the condition-set id to its object and namespace.
    condition_set = manager.get_condition_set_by_id(condition_set)

    namespace = condition_set.get_namespace()

    if namespace not in self.value:
        return

    if field_name not in self.value[namespace]:
        return

    # Keep every stored condition whose value (index 1) differs from
    # the one being removed.
    conditions = self.value[namespace][field_name]
    self.value[namespace][field_name] = ([c for c in conditions if c[1] != condition])

    # Prune now-empty containers so the stored value stays compact.
    if not self.value[namespace][field_name]:
        del self.value[namespace][field_name]

    if not self.value[namespace]:
        del self.value[namespace]

    if commit:
        self.save()
python
{ "resource": "" }
q41663
Push.validate
train
def validate(self, output_type, output_params):
    """ Check that a subscription is defined correctly.

    Wraps http://dev.datasift.com/docs/api/rest-api/endpoints/pushvalidate

    :param output_type: One of DataSift's supported output types, e.g. s3
    :type output_type: str
    :param output_params: parameters required by *output_type*; for docs
        on all available connectors see
        http://dev.datasift.com/docs/push/connectors/
    :type output_params: str
    :returns: dict with extra response data
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
             :class:`requests.exceptions.HTTPError`
    """
    payload = {'output_type': output_type, 'output_params': output_params}
    return self.request.post('validate', payload)
python
{ "resource": "" }
q41664
Push.create_from_hash
train
def create_from_hash(self, stream, name, output_type, output_params, initial_status=None, start=None, end=None):
    """ Create a new push subscription from a live stream.

    Wraps http://dev.datasift.com/docs/api/rest-api/endpoints/pushcreate

    :param stream: hash of a DataSift stream
    :type stream: str
    :param name: name for the new subscription
    :type name: str
    :param output_type: one of the supported output types, e.g. s3
    :type output_type: str
    :param output_params: parameters required by the output type
    :type output_params: dict
    :param initial_status: active, paused or waiting_for_start
    :type initial_status: str
    :param start: optional subscription start time
    :type start: int
    :param end: optional subscription end time
    :type end: int
    :returns: dict with extra response data
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
             :class:`requests.exceptions.HTTPError`
    """
    # First argument True flags a live-stream (hash) subscription.
    return self._create(True, stream, name, output_type, output_params,
                        initial_status, start, end)
python
{ "resource": "" }
q41665
Push.create_from_historics
train
def create_from_historics(self, historics_id, name, output_type, output_params, initial_status=None, start=None, end=None):
    """ Create a new push subscription from a Historics query.

    Wraps http://dev.datasift.com/docs/api/rest-api/endpoints/pushcreate

    :param historics_id: the ID of a Historics query
    :type historics_id: str
    :param name: name for the new subscription
    :type name: str
    :param output_type: one of the supported output types, e.g. s3
    :type output_type: str
    :param output_params: parameters required by the output type, see
        dev.datasift.com
    :type output_params: dict
    :param initial_status: active, paused or waiting_for_start
    :type initial_status: str
    :param start: optional subscription start time
    :type start: int
    :param end: optional subscription end time
    :type end: int
    :returns: dict with extra response data
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
             :class:`requests.exceptions.HTTPError`
    """
    # First argument False flags a Historics-based subscription.
    return self._create(False, historics_id, name, output_type,
                        output_params, initial_status, start, end)
python
{ "resource": "" }
q41666
Push.pause
train
def pause(self, subscription_id):
    """ Pause a Subscription and buffer the data for up to one hour.

    Wraps http://dev.datasift.com/docs/api/rest-api/endpoints/pushpause

    :param subscription_id: id of an existing Push Subscription.
    :type subscription_id: str
    :returns: dict with extra response data
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
             :class:`requests.exceptions.HTTPError`
    """
    payload = {'id': subscription_id}
    return self.request.post('pause', data=payload)
python
{ "resource": "" }
q41667
Push.resume
train
def resume(self, subscription_id):
    """ Resume a previously paused Subscription.

    Wraps http://dev.datasift.com/docs/api/rest-api/endpoints/pushresume

    :param subscription_id: id of an existing Push Subscription.
    :type subscription_id: str
    :returns: dict with extra response data
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
             :class:`requests.exceptions.HTTPError`
    """
    payload = {'id': subscription_id}
    return self.request.post('resume', data=payload)
python
{ "resource": "" }
q41668
Push.update
train
def update(self, subscription_id, output_params, name=None):
    """ Update the name or output parameters for an existing Subscription.

    Wraps http://dev.datasift.com/docs/api/rest-api/endpoints/pushupdate

    :param subscription_id: id of an existing Push Subscription.
    :type subscription_id: str
    :param output_params: new output parameters for the subscription,
        see dev.datasift.com
    :type output_params: dict
    :param name: optional new name for the Subscription
    :type name: str
    :returns: dict with extra response data
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
             :class:`requests.exceptions.HTTPError`
    """
    payload = {'id': subscription_id, 'output_params': output_params}
    if name:
        payload['name'] = name
    return self.request.post('update', payload)
python
{ "resource": "" }
q41669
Push.stop
train
def stop(self, subscription_id):
    """ Stop the given Subscription from running.

        Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/pushstop

        :param subscription_id: id of an existing Push Subscription.
        :type subscription_id: str
        :returns: dict with extra response data
        :rtype: :class:`~datasift.request.DictResponse`
        :raises: :class:`~datasift.exceptions.DataSiftApiException`,
            :class:`requests.exceptions.HTTPError`
    """
    payload = {'id': subscription_id}
    return self.request.post('stop', data=payload)
python
{ "resource": "" }
q41670
Push.delete
train
def delete(self, subscription_id):
    """ Delete the Subscription with the given id.

        Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/pushdelete

        :param subscription_id: id of an existing Push Subscription.
        :type subscription_id: str
        :returns: dict with extra response data
        :rtype: :class:`~datasift.request.DictResponse`
        :raises: :class:`~datasift.exceptions.DataSiftApiException`,
            :class:`requests.exceptions.HTTPError`
    """
    payload = {'id': subscription_id}
    return self.request.post('delete', data=payload)
python
{ "resource": "" }
q41671
Push.log
train
def log(self, subscription_id=None, page=None, per_page=None, order_by=None, order_dir=None):
    """ Retrieve any messages that have been logged for your subscriptions.

        Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/pushlog

        :param subscription_id: optional id of an existing Push
            Subscription, restricts logs to a given subscription if supplied.
        :type subscription_id: str
        :param page: optional page number for pagination
        :type page: int
        :param per_page: optional number of items per page, default 20
        :type per_page: int
        :param order_by: field to order by, default request_time
        :type order_by: str
        :param order_dir: direction to order by, asc or desc, default desc
        :type order_dir: str
        :returns: dict with extra response data
        :rtype: :class:`~datasift.request.DictResponse`
        :raises: :class:`~datasift.exceptions.DataSiftApiException`,
            :class:`requests.exceptions.HTTPError`
    """
    # Only include parameters that were actually supplied (truthy values).
    optional = (
        ('id', subscription_id),
        ('page', page),
        ('per_page', per_page),
        ('order_by', order_by),
        ('order_dir', order_dir),
    )
    params = {}
    for key, value in optional:
        if value:
            params[key] = value
    return self.request.get('log', params=params)
python
{ "resource": "" }
q41672
Push.get
train
def get(self, subscription_id=None, stream=None, historics_id=None, page=None, per_page=None, order_by=None, order_dir=None, include_finished=None):
    """ Show details of the Subscriptions belonging to this user.

        Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/pushget

        :param subscription_id: optional id of an existing Push Subscription
        :type subscription_id: str
        :param stream: optional hash of a live stream
        :type stream: str
        :param historics_id: optional playback id of a Historics query
        :type historics_id: str
        :param page: optional page number for pagination
        :type page: int
        :param per_page: optional number of items per page, default 20
        :type per_page: int
        :param order_by: field to order by, default request_time
        :type order_by: str
        :param order_dir: direction to order by, asc or desc, default desc
        :type order_dir: str
        :param include_finished: boolean indicating if finished
            Subscriptions for Historics should be included
        :type include_finished: bool
        :returns: dict with extra response data
        :rtype: :class:`~datasift.request.DictResponse`
        :raises: :class:`~datasift.exceptions.DataSiftApiException`,
            :class:`requests.exceptions.HTTPError`
    """
    # Only pass along parameters that were actually supplied (truthy).
    params = {key: value for key, value in (
        ('id', subscription_id),
        ('hash', stream),
        ('historics_id', historics_id),
        ('page', page),
        ('per_page', per_page),
        ('order_by', order_by),
        ('order_dir', order_dir),
    ) if value}
    # The guard already ensures truthiness, so the flag is always sent
    # as 1 (a falsy include_finished is never transmitted).
    if include_finished:
        params['include_finished'] = 1
    return self.request.get('get', params=params)
python
{ "resource": "" }
q41673
_git_receive_v1
train
def _git_receive_v1(msg, tmpl, **config):
    ''' Return the subtitle for the first version of pagure git.receive
    messages. '''
    commit = msg['msg']['commit']
    repo = _get_project(commit, key='repo')
    email = commit['email']
    user = email2fas(email, **config)

    # Append a marker when the one-line summary is shorter than the
    # full commit message.
    subtitle = commit['summary']
    if subtitle.strip() != commit['message'].strip():
        subtitle += " (..more)"

    # Strip the refs prefix to get a plain branch name.
    branch = commit['branch']
    if 'refs/heads/' in branch:
        branch = branch.replace('refs/heads/', '')

    return tmpl.format(
        user=user or email, repo=repo, branch=branch, summary=subtitle)
python
{ "resource": "" }
q41674
_git_receive_v2
train
def _git_receive_v2(msg, tmpl):
    ''' Return the subtitle for the second version of pagure git.receive
    messages. '''
    body = msg['msg']
    n_commits = body['total_commits']
    # Pluralize unless exactly one commit was pushed.
    commit_lbl = 'commits' if str(n_commits) != '1' else 'commit'

    # Strip the refs prefix to get a plain branch name.
    branch = body['branch']
    if 'refs/heads/' in branch:
        branch = branch.replace('refs/heads/', '')

    return tmpl.format(
        user=body['agent'],
        repo=_get_project(body, key='repo'),
        branch=branch,
        n_commits=n_commits,
        commit_lbl=commit_lbl)
python
{ "resource": "" }
q41675
BaseDataManager.update
train
def update(self, archive_name, version_metadata):
    '''
    Register a new version for archive ``archive_name``

    Mutates ``version_metadata`` in place before delegating the write to
    the backend ``_update`` implementation.

    .. note ::

        need to implement hash checking to prevent duplicate writes
    '''
    # Stamp the record with the manager's current timestamp.
    version_metadata['updated'] = self.create_timestamp()
    # Coerce the version to a string. NOTE(review): when no 'version'
    # key is present this stores the literal string 'None' -- confirm
    # that is intended rather than leaving the key unset.
    version_metadata['version'] = str(
        version_metadata.get('version', None))

    # Only coerce 'message' when one was supplied.
    if version_metadata.get('message') is not None:
        version_metadata['message'] = str(version_metadata['message'])

    # Backend-specific persistence (e.g. a list append in DynamoDB).
    self._update(archive_name, version_metadata)
python
{ "resource": "" }
q41676
BaseDataManager.update_metadata
train
def update_metadata(self, archive_name, archive_metadata):
    '''
    Update metadata for archive ``archive_name``

    Raises ``ValueError`` if any required metadata attribute would be
    removed (i.e. set to ``None``).
    '''
    # Reject attempts to null out a required attribute before writing.
    for key, val in archive_metadata.items():
        if val is None and key in self.required_archive_metadata.keys():
            raise ValueError(
                'Cannot remove required metadata attribute "{}"'.format(
                    key))

    self._update_metadata(archive_name, archive_metadata)
python
{ "resource": "" }
q41677
BaseDataManager.create_archive
train
def create_archive(
        self,
        archive_name,
        authority_name,
        archive_path,
        versioned,
        raise_on_err=True,
        metadata=None,
        user_config=None,
        tags=None,
        helper=False):
    '''
    Create a new data archive

    Builds the archive metadata document, writes it through the backend
    (strictly or leniently depending on ``raise_on_err``), and returns
    the newly registered archive.

    Returns
    -------
    archive : object
        new :py:class:`~datafs.core.data_archive.DataArchive` object
    '''
    archive_metadata = self._create_archive_metadata(
        archive_name=archive_name,
        authority_name=authority_name,
        archive_path=archive_path,
        versioned=versioned,
        raise_on_err=raise_on_err,
        metadata=metadata,
        user_config=user_config,
        tags=tags,
        helper=helper)

    # Strict creation raises on a name collision; the lenient path is a
    # no-op when the archive already exists.
    if raise_on_err:
        create = self._create_archive
    else:
        create = self._create_if_not_exists
    create(archive_name, archive_metadata)

    return self.get_archive(archive_name)
python
{ "resource": "" }
q41678
BaseDataManager.get_archive
train
def get_archive(self, archive_name):
    '''
    Get a data archive given an archive name

    Returns
    -------
    archive_specification : dict
        archive_name: name of the archive to be retrieved
        authority: name of the archive's authority
        archive_path: service path of archive

    Raises
    ------
    KeyError
        if no archive named ``archive_name`` exists
    '''
    # EAFP: let the backend lookup fail, then re-raise with a clearer
    # message.
    try:
        return self._get_archive_spec(archive_name)
    except KeyError:
        raise KeyError('Archive "{}" not found'.format(archive_name))
python
{ "resource": "" }
q41679
BaseDataManager.delete_tags
train
def delete_tags(self, archive_name, tags):
    '''
    Delete tags from an archive

    Parameters
    ----------
    archive_name: str
        Name of archive

    tags: list or tuple of strings
        tags to delete from the archive
    '''
    remaining = list(self._get_tags(archive_name))

    # Drop each requested tag; list.remove only drops the first
    # occurrence, matching the original behavior.
    for tag in tags:
        try:
            remaining.remove(tag)
        except ValueError:
            pass

    self._set_tags(archive_name, remaining)
python
{ "resource": "" }
q41680
BaseDataManager._normalize_tags
train
def _normalize_tags(self, tags): ''' Coerces tags to lowercase strings Parameters ---------- tags: list or tuple of strings ''' lowered_str_tags = [] for tag in tags: lowered_str_tags.append(str(tag).lower()) return lowered_str_tags
python
{ "resource": "" }
q41681
Commander.start
train
def start(self):
    """Start the project on the directory.

    Creates (or, with --overwrite, recreates) the project directory with
    a starter book file, a build/ directory, and an interactively-filled
    book.json configuration file.

    NOTE: uses Python 2 ``print`` statements and the Python 2-only
    ``json.dump(..., encoding=...)`` signature.
    """
    bookname = self.args.get('--bookname', None)
    if not bookname:
        bookname = 'book.md'
    project_dir = self.args.get('<name>', None)
    if not project_dir:
        project_dir = join(self.cwd, 'Book')
    project_dir = abspath(project_dir)

    # create the working dir?
    if not exists(project_dir) or self.args['--overwrite']:
        if exists(project_dir):
            # Directory exists and --overwrite was given: confirm
            # before destroying it.
            if yesno(warning(
                    'Are you sure you want to remove `%s`? ' % project_dir)):
                shutil.rmtree(project_dir)
            else:
                sys.exit(error('Operation aborted'))
        os.makedirs(project_dir)
        os.makedirs(join(project_dir, 'build'))
        # Seed the project with a minimal starter book.
        with codecs.open(
                join(project_dir, bookname), 'w', encoding="utf") as fd:
            fd.write('''# This is your book

You can start it right now and publish it away!
''')

    # What shall we do with the configuration file?
    config_file = join(project_dir, 'book.json')
    rewrite_config_file = True
    if exists(config_file) and not self.args['--overwrite']:
        print('A config file already exists. This step is skipped')
        rewrite_config_file = False

    if rewrite_config_file:
        with codecs.open(config_file, 'w', encoding="utf") as fd:
            # Interactively gather the basic book metadata.
            data = {
                'files': ['%s' % bookname],
                'author': "%s" % ask("What is your name? "),
                'title': '%s' % ask("E-book title, please? "),
            }
            # Slugify the title for use as the output file root.
            data['fileroot'] = unidecode(data['title']).lower() \
                .replace(' ', '-')

            # pick a generator
            if len(self.generators) == 1:
                data['generator'] = self.generators[0]
            else:
                # Keep prompting until a known generator is chosen.
                picked_generator = None
                while not picked_generator:
                    picked_generator = ask(
                        "Which generator? [%s] " % ', '.join(self.generators)
                    )
                    if picked_generator not in self.generators:
                        print warning(
                            'Wrong answer. Please pick one on the list')
                        picked_generator = None
                # fine, we have one.
                data['generator'] = picked_generator

            json.dump(data, fd, indent=4, encoding="utf")

    # Game over
    print
    sys.exit(
        success('Now you can go to `%s` and start editing your book...'
                % project_dir))
python
{ "resource": "" }
q41682
Commander.build
train
def build(self):
    """Build your book.

    Renders the HTML, selects the configured generator backend
    (calibre or pandoc), builds the EPUB, and optionally the PDF.

    Raises ``ConfigurationError`` when the generator is unknown.
    """
    config = self.load_config()

    # HTML is always produced first; the other outputs derive from it.
    html_generator = HTMLGenerator(self.cwd, config)
    html_generator.build()

    # Command-line choice wins over the configuration file.
    generator = self.args.get('--generator', None) or \
        config.get('generator')

    backends = {
        'calibre': (CalibreEPUBGenerator, CalibrePDFGenerator),
        'pandoc': (PandocEPUBGenerator, PandocPDFGenerator),
    }
    try:
        EPUBClass, PDFClass = backends[generator]
    except KeyError:
        raise ConfigurationError(
            "Wrong configuration. Please check your config.json file.")

    # EPUB Generation
    epub_generator = EPUBClass(self.cwd, config, self.args)
    epub_generator.build()

    # Shall we proceed to the PDF?
    if config.get('pdf', False) or self.args['--with-pdf']:
        pdf_generator = PDFClass(self.cwd, config, self.args)
        pdf_generator.build()
python
{ "resource": "" }
q41683
Commander.check
train
def check(self):
    """Checks EPUB integrity.

    Runs the external ``epubcheck`` tool against the built EPUB and
    prints its errors and output. Exits if epubcheck is unavailable.

    NOTE: uses Python 2 ``print`` statements.
    """
    config = self.load_config()
    # epubcheck(1) must be installed for this command to work.
    if not check_dependency_epubcheck():
        sys.exit(error('Unavailable command.'))
    epub_file = u"%s.epub" % config['fileroot']
    epub_path = join(CWD, 'build', epub_file)
    print success("Starting to check %s..." % epub_file)
    # Shell out to epubcheck against the built file.
    epubcheck = u'epubcheck %s' % epub_path
    epubcheck = shell(epubcheck.encode())
    # Report errors first, then the regular output.
    for line in epubcheck.errors():
        print error(line)
    for line in epubcheck.output():
        print line
python
{ "resource": "" }
q41684
PostgresDatabase.create
train
def create(self, sql=None):
    """CREATE this DATABASE.

    @param sql: (Optional) A string of psql (such as might be generated
        by pg_dump); it will be executed by psql(1) after creating the
        database.
    @type sql: str

    @rtype: None
    """
    # Run the CREATE DATABASE as the superuser, owned by our user.
    stmt = 'CREATE DATABASE {self.db_name} WITH OWNER {self.user}'.format(
        self=self)
    self.super_psql(['-c', stmt])
    # Optionally load an initial schema/dump into the fresh database.
    if sql:
        self.psql_string(sql)
python
{ "resource": "" }
q41685
PostgresDatabase.psql
train
def psql(self, args):
    r"""Invoke psql, passing the given command-line arguments.

    Typical <args> values: ['-c', <sql_string>] or ['-f', <pathname>].
    Connection parameters are taken from self. STDIN, STDOUT, and
    STDERR are inherited from the parent.

    WARNING: This method uses the psql(1) program, which ignores SQL
    errors by default. That hides many real errors, making our software
    less reliable. To overcome this flaw, add this line to the head of
    your SQL: "\set ON_ERROR_STOP TRUE"

    @return: None. Raises an exception upon error, but *ignores SQL
        errors* unless "\set ON_ERROR_STOP TRUE" is used.
    """
    # Build the command line piece by piece: binary, connection
    # options, caller arguments, then the target database name.
    argv = [PostgresFinder.find_root() / 'psql', '--quiet']
    argv += ['-U', self.user, '-h', self.host, '-p', self.port]
    argv += args
    argv.append(self.db_name)
    subprocess.check_call(argv)
python
{ "resource": "" }
q41686
PostgresDatabase.sql
train
def sql(self, input_string, *args):
    """Execute a SQL command using the Python DBI directly.

    Connection parameters are taken from self. Autocommit is in effect.

    Example: .sql('SELECT %s FROM %s WHERE age > %s', 'name', 'table1', '45')

    @param input_string: A string of SQL. May contain %s or %(name)s
    format specifiers; they are replaced with corresponding values taken
    from args.

    @param args: zero or more parameters to interpolate into the string.
    Note that they're passed individually, not as a single tuple.

    @return: Whatever .fetchall() returns (a list of row tuples), or
    None for statements that produce no result set.
    """
    """
    # I advise against using sqlalchemy here (it's more complicated than
    # what we need), but here's an implementation Just In Case. -jps

    import psycopg2, sqlalchemy

    engine = sqlalchemy.create_engine(
        'postgres://%s@%s:%s/%s'
        % (self.user, self.host, self.port, self.db_name),
        echo=False, poolclass=sqlalchemy.pool.NullPool)
    connection = engine.connect()

    result = connection.execute(input_string, *args)
    try:
        # sqlalchemy 0.6.7 offers a result.returns_rows attribute, but
        # no prior version offers anything comparable. A tacky
        # workaround...
        try:
            return result.fetchall()
        except psycopg2.ProgrammingError:
            return None
    finally:
        result.close()
        connection.close()
    """
    # Import lazily so psycopg2 is only required when .sql() is used.
    psycopg2 = importlib.import_module('psycopg2')
    importlib.import_module('psycopg2.extensions')
    connection = psycopg2.connect(
        user=self.user,
        host=self.host,
        port=self.port,
        database=self.db_name)
    # Autocommit: each statement takes effect immediately.
    connection.set_isolation_level(
        psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
    try:
        cursor = connection.cursor()
        cursor.execute(input_string, args)
        # No way to ask whether any rows were returned, so just try it...
        try:
            return cursor.fetchall()
        except psycopg2.ProgrammingError:
            return None
    finally:
        connection.close()
python
{ "resource": "" }
q41687
PostgresServer.destroy
train
def destroy(self):
    """Undo the effects of initdb.

    Destroy all evidence of this DBMS, including its backing files.
    """
    self.stop()
    # Nothing to remove if initdb never assigned a storage directory.
    if self.base_pathname is None:
        return
    self._robust_remove(self.base_pathname)
python
{ "resource": "" }
q41688
PostgresServer._robust_remove
train
def _robust_remove(path): """ Remove the directory specified by `path`. Because we can't determine directly if the path is in use, and on Windows, it's not possible to remove a path if it is in use, retry a few times until the call succeeds. """ tries = itertools.count() max_tries = 50 while os.path.isdir(path): try: shutil.rmtree(path) except WindowsError: if next(tries) >= max_tries: raise time.sleep(0.2)
python
{ "resource": "" }
q41689
PostgresServer.initdb
train
def initdb(self, quiet=True, locale='en_US.UTF-8'): """Bootstrap this DBMS from nothing. If you're running in an environment where the DBMS is provided as part of the basic infrastructure, you probably don't want to call this method! @param quiet: Should we operate quietly, emitting nothing if things go well? """ # Defining base_pathname is deferred until this point because we don't # want to create a temp directory unless it's needed. And now it is! if self.base_pathname in [None, '']: self.base_pathname = tempfile.mkdtemp() if not os.path.isdir(self.base_pathname): os.mkdir(self.base_pathname) stdout = DEV_NULL if quiet else None # The database superuser needs no password at this point(!). arguments = [ '--auth=trust', '--username', self.superuser, ] if locale is not None: arguments.extend(('--locale', locale)) cmd = [ PostgresFinder.find_root() / 'initdb', ] + arguments + ['--pgdata', self.base_pathname] log.info('Initializing PostgreSQL with command: {}'.format( ' '.join(cmd) )) subprocess.check_call(cmd, stdout=stdout)
python
{ "resource": "" }
q41690
PostgresServer._is_running
train
def _is_running(self, tries=10): """ Return if the server is running according to pg_ctl. """ # We can't possibly be running if our base_pathname isn't defined. if not self.base_pathname: return False if tries < 1: raise ValueError('tries must be > 0') cmd = [ PostgresFinder.find_root() / 'pg_ctl', 'status', '-D', self.base_pathname, ] votes = 0 while abs(votes) < tries: time.sleep(0.1) running = (subprocess.call(cmd, stdout=DEV_NULL) == 0) if running and votes >= 0: votes += 1 elif not running and votes <= 0: votes -= 1 else: votes = 0 return votes > 0
python
{ "resource": "" }
q41691
PostgresServer.ready
train
def ready(self):
    """
    Assumes postgres now talks to pg_ctl, but might not yet be listening
    or connections from psql. Test that psql is able to connect, as it
    occasionally takes 5-10 seconds for postgresql to start listening.

    Returns True once psql connects, False on timeout.
    """
    cmd = self._psql_cmd()
    # Poll psql up to 51 times, 0.2s apart (roughly 10 seconds total).
    for i in range(50, -1, -1):
        res = subprocess.call(
            cmd, stdin=DEV_NULL, stdout=DEV_NULL, stderr=DEV_NULL)
        if res == 0:
            break
        time.sleep(0.2)
    # NOTE(review): if the connection only succeeds on the very last
    # iteration (i == 0), this still returns False -- confirm whether
    # that edge case is acceptable to callers.
    return i != 0
python
{ "resource": "" }
q41692
PostgresServer.start
train
def start(self):
    """Launch this postgres server. If it's already running, do nothing.

    If the backing storage directory isn't configured, raise
    NotInitializedError.

    This method is optional. If you're running in an environment where
    the DBMS is provided as part of the basic infrastructure, you
    probably want to skip this step!
    """
    log.info('Starting PostgreSQL at %s:%s', self.host, self.port)
    # Guard: initdb must have assigned (and populated) the storage dir.
    if not self.base_pathname:
        tmpl = ('Invalid base_pathname: %r. Did you forget to call '
                '.initdb()?')
        raise NotInitializedError(tmpl % self.base_pathname)
    conf_file = os.path.join(self.base_pathname, 'postgresql.conf')
    if not os.path.exists(conf_file):
        tmpl = 'No config file at: %r. Did you forget to call .initdb()?'
        raise NotInitializedError(tmpl % self.base_pathname)

    if not self.is_running():
        # The unix-socket option name differs before/after version 9.3.
        version = self.get_version()
        if version and version >= (9, 3):
            socketop = 'unix_socket_directories'
        else:
            socketop = 'unix_socket_directory'
        postgres_options = [
            # When running not as root, postgres might try to put files
            # where they're not writable (see
            # https://paste.yougov.net/YKdgi). So set the socket_dir.
            '-c', '{}={}'.format(socketop, self.base_pathname),
            '-h', self.host,
            '-i',  # enable TCP/IP connections
            '-p', self.port,
        ]
        subprocess.check_call([
            PostgresFinder.find_root() / 'pg_ctl',
            'start',
            '-D', self.base_pathname,
            '-l', os.path.join(self.base_pathname, 'postgresql.log'),
            '-o', subprocess.list2cmdline(postgres_options),
        ])

    # Postgres may launch, then abort if it's unhappy with some parameter.
    # This post-launch test helps us decide.
    if not self.is_running():
        tmpl = ('%s aborted immediately after launch, check '
                'postgresql.log in storage dir')
        raise RuntimeError(tmpl % self)
python
{ "resource": "" }
q41693
PostgresServer.stop
train
def stop(self):
    """Stop this DBMS daemon.

    If it's not currently running, do nothing. Don't return until it's
    terminated.
    """
    log.info('Stopping PostgreSQL at %s:%s', self.host, self.port)
    # Ask pg_ctl for a fast shutdown first.
    if self._is_running():
        cmd = [
            PostgresFinder.find_root() / 'pg_ctl',
            'stop',
            '-D', self.base_pathname,
            '-m', 'fast',
        ]
        subprocess.check_call(cmd)

    # pg_ctl isn't reliable if it's called at certain critical times
    if self.pid:
        os.kill(self.pid, signal.SIGTERM)

    # Can't use wait() because the server might not be our child
    while self._is_running():
        time.sleep(0.1)
python
{ "resource": "" }
q41694
PostgresServer.create
train
def create(self, db_name, **kwargs):
    """
    Construct a PostgresDatabase, ensure its user exists, and create it
    on this server.
    """
    database = PostgresDatabase(
        db_name,
        host=self.host,
        port=self.port,
        superuser=self.superuser,
        **kwargs)
    database.ensure_user()
    database.create()
    return database
python
{ "resource": "" }
q41695
DynamoDBManager._search
train
def _search(self, search_terms, begins_with=None):
    """
    Returns a list of Archive id's in the table on Dynamo

    Generator yielding the ``_id`` of every item whose ``tags``
    attribute contains all of ``search_terms``; ``begins_with``
    additionally restricts ids by prefix.
    """
    # Project only the '_id' attribute; it must be aliased via
    # ExpressionAttributeNames because it starts with an underscore.
    kwargs = dict(
        ProjectionExpression='#id',
        ExpressionAttributeNames={"#id": "_id"})

    # AND together one contains() condition per search term.
    if len(search_terms) > 0:
        kwargs['FilterExpression'] = reduce(
            lambda x, y: x & y,
            [Attr('tags').contains(arg) for arg in search_terms])

    # Fold in the id-prefix restriction, combining with any existing
    # filter expression.
    if begins_with:
        if 'FilterExpression' in kwargs:
            kwargs['FilterExpression'] = kwargs[
                'FilterExpression'] & Key('_id').begins_with(begins_with)
        else:
            kwargs['FilterExpression'] = Key(
                '_id').begins_with(begins_with)

    # Paginate: keep scanning from LastEvaluatedKey until DynamoDB
    # reports no more pages.
    while True:
        res = self._table.scan(**kwargs)

        for r in res['Items']:
            yield r['_id']

        if 'LastEvaluatedKey' in res:
            kwargs['ExclusiveStartKey'] = res['LastEvaluatedKey']

        else:
            break
python
{ "resource": "" }
q41696
DynamoDBManager._update
train
def _update(self, archive_name, version_metadata):
    '''
    Updates the version specific metadata attribute in DynamoDB

    In DynamoDB this is simply a list append on this attribute value

    Parameters
    ----------
    archive_name: str
        unique '_id' primary key

    version_metadata: dict
        dictionary of version metadata values
    '''
    # Append the new version record to the stored version_history list.
    key = {'_id': archive_name}
    update = "SET version_history = list_append(version_history, :v)"
    values = {':v': [version_metadata]}
    self._table.update_item(
        Key=key,
        UpdateExpression=update,
        ExpressionAttributeValues=values,
        ReturnValues='ALL_NEW')
python
{ "resource": "" }
q41697
DynamoDBManager._create_archive_table
train
def _create_archive_table(self, table_name):
    '''
    Dynamo implementation of BaseDataManager create_archive_table

    waiter object is implemented to ensure table creation before moving on

    this will slow down table creation. However, since we are only
    creating table once this should no impact users.

    Parameters
    ----------
    table_name: str

    Returns
    -------
    None

    Raises
    ------
    KeyError
        if a table named ``table_name`` already exists
    '''
    if table_name in self._get_table_names():
        raise KeyError('Table "{}" already exists'.format(table_name))

    try:
        # '_id' string attribute is the sole (HASH) key of the table.
        table = self._resource.create_table(
            TableName=table_name,
            KeySchema=[{'AttributeName': '_id', 'KeyType': 'HASH'}],
            AttributeDefinitions=[
                {'AttributeName': '_id', 'AttributeType': 'S'}],
            ProvisionedThroughput={
                'ReadCapacityUnits': 123,
                'WriteCapacityUnits': 123})

        # Block until DynamoDB reports the table actually exists.
        table.meta.client.get_waiter('table_exists').wait(
            TableName=table_name)

    except ValueError:
        # Error handling for windows incompatability issue
        msg = 'Table creation failed'
        assert table_name in self._get_table_names(), msg
python
{ "resource": "" }
q41698
DynamoDBManager._create_spec_config
train
def _create_spec_config(self, table_name, spec_documents):
    '''
    Dynamo implementation of spec config creation

    Called by `create_archive_table()` in
    :py:class:`manager.BaseDataManager`

    Simply adds two rows to the spec table

    Parameters
    ----------
    table_name : base table name (not including .spec suffix)
    spec_documents : list
        list of dictionary documents defining the manager spec
    '''
    # Write each spec document into the companion "<name>.spec" table.
    spec_table = self._resource.Table(table_name + '.spec')
    for document in spec_documents:
        spec_table.put_item(Item=document)
python
{ "resource": "" }
q41699
DynamoDBManager._update_spec_config
train
def _update_spec_config(self, document_name, spec):
    '''
    Dynamo implementation of project specific metadata spec
    '''
    # add the updated archive_metadata object to Dynamo
    key = {'_id': '{}'.format(document_name)}
    expression = "SET config = :v"
    values = {':v': spec}
    self._spec_table.update_item(
        Key=key,
        UpdateExpression=expression,
        ExpressionAttributeValues=values,
        ReturnValues='ALL_NEW')
python
{ "resource": "" }