_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q45800
Connection.recv
train
def recv(self):
    """Receive one framed message from PS, decrypting it if required.

    Returns:
        Message: the decoded message; ``None`` if the socket timed out
        before the length header arrived. A non-zero transport status
        yields a ``Message`` flagged with ``Connection.COMM_ERROR``.
    """
    LOGGER.debug('Receiving')
    try:
        # First 4 bytes: big-endian frame length (includes the header).
        message_length = struct.unpack('>i', self._socket.recv(4))[0]
        message_length -= Connection.COMM_LENGTH
        LOGGER.debug('Length: %i', message_length)
    except socket.timeout:
        return None
    # Next 4 bytes: status code (0 means an encrypted payload follows).
    comm_status = struct.unpack('>i', self._socket.recv(4))[0]
    LOGGER.debug('Status: %i', comm_status)
    bytes_received = 0
    message = b""
    while bytes_received < message_length:
        recv_len = min(1024, message_length - bytes_received)
        chunk = self._socket.recv(recv_len)
        if not chunk:
            # Peer closed the connection mid-frame; stop instead of
            # spinning forever on empty reads.
            break
        # BUG FIX: count the bytes recv() actually returned -- it may
        # deliver fewer than requested, which previously truncated the
        # payload while still reporting it as fully received.
        bytes_received += len(chunk)
        LOGGER.debug('Received %i', bytes_received)
        message += chunk
    if comm_status == 0:
        message = self._crypt.decrypt(message)
    else:
        return Message(len(message), Connection.COMM_ERROR, message)
    msg = Message(message_length, comm_status, message)
    return msg
python
{ "resource": "" }
q45801
Connection.send
train
def send(self, content):
    """Sends a JavaScript command to PS

    :param content: Script content
    :type content: str

    :yields: :class:`.Message`
    """
    LOGGER.debug('Sending: %s', content)
    # Frame header: protocol version, transaction id, message type 2.
    all_bytes = struct.pack('>i', Connection.PROTOCOL_VERSION)
    all_bytes += struct.pack('>i', self._id)
    all_bytes += struct.pack('>i', 2)
    self._id += 1
    # Script body is appended one UTF-8 encoded character at a time.
    for char in content:
        all_bytes += struct.pack('>c', char.encode('utf8'))
    encrypted_bytes = self._crypt.encrypt(all_bytes)
    message_length = Connection.COMM_LENGTH + len(encrypted_bytes)
    self._socket.send(struct.pack('>i', message_length))
    self._socket.send(struct.pack('>i', Connection.NO_COMM_ERROR))
    self._socket.send(encrypted_bytes)
    LOGGER.debug('Sent')
    # Poll until a reply arrives (recv() returns None on socket timeout).
    message = self.recv()
    while message is None:
        message = self.recv()
    # NOTE(review): the message is yielded twice below -- this looks like
    # a transcription artifact or a bug; confirm against the upstream
    # source whether a single yield (or yielding inside the loop) was
    # intended.
    yield message
    yield message
python
{ "resource": "" }
q45802
watch_command
train
def watch_command(context, backend, config, poll):
    """
    Watch for change on your Sass project sources then compile them to CSS.

    Watched events are:

    \b
    * Create: when a new source file is created;
    * Change: when a source is changed;
    * Delete: when a source is deleted;
    * Move: When a source file is moved in watched dirs. Also occurs with
      editor transition file;

    Almost all errors occurring during compile won't break watcher, so you
    can resolve them and watcher will try again to compile once a new event
    occurs.

    You can stop watcher using key combo "CTRL+C" (or CMD+C on MacOSX).
    """
    logger = logging.getLogger("boussole")
    logger.info("Watching project")

    # Discover settings file
    try:
        discovering = Discover(backends=[SettingsBackendJson,
                                         SettingsBackendYaml])
        config_filepath, config_engine = discovering.search(
            filepath=config, basedir=os.getcwd(), kind=backend
        )
        project = ProjectBase(backend_name=config_engine._kind_name)
        settings = project.backend_engine.load(filepath=config_filepath)
    except BoussoleBaseException as e:
        # Any settings problem is fatal for the watcher.
        logger.critical(six.text_type(e))
        raise click.Abort()

    logger.debug(u"Settings file: {} ({})".format(
        config_filepath, config_engine._kind_name))
    logger.debug(u"Project sources directory: {}".format(
        settings.SOURCES_PATH))
    logger.debug(u"Project destination directory: {}".format(
        settings.TARGET_PATH))
    logger.debug(u"Exclude patterns: {}".format(
        settings.EXCLUDES))

    # Watcher settings: only .scss sources, ignoring editor ".part" files.
    watcher_templates_patterns = {
        'patterns': ['*.scss'],
        'ignore_patterns': ['*.part'],
        'ignore_directories': False,
        'case_sensitive': True,
    }

    # Init inspector instance shared through all handlers
    inspector = ScssInspector()

    # Polling observer is a fallback for filesystems without native events.
    if not poll:
        logger.debug(u"Using Watchdog native platform observer")
        observer = Observer()
    else:
        logger.debug(u"Using Watchdog polling observer")
        observer = PollingObserver()

    # Init event handlers
    project_handler = WatchdogProjectEventHandler(settings, inspector,
                                                  **watcher_templates_patterns)
    lib_handler = WatchdogLibraryEventHandler(settings, inspector,
                                              **watcher_templates_patterns)

    # Observe source directory
    observer.schedule(project_handler, settings.SOURCES_PATH, recursive=True)

    # Also observe libraries directories
    for libpath in settings.LIBRARY_PATHS:
        observer.schedule(lib_handler, libpath, recursive=True)

    # Start watching
    logger.warning(u"Launching the watcher, use CTRL+C to stop it")
    observer.start()

    try:
        # Keep the main thread alive; watchdog works on its own threads.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        logger.warning(u"CTRL+C used, stopping..")
        observer.stop()

    observer.join()
python
{ "resource": "" }
q45803
init
train
def init(deb1, deb2=False):
    """Set the module-level DEBUG and DEBUGALL flags.

    Lets other modules toggle debug output so that their calls to
    dprint/dprintx produce output.

    Args:
        deb1 (bool): new value for DEBUG.
        deb2 (bool): optional new value for DEBUGALL, defaults to False.
    """
    global DEBUG  # pylint: disable=global-statement
    global DEBUGALL  # pylint: disable=global-statement
    DEBUG, DEBUGALL = deb1, deb2
python
{ "resource": "" }
q45804
dprintx
train
def dprintx(passeditem, special=False):
    """Print *passeditem* when DEBUGALL is set, optionally pretty-printed.

    Args:
        passeditem (str): item to print.
        special (bool): if True use pprint, otherwise a colorized print.
    """
    if not DEBUGALL:
        return
    if special:
        from pprint import pprint
        pprint(passeditem)
    else:
        print("%s%s%s" % (C_TI, passeditem, C_NORM))
python
{ "resource": "" }
q45805
get
train
def get(url, **kwargs):
    """
    Wrapper for `request.get` function to set params.

    Forces the configured User-Agent header, applies the default timeout
    when none was supplied, and disables SSL certificate verification.
    """
    headers = kwargs.setdefault('headers', {})
    headers['User-Agent'] = config.USER_AGENT  # overwrite
    kwargs.setdefault('timeout', config.TIMEOUT)
    kwargs['verify'] = False  # no SSLError
    logger.debug("Getting: %s", url)
    return requests.get(url, **kwargs)
python
{ "resource": "" }
q45806
phantomjs_get
train
def phantomjs_get(url):
    """ Perform the request via PhantomJS. """
    # Imported lazily so selenium is only required when this path is used.
    # NOTE(review): PhantomJS support was removed from recent Selenium
    # releases; this requires an older selenium pinned alongside the
    # PHANTOMJS_BIN binary -- confirm against project requirements.
    from selenium import webdriver
    from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
    dcap = dict(DesiredCapabilities.PHANTOMJS)
    # Spoof the configured User-Agent and skip image loading for speed.
    dcap["phantomjs.page.settings.userAgent"] = config.USER_AGENT
    dcap["phantomjs.page.settings.loadImages"] = False
    driver = webdriver.PhantomJS(desired_capabilities=dcap,
                                 executable_path=config.PHANTOMJS_BIN)
    logger.debug("PhantomJS get: %s", url)
    driver.get(url)
    time.sleep(10)  # to follow redirects
    response = driver.page_source
    driver.quit()
    return response
python
{ "resource": "" }
q45807
NwcpymatgenTcodtranslator.get_atom_type_symbol
train
def get_atom_type_symbol(cls, calc, **kwargs):
    """
    Return a sorted list of atom (basis set) types for *calc*.

    Each atom type occurs exactly once and the list is sorted, as the
    TCOD exporter requires. Returns ``None`` when the calculation output
    parameters carry no 'basis_set' entry.
    """
    out_dict = calc.out.output.get_dict()
    if 'basis_set' not in out_dict:
        return None
    return sorted(out_dict['basis_set'])
python
{ "resource": "" }
q45808
FilterProcessor.tag_handler
train
def tag_handler(self, cmd):
    """Process a TagCommand."""
    # A tag survives only if it (indirectly) references something kept.
    interesting = self._find_interesting_from(cmd.from_)
    cmd.from_ = interesting
    self.keep = interesting is not None
python
{ "resource": "" }
q45809
FilterProcessor._print_command
train
def _print_command(self, cmd):
    """Wrapper to avoid adding unnecessary blank lines."""
    rendered = helpers.repr_bytes(cmd)
    self.outf.write(rendered)
    # Terminate the record with a newline only when one is missing.
    if not rendered.endswith(b'\n'):
        self.outf.write(b'\n')
python
{ "resource": "" }
q45810
FilterProcessor._filter_filecommands
train
def _filter_filecommands(self, filecmd_iter):
    """Return the filecommands filtered by includes & excludes.

    :return: a list of FileCommand objects
    """
    # No filtering configured: pass everything through unchanged.
    if self.includes is None and self.excludes is None:
        return list(filecmd_iter())

    # Do the filtering, adjusting for the new_root
    result = []
    for fc in filecmd_iter():
        if (isinstance(fc, commands.FileModifyCommand) or
                isinstance(fc, commands.FileDeleteCommand)):
            # Single-path commands: keep and re-root, or drop entirely.
            if self._path_to_be_kept(fc.path):
                fc.path = self._adjust_for_new_root(fc.path)
            else:
                continue
        elif isinstance(fc, commands.FileDeleteAllCommand):
            # Delete-all is path-free; always passes through.
            pass
        elif isinstance(fc, commands.FileRenameCommand):
            # Two-path commands may become a different command or None.
            fc = self._convert_rename(fc)
        elif isinstance(fc, commands.FileCopyCommand):
            fc = self._convert_copy(fc)
        else:
            self.warning("cannot handle FileCommands of class %s - ignoring",
                         fc.__class__)
            continue
        if fc is not None:
            result.append(fc)
    return result
python
{ "resource": "" }
q45811
FilterProcessor._path_to_be_kept
train
def _path_to_be_kept(self, path): """Does the given path pass the filtering criteria?""" if self.excludes and (path in self.excludes or helpers.is_inside_any(self.excludes, path)): return False if self.includes: return (path in self.includes or helpers.is_inside_any(self.includes, path)) return True
python
{ "resource": "" }
q45812
FilterProcessor._adjust_for_new_root
train
def _adjust_for_new_root(self, path): """Adjust a path given the new root directory of the output.""" if self.new_root is None: return path elif path.startswith(self.new_root): return path[len(self.new_root):] else: return path
python
{ "resource": "" }
q45813
FilterProcessor._convert_rename
train
def _convert_rename(self, fc): """Convert a FileRenameCommand into a new FileCommand. :return: None if the rename is being ignored, otherwise a new FileCommand based on the whether the old and new paths are inside or outside of the interesting locations. """ old = fc.old_path new = fc.new_path keep_old = self._path_to_be_kept(old) keep_new = self._path_to_be_kept(new) if keep_old and keep_new: fc.old_path = self._adjust_for_new_root(old) fc.new_path = self._adjust_for_new_root(new) return fc elif keep_old: # The file has been renamed to a non-interesting location. # Delete it! old = self._adjust_for_new_root(old) return commands.FileDeleteCommand(old) elif keep_new: # The file has been renamed into an interesting location # We really ought to add it but we don't currently buffer # the contents of all previous files and probably never want # to. Maybe fast-import-info needs to be extended to # remember all renames and a config file can be passed # into here ala fast-import? self.warning("cannot turn rename of %s into an add of %s yet" % (old, new)) return None
python
{ "resource": "" }
q45814
FilterProcessor._convert_copy
train
def _convert_copy(self, fc): """Convert a FileCopyCommand into a new FileCommand. :return: None if the copy is being ignored, otherwise a new FileCommand based on the whether the source and destination paths are inside or outside of the interesting locations. """ src = fc.src_path dest = fc.dest_path keep_src = self._path_to_be_kept(src) keep_dest = self._path_to_be_kept(dest) if keep_src and keep_dest: fc.src_path = self._adjust_for_new_root(src) fc.dest_path = self._adjust_for_new_root(dest) return fc elif keep_src: # The file has been copied to a non-interesting location. # Ignore it! return None elif keep_dest: # The file has been copied into an interesting location # We really ought to add it but we don't currently buffer # the contents of all previous files and probably never want # to. Maybe fast-import-info needs to be extended to # remember all copies and a config file can be passed # into here ala fast-import? self.warning("cannot turn copy of %s into an add of %s yet" % (src, dest)) return None
python
{ "resource": "" }
q45815
bundle
train
def bundle(context, yes, bundle_name):
    """Delete the latest bundle version."""
    bundle_obj = context.obj['store'].bundle(bundle_name)
    if bundle_obj is None:
        click.echo(click.style('bundle not found', fg='red'))
        context.abort()
    # Only the most recent version is ever considered for deletion.
    version_obj = bundle_obj.versions[0]
    # Included versions also live on the file system, so the prompt
    # (and the deletion below) covers disk as well as database.
    if version_obj.included_at:
        question = f"remove bundle version from file system and database: {version_obj.full_path}"
    else:
        question = f"remove bundle version from database: {version_obj.created_at.date()}"
    if yes or click.confirm(question):
        if version_obj.included_at:
            shutil.rmtree(version_obj.full_path, ignore_errors=True)
        version_obj.delete()
        context.obj['store'].commit()
        click.echo(f"version deleted: {version_obj.full_path}")
python
{ "resource": "" }
q45816
files
train
def files(context, yes, tag, bundle, before, notondisk):
    """Delete files based on tags."""
    file_objs = []

    # Refuse a filterless delete -- it would match everything.
    if not tag and not bundle:
        click.echo("I'm afraid I can't let you do that.")
        context.abort()

    if bundle:
        bundle_obj = context.obj['store'].bundle(bundle)
        if bundle_obj is None:
            click.echo(click.style('bundle not found', fg='red'))
            context.abort()

    query = context.obj['store'].files_before(bundle = bundle, tags = tag, before = before)

    if notondisk:
        # Restrict to files the store says are NOT present on disk.
        file_objs = set(query) - context.obj['store'].files_ondisk(query)
    else:
        file_objs = query.all()

    # NOTE(review): `yes` is used both as a truth value and with len();
    # presumably a click flag with multiple=True (a tuple) so that a
    # doubled --yes skips this bulk confirmation -- confirm against the
    # CLI option declaration.
    if len(file_objs) > 0 and len(yes) < 2:
        if not click.confirm(f"Are you sure you want to delete {len(file_objs)} files?"):
            context.abort()

    for file_obj in file_objs:
        if yes or click.confirm(f"remove file from disk and database: {file_obj.full_path}"):
            file_obj_path = Path(file_obj.full_path)
            # Only unlink files physically included in the managed tree;
            # is_symlink() covers dangling links where exists() is False.
            if file_obj.is_included and (file_obj_path.exists() or file_obj_path.is_symlink()):
                file_obj_path.unlink()
            file_obj.delete()
            context.obj['store'].commit()
            click.echo(f'{file_obj.full_path} deleted')
python
{ "resource": "" }
q45817
sam_readline
train
def sam_readline(sock, partial = None):
    """read a line from a sam control socket

    Reads byte-by-byte until a newline. When *partial* is given (a
    previously accumulated string), timeouts do not raise: the function
    returns ``(partial + new_text, exception_or_None)`` instead of a
    plain string, supporting non-blocking callers.
    """
    response = b''
    exception = None
    while True:
        try:
            c = sock.recv(1)
            if not c:
                # Empty read means the peer closed the connection.
                raise EOFError('SAM connection died. Partial response %r %r' % (partial, response))
            elif c == b'\n':
                break
            else:
                response += c
        except (BlockingIOError, pysocket.timeout) as e:
            # Would-block/timeout: propagate unless partial-read mode.
            if partial is None:
                raise e
            else:
                exception = e
                break
    if partial is None:
        # print('<--', response)
        return response.decode('ascii')
    else:
        # print('<--', repr(partial), '+', response, exception)
        return (partial + response.decode('ascii'), exception)
python
{ "resource": "" }
q45818
sam_parse_reply
train
def sam_parse_reply(line):
    """parse a reply line into a dict"""
    tokens = line.split(' ')
    # The first two tokens name the reply; the rest are KEY=VALUE pairs.
    return SAMReply(tokens[0], dict(split_kv(tokens[2:])))
python
{ "resource": "" }
q45819
sam_send
train
def sam_send(sock, line_and_data):
    """Send a line to the SAM controller, but don't read it"""
    # Accept either a bare command line or a (line, payload) tuple.
    if isinstance(line_and_data, tuple):
        line, data = line_and_data
    else:
        line, data = line_and_data, b''
    framed = line.encode('ascii') + b' \n'
    # print('-->', line, data)
    sock.sendall(framed + data)
python
{ "resource": "" }
q45820
sam_cmd
train
def sam_cmd(sock, line, parse=True):
    """Send a line to the SAM controller, returning the parsed response"""
    sam_send(sock, line)
    raw_reply = sam_readline(sock)
    # Callers may opt out of parsing to get the raw reply line.
    return sam_parse_reply(raw_reply) if parse else raw_reply
python
{ "resource": "" }
q45821
handshake
train
def handshake(timeout, sam_api, max_version):
    """handshake with sam via a socket.socket instance"""
    sock = controller_connect(sam_api, timeout=timeout)
    reply = sam_cmd(sock, greet(max_version))
    if not reply.ok:
        raise HandshakeError("Failed to handshake with SAM: %s" % repr(reply))
    return sock
python
{ "resource": "" }
q45822
lookup
train
def lookup(sock, domain, cache = None):
    """lookup an I2P domain name, returning a Destination instance

    Raises NSError when the SAM reply carries no VALUE for the name.
    """
    domain = normalize_domain(domain)
    # NOTE(review): nothing here ever READS from `cache` -- it is only
    # written to below, despite the "cache miss" comment. Confirm whether
    # a cache lookup was meant to precede the NAMING LOOKUP command.
    # cache miss, perform lookup
    reply = sam_cmd(sock, "NAMING LOOKUP NAME=%s" % domain)
    b64_dest = reply.get('VALUE')
    if b64_dest:
        dest = Dest(b64_dest, encoding='base64')
        if cache:
            # Store under the .b32.i2p alias for later reuse.
            cache[dest.base32 + '.b32.i2p'] = dest
        return dest
    else:
        raise NSError('Domain name %r not resolved because %r' % (domain, reply))
python
{ "resource": "" }
q45823
HistoryHandler.post
train
async def post(self):
    """
    Accepts json-rpc post request.
    Retrieves data from request body.
    Calls defined method in field 'method_name'
    """
    raw_request = self.request.body.decode()
    response = await methods.dispatch(raw_request)
    # Notifications expect no reply; only write a body for real requests.
    if not response.is_notification:
        self.write(response)
python
{ "resource": "" }
q45824
get_template_directories
train
def get_template_directories():
    """Collect template directories from the TEMPLATES setting.

    Gathers the DIRS entries of every engine configured in
    settings.TEMPLATES.

    Returns a set of template directories.
    """
    directories = set()
    for engine in settings.TEMPLATES:
        directories.update(engine.get('DIRS', []))
    return directories
python
{ "resource": "" }
q45825
urls_from_file_tree
train
def urls_from_file_tree(template_dir):
    """Generates a list of URL strings that would match each staticflatpage."""
    urls = []
    # Everything under <template_dir>/staticflatpages maps to a URL path.
    root_dir = join(template_dir, 'staticflatpages')
    for current_dir, _subdirs, filenames in walk(template_dir):
        # Only consider the staticflatpages directory and its children.
        if "staticflatpages" not in current_dir:
            continue
        relative_dir = current_dir.replace(root_dir, '')
        for filename in filenames:
            urls.append(_format_as_url(join(relative_dir, filename)))
    return urls
python
{ "resource": "" }
q45826
get_terminal_size
train
def get_terminal_size(defaultw=80):
    """ Checks various methods to determine the terminal size

    Methods:
    - shutil.get_terminal_size (only Python3)
    - fcntl.ioctl
    - subprocess.check_output
    - os.environ

    Parameters
    ----------
    defaultw : int
        Default width of terminal.

    Returns
    -------
    width, height : int
        Width and height of the terminal. If one of them could not be
        found, None is return in its place.
    """
    if callable(shutil_get_terminal_size):
        return shutil_get_terminal_size()
    # Fallback 1: TIOCGWINSZ ioctl on stdin (fd 0) -> (rows, cols).
    try:
        import fcntl, termios, struct
        fd = 0
        hw = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
        return (hw[1], hw[0])
    except Exception:  # narrowed from a bare except
        pass
    # Fallback 2: ask tput for the column count (width only).
    try:
        out = sp.check_output(["tput", "cols"])
        width = int(out.decode("utf-8").strip())
        return (width, None)
    except Exception:  # narrowed from a bare except
        pass
    # Fallback 3: LINES/COLUMNS environment variables.
    # BUG FIX: environment values are strings -- cast to int so the
    # return type matches the documented (width, height) ints.
    try:
        return (int(os.environ['COLUMNS']), int(os.environ['LINES']))
    except (KeyError, ValueError):
        return (defaultw, None)
python
{ "resource": "" }
q45827
FormField.compress
train
def compress(self, data_list):
    """
    Return the cleaned_data of the form, everything should already be
    valid
    """
    if not data_list:
        return {}
    # Pair each sub-field's name with its positional value.
    return {field.name: data_list[i] for i, field in enumerate(self.form)}
python
{ "resource": "" }
q45828
FormField.clean
train
def clean(self, value):
    """
    Call the form is_valid to ensure every value supplied is valid
    """
    if not value:
        raise ValidationError(
            'Error found in Form Field: Nothing to validate')
    # Pair each supplied value with the corresponding bound field name.
    data = dict((bf.name, value[i]) for i, bf in enumerate(self.form))
    # Re-bind a fresh form instance with the supplied data.
    # NOTE(review): this mutates self.form on every clean() call --
    # confirm the field is not shared across requests.
    self.form = form = self.form.__class__(data)
    if not form.is_valid():
        error_dict = list(form.errors.items())
        raise ValidationError([
            ValidationError(mark_safe('{} {}'.format(
                k.title(), v)), code=k) for k, v in error_dict])

    # This call will ensure compress is called as expected.
    return super(FormField, self).clean(value)
python
{ "resource": "" }
q45829
MOCTool.run
train
def run(self, params):
    """Main run method for PyMOC tool.

    Takes a list of command line arguments to process.

    Each operation is performed on a current "running" MOC object.
    """
    # Reverse so pop() consumes arguments left-to-right.
    self.params = list(reversed(params))

    if not self.params:
        self.help()
        return

    while self.params:
        token = self.params.pop()
        if token in self.command:
            # Known command: execute it against the running MOC.
            self.command[token](self)
        elif os.path.exists(token):
            # Name of an existing file: read it into the running MOC.
            self.read_moc(token)
        else:
            raise CommandError('file or command {0} not found'.format(token))
python
{ "resource": "" }
q45830
MOCTool.read_moc
train
def read_moc(self, filename):
    """Read a file into the current running MOC object.

    If the running MOC object has not yet been created, then it is
    created by reading the file, which will import the MOC metadata.
    Otherwise the metadata are not imported.
    """
    if self.moc is not None:
        self.moc.read(filename)
    else:
        self.moc = MOC(filename=filename)
python
{ "resource": "" }
q45831
MOCTool.catalog
train
def catalog(self):
    """Create MOC from catalog of coordinates.

    This command requires that the Healpy and Astropy libraries be
    available.  It attempts to load the given catalog, and merges it
    with the running MOC.

    The name of an ASCII catalog file should be given.  The file
    should contain either "RA" and "Dec" columns (for ICRS coordinates)
    or "Lon" and "Lat" columns (for galactic coordinates).  The MOC
    order and radius (in arcseconds) can be given with additional
    options.

    ::

        pymoctool --catalog coords.txt [order 12] [radius 3600]
            [unit (hour | deg | rad) (deg | rad)]
            [format commented_header]
            [inclusive]

    Units (if not specified) are assumed to be hours and degrees for
    ICRS coordinates and degrees for galactic coordinates.  The format,
    if not specified (as an Astropy ASCII table format name) is assumed
    to be commented header, e.g.:

    ::

        # RA Dec
        01:30:00 +45:00:00
        22:30:00 +45:00:00
    """
    # Imported lazily: healpy/astropy are only needed for this command.
    from .catalog import catalog_to_moc, read_ascii_catalog

    filename = self.params.pop()

    # Option defaults.
    order = 12
    radius = 3600
    unit = None
    format_ = 'commented_header'
    kwargs = {}

    # Consume trailing "key value" option pairs until an unknown token,
    # which is left for the main command loop to process.
    while self.params:
        if self.params[-1] == 'order':
            self.params.pop()
            order = int(self.params.pop())
        elif self.params[-1] == 'radius':
            self.params.pop()
            radius = float(self.params.pop())
        elif self.params[-1] == 'unit':
            self.params.pop()
            unit_x = self.params.pop()
            unit_y = self.params.pop()
            unit = (unit_x, unit_y)
        elif self.params[-1] == 'format':
            self.params.pop()
            format_ = self.params.pop()
        elif self.params[-1] == 'inclusive':
            self.params.pop()
            kwargs['inclusive'] = True
        else:
            break

    coords = read_ascii_catalog(filename, format_=format_, unit=unit)
    catalog_moc = catalog_to_moc(coords, radius, order, **kwargs)

    # Merge into the running MOC (or adopt it when none exists yet).
    if self.moc is None:
        self.moc = catalog_moc
    else:
        self.moc += catalog_moc
python
{ "resource": "" }
q45832
MOCTool.help
train
def help(self):
    """Display command usage information."""
    if self.params:
        # "--help COMMAND": show that command's synopsis (and body).
        command = self.params.pop().lstrip('-')
        if command in self.command.documentation:
            (aliases, doc) = self.command.documentation[command]
            (synopsis, body) = self._split_docstring(doc)
            print(synopsis)
            if body:
                print()
                print(body)
        else:
            raise CommandError('command {0} not known'.format(command))
    else:
        # Bare "--help": module synopsis followed by a command index.
        (synopsis, body) = self._split_docstring(__doc__)
        print(synopsis)
        print()
        print(body)
        print()
        print('Commands:')
        for command in sorted(self.command.documentation.keys()):
            print(' ', ', '.join(self.command.documentation[command][0]))
        print()
        print('Use "pymoctool --help COMMAND" for additional '
              'information about a command.')
python
{ "resource": "" }
q45833
MOCTool.identifier
train
def identifier(self):
    """Set the identifier of the current MOC.

    The new identifier should be given after this option.

    ::

        pymoctool ... --id 'New MOC identifier' --output new_moc.fits
    """
    moc = self.moc
    if moc is None:
        # No running MOC yet: start with an empty one.
        moc = self.moc = MOC()
    moc.id = self.params.pop()
python
{ "resource": "" }
q45834
MOCTool.display_info
train
def display_info(self):
    """Display basic information about the running MOC."""
    moc = self.moc
    if moc is None:
        print('No MOC information present')
        return
    # Name and identifier are optional metadata.
    if moc.name is not None:
        print('Name:', moc.name)
    if moc.id is not None:
        print('Identifier:', moc.id)
    print('Order:', moc.order)
    print('Cells:', moc.cells)
    print('Area:', moc.area_sq_deg, 'square degrees')
python
{ "resource": "" }
q45835
MOCTool.intersection
train
def intersection(self):
    """Compute the intersection with the given MOC.

    This command takes the name of a MOC file and forms the intersection
    of the running MOC with that file.

    ::

        pymoctool a.fits --intersection b.fits --output intersection.fits
    """
    if self.moc is None:
        raise CommandError('No MOC information present for intersection')
    other = MOC(filename=self.params.pop())
    self.moc = self.moc.intersection(other)
python
{ "resource": "" }
q45836
MOCTool.name
train
def name(self):
    """Set the name of the current MOC.

    The new name should be given after this option.

    ::

        pymoctool ... --name 'New MOC name' --output new_moc.fits
    """
    moc = self.moc
    if moc is None:
        # No running MOC yet: start with an empty one.
        moc = self.moc = MOC()
    moc.name = self.params.pop()
python
{ "resource": "" }
q45837
MOCTool.normalize
train
def normalize(self):
    """Normalize the MOC to a given order.

    This command takes a MOC order (0-29) and normalizes the MOC so that
    its maximum order is the given order.

    ::

        pymoctool a.fits --normalize 10 --output a_10.fits
    """
    if self.moc is None:
        raise CommandError('No MOC information present for normalization')
    target_order = int(self.params.pop())
    self.moc.normalize(target_order)
python
{ "resource": "" }
q45838
MOCTool.write_moc
train
def write_moc(self):
    """Write the MOC to a given file."""
    if self.moc is None:
        raise CommandError('No MOC information present for output')
    target = self.params.pop()
    self.moc.write(target)
python
{ "resource": "" }
q45839
MOCTool.subtract
train
def subtract(self):
    """Subtract the given MOC from the running MOC.

    This command takes the name of a MOC file to be subtracted from the
    running MOC.

    ::

        pymoctool a.fits --subtract b.fits --output difference.fits
    """
    if self.moc is None:
        raise CommandError('No MOC information present for subtraction')
    other = MOC(filename=self.params.pop())
    self.moc -= other
python
{ "resource": "" }
q45840
MOCTool.plot
train
def plot(self):
    """Show the running MOC on an all-sky map.

    This command requires that the Healpy and matplotlib libraries be
    available.  It plots the running MOC, which should be normalized to
    a lower order first if it would generate an excessively large pixel
    array.

    ::

        pymoctool a.moc --normalize 8 --plot

    It also accepts additional arguments which can be used to control
    the plot.  The 'order' option can be used instead of normalizing the
    MOC before plotting.  The 'antialias' option specifies an additional
    number of MOC orders which should be used to smooth the edges as
    plotted -- 1 or 2 is normally sufficient.  The 'file' option can be
    given to specify a file to which the plot should be saved.

    ::

        pymoctool ... --plot [order <order>] [antialias <level>] [file <filename>] ...
    """
    if self.moc is None:
        raise CommandError('No MOC information present for plotting')

    # Imported lazily: healpy/matplotlib are only needed for plotting.
    from .plot import plot_moc

    # Plot option defaults; may be overridden by trailing arguments.
    order = self.moc.order
    antialias = 0
    filename = None

    # Consume trailing "key value" option pairs until an unknown token,
    # which is left for the main command loop to process.
    while self.params:
        if self.params[-1] == 'order':
            self.params.pop()
            order = int(self.params.pop())
        elif self.params[-1] == 'antialias':
            self.params.pop()
            antialias = int(self.params.pop())
        elif self.params[-1] == 'file':
            self.params.pop()
            filename = self.params.pop()
        else:
            break

    plot_moc(self.moc, order=order, antialias=antialias,
             filename=filename, projection='moll')
python
{ "resource": "" }
q45841
load_synapses
train
def load_synapses(path=HOME + "/Downloads/pinky100_final.df", scaling=(1, 1, 1)):
    """ Test scenario using real synapses

    Loads a synapse table from CSV, drops rows with missing x-coordinates
    and returns pre/center/post positions (divided by *scaling*) plus size.

    :param path: CSV file with presyn_*/centroid_*/postsyn_* and size columns
    :param scaling: per-axis divisor applied to all coordinates
    :return: DataFrame with pre_pt.position, ctr_pt.position,
        post_pt.position and size columns
    """
    scaling = np.array(list(scaling))

    df = pd.read_csv(path)

    # Drop rows where any x-coordinate is NaN (incomplete synapses).
    locs = np.array(df[["presyn_x", "centroid_x", "postsyn_x"]])
    mask = ~np.any(np.isnan(locs), axis=1)
    df = df[mask]

    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement.
    df['pre_pt.position'] = list((np.array(
        df[['presyn_x', 'presyn_y', 'presyn_z']]) / scaling).astype(int))
    df['ctr_pt.position'] = list((np.array(
        df[['centroid_x', 'centroid_y', 'centroid_z']]) / scaling).astype(int))
    df['post_pt.position'] = list((np.array(
        df[['postsyn_x', 'postsyn_y', 'postsyn_z']]) / scaling).astype(int))

    df = df[['pre_pt.position', 'ctr_pt.position', 'post_pt.position', 'size']]
    return df
python
{ "resource": "" }
q45842
include
train
def include(context, bundle_name, version):
    """Include a bundle of files into the internal space.

    Use bundle name if you simply want to include the latest version.
    """
    store = Store(context.obj['database'], context.obj['root'])
    if version:
        version_obj = store.Version.get(version)
        if version_obj is None:
            click.echo(click.style('version not found', fg='red'))
            # BUG FIX: previously fell through with version_obj == None
            # and crashed below; abort like the sibling commands do.
            context.abort()
    else:
        bundle_obj = store.bundle(bundle_name)
        if bundle_obj is None:
            click.echo(click.style('bundle not found', fg='red'))
            # BUG FIX: abort instead of dereferencing a None bundle.
            context.abort()
        version_obj = bundle_obj.versions[0]

    try:
        include_version(context.obj['root'], version_obj)
    except VersionIncludedError as error:
        click.echo(click.style(error.message, fg='red'))
        context.abort()

    # Record when this version was physically included.
    version_obj.included_at = dt.datetime.now()
    store.commit()
    click.echo(click.style('included all files!', fg='green'))
python
{ "resource": "" }
q45843
Service.ingress_filter
train
def ingress_filter(self, response):
    """ Flatten a response with meta and data keys into an object. """
    data = self.data_getter(response)
    # Wrap known container types; anything else passes through untouched.
    if isinstance(data, list):
        wrapped = m_data.ListResponse(data)
    elif isinstance(data, dict):
        wrapped = m_data.DictResponse(data)
    else:
        return data
    wrapped.meta = self.meta_getter(response)
    return wrapped
python
{ "resource": "" }
q45844
Service.get_pager
train
def get_pager(self, *path, **kwargs):
    """ A generator for all the results a resource can provide. The pages
    are lazily loaded. """
    # page_size takes precedence over limit, then the service default.
    page_size = kwargs.pop('page_size', None)
    limit = kwargs.pop('limit', None)
    kwargs['limit'] = page_size or limit or self.default_page_size
    return self.adapter.get_pager(self.get, path, kwargs)
python
{ "resource": "" }
q45845
RefTracker.track_heads
train
def track_heads(self, cmd):
    """Track the repository heads given a CommitCommand.

    :param cmd: the CommitCommand
    :return: the list of parents in terms of commit-ids
    """
    # True parent set: explicit from_, else the last commit seen on
    # this ref (if any), plus any merge parents.
    if cmd.from_ is not None:
        parents = [cmd.from_]
    else:
        last_id = self.last_ids.get(cmd.ref)
        parents = [last_id] if last_id is not None else []
    parents.extend(cmd.merges)

    self.track_heads_for_ref(cmd.ref, cmd.id, parents)
    return parents
python
{ "resource": "" }
q45846
metasay
train
def metasay(ctx, inputfile, item):
    """Moo some dataset metadata to stdout.

    Python module: rio-metasay (https://github.com/sgillies/rio-plugin-example).
    """
    # Read the raster profile (driver, dtype, CRS, transform, ...).
    with rasterio.open(inputfile) as src:
        meta = src.profile
    # Render the metadata (or a single key of it) as cowsay-style output.
    click.echo(moothedata(meta, key=item))
python
{ "resource": "" }
q45847
load_pdb
train
def load_pdb(pdb, path=True, pdb_id='', ignore_end=False):
    """Converts a PDB file into an AMPAL object.

    Parameters
    ----------
    pdb : str
        Either a path to a PDB file or a string containing PDB format
        structural data.
    path : bool, optional
        If `true`, flags `pdb` as a path and not a PDB string.
    pdb_id : str, optional
        Identifier for the `Assembly`.
    ignore_end : bool, optional
        If `false`, parsing of the file will stop when an "END" record
        is encountered.

    Returns
    -------
    ampal : ampal.Assembly or ampal.AmpalContainer
        AMPAL object that contains the structural information from the
        PDB file provided. If the PDB file has a single state then an
        `Assembly` will be returned, otherwise an `AmpalContainer` will
        be returned.
    """
    parser = PdbParser(pdb, path=path, pdb_id=pdb_id, ignore_end=ignore_end)
    return parser.make_ampal()
python
{ "resource": "" }
q45848
PdbParser.gen_states
train
def gen_states(self, monomer_data, parent):
    """Generates the `states` dictionary for a `Monomer`.

    monomer_data : list
        A list of atom data parsed from the input PDB.
    parent : ampal.Monomer
        `Monomer` used to assign `parent` on created `Atoms`.
    """
    states = {}
    for atoms in monomer_data:
        for atom in atoms:
            # Index 3 is the alt-loc indicator; blank means state 'A'.
            state = 'A' if not atom[3] else atom[3]
            if state not in states:
                states[state] = OrderedDict()
            # Indices 8:11 are coordinates, 13 the element; the rest are
            # id/label/occupancy/bfactor/charge columns.
            states[state][atom[2]] = Atom(
                tuple(atom[8:11]), atom[13], atom_id=atom[1],
                res_label=atom[2], occupancy=atom[11], bfactor=atom[12],
                charge=atom[14], state=state, parent=parent)

    # This code is to check if there are alternate states and populate any
    # both states with the full complement of atoms
    states_len = [(k, len(x)) for k, x in states.items()]
    if (len(states) > 1) and (len(set([x[1] for x in states_len])) > 1):
        for t_state, t_state_d in states.items():
            new_s_dict = OrderedDict()
            # Use the alphabetically first state as the reference set.
            for k, v in states[sorted(states_len, key=lambda x: x[0])[0][0]].items():
                if k not in t_state_d:
                    # Clone the missing atom from the reference state,
                    # re-tagged with the target state label.
                    c_atom = Atom(
                        v._vector, v.element, atom_id=v.id,
                        res_label=v.res_label, occupancy=v.tags['occupancy'],
                        bfactor=v.tags['bfactor'], charge=v.tags['charge'],
                        state=t_state[0], parent=v.parent)
                    new_s_dict[k] = c_atom
                else:
                    new_s_dict[k] = t_state_d[k]
            # Safe while iterating: keys are unchanged, only values swap.
            states[t_state] = new_s_dict
    return states
python
{ "resource": "" }
q45849
PdbParser.check_for_non_canonical
train
def check_for_non_canonical(residue):
    """Checks to see if the residue is non-canonical."""
    res_label = list(residue[0])[0][2]
    # Collect every atom label across all alt states of the residue.
    atom_labels = {atom[2] for atom in itertools.chain(*residue[1].values())}
    # A full protein backbone plus a 3-letter label marks an unnatural
    # amino acid handled as a Residue.
    has_backbone = all(label in atom_labels for label in ('N', 'CA', 'C', 'O'))
    if has_backbone and len(res_label) == 3:
        return Residue, True
    return None
python
{ "resource": "" }
q45850
get_inst_info
train
def get_inst_info(qry_string):
    """Get details for instances that match the qry_string.

    Execute a query against the AWS EC2 client object, that is based
    on the contents of qry_string.

    Args:
        qry_string (str): the query to be used against the aws ec2
            client, given as the literal Python source of the keyword
            arguments (e.g. "Filters=[...]").

    Returns:
        qry_results (dict): raw information returned from AWS.
    """
    # Build "EC2C.describe_instances(<qry_string>)" so callers can pass
    # arbitrary keyword arguments as source text.
    qry_prefix = "EC2C.describe_instances("
    qry_real = qry_prefix + qry_string + ")"
    # SECURITY NOTE(review): eval() executes arbitrary Python, so
    # qry_string must never come from untrusted input — confirm all
    # callers pass program-generated query strings only.
    qry_results = eval(qry_real)  # pylint: disable=eval-used
    return qry_results
python
{ "resource": "" }
q45851
get_all_aminames
train
def get_all_aminames(i_info):
    """Populate the 'aminame' field for every instance in i_info.

    Args:
        i_info (dict): information on instances and details.

    Returns:
        i_info (dict): the same dict, with 'aminame' filled in for each
            instance (or "Unknown" when the image cannot be resolved).
    """
    for key in i_info:
        entry = i_info[key]
        try:
            # pylint: disable=maybe-no-member
            entry['aminame'] = EC2R.Image(entry['ami']).name
        except AttributeError:
            # The image no longer exists or exposes no name.
            entry['aminame'] = "Unknown"
    return i_info
python
{ "resource": "" }
q45852
get_one_aminame
train
def get_one_aminame(inst_img_id):
    """Resolve the image name for the image_id specified.

    Args:
        inst_img_id (str): image_id to get name value from.

    Returns:
        aminame (str): name of the image, or "Unknown" when the image
            cannot be resolved.
    """
    try:
        return EC2R.Image(inst_img_id).name
    except AttributeError:
        # The image no longer exists or exposes no name.
        return "Unknown"
python
{ "resource": "" }
q45853
startstop
train
def startstop(inst_id, cmdtodo):
    """Start or stop the specified instance.

    Args:
        inst_id (str): instance-id to perform command against.
        cmdtodo (str): command to perform ("start" or "stop").

    Returns:
        response (dict): response returned from AWS after performing
            the specified action.
    """
    instance = EC2R.Instance(inst_id)
    # Look the action up by name so the same code path handles both
    # "start" and "stop".
    return getattr(instance, cmdtodo)()
python
{ "resource": "" }
q45854
GitHubRegion.addCity
train
def addCity(self, fileName):
    """Add the users listed in a JSON file to this region.

    :param fileName: path to the JSON file.  The file must contain a
        list of users under the key "users".
    :type fileName: str.
    """
    with open(fileName) as data_file:
        payload = load(data_file)
    for user in payload["users"]:
        # Skip users already registered under the same name.
        already_known = any(
            entry["name"] == user["name"] for entry in self.__users)
        if not already_known:
            self.__users.append(user)
python
{ "resource": "" }
q45855
GitHubRegion.__getTemplate
train
def __getTemplate(template_file_name):
    """Load and parse the template used to save the ranking.

    :param template_file_name: path to the template.
    :type template_file_name: str.
    :return: template for the file.
    :rtype: pystache's template.
    """
    with open(template_file_name) as template_file:
        raw_template = template_file.read()
    return parse(raw_template)
python
{ "resource": "" }
q45856
setup_logging
train
def setup_logging(logging_config, debug=False):
    """Setup logging config.

    Args:
        logging_config: path to a ``fileConfig``-style logging
            configuration file, or ``None`` to fall back to
            ``logging.basicConfig``.
        debug: when no config file is given, log at DEBUG level if
            true, otherwise at ERROR level.
    """
    if logging_config is not None:
        logging.config.fileConfig(logging_config)
    else:
        # Explicit conditional expression instead of the legacy
        # "cond and a or b" idiom.
        logging.basicConfig(level=logging.DEBUG if debug else logging.ERROR)
python
{ "resource": "" }
q45857
loop
train
def loop(sock, config=None):
    """Loop over all docker events and execute subscribed callbacks.

    :param sock: base URL of the docker daemon socket.
    :param config: a dictionary with external config values
    """
    if config is None:
        config = {}

    client = docker.Client(base_url=sock)

    # Fake a "running" event for every container that is already up, so
    # subscribers also see containers started before we began listening.
    for container in client.containers():
        event_data = {
            'status': "running",
            'id': container['Id'],
            'from': container['Image'],
            'time': container['Created'],
        }
        LOG.debug("incomming event: %s", event_data)
        callbacks = event.filter_callbacks(client, event_data)
        # BUGFIX: pass the docker client as the first argument, matching
        # the callback signature used for live events below (the original
        # omitted it here, so callbacks could not satisfy both call sites).
        gevent.joinall([gevent.spawn(cb, client, event_data, config)
                        for cb in callbacks])

    # Listen for further events from the docker daemon.
    for raw_data in client.events():
        event_data = json.loads(raw_data)
        LOG.debug("incomming event: %s", event_data)
        callbacks = event.filter_callbacks(client, event_data)
        # Spawn all callbacks and wait for them to finish.
        gevent.joinall([gevent.spawn(cb, client, event_data, config)
                        for cb in callbacks])
python
{ "resource": "" }
q45858
join_configs
train
def join_configs(configs):
    """Join all config files into one config.

    Args:
        configs: iterable of open YAML config files, merged in order;
            later files override earlier keys.

    Returns:
        dict: the merged configuration.
    """
    joined_config = {}
    for config in configs:
        # yaml.load() without an explicit Loader is deprecated (and a
        # hard error in PyYAML >= 6); safe_load also avoids executing
        # arbitrary YAML tags from the config files.
        joined_config.update(yaml.safe_load(config))
    return joined_config
python
{ "resource": "" }
q45859
load_modules
train
def load_modules(modules):
    """Import each dotted module path, logging failures.

    Args:
        modules: iterable of dotted module names (e.g. "pkg.mod").
    """
    # Local import keeps the file's top-level import block untouched.
    import importlib

    for dotted_module in modules:
        try:
            # importlib.import_module is the documented replacement for
            # calling __import__ directly; the return value is unused
            # here, so the two are interchangeable.
            importlib.import_module(dotted_module)
        except ImportError as e:
            LOG.error("Unable to import %s: %s", dotted_module, e)
python
{ "resource": "" }
q45860
load_files
train
def load_files(files):
    """Load and execute a python file."""
    # NOTE(review): execfile() is Python 2 only; under Python 3 this
    # would need exec(compile(open(py_file).read(), py_file, 'exec'), ...)
    # — confirm the project still targets Python 2 before porting.
    for py_file in files:
        LOG.debug("exec %s", py_file)
        # Each file is executed with this module's globals and the
        # current locals, so it can register event callbacks.
        execfile(py_file, globals(), locals())
python
{ "resource": "" }
q45861
summarize_events
train
def summarize_events():
    """Log a short summary of every event that has subscribers."""
    for ev in event.events:
        if not ev.callbacks:
            continue
        subscribers = ', '.join(repr(cb) for cb in ev.callbacks)
        LOG.info("subscribed to %s by %s", ev, subscribers)
python
{ "resource": "" }
q45862
cli
train
def cli(sock, configs, modules, files, log, debug):
    """The CLI."""
    # Configure logging first so later steps can emit diagnostics.
    setup_logging(log, debug)
    # Merge all provided config files into a single dict.
    config = join_configs(configs)

    # load python modules
    load_modules(modules)

    # load python files
    load_files(files)

    # summarize active events and callbacks
    summarize_events()

    # NOTE(review): Greenlet.spawn() already schedules the greenlet, so
    # the extra start() call below looks redundant — confirm against the
    # gevent documentation before removing.
    gloop = gevent.Greenlet.spawn(loop, sock=sock, config=config)
    gloop.start()
    # Block until the event loop greenlet finishes.
    gloop.join()
python
{ "resource": "" }
q45863
MOC.type
train
def type(self, value):
    """Set the type of the MOC.

    The value should be either "IMAGE" or "CATALOG"; ``None`` clears
    the type, and anything else raises ``ValueError``.
    """
    # Reset first so a failed validation leaves no stale type behind.
    self._type = None
    if value is None:
        return
    normalized = value.upper()
    if normalized not in MOC_TYPES:
        raise ValueError('MOC type must be one of ' + ', '.join(MOC_TYPES))
    self._type = normalized
python
{ "resource": "" }
q45864
MOC.area
train
def area(self):
    """The area enclosed by the MOC, in steradians.

    >>> m = MOC(0, (0, 1, 2))
    >>> round(m.area, 2)
    3.14
    """
    self.normalize()
    # Each cell at a given order covers pi / (3 * 4**order) steradians.
    return sum(
        len(cells) * pi / (3 * 4 ** order)
        for (order, cells) in self)
python
{ "resource": "" }
q45865
MOC.cells
train
def cells(self):
    """The number of cells in the MOC.

    This gives the total number of cells at all orders, with cells
    from every order counted equally.

    >>> m = MOC(0, (1, 2))
    >>> m.cells
    2
    """
    return sum(len(order_cells) for (_, order_cells) in self)
python
{ "resource": "" }
q45866
MOC.add
train
def add(self, order, cells, no_validation=False):
    """Add cells at a given order to the MOC.

    The cells are inserted into the MOC at the specified order.  This
    leaves the MOC in an un-normalized state.  The cells are given as
    a collection of integers (or types which can be converted to
    integers).

    >>> m = MOC()
    >>> m.add(4, (20, 21))
    >>> m.cells
    2
    >>> m.add(5, (88, 89))
    >>> m.cells
    4

    The `no_validation` option can be given to skip validation of the
    cell numbers.  They must already be integers in the correct range.
    """
    # Any mutation invalidates the normalized flag.
    self._normalized = False
    order = self._validate_order(order)

    if no_validation:
        # Trust the caller: insert the cells as-is.
        self._orders[order].update(cells)
    else:
        # Validate every cell number before inserting any of them.
        validated = {self._validate_cell(order, cell) for cell in cells}
        self._orders[order].update(validated)
python
{ "resource": "" }
q45867
MOC.remove
train
def remove(self, order, cells):
    """Remove cells at a given order from the MOC."""
    self._normalized = False
    order = self._validate_order(order)

    # Delegate the actual removal of each validated cell to the shared
    # comparison machinery in 'remove' mode.
    for cell in cells:
        self._compare_operation(
            order, self._validate_cell(order, cell), True, 'remove')
python
{ "resource": "" }
q45868
MOC.clear
train
def clear(self):
    """Clears all cells from a MOC.

    >>> m = MOC(4, (5, 6))
    >>> m.clear()
    >>> m.cells
    0
    """
    # Empty the cell set at every order; an empty MOC is trivially
    # normalized.
    for order in range(MAX_ORDER + 1):
        self._orders[order].clear()
    self._normalized = True
python
{ "resource": "" }
q45869
MOC.copy
train
def copy(self):
    """Return a copy of a MOC.

    >>> p = MOC(4, (5, 6))
    >>> q = p.copy()
    >>> repr(q)
    '<MOC: [(4, [5, 6])]>'
    """
    # Build an empty MOC carrying over the metadata, then merge in our
    # own cells via the += operator.
    duplicate = MOC(
        name=self.name, mocid=self.id,
        origin=self.origin, moctype=self.type)
    duplicate += self
    return duplicate
python
{ "resource": "" }
q45870
MOC.contains
train
def contains(self, order, cell, include_smaller=False):
    """Test whether the MOC contains the given cell.

    If the include_smaller argument is true then the MOC is considered
    to include a cell if it includes part of that cell (at a higher
    order).

    >>> m = MOC(1, (5,))
    >>> m.contains(0, 0)
    False
    >>> m.contains(0, 1, True)
    True
    >>> m.contains(0, 1, False)
    False
    >>> m.contains(1, 4)
    False
    >>> m.contains(1, 5)
    True
    >>> m.contains(2, 19)
    False
    >>> m.contains(2, 21)
    True
    """
    # Validate the inputs, then delegate to the shared comparison
    # machinery in 'check' mode.
    valid_order = self._validate_order(order)
    valid_cell = self._validate_cell(valid_order, cell)
    return self._compare_operation(
        valid_order, valid_cell, include_smaller, 'check')
python
{ "resource": "" }
q45871
MOC._compare_operation
train
def _compare_operation(self, order, cell, include_smaller, operation): """General internal method for comparison-based operations. This is a private method, and does not update the normalized flag. """ # Check for a larger cell (lower order) which contains the # given cell. for order_i in range(0, order): shift = 2 * (order - order_i) cell_i = cell >> shift if cell_i in self._orders[order_i]: if operation == 'check': return True elif operation == 'remove': # Remove the cell and break it into its 4 constituent # cells. Those which actually match the area we are # trying to remove will be removed at the next stage. self._orders[order_i].remove(cell_i) self.add(order_i + 1, range(cell_i << 2, (cell_i + 1) << 2)) elif operation == 'inter': return [(order, (cell,))] # Check for the specific cell itself, but only after looking at larger # cells because for the "remove" operation we may have broken up # one of the large cells so that it subsequently matches. if cell in self._orders[order]: if operation == 'check': return True elif operation == 'remove': self._orders[order].remove(cell) elif operation == 'inter': return [(order, (cell,))] result = [] if include_smaller: # Check for a smaller cell (higher order) which is part # of the given cell. for order_i in range(order + 1, MAX_ORDER + 1): shift = 2 * (order_i - order) cells = [] for cell_i in self._orders[order_i]: if (cell_i >> shift) == cell: if operation == 'check': return True elif operation == 'remove' or operation == 'inter': cells.append(cell_i) if operation == 'remove': for cell_i in cells: self._orders[order_i].remove(cell_i) elif operation == 'inter': if cells: result.append((order_i, cells)) if operation == 'check': return False elif operation == 'inter': return result
python
{ "resource": "" }
q45872
MOC.intersection
train
def intersection(self, other):
    """Returns a MOC representing the intersection with another MOC.

    >>> p = MOC(2, (3, 4, 5))
    >>> q = MOC(2, (4, 5, 6))
    >>> p.intersection(q)
    <MOC: [(2, [4, 5])]>
    """
    result = MOC()
    # For every cell of the other MOC, collect the parts of this MOC
    # which overlap it and accumulate them in the result.
    for (order, other_cells) in other:
        for other_cell in other_cells:
            overlaps = self._compare_operation(
                order, other_cell, True, 'inter')
            for overlap in overlaps:
                result.add(*overlap)
    return result
python
{ "resource": "" }
q45873
MOC.normalize
train
def normalize(self, max_order=MAX_ORDER):
    """Ensure that the MOC is "well-formed".

    This structures the MOC as is required for the FITS and JSON
    representation.  This method is invoked automatically when
    writing to these formats.

    The number of cells in the MOC will be minimized, so that no area
    of the sky is covered multiple times by cells at different orders,
    and if all four neighboring cells are present at an order (other
    than order 0), they are merged into their parent cell at the next
    lower order.

    >>> m = MOC(1, (0, 1, 2, 3))
    >>> m.cells
    4
    >>> m.normalize()
    >>> m.cells
    1
    """
    max_order = self._validate_order(max_order)

    # If the MOC is already normalized and we are not being asked
    # to reduce the order, then do nothing.
    if self.normalized and max_order >= self.order:
        return

    # Group the pixels by iterating down from the order.  At each
    # order, where all 4 adjacent pixels are present (or we are above
    # the maximum order) they are replaced with a single pixel in the
    # next lower order.  Otherwise the pixel should appear in the MOC
    # unless it is already represented at a lower order.
    for order in range(self.order, 0, -1):
        pixels = self._orders[order]
        next_pixels = self._orders[order - 1]
        new_pixels = set()

        while pixels:
            pixel = pixels.pop()

            # Look to lower orders to ensure this pixel isn't
            # already covered.
            check_pixel = pixel
            already_contained = True
            for check_order in range(order - 1, -1, -1):
                check_pixel >>= 2
                if check_pixel in self._orders[check_order]:
                    break
            else:
                # The for/else fires only when no lower-order cover
                # was found.
                already_contained = False

            # Check whether this order is above the maximum, or
            # if we have all 4 adjacent pixels.  Also do this if
            # the pixel was already contained at a lower level
            # so that we can avoid checking the adjacent pixels.
            # (pixel ^ 1, ^ 2, ^ 3 enumerate the three siblings
            # sharing this pixel's parent.)
            if (already_contained or
                    (order > max_order) or
                    (((pixel ^ 1) in pixels) and
                     ((pixel ^ 2) in pixels) and
                     ((pixel ^ 3) in pixels))):
                pixels.discard(pixel ^ 1)
                pixels.discard(pixel ^ 2)
                pixels.discard(pixel ^ 3)

                if not already_contained:
                    # Group these pixels by placing the equivalent pixel
                    # for the next order down in the set.
                    next_pixels.add(pixel >> 2)
            else:
                new_pixels.add(pixel)

        if new_pixels:
            # Re-insert the pixels which must stay at this order.
            self._orders[order].update(new_pixels)

    self._normalized = True
python
{ "resource": "" }
q45874
MOC.flattened
train
def flattened(self, order=None, include_smaller=True):
    """Return a flattened pixel collection at a single order.

    Cells at lower orders are expanded into all of their descendant
    cells at the requested order; if ``include_smaller`` is true,
    cells at higher orders are mapped to their ancestor at that order.
    """
    target = self.order if order is None else self._validate_order(order)

    # Start from the cells already stored at the target order.
    flat = set(self[target])

    # Expand each larger cell (lower order) into its descendants at
    # the target order.  Based on the "map" algorithm from Appendix A
    # of the MOC recommendation.
    for low in range(target):
        shift = 2 * (target - low)
        for cell in self[low]:
            flat.update(range(cell << shift, (cell + 1) << shift))

    if include_smaller:
        # Map each smaller cell (higher order) onto its ancestor at
        # the target order.
        for high in range(target + 1, MAX_ORDER + 1):
            shift = 2 * (high - target)
            for cell in self[high]:
                flat.add(cell >> shift)

    return flat
python
{ "resource": "" }
q45875
MOC.read
train
def read(self, filename, filetype=None, include_meta=False, **kwargs):
    """Read data from the given file into the MOC object.

    The cell lists read from the file are added to the current object,
    so a MOC which already contains cells ends up representing the
    union of its current coverage and that from the file.

    The file type can be specified as "fits", "json" or "ascii", with
    "text" allowed as an alias for "ascii".  If the type is not
    specified, then an attempt will be made to guess from the file
    name, or the contents of the file.

    Note that writing to FITS and JSON will cause the MOC to be
    normalized automatically.

    Any additional keyword arguments (kwargs) are passed on to the
    corresponding pymoc.io read functions (read_moc_fits,
    read_moc_json or read_moc_ascii).
    """
    kind = (filetype.lower() if filetype is not None
            else self._guess_file_type(filename))

    if kind == 'fits':
        from .io.fits import read_moc_fits
        read_moc_fits(self, filename, include_meta, **kwargs)
    elif kind == 'json':
        from .io.json import read_moc_json
        read_moc_json(self, filename, **kwargs)
    elif kind == 'ascii' or kind == 'text':
        from .io.ascii import read_moc_ascii
        read_moc_ascii(self, filename, **kwargs)
    else:
        raise ValueError('Unknown MOC file type {0}'.format(kind))
python
{ "resource": "" }
q45876
MOC.write
train
def write(self, filename, filetype=None, **kwargs):
    """Write the coverage data in the MOC object to a file.

    The filetype can be given or left to be inferred as for the read
    method.

    Any additional keyword arguments (kwargs) are passed on to the
    corresponding pymoc.io write functions (write_moc_fits,
    write_moc_json or write_moc_ascii).  This can be used, for
    example, to set overwrite=True (or clobber=True prior to Astropy
    version 2.0) when writing FITS files.
    """
    kind = (filetype.lower() if filetype is not None
            else self._guess_file_type(filename))

    if kind == 'fits':
        from .io.fits import write_moc_fits
        write_moc_fits(self, filename, **kwargs)
    elif kind == 'json':
        from .io.json import write_moc_json
        write_moc_json(self, filename, **kwargs)
    elif kind == 'ascii' or kind == 'text':
        from .io.ascii import write_moc_ascii
        write_moc_ascii(self, filename, **kwargs)
    else:
        raise ValueError('Unknown MOC file type {0}'.format(kind))
python
{ "resource": "" }
q45877
MOC._guess_file_type
train
def _guess_file_type(self, filename): """Attempt to guess the type of a MOC file. Returns "fits", "json" or "ascii" if successful and raised a ValueError otherwise. """ # First attempt to guess from the file name. namelc = filename.lower() if namelc.endswith('.fits') or namelc.endswith('.fit'): return 'fits' elif namelc.endswith('.json'): return 'json' elif namelc.endswith('.txt') or namelc.endswith('.ascii'): return 'ascii' # Otherwise, if the file exists, look at the first character. if isfile(filename): with open(filename, 'r') as f: c = f.read(1) if c == 'S': return 'fits' elif c == '{': return 'json' elif c.isdigit(): return 'ascii' raise ValueError('Unable to determine format of {0}'.format(filename))
python
{ "resource": "" }
q45878
MOC._validate_order
train
def _validate_order(self, order): """Check that the given order is valid.""" try: order = int(order) except ValueError as e: raise TypeError('MOC order must be convertable to int') if not 0 <= order <= MAX_ORDER: raise ValueError( 'MOC order must be in range 0-{0}'.format(MAX_ORDER)) return order
python
{ "resource": "" }
q45879
MOC._validate_cell
train
def _validate_cell(self, order, cell): """Check that the given cell is valid. The order is assumed already to have been validated. """ max_cells = self._order_num_cells(order) try: cell = int(cell) except ValueError as e: raise TypeError('MOC cell must be convertable to int') if not 0 <= cell < max_cells: raise ValueError( 'MOC cell order {0} must be in range 0-{1}'.format( order, max_cells - 1)) return cell
python
{ "resource": "" }
q45880
queues_for_endpoint
train
def queues_for_endpoint(event):
    """Return the list of queues to publish to for a given endpoint.

    :param event: Lambda event that triggered the handler
    :type event: dict
    :return: list of queues for endpoint
    :rtype: :std:term:`list`
    :raises: Exception
    """
    global endpoints  # endpoint config that's templated in by generator
    # Extract the endpoint name outside the try block: the original
    # computed it inside and then referenced it in the bare except
    # handler, raising NameError whenever the event lacked a
    # context/resource-path.
    ep_name = event.get('context', {}).get('resource-path', '').lstrip('/')
    # get endpoint config
    try:
        return endpoints[ep_name]['queues']
    except KeyError:
        # Narrowed handler: only a missing config entry means the
        # endpoint is unknown; other errors should propagate.
        raise Exception('Endpoint not in configuration: /%s' % ep_name)
python
{ "resource": "" }
q45881
msg_body_for_event
train
def msg_body_for_event(event, context):
    """Generate the JSON-serialized message body for an event.

    :param event: Lambda event that triggered the handler
    :type event: dict
    :param context: Lambda function context - see
        http://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html
    :return: JSON-serialized success response
    :rtype: str
    """
    # The actual input data lives in a different place for GET vs POST.
    method = event.get('context', {}).get('http-method', None)
    if method == 'GET':
        data = event.get('params', {}).get('querystring', {})
    else:  # POST
        data = event.get('body-json', {})

    # Build the message to enqueue, stripping anything that cannot be
    # JSON-serialized.
    payload = {
        'data': serializable_dict(data),
        'event': serializable_dict(event),
        'context': serializable_dict(vars(context))
    }
    body = json.dumps(payload, sort_keys=True)
    logger.debug('Message to enqueue: %s', body)
    return body
python
{ "resource": "" }
q45882
handle_event
train
def handle_event(event, context):
    """Do the actual event handling - try to enqueue the request.

    :param event: Lambda event that triggered the handler
    :type event: dict
    :param context: Lambda function context - see
        http://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html
    :return: response describing how many messages were enqueued
    :rtype: dict
    :raises: Exception
    """
    queues = queues_for_endpoint(event)
    # get the message to enqueue
    msg = msg_body_for_event(event, context)

    # connect to SQS API
    conn = boto3.client('sqs')

    # Enqueue into every configured queue; keep going on failure so one
    # bad queue does not block the others (deliberate best-effort).
    msg_ids = []
    failed = 0
    for queue_name in queues:
        try:
            msg_ids.append(try_enqueue(conn, queue_name, msg))
        except Exception:
            failed += 1
            logger.error('Failed enqueueing message in %s:', queue_name,
                         exc_info=1)

    status = 'success'
    fail_str = ''
    if failed > 0:
        status = 'partial'
        fail_str = '; %d failed' % failed

    return {
        'status': status,
        'message': 'enqueued %s messages%s' % (len(msg_ids), fail_str),
        'SQSMessageIds': msg_ids
    }
python
{ "resource": "" }
q45883
try_enqueue
train
def try_enqueue(conn, queue_name, msg):
    """Try to enqueue a message.  If it succeeds, return the message ID.

    :param conn: SQS API connection
    :type conn: :py:class:`botocore:SQS.Client`
    :param queue_name: name of queue to put message in
    :type queue_name: str
    :param msg: JSON-serialized message body
    :type msg: str
    :return: message ID
    :rtype: str
    """
    logger.debug('Getting Queue URL for queue %s', queue_name)
    queue_url = conn.get_queue_url(QueueName=queue_name)['QueueUrl']

    logger.debug('Sending message to queue at: %s', queue_url)
    response = conn.send_message(
        QueueUrl=queue_url,
        MessageBody=msg,
        DelaySeconds=0
    )

    message_id = response['MessageId']
    logger.debug('Enqueued message in %s with ID %s',
                 queue_name, message_id)
    return message_id
python
{ "resource": "" }
q45884
serializable_dict
train
def serializable_dict(d):
    """Return a dict like *d*, but with un-json-serializable values removed.

    Nested dicts are filtered recursively, so partially-serializable
    sub-dicts are kept (minus their bad values).

    :param d: dictionary to filter
    :type d: dict
    :return: copy of ``d`` containing only JSON-serializable values
    :rtype: dict
    """
    newd = {}
    # Iterate items directly instead of d.keys() + repeated indexing.
    for k, v in d.items():
        if isinstance(v, dict):
            newd[k] = serializable_dict(v)
            continue
        try:
            # No need to wrap the value in a throwaway dict to test it.
            json.dumps(v)
        except (TypeError, ValueError):
            # TypeError: unserializable object; ValueError: circular
            # reference.  (The original bare except also hid unrelated
            # bugs such as KeyboardInterrupt.)
            continue  # unserializable
        newd[k] = v
    return newd
python
{ "resource": "" }
q45885
cmdline
train
def cmdline(argv=sys.argv[1:]):
    """Script for rebasing a text file.

    Parses ``argv``, loads the stop words for the requested language
    and prints the rebased contents of the source file.
    """
    # NOTE(review): the default argv is captured once at import time —
    # confirm this entry point is never called after sys.argv changes.
    parser = ArgumentParser(
        description='Rebase a text from his stop words')
    parser.add_argument('language', help='The language used to rebase')
    parser.add_argument('source', help='Text file to rebase')
    options = parser.parse_args(argv)
    factory = StopWordFactory()
    language = options.language
    stop_words = factory.get_stop_words(language, fail_safe=True)
    # Use a context manager so the source file handle is closed
    # promptly (the original left the handle to the garbage collector).
    with open(options.source, 'rb') as source_file:
        content = source_file.read().decode('utf-8')
    print(stop_words.rebase(content))
python
{ "resource": "" }
q45886
MongoStorage.create
train
def create(self, data):
    """Create a new entry in the mongo history collection.

    The inserted document is fetched back and logged at debug level.
    """
    inserted_id = self.history.insert_one(data).inserted_id
    # Read the document back purely for debug logging.
    logging.debug(self.history.find_one({"_id": inserted_id}))
python
{ "resource": "" }
q45887
Table.get_url
train
def get_url(self, **kwargs):
    """Return an url, relative to the request associated with this table.

    Any keyword arguments provided are added to the query string,
    replacing existing values.
    """
    request = self._request
    # Rebuild the current URL, merging the new query parameters in
    # under this table's prefix.
    return build(request.path, request.GET, self._meta.prefix, **kwargs)
python
{ "resource": "" }
q45888
Table.rows
train
def rows(self):
    """Return the list of object on the active page."""
    page = self.paginator.page(self._meta.page)
    wrap = lambda obj: self._meta.row_class(self, obj)
    # map() is kept (rather than a list comprehension) to preserve the
    # exact return type.
    return map(wrap, page.object_list)
python
{ "resource": "" }
q45889
SublemonSubprocess.spawn
train
async def spawn(self):
    """Spawn the command wrapped in this object as a subprocess.

    Waits on the server's concurrency semaphore before launching,
    then moves this object from the server's pending set to its
    running set and signals waiters via the began-running event.
    """
    self._server._pending_set.add(self)
    # Block until the server allows another concurrent subprocess.
    await self._server._sem.acquire()
    self._subprocess = await asyncio.create_subprocess_shell(
        self._cmd,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE)
    self._began_at = datetime.now()
    # Membership may have changed while we awaited the semaphore, so
    # re-check before discarding from the pending set.
    if self in self._server._pending_set:
        self._server._pending_set.remove(self)
    self._server._running_set.add(self)
    self._began_running_evt.set()
python
{ "resource": "" }
q45890
SublemonSubprocess.wait_done
train
async def wait_done(self) -> int:
    """Coroutine to wait for subprocess run completion.

    Returns:
        The exit code of the subprocess.

    Raises:
        SublemonLifetimeError: if the subprocess ended with no code.
    """
    await self._done_running_evt.wait()
    if self._exit_code is not None:
        return self._exit_code
    raise SublemonLifetimeError(
        'Subprocess exited abnormally with `None` exit code')
python
{ "resource": "" }
q45891
SublemonSubprocess._poll
train
def _poll(self) -> None:
    """Check the status of the wrapped running subprocess.

    Note:
        This should only be called on currently-running tasks.
    """
    proc = self._subprocess
    if proc is None:
        raise SublemonLifetimeError(
            'Attempted to poll a non-active subprocess')
    if proc.returncode is None:
        # Still running: nothing to do.
        return
    # The subprocess finished: record its exit code, wake any waiters
    # and release our concurrency slot back to the server.
    self._exit_code = proc.returncode
    self._done_running_evt.set()
    self._server._running_set.remove(self)
    self._server._sem.release()
python
{ "resource": "" }
q45892
SublemonSubprocess.stdout
train
async def stdout(self) -> AsyncGenerator[str, None]:
    """Asynchronously yield lines from the subprocess's stdout."""
    await self.wait_running()
    stream = self._subprocess.stdout  # type: ignore
    async for chunk in stream:
        yield chunk
python
{ "resource": "" }
q45893
SublemonSubprocess.stderr
train
async def stderr(self) -> AsyncGenerator[str, None]:
    """Asynchronously yield lines from the subprocess's stderr."""
    await self.wait_running()
    stream = self._subprocess.stderr  # type: ignore
    async for chunk in stream:
        yield chunk
python
{ "resource": "" }
q45894
Task._execute
train
def _execute(self, worker):
    """This method is ASSIGNED during the evaluation to control how
    to resume it once it has been paused.
    """
    self._assert_status_is(TaskStatus.RUNNING)
    # Resolve the operation by name and invoke it with no arguments.
    op = worker.look_up(self.operation)
    op.invoke(self, [], worker=worker)
python
{ "resource": "" }
q45895
Column.value
train
def value(self, cell):
    """Extract the value of ``cell``, ready to be rendered.

    If this Column was instantiated with a ``value`` attribute, it is
    called here to provide the value (for example, to provide a
    calculated value).  Otherwise, ``cell.value`` is returned.
    """
    override = self._value
    if override is None:
        return cell.value
    return override(cell)
python
{ "resource": "" }
q45896
Column.css_class
train
def css_class(self, cell):
    """Return the CSS class for this column.

    ``_css_class`` may be either a plain string or a callable taking
    the cell.
    """
    css = self._css_class
    if isinstance(css, basestring):
        return css
    return css(cell)
python
{ "resource": "" }
q45897
WrappedColumn.sort_url
train
def sort_url(self):
    """Return the URL to sort the linked table by this column.

    If the table is already sorted by this column, the order is
    reversed.  Since there is no canonical URL for a table the current
    URL (via the HttpRequest linked to the Table instance) is reused,
    and any unrelated parameters will be included in the output.
    """
    descending_prefix = "-" if self.sort_direction == "asc" else ""
    return self.table.get_url(order_by=descending_prefix + self.name)
python
{ "resource": "" }
q45898
check_or_confirm_overwrite
train
def check_or_confirm_overwrite(file_name):
    """Check whether it is safe to (over)write ``file_name``.

    Returns True if OK to proceed, False otherwise.

    A file whose first line contains the ':sedge:' marker was
    generated by sedge and may be overwritten silently; any other
    existing file requires user confirmation (and is backed up before
    overwriting).
    """
    try:
        with open(file_name) as fd:
            header = next(fd)
    except FileNotFoundError:
        # BUGFIX: a missing output file means there is nothing to
        # overwrite, so it is safe to proceed.  (The original fell
        # through and returned None here, making the caller abort even
        # though writing a brand-new file is harmless.)
        return True
    except StopIteration as e:
        # Empty file: report it and refuse, as before (falsy result).
        click.echo(repr(e), err=True)
        return None

    if ':sedge:' not in header:
        okay = ask_overwrite(file_name)
        if okay:
            backup_file(file_name)
        else:
            return False
    return True
python
{ "resource": "" }
q45899
update
train
def update(config):
    """Update ssh config from sedge specification.

    Reads the sedge spec named in ``config.config_file``, renders it
    either to stdout (output file "-") or atomically into
    ``config.output_file`` via a temporary file.
    """
    def write_to(out):
        # Render the parsed spec through the given ConfigOutput sink.
        engine.output(out)

    config_file = Path(config.config_file)
    if not config_file.is_file():
        click.echo('No file {} '.format(config_file), err=True)
        sys.exit()

    library = KeyLibrary(config.key_directory)
    with config_file.open() as fd:
        engine = SedgeEngine(
            library,
            fd,
            not config.no_verify,
            url=config.config_file)
    if config.output_file == '-':
        # "-" means write straight to stdout and stop.
        write_to(ConfigOutput(sys.stdout))
        return
    if not check_or_confirm_overwrite(config.output_file):
        click.echo('Aborting.', err=True)
        sys.exit(1)
    # Write to a temporary file in the destination directory so the
    # final rename is atomic on the same filesystem.
    tmp_file = NamedTemporaryFile(
        mode='w',
        dir=os.path.dirname(config.output_file),
        delete=False)
    try:
        tmp_file.file.write(sedge_config_header.format(config.config_file))
        write_to(ConfigOutput(tmp_file.file))
        tmp_file.close()
        if config.verbose:
            diff_config_changes(config.output_file, tmp_file.name)
        os.rename(tmp_file.name, config.output_file)
    except:
        # Bare except is deliberate here: clean up the temp file on ANY
        # failure, then re-raise unchanged.
        os.unlink(tmp_file.name)
        raise
python
{ "resource": "" }