_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q38000
DeflateDecompressor.decompress
train
def decompress(self, chunk):
    """Decompress a chunk of data.

    :param bytes chunk: compressed data chunk
    :rtype: bytes
    """
    try:
        try:
            return self._decompressobj.decompress(chunk)
        except zlib.error:
            # ugly hack to work with raw deflate content that may
            # be sent by microsoft servers. For more information, see:
            # http://carsten.codimi.de/gzip.yaws/
            # http://www.port80software.com/200ok/archive/2005/10/31/868.aspx
            # http://www.gzip.org/zlib/zlib_faq.html#faq38
            if not self._first_chunk:
                raise
            # Retry the very first chunk as raw deflate (no zlib header).
            self._decompressobj = zlib.decompressobj(-zlib.MAX_WBITS)
            return self._decompressobj.decompress(chunk)
    finally:
        # After the first chunk the header format is decided either way.
        self._first_chunk = False
python
{ "resource": "" }
q38001
makeOuputDir
train
def makeOuputDir(outputDir, force):
    """
    Create or check for an output directory.

    @param outputDir: A C{str} output directory name, or C{None}. If C{None},
        a temporary directory is created.
    @param force: If C{True}, allow overwriting of pre-existing files.
    @return: The C{str} output directory name.
    """
    if outputDir:
        if exists(outputDir):
            if not force:
                # Refuse to clobber an existing directory without --force.
                print('Will not overwrite pre-existing files. Use --force to '
                      'make me.', file=sys.stderr)
                sys.exit(1)
        else:
            mkdir(outputDir)
    else:
        # No directory given: fall back to a fresh temporary directory and
        # tell the user where their output went.
        outputDir = mkdtemp()
        print('Writing output files to %s' % outputDir)
    return outputDir
python
{ "resource": "" }
q38002
samtoolsMpileup
train
def samtoolsMpileup(outFile, referenceFile, alignmentFile, executor):
    """
    Run samtools mpileup to generate VCF output.

    @param outFile: The C{str} name to write the output to.
    @param referenceFile: The C{str} name of the FASTA file with the
        reference sequence.
    @param alignmentFile: The C{str} name of the SAM or BAM alignment file.
    @param executor: An C{Executor} instance.
    """
    command = 'samtools mpileup -u -v -f %s %s > %s' % (
        referenceFile, alignmentFile, outFile)
    executor.execute(command)
python
{ "resource": "" }
q38003
bcftoolsMpileup
train
def bcftoolsMpileup(outFile, referenceFile, alignmentFile, executor):
    """
    Run bcftools mpileup to generate VCF output.

    @param outFile: The C{str} name to write the output to.
    @param referenceFile: The C{str} name of the FASTA file with the
        reference sequence.
    @param alignmentFile: The C{str} name of the SAM or BAM alignment file.
    @param executor: An C{Executor} instance.
    """
    command = 'bcftools mpileup -Ov -f %s %s > %s' % (
        referenceFile, alignmentFile, outFile)
    executor.execute(command)
python
{ "resource": "" }
q38004
bcftoolsConsensus
train
def bcftoolsConsensus(outFile, vcfFile, id_, referenceFile, executor):
    """
    Use bcftools to extract consensus FASTA.

    @param outFile: The C{str} name to write the output to.
    @param vcfFile: The C{str} name of the VCF file with the calls from
        the pileup.
    @param id_: The C{str} identifier to use in the resulting FASTA sequence.
    @param referenceFile: The C{str} name of the FASTA file with the
        reference sequence.
    @param executor: An C{Executor} instance.
    """
    compressed = vcfFile + '.gz'
    # bcftools consensus needs a bgzipped, tabix-indexed VCF.
    for command in (
            'bgzip -c %s > %s' % (vcfFile, compressed),
            'tabix %s' % compressed,
            'bcftools consensus %s < %s | '
            'filter-fasta.py --idLambda \'lambda id: "%s"\' > %s' % (
                compressed, referenceFile, id_, outFile)):
        executor.execute(command)
python
{ "resource": "" }
q38005
vcfutilsConsensus
train
def vcfutilsConsensus(outFile, vcfFile, id_, _, executor):
    """
    Use vcfutils.pl to extract consensus FASTA.

    @param outFile: The C{str} name to write the output to.
    @param vcfFile: The C{str} name of the VCF file with the calls from
        the pileup.
    @param id_: The C{str} identifier to use in the resulting FASTA sequence.
    @param _: Ignored. Present so all consensus functions share a common
        signature (other implementations receive a reference FASTA file
        here).
    @param executor: An C{Executor} instance.
    """
    executor.execute(
        'vcfutils.pl vcf2fq < %s | '
        'filter-fasta.py --fastq --quiet --saveAs fasta '
        '--idLambda \'lambda id: "%s"\' > %s' % (vcfFile, id_, outFile))
python
{ "resource": "" }
q38006
Ctx.translator
train
def translator(self):
    """Get a valid translator object from one or several languages names.

    Lazily builds (and caches on ``self._translator``) a chain of gettext
    translators: biryani's catalog, then the country package's catalog,
    then the application package's own catalog, each falling back to the
    previous one.
    """
    if self._translator is None:
        languages = self.lang
        if not languages:
            # No language requested: identity translations.
            return gettext.NullTranslations()
        if not isinstance(languages, list):
            languages = [languages]
        translator = gettext.NullTranslations()
        # Build the fallback chain from most generic to most specific.
        for name, i18n_dir in [
                (
                    'biryani',
                    os.path.join(pkg_resources.get_distribution('biryani').location,
                        'biryani', 'i18n'),
                ),
                (
                    conf['country_package'].replace('_', '-'),
                    os.path.join(pkg_resources.get_distribution(conf['country_package']).location,
                        conf['country_package'], 'i18n'),
                ),
                ]:
            if i18n_dir is not None:
                translator = new_translator(name, i18n_dir, languages, fallback = translator)
        # The application's own catalog takes highest precedence.
        translator = new_translator(conf['package_name'], conf['i18n_dir'], languages,
            fallback = translator)
        self._translator = translator
    return self._translator
python
{ "resource": "" }
q38007
LineageFetcher.lineage
train
def lineage(self, title):
    """
    Get lineage information from the taxonomy database for a given title.

    @param title: A C{str} sequence title (e.g., from a BLAST hit). Of the
        form 'gi|63148399|gb|DQ011818.1| Description...'. It is the gi
        number (63148399 in this example) that is looked up in the
        taxonomy database.
    @return: A C{list} of the taxonomic categories of the title. Each list
        element is an (C{int}, C{str}) 2-tuple, giving a taxonomy id and
        a scientific name. The first element in the list will correspond to
        C{title}, and each successive element is the parent of the
        preceeding one. If no taxonomy is found, the returned list will be
        empty.
    """
    if title in self._cache:
        return self._cache[title]
    lineage = []
    # The GI number is the second '|'-separated field of the title.
    gi = int(title.split('|')[1])
    query = 'SELECT taxID from gi_taxid where gi = %d' % gi
    try:
        while True:
            self._cursor.execute(query)
            taxID = self._cursor.fetchone()[0]
            # Taxonomy id 1 is the root of the tree; stop there.
            if taxID == 1:
                break
            query = 'SELECT name from names where taxId = %s' % taxID
            self._cursor.execute(query)
            scientificName = self._cursor.fetchone()[0]
            lineage.append((taxID, scientificName))
            # Move up to the parent.
            query = ('SELECT parent_taxID from nodes where taxID = %s' %
                     taxID)
    except TypeError:
        # fetchone() returned None (no matching row), so subscripting it
        # raised TypeError: the taxonomy is unknown for this title.
        lineage = []
    self._cache[title] = lineage
    return lineage
python
{ "resource": "" }
q38008
LineageFetcher.close
train
def close(self):
    """
    Close the database connection and render self invalid. Any subsequent
    re-use of self will raise an error.
    """
    self._cursor.close()
    self._db.close()
    # Drop all state so accidental reuse fails fast with an AttributeError.
    self._cursor = None
    self._db = None
    self._cache = None
python
{ "resource": "" }
q38009
retry_handler
train
def retry_handler(retries=0, delay=timedelta(), conditions=None):
    """
    A simple wrapper function that creates a handler function by using on the
    retry_loop function.

    Args:
        retries (Integral): The number of times to retry if a failure occurs.
        delay (timedelta, optional, 0 seconds): A timedelta representing
            the amount time to delay between retries.
        conditions (list): A list of retry conditions. Defaults to no
            conditions.

    Returns:
        function: The retry_loop function partialed.
    """
    # A mutable default ([]) would be shared across every call site;
    # normalize a None sentinel to a fresh empty list instead.
    conditions = [] if conditions is None else conditions
    delay_in_seconds = delay.total_seconds()
    return partial(retry_loop, retries, delay_in_seconds, conditions)
python
{ "resource": "" }
q38010
retry
train
def retry(retries=0, delay=timedelta(), conditions=None):
    """
    A decorator for making a function that retries on failure.

    Args:
        retries (Integral): The number of times to retry if a failure occurs.
        delay (timedelta, optional, 0 seconds): A timedelta representing
            the amount of time to delay between retries.
        conditions (list): A list of retry conditions. Defaults to no
            conditions.
    """
    # A mutable default ([]) would be shared across all decorated
    # functions; use None as the sentinel and create a fresh list here.
    conditions = [] if conditions is None else conditions
    delay_in_seconds = delay.total_seconds()

    def decorator(function):
        """The actual decorator for retrying."""
        @wraps(function)
        def wrapper(*args, **kwargs):
            """The actual wrapper for retrying."""
            func = partial(function, *args, **kwargs)
            return retry_loop(retries, delay_in_seconds, conditions, func)
        return wrapper
    return decorator
python
{ "resource": "" }
q38011
retry_loop
train
def retry_loop(retries, delay_in_seconds, conditions, function):
    """
    Actually performs the retry loop used by the retry decorator and handler
    functions. Failures for retrying are defined by the RetryConditions
    passed in. If the maximum number of retries has been reached then it
    raises the most recent error or a ValueError on the most recent result
    value.

    Args:
        retries (Integral): Maximum number of times to retry.
        delay_in_seconds (Integral): Number of seconds to wait between
            retries.
        conditions (list): A list of retry conditions the can trigger a
            retry on a return value or exception.
        function (function): The function to wrap.

    Returns:
        value: The return value from function

    Raises:
        TypeError: if `retries` is not integral or `delay_in_seconds` is
            negative.
        ValueError: if retries are exhausted without an exception having
            been raised.
    """
    if not isinstance(retries, Integral):
        raise TypeError(retries)
    if delay_in_seconds < 0:
        # NOTE: arguably a ValueError, but TypeError is kept for backward
        # compatibility with callers that catch it.
        raise TypeError(delay_in_seconds)
    attempts = 0
    value = None
    err = None
    while attempts <= retries:
        try:
            value = function()
            for condition in conditions:
                if condition.on_value(value):
                    break
            else:
                # No condition requested a retry on this value: success.
                return value
        except Exception as exc:
            err = exc
            for condition in conditions:
                if condition.on_exception(exc):
                    break
            else:
                # No condition matched this exception: propagate it.
                raise
        attempts += 1
        if attempts <= retries:
            # Only sleep when another attempt will actually be made
            # (previously the loop slept once more after the final try).
            sleep(delay_in_seconds)
    if err:
        raise err
    raise ValueError(
        "Max retries ({}) reached and the return value is still {}."
        .format(attempts, value)
    )
python
{ "resource": "" }
q38012
DiamondTabularFormatReader.saveAsJSON
train
def saveAsJSON(self, fp, writeBytes=False):
    """
    Write the records out as JSON. The first JSON object saved contains
    information about the DIAMOND algorithm.

    @param fp: A C{str} file pointer to write to.
    @param writeBytes: If C{True}, the JSON will be written out as bytes
        (not strings). This is required when we are writing to a BZ2 file.
    """
    # Pick one writer up front instead of branching inside the loop.
    if writeBytes:
        def write(obj):
            fp.write(dumps(obj, sort_keys=True).encode('UTF-8'))
            fp.write(b'\n')
    else:
        def write(obj):
            fp.write(six.u(dumps(obj, sort_keys=True)))
            fp.write(six.u('\n'))

    write(self.params)
    for record in self.records():
        write(record)
python
{ "resource": "" }
q38013
object_as_dict
train
def object_as_dict(obj):
    """Turn an SQLAlchemy model into a dict of field names and values.

    Based on https://stackoverflow.com/a/37350445/1579058
    """
    mapper = inspect(obj).mapper
    result = {}
    for attr in mapper.column_attrs:
        result[attr.key] = getattr(obj, attr.key)
    return result
python
{ "resource": "" }
q38014
SQLAReference.fetch_object
train
def fetch_object(self, model_id):
    """Fetch the model by its ID.

    Raises ReferenceNotFoundError when no row matches `model_id`.
    """
    pk_column = getattr(self.object_class, self.pk_field)
    query = self.object_class.query.filter(pk_column == model_id)
    instance = query.one_or_none()
    if not instance:
        raise ReferenceNotFoundError
    return instance
python
{ "resource": "" }
q38015
init_ixe
train
def init_ixe(logger, host, port=4555, rsa_id=None):
    """
    Connect to Tcl Server and Create IxExplorer object.

    :param logger: python logger object
    :param host: host (IxTclServer) IP address
    :param port: Tcl Server port
    :param rsa_id: full path to RSA ID file for Linux based IxVM
    :return: IXE object
    """
    tcl_client = TclClient(logger, host, port, rsa_id)
    hal_api = IxTclHalApi(tcl_client)
    return IxeApp(logger, hal_api)
python
{ "resource": "" }
q38016
IxeApp.connect
train
def connect(self, user=None):
    """
    Connect to host.

    :param user: if user - login session.
    """
    handler = self.api._tcl_handler
    handler.connect()
    if user:
        self.session.login(user)
python
{ "resource": "" }
q38017
IxeApp.add
train
def add(self, chassis):
    """
    add chassis.

    :param chassis: chassis IP address.
    """
    # Chain index is 1-based and assigned before the chassis is stored.
    chain_index = len(self.chassis_chain) + 1
    new_chassis = IxeChassis(self.session, chassis, chain_index)
    self.chassis_chain[chassis] = new_chassis
    new_chassis.connect()
python
{ "resource": "" }
q38018
IxeSession.wait_for_up
train
def wait_for_up(self, timeout=16, ports=None):
    """
    Wait until ports reach up state.

    :param timeout: seconds to wait.
    :param ports: list of ports to wait for; if None, wait for all ports
        in the session.
    :raises TgnError: if some ports are still down when timeout expires.
    """
    if ports is None:
        # Previously omitting `ports` crashed with a TypeError when
        # iterating None; default to all session ports, matching the
        # other waiters in this class.
        ports = self.ports.values()
    port_list = [self.set_ports_list(port) for port in ports]
    t_end = time.time() + timeout
    ports_in_up = set()
    while time.time() < t_end:
        # ixCheckLinkState can take few seconds on some ports when link
        # is down.
        for port in port_list:
            if self.api.call('ixCheckLinkState {}'.format(port)) == '0':
                ports_in_up.add('{}'.format(port))
        if len(port_list) == len(ports_in_up):
            return
        time.sleep(1)
    ports_not_in_up = [port for port in port_list if port not in ports_in_up]
    raise TgnError('{}'.format(ports_not_in_up))
python
{ "resource": "" }
q38019
IxeSession.start_transmit
train
def start_transmit(self, blocking=False, start_packet_groups=True, *ports):
    """ Start transmit on ports.

    :param blocking: True - wait for traffic end, False - return after
        traffic start.
    :param start_packet_groups: True - clear time stamps and start collecting
        packet groups stats, False - don't.
    :param ports: list of ports to start traffic on, if empty start on all
        ports.
    """
    port_list = self.set_ports_list(*ports)
    if start_packet_groups:
        # Packet-group stats are cleared/started on *all* session ports,
        # not only the ports that are about to transmit.
        port_list_for_packet_groups = self.ports.values()
        port_list_for_packet_groups = self.set_ports_list(*port_list_for_packet_groups)
        self.api.call_rc('ixClearTimeStamp {}'.format(port_list_for_packet_groups))
        self.api.call_rc('ixStartPacketGroups {}'.format(port_list_for_packet_groups))
    self.api.call_rc('ixStartTransmit {}'.format(port_list))
    # Give the chassis a moment to actually start transmitting before
    # returning (or before polling for completion below).
    time.sleep(0.2)
    if blocking:
        self.wait_transmit(*ports)
python
{ "resource": "" }
q38020
IxeSession.start_packet_groups
train
def start_packet_groups(self, clear_time_stamps=True, *ports):
    """ Start packet groups on ports.

    :param clear_time_stamps: True - clear time stamps, False - don't.
    :param ports: list of ports to start traffic on, if empty start on all
        ports.
    """
    ports_str = self.set_ports_list(*ports)
    commands = ['ixStartPacketGroups {}'.format(ports_str)]
    if clear_time_stamps:
        # Time stamps must be cleared before group collection starts.
        commands.insert(0, 'ixClearTimeStamp {}'.format(ports_str))
    for command in commands:
        self.api.call_rc(command)
python
{ "resource": "" }
q38021
IxeSession.stop_transmit
train
def stop_transmit(self, *ports):
    """ Stop traffic on ports.

    :param ports: list of ports to stop traffic on, if empty start on all
        ports.
    """
    ports_str = self.set_ports_list(*ports)
    self.api.call_rc('ixStopTransmit {}'.format(ports_str))
    # Short grace period so the chassis actually stops before we return.
    time.sleep(0.2)
python
{ "resource": "" }
q38022
IxeSession.wait_transmit
train
def wait_transmit(self, *ports):
    """ Wait for traffic end on ports.

    :param ports: list of ports to wait for, if empty wait for all ports.
    """
    ports_str = self.set_ports_list(*ports)
    self.api.call_rc('ixCheckTransmitDone {}'.format(ports_str))
python
{ "resource": "" }
q38023
IxeSession.start_capture
train
def start_capture(self, *ports):
    """ Start capture on ports.

    :param ports: list of ports to start capture on, if empty start on all
        ports.
    """
    # Invalidate any stale capture objects left over from a previous run.
    IxeCapture.current_object = None
    IxeCaptureBuffer.current_object = None
    if not ports:
        ports = self.ports.values()
    for port in ports:
        port.captureBuffer = None
    ports_str = self.set_ports_list(*ports)
    self.api.call_rc('ixStartCapture {}'.format(ports_str))
python
{ "resource": "" }
q38024
chr22XY
train
def chr22XY(c):
    """force to name from 1..22, 23, 24, X, Y, M to in chr1..chr22, chrX, chrY, chrM

    str or ints accepted

    >>> chr22XY('1')
    'chr1'
    >>> chr22XY(1)
    'chr1'
    >>> chr22XY('chr1')
    'chr1'
    >>> chr22XY(23)
    'chrX'
    >>> chr22XY(24)
    'chrY'
    >>> chr22XY("X")
    'chrX'
    >>> chr22XY("23")
    'chrX'
    >>> chr22XY("M")
    'chrM'
    """
    name = str(c)
    if name.startswith('chr'):
        name = name[3:]
    # 23 and 24 are the conventional numeric aliases for X and Y.
    name = {'23': 'X', '24': 'Y'}.get(name, name)
    return 'chr' + name
python
{ "resource": "" }
q38025
infer_namespace
train
def infer_namespace(ac):
    """Infer the single namespace of the given accession

    This function is convenience wrapper around infer_namespaces().

    Returns:
      * None if no namespaces are inferred
      * The (single) namespace if only one namespace is inferred
      * Raises an exception if more than one namespace is inferred

    >>> infer_namespace("ENST00000530893.6")
    'ensembl'
    >>> infer_namespace("NM_01234.5")
    'refseq'
    >>> infer_namespace("A2BC19")
    'uniprot'
    >>> infer_namespace("BOGUS99") is None
    True
    """
    namespaces = infer_namespaces(ac)
    if len(namespaces) == 1:
        return namespaces[0]
    if namespaces:
        # More than one plausible namespace: refuse to guess.
        raise BioutilsError("Multiple namespaces possible for {}".format(ac))
    return None
python
{ "resource": "" }
q38026
infer_namespaces
train
def infer_namespaces(ac):
    """infer possible namespaces of given accession based on syntax

    Always returns a list, possibly empty

    >>> infer_namespaces("ENST00000530893.6")
    ['ensembl']
    >>> infer_namespaces("NM_01234.5")
    ['refseq']
    >>> infer_namespaces("A2BC19")
    ['uniprot']
    >>> sorted(infer_namespaces("P12345"))
    ['insdc', 'uniprot']
    """
    matches = []
    for pattern, namespace in ac_namespace_regexps.items():
        if pattern.match(ac):
            matches.append(namespace)
    return matches
python
{ "resource": "" }
q38027
IntervalSet.add
train
def add(self, other):
    """
    Add an Interval to the IntervalSet by taking the union of the given
    Interval object with the existing Interval objects in self.
    This has no effect if the Interval is already represented.

    :param other: an Interval to add to this IntervalSet.
    """
    if other.empty():
        return
    to_add = set()
    for inter in self:
        if inter.overlaps(other):
            # if it overlaps with this interval then the union will be a
            # single interval
            to_add.add(inter.union(other))
    if len(to_add) == 0:
        # other must not overlap with any interval in self (self could be
        # empty!)
        to_add.add(other)
    # Now add the intervals found to self
    if len(to_add) > 1:
        # Several existing intervals overlapped `other`; building an
        # IntervalSet from them unions any that now overlap each other.
        set_to_add = IntervalSet(to_add)
        for el in set_to_add:
            self._add(el)
    elif len(to_add) == 1:
        self._add(to_add.pop())
python
{ "resource": "" }
q38028
IntervalSet.difference
train
def difference(self, other):
    """
    Subtract an Interval or IntervalSet from the intervals in the set.

    :param other: an Interval or IntervalSet to subtract.
    :return: a new IntervalSet with the remaining intervals.
    """
    # Normalize `other` to an IntervalSet so we can iterate uniformly.
    intervals = other if isinstance(other, IntervalSet) else IntervalSet((other,))
    result = IntervalSet()
    for left in self:
        for right in intervals:
            # NOTE(review): after the first subtraction `left` may become
            # an IntervalSet, in which case subsequent `left - right`
            # iterations subtract from that set — assumes Interval and
            # IntervalSet both support `-`; confirm in their definitions.
            left = left - right
        if isinstance(left, IntervalSet):
            for interval in left:
                result.add(interval)
        else:
            result.add(left)
    return result
python
{ "resource": "" }
q38029
Notifications.send_notification
train
def send_notification(self, subject="", message="", sender="", source=None, actions=None):
    """
    Sends a notification. Blocks as long as necessary.

    :param subject: The subject.
    :type subject: str
    :param message: The message.
    :type message: str
    :param sender: The sender.
    :type sender: str
    :param source: The source of the notification
    :type source: .LegacyNotification.Source
    :param actions: Actions to be sent with a notification (list of
        TimelineAction objects)
    :type actions: list
    """
    # Firmware older than 3.x only understands the legacy format.
    is_legacy = self._pebble.firmware_version.major < 3
    if is_legacy:
        self._send_legacy_notification(subject, message, sender, source)
    else:
        self._send_modern_notification(subject, message, sender, source, actions)
python
{ "resource": "" }
q38030
DjipsumFields.randomBinaryField
train
def randomBinaryField(self):
    """
    Return random bytes format.
    """
    candidates = [
        b"hello world",
        b"this is bytes",
        b"awesome django",
        b"djipsum is awesome",
        b"\x00\x01\x02\x03\x04\x05\x06\x07",
        b"\x0b\x0c\x0e\x0f",
    ]
    return self.randomize(candidates)
python
{ "resource": "" }
q38031
DjipsumFields.randomUUIDField
train
def randomUUIDField(self):
    """
    Return the unique uuid from uuid1, uuid3, uuid4, or uuid5.
    """
    names = ['python', 'django', 'awesome']
    candidates = [
        uuid.uuid1().hex,
        uuid.uuid3(uuid.NAMESPACE_URL, self.randomize(names)).hex,
        uuid.uuid4().hex,
        uuid.uuid5(uuid.NAMESPACE_DNS, self.randomize(names)).hex,
    ]
    return self.randomize(candidates)
python
{ "resource": "" }
q38032
trade_day
train
def trade_day(dt, cal='US'):
    """
    Latest trading day w.r.t given dt

    Args:
        dt: date of reference
        cal: trading calendar

    Returns:
        pd.Timestamp: last trading day

    Examples:
        >>> trade_day('2018-12-25').strftime('%Y-%m-%d')
        '2018-12-24'
    """
    from xone import calendar
    ref_date = pd.Timestamp(dt).date()
    # A 10-day lookback window is guaranteed to contain a trading session.
    sessions = calendar.trading_dates(
        start=ref_date - pd.Timedelta('10D'), end=ref_date, calendar=cal)
    return sessions[-1]
python
{ "resource": "" }
q38033
align_data
train
def align_data(*args):
    """
    Resample and align data for defined frequency

    Columns of the i-th input are renamed to `<col>_<i+1>`; rows are
    aligned on the union of indexes, columns from the second input onward
    are forward-filled, and rows missing data for the first input are
    dropped.

    Args:
        *args: DataFrame of data to be aligned

    Returns:
        pd.DataFrame: aligned data with renamed columns
    """
    renamed = []
    for num, frame in enumerate(args, start=1):
        # Drop duplicated index entries, keeping the first occurrence.
        deduped = frame.loc[~frame.index.duplicated(keep='first')]
        renamed.append(
            deduped.rename(columns=lambda vv, n=num: '%s_%d' % (vv, n)))
    res = pd.DataFrame(pd.concat(renamed, axis=1))
    data_cols = [col for col in res.columns if col[-2:] == '_1']
    other_cols = [col for col in res.columns if col[-2:] != '_1']
    # ffill() replaces the deprecated fillna(method='pad'), which is
    # removed in recent pandas versions.
    res.loc[:, other_cols] = res.loc[:, other_cols].ffill()
    return res.dropna(subset=data_cols)
python
{ "resource": "" }
q38034
cat_data
train
def cat_data(data_kw):
    """
    Concatenate data with ticker as sub column index

    Args:
        data_kw: key = ticker, value = pd.DataFrame

    Returns:
        pd.DataFrame: columns form a MultiIndex of (ticker, original column)
    """
    if not data_kw:
        return pd.DataFrame()
    pieces = []
    for ticker, data in data_kw.items():
        # Push the ticker into the column index: (ticker, column).
        piece = (
            data.assign(ticker=ticker)
            .set_index('ticker', append=True)
            .unstack('ticker')
            .swaplevel(0, 1, axis=1)
        )
        pieces.append(piece)
    return pd.DataFrame(pd.concat(pieces, axis=1))
python
{ "resource": "" }
q38035
to_frame
train
def to_frame(data_list, exc_cols=None, **kwargs):
    """
    Dict in Python 3.6 keeps insertion order, but cannot be relied upon
    This method is to keep column names in order
    In Python 3.7 this method is redundant

    Args:
        data_list: list of dict
        exc_cols: exclude columns

    Returns:
        pd.DataFrame

    Example:
        >>> d_list = [
        ...     dict(sid=1, symbol='1 HK', price=89),
        ...     dict(sid=700, symbol='700 HK', price=350)
        ... ]
        >>> list(to_frame(d_list).columns)
        ['sid', 'symbol', 'price']
        >>> list(to_frame(d_list, exc_cols=['price']).columns)
        ['sid', 'symbol']
    """
    from collections import OrderedDict
    drop = [] if exc_cols is None else exc_cols
    # OrderedDict pins the key order before DataFrame infers columns.
    ordered = pd.Series(data_list).apply(OrderedDict).tolist()
    return pd.DataFrame(ordered, **kwargs).drop(columns=drop)
python
{ "resource": "" }
q38036
spline_curve
train
def spline_curve(x, y, step, val_min=0, val_max=None, kind='quadratic', **kwargs):
    """
    Fit spline curve for given x, y values

    Args:
        x: x-values
        y: y-values (pd.Series, or pd.DataFrame to fit column by column)
        step: step size for interpolation
        val_min: minimum value of result
        val_max: maximum value of result
        kind: interpolation kind for scipy.interpolate.interp1d
            ('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',
            'previous', 'next', or an integer spline order)
        **kwargs: additional parameters for interp1d

    Returns:
        pd.Series (or pd.DataFrame): fitted curve clipped to
        [val_min, val_max]
    """
    from collections import OrderedDict
    from scipy.interpolate import interp1d

    if isinstance(y, pd.DataFrame):
        # Fit each column independently and reassemble in column order.
        fitted = OrderedDict(
            (col, spline_curve(x, y.loc[:, col], step=step, val_min=val_min,
                               val_max=val_max, kind=kind))
            for col in y.columns
        )
        return pd.DataFrame(fitted)

    curve = interp1d(x, y, kind=kind, **kwargs)
    # Half-step slack so the endpoint x.max() is included in the grid.
    grid = np.arange(x.min(), x.max() + step / 2., step=step)
    series = pd.Series(grid, index=grid, name=getattr(y, 'name', None))
    return series.apply(curve).clip(val_min, val_max)
python
{ "resource": "" }
q38037
format_float
train
def format_float(digit=0, is_pct=False):
    """
    Number display format for pandas

    Args:
        digit: number of digits to keep
               if negative, add one space in front of positive pct
        is_pct: % display

    Returns:
        lambda function to format floats

    Examples:
        >>> format_float(0)(1e5)
        '100,000'
        >>> format_float(1)(1e5)
        '100,000.0'
        >>> format_float(-1, True)(.2)
        ' 20.0%'
        >>> format_float(-1, True)(-.2)
        '-20.0%'
        >>> pd.options.display.float_format = format_float(2)
    """
    if is_pct:
        pad = ' ' if digit < 0 else ''
        fmt = f'{{:{pad}.{abs(int(digit))}%}}'

        def formatter(vv):
            return 'NaN' if np.isnan(vv) else fmt.format(vv)
    else:
        num_fmt = f'{{:,.{digit}f}}'
        # Zero is rendered as a dash padded to the same width.
        zero = '-' + ' ' * abs(digit)

        def formatter(vv):
            if np.isnan(vv):
                return 'NaN'
            return num_fmt.format(vv) if vv else zero
    return formatter
python
{ "resource": "" }
q38038
SQLConstructor.join
train
def join(self, source, op='LEFT JOIN', on=''):
    """
    Join `source`.

    >>> sc = SQLConstructor('main', ['c1', 'c2'])
    >>> sc.join('sub', 'JOIN', 'main.id = sub.id')
    >>> (sql, params, keys) = sc.compile()
    >>> sql
    'SELECT c1, c2 FROM main JOIN sub ON main.id = sub.id'

    It is possible to pass another `SQLConstructor` as a source.

    >>> sc = SQLConstructor('main', ['c1', 'c2'])
    >>> sc.add_or_matches('{0} = {1}', 'c1', [111])
    >>> subsc = SQLConstructor('sub', ['d1', 'd2'])
    >>> subsc.add_or_matches('{0} = {1}', 'd1', ['abc'])
    >>> sc.join(subsc, 'JOIN', 'main.id = sub.id')
    >>> sc.add_column('d1')
    >>> (sql, params, keys) = sc.compile()
    >>> print(sql)  # doctest: +NORMALIZE_WHITESPACE
    SELECT c1, c2, d1 FROM main
    JOIN ( SELECT d1, d2 FROM sub WHERE (d1 = ?) )
    ON main.id = sub.id
    WHERE (c1 = ?)

    `params` is set appropriately to include parameters for joined source:

    >>> params
    ['abc', 111]

    Note that `subsc.compile` is called when `sc.join(subsc, ...)` is
    called.  Therefore, calling `subsc.add_<predicate>` does not effect
    `sc`.

    :type source: str or SQLConstructor
    :arg source: table
    :type op: str
    :arg op: operation (e.g., 'JOIN')
    :type on: str
    :arg on: on clause.  `source` ("right" source) can be referred
        using `{r}` formatting field.
    """
    if isinstance(source, SQLConstructor):
        # Compile the sub-query now: its SQL is inlined as a derived
        # table and its parameters are queued for the final compile().
        (sql, params, _) = source.compile()
        self.join_params.extend(params)
        jsrc = '( {0} )'.format(sql)
        if source.table_alias:
            jsrc += ' AS ' + source.table_alias
            # `{r}` in the ON clause refers to the sub-query's alias.
            on = on.format(r=source.table_alias)
    else:
        jsrc = source
        # `{r}` in the ON clause refers to the plain table name.
        on = on.format(r=source)
    constraint = 'ON {0}'.format(on) if on else ''
    self.join_source = ' '.join([self.join_source, op, jsrc, constraint])
python
{ "resource": "" }
q38039
SQLConstructor.add_and_matches
train
def add_and_matches(self, matcher, lhs, params, numq=1, flatten=None):
    """ Add AND conditions to match to `params`.

    :type matcher: str or callable
    :arg matcher: if `str`, `matcher.format` is used.
    :type lhs: str
    :arg lhs: the first argument to `matcher`.
    :type params: list
    :arg params: each element should be able to feed into sqlite '?'.
    :type numq: int
    :arg numq: number of parameters for each condition.
    :type flatten: None or callable
    :arg flatten: when `numq > 1`, it should return a list of length
        `numq * len(params)`.
    """
    params = self._adapt_params(params)
    flatten = flatten or self._default_flatten(numq)
    placeholders = ['?'] * numq
    # One identical condition per parameter group, ANDed together.
    condition = adapt_matcher(matcher)(lhs, *placeholders)
    self.conditions.extend(repeat(condition, len(params)))
    self.params.extend(flatten(params))
python
{ "resource": "" }
q38040
SQLConstructor.add_matches
train
def add_matches(self, matcher, lhs, match_params=None, include_params=None,
                exclude_params=None, numq=1, flatten=None):
    """
    Quick way to call `add_or_matches` and `add_and_matches`.

    `match_params` are ANDed, `include_params` are ORed, and
    `exclude_params` are ANDed negated.  Each defaults to no parameters.
    """
    # Mutable list defaults ([]) are shared across calls; use None
    # sentinels and create fresh lists here instead.
    match_params = [] if match_params is None else match_params
    include_params = [] if include_params is None else include_params
    exclude_params = [] if exclude_params is None else exclude_params
    matcher = adapt_matcher(matcher)
    notmatcher = negate(matcher)
    self.add_and_matches(matcher, lhs, match_params, numq, flatten)
    self.add_or_matches(matcher, lhs, include_params, numq, flatten)
    self.add_and_matches(notmatcher, lhs, exclude_params, numq, flatten)
python
{ "resource": "" }
q38041
SQLConstructor.uniquify_by
train
def uniquify_by(self, column, chooser=None, aggregate='MAX'):
    """
    Group by `column` and run `aggregate` function on `chooser` column.
    """
    self.group_by.append(column)
    if chooser:
        # Wrap the chosen column in the aggregate, in place.
        idx = self.columns.index(chooser)
        self.columns[idx] = '{0}({1})'.format(aggregate, self.columns[idx])
python
{ "resource": "" }
q38042
SQLConstructor.move_where_clause_to_column
train
def move_where_clause_to_column(self, column='condition', key=None):
    """
    Move whole WHERE clause to a column named `column`.
    """
    if self.conditions:
        expr = " AND ".join(self.conditions)
        params = self.params
        # The WHERE clause is consumed: reset both lists.
        self.conditions = []
        self.params = []
    else:
        # No conditions: the column is constantly true.
        expr = '1'
        params = []
    self.add_column('({0}) AS {1}'.format(expr, column), key or column, params)
python
{ "resource": "" }
q38043
remove_axis_junk
train
def remove_axis_junk(ax, which=('right', 'top')):
    '''Remove upper and right axis spines and keep ticks on bottom/left.

    Parameters
    ----------
    ax : matplotlib.axes.Axes
        Axes to modify in place.
    which : iterable of str
        Spine locations to hide. Default hides the right and top spines.
        (Changed from a mutable list default to a tuple.)
    '''
    for loc, spine in ax.spines.items():
        if loc in which:
            # Hiding (rather than deleting) the spine keeps layout intact.
            spine.set_color('none')
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
python
{ "resource": "" }
q38044
normalize
train
def normalize(x):
    '''Return a float copy of x shifted to mean 0 and scaled to unit
    standard deviation.'''
    values = x.astype(float)
    centered = values - values.mean()
    # std is shift-invariant, so values.std() == centered.std().
    return centered / float(values.std())
python
{ "resource": "" }
q38045
Indexer.find_record_files
train
def find_record_files(self):
    """
    Yield paths to record files (all .json files under the record path).
    """
    for root, _, files in os.walk(self.record_path):
        json_names = (name for name in files if name.endswith('.json'))
        for name in json_names:
            yield os.path.join(root, name)
python
{ "resource": "" }
q38046
Field.buffer_to_value
train
def buffer_to_value(self, obj, buffer, offset, default_endianness=DEFAULT_ENDIANNESS):
    """
    Converts the bytes in ``buffer`` at ``offset`` to a native Python value.
    Returns that value and the number of bytes consumed to create it.

    :param obj: The parent :class:`.PebblePacket` of this field
    :type obj: .PebblePacket
    :param buffer: The buffer from which to extract a value.
    :type buffer: bytes
    :param offset: The offset in the buffer to start at.
    :type offset: int
    :param default_endianness: The default endianness of the value. Used if
        ``endianness`` was not passed to the :class:`Field` constructor.
    :type default_endianness: str
    :return: (value, length)
    :rtype: (:class:`object`, :any:`int`)
    :raises PacketDecodeError: if the buffer cannot be unpacked with this
        field's struct format, or if the raw value is not a member of the
        field's enum (when one was declared).
    """
    try:
        # Field-level endianness (if set) takes precedence over the
        # packet-level default.
        value, length = struct.unpack_from(str(self.endianness or default_endianness) + self.struct_format,
                                           buffer, offset)[0], struct.calcsize(self.struct_format)
        if self._enum is not None:
            try:
                # Map the raw decoded value onto the declared enum type.
                return self._enum(value), length
            except ValueError as e:
                # Raw value is not a valid member of the enum.
                raise PacketDecodeError("{}: {}".format(self.type, e))
        else:
            return value, length
    except struct.error as e:
        # Buffer too short (or otherwise unsuitable) for the struct format.
        raise PacketDecodeError("{}: {}".format(self.type, e))
python
{ "resource": "" }
q38047
Field.value_to_bytes
train
def value_to_bytes(self, obj, value, default_endianness=DEFAULT_ENDIANNESS):
    """
    Converts the given value to an appropriately encoded string of bytes
    that represents it.

    :param obj: The parent :class:`.PebblePacket` of this field
    :type obj: .PebblePacket
    :param value: The python value to serialise.
    :param default_endianness: The default endianness of the value. Used if
        ``endianness`` was not passed to the :class:`Field` constructor.
    :type default_endianness: str
    :return: The serialised value
    :rtype: bytes
    """
    # Field-level endianness, when present, wins over the packet default.
    fmt = str(self.endianness or default_endianness) + self.struct_format
    return struct.pack(fmt, value)
python
{ "resource": "" }
q38048
profile
train
def profile(func):
    """
    Decorator to profile functions with cProfile

    Args:
        func: python function

    Returns:
        wrapped function; on each call it runs `func` under cProfile,
        prints a cumulative-time profile report to stdout, and returns
        `func`'s result unchanged.

    References:
        https://osf.io/upav8/
    """
    # Local import keeps this block self-contained.
    from functools import wraps

    @wraps(func)  # preserve the wrapped function's __name__/__doc__
    def inner(*args, **kwargs):
        pr = cProfile.Profile()
        pr.enable()
        res = func(*args, **kwargs)
        pr.disable()
        s = io.StringIO()
        # Sort by cumulative time so the top-level hot paths come first.
        ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')
        ps.print_stats()
        print(s.getvalue())
        return res
    return inner
python
{ "resource": "" }
q38049
EjabberdXMLRPCBackend.rpc
train
def rpc(self, cmd, **kwargs):
    """Generic helper function to call an RPC method.

    :param cmd: name of the XML-RPC method to invoke on ``self.client``.
    :param kwargs: passed to the method as a single argument struct.
    :raises BackendConnectionError: if the backend is unreachable at the
        socket level.
    :raises BackendError: on XML-RPC protocol errors or truncated HTTP
        responses (``BadStatusLine``); the original error is logged.
    """
    func = getattr(self.client, cmd)
    try:
        if self.credentials is None:
            return func(kwargs)
        else:
            # Authenticated call: the credentials struct is passed first.
            return func(self.credentials, kwargs)
    except socket.error as e:
        raise BackendConnectionError(e)
    except (xmlrpclib.ProtocolError, BadStatusLine) as e:
        log.error(e)
        raise BackendError("Error reaching backend.")
python
{ "resource": "" }
q38050
_parse_chemical_equation
train
def _parse_chemical_equation(value):
    """
    Parse the chemical equation mini-language.

    See the docstring of `ChemicalEquation` for more.

    Parameters
    ----------
    value : `str`
        A string in chemical equation mini-language.

    Returns
    -------
    mapping
        A mapping in the format specified by the mini-language (see notes
        on `ChemicalEquation`).

    Examples
    --------
    >>> from pyrrole.core import _parse_chemical_equation
    >>> parsed = _parse_chemical_equation('4 A + 3 B <- 2 C + D')
    >>> parsed['arrow']
    '->'
    >>> parsed['products'][1]['species']
    'B'
    >>> parsed['reactants'][0]['coefficient']
    2
    """
    # Grammar: one of three arrow tokens separates two "+"-joined lists of
    # (optional integer coefficient, optional "*", species-name) groups.
    arrow = _pp.oneOf('-> <- <=>').setResultsName('arrow')
    species = _pp.Word(_pp.printables).setResultsName('species')
    # Missing coefficients default to 1; tokens are converted to int.
    coefficient = (_pp.Optional(_pp.Word(_pp.nums), default=1)
                   .setParseAction(_pp.tokenMap(int))
                   .setResultsName('coefficient'))
    group_ = _pp.Group(coefficient + _pp.Optional(_pp.Suppress('*')) + species)
    reactants = ((group_ + _pp.ZeroOrMore(_pp.Suppress('+') + group_))
                 .setResultsName('reactants'))
    products = ((group_ + _pp.ZeroOrMore(_pp.Suppress('+') + group_))
                .setResultsName('products'))
    grammar = reactants + arrow + products
    parsed = grammar.parseString(value).asDict()

    # Normalize reverse reactions: "<-" is canonicalized to "->" with the
    # two sides swapped, so callers only ever see "->" or "<=>".
    if parsed['arrow'] == '<-':
        parsed['reactants'], parsed['products'] \
            = parsed['products'], parsed['reactants']
        parsed['arrow'] = '->'

    return parsed
python
{ "resource": "" }
q38051
_get_chemical_equation_piece
train
def _get_chemical_equation_piece(species_list, coefficients): """ Produce a string from chemical species and their coefficients. Parameters ---------- species_list : iterable of `str` Iterable of chemical species. coefficients : iterable of `float` Nonzero stoichiometric coefficients. The length of `species_list` and `coefficients` must be the same. Negative values are made positive and zeros are ignored along with their respective species. Examples -------- >>> from pyrrole.core import _get_chemical_equation_piece >>> _get_chemical_equation_piece(["AcOH"], [2]) '2 AcOH' >>> _get_chemical_equation_piece(["AcO-", "H+"], [-1, -1]) 'AcO- + H+' >>> _get_chemical_equation_piece("ABCD", [-2, -1, 0, -1]) '2 A + B + D' """ def _get_token(species, coefficient): if coefficient == 1: return '{}'.format(species) else: return '{:g} {}'.format(coefficient, species) bag = [] for species, coefficient in zip(species_list, coefficients): if coefficient < 0: coefficient = -coefficient if coefficient > 0: bag.append(_get_token(species, coefficient)) return '{}'.format(' + '.join(bag))
python
{ "resource": "" }
q38052
_check_data
train
def _check_data(data): """ Check a data object for inconsistencies. Parameters ---------- data : `pandas.DataFrame` A `data` object, i.e., a table whose rows store information about chemical species, indexed by chemical species. Warns ----- UserWarning Warned if a ground state species has one or more imaginary vibrational frequencies, or if a transition state species has zero, two or more imaginary vibrational frequencies. Examples -------- >>> import pandas as pd >>> from pyrrole.core import _check_data >>> data = (pd.DataFrame([{'name': 'A', 'vibfreqs': [0., 1., 2.]}, ... {'name': 'B', 'vibfreqs': [0., -1., 2.]}, ... {'name': 'C', 'vibfreqs': [0., -1., -2.]}, ... {'name': 'A#', 'vibfreqs': [0., 1., 2.]}, ... {'name': 'C#', 'vibfreqs': [0., -2., -1.]}, ... {'name': 'B#', 'vibfreqs': [0., -1., 2.]}]) ... .set_index('name')) >>> _check_data(data) """ if "vibfreqs" in data.columns: for species in data.index: vibfreqs = data.loc[species, "vibfreqs"] nimagvibfreqs = _np.sum(_np.array(vibfreqs) < 0) if species[-1] == '#' and nimagvibfreqs != 1: _warnings.warn("'{}' should have 1 imaginary vibfreqs but {} " "found".format(species, nimagvibfreqs)) elif species[-1] != '#' and nimagvibfreqs != 0: _warnings.warn("'{}' should have no imaginary vibfreqs but {} " "found".format(species, nimagvibfreqs))
python
{ "resource": "" }
q38053
_split_chemical_equations
train
def _split_chemical_equations(value):
    """
    Split a string with sequential chemical equations into separate strings.

    Each string in the returned iterable represents a single chemical
    equation of the input; consecutive equations share their middle piece
    (the product of one is the reactant of the next).  See the docstrings
    of `ChemicalEquation` and `ChemicalSystem` for the mini-language.

    Parameters
    ----------
    value : `str`
        A string with sequential chemical equations in the mini-language.

    Returns
    -------
    list of `str`
        One string per chemical equation.

    Examples
    --------
    >>> _split_chemical_equations('A + B -> C + D -> D + E <=> F + G <- H + I')
    ['A + B -> C + D', 'C + D -> D + E', 'D + E <=> F + G', 'F + G <- H + I']
    """
    pieces = _split_arrows(value)
    # pieces alternates side/arrow/side/...; each equation is a
    # (side, arrow, side) window starting at every second index.
    equations = []
    for start in range(0, len(pieces) - 2, 2):
        equations.append(''.join(pieces[start:start + 3]).strip())
    return equations
python
{ "resource": "" }
q38054
ChemicalEquation.to_series
train
def to_series(self, only=None, intensive_columns=("temperature", "pressure"),
              check_data=True):
    """
    Produce a data record for `ChemicalEquation`.

    All possible linear differences for all numeric attributes are
    computed and stored in the returned `pandas.Series` object (see
    examples below). This allows for easy application and manipulation of
    `Hess's law <https://en.wikipedia.org/wiki/Hess%27s_law>`_ to chemical
    equations.

    Parameters
    ----------
    only : ``"reactants"``, ``"products"``, optional
        Instead of the standard behaviour (difference of sums), sum
        numeric attributes of either reactants or products only. If
        given, absolute coefficients are used.
    intensive_columns : iterable of `str`, optional
        A set of column names representing intensive properties (e.g.
        bulk properties) whose values are not summable. Those must be
        constant throughout the chemical equation.
    check_data : `bool`, optional
        Whether to check the data object for inconsistencies.

    Returns
    -------
    series : `pandas.Series`
        Data record of attribute differences, whose name is the canonical
        string representation of the `ChemicalEquation` or, if `only` is
        given, a string representing either reactants or products.

    Raises
    ------
    ValueError
        Raised if `self.data` wasn't defined (e.g. is `None`), if `only`
        is something other than ``"reactants"`` or ``"products"``, or if
        two or more distinct values for an intensive property have been
        found.

    Examples
    --------
    >>> from pyrrole import ChemicalEquation
    >>> from pyrrole.atoms import create_data, read_cclib
    >>> data = create_data(
    ...     read_cclib("data/acetate/acetic_acid.out", "AcOH(g)"),
    ...     read_cclib("data/acetate/acetic_acid@water.out", "AcOH(aq)"))
    >>> equilibrium = ChemicalEquation("AcOH(g) <=> AcOH(aq)", data)
    >>> equilibrium.to_series()
    charge           0.000000
    enthalpy        -0.010958
    entropy         -0.000198
    freeenergy      -0.010759
    mult             0.000000
    natom            0.000000
    nbasis           0.000000
    nmo              0.000000
    pressure         1.000000
    temperature    298.150000
    Name: AcOH(g) <=> AcOH(aq), dtype: float64

    Sums of either reactants or products can be computed:

    >>> equilibrium.to_series("reactants")
    charge           0.000000
    enthalpy      -228.533374
    entropy          0.031135
    freeenergy    -228.564509
    mult             1.000000
    natom            8.000000
    nbasis          68.000000
    nmo             68.000000
    pressure         1.000000
    temperature    298.150000
    Name: AcOH(g), dtype: float64
    """
    if self.data is None:
        # TODO: should an empty Series be returned?
        raise ValueError("data not defined")

    # NOTE: default is now an immutable tuple (was a mutable list default,
    # which is shared across calls); membership tests below are unchanged.
    # TODO: find a way to keep categorical columns. Keep if they match?
    columns = self.data.select_dtypes('number').columns
    # Split numeric columns into intensive (constant, not summable) and
    # extensive (summable with stoichiometric weights).
    intensive_columns = [column for column in columns
                         if column in intensive_columns]
    extensive_columns = [column for column in columns
                         if column not in intensive_columns]
    columns = extensive_columns + intensive_columns

    if only is None:
        species = self.species
    elif only == "reactants":
        species = sorted(self.reactants)
    elif only == "products":
        species = sorted(self.products)
    else:
        raise ValueError("only must be either 'reactants' or 'products' "
                         "('{}' given)".format(only))

    if check_data:
        _check_data(self.data.loc[species])

    if all([s in self.data.index for s in species]):
        # Weighted sum of extensive properties by stoichiometric
        # coefficient (negative for reactants, positive for products).
        series = (self.data.loc[species, extensive_columns]
                  .mul(self.coefficient, axis="index").sum("index"))
        for column in intensive_columns:
            vals = self.data[column].unique()
            if len(vals) > 1:
                raise ValueError("different values for {}: "
                                 "{}".format(column, vals))
            series[column] = vals[0]
    else:
        # Some species are missing from the data: emit an all-NaN record.
        series = _pd.Series(_np.nan, index=columns)

    if only is None:
        name = self.__str__()
    else:
        coefficients = self.coefficient[species]
        name = _get_chemical_equation_piece(species, coefficients)
        if only == "reactants":
            # Reactant coefficients are negative; flip sign so the sum
            # reflects the actual (absolute-coefficient) total.
            series[extensive_columns] = -series[extensive_columns]

    # Avoid negative zero
    # (see https://stackoverflow.com/a/11010791/4039050)
    series = series + 0.
    return series.rename(name)
python
{ "resource": "" }
q38055
ChemicalSystem.to_dataframe
train
def to_dataframe(self, *args, **kwargs):
    """
    Produce a data table with records for all chemical equations.

    All possible differences for numeric attributes are computed (one row
    per `ChemicalEquation`, via `ChemicalEquation.to_series`) and stored
    as columns in the returned `pandas.DataFrame`.  Extra positional and
    keyword arguments are passed straight to
    `ChemicalEquation.to_series`.

    Returns
    -------
    dataframe : `pandas.DataFrame`
        Data table with one record of attribute differences per chemical
        equation, indexed by the equation's string representation under
        the index name ``"chemical_equation"``.

    Examples
    --------
    >>> from pyrrole import ChemicalSystem
    >>> from pyrrole.atoms import create_data, read_cclib
    >>> data = create_data(
    ...     read_cclib("data/acetate/acetic_acid.out", "AcOH(g)"),
    ...     read_cclib("data/acetate/acetic_acid@water.out", "AcOH(aq)"))
    >>> data = data[["enthalpy", "entropy", "freeenergy"]]
    >>> equilibrium = ChemicalSystem("AcOH(g) <=> AcOH(aq)", data)
    >>> equilibrium.to_dataframe()  # doctest: +NORMALIZE_WHITESPACE
                           enthalpy   entropy  freeenergy
    chemical_equation
    AcOH(g) <=> AcOH(aq)  -0.010958 -0.000198   -0.010759
    """
    records = []
    for equation in self.equations:
        records.append(equation.to_series(*args, **kwargs))
    frame = _pd.DataFrame(records)
    frame.index.name = "chemical_equation"
    return frame
python
{ "resource": "" }
q38056
ChemicalSystem.to_digraph
train
def to_digraph(self, *args, **kwargs): """ Compute a directed graph for the chemical system. Returns ------- digraph : `networkx.DiGraph` Graph nodes are reactants and/or products of chemical equations, while edges represent the equations themselves. Double ended edges are used to represent equilibria. Attributes are computed with `ChemicalEquation.to_series` for each equation (see examples below). Notes ----- Further arguments and keywords are passed directly to `ChemicalEquation.to_series`. Examples -------- >>> from pyrrole import ChemicalSystem >>> from pyrrole.atoms import create_data, read_cclib >>> data = create_data( ... read_cclib("data/acetate/acetic_acid.out", "AcOH(g)"), ... read_cclib("data/acetate/acetic_acid@water.out", "AcOH(aq)")) >>> equilibrium = ChemicalSystem("AcOH(g) <=> AcOH(aq)", data) >>> digraph = equilibrium.to_digraph() >>> sorted(digraph.nodes(data='freeenergy')) [('AcOH(aq)', -228.57526805), ('AcOH(g)', -228.56450866)] >>> digraph.number_of_nodes() 2 >>> digraph.number_of_edges() 2 """ # TODO: make test for this digraph = _nx.DiGraph() for equation in self.equations: reactants, arrow, products = [value.strip() for value in _split_arrows(str(equation))] try: attr = equation.to_series("reactants", *args, **kwargs).to_dict() except ValueError: attr = dict() digraph.add_node(reactants, **attr) try: attr = equation.to_series("products", *args, **kwargs).to_dict() except ValueError: attr = dict() digraph.add_node(products, **attr) try: attr = equation.to_series(*args, **kwargs).to_dict() except ValueError: attr = dict() digraph.add_edge(reactants, products, **attr) if arrow == '<=>': digraph.add_edge(products, reactants, **attr) return digraph
python
{ "resource": "" }
q38057
read_cclib
train
def read_cclib(value, name=None):
    """
    Create an `Atoms` object from data attributes parsed by cclib.

    `cclib <https://cclib.github.io/>`_ is an open source library, written
    in Python, for parsing and interpreting the results (logfiles) of
    computational chemistry packages.

    Parameters
    ----------
    value : `str`, `cclib.parser.logfileparser.Logfile`, \
`cclib.parser.data.ccData`
        A path to a logfile, or either a cclib job object (i.e., from
        `cclib.ccopen`), or cclib data object (i.e., from ``job.parse()``).
    name : `str`, optional
        Name for chemical species. If not given, this is set to the
        logfile path, if known. Chemical equations mention this name when
        refering to the returned object.

    Returns
    -------
    molecule : `Atoms`
        All attributes obtainable by cclib are made available as
        attributes in the returned object.

    Examples
    --------
    >>> from pyrrole.atoms import read_cclib
    >>> molecule = read_cclib('data/pyrrolate/pyrrole.out')
    >>> molecule.atomnos
    array([6, 6, 6, 6, 7, 1, 1, 1, 1, 1], dtype=int32)
    >>> molecule.charge
    0
    """
    # Accept, in order: an already-open cclib job, an already-parsed cclib
    # data object, or a path to a logfile to open and parse here.
    if isinstance(value, _logfileparser.Logfile):
        # TODO: test this case.
        jobfilename = value.filename
        ccdata = value.parse()
    elif isinstance(value, _data.ccData):
        # TODO: test this case.
        # A bare data object carries no file information.
        jobfilename = None
        ccdata = value
    else:
        # TODO: test this case.
        ccobj = _cclib.ccopen(value)
        jobfilename = ccobj.filename
        ccdata = ccobj.parse()

    if name is None:
        name = jobfilename

    attributes = ccdata.getattributes()
    attributes.update({
        'name': name,
        'jobfilename': jobfilename,
    })

    return Atoms(attributes)
python
{ "resource": "" }
q38058
read_pybel
train
def read_pybel(value, name=None):
    """
    Create an `Atoms` object from content parsed by Pybel.

    `Pybel <https://openbabel.org/docs/dev/UseTheLibrary/Python_Pybel.html>`_
    is a Python module that simplifies access to the OpenBabel API, a
    chemical toolbox designed to speak the many languages of chemical
    data. It's an open, collaborative project allowing anyone to search,
    convert, analyze, or store data from molecular modeling, chemistry,
    solid-state materials, biochemistry, and related areas.

    Parameters
    ----------
    value : `str`, `pybel.Molecule`, `openbabel.OBMol`
        A path to a file, or either a Pybel Molecule object, or OpenBabel
        OBMol.
    name : `str`, optional
        Name for chemical species. If not given, this is set to the file
        path, if known. Chemical equations mention this name when
        refering to the returned object.

    Returns
    -------
    molecule : `Atoms`
        All attributes convertible from Pybel to cclib are made available
        as attributes in the returned object.

    Notes
    -----
    The following attributes are converted from Pybel to cclib:
    `atomcoords`, `atommasses`, `atomnos`, `natom`, `charge` and `mult`.
    One must keep in mind that `charge` and `mult` are not always
    reliable, since these are often calculated from atomic formal charges.

    Examples
    --------
    >>> from pyrrole.atoms import read_pybel
    >>> molecule = read_pybel('data/pyrrolate/pyrrole.xyz')
    >>> molecule.atomnos
    array([6, 6, 6, 6, 7, 1, 1, 1, 1, 1], dtype=int32)
    >>> molecule.natom
    10
    >>> molecule.charge
    0
    """
    # Accept, in order: a Pybel Molecule, a raw OpenBabel OBMol, or a path
    # to a file to be read by Pybel (format inferred from the extension).
    if isinstance(value, _pb.Molecule):
        # TODO: test this case.
        jobfilename = None
        charge, mult = value.charge, value.spin
        ccdata = _makecclib(value.OBMol)
    elif isinstance(value, _ob.OBMol):
        # TODO: test this case.
        jobfilename = None
        charge, mult = value.GetTotalCharge(), value.GetTotalSpinMultiplicity()
        ccdata = _makecclib(value)
    else:
        # TODO: test this case.
        jobfilename = value
        _, jobfilename_ext = _os.path.splitext(jobfilename)
        # TODO: This only reads first structure.
        mol = next(_pb.readfile(jobfilename_ext[1:], jobfilename))
        charge, mult = mol.charge, mol.spin
        ccdata = _makecclib(mol.OBMol)

    if name is None:
        name = jobfilename

    attributes = ccdata.getattributes()
    attributes.update({
        'name': name,
        'jobfilename': jobfilename,
        'charge': charge,
        'mult': mult
    })

    return Atoms(attributes)
python
{ "resource": "" }
q38059
create_data
train
def create_data(*args):
    """
    Produce a single data object from an arbitrary number of different
    objects.

    This function returns a single `pandas.DataFrame` built from a
    collection of `Atoms` and `pandas.DataFrame` objects.  The returned
    object, indexed by `Atoms.name`, can be promptly used by e.g.
    `ChemicalSystem`.

    Parameters
    ----------
    *args : `pandas.DataFrame` or `Atoms`-like
        All positional arguments are assumed to be sources of data.
        `Atoms`-like objects (i.e. any object accepted by the `Atoms`
        constructor) become single-row records in the final data object.
        `pandas.DataFrame` objects are concatenated together.

    Returns
    -------
    dataframe : `pandas.DataFrame`
        Resulting tabular data object, guaranteed to be indexed by
        `Atoms.name`; if no ``name`` column exists at indexing time, one
        is created (filled with `None`) for the purpose of indexing.

    Examples
    --------
    >>> from pyrrole.atoms import Atoms, create_data, read_cclib
    >>> pyrrole = read_cclib('data/pyrrolate/pyrrole.out', 'pyrrole')
    >>> pyrrolate = read_cclib('data/pyrrolate/pyrrolate.out')
    >>> data = create_data(pyrrole, pyrrolate)
    >>> data['charge']
    name
    pyrrole                         0
    data/pyrrolate/pyrrolate.out   -1
    Name: charge, dtype: int64
    """
    def _as_table(source):
        # Normalize any accepted input into a DataFrame with a "name"
        # column (not index) so all tables concatenate uniformly.
        if not isinstance(source, _pd.DataFrame):
            try:
                source = _pd.DataFrame([source.to_series()])
            except AttributeError:
                source = _pd.DataFrame([Atoms(source).to_series()])
        if source.index.name != "name":
            if "name" not in source.columns:
                source["name"] = None
            source = source.set_index("name")
        return source.reset_index()

    tables = [_as_table(arg) for arg in args]
    return _pd.concat(tables, sort=False).set_index("name")
python
{ "resource": "" }
q38060
Atoms.split
train
def split(self, pattern=None):
    r"""
    Break molecule up into constituent fragments.

    By default (i.e., if `pattern` is `None`), each disconnected fragment
    is returned as a separate new `Atoms` object. This uses OpenBabel
    (through `OBMol.Separate`) and might not preserve atom order,
    depending on your version of the library.

    Parameters
    ----------
    pattern : iterable of iterable of `int`, optional
        Groupings of atoms into molecule fragments. Each element of
        `pattern` should be an iterable whose members are atom indices
        (see example below).

    Returns
    -------
    fragments : iterable of `Atoms`

    Examples
    --------
    >>> from pyrrole import atoms
    >>> water_dimer = atoms.read_pybel("data/water-dimer.xyz")

    "Natural fragmentation" is the default behaviour, i.e. all
    disconnected fragments are returned:

    >>> for frag in water_dimer.split():
    ...     print("{}\n".format(frag))
    O         -1.62893       -0.04138        0.37137
    H         -0.69803       -0.09168        0.09337
    H         -2.06663       -0.73498       -0.13663
    <BLANKLINE>
    O          1.21457        0.03172       -0.27623
    H          1.72977       -0.08038        0.53387
    H          1.44927        0.91672       -0.58573
    <BLANKLINE>

    Precise fragment grouping can be achieved by explicitly indicating
    which atoms belong to which fragments:

    >>> for frag in water_dimer.split([range(3), (5, 4), [3]]):
    ...     print("{}\n".format(frag))
    O         -1.62893       -0.04138        0.37137
    H         -0.69803       -0.09168        0.09337
    H         -2.06663       -0.73498       -0.13663
    <BLANKLINE>
    H          1.72977       -0.08038        0.53387
    H          1.44927        0.91672       -0.58573
    <BLANKLINE>
    O          1.21457        0.03172       -0.27623
    <BLANKLINE>
    """
    molecule_pybel = self.to_pybel()
    if pattern is None:
        # Let OpenBabel find the disconnected fragments itself.
        fragments = [read_pybel(frag)
                     for frag in molecule_pybel.OBMol.Separate()]
    else:
        # Build one fresh OBMol per requested group by copying the
        # selected atoms (by id) out of the parent molecule.
        fragments = []
        for group in pattern:
            fragment_obmol = _pb.ob.OBMol()
            for i in group:
                obatom = molecule_pybel.OBMol.GetAtomById(i)
                fragment_obmol.InsertAtom(obatom)
            fragments.append(fragment_obmol)
        fragments = [read_pybel(frag) for frag in fragments]
    return fragments
python
{ "resource": "" }
q38061
Atoms.to_pybel
train
def to_pybel(self): """ Produce a Pybel Molecule object. It is based on the capabilities of OpenBabel through Pybel. The present object must have at least `atomcoords`, `atomnos`, `charge` and `mult` defined. Returns ------- `pybel.Molecule` Examples -------- >>> from pyrrole.atoms import Atoms >>> dioxygen = Atoms({'atomcoords': [[0., 0., 0.], ... [0., 0., 1.21]], ... 'atomnos': [8, 8], ... 'charge': 0, ... 'mult': 3, ... 'name': 'dioxygen'}) >>> mol = dioxygen.to_pybel() >>> mol.molwt 31.9988 """ # TODO: This only exports last geometry by default. obmol = _makeopenbabel(self.atomcoords, self.atomnos, self.charge, self.mult) title = self.name or "" if 'scfenergies' in self.attributes: title += ", scfenergy={} eV".format(self.scfenergies[-1]) obmol.SetTitle(title) # TODO: make a test for this function. return _pb.Molecule(obmol)
python
{ "resource": "" }
q38062
Atoms.to_string
train
def to_string(self, format="smi", dialect=None, with_header=False,
              fragment_id=None, constraints=None):
    r"""
    Produce a string representation of the molecule.

    This function wraps and extends the functionality of OpenBabel (which
    is accessible through `to_pybel`). Many chemical formats can thus be
    output (see the `pybel.outformats` variable for a list of available
    output formats).

    Parameters
    ----------
    format : `str`, optional
        Chemical file format of the returned string representation (see
        examples below).
    dialect : `str`, optional
        Format dialect. This encompasses enhancements provided for some
        subformats. If ``"standard"`` or `None`, the output provided by
        OpenBabel is used with no or minimal modification. See notes
        below.
    with_header : `bool`, optional
        If `format` encompasses a header, allow it in the returned
        string. This would be, for instance, the first two lines of data
        for ``format="xyz"`` (see examples below). This might not work
        with all dialects and/or formats.
    fragment_id : `str`, optional
        Indentify molecular fragments (see examples below). This might
        not work with all dialects and/or formats.
    constraints : iterable object of `int`
        Set cartesian constraints for selected atoms (see examples
        below). This might not work with all dialects and/or formats.

    Returns
    -------
    `str`
        String representation of molecule in the specified format and/or
        dialect.

    Raises
    ------
    KeyError
        Raised if `dialect` value is currently not supported or if
        `fragment_id` is given with a currently not supported `dialect`
        value.

    Notes
    -----
    Format dialects are subformats that support extended functionality.
    Currently supported dialects are:

    - for ``format="xyz"``:
        - ``"ADF"``, ``"ORCA"``.

    Examples
    --------
    >>> from pyrrole import atoms
    >>> dioxygen = atoms.Atoms({'atomcoords': [[0., 0., 0.],
    ...                                        [0., 0., 1.21]],
    ...                         'atomnos': [8, 8],
    ...                         'charge': 0,
    ...                         'mult': 3,
    ...                         'name': 'dioxygen'})

    By default, a SMILES string is returned:

    >>> dioxygen.to_string()
    'O=O\tdioxygen'

    Cartesian coordinates can be produced with ``format="xyz"``, which is
    equivalent to printing an `Atoms` instance. Header lines are disabled
    by default (for ``format="xyz"``, the header stores the number of
    atoms and a comment/title line), but this can be reversed with
    ``with_header=True``. Coordinates for packages such as GAMESS and
    MOPAC are also supported (``format="gamin"``/``"mop"``).

    Constraining of cartesian coordinates works with MOPAC format
    (``constraints=(0, 3)`` marks atoms 0 and 3 with flag ``0`` instead
    of ``1``). Fragment identification is supported for ``"ADF"``
    (``f=<id>`` suffix per line) and ``"ORCA"`` (``(<id>)`` after the
    element symbol) dialects of ``format="xyz"``.
    """
    s = self.to_pybel().write(format).strip()
    if dialect is None:
        dialect = "standard"
    dialect = dialect.lower()

    if format == "xyz":
        # OpenBabel's xyz output: atom count line, title line, then body.
        natom, comment, body = s.split("\n", 2)
        if dialect in {"adf", "orca", "standard"}:
            if fragment_id is not None:
                if dialect == "adf":
                    # ADF: append " f=<id>" to every coordinate line.
                    body = \
                        "\n".join(["{} f={}".format(line, fragment_id)
                                   for line in body.split("\n")])
                elif dialect == "orca":
                    # ORCA: splice "(<id>)" right after the element
                    # symbol, consuming the same number of spaces so the
                    # columns stay aligned.
                    fragment_id = "({})".format(fragment_id)
                    body = \
                        "\n".join([line.replace(" " * len(fragment_id),
                                                fragment_id, 1)
                                   for line in body.split("\n")])
                else:
                    raise KeyError("fragment_id currently not supported "
                                   "with dialect '{}'".format(dialect))
        else:
            raise KeyError("dialect '{}' currently not "
                           "supported".format(dialect))

        if with_header:
            s = "\n".join([natom, comment, body])
        else:
            s = body
    elif format == "gamin":
        # GAMESS input: first five lines are header, last line is " $END".
        lines = s.split("\n")
        begin = "\n".join([line.strip() for line in lines[:5]])
        body = "\n".join([line.strip() for line in lines[5:-1]])

        if with_header:
            s = "\n".join([begin, body])
        else:
            s = body
    elif format == "mop":
        # MOPAC input: two header lines, then coordinates with "1" flags.
        chunks = s.split("\n", 2)
        begin = "\n".join([line.strip() for line in chunks[:2]])
        body = chunks[2].strip()

        if constraints is not None:
            body = body.split("\n")
            for i in constraints:
                # Fix: replacement must be a raw string; in a plain
                # literal '\g' is an invalid escape (deprecated, future
                # SyntaxError) even though it happened to work.
                body[i] = _re.sub(' 1( |$)', r' 0\g<1>', body[i])
            body = "\n".join(body)

        if with_header:
            s = "\n".join([begin, body])
        else:
            s = body

    return s.strip()
python
{ "resource": "" }
q38063
run
train
def run(macro, output_files=(), force_close=True):
    """
    Runs Fiji with the supplied macro. Output of Fiji can be viewed by
    setting environment variable `DEBUG=fijibin`.

    Parameters
    ----------
    macro : string or list of strings
        IJM-macro(s) to run. If list of strings, it will be joined with a
        space, so all statements should end with ``;``.
    output_files : iterable of str
        Files to check if exists after macro has been run. Files
        specified that do not exist after macro is done will print a
        warning message.
    force_close : bool
        Will add ``eval("script", "System.exit(42);");`` to end of macro.
        Exit code 42 is used to overcome that errors in macro efficiently
        will exit Fiji with error code 0. In other words, if this line in
        the macro is reached, the macro has most probably finished
        without errors. This is the default behaviour. One should also
        note that Fiji doesn't terminate right away if ``System.exit()``
        is left out, and it may take several minutes for Fiji to close.

    Returns
    -------
    list
        Files from output_files which exists after running macro.
    """
    # NOTE: default is now an immutable tuple (was a shared mutable []).
    if type(macro) == list:
        macro = ' '.join(macro)

    if len(macro) == 0:
        print('fijibin.macro.run got empty macro, not starting fiji')
        return _exists(output_files)

    if force_close:
        # make sure fiji halts immediately when done
        # hack: use error code 42 to check if macro has run successfully
        macro = macro + 'eval("script", "System.exit(42);");'

    # escape backslashes (windows file names)
    #            not \  \  not \   g1 \\ g2
    macro = re.sub(r"([^\\])\\([^\\])", r"\1\\\\\2", macro)

    debug('macro {}'.format(macro))

    # avoid verbose output of Fiji when DEBUG environment variable set
    env = os.environ.copy()
    debugging = False
    if 'DEBUG' in env:
        if env['DEBUG'] == 'fijibin' or env['DEBUG'] == '*':
            debugging = True
        del env['DEBUG']

    fptr, temp_filename = mkstemp(suffix='.ijm')
    m = os.fdopen(fptr, 'w')
    m.write(macro)
    m.flush()  # make sure macro is written before running Fiji
    m.close()

    cmd = [fijibin.BIN, '--headless', '-macro', temp_filename]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, env=env)
    out, err = proc.communicate()

    for line in out.decode('latin1', errors='ignore').splitlines():
        debug('stdout:' + line)
    for line in err.decode('latin1', errors='ignore').splitlines():
        debug('stderr:' + line)

    if force_close and proc.returncode != 42:
        print('fijibin ERROR: Fiji did not successfully ' +
              'run macro {}'.format(temp_filename))
        if not debugging:
            print('fijibin Try running script with ' +
                  '`DEBUG=fijibin python your_script.py`')
    else:
        # only delete if everything is ok
        os.remove(temp_filename)

    # return output_files which exists
    return _exists(output_files)
python
{ "resource": "" }
q38064
_exists
train
def _exists(filenames): """Check if every filename exists. If not, print an error message and remove the item from the list. Parameters ---------- filenames : list List of filenames to check for existence. Returns ------- list Filtered list of filenames that exists. """ exists = [] for filename in filenames: if os.path.isfile(filename): exists.append(filename) else: print('fijibin ERROR missing output file {}'.format(filename)) return exists
python
{ "resource": "" }
q38065
VoiceService.send_stop_audio
train
def send_stop_audio(self):
    '''
    Stop an audio streaming session
    '''
    # Only valid while a session is in progress.
    assert self._session_id != VoiceService.SESSION_ID_INVALID
    packet = AudioStream(session_id=self._session_id, data=StopTransfer())
    self._pebble.send_packet(packet)
python
{ "resource": "" }
q38066
VoiceService.send_session_setup_result
train
def send_session_setup_result(self, result, app_uuid=None):
    '''
    Send the result of setting up a dictation session requested by the watch

    :param result: result of setting up the session
    :type result: .SetupResult
    :param app_uuid: UUID of app that initiated the session
    :type app_uuid: uuid.UUID
    '''
    assert self._session_id != VoiceService.SESSION_ID_INVALID
    assert isinstance(result, SetupResult)
    # Mark the result as app-initiated only when the caller names an app.
    flags = 0
    if app_uuid is not None:
        assert isinstance(app_uuid, uuid.UUID)
        flags |= Flags.AppInitiated
    # Bug fix: the conditional expression binds looser than '+', so the old
    # message evaluated to just ")" whenever app_uuid was None. Parenthesize
    # the conditional so it only selects the suffix.
    logger.debug("Sending session setup result (result={}".format(result) +
                 (", app={})".format(app_uuid) if app_uuid is not None else ")"))
    self._pebble.send_packet(VoiceControlResult(flags=flags, data=SessionSetupResult(
        session_type=SessionType.Dictation, result=result)))
    # A failed setup invalidates the session id; a new session must be
    # negotiated before any further voice traffic.
    if result != SetupResult.Success:
        self._session_id = VoiceService.SESSION_ID_INVALID
python
{ "resource": "" }
q38067
VoiceService.send_dictation_result
train
def send_dictation_result(self, result, sentences=None, app_uuid=None):
    '''
    Send the result of a dictation session

    :param result: Result of the session
    :type result: DictationResult
    :param sentences: list of sentences, each of which is a list of words and punctuation
    :param app_uuid: UUID of app that initiated the session
    :type app_uuid: uuid.UUID
    '''
    assert self._session_id != VoiceService.SESSION_ID_INVALID
    assert isinstance(result, TranscriptionResult)

    transcription = None
    if result == TranscriptionResult.Success:
        # Robustness fix: also guard against sentences=None; the old
        # ``len(sentences) > 0`` raised TypeError in that case.
        if sentences:
            s_list = []
            for s in sentences:
                # every word is sent with a fixed confidence of 100
                words = [Word(confidence=100, data=w) for w in s]
                s_list.append(Sentence(words=words))
            transcription = Transcription(transcription=SentenceList(sentences=s_list))

    flags = 0
    if app_uuid is not None:
        assert isinstance(app_uuid, uuid.UUID)
        flags |= Flags.AppInitiated

    attributes = []
    if app_uuid is not None:
        assert isinstance(app_uuid, uuid.UUID)
        attributes.append(Attribute(id=AttributeType.AppUuid, data=AppUuid(uuid=app_uuid)))
    if transcription is not None:
        attributes.append(Attribute(id=AttributeType.Transcription, data=transcription))

    # Bug fix: parenthesize the conditional suffix; previously the whole
    # log expression collapsed to ")" whenever app_uuid was None because
    # the conditional binds looser than '+'.
    logger.debug("Sending dictation result (result={}".format(result) +
                 (", app={})".format(app_uuid) if app_uuid is not None else ")"))

    self._pebble.send_packet(VoiceControlResult(flags=flags, data=DictationResult(
        session_id=self._session_id, result=result,
        attributes=AttributeList(dictionary=attributes))))
    # The session is finished once its result has been sent.
    self._session_id = VoiceService.SESSION_ID_INVALID
python
{ "resource": "" }
q38068
load_ipython_extension
train
def load_ipython_extension(ip): """ register magics function, can be called from a notebook """ #ip = get_ipython() ip.register_magics(CustomMagics) # enable C# (CSHARP) highlight patch = ("IPython.config.cell_magic_highlight['clrmagic'] = " "{'reg':[/^%%CS/]};") js = display.Javascript(data=patch, lib=["https://github.com/codemirror/CodeMirror/blob/master/mode/clike/clike.js"])
python
{ "resource": "" }
q38069
XmppBackendBase.module
train
def module(self):
    """The module or object specified by the ``library`` attribute.

    ``library`` may name either a module (``"foo.bar"``) or an object inside
    a module (``"foo.bar.Cls"``).  The resolved value is cached in
    ``self._module``.

    :raises ValueError: If ``library`` is unset or cannot be imported.
    """
    if self._module is None:
        if self.library is None:
            raise ValueError(
                "Backend '%s' doesn't specify a library attribute" % self.__class__)

        try:
            if '.' in self.library:
                mod_path, cls_name = self.library.rsplit('.', 1)
                mod = import_module(mod_path)
                self._module = getattr(mod, cls_name)
            else:
                self._module = import_module(self.library)
        except (AttributeError, ImportError):
            # Bug fix: the old message interpolated ``cls_name``, which is
            # unbound when ``library`` contains no dot, turning the intended
            # ValueError into a NameError.
            raise ValueError("Couldn't load %s backend library" % self.library)
    return self._module
python
{ "resource": "" }
q38070
XmppBackendBase.datetime_to_timestamp
train
def datetime_to_timestamp(self, dt):
    """Helper function to convert a datetime object to a timestamp.

    If datetime instance ``dt`` is naive, it is assumed that it is in UTC. In Python 3, this
    just calls ``datetime.timestamp()``, in Python 2, it substracts any timezone offset and
    returns the difference since 1970-01-01 00:00:00. Note that the function always returns
    an int, even in Python 3.

    >>> XmppBackendBase().datetime_to_timestamp(datetime(2017, 9, 17, 19, 59))
    1505678340
    >>> XmppBackendBase().datetime_to_timestamp(datetime(1984, 11, 6, 13, 21))
    468595260

    :param dt: The datetime object to convert. If ``None``, returns the current time.
    :type dt: datetime
    :return: The seconds in UTC.
    :rtype: int
    """
    if dt is None:
        # no argument: current Unix time
        return int(time.time())

    if six.PY3:
        if not dt.tzinfo:
            # naive datetimes are interpreted as UTC before taking the
            # timestamp
            dt = pytz.utc.localize(dt)
        return int(dt.timestamp())
    else:
        if dt.tzinfo:
            # normalize an aware datetime to naive UTC by subtracting its
            # offset, then count seconds since the epoch by hand (py2 has
            # no datetime.timestamp())
            dt = dt.replace(tzinfo=None) - dt.utcoffset()
        return int((dt - datetime(1970, 1, 1)).total_seconds())
python
{ "resource": "" }
q38071
XmppBackendBase.get_random_password
train
def get_random_password(self, length=32, chars=None):
    """Helper function that gets a random password.

    :param length: The length of the random password.
    :type length: int
    :param chars: A string with characters to choose from. Defaults to all
        ASCII letters and digits.
    :type chars: str
    """
    # Security fix: use ``secrets`` instead of ``random`` -- the Mersenne
    # Twister is not suitable for passwords or other secrets.
    import secrets

    if chars is None:
        chars = string.ascii_letters + string.digits
    return ''.join(secrets.choice(chars) for x in range(length))
python
{ "resource": "" }
q38072
XmppBackendBase.create_reservation
train
def create_reservation(self, username, domain, email=None):
    """Reserve a new account.

    This method is called when a user account should be reserved, meaning that the account
    can no longer be registered by anybody else but the user cannot yet log in either. This
    is useful if e.g. an email confirmation is still pending.

    The default implementation calls :py:func:`~xmpp_backends.base.XmppBackendBase.create_user`
    with a random password.

    :param username: The username of the user.
    :type username: str
    :param domain: The domain of the user.
    :type domain: str
    :param email: The email address provided by the user. Note that at this point it is not
        confirmed. You are free to ignore this parameter.
    """
    password = self.get_random_password()
    # NOTE(review): the docstring above says this calls create_user(), but
    # the code calls self.create() -- confirm which method name is current.
    self.create(username=username, domain=domain, password=password, email=email)
python
{ "resource": "" }
q38073
XmppBackendBase.confirm_reservation
train
def confirm_reservation(self, username, domain, password, email=None):
    """Confirm a reservation for a username.

    The default implementation just calls
    :py:func:`~xmpp_backends.base.XmppBackendBase.set_password` and optionally
    :py:func:`~xmpp_backends.base.XmppBackendBase.set_email`.
    """
    self.set_password(username=username, domain=domain, password=password)
    if email is None:
        return
    self.set_email(username=username, domain=domain, email=email)
python
{ "resource": "" }
q38074
XmppBackendBase.block_user
train
def block_user(self, username, domain):
    """Block the specified user.

    The default implementation scrambles the account's password by calling
    :py:func:`~xmpp_backends.base.XmppBackendBase.set_password` with a random
    password, so the user can no longer log in.

    :param username: The username of the user.
    :type username: str
    :param domain: The domain of the user.
    :type domain: str
    """
    scrambled = self.get_random_password()
    self.set_password(username, domain, scrambled)
python
{ "resource": "" }
q38075
EjabberdBackendBase.parse_connection_string
train
def parse_connection_string(self, connection):
    """Parse string as returned by the ``connected_users_info`` or ``user_sessions_info``
    API calls.

    >>> EjabberdBackendBase().parse_connection_string('c2s_tls')
    (0, True, False)
    >>> EjabberdBackendBase().parse_connection_string('c2s_compressed_tls')
    (0, True, True)
    >>> EjabberdBackendBase().parse_connection_string('http_bind')
    (2, None, None)

    :param connection: The connection string as returned by the ejabberd APIs.
    :type connection: str
    :return: A tuple representing the conntion type, if it is encrypted and if it uses
        XMPP stream compression.
    :rtype: tuple
    """
    # TODO: Websockets, HTTP Polling
    if connection == 'c2s_tls':
        return CONNECTION_XMPP, True, False
    elif connection == 'c2s_compressed_tls':
        return CONNECTION_XMPP, True, True
    elif connection == 'http_bind':
        # encryption/compression not applicable for HTTP binding
        return CONNECTION_HTTP_BINDING, None, None
    elif connection == 'c2s':
        return CONNECTION_XMPP, False, False

    # Fix: Logger.warn() is deprecated (and removed in Python 3.13); use
    # the canonical warning() method instead.
    log.warning('Could not parse connection string "%s"', connection)
    return CONNECTION_UNKNOWN, True, True
python
{ "resource": "" }
q38076
EjabberdBackendBase.parse_ip_address
train
def parse_ip_address(self, ip_address):
    """Parse an address as returned by the ``connected_users_info`` or
    ``user_sessions_info`` API calls.

    Example::

        >>> EjabberdBackendBase().parse_ip_address('192.168.0.1')  # doctest: +FORCE_TEXT
        IPv4Address('192.168.0.1')
        >>> EjabberdBackendBase().parse_ip_address('::FFFF:192.168.0.1')  # doctest: +FORCE_TEXT
        IPv4Address('192.168.0.1')
        >>> EjabberdBackendBase().parse_ip_address('::1')  # doctest: +FORCE_TEXT
        IPv6Address('::1')

    :param ip_address: An IP address.
    :type ip_address: str
    :return: The parsed IP address.
    :rtype: `ipaddress.IPv6Address` or `ipaddress.IPv4Address`.
    """
    # ejabberd reports IPv4 peers as IPv4-mapped addresses ("::FFFF:a.b.c.d");
    # strip the prefix so they parse as plain IPv4.
    prefix = '::FFFF:'
    if ip_address.startswith(prefix):
        ip_address = ip_address[len(prefix):]

    if six.PY2 and isinstance(ip_address, str):
        # ipaddress constructor does not eat str in py2 :-/
        ip_address = ip_address.decode('utf-8')
    return ipaddress.ip_address(ip_address)
python
{ "resource": "" }
q38077
PebbleConnection.pump_reader
train
def pump_reader(self):
    """
    Synchronously reads one message from the watch, blocking until a message
    is available. All events caused by the message read will be processed
    before this method returns.

    .. note::
       You usually don't need to invoke this method manually;
       instead, see :meth:`run_sync` and :meth:`run_async`.
    """
    origin, message = self.transport.read_packet()
    if not isinstance(origin, MessageTargetWatch):
        # anything not addressed from the watch is a transport-level event
        self._broadcast_transport_message(origin, message)
    else:
        self._handle_watch_message(message)
python
{ "resource": "" }
q38078
PebbleConnection.run_sync
train
def run_sync(self):
    """
    Runs the message loop until the Pebble disconnects.
    This method will block until the watch disconnects or a fatal error
    occurs.

    For alternatives that don't block forever, see :meth:`pump_reader`
    and :meth:`run_async`.
    """
    while self.connected:
        try:
            self.pump_reader()
        except PacketDecodeError as exc:
            # a single undecodable packet is not fatal; log and keep going
            logger.warning("Packet decode failed: %s", exc)
            continue
        except ConnectionError:
            return
python
{ "resource": "" }
q38079
PebbleConnection._handle_watch_message
train
def _handle_watch_message(self, message):
    """
    Processes a binary message received from the watch and broadcasts the relevant events.

    :param message: A raw message from the watch, without any transport framing.
    :type message: bytes
    """
    if self.log_protocol_level is not None:
        logger.log(self.log_protocol_level, "<- %s", hexlify(message).decode())
    # prepend whatever was left over from the previous read; a pebble
    # protocol frame has a 4-byte header (2-byte length + 2-byte endpoint)
    message = self.pending_bytes + message
    while len(message) >= 4:
        try:
            packet, length = PebblePacket.parse_message(message)
        except IncompleteMessage:
            # keep the partial frame around for the next read
            self.pending_bytes = message
            break
        except:
            # At this point we've failed to deconstruct the message via normal means, but we
            # don't want to end up permanently desynced (because we wiped a partial message),
            # nor do we want to get stuck (because we didn't wipe anything). We therefore
            # parse the packet length manually and skip ahead that far. If the expected
            # length is 0, we wipe everything to ensure forward motion (but we are quite
            # probably screwed).
            expected_length, = struct.unpack('!H', message[:2])
            if expected_length == 0:
                self.pending_bytes = b''
            else:
                self.pending_bytes = message[expected_length + 4:]
            # deliberately re-raised after resyncing the buffer
            raise
        self.event_handler.broadcast_event("raw_inbound", message[:length])
        if self.log_packet_level is not None:
            logger.log(self.log_packet_level, "<- %s", packet)
        # drop the consumed frame and dispatch the decoded packet
        message = message[length:]
        self.event_handler.broadcast_event((_EventType.Watch, type(packet)), packet)
        if length == 0:
            # guard against zero-length parses that would loop forever
            break
    # anything shorter than a header is kept for the next read
    self.pending_bytes = message
python
{ "resource": "" }
q38080
PebbleConnection._broadcast_transport_message
train
def _broadcast_transport_message(self, origin, message):
    """
    Broadcasts an event originating from a transport that does not represent
    a message from the Pebble.

    :param origin: The type of transport responsible for the message.
    :type origin: .MessageTarget
    :param message: The message from the transport
    """
    event_key = (_EventType.Transport, type(origin), type(message))
    self.event_handler.broadcast_event(event_key, message)
python
{ "resource": "" }
q38081
PebbleConnection.register_transport_endpoint
train
def register_transport_endpoint(self, origin, message_type, handler):
    """
    Register a handler for a message received from a transport that does not
    indicate a message from the connected Pebble.

    :param origin: The type of :class:`.MessageTarget` that triggers the message
    :param message_type: The class of the message that is expected.
    :param handler: A callback to be called when a message is received.
    :type handler: callable
    :return: A handle that can be passed to :meth:`unregister_endpoint` to
        remove the handler.
    """
    event_key = (_EventType.Transport, origin, message_type)
    return self.event_handler.register_handler(event_key, handler)
python
{ "resource": "" }
q38082
PebbleConnection.register_endpoint
train
def register_endpoint(self, endpoint, handler):
    """
    Register a handler for a message received from the Pebble.

    :param endpoint: The type of :class:`.PebblePacket` that is being
        listened for.
    :type endpoint: .PacketType
    :param handler: A callback to be called when a message is received.
    :type handler: callable
    :return: A handle that can be passed to :meth:`unregister_endpoint` to
        remove the handler.
    """
    event_key = (_EventType.Watch, endpoint)
    return self.event_handler.register_handler(event_key, handler)
python
{ "resource": "" }
q38083
PebbleConnection.read_transport_message
train
def read_transport_message(self, origin, message_type, timeout=15):
    """
    Blocking read of a transport message that does not indicate a message
    from the Pebble. Will block until a message is received, or it times out.

    .. warning::
       Avoid calling this method from an endpoint callback; doing so is
       likely to lead to deadlock.

    :param origin: The type of :class:`.MessageTarget` that triggers the
        message.
    :param message_type: The class of the message to read from the transport.
    :param timeout: The maximum time to wait before raising
        :exc:`.TimeoutError`.
    :return: The object read from the transport; of the same type as passed
        to ``message_type``.
    """
    event_key = (_EventType.Transport, origin, message_type)
    return self.event_handler.wait_for_event(event_key, timeout=timeout)
python
{ "resource": "" }
q38084
PebbleConnection.send_packet
train
def send_packet(self, packet):
    """
    Sends a message to the Pebble.

    :param packet: The message to send.
    :type packet: .PebblePacket
    """
    # Consistency fix: compare against None like _handle_watch_message does;
    # a bare truthiness test would silently skip logging for level 0 (NOTSET).
    if self.log_packet_level is not None:
        logger.log(self.log_packet_level, "-> %s", packet)
    serialised = packet.serialise_packet()
    self.event_handler.broadcast_event("raw_outbound", serialised)
    self.send_raw(serialised)
python
{ "resource": "" }
q38085
PebbleConnection.send_and_read
train
def send_and_read(self, packet, endpoint, timeout=15):
    """
    Sends a packet, then returns the next response received from that
    endpoint. This method sets up a listener before it actually sends the
    message, avoiding a potential race.

    .. warning::
       Avoid calling this method from an endpoint callback; doing so is
       likely to lead to deadlock.

    :param packet: The message to send.
    :type packet: .PebblePacket
    :param endpoint: The endpoint to read from
    :type endpoint: .PacketType
    :param timeout: The maximum time to wait before raising
        :exc:`.TimeoutError`.
    :return: The message read from the endpoint; of the same type as passed
        to ``endpoint``.
    """
    # subscribe before sending so the response cannot be missed
    pending = self.get_endpoint_queue(endpoint)
    self.send_packet(packet)
    try:
        response = pending.get(timeout=timeout)
    finally:
        pending.close()
    return response
python
{ "resource": "" }
q38086
PebbleConnection.send_raw
train
def send_raw(self, message):
    """
    Sends a raw binary message to the Pebble. No processing will be applied,
    but any transport framing should be omitted.

    :param message: The message to send to the pebble.
    :type message: bytes
    """
    # Consistency fix: compare against None like _handle_watch_message does;
    # a bare truthiness test would silently skip logging for level 0 (NOTSET).
    if self.log_protocol_level is not None:
        logger.log(self.log_protocol_level, "-> %s", hexlify(message).decode())
    self.transport.send_packet(message)
python
{ "resource": "" }
q38087
PebbleConnection.firmware_version
train
def firmware_version(self):
    """
    Provides information on the connected Pebble, including its firmware
    version, language, capabilities, etc.

    .. note:
       This is a blocking call if :meth:`fetch_watch_info` has not yet been
       called, which could lead to deadlock if called in an endpoint
       callback.

    :rtype: .WatchVersionResponse
    """
    # version tags look like "v3.12.1-suffix"; drop the leading "v"
    tag = self.watch_info.running.version_tag[1:]
    version, _, suffix = tag.partition('-')
    points = [int(part) for part in version.split('.')]
    # pad to major.minor.patch
    points.extend([0] * (3 - len(points)))
    return FirmwareVersion(*(points + [suffix]))
python
{ "resource": "" }
q38088
GDF._blockread
train
def _blockread(self, fname): """ Generator yields bsize lines from gdf file. Hidden method. Parameters ---------- fname : str Name of gdf-file. Yields ------ list file contents """ with open(fname, 'rb') as f: while True: a = [] for i in range(self.bsize): line = f.readline() if not line: break a.append(line.split()) if a == []: raise StopIteration yield a
python
{ "resource": "" }
q38089
GDF.create
train
def create(self, re='brunel-py-ex-*.gdf', index=True):
    """
    Create db from list of gdf file glob

    Parameters
    ----------
    re : str
        File glob to load. (Note: the parameter name shadows the ``re``
        module; kept for backward compatibility.)
    index : bool
        Create index on neurons for speed.

    Returns
    -------
    None

    See also
    --------
    sqlite3.connect.cursor, sqlite3.connect
    """
    self.cursor.execute('CREATE TABLE IF NOT EXISTS spikes (neuron INT UNSIGNED, time REAL)')

    tic = now()
    for f in glob.glob(re):
        print(f)
        # Bug fix: the old ``while True: try/except: continue`` wrapper
        # swallowed every exception (bare except) and retried forever on any
        # persistent error, hanging the process instead of surfacing the
        # cause. Let exceptions propagate instead.
        for data in self._blockread(f):
            self.cursor.executemany('INSERT INTO spikes VALUES (?, ?)', data)
        self.conn.commit()
    toc = now()
    if self.debug:
        print('Inserts took %g seconds.' % (toc - tic))

    # Optionally, create index for speed
    if index:
        tic = now()
        self.cursor.execute('CREATE INDEX neuron_index on spikes (neuron)')
        toc = now()
        if self.debug:
            print('Indexed db in %g seconds.' % (toc - tic))
python
{ "resource": "" }
q38090
GDF.create_from_list
train
def create_from_list(self, re=[], index=True):
    """
    Create db from list of arrays.

    Parameters
    ----------
    re : list
        Index of element is cell index, and element `i` an array of spike
        times in ms.
    index : bool
        Create index on neurons for speed.

    Returns
    -------
    None

    See also
    --------
    sqlite3.connect.cursor, sqlite3.connect
    """
    self.cursor.execute('CREATE TABLE IF NOT EXISTS spikes (neuron INT UNSIGNED, time REAL)')

    tic = now()
    # element index doubles as the neuron id
    for neuron, spiketimes in enumerate(re):
        rows = [(neuron, t) for t in spiketimes]
        self.cursor.executemany('INSERT INTO spikes VALUES (?, ?)', rows)
    self.conn.commit()
    toc = now()
    if self.debug:
        print('Inserts took %g seconds.' % (toc - tic))

    # Optionally, create index for speed
    if index:
        tic = now()
        self.cursor.execute('CREATE INDEX neuron_index on spikes (neuron)')
        toc = now()
        if self.debug:
            print('Indexed db in %g seconds.' % (toc - tic))
python
{ "resource": "" }
q38091
GDF.select
train
def select(self, neurons):
    """
    Select spike trains.

    Parameters
    ----------
    neurons : numpy.ndarray or list
        Array or list of neuron indices.

    Returns
    -------
    list
        List of numpy.ndarray objects containing spike times.

    See also
    --------
    sqlite3.connect.cursor
    """
    s = []
    for neuron in neurons:
        # Parameterized query instead of '%d' string interpolation: lets
        # sqlite handle quoting/typing of the value.
        self.cursor.execute('SELECT time FROM spikes where neuron = ?',
                            (int(neuron),))
        sel = self.cursor.fetchall()
        spikes = np.array(sel).flatten()
        s.append(spikes)

    return s
python
{ "resource": "" }
q38092
GDF.interval
train
def interval(self, T=[0, 1000]):
    """
    Get all spikes in a time interval T.

    Parameters
    ----------
    T : list
        Time interval ``[tmin, tmax]``; BETWEEN is inclusive at both ends.

    Returns
    -------
    list
        List of ``(neuron, time)`` rows.

    See also
    --------
    sqlite3.connect.cursor
    """
    # Parameterized query instead of '%f' interpolation: avoids the 6-digit
    # precision loss of '%f' and lets sqlite bind the values directly.
    self.cursor.execute('SELECT * FROM spikes WHERE time BETWEEN ? AND ?',
                        (T[0], T[1]))
    sel = self.cursor.fetchall()

    return sel
python
{ "resource": "" }
q38093
GDF.neurons
train
def neurons(self):
    """
    Return a sorted array of all distinct neuron indices in the database.

    Returns
    -------
    numpy.ndarray
        Array of neuron indices.

    See also
    --------
    sqlite3.connect.cursor
    """
    self.cursor.execute('SELECT DISTINCT neuron FROM spikes ORDER BY neuron')
    rows = self.cursor.fetchall()
    # each row is a 1-tuple; unpack into a flat array
    return np.array([row[0] for row in rows])
python
{ "resource": "" }
q38094
GDF.num_spikes
train
def num_spikes(self):
    """
    Return the total number of spikes.

    Returns
    -------
    tuple
        One-element row ``(count,)`` with the number of rows in the
        ``spikes`` table.  (The old docstring claimed a list was returned;
        the value has always been the first fetched row.)
    """
    self.cursor.execute('SELECT Count(*) from spikes')
    rows = self.cursor.fetchall()[0]
    # Check against 'wc -l *ex*.gdf'
    if self.debug:
        print('DB has %d spikes' % rows)

    return rows
python
{ "resource": "" }
q38095
GDF.plotstuff
train
def plotstuff(self, T=[0, 1000]):
    """
    Create a scatter plot of the contents of the database, with entries on
    the interval T.

    Parameters
    ----------
    T : list
        Time interval.

    Returns
    -------
    None

    See also
    --------
    GDF.select_neurons_interval
    """
    fig = plt.figure(figsize=(10,10))
    ax = fig.add_subplot(111)
    # NOTE(review): select_neurons_interval() is not defined in this part of
    # the file -- presumably it yields one spike-time array per neuron in
    # the same order as self.neurons(); confirm.
    neurons = self.neurons()
    i = 0
    for x in self.select_neurons_interval(neurons, T):
        # one row of dots per neuron, plotted at height = neuron id
        ax.plot(x, np.zeros(x.size) + neurons[i], 'o',
                markersize=1,
                markerfacecolor='k',
                markeredgecolor='k',
                alpha=0.25)
        i += 1
    ax.set_xlabel('time (ms)')
    ax.set_ylabel('neuron ID')
    ax.set_xlim(T[0], T[1])
    ax.set_ylim(neurons.min(), neurons.max())
    ax.set_title('database content on T = [%.0f, %.0f]' % (T[0], T[1]))
python
{ "resource": "" }
q38096
get_config_directory
train
def get_config_directory(appname):
    """
    Get OS-specific configuration directory.

    :type appname: str
    :arg appname: capitalized name of the application
    """
    system = platform.system().lower()
    if system == 'windows':
        base = os.getenv('APPDATA') or '~'
        path = os.path.join(base, appname, appname)
    elif system == 'darwin':
        path = os.path.join('~', 'Library', 'Application Support', appname)
    else:
        # assume an XDG-style system (Linux, BSD, ...)
        base = os.getenv('XDG_CONFIG_HOME') or '~/.config'
        path = os.path.join(base, appname.lower())
    return os.path.expanduser(path)
python
{ "resource": "" }
q38097
SendEmailAdmin.save_model
train
def save_model(self, request, obj, form, change):
    """ sends the email and does not save it """
    def _split(addresses):
        # comma-separated string -> list of trimmed addresses
        return [part.strip() for part in addresses.split(',')]

    email = message.EmailMessage(
        subject=obj.subject,
        body=obj.body,
        from_email=obj.from_email,
        to=_split(obj.to_emails),
        bcc=_split(obj.bcc_emails),
        cc=_split(obj.cc_emails)
    )
    email.send()
python
{ "resource": "" }
q38098
dump_dict_of_nested_lists_to_h5
train
def dump_dict_of_nested_lists_to_h5(fname, data):
    """
    Take nested list structure and dump it in hdf5 file.

    Parameters
    ----------
    fname : str
        Filename
    data : dict(list(numpy.ndarray))
        Dict of nested lists with variable len arrays.

    Returns
    -------
    None
    """
    print('writing to file: %s' % fname)
    # Fixes: use a context manager so the file is closed even if a write
    # fails, and pass the mode explicitly -- 'a' (append/create) matches
    # h5py's historical default, which recent h5py versions no longer allow
    # to be omitted.
    with h5py.File(fname, 'a') as f:
        # group hierarchy mirrors the nesting: /<i>/<j>/<k> -> array
        for i, ivalue in list(data.items()):
            igrp = f.create_group(str(i))
            for j, jvalue in enumerate(ivalue):
                jgrp = igrp.create_group(str(j))
                for k, kvalue in enumerate(jvalue):
                    if kvalue.size > 0:
                        jgrp.create_dataset(str(k), data=kvalue,
                                            compression='gzip')
                    else:
                        # empty arrays need a resizable shape to be stored
                        jgrp.create_dataset(str(k), data=kvalue,
                                            maxshape=(None, ),
                                            compression='gzip')
python
{ "resource": "" }
q38099
load_dict_of_nested_lists_from_h5
train
def load_dict_of_nested_lists_from_h5(fname, toplevelkeys=None):
    """
    Load nested list structure from hdf5 file

    Parameters
    ----------
    fname : str
        Filename
    toplevelkeys : None or iterable
        If given, load only these top-level groups; otherwise load all
        (default), converting top-level keys to int.

    Returns
    -------
    dict(list(numpy.ndarray))
        dictionary of nested lists with variable length array data.
    """
    data = {}
    # context manager: close the file even if reading fails
    with h5py.File(fname, 'r') as f:
        if toplevelkeys is not None:
            for i in toplevelkeys:
                # Bug fix: the old code initialized data[i] but appended to
                # data[int(i)], which raised for any non-integer key; use
                # the caller's key consistently.
                ivalue = f[str(i)]
                data[i] = []
                for j, jvalue in enumerate(ivalue.values()):
                    data[i].append([])
                    for k, kvalue in enumerate(jvalue.values()):
                        # dataset[()] reads the whole array; Dataset.value
                        # was removed in h5py 3.0
                        data[i][j].append(kvalue[()])
        else:
            for i, ivalue in list(f.items()):
                i = int(i)
                data[i] = []
                for j, jvalue in enumerate(ivalue.values()):
                    data[i].append([])
                    for k, kvalue in enumerate(jvalue.values()):
                        data[i][j].append(kvalue[()])
    return data