_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q39400
get_version
train
def get_version():
    """Read the package version number from srptools/__init__.py.

    This workaround is required since __init__ is an entry point
    exposing stuff from other modules, which may use dependencies
    unavailable in the current environment, which in turn would prevent
    this application from installing.

    :returns: dotted version string, e.g. ``'1.0.1'``
    """
    contents = read(os.path.join(PATH_BASE, 'srptools', '__init__.py'))
    # Raw string: '\(' in a plain string is an invalid escape sequence
    # (DeprecationWarning in modern Python).
    version = re.search(r'VERSION = \(([^)]+)\)', contents)
    version = version.group(1).replace(', ', '.').strip()
    return version
python
{ "resource": "" }
q39401
fromfilenames
train
def fromfilenames(filenames, coltype=int):
    """
    Return a segmentlist describing the intervals spanned by the files
    whose names are given in the list filenames.  The segmentlist is
    constructed by parsing the file names, and the boundaries of each
    segment are coerced to type coltype.

    The file names are parsed using a generalization of the format
    described in Technical Note LIGO-T010150-00-E, which allows the
    start time and duration appearing in the file name to be
    non-integers.

    NOTE:  the output is a segmentlist as described by the file names;
    if the file names are not in time order, or describe overlaping
    segments, then thusly shall be the output of this function.  It is
    recommended that this function's output be coalesced before use.
    """
    pattern = re.compile(r"-([\d.]+)-([\d.]+)\.[\w_+#]+\Z")
    seglist = segments.segmentlist()
    for filename in filenames:
        # Exactly one (start, duration) pair must appear in the name;
        # the destructuring assignment raises if that is not the case.
        [(start, duration)] = pattern.findall(filename.strip().rstrip(".gz"))
        start = coltype(start)
        duration = coltype(duration)
        seglist.append(segments.segment(start, start + duration))
    return seglist
python
{ "resource": "" }
q39402
fromlalcache
train
def fromlalcache(cachefile, coltype=int):
    """
    Construct a segmentlist representing the times spanned by the files
    identified in the LAL cache contained in the file object file.  The
    segmentlist will be created with segments whose boundaries are of
    type coltype, which should raise ValueError if it cannot convert
    its string argument.

    Example:

    >>> from pycbc_glue.lal import LIGOTimeGPS
    >>> cache_seglists = fromlalcache(open(filename), coltype = LIGOTimeGPS).coalesce()

    See also:

    pycbc_glue.lal.CacheEntry
    """
    # One CacheEntry (and hence one segment) per line of the cache file.
    spans = (lal.CacheEntry(line, coltype=coltype).segment for line in cachefile)
    return segments.segmentlist(spans)
python
{ "resource": "" }
q39403
S2playground
train
def S2playground(extent):
    """
    Return a segmentlist identifying the S2 playground times within the
    interval defined by the segment extent.

    Example:

    >>> from pycbc_glue import segments
    >>> S2playground(segments.segment(874000000, 874010000))
    [segment(874000013, 874000613), segment(874006383, 874006983)]
    """
    # Playground segments are 600 s long and recur every 6370 s,
    # anchored at GPS 729273613.
    start = int(extent[0])
    start -= (start - 729273613) % 6370
    stop = int(extent[1]) + 1
    playground = segments.segmentlist(
        segments.segment(t, t + 600) for t in range(start, stop, 6370))
    # Clip the candidate segments to the requested interval.
    return playground & segments.segmentlist([extent])
python
{ "resource": "" }
q39404
vote
train
def vote(seglists, n):
    """
    Given a sequence of segmentlists, returns the intervals during
    which at least n of them intersect.  The input segmentlists must be
    coalesced, the output is coalesced.

    Example:

    >>> from pycbc_glue.segments import *
    >>> w = segmentlist([segment(0, 15)])
    >>> x = segmentlist([segment(5, 20)])
    >>> y = segmentlist([segment(10, 25)])
    >>> z = segmentlist([segment(15, 30)])
    >>> vote((w, x, y, z), 3)
    [segment(10, 20)]

    The sequence of segmentlists is only iterated over once, and the
    segmentlists within it are only iterated over once;  they can all be
    generators.  If there are a total of N segments in M segment lists
    and the final result has L segments the algorithm is O(N M) + O(L).
    """
    # check for no-op
    if n < 1:
        return segments.segmentlist()

    # digest the segmentlists into an ordered sequence of off-on and
    # on-off transitions with the vote count for each transition
    # FIXME:  this generator is declared locally for now, is it useful
    # as a stand-alone generator?
    def pop_min(l):
        # remove and return the smallest value from a list
        # (Python 3 fix: xrange() was removed; use range())
        val = min(l)
        for i in range(len(l) - 1, -1, -1):
            if l[i] is val:
                return l.pop(i)
        assert False  # cannot get here

    def vote_generator(seglists):
        queue = []
        for seglist in seglists:
            segiter = iter(seglist)
            # Python 3 fix: iterator.next() was renamed; use next(it)
            try:
                seg = next(segiter)
            except StopIteration:
                continue
            # put them in so that the smallest boundary is
            # closest to the end of the list
            queue.append((seg[1], -1, segiter))
            queue.append((seg[0], +1, None))
        if not queue:
            return
        queue.sort(reverse=True)
        bound = queue[-1][0]
        votes = 0
        while queue:
            this_bound, delta, segiter = pop_min(queue)
            if this_bound == bound:
                votes += delta
            else:
                yield bound, votes
                bound = this_bound
                votes = delta
            if segiter is not None:
                try:
                    seg = next(segiter)
                except StopIteration:
                    continue
                queue.append((seg[1], -1, segiter))
                queue.append((seg[0], +1, None))
        yield bound, votes

    # compute the cumulative sum of votes, and assemble a segmentlist
    # from the intervals when the vote count is equal to or greater
    # than n
    result = segments.segmentlist()
    votes = 0
    for bound, delta in vote_generator(seglists):
        if delta > 0 and n - delta <= votes < n:
            start = bound
        elif delta < 0 and n <= votes < n - delta:
            result.append(segments.segment(start, bound))
            del start  # detect stops that aren't preceded by starts
        votes += delta
    assert votes == 0  # detect failed cumulative sum
    return result
python
{ "resource": "" }
q39405
validate_proxy
train
def validate_proxy(path):
    """Validate the users X509 proxy certificate

    Tests that the proxy certificate is RFC 3820 compliant and that it
    is valid for at least the next 15 minutes.

    @returns: L{True} if the certificate validates
    @raises RuntimeError: if the certificate cannot be validated
    """
    # load the proxy from path
    try:
        proxy = M2Crypto.X509.load_cert(path)
    except Exception as e:  # Python 3 fix: "except Exception, e" is Py2-only
        msg = "Unable to load proxy from path %s : %s" % (path, e)
        raise RuntimeError(msg)

    # make sure the proxy is RFC 3820 compliant
    try:
        proxy.get_ext("proxyCertInfo")
    except LookupError:
        subject = proxy.get_subject().as_text()
        if re.search(r'.+CN=proxy$', subject):
            raise RuntimeError("Could not find a RFC 3820 compliant proxy "
                               "credential. Please run 'grid-proxy-init -rfc' "
                               "and try again.")

    # attempt to make sure the proxy is still good for more than 15 minutes
    try:
        expireASN1 = str(proxy.get_not_after())
        expireGMT = time.strptime(expireASN1, "%b %d %H:%M:%S %Y %Z")
        expireUTC = calendar.timegm(expireGMT)
        now = int(time.time())
        secondsLeft = expireUTC - now
    except Exception:
        # problem getting or parsing time so just let the client
        # continue and pass the issue along to the server
        secondsLeft = 3600

    if secondsLeft <= 0:
        raise RuntimeError("Your proxy certificate is expired.\n"
                           "Please generate a new proxy certificate and "
                           "try again. ")
    if secondsLeft < (60 * 15):
        raise RuntimeError("Your proxy certificate expires in less than 15 "
                           "minutes.\nPlease generate a new proxy "
                           "certificate and try again.")

    # return True to indicate validated proxy
    return True
python
{ "resource": "" }
q39406
find_credential
train
def find_credential():
    """Locate the users X509 certificate and key files

    This method uses the C{X509_USER_CERT} and C{X509_USER_KEY} to
    locate valid proxy information.  If those are not found, the
    standard location in /tmp/ is searched.

    @raises RuntimeError: if the proxy found via either method cannot
        be validated
    @raises RuntimeError: if the cert and key files cannot be located
    """
    rfc_proxy_msg = ("Could not find a RFC 3820 compliant proxy credential."
                     "Please run 'grid-proxy-init -rfc' and try again.")

    # use X509_USER_PROXY from environment if set
    # (Python 3 fix: dict.has_key() was removed; use the "in" operator)
    if 'X509_USER_PROXY' in os.environ:
        filePath = os.environ['X509_USER_PROXY']
        if validate_proxy(filePath):
            return filePath, filePath
        else:
            raise RuntimeError(rfc_proxy_msg)

    # use X509_USER_CERT and X509_USER_KEY if set
    if ('X509_USER_CERT' in os.environ and
            'X509_USER_KEY' in os.environ):
        certFile = os.environ['X509_USER_CERT']
        keyFile = os.environ['X509_USER_KEY']
        return certFile, keyFile

    # search for proxy file on disk
    uid = os.getuid()
    path = "/tmp/x509up_u%d" % uid
    if os.access(path, os.R_OK):
        if validate_proxy(path):
            return path, path
        else:
            raise RuntimeError(rfc_proxy_msg)

    # if we get here could not find a credential
    raise RuntimeError(rfc_proxy_msg)
python
{ "resource": "" }
q39407
find_server
train
def find_server():
    """Find the default server host from the environment

    This method uses the C{LIGO_DATAFIND_SERVER} variable to construct
    a C{(host, port)} tuple.

    @returns: C{(host, port)}: the L{str} host name and L{int} port
        number

    @raises RuntimeError: if the C{LIGO_DATAFIND_SERVER} environment
        variable is not set
    """
    # Python 3 fix: dict.has_key() was removed; use the "in" operator.
    if _server_env in os.environ:
        host = os.environ[_server_env]
        port = None
        # a regex is overkill for a single-character membership test
        if ':' in host:
            host, port = host.split(':', 1)
        if port:
            port = int(port)
        return host, port
    raise RuntimeError("Environment variable %s is not set" % _server_env)
python
{ "resource": "" }
q39408
GWDataFindHTTPConnection._requestresponse
train
def _requestresponse(self, method, url, body=None, headers={}): """Internal method to perform request and verify reponse. @param method: name of the method to use (e.g. 'GET') @param url : remote URL to query @type method: L{str} @type url : L{str} @returns: L{str} response from server query @raises RuntimeError: if query is unsuccessful """ try: self.request(method, url) response = self.getresponse() except Exception,e: raise RuntimeError("Unable to query server %s: %s\n\n" "Perhaps you need a valid proxy credential?\n" % (self.host, e)) if response.status != 200: raise RuntimeError("Server returned code %d: %s%s" % (response.status, response.reason, response.read())) return response
python
{ "resource": "" }
q39409
GWDataFindHTTPConnection.find_observatories
train
def find_observatories(self, match=None):
    """Query the LDR host for observatories.  Use match to restrict
    returned observatories to those matching the regular expression.

    Example:

    >>> connection.find_observatories()
    ['AGHLT', 'G', 'GHLTV', 'GHLV', 'GHT', 'H', 'HL', 'HLT', 'L',
     'T', 'V', 'Z']
    >>> connection.find_observatories("H")
    ['H', 'HL', 'HLT']

    @type match: L{str}
    @param match: name to match return observatories against

    @returns: L{list} of observatory prefixes
    """
    response = self._requestresponse("GET", "%s/gwf.json" % _url_prefix)
    sitelist = sorted(set(decode(response.read())))
    if match:
        # restrict to sites matching the given regular expression
        pattern = re.compile(match)
        sitelist = [site for site in sitelist if pattern.search(site)]
    return sitelist
python
{ "resource": "" }
q39410
GWDataFindHTTPConnection.find_types
train
def find_types(self, site=None, match=None):
    """Query the LDR host for frame types.  Use site to restrict
    query to given observatory prefix, and use match to restrict
    returned types to those matching the regular expression.

    Example:

    >>> connection.find_types("L", "RDS")
    ['L1_RDS_C01_LX',
     'L1_RDS_C02_LX',
     'L1_RDS_C03_L2',
     'L1_RDS_R_L1',
     'L1_RDS_R_L3',
     'L1_RDS_R_L4',
     'PEM_RDS_A6',
     'RDS_R_L1',
     'RDS_R_L2',
     'RDS_R_L3',
     'TESTPEM_RDS_A6']

    @param site: single-character name of site to match
    @param match: type-name to match against
    @type site: L{str}
    @type match: L{str}

    @returns: L{list} of frame types
    """
    # restrict by site when given; only the first character is used
    if site:
        url = "%s/gwf/%s.json" % (_url_prefix, site[0])
    else:
        url = "%s/gwf/all.json" % _url_prefix
    response = self._requestresponse("GET", url)
    typelist = sorted(set(decode(response.read())))
    if match:
        pattern = re.compile(match)
        # local renamed from "type" to avoid shadowing the builtin
        typelist = [ftype for ftype in typelist if pattern.search(ftype)]
    return typelist
python
{ "resource": "" }
q39411
GWDataFindHTTPConnection.find_times
train
def find_times(self, site, frametype, gpsstart=None, gpsend=None):
    """Query the LDR for times for which frames are avaliable

    Use gpsstart and gpsend to restrict the returned times to
    this semiopen interval.

    @returns: L{segmentlist<pycbc_glue.segments.segmentlist>}

    @param site: single-character name of site to match
    @param frametype: name of frametype to match
    @param gpsstart: integer GPS start time of query
    @param gpsend: integer GPS end time of query
    @type site: L{str}
    @type frametype: L{str}
    @type gpsstart: L{int}
    @type gpsend: L{int}
    """
    # a [start, end) restriction requires both boundaries
    if gpsstart and gpsend:
        url = ("%s/gwf/%s/%s/segments/%s,%s.json"
               % (_url_prefix, site, frametype, gpsstart, gpsend))
    else:
        url = ("%s/gwf/%s/%s/segments.json"
               % (_url_prefix, site, frametype))
    response = self._requestresponse("GET", url)
    spans = decode(response.read())
    return segments.segmentlist(segments.segment(span) for span in spans)
python
{ "resource": "" }
q39412
GWDataFindHTTPConnection.find_frame
train
def find_frame(self, framefile, urltype=None, on_missing="warn"):
    """Query the LDR host for a single framefile

    @returns: L{Cache<pycbc_glue.lal.Cache>}

    @param framefile: name of the frame file to look up
    @param urltype: file scheme to search for (e.g. 'file')
    @param on_missing: what to do when the requested frame isn't
        found, one of:
            - C{'warn'} (default): print a warning,
            - C{'error'}: raise an L{RuntimeError}, or
            - C{'ignore'}: do nothing
    @type framefile: L{str}
    @type urltype: L{str}
    @type on_missing: L{str}

    @raises RuntimeError: if given framefile is malformed
    """
    if on_missing not in ("warn", "error", "ignore"):
        raise ValueError("on_missing must be 'warn', 'error', or 'ignore'.")
    framefile = os.path.basename(framefile)
    # parse file name for site, frame type
    try:
        site, frametype, _, _ = framefile.split("-")
    except Exception as e:  # Python 3 fix: "except ... , e" is Py2-only
        raise RuntimeError("Error parsing filename %s: %s" % (framefile, e))
    url = ("%s/gwf/%s/%s/%s.json"
           % (_url_prefix, site, frametype, framefile))
    response = self._requestresponse("GET", url)
    urllist = decode(response.read())
    if len(urllist) == 0:
        if on_missing == "warn":
            sys.stderr.write("No files found!\n")
        elif on_missing == "error":
            raise RuntimeError("No files found!")
    # verify urltype is what we want
    cache = lal.Cache(e for e in
                      [lal.CacheEntry.from_T050017(x,
                           coltype=self.LIGOTimeGPSType) for x in urllist]
                      if not urltype or e.scheme == urltype)
    return cache
python
{ "resource": "" }
q39413
GWDataFindHTTPConnection.find_frame_urls
train
def find_frame_urls(self, site, frametype, gpsstart, gpsend,
                    match=None, urltype=None, on_gaps="warn"):
    """Find the framefiles for the given type in the [start, end)
    interval frame

    @param site: single-character name of site to match
    @param frametype: name of frametype to match
    @param gpsstart: integer GPS start time of query
    @param gpsend: integer GPS end time of query
    @param match: regular expression to match against
    @param urltype: file scheme to search for (e.g. 'file')
    @param on_gaps: what to do when the requested frame isn't
        found, one of:
            - C{'warn'} (default): print a warning,
            - C{'error'}: raise an L{RuntimeError}, or
            - C{'ignore'}: do nothing
    @type site: L{str}
    @type frametype: L{str}
    @type gpsstart: L{int}
    @type gpsend: L{int}
    @type match: L{str}
    @type urltype: L{str}
    @type on_gaps: L{str}

    @returns: L{Cache<pycbc_glue.lal.Cache>}

    @raises RuntimeError: if gaps are found and C{on_gaps='error'}
    """
    if on_gaps not in ("warn", "error", "ignore"):
        raise ValueError("on_gaps must be 'warn', 'error', or 'ignore'.")
    path = ("%s/gwf/%s/%s/%s,%s"
            % (_url_prefix, site, frametype, gpsstart, gpsend))
    # if a URL type is specified append it to the path
    if urltype:
        path += "/%s" % urltype
    # request JSON output
    path += ".json"
    # append a regex if input
    if match:
        path += "?match=%s" % match
    # make query
    response = self._requestresponse("GET", path)
    urllist = decode(response.read())

    out = lal.Cache([lal.CacheEntry.from_T050017(x,
                         coltype=self.LIGOTimeGPSType) for x in urllist])

    if on_gaps == "ignore":
        return out
    # verify the files cover the whole requested span
    span = segments.segment(gpsstart, gpsend)
    seglist = segments.segmentlist(e.segment for e in out).coalesce()
    if span in seglist:
        return out
    missing = (segments.segmentlist([span]) - seglist).coalesce()
    msg = "Missing segments: \n%s" % "\n".join(map(str, missing))
    if on_gaps == "warn":
        sys.stderr.write("%s\n" % msg)
        return out
    raise RuntimeError(msg)
python
{ "resource": "" }
q39414
WalkChildren
train
def WalkChildren(elem):
    """
    Walk the XML tree of children below elem, returning each in order
    (depth-first, pre-order).
    """
    for child in elem.childNodes:
        yield child
        # recurse into the subtree rooted at this child
        for descendant in WalkChildren(child):
            yield descendant
python
{ "resource": "" }
q39415
make_parser
train
def make_parser(handler):
    """
    Convenience function to construct a document parser with namespaces
    enabled and validation disabled.  Document validation is a nice
    feature, but enabling validation can require the LIGO LW DTD to be
    downloaded from the LDAS document server if the DTD is not included
    inline in the XML.  This requires a working connection to the
    internet and the server to be up.
    """
    parser = sax.make_parser()
    parser.setContentHandler(handler)
    # namespaces on; validation and external general entities off so no
    # network access is needed for the DTD
    parser.setFeature(sax.handler.feature_namespaces, True)
    parser.setFeature(sax.handler.feature_validation, False)
    parser.setFeature(sax.handler.feature_external_ges, False)
    return parser
python
{ "resource": "" }
q39416
Element.appendChild
train
def appendChild(self, child):
    """
    Add a child to this element.  The child's parentNode attribute is
    updated, too.  Returns the child.
    """
    child.parentNode = self
    self.childNodes.append(child)
    # validate the new child in its position at the end of the list
    self._verifyChildren(len(self.childNodes) - 1)
    return child
python
{ "resource": "" }
q39417
Element.insertBefore
train
def insertBefore(self, newchild, refchild):
    """
    Insert a new child node before an existing child.  It must be the
    case that refchild is a child of this node;  if not, ValueError is
    raised.  newchild is returned.
    """
    # compare by identity, not equality: we must find the exact
    # object that was passed in
    for position, node in enumerate(self.childNodes):
        if node is not refchild:
            continue
        self.childNodes.insert(position, newchild)
        newchild.parentNode = self
        self._verifyChildren(position)
        return newchild
    raise ValueError(refchild)
python
{ "resource": "" }
q39418
Element.replaceChild
train
def replaceChild(self, newchild, oldchild):
    """
    Replace an existing node with a new node.  It must be the case
    that oldchild is a child of this node;  if not, ValueError is
    raised.  newchild is returned.
    """
    # .index() would use compare-by-value; we want compare-by-id
    # because we must find the exact object, not something merely
    # equal to it
    for position, node in enumerate(self.childNodes):
        if node is not oldchild:
            continue
        node.parentNode = None
        self.childNodes[position] = newchild
        newchild.parentNode = self
        self._verifyChildren(position)
        return newchild
    raise ValueError(oldchild)
python
{ "resource": "" }
q39419
Element.appendData
train
def appendData(self, content):
    """
    Add characters to the element's pcdata.
    """
    # start fresh when no character data has been collected yet
    if self.pcdata is None:
        self.pcdata = content
    else:
        self.pcdata += content
python
{ "resource": "" }
q39420
Column.start_tag
train
def start_tag(self, indent):
    """
    Generate the string for the element's start tag.
    """
    # render attributes as  key="value" pairs in insertion order
    attrs = u"".join(u' %s="%s"' % item for item in self.attributes.items())
    return u"%s<%s%s/>" % (indent, self.tagName, attrs)
python
{ "resource": "" }
q39421
Time.from_gps
train
def from_gps(cls, gps, Name=None):
    """
    Instantiate a Time element initialized to the value of the given
    GPS time.  The Name attribute will be set to the value of the Name
    parameter if given.

    Note:  the new Time element holds a reference to the GPS time, not
    a copy of it.  Subsequent modification of the GPS time object will
    be reflected in what gets written to disk.
    """
    elem = cls(AttributesImpl({u"Type": u"GPS"}))
    if Name is not None:
        elem.Name = Name
    # store a reference (not a copy) to the GPS time object
    elem.pcdata = gps
    return elem
python
{ "resource": "" }
q39422
Document.write
train
def write(self, fileobj=sys.stdout, xsl_file=None):
    """
    Write the document.
    """
    fileobj.write(Header)
    fileobj.write(u"\n")
    # optional XSL stylesheet processing instruction
    if xsl_file is not None:
        fileobj.write(u'<?xml-stylesheet type="text/xsl" href="%s" ?>\n' % xsl_file)
    for child in self.childNodes:
        if child.tagName not in self.validchildren:
            raise ElementError("invalid child %s for %s" % (child.tagName, self.tagName))
        child.write(fileobj)
python
{ "resource": "" }
q39423
SimpleLWXMLParser.start_element
train
def start_element(self, name, attrs):
    """
    Callback for start of an XML element.  Checks to see if we are
    about to start a table that matches the ignore pattern.

    @param name: the name of the tag being opened
    @type name: string

    @param attrs: a dictionary of the attributes for the tag being
      opened
    @type attrs: dictionary
    """
    if name.lower() != "table":
        return
    # look up the table's Name attribute (case-insensitively) and test
    # it against the ignore pattern
    for attr in attrs.keys():
        if attr.lower() == "name" and self.__ignore_pat.search(attrs[attr]):
            self.__in_table = 1
python
{ "resource": "" }
q39424
SimpleLWXMLParser.parse_line
train
def parse_line(self, line):
    """
    For each line we are passed, call the XML parser.  Returns the
    line if we are outside one of the ignored tables, otherwise
    returns the empty string.

    @param line: the line of the LIGO_LW XML file to be parsed
    @type line: string

    @return: the line of XML passed in or the null string
    @rtype: string
    """
    self.__p.Parse(line)
    # entering an ignored table silences output; the silence persists
    # for one call after the table closes (the closing tag itself)
    if self.__in_table:
        self.__silent = 1
    ret = "" if self.__silent else line
    if not self.__in_table:
        self.__silent = 0
    return ret
python
{ "resource": "" }
q39425
LDBDClient.ping
train
def ping(self):
    """
    Ping the LDBD Server and return any message received back as
    a string.

    @return: message received (may be empty) from LDBD Server as a string
    """
    msg = "PING\0"
    self.sfile.write(msg)
    ret, output = self.__response__()
    reply = str(output[0])
    if ret:
        msg = "Error pinging server %d:%s" % (ret, reply)
        # Python 3 fix: "raise Exc, msg" statement syntax is Py2-only
        raise LDBDClientException(msg)
    return reply
python
{ "resource": "" }
q39426
LDBDClient.query
train
def query(self, sql):
    """
    Execute an SQL query on the server and fetch the resulting XML
    file back.

    @return: message received (may be empty) from LDBD Server as a string
    """
    msg = "QUERY\0" + sql + "\0"
    self.sfile.write(msg)
    ret, output = self.__response__()
    reply = str(output[0])
    if ret:
        msg = "Error executing query on server %d:%s" % (ret, reply)
        # Python 3 fix: "raise Exc, msg" statement syntax is Py2-only
        raise LDBDClientException(msg)
    return reply
python
{ "resource": "" }
q39427
julianDay
train
def julianDay(year, month, day): "returns julian day=day since Jan 1 of year" hr = 12 #make sure you fall into right day, middle is save t = time.mktime((year, month, day, hr, 0, 0.0, 0, 0, -1)) julDay = time.localtime(t)[7] return julDay
python
{ "resource": "" }
q39428
mkUTC
train
def mkUTC(year, month, day, hour, min, sec): "similar to python's mktime but for utc" spec = [year, month, day, hour, min, sec] + [0, 0, 0] utc = time.mktime(spec) - time.timezone return utc
python
{ "resource": "" }
q39429
UTCFromGps
train
def UTCFromGps(gpsWeek, SOW, leapSecs=14):
    """converts gps week and seconds to UTC

    see comments of inverse function!

    SOW = seconds of week
    gpsWeek is the full number (not modulo 1024)
    """
    secFract = SOW % 1
    epochTuple = gpsEpoch + (-1, -1, 0)
    # mktime works in local time; subtract the zone offset to get UTC
    t0 = time.mktime(epochTuple) - time.timezone
    t = t0 + (gpsWeek * secsInWeek) + SOW - leapSecs
    # use gmtime since localtime does not allow switching off the
    # daylight-saving correction
    (year, month, day, hh, mm, ss,
     dayOfWeek, julianDay, daylightsaving) = time.gmtime(t)
    return (year, month, day, hh, mm, ss + secFract)
python
{ "resource": "" }
q39430
GpsSecondsFromPyUTC
train
def GpsSecondsFromPyUTC(pyUTC, leapSecs=14):
    """converts the python epoch to gps seconds

    pyEpoch = the python epoch from time.time()
    """
    # NOTE(review): the original's "t = t=gpsFromUTC(...)" duplicated
    # assignment was a typo; a single assignment is intended.
    t = gpsFromUTC(*ymdhmsFromPyUTC(pyUTC))
    # t is (gpsWeek, SOW, ...): whole weeks in seconds plus
    # seconds-of-week gives total GPS seconds
    return int(t[0] * 60 * 60 * 24 * 7 + t[1])
python
{ "resource": "" }
q39431
genMeme
train
def genMeme(template_id, text0, text1):
    '''
    This function returns the url of the meme with the given template,
    upper text, and lower text using the ImgFlip meme generation API.
    Thanks!  Returns None if it is unable to generate the meme.
    '''
    api_url = 'https://api.imgflip.com/caption_image'
    payload = {
        'template_id': template_id,
        'username': 'blag',
        'password': 'blag',
        'text0': text0,
    }
    # Add bottom text if provided
    if text1 != '':
        payload['text1'] = text1
    try:
        r = requests.get(api_url, params=payload)
    except ConnectionError:
        # one retry after a short pause
        time.sleep(1)
        r = requests.get(api_url, params=payload)
    parsed_json = json.loads(r.text)
    if parsed_json['success'] != True:
        print(parsed_json['error_message'])
        return None
    return parsed_json['data']['url']
python
{ "resource": "" }
q39432
string_format_func
train
def string_format_func(s):
    """
    Function used internally to format string data for output to XML.
    Escapes back-slashes and quotes, and wraps the resulting string in
    quotes.
    """
    # str() replaces the Python-2-only unicode() built-in; under
    # Python 3 all str objects are unicode.
    return u"\"%s\"" % str(s).replace(u"\\", u"\\\\").replace(u"\"", u"\\\"")
python
{ "resource": "" }
q39433
mk_complex_format_func
train
def mk_complex_format_func(fmt):
    """
    Function used internally to generate functions to format complex
    valued data.
    """
    # combined template renders real and imaginary parts as "<re>+i<im>"
    template = u"%s+i%s" % (fmt, fmt)
    def complex_format_func(z):
        return template % (z.real, z.imag)
    return complex_format_func
python
{ "resource": "" }
q39434
fields
train
def fields(iterable, fields=None):
    """
    Add a set of fields to each item in ``iterable``.  The set of
    fields have a key=value format.  '@' are added to the front of
    each key.
    """
    if not fields:
        # Nothing to add: pass items through unchanged.  The explicit
        # return is the bug fix: the original fell through after this
        # loop and called _prepare_fields(None), re-processing (or
        # failing on) the already-yielded items.
        for item in iterable:
            yield item
        return
    prepared_fields = _prepare_fields(fields)
    for item in iterable:
        yield _process_fields(item, prepared_fields)
python
{ "resource": "" }
q39435
PlugItAPI._request
train
def _request(self, uri, params=None, postParams=None, verb='GET'):
    """Execute a request on the plugit api"""
    # dispatch to requests.get / requests.post / ... based on the verb
    method = getattr(requests, verb.lower())
    return method(self.url + uri, params=params, data=postParams, stream=True)
python
{ "resource": "" }
q39436
PlugItAPI.get_user
train
def get_user(self, userPk):
    """Returns the user specified with the user's Pk or UUID"""
    r = self._request('user/' + str(userPk))
    if not r:
        return None
    # set base properties, then copy the response data into the user
    u = User()
    u.pk = u.id = userPk
    u.__dict__.update(r.json())
    return u
python
{ "resource": "" }
q39437
PlugItAPI.get_subscription_labels
train
def get_subscription_labels(self, userPk):
    """Returns a list with all the labels the user is subscribed to"""
    r = self._request('subscriptions/' + str(userPk))
    # an empty/failed response yields no labels
    return r.json() if r else []
python
{ "resource": "" }
q39438
PlugItAPI.get_orgas
train
def get_orgas(self):
    """Return the list of pk for all orgas"""
    r = self._request('orgas/')
    if not r:
        return None
    orgas = []
    for data in r.json()['data']:
        # copy the response data, then mirror id into pk
        o = Orga()
        o.__dict__.update(data)
        o.pk = o.id
        orgas.append(o)
    return orgas
python
{ "resource": "" }
q39439
PlugItAPI.get_orga
train
def get_orga(self, orgaPk):
    """Return an organization speficied with orgaPk"""
    r = self._request('orga/' + str(orgaPk))
    if not r:
        return None
    # set base properties, then copy the response data into the orga
    o = Orga()
    o.pk = o.id = orgaPk
    o.__dict__.update(r.json())
    return o
python
{ "resource": "" }
q39440
PlugItAPI.get_project_members
train
def get_project_members(self):
    """Return the list of members in the project"""
    r = self._request('members/')
    if not r:
        return None
    members = []
    for data in r.json()['members']:
        # base properties copied straight from the response
        u = User()
        u.__dict__.update(data)
        members.append(u)
    return members
python
{ "resource": "" }
q39441
PlugItAPI.send_mail
train
def send_mail(self, sender, subject, recipients, message,
              response_id=None, html_message=False):
    """Send an email using EBUio features.  If response_id is set,
    replies will be send back to the PlugIt server."""
    params = {
        'sender': sender,
        'subject': subject,
        'dests': recipients,
        'message': message,
        'html_message': html_message,
    }
    # replies are routed back to the PlugIt server when requested
    if response_id:
        params['response_id'] = response_id
    return self._request('mail/', postParams=params, verb='POST')
python
{ "resource": "" }
q39442
PlugItAPI.forum_create_topic
train
def forum_create_topic(self, subject, author, message, tags=""):
    """Create a topic using EBUio features."""
    payload = {
        'subject': subject,
        'author': author,
        'message': message,
        'tags': tags,
    }
    return self._request('ebuio/forum/', postParams=payload, verb='POST')
python
{ "resource": "" }
q39443
PlugItAPI.forum_topic_get_by_tag_for_user
train
def forum_topic_get_by_tag_for_user(self, tag=None, author=None):
    """Get all forum topics with a specific tag"""
    if not tag:
        return None
    url = 'ebuio/forum/search/bytag/' + tag
    # optionally restrict the search to a single author
    if author:
        url += '?u=' + author
    r = self._request(url)
    if not r:
        return None
    return list(r.json().get('data', []))
python
{ "resource": "" }
q39444
get_for_directory
train
def get_for_directory(dp, hash_mode="md5", filter_dots=False,
                      filter_func=lambda fp: False):
    r"""
    Returns a hash string for the files below a given directory path.

    :param dp:
        Path to a directory.

    :param hash_mode:
        Can be either one of 'md5', 'sha1', 'sha256' or 'sha512'.
        Defines the algorithm used to generate the resulting hash
        string.  Default is 'md5'.

    :param filter_dots:
        If True will filter directories or files beginning with a '.'
        (dot) like '.git'.  Default is False.

    :param filter_func:
        A function receiving a path as a single paramter.  If it
        returns True the given path will be excluded from the hash
        calculation.  Otherwise it will be included.
    """
    hash_func = _HASH_MODE_DICT.get(hash_mode)
    # Python 3 fix: itertools.imap/ifilterfalse were removed; use the
    # built-in map() and itertools.filterfalse() instead.
    root_dps_fns = map(list, os.walk(dp, topdown=True))
    if filter_dots:
        root_dps_fns = itertools.filterfalse(_is_dot_root, root_dps_fns)
        root_dps_fns = map(_filter_dot_fns, root_dps_fns)
    # flatten per-directory file lists into one lazy stream of paths
    fps = itertools.chain.from_iterable(map(_gen_fps, root_dps_fns))
    fps = itertools.filterfalse(filter_func, fps)
    file_handles = map(_get_file_handle, fps)
    file_hash_digests = map(_get_file_hash_digest, file_handles,
                            itertools.repeat(hash_func))
    # sort so the merged hash is independent of directory walk order
    file_hash_digests = sorted(file_hash_digests)
    file_hash_digests = [_get_utf8_encoded(d) for d in file_hash_digests]
    hash_ = _get_merged_hash(file_hash_digests, hash_func)
    return hash_.hexdigest()
python
{ "resource": "" }
q39445
get_for_file
train
def get_for_file(fp, hash_mode="md5"):
    r"""
    Returns a hash string for the given file path.

    :param fp:
        Path to the file.

    :param hash_mode:
        Can be either one of 'md5', 'sha1', 'sha256' or 'sha512'.
        Defines the algorithm used to generate the resulting hash
        string.  Default is 'md5'.
    """
    # delegate to the file-handle variant; the context manager ensures
    # the handle is closed again
    with _get_file_handle(fp) as handle:
        return get_for_handle(handle, hash_mode)
python
{ "resource": "" }
q39446
get_for_handle
train
def get_for_handle(f, hash_mode="md5"):
    r"""
    Returns a hash string for the given file-like object.

    :param f:
        The file object.

    :param hash_mode:
        Can be either one of 'md5', 'sha1', 'sha256' or 'sha512'.
        Defines the algorithm used to generate the resulting hash
        string.  Default is 'md5'.
    """
    hash_func = _HASH_MODE_DICT.get(hash_mode)
    digest = _get_file_hash_digest(f, hash_func)
    return _get_utf8_encoded(digest)
python
{ "resource": "" }
q39447
getArraysByName
train
def getArraysByName(elem, name):
    """
    Return a list of arrays with name name under elem.
    """
    target = StripArrayName(name)
    return elem.getElements(
        lambda e: (e.tagName == ligolw.Array.tagName) and (e.Name == target))
python
{ "resource": "" }
q39448
from_array
train
def from_array(name, array, dim_names=None):
    """
    Construct a LIGO Light Weight XML Array document subtree from a
    numpy array object.

    Example:

    >>> import numpy, sys
    >>> a = numpy.arange(12, dtype = "double")
    >>> a.shape = (4, 3)
    >>> from_array(u"test", a).write(sys.stdout)  # doctest: +NORMALIZE_WHITESPACE
    <Array Type="real_8" Name="test:array">
        <Dim>3</Dim>
        <Dim>4</Dim>
        <Stream Delimiter=" " Type="Local">
            0 3 6 9
            1 4 7 10
            2 5 8 11
        </Stream>
    </Array>
    """
    # Type must be set for .__init__();  easier to set Name afterwards
    # to take advantage of encoding handled by attribute proxy
    doc = Array(Attributes({u"Type": ligolwtypes.FromNumPyType[str(array.dtype)]}))
    doc.Name = name
    # Dim children are written fastest-varying index first, hence the
    # reversal of the shape tuple.
    for n, dim in enumerate(reversed(array.shape)):
        child = ligolw.Dim()
        if dim_names is not None:
            child.Name = dim_names[n]
        # Python 3 fix: the unicode() built-in was removed; str() is
        # equivalent under Python 3.
        child.pcdata = str(dim)
        doc.appendChild(child)
    child = ArrayStream(Attributes({u"Type": ArrayStream.Type.default,
                                    u"Delimiter": ArrayStream.Delimiter.default}))
    doc.appendChild(child)
    doc.array = array
    return doc
python
{ "resource": "" }
q39449
get_array
train
def get_array(xmldoc, name):
    """
    Scan xmldoc for an array named name.  Raises ValueError if not
    exactly 1 such array is found.
    """
    matches = getArraysByName(xmldoc, name)
    if len(matches) != 1:
        raise ValueError("document must contain exactly one %s array" % StripArrayName(name))
    return matches[0]
python
{ "resource": "" }
q39450
use_in
train
def use_in(ContentHandler):
    """
    Modify ContentHandler, a sub-class of
    pycbc_glue.ligolw.LIGOLWContentHandler, so that it builds the Array
    and ArrayStream classes defined in this module while parsing XML
    documents.  Returns the (modified) ContentHandler.

    Example:

    >>> from pycbc_glue.ligolw import ligolw
    >>> class MyContentHandler(ligolw.LIGOLWContentHandler):
    ...	pass
    ...
    >>> use_in(MyContentHandler)
    <class 'pycbc_glue.ligolw.array.MyContentHandler'>
    """
    # capture the original handler in a closure so it can still be
    # delegated to for non-Array streams
    previous_startStream = ContentHandler.startStream

    def startStream(self, parent, attrs):
        if parent.tagName == ligolw.Array.tagName:
            return ArrayStream(attrs).config(parent)
        return previous_startStream(self, parent, attrs)

    def startArray(self, parent, attrs):
        return Array(attrs)

    ContentHandler.startStream = startStream
    ContentHandler.startArray = startArray
    return ContentHandler
python
{ "resource": "" }
q39451
Array.get_shape
train
def get_shape(self):
    """
    Return a tuple of this array's dimensions, obtained by querying
    the Dim child elements.  Note that once it has been created, it is
    also possible to examine an Array object's .array attribute
    directly, and doing that is much faster.
    """
    dims = self.getElementsByTagName(ligolw.Dim.tagName)
    sizes = [int(dim.pcdata) for dim in dims]
    sizes.reverse()
    return tuple(sizes)
python
{ "resource": "" }
q39452
get_all_files_in_range
train
def get_all_files_in_range(dirname, starttime, endtime, pad=64):
    """Return all files under dirname (recursively) whose names indicate
    that they contain segments in the range starttime to endtime.

    dirname may also name a single file, in which case it is returned
    (in a one-element list) if its name matches the LIGO cache naming
    convention ``*-<gpsstart>-<duration>.xml``, regardless of range.

    Directories named ``*-NNNN`` / ``*-NNNNN`` are pruned using the
    leading digits of the GPS times; a file is accepted when its GPS
    start time lies in [starttime - pad, endtime + pad].
    """
    ret = []

    # Maybe the user just wants one file...
    if os.path.isfile(dirname):
        if re.match(r'.*-[0-9]*-[0-9]*\.xml$', dirname):
            return [dirname]
        return ret

    # Floor division keeps the pruning comparison in integers; plain
    # "/" would produce floats under Python 3 and wrongly exclude the
    # boundary directories.
    first_four_start = starttime // 100000
    first_four_end = endtime // 100000

    for filename in os.listdir(dirname):
        path = os.path.join(dirname, filename)
        if re.match(r'.*-[0-9]{5}$', filename):
            dirtime = int(filename[-5:])
            if first_four_start <= dirtime <= first_four_end:
                ret += get_all_files_in_range(path, starttime, endtime, pad=pad)
        elif re.match(r'.*-[0-9]{4}$', filename):
            dirtime = int(filename[-4:])
            if first_four_start <= dirtime <= first_four_end:
                ret += get_all_files_in_range(path, starttime, endtime, pad=pad)
        elif re.match(r'.*-[0-9]*-[0-9]*\.xml$', filename):
            file_time = int(filename.split('-')[-2])
            if (starttime - pad) <= file_time <= (endtime + pad):
                ret.append(path)
        elif os.path.isdir(path):
            # Keep recursing, we may be looking at directories of
            # ifos, each of which has directories with times.  The
            # isdir() guard prevents os.listdir() from raising on a
            # stray non-matching plain file.
            ret += get_all_files_in_range(path, starttime, endtime, pad=pad)

    return ret
python
{ "resource": "" }
q39453
ensure_segment_table
train
def ensure_segment_table(connection):
    """Ensure that the sqlite database behind connection possesses a
    segment table.  If not, create one (columns taken from the
    lsctables definition of 'segment') and print a warning to stderr.
    """
    # sqlite_master lists every table defined in the database
    count = connection.cursor().execute("SELECT count(*) FROM sqlite_master WHERE name='segment'").fetchone()[0]
    if count == 0:
        print >>sys.stderr, "WARNING: None of the loaded files contain a segment table"
        theClass = lsctables.TableByName['segment']
        # build "CREATE TABLE ... (col type, ...)" from the canonical
        # column definitions, mapping LIGO_LW types to SQLite types
        statement = "CREATE TABLE IF NOT EXISTS segment (" + ", ".join(map(lambda key: "%s %s" % (key, ligolwtypes.ToSQLiteType[theClass.validcolumns[key]]), theClass.validcolumns)) + ")"
        connection.cursor().execute(statement)
python
{ "resource": "" }
q39454
build_segment_list_one
train
def build_segment_list_one(engine, gps_start_time, gps_end_time, ifo, segment_name, version = None, start_pad = 0, end_pad = 0):
    """Builds a list of segments satisfying the given criteria.

    Returns the pair (sum_result, seg_result) of segmentlists: the
    segment_summary intervals and the segment intervals for the given
    ifo/name/version, each clipped to [gps_start_time, gps_end_time].
    start_pad and end_pad are added (in SQL) to the segment start/end
    times only, not to the summary times.

    NOTE(review): version is interpolated unquoted into the SQL, so a
    None version yields "version = None" -- presumably callers always
    supply a concrete version here; confirm against the callers.
    """
    seg_result = segmentlist([])
    sum_result = segmentlist([])

    # Is there any way to get segment and segement summary in one query?
    # Maybe some sort of outer join where we keep track of which segment
    # summaries we've already seen.
    sql = "SELECT segment_summary.start_time, segment_summary.end_time "
    sql += "FROM segment_definer, segment_summary "
    sql += "WHERE segment_summary.segment_def_id = segment_definer.segment_def_id "
    sql += "AND segment_definer.ifos = '%s' " % ifo
    if engine.__class__ == query_engine.LdbdQueryEngine:
        # the ldbd (DB2-backed) engine additionally needs the
        # creator_db join key
        sql += "AND segment_summary.segment_def_cdb = segment_definer.creator_db "
    sql += "AND segment_definer.name = '%s' " % segment_name
    sql += "AND segment_definer.version = %s " % version
    sql += "AND NOT (%s > segment_summary.end_time OR segment_summary.start_time > %s)" % (gps_start_time, gps_end_time)

    rows = engine.query(sql)

    for sum_start_time, sum_end_time in rows:
        # clip each summary interval to the requested range
        # ("cond and a or b" is the pre-ternary idiom for: a if cond else b)
        sum_start_time = (sum_start_time < gps_start_time) and gps_start_time or sum_start_time
        sum_end_time = (sum_end_time > gps_end_time) and gps_end_time or sum_end_time

        sum_result |= segmentlist([segment(sum_start_time, sum_end_time)])

    # We can't use queries paramaterized with ? since the ldbd protocol doesn't support it...
    sql = "SELECT segment.start_time + %d, segment.end_time + %d " % (start_pad, end_pad)
    sql += "FROM segment, segment_definer "
    sql += "WHERE segment.segment_def_id = segment_definer.segment_def_id "
    if engine.__class__ == query_engine.LdbdQueryEngine:
        sql += "AND segment.segment_def_cdb = segment_definer.creator_db "
    sql += "AND segment_definer.ifos = '%s' " % ifo
    sql += "AND segment_definer.name = '%s' " % segment_name
    sql += "AND segment_definer.version = %s " % version
    sql += "AND NOT (%s > segment.end_time OR segment.start_time > %s)" % (gps_start_time, gps_end_time)

    rows = engine.query(sql)

    for seg_start_time, seg_end_time in rows:
        # clip each active segment to the requested range
        seg_start_time = (seg_start_time < gps_start_time) and gps_start_time or seg_start_time
        seg_end_time = (seg_end_time > gps_end_time) and gps_end_time or seg_end_time

        seg_result |= segmentlist([segment(seg_start_time, seg_end_time)])

    engine.close()

    return sum_result, seg_result
python
{ "resource": "" }
q39455
run_query_segments
train
def run_query_segments(doc, proc_id, engine, gps_start_time, gps_end_time, included_segments_string, excluded_segments_string = None, write_segments = True, start_pad = 0, end_pad = 0):
    """Runs a segment query.  This was originally part of
    ligolw_query_segments, but now is also used by
    ligolw_segments_from_cats.

    The write_segments option is provided so callers can coalesce
    segments obtained over several invocations (as segments_from_cats
    does).

    included_segments_string / excluded_segments_string are
    comma-separated specifiers understood by split_segment_ids().
    Returns the coalesced segmentlist (included minus excluded);  when
    write_segments is true the result is also recorded in doc under a
    synthetic "result" segment_definer.
    """
    if write_segments:
        all_ifos = {}

        # collect the set of IFOs named in the included specifiers
        for ifo, segment_name, version in split_segment_ids(included_segments_string.split(',')):
            all_ifos[ifo] = True

        # one "result" definer spanning all the IFOs, with a summary
        # covering the whole query interval
        new_seg_def_id = add_to_segment_definer(doc, proc_id, ''.join(all_ifos.keys()), 'result', 0)
        add_to_segment_summary(doc, proc_id, new_seg_def_id, [[gps_start_time, gps_end_time]])

    result = segmentlist([])

    for ifo, segment_name, version in split_segment_ids(included_segments_string.split(',')):
        sum_segments, seg_segments = build_segment_list(engine, gps_start_time, gps_end_time, ifo, segment_name, version, start_pad, end_pad)
        seg_def_id = add_to_segment_definer(doc, proc_id, ifo, segment_name, version)
        add_to_segment_summary(doc, proc_id, seg_def_id, sum_segments)

        # and accumulate segments
        result |= seg_segments

    # Excluded segments are not required
    if excluded_segments_string:
        excluded_segments = segmentlist([])

        for ifo, segment_name, version in split_segment_ids(excluded_segments_string.split(',')):
            # note: exclusions are queried without the start/end padding
            sum_segments, seg_segments = build_segment_list(engine, gps_start_time, gps_end_time, ifo, segment_name, version)
            excluded_segments |= seg_segments

        result = result - excluded_segments

    result.coalesce()

    # Add the segments
    if write_segments:
        add_to_segment(doc, proc_id, new_seg_def_id, result)

    return result
python
{ "resource": "" }
q39456
url2path
train
def url2path(url):
    """
    Return the filesystem path named by url if the URL refers to a
    file on the local host;  otherwise raise ValueError.
    """
    parsed = urlparse(url)
    local_scheme = parsed[0].lower() in ("", "file")
    local_host = parsed[1].lower() in ("", "localhost")
    if local_scheme and local_host:
        return parsed[2]
    raise ValueError(url)
python
{ "resource": "" }
q39457
remove_input
train
def remove_input(urls, preserves, verbose = False):
    """
    Attempt to delete all files identified by the URLs in urls except
    any that are the same as the files in the preserves list.

    urls -- iterable of URLs naming local files (resolved with
    url2path()).
    preserves -- iterable of paths that must not be deleted;  sameness
    is tested with os.path.samefile(), so other links to a preserved
    file are kept too.
    """
    for path in map(url2path, urls):
        # skip anything that is one of the preserved files
        if any(os.path.samefile(path, preserve) for preserve in preserves):
            continue
        if verbose:
            print >>sys.stderr, "removing \"%s\" ..." % path
        try:
            os.remove(path)
        except:
            # deliberately best-effort:  any failure to unlink is
            # silently ignored
            pass
python
{ "resource": "" }
q39458
reassign_ids
train
def reassign_ids(doc, verbose = False): """ Assign new IDs to all rows in all LSC tables in doc so that there are no collisions when the LIGO_LW elements are merged. """ # Can't simply run reassign_ids() on doc because we need to # construct a fresh old --> new mapping within each LIGO_LW block. for n, elem in enumerate(doc.childNodes): if verbose: print >>sys.stderr, "reassigning row IDs: %.1f%%\r" % (100.0 * (n + 1) / len(doc.childNodes)), if elem.tagName == ligolw.LIGO_LW.tagName: table.reassign_ids(elem) if verbose: print >>sys.stderr, "reassigning row IDs: 100.0%" return doc
python
{ "resource": "" }
q39459
merge_ligolws
train
def merge_ligolws(elem):
    """
    Merge all LIGO_LW elements that are immediate children of elem by
    appending their children to the first.  Returns elem.
    """
    blocks = [child for child in elem.childNodes if child.tagName == ligolw.LIGO_LW.tagName]
    if blocks:
        dest = blocks.pop(0)
        for src in blocks:
            # copy children; LIGO_LW elements have no attributes
            for child in src.childNodes:
                dest.appendChild(child)
            # unlink from parent
            if src.parentNode is not None:
                src.parentNode.removeChild(src)
    return elem
python
{ "resource": "" }
q39460
merge_compatible_tables
train
def merge_compatible_tables(elem):
    """
    Below the given element, find all Tables whose structure is
    described in lsctables, and merge compatible ones of like type.
    That is, merge all SnglBurstTables that have the same columns into
    a single table, etc..  Returns elem.
    """
    for name in lsctables.TableByName.keys():
        candidates = table.getTablesByName(elem, name)
        if not candidates:
            continue
        dest = candidates[0]
        for src in candidates[1:]:
            if src.Name != dest.Name:
                # src and dest have different names; leave src alone
                continue
            # same names; columns must agree to merge
            if compare_table_cols(dest, src):
                raise ValueError("document contains %s tables with incompatible columns" % dest.Name)
            # columns match: move the rows across
            for row in src:
                dest.append(row)
            # unlink the now-empty source table from its parent
            if src.parentNode is not None:
                src.parentNode.removeChild(src)
    return elem
python
{ "resource": "" }
q39461
Climb.run
train
def run(self):
    """Loops and executes commands in interactive mode.

    Configures readline (completer delimiters, tab completion and,
    when a history file is configured, persistent history), then reads
    commands until EOF/Ctrl-C or until something clears self._running.
    """
    if self._skip_delims:
        # remove the configured characters from readline's word
        # delimiters so they become part of completed tokens
        delims = readline.get_completer_delims()
        for delim in self._skip_delims:
            delims = delims.replace(delim, '')
        readline.set_completer_delims(delims)
    readline.parse_and_bind("tab: complete")
    readline.set_completer(self._completer.complete)
    if self._history_file:
        # Ensure history file exists
        if not os.path.isfile(self._history_file):
            open(self._history_file, 'w').close()
        readline.read_history_file(self._history_file)
    self._running = True
    try:
        while self._running:
            try:
                command = input(self._format_prompt())
                if command:
                    # shlex honours quoting, so arguments may contain spaces
                    result = self.execute(*shlex.split(command))
                    if result:
                        print(result)
            except CLIException as exc:
                # expected CLI-level errors: report and keep looping
                print(exc)
            except (KeyboardInterrupt, EOFError):
                # Ctrl-C / Ctrl-D terminate the loop cleanly
                self._running = False
                print()
            except Exception as exc:
                # unexpected errors: full traceback only in verbose mode
                if self._verbose:
                    traceback.print_exc()
                else:
                    print(exc)
    finally:
        # always persist history, even on an unexpected exit
        if self._history_file:
            readline.write_history_file(self._history_file)
python
{ "resource": "" }
q39462
Climb.execute
train
def execute(self, *args):
    """Parse a single command line and run it, returning the result."""
    command, parsed_kwargs = self.parse(*args)
    return self._commands.execute(command, **parsed_kwargs)
python
{ "resource": "" }
q39463
gcommer_donate_threaded
train
def gcommer_donate_threaded(interval=5, region='EU-London', mode=None):
    """
    Run a daemon thread that requests and donates a token every
    `interval` seconds.
    """
    def _donate_forever():
        while True:
            server_args = find_server(region, mode)
            gcommer_donate(*server_args)
            time.sleep(interval)

    Thread(target=_donate_forever, daemon=True).start()
python
{ "resource": "" }
q39464
Xlator._make_regex
train
def _make_regex(self): """ Build a re object based on keys in the current dictionary """ return re.compile("|".join(map(re.escape, self.keys())))
python
{ "resource": "" }
q39465
LIGOLwParser.__lstring
train
def __lstring(self,lstr):
    """
    Returns a parsed lstring by stripping out any instances of the
    escaped delimiter.  Sometimes the raw lstring has whitespace and a
    double quote at the beginning or end;  if present, these are
    removed.
    """
    # llsrx/rlsrx are compiled elsewhere on this object; per the
    # docstring they strip the leading/trailing quote and whitespace.
    # The .encode('ascii') means the rest operates on a byte string
    # (Python 2 semantics).
    lstr = self.llsrx.sub('',lstr.encode('ascii'))
    lstr = self.rlsrx.sub('',lstr)
    # translate XML entities back to literal characters
    lstr = self.xmltostr.xlat(lstr)
    # replace escaped delimiters with a plain comma
    lstr = self.dlmrx.sub(',',lstr)
    return lstr
python
{ "resource": "" }
q39466
LIGOMetadata.parse
train
def parse(self,xml):
    """
    Parses an XML document into a form ready for insertion into the
    database.

    xml = the xml document to be parsed

    Raises LIGOLwParseError if either the pyRXP parser or the LIGO_LW
    tuple parser has not been initialized.
    """
    if not self.xmlparser:
        raise LIGOLwParseError, "pyRXP parser not initialized"
    if not self.lwtparser:
        raise LIGOLwParseError, "LIGO_LW tuple parser not initialized"
    # collapse the document onto one line, stripping per-line
    # whitespace (presumably to simplify downstream parsing)
    xml = "".join([x.strip() for x in xml.split('\n')])
    ligolwtup = self.xmlparser(xml)
    if self.curs:
        # with a live DB cursor, unique ids are drawn from the database
        self.lwtparser.unique = UniqueIds(self.curs)
    self.table = self.lwtparser.parsetuple(ligolwtup)
python
{ "resource": "" }
q39467
LIGOMetadata.add_lfn
train
def add_lfn(self,lfn):
    """
    Add an LFN table to a parsed LIGO_LW XML document.

    lfn = lfn to be added

    The new row is keyed to the single process row's process_id;
    raises LIGOLwParseError if the process table contains more than
    one row.
    """
    if len(self.table['process']['stream']) > 1:
        msg = "cannot add lfn to table with more than one process"
        raise LIGOLwParseError, msg
    # get the process_id from the process table
    pid_col = self.table['process']['orderedcol'].index('process_id')
    pid = self.table['process']['stream'][0][pid_col]
    try:
        self.table['lfn']['stream'].append((pid,lfn))
    except KeyError:
        # no lfn table yet: create one with the minimal structure the
        # insert machinery expects
        self.table['lfn'] = {
            'pos' : 0,
            'column' : {'process_id' : 'ilwd:char', 'name' : 'lstring'},
            'stream' : [(pid, lfn)],
            'query' : '',
            'orderedcol' : ['process_id', 'name' ]
            }
python
{ "resource": "" }
q39468
LIGOMetadata.set_dn
train
def set_dn(self,dn):
    """
    Use the domain column in the process table to store the DN,
    overwriting any existing values.  If the process table has no
    domain column yet, one is added first.

    dn = dn to be added
    """
    proc = self.table['process']
    if 'domain' in proc['orderedcol']:
        domain_col = proc['orderedcol'].index('domain')
        for row_idx, row in enumerate(proc['stream']):
            new_row = list(row)
            new_row[domain_col] = dn
            proc['stream'][row_idx] = tuple(new_row)
    else:
        proc['column']['domain'] = 'lstring'
        proc['orderedcol'].append('domain')
        for row_idx, row in enumerate(proc['stream']):
            proc['stream'][row_idx] = tuple(list(row) + [dn])
python
{ "resource": "" }
q39469
LIGOMetadata.insert
train
def insert(self):
    """Insert the parsed tables into the database.

    Returns the rowcount reported for the last table inserted.
    NOTE(review): rowcount is only bound inside the loop, so if no
    parsed table name matches self.ldb.tables this raises NameError --
    presumably the table list always overlaps; confirm.
    """
    if not self.curs:
        raise LIGOLwDBError, "Database connection not initalized"
    if len(self.table) == 0:
        raise LIGOLwDBError, 'attempt to insert empty table'
    for tab in self.table.keys():
        # find and add any missing unique ids
        generate = []
        missingcols = [k for k in self.ldb.uniqueids[tab] if k not in self.table[tab]['column']]
        for m in missingcols:
            # have the database generate the id value itself
            generate.append(',BLOB(GENERATE_UNIQUE())')
            self.table[tab]['orderedcol'].append(m)
        # and construct the sql query
        self.table[tab]['query'] = ' '.join( ['INSERT INTO', tab, '(', ','.join(self.table[tab]['orderedcol']), ') VALUES (', ','.join(['?' for x in self.table[tab]['column']]) , ''.join(generate), ')'])
    # insert in the canonical table order defined by the database
    for tabtup in self.ldb.tables:
        tab = tabtup[0].lower()
        try:
            try:
                self.curs.executemany(self.table[tab]['query'], self.table[tab]['stream'])
                rowcount = self.curs.rowcount
            except DB2.Error, e:
                # roll back and surface the failing query and data
                self.curs.execute('rollback')
                msg = e[2]
                msg += self.xml() + '\n'
                msg += str(self.table[tab]['query']) + '\n'
                msg += str(self.table[tab]['stream']) + '\n'
                raise LIGOLwDBError, msg
            except DB2.Warning, e:
                self.curs.execute('rollback')
                raise LIGOLwDBError, e[2]
            #except Exception, e:
            #    self.curs.execute('rollback')
            #    raise LIGOLwDBError, e[2]
        except KeyError:
            # the document simply doesn't contain this table; skip it
            pass
    self.curs.execute('commit')
    return rowcount
python
{ "resource": "" }
q39470
LIGOMetadata.select
train
def select(self,sql): """ Execute an SQL select statement and stuff the results into a dictionary. sql = the (case sensitve) SQL statment to execute """ if not self.curs: raise LIGOLwDBError, "Database connection not initalized" if len(self.table) != 0: raise LIGOLwDBError, 'attempt to fill non-empty table from database' ligolw = '' self.table = {} sqltypes = { -2 : 'ilwd:char_u', 1 : 'lstring', 3 : 'real_8', 4 : 'int_4s', 5 : 'int_2s', 7 : 'real_4', 8 : 'real_8', 12 : 'lstring', 93 : 'lstring', } try: tab = re.compile(r'[Ff][Rr][Oo][Mm]\s+([A-Za-z0-0_]+)([,\s]+|$)').search(sql).group(1) except AttributeError: raise LIGOLwDBError, 'could not find table name in query ' + str(sql) self.table[tab] = { 'pos' : 0, 'column' : {}, 'stream' : (), 'query' : sql } try: self.curs.execute(sql) except DB2.Error, e: raise LIGOLwDBError, e[2] desc = self.curs.description for col,typ,disp,intsz,prec,sca,nul in desc: try: self.table[tab]['column'][col] = sqltypes[typ] except KeyError: raise LIGOLwDBError, 'unknown type returned by database ' + str(typ) self.table[tab].setdefault('orderedcol',[]).append(col) try: self.table[tab]['stream'] = self.curs.fetchall() except DB2.Error, e: raise LIGOLwDBError, e[2] return len(self.table[tab]['stream'])
python
{ "resource": "" }
q39471
Model.add
train
def add(self, name, priority=3, comment="", parent=""):
    """Adds new item to the model.

    Name argument may contain (ref:) syntax, which will be stripped
    down as needed.

    :name: Name (with refs).
    :priority: Item's priority.
    :comment: Comment.
    :parent: Item's parent in "<itemref>.<subitemref...>" form
        (e.g. "1.1"); "" adds a top-level item.
    """
    target = self.data
    for ref in self._split(parent):
        target = target[int(ref) - 1][4]
    target.append([name, priority, comment, False, []])
python
{ "resource": "" }
q39472
Model.remove
train
def remove(self, index):
    """Removes specified item from the model.

    :index: Item's index in "<itemref>.<subitemref...>" form
        (e.g. "1.1").  Raises NoItemError when no item exists at
        that position.
    """
    refs = self._split(index)
    if not refs:
        return
    container = self.data
    for ref in refs[:-1]:
        container = container[int(ref) - 1][4]
    try:
        del container[int(refs[-1]) - 1]
    except IndexError:
        raise NoItemError('.'.join(refs))
python
{ "resource": "" }
q39473
Model._modifyInternal
train
def _modifyInternal(self, *, sort=None, purge=False, done=None):
    """Creates a whole new database from existing one, based on given
    modifiers.

    :sort: pattern should look like this:
    ([(<index>, True|False)], {<level_index>: [(<index>, True|False)]}),
    where True|False indicate whether to reverse or not, <index> are one
    of Model.indexes and <level_index> indicate a number of level to
    sort.  Of course, the lists above may contain multiple items.

    :done: patterns looks similar to :sort:, except that it has
    additional <regexp> values and that True|False means to mark as
    done|undone.

    @note: Should not be used directly.  It was defined here, because
    :save: decorator needs undecorated version of Model.modify.

    :sort: Pattern on which to sort the database.
    :purge: Whether to purge done items.
    :done: Pattern on which to mark items as done/undone.
    :returns: New database, modified according to supplied arguments.
    """
    # split each pattern into its global part and its per-level part
    sortAll, sortLevels = sort is not None and sort or ([], {})
    doneAll, doneLevels = done is not None and done or ([], {})

    def _mark(v, i):
        # v is one item [name, priority, comment, done, children];
        # i is the 1-based nesting level.  Returns the first four
        # fields, with the done flag possibly rewritten.
        if done is None:
            return v[:4]

        def _mark_(index, regexp, du):
            # du is the new done value; None means "leave unchanged".
            # index None means "match the regexp against any of the
            # first three fields".  Returns None to signal "no match,
            # try the next pattern".
            if du is None:
                return v[:4]
            if index is None:
                for v_ in v[:3]:
                    if regexp is None or re.match(regexp, str(v_)):
                        return v[:3] + [du]
                return v[:4]
            if regexp is None or re.match(regexp, str(v[index])):
                return v[:3] + [du]

        # per-level patterns take precedence over the global ones
        try:
            for doneLevel in doneLevels[i]:
                result = _mark_(*doneLevel)
                if result is not None:
                    return result
        except KeyError:
            pass
        # NOTE(review): only the first doneAll pattern is ever
        # consulted (the loop returns on its first iteration), and if
        # doneAll is empty with no level match, _mark returns None --
        # presumably callers never pass an empty done pattern; confirm.
        for doneAll_ in doneAll:
            result = _mark_(*doneAll_)
            if result is None:
                return v[:4]
            return result

    def _modify(submodel, i):
        # rebuild this level, recursing into children, then sort it
        _new = list()
        for v in submodel:
            if purge:
                # keep only items not marked done
                if not v[3]:
                    _new.append(_mark(v, i) + [_modify(v[4], i + 1)])
            else:
                _new.append(_mark(v, i) + [_modify(v[4], i + 1)])
        # per-level sort spec wins over the global one; the level key
        # may be stored as an int or a string
        levels = sortLevels.get(i) or sortLevels.get(str(i))
        for index, reverse in levels or sortAll:
            _new = sorted(_new, key=lambda e: e[index], reverse=reverse)
        return _new
    return _modify(self.data, 1)
python
{ "resource": "" }
q39474
Model.modify
train
def modify(self, *, sort=None, purge=False, done=None):
    """Public wrapper for Model._modifyInternal, run after the
    database has been loaded; returns the modified database."""
    result = self._modifyInternal(sort=sort, purge=purge, done=done)
    return result
python
{ "resource": "" }
q39475
Model.modifyInPlace
train
def modifyInPlace(self, *, sort=None, purge=False, done=None):
    """In-place variant of Model.modify: replaces the existing
    database with the modified one instead of returning it."""
    updated = self.modify(sort=sort, purge=purge, done=done)
    self.data = updated
python
{ "resource": "" }
q39476
CodeContainer.add_line
train
def add_line(self, string):
    """
    Append a line to the LISP code to execute and recompile the
    combined expression: a single line is wrapped in
    ``(setv result ...)``, multiple lines are ANDed together.

    :param string: The line to add
    :return: None
    """
    self.code_strings.append(string)
    if len(self.code_strings) > 1:
        code = '(setv result (and ' + ' '.join(self.code_strings) + '))'
    elif len(self.code_strings) == 1:
        code = '(setv result ' + self.code_strings[0] + ')'
    else:
        code = ''
    self._compiled_ast_and_expr = self.__compile_code(code_string=code)
python
{ "resource": "" }
q39477
CodeContainer.add_graph_to_namespace
train
def add_graph_to_namespace(self, graph):
    """
    Register the attribute dict of every vertex and edge of *graph*
    in the local LISP namespace, keyed by each element's 'name'
    attribute.

    :param graph: the graph to add to the namespace
    :return: None
    """
    for element in list(graph.vs) + list(graph.es):
        self.namespace[element['name']] = element.attributes()
python
{ "resource": "" }
q39478
CodeContainer.execute
train
def execute(self, vertices_substitution_dict={}):
    """
    Executes the code.

    :param vertices_substitution_dict: aliases of the variables in the code
    :return: True/False, depending on the result of the code (default is True)

    Note: the mutable default argument appears safe here because this
    method only tests it for truthiness, though it is also handed to a
    private helper.
    """
    if not self.code_strings:
        # no code lines registered: vacuously true
        return True
    if vertices_substitution_dict:
        # evaluate against a namespace with the aliases applied
        namespace = self.__substitute_names_in_namespace(self.namespace, vertices_substitution_dict)
    else:
        namespace = self.namespace
    try:
        self.__execute_code(self._compiled_ast_and_expr, namespace)
    except:
        # deliberately swallow evaluation errors.  NOTE(review): if the
        # code fails before assigning 'result', the lookup below raises
        # KeyError -- confirm this is the intended behaviour.
        pass
    return namespace['result']
python
{ "resource": "" }
q39479
Client.droplets
train
def droplets(self):
    """
    Return the list of Droplet objects for this account.

    :raises DOPException: if the API reports a non-OK status.
    """
    json = self.request('/droplets/', method='GET')
    status = json.get('status')
    if status != 'OK':
        message = json.get('message', None)
        raise DOPException('[%s]: %s' % (status, message))
    return [Droplet.from_json(d) for d in json.get('droplets', [])]
python
{ "resource": "" }
q39480
Client.reboot_droplet
train
def reboot_droplet(self, droplet_id):
    """
    Reboot a droplet; returns the API event id.  This is the preferred
    method to use if a server is not responding.

    :raises DOPException: if droplet_id is missing or the call fails.
    """
    if not droplet_id:
        raise DOPException('droplet_id is required to reboot a droplet!')
    json = self.request('/droplets/%s/reboot' % droplet_id, method='GET')
    if json.get('status') == 'OK':
        return json.get('event_id')
    raise DOPException('[%s]: %s' % (json.get('status'), json.get('message')))
python
{ "resource": "" }
q39481
Client.power_cycle_droplet
train
def power_cycle_droplet(self, droplet_id):
    """
    Power cycle a droplet (turn it off, then back on); returns the
    API event id.

    :raises DOPException: if droplet_id is missing or the call fails.
    """
    if not droplet_id:
        msg = 'droplet_id is required to power cycle a droplet!'
        raise DOPException(msg)
    json = self.request('/droplets/%s/power_cycle' % droplet_id, method='GET')
    if json.get('status') == 'OK':
        return json.get('event_id')
    raise DOPException('[%s]: %s' % (json.get('status'), json.get('message')))
python
{ "resource": "" }
q39482
Client.resize_droplet
train
def resize_droplet(self, droplet_id, size):
    """
    Resize a droplet to a different size (affects CPU count and
    memory); returns the API event id.

    Required parameters:
      droplet_id: Integer, the id of the droplet to resize.
      size: dict with one of
        size_id: Numeric, the id of the target size, or
        size_slug: String, the slug of the target size.

    :raises DOPException: if arguments are missing or the call fails.
    """
    if not droplet_id:
        raise DOPException('droplet_id is required to resize a droplet!')
    params = {}
    if size.get('size_id'):
        params.update(size_id=size.get('size_id'))
    elif size.get('size_slug'):
        params.update(size_slug=size.get('size_slug'))
    else:
        msg = 'size_id or size_slug are required to resize a droplet!'
        raise DOPException(msg)
    json = self.request('/droplets/%s/resize' % droplet_id, method='GET', params=params)
    status = json.get('status')
    if status == 'OK':
        return json.get('event_id')
    raise DOPException('[%s]: %s' % (status, json.get('message')))
python
{ "resource": "" }
q39483
Client.restore_droplet
train
def restore_droplet(self, droplet_id, image_id):
    """
    Restore a droplet from a previous image or snapshot (a mirror copy
    of the image is written over the droplet); returns the API event
    id.  Be sure any necessary data has been backed up first.

    Required parameters:
      droplet_id: Numeric, the id of the droplet to restore.
      image_id: Numeric, the id of the image to restore from.

    :raises DOPException: if arguments are missing or the call fails.
    """
    if not droplet_id:
        raise DOPException('droplet_id is required to restore a droplet!')
    if not image_id:
        raise DOPException('image_id is required to rebuild a droplet!')
    json = self.request('/droplets/%s/restore' % droplet_id, method='GET', params={'image_id': image_id})
    status = json.get('status')
    if status != 'OK':
        raise DOPException('[%s]: %s' % (status, json.get('message')))
    return json.get('event_id')
python
{ "resource": "" }
q39484
Client.rename_droplet
train
def rename_droplet(self, droplet_id, name):
    """
    Rename a droplet; returns the API event id.

    (The previous docstring was copied from the rebuild call and
    wrongly described reinstalling from an image; this method only
    changes the droplet's name.)

    Required parameters:
      droplet_id: Numeric, the id of the droplet to rename.
      name: String, the new name for the droplet.

    :raises DOPException: if arguments are missing or the call fails.
    """
    # error messages fixed: they previously said "rebuild"
    if not droplet_id:
        raise DOPException('droplet_id is required to rename a droplet!')
    if not name:
        raise DOPException('name is required to rename a droplet!')
    params = {'name': name}
    json = self.request('/droplets/%s/rename' % droplet_id, method='GET', params=params)
    status = json.get('status')
    if status == 'OK':
        return json.get('event_id')
    else:
        message = json.get('message')
        raise DOPException('[%s]: %s' % (status, message))
python
{ "resource": "" }
q39485
Client.destroy_droplet
train
def destroy_droplet(self, droplet_id, scrub_data=False):
    """
    Destroy a droplet - this is irreversible; returns the API event id.

    Required parameters:
      droplet_id: Numeric, the id of the droplet to destroy.
    Optional parameters:
      scrub_data: Boolean, when True the API strictly writes 0s over
        the prior partition so all data is completely erased.

    :raises DOPException: if the call fails.
    """
    params = {'scrub_data': True} if scrub_data else {}
    json = self.request('/droplets/%s/destroy' % droplet_id, method='GET', params=params)
    status = json.get('status')
    if status != 'OK':
        raise DOPException('[%s]: %s' % (status, json.get('message')))
    return json.get('event_id')
python
{ "resource": "" }
q39486
Client.regions
train
def regions(self):
    """
    Return all the available regions within the DigitalOcean cloud.

    :raises DOPException: if the API reports a non-OK status.
    """
    json = self.request('/regions', method='GET')
    status = json.get('status')
    if status != 'OK':
        raise DOPException('[%s]: %s' % (status, json.get('message')))
    return [Region.from_json(r) for r in json.get('regions', [])]
python
{ "resource": "" }
q39487
Client.images
train
def images(self, filter='global'):
    """
    Return all images accessible by your client ID: all public images
    by default, plus any snapshots or backups created in your own
    account.

    Optional parameters:
      filter: String, either "my_images" or "global"

    :raises DOPException: on an invalid filter or a failed call.
    """
    if filter and filter not in ('my_images', 'global'):
        raise DOPException('"filter" must be either "my_images" or "global"')
    params = {'filter': filter} if filter else {}
    json = self.request('/images', method='GET', params=params)
    status = json.get('status')
    if status != 'OK':
        raise DOPException('[%s]: %s' % (status, json.get('message')))
    return [Image.from_json(i) for i in json.get('images', [])]
python
{ "resource": "" }
q39488
Client.show_image
train
def show_image(self, image_id_or_slug):
    """
    Display the attributes of an image; returns an Image object.

    Required parameters:
      image_id_or_slug: the numeric id or slug of the image to show.

    :raises DOPException: if the argument is missing or the call fails.
    """
    if not image_id_or_slug:
        # message fixed: it previously said "destroy an image!",
        # copied from destroy_image
        msg = 'image_id_or_slug is required to show an image!'
        raise DOPException(msg)
    json = self.request('/images/%s' % image_id_or_slug, method='GET')
    image_json = json.get('image')
    status = json.get('status')
    if status == 'OK':
        image = Image.from_json(image_json)
        return image
    else:
        message = json.get('message')
        raise DOPException('[%s]: %s' % (status, message))
python
{ "resource": "" }
q39489
Client.destroy_image
train
def destroy_image(self, image_id_or_slug):
    """
    Destroy an image.  There is no way to restore a deleted image, so
    be careful and ensure your data is properly backed up.  Returns
    the API status string.

    Required parameters:
      image_id_or_slug: the numeric id or slug of the image to destroy.

    :raises DOPException: if the argument is missing.
    """
    if not image_id_or_slug:
        msg = 'image_id_or_slug is required to destroy an image!'
        raise DOPException(msg)
    response = self.request('/images/%s/destroy' % image_id_or_slug, method='GET')
    return response.get('status')
python
{ "resource": "" }
q39490
Client.transfer_image
train
def transfer_image(self, image_id_or_slug, region_id):
    """
    Transfer an image to a specified region; returns the API event id.

    Required parameters:
      image_id_or_slug: the numeric id or slug of the image to
        transfer.
      region_id: Numeric, the id of the destination region.

    :raises DOPException: if arguments are missing or the call fails.
    """
    if not image_id_or_slug:
        msg = 'image_id_or_slug is required to transfer an image!'
        raise DOPException(msg)
    if not region_id:
        raise DOPException('region_id is required to transfer an image!')
    json = self.request('/images/%s/transfer' % image_id_or_slug, method='GET', params={'region_id': region_id})
    status = json.get('status')
    if status != 'OK':
        raise DOPException('[%s]: %s' % (status, json.get('message')))
    return json.get('event_id')
python
{ "resource": "" }
q39491
Client.ssh_keys
train
def ssh_keys(self):
    """
    List all the available public SSH keys in your account that can be
    added to a droplet.

    :raises DOPException: if the API reports a non-OK status.
    """
    json = self.request('/ssh_keys', method='GET', params={})
    status = json.get('status')
    if status != 'OK':
        raise DOPException('[%s]: %s' % (status, json.get('message')))
    return [SSHKey.from_json(k) for k in json.get('ssh_keys', [])]
python
{ "resource": "" }
q39492
Client.add_ssh_key
train
def add_ssh_key(self, name, ssh_pub_key):
    """Register a new public SSH key with the account.

    Required parameters
      name: Label to give the new key.
      ssh_pub_key: The public SSH key material itself.

    Returns the created SSHKey object; raises DOPException on a non-OK
    API status.
    """
    payload = {'name': name, 'ssh_pub_key': ssh_pub_key}
    response = self.request('/ssh_keys/new', method='GET', params=payload)
    status = response.get('status')
    if status != 'OK':
        raise DOPException('[%s]: %s' % (status, response.get('message')))
    return SSHKey.from_json(response.get('ssh_key'))
python
{ "resource": "" }
q39493
Client.show_ssh_key
train
def show_ssh_key(self, ssh_key_id):
    """Fetch one public SSH key from the account by its id.

    Returns the matching SSHKey object; raises DOPException on a non-OK
    API status.
    """
    response = self.request('/ssh_keys/%s' % ssh_key_id,
                            method='GET', params={})
    status = response.get('status')
    if status != 'OK':
        raise DOPException('[%s]: %s' % (status, response.get('message')))
    return SSHKey.from_json(response.get('ssh_key'))
python
{ "resource": "" }
q39494
Client.destroy_ssh_key
train
def destroy_ssh_key(self, ssh_key_id):
    """Delete the given SSH key from the account.

    Returns the API status string.
    """
    response = self.request('/ssh_keys/%s/destroy' % ssh_key_id, method='GET')
    return response.get('status')
python
{ "resource": "" }
q39495
Client.sizes
train
def sizes(self):
    """Return every size available for creating a droplet.

    Returns a list of Size objects; raises DOPException on a non-OK
    API status.
    """
    response = self.request('/sizes', method='GET')
    status = response.get('status')
    if status != 'OK':
        raise DOPException('[%s]: %s' % (status, response.get('message')))
    return [Size.from_json(item) for item in response.get('sizes', [])]
python
{ "resource": "" }
q39496
Client.domains
train
def domains(self):
    """Return all of the account's current domains.

    Returns a list of Domain objects; raises DOPException on a non-OK
    API status.
    """
    response = self.request('/domains', method='GET')
    status = response.get('status')
    if status != 'OK':
        raise DOPException('[%s]: %s' % (status, response.get('message')))
    return [Domain.from_json(item) for item in response.get('domains', [])]
python
{ "resource": "" }
q39497
Client.show_domain
train
def show_domain(self, domain_id):
    """Return a single domain.

    Required parameters
      domain_id: Integer id or domain name (e.g. domain.com) of the
        domain to display.

    Returns the matching Domain object; raises DOPException on a non-OK
    API status.
    """
    response = self.request('/domains/%s' % domain_id, method='GET')
    status = response.get('status')
    if status != 'OK':
        raise DOPException('[%s]: %s' % (status, response.get('message')))
    return Domain.from_json(response.get('domain'))
python
{ "resource": "" }
q39498
Client.destroy_domain
train
def destroy_domain(self, domain_id):
    """Delete the specified domain.

    Required parameters
      domain_id: Integer id or domain name (e.g. domain.com) of the
        domain to destroy.

    Returns the API status string.
    """
    response = self.request('/domains/%s/destroy' % domain_id, method='GET')
    return response.get('status')
python
{ "resource": "" }
q39499
Client.domain_records
train
def domain_records(self, domain_id):
    """Return all DNS records attached to a domain.

    Required parameters
      domain_id: Integer id or domain name (e.g. domain.com) whose
        records should be retrieved.

    Returns a list of Record objects; raises DOPException on a non-OK
    API status.
    """
    response = self.request('/domains/%s/records' % domain_id, method='GET')
    status = response.get('status')
    if status != 'OK':
        raise DOPException('[%s]: %s' % (status, response.get('message')))
    return [Record.from_json(item) for item in response.get('records', [])]
python
{ "resource": "" }