code stringlengths 51 2.34k | docstring stringlengths 11 171 |
|---|---|
def GetRendererForValueOrClass(cls, value, limit_lists=-1):
if inspect.isclass(value):
value_cls = value
else:
value_cls = value.__class__
cache_key = "%s_%d" % (value_cls.__name__, limit_lists)
try:
renderer_cls = cls._renderers_cache[cache_key]
except KeyError:
candidates =... | Returns renderer corresponding to a given value and rendering args. |
def delete_connector_c_pool(name, target='server', cascade=True, server=None):
    """Delete a connector connection pool via the REST resources endpoint.

    ``cascade`` also removes dependent resources; ``target`` selects the
    deployment target (defaults to the DAS 'server').
    """
    payload = {'target': target, 'cascade': cascade}
    return _delete_element(name, 'resources/connector-connection-pool', payload, server)
def _register_inet(self, oid=None, conn_or_curs=None):
from psycopg2 import extensions as _ext
if not oid:
oid = 869
_ext.INET = _ext.new_type((oid, ), "INET",
lambda data, cursor: data and Inet(data) or None)
_ext.register_type(_ext.INET, self._con_pg)
... | Create the INET type and an Inet adapter. |
def extract_keyhandle(path, filepath):
    """Extract the keyhandle from ``filepath``: the first path component
    after the ``path`` prefix.

    Fix: the original used ``filepath.lstrip(path)``, which strips any
    *characters* contained in ``path`` (a character set), not the prefix
    string — corrupting keyhandles that begin with one of those characters.
    """
    # Remove the directory prefix as a string prefix, not a character set.
    if filepath.startswith(path):
        remainder = filepath[len(path):]
    else:
        remainder = filepath
    # The keyhandle is the first remaining path component.
    return remainder.split("/")[0]
def skip_cycles(self) -> int:
    """Total number of cycles dedicated to skips.

    Each entry of ``self.skip_tokens`` embeds its cycle count as digits;
    non-digit characters are discarded before conversion.
    """
    total = 0
    for token in self.skip_tokens:
        total += int(re.sub(r'\D', '', token))
    return total
def rate_limits(self):
if not self._rate_limits:
self._rate_limits = utilities.get_rate_limits(self.response)
return self._rate_limits | Returns a list of rate limit details. |
def move(name, entry_name, config=None):
storm_ = get_storm_instance(config)
try:
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
storm_.clone_entry(name, entry_name, keep_original=False)
print(
get_formatted_message(
... | Move an entry to the sshconfig. |
def numval(token):
    """Return ``token.value`` converted to int or float for numeric token
    types; any other token's value is returned unchanged."""
    converters = {'INTEGER': int, 'FLOAT': float}
    convert = converters.get(token.type)
    return convert(token.value) if convert else token.value
def parse_oxi_states(self, data):
try:
oxi_states = {
data["_atom_type_symbol"][i]:
str2float(data["_atom_type_oxidation_number"][i])
for i in range(len(data["_atom_type_symbol"]))}
for i, symbol in enumerate(data["_atom_type_symbol"]):... | Parse oxidation states from data dictionary |
def _get_serv(ret=None):
_options = _get_options(ret)
global REDIS_POOL
if REDIS_POOL:
return REDIS_POOL
elif _options.get('cluster_mode'):
REDIS_POOL = StrictRedisCluster(startup_nodes=_options.get('startup_nodes'),
skip_full_coverage_check=_optio... | Return a redis server object |
def register_form_factory(Form):
class CsrfDisabledProfileForm(ProfileForm):
def __init__(self, *args, **kwargs):
kwargs = _update_with_csrf_disabled(kwargs)
super(CsrfDisabledProfileForm, self).__init__(*args, **kwargs)
class RegisterForm(Form):
profile = FormField(CsrfD... | Factory for creating an extended user registration form. |
def _create_new(self, request):
self.prefix = "{}_Pull_Request_{}".format(request.repo.name, request.pull.number)
head = list(self._newpage_head)
head.append(request.repo.testing.wiki(False))
if not self.testmode:
page = self.site.Pages[self.newpage]
result = page... | Creates the new wiki page that houses the details of the unit testing runs. |
def without_edge(self, edge: Edge) -> 'BipartiteGraph[TLeft, TRight, TEdgeValue]':
    """Return a copy of this bipartite graph with ``edge`` removed."""
    remaining = ((other, value) for other, value in self._edges.items() if other != edge)
    return BipartiteGraph(remaining)
def process_request(self, request_object):
    """Handle an update-resource request: load the entity by identifier,
    apply the update payload, and wrap the result in a success response."""
    entity_cls = request_object.entity_cls
    entity = entity_cls.get(request_object.identifier)
    updated = entity.update(request_object.data)
    return ResponseSuccess(Status.SUCCESS, updated)
def _filter(self, dict, keep):
if not keep:
return dict
result = {}
for key, value in dict.iteritems():
if key in keep:
result[key] = value
return result | Remove any keys not in 'keep' |
def Auth(email=None, password=None):
gd_client = SpreadsheetsService()
gd_client.source = "texastribune-ttspreadimporter-1"
if email is None:
email = os.environ.get('GOOGLE_ACCOUNT_EMAIL')
if password is None:
password = os.environ.get('GOOGLE_ACCOUNT_PASSWORD')
if email and password... | Get a reusable google data client. |
def _process_tokens(self, char):
if (char in self.WHITESPACE or char == self.COMMENT_START or
char in self.QUOTES or char in self.TOKENS):
add_token = True
if char == self.SPACE or char in self.TOKENS:
if self._escaped:
add_token = Fals... | Process a token character. |
def modified_on(self):
    """Timestamp of the table's last modification, parsed from the
    ``lastModifiedTime`` entry of the resource info (may be absent)."""
    raw = self._info.get('lastModifiedTime')
    return _parser.Parser.parse_timestamp(raw)
def play(self):
if self.state == PygAnimation.PLAYING:
pass
elif self.state == PygAnimation.STOPPED:
self.index = 0
self.elapsed = 0
self.playingStartTime = time.time()
self.elapsedStopTime = self.endTimesList[-1]
self.nextElaps... | Starts an animation playing. |
def tabset(titles, contents):
tabs = []
for no, title in enumerate(titles):
tab = {
'title': title,
}
content = contents[no]
if isinstance(content, list):
tab['items'] = content
else:
tab['items'] = [content]
tabs.append(tab)
... | A tabbed container widget |
def post_public(self, path, data, is_json=True):
    """Issue a POST request that requires no authentication, delegating
    to the internal ``_post`` helper."""
    return self._post(path, data, is_json)
def _make_attr_element(parent, attr_i):
attr = etree.SubElement(parent, "attribute")
attr_name = etree.SubElement(attr, 'name')
attr_name.text = attr_i.name
attr_desc = etree.SubElement(attr, 'description')
attr_desc.text = attr_i.description
attr_dimension = etree.SubElement(attr, 'di... | create an attribute element from an attribute DB object |
def atlas_peer_dequeue_all( peer_queue=None ):
    """Drain the peer queue under its lock and return every queued peer,
    preserving queue order."""
    drained = []
    with AtlasPeerQueueLocked(peer_queue) as pq:
        while len(pq) > 0:
            drained.append(pq.pop(0))
    return drained
def _adjust_probability_vec_best(population, fitnesses, probability_vec,
                                 adjust_rate):
    """Shift ``probability_vec`` towards the highest-fitness solution.

    Fix: select the best solution by fitness alone.  The original
    ``max(zip(fitnesses, population))`` fell back to comparing the
    solutions themselves on fitness ties, which is arbitrary and raises
    for element types without a total ordering (e.g. numpy arrays).
    """
    best_solution = max(zip(fitnesses, population), key=lambda pair: pair[0])[1]
    return _adjust(probability_vec, best_solution, adjust_rate)
def init_with_uid(self, uid):
    """Initialise this object from a UID, clearing all cached lookups so
    they are re-resolved lazily on next access."""
    self._uid = uid
    # Drop any previously cached references.
    self._instance = None
    self._catalog = None
    self._brain = None
def reset(stick):
    """Send a full packet of zero-bytes to the YubiHSM and flush its
    input buffer; True when the device acknowledged with status 0."""
    zero_payload = '\x00' * (pyhsm.defines.YSM_MAX_PKT_SIZE - 1)
    res = YHSM_Cmd(stick, pyhsm.defines.YSM_NULL, payload = zero_payload).execute(read_response = False)
    unlock = stick.acquire()
    try:
        stick.drain()
        stick.flush()
    finally:
        unlock()
    return res == 0
def copy(self):
    """Return a new Limit carrying the same configuration as this one."""
    return Limit(
        self.scan_limit,
        self.item_limit,
        self.min_scan_limit,
        self.strict,
        self.filter,
    )
def create(self, filename, filedata):
    """Upload raw data as a new file via a multipart POST to /file/v2/."""
    body = {
        'filename': filename,
        'source': filedata,
    }
    return self.transport.POST(url='/file/v2/', body=body, type='multipart/form-data')
def word_intersection( word_a, word_b ):
positions = []
word_a_letters = get_letters( word_a )
word_b_letters = get_letters( word_b )
for idx,wa in enumerate(word_a_letters):
for idy,wb in enumerate(word_b_letters):
if ( wa == wb ):
positions.append( (idx, idy) )
... | return a list of tuples where word_a, word_b intersect |
def next(self):
    """Return the next value, rotating to the following iterable after
    every ``stride`` results from the current one (Python 2 iterator)."""
    if not self.s:
        # Stride exhausted: reset the countdown and wrap to the next iterable.
        self.s = self.stride - 1
        self.i = (self.i + 1) % self.l
    else:
        self.s -= 1
    return self.iterables[self.i].next()
def process_shell(self, creator, entry, config):
self.logger.info("Processing Bash code: start")
output = []
shell = creator(entry, config)
for line in shell.process():
output.append(line)
self.logger.info(" | %s", line)
if shell.success:
self.... | Processing a shell entry. |
def read_and_hash(fname, **kw):
    """Read every frame from ``fname`` and return the frames with hashes
    attached via ``addhash`` (kwargs are forwarded to both calls)."""
    hashed = []
    for frame in read(fname, **kw):
        hashed.append(addhash(frame, **kw))
    return hashed
def clean_super_features(self):
    """Drop null and non-integer entries from ``self.super_features``,
    coercing the survivors to int; no-op when the list is empty/None."""
    if not self.super_features:
        return
    cleaned = []
    for sf in self.super_features:
        if sf is not None and is_valid_digit(sf):
            cleaned.append(int(sf))
    self.super_features = cleaned
def Flush(self):
    """Evict every cached item: kill each node's payload in age order,
    then reset the hash index."""
    while self._age:
        oldest = self._age.PopLeft()
        self.KillObject(oldest.data)
    # All nodes are gone; start over with an empty index.
    self._hash = dict()
def body(self):
for chunk in self.gen_chunks(self.envelope.file_open(self.name)):
yield chunk
for chunk in self.gen_chunks(self.data):
yield chunk
for chunk in self.gen_chunks(self.envelope.file_close()):
yield chunk
for chunk in self.close():
... | Yields the encoded body. |
def release_address(self, address, vpnid):
    """Release the lease for ``address`` on the given VPN (used after
    delete_client_entry)."""
    query = address + "?action=releaseAddress&vpnId=" + vpnid
    url = self._build_url(['Lease', query])
    return self._do_request('DELETE', url)
def max_sharpe(self):
if not self.w:
self.solve()
w_sr, sr = [], []
for i in range(len(self.w) - 1):
w0 = np.copy(self.w[i])
w1 = np.copy(self.w[i + 1])
kargs = {"minimum": False, "args": (w0, w1)}
a, b = self.golden_section(self.eval_s... | Get the max Sharpe ratio portfolio |
def overlaps(self, canvas, exclude=[]):
try:
exclude = list(exclude)
except TypeError:
exclude = [exclude]
exclude.append(self)
for selfY, row in enumerate(self.image.image()):
for selfX, pixel in enumerate(row):
canvasPixelOn = canvas.... | Returns True if sprite is touching any other sprite. |
def status(config):
with open(config) as fh:
config = yaml.safe_load(fh.read())
jsonschema.validate(config, CONFIG_SCHEMA)
last_index = get_incremental_starts(config, None)
accounts = {}
for (a, region), last in last_index.items():
accounts.setdefault(a, {})[region] = last
print(... | time series lastest record time by account. |
def uuid(dataset_uri):
    """Load the dataset at ``dataset_uri`` and print its UUID."""
    loaded = dtoolcore.DataSet.from_uri(dataset_uri)
    click.secho(loaded.uuid)
def de_blank(val):
    """Strip whitespace from the elements of a list ``val`` and drop the
    blank ones; non-list input is simply converted with ``list()``.

    Fix: the original removed items from the result list while still
    indexing it with positions from the input, so every removal shifted
    later indices — ``de_blank(['', 'a '])`` raised IndexError.
    """
    if not isinstance(val, list):
        # Preserve original behaviour for non-list input: plain listification.
        return list(val)
    return [item.strip() for item in val if item.strip() != '']
def parse(self, title, pageid=None):
qry = self.PARSE.substitute(
WIKI=self.uri,
ENDPOINT=self.endpoint,
PAGE=safequote(title) or pageid)
if pageid and not title:
qry = qry.replace('&page=', '&pageid=').replace('&redirects', '')
if self.variant:
... | Returns Mediawiki action=parse query string |
def chunks(iterable, size=1):
    """Lazily split ``iterable`` into chunks of at most ``size`` items.

    Each yielded chunk is itself a lazy iterator over the shared source,
    so chunks must be consumed in order.
    """
    source = iter(iterable)
    for head in source:
        # A chunk is the element just pulled plus the next size-1 items.
        yield chain([head], islice(source, size - 1))
async def _async_get_data(self, resource, id=None):
if id:
url = urljoin(self._api_url, "spc/{}/{}".format(resource, id))
else:
url = urljoin(self._api_url, "spc/{}".format(resource))
data = await async_request(self._session.get, url)
if not data:
retu... | Get the data from the resource. |
def add_house(self, complex: str, **kwargs):
    """Register a new house under ``complex`` in the rumetr DB after
    validating that the complex exists."""
    self.check_complex(complex)
    endpoint = 'developers/{developer}/complexes/{complex}/houses/'.format(
        developer=self.developer, complex=complex)
    self.post(endpoint, data=kwargs)
def add_state(self):
    """Append a fresh SFA state whose id is the next free index."""
    next_id = len(self.states)
    self.states.append(SFAState(next_id))
def delete_by_id(self, del_id):
if self.check_post_role()['DELETE']:
pass
else:
return False
if self.is_p:
if MLink.delete(del_id):
output = {'del_link': 1}
else:
output = {'del_link': 0}
return json.dump... | Delete a link by id. |
def alien_filter(name, location, size, unsize):
(fname, flocation, fsize, funsize) = ([] for i in range(4))
for n, l, s, u in zip(name, location, size, unsize):
if "slackbuilds" != l:
fname.append(n)
flocation.append(l)
fsize.append(s)
funsize.append(u)
... | Fix to avoid packages include in slackbuilds folder |
def timeshift(self):
    """Whether the tuned stream source is a timeshift stream.

    Raises PyMediaroomError when the <node> carries no @src information.
    """
    src = self.tune.get('@src') if self.tune else None
    if not src:
        raise PyMediaroomError("No information in <node> about @src")
    return src.startswith('timeshift')
def destroy(self):
if self._running is False:
return
self._running = False
if hasattr(self, 'schedule'):
del self.schedule
if hasattr(self, 'pub_channel') and self.pub_channel is not None:
self.pub_channel.on_recv(None)
if hasattr(self.pub_... | Tear down the minion |
def search_list(kb, from_=None, match_type=None,
page=None, per_page=None, unique=False):
page = page or 1
per_page = per_page or 10
if kb.kbtype == models.KnwKB.KNWKB_TYPES['written_as']:
query = api.query_kb_mappings(
kbid=kb.id,
... | Search "mapping from" for knowledge. |
def single_frame_plot(obj):
obj = Layout.from_values(obj) if isinstance(obj, AdjointLayout) else obj
backend = Store.current_backend
renderer = Store.renderers[backend]
plot_cls = renderer.plotting_class(obj)
plot = plot_cls(obj, **renderer.plot_options(obj, renderer.size))
fmt = renderer.params... | Returns plot, renderer and format for single frame export. |
def update_widget_attrs(self, bound_field, attrs):
if bound_field.field.has_subwidgets() is False:
widget_classes = getattr(self, 'widget_css_classes', None)
if widget_classes:
if 'class' in attrs:
attrs['class'] += ' ' + widget_classes
... | Updated the widget attributes which shall be added to the widget when rendering this field. |
def message(self, category, subject, msg_file):
users = getattr(self.sub, category)
if not users:
print('There are no {} users on {}.'.format(category, self.sub))
return
if msg_file:
try:
msg = open(msg_file).read()
except IOError a... | Send message to all users in `category`. |
def _update_conda_packages():
conda_bin = _get_conda_bin()
channels = _get_conda_channels(conda_bin)
assert conda_bin, ("Could not find anaconda distribution for upgrading bcbio.\n"
"Using python at %s but could not find conda." % (os.path.realpath(sys.executable)))
req_file = "bc... | If installed in an anaconda directory, upgrade conda packages. |
def one_cycle_scheduler(lr_max:float, **kwargs:Any)->OneCycleScheduler:
    "Build an `OneCycleScheduler` factory pre-bound to `lr_max` (and any extra kwargs)."
    return partial(OneCycleScheduler, lr_max=lr_max, **kwargs)
def _no_duplicates_constructor(loader, node, deep=False):
mapping = {}
for key_node, value_node in node.value:
key = loader.construct_object(key_node, deep=deep)
value = loader.construct_object(value_node, deep=deep)
if key in mapping:
raise ConstructorError("while constructi... | Check for duplicate keys. |
def com_google_fonts_check_fvar_name_entries(ttFont):
failed = False
for instance in ttFont["fvar"].instances:
entries = [entry for entry in ttFont["name"].names if entry.nameID == instance.subfamilyNameID]
if len(entries) == 0:
failed = True
yield FAIL, (f"Named instance with coordinates {insta... | All name entries referenced by fvar instances exist on the name table? |
def MySend(request_path, payload=None,
content_type="application/octet-stream",
timeout=None, force_auth=True,
**kwargs):
try:
return MySend1(request_path, payload, content_type, timeout, force_auth, **kwargs)
except Exception, e:
if type(e) != urllib2.HTTPError or e.code != 500:
raise
print >>sys.stde... | Run MySend1 maybe twice, because Rietveld is unreliable. |
def step_indices(group_idx):
ilen = step_count(group_idx) + 1
indices = np.empty(ilen, np.int64)
indices[0] = 0
indices[-1] = group_idx.size
cmp_pos = 0
ri = 1
for i in range(len(group_idx)):
if group_idx[cmp_pos] != group_idx[i]:
cmp_pos = i
indices[ri] = i
... | Return the edges of areas within group_idx, which are filled with the same value. |
def blob_counter(self):
import aa
from ROOT import EventFile
try:
event_file = EventFile(self.filename)
except Exception:
raise SystemExit("Could not open file")
num_blobs = 0
for event in event_file:
num_blobs += 1
return num_b... | Create a blob counter. |
def cli(obj):
    """Run a unix-"top"-style live display of alerts."""
    screen = Screen(obj['client'], obj['timezone'])
    screen.run()
def unescape_all(string):
    """Replace every HTML entity in ``string`` with its unicode character."""
    def replace_entity(match):
        # Group 1 holds the bare entity name.
        return _unicode_for_entity_with_name(match.group(1))
    return entities.sub(replace_entity, string)
def network_size(value, options=None, version=None):
ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version)
if not ipaddr_filter_out:
return
if not isinstance(value, (list, tuple, types.GeneratorType)):
return _network_size(ipaddr_filter_out[0])
return [
_net... | Get the size of a network. |
def _check_reach_env():
path_to_reach = get_config('REACHPATH')
if path_to_reach is None:
path_to_reach = environ.get('REACHPATH', None)
if path_to_reach is None or not path.exists(path_to_reach):
raise ReachError(
'Reach path unset or invalid. Check REACH... | Check that the environment supports runnig reach. |
def dnd_endDnd(self, **kwargs) -> SlackResponse:
    """End the current user's Do Not Disturb session immediately."""
    # Endpoint requires a user (xoxp) token, not a bot token.
    self._validate_xoxp_token()
    return self.api_call("dnd.endDnd", json=kwargs)
def add_object(self, nidm_object, export_file=True):
if not export_file:
export_dir = None
else:
export_dir = self.export_dir
if not isinstance(nidm_object, NIDMFile):
nidm_object.export(self.version, export_dir)
else:
nidm_object.export(se... | Add a NIDMObject to a NIDM-Results export. |
def _is_qstring(message):
my_class = str(message.__class__)
my_class_name = my_class.replace('<class \'', '').replace('\'>', '')
if my_class_name == 'PyQt5.QtCore.QString':
return True
return False | Check if its a QString without adding any dep to PyQt5. |
def autosave(self):
    """Save the currently stored data, but only when the autosave
    checkbox is ticked; afterwards bump the file counter."""
    if not self.button_autosave.is_checked():
        return
    target = _os.path.join(
        self._autosave_directory,
        "%04d " % (self.number_file.get_value()) + self._label_path.get_text())
    self.save_file(target)
    self.number_file.increment()
def window_size(self, window_size):
    """Set the render window size.

    Delegates to the base class property setter, then mirrors the size
    onto the Qt application window's base size so both stay in sync.
    ``window_size`` is presumably a (width, height) pair — it is
    star-unpacked into setBaseSize; TODO confirm against callers.
    """
    BasePlotter.window_size.fset(self, window_size)
    self.app_window.setBaseSize(*window_size)
def addPolygonAnnot(self, points):
    """Add a 'Polygon' annotation through the given sequence of points.

    Returns the new annotation, or None when the backend created nothing.
    """
    CheckParent(self)
    annot = _fitz.Page_addPolygonAnnot(self, points)
    if not annot:
        return
    annot.thisown = True
    # The annotation holds only a weak reference back to its page.
    annot.parent = weakref.proxy(self)
    self._annot_refs[id(annot)] = annot
    return annot
def all_row_ids(data_batch):
    """Return id vectors covering every user row and every movie row."""
    users = mx.nd.arange(0, MOVIELENS['max_user'], dtype='int64')
    movies = mx.nd.arange(0, MOVIELENS['max_movie'], dtype='int64')
    return {'user_weight': users, 'item_weight': movies}
def check_auth(self, name):
if name in self.auths:
auth = self.auths[name]
if self.args.auth is None:
raise exceptions.ArgParseInatorAuthorizationRequired
elif ((auth is True and self.args.auth != self.auth_phrase) or
(auth is not True and se... | Check the authorization for the command |
def generate(self, name: str, **kwargs: Dict[str, str]) -> str:
    """Build the URL for the named pattern, substituting ``kwargs`` into
    the registered template."""
    return self.patterns[name].substitute(kwargs)
def modify(self, vals):
    """Store new latent values, refresh the model-output visualisation,
    and redraw the latent-space handle on the axes."""
    self.vals = vals.view(np.ndarray).copy()
    prediction = self.model.predict(self.vals)[0]
    self.data_visualize.modify(prediction)
    ix, iy = self.latent_index[0], self.latent_index[1]
    self.latent_handle.set_data(self.vals[0, ix], self.vals[0, iy])
    self.axes.figure.canvas.draw()
def show(self, username):
    """Look up a single posixAccount user and return their info in LDIF
    format via the directory client."""
    criteria = ['(objectclass=posixAccount)', "(uid={})".format(username)]
    return self.client.search(criteria)
def setup(self):
    """Initialise the filter just before first use.

    Extends the base setup by caching the static-files root directory
    from the Flask app config (``COLLECT_STATIC_ROOT``; ``self.root``
    becomes None when the key is absent).
    """
    super(CleanCSSFilter, self).setup()
    self.root = current_app.config.get('COLLECT_STATIC_ROOT')
def _is_video(filepath) -> bool:
if os.path.exists(filepath):
extension = os.path.splitext(filepath)[1]
return extension in ('.mkv', '.mp4', '.avi')
else:
return False | Check filename extension to see if it's a video file. |
def length(
cls, request,
vector: (Ptypes.body,
Vector('The vector to analyse.'))) -> [
(200, 'Ok', Float),
(400, 'Wrong vector format')]:
log.info('Computing the length of vector {}'.format(vector))
try:
Respond(200, sqrt(... | Return the modulo of a vector. |
def combine_results(self, results):
    """Merge per-batch result mappings by concatenating each key's arrays.

    The keys of the first batch define which keys are combined.
    """
    keys = results[0]
    return {key: numpy.concatenate([batch[key] for batch in results])
            for key in keys}
def merge_dict(a, b, path=None):
if not path:
path = []
for key in b:
if key in a:
if isinstance(a[key], dict) and isinstance(b[key], dict):
merge_dict(a[key], b[key], path + [str(key)])
else:
continue
else:
a[key] = b[k... | Merge dict b into a |
def create_app(app_id, app_name, source_id, region, app_data):
try:
create_at = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
conn = get_conn()
c = conn.cursor()
c.execute("SELECT count(*) FROM app WHERE name='{0}' ".format(app_name))
old_app = c.fetchone()
if... | insert app record when stack run as a app |
def index(config):
    """Connect to the directory and print every user's info in LDIF format."""
    client = Client()
    client.prepare_connection()
    CLI.show_user(API(client).index())
def create_alert_policy(self, policy_name):
policy_data = { 'policy': { 'incident_preference': 'PER_POLICY', 'name': policy_name } }
create_policy = requests.post(
'https://api.newrelic.com/v2/alerts_policies.json',
headers=self.auth_header,
data=json.dumps(policy_data))
create_policy.rais... | Creates an alert policy in NewRelic |
def run_parse(self):
parsedset = {}
parsedset['data_set'] = []
for log in self.input_files:
parsemodule = self.parse_modules[self.args.parser]
try:
if self.args.tzone:
parsemodule.tzone = self.args.tzone
except NameError: pa... | Parse one or more log files |
def parse_at_root(
self,
root,
state
):
parsed_dict = {}
dict_element = _element_find_from_root(root, self.element_path)
if dict_element is not None:
parsed_dict = self.parse_at_element(dict_element, state)
elif self.required:
... | Parse the root XML element as a dictionary. |
def check_obj(keys, name, obj):
    """Validate ``obj`` against the expected ``keys``; raise AomiData
    describing the problem (tagged with ``name``) on failure."""
    problem = validate_obj(keys, obj)
    if problem:
        raise aomi.exceptions.AomiData("object check : %s in %s" % (problem, name))
def _do_timeout_for_query(self, timeout, datapath):
dpid = datapath.id
hub.sleep(timeout)
outport = self._to_querier[dpid]['port']
remove_dsts = []
for dst in self._to_hosts[dpid]:
if not self._to_hosts[dpid][dst]['replied']:
self._remove_multicast_gro... | the process when the QUERY from the querier timeout expired. |
def upcoming(self, chamber, congress=CURRENT_CONGRESS):
    "Shortcut for upcoming bills"
    return self.fetch("bills/upcoming/{chamber}.json".format(chamber=chamber))
def kill(self):
self._killed.set()
if not self.is_alive():
logging.debug('Cannot kill thread that is no longer running.')
return
if not self._is_thread_proc_running():
logging.debug("Thread's _thread_proc function is no longer running, "
'will not kill; letting thread e... | Terminates the current thread by raising an error. |
def add_data_to_df(self, data: np.array):
col_names = ['high_p', 'low_p', 'open_p', 'close_p', 'volume', 'oi']
data = np.array(data).reshape(-1, len(col_names) + 1)
df = pd.DataFrame(data=data[:, 1:], index=data[:, 0],
columns=col_names)
df.index = pd.to_datetim... | Build Pandas Dataframe in memory |
def build_strain_specific_models(self, joblib=False, cores=1, force_rerun=False):
if len(self.df_orthology_matrix) == 0:
raise RuntimeError('Empty orthology matrix, please calculate first!')
ref_functional_genes = [g.id for g in self.reference_gempro.functional_genes]
log.info('Build... | Wrapper function for _build_strain_specific_model |
def dict_hash(dct):
    """Return the md5 hex digest of the dict's canonical JSON form.

    Keys are sorted so logically-equal dicts hash identically.
    """
    canonical = json.dumps(dct, sort_keys=True)
    try:
        digest = md5(canonical)  # Python 2: str is already bytes
    except TypeError:
        digest = md5(canonical.encode())  # Python 3: encode first
    return digest.hexdigest()
def filter(cls, **items):
    """Return multiple Union objects matching the given search params,
    POSTed as pretty-printed JSON to the /search endpoint."""
    client = cls._new_api_client(subpath='/search')
    payload = json.dumps(dict(items), sort_keys=True, indent=4)
    return client.make_request(cls, 'post', post_data=payload)
def prepare_socket(bind_addr, family, type, proto, nodelay, ssl_adapter):
sock = socket.socket(family, type, proto)
prevent_socket_inheritance(sock)
host, port = bind_addr[:2]
IS_EPHEMERAL_PORT = port == 0
if not (IS_WINDOWS or IS_EPHEMERAL_PORT):
sock.setsockopt(sock... | Create and prepare the socket object. |
def logout_oauth2(self):
url = "https://api.robinhood.com/oauth2/revoke_token/"
data = {
"client_id": CLIENT_ID,
"token": self.refresh_token,
}
res = self.post(url, payload=data)
if res is None:
self.account_id = None
self.account_u... | Logout for given Oauth2 bearer token |
def disasm_symbol_app(_parser, _, args):
parser = argparse.ArgumentParser(
prog=_parser.prog,
description=_parser.description,
)
parser.add_argument(
'--syntax', '-s',
choices=AsmSyntax.__members__.keys(),
default=None,
)
parser.add_argument('file', help='ELF ... | Disassemble a symbol from an ELF file. |
def _load_config(self):
    """Parse the configuration file at ``self.config_path`` into memory
    (missing files are silently tolerated by ConfigParser.read)."""
    parser = ConfigParser.SafeConfigParser()
    parser.read(self.config_path)
    self._config = parser
def from_urlencode(self, data, options=None):
    """Decode a form-urlencoded body into a dict.

    Multi-valued keys keep their full list of values; single-valued keys
    are flattened to the bare value.

    Fix: ``iteritems()`` and the top-level ``urlparse`` module are
    Python 2 only; use ``items()`` and a compatible import so the method
    also runs on Python 3 (behaviour is unchanged on Python 2).
    """
    try:
        from urlparse import parse_qs  # Python 2
    except ImportError:
        from urllib.parse import parse_qs  # Python 3
    return {k: v if len(v) > 1 else v[0]
            for k, v in parse_qs(data).items()}
def can_view(self, user):
    """Whether ``user`` may view info about this group: site admins,
    group members, and admins of the project's class all may."""
    if user.is_admin:
        return True
    if user in self.users:
        return True
    return self.project.class_ in user.admin_for
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.