_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
def dump_df(self, df, version=None, tags=None, ext=None, **kwargs):
    """Serialize a dataframe instance of this dataset to a file.

    Parameters
    ----------
    df : pandas.DataFrame
        The dataframe to dump to file.
    version : str, optional
        The version of the instance of this dataset.
    tags : list of str, optional
        The tags associated with the given instance of this dataset.
    ext : str, optional
        The file extension to use; defaults to this dataset's default
        extension when omitted.
    **kwargs : extra keyword arguments, optional
        Forwarded to the serialization method of the SerializationFormat
        object matching the chosen extension.
    """
    chosen_ext = self.default_ext if ext is None else ext
    target_path = self.fpath(version=version, tags=tags, ext=chosen_ext)
    serializer = SerializationFormat.by_name(chosen_ext)
    serializer.serialize(df, target_path, **kwargs)
"resource": ""
} |
def upload_df(self, df, version=None, tags=None, ext=None, **kwargs):
    """Dump a dataframe instance of this dataset to a file, then upload
    the resulting file to the dataset store.

    Parameters
    ----------
    df : pandas.DataFrame
        The dataframe to dump and upload.
    version : str, optional
        The version of the instance of this dataset.
    tags : list of str, optional
        The tags associated with the given instance of this dataset.
    ext : str, optional
        The file extension to use; the default extension is used when
        not given.
    **kwargs : extra keyword arguments, optional
        Forwarded to the serialization method of the SerializationFormat
        object matching the chosen extension.
    """
    # Write the file locally first, then push it with the same coordinates.
    self.dump_df(df=df, version=version, tags=tags, ext=ext, **kwargs)
    self.upload(version=version, tags=tags, ext=ext)
"resource": ""
} |
q43402 | BaseLoader._validate_extension | train | def _validate_extension(self):
"""Validates that source file extension is supported.
:raises: UnsupportedExtensionError
"""
extension = self.fpath.split('.')[-1]
if extension not in self.supported_extensions:
raise UnsupportedExtensionError | python | {
"resource": ""
} |
q43403 | BaseLoader._get_tags_and_content | train | def _get_tags_and_content(self, content: str) -> typing.Tuple[str, str]:
"""Splits content into two string - tags part and another content."""
content_lines = content.split('\n')
tag_lines = []
if content_lines[0] != '---':
return '', content
content_lines.pop(0)
for line in content_lines: # type: str
if line in ('---', '...'):
content_starts_at = content_lines.index(line) + 1
content_lines = content_lines[content_starts_at:]
break
tag_lines.append(line)
return '\n'.join(tag_lines), '\n'.join(content_lines) | python | {
"resource": ""
} |
def comp_listing(request, directory_slug=None):
    """
    Render the list of HTML templates and subdirectories in the COMPS_DIR.
    """
    working_dir = settings.COMPS_DIR
    if directory_slug:
        working_dir = os.path.join(working_dir, directory_slug)
    entries = os.listdir(working_dir)
    # *.html files are comps; everything that is not a file is a subdirectory.
    templates = sorted(e for e in entries if os.path.splitext(e)[1] == '.html')
    directories = sorted(
        e for e in entries
        if not os.path.isfile(os.path.join(working_dir, e)))
    context = {
        'directories': directories,
        'templates': templates,
        'subdirectory': directory_slug,
    }
    return render(request, "comps/comp_listing.html", context)
"resource": ""
} |
def comp(request, slug, directory_slug=None):
    """
    Render the requested comp template.

    Redirects to the listing view when the slug is a directory or when the
    template cannot be found.
    """
    base = settings.COMPS_DIR
    comp_dir = os.path.split(base)[1]
    if directory_slug:
        template = "{0}/{1}/{2}".format(comp_dir, directory_slug, slug)
    else:
        template = "{0}/{1}".format(comp_dir, slug)
    # A directory slug means "list its contents" rather than render it.
    if os.path.isdir(os.path.join(base, slug)):
        return redirect('comp-listing', directory_slug=slug)
    try:
        tpl = get_template(template)
    except TemplateDoesNotExist:
        return redirect('comp-listing')
    ctx = RequestContext(request, {})
    return HttpResponse(tpl.render(ctx))
"resource": ""
} |
def export_comps(request):
    """
    Return a zipfile of the rendered HTML templates in the COMPS_DIR.

    Static assets are included under ``static/`` with absolute ``/static``
    references rewritten to relative paths so the archive works offline.
    """
    in_memory = BytesIO()
    # Renamed from ``zip`` to avoid shadowing the builtin.
    archive = ZipFile(in_memory, "a")
    comps = settings.COMPS_DIR
    static = settings.STATIC_ROOT or ""
    context = RequestContext(request, {})
    context['debug'] = False
    # dump static resources
    # TODO: inspect each template and only pull in resources that are used
    for dirname, dirs, filenames in os.walk(static):
        for filename in filenames:
            full_path = os.path.join(dirname, filename)
            rel_path = os.path.relpath(full_path, static)
            # Fix: close the file handle instead of leaking it.
            with open(full_path, 'rb') as source:
                content = source.read()
            # os.path.splitext never raises; it returns '' for no extension,
            # so the old try/except IndexError around it was dead code.
            ext = os.path.splitext(filename)[1]
            if ext == '.css':
                # convert static refs to relative links
                dotted_rel = os.path.relpath(static, full_path)
                new_rel_path = '{0}{1}'.format(dotted_rel, '/static')
                content = content.replace(b'/static', bytes(new_rel_path, 'utf8'))
            path = os.path.join('static', rel_path)
            archive.writestr(path, content)
    for dirname, dirs, filenames in os.walk(comps):
        for filename in filenames:
            full_path = os.path.join(dirname, filename)
            rel_path = os.path.relpath(full_path, comps)
            template_path = os.path.join(comps.split('/')[-1], rel_path)
            html = render_to_string(template_path, context)
            # convert static refs to relative links; one '../' per dir level
            depth = len(rel_path.split(os.sep)) - 1
            if depth == 0:
                dotted_rel = '.'
            else:
                dotted_rel = '../' * depth
            new_rel_path = '{0}{1}'.format(dotted_rel, '/static')
            html = html.replace('/static', new_rel_path)
            if PY2:
                html = unicode(html)
            archive.writestr(rel_path, html.encode('utf8'))
    # Mark entries as created on MS-DOS so permissions render consistently.
    for item in archive.filelist:
        item.create_system = 0
    archive.close()
    response = HttpResponse(content_type="application/zip")
    response["Content-Disposition"] = "attachment; filename=comps.zip"
    in_memory.seek(0)
    response.write(in_memory.read())
    return response
"resource": ""
} |
def from_signed_raw(cls: Type[PeerType], raw: str) -> PeerType:
    """
    Return a Peer instance from a signed raw format string

    :param raw: Signed raw format string
    :raises MalformedDocumentError: if the signature line does not match
    :return: the parsed Peer document
    """
    # keepends=True: field parsers expect the raw lines with line endings.
    lines = raw.splitlines(True)
    n = 0
    # Fields appear in a fixed order; ``n`` advances one line per field.
    version = int(Peer.parse_field("Version", lines[n]))
    n += 1
    # "Type" and "Endpoints" headers are validated but their values unused.
    Peer.parse_field("Type", lines[n])
    n += 1
    currency = Peer.parse_field("Currency", lines[n])
    n += 1
    pubkey = Peer.parse_field("Pubkey", lines[n])
    n += 1
    block_uid = BlockUID.from_str(Peer.parse_field("Block", lines[n]))
    n += 1
    Peer.parse_field("Endpoints", lines[n])
    n += 1
    # Every following line until the signature is an endpoint declaration.
    endpoints = []
    while not Peer.re_signature.match(lines[n]):
        endpoints.append(endpoint(lines[n]))
        n += 1
    data = Peer.re_signature.match(lines[n])
    if data is None:
        raise MalformedDocumentError("Peer")
    signature = data.group(1)
    return cls(version, currency, pubkey, block_uid, endpoints, signature)
"resource": ""
} |
def generate_image_from_url(url=None, timeout=30):
    """
    Download an image from ``url`` into a temporary file.

    Returns a ``(file_name, image)`` tuple where ``image`` is a Django
    ``File`` wrapping the temp file, or ``(None, None)`` on any download
    failure (best-effort by design).
    """
    file_name = posixpath.basename(url)
    img_tmp = NamedTemporaryFile(delete=True)
    try:
        response = requests.get(url, timeout=timeout)
        response.raise_for_status()
    except Exception:  # NOQA - best effort: any failure yields (None, None)
        # Fix: close (and thereby delete) the temp file instead of leaking
        # the handle when the download fails.
        img_tmp.close()
        return None, None
    img_tmp.write(response.content)
    img_tmp.flush()
    image = File(img_tmp)
    image.seek(0)
    return file_name, image
"resource": ""
} |
def is_rhyme(d, w1, w2):
    """Check whether two words rhyme.

    Compares the "rhyming portion" (from the last stress mark's final vowel
    onward) of every pronunciation of ``w1`` against those of ``w2``.
    """
    for p1 in d[w1]:
        # extract only "rhyming portion" after the last stress mark
        p1 = p1.split("'")[-1]
        m = VOWELS_RE.search(p1)
        if not m:
            print(p1)
            # Fix: no vowel means no rhyming portion; skip this
            # pronunciation (previously fell through and crashed on
            # ``m.start()`` with m being None).
            continue
        p1 = p1[m.start():]
        for p2 in d[w2]:
            p2 = p2.split("'")[-1]
            m = VOWELS_RE.search(p2)
            if not m:
                print(w2, p2)
                continue
            p2 = p2[m.start():]
            if p1 == p2:
                return True
    return False
"resource": ""
} |
q43410 | NestedLookup._nested_lookup | train | def _nested_lookup(document, references, operation):
"""Lookup a key in a nested document, yield a value"""
if isinstance(document, list):
for d in document:
for result in NestedLookup._nested_lookup(d, references, operation):
yield result
if isinstance(document, dict):
for k, v in document.items():
if operation(k, v):
references.append((document, k))
yield v
elif isinstance(v, dict):
for result in NestedLookup._nested_lookup(v, references, operation):
yield result
elif isinstance(v, list):
for d in v:
for result in NestedLookup._nested_lookup(d, references, operation):
yield result | python | {
"resource": ""
} |
def build_chunk(oscillators):
    """
    Build an audio chunk and progress the oscillator states.

    Args:
        oscillators (list): A list of oscillator.Oscillator objects
            to build chunks from

    Returns:
        bytes: audio sample bytes ready to be written to a wave file
    """
    step_random_processes(oscillators)
    subchunks = []
    for osc in oscillators:
        osc.amplitude.step_amp()
        osc_chunk = osc.get_samples(config.CHUNK_SIZE)
        if osc_chunk is not None:
            subchunks.append(osc_chunk)
    if subchunks:
        new_chunk = sum(subchunks)
    else:
        # No oscillator produced samples: emit silence.
        new_chunk = numpy.zeros(config.CHUNK_SIZE)
    # If we exceed the maximum amplitude, handle it gracefully
    chunk_amplitude = amplitude.find_amplitude(new_chunk)
    if chunk_amplitude > config.MAX_AMPLITUDE:
        # Normalize the amplitude chunk to mitigate immediate clipping
        new_chunk = amplitude.normalize_amplitude(new_chunk,
                                                  config.MAX_AMPLITUDE)
        # Pick some of the offending oscillators (and some random others)
        # and lower their drift targets
        avg_amp = (sum(osc.amplitude.value for osc in oscillators) /
                   len(oscillators))
        for osc in oscillators:
            if (osc.amplitude.value > avg_amp and rand.prob_bool(0.1) or
                    rand.prob_bool(0.01)):
                osc.amplitude.drift_target = rand.weighted_rand(
                    [(-5, 1), (0, 10)])
                osc.amplitude.change_rate = rand.weighted_rand(
                    osc.amplitude.change_rate_weights)
    # Fix: ndarray.tostring() was deprecated and removed in modern NumPy;
    # tobytes() is the byte-identical replacement.
    return new_chunk.astype(config.SAMPLE_DATA_TYPE).tobytes()
"resource": ""
} |
def shell(database='default'):
    """Replace the current process with the command-line client for the
    specified database (mysql or sqlite3)."""
    target = engine[database]
    url = target.url
    dialect = target.dialect.name
    if dialect == 'mysql':
        args = ['mysql']
        if url.username:
            args.append('--user=%s' % url.username)
        if url.password:
            args.append('--password=%s' % url.password)
        # A unix socket takes precedence over host/port connection details.
        if 'unix_socket' in url.query:
            args.append("--socket=%s" % url.query['unix_socket'])
        elif url.host:
            args.append("--host=%s" % url.host)
        if url.port:
            args.append("--port=%s" % url.port)
        if url.database:
            args.append(url.database)
    elif dialect == 'sqlite':
        args = ['sqlite3', url.database]
    else:  # pragma: nocoverage
        raise RuntimeError(
            'Database shell not available for the dialect %r' % dialect)
    os.execvp(args[0], args)
"resource": ""
} |
def parseprint(code, filename="<string>", mode="exec", **kwargs):
    """Parse source code from a string and pretty-print its AST dump."""
    tree = parse(code, mode=mode)
    print(dump(tree, **kwargs))
"resource": ""
} |
def picknthweekday(year, month, dayofweek, hour, minute, whichweek):
    """Return the nth occurrence of a weekday in the given month.

    dayofweek == 0 means Sunday; whichweek == 5 means "last instance".
    """
    one_week = datetime.timedelta(days=7)
    first = datetime.datetime(year, month, 1, hour, minute)
    # Day (1..7) of the first occurrence of ``dayofweek`` in the month.
    # Works for ISO weekdays (1-7) and Microsoft-style (0-6): 7 % 7 == 0.
    weekdayone = first.replace(day=((dayofweek - first.isoweekday()) % 7 + 1))
    # Fix: the previous loop added (whichweek - n) weeks starting at n=0,
    # so it returned the (whichweek + 1)-th occurrence whenever that date
    # still fell inside the month (off by one week for whichweek 1-4).
    wd = weekdayone + (whichweek - 1) * one_week
    if wd.month != month:
        # whichweek == 5 asked for the "last instance": step back in.
        wd -= one_week
    return wd
"resource": ""
} |
def colorstart(fgcolor, bgcolor, weight):
    '''Begin a text style by emitting the matching ANSI escape sequence.'''
    # A truthy weight selects the bold attribute, otherwise normal.
    style = bold if weight else norm
    if bgcolor:
        out('\x1b[%s;%s;%sm' % (style, fgcolor, bgcolor))
    else:
        out('\x1b[%s;%sm' % (style, fgcolor))
"resource": ""
} |
def bargraph(data, maxwidth, incolor=True, cbrackets=('\u2595', '\u258F')):
    ''' Creates a monochrome or two-color bar graph.

    data is a sequence of (char, fraction, fgcolor, bgcolor, bold) tuples;
    maxwidth is the total width in characters including the brackets.
    '''
    # Fix: this guard previously came *after* data[0]/data[-1] were indexed,
    # so an empty data sequence raised IndexError instead of returning.
    if len(data) < 1:
        return  # Nada to do
    threshold = 100.0 // (maxwidth * 2)  # if smaller than 1/2 of one char wide
    position = 0
    begpcnt = data[0][1] * 100
    endpcnt = data[-1][1] * 100
    maxwidth = maxwidth - 2  # because of brackets
    datalen = len(data)
    # Print left bracket in correct color:
    if cbrackets and incolor:  # and not (begpcnt == 0 and endpcnt == 0):
        if begpcnt < threshold:
            bkcolor = data[-1][2]  # greenbg
        else:
            bkcolor = data[0][2]  # redbg
        cprint(cbrackets[0], data[0][2], bkcolor, None, None)
    else:
        out(cbrackets[0])
    for i, part in enumerate(data):
        # unpack data
        char, pcnt, fgcolor, bgcolor, bold = part
        width = int(round(pcnt/100.0 * maxwidth, 0))
        position = position + width
        # and graph
        if incolor and not (fgcolor is None):
            cprint(char * width, fgcolor, bgcolor, bold, False)
        else:
            out((char * width))
        if i == (datalen - 1):  # correct last one for rounding drift
            if position < maxwidth:
                if incolor:
                    cprint(char * (maxwidth-position), fgcolor, bgcolor,
                           bold, False)
                else:
                    out(char * (maxwidth-position))
            elif position > maxwidth:
                out(chr(8) + ' ' + chr(8))  # backspace
    # Print right bracket in correct color:
    if cbrackets and incolor:
        if endpcnt < threshold:
            bkcolor = data[0][3]  # redbg
        else:
            # NOTE(review): data[1] here (vs data[-1] two lines below)
            # looks inconsistent with the left-bracket logic — confirm.
            bkcolor = data[1][3]  # greenbg
        cprint(cbrackets[1], data[-1][2], bkcolor, None, None)
    else:
        out(cbrackets[1])
"resource": ""
} |
def rainbar(data, maxwidth, incolor=True, hicolor=True,
            cbrackets=('\u2595', '\u258F')):
    ''' Creates a "rainbar" style bar graph.

    The filled portion is colored with a gradient from the current palette
    (one palette color per ``bucket`` characters), unless a segment carries
    an explicit foreground color.  ``data`` is a sequence of
    (char, pcnt, fgcolor, bgcolor, bold) tuples; pcnt is 0-100.
    '''
    if not data: return  # Nada to do
    datalen = len(data)
    endpcnt = data[-1][1]
    maxwidth = maxwidth - 2  # because of brackets
    # setup
    csi, csib, _, pal, rst, plen = get_palette(hicolor)
    empty = data[-1][0]
    # Width of one palette color band, in characters.
    bucket = float(maxwidth) / plen
    position = 0
    # Print left bracket in correct color:
    if incolor:
        out((csi % pal[0]) + cbrackets[0])  # start bracket
    else:
        out(cbrackets[0])
    for i, part in enumerate(data):
        char, pcnt, fgcolor, bgcolor, bold = part
        if fgcolor and hicolor:
            # Map the 16-color index to its 256-color equivalent.
            fgcolor = map8[fgcolor]
        if not bold:
            csib = csi
        lastind = None
        width = int(maxwidth * (pcnt / 100.0))
        offset = position
        position += width
        if incolor:
            for j in range(width):
                # faster?
                # Explicit fgcolor wins; otherwise pick the gradient band.
                colorind = fgcolor or min(int((j+offset)/bucket), (plen-1))
                #~ colorind=fgcolor or get_color_index(j, offset,maxwidth,plen)
                # Only emit an escape sequence when the color changes.
                if colorind == lastind:
                    out(char)
                else:
                    color = fgcolor or pal[colorind]
                    out((csib % color) + char)
                lastind = colorind
        else:
            out((char * width))
        if i == (datalen - 1):  # check if last one correct
            if position < maxwidth:
                # Pad with the "empty" char to compensate rounding drift.
                rest = maxwidth - position
                if incolor:
                    out((csib % pal[-1]) + (empty * rest))
                else:
                    out(char * rest)
            elif position > maxwidth:
                out(chr(8) + ' ' + chr(8))  # backspace
    # Print right bracket in correct color:
    if incolor:
        # presumably endpcnt > 1 flags an over-full bar — confirm intent.
        lastcolor = darkred if (hicolor and endpcnt > 1) else pal[-1]
        out((csi % lastcolor) + cbrackets[1])  # end bracket
        colorend()
    else:
        out(cbrackets[1])
"resource": ""
} |
q43418 | Config._set_linters | train | def _set_linters(self):
"""Use user linters or all available when not specified."""
if 'linters' in self._config:
self.user_linters = list(self._parse_cfg_linters())
self.linters = {linter: self._all_linters[linter]
for linter in self.user_linters}
else:
self.linters = self._all_linters | python | {
"resource": ""
} |
def print_config(self):
    """Print all yala configurations, including defaults and the user's."""
    chosen = self.user_linters or list(self.linters)
    print('linters:', ', '.join(chosen))
    # 'linters' was printed above, so skip it in the key/value dump.
    for key, value in self._config.items():
        if key == 'linters':
            continue
        print('{}: {}'.format(key, value))
"resource": ""
} |
q43420 | Config._parse_cfg_linters | train | def _parse_cfg_linters(self):
"""Return valid linter names found in config files."""
user_value = self._config.get('linters', '')
# For each line of "linters" value, use comma as separator
for line in user_value.splitlines():
yield from self._parse_linters_line(line) | python | {
"resource": ""
} |
def get_linter_config(self, name):
    """Return this linter's options with the linter-name prefix stripped."""
    prefix = name + ' '
    plen = len(prefix)
    options = {}
    for key, value in self._config.items():
        if key.startswith(prefix):
            options[key[plen:]] = value
    return options
"resource": ""
} |
q43422 | Config._merge | train | def _merge(cls, default, user):
"""Append user options to default options. Return yala section."""
section = cls._CFG_SECTION
merged = default[section]
if section not in user:
return merged
user = user[section]
for key, value in user.items():
if key in merged:
merged[key] += ' ' + value
else:
merged[key] = value
return merged | python | {
"resource": ""
} |
def as_text(str_or_bytes, encoding='utf-8', errors='strict'):
    """Return the input as a text (unicode) string.

    Text passes through unchanged; bytes are decoded with ``encoding``.

    >>> print(as_text(b'foo'))
    foo
    >>> b'foo'.decode('utf-8') == u'foo'
    True
    """
    return (str_or_bytes if isinstance(str_or_bytes, text)
            else str_or_bytes.decode(encoding, errors))
"resource": ""
} |
def attempt_dev_link_via_import(self, egg):
    """Create egg-link to FS location if an egg is found through importing.

    Sometimes an egg *is* installed, but without a proper egg-info file.
    So we attempt to import the egg in order to return a link anyway.

    TODO: currently it only works with simple package names like
    "psycopg2" and "mapnik".

    :param egg: package name to try importing.
    :returns: True when an egg-link file was written, otherwise None.
    """
    try:
        imported = __import__(egg)
    except ImportError:
        self.logger.warn("Tried importing '%s', but that also didn't work.", egg)
        self.logger.debug("For reference, sys.path is %s", sys.path)
        return
    self.logger.info("Importing %s works, however", egg)
    try:
        probable_location = os.path.dirname(imported.__file__)
    except Exception:
        # Fix: was a bare ``except:``; Exception keeps the best-effort
        # behaviour without swallowing SystemExit/KeyboardInterrupt.
        self.logger.exception("Determining the location failed, however")
        return
    filesystem_egg_link = os.path.join(
        self.dev_egg_dir,
        '%s.egg-link' % egg)
    # Fix: context manager ensures the link file is closed on write errors.
    with open(filesystem_egg_link, 'w') as link_file:
        link_file.write(probable_location)
    self.logger.info('Using sysegg %s for %s', probable_location, egg)
    self.added.append(filesystem_egg_link)
    return True
"resource": ""
} |
def _install_exception_handler(self):
    """
    Installs a replacement for sys.excepthook, which handles pretty-printing uncaught exceptions.
    """
    def handler(t, value, traceback):
        # Verbose mode: delegate to the default hook for the full traceback.
        if self.args.verbose:
            sys.__excepthook__(t, value, traceback)
        else:
            # Otherwise print just the exception message.
            # NOTE(review): ``unicode`` is Python 2 only — confirm this code
            # base is not expected to run under Python 3.
            sys.stderr.write('%s\n' % unicode(value).encode('utf-8'))
    sys.excepthook = handler
"resource": ""
} |
def strip_encoding_cookie(filelike):
    """Yield lines from a text-mode file, skipping a PEP 263 encoding
    cookie if it appears in either of the first two lines."""
    it = iter(filelike)
    try:
        line = next(it)
        if not cookie_comment_re.match(line):
            yield line
        line = next(it)
        if not cookie_comment_re.match(line):
            yield line
    except StopIteration:
        # Fewer than two lines in total: nothing more to yield.
        return
    # The rest of the file passes through untouched.
    yield from it
"resource": ""
} |
def source_to_unicode(txt, errors='replace', skip_encoding_cookie=True):
    """Converts a bytes string with python source code to unicode.

    Unicode strings are passed through unchanged. Byte strings are checked
    for the python source file encoding cookie to determine encoding.
    txt can be either a bytes buffer or a string containing the source
    code.
    """
    if isinstance(txt, six.text_type):
        return txt
    if isinstance(txt, six.binary_type):
        buffer = io.BytesIO(txt)
    else:
        buffer = txt
    try:
        encoding, _ = detect_encoding(buffer.readline)
    except SyntaxError:
        encoding = "ascii"
    buffer.seek(0)
    # Fix: removed an IncrementalNewlineDecoder that was created here but
    # never used (decode_source builds its own).
    text = io.TextIOWrapper(buffer, encoding, errors=errors, line_buffering=True)
    text.mode = 'r'  # some callers expect a file-like object exposing .mode
    if skip_encoding_cookie:
        return u"".join(strip_encoding_cookie(text))
    else:
        return text.read()
"resource": ""
} |
def decode_source(source_bytes):
    """Decode bytes representing source code and return the string.

    Universal newline support is used in the decoding.
    """
    decoder = io.IncrementalNewlineDecoder(None, True)
    return decoder.decode(source_to_unicode(source_bytes))
"resource": ""
} |
def cached(fn, size=32):
    '''Create a type-safe LRU cache around the decorated function.

    Unlike functools.lru_cache, this will not crash when unhashable
    arguments are passed to the function.
    '''
    assert callable(fn)
    assert isinstance(size, int)
    caching_fn = lru_cache(size, typed=True)(fn)
    # ``overload`` falls back to the uncached fn for unhashable arguments.
    return overload(fn)(caching_fn)
"resource": ""
} |
def get_model(app_dot_model):
    """
    Return the Django model class for an ``<app_name>.<model_name>`` string.

    Useful for preventing circular-import errors in a Django project: the
    default Django User model, for instance, is ``auth.User`` (``auth`` app,
    ``User`` model).

    Raises ValueError when the input is not in ``<app>.<model>`` form.
    """
    try:
        app, model = app_dot_model.split('.')
    except ValueError:
        raise ValueError(
            f'Passed in value \'{app_dot_model}\' was not in the format '
            '`<app_name>.<model_name>`.')
    return apps.get_app_config(app).get_model(model)
"resource": ""
} |
def run_cmd(cmd, log='log.log', cwd='.', stdout=sys.stdout, bufsize=1, encode='utf-8'):
    """
    Run a command as a subprocess, echoing output lines while it runs and
    writing the final captured output to a log file.

    :param cmd: command to execute (program plus arguments)
    :param log(str) - log filename to be used
    :param cwd(str) - basedir to write/create the log file (also the
        subprocess working directory)
    :param stdout(pipe) - stream live output lines are echoed to
    :param bufsize(int) - unused; kept for backwards compatibility
    :param encode(str) - unused; kept for backwards compatibility

    Returns:
        The process object
    """
    logfile = '%s/%s' % (cwd, log)
    if os.path.exists(logfile):
        os.remove(logfile)
    proc_args = {
        'stdout': subprocess.PIPE,
        'stderr': subprocess.PIPE,
        'cwd': cwd,
        'universal_newlines': True
    }
    proc = subprocess.Popen(cmd, **proc_args)
    # Echo output while the process is alive.  NOTE(review): lines read
    # after the process exits but before the break are neither echoed nor
    # logged — preserved as-is to keep behaviour unchanged.
    while True:
        line = proc.stdout.readline()
        if proc.poll() is None:
            stdout.write(line)
        else:
            break
    out, err = proc.communicate()
    # Log stdout when present, stderr otherwise; close the file promptly.
    with open(logfile, 'w') as f:
        f.write(out if out else err)
    # Fix: the docstring promised the process object, but the function
    # previously returned None.
    return proc
"resource": ""
} |
def create(output_dir):
    """Create a new collector or actor component, interactively prompting
    for repository details and rendering the bundled cookiecutter template
    into ``output_dir``."""
    # The cookiecutter template ships alongside this module.
    template_path = os.path.join(os.path.dirname(__file__), 'project_template')
    click.secho('Let\'s create a new component!', fg='green')
    name = click.prompt('What is the name of this component (ex. python-pip)?')
    click.secho('')
    click.secho('We assume this will be pushed to GitHub and Docker Hub eventually, but these don\'t have to exist yet.', fg='green')
    repo_owner = click.prompt('GitHub repo owner (i.e. your username or organization name)')
    repo_name = click.prompt('GitHub repo name', default=name)
    dockerhub_owner = click.prompt('Docker Hub repo owner', default=repo_owner)
    dockerhub_name = click.prompt('Docker Hub repo name', default=repo_name)
    license_owner = click.prompt('Who should be the copyright owner on project?', default=repo_owner)
    # Values injected into the cookiecutter template context.
    extra_context = {
        'name': name,
        # shields.io badge URLs require dashes escaped as double dashes.
        'name_shields_io': name.replace('-', '--'),
        'current_year': datetime.datetime.now().year,
        'dependencies_cli_version': __version__,
        'repo_owner': repo_owner,
        'repo_name': repo_name,
        'dockerhub_owner': dockerhub_owner,
        'dockerhub_name': dockerhub_name,
        'license_owner': license_owner,
    }
    # no_input=True: all answers were collected above, so render silently.
    project_dir = cookiecutter(template_path, no_input=True, extra_context=extra_context, output_dir=output_dir)
    click.secho('')
    click.secho('{name} is ready to go, `cd {project_dir}` and try running `dependencies test`!'.format(name=name, project_dir=project_dir), fg='green')
    click.secho(
        'We started you out with a fully functioning component based in python.\n' +
        'Once you\'ve got a handle on how it works then you can change it to whatever language you want.'
    )
"resource": ""
} |
def validate(text, file, schema_type):
    """Validate JSON input using dependencies-schema.

    Exactly one of ``text`` or ``file`` should be given; exits with status 1
    when neither is provided.  ``schema_type`` selects which schema
    validator to apply ('dependencies' or 'actions').
    """
    content = None
    if text:
        print('Validating text input...')
        content = text
    # Note: a file argument takes precedence when both are supplied.
    if file:
        print('Validating file input...')
        content = file.read()
    if content is None:
        click.secho('Please give either text input or a file path. See help for more details.', fg='red')
        exit(1)
    try:
        # Pick the validator matching the requested schema type.
        if schema_type == 'dependencies':
            validator = DependenciesSchemaValidator()
        elif schema_type == 'actions':
            validator = ActionsSchemaValidator()
        else:
            raise Exception('Unknown type')
        validator.validate_json(content)
        click.secho('Valid JSON schema!', fg='green')
    except Exception as e:
        click.secho('Invalid JSON schema!', fg='red')
        # Re-raise so the caller still sees the original validation error.
        raise e
"resource": ""
} |
def run(self):
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.
    """
    # Build the engine from the alembic ini section; NullPool because a
    # migration run only needs one short-lived connection.
    connectable = engine_from_config(
        self._config.get_section(self._config.config_ini_section),
        prefix='sqlalchemy.',
        poolclass=pool.NullPool)
    with connectable.connect() as connection:
        # Make sure the target schema exists before migrating into it.
        ensureSchemaExists(connectable, self._schemaName)
        context.configure(
            connection=connection,
            target_metadata=self._targetMetadata,
            include_object=self._includeObjectFilter,
            include_schemas=True,
            # Keep alembic's version table inside the target schema too.
            version_table_schema=self._schemaName
        )
        with context.begin_transaction():
            context.run_migrations()
"resource": ""
} |
def _clean():
    """
    Remove build artifacts (.eggs, build/, the egg-info dir) from the
    project directory.
    """
    LOGGER.info('Cleaning project directory...')
    stale_folders = (
        '.eggs',
        'build',
        f'{config.PACKAGE_NAME()}.egg-info',
    )
    for folder in stale_folders:
        if os.path.exists(folder):
            LOGGER.info('\tremoving: %s', folder)
            shutil.rmtree(folder)
"resource": ""
} |
def tokenise(template):
    '''A generator which yields Token instances for a template string.'''
    pos = 0
    lineno = 0
    for match in tag_re.finditer(template):
        start, end = match.span()
        lineno = template.count('\n', 0, start) + 1  # Humans count from 1
        # Any gap between the previous match and this one is literal text.
        if pos < start:
            yield Token(TokenType.text, template[pos:start], lineno)
        pos = end
        mode = match.lastgroup
        yield Token(TokenType[mode], match.group(mode), lineno)
    # Anything after the final match is a trailing text node.
    if pos < len(template):
        yield Token(TokenType.text, template[pos:], lineno)
"resource": ""
} |
def delete(table, session, conds):
    """Performs a hard delete on a row, which means the row is deleted from the Savage
    table as well as the archive table.

    :param table: the model class which inherits from
        :class:`~savage.models.user_table.SavageModelMixin` and specifies the model
        of the user table from which we are querying
    :param session: a sqlalchemy session with connections to the database
    :param conds: a list of dictionary of key value pairs where keys are columns in the table
        and values are values the column should take on. If specified, this query will
        only return rows where the columns meet all the conditions. The columns specified
        in this dictionary must be exactly the unique columns that versioning pivots around.
    """
    # Savepoint: both deletes succeed or roll back together.
    with session.begin_nested():
        archive_conds_list = _get_conditions_list(table, conds)
        session.execute(
            sa.delete(table.ArchiveTable, whereclause=_get_conditions(archive_conds_list))
        )
        # The user table is addressed via non-archive column conditions.
        conds_list = _get_conditions_list(table, conds, archive=False)
        session.execute(
            sa.delete(table, whereclause=_get_conditions(conds_list))
        )
"resource": ""
} |
q43438 | _format_response | train | def _format_response(rows, fields, unique_col_names):
"""This function will look at the data column of rows and extract the specified fields. It
will also dedup changes where the specified fields have not changed. The list of rows should
be ordered by the compound primary key which versioning pivots around and be in ascending
version order.
This function will return a list of dictionaries where each dictionary has the following
schema:
{
'updated_at': timestamp of the change,
'version': version number for the change,
'data': a nested dictionary containing all keys specified in fields and values
corresponding to values in the user table.
}
Note that some versions may be omitted in the output for the same key if the specified fields
were not changed between versions.
:param rows: a list of dictionaries representing rows from the ArchiveTable.
:param fields: a list of strings of fields to be extracted from the archived row.
"""
output = []
old_id = None
for row in rows:
id_ = {k: row[k] for k in unique_col_names}
formatted = {k: row[k] for k in row if k != 'data'}
if id_ != old_id: # new unique versioned row
data = row['data']
formatted['data'] = {k: data.get(k) for k in fields}
output.append(formatted)
else:
data = row['data']
pruned_data = {k: data.get(k) for k in fields}
if (
pruned_data != output[-1]['data'] or
row['deleted'] != output[-1]['deleted']
):
formatted['data'] = pruned_data
output.append(formatted)
old_id = id_
return output | python | {
"resource": ""
} |
q43439 | _get_conditions_list | train | def _get_conditions_list(table, conds, archive=True):
"""This function returns a list of list of == conditions on sqlalchemy columns given conds.
This should be treated as an or of ands.
:param table: the user table model class which inherits from
savage.models.SavageModelMixin
:param conds: a list of dictionaries of key value pairs where keys are column names and
values are conditions to be placed on the column.
:param archive: If true, the condition is with columns from the archive table. Else its from
the user table.
"""
if conds is None:
conds = []
all_conditions = []
for cond in conds:
if len(cond) != len(table.version_columns):
raise ValueError('Conditions must specify all unique constraints.')
conditions = []
t = table.ArchiveTable if archive else table
for col_name, value in cond.iteritems():
if col_name not in table.version_columns:
raise ValueError('{} is not one of the unique columns <{}>'.format(
col_name, ','.join(table.version_columns)
))
conditions.append(getattr(t, col_name) == value)
all_conditions.append(conditions)
return all_conditions | python | {
"resource": ""
} |
q43440 | _get_limit_and_offset | train | def _get_limit_and_offset(page, page_size):
"""Returns a 0-indexed offset and limit based on page and page_size for a MySQL query.
"""
if page < 1:
raise ValueError('page must be >= 1')
limit = page_size
offset = (page - 1) * page_size
return limit, offset | python | {
"resource": ""
} |
def _get_order_clause(archive_table):
    """Build an ascending order clause over the versioned unique constraint
    columns, followed by the version column as a tie-breaker."""
    clause = [
        sa.asc(getattr(archive_table, name))
        for name in archive_table._version_col_names
    ]
    clause.append(sa.asc(archive_table.version_id))
    return clause
"resource": ""
} |
def add_resource_permissions(*args, **kwargs):
    """
    Syncdb hook that creates the extra (operation/condition) permissions
    for every content type exposed through a SpiffAuthorization resource.
    """
    # for each of our content types
    for resource in find_api_classes('v1_api', ModelResource):
        auth = resource._meta.authorization
        content_type = ContentType.objects.get_for_model(resource._meta.queryset.model)
        if isinstance(auth, SpiffAuthorization):
            conditions = auth.conditions()
            operations = auth.operations()
            # No conditions: still emit one unconditioned permission per op.
            if len(conditions) == 0:
                conditions = (None,)
            for condition in conditions:
                for operation in operations:
                    # build our permission slug
                    if condition:
                        codename = "%s_%s_%s" % (operation[0], condition[0], content_type.model)
                        name = "Can %s %s, when %s" % (operation[1], content_type.name,
                                                       condition[1])
                    else:
                        # NOTE(review): this branch uses operation[1] in the
                        # codename while the branch above uses operation[0]
                        # — looks inconsistent; confirm which is intended.
                        codename = "%s_%s" % (operation[1], content_type.model)
                        name = "Can %s %s" % (operation[1], content_type.name)
                    # if it doesn't exist..
                    if not Permission.objects.filter(content_type=content_type, codename=codename):
                        # add it; name is truncated — presumably to fit the
                        # Permission.name column length, verify the limit.
                        Permission.objects.create(content_type=content_type,
                                                  codename=codename,
                                                  name=name[:49])
                        funcLog().debug("Created permission %s.%s (%s)", content_type.app_label, codename, name)
"resource": ""
} |
def areObservableElements(self, elementNames):
    """Tell whether every entry of *elementNames* is an observable element.

    :param elementNames: sized collection of element names to evaluate
    :return: True if all names are observable elements, otherwise False.
    :rtype: bool
    :raises TypeError: if *elementNames* has no length (not array-like)
    """
    if not hasattr(elementNames, "__len__"):
        raise TypeError(
            "Element name should be a array of strings." +
            "I receive this {0}"
            .format(elementNames))
    return self._evaluateArray(elementNames)
"resource": ""
} |
def isObservableElement(self, elementName):
    """Tell whether *elementName* names an observable element.

    The wildcard "*" is always considered observable.

    :param str elementName: the element name to evaluate
    :return: True if it is an observable element, otherwise False.
    :rtype: bool
    :raises TypeError: if *elementName* is not a string
    """
    if not isinstance(elementName, str):
        raise TypeError(
            "Element name should be a string ." +
            "I receive this {0}"
            .format(elementName))
    if elementName == "*":
        return True
    return self._evaluateString(elementName)
"resource": ""
} |
def add(self, observableElement):
    """Register *observableElement* in the store.

    :param str observableElement: the name of the observable element
    :raises RuntimeError: if the name is already present in the store
    """
    if observableElement in self._observables:
        raise RuntimeError(
            "{0} is already an observable element"
            .format(observableElement))
    self._observables.append(observableElement)
"resource": ""
} |
def remove(self, observableElement):
    """Remove *observableElement* from the store; unknown names are ignored.

    :param str observableElement: the name of the observable element
    """
    try:
        self._observables.remove(observableElement)
    except ValueError:
        # Name was not registered -- silently do nothing, as before.
        pass
"resource": ""
} |
def get_response(self, url, username=None, password=None):
    """Fetch *url* and return the response object, using urllib2's
    built-in HTTP basic-auth machinery.

    Credentials are resolved in this order: the password manager's stored
    entry for the URL's host, then the explicit arguments, then an
    interactive prompt (only when ``self.prompting`` is true).
    """
    scheme, netloc, path, query, frag = urlparse.urlsplit(url)
    req = self.get_request(url)
    stored_username, stored_password = self.passman.find_user_password(None, netloc)
    # see if we have a password stored for this host already
    if stored_username is None:
        # Nothing stored: fall back to the caller-supplied credentials,
        # prompting interactively if allowed and none were given.
        if username is None and self.prompting:
            username = urllib.quote(raw_input('User for %s: ' % netloc))
            password = urllib.quote(getpass.getpass('Password: '))
        if username and password:
            self.passman.add_password(None, netloc, username, password)
        # Re-read so stored_* reflect whatever was just added (if anything).
        stored_username, stored_password = self.passman.find_user_password(None, netloc)
    authhandler = urllib2.HTTPBasicAuthHandler(self.passman)
    opener = urllib2.build_opener(authhandler)
    # FIXME: should catch a 401 and offer to let the user reenter credentials
    return opener.open(req)
"resource": ""
} |
def setup(self, proxystr='', prompting=True):
    """
    Sets the proxy handler given the option passed on the command
    line. If an empty string is passed it looks at the HTTP_PROXY
    environment variable.
    """
    self.prompting = prompting
    proxy = self.get_proxy(proxystr)
    if not proxy:
        return
    # Route both HTTP and FTP traffic through the resolved proxy, and
    # install the opener globally for subsequent urllib2 calls.
    handler = urllib2.ProxyHandler({"http": proxy, "ftp": proxy})
    opener = urllib2.build_opener(handler, urllib2.CacheFTPHandler)
    urllib2.install_opener(opener)
"resource": ""
} |
def get_proxy(self, proxystr=''):
    """
    Resolve the proxy string given on the command line, falling back to
    the HTTP_PROXY environment variable when it is empty.

    Supports "host:port", "user@host:port" (password prompted), and
    "user:pass@host:port". Returns None when no proxy is configured.
    """
    proxystr = proxystr or os.environ.get('HTTP_PROXY', '')
    if not proxystr:
        return None
    if '@' not in proxystr:
        return proxystr
    user_password, server_port = proxystr.split('@', 1)
    if ':' in user_password:
        user, password = user_password.split(':', 1)
        return '%s:%s@%s' % (user, password, server_port)
    # Username without a password: ask for it interactively.
    prompt = 'Password for %s@%s: ' % (user_password, server_port)
    password = urllib.quote(getpass.getpass(prompt))
    return '%s:%s@%s' % (user_password, password, server_port)
"resource": ""
} |
def pageassert(func):
    '''
    Decorator that validates the first positional argument as a page
    number: it must lie in the inclusive range 1..40.
    '''
    @wraps(func)
    def inner(*args, **kwargs):
        page = args[0]
        if not 1 <= page <= 40:
            raise ValueError('Page Number not found')
        return func(*args, **kwargs)
    return inner
"resource": ""
} |
def do_mfa(self, args):
    """
    Enter a 6-digit MFA token. Nephele will execute the appropriate
    `aws` command line to authenticate that token.

    mfa -h for more details
    """
    parser = CommandArgumentParser("mfa")
    parser.add_argument(dest='token',help='MFA token value');
    # NOTE(review): this help text looks copy-pasted from the token argument.
    parser.add_argument("-p","--profile",dest='awsProfile',default=AwsConnectionFactory.instance.getProfile(),help='MFA token value');
    args = vars(parser.parse_args(args))
    token = args['token']
    awsProfile = args['awsProfile']
    # Look up the MFA device ARN configured for this profile.
    arn = AwsConnectionFactory.instance.load_arn(awsProfile)
    credentials_command = ["aws","--profile",awsProfile,"--output","json","sts","get-session-token","--serial-number",arn,"--token-code",token]
    output = run_cmd(credentials_command) # Throws on non-zero exit :yey:
    credentials = json.loads("\n".join(output.stdout))['Credentials']
    # Cache the temporary STS credentials for subsequent AWS calls.
    AwsConnectionFactory.instance.setMfaCredentials(credentials,awsProfile)
"resource": ""
} |
def do_up(self,args):
    """
    Navigate up by one level.

    For example, if you are in `(aws)/stack:.../asg:.../`, executing `up` will place you in `(aws)/stack:.../`.

    up -h for more details
    """
    parser = CommandArgumentParser("up")
    # Parsed only for its side effects (e.g. -h handling); no options defined.
    args = vars(parser.parse_args(args))
    if None == self.parent:
        # Root processor has no parent to pop back to.
        print "You're at the root. Try 'quit' to quit"
    else:
        # A truthy return tells the surrounding cmd loop to exit this level.
        return True
"resource": ""
} |
def do_slash(self,args):
    """
    Navigate back to the root level.

    For example, if you are in `(aws)/stack:.../asg:.../`, executing `slash` will place you in `(aws)/`.

    slash -h for more details
    """
    parser = CommandArgumentParser("slash")
    # Parsed only for its side effects (e.g. -h handling); no options defined.
    args = vars(parser.parse_args(args))
    if None == self.parent:
        # Already at the root -- nothing to unwind.
        print "You're at the root. Try 'quit' to quit"
    else:
        # SlashException unwinds every nested processor back to the root.
        raise SlashException()
"resource": ""
} |
def do_profile(self,args):
    """
    Select nephele profile

    profile -h for more details
    """
    parser = CommandArgumentParser("profile")
    parser.add_argument(dest="profile",help="Profile name")
    parser.add_argument('-v','--verbose',dest="verbose",action='store_true',help='verbose')
    args = vars(parser.parse_args(args))
    profile = args['profile']
    verbose = args['verbose']
    if verbose:
        print "Selecting profile '{}'".format(profile)
    # Unknown profile names fall through with an empty dict, so selection
    # still succeeds with just the name recorded.
    selectedProfile = {}
    if profile in Config.config['profiles']:
        selectedProfile = Config.config['profiles'][profile]
    selectedProfile['name'] = profile
    Config.config['selectedProfile'] = selectedProfile
    # The nephele profile may map onto a differently named AWS profile.
    awsProfile = profile
    if 'awsProfile' in selectedProfile:
        awsProfile = selectedProfile['awsProfile']
    AwsConnectionFactory.resetInstance(profile=awsProfile)
"resource": ""
} |
def parse(timestring):
    """Convert a statbank time string to a python datetime object.

    Each registered parser supplies a regex ``pattern`` and a ``factory``
    that builds the datetime from the matched integer groups.

    :raises TimeError: if no parser recognizes *timestring*.
    """
    for parser in _PARSERS:
        match = parser['pattern'].match(timestring)
        if not match:
            continue
        ints = tuple(int(group) for group in match.groups())
        return parser['factory'](ints)
    raise TimeError('Unsupported time format {}'.format(timestring))
"resource": ""
} |
def confirm(text, default=True):
    """
    Console confirmation dialog based on raw_input.

    Repeats the prompt until the user answers "y" or "n"; an empty answer
    selects *default*. Returns True for yes, False for no.
    """
    legend = "[y]/n" if default else "y/[n]"
    answer = ""
    while answer not in ("y", "n"):
        answer = raw_input(text + " ({}): ".format(legend)).lower()
        if not answer:
            answer = "y" if default else "n"
    return answer == "y"
"resource": ""
} |
def read_file(fname):
    """
    Read file, convert wildcards into regular expressions, skip empty lines
    and comments.

    Each non-empty, non-comment line is lower-cased and turned into a
    ".*"-anchored regex in which every "*" wildcard becomes ".*".
    Returns an empty list when the file cannot be read.
    """
    res = []
    try:
        with open(fname, 'r') as f:
            for line in f:
                line = line.rstrip('\n').rstrip('\r')
                if line and (line[0] != '#'):
                    # Raw string: the old "\*" relied on Python passing the
                    # unknown escape through (a DeprecationWarning).
                    regexline = ".*" + re.sub(r"\*", ".*", line) + ".*"
                    res.append(regexline.lower())
    except IOError:
        # Missing/unreadable file simply yields no patterns.
        pass
    return res
"resource": ""
} |
def drop_it(title, filters, blacklist):
    """
    Decide whether a found torrent should be dropped.

    A title is kept only when it matches at least one filter pattern and
    matches no blacklist pattern; every other case returns True ("drop").
    """
    lowered = title.lower()
    if not any(re.match(pattern, lowered) for pattern in filters):
        return True
    return any(re.match(pattern, lowered) for pattern in blacklist)
"resource": ""
} |
def do_list():
    """
    CLI action "list configurations".
    """
    # os.walk(...).next() (Python 2) yields (dirpath, dirnames, filenames);
    # [1] is the list of immediate subdirectories, one per configuration.
    dirs = os.walk(CONFIG_ROOT).next()[1]
    if dirs:
        print "List of available configurations:\n"
        for d in dirs:
            print " * {}".format(d)
    else:
        print "No configurations available."
"resource": ""
} |
q43460 | do_create | train | def do_create(config, config_dir):
"""
CLI action "create new configuration".
"""
if os.path.exists(config_dir):
print "Configuration '{}' already exists.".format(config)
exit(1)
os.makedirs(config_dir)
print "Configuration directory created."
url = raw_input("RSS URL for processing []: ")
torrent_dir = raw_input("Output directory for found .torrent files [{}]: "\
.format(DEFAULT_TORRRENT_DIR)) or DEFAULT_TORRRENT_DIR
update_interval = raw_input("Update interval (mins) [{}]: "\
.format(DEFAULT_UPDATE_INTERVAL)) or DEFAULT_UPDATE_INTERVAL
editor = os.environ["EDITOR"]
config_filter = os.path.join(config_dir, 'filter')
if confirm("Do you want to create filters list?", False):
call([editor, config_filter])
print "Filter configuration has been saved."
config_blacklist = os.path.join(config_dir, 'blacklist')
if confirm("Do you want to create blacklist?", False):
call([editor, config_filter])
print "Blacklist configuration has been saved."
config_file = os.path.join(config_dir, 'config')
config_data = json.dumps({
"url": url,
"torrent_dir": torrent_dir,
"update_interval": update_interval
}, sort_keys=True, indent=4, separators=(',', ': '))
with open(config_file, 'w') as f:
f.write(config_data)
ct = CronTab(user=True)
cmd = "{} {} -e {}".format(sys.executable,
os.path.abspath(__file__),
config)
job = ct.new(command=cmd)
job.minute.every(update_interval)
job.enable()
ct.write()
print "Crontab updated."
print "Config '{}' has been saved.".format(config) | python | {
"resource": ""
} |
q43461 | do_update | train | def do_update(config, config_dir):
"""
CLI action "update new configuration".
"""
if not os.path.exists(config_dir):
print "Configuration '{}' does not exist.".format(config)
exit(1)
config_file = os.path.join(config_dir, 'config')
with open(config_file, 'r') as f:
old_config_data = json.load(f)
old_url = old_config_data['url']
old_torrent_dir = old_config_data['torrent_dir']
old_update_interval = old_config_data['update_interval']
url = raw_input("RSS URL for processing [{}]: "\
.format(old_url)) or old_url
torrent_dir = raw_input("Output directory for found .torrent files [{}]: "\
.format(old_torrent_dir)) or old_torrent_dir
update_interval = raw_input("Update interval (mins) [{}]: "\
.format(old_update_interval)) or old_update_interval
editor = os.environ["EDITOR"]
config_filter = os.path.join(config_dir, 'filter')
if confirm("Do you want to edit filters list?", False):
call([editor, config_filter])
print "Filter configuration has been saved."
config_blacklist = os.path.join(config_dir, 'blacklist')
if confirm("Do you want to edit blacklist?", False):
call([editor, config_filter])
print "Blacklist configuration has been saved."
config_data = json.dumps({
"url": url,
"torrent_dir": torrent_dir,
"update_interval": update_interval
}, sort_keys=True, indent=4, separators=(',', ': '))
with open(config_file, 'w') as f:
f.write(config_data)
ct = CronTab(user=True)
for job in ct:
if re.match('.*ghetto.*\-e\s{}'.format(config), job.command):
ct.remove(job)
cmd = "{} {} -e {}".format(sys.executable,
os.path.abspath(__file__),
config)
new_job = ct.new(command=cmd)
new_job.minute.every(update_interval)
new_job.enable()
ct.write()
print "Crontab updated."
print "Configuration '{}' has been updated.".format(config) | python | {
"resource": ""
} |
def do_remove(config, config_dir):
    """
    CLI action "remove configuration".
    """
    if not os.path.exists(config_dir):
        print "Configuration '{}' does not exist.".format(config)
        exit(1)
    if confirm("Confirm removal of the configuration '{}'".format(config)):
        # Removes the whole configuration tree (config, filter, blacklist).
        # NOTE(review): the crontab entry installed by do_create is left
        # behind -- confirm whether that is intended.
        shutil.rmtree(config_dir)
        print "Configuration '{}' has been removed.".format(config)
    else:
        print "Removal cancelled."
"resource": ""
} |
def do_exec(config, config_dir):
    """
    CLI action "process the feed from specified configuration".

    Fetches the configured RSS feed, filters item titles through the
    filter/blacklist pattern files, and downloads the surviving .torrent
    files into the configured output directory.
    """
    if not os.path.exists(config_dir):
        print "Configuration '{}' does not exist.".format(config)
        exit(1)
    print "The parser for '{}' config has been initialized.".format(config)
    config_file = os.path.join(config_dir, 'config')
    with open(config_file, 'r') as f:
        config_data = json.load(f)
    url = config_data['url']
    torrent_dir = config_data['torrent_dir']
    ensure_dir(torrent_dir)
    # Pattern files are optional; read_file returns [] when missing.
    filters_file = os.path.join(config_dir, 'filter')
    filters = read_file(filters_file)
    blacklist_file = os.path.join(config_dir, 'blacklist')
    blacklist = read_file(blacklist_file)
    print "Fetching URL {}".format(url)
    r = requests.get(url)
    if r.status_code != 200:
        # NOTE(review): only prints and then continues to parse the body
        # anyway -- confirm whether a non-200 should abort instead.
        print "Failed to fetch RSS feed."
    xml = r.text.encode('utf-8')
    # recover=True lets lxml tolerate slightly malformed feeds.
    parser = etree.XMLParser(ns_clean=True, recover=True, encoding='utf-8')
    tree = etree.fromstring(xml, parser)
    items = tree.xpath('//item')
    downloaded = 0
    for e in items:
        e_title = e.xpath('title')[0].text
        e_link = e.xpath('link')[0].text
        if not drop_it(e_title, filters, blacklist):
            downloaded += 1
            target_file = os.path.join(torrent_dir, e_title + '.torrent')
            # Stream the download to disk in 4 KiB chunks.
            r = requests.get(e_link, stream=True)
            with open(target_file, 'wb') as f:
                for chunk in r.iter_content(4096):
                    f.write(chunk)
    print "Items found: {}, items downloaded: {}."\
        .format(len(items), downloaded)
"resource": ""
} |
def do_filter(config, config_dir):
    """
    CLI action "run editor for filters list".
    """
    if not os.path.exists(config_dir):
        print "Configuration '{}' does not exist.".format(config)
        exit(1)
    # Open the configuration's filter file in the user's $EDITOR.
    editor = os.environ["EDITOR"]
    config_filter = os.path.join(config_dir, 'filter')
    call([editor, config_filter])
    print "Filter configuration has been updated."
"resource": ""
} |
def do_blacklist(config, config_dir):
    """
    CLI action "run editor for blacklist".
    """
    if not os.path.exists(config_dir):
        print "Configuration '{}' does not exist.".format(config)
        exit(1)
    # Open the configuration's blacklist file in the user's $EDITOR.
    editor = os.environ["EDITOR"]
    config_blacklist = os.path.join(config_dir, 'blacklist')
    call([editor, config_blacklist])
    print "Blacklist configuration has been updated."
"resource": ""
} |
def action(act, config):
    """
    CLI action preprocessor: dispatch *act* to the matching do_* handler.

    A falsy *config* is a silent no-op; "list" needs no config directory,
    every other action receives (config, config_dir).
    """
    if not config:
        pass
    elif act == "list":
        # Fixed: was `act is "list"` -- identity comparison against a string
        # literal only works by CPython interning accident.
        do_list()
    else:
        config_dir = os.path.join(CONFIG_ROOT, config)
        globals()["do_" + act](config, config_dir)
"resource": ""
} |
def is_discrete(self):
    """
    Return whether this space is discrete, i.e. every domain is discrete.
    """
    return all(domain.is_discrete() for domain in self.domains.values())
"resource": ""
} |
def consistent(self,lab):
    """
    Check whether the labeling *lab* is consistent with every constraint.
    """
    return all(const.consistent(lab) for const in self.constraints)
"resource": ""
} |
def satisfied(self,lab):
    """
    Check whether the labeling *lab* satisfies every constraint.
    """
    return all(const.satisfied(lab) for const in self.constraints)
"resource": ""
} |
def from_inline(cls: Type[MembershipType], version: int, currency: str, membership_type: str,
                inline: str) -> MembershipType:
    """
    Return Membership instance from inline format

    :param version: Version of the document
    :param currency: Name of the currency
    :param membership_type: "IN" or "OUT" to enter or exit membership
    :param inline: Inline string format
    :return: a Membership built from the parsed inline fields
    :raises MalformedDocumentError: if *inline* does not match the expected layout
    """
    data = Membership.re_inline.match(inline)
    if data is None:
        raise MalformedDocumentError("Inline membership ({0})".format(inline))
    # Group layout: issuer : signature : membership blockstamp :
    # identity blockstamp : uid
    issuer = data.group(1)
    signature = data.group(2)
    membership_ts = BlockUID.from_str(data.group(3))
    identity_ts = BlockUID.from_str(data.group(4))
    uid = data.group(5)
    return cls(version, currency, issuer, membership_ts, membership_type, uid, identity_ts, signature)
"resource": ""
} |
def from_signed_raw(cls: Type[MembershipType], signed_raw: str) -> MembershipType:
    """
    Return Membership instance from signed raw format

    :param signed_raw: Signed raw format string
    :return: a Membership built from the parsed document fields
    """
    lines = signed_raw.splitlines(True)
    # n walks the document line by line; the fields must appear in exactly
    # this order for parse_field to succeed.
    n = 0
    version = int(Membership.parse_field("Version", lines[n]))
    n += 1
    Membership.parse_field("Type", lines[n])
    n += 1
    currency = Membership.parse_field("Currency", lines[n])
    n += 1
    issuer = Membership.parse_field("Issuer", lines[n])
    n += 1
    membership_ts = BlockUID.from_str(Membership.parse_field("Block", lines[n]))
    n += 1
    membership_type = Membership.parse_field("Membership", lines[n])
    n += 1
    uid = Membership.parse_field("UserID", lines[n])
    n += 1
    identity_ts = BlockUID.from_str(Membership.parse_field("CertTS", lines[n]))
    n += 1
    signature = Membership.parse_field("Signature", lines[n])
    n += 1
    return cls(version, currency, issuer, membership_ts,
               membership_type, uid, identity_ts, signature)
"resource": ""
} |
def make_basic_daemon(workspace=None):
    """Detach the current process as a Unix daemon (classic double fork).

    Args:
        workspace: directory to chdir into; defaults to the current one.
    """
    workspace = workspace or os.getcwd()
    # first fork: the parent exits so the child is adopted by init
    if os.fork():
        os._exit(0)
    # change env: detach from the controlling terminal and reset umask
    os.chdir(workspace)
    os.setsid()
    os.umask(0o22)
    # second fork: ensures the daemon can never reacquire a terminal
    if os.fork():
        os._exit(0)
    # reset stdin/stdout/stderr to /dev/null
    null = os.open('/dev/null', os.O_RDWR)
    try:
        for i in range(0, 3):
            try:
                os.dup2(null, i)
            except OSError as error:
                # EBADF just means the fd was already closed; ignore it.
                if error.errno != errno.EBADF:
                    raise
    finally:
        os.close(null)
"resource": ""
} |
def process_kill(pid, sig=None):
    """Send a signal to the process *pid*.

    Args:
        pid: target process id.
        sig: signal number; defaults to SIGTERM when None. Signal 0 (the
            POSIX "does the process exist" probe) is a valid value -- the
            old ``sig or SIGTERM`` default silently replaced it with
            SIGTERM.
    """
    if sig is None:
        sig = signal.SIGTERM
    os.kill(pid, sig)
"resource": ""
} |
def load_pid(pidfile):
    """Read a process id from *pidfile*.

    Args:
        pidfile: path of the pid file; may be None or empty.

    Returns:
        int: the stored pid, or 0 when the file is missing, empty, or does
        not contain an integer (a truncated pid file used to crash int()).
    """
    if pidfile and os.path.isfile(pidfile):
        with open(pidfile, "r", encoding="utf-8") as fobj:
            first_line = fobj.readline().strip()
        try:
            return int(first_line)
        except ValueError:
            # Corrupted/empty pid file is treated as "no pid recorded".
            return 0
    return 0
"resource": ""
} |
def write_pidfile(pidfile):
    """Write the current process id to *pidfile*.

    Args:
        pidfile: path of the pid file; if falsy, nothing is written.

    Returns:
        int: the current process id.
    """
    pid = os.getpid()
    if pidfile:
        # The explicit ``encoding`` kwarg makes this Python-3-only code, so
        # the former six.u() wrapper was a no-op and has been dropped.
        with open(pidfile, "w", encoding="utf-8") as fobj:
            fobj.write(str(pid))
    return pid
"resource": ""
} |
def is_running(pid):
    """Report whether the process with *pid* is alive and not a zombie."""
    process = get_process(pid)
    return bool(
        process
        and process.is_running()
        and process.status() != "zombie"
    )
"resource": ""
} |
def clean_pid_file(pidfile):
    """Delete *pidfile* if present; a falsy or missing path is a no-op."""
    if not pidfile:
        return
    if os.path.exists(pidfile):
        os.unlink(pidfile)
"resource": ""
} |
def daemon_start(main, pidfile, daemon=True, workspace=None):
    """Start application in background mode if required and available. If not then in front mode.

    Args:
        main: zero-argument callable run once daemonization is settled.
        pidfile: path used to detect/record the running instance; falsy
            disables pidfile handling.
        daemon: request background (daemon) mode; only honored on POSIX.
        workspace: working directory; defaults to the current one.
    """
    logger.debug("start daemon application pidfile={pidfile} daemon={daemon} workspace={workspace}.".format(pidfile=pidfile, daemon=daemon, workspace=workspace))
    new_pid = os.getpid()
    workspace = workspace or os.getcwd()
    os.chdir(workspace)
    daemon_flag = False
    if pidfile and daemon:
        old_pid = load_pid(pidfile)
        if old_pid:
            logger.debug("pidfile {pidfile} already exists, pid={pid}.".format(pidfile=pidfile, pid=old_pid))
        # if old service is running, just exit (exit code 95).
        if old_pid and is_running(old_pid):
            error_message = "Service is running in process: {pid}.".format(pid=old_pid)
            logger.error(error_message)
            six.print_(error_message, file=os.sys.stderr)
            os.sys.exit(95)
        # clean stale pid file left by a dead instance.
        clean_pid_file(pidfile)
    # start as background mode if required and available (POSIX only).
    if daemon and os.name == "posix":
        make_basic_daemon()
        daemon_flag = True
    if daemon_flag:
        logger.info("Start application in DAEMON mode, pidfile={pidfile} pid={pid}".format(pidfile=pidfile, pid=new_pid))
    else:
        logger.info("Start application in FRONT mode, pid={pid}.".format(pid=new_pid))
    # Record our pid and make sure it is cleaned up on interpreter exit.
    write_pidfile(pidfile)
    atexit.register(clean_pid_file, pidfile)
    main()
    return
"resource": ""
} |
def is_dicom(filename):
    '''returns Boolean of whether the given file has the DICOM magic number

    A DICOM file starts with a 128-byte preamble followed by the 4 bytes
    "DICM". Read in binary mode: the preamble is arbitrary bytes and could
    break text decoding.
    '''
    try:
        with open(filename, 'rb') as f:
            header = f.read(132)
        return header[128:132] == b"DICM"
    except (IOError, OSError):
        # Unreadable or missing files are simply "not DICOM".
        return False
"resource": ""
} |
def info(filename):
    '''returns a DicomInfo object containing the header information in ``filename``

    Shells out to the AFNI ``dicom_hdr`` tool twice (once for the full
    -sexinfo dump, once for slice times) and parses its text output.
    Returns None if the tool fails on the file.
    '''
    try:
        out = subprocess.check_output([_dicom_hdr,'-sexinfo',filename])
    except subprocess.CalledProcessError:
        return None
    slice_timing_out = subprocess.check_output([_dicom_hdr,'-slice_times',filename])
    # The first 5 whitespace-separated tokens are a textual prefix; the
    # remaining tokens are the per-slice times.
    slice_timing = [float(x) for x in slice_timing_out.strip().split()[5:]]
    frames = []
    # Each frame line looks like: "GGGG EEEE  size [offset ] //label//value"
    for frame in re.findall(r'^(\w{4}) (\w{4})\s+(\d+) \[(\d+)\s+\] \/\/(.*?)\/\/(.*?)$',out,re.M):
        new_frame = {}
        new_frame['addr'] = (int(frame[0],16),int(frame[1],16))
        new_frame['size'] = int(frame[2])
        new_frame['offset'] = int(frame[3])
        new_frame['label'] = frame[4].strip()
        new_frame['value'] = frame[5].strip()
        frames.append(new_frame)
    # "key = value" lines from the -sexinfo section.
    sex_info = {}
    for i in re.findall(r'^(.*?)\s+= (.*)$',out,re.M):
        sex_info[i[0]] = i[1]
    return DicomInfo(frames,sex_info,slice_timing)
"resource": ""
} |
def info_for_tags(filename,tags):
    '''return a dictionary for the given ``tags`` in the header of the DICOM file ``filename``

    ``tags`` is expected to be a list of tuples that contains the DICOM address in hex values.

    basically a rewrite of :meth:`info` because it's so slow. This is a lot faster and more reliable'''
    # Allow a single (group, element) tuple as shorthand for a 1-item list.
    if isinstance(tags,tuple):
        tags = [tags]
    d = pydicom.read_file(filename)
    return_dict = {}
    # info() is expensive, so it is run lazily and at most once.
    dicom_info = None
    for k in tags:
        if k in d:
            return_dict[k] = d[k].value
        else:
            # Backup to the old method
            if dicom_info==None:
                dicom_info = info(filename)
            # NOTE(review): info() can return None on failure, which would
            # make the .addr() call below raise -- confirm intended.
            i = dicom_info.addr(k)
            if i:
                return_dict[k] = nl.numberize(i['value'])
    return return_dict
"resource": ""
} |
def scan_dir(dirname,tags=None,md5_hash=False):
    '''scans a directory tree and returns a dictionary with files and key DICOM tags

    The return value maps absolute filenames to dictionaries of tag/value
    pairs for each requested DICOM tag (tuples of hex numbers).

    If ``tags`` is not given, the default list is:

    :0008 0021: Series date
    :0008 0031: Series time
    :0008 103E: Series description
    :0008 0080: Institution name
    :0010 0020: Patient ID
    :0028 0010: Image rows
    :0028 0011: Image columns

    If ``md5_hash`` is ``True``, the MD5 hash of each file is also stored
    (under the key ``'md5'``), which is useful for detecting duplicates.
    '''
    if tags is None:
        tags = [
            (0x0008, 0x0021),
            (0x0008, 0x0031),
            (0x0008, 0x103E),
            (0x0008, 0x0080),
            (0x0010, 0x0020),
            (0x0028, 0x0010),
            (0x0028, 0x0011),
        ]
    results = {}
    for root, _dirs, filenames in os.walk(dirname):
        for name in filenames:
            fullname = os.path.join(root, name)
            if not is_dicom(fullname):
                continue
            entry = info_for_tags(fullname, tags)
            if md5_hash:
                entry['md5'] = nl.hash(fullname)
            results[fullname] = entry
    return results
"resource": ""
} |
def create_dset(directory,slice_order='alt+z',sort_order='zt',force_slices=None):
    '''tries to autocreate a dataset from images in the given directory

    Thin wrapper that forwards all arguments to the DICOM-specific
    implementation ``_create_dset_dicom``.
    '''
    return _create_dset_dicom(directory,slice_order,sort_order,force_slices=force_slices)
"resource": ""
} |
def date_for_str(date_str):
    '''tries to guess date from ambiguous date string

    Tries every ordering of %Y/%m/%d run together and returns the first
    datetime that parses, or None when nothing matches. (The original
    version abused ``raise StopIteration`` as a loop break.)
    '''
    for fmt_parts in itertools.permutations(['%Y', '%m', '%d']):
        try:
            return datetime.strptime(date_str, ''.join(fmt_parts))
        except ValueError:
            continue
    return None
"resource": ""
} |
def organize_dir(orig_dir):
    '''scans through the given directory and organizes DICOMs that look similar into subdirectories

    output directory is the ``orig_dir`` with ``-sorted`` appended to the end'''
    # Tags used both for clustering and for naming the run directories.
    tags = [
        (0x10,0x20), # Subj ID
        (0x8,0x21),  # Date
        (0x8,0x31),  # Time
        (0x8,0x103e) # Descr
    ]
    orig_dir = orig_dir.rstrip('/')
    files = scan_dir(orig_dir,tags=tags,md5_hash=True)
    # Delete byte-identical duplicates, keeping the first copy.
    dups = find_dups(files)
    for dup in dups:
        nl.notify('Found duplicates of %s...' % dup[0])
        for each_dup in dup[1:]:
            nl.notify('\tdeleting %s' % each_dup)
            try:
                os.remove(each_dup)
            except IOError:
                nl.notify('\t[failed]')
            del(files[each_dup])
    clustered = cluster_files(files)
    output_dir = '%s-sorted' % orig_dir
    for key in clustered:
        # Normalize the series time to an integer string for the dir name.
        if (0x8,0x31) in clustered[key]['info']:
            clustered[key]['info'][(0x8,0x31)] = str(int(float(clustered[key]['info'][(0x8,0x31)])))
        for t in tags:
            if t not in clustered[key]['info']:
                clustered[key]['info'][t] = '_'
        run_name = '-'.join([scrub_fname(str(clustered[key]['info'][x])) for x in tags])+'-%d_images' %len(clustered[key]['files'])
        run_dir = os.path.join(output_dir,run_name)
        nl.notify('Moving files into %s' % run_dir)
        try:
            if not os.path.exists(run_dir):
                os.makedirs(run_dir)
        except IOError:
            nl.notify('Error: failed to create directory %s' % run_dir)
        else:
            for f in clustered[key]['files']:
                try:
                    # Leading dots confuse many tools; replace with "_".
                    dset_fname = os.path.split(f)[1]
                    if dset_fname[0]=='.':
                        dset_fname = '_' + dset_fname[1:]
                    os.rename(f,os.path.join(run_dir,dset_fname))
                except (IOError, OSError):
                    pass
    # Prune any empty directories left behind (bottom-up walk).
    for r,ds,fs in os.walk(output_dir,topdown=False):
        for d in ds:
            dname = os.path.join(r,d)
            if len(os.listdir(dname))==0:
                # Fixed: os.remove() cannot delete a directory on POSIX
                # (raises IsADirectoryError); os.rmdir() is required.
                os.rmdir(dname)
"resource": ""
} |
def reconstruct_files(input_dir):
    '''sorts ``input_dir`` and tries to reconstruct the subdirectories found'''
    input_dir = input_dir.rstrip('/')
    with nl.notify('Attempting to organize/reconstruct directory'):
        # Some datasets start with a ".", which confuses many programs;
        # prefix such files with "i" before sorting.
        for r,ds,fs in os.walk(input_dir):
            for f in fs:
                if f[0]=='.':
                    shutil.move(os.path.join(r,f),os.path.join(r,'i'+f))
        nl.dicom.organize_dir(input_dir)
        # organize_dir writes its results into "<input_dir>-sorted".
        output_dir = '%s-sorted' % input_dir
        if os.path.exists(output_dir):
            with nl.run_in(output_dir):
                for dset_dir in os.listdir('.'):
                    with nl.notify('creating dataset from %s' % dset_dir):
                        nl.dicom.create_dset(dset_dir)
        else:
            nl.notify('Warning: failed to auto-organize directory %s' % input_dir,level=nl.level.warning)
"resource": ""
} |
def unpack_archive(fname,out_dir):
    '''unpacks the archive file ``fname`` and reconstructs datasets into ``out_dir``

    Datasets are reconstructed and auto-named using :meth:`create_dset`. The raw directories
    that made the datasets are archived with the dataset name suffixed by ``tgz``, and any other
    files found in the archive are put into ``other_files.tgz``'''
    with nl.notify('Unpacking archive %s' % fname):
        # Work in a throwaway temp tree; removed at the end.
        tmp_dir = tempfile.mkdtemp()
        tmp_unpack = os.path.join(tmp_dir,'unpack')
        os.makedirs(tmp_unpack)
        nl.utils.unarchive(fname,tmp_unpack)
        reconstruct_files(tmp_unpack)
        out_dir = os.path.abspath(out_dir)
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        # reconstruct_files leaves its output in "<tmp_unpack>-sorted";
        # nothing sorted means nothing to collect.
        if not os.path.exists(tmp_unpack+'-sorted'):
            return
        with nl.run_in(tmp_unpack+'-sorted'):
            # Compress bare NIfTI files, then move all .nii.gz to out_dir
            # (never overwriting existing files).
            for fname in glob.glob('*.nii'):
                nl.run(['gzip',fname])
            for fname in glob.glob('*.nii.gz'):
                new_file = os.path.join(out_dir,fname)
                if not os.path.exists(new_file):
                    shutil.move(fname,new_file)
            # Archive each raw source directory under out_dir/raw.
            raw_out = os.path.join(out_dir,'raw')
            if not os.path.exists(raw_out):
                os.makedirs(raw_out)
            for rawdir in os.listdir('.'):
                rawdir_tgz = os.path.join(raw_out,rawdir+'.tgz')
                if not os.path.exists(rawdir_tgz):
                    with tarfile.open(rawdir_tgz,'w:gz') as tgz:
                        tgz.add(rawdir)
        if len(os.listdir(tmp_unpack))!=0:
            # There are still raw files left that were not reconstructed.
            with tarfile.open(os.path.join(raw_out,'other_files.tgz'),'w:gz') as tgz:
                tgz.add(tmp_unpack)
        shutil.rmtree(tmp_dir)
"resource": ""
} |
def static(cls):
    r"""Converts the given class into a static one, by changing all the methods of it into static methods.

    Relies on the Python-2 ``im_func`` attribute of unbound methods;
    attributes without it are left untouched.

    Args:
        cls (class): The class to be converted.
    """
    for attr_name in dir(cls):
        unbound = getattr(getattr(cls, attr_name), 'im_func', None)
        if unbound is not None:
            setattr(cls, attr_name, staticmethod(unbound))
    return cls
"resource": ""
} |
q43489 | _generate_mark_code | train | def _generate_mark_code(rule_name):
"""Generates a two digit string based on a provided string
Args:
rule_name (str): A configured rule name 'pytest_mark3'.
Returns:
str: A two digit code based on the provided string '03'
"""
code = ''.join([i for i in str(rule_name) if i.isdigit()])
code = code.zfill(2)
return code | python | {
"resource": ""
} |
def rule_n5xx(filename, rule_name, rule_conf, class_type):
    """Validate filename against a pattern if the filename passes the filter.

    Args:
        filename (str): The name of the file being parsed by flake8.
        rule_name (str): The name of the rule.
        rule_conf (dict): The dictionary containing the properties of the rule
        class_type (class): The class that this rule was called from

    Yields:
        tuple: (int, int, str, type) the tuple used by flake8 to construct a violation
    """
    code = _generate_mark_code(rule_name)
    message = "N5{} filename failed regex validation '{}'".format(code, rule_conf['filename_regex'])
    # Strip path and extension before applying either regex.
    stem = splitext(basename(filename))[0]
    if re.match(rule_conf['filter_regex'], stem) and not re.match(rule_conf['filename_regex'], stem):
        yield (0, 0, message, class_type)
"resource": ""
} |
def upsert_many(col, data):
    """
    Upsert a batch of documents into collection *col*.

    Only usable when every document carries an "_id" field: each document
    is first applied as a ``$set`` update keyed on its "_id"; documents
    that matched nothing are collected and inserted in one bulk call.
    """
    ready_to_insert = list()
    for doc in data:
        res = col.update({"_id": doc["_id"]}, {"$set": doc}, upsert=False)
        # Nothing was modified AND no existing document matched (i.e. the
        # update was not merely a no-op on identical values), so this
        # document is genuinely new and must be inserted.
        if res["nModified"] == 0 and res["updatedExisting"] is False:
            ready_to_insert.append(doc)
    col.insert(ready_to_insert)
"resource": ""
} |
def resolve(args):
    """Just print the result of parsing a target string."""
    # Enforce exactly one argument: the old `if not args` check contradicted
    # the error message (and the sibling `build` command) by silently
    # accepting extra arguments.
    if len(args) != 1:
        log.error('Exactly 1 argument is required.')
        app.quit(1)
    print(address.new(args[0]))
"resource": ""
} |
def build(args):
    """Build a target and its dependencies.

    Expects exactly one target string; resolves it, loads the dependency
    graph, and runs the build, quitting with status 1 on any failure.
    """
    if len(args) != 1:
        log.error('One target required.')
        app.quit(1)
    target = address.new(args[0])
    log.info('Resolved target to: %s', target)
    try:
        bb = Butcher()
        bb.clean()
        bb.load_graph(target)
        bb.build(target)
    except (gitrepo.GitError,
            error.BrokenGraph,
            error.NoSuchTargetError) as err:
        # Setup/graph errors: report and quit.
        log.fatal(err)
        app.quit(1)
    except error.OverallBuildFailure as err:
        # Build ran but nodes failed: dump the per-node failure log.
        log.fatal(err)
        log.fatal('Error list:')
        [log.fatal(' [%s]: %s', e.node, e) for e in bb.failure_log]
        app.quit(1)
"resource": ""
} |
def rebuild(args):
    """Rebuild a target and deps, even if it has been built and cached."""
    if len(args) != 1:
        log.fatal('One target required.')
        app.quit(1)
    # Disable cache fetching both in the global options and on the Butcher
    # class so the delegated build starts from scratch.
    app.set_option('disable_cache_fetch', True)
    Butcher.options['cache_fetch'] = False
    build(args)
"resource": ""
} |
def dump(args):
    """Load the build graph for a target and dump it to stdout."""
    if len(args) != 1:
        log.error('One target required.')
        app.quit(1)
    try:
        bb = Butcher()
        bb.load_graph(args[0])
    except error.BrokenGraph as lolno:
        log.fatal(lolno)
        app.quit(1)
    # Pretty-print the raw networkx node and edge dictionaries.
    print "Nodes:"
    pprint.pprint(bb.graph.node)
    print "Edges:"
    pprint.pprint(bb.graph.edge)
"resource": ""
} |
q43496 | draw | train | def draw(args):
"""Load the build graph for a target and render it to an image."""
if len(args) != 2:
log.error('Two arguments required: [build target] [output file]')
app.quit(1)
target = args[0]
out = args[1]
try:
bb = Butcher()
bb.load_graph(target)
except error.BrokenGraph as lolno:
log.fatal(lolno)
app.quit(1)
# Filter down to the target and all of its transitive dependencies.
# TODO: make it possible to optionally draw the entire graph
filtered_graph = bb.graph.subgraph(
networkx.topological_sort(bb.graph, nbunch=[address.new(target)]))
a = networkx.to_agraph(filtered_graph)
a.draw(out, prog='dot')
log.info('Graph written to %s', out) | python | {
"resource": ""
} |
q43497 | Butcher.setup_function | train | def setup_function(self):
"""Runs prior to the global main function."""
log.options.LogOptions.set_stderr_log_level('google:INFO')
if app.get_options().debug:
log.options.LogOptions.set_stderr_log_level('google:DEBUG')
if not app.get_options().build_root:
app.set_option('build_root', os.path.join(
app.get_options().butcher_basedir, 'build'))
self.buildroot = app.get_options().build_root
if not os.path.exists(self.buildroot):
os.makedirs(self.buildroot)
if app.get_options().disable_cache_fetch:
self.options['cache_fetch'] = False
if app.get_options().disable_hardlinks:
base.BaseBuilder.linkfiles = False | python | {
"resource": ""
} |
q43498 | Butcher.clean | train | def clean(self):
"""Clear the contents of the build area."""
if os.path.exists(self.buildroot):
log.info('Clearing the build area.')
log.debug('Deleting: %s', self.buildroot)
shutil.rmtree(self.buildroot)
os.makedirs(self.buildroot) | python | {
"resource": ""
} |
q43499 | Butcher.paths_wanted | train | def paths_wanted(self):
"""The set of paths where we expect to find missing nodes."""
return set(address.new(b, target='all') for b in self.missing_nodes) | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.