code
string | signature
string | docstring
string | loss_without_docstring
float64 | loss_with_docstring
float64 | factor
float64 |
|---|---|---|---|---|---|
# Upsert the fingerprint + mtime record for fullpath; if the file has
# vanished, drop any stale fingerprint rows instead.
try:
    # Compute the fingerprint only when the caller did not supply one
    fingerprint = fingerprint or utils.file_fingerprint(fullpath)
    record = model.FileFingerprint.get(file_path=fullpath)
    if record:
        record.set(fingerprint=fingerprint,
                   file_mtime=os.stat(fullpath).st_mtime)
    else:
        record = model.FileFingerprint(
            file_path=fullpath,
            fingerprint=fingerprint,
            file_mtime=os.stat(fullpath).st_mtime)
    orm.commit()
except FileNotFoundError:
    # File disappeared between scheduling and processing; purge its rows
    orm.delete(fp for fp in model.FileFingerprint if fp.file_path == fullpath)
|
def set_fingerprint(fullpath, fingerprint=None)
|
Set the last known modification time for a file
| 2.718221
| 2.782012
| 0.97707
|
# Watch content_dir recursively and route filesystem events to
# IndexWatchdog; the observer runs on its own thread after start().
observer = watchdog.observers.Observer()
observer.schedule(IndexWatchdog(content_dir),
                  content_dir, recursive=True)
logging.info("Watching %s for changes", content_dir)
observer.start()
|
def background_scan(content_dir)
|
Start background scanning a directory for changes
| 3.21898
| 3.086932
| 1.042777
|
# Delete rows whose backing file no longer exists on disk.
try:
    for item in table.select():
        if not os.path.isfile(item.file_path):
            logger.info("File disappeared: %s", item.file_path)
            item.delete()
except: # pylint:disable=bare-except
    # Best-effort cleanup: never let a prune failure kill the worker thread
    logger.exception("Error pruning %s", table)
|
def prune_missing(table)
|
Prune any files which are missing from the specified table
| 3.357378
| 3.182884
| 1.054822
|
def scan_directory(root, files):
    # Worker: fingerprint each file and schedule changed ones for rescan
    try:
        for file in files:
            fullpath = os.path.join(root, file)
            relpath = os.path.relpath(fullpath, content_dir)
            fingerprint = utils.file_fingerprint(fullpath)
            last_fingerprint = get_last_fingerprint(fullpath)
            # SCHEDULED_FILES.add returns False when already queued, so a
            # changed file is scheduled at most once
            if fingerprint != last_fingerprint and SCHEDULED_FILES.add(fullpath):
                scan_file(fullpath, relpath, False)
    except: # pylint:disable=bare-except
        logger.exception("Got error parsing directory %s", root)
# Fan the walk out across the thread pool, one task per directory
for root, _, files in os.walk(content_dir, followlinks=True):
    THREAD_POOL.submit(scan_directory, root, files)
# Also prune rows for files that no longer exist, one task per table
for table in (model.Entry, model.Category, model.Image, model.FileFingerprint):
    THREAD_POOL.submit(prune_missing, table)
|
def scan_index(content_dir)
|
Scan all files in a content directory
| 3.886933
| 3.99325
| 0.973376
|
# Thread-safe add: True only if item was newly inserted
with self.lock:
    if item in self.set:
        return False
    self.set.add(item)
    return True
|
def add(self, item)
|
Add an item to the set, and return whether it was newly added
| 3.089935
| 2.59929
| 1.188761
|
# Thread-safe discard: True only if item was actually present
with self.lock:
    if item in self.set:
        self.set.remove(item)
        return True
    return False
|
def remove(self, item)
|
Remove an item from the set, returning if it was present
| 3.264029
| 2.585023
| 1.262669
|
# Schedule a reindex unless one is already pending for this path
if SCHEDULED_FILES.add(fullpath):
    logger.debug("Scheduling reindex of %s", fullpath)
    relpath = os.path.relpath(fullpath, self.content_dir)
    THREAD_POOL.submit(scan_file, fullpath, relpath, False)
|
def update_file(self, fullpath)
|
Update a file
| 4.847384
| 5.327325
| 0.90991
|
# Directory events are ignored; only files are indexed
logger.debug("file created: %s", event.src_path)
if not event.is_directory:
    self.update_file(event.src_path)
|
def on_created(self, event)
|
on_created handler
| 3.273347
| 3.532201
| 0.926716
|
# Directory events are ignored; only files are indexed
logger.debug("file modified: %s", event.src_path)
if not event.is_directory:
    self.update_file(event.src_path)
|
def on_modified(self, event)
|
on_modified handler
| 3.122998
| 3.368633
| 0.927082
|
# A move reindexes the destination path; the source is picked up by
# the prune pass when it is found missing
logger.debug("file moved: %s -> %s", event.src_path, event.dest_path)
if not event.is_directory:
    self.update_file(event.dest_path)
|
def on_moved(self, event)
|
on_moved handler
| 3.030487
| 3.201307
| 0.946641
|
# Reindexing a deleted path lets the scanner notice the file is gone —
# presumably scan_file/set_fingerprint handles the FileNotFoundError case
logger.debug("File deleted: %s", event.src_path)
if not event.is_directory:
    self.update_file(event.src_path)
|
def on_deleted(self, event)
|
on_deleted handler
| 3.46156
| 3.635058
| 0.952271
|
view_spec = {}
# 'date' and 'id' are mutually exclusive ways to anchor the view
if 'date' in args:
    view_spec['date'] = args['date']
elif 'id' in args:
    view_spec['start'] = args['id']
if 'tag' in args:
    view_spec['tag'] = args.getlist('tag')
    # Collapse a single-element tag list down to a bare string
    if len(view_spec['tag']) == 1:
        view_spec['tag'] = args['tag']
return view_spec
|
def parse_view_spec(args)
|
Parse a view specification from a request arg list
| 2.459209
| 2.423764
| 1.014624
|
# Deleted entries are never future-dated, so pin future=False
query = queries.build_query({**self.spec,
                             'future': False,
                             '_deleted': True})
return [Entry(e) for e in query]
|
def deleted(self)
|
Gets the deleted entries from the view
| 14.256639
| 11.942745
| 1.193749
|
# Most recent modification time across all entries; for an empty view
# fall back to arrow.get() ("now") so callers always get a timestamp
if self.entries:
    latest = max(self.entries, key=lambda x: x.last_modified)
    return arrow.get(latest.last_modified)
return arrow.get()
|
def last_modified(self)
|
Gets the most recent modification time for all entries in the view
| 3.826771
| 3.335253
| 1.147371
|
# "Previous" depends on sort order: older page when ascending,
# newer page when descending; None when unordered
if self._order_by == 'oldest':
    return self.older
if self._order_by == 'newest':
    return self.newer
return None
|
def previous(self)
|
Gets the previous page, respecting sort order
| 4.539652
| 4.248258
| 1.068592
|
# Mirror of previous(): newer page when ascending, older when descending
if self._order_by == 'oldest':
    return self.newer
if self._order_by == 'newest':
    return self.older
return None
|
def next(self)
|
Gets the next page, respecting sort order
| 5.074751
| 4.419157
| 1.148353
|
# For an ordered view the newest entry is at a known end; otherwise
# scan, breaking date ties toward the highest id
if self._order_by == 'newest':
    return self.first
if self._order_by == 'oldest':
    return self.last
return max(self.entries, key=lambda x: (x.date, x.id))
|
def newest(self)
|
Gets the newest entry in the view, regardless of sort order
| 4.087729
| 3.526376
| 1.159187
|
# For an ordered view the oldest entry is at a known end; otherwise
# scan, breaking date ties toward the highest id (-x.id under min)
if self._order_by == 'newest':
    return self.last
if self._order_by == 'oldest':
    return self.first
return min(self.entries, key=lambda x: (x.date, -x.id))
|
def oldest(self)
|
Gets the oldest entry in the view, regardless of sort order
| 3.926481
| 3.592182
| 1.093063
|
# Date-based views page by the date span (day/month/year); everything
# else pages by offset
if 'date' in self.spec:
    _, date_span, _ = utils.parse_date(self.spec['date'])
    return date_span
return 'offset'
|
def paging(self)
|
Gets the pagination type; compatible with entry.archive(page_type=...)
| 10.052858
| 7.707023
| 1.304376
|
# Rewind to the first page, then walk forward collecting every page
cur = self
pages = []
while cur.previous:
    cur = cur.previous
while cur:
    pages.append(cur)
    cur = cur.next
return pages
|
def pages(self)
|
Gets a list of all pages for this view
| 3.890957
| 3.741241
| 1.040018
|
# Normalize the spec's tag value to a list (it may be a bare string)
tag_list = self.spec.get('tag', [])
if isinstance(tag_list, (list, set, tuple)):
    return list(tag_list)
return [tag_list]
|
def tags(self)
|
Returns a list of all the tags applied to this view
| 3.890856
| 3.39788
| 1.145083
|
oldest = self.oldest
newest = self.newest
# Strip offset-related keys so the neighbor queries aren't constrained
# by this view's own window
base = {key: val for key, val in self.spec.items()
        if key not in OFFSET_PRIORITY}
# First entry strictly before our oldest / after our newest
oldest_neighbor = View({
    **base,
    'before': oldest,
    'order': 'newest'
}).first if oldest else None
newest_neighbor = View({
    **base,
    'after': newest,
    'order': 'oldest'
}).first if newest else None
if 'date' in self.spec:
    return self._get_date_pagination(base, oldest_neighbor, newest_neighbor)
if 'count' in self.spec:
    return self._get_count_pagination(base, oldest_neighbor, newest_neighbor)
# we're not paginating
return None, None
|
def _pagination(self)
|
Compute the neighboring pages from this view.
Returns a tuple of older page, newer page.
| 3.838262
| 3.521249
| 1.090029
|
# Each neighboring page covers the date span (day/month/year) that
# contains the neighbor entry
_, span, date_format = utils.parse_date(self.spec['date'])
if newest_neighbor:
    # span()[0] is the start of the period containing the neighbor
    newer_date = newest_neighbor.date.span(span)[0]
    newer_view = View({**base,
                       'order': self._order_by,
                       'date': newer_date.format(date_format)})
else:
    newer_view = None
if oldest_neighbor:
    older_date = oldest_neighbor.date.span(span)[0]
    older_view = View({**base,
                       'order': self._order_by,
                       'date': older_date.format(date_format)})
else:
    older_view = None
return older_view, newer_view
|
def _get_date_pagination(self, base, oldest_neighbor, newest_neighbor)
|
Compute the pagination for date-based views
| 2.815555
| 2.713493
| 1.037613
|
count = self.spec['count']
out_spec = {**base, 'count': count, 'order': self._order_by}
if self._order_by == 'newest':
    # Older page simply ends at the entry just past our oldest
    older_view = View({**out_spec,
                       'last': oldest_neighbor}) if oldest_neighbor else None
    # Newer page: count forward from the neighbor to find its last entry,
    # then build the real (descending) view ending there
    newer_count = View({**base,
                        'first': newest_neighbor,
                        'order': 'oldest',
                        'count': count}) if newest_neighbor else None
    newer_view = View({**out_spec,
                       'last': newer_count.last}) if newer_count else None
    return older_view, newer_view
if self._order_by == 'oldest':
    # Mirror image of the 'newest' case
    older_count = View({**base,
                        'last': oldest_neighbor,
                        'order': 'newest',
                        'count': count}) if oldest_neighbor else None
    older_view = View({**out_spec,
                       'first': older_count.last}) if older_count else None
    newer_view = View({**out_spec,
                       'first': newest_neighbor}) if newest_neighbor else None
    return older_view, newer_view
return None, None
|
def _get_count_pagination(self, base, oldest_neighbor, newest_neighbor)
|
Compute the pagination for count-based views
| 2.23425
| 2.150248
| 1.039066
|
# Set union: existing tags plus the new ones, deduplicated
return View({**self.spec, 'tag': list(set(self.tags) | set(tags))})
|
def tag_add(self, *tags)
|
Return a view with the specified tags added
| 10.338323
| 6.181255
| 1.672528
|
# Set difference: existing tags minus the given ones
return View({**self.spec, 'tag': list(set(self.tags) - set(tags))})
|
def tag_remove(self, *tags)
|
Return a view with the specified tags removed
| 9.739178
| 5.71143
| 1.705208
|
# Symmetric difference: given tags are removed if present, added if not
return View({**self.spec, 'tag': list(set(self.tags) ^ set(tags))})
|
def tag_toggle(self, *tags)
|
Return a view with the specified tags toggled
| 12.547935
| 6.908254
| 1.816369
|
# Render Markdown through misaka with our HtmlRenderer, then optionally
# apply SmartyPants punctuation, and mark the result safe for templates
processor = misaka.Markdown(HtmlRenderer(config, search_path),
                            extensions=ENABLED_EXTENSIONS)
text = processor(text)
if not config.get('no_smartquotes'):
    text = misaka.smartypants(text)
return flask.Markup(text)
|
def to_html(text, config, search_path)
|
Convert Markdown text to HTML
| 5.632029
| 5.702881
| 0.987576
|
# HACK: If the title starts with something that looks like a list, save it
# for later
pfx, text = re.match(r'([0-9. ]*)(.*)', text).group(1, 2)
text = pfx + misaka.Markdown(TitleRenderer(),
                             extensions=TITLE_EXTENSIONS)(text)
if not markup:
    # Plain-text mode: strip all HTML tags from the rendered title
    strip = HTMLStripper()
    strip.feed(text)
    text = strip.get_data()
if not no_smartquotes:
    text = misaka.smartypants(text)
return flask.Markup(text)
|
def render_title(text, markup=True, no_smartquotes=False)
|
Convert a Markdown title to HTML
| 5.429895
| 5.534555
| 0.98109
|
# pylint: disable=too-many-locals
text = ''
# The image spec is the URL plus an optional quoted title
image_specs = raw_url
if title:
    image_specs += ' "{}"'.format(title)
# Alt text can carry extra container arguments; config supplies defaults
alt, container_args = image.parse_alt_text(alt)
container_args = {**self._config, **container_args}
spec_list, original_count = image.get_spec_list(
    image_specs, container_args)
for spec in spec_list:
    text += self._render_image(spec,
                               container_args,
                               alt)
# If the spec list was truncated, optionally add "more images" text
if original_count > len(spec_list) and 'more_text' in container_args:
    more_text = container_args['more_text'].format(
        count=original_count,
        remain=original_count - len(spec_list))
    if 'more_link' in container_args:
        more_text = '{a}{text}</a>'.format(
            text=more_text,
            a=utils.make_tag('a', {'href': container_args['more_link']}))
    if 'more_class' in container_args:
        more_text = '{div}{text}</div>'.format(
            text=more_text,
            div=utils.make_tag('div', {'class': container_args['more_class']}))
    text += flask.Markup(more_text)
# Optionally wrap the whole set in a styled <div>
if text and (container_args.get('div_class') or
             container_args.get('div_style')):
    text = '{tag}{text}</div>'.format(
        tag=utils.make_tag('div',
                           {'class': container_args.get('div_class'),
                            'style': container_args.get('div_style')}),
        text=text)
# if text is ''/falsy then misaka interprets this as a failed parse...
return text or ' '
|
def image(self, raw_url, title='', alt='')
|
Adapt a standard Markdown image to a generated rendition set.
Container arguments used (in addition to the rendition tags):
div_class -- The CSS class name to use on any wrapper div
div_style -- Additional CSS styles to apply to the wrapper div
count -- The maximum number of images to show at once
more_text -- If there are more than `count` images, add this text indicating
that there are more images to be seen. This string gets two template
arguments, `{count}` which is the total number of images in the set,
and `{remain}` which is the number of images omitted from the set.
more_link -- If `more_text` is shown, this will format the text as a link to this location.
more_class -- If `more_text` is shown, wraps it in a `<div>` with this class.
| 3.092099
| 2.76284
| 1.119174
|
# Syntax-highlight the fence via pygments when a lexer for the language
# exists; otherwise fall back to an escaped <pre> block
if lang and self._config.get('highlight_syntax', 'True'):
    try:
        lexer = pygments.lexers.get_lexer_by_name(lang, stripall=True)
    except pygments.lexers.ClassNotFound:
        lexer = None
    if lexer:
        formatter = pygments.formatters.HtmlFormatter() # pylint: disable=no-member
        return pygments.highlight(text, lexer, formatter)
return '\n<div class="highlight"><pre>{}</pre></div>\n'.format(
    flask.escape(text.strip()))
|
def blockcode(self, text, lang)
|
Pass a code fence through pygments
| 2.731264
| 2.512154
| 1.08722
|
# Remap the target through our link resolution (entries, static assets,
# image renditions) before emitting the anchor tag
link = links.resolve(link, self._search_path,
                     self._config.get('absolute'))
return '{}{}</a>'.format(
    utils.make_tag('a', {
        'href': link,
        # None suppresses the title attribute entirely
        'title': title if title else None
    }),
    content)
|
def link(self, content, link, title='')
|
Emit a link, potentially remapped based on our embed or static rules
| 7.670424
| 6.930268
| 1.106801
|
# if the content contains a top-level div then don't wrap it in a <p>
# tag
if content.startswith('<div') and content.endswith('</div>'):
    return '\n' + content + '\n'
text = '<p>' + content + '</p>'
# Drop any whitespace-only paragraphs produced by the wrap
text = re.sub(r'<p>\s*</p>', r'', text)
# Never return '' — misaka treats a falsy result as a failed parse
return text or ' '
|
def paragraph(content)
|
emit a paragraph, stripping out any leading or following empty paragraphs
| 4.554449
| 4.40994
| 1.032769
|
# Parse the spec and load the image, emitting an inline error span (not
# raising) so one bad image doesn't break the whole page render
try:
    path, image_args, title = image.parse_image_spec(spec)
except Exception as err:  # pylint: disable=broad-except
    logger.exception("Got error on spec %s: %s", spec, err)
    return ('<span class="error">Couldn\'t parse image spec: ' +
            '<code>{}</code> {}</span>'.format(flask.escape(spec),
                                               flask.escape(str(err))))
# Per-image args override the container-level defaults
composite_args = {**container_args, **image_args}
try:
    img = image.get_image(path, self._search_path)
except Exception as err:  # pylint: disable=broad-except
    logger.exception("Got error on image %s: %s", path, err)
    return ('<span class="error">Error loading image {}: {}</span>'.format(
        flask.escape(spec), flask.escape(str(err))))
return img.get_img_tag(title, alt_text, **composite_args)
|
def _render_image(self, spec, container_args, alt_text=None)
|
Render an image specification into an <img> tag
| 2.912915
| 2.782992
| 1.046684
|
# Resolve external URLs (scheme:// or protocol-relative //) unchanged
if re.match(r'([a-z][a-z0-9+.\-]*:)?//', path, re.I):
    return path
# Resolve static assets (@-prefixed)
if path.startswith('@'):
    return utils.static_url(path[1:], absolute)
# Preserve any #fragment through the remapping below
path, sep, anchor = path.partition('#')
# Resolve entries
entry = utils.find_entry(path, search_path)
if entry:
    return entry.permalink(absolute=absolute) + sep + anchor
# Resolve images and assets; on a found image, path becomes the
# rendition URL, otherwise the original path falls through
img_path, img_args, _ = image.parse_image_spec(path)
img = image.get_image(img_path, search_path)
if not isinstance(img, image.ImageNotFound):
    path, _ = img.get_rendition(**img_args)
return path + sep + anchor
|
def resolve(path, search_path, absolute=False)
|
Remap a link or source target to an appropriate entry or image rendition
| 4.052782
| 3.688799
| 1.098672
|
from . import index  # pylint: disable=cyclic-import
if index.in_progress():
    # We are reindexing the site
    return True
if request.if_none_match or request.if_modified_since:
    # we might be returning a 304 NOT MODIFIED based on a client request,
    # and we don't want to cache that as the result for *all* client
    # requests to this URI
    return True
return False
|
def do_not_cache()
|
Return whether we should cache a page render
| 8.186228
| 7.626191
| 1.073436
|
# Published entries are always visible; scheduled ones only once their
# UTC date has arrived (relative to `date`, defaulting to now)
return orm.select(
    e for e in query
    if e.status == model.PublishStatus.PUBLISHED.value or
    (e.status == model.PublishStatus.SCHEDULED.value and
     (e.utc_date <= (date or arrow.utcnow().datetime))
     )
)
|
def where_entry_visible(query, date=None)
|
Generate a where clause for currently-visible entries
Arguments:
date -- The date to generate it relative to (defaults to right now)
| 5.760798
| 6.493023
| 0.887229
|
# Like where_entry_visible but scheduled entries count regardless of date
return orm.select(
    e for e in query
    if e.status in (model.PublishStatus.PUBLISHED.value,
                    model.PublishStatus.SCHEDULED.value))
|
def where_entry_visible_future(query)
|
Generate a where clause for entries that are visible now or in the future
| 7.379477
| 6.910251
| 1.067903
|
# Deleted entries carry the GONE publish status
return orm.select(
    e for e in query
    if e.status == model.PublishStatus.GONE.value)
|
def where_entry_deleted(query)
|
Generate a where clause for entries that have been deleted
| 14.20282
| 15.179907
| 0.935633
|
category = str(category)
if category and recurse:
    # We're recursing and aren't in /, so add the prefix clause
    return orm.select(
        e for e in query
        if e.category == category or e.category.startswith(category + '/')
    )
if not recurse:
    # We're not recursing, so we need an exact match on a possibly-empty
    # category
    return orm.select(e for e in query if e.category == category)
# We're recursing and have no category, which means we're doing nothing
return query
|
def where_entry_category(query, category, recurse=False)
|
Generate a where clause for a particular category
| 5.231829
| 5.10785
| 1.024272
|
# Strictly before ref: earlier local date, or same date with a lower id
return orm.select(
    e for e in query
    if e.local_date < ref.local_date or
    (e.local_date == ref.local_date and e.id < ref.id)
)
|
def where_before_entry(query, ref)
|
Generate a where clause for prior entries
ref -- The entry of reference
| 4.234584
| 4.83524
| 0.875775
|
# Strictly after ref: later local date, or same date with a higher id
return orm.select(
    e for e in query
    if e.local_date > ref.local_date or
    (e.local_date == ref.local_date and
     e.id > ref.id
     )
)
|
def where_after_entry(query, ref)
|
Generate a where clause for later entries
ref -- the entry of reference
| 4.896484
| 5.544452
| 0.883132
|
# At or before ref (inclusive: e.id <= ref.id on date ties)
return orm.select(
    e for e in query
    if e.local_date < ref.local_date or
    (e.local_date == ref.local_date and
     e.id <= ref.id
     )
)
|
def where_entry_last(query, ref)
|
Generate a where clause where this is the last entry
ref -- the entry of reference
| 5.108864
| 5.763481
| 0.88642
|
# At or after ref (inclusive: e.id >= ref.id on date ties)
return orm.select(
    e for e in query
    if e.local_date > ref.local_date or
    (e.local_date == ref.local_date and
     e.id >= ref.id
     )
)
|
def where_entry_first(query, ref)
|
Generate a where clause where this is the first entry
ref -- the entry of reference
| 5.244773
| 5.902227
| 0.888609
|
# entry_type may be a single type or a collection of types
if isinstance(entry_type, (list, set, tuple)):
    return orm.select(e for e in query if e.entry_type in entry_type)
return orm.select(e for e in query if e.entry_type == entry_type)
|
def where_entry_type(query, entry_type)
|
Generate a where clause for entries of certain types
entry_type -- one or more entries to check against
| 2.496916
| 2.689519
| 0.928388
|
# Tag matching is case-insensitive; tag may be one string or many
if isinstance(tag, (list, set, tuple)):
    tags = [t.lower() for t in tag]
    return orm.select(e for e in query for t in e.tags if t.key in tags)
return orm.select(e for e in query for t in e.tags if t.key == tag.lower())
|
def where_entry_tag(query, tag)
|
Generate a where clause for entries with the given tag
| 2.92719
| 2.888458
| 1.013409
|
# Expand the date spec into its full span (day/month/year) and match
# entries whose naive local date falls within it, inclusive
date, interval, _ = utils.parse_date(datespec)
start_date, end_date = date.span(interval)
return orm.select(
    e for e in query if
    e.local_date >= start_date.naive and
    e.local_date <= end_date.naive
)
|
def where_entry_date(query, datespec)
|
Where clause for entries which match a textual date spec
datespec -- The date spec to check for, in YYYY[[-]MM[[-]DD]] format
| 5.057359
| 5.579939
| 0.906347
|
# Anything with an .id duck-types as an entry already
if hasattr(entry, 'id'):
    return entry
# Numeric id, possibly as a string
if isinstance(entry, (int, str)):
    return model.Entry.get(id=int(entry))
raise ValueError("entry is of unknown type {}".format(type(entry)))
|
def get_entry(entry)
|
Helper function to get an entry by ID or by object
| 4.212217
| 3.683701
| 1.143474
|
query = model.Entry.select()
# primarily restrict by publication status; _deleted/future are
# mutually exclusive refinements of visibility
if spec.get('_deleted', False):
    query = where_entry_deleted(query)
elif spec.get('future', False):
    query = where_entry_visible_future(query)
else:
    query = where_entry_visible(query)
# restrict by category
if spec.get('category') is not None:
    path = str(spec.get('category', ''))
    recurse = spec.get('recurse', False)
    query = where_entry_category(query, path, recurse)
if spec.get('entry_type') is not None:
    query = where_entry_type(query, spec['entry_type'])
if spec.get('entry_type_not') is not None:
    query = where_entry_type_not(query, spec['entry_type_not'])
if spec.get('tag') is not None:
    query = where_entry_tag(query, spec['tag'])
if spec.get('date') is not None:
    query = where_entry_date(query, spec['date'])
# window clauses: first/last are inclusive, before/after exclusive
if spec.get('last') is not None:
    query = where_entry_last(query, get_entry(spec['last']))
if spec.get('first') is not None:
    query = where_entry_first(query, get_entry(spec['first']))
if spec.get('before') is not None:
    query = where_before_entry(query, get_entry(spec['before']))
if spec.get('after') is not None:
    query = where_after_entry(query, get_entry(spec['after']))
# tag joins can duplicate rows, so deduplicate the final query
return query.distinct()
|
def build_query(spec)
|
build the where clause based on a view specification
spec -- The view specification. Contains the following possible values:
future -- Boolean; whether to include entries from the future
category -- Which category to limit to
recurse -- Whether to include subcategories
entry_type -- one or more entry types to include
entry_type_not -- one or more entry types to exclude
date -- a date spec
last -- the last entry to end a view on
first -- the first entry to start a view on
before -- get entries from before this one
after -- get entries from after this one
| 2.034182
| 1.783323
| 1.14067
|
# Refuse to initialize over an existing directory; otherwise announce
# where the project is about to be created
if os.path.isdir(path):
    warning_path_exist(path)
    exit(1)
else:
    logger.info('''\033[33m{Info}\033[0m
==> start init your flask project [on]
==> \033[32m%s\033[0m\n''' % path)
|
def start_init_info(path)
|
start init msg
| 6.279002
| 6.224753
| 1.008715
|
# Create app/templates and app/static, plus static/{img,css,js};
# returns (css_path, templates_path) for further scaffolding
templates_path = os.path.join(app_path, 'templates')
static_path = os.path.join(app_path, 'static')
_mkdir_p(templates_path)
_mkdir_p(static_path)
# create {img, css, js}
os.chdir(static_path)
img_path = os.path.join(static_path, 'img')
css_path = os.path.join(static_path, 'css')
js_path = os.path.join(static_path, 'js')
_mkdir_p(img_path)
_mkdir_p(css_path)
_mkdir_p(js_path)
return css_path, templates_path
|
def create_templates_static_files(app_path)
|
create templates and static
| 1.802236
| 1.758389
| 1.024936
|
# Scaffold a blueprint package (__init__/views/forms) under app_path and
# its matching templates folder; returns the templates folder path
blueprint_path = os.path.join(app_path, blueprint)
_mkdir_p(blueprint_path)
# create blueprint files
os.chdir(blueprint_path)
init_code('__init__.py', _init_blueprint_code % (blueprint, blueprint))
init_code('views.py', views_code)
init_code('forms.py', forms_code)
# main blueprint templates
os.chdir(templates_path)
blueprint_templates_path = os.path.join(templates_path, blueprint)
_mkdir_p(blueprint_templates_path)
return blueprint_templates_path
|
def create_blueprint(app_path, blueprint, views_code, forms_code, templates_path)
|
create blueprint
| 2.787143
| 2.78935
| 0.999209
|
# the destination path
dst_path = os.path.join(os.getcwd(), project_name)
start_init_info(dst_path)
# create dst path
_mkdir_p(dst_path)
os.chdir(dst_path)
# create files
init_code('manage.py', _manage_basic_code)
init_code('requirement.txt', _requirement_code)
# create app/
app_path = os.path.join(dst_path, 'app')
_mkdir_p(app_path)
os.chdir(app_path)
# create files
init_code('views.py', _views_basic_code)
init_code('forms.py', _forms_basic_code)
init_code('__init__.py', _init_basic_code)
create_templates_static_files(app_path)
# print completion message
init_done_info()
|
def init(project_name)
|
build a minimal flask project
| 3.259174
| 3.220338
| 1.01206
|
# Create a blueprint package inside the current 'app' directory and
# register it in the app's __init__.py.
# Guard: must be run from inside the app package directory.
# os.path.basename is portable (the previous split('/') broke on Windows)
app = os.path.basename(os.getcwd())
if app != 'app':
    logger.warning('''\033[31m{Warning}\033[0m
==> your current path is \033[32m%s\033[0m\n
==> please create your blueprint under app folder!''' % os.getcwd())
    exit(1)
# destination path
dst_path = os.path.join(os.getcwd(), blueprint_name)
if os.path.isdir(dst_path):
    # typo fix: "bluprint" -> "blueprint"
    logger.warning('''\033[31m{Warning}\033[0m
==> blueprint \033[32m%s\033[0m\n exist
==> please try again !''' % dst_path)
    exit(1)
# create dst_path
_mkdir_p(dst_path)
# change dir
os.chdir(dst_path)
# create files
init_code('__init__.py', _init_blueprint_code %
          (blueprint_name, blueprint_name))
init_code('views.py', _views_blueprint_code %
          (blueprint_name, blueprint_name))
init_code('forms.py', _forms_basic_code)
# register auth in app: seek to the start of the last line of
# app/__init__.py and append the registration code there
os.chdir(os.path.join(dst_path, '..'))
with open('__init__.py', 'r+') as f:
    prev = pos = 0
    while f.readline():
        prev, pos = pos, f.tell()
    f.seek(prev)
    f.write(
        '\nfrom %s import %s\napp.register_blueprint(%s, url_prefix="/%s")\n\n'
        % (
            blueprint_name, blueprint_name,
            blueprint_name, blueprint_name
        )
    )
# create blueprint templates
templates_path = os.path.join(os.getcwd(), 'templates')
os.chdir(templates_path)
blueprint_templates_path = os.path.join(templates_path, blueprint_name)
_mkdir_p(blueprint_templates_path)
logger.info('''\033[33m{Info}\033[0m: create blueprint done!''')
|
def blueprint(blueprint_name)
|
create and register a blueprint
| 2.914609
| 2.888097
| 1.00918
|
# the destination path
dst_path = os.path.join(os.getcwd(), project_name)
start_init_info(dst_path)
# create dst path
_mkdir_p(dst_path)
# create project tree
os.chdir(dst_path)
# create files
init_code('manage.py', _manage_admin_code)
init_code('requirement.txt', _requirement_admin_code)
init_code('config.py', _config_sql_code)
# create app/
app_path = os.path.join(dst_path, 'app')
_mkdir_p(app_path)
# create files
os.chdir(app_path)
init_code('models.py', _models_admin_code)
init_code('__init__.py', _init_admin_code)
# create templates and static
css_path, templates_path = create_templates_static_files(app_path)
# create css files
os.chdir(css_path)
init_code('sign.css', _auth_login_css_code)
# create main blueprint
create_blueprint(
    app_path,
    'main',
    _views_blueprint_code % ('main', 'main'),
    _forms_basic_code,
    templates_path
)
# create auth blueprint
auth_templates_path = create_blueprint(
    app_path,
    'auth',
    _auth_views_code,
    _auth_forms_code,
    templates_path
)
# create auth templates files
os.chdir(auth_templates_path)
init_code('login.html', _auth_login_html_code)
# create admin site
admin_path = os.path.join(app_path, 'admin')
_mkdir_p(admin_path)
# create admin files
os.chdir(admin_path)
init_code('__init__.py', '')
init_code('views.py', _admin_views_code)
# create admin templates
os.chdir(templates_path)
admin_templates_path = os.path.join(templates_path, 'admin')
_mkdir_p(admin_templates_path)
# create admin templates files
os.chdir(admin_templates_path)
init_code('index.html', _admin_index_html_code)
init_code('logout.html', _admin_logout_html_code)
# print completion message
init_done_info()
|
def startproject(project_name)
|
build a full status project
| 2.586876
| 2.596266
| 0.996383
|
# add module into admin site
# Guard: must be run from inside the app package directory.
# os.path.basename is portable (the previous split('/') broke on Windows)
app = os.path.basename(os.getcwd())
if app != 'app':
    logger.warning('''\033[31m{Warning}\033[0m
==> your current path is \033[32m%s\033[0m\n
==> please add your sql module under app folder!''' % os.getcwd())
    exit(1)
admin_path = os.path.join(os.getcwd(), 'admin')
os.chdir(admin_path)
# seek to the start of the last line of views.py and append the
# ModelView registration there
with open('views.py', 'r+') as f:
    prev = pos = 0
    while f.readline():
        prev, pos = pos, f.tell()
    f.seek(prev)
    f.write(
        '\nfrom app.models import %s\nadmin.add_view(ModelView(%s, db.session))'
        % (module, module)
    )
logger.info('''\033[33m{Info}\033[0m: add module done!''')
|
def admin(module)
|
add sql modules into admin site
| 4.045009
| 3.637417
| 1.112055
|
# mkdir -p semantics: create the directory, tolerating an existing one
try:
    os.makedirs(abspath)
except OSError as e:
    # EEXIST on an existing *directory* is fine; anything else propagates
    if (e.errno == errno.EEXIST) and (os.path.isdir(abspath)):
        pass
    else: raise
|
def _mkdir_p(abspath)
|
Usage:
create the abspath
except the abspath exist
Param:
abspath: the absolutly path you want to be created
| 2.048213
| 2.292767
| 0.893337
|
# Find the first error class whose key is a substring of error_type;
# fall back to the generic AirVisualError
try:
    error = next((v for k, v in ERROR_CODES.items() if k in error_type))
except StopIteration:
    error = AirVisualError
raise error(error_type)
|
def raise_error(error_type: str) -> None
|
Raise the appropriate error based on error message.
| 5.271949
| 4.282987
| 1.230905
|
# The API reports errors either as a bare string or as a dict with a
# non-'success' status and a nested message
if isinstance(data, str):
    raise_error(data)
elif 'status' in data and data['status'] != 'success':
    raise_error(data['data']['message'])
|
def _raise_on_error(data: Union[str, dict]) -> None
|
Raise the appropriate exception on error.
| 3.694528
| 2.994159
| 1.233912
|
# Build headers/params, always sending JSON and attaching the API key
# when configured, then perform the request and raise on API errors
if not headers:
    headers = {}
headers.update({'Content-Type': 'application/json'})
if not params:
    params = {}
if self._api_key:
    params.update({'key': self._api_key})
url = '{0}/{1}'.format(base_url, endpoint)
async with self.websession.request(method, url, headers=headers,
                                   params=params, json=json) as resp:
    # content_type=None skips the MIME-type check on the response
    data = await resp.json(content_type=None)
    _raise_on_error(data)
    return data
|
async def request(
self,
method: str,
endpoint: str,
*,
base_url: str = API_URL_SCAFFOLD,
headers: dict = None,
params: dict = None,
json: dict = None) -> dict
|
Make a request against AirVisual.
| 2.317006
| 2.188275
| 1.058828
|
# Walk through every client API, logging results; each section catches
# AirVisualError (and UnauthorizedError for key-gated endpoints) so one
# failure doesn't stop the demo
client = Client(websession, api_key='<API KEY>')
# Get supported locations (by location):
try:
    _LOGGER.info(await client.supported.countries())
    _LOGGER.info(await client.supported.states('USA'))
    _LOGGER.info(await client.supported.cities('USA', 'Colorado'))
except AirVisualError as err:
    _LOGGER.error('There was an error: %s', err)
# Get supported locations (by station):
try:
    _LOGGER.info(
        await client.supported.stations(
            'USA', 'Colorado', 'Denver'))
except UnauthorizedError as err:
    _LOGGER.error(err)
except AirVisualError as err:
    _LOGGER.error('There was an error: %s', err)
# Get data by nearest location (by IP):
try:
    _LOGGER.info(await client.api.nearest_city())
except AirVisualError as err:
    _LOGGER.error('There was an error: %s', err)
# Get data by nearest location (coordinates or explicit location):
try:
    _LOGGER.info(
        await client.api.nearest_city(
            latitude=39.742599, longitude=-104.9942557))
    _LOGGER.info(
        await client.api.city(
            city='Los Angeles', state='California', country='USA'))
except AirVisualError as err:
    _LOGGER.error('There was an error: %s', err)
# Get data by nearest station (by IP):
try:
    _LOGGER.info(await client.api.nearest_station())
except UnauthorizedError as err:
    _LOGGER.error(err)
except AirVisualError as err:
    _LOGGER.error('There was an error: %s', err)
# Get data by nearest station (by coordinates or explicit location):
try:
    _LOGGER.info(
        await client.api.nearest_station(
            latitude=39.742599, longitude=-104.9942557))
    _LOGGER.info(
        await client.api.station(
            station='US Embassy in Beijing',
            city='Beijing',
            state='Beijing',
            country='China'))
except UnauthorizedError as err:
    _LOGGER.error(err)
except AirVisualError as err:
    _LOGGER.error('There was an error: %s', err)
# Get data on AQI ranking:
try:
    _LOGGER.info(await client.api.ranking())
except UnauthorizedError as err:
    _LOGGER.error(err)
except AirVisualError as err:
    _LOGGER.error('There was an error: %s', err)
# Get info on a AirVisual Pro node:
_LOGGER.info(await client.api.node('zEp8CifbnasWtToBc'))
|
async def main() -> None: # pylint: disable=too-many-statements
logging.basicConfig(level=logging.INFO)
async with ClientSession() as websession
|
Create the aiohttp session and run the example.
| 2.434182
| 2.374727
| 1.025037
|
# Without coordinates the API geolocates by the caller's IP address
params = {}
if latitude and longitude:
    params.update({'lat': str(latitude), 'lon': str(longitude)})
data = await self._request(
    'get', 'nearest_{0}'.format(kind), params=params)
return data['data']
|
async def _nearest(
self,
kind: str,
latitude: Union[float, str] = None,
longitude: Union[float, str] = None) -> dict
|
Return data from nearest city/station (IP or coordinates).
| 3.482131
| 3.004808
| 1.158853
|
# Query the 'city' endpoint and unwrap the payload
data = await self._request(
    'get',
    'city',
    params={
        'city': city,
        'state': state,
        'country': country
    })
return data['data']
|
async def city(self, city: str, state: str, country: str) -> dict
|
Return data for the specified city.
| 3.304448
| 2.588658
| 1.27651
|
# Thin wrapper over the shared _nearest helper
return await self._nearest('city', latitude, longitude)
|
async def nearest_city(
self,
latitude: Union[float, str] = None,
longitude: Union[float, str] = None) -> dict
|
Return data from nearest city (IP or coordinates).
| 17.256151
| 14.861842
| 1.161104
|
# Nodes live on a different base URL than the main API
return await self._request('get', node_id, base_url=NODE_URL_SCAFFOLD)
|
async def node(self, node_id: str) -> dict
|
Return data from a node by its ID.
| 14.305262
| 9.989687
| 1.432003
|
# Extract just the city names from the response payload
data = await self._request(
    'get', 'cities', params={
        'state': state,
        'country': country
    })
return [d['city'] for d in data['data']]
|
async def cities(self, country: str, state: str) -> list
|
Return a list of supported cities in a country/state.
| 4.317693
| 3.569744
| 1.209525
|
# Extract just the state names from the response payload
data = await self._request(
    'get', 'states', params={'country': country})
return [d['state'] for d in data['data']]
|
async def states(self, country: str) -> list
|
Return a list of supported states in a country.
| 4.98446
| 3.839623
| 1.298164
|
async def stations(self, city: str, state: str, country: str) -> list:
    """Return a list of supported stations in a city.

    Args:
        city: city name.
        state: state name.
        country: country name.

    Returns:
        list: station records as returned by the API.
    """
    data = await self._request(
        'get',
        'stations',
        params={
            'city': city,
            'state': state,
            'country': country
        })
    # data['data'] is already iterable; the original identity comprehension
    # ``[station for station in data['data']]`` was a pointless element-wise
    # copy — a plain list() conversion is the idiomatic equivalent.
    return list(data['data'])
| 3.09677
| 2.607501
| 1.187639
|
def column_width(tokens):
    """Return a suitable column width to display one or more strings."""
    measure = tools.display_len if PY3 else len
    lengths = sorted(map(measure, tokens or [])) or [0]
    widest = lengths[-1]
    if widest >= 18:
        # Adjust for disproportionately long strings: prefer the 90th
        # percentile width when it is close enough to the maximum.
        ninetieth = lengths[int(len(lengths) * 0.9)]
        if ninetieth < widest + 6:
            return ninetieth
    return widest
| 8.072882
| 7.512616
| 1.074577
|
def justify_token(tok, col_width):
    """Justify a string to fill one or more columns of output."""
    measure = tools.display_len if PY3 else len
    tok_len = measure(tok)
    # Difference between display width and raw length (wide chars, PY3 only).
    diff_len = tok_len - len(tok) if PY3 else 0
    if col_width < tok_len + 4:
        cols = int(math.ceil(float(tok_len) / col_width))
    else:
        cols = 1
    if cols > 1:
        return tok.ljust((col_width * cols) + (4 * cols) - diff_len)
    return tok.ljust(col_width + 4 - diff_len)
| 4.247491
| 4.118572
| 1.031302
|
def display_name(name, obj, local):
    """Get the display name of an object.

    Args:
        name: the name of the object as a string.
        obj: the object itself.
        local: boolean; True when the object is in local scope, False when
            it is owned by another object (shown with a leading dot).
    """
    prefix = '' if local else '.'
    if isinstance(obj, SeeError):
        suffix = '?'          # attribute access raised an error
    elif hasattr(obj, '__call__'):
        suffix = '()'         # callable
    else:
        suffix = ''
    return prefix + name + suffix
| 5.175817
| 7.545046
| 0.685989
|
def filter(self, pattern):
    """Filter the results using a pattern.

    This accepts a shell-style wildcard pattern (as used by the fnmatch_
    module)::

        >>> see([]).filter('*op*')
            .copy()    .pop()

    It also accepts a regular expression. This may be a compiled regular
    expression (from the re_ module) or a string that starts with a ``/``
    (forward slash) character::

        >>> see([]).filter('/[aeiou]{2}/')
            .clear()    .count()

    .. _fnmatch: https://docs.python.org/3/library/fnmatch.html
    .. _re: https://docs.python.org/3/library/re.html
    """
    if isinstance(pattern, REGEX_TYPE):
        matcher = tools.filter_regex
    elif pattern.startswith('/'):
        # A '/pattern/' string is compiled into a regular expression.
        pattern = re.compile(pattern.strip('/'))
        matcher = tools.filter_regex
    else:
        matcher = tools.filter_wildcard
    return SeeResult(matcher(self, pattern))
| 5.192924
| 4.475639
| 1.160264
|
def filter_ignoring_case(self, pattern):
    """Like ``filter`` but case-insensitive.

    Expects a regular expression string without the surrounding ``/``
    characters; it is compiled with ``re.I`` and passed to ``filter``.
    """
    compiled = re.compile(pattern, re.I)
    return self.filter(compiled)
| 6.651371
| 9.331925
| 0.712754
|
def term_width():
    """Return the column width of the terminal, or ``None`` if it can't be
    determined.

    NOTE(review): the Windows failure branch returns 0 rather than None;
    the caller (``line_width``) only tests truthiness, so both behave the
    same — but the docstring and code disagree. Confirm before relying on
    the exact sentinel.
    """
    if fcntl and termios:
        # POSIX: query the tty driver for the window size of stdin (fd 0).
        try:
            winsize = fcntl.ioctl(0, termios.TIOCGWINSZ, '    ')
            # TIOCGWINSZ packs (rows, cols) as two shorts; we want cols.
            _, width = struct.unpack('hh', winsize)
            return width
        except IOError:
            pass
    elif windll and create_string_buffer:  # pragma: no cover (windows)
        # Windows: read the console screen buffer info for stderr.
        stderr_handle, struct_size = -12, 22
        handle = windll.kernel32.GetStdHandle(stderr_handle)
        csbi = create_string_buffer(struct_size)
        res = windll.kernel32.GetConsoleScreenBufferInfo(handle, csbi)
        if res:
            (_, _, _, _, _, left, _, right, _,
             _, _) = struct.unpack('hhhhHhhhhhh', csbi.raw)
            return right - left + 1
        else:
            return 0
| 2.597101
| 2.631188
| 0.987045
|
def line_width(default_width=DEFAULT_LINE_WIDTH, max_width=MAX_LINE_WIDTH):
    """Return the ideal column width for the output from :func:`see.see`,
    taking the terminal width into account to avoid wrapping.
    """
    width = term_width()
    # Zero/None means the terminal width could not be determined.
    return min(width, max_width) if width else default_width
| 6.26577
| 5.951421
| 1.052819
|
def persistent_id(self, obj):
    """Tag reference-persisted objects with a persistent ID, but do NOT
    emit it (always returns None so the object is still pickled inline).
    """
    if not getattr(obj, '_PERSIST_REFERENCES', None):
        return None
    objid = id(obj)
    obj._persistent_ref = objid
    # Keep the object alive/reachable in the module-level memo table.
    _weakmemos[objid] = obj
    return None
| 10.571621
| 9.993423
| 1.057858
|
def handle_deprecated_args(tokens, args, kwargs):
    """Backwards compatibility with deprecated arguments ``pattern`` and ``r``."""
    pattern = args[0] if len(args) > 0 else kwargs.get('pattern', None)
    regex = args[1] if len(args) > 1 else kwargs.get('r', None)

    if pattern is not None:
        tokens = tools.filter_wildcard(tokens, pattern)
        sys.stderr.write(
            'Please use see().match() now. The "pattern" argument is '
            'deprecated and will be removed in a later release. \n')

    if regex is not None:
        tokens = tools.filter_regex(tokens, re.compile(regex))
        sys.stderr.write(
            'Please use see().match() now. The "r" argument is '
            'deprecated and will be removed in a later release. \n')

    return tokens
| 3.049596
| 2.685419
| 1.135613
|
def see(obj=DEFAULT_ARG, *args, **kwargs):
    """see(obj=anything)

    Show the features and attributes of an object.

    This function takes a single argument, ``obj``, which can be of any type.
    A summary of the object is printed immediately in the Python interpreter.
    For example::

        >>> see([])
            []    in    +    +=    *
            *=    <     <=   ==    !=
            >     >=    dir()      hash()
            help()      iter()     len()     repr()
            reversed()  str()      .append()  .clear()
            .copy()     .count()   .extend()  .index()
            .insert()   .pop()     .remove()  .reverse()
            .sort()

    If this function is run without arguments, it will instead list the objects
    that are available in the current scope. ::

        >>> see()
            os    random    see()    sys

    The return value is an instance of :class:`SeeResult`.
    """
    use_locals = obj is DEFAULT_ARG
    if use_locals:
        # Get the local scope from the caller's stack frame.
        # Typically this is the scope of an interactive Python session.
        obj = Namespace(inspect.currentframe().f_back.f_locals)
    tokens = []
    attrs = dir(obj)
    if not use_locals:
        # Operator/feature symbols only make sense for a real object, not
        # for the synthetic namespace of local variables.
        for name, func in INSPECT_FUNCS:
            if func(obj):
                tokens.append(name)
        for feature in FEATURES:
            if feature.match(obj, attrs):
                tokens.append(feature.symbol)
    for attr in filter(lambda a: not a.startswith('_'), attrs):
        try:
            prop = getattr(obj, attr)
        except (AttributeError, Exception):  # pylint: disable=broad-except
            # Attribute access itself can raise; display it with a '?' marker.
            prop = SeeError()
        action = output.display_name(name=attr, obj=prop, local=use_locals)
        tokens.append(action)
    if args or kwargs:
        # Legacy 'pattern'/'r' filtering arguments (deprecated).
        tokens = handle_deprecated_args(tokens, args, kwargs)
    return output.SeeResult(tokens)
| 4.905219
| 5.296985
| 0.92604
|
def open(self, mode='r', encoding=None):
    """Return a file-like object (actually opens the local file for this class)."""
    # Mode is always read-only; only the text/binary flavor varies.
    read_mode = 'r' + self._get_access_type(mode)
    return open(self.localpath, read_mode, encoding=encoding)
| 5.581144
| 5.055972
| 1.103872
|
def _open_tmpfile(self, **kwargs):
    """Open a temporary, unique file in CACHEDIR (/tmp/cyborgcache by default).

    The file is left open and its handle is stored on ``self.tmpfile``;
    **kwargs are passed through to ``tempfile.NamedTemporaryFile``.

    Returns:
        str: path of the temporary file.
    """
    self.tmpfile = get_tempfile(**kwargs)
    return self.tmpfile.name
| 5.873075
| 5.469383
| 1.073809
|
def open(self, mode='r', encoding=None):
    """Return a file-like object over the in-memory contents.

    Args:
        mode (str): access mode (only reading modes are supported)
        encoding (str): encoding type (only for binary access)

    Returns:
        io.BytesIO OR io.StringIO: buffer accessing the file as bytes or
        characters (the original docstring said TextIOWrapper, but the code
        returns StringIO for text access).
    """
    access_type = self._get_access_type(mode)

    if encoding is None:
        encoding = self.encoding

    # here, we face the task of returning the correct data type
    if access_type == 'b':
        if not self._isbytes:
            content = self._contents.encode(encoding)  # unicode in, bytes out
        else:
            content = self._contents  # bytes in, bytes out
        return io.BytesIO(content)
    else:
        assert access_type == 't'
        if PYVERSION == 2 and self._isbytes:
            return io.BytesIO(self._contents)  # bytes in, bytes out (python 2 only)
        elif self._isbytes:
            content = self._contents.decode(encoding)  # bytes in, unicode out
        else:
            content = self._contents  # unicode in, unicode out
        return io.StringIO(content)
| 3.196246
| 3.373021
| 0.947592
|
def put(self, filename, encoding=None):
    """Write the file to the given path.

    Args:
        filename (str): path to write this file to
        encoding (str): file encoding (default: system default)

    Returns:
        LocalFile: reference to the copy of the file stored at ``filename``
    """
    from . import LocalFile

    if os.path.isdir(filename) and self.source is None:
        raise ValueError("Cannot write this object to "
                         "directory %s without an explicit filename." % filename)

    target = get_target_path(filename, self.source)
    if encoding is None:
        encoding = self.encoding

    # Choose binary or text mode to match how the contents are stored.
    if self._isbytes:
        open_kwargs = {'mode': 'wb'}
    else:
        open_kwargs = {'mode': 'w', 'encoding': encoding}

    with open(target, **open_kwargs) as outfile:
        outfile.write(self._contents)

    return LocalFile(target, encoded_with=encoding)
| 4.075307
| 4.037968
| 1.009247
|
def get_global_vars(func):
    """Store any methods or variables bound from the function's closure.

    Args:
        func (function): function to inspect

    Returns:
        dict: mapping of variable names to globally bound variables, split
        into 'modules', 'functions' and 'vars' sub-dicts

    Raises:
        TypeError: if the function closes over nonlocal variables
    """
    closure = getclosurevars(func)
    if closure['nonlocal']:
        # BUG FIX: the message previously read closure['nonlocals'] — a key
        # that does not exist — so this branch raised KeyError instead of
        # the intended TypeError.
        raise TypeError("Can't launch a job with closure variables: %s" %
                        closure['nonlocal'].keys())

    globalvars = dict(modules={},
                      functions={},
                      vars={})

    for name, value in closure['global'].items():
        if inspect.ismodule(value):  # TODO: deal FUNCTIONS from closure
            # modules are stored by name and re-imported on the other side
            globalvars['modules'][name] = value.__name__
        elif inspect.isfunction(value) or inspect.ismethod(value):
            globalvars['functions'][name] = value
        else:
            globalvars['vars'][name] = value
    return globalvars
| 4.640751
| 5.348175
| 0.867726
|
def getsource(classorfunc):
    """Return the source code for a class or function.

    Notes:
        Returned source will not include any decorators for the object.
        This will only return the explicit declaration of the object, not any dependencies

    Args:
        classorfunc (type or function): the object to get the source code for

    Returns:
        str: text of source code (without any decorators). Note: in python 2, this returns unicode
    """
    if _isbuiltin(classorfunc):
        return ''
    try:
        source = inspect.getsource(classorfunc)
    except TypeError:  # raised if defined in __main__ - use fallback to get the source instead
        source = getsourcefallback(classorfunc)

    declaration = []
    lines = source.splitlines()
    if PY2 and not isinstance(source, unicode):
        # Python 2: decode the raw source with its declared encoding.
        encoding = detect_encoding(iter(lines).next)[0]
        sourcelines = (s.decode(encoding) for s in lines)
    else:
        sourcelines = iter(lines)

    # First, get the declaration — all lines up to (and including) the colon
    # that ends the 'def'/'class' header.
    found_keyword = False
    for line in sourcelines:
        words = line.split()
        if not words:
            continue
        if words[0] in ('def', 'class'):
            found_keyword = True
        if found_keyword:
            cind = line.find(':')
            if cind > 0:
                declaration.append(line[:cind + 1])
                # anything after the colon on the header line is body code
                after_decl = line[cind + 1:].strip()
                break
            else:
                declaration.append(line)

    bodylines = list(sourcelines)  # the rest of the lines are body

    # If it's a class, make sure we import its superclasses
    # Unfortunately, we need to modify the code to make sure the
    # parent classes have the correct names
    # TODO: find a better way to do this without having to parse code
    if type(classorfunc) == type:
        cls = classorfunc
        base_imports = {}
        for base in cls.__bases__:
            if base.__name__ == 'object' and base.__module__ == 'builtins':  # don't import `object`
                continue
            if base in base_imports:
                continue
            if base.__module__ == '__main__':
                continue
            base_imports[base] = 'from %s import %s' % (base.__module__, base.__name__)

        # Rewrite the class header so the base-class names match the imports.
        cind = declaration[0].index('class ')
        declstring = declaration[0][:cind] + 'class %s(%s):%s' % (
            cls.__name__,
            ','.join([base.__name__ for base in cls.__bases__]),
            after_decl)

        declaration = [impstring for c, impstring in base_imports.items()
                       if c.__module__ != '__builtin__']
        declaration.append(declstring)
    else:
        declaration[-1] += after_decl

    return '\n'.join(declaration + bodylines)
| 3.273377
| 3.347089
| 0.977977
|
def getsourcefallback(cls):
    """Fallback for getting the source of interactively defined classes (typically in ipython)

    This is basically just a patched version of the inspect module, in which
    we get the code by calling inspect.findsource on an *instancemethod* of
    a class for which inspect.findsource fails.
    """
    # Find any bound method of the class; its code object carries the file
    # information that findsource needs.
    for attr in cls.__dict__:
        if inspect.ismethod(getattr(cls, attr)):
            imethod = getattr(cls, attr)
            break
    else:
        raise AttributeError(
            "Cannot get this class' source; it does not appear to have any methods")

    ### This part is derived from inspect.findsource ###
    module = inspect.getmodule(cls)
    file = inspect.getfile(imethod)
    lines = linecache.getlines(file, module.__dict__)
    name = cls.__name__
    pat = re.compile(r'^(\s*)class\s*'+name+r'\b')
    # AMVMOD: find the encoding (necessary for python 2 only)
    #if PY2:
    #    with open(file, 'rb') as infile:
    #        encoding = detect_encoding(infile.readline)[0]

    # make some effort to find the best matching class definition:
    # use the one with the least indentation, which is the one
    # that's most probably not inside a function definition.
    candidates = []
    toplevel = False
    for i in range(len(lines)):
        match = pat.match(lines[i])
        if match:
            # if it's at toplevel, it's already the best one
            if lines[i][0] == 'c':
                flines, flnum = lines, i
                toplevel = True
                break
            # else add whitespace to candidate list
            candidates.append((match.group(1), i))

    if candidates and not toplevel:
        # this will sort by whitespace, and by line number,
        # less whitespace first
        candidates.sort()
        flines, flnum = lines, candidates[0][1]
    elif not candidates and not toplevel:
        raise IOError('could not find class definition')
    ### end modified inspect.findsource ###

    # this is what inspect.getsourcelines does
    glines = inspect.getblock(flines[flnum:])

    # And this is what inspect.getsource does
    if False:  #if PY2:
        return ("".join(glines)).decode(encoding)
    else:
        return "".join(glines)
| 5.096247
| 4.943158
| 1.03097
|
def get_job(self, jobid):
    """Return a Job object for the requested job id.

    The returned object will be suitable for retrieving output, but depending on the engine,
    may not populate all fields used at launch time (such as `job.inputs`, `job.commands`, etc.)

    Args:
        jobid (str): container id

    Returns:
        pyccc.job.Job: job object for this container

    Raises:
        pyccc.exceptions.JobNotFound: if no job could be located for this jobid
    """
    import shlex
    from pyccc.job import Job

    job = Job(engine=self)
    job.jobid = job.rundata.containerid = jobid
    try:
        jobdata = self.client.inspect_container(job.jobid)
    except docker.errors.NotFound:
        raise exceptions.JobNotFound(
            'The daemon could not find containter "%s"' % job.jobid)

    cmd = jobdata['Config']['Cmd']
    entrypoint = jobdata['Config']['Entrypoint']

    # Shell commands are launched as ['sh', '-c', <cmdstring>]; unwrap that
    # so job.command reflects the user's original command line.
    if len(cmd) == 3 and cmd[0:2] == ['sh', '-c']:
        cmd = cmd[2]
    elif entrypoint is not None:
        cmd = entrypoint + cmd

    if isinstance(cmd, list):
        # NOTE(review): shlex.quote is Python 3 only — confirm this code
        # path is not exercised under Python 2.
        cmd = ' '.join(shlex.quote(x) for x in cmd)

    job.command = cmd
    job.env = jobdata['Config']['Env']
    job.workingdir = jobdata['Config']['WorkingDir']
    job.rundata.container = jobdata

    return job
| 3.267016
| 3.060275
| 1.067556
|
def submit(self, job):
    """Submit a job to the docker engine.

    Args:
        job (pyccc.job.Job): Job to submit
    """
    self._check_job(job)

    if job.workingdir is None:
        job.workingdir = self.default_wdir

    # Bake the job's input files into a provisioned image first.
    job.imageid = du.create_provisioned_image(
        self.client, job.image, job.workingdir, job.inputs)

    container_args = self._generate_container_args(job)
    job.rundata.container = self.client.create_container(job.imageid, **container_args)
    self.client.start(job.rundata.container)
    job.jobid = job.rundata.containerid = job.rundata.container['Id']
| 3.740202
| 3.830773
| 0.976357
|
def dump_all_outputs(self, job, target, abspaths=None):
    """Specialized dumping strategy - copy the entire working directory, then discard
    the input files that came along for the ride.

    Not used if there are absolute paths.
    This is slow and wasteful if there are big input files.

    Args:
        job: job whose outputs should be dumped
        target: directory to write the outputs into (created if missing)
        abspaths: if truthy, fall back to the generic per-file strategy
    """
    import os
    import shutil
    from pathlib import Path

    root = Path(native_str(target))
    true_outputs = job.get_output()

    # Few outputs (or absolute paths): per-file copying is cheaper.
    if abspaths or len(true_outputs) < self.BULK_OUTPUT_FILE_THRESHOLD:
        return super().dump_all_outputs(job, root, abspaths)

    stagingdir = root / Path(native_str(job.workingdir)).name
    workdir = job.get_directory(job.workingdir)

    if not root.is_dir():
        root.mkdir(parents=False)
    if stagingdir.exists():
        if PY2:
            # BUG FIX: format string was 'Path % exists' (missing the 's'),
            # so the offending path was never interpolated into the message.
            raise IOError('Path %s exists' % stagingdir)
        else:
            raise FileExistsError(stagingdir)

    # Copy the whole working directory into a staging area under root.
    workdir.put(str(root))
    assert stagingdir.is_dir()
    assert root in stagingdir.parents

    # Move the real outputs up out of the staging copy ...
    for pathstr in true_outputs:
        if os.path.isabs(pathstr):
            continue
        destpath = root / pathstr
        currpath = stagingdir / pathstr
        if not destpath.parent.is_dir():
            destpath.parent.mkdir(parents=True)
        currpath.rename(destpath)

    # ... then discard everything else (the inputs that came along).
    shutil.rmtree(str(stagingdir))
| 3.938456
| 3.869625
| 1.017787
|
def handle_download_click(self, *args):
    """Callback for the download button.

    Downloads the file and replaces the button with a rendered view of
    its contents.
    """
    # Unhook ourselves first so a second click can't re-trigger the download.
    self.download_button.on_click(self.handle_download_click, remove=True)
    self.download_button.description = 'Downloading ...'
    self._string = self._fileobj.read()
    self.render_string()
| 6.379973
| 6.353736
| 1.004129
|
def create_build_context(image, inputs, wdir):
    """Create a docker build context containing a Dockerfile and the inputs.

    The generated Dockerfile copies the "root" directory of the context to
    the root of the image filesystem, placing each input under the chosen
    working directory.
    """
    assert os.path.isabs(wdir)

    dockerlines = ["FROM %s" % image,
                   "RUN mkdir -p %s" % wdir]
    build_context = {}

    # This loop creates a build context for building the provisioned image:
    # a tar archive whose 'root' tree is overlaid onto the image filesystem.
    if inputs:
        dockerlines.append('COPY root /')
        for path, obj in inputs.items():
            if not os.path.isabs(path):
                path = os.path.join(wdir, path)
            assert path[0] == '/'
            build_context['root' + path] = obj

    dockerstring = '\n'.join(dockerlines)
    build_context['Dockerfile'] = pyccc.BytesContainer(dockerstring.encode('utf-8'))
    return build_context
| 5.038114
| 5.097583
| 0.988334
|
def make_tar_stream(build_context, buffer):
    """Write a tar stream of the build context to the provided buffer.

    Args:
        build_context (Mapping[str, pyccc.FileReferenceBase]): dict mapping
            archive paths to file references
        buffer (io.BytesIO): writable binary mode buffer
    """
    tf = tarfile.TarFile(fileobj=buffer, mode='w')
    for archive_path, fileobj in build_context.items():
        localpath = getattr(fileobj, 'localpath', None)
        if localpath is not None:
            # File exists on disk — let tarfile read it directly.
            tf.add(localpath, arcname=archive_path)
        else:
            tar_add_bytes(tf, archive_path, fileobj.read('rb'))
    tf.close()
| 2.978239
| 3.020655
| 0.985958
|
def tar_add_bytes(tf, filename, bytestring):
    """Add an in-memory file to a tar archive.

    Args:
        tf (tarfile.TarFile): tarfile to add the file to
        filename (str): path within the tar file
        bytestring (bytes or str): file contents. Must be :class:`bytes` or
            ascii-encodable :class:`str`
    """
    if not isinstance(bytestring, bytes):
        # not encoded yet
        bytestring = bytestring.encode('ascii')
    info = tarfile.TarInfo(filename)
    info.size = len(bytestring)
    tf.addfile(info, io.BytesIO(bytestring))
| 2.326605
| 2.659284
| 0.874899
|
def kwargs_from_client(client, assert_hostname=False):
    """More or less stolen from docker-py's kwargs_from_env
    https://github.com/docker/docker-py/blob/c0ec5512ae7ab90f7fac690064e37181186b1928/docker/utils/utils.py

    :type client : docker.Client
    """
    from docker import tls

    local_urls = ('http+docker://localunixsocket', 'http+docker://localhost')
    if client.base_url in local_urls:
        return {'base_url': 'unix://var/run/docker.sock'}

    params = {'base_url': client.base_url}
    if client.cert:
        # TODO: problem - client.cert is filepaths, and it would be insecure to send those files.
        params['tls'] = tls.TLSConfig(
            client_cert=client.cert,
            ca_cert=client.verify,
            verify=bool(client.verify),
            assert_hostname=assert_hostname)
    return params
| 4.471549
| 4.448214
| 1.005246
|
def find_class(self, module, name):
    """This override is here to help pickle find the modules that classes are defined in.

    It does three things:
     1) remaps the "PackagedFunction" class from pyccc to the `source.py` module.
     2) Remaps any classes created in the client's '__main__' to the `source.py` module
     3) Creates on-the-fly modules to store any other classes present in source.py

    References:
        This is a modified version of the 2-only recipe from
        https://wiki.python.org/moin/UsingPickle/RenamingModules.
        It's been modified for 2/3 cross-compatibility
    """
    import pickle

    modname = self.RENAMETABLE.get(module, module)

    try:
        # can't use ``super`` here (not 2/3 compatible)
        klass = pickle.Unpickler.find_class(self, modname, name)
    except (ImportError, RuntimeError):
        # Fallback: pull the definition out of the `source` module and house
        # it in a synthetic module so pickle can resolve it by name.
        definition = getattr(source, name)
        newmod = _makemod(modname)
        sys.modules[modname] = newmod
        setattr(newmod, name, definition)
        klass = pickle.Unpickler.find_class(self, newmod.__name__, name)
        klass.__module__ = module
    return klass
| 5.08759
| 4.587573
| 1.108994
|
def gist_diff():
    """Diff this file against the gist on github; return diff's stdout."""
    remote_file = wget(RAW_GIST)
    diff_cmd = ('diff - %s' % MY_PATH).split()
    proc = subprocess.Popen(diff_cmd,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    # Feed the remote copy on stdin; diff compares it against the local path.
    stdout, _ = proc.communicate(remote_file)
    return stdout
| 6.149231
| 6.240319
| 0.985403
|
def wget(url):
    """Download the page at *url* and return its contents as a byte string."""
    # BUG FIX: this imported urllib.parse but called urllib.request.urlopen,
    # which raises AttributeError because the submodule was never loaded.
    import urllib.request

    request = urllib.request.urlopen(url)
    filestring = request.read()
    return filestring
| 4.73319
| 4.248575
| 1.114065
|
def autodecode(b):
    """Try to decode ``bytes`` to text - try the default encoding first,
    otherwise try to autodetect the encoding with chardet.

    Args:
        b (bytes): byte string

    Returns:
        str: decoded text string
    """
    try:
        return b.decode()
    except UnicodeError:
        # Imported lazily so the common (valid UTF-8) path has no hard
        # dependency on chardet.
        import warnings
        import chardet

        result = chardet.detect(b)
        if result['confidence'] < 0.95:
            warnings.warn('autodecode failed with utf-8; guessing %s' % result['encoding'])
        # BUG FIX: previously decoded the chardet *result dict* instead of
        # the original byte string (`result.decode(...)` → AttributeError).
        return b.decode(result['encoding'])
| 4.48463
| 4.228324
| 1.060616
|
def can_use_widgets():
    """Return True when running inside a Jupyter kernel with ipywidgets
    available.

    Expanded from http://stackoverflow.com/a/34092072/1958900
    """
    if 'IPython' not in sys.modules:
        # IPython was never imported, so we can't be in a notebook.
        return False

    from IPython import get_ipython
    if getattr(get_ipython(), 'kernel', None) is None:
        # No `kernel` attribute means terminal IPython, not a notebook.
        return False

    try:
        import ipywidgets as ipy
        import traitlets
    except ImportError:
        return False

    return True
| 3.736731
| 3.41463
| 1.09433
|
def remove_directories(list_of_paths):
    """Remove non-leaf entries from a list of directory paths.

    A path is kept only if it is not a parent directory of any other path
    in the list and does not itself end with ``/``.

    Args:
        list_of_paths (List[str]): '/'-separated paths

    Returns:
        List[str]: the leaf paths, in their original order
    """
    found_dirs = {'/'}
    for path in list_of_paths:
        parts = path.strip().split('/')
        for i in range(2, len(parts)):
            found_dirs.add('/'.join(parts[:i]))

    # ROBUSTNESS FIX: the original indexed path.strip()[-1], which raised
    # IndexError on empty entries; endswith() plus an emptiness guard is safe.
    return [path for path in list_of_paths
            if path.strip()
            and path.strip() not in found_dirs
            and not path.strip().endswith('/')]
| 3.312517
| 3.208933
| 1.03228
|
p = filename
if not os.path.isabs(p):
p = os.path.join(wdir, p)
targetpath = os.path.realpath(p)
wdir = os.path.realpath(wdir)
common = os.path.commonprefix([wdir, targetpath])
if len(common) < len(wdir):
raise exceptions.PathError(
"The subprocess engine does not support input files with absolute paths")
return p
|
def _check_file_is_under_workingdir(filename, wdir)
|
Raise error if input is being staged to a location not underneath the working dir
| 3.832901
| 3.682167
| 1.040936
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.