| _id (string, 2-7 chars) | title (string, 1-88 chars) | partition (3 classes) | text (string, 75-19.8k chars) | language (1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q26400
|
schunk
|
train
|
def schunk(string, size):
"""Splits string into n sized chunks."""
return [string[i:i+size] for i in range(0, len(string), size)]
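# Illustrative usage (not part of the source row): the final chunk may be
# shorter than `size` when the length is not an exact multiple.
# >>> schunk('abcdefg', 3)
# ['abc', 'def', 'g']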
|
python
|
{
"resource": ""
}
|
q26401
|
mill
|
train
|
def mill(it, label='', hide=None, expected_size=None, every=1):
"""Progress iterator. Prints a mill while iterating over the items."""
def _mill_char(_i):
if _i >= count:
return ' '
else:
return MILL_CHARS[(_i // every) % len(MILL_CHARS)]
def _show(_i):
if not hide:
if ((_i % every) == 0 or # True every "every" updates
(_i == count)): # And when we're done
STREAM.write(MILL_TEMPLATE % (
label, _mill_char(_i), _i, count))
STREAM.flush()
count = len(it) if expected_size is None else expected_size
if count:
_show(0)
for i, item in enumerate(it):
yield item
_show(i + 1)
if not hide:
STREAM.write('\n')
STREAM.flush()
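# Illustrative usage sketch; MILL_CHARS, MILL_TEMPLATE and STREAM are
# module-level names assumed to be defined alongside this function:
# for item in mill(range(1000), label='working '):
#     handle(item)  # `handle` is a hypothetical per-item callback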
|
python
|
{
"resource": ""
}
|
q26402
|
min_width
|
train
|
def min_width(string, cols, padding=' '):
"""Returns given string with right padding."""
is_color = isinstance(string, ColoredString)
stack = tsplit(str(string), NEWLINES)
for i, substring in enumerate(stack):
_sub = clean(substring).ljust(cols, padding)
if is_color:
_sub = (_sub.replace(clean(substring), substring))
stack[i] = _sub
return '\n'.join(stack)
|
python
|
{
"resource": ""
}
|
q26403
|
join
|
train
|
def join(l, conj=CONJUNCTION, im_a_moron=MORON_MODE, separator=COMMA):
"""Joins lists of words. Oxford comma and all."""
collector = []
left = len(l)
separator = separator + SPACE
conj = conj + SPACE
for _l in l[:]:
left -= 1
collector.append(_l)
if left == 1:
if len(l) == 2 or im_a_moron:
collector.append(SPACE)
else:
collector.append(separator)
collector.append(conj)
elif left != 0:
collector.append(separator)
return unicode(str().join(collector))
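# Illustrative behaviour, assuming the module constants resolve to
# conj='and', separator=',' and SPACE=' ':
# >>> join(['eggs', 'spam', 'ham'])
# u'eggs, spam, and ham'
# >>> join(['eggs', 'spam', 'ham'], im_a_moron=True)
# u'eggs, spam and ham'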
|
python
|
{
"resource": ""
}
|
q26404
|
site_data_dir
|
train
|
def site_data_dir(appname, appauthor=None, version=None):
"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
"appauthor" (only required and used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Typical site data directories are:
Mac OS X: /Library/Application Support/<AppName>
Unix: /etc/xdg/<appname>
Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7.
For Unix, this is using the $XDG_CONFIG_DIRS[0] default.
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if sys.platform.startswith("win"):
if appauthor is None:
raise AppDirsError("must specify 'appauthor' on Windows")
path = os.path.join(_get_win_folder("CSIDL_COMMON_APPDATA"),
appauthor, appname)
elif sys.platform == 'darwin':
path = os.path.join(
os.path.expanduser('/Library/Application Support'),
appname)
else:
# XDG default for $XDG_CONFIG_DIRS[0]. Perhaps should actually
# *use* that envvar, if defined.
path = "/etc/xdg/"+appname.lower()
if version:
path = os.path.join(path, version)
return path
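# Illustrative call on a generic Unix system (paths are platform-dependent):
# >>> site_data_dir('MyApp', 'MyCompany', version='1.0')
# '/etc/xdg/myapp/1.0'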
|
python
|
{
"resource": ""
}
|
q26405
|
user_log_dir
|
train
|
def user_log_dir(appname, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific log dir for this application.
"appname" is the name of application.
"appauthor" (only required and used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
"opinion" (boolean) can be False to disable the appending of
"Logs" to the base app data dir for Windows, and "log" to the
base cache dir for Unix. See discussion below.
Typical user log directories are:
Mac OS X: ~/Library/Logs/<AppName>
Unix: ~/.cache/<appname>/log # or under $XDG_CACHE_HOME if defined
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
On Windows the only suggestion in the MSDN docs is that local settings
go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
examples of what some windows apps use for a logs dir.)
OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
value for Windows and appends "log" to the user cache dir for Unix.
This can be disabled with the `opinion=False` option.
"""
if sys.platform == "darwin":
path = os.path.join(
os.path.expanduser('~/Library/Logs'),
appname)
elif sys.platform == "win32":
path = user_data_dir(appname, appauthor, version)
version = False
if opinion:
path = os.path.join(path, "Logs")
else:
path = user_cache_dir(appname, appauthor, version)
version = False
if opinion:
path = os.path.join(path, "log")
if version:
path = os.path.join(path, version)
return path
|
python
|
{
"resource": ""
}
|
q26406
|
APIETAGProcessor.get_etags_and_matchers
|
train
|
def get_etags_and_matchers(self, request):
"""Get the etags from the header and perform a validation against the required preconditions."""
# evaluate the preconditions, raises 428 if condition is not met
self.evaluate_preconditions(request)
# alright, headers are present, extract the values and match the conditions
return super(APIETAGProcessor, self).get_etags_and_matchers(request)
|
python
|
{
"resource": ""
}
|
q26407
|
APIETAGProcessor.evaluate_preconditions
|
train
|
def evaluate_preconditions(self, request):
"""Evaluate whether the precondition for the request is met."""
if request.method.upper() in self.precondition_map.keys():
required_headers = self.precondition_map.get(request.method.upper(), [])
# check the required headers
for header in required_headers:
if not request.META.get(prepare_header_name(header)):
# raise an error for each header that does not match
logger.warning('Precondition required: %s', request.path,
extra={
'status_code': status.HTTP_428_PRECONDITION_REQUIRED,
'request': request
}
)
# raise an RFC 6585 compliant exception
raise PreconditionRequiredException(detail='Precondition required. This "%s" request '
'is required to be conditional. '
'Try again using "%s".' % (request.method, header)
)
return True
|
python
|
{
"resource": ""
}
|
q26408
|
_slugify
|
train
|
def _slugify(text, delim=u'-'):
"""Generates an ASCII-only slug."""
result = []
for word in _punct_re.split(text.lower()):
word = word.encode('utf-8')
if word:
result.append(word)
slugified = delim.join([i.decode('utf-8') for i in result])
return re.sub(r'[^a-zA-Z0-9\s\-]', replace_char, slugified).lower()
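# Illustrative behaviour, assuming _punct_re splits on the usual punctuation
# and replace_char substitutes leftover special characters:
# >>> _slugify(u'Hello, World!')
# u'hello-world'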
|
python
|
{
"resource": ""
}
|
q26409
|
Markdown.convert
|
train
|
def convert(self, text):
"""Convert the given text."""
# Main function. The order in which other subs are called here is
# essential. Link and image substitutions need to happen before
# _EscapeSpecialChars(), so that any *'s or _'s in the <a>
# and <img> tags get encoded.
# Clear the global hashes. If we don't clear these, you get conflicts
# from other articles when generating a page which contains more than
# one article (e.g. an index page that shows the N most recent
# articles):
self.reset()
if not isinstance(text, unicode):
#TODO: perhaps shouldn't presume UTF-8 for string input?
text = unicode(text, 'utf-8')
if self.use_file_vars:
# Look for emacs-style file variable hints.
emacs_vars = self._get_emacs_vars(text)
if "markdown-extras" in emacs_vars:
splitter = re.compile("[ ,]+")
for e in splitter.split(emacs_vars["markdown-extras"]):
if '=' in e:
ename, earg = e.split('=', 1)
try:
earg = int(earg)
except ValueError:
pass
else:
ename, earg = e, None
self.extras[ename] = earg
# Standardize line endings:
text = re.sub("\r\n|\r", "\n", text)
# Make sure $text ends with a couple of newlines:
text += "\n\n"
# Convert all tabs to spaces.
text = self._detab(text)
# Strip any lines consisting only of spaces and tabs.
# This makes subsequent regexen easier to write, because we can
# match consecutive blank lines with /\n+/ instead of something
# contorted like /[ \t]*\n+/ .
text = self._ws_only_line_re.sub("", text)
# strip metadata from head and extract
if "metadata" in self.extras:
text = self._extract_metadata(text)
text = self.preprocess(text)
if self.safe_mode:
text = self._hash_html_spans(text)
# Turn block-level HTML blocks into hash entries
text = self._hash_html_blocks(text, raw=True)
# Strip link definitions, store in hashes.
if "footnotes" in self.extras:
# Must do footnotes first because an unlucky footnote defn
# looks like a link defn:
# [^4]: this "looks like a link defn"
text = self._strip_footnote_definitions(text)
text = self._strip_link_definitions(text)
text = self._run_block_gamut(text)
if "footnotes" in self.extras:
text = self._add_footnotes(text)
text = self.postprocess(text)
text = self._unescape_special_chars(text)
if self.safe_mode:
text = self._unhash_html_spans(text)
if "nofollow" in self.extras:
text = self._a_nofollow.sub(r'<\1 rel="nofollow"\2', text)
text += "\n"
rv = UnicodeWithAttrs(text)
if "toc" in self.extras:
rv._toc = self._toc
if "metadata" in self.extras:
rv.metadata = self.metadata
return rv
|
python
|
{
"resource": ""
}
|
q26410
|
Markdown._hash_html_blocks
|
train
|
def _hash_html_blocks(self, text, raw=False):
"""Hashify HTML blocks
We only want to do this for block-level HTML tags, such as headers,
lists, and tables. That's because we still want to wrap <p>s around
"paragraphs" that are wrapped in non-block-level tags, such as anchors,
phrase emphasis, and spans. The list of tags we're looking for is
hard-coded.
@param raw {boolean} indicates if these are raw HTML blocks in
the original source. It makes a difference in "safe" mode.
"""
if '<' not in text:
return text
# Pass `raw` value into our calls to self._hash_html_block_sub.
hash_html_block_sub = _curry(self._hash_html_block_sub, raw=raw)
# First, look for nested blocks, e.g.:
# <div>
# <div>
# tags for inner block must be indented.
# </div>
# </div>
#
# The outermost tags must start at the left margin for this to match, and
# the inner nested divs must be indented.
# We need to do this before the next, more liberal match, because the next
# match will start at the first `<div>` and stop at the first `</div>`.
text = self._strict_tag_block_re.sub(hash_html_block_sub, text)
# Now match more liberally, simply from `\n<tag>` to `</tag>\n`
text = self._liberal_tag_block_re.sub(hash_html_block_sub, text)
# Special case just for <hr />. It was easier to make a special
# case than to make the other regex more complicated.
if "<hr" in text:
_hr_tag_re = _hr_tag_re_from_tab_width(self.tab_width)
text = _hr_tag_re.sub(hash_html_block_sub, text)
# Special case for standalone HTML comments:
if "<!--" in text:
start = 0
while True:
# Delimiters for next comment block.
try:
start_idx = text.index("<!--", start)
except ValueError:
break
try:
end_idx = text.index("-->", start_idx) + 3
except ValueError:
break
# Start position for next comment block search.
start = end_idx
# Validate whitespace before comment.
if start_idx:
# - Up to `tab_width - 1` spaces before start_idx.
for i in range(self.tab_width - 1):
if text[start_idx - 1] != ' ':
break
start_idx -= 1
if start_idx == 0:
break
# - Must be preceded by 2 newlines or hit the start of
# the document.
if start_idx == 0:
pass
elif start_idx == 1 and text[0] == '\n':
start_idx = 0 # to match minute detail of Markdown.pl regex
elif text[start_idx-2:start_idx] == '\n\n':
pass
else:
break
# Validate whitespace after comment.
# - Any number of spaces and tabs.
while end_idx < len(text):
if text[end_idx] not in ' \t':
break
end_idx += 1
# - Must be following by 2 newlines or hit end of text.
if text[end_idx:end_idx+2] not in ('', '\n', '\n\n'):
continue
# Escape and hash (must match `_hash_html_block_sub`).
html = text[start_idx:end_idx]
if raw and self.safe_mode:
html = self._sanitize_html(html)
key = _hash_text(html)
self.html_blocks[key] = html
text = text[:start_idx] + "\n\n" + key + "\n\n" + text[end_idx:]
if "xml" in self.extras:
# Treat XML processing instructions and namespaced one-liner
# tags as if they were block HTML tags. E.g., if standalone
# (i.e. are their own paragraph), the following do not get
# wrapped in a <p> tag:
# <?foo bar?>
#
# <xi:include xmlns:xi="http://www.w3.org/2001/XInclude" href="chapter_1.md"/>
_xml_oneliner_re = _xml_oneliner_re_from_tab_width(self.tab_width)
text = _xml_oneliner_re.sub(hash_html_block_sub, text)
return text
|
python
|
{
"resource": ""
}
|
q26411
|
Markdown.header_id_from_text
|
train
|
def header_id_from_text(self, text, prefix, n):
"""Generate a header id attribute value from the given header
HTML content.
This is only called if the "header-ids" extra is enabled.
Subclasses may override this for different header ids.
@param text {str} The text of the header tag
@param prefix {str} The requested prefix for header ids. This is the
value of the "header-ids" extra key, if any. Otherwise, None.
@param n {int} The <hN> tag number, i.e. `1` for an <h1> tag.
@returns {str} The value for the header tag's "id" attribute. Return
None to not have an id attribute and to exclude this header from
the TOC (if the "toc" extra is specified).
"""
header_id = _slugify(text)
if prefix and isinstance(prefix, base_string_type):
header_id = prefix + '-' + header_id
if header_id in self._count_from_header_id:
self._count_from_header_id[header_id] += 1
header_id += '-%s' % self._count_from_header_id[header_id]
else:
self._count_from_header_id[header_id] = 1
return header_id
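# Illustrative behaviour (hypothetical Markdown instance `md`): a repeated
# header gets a numeric suffix so ids stay unique within one document.
# >>> md.header_id_from_text('Usage', 'doc', 2)
# 'doc-usage'
# >>> md.header_id_from_text('Usage', 'doc', 2)
# 'doc-usage-2'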
|
python
|
{
"resource": ""
}
|
q26412
|
UnicodeWithAttrs.toc_html
|
train
|
def toc_html(self):
"""Return the HTML for the current TOC.
This expects the `_toc` attribute to have been set on this instance.
"""
if self._toc is None:
return None
def indent():
return ' ' * (len(h_stack) - 1)
lines = []
h_stack = [0] # stack of header-level numbers
for level, id, name in self._toc:
if level > h_stack[-1]:
lines.append("%s<ul>" % indent())
h_stack.append(level)
elif level == h_stack[-1]:
lines[-1] += "</li>"
else:
while level < h_stack[-1]:
h_stack.pop()
if not lines[-1].endswith("</li>"):
lines[-1] += "</li>"
lines.append("%s</ul></li>" % indent())
lines.append('%s<li><a href="#%s">%s</a>' % (
indent(), id, name))
while len(h_stack) > 1:
h_stack.pop()
if not lines[-1].endswith("</li>"):
lines[-1] += "</li>"
lines.append("%s</ul>" % indent())
return '\n'.join(lines) + '\n'
|
python
|
{
"resource": ""
}
|
q26413
|
Command.handle
|
train
|
def handle(self, *args, **options):
"""
Queues the function given with the first argument with the
parameters given with the rest of the argument list.
"""
verbosity = int(options.get('verbosity', 1))
timeout = options.get('timeout')
queue = get_queue(options.get('queue'))
job = queue.enqueue_call(args[0], args=args[1:], timeout=timeout)
if verbosity:
print('Job %s created' % job.id)
|
python
|
{
"resource": ""
}
|
q26414
|
get_worker
|
train
|
def get_worker(*queue_names, **kwargs):
"""
Returns a RQ worker for all queues or specified ones.
"""
job_class = get_job_class(kwargs.pop('job_class', None))
queue_class = kwargs.pop('queue_class', None)
queues = get_queues(*queue_names, **{'job_class': job_class,
'queue_class': queue_class})
# normalize queue_class to what get_queues returns
queue_class = queues[0].__class__
worker_class = get_worker_class(kwargs.pop('worker_class', None))
return worker_class(queues,
connection=queues[0].connection,
exception_handlers=get_exception_handlers() or None,
job_class=job_class,
queue_class=queue_class,
**kwargs)
|
python
|
{
"resource": ""
}
|
q26415
|
commit
|
train
|
def commit(*args, **kwargs):
"""
Processes all jobs in the delayed queue.
"""
delayed_queue = get_queue()
try:
while delayed_queue:
queue, args, kwargs = delayed_queue.pop(0)
queue.original_enqueue_call(*args, **kwargs)
finally:
clear()
|
python
|
{
"resource": ""
}
|
q26416
|
get_queue_class
|
train
|
def get_queue_class(config=None, queue_class=None):
"""
Return queue class from config or from RQ settings, otherwise return DjangoRQ.
If ``queue_class`` is provided, it takes priority.
The full priority list for queue class sources:
1. ``queue_class`` argument
2. ``QUEUE_CLASS`` in ``config`` argument
3. ``QUEUE_CLASS`` in base settings (``RQ``)
"""
RQ = getattr(settings, 'RQ', {})
if queue_class is None:
queue_class = RQ.get('QUEUE_CLASS', DjangoRQ)
if config:
queue_class = config.get('QUEUE_CLASS', queue_class)
if isinstance(queue_class, six.string_types):
queue_class = import_attribute(queue_class)
return queue_class
|
python
|
{
"resource": ""
}
|
q26417
|
get_redis_connection
|
train
|
def get_redis_connection(config, use_strict_redis=False):
"""
Returns a redis connection from a connection config
"""
redis_cls = redis.StrictRedis if use_strict_redis else redis.Redis
if 'URL' in config:
return redis_cls.from_url(config['URL'], db=config.get('DB'))
if 'USE_REDIS_CACHE' in config:
try:
# Assume that we're using django-redis
from django_redis import get_redis_connection as get_redis
return get_redis(config['USE_REDIS_CACHE'])
except ImportError:
pass
from django.core.cache import caches
cache = caches[config['USE_REDIS_CACHE']]
# We're using django-redis-cache
try:
return cache._client
except AttributeError:
# For django-redis-cache > 0.13.1
return cache.get_master_client()
if 'UNIX_SOCKET_PATH' in config:
return redis_cls(unix_socket_path=config['UNIX_SOCKET_PATH'], db=config['DB'])
if 'SENTINELS' in config:
sentinel_kwargs = {
'db': config.get('DB'),
'password': config.get('PASSWORD'),
'socket_timeout': config.get('SOCKET_TIMEOUT'),
}
sentinel_kwargs.update(config.get('CONNECTION_KWARGS', {}))
sentinel = Sentinel(config['SENTINELS'], **sentinel_kwargs)
return sentinel.master_for(
service_name=config['MASTER_NAME'], redis_class=redis_cls,
)
return redis_cls(host=config['HOST'], port=config['PORT'], db=config['DB'], password=config.get('PASSWORD'), ssl=config.get('SSL', False))
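# Illustrative config dicts accepted by this function (keys match the
# branches above; values are assumptions):
# get_redis_connection({'URL': 'redis://localhost:6379/0'})
# get_redis_connection({'HOST': 'localhost', 'PORT': 6379, 'DB': 0})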
|
python
|
{
"resource": ""
}
|
q26418
|
get_connection
|
train
|
def get_connection(name='default', use_strict_redis=False):
"""
Returns a Redis connection to use based on parameters in settings.RQ_QUEUES
"""
from .settings import QUEUES
return get_redis_connection(QUEUES[name], use_strict_redis)
|
python
|
{
"resource": ""
}
|
q26419
|
get_queue
|
train
|
def get_queue(name='default', default_timeout=None, is_async=None,
autocommit=None, connection=None, queue_class=None, job_class=None, **kwargs):
"""
Returns an rq Queue using parameters defined in ``RQ_QUEUES``
"""
from .settings import QUEUES
if kwargs.get('async') is not None:
is_async = kwargs['async']
warnings.warn('The `async` keyword is deprecated. Use `is_async` instead', DeprecationWarning)
# If is_async is provided, use it, otherwise, get it from the configuration
if is_async is None:
is_async = QUEUES[name].get('ASYNC', True)
# same for job_class
job_class = get_job_class(job_class)
if default_timeout is None:
default_timeout = QUEUES[name].get('DEFAULT_TIMEOUT')
if connection is None:
connection = get_connection(name)
queue_class = get_queue_class(QUEUES[name], queue_class)
return queue_class(name, default_timeout=default_timeout,
connection=connection, is_async=is_async,
job_class=job_class, autocommit=autocommit, **kwargs)
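# Illustrative usage sketch (requires a matching 'default' entry in the
# RQ_QUEUES setting; `some_func` is a hypothetical callable):
# queue = get_queue('default', default_timeout=360)
# queue.enqueue(some_func, 42)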
|
python
|
{
"resource": ""
}
|
q26420
|
get_queue_by_index
|
train
|
def get_queue_by_index(index):
"""
Returns an rq Queue using parameters defined in ``QUEUES_LIST``
"""
from .settings import QUEUES_LIST
config = QUEUES_LIST[int(index)]
return get_queue_class(config)(
config['name'],
connection=get_redis_connection(config['connection_config']),
is_async=config.get('ASYNC', True))
|
python
|
{
"resource": ""
}
|
q26421
|
filter_connection_params
|
train
|
def filter_connection_params(queue_params):
"""
Filters the queue params to keep only the connection related params.
"""
CONNECTION_PARAMS = ('URL', 'DB', 'USE_REDIS_CACHE',
'UNIX_SOCKET_PATH', 'HOST', 'PORT', 'PASSWORD',
'SENTINELS', 'MASTER_NAME', 'SOCKET_TIMEOUT',
'SSL', 'CONNECTION_KWARGS',)
#return {p:v for p,v in queue_params.items() if p in CONNECTION_PARAMS}
# Dict comprehension compatible with python 2.6
return dict((p,v) for (p,v) in queue_params.items() if p in CONNECTION_PARAMS)
|
python
|
{
"resource": ""
}
|
q26422
|
get_queues
|
train
|
def get_queues(*queue_names, **kwargs):
"""
Return queue instances from specified queue names.
All instances must use the same Redis connection.
"""
from .settings import QUEUES
if len(queue_names) <= 1:
# Return "default" queue if no queue name is specified
# or one queue with specified name
return [get_queue(*queue_names, **kwargs)]
# will return more than one queue
# import job class only once for all queues
kwargs['job_class'] = get_job_class(kwargs.pop('job_class', None))
queue_params = QUEUES[queue_names[0]]
connection_params = filter_connection_params(queue_params)
queues = [get_queue(queue_names[0], **kwargs)]
# do consistency checks while building return list
for name in queue_names[1:]:
queue = get_queue(name, **kwargs)
if type(queue) is not type(queues[0]):
raise ValueError(
'Queues must have the same class.'
'"{0}" and "{1}" have '
'different classes'.format(name, queue_names[0]))
if connection_params != filter_connection_params(QUEUES[name]):
raise ValueError(
'Queues must have the same redis connection.'
'"{0}" and "{1}" have '
'different connections'.format(name, queue_names[0]))
queues.append(queue)
return queues
|
python
|
{
"resource": ""
}
|
q26423
|
get_unique_connection_configs
|
train
|
def get_unique_connection_configs(config=None):
"""
Returns a list of unique Redis connections from config
"""
if config is None:
from .settings import QUEUES
config = QUEUES
connection_configs = []
for key, value in config.items():
value = filter_connection_params(value)
if value not in connection_configs:
connection_configs.append(value)
return connection_configs
|
python
|
{
"resource": ""
}
|
q26424
|
enqueue_job
|
train
|
def enqueue_job(request, queue_index, job_id):
""" Enqueue deferred jobs
"""
queue_index = int(queue_index)
queue = get_queue_by_index(queue_index)
job = Job.fetch(job_id, connection=queue.connection)
if request.method == 'POST':
queue.enqueue_job(job)
# Remove job from correct registry if needed
if job.get_status() == JobStatus.DEFERRED:
registry = DeferredJobRegistry(queue.name, queue.connection)
registry.remove(job)
elif job.get_status() == JobStatus.FINISHED:
registry = FinishedJobRegistry(queue.name, queue.connection)
registry.remove(job)
messages.info(request, 'You have successfully enqueued %s' % job.id)
return redirect('rq_job_detail', queue_index, job_id)
context_data = {
'queue_index': queue_index,
'job': job,
'queue': queue,
}
return render(request, 'django_rq/delete_job.html', context_data)
|
python
|
{
"resource": ""
}
|
q26425
|
job
|
train
|
def job(func_or_queue, connection=None, *args, **kwargs):
"""
The same as RQ's job decorator, but it automatically works out
the ``connection`` argument from RQ_QUEUES.
And also, it allows simplified ``@job`` syntax to put job into
default queue.
If RQ.DEFAULT_RESULT_TTL setting is set, it is used as default
for ``result_ttl`` kwarg.
"""
if callable(func_or_queue):
func = func_or_queue
queue = 'default'
else:
func = None
queue = func_or_queue
if isinstance(queue, six.string_types):
try:
queue = get_queue(queue)
if connection is None:
connection = queue.connection
except KeyError:
pass
RQ = getattr(settings, 'RQ', {})
default_result_ttl = RQ.get('DEFAULT_RESULT_TTL')
if default_result_ttl:
kwargs.setdefault('result_ttl', default_result_ttl)
decorator = _rq_job(queue, connection=connection, *args, **kwargs)
if func:
return decorator(func)
return decorator
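# Illustrative usage sketch ('high' is an assumed queue name configured in
# RQ_QUEUES); both forms attach a .delay() helper as in plain RQ:
# @job
# def double(x):
#     return x * 2
# @job('high', timeout=600)
# def increment(x):
#     return x + 1
# double.delay(2)  # enqueues into the 'default' queue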
|
python
|
{
"resource": ""
}
|
q26426
|
to_localtime
|
train
|
def to_localtime(time):
'''Converts naive datetime to localtime based on settings'''
utc_time = time.replace(tzinfo=timezone.utc)
to_zone = timezone.get_default_timezone()
return utc_time.astimezone(to_zone)
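# Illustrative call; the exact result depends on settings.TIME_ZONE (here
# assumed to be 'Europe/Berlin', UTC+1 in January):
# >>> to_localtime(datetime.datetime(2019, 1, 1, 12, 0))
# datetime.datetime(2019, 1, 1, 13, 0, tzinfo=<DstTzInfo 'Europe/Berlin' ...>)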
|
python
|
{
"resource": ""
}
|
q26427
|
SSD1306Base.command
|
train
|
def command(self, c):
"""Send command byte to display."""
if self._spi is not None:
# SPI write.
self._gpio.set_low(self._dc)
self._spi.write([c])
else:
# I2C write.
control = 0x00 # Co = 0, DC = 0
self._i2c.write8(control, c)
|
python
|
{
"resource": ""
}
|
q26428
|
SSD1306Base.data
|
train
|
def data(self, c):
"""Send byte of data to display."""
if self._spi is not None:
# SPI write.
self._gpio.set_high(self._dc)
self._spi.write([c])
else:
# I2C write.
control = 0x40 # Co = 0, DC = 1 (data byte)
self._i2c.write8(control, c)
|
python
|
{
"resource": ""
}
|
q26429
|
SSD1306Base.begin
|
train
|
def begin(self, vccstate=SSD1306_SWITCHCAPVCC):
"""Initialize display."""
# Save vcc state.
self._vccstate = vccstate
# Reset and initialize display.
self.reset()
self._initialize()
# Turn on the display.
self.command(SSD1306_DISPLAYON)
|
python
|
{
"resource": ""
}
|
q26430
|
SSD1306Base.set_contrast
|
train
|
def set_contrast(self, contrast):
"""Sets the contrast of the display. Contrast should be a value between
0 and 255."""
if contrast < 0 or contrast > 255:
raise ValueError('Contrast must be a value from 0 to 255 (inclusive).')
self.command(SSD1306_SETCONTRAST)
self.command(contrast)
|
python
|
{
"resource": ""
}
|
q26431
|
SSD1306Base.dim
|
train
|
def dim(self, dim):
"""Adjusts contrast to dim the display if dim is True, otherwise sets the
contrast to normal brightness if dim is False.
"""
# Assume dim display.
contrast = 0
# Adjust contrast based on VCC if not dimming.
if not dim:
if self._vccstate == SSD1306_EXTERNALVCC:
contrast = 0x9F
else:
contrast = 0xCF
# Send the computed value to the display; the original body computed a
# contrast but never applied it.
self.set_contrast(contrast)
|
python
|
{
"resource": ""
}
|
q26432
|
MySQLQueryBuilder._select_sql
|
train
|
def _select_sql(self, **kwargs):
"""
Overridden function to generate the SELECT part of the SQL statement,
with the addition of a modifier if present.
"""
return 'SELECT {distinct}{modifier}{select}'.format(
distinct='DISTINCT ' if self._distinct else '',
modifier='{} '.format(' '.join(self._modifiers)) if self._modifiers else '',
select=','.join(term.get_sql(with_alias=True, subquery=True, **kwargs)
for term in self._selects),
)
|
python
|
{
"resource": ""
}
|
q26433
|
QueryBuilder.from_
|
train
|
def from_(self, selectable):
"""
Adds a table to the query. This function can only be called once and will raise an AttributeError if called a
second time.
:param selectable:
Type: ``Table``, ``Query``, or ``str``
When a ``str`` is passed, a table with the name matching the ``str`` value is used.
:returns
A copy of the query with the table added.
"""
self._from.append(Table(selectable) if isinstance(selectable, str) else selectable)
if isinstance(selectable, (QueryBuilder, _UnionQuery)) and selectable.alias is None:
if isinstance(selectable, QueryBuilder):
sub_query_count = selectable._subquery_count
else:
sub_query_count = 0
sub_query_count = max(self._subquery_count, sub_query_count)
selectable.alias = 'sq%d' % sub_query_count
self._subquery_count = sub_query_count + 1
|
python
|
{
"resource": ""
}
|
q26434
|
QueryBuilder._validate_table
|
train
|
def _validate_table(self, term):
"""
Returns False if the term references a table not already part of the
FROM clause or JOINS and True otherwise.
"""
base_tables = self._from + [self._update_table]
for field in term.fields():
table_in_base_tables = field.table in base_tables
table_in_joins = field.table in [join.item for join in self._joins]
if field.table is not None \
and not table_in_base_tables \
and not table_in_joins \
and field.table != self._update_table:
return False
return True
|
python
|
{
"resource": ""
}
|
q26435
|
QueryBuilder._group_sql
|
train
|
def _group_sql(self, quote_char=None, groupby_alias=True, **kwargs):
"""
Produces the GROUP BY part of the query. This is a list of fields. The clauses are stored in the query under
self._groupbys as a list of fields.
If a groupby field is used in the select clause,
determined by a matching alias, and groupby_alias is set True,
then the GROUP BY clause will use the alias,
otherwise the entire field will be rendered as SQL.
"""
clauses = []
selected_aliases = {s.alias for s in self._selects}
for field in self._groupbys:
if groupby_alias and field.alias and field.alias in selected_aliases:
clauses.append("{quote}{alias}{quote}".format(
alias=field.alias,
quote=quote_char or '',
))
else:
clauses.append(field.get_sql(quote_char=quote_char, **kwargs))
sql = ' GROUP BY {groupby}'.format(groupby=','.join(clauses))
if self._with_totals:
return sql + ' WITH TOTALS'
return sql
|
python
|
{
"resource": ""
}
|
q26436
|
Joiner.cross
|
train
|
def cross(self):
"""Return cross join"""
self.query.do_join(Join(self.item, JoinType.cross))
return self.query
|
python
|
{
"resource": ""
}
|
q26437
|
builder
|
train
|
def builder(func):
"""
Decorator for wrapper "builder" functions. These are functions on the Query class or other classes used for
building queries which mutate the query and return self. To make the build functions immutable, this decorator is
used which will deepcopy the current instance. This decorator will return the return value of the inner function
or the new copy of the instance. The inner function does not need to return self.
"""
import copy
def _copy(self, *args, **kwargs):
self_copy = copy.copy(self)
result = func(self_copy, *args, **kwargs)
# Return self if the inner function returns None. This way the inner function can return something
# different (for example when creating joins, a different builder is returned).
if result is None:
return self_copy
return result
return _copy
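# Illustrative usage sketch (hypothetical class). Note that builder uses
# copy.copy, so classes relying on it define __copy__ to duplicate any
# mutable state:
# class Query:
#     def __init__(self, wheres=None):
#         self._wheres = wheres or []
#     def __copy__(self):
#         return Query(list(self._wheres))
#     @builder
#     def where(self, cond):
#         self._wheres.append(cond)
# q1 = Query()
# q2 = q1.where('a = 1')  # q1._wheres is still []; q2._wheres == ['a = 1']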
|
python
|
{
"resource": ""
}
|
q26438
|
resolve_is_aggregate
|
train
|
def resolve_is_aggregate(values):
"""
Resolves the is_aggregate flag for an expression that contains multiple terms. This works like a voter system,
each term votes True or False or abstains with None.
:param values: A list of booleans (or None) for each term in the expression
:return: If all values are True or None, True is returned. If all values are None, None is returned. Otherwise,
False is returned.
"""
result = [x
for x in values
if x is not None]
if result:
return all(result)
return None
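# Illustrative truth table for the voter semantics described above:
# resolve_is_aggregate([True, True])   -> True
# resolve_is_aggregate([True, None])   -> True   (None abstains)
# resolve_is_aggregate([True, False])  -> False
# resolve_is_aggregate([None, None])   -> None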
|
python
|
{
"resource": ""
}
|
q26439
|
Term.wrap_constant
|
train
|
def wrap_constant(self, val):
"""
Used for wrapping raw inputs such as numbers in Criterions and Operator.
For example, the expression F('abc')+1 stores the integer part in a ValueWrapper object.
:param val:
Any value.
:return:
Raw string, number, or decimal values will be returned in a ValueWrapper. Fields and other parts of the
querybuilder will be returned as inputted.
"""
from .queries import QueryBuilder
if isinstance(val, (Term, QueryBuilder, Interval)):
return val
if val is None:
return NullValue()
if isinstance(val, list):
return Array(*val)
if isinstance(val, tuple):
return Tuple(*val)
_ValueWrapper = getattr(self, '_wrapper_cls', ValueWrapper)
return _ValueWrapper(val)
|
python
|
{
"resource": ""
}
|
q26440
|
Function.for_
|
train
|
def for_(self, table):
"""
Replaces the tables of this term for the table parameter provided. Useful when reusing fields across queries.
:param table:
The table to replace with.
:return:
A copy of the field with its table value replaced.
"""
self.args = [param.for_(table) for param in self.args]
|
python
|
{
"resource": ""
}
|
q26441
|
LevelDBReader._get_head_state
|
train
|
def _get_head_state(self):
"""Get head state.
:return:
"""
if not self.head_state:
root = self._get_head_block().state_root
self.head_state = State(self.db, root)
return self.head_state
|
python
|
{
"resource": ""
}
|
q26442
|
LevelDBReader._get_account
|
train
|
def _get_account(self, address):
"""Get account by address.
:param address:
:return:
"""
state = self._get_head_state()
account_address = binascii.a2b_hex(utils.remove_0x_head(address))
return state.get_and_cache_account(account_address)
|
python
|
{
"resource": ""
}
|
q26443
|
LevelDBReader._get_block_hash
|
train
|
def _get_block_hash(self, number):
"""Get block hash by block number.
:param number:
:return:
"""
num = _format_block_number(number)
hash_key = header_prefix + num + num_suffix
return self.db.get(hash_key)
|
python
|
{
"resource": ""
}
|
q26444
|
LevelDBReader._get_head_block
|
train
|
def _get_head_block(self):
"""Get head block header.
:return:
"""
if not self.head_block_header:
block_hash = self.db.get(head_header_key)
num = self._get_block_number(block_hash)
self.head_block_header = self._get_block_header(block_hash, num)
# find header with valid state
while (
not self.db.get(self.head_block_header.state_root)
and self.head_block_header.prevhash is not None
):
block_hash = self.head_block_header.prevhash
num = self._get_block_number(block_hash)
self.head_block_header = self._get_block_header(block_hash, num)
return self.head_block_header
|
python
|
{
"resource": ""
}
|
q26445
|
LevelDBReader._get_block_number
|
train
|
def _get_block_number(self, block_hash):
"""Get block number by its hash.
:param block_hash:
:return:
"""
number_key = block_hash_prefix + block_hash
return self.db.get(number_key)
|
python
|
{
"resource": ""
}
|
q26446
|
LevelDBReader._get_block_header
|
train
|
def _get_block_header(self, block_hash, num):
"""Get block header by block header hash & number.
:param block_hash:
:param num:
:return:
"""
header_key = header_prefix + num + block_hash
block_header_data = self.db.get(header_key)
header = rlp.decode(block_header_data, sedes=BlockHeader)
return header
|
python
|
{
"resource": ""
}
|
q26447
|
LevelDBReader._get_address_by_hash
|
train
|
def _get_address_by_hash(self, block_hash):
"""Get mapped address by its hash.
:param block_hash:
:return:
"""
address_key = address_prefix + block_hash
return self.db.get(address_key)
|
python
|
{
"resource": ""
}
|
q26448
|
EthLevelDB.get_contracts
|
train
|
def get_contracts(self):
"""Iterate through all contracts."""
for account in self.reader._get_head_state().get_all_accounts():
if account.code is not None:
code = _encode_hex(account.code)
contract = EVMContract(code, enable_online_lookup=False)
yield contract, account.address, account.balance
|
python
|
{
"resource": ""
}
|
q26449
|
EthLevelDB.search
|
train
|
def search(self, expression, callback_func):
"""Search through all contract accounts.
:param expression:
:param callback_func:
"""
cnt = 0
indexer = AccountIndexer(self)
for contract, address_hash, balance in self.get_contracts():
if contract.matches_expression(expression):
try:
address = _encode_hex(indexer.get_contract_by_hash(address_hash))
except AddressNotFoundError:
"""The hash->address mapping does not exist in our index.
If the index is up-to-date, this likely means that
the contract was created by an internal transaction.
Skip this contract as right now we don't have a good
solution for this.
"""
continue
callback_func(contract, address, balance)
cnt += 1
if not cnt % 1000:
log.info("Searched %d contracts" % cnt)
|
python
|
{
"resource": ""
}
|
q26450
|
EthLevelDB.contract_hash_to_address
|
train
|
def contract_hash_to_address(self, contract_hash):
"""Try to find corresponding account address.
:param contract_hash:
:return:
"""
address_hash = binascii.a2b_hex(utils.remove_0x_head(contract_hash))
indexer = AccountIndexer(self)
return _encode_hex(indexer.get_contract_by_hash(address_hash))
|
python
|
{
"resource": ""
}
|
q26451
|
EthLevelDB.eth_getBlockHeaderByNumber
|
train
|
def eth_getBlockHeaderByNumber(self, number):
"""Get block header by block number.
:param number:
:return:
"""
block_hash = self.reader._get_block_hash(number)
block_number = _format_block_number(number)
return self.reader._get_block_header(block_hash, block_number)
|
python
|
{
"resource": ""
}
|
q26452
|
EthLevelDB.eth_getBlockByNumber
|
train
|
def eth_getBlockByNumber(self, number):
"""Get block body by block number.
:param number:
:return:
"""
block_hash = self.reader._get_block_hash(number)
block_number = _format_block_number(number)
body_key = body_prefix + block_number + block_hash
block_data = self.db.get(body_key)
body = rlp.decode(block_data, sedes=Block)
return body
|
python
|
{
"resource": ""
}
|
q26453
|
EthLevelDB.eth_getCode
|
train
|
def eth_getCode(self, address):
"""Get account code.
:param address:
:return:
"""
account = self.reader._get_account(address)
return _encode_hex(account.code)
|
python
|
{
"resource": ""
}
|
q26454
|
EthLevelDB.eth_getBalance
|
train
|
def eth_getBalance(self, address):
"""Get account balance.
:param address:
:return:
"""
account = self.reader._get_account(address)
return account.balance
|
python
|
{
"resource": ""
}
|
q26455
|
EthLevelDB.eth_getStorageAt
|
train
|
def eth_getStorageAt(self, address, position):
"""Get account storage data at position.
:param address:
:param position:
:return:
"""
account = self.reader._get_account(address)
return _encode_hex(
utils.zpad(utils.encode_int(account.get_storage_data(position)), 32)
)
|
python
|
{
"resource": ""
}
|
q26456
|
native_contracts
|
train
|
def native_contracts(address: int, data: BaseCalldata) -> List[int]:
"""Takes integer address 1, 2, 3, 4.
:param address:
:param data:
:return:
"""
functions = (ecrecover, sha256, ripemd160, identity)
if isinstance(data, ConcreteCalldata):
concrete_data = data.concrete(None)
else:
raise NativeContractException()
return functions[address - 1](concrete_data)
|
python
|
{
"resource": ""
}
|
q26457
|
get_call_parameters
|
train
|
def get_call_parameters(
global_state: GlobalState, dynamic_loader: DynLoader, with_value=False
):
"""Gets call parameters from global state Pops the values from the stack
and determines output parameters.
:param global_state: state to look in
:param dynamic_loader: dynamic loader to use
:param with_value: whether to pop the value argument from the stack
:return: callee_address, callee_account, call_data, value, gas, memory_out_offset, memory_out_size
"""
gas, to = global_state.mstate.pop(2)
value = global_state.mstate.pop() if with_value else 0
memory_input_offset, memory_input_size, memory_out_offset, memory_out_size = global_state.mstate.pop(
4
)
callee_address = get_callee_address(global_state, dynamic_loader, to)
callee_account = None
call_data = get_call_data(global_state, memory_input_offset, memory_input_size)
if int(callee_address, 16) >= 5 or int(callee_address, 16) == 0:
callee_account = get_callee_account(
global_state, callee_address, dynamic_loader
)
return (
callee_address,
callee_account,
call_data,
value,
gas,
memory_out_offset,
memory_out_size,
)
|
python
|
{
"resource": ""
}
|
q26458
|
get_callee_address
|
train
|
def get_callee_address(
global_state: GlobalState,
dynamic_loader: DynLoader,
symbolic_to_address: Expression,
):
"""Gets the address of the callee.
:param global_state: state to look in
:param dynamic_loader: dynamic loader to use
:param symbolic_to_address: The (symbolic) callee address
:return: Address of the callee
"""
environment = global_state.environment
try:
callee_address = hex(util.get_concrete_int(symbolic_to_address))
except TypeError:
log.debug("Symbolic call encountered")
match = re.search(r"storage_(\d+)", str(simplify(symbolic_to_address)))
log.debug("CALL to: " + str(simplify(symbolic_to_address)))
if match is None or dynamic_loader is None:
raise ValueError()
index = int(match.group(1))
log.debug("Dynamic contract address at storage index {}".format(index))
# attempt to read the contract address from instance storage
try:
callee_address = dynamic_loader.read_storage(
environment.active_account.address, index
)
# TODO: verify whether this happens or not
except:
log.debug("Error accessing contract storage.")
raise ValueError
# testrpc simply returns the address, geth response is more elaborate.
if not re.match(r"^0x[0-9a-f]{40}$", callee_address):
callee_address = "0x" + callee_address[26:]
return callee_address
|
python
|
{
"resource": ""
}
|
q26459
|
get_callee_account
|
train
|
def get_callee_account(
global_state: GlobalState, callee_address: str, dynamic_loader: DynLoader
):
"""Gets the callees account from the global_state.
:param global_state: state to look in
:param callee_address: address of the callee
:param dynamic_loader: dynamic loader to use
:return: Account belonging to callee
"""
environment = global_state.environment
accounts = global_state.accounts
try:
return global_state.accounts[callee_address]
except KeyError:
# We have a valid call address, but contract is not in the modules list
log.debug("Module with address " + callee_address + " not loaded.")
if dynamic_loader is None:
raise ValueError()
log.debug("Attempting to load dependency")
try:
code = dynamic_loader.dynld(callee_address)
except ValueError as error:
log.debug("Unable to execute dynamic loader because: {}".format(str(error)))
raise error
if code is None:
log.debug("No code returned, not a contract account?")
raise ValueError()
log.debug("Dependency loaded: " + callee_address)
callee_account = Account(
callee_address, code, callee_address, dynamic_loader=dynamic_loader
)
accounts[callee_address] = callee_account
return callee_account
|
python
|
{
"resource": ""
}
|
q26460
|
get_call_data
|
train
|
def get_call_data(
global_state: GlobalState,
memory_start: Union[int, BitVec],
memory_size: Union[int, BitVec],
):
"""Gets call_data from the global_state.
:param global_state: state to look in
:param memory_start: Start index
:param memory_size: Size
:return: calldata object: the environment's calldata, a ConcreteCalldata read from memory, or a SymbolicCalldata if the offsets are symbolic
"""
state = global_state.mstate
transaction_id = "{}_internalcall".format(global_state.current_transaction.id)
memory_start = cast(
BitVec,
(
symbol_factory.BitVecVal(memory_start, 256)
if isinstance(memory_start, int)
else memory_start
),
)
memory_size = cast(
BitVec,
(
symbol_factory.BitVecVal(memory_size, 256)
if isinstance(memory_size, int)
else memory_size
),
)
uses_entire_calldata = simplify(
memory_size - global_state.environment.calldata.calldatasize == 0
)
if uses_entire_calldata is True:
return global_state.environment.calldata
try:
calldata_from_mem = state.memory[
util.get_concrete_int(memory_start) : util.get_concrete_int(
memory_start + memory_size
)
]
return ConcreteCalldata(transaction_id, calldata_from_mem)
except TypeError:
log.debug("Unsupported symbolic calldata offset")
return SymbolicCalldata(transaction_id)
|
python
|
{
"resource": ""
}
|
q26461
|
InstructionCoveragePlugin.initialize
|
train
|
def initialize(self, symbolic_vm: LaserEVM):
"""Initializes the instruction coverage plugin
Introduces hooks for each instruction
:param symbolic_vm:
:return:
"""
self.coverage = {}
self.initial_coverage = 0
self.tx_id = 0
@symbolic_vm.laser_hook("stop_sym_exec")
def stop_sym_exec_hook():
# Print results
for code, code_cov in self.coverage.items():
cov_percentage = sum(code_cov[1]) / float(code_cov[0]) * 100
log.info(
"Achieved {:.2f}% coverage for code: {}".format(
cov_percentage, code
)
)
@symbolic_vm.laser_hook("execute_state")
def execute_state_hook(global_state: GlobalState):
# Record coverage
code = global_state.environment.code.bytecode
if code not in self.coverage.keys():
number_of_instructions = len(
global_state.environment.code.instruction_list
)
self.coverage[code] = (
number_of_instructions,
[False] * number_of_instructions,
)
self.coverage[code][1][global_state.mstate.pc] = True
@symbolic_vm.laser_hook("start_sym_trans")
def execute_start_sym_trans_hook():
self.initial_coverage = self._get_covered_instructions()
@symbolic_vm.laser_hook("stop_sym_trans")
def execute_stop_sym_trans_hook():
end_coverage = self._get_covered_instructions()
log.info(
"Number of new instructions covered in tx %d: %d"
% (self.tx_id, end_coverage - self.initial_coverage)
)
self.tx_id += 1
|
python
|
{
"resource": ""
}
|
q26462
|
execute_message_call
|
train
|
def execute_message_call(laser_evm, callee_address: str) -> None:
"""Executes a message call transaction from all open states.
:param laser_evm:
:param callee_address:
"""
# TODO: Resolve circular import between .transaction and ..svm to import LaserEVM here
open_states = laser_evm.open_states[:]
del laser_evm.open_states[:]
for open_world_state in open_states:
if open_world_state[callee_address].deleted:
log.debug("Can not execute dead contract, skipping.")
continue
next_transaction_id = get_next_transaction_id()
transaction = MessageCallTransaction(
world_state=open_world_state,
identifier=next_transaction_id,
gas_price=symbol_factory.BitVecSym(
"gas_price{}".format(next_transaction_id), 256
),
gas_limit=8000000, # block gas limit
origin=symbol_factory.BitVecSym(
"origin{}".format(next_transaction_id), 256
),
caller=symbol_factory.BitVecVal(ATTACKER_ADDRESS, 256),
callee_account=open_world_state[callee_address],
call_data=SymbolicCalldata(next_transaction_id),
call_value=symbol_factory.BitVecSym(
"call_value{}".format(next_transaction_id), 256
),
)
_setup_global_state_for_execution(laser_evm, transaction)
laser_evm.exec()
|
python
|
{
"resource": ""
}
|
q26463
|
execute_contract_creation
|
train
|
def execute_contract_creation(
laser_evm, contract_initialization_code, contract_name=None
) -> Account:
"""Executes a contract creation transaction from all open states.
:param laser_evm:
:param contract_initialization_code:
:param contract_name:
:return:
"""
# TODO: Resolve circular import between .transaction and ..svm to import LaserEVM here
open_states = laser_evm.open_states[:]
del laser_evm.open_states[:]
new_account = laser_evm.world_state.create_account(
0, concrete_storage=True, dynamic_loader=None, creator=CREATOR_ADDRESS
)
if contract_name:
new_account.contract_name = contract_name
for open_world_state in open_states:
next_transaction_id = get_next_transaction_id()
transaction = ContractCreationTransaction(
world_state=open_world_state,
identifier=next_transaction_id,
gas_price=symbol_factory.BitVecSym(
"gas_price{}".format(next_transaction_id), 256
),
gas_limit=8000000, # block gas limit
origin=symbol_factory.BitVecSym(
"origin{}".format(next_transaction_id), 256
),
code=Disassembly(contract_initialization_code),
caller=symbol_factory.BitVecVal(CREATOR_ADDRESS, 256),
callee_account=new_account,
call_data=[],
call_value=symbol_factory.BitVecSym(
"call_value{}".format(next_transaction_id), 256
),
)
_setup_global_state_for_execution(laser_evm, transaction)
laser_evm.exec(True)
return new_account
|
python
|
{
"resource": ""
}
|
q26464
|
_setup_global_state_for_execution
|
train
|
def _setup_global_state_for_execution(laser_evm, transaction) -> None:
"""Sets up global state and cfg for a transactions execution.
:param laser_evm:
:param transaction:
"""
# TODO: Resolve circular import between .transaction and ..svm to import LaserEVM here
global_state = transaction.initial_global_state()
global_state.transaction_stack.append((transaction, None))
new_node = Node(
global_state.environment.active_account.contract_name,
function_name=global_state.environment.active_function_name,
)
if laser_evm.requires_statespace:
laser_evm.nodes[new_node.uid] = new_node
if transaction.world_state.node:
if laser_evm.requires_statespace:
laser_evm.edges.append(
Edge(
transaction.world_state.node.uid,
new_node.uid,
edge_type=JumpType.Transaction,
condition=None,
)
)
global_state.mstate.constraints += transaction.world_state.node.constraints
new_node.constraints = global_state.mstate.constraints.as_list
global_state.world_state.transaction_sequence.append(transaction)
global_state.node = new_node
new_node.states.append(global_state)
laser_evm.work_list.append(global_state)
|
python
|
{
"resource": ""
}
|
q26465
|
Mythril._init_config
|
train
|
def _init_config(self):
"""If no config file exists, create it and add default options.
The default LevelDB path is chosen based on the OS;
dynamic loading is set to Infura by default in the file.
Returns: leveldb directory
"""
system = platform.system().lower()
leveldb_fallback_dir = os.path.expanduser("~")
if system.startswith("darwin"):
leveldb_fallback_dir = os.path.join(
leveldb_fallback_dir, "Library", "Ethereum"
)
elif system.startswith("windows"):
leveldb_fallback_dir = os.path.join(
leveldb_fallback_dir, "AppData", "Roaming", "Ethereum"
)
else:
leveldb_fallback_dir = os.path.join(leveldb_fallback_dir, ".ethereum")
leveldb_fallback_dir = os.path.join(leveldb_fallback_dir, "geth", "chaindata")
if not os.path.exists(self.config_path):
log.info("No config file found. Creating default: " + self.config_path)
open(self.config_path, "a").close()
config = ConfigParser(allow_no_value=True)
config.optionxform = str
config.read(self.config_path, "utf-8")
if "defaults" not in config.sections():
self._add_default_options(config)
if not config.has_option("defaults", "leveldb_dir"):
self._add_leveldb_option(config, leveldb_fallback_dir)
if not config.has_option("defaults", "dynamic_loading"):
self._add_dynamic_loading_option(config)
with codecs.open(self.config_path, "w", "utf-8") as fp:
config.write(fp)
leveldb_dir = config.get(
"defaults", "leveldb_dir", fallback=leveldb_fallback_dir
)
return os.path.expanduser(leveldb_dir)
|
python
|
{
"resource": ""
}
|
q26466
|
Mythril._init_solc_binary
|
train
|
def _init_solc_binary(version):
"""Figure out solc binary and version.
Only proper versions are supported. No nightlies, commits etc (such as available in remix).
"""
if not version:
return os.environ.get("SOLC") or "solc"
# tried converting input to semver, seemed not necessary so just slicing for now
main_version = solc.main.get_solc_version_string()
main_version_number = re.match(r"\d+\.\d+\.\d+", main_version)
if main_version_number is None:
raise CriticalError(
"Could not extract solc version from string {}".format(main_version)
)
if version == main_version_number.group():
log.info("Given version matches installed version")
solc_binary = os.environ.get("SOLC") or "solc"
else:
solc_binary = util.solc_exists(version)
if solc_binary:
log.info("Given version is already installed")
else:
try:
solc.install_solc("v" + version)
solc_binary = util.solc_exists(version)
if not solc_binary:
raise SolcError()
except SolcError:
raise CriticalError(
"There was an error when trying to install the specified solc version"
)
log.info("Setting the compiler to %s", solc_binary)
return solc_binary
|
python
|
{
"resource": ""
}
|
q26467
|
And
|
train
|
def And(*args: Union[Bool, bool]) -> Bool:
"""Create an And expression."""
union = []
args_list = [arg if isinstance(arg, Bool) else Bool(arg) for arg in args]
for arg in args_list:
union.append(arg.annotations)
return Bool(z3.And([a.raw for a in args_list]), union)
|
python
|
{
"resource": ""
}
|
q26468
|
Or
|
train
|
def Or(a: Bool, b: Bool) -> Bool:
"""Create an or expression.
:param a:
:param b:
:return:
"""
union = a.annotations + b.annotations
return Bool(z3.Or(a.raw, b.raw), annotations=union)
|
python
|
{
"resource": ""
}
|
q26469
|
Not
|
train
|
def Not(a: Bool) -> Bool:
"""Create a Not expression.
:param a:
:return:
"""
return Bool(z3.Not(a.raw), a.annotations)
|
python
|
{
"resource": ""
}
|
q26470
|
Bool.value
|
train
|
def value(self) -> Union[bool, None]:
"""Returns the concrete value of this bool if concrete, otherwise None.
:return: Concrete value or None
"""
self.simplify()
if self.is_true:
return True
elif self.is_false:
return False
else:
return None
|
python
|
{
"resource": ""
}
|
q26471
|
synchronized
|
train
|
def synchronized(sync_lock):
"""A decorator synchronizing multi-process access to a resource."""
def wrapper(f):
"""The decorator's core function.
:param f:
:return:
"""
@functools.wraps(f)
def inner_wrapper(*args, **kw):
"""
:param args:
:param kw:
:return:
"""
with sync_lock:
return f(*args, **kw)
return inner_wrapper
return wrapper
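# Illustrative usage sketch (the lock object is an assumption):
# import multiprocessing
# _index_lock = multiprocessing.Lock()
# @synchronized(_index_lock)
# def update_index():
#     ...  # only one process executes this body at a time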
|
python
|
{
"resource": ""
}
|
q26472
|
SignatureDB._normalize_byte_sig
|
train
|
def _normalize_byte_sig(byte_sig: str) -> str:
"""Adds a leading 0x to the byte signature if it's not already there.
:param byte_sig: 4-byte signature string
:return: normalized byte signature string
"""
if not byte_sig.startswith("0x"):
byte_sig = "0x" + byte_sig
if len(byte_sig) != 10:
raise ValueError(
"Invalid byte signature %s, must have 10 characters" % byte_sig
)
return byte_sig
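# Illustrative behaviour (static call; '0xa9059cbb' is a well-known 4-byte
# selector used here only as an example):
# >>> SignatureDB._normalize_byte_sig("a9059cbb")
# '0xa9059cbb'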
|
python
|
{
"resource": ""
}
|
q26473
|
SignatureDB.import_solidity_file
|
train
|
def import_solidity_file(
self, file_path: str, solc_binary: str = "solc", solc_args: str = None
):
"""Import Function Signatures from solidity source files.
:param solc_binary:
:param solc_args:
:param file_path: solidity source code file path
:return:
"""
cmd = [solc_binary, "--hashes", file_path]
if solc_args:
cmd.extend(solc_args.split())
try:
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
ret = p.returncode
if ret != 0:
raise CompilerError(
"Solc has experienced a fatal error (code {}).\n\n{}".format(
ret, stderr.decode("utf-8")
)
)
except FileNotFoundError:
raise CompilerError(
(
"Compiler not found. Make sure that solc is installed and in PATH, "
"or the SOLC environment variable is set."
)
)
stdout = stdout.decode("unicode_escape").split("\n")
for line in stdout:
# the ':' need not be checked but just to be sure
if all(map(lambda x: x in line, ["(", ")", ":"])):
solc_bytes = "0x" + line.split(":")[0]
solc_text = line.split(":")[1].strip()
self.solidity_sigs[solc_bytes].append(solc_text)
log.debug(
"Signatures: found %d signatures after parsing" % len(self.solidity_sigs)
)
# update DB with what we've found
for byte_sig, text_sigs in self.solidity_sigs.items():
for text_sig in text_sigs:
self.add(byte_sig, text_sig)
|
python
|
{
"resource": ""
}
|
q26474
|
SignatureDB.lookup_online
|
train
|
def lookup_online(byte_sig: str, timeout: int, proxies=None) -> List[str]:
"""Lookup function signatures from 4byte.directory.
:param byte_sig: function signature hash as hexstr
:param timeout: optional timeout for online lookup
:param proxies: optional proxy servers for online lookup
:return: a list of matching function signatures for this hash
"""
if not ethereum_input_decoder:
return []
return list(
ethereum_input_decoder.decoder.FourByteDirectory.lookup_signatures(
byte_sig, timeout=timeout, proxies=proxies
)
)
|
python
|
{
"resource": ""
}
|
q26475
|
IntegerOverflowUnderflowModule.execute
|
train
|
def execute(self, state: GlobalState):
"""Executes analysis module for integer underflow and integer overflow.
:param state: Statespace to analyse
:return: Found issues
"""
address = _get_address_from_state(state)
has_overflow = self._overflow_cache.get(address, False)
has_underflow = self._underflow_cache.get(address, False)
if has_overflow or has_underflow:
return
opcode = state.get_current_instruction()["opcode"]
funcs = {
"ADD": [self._handle_add],
"SUB": [self._handle_sub],
"MUL": [self._handle_mul],
"SSTORE": [self._handle_sstore],
"JUMPI": [self._handle_jumpi],
"RETURN": [self._handle_return, self._handle_transaction_end],
"STOP": [self._handle_transaction_end],
"EXP": [self._handle_exp],
}
for func in funcs[opcode]:
func(state)
|
python
|
{
"resource": ""
}
|
q26476
|
get_transaction_sequence
|
train
|
def get_transaction_sequence(global_state, constraints):
"""Generate concrete transaction sequence.
:param global_state: GlobalState to generate transaction sequence for
:param constraints: list of constraints used to generate transaction sequence
"""
transaction_sequence = global_state.world_state.transaction_sequence
# gaslimit & gasprice don't exist yet
tx_template = {
"calldata": None,
"call_value": None,
"caller": "0xCA11EDEADBEEF37E636E6CA11EDEADBEEFCA11ED",
}
concrete_transactions = {}
creation_tx_ids = []
tx_constraints = constraints.copy()
minimize = []
transactions = []
for transaction in transaction_sequence:
tx_id = str(transaction.id)
if not isinstance(transaction, ContractCreationTransaction):
transactions.append(transaction)
# Constrain calldatasize
max_calldatasize = symbol_factory.BitVecVal(5000, 256)
tx_constraints.append(
UGE(max_calldatasize, transaction.call_data.calldatasize)
)
minimize.append(transaction.call_data.calldatasize)
minimize.append(transaction.call_value)
concrete_transactions[tx_id] = tx_template.copy()
else:
creation_tx_ids.append(tx_id)
model = get_model(tx_constraints, minimize=minimize)
for transaction in transactions:
tx_id = str(transaction.id)
concrete_transactions[tx_id]["calldata"] = "0x" + "".join(
[
hex(b)[2:] if len(hex(b)) % 2 == 0 else "0" + hex(b)[2:]
for b in transaction.call_data.concrete(model)
]
)
concrete_transactions[tx_id]["call_value"] = (
"0x%x"
% model.eval(transaction.call_value.raw, model_completion=True).as_long()
)
concrete_transactions[tx_id]["caller"] = "0x" + (
"%x" % model.eval(transaction.caller.raw, model_completion=True).as_long()
).zfill(40)
return concrete_transactions
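# Standalone sketch of the calldata encoding performed above: every byte is
# rendered as exactly two lowercase hex digits, giving a well-formed hex string.
calldata_bytes = [0xA9, 0x05, 0x9C, 0xBB, 0x00]
encoded = "0x" + "".join("{:02x}".format(b) for b in calldata_bytes)
assert encoded == "0xa9059cbb00"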
|
python
|
{
"resource": ""
}
|
q26477
|
main
|
train
|
def main() -> None:
"""The main CLI interface entry point."""
parser = argparse.ArgumentParser(
description="Security analysis of Ethereum smart contracts"
)
create_parser(parser)
# Get config values
args = parser.parse_args()
parse_args(parser=parser, args=args)
|
python
|
{
"resource": ""
}
|
q26478
|
AccountIndexer.get_contract_by_hash
|
train
|
def get_contract_by_hash(self, contract_hash):
"""get mapped contract_address by its hash, if not found try
indexing."""
contract_address = self.db.reader._get_address_by_hash(contract_hash)
if contract_address is not None:
return contract_address
else:
raise AddressNotFoundError
|
python
|
{
"resource": ""
}
|
q26479
|
AccountIndexer._process
|
train
|
def _process(self, startblock):
"""Processesing method."""
log.debug("Processing blocks %d to %d" % (startblock, startblock + BATCH_SIZE))
addresses = []
for blockNum in range(startblock, startblock + BATCH_SIZE):
block_hash = self.db.reader._get_block_hash(blockNum)
if block_hash is not None:
receipts = self.db.reader._get_block_receipts(block_hash, blockNum)
for receipt in receipts:
if receipt.contractAddress is not None and not all(
b == 0 for b in receipt.contractAddress
):
addresses.append(receipt.contractAddress)
else:
if len(addresses) == 0:
# no block hash and nothing collected yet: nothing left to index
raise Exception("block %d not found" % blockNum)
return addresses
|
python
|
{
"resource": ""
}
|
q26480
|
AccountIndexer.updateIfNeeded
|
train
|
def updateIfNeeded(self):
"""update address index."""
headBlock = self.db.reader._get_head_block()
if headBlock is not None:
# avoid restarting search if head block is same & we already initialized
# this is required for fastSync handling
if self.lastBlock is not None:
self.lastBlock = max(self.lastBlock, headBlock.number)
else:
self.lastBlock = headBlock.number
lastProcessed = self.db.reader._get_last_indexed_number()
if lastProcessed is not None:
self.lastProcessedBlock = utils.big_endian_to_int(lastProcessed)
# in fast sync the head block number is 0, so it cannot be used to determine the chain length
if self.lastBlock is not None and self.lastBlock == 0:
self.lastBlock = int(2e9)
if self.lastBlock is None or (
self.lastProcessedBlock is not None
and self.lastBlock <= self.lastProcessedBlock
):
return
blockNum = 0
if self.lastProcessedBlock is not None:
blockNum = self.lastProcessedBlock + 1
print(
"Updating hash-to-address index from block "
+ str(self.lastProcessedBlock)
)
else:
print("Starting hash-to-address index")
count = 0
processed = 0
while blockNum <= self.lastBlock:
# leveldb cannot be accessed from multiple processes (not even read-only)
# a multithreaded version performs significantly worse than serial
try:
results = self._process(blockNum)
except Exception:
break
# store new mappings
self.db.writer._start_writing()
count += len(results)
for addr in results:
self.db.writer._store_account_address(addr)
self.db.writer._commit_batch()
processed += BATCH_SIZE
blockNum = min(blockNum + BATCH_SIZE, self.lastBlock + 1)
cost_time = time.time() - ethereum.start_time
print(
"%d blocks processed (in %d seconds), %d unique addresses found, next block: %d"
% (processed, cost_time, count, min(self.lastBlock, blockNum))
)
self.lastProcessedBlock = blockNum - 1
self.db.writer._set_last_indexed_number(self.lastProcessedBlock)
print("Finished indexing")
self.lastBlock = self.lastProcessedBlock
|
python
|
{
"resource": ""
}
|
q26481
|
instruction_list_to_easm
|
train
|
def instruction_list_to_easm(instruction_list: list) -> str:
"""Convert a list of instructions into an easm op code string.
:param instruction_list:
:return:
"""
result = ""
for instruction in instruction_list:
result += "{} {}".format(instruction["address"], instruction["opcode"])
if "argument" in instruction:
result += " " + instruction["argument"]
result += "\n"
return result
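# Usage sketch with a hand-built instruction list (the dict layout mirrors the
# disassembler's to_dict() output).
sample_instructions = [
    {"address": 0, "opcode": "PUSH1", "argument": "0x60"},
    {"address": 2, "opcode": "MSTORE"},
]
print(instruction_list_to_easm(sample_instructions))
# 0 PUSH1 0x60
# 2 MSTORE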
|
python
|
{
"resource": ""
}
|
q26482
|
get_opcode_from_name
|
train
|
def get_opcode_from_name(operation_name: str) -> int:
"""Get an op code based on its name.
:param operation_name:
:return:
"""
for op_code, value in opcodes.items():
if operation_name == value[0]:
return op_code
raise RuntimeError("Unknown opcode: %s" % operation_name)
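# Usage sketch (assumes the module-level `opcodes` table maps numbers to
# tuples whose first element is the mnemonic, e.g. opcodes[0x01] == ("ADD", ...)).
assert get_opcode_from_name("ADD") == 0x01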
|
python
|
{
"resource": ""
}
|
q26483
|
find_op_code_sequence
|
train
|
def find_op_code_sequence(pattern: list, instruction_list: list) -> Generator:
"""Returns all indices in instruction_list that point to instruction
sequences following a pattern.
:param pattern: The pattern to look for, e.g. [["PUSH1", "PUSH2"], ["EQ"]] where ["PUSH1", "EQ"] satisfies pattern
:param instruction_list: List of instructions to look in
:return: Indices to the instruction sequences
"""
for i in range(0, len(instruction_list) - len(pattern) + 1):
if is_sequence_match(pattern, instruction_list, i):
yield i
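# Usage sketch: find every PUSH1/PUSH2 that is immediately followed by EQ.
instructions = [{"opcode": "PUSH1"}, {"opcode": "EQ"}, {"opcode": "STOP"}]
pattern = [["PUSH1", "PUSH2"], ["EQ"]]
assert list(find_op_code_sequence(pattern, instructions)) == [0]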
|
python
|
{
"resource": ""
}
|
q26484
|
is_sequence_match
|
train
|
def is_sequence_match(pattern: list, instruction_list: list, index: int) -> bool:
"""Checks if the instructions starting at index follow a pattern.
:param pattern: List of lists describing a pattern, e.g. [["PUSH1", "PUSH2"], ["EQ"]] where ["PUSH1", "EQ"] satisfies pattern
:param instruction_list: List of instructions
:param index: Index to check for
:return: Pattern matched
"""
for index, pattern_slot in enumerate(pattern, start=index):
try:
if instruction_list[index]["opcode"] not in pattern_slot:
return False
except IndexError:
return False
return True
|
python
|
{
"resource": ""
}
|
q26485
|
disassemble
|
train
|
def disassemble(bytecode: bytes) -> list:
"""Disassembles evm bytecode and returns a list of instructions.
:param bytecode:
:return:
"""
instruction_list = []
address = 0
length = len(bytecode)
if "bzzr" in str(bytecode[-43:]):
# ignore swarm hash
length -= 43
while address < length:
try:
op_code = opcodes[bytecode[address]]
except KeyError:
instruction_list.append(EvmInstruction(address, "INVALID"))
address += 1
continue
op_code_name = op_code[0]
current_instruction = EvmInstruction(address, op_code_name)
match = re.search(regex_PUSH, op_code_name)
if match:
argument_bytes = bytecode[address + 1 : address + 1 + int(match.group(1))]
current_instruction.argument = "0x" + argument_bytes.hex()
address += int(match.group(1))
instruction_list.append(current_instruction)
address += 1
# We use a to_dict() here for compatibility reasons
return [element.to_dict() for element in instruction_list]
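# Usage sketch: 0x6001600101 disassembles to PUSH1 0x01, PUSH1 0x01, ADD.
for ins in disassemble(bytes.fromhex("6001600101")):
    print(ins["address"], ins["opcode"], ins.get("argument", ""))
# 0 PUSH1 0x01
# 2 PUSH1 0x01
# 4 ADD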
|
python
|
{
"resource": ""
}
|
q26486
|
stat_smt_query
|
train
|
def stat_smt_query(func: Callable):
"""Measures statistics for annotated smt query check function"""
stat_store = SolverStatistics()
def function_wrapper(*args, **kwargs):
if not stat_store.enabled:
return func(*args, **kwargs)
stat_store.query_count += 1
begin = time()
result = func(*args, **kwargs)
end = time()
stat_store.solver_time += end - begin
return result
return function_wrapper
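# Hedged usage sketch: assumes SolverStatistics behaves as a shared statistics
# object exposing `enabled`, `query_count` and `solver_time`, as implied above.
@stat_smt_query
def dummy_check():
    return "sat"  # stand-in for a real smt query

SolverStatistics().enabled = True
dummy_check()
print(SolverStatistics().query_count)  # incremented once per decorated call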
|
python
|
{
"resource": ""
}
|
q26487
|
WorldState.create_account
|
train
|
def create_account(
self,
balance=0,
address=None,
concrete_storage=False,
dynamic_loader=None,
creator=None,
) -> Account:
"""Create non-contract account.
:param address: The account's address
:param balance: Initial balance for the account
:param concrete_storage: Interpret account storage as concrete
:param dynamic_loader: used for dynamically loading storage from the block chain
:return: The new account
"""
address = address if address else self._generate_new_address(creator)
new_account = Account(
address,
balance=balance,
dynamic_loader=dynamic_loader,
concrete_storage=concrete_storage,
)
self._put_account(new_account)
return new_account
|
python
|
{
"resource": ""
}
|
q26488
|
WorldState.create_initialized_contract_account
|
train
|
def create_initialized_contract_account(self, contract_code, storage) -> None:
"""Creates a new contract account, based on the contract code and
storage provided The contract code only includes the runtime contract
bytecode.
:param contract_code: runtime bytecode for the contract
:param storage: initial storage for the contract
"""
# TODO: Add type hints
new_account = Account(
self._generate_new_address(), code=contract_code, balance=0
)
new_account.storage = storage
self._put_account(new_account)
|
python
|
{
"resource": ""
}
|
q26489
|
WorldState._generate_new_address
|
train
|
def _generate_new_address(self, creator=None) -> str:
"""Generates a new address for the global state.
:return:
"""
if creator:
# TODO: use the creator account's actual nonce
return "0x" + str(mk_contract_address(creator, 0).hex())
while True:
address = "0x" + "".join([str(hex(randint(0, 16)))[-1] for _ in range(40)])
if address not in self.accounts.keys():
return address
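# Standalone sketch of unbiased address generation: each of the 40 nibbles is
# drawn uniformly from 0..15 and rendered as a single hex digit.
from random import randint
address = "0x" + "".join("{:x}".format(randint(0, 15)) for _ in range(40))
assert len(address) == 42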
|
python
|
{
"resource": ""
}
|
q26490
|
PluginFactory.build_benchmark_plugin
|
train
|
def build_benchmark_plugin(name: str) -> LaserPlugin:
""" Creates an instance of the benchmark plugin with the given name """
from mythril.laser.ethereum.plugins.implementations.benchmark import (
BenchmarkPlugin,
)
return BenchmarkPlugin(name)
|
python
|
{
"resource": ""
}
|
q26491
|
PluginFactory.build_mutation_pruner_plugin
|
train
|
def build_mutation_pruner_plugin() -> LaserPlugin:
""" Creates an instance of the mutation pruner plugin"""
from mythril.laser.ethereum.plugins.implementations.mutation_pruner import (
MutationPruner,
)
return MutationPruner()
|
python
|
{
"resource": ""
}
|
q26492
|
PluginFactory.build_instruction_coverage_plugin
|
train
|
def build_instruction_coverage_plugin() -> LaserPlugin:
""" Creates an instance of the instruction coverage plugin"""
from mythril.laser.ethereum.plugins.implementations.coverage import (
InstructionCoveragePlugin,
)
return InstructionCoveragePlugin()
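# Standalone sketch of the deferred-import factory pattern these builders share:
# the plugin module is imported only when the plugin is requested, keeping the
# factory itself cheap to import. The names below are illustrative.
def build_example_plugin():
    from collections import Counter  # deferred import standing in for a plugin module
    return Counter()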
|
python
|
{
"resource": ""
}
|
q26493
|
Memory.get_word_at
|
train
|
def get_word_at(self, index: int) -> Union[int, BitVec]:
"""Access a word from a specified memory index.
:param index: integer representing the index to access
:return: 32 byte word at the specified index
"""
try:
return symbol_factory.BitVecVal(
util.concrete_int_from_bytes(
bytes([util.get_concrete_int(b) for b in self[index : index + 32]]),
0,
),
256,
)
except TypeError:
result = simplify(
Concat(
[
b if isinstance(b, BitVec) else symbol_factory.BitVecVal(b, 8)
for b in cast(
List[Union[int, BitVec]], self[index : index + 32]
)
]
)
)
assert result.size() == 256
return result
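# Pure-integer analogue of the concrete path above: 32 bytes are combined
# big-endian into a single 256-bit word.
word_bytes = bytes([0] * 28 + [0xDE, 0xAD, 0xBE, 0xEF])
assert int.from_bytes(word_bytes, byteorder="big") == 0xDEADBEEF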
|
python
|
{
"resource": ""
}
|
q26494
|
Memory.write_word_at
|
train
|
def write_word_at(self, index: int, value: Union[int, BitVec, bool, Bool]) -> None:
"""Writes a 32 byte word to memory at the specified index`
:param index: index to write to
:param value: the value to write to memory
"""
try:
# Attempt to concretize value
if isinstance(value, bool):
_bytes = (
int(1).to_bytes(32, byteorder="big")
if value
else int(0).to_bytes(32, byteorder="big")
)
else:
_bytes = util.concrete_int_to_bytes(value)
assert len(_bytes) == 32
self[index : index + 32] = list(bytearray(_bytes))
except (Z3Exception, AttributeError): # BitVector or BoolRef
value = cast(Union[BitVec, Bool], value)
if isinstance(value, Bool):
value_to_write = If(
value,
symbol_factory.BitVecVal(1, 256),
symbol_factory.BitVecVal(0, 256),
)
else:
value_to_write = value
assert value_to_write.size() == 256
for i in range(0, value_to_write.size(), 8):
self[index + 31 - (i // 8)] = Extract(i + 7, i, value_to_write)
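# Pure-integer analogue of the symbolic byte extraction above: slice a word
# into bytes, most significant byte at the lowest memory index.
value = 0xDEADBEEF
memory = [0] * 32
for i in range(0, 256, 8):
    memory[31 - (i // 8)] = (value >> i) & 0xFF  # mirrors Extract(i + 7, i, value)
assert bytes(memory)[-4:].hex() == "deadbeef"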
|
python
|
{
"resource": ""
}
|
q26495
|
BenchmarkPlugin.initialize
|
train
|
def initialize(self, symbolic_vm: LaserEVM):
"""Initializes the BenchmarkPlugin
Introduces hooks in symbolic_vm to track the desired values
:param symbolic_vm: Symbolic virtual machine to analyze
"""
self._reset()
@symbolic_vm.laser_hook("execute_state")
def execute_state_hook(_):
current_time = time() - self.begin
self.nr_of_executed_insns += 1
for key, value in symbolic_vm.coverage.items():
# setdefault replaces the duplicated try/except KeyError assignment
self.coverage.setdefault(key, {})[current_time] = (
sum(value[1]) * 100 / value[0]
)
@symbolic_vm.laser_hook("start_sym_exec")
def start_sym_exec_hook():
self.begin = time()
@symbolic_vm.laser_hook("stop_sym_exec")
def stop_sym_exec_hook():
self.end = time()
self._write_to_graph()
self._store_report()
|
python
|
{
"resource": ""
}
|
q26496
|
BenchmarkPlugin._reset
|
train
|
def _reset(self):
"""Reset this plugin"""
self.nr_of_executed_insns = 0
self.begin = None
self.end = None
self.coverage = {}
|
python
|
{
"resource": ""
}
|
q26497
|
BenchmarkPlugin._write_to_graph
|
train
|
def _write_to_graph(self):
"""Write the coverage results to a graph"""
traces = []
for trace_data in self.coverage.values():  # the bytecode keys are not used here
traces += [list(trace_data.keys()), list(trace_data.values()), "r--"]
plt.plot(*traces)
plt.axis([0, self.end - self.begin, 0, 100])
plt.xlabel("Duration (seconds)")
plt.ylabel("Coverage (percentage)")
plt.savefig("{}.png".format(self.name))
|
python
|
{
"resource": ""
}
|
q26498
|
DependenceMap._merge_buckets
|
train
|
def _merge_buckets(self, bucket_list: Set[DependenceBucket]) -> DependenceBucket:
""" Merges the buckets in bucket list """
variables = [] # type: List[str]
conditions = [] # type: List[z3.BoolRef]
for bucket in bucket_list:
self.buckets.remove(bucket)
variables += bucket.variables
conditions += bucket.conditions
new_bucket = DependenceBucket(variables, conditions)
self.buckets.append(new_bucket)
return new_bucket
|
python
|
{
"resource": ""
}
|
q26499
|
IndependenceSolver.check
|
train
|
def check(self) -> z3.CheckSatResult:
"""Returns z3 smt check result. """
dependence_map = DependenceMap()
for constraint in self.constraints:
dependence_map.add_condition(constraint)
self.models = []
for bucket in dependence_map.buckets:
self.raw.reset()
self.raw.append(*bucket.conditions)
check_result = self.raw.check()
if check_result == z3.sat:
self.models.append(self.raw.model())
else:
return check_result
return z3.sat
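# Hedged sketch of the independence idea (assumes the z3-solver package):
# constraints over disjoint variable sets can be checked in separate, cheaper
# solver calls instead of a single combined query.
import z3
x, y = z3.Ints("x y")
solver = z3.Solver()
for bucket in ([x > 0], [y < 5]):  # two independent "buckets"
    solver.reset()
    solver.add(*bucket)
    assert solver.check() == z3.sat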
|
python
|
{
"resource": ""
}
|