| _id (string, length 2-7) | title (string, length 1-88) | partition (string, 3 classes) | text (string, length 75-19.8k) | language (string, 1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q26600
|
DateTime._first_of_month
|
train
|
def _first_of_month(self, day_of_week):
"""
Modify to the first occurrence of a given day of the week
in the current month. If no day_of_week is provided,
modify to the first day of the month. Use the supplied consts
to indicate the desired day_of_week, ex. DateTime.MONDAY.
:type day_of_week: int or None
:rtype: DateTime
"""
dt = self.start_of("day")
if day_of_week is None:
return dt.set(day=1)
month = calendar.monthcalendar(dt.year, dt.month)
calendar_day = (day_of_week - 1) % 7
if month[0][calendar_day] > 0:
day_of_month = month[0][calendar_day]
else:
day_of_month = month[1][calendar_day]
return dt.set(day=day_of_month)
|
python
|
{
"resource": ""
}
|
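To make the calendar.monthcalendar lookup above concrete, here is a small self-contained sketch that finds the first Monday of January 2021. It assumes pendulum-style weekday constants where SUNDAY is 0 and MONDAY is 1, which is why the column index is (day_of_week - 1) % 7 against calendar's Monday-first rows.

import calendar

MONDAY = 1                                   # assumed pendulum-style constant (SUNDAY == 0)
month = calendar.monthcalendar(2021, 1)      # weeks of January 2021, Monday-first, 0 = padding
calendar_day = (MONDAY - 1) % 7              # column 0 == Monday
if month[0][calendar_day] > 0:
    day_of_month = month[0][calendar_day]
else:
    day_of_month = month[1][calendar_day]
print(day_of_month)                          # 4 -> 2021-01-04 is the first Monday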
q26601
|
DateTime._first_of_quarter
|
train
|
def _first_of_quarter(self, day_of_week=None):
"""
Modify to the first occurrence of a given day of the week
in the current quarter. If no day_of_week is provided,
modify to the first day of the quarter. Use the supplied consts
to indicate the desired day_of_week, ex. DateTime.MONDAY.
:type day_of_week: int or None
:rtype: DateTime
"""
return self.on(self.year, self.quarter * 3 - 2, 1).first_of(
"month", day_of_week
)
|
python
|
{
"resource": ""
}
|
q26602
|
DateTime._last_of_quarter
|
train
|
def _last_of_quarter(self, day_of_week=None):
"""
Modify to the last occurrence of a given day of the week
in the current quarter. If no day_of_week is provided,
modify to the last day of the quarter. Use the supplied consts
to indicate the desired day_of_week, ex. DateTime.MONDAY.
:type day_of_week: int or None
:rtype: DateTime
"""
return self.on(self.year, self.quarter * 3, 1).last_of("month", day_of_week)
|
python
|
{
"resource": ""
}
|
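The two quarter helpers above rely on the same month arithmetic: with 1-based quarters, quarter * 3 - 2 is the first month of the quarter and quarter * 3 is the last. A quick check:

for quarter in (1, 2, 3, 4):
    print(quarter, quarter * 3 - 2, quarter * 3)
# 1 1 3
# 2 4 6
# 3 7 9
# 4 10 12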
q26603
|
DateTime._nth_of_year
|
train
|
def _nth_of_year(self, nth, day_of_week):
"""
Modify to the given occurrence of a given day of the week
in the current year. If the calculated occurrence is outside
the scope of the current year, then return False and no
modifications are made. Use the supplied consts
to indicate the desired day_of_week, ex. DateTime.MONDAY.
:type nth: int
:type day_of_week: int or None
:rtype: DateTime
"""
if nth == 1:
return self.first_of("year", day_of_week)
dt = self.first_of("year")
year = dt.year
for i in range(nth - (1 if dt.day_of_week == day_of_week else 0)):
dt = dt.next(day_of_week)
if year != dt.year:
return False
return self.on(self.year, dt.month, dt.day).start_of("day")
|
python
|
{
"resource": ""
}
|
q26604
|
Date.closest
|
train
|
def closest(self, dt1, dt2):
"""
Get the closest date from the instance.
:type dt1: Date or date
:type dt2: Date or date
:rtype: Date
"""
dt1 = self.__class__(dt1.year, dt1.month, dt1.day)
dt2 = self.__class__(dt2.year, dt2.month, dt2.day)
if self.diff(dt1).in_seconds() < self.diff(dt2).in_seconds():
return dt1
return dt2
|
python
|
{
"resource": ""
}
|
q26605
|
Date.is_birthday
|
train
|
def is_birthday(self, dt=None):
"""
Check if it's the birthday.
Compares the month and day values of the two dates.
:rtype: bool
"""
if dt is None:
dt = Date.today()
instance = self.__class__(dt.year, dt.month, dt.day)
return (self.month, self.day) == (instance.month, instance.day)
|
python
|
{
"resource": ""
}
|
q26606
|
Date.diff
|
train
|
def diff(self, dt=None, abs=True):
"""
Returns the difference between two Date objects as a Period.
:type dt: Date or None
:param abs: Whether to return an absolute interval or not
:type abs: bool
:rtype: Period
"""
if dt is None:
dt = self.today()
return Period(self, Date(dt.year, dt.month, dt.day), absolute=abs)
|
python
|
{
"resource": ""
}
|
q26607
|
Date.diff_for_humans
|
train
|
def diff_for_humans(self, other=None, absolute=False, locale=None):
"""
Get the difference in a human readable format in the current locale.
When comparing a value in the past to default now:
1 day ago
5 months ago
When comparing a value in the future to default now:
1 day from now
5 months from now
When comparing a value in the past to another value:
1 day before
5 months before
When comparing a value in the future to another value:
1 day after
5 months after
:type other: Date
:param absolute: removes time difference modifiers ago, after, etc
:type absolute: bool
:param locale: The locale to use for localization
:type locale: str
:rtype: str
"""
is_now = other is None
if is_now:
other = self.today()
diff = self.diff(other)
return pendulum.format_diff(diff, is_now, absolute, locale)
|
python
|
{
"resource": ""
}
|
q26608
|
Date._start_of_decade
|
train
|
def _start_of_decade(self):
"""
Reset the date to the first day of the decade.
:rtype: Date
"""
year = self.year - self.year % YEARS_PER_DECADE
return self.set(year, 1, 1)
|
python
|
{
"resource": ""
}
|
q26609
|
Date._end_of_decade
|
train
|
def _end_of_decade(self):
"""
Reset the date to the last day of the decade.
:rtype: Date
"""
year = self.year - self.year % YEARS_PER_DECADE + YEARS_PER_DECADE - 1
return self.set(year, 12, 31)
|
python
|
{
"resource": ""
}
|
q26610
|
Date._start_of_century
|
train
|
def _start_of_century(self):
"""
Reset the date to the first day of the century.
:rtype: Date
"""
year = self.year - 1 - (self.year - 1) % YEARS_PER_CENTURY + 1
return self.set(year, 1, 1)
|
python
|
{
"resource": ""
}
|
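The decade and century helpers above reduce to plain modular arithmetic on the year. A worked check, assuming YEARS_PER_DECADE = 10 and YEARS_PER_CENTURY = 100:

YEARS_PER_DECADE, YEARS_PER_CENTURY = 10, 100
year = 1987
print(year - year % YEARS_PER_DECADE)                            # 1980, start of decade
print(year - year % YEARS_PER_DECADE + YEARS_PER_DECADE - 1)     # 1989, end of decade
print(year - 1 - (year - 1) % YEARS_PER_CENTURY + 1)             # 1901, start of century (1901-2000)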
q26611
|
Date.next
|
train
|
def next(self, day_of_week=None):
"""
Modify to the next occurrence of a given day of the week.
If no day_of_week is provided, modify to the next occurrence
of the current day of the week. Use the supplied consts
to indicate the desired day_of_week, ex. pendulum.MONDAY.
:param day_of_week: The next day of week to reset to.
:type day_of_week: int or None
:rtype: Date
"""
if day_of_week is None:
day_of_week = self.day_of_week
if day_of_week < SUNDAY or day_of_week > SATURDAY:
raise ValueError("Invalid day of week")
dt = self.add(days=1)
while dt.day_of_week != day_of_week:
dt = dt.add(days=1)
return dt
|
python
|
{
"resource": ""
}
|
q26612
|
Date.previous
|
train
|
def previous(self, day_of_week=None):
"""
Modify to the previous occurrence of a given day of the week.
If no day_of_week is provided, modify to the previous occurrence
of the current day of the week. Use the supplied consts
to indicate the desired day_of_week, ex. pendulum.MONDAY.
:param day_of_week: The previous day of week to reset to.
:type day_of_week: int or None
:rtype: Date
"""
if day_of_week is None:
day_of_week = self.day_of_week
if day_of_week < SUNDAY or day_of_week > SATURDAY:
raise ValueError("Invalid day of week")
dt = self.subtract(days=1)
while dt.day_of_week != day_of_week:
dt = dt.subtract(days=1)
return dt
|
python
|
{
"resource": ""
}
|
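A minimal usage sketch of next() and previous(), assuming the pendulum package is installed and exposes these methods on its date objects as shown above:

import pendulum

d = pendulum.date(2021, 1, 1)          # a Friday
print(d.next(pendulum.MONDAY))         # 2021-01-04
print(d.previous(pendulum.MONDAY))     # 2020-12-28
print(d.next())                        # 2021-01-08, next occurrence of the same weekday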
q26613
|
Date._nth_of_month
|
train
|
def _nth_of_month(self, nth, day_of_week):
"""
Modify to the given occurrence of a given day of the week
in the current month. If the calculated occurrence is outside
the scope of the current month, then return False and no
modifications are made. Use the supplied consts
to indicate the desired day_of_week, ex. pendulum.MONDAY.
:type nth: int
:type day_of_week: int or None
:rtype: Date
"""
if nth == 1:
return self.first_of("month", day_of_week)
dt = self.first_of("month")
check = dt.format("YYYY-MM")
for i in range(nth - (1 if dt.day_of_week == day_of_week else 0)):
dt = dt.next(day_of_week)
if dt.format("YYYY-MM") == check:
return self.set(day=dt.day)
return False
|
python
|
{
"resource": ""
}
|
q26614
|
Date._first_of_quarter
|
train
|
def _first_of_quarter(self, day_of_week=None):
"""
Modify to the first occurrence of a given day of the week
in the current quarter. If no day_of_week is provided,
modify to the first day of the quarter. Use the supplied consts
to indicate the desired day_of_week, ex. pendulum.MONDAY.
:type day_of_week: int or None
:rtype: Date
"""
return self.set(self.year, self.quarter * 3 - 2, 1).first_of(
"month", day_of_week
)
|
python
|
{
"resource": ""
}
|
q26615
|
Date._last_of_quarter
|
train
|
def _last_of_quarter(self, day_of_week=None):
"""
Modify to the last occurrence of a given day of the week
in the current quarter. If no day_of_week is provided,
modify to the last day of the quarter. Use the supplied consts
to indicate the desired day_of_week, ex. pendulum.MONDAY.
:type day_of_week: int or None
:rtype: Date
"""
return self.set(self.year, self.quarter * 3, 1).last_of("month", day_of_week)
|
python
|
{
"resource": ""
}
|
q26616
|
Date._nth_of_quarter
|
train
|
def _nth_of_quarter(self, nth, day_of_week):
"""
Modify to the given occurrence of a given day of the week
in the current quarter. If the calculated occurrence is outside
the scope of the current quarter, then return False and no
modifications are made. Use the supplied consts
to indicate the desired day_of_week, ex. pendulum.MONDAY.
:type nth: int
:type day_of_week: int or None
:rtype: Date
"""
if nth == 1:
return self.first_of("quarter", day_of_week)
dt = self.replace(self.year, self.quarter * 3, 1)
last_month = dt.month
year = dt.year
dt = dt.first_of("quarter")
for i in range(nth - (1 if dt.day_of_week == day_of_week else 0)):
dt = dt.next(day_of_week)
if last_month < dt.month or year != dt.year:
return False
return self.set(self.year, dt.month, dt.day)
|
python
|
{
"resource": ""
}
|
q26617
|
Formatter.format
|
train
|
def format(self, dt, fmt, locale=None):
"""
Formats a DateTime instance with a given format and locale.
:param dt: The instance to format
:type dt: pendulum.DateTime
:param fmt: The format to use
:type fmt: str
:param locale: The locale to use
:type locale: str or Locale or None
:rtype: str
"""
if not locale:
locale = pendulum.get_locale()
locale = Locale.load(locale)
result = self._FORMAT_RE.sub(
lambda m: m.group(1)
if m.group(1)
else m.group(2)
if m.group(2)
else self._format_token(dt, m.group(3), locale),
fmt,
)
return decode(result)
|
python
|
{
"resource": ""
}
|
q26618
|
Formatter._format_token
|
train
|
def _format_token(self, dt, token, locale):
"""
Formats a DateTime instance with a given token and locale.
:param dt: The instance to format
:type dt: pendulum.DateTime
:param token: The token to use
:type token: str
:param locale: The locale to use
:type locale: Locale
:rtype: str
"""
if token in self._DATE_FORMATS:
fmt = locale.get("custom.date_formats.{}".format(token))
if fmt is None:
fmt = self._DEFAULT_DATE_FORMATS[token]
return self.format(dt, fmt, locale)
if token in self._LOCALIZABLE_TOKENS:
return self._format_localizable_token(dt, token, locale)
if token in self._TOKENS_RULES:
return self._TOKENS_RULES[token](dt)
# Timezone
if token in ["ZZ", "Z"]:
if dt.tzinfo is None:
return ""
separator = ":" if token == "Z" else ""
offset = dt.utcoffset() or datetime.timedelta()
minutes = offset.total_seconds() / 60
if minutes >= 0:
sign = "+"
else:
sign = "-"
hour, minute = divmod(abs(int(minutes)), 60)
return "{}{:02d}{}{:02d}".format(sign, hour, separator, minute)
|
python
|
{
"resource": ""
}
|
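The ZZ/Z branch at the end of _format_token is just an offset-to-string conversion. A self-contained sketch of the same arithmetic for a +05:30 offset:

import datetime

offset = datetime.timedelta(hours=5, minutes=30)      # e.g. dt.utcoffset()
minutes = offset.total_seconds() / 60
sign = "+" if minutes >= 0 else "-"
hour, minute = divmod(abs(int(minutes)), 60)
print("{}{:02d}:{:02d}".format(sign, hour, minute))   # +05:30 (the "Z" form; "ZZ" drops the colon)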
q26619
|
Formatter._format_localizable_token
|
train
|
def _format_localizable_token(self, dt, token, locale):
"""
Formats a DateTime instance
with a given localizable token and locale.
:param dt: The instance to format
:type dt: pendulum.DateTime
:param token: The token to use
:type token: str
:param locale: The locale to use
:type locale: Locale
:rtype: str
"""
if token == "MMM":
return locale.get("translations.months.abbreviated")[dt.month]
elif token == "MMMM":
return locale.get("translations.months.wide")[dt.month]
elif token == "dd":
return locale.get("translations.days.short")[dt.day_of_week]
elif token == "ddd":
return locale.get("translations.days.abbreviated")[dt.day_of_week]
elif token == "dddd":
return locale.get("translations.days.wide")[dt.day_of_week]
elif token == "Do":
return locale.ordinalize(dt.day)
elif token == "do":
return locale.ordinalize(dt.day_of_week)
elif token == "Mo":
return locale.ordinalize(dt.month)
elif token == "Qo":
return locale.ordinalize(dt.quarter)
elif token == "wo":
return locale.ordinalize(dt.week_of_year)
elif token == "DDDo":
return locale.ordinalize(dt.day_of_year)
elif token == "A":
key = "translations.day_periods"
if dt.hour >= 12:
key += ".pm"
else:
key += ".am"
return locale.get(key)
else:
return token
|
python
|
{
"resource": ""
}
|
q26620
|
Formatter.parse
|
train
|
def parse(
self,
time, # type: str
fmt, # type: str
now, # type: pendulum.DateTime
locale=None, # type: typing.Union[str, None]
): # type: (...) -> dict
"""
Parses a time string matching a given format as a tuple.
:param time: The timestring
:param fmt: The format
:param now: The datetime to use as "now"
:param locale: The locale to use
:return: The parsed elements
"""
escaped_fmt = re.escape(fmt)
tokens = self._FROM_FORMAT_RE.findall(escaped_fmt)
if not tokens:
return time
if not locale:
locale = pendulum.get_locale()
locale = Locale.load(locale)
parsed = {
"year": None,
"month": None,
"day": None,
"hour": None,
"minute": None,
"second": None,
"microsecond": None,
"tz": None,
"quarter": None,
"day_of_week": None,
"day_of_year": None,
"meridiem": None,
"timestamp": None,
}
pattern = self._FROM_FORMAT_RE.sub(
lambda m: self._replace_tokens(m.group(0), locale), escaped_fmt
)
if not re.match(pattern, time):
raise ValueError("String does not match format {}".format(fmt))
re.sub(pattern, lambda m: self._get_parsed_values(m, parsed, locale, now), time)
return self._check_parsed(parsed, now)
|
python
|
{
"resource": ""
}
|
q26621
|
local_time
|
train
|
def local_time(unix_time, utc_offset, microseconds):
"""
Returns a UNIX time as a broken down time
for a particular transition type.
:type unix_time: int
:type utc_offset: int
:type microseconds: int
:rtype: tuple
"""
year = EPOCH_YEAR
seconds = int(math.floor(unix_time))
# Shift to a base year that is 400-year aligned.
if seconds >= 0:
seconds -= 10957 * SECS_PER_DAY
year += 30 # == 2000
else:
seconds += (146097 - 10957) * SECS_PER_DAY
year -= 370 # == 1600
seconds += utc_offset
# Handle years in chunks of 400/100/4/1
year += 400 * (seconds // SECS_PER_400_YEARS)
seconds %= SECS_PER_400_YEARS
if seconds < 0:
seconds += SECS_PER_400_YEARS
year -= 400
leap_year = 1 # 4-century aligned
sec_per_100years = SECS_PER_100_YEARS[leap_year]
while seconds >= sec_per_100years:
seconds -= sec_per_100years
year += 100
leap_year = 0 # 1-century, non 4-century aligned
sec_per_100years = SECS_PER_100_YEARS[leap_year]
sec_per_4years = SECS_PER_4_YEARS[leap_year]
while seconds >= sec_per_4years:
seconds -= sec_per_4years
year += 4
leap_year = 1 # 4-year, non century aligned
sec_per_4years = SECS_PER_4_YEARS[leap_year]
sec_per_year = SECS_PER_YEAR[leap_year]
while seconds >= sec_per_year:
seconds -= sec_per_year
year += 1
leap_year = 0 # non 4-year aligned
sec_per_year = SECS_PER_YEAR[leap_year]
# Handle months and days
month = TM_DECEMBER + 1
day = seconds // SECS_PER_DAY + 1
seconds %= SECS_PER_DAY
while month != TM_JANUARY + 1:
month_offset = MONTHS_OFFSETS[leap_year][month]
if day > month_offset:
day -= month_offset
break
month -= 1
# Handle hours, minutes, seconds and microseconds
hour = seconds // SECS_PER_HOUR
seconds %= SECS_PER_HOUR
minute = seconds // SECS_PER_MIN
second = seconds % SECS_PER_MIN
return (year, month, day, hour, minute, second, microseconds)
|
python
|
{
"resource": ""
}
|
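For orientation, the broken-down tuple that local_time computes by hand (via the 400/100/4/1-year chunks) should match what the standard library produces when the offset is applied directly. A comparison sketch, assuming an arbitrary timestamp and a +1 hour offset:

from datetime import datetime, timedelta, timezone

unix_time, utc_offset, microseconds = 1_600_000_000, 3600, 0
dt = datetime.fromtimestamp(unix_time, tz=timezone(timedelta(seconds=utc_offset)))
print((dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, microseconds))
# (2020, 9, 13, 13, 26, 40, 0): the tuple local_time(unix_time, utc_offset, 0) is expected to return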
q26622
|
_normalize
|
train
|
def _normalize(parsed, **options):
"""
Normalizes the parsed element.
:param parsed: The parsed elements.
:type parsed: Parsed
:rtype: Parsed
"""
if options.get("exact"):
return parsed
if isinstance(parsed, time):
now = options["now"] or datetime.now()
return datetime(
now.year,
now.month,
now.day,
parsed.hour,
parsed.minute,
parsed.second,
parsed.microsecond,
)
elif isinstance(parsed, date) and not isinstance(parsed, datetime):
return datetime(parsed.year, parsed.month, parsed.day)
return parsed
|
python
|
{
"resource": ""
}
|
q26623
|
_parse_common
|
train
|
def _parse_common(text, **options):
"""
Tries to parse the string as a common datetime format.
:param text: The string to parse.
:type text: str
:rtype: dict or None
"""
m = COMMON.match(text)
has_date = False
year = 0
month = 1
day = 1
if not m:
raise ParserError("Invalid datetime string")
if m.group("date"):
# A date has been specified
has_date = True
year = int(m.group("year"))
if not m.group("monthday"):
# No month and day
month = 1
day = 1
else:
if options["day_first"]:
month = int(m.group("day"))
day = int(m.group("month"))
else:
month = int(m.group("month"))
day = int(m.group("day"))
if not m.group("time"):
return date(year, month, day)
# Grabbing hh:mm:ss
hour = int(m.group("hour"))
minute = int(m.group("minute"))
if m.group("second"):
second = int(m.group("second"))
else:
second = 0
# Grabbing subseconds, if any
microsecond = 0
if m.group("subsecondsection"):
# Limiting to 6 chars
subsecond = m.group("subsecond")[:6]
microsecond = int("{:0<6}".format(subsecond))
if has_date:
return datetime(year, month, day, hour, minute, second, microsecond)
return time(hour, minute, second, microsecond)
|
python
|
{
"resource": ""
}
|
q26624
|
FormattableMixing.format
|
train
|
def format(self, fmt, locale=None):
"""
Formats the instance using the given format.
:param fmt: The format to use
:type fmt: str
:param locale: The locale to use
:type locale: str or None
:rtype: str
"""
return self._formatter.format(self, fmt, locale)
|
python
|
{
"resource": ""
}
|
q26625
|
Spendable.as_bin
|
train
|
def as_bin(self, as_spendable=False):
"""Return the txo as binary."""
f = io.BytesIO()
self.stream(f, as_spendable=as_spendable)
return f.getvalue()
|
python
|
{
"resource": ""
}
|
q26626
|
Tx.from_bin
|
train
|
def from_bin(class_, blob):
"""Return the Tx for the given binary blob.
:param blob: a binary blob containing a transaction streamed in standard
form. The blob may also include the unspents (a nonstandard extension,
optionally written by :func:`Tx.stream <stream>`), and they will also be parsed.
:return: :class:`Tx`
If parsing fails, an exception is raised.
"""
f = io.BytesIO(blob)
tx = class_.parse(f)
try:
tx.parse_unspents(f)
except Exception:
# parsing unspents failed
tx.unspents = []
return tx
|
python
|
{
"resource": ""
}
|
q26627
|
Tx.as_bin
|
train
|
def as_bin(self, *args, **kwargs):
"""Returns a binary blob containing the streamed transaction.
For information about the parameters, see :func:`Tx.stream <stream>`
:return: binary blob that would parse to the given transaction
"""
f = io.BytesIO()
self.stream(f, *args, **kwargs)
return f.getvalue()
|
python
|
{
"resource": ""
}
|
q26628
|
Tx.set_unspents
|
train
|
def set_unspents(self, unspents):
"""
Set the unspent inputs for a transaction.
:param unspents: a list of :class:`TxOut` (or the subclass :class:`Spendable`) objects
corresponding to the :class:`TxIn` objects for this transaction (same number of
items in each list)
"""
if len(unspents) != len(self.txs_in):
raise ValueError("wrong number of unspents")
self.unspents = unspents
|
python
|
{
"resource": ""
}
|
q26629
|
Tx.sign
|
train
|
def sign(self, *args, **kwargs):
"""
Sign all transaction inputs. The parameters vary depending upon the way the coins being
spent are encumbered.
"""
self.Solver(self).sign(*args, **kwargs)
return self
|
python
|
{
"resource": ""
}
|
q26630
|
_from_bytes
|
train
|
def _from_bytes(bytes, byteorder="big", signed=False):
"""This is the same functionality as ``int.from_bytes`` in python 3"""
return int.from_bytes(bytes, byteorder=byteorder, signed=signed)
|
python
|
{
"resource": ""
}
|
q26631
|
MessageSigner.parse_signed_message
|
train
|
def parse_signed_message(class_, msg_in):
"""
Take an "armoured" message and split into the message body, signing address
and the base64 signature. Should work on all altcoin networks, and should
accept both Inputs.IO and Multibit formats but not Armory.
Looks like RFC2440 <https://www.ietf.org/rfc/rfc2440.txt> was an "inspiration"
for this, so in case of confusion it's a reference, but I've never found
a real spec for this. Should be a BIP really.
"""
msg, hdr = class_.parse_sections(msg_in)
# after message, expect something like an email/http headers, so split into lines
hdr = list(filter(None, [i.strip() for i in hdr.split('\n')]))
if '-----END' not in hdr[-1]:
raise EncodingError("expecting END on last line")
sig = hdr[-2]
addr = None
for line in hdr:
line = line.strip()
if not line:
continue
if line.startswith('-----END'):
break
if ':' in line:
label, value = [i.strip() for i in line.split(':', 1)]
if label.lower() == 'address':
addr = line.split(':')[1].strip()
break
continue
addr = line
break
if not addr or addr == sig:
raise EncodingError("Could not find address")
return msg, addr, sig
|
python
|
{
"resource": ""
}
|
q26632
|
MessageSigner.signature_for_message_hash
|
train
|
def signature_for_message_hash(self, secret_exponent, msg_hash, is_compressed):
"""
Return a signature, encoded in Base64, of msg_hash.
"""
r, s, recid = self._generator.sign_with_recid(secret_exponent, msg_hash)
# See http://bitcoin.stackexchange.com/questions/14263 and key.cpp
# for discussion of the proprietary format used for the signature
first = 27 + recid + (4 if is_compressed else 0)
sig = b2a_base64(int2byte(first) + to_bytes_32(r) + to_bytes_32(s)).strip()
sig = sig.decode("utf8")
return sig
|
python
|
{
"resource": ""
}
|
q26633
|
MessageSigner.sign_message
|
train
|
def sign_message(self, key, message, verbose=False):
"""
Return a signature, encoded in Base64, which can be verified by anyone using the
public key.
"""
secret_exponent = key.secret_exponent()
if not secret_exponent:
raise ValueError("Private key is required to sign a message")
addr = key.address()
msg_hash = self.hash_for_signing(message)
is_compressed = key.is_compressed()
sig = self.signature_for_message_hash(secret_exponent, msg_hash, is_compressed)
if not verbose or message is None:
return sig
return self.signature_template.format(
msg=message, sig=sig, addr=addr,
net_name=self._network_name.upper())
|
python
|
{
"resource": ""
}
|
q26634
|
MessageSigner._decode_signature
|
train
|
def _decode_signature(self, signature):
"""
Decode the internal fields of the base64-encoded signature.
"""
sig = a2b_base64(signature)
if len(sig) != 65:
raise EncodingError("Wrong length, expected 65")
# split into the parts.
first = byte2int(sig)
r = from_bytes_32(sig[1:33])
s = from_bytes_32(sig[33:33+32])
# first byte encodes a bits we need to know about the point used in signature
if not (27 <= first < 35):
raise EncodingError("First byte out of range")
# NOTE: The first byte encodes the "recovery id", or "recid" which is a 3-bit values
# which selects compressed/not-compressed and one of 4 possible public pairs.
#
first -= 27
is_compressed = bool(first & 0x4)
return is_compressed, (first & 0x3), r, s
|
python
|
{
"resource": ""
}
|
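A worked check of the header-byte convention used above (first byte = 27 + recid, plus 4 when the public key is compressed):

first = 31                    # example header byte from a signature
first -= 27
is_compressed = bool(first & 0x4)
recid = first & 0x3
print(is_compressed, recid)   # True 0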
q26635
|
groestlHash
|
train
|
def groestlHash(data):
"""Groestl-512 compound hash."""
try:
import groestlcoin_hash
except ImportError:
t = 'Groestlcoin requires the groestlcoin_hash package ("pip install groestlcoin_hash").'
print(t)
raise ImportError(t)
return bytes_as_revhex(groestlcoin_hash.getHash(data, len(data)))
|
python
|
{
"resource": ""
}
|
q26636
|
merkle
|
train
|
def merkle(hashes, hash_f=double_sha256):
"""Take a list of hashes, and return the root merkle hash."""
while len(hashes) > 1:
hashes = merkle_pair(hashes, hash_f)
return hashes[0]
|
python
|
{
"resource": ""
}
|
q26637
|
merkle_pair
|
train
|
def merkle_pair(hashes, hash_f):
"""Take a list of hashes, and return the parent row in the tree of merkle hashes."""
if len(hashes) % 2 == 1:
hashes = list(hashes)
hashes.append(hashes[-1])
items = []
for i in range(0, len(hashes), 2):
items.append(hash_f(hashes[i] + hashes[i+1]))
return items
|
python
|
{
"resource": ""
}
|
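A self-contained sketch of how merkle() and merkle_pair() above collapse a row of hashes into a root, using double SHA-256 from hashlib (the duplicate-the-last-hash rule for odd rows is the bitcoin convention the code implements):

import hashlib

def double_sha256(data):
    return hashlib.sha256(hashlib.sha256(data).digest()).digest()

def merkle_pair(hashes, hash_f):
    if len(hashes) % 2 == 1:
        hashes = list(hashes) + [hashes[-1]]        # odd row: duplicate the last hash
    return [hash_f(hashes[i] + hashes[i + 1]) for i in range(0, len(hashes), 2)]

hashes = [double_sha256(bytes([i])) for i in range(3)]   # three fake transaction hashes
while len(hashes) > 1:                                   # what merkle() does
    hashes = merkle_pair(hashes, double_sha256)
print(hashes[0].hex())                                   # the merkle root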
q26638
|
BIP32Node.from_master_secret
|
train
|
def from_master_secret(class_, master_secret):
"""Generate a Wallet from a master password."""
I64 = hmac.HMAC(key=b"Bitcoin seed", msg=master_secret, digestmod=hashlib.sha512).digest()
return class_(chain_code=I64[32:], secret_exponent=from_bytes_32(I64[:32]))
|
python
|
{
"resource": ""
}
|
q26639
|
BIP32Node.serialize
|
train
|
def serialize(self, as_private=None):
"""
Yield a 74-byte binary blob corresponding to this node.
You must add a 4-byte prefix before converting to base58.
"""
if as_private is None:
as_private = self.secret_exponent() is not None
if self.secret_exponent() is None and as_private:
raise PublicPrivateMismatchError("public key has no private parts")
ba = bytearray()
ba.extend([self._depth])
ba.extend(self._parent_fingerprint + struct.pack(">L", self._child_index) + self._chain_code)
if as_private:
ba += b'\0' + self._secret_exponent_bytes
else:
ba += self.sec(is_compressed=True)
return bytes(ba)
|
python
|
{
"resource": ""
}
|
q26640
|
BIP32Node.hwif
|
train
|
def hwif(self, as_private=False):
"""Yield a 111-byte string corresponding to this node."""
return self._network.bip32_as_string(self.serialize(as_private=as_private), as_private=as_private)
|
python
|
{
"resource": ""
}
|
q26641
|
BIP32Node.public_copy
|
train
|
def public_copy(self):
"""Yield the corresponding public node for this node."""
d = dict(chain_code=self._chain_code, depth=self._depth,
parent_fingerprint=self._parent_fingerprint,
child_index=self._child_index, public_pair=self.public_pair())
return self.__class__(**d)
|
python
|
{
"resource": ""
}
|
q26642
|
BIP32Node.subkey
|
train
|
def subkey(self, i=0, is_hardened=False, as_private=None):
"""
Yield a child node for this node.
i:
the index for this node.
is_hardened:
use "hardened key derivation". That is, the public version
of this node cannot calculate this child.
as_private:
set to True to get a private subkey.
"""
if as_private is None:
as_private = self.secret_exponent() is not None
is_hardened = not not is_hardened
as_private = not not as_private
lookup = (i, is_hardened, as_private)
if lookup not in self._subkey_cache:
self._subkey_cache[lookup] = self._subkey(i, is_hardened, as_private)
return self._subkey_cache[lookup]
|
python
|
{
"resource": ""
}
|
q26643
|
crack_secret_exponent_from_k
|
train
|
def crack_secret_exponent_from_k(generator, signed_value, sig, k):
"""
Given a signature of a signed_value and a known k, return the secret exponent.
"""
r, s = sig
return ((s * k - signed_value) * generator.inverse(r)) % generator.order()
|
python
|
{
"resource": ""
}
|
q26644
|
crack_k_from_sigs
|
train
|
def crack_k_from_sigs(generator, sig1, val1, sig2, val2):
"""
Given two signatures with the same secret exponent and K value, return that K value.
"""
# s1 = v1 / k1 + (se * r1) / k1
# s2 = v2 / k2 + (se * r2) / k2
# and k = k1 = k2
# so
# k * s1 = v1 + (se * r1)
# k * s2 = v2 + (se * r2)
# so
# k * s1 * r2 = r2 * v1 + (se * r1 * r2)
# k * s2 * r1 = r1 * v2 + (se * r2 * r1)
# so
# k (s1 * r2 - s2 * r1) = r2 * v1 - r1 * v2
# so
# k = (r2 * v1 - r1 * v2) / (s1 * r2 - s2 * r1)
r1, s1 = sig1
r2, s2 = sig2
if r1 != r2:
raise ValueError("r values of signature do not match")
k = (r2 * val1 - r1 * val2) * generator.inverse(r2 * s1 - r1 * s2)
return k % generator.order()
|
python
|
{
"resource": ""
}
|
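The algebra in the two preceding helpers can be checked with plain modular arithmetic. The sketch below uses a small prime as a stand-in for the generator order (no actual curve points), signs two values with the same secret exponent and k, then recovers first k and then the secret exponent:

n = 10007                                    # stand-in for generator.order(), a small prime

def inv(x):
    return pow(x, n - 2, n)                  # modular inverse (n prime)

se, k, r = 1234, 271, 555                    # secret exponent, reused k, shared r value
v1, v2 = 42, 99                              # the two signed values
s1 = (v1 + se * r) * inv(k) % n
s2 = (v2 + se * r) * inv(k) % n

k_cracked = (r * v1 - r * v2) * inv(r * s1 - r * s2) % n    # crack_k_from_sigs formula
se_cracked = (s1 * k_cracked - v1) * inv(r) % n             # crack_secret_exponent_from_k formula
print(k_cracked == k, se_cracked == se)                     # True True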
q26645
|
subpaths_for_path_range
|
train
|
def subpaths_for_path_range(path_range, hardening_chars="'pH"):
"""
Return an iterator of paths
# examples:
# 0/1H/0-4 => ['0/1H/0', '0/1H/1', '0/1H/2', '0/1H/3', '0/1H/4']
# 0/2,5,9-11 => ['0/2', '0/5', '0/9', '0/10', '0/11']
# 3H/2/5/15-20p => ['3H/2/5/15p', '3H/2/5/16p', '3H/2/5/17p', '3H/2/5/18p',
# '3H/2/5/19p', '3H/2/5/20p']
# 5-6/7-8p,15/1-2 => ['5/7H/1', '5/7H/2', '5/8H/1', '5/8H/2',
# '5/15/1', '5/15/2', '6/7H/1', '6/7H/2', '6/8H/1', '6/8H/2', '6/15/1', '6/15/2']
"""
if path_range == '':
yield ''
return
def range_iterator(the_range):
for r in the_range.split(","):
is_hardened = r[-1] in hardening_chars
hardened_char = hardening_chars[-1] if is_hardened else ''
if is_hardened:
r = r[:-1]
if '-' in r:
low, high = [int(x) for x in r.split("-", 1)]
for t in range(low, high+1):
yield "%d%s" % (t, hardened_char)
else:
yield "%s%s" % (r, hardened_char)
components = path_range.split("/")
iterators = [range_iterator(c) for c in components]
for v in itertools.product(*iterators):
yield '/'.join(v)
|
python
|
{
"resource": ""
}
|
q26646
|
double_sha256
|
train
|
def double_sha256(data):
"""A standard compound hash."""
return bytes_as_revhex(hashlib.sha256(hashlib.sha256(data).digest()).digest())
|
python
|
{
"resource": ""
}
|
q26647
|
Optimizations.multiply
|
train
|
def multiply(self, p, e):
"""Multiply a point by an integer."""
e %= self.order()
if p == self._infinity or e == 0:
return self._infinity
pubkey = create_string_buffer(64)
public_pair_bytes = b'\4' + to_bytes_32(p[0]) + to_bytes_32(p[1])
r = libsecp256k1.secp256k1_ec_pubkey_parse(
libsecp256k1.ctx, pubkey, public_pair_bytes, len(public_pair_bytes))
if not r:
return False
r = libsecp256k1.secp256k1_ec_pubkey_tweak_mul(libsecp256k1.ctx, pubkey, to_bytes_32(e))
if not r:
return self._infinity
pubkey_serialized = create_string_buffer(65)
pubkey_size = c_size_t(65)
libsecp256k1.secp256k1_ec_pubkey_serialize(
libsecp256k1.ctx, pubkey_serialized, byref(pubkey_size), pubkey, SECP256K1_EC_UNCOMPRESSED)
x = from_bytes_32(pubkey_serialized[1:33])
y = from_bytes_32(pubkey_serialized[33:])
return self.Point(x, y)
|
python
|
{
"resource": ""
}
|
q26648
|
make_const_handler
|
train
|
def make_const_handler(data):
"""
Create a handler for a data opcode that returns a constant.
"""
data = bytes_as_hex(data)
def constant_data_opcode_handler(script, pc, verify_minimal_data=False):
return pc+1, data
return constant_data_opcode_handler
|
python
|
{
"resource": ""
}
|
q26649
|
make_sized_handler
|
train
|
def make_sized_handler(size, const_values, non_minimal_data_handler):
"""
Create a handler for a data opcode that returns literal data of a fixed size.
"""
const_values = list(const_values)
def constant_size_opcode_handler(script, pc, verify_minimal_data=False):
pc += 1
data = bytes_as_hex(script[pc:pc+size])
if len(data) < size:
return pc+1, None
if verify_minimal_data and data in const_values:
non_minimal_data_handler("not minimal push of %s" % repr(data))
return pc+size, data
return constant_size_opcode_handler
|
python
|
{
"resource": ""
}
|
q26650
|
make_variable_handler
|
train
|
def make_variable_handler(dec_f, sized_values, min_size, non_minimal_data_handler):
"""
Create a handler for a data opcode that returns literal data of a variable size
that's fetched and decoded by the function dec_f.
"""
sized_values = list(sized_values)
def f(script, pc, verify_minimal_data=False):
size, pc = dec_f(script, pc)
data = bytes_as_hex(script[pc:pc+size])
if len(data) < size:
return pc+1, None
if verify_minimal_data:
if size in sized_values or size <= min_size:
non_minimal_data_handler("not minimal push of data with size %d" % size)
return pc+size, data
return f
|
python
|
{
"resource": ""
}
|
q26651
|
make_sized_encoder
|
train
|
def make_sized_encoder(opcode_value):
"""
Create an encoder that encodes the given opcode value as binary data
and appends the given data.
"""
opcode_bin = int2byte(opcode_value)
def f(data):
return opcode_bin + data
return f
|
python
|
{
"resource": ""
}
|
q26652
|
ascend_bip32
|
train
|
def ascend_bip32(bip32_pub_node, secret_exponent, child):
"""
Given a BIP32Node with public derivation child "child" with a known private key,
return the secret exponent for the bip32_pub_node.
"""
i_as_bytes = struct.pack(">l", child)
sec = public_pair_to_sec(bip32_pub_node.public_pair(), compressed=True)
data = sec + i_as_bytes
I64 = hmac.HMAC(key=bip32_pub_node._chain_code, msg=data, digestmod=hashlib.sha512).digest()
I_left_as_exponent = from_bytes_32(I64[:32])
return (secret_exponent - I_left_as_exponent) % bip32_pub_node._generator.order()
|
python
|
{
"resource": ""
}
|
q26653
|
Key.wif
|
train
|
def wif(self, is_compressed=None):
"""
Return the WIF representation of this key, if available.
"""
secret_exponent = self.secret_exponent()
if secret_exponent is None:
return None
if is_compressed is None:
is_compressed = self.is_compressed()
blob = to_bytes_32(secret_exponent)
if is_compressed:
blob += b'\01'
return self._network.wif_for_blob(blob)
|
python
|
{
"resource": ""
}
|
q26654
|
Key.sec
|
train
|
def sec(self, is_compressed=None):
"""
Return the SEC representation of this key, if available.
"""
if is_compressed is None:
is_compressed = self.is_compressed()
public_pair = self.public_pair()
if public_pair is None:
return None
return public_pair_to_sec(public_pair, compressed=is_compressed)
|
python
|
{
"resource": ""
}
|
q26655
|
Key.sec_as_hex
|
train
|
def sec_as_hex(self, is_compressed=None):
"""
Return the SEC representation of this key as hex text.
"""
sec = self.sec(is_compressed=is_compressed)
return self._network.sec_text_for_blob(sec)
|
python
|
{
"resource": ""
}
|
q26656
|
Key.hash160
|
train
|
def hash160(self, is_compressed=None):
"""
Return the hash160 representation of this key, if available.
"""
if is_compressed is None:
is_compressed = self.is_compressed()
if is_compressed:
if self._hash160_compressed is None:
self._hash160_compressed = hash160(self.sec(is_compressed=is_compressed))
return self._hash160_compressed
if self._hash160_uncompressed is None:
self._hash160_uncompressed = hash160(self.sec(is_compressed=is_compressed))
return self._hash160_uncompressed
|
python
|
{
"resource": ""
}
|
q26657
|
Key.address
|
train
|
def address(self, is_compressed=None):
"""
Return the public address representation of this key, if available.
"""
return self._network.address.for_p2pkh(self.hash160(is_compressed=is_compressed))
|
python
|
{
"resource": ""
}
|
q26658
|
Key.as_text
|
train
|
def as_text(self):
"""
Return a textual representation of this key.
"""
if self.secret_exponent():
return self.wif()
sec_hex = self.sec_as_hex()
if sec_hex:
return sec_hex
return self.address()
|
python
|
{
"resource": ""
}
|
q26659
|
Key.sign
|
train
|
def sign(self, h):
"""
Return a der-encoded signature for a hash h.
Will throw a RuntimeError if this key is not a private key
"""
if not self.is_private():
raise RuntimeError("Key must be private to be able to sign")
val = from_bytes_32(h)
r, s = self._generator.sign(self.secret_exponent(), val)
return sigencode_der(r, s)
|
python
|
{
"resource": ""
}
|
q26660
|
Key.verify
|
train
|
def verify(self, h, sig):
"""
Return whether a signature is valid for hash h using this key.
"""
val = from_bytes_32(h)
pubkey = self.public_pair()
return self._generator.verify(pubkey, val, sigdecode_der(sig))
|
python
|
{
"resource": ""
}
|
q26661
|
create_tx
|
train
|
def create_tx(network, spendables, payables, fee="standard", lock_time=0, version=1):
"""
This function provides the easiest way to create an unsigned transaction.
All coin values are in satoshis.
:param spendables: a list of Spendable objects, which act as inputs.
Each item in the list can be a Spendable, or text from Spendable.as_text,
or a dictionary from Spendable.as_dict (which might be easier for
airgapped transactions, for example).
:param payables: a list where each entry is an address, or a tuple of
(address, coin_value). If the coin_value is missing or
zero, this address is thrown into the "split pool". Funds not
explicitly claimed by the fee or an address are shared as
equally as possible among the split pool. All coins are consumed:
if the amount to be split does not divide evenly, some of the earlier
addresses will get an extra satoshi.
:param fee: an integer, or the (deprecated) string "standard" for it to be calculated
:param version: (optional) the version to use in the transaction. Defaults to 1.
:param lock_time: (optional) the lock_time to use in the transaction. Defaults to 0.
:return: :class:`Tx <Tx>` object, with unspents populated
:rtype: pycoin.tx.Tx.Tx
Usage::
>>> spendables = spendables_for_address("1BgGZ9tcN4rm9KBzDn7KprQz87SZ26SAMH")
>>> tx = create_tx(network, spendables, ["1cMh228HTCiwS8ZsaakH8A8wze1JR5ZsP"], fee=0)
This will move all available reported funds from 1BgGZ9tcN4rm9KBzDn7KprQz87SZ26SAMH
to 1cMh228HTCiwS8ZsaakH8A8wze1JR5ZsP, with no transaction fees (which means it might
take a while to confirm, possibly never).
"""
Tx = network.tx
def _fix_spendable(s):
if isinstance(s, Tx.Spendable):
return s
if not hasattr(s, "keys"):
return Tx.Spendable.from_text(s)
return Tx.Spendable.from_dict(s)
spendables = [_fix_spendable(s) for s in spendables]
txs_in = [spendable.tx_in() for spendable in spendables]
txs_out = []
for payable in payables:
if len(payable) == 2:
address, coin_value = payable
else:
address = payable
coin_value = 0
script = network.contract.for_address(address)
txs_out.append(Tx.TxOut(coin_value, script))
tx = Tx(version=version, txs_in=txs_in, txs_out=txs_out, lock_time=lock_time)
tx.set_unspents(spendables)
distribute_from_split_pool(tx, fee)
return tx
|
python
|
{
"resource": ""
}
|
q26662
|
Curve.multiply
|
train
|
def multiply(self, p, e):
"""
multiply a point by an integer.
:param p: a point
:param e: an integer
:returns: the result, equivalent to adding p to itself e times
"""
if self._order:
e %= self._order
if p == self._infinity or e == 0:
return self._infinity
e3 = 3 * e
i = _leftmost_bit(e3) >> 1
result = p
while i > 1:
result += result
if (e3 & i):
v = [result, result+p]
else:
v = [result-p, result]
result = v[0 if (e & i) else 1]
i >>= 1
return result
|
python
|
{
"resource": ""
}
|
q26663
|
Block.parse
|
train
|
def parse(class_, f, include_transactions=True, include_offsets=None, check_merkle_hash=True):
"""
Parse the Block from the file-like object
"""
block = class_.parse_as_header(f)
if include_transactions:
count = parse_struct("I", f)[0]
txs = block._parse_transactions(f, count, include_offsets=include_offsets)
block.set_txs(txs, check_merkle_hash=check_merkle_hash)
return block
|
python
|
{
"resource": ""
}
|
q26664
|
Block.check_merkle_hash
|
train
|
def check_merkle_hash(self):
"""Raise a BadMerkleRootError if the Merkle hash of the
transactions does not match the Merkle hash included in the block."""
calculated_hash = merkle([tx.hash() for tx in self.txs], double_sha256)
if calculated_hash != self.merkle_root:
raise BadMerkleRootError(
"calculated %s but block contains %s" % (b2h(calculated_hash), b2h(self.merkle_root)))
|
python
|
{
"resource": ""
}
|
q26665
|
TxIn.public_key_sec
|
train
|
def public_key_sec(self):
"""Return the public key as sec, or None in case of failure."""
if self.is_coinbase():
return None
opcodes = ScriptTools.opcode_list(self.script)
if len(opcodes) == 2 and opcodes[0].startswith("[30"):
# the second opcode is probably the public key as sec
sec = h2b(opcodes[1][1:-1])
return sec
return None
|
python
|
{
"resource": ""
}
|
q26666
|
Tx.coinbase_tx
|
train
|
def coinbase_tx(cls, public_key_sec, coin_value, coinbase_bytes=b'', version=1, lock_time=0):
"""Create the special "first in block" transaction that includes the mining fees."""
tx_in = cls.TxIn.coinbase_tx_in(script=coinbase_bytes)
COINBASE_SCRIPT_OUT = "%s OP_CHECKSIG"
script_text = COINBASE_SCRIPT_OUT % b2h(public_key_sec)
script_bin = BitcoinScriptTools.compile(script_text)
tx_out = cls.TxOut(coin_value, script_bin)
return cls(version, [tx_in], [tx_out], lock_time)
|
python
|
{
"resource": ""
}
|
q26667
|
Tx.parse
|
train
|
def parse(class_, f, allow_segwit=None):
"""Parse a Bitcoin transaction Tx.
:param f: a file-like object that contains a binary streamed transaction
:param allow_segwit: (optional) set to True to allow parsing of segwit transactions.
The default value is defined by the class variable ALLOW_SEGWIT
"""
if allow_segwit is None:
allow_segwit = class_.ALLOW_SEGWIT
txs_in = []
txs_out = []
version, = parse_struct("L", f)
v1 = ord(f.read(1))
is_segwit = allow_segwit and (v1 == 0)
v2 = None
if is_segwit:
flag = f.read(1)
if flag == b'\0':
raise ValueError("bad flag in segwit")
if flag == b'\1':
v1 = None
else:
is_segwit = False
v2 = ord(flag)
count = parse_satoshi_int(f, v=v1)
txs_in = []
for i in range(count):
txs_in.append(class_.TxIn.parse(f))
count = parse_satoshi_int(f, v=v2)
txs_out = []
for i in range(count):
txs_out.append(class_.TxOut.parse(f))
if is_segwit:
for tx_in in txs_in:
stack = []
count = parse_satoshi_int(f)
for i in range(count):
stack.append(parse_satoshi_string(f))
tx_in.witness = stack
lock_time, = parse_struct("L", f)
return class_(version, txs_in, txs_out, lock_time)
|
python
|
{
"resource": ""
}
|
q26668
|
Tx.stream
|
train
|
def stream(self, f, blank_solutions=False, include_unspents=False, include_witness_data=True):
"""Stream a Bitcoin transaction Tx to the file-like object f.
:param f: writable file-like object to stream binary data of transaction
:param blank_solutions: (optional) clear out the solutions scripts, effectively "unsigning" the
transaction before writing it. Defaults to False
:param include_unspents: (optional) stream out the Spendable objects after streaming the transaction.
This is a pycoin-specific extension. Defaults to False.
:param include_witness_data: (optional) stream segwit transactions including the witness data if the
transaction has any witness data. Defaults to True.
"""
include_witnesses = include_witness_data and self.has_witness_data()
stream_struct("L", f, self.version)
if include_witnesses:
f.write(b'\0\1')
stream_struct("I", f, len(self.txs_in))
for t in self.txs_in:
t.stream(f, blank_solutions=blank_solutions)
stream_struct("I", f, len(self.txs_out))
for t in self.txs_out:
t.stream(f)
if include_witnesses:
for tx_in in self.txs_in:
witness = tx_in.witness
stream_struct("I", f, len(witness))
for w in witness:
stream_satoshi_string(f, w)
stream_struct("L", f, self.lock_time)
if include_unspents and not self.missing_unspents():
self.stream_unspents(f)
|
python
|
{
"resource": ""
}
|
q26669
|
Tx.validate_unspents
|
train
|
def validate_unspents(self, tx_db):
"""
Spendable objects returned from blockchain.info or
similar services contain coin_value information that must be trusted
on faith. Mistaken coin_value data can result in coins being wasted
to fees.
This function solves this problem by iterating over the incoming
transactions, fetching them from the tx_db in full, and verifying
that the coin_values are as expected.
Returns the fee for this transaction. If any of the spendables set by
tx.set_unspents do not match the authenticated transactions, a
ValidationFailureError is raised.
"""
tx_hashes = set((tx_in.previous_hash for tx_in in self.txs_in))
# build a local copy of the DB
tx_lookup = {}
for h in tx_hashes:
if h == ZERO32:
continue
the_tx = tx_db.get(h)
if the_tx is None:
raise KeyError("hash id %s not in tx_db" % b2h_rev(h))
if the_tx.hash() != h:
raise KeyError("attempt to load Tx %s yielded a Tx with id %s" % (h2b_rev(h), the_tx.id()))
tx_lookup[h] = the_tx
for idx, tx_in in enumerate(self.txs_in):
if tx_in.previous_hash == ZERO32:
continue
txs_out = tx_lookup[tx_in.previous_hash].txs_out
if tx_in.previous_index >= len(txs_out):
raise BadSpendableError("tx_out index %d is too big for Tx %s" %
(tx_in.previous_index, b2h_rev(tx_in.previous_hash)))
tx_out1 = txs_out[tx_in.previous_index]
tx_out2 = self.unspents[idx]
if tx_out1.coin_value != tx_out2.coin_value:
raise BadSpendableError(
"unspents[%d] coin value mismatch (%d vs %d)" % (
idx, tx_out1.coin_value, tx_out2.coin_value))
if tx_out1.script != tx_out2.script:
raise BadSpendableError("unspents[%d] script mismatch!" % idx)
return self.fee()
|
python
|
{
"resource": ""
}
|
q26670
|
locked_blocks_iterator
|
train
|
def locked_blocks_iterator(blockfile, start_info=(0, 0), cached_headers=50, batch_size=50):
"""
This method loads blocks from disk, skipping any orphan blocks.
"""
f = blockfile
current_state = []
def change_state(bc, ops):
for op, bh, work in ops:
if op == 'add':
current_state.append(bh)
else:
current_state.pop()
bc = BlockChain()
bc.add_change_callback(change_state)
bhs = []
index = 0
info_offset = start_info
while 1:
v = blockfile.next_offset(info_offset)
if v is None:
break
block_offset, info_offset = v
f.jump_to(block_offset)
bh = Block.parse_as_header(f)
bh.info = block_offset
bhs.append(bh)
if len(bhs) > batch_size:
bc.add_headers(bhs)
bhs = []
if len(current_state) > cached_headers:
for bh in current_state[:cached_headers]:
bh.index = index
yield bh
index += 1
bc.lock_to_index(index)
current_state = current_state[cached_headers:]
|
python
|
{
"resource": ""
}
|
q26671
|
post_unpack_merkleblock
|
train
|
def post_unpack_merkleblock(d, f):
"""
A post-processing "post_unpack" to merkleblock messages.
It validates the merkle proofs (throwing an exception if there's
an error), and returns the list of transaction hashes in "tx_hashes".
The transactions are supposed to be sent immediately after the merkleblock message.
"""
level_widths = []
count = d["total_transactions"]
while count > 1:
level_widths.append(count)
count += 1
count //= 2
level_widths.append(1)
level_widths.reverse()
tx_acc = []
flags = d["flags"]
hashes = list(reversed(d["hashes"]))
left_hash, flag_index = _recurse(level_widths, 0, 0, hashes, flags, 0, tx_acc)
if len(hashes) > 0:
raise ValueError("extra hashes: %s" % hashes)
idx, r = divmod(flag_index-1, 8)
if idx != len(flags) - 1:
raise ValueError("not enough flags consumed")
if flags[idx] > (1 << (r+1))-1:
raise ValueError("unconsumed 1 flag bits set")
if left_hash != d["header"].merkle_root:
raise ValueError(
"merkle root %s does not match calculated hash %s" % (
b2h_rev(d["header"].merkle_root), b2h_rev(left_hash)))
d["tx_hashes"] = tx_acc
return d
|
python
|
{
"resource": ""
}
|
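The level_widths loop at the top of post_unpack_merkleblock is ceiling division by two at each tree level. For a block with 7 transactions it produces the widths from the root down:

count = 7                    # total_transactions
level_widths = []
while count > 1:
    level_widths.append(count)
    count += 1
    count //= 2              # ceiling division: 7 -> 4 -> 2 -> 1
level_widths.append(1)
level_widths.reverse()
print(level_widths)          # [1, 2, 4, 7]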
q26672
|
_make_parser
|
train
|
def _make_parser(streamer, the_struct):
"Return a function that parses the given structure into a dict"
struct_items = [s.split(":") for s in the_struct.split()]
names = [s[0] for s in struct_items]
types = ''.join(s[1] for s in struct_items)
def f(message_stream):
return streamer.parse_as_dict(names, types, message_stream)
return f
|
python
|
{
"resource": ""
}
|
q26673
|
make_post_unpack_alert
|
train
|
def make_post_unpack_alert(streamer):
"""
Post-processor to "alert" message, to add an "alert_info" dictionary of parsed
alert information.
"""
the_struct = ("version:L relayUntil:Q expiration:Q id:L cancel:L setCancel:[L] minVer:L "
"maxVer:L setSubVer:[S] priority:L comment:S statusBar:S reserved:S")
alert_submessage_parser = _make_parser(streamer, the_struct)
def post_unpack_alert(d, f):
d1 = alert_submessage_parser(io.BytesIO(d["payload"]))
d["alert_info"] = d1
return d
return post_unpack_alert
|
python
|
{
"resource": ""
}
|
q26674
|
standard_parsing_functions
|
train
|
def standard_parsing_functions(Block, Tx):
"""
Return the standard parsing functions for a given Block and Tx class.
The return value is expected to be used with the standard_streamer function.
"""
def stream_block(f, block):
assert isinstance(block, Block)
block.stream(f)
def stream_blockheader(f, blockheader):
assert isinstance(blockheader, Block)
blockheader.stream_header(f)
def stream_tx(f, tx):
assert isinstance(tx, Tx)
tx.stream(f)
def parse_int_6(f):
b = f.read(6) + b'\0\0'
return struct.unpack(b, "<L")[0]
def stream_int_6(f, v):
f.write(struct.pack(v, "<L")[:6])
more_parsing = [
("A", (PeerAddress.parse, lambda f, peer_addr: peer_addr.stream(f))),
("v", (InvItem.parse, lambda f, inv_item: inv_item.stream(f))),
("T", (Tx.parse, stream_tx)),
("B", (Block.parse, stream_block)),
("z", (Block.parse_as_header, stream_blockheader)),
("1", (lambda f: struct.unpack("B", f.read(1))[0], lambda f, v: f.write(struct.pack("B", v)))),
("6", (parse_int_6, stream_int_6)),
("O", (lambda f: True if f.read(1) else False,
lambda f, v: f.write(b'' if v is None else struct.pack("B", v)))),
]
all_items = list(STREAMER_FUNCTIONS.items())
all_items.extend(more_parsing)
return all_items
|
python
|
{
"resource": ""
}
|
q26675
|
make_parser_and_packer
|
train
|
def make_parser_and_packer(streamer, message_dict, message_post_unpacks):
"""
Create a parser and a packer for a peer's network messages.
streamer:
used in conjunction with the message_dict. The message_dict turns a message into
a string specifying the fields, and this dictionary specifies how to pack or unpack
fields to or from bytes
message_dict:
a dictionary specifying how to pack or unpack the various messages like "version"
message_post_unpacks:
a dictionary specifying functions to call to postprocess message to, for example
extract submessages, like in "alert"
"""
message_parsers = dict((k, _make_parser(streamer, v)) for k, v in message_dict.items())
def parse_from_data(message_name, data):
message_stream = io.BytesIO(data)
parser = message_parsers.get(message_name)
if parser is None:
raise KeyError("unknown message: %s" % message_name)
d = parser(message_stream)
post_unpack = message_post_unpacks.get(message_name)
if post_unpack:
d = post_unpack(d, message_stream)
return d
def pack_from_data(message_name, **kwargs):
the_struct = message_dict[message_name]
if not the_struct:
return b''
f = io.BytesIO()
the_fields = the_struct.split(" ")
pairs = [t.split(":") for t in the_fields]
for name, type in pairs:
if type[0] == '[':
streamer.stream_struct("I", f, len(kwargs[name]))
for v in kwargs[name]:
if not isinstance(v, (tuple, list)):
v = [v]
streamer.stream_struct(type[1:-1], f, *v)
else:
streamer.stream_struct(type, f, kwargs[name])
return f.getvalue()
return parse_from_data, pack_from_data
|
python
|
{
"resource": ""
}
|
q26676
|
ScriptTools.compile
|
train
|
def compile(self, s):
"""
Compile the given script. Returns a bytes object with the compiled script.
"""
f = io.BytesIO()
for t in s.split():
t_up = t.upper()
if t_up in self.opcode_to_int:
f.write(int2byte(self.opcode_to_int[t_up]))
elif ("OP_%s" % t_up) in self.opcode_to_int:
f.write(int2byte(self.opcode_to_int["OP_%s" % t_up]))
elif t_up.startswith("0X"):
d = binascii.unhexlify(t[2:])
f.write(d)
else:
v = self.compile_expression(t)
self.write_push_data([v], f)
return f.getvalue()
|
python
|
{
"resource": ""
}
|
q26677
|
ScriptTools.get_opcodes
|
train
|
def get_opcodes(self, script, verify_minimal_data=False, pc=0):
"""
Iterator. Return opcode, data, pc, new_pc at each step
"""
while pc < len(script):
opcode, data, new_pc, is_ok = self.scriptStreamer.get_opcode(
script, pc, verify_minimal_data=verify_minimal_data)
yield opcode, data, pc, new_pc
pc = new_pc
|
python
|
{
"resource": ""
}
|
q26678
|
ScriptTools.opcode_list
|
train
|
def opcode_list(self, script):
"""Disassemble the given script. Returns a list of opcodes."""
opcodes = []
new_pc = 0
try:
for opcode, data, pc, new_pc in self.get_opcodes(script):
opcodes.append(self.disassemble_for_opcode_data(opcode, data))
except ScriptError:
opcodes.append(binascii.hexlify(script[new_pc:]).decode("utf8"))
return opcodes
|
python
|
{
"resource": ""
}
|
q26679
|
Annotate.annotate_scripts
|
train
|
def annotate_scripts(self, tx, tx_in_idx):
"return list of pre_annotations, pc, opcode, instruction, post_annotations"
# input_annotations_f, output_annotations_f = annotation_f_for_scripts(tx, tx_in_idx)
data_annotations = collections.defaultdict(list)
def traceback_f(opcode, data, pc, vmc):
if opcode in (self.OP_CHECKSIG, self.OP_CHECKSIGVERIFY):
self.annotate_checksig(vmc, data_annotations)
if opcode in (self.OP_CHECKMULTISIG, self.OP_CHECKMULTISIGVERIFY):
self.annotate_checkmultisig(vmc, data_annotations)
return
try:
tx.check_solution(tx_in_idx, traceback_f=traceback_f)
except ScriptError:
pass
r = []
def traceback_f(opcode, data, pc, vmc):
a0 = []
if vmc.pc == 0:
if vmc.is_solution_script:
a0.append("--- SIGNATURE SCRIPT START")
else:
a0.append("--- PUBLIC KEY SCRIPT START")
r.append((a0, vmc.pc, opcode, self.instruction_for_opcode(opcode, data), data_annotations[data]))
try:
tx.check_solution(tx_in_idx, traceback_f=traceback_f)
except ScriptError:
pass
# the script may have ended early, so let's just double-check
try:
for idx, (opcode, data, pc, new_pc) in enumerate(itertools.chain(
self._script_tools.get_opcodes(tx.unspents[tx_in_idx].script),
self._script_tools.get_opcodes(tx.txs_in[tx_in_idx].script))):
if idx >= len(r):
r.append(([], pc, opcode, self.instruction_for_opcode(opcode, data), []))
except IndexError:
pass
return r
|
python
|
{
"resource": ""
}
|
q26680
|
WhoSigned.solution_blobs
|
train
|
def solution_blobs(self, tx, tx_in_idx):
"""
This iterator yields data blobs that appear in the last solution_script or the witness.
"""
sc = tx.SolutionChecker(tx)
tx_context = sc.tx_context_for_idx(tx_in_idx)
# set solution_stack in case there are no results from puzzle_and_solution_iterator
solution_stack = []
for puzzle_script, solution_stack, flags, sighash_f in sc.puzzle_and_solution_iterator(tx_context):
pass
# we only care about the last one
for s in solution_stack:
yield s
|
python
|
{
"resource": ""
}
|
q26681
|
WhoSigned.extract_secs
|
train
|
def extract_secs(self, tx, tx_in_idx):
"""
For a given script solution, iterate over and yield its sec blobs
"""
sc = tx.SolutionChecker(tx)
tx_context = sc.tx_context_for_idx(tx_in_idx)
# set solution_stack in case there are no results from puzzle_and_solution_iterator
solution_stack = []
for puzzle_script, solution_stack, flags, sighash_f in sc.puzzle_and_solution_iterator(tx_context):
for opcode, data, pc, new_pc in self._script_tools.get_opcodes(puzzle_script):
if data and is_sec(data):
yield data
for data in solution_stack:
if is_sec(data):
yield data
|
python
|
{
"resource": ""
}
|
q26682
|
WhoSigned.public_pairs_for_script
|
train
|
def public_pairs_for_script(self, tx, tx_in_idx, generator):
"""
For a given script, iterate over and pull out public pairs encoded as sec values.
"""
public_pairs = []
for sec in self.extract_secs(tx, tx_in_idx):
try:
public_pairs.append(sec_to_public_pair(sec, generator))
except EncodingError:
pass
return public_pairs
|
python
|
{
"resource": ""
}
|
q26683
|
iterate_symbols
|
train
|
def iterate_symbols():
"""
Return an iterator yielding registered netcodes.
"""
for prefix in search_prefixes():
package = importlib.import_module(prefix)
for importer, modname, ispkg in pkgutil.walk_packages(path=package.__path__, onerror=lambda x: None):
network = network_for_netcode(modname)
if network:
yield network.symbol.upper()
|
python
|
{
"resource": ""
}
|
q26684
|
BlockcypherProvider.tx_for_tx_hash
|
train
|
def tx_for_tx_hash(self, tx_hash):
"""
returns the pycoin.tx object for tx_hash
"""
try:
url_append = "?token=%s&includeHex=true" % self.api_key
url = self.base_url("txs/%s%s" % (b2h_rev(tx_hash), url_append))
result = json.loads(urlopen(url).read().decode("utf8"))
tx = Tx.parse(io.BytesIO(h2b(result.get("hex"))))
return tx
except Exception:
raise
|
python
|
{
"resource": ""
}
|
q26685
|
BlockcypherProvider.get_balance
|
train
|
def get_balance(self, address):
"""
returns the balance object from blockcypher for address
"""
url_append = "/balance?token=%s" % self.api_key
url = self.base_url("addrs/%s" % (address + url_append))
result = json.loads(urlopen(url).read().decode("utf8"))
return result
|
python
|
{
"resource": ""
}
|
q26686
|
BlockcypherProvider.broadcast_tx
|
train
|
def broadcast_tx(self, tx):
"""
broadcast a transaction to the network
"""
url = self.base_url("txs/push")
data = {"tx": tx.as_hex()}
result = json.loads(urlopen(url, data=json.dumps(data)).read().decode("utf8"))
return result
|
python
|
{
"resource": ""
}
|
q26687
|
b2a_base58
|
train
|
def b2a_base58(s):
"""Convert binary to base58 using BASE58_ALPHABET. Like Bitcoin addresses."""
v, prefix = to_long(256, lambda x: x, iterbytes(s))
s = from_long(v, prefix, BASE58_BASE, lambda v: BASE58_ALPHABET[v])
return s.decode("utf8")
|
python
|
{
"resource": ""
}
|
q26688
|
a2b_hashed_base58
|
train
|
def a2b_hashed_base58(s):
"""
If the passed string is hashed_base58, return the binary data.
Otherwise raises an EncodingError.
"""
data = a2b_base58(s)
data, the_hash = data[:-4], data[-4:]
if double_sha256(data)[:4] == the_hash:
return data
raise EncodingError("hashed base58 has bad checksum %s" % s)
|
python
|
{
"resource": ""
}
|
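The checksum convention a2b_hashed_base58 verifies is simply the first four bytes of a double SHA-256 over the payload. A self-contained sketch of building and checking such a blob with hashlib (the payload here is an arbitrary placeholder, not a real address):

import hashlib

def double_sha256(data):
    return hashlib.sha256(hashlib.sha256(data).digest()).digest()

payload = b"\x00" + bytes(20)                 # e.g. a version byte plus a 20-byte hash160
blob = payload + double_sha256(payload)[:4]   # what gets base58-encoded
data, checksum = blob[:-4], blob[-4:]
assert double_sha256(data)[:4] == checksum    # the check a2b_hashed_base58 performs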
q26689
|
BloomFilter.copy
|
train
|
def copy(self):
"""Return a copy of this bloom filter.
"""
new_filter = BloomFilter(self.capacity, self.error_rate)
new_filter.bitarray = self.bitarray.copy()
return new_filter
|
python
|
{
"resource": ""
}
|
q26690
|
BloomFilter.intersection
|
train
|
def intersection(self, other):
""" Calculates the intersection of the two underlying bitarrays and returns
a new bloom filter object."""
if self.capacity != other.capacity or \
self.error_rate != other.error_rate:
raise ValueError("Intersecting filters requires both filters to \
have equal capacity and error rate")
new_bloom = self.copy()
new_bloom.bitarray = new_bloom.bitarray & other.bitarray
return new_bloom
|
python
|
{
"resource": ""
}
|
q26691
|
BloomFilter.tofile
|
train
|
def tofile(self, f):
"""Write the bloom filter to file object `f'. Underlying bits
are written as machine values. This is much more space
efficient than pickling the object."""
f.write(pack(self.FILE_FMT, self.error_rate, self.num_slices,
self.bits_per_slice, self.capacity, self.count))
(f.write(self.bitarray.tobytes()) if is_string_io(f)
else self.bitarray.tofile(f))
|
python
|
{
"resource": ""
}
|
q26692
|
BloomFilter.fromfile
|
train
|
def fromfile(cls, f, n=-1):
"""Read a bloom filter from file-object `f' serialized with
``BloomFilter.tofile''. If `n' > 0 read only so many bytes."""
headerlen = calcsize(cls.FILE_FMT)
if 0 < n < headerlen:
raise ValueError('n too small!')
filter = cls(1) # Bogus instantiation, we will `_setup'.
filter._setup(*unpack(cls.FILE_FMT, f.read(headerlen)))
filter.bitarray = bitarray.bitarray(endian='little')
if n > 0:
(filter.bitarray.frombytes(f.read(n-headerlen)) if is_string_io(f)
else filter.bitarray.fromfile(f, n - headerlen))
else:
(filter.bitarray.frombytes(f.read()) if is_string_io(f)
else filter.bitarray.fromfile(f))
if filter.num_bits != filter.bitarray.length() and \
(filter.num_bits + (8 - filter.num_bits % 8)
!= filter.bitarray.length()):
raise ValueError('Bit length mismatch!')
return filter
|
python
|
{
"resource": ""
}
|
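A minimal round-trip sketch for tofile()/fromfile(), assuming these methods come from the pybloom-live package's BloomFilter (the snippets above appear to match that codebase, but the package name is an assumption):

import io
from pybloom_live import BloomFilter   # assumed package providing this BloomFilter

bf = BloomFilter(capacity=1000, error_rate=0.001)
bf.add("hello")

buf = io.BytesIO()
bf.tofile(buf)                         # serialize: header fields plus the raw bit array
buf.seek(0)

bf2 = BloomFilter.fromfile(buf)
print("hello" in bf2, "world" in bf2)  # True False (up to the configured error rate)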
q26693
|
ScalableBloomFilter.tofile
|
train
|
def tofile(self, f):
"""Serialize this ScalableBloomFilter into the file-object
`f'."""
f.write(pack(self.FILE_FMT, self.scale, self.ratio,
self.initial_capacity, self.error_rate))
# Write #-of-filters
f.write(pack(b'<l', len(self.filters)))
if len(self.filters) > 0:
# Then each filter directly, with a header describing
# their lengths.
headerpos = f.tell()
headerfmt = b'<' + b'Q'*(len(self.filters))
f.write(b'.' * calcsize(headerfmt))
filter_sizes = []
for filter in self.filters:
begin = f.tell()
filter.tofile(f)
filter_sizes.append(f.tell() - begin)
f.seek(headerpos)
f.write(pack(headerfmt, *filter_sizes))
|
python
|
{
"resource": ""
}
|
q26694
|
ScalableBloomFilter.fromfile
|
train
|
def fromfile(cls, f):
"""Deserialize the ScalableBloomFilter in file object `f'."""
filter = cls()
filter._setup(*unpack(cls.FILE_FMT, f.read(calcsize(cls.FILE_FMT))))
nfilters, = unpack(b'<l', f.read(calcsize(b'<l')))
if nfilters > 0:
header_fmt = b'<' + b'Q'*nfilters
bytes = f.read(calcsize(header_fmt))
filter_lengths = unpack(header_fmt, bytes)
for fl in filter_lengths:
filter.filters.append(BloomFilter.fromfile(f, fl))
else:
filter.filters = []
return filter
|
python
|
{
"resource": ""
}
|
q26695
|
safe_get_user_model
|
train
|
def safe_get_user_model():
"""
Safe loading of the User model, customized or not.
"""
user_app, user_model = settings.AUTH_USER_MODEL.split('.')
return apps.get_registered_model(user_app, user_model)
|
python
|
{
"resource": ""
}
|
q26696
|
EntryAdmin.get_title
|
train
|
def get_title(self, entry):
"""
Return the title with word count and number of comments.
"""
title = _('%(title)s (%(word_count)i words)') % \
{'title': entry.title, 'word_count': entry.word_count}
reaction_count = int(entry.comment_count +
entry.pingback_count +
entry.trackback_count)
if reaction_count:
return ungettext_lazy(
'%(title)s (%(reactions)i reaction)',
'%(title)s (%(reactions)i reactions)', reaction_count) % \
{'title': title,
'reactions': reaction_count}
return title
|
python
|
{
"resource": ""
}
|
q26697
|
EntryAdmin.get_authors
|
train
|
def get_authors(self, entry):
"""
Return the authors in HTML.
"""
try:
return format_html_join(
', ', '<a href="{}" target="blank">{}</a>',
[(author.get_absolute_url(),
getattr(author, author.USERNAME_FIELD))
for author in entry.authors.all()])
except NoReverseMatch:
return ', '.join(
[conditional_escape(getattr(author, author.USERNAME_FIELD))
for author in entry.authors.all()])
|
python
|
{
"resource": ""
}
|
q26698
|
EntryAdmin.get_categories
|
train
|
def get_categories(self, entry):
"""
Return the categories linked in HTML.
"""
try:
return format_html_join(
', ', '<a href="{}" target="blank">{}</a>',
[(category.get_absolute_url(), category.title)
for category in entry.categories.all()])
except NoReverseMatch:
return ', '.join([conditional_escape(category.title)
for category in entry.categories.all()])
|
python
|
{
"resource": ""
}
|
q26699
|
EntryAdmin.get_tags
|
train
|
def get_tags(self, entry):
"""
Return the tags linked in HTML.
"""
try:
return format_html_join(
', ', '<a href="{}" target="blank">{}</a>',
[(reverse('zinnia:tag_detail', args=[tag]), tag)
for tag in entry.tags_list])
except NoReverseMatch:
return conditional_escape(entry.tags)
|
python
|
{
"resource": ""
}
|