_id stringlengths 2 6 | title stringlengths 9 130 | partition stringclasses 3 values | text stringlengths 66 10.5k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
q18100 | CloudCrowd.NodeRecord.send_work_unit | train | def send_work_unit(unit)
result = node['/work'].post(:work_unit => unit.to_json)
unit.assign_to(self, JSON.parse(result.body)['pid'])
touch && true
rescue RestClient::RequestTimeout
# The node's gone away. Destroy it and it will check in when it comes back
CloudCrowd.log "Node #{host} received RequestTimeout, removing it"
destroy && false
rescue RestClient::RequestFailed => e
raise e unless e.http_code == 503 && e.http_body == Node::OVERLOADED_MESSAGE
update_attribute(:busy, true) && false
rescue RestClient::Exception, Errno::ECONNREFUSED, Timeout::Error, Errno::ECONNRESET=>e
# Couldn't post to node, assume it's gone away.
CloudCrowd.log "Node #{host} received #{e.class} #{e}, removing it"
destroy && false
end | ruby | {
"resource": ""
} |
q18101 | CloudCrowd.WorkUnit.fail | train | def fail(output, time_taken)
tries = self.attempts + 1
return try_again if tries < CloudCrowd.config[:work_unit_retries]
update_attributes({
:status => FAILED,
:node_record => nil,
:worker_pid => nil,
:attempts => tries,
:output => output,
:time => time_taken
})
job && job.check_for_completion
end | ruby | {
"resource": ""
} |
q18102 | CloudCrowd.Worker.complete_work_unit | train | def complete_work_unit(result)
keep_trying_to "complete work unit" do
data = base_params.merge({:status => 'succeeded', :output => result})
@node.central["/work/#{data[:id]}"].put(data)
log "finished #{display_work_unit} in #{data[:time]} seconds"
end
end | ruby | {
"resource": ""
} |
q18103 | CloudCrowd.Worker.fail_work_unit | train | def fail_work_unit(exception)
keep_trying_to "mark work unit as failed" do
data = base_params.merge({:status => 'failed', :output => {'output' => exception.message}.to_json})
@node.central["/work/#{data[:id]}"].put(data)
log "failed #{display_work_unit} in #{data[:time]} seconds\n#{exception.message}\n#{exception.backtrace}"
end
end | ruby | {
"resource": ""
} |
q18104 | CloudCrowd.Worker.keep_trying_to | train | def keep_trying_to(title)
begin
yield
rescue RestClient::ResourceNotFound => e
log "work unit ##{@unit['id']} doesn't exist. discarding..."
rescue Exception => e
log "failed to #{title} -- retry in #{@retry_wait} seconds"
log e.message
log e.backtrace
sleep @retry_wait
retry
end
end | ruby | {
"resource": ""
} |
q18105 | CloudCrowd.Worker.run_work_unit | train | def run_work_unit
begin
result = nil
action_class = CloudCrowd.actions[@unit['action']]
action = action_class.new(@status, @unit['input'], enhanced_unit_options, @node.asset_store)
Dir.chdir(action.work_directory) do
result = case @status
when PROCESSING then action.process
when SPLITTING then action.split
when MERGING then action.merge
else raise Error::StatusUnspecified, "work units must specify their status"
end
end
action.cleanup_work_directory if action
complete_work_unit({'output' => result}.to_json)
rescue Exception => e
action.cleanup_work_directory if action
fail_work_unit(e)
end
@node.resolve_work(@unit['id'])
end | ruby | {
"resource": ""
} |
q18106 | CloudCrowd.Worker.run | train | def run
trap_signals
log "starting #{display_work_unit}"
if @unit['options']['benchmark']
log("ran #{display_work_unit} in " + Benchmark.measure { run_work_unit }.to_s)
else
run_work_unit
end
Process.exit!
end | ruby | {
"resource": ""
} |
q18107 | CloudCrowd.Worker.trap_signals | train | def trap_signals
Signal.trap('QUIT') { Process.exit! }
Signal.trap('INT') { Process.exit! }
Signal.trap('TERM') { Process.exit! }
end | ruby | {
"resource": ""
} |
q18108 | CloudCrowd.CommandLine.start_server | train | def start_server
port = @options[:port] || 9173
daemonize = @options[:daemonize] ? '-d' : ''
log_path = CloudCrowd.log_path('server.log')
pid_path = CloudCrowd.pid_path('server.pid')
rackup_path = File.expand_path("#{@options[:config_path]}/config.ru")
FileUtils.mkdir_p(CloudCrowd.log_path) if @options[:daemonize] && !File.exists?(CloudCrowd.log_path)
puts "Starting CloudCrowd Central Server (#{VERSION}) on port #{port}..."
exec "thin -e #{@options[:environment]} -p #{port} #{daemonize} --tag cloud-crowd-server --log #{log_path} --pid #{pid_path} -R #{rackup_path} start"
end | ruby | {
"resource": ""
} |
q18109 | CloudCrowd.CommandLine.run_install | train | def run_install(install_path)
require 'fileutils'
install_path ||= '.'
FileUtils.mkdir_p install_path unless File.exists?(install_path)
install_file "#{CC_ROOT}/config/config.example.yml", "#{install_path}/config.yml"
install_file "#{CC_ROOT}/config/config.example.ru", "#{install_path}/config.ru"
install_file "#{CC_ROOT}/config/database.example.yml", "#{install_path}/database.yml"
install_file "#{CC_ROOT}/actions", "#{install_path}/actions", true
end | ruby | {
"resource": ""
} |
q18110 | CloudCrowd.CommandLine.ensure_config | train | def ensure_config
return if @config_found
found = CONFIG_FILES.all? {|f| File.exists? "#{@options[:config_path]}/#{f}" }
found ? @config_dir = true : config_not_found
end | ruby | {
"resource": ""
} |
q18111 | CloudCrowd.CommandLine.install_file | train | def install_file(source, dest, is_dir=false)
if File.exists?(dest)
print "#{dest} already exists. Overwrite it? (yes/no) "
return unless ['y', 'yes', 'ok'].include? gets.chomp.downcase
end
is_dir ? FileUtils.cp_r(source, dest) : FileUtils.cp(source, dest)
puts "installed #{dest}" unless ENV['RACK_ENV'] == 'test'
end | ruby | {
"resource": ""
} |
q18112 | CloudCrowd.Job.check_for_completion | train | def check_for_completion
return unless all_work_units_complete?
set_next_status
outs = gather_outputs_from_work_units
return queue_for_workers([outs]) if merging?
if complete?
update_attributes(:outputs => outs, :time => time_taken)
CloudCrowd.log "Job ##{id} (#{action}) #{display_status}." unless ENV['RACK_ENV'] == 'test'
CloudCrowd.defer { fire_callback } if callback_url
end
self
end | ruby | {
"resource": ""
} |
q18113 | CloudCrowd.Job.percent_complete | train | def percent_complete
return 99 if merging?
return 100 if complete?
unit_count = work_units.count
return 100 if unit_count <= 0
(work_units.complete.count / unit_count.to_f * 100).round
end | ruby | {
"resource": ""
} |
q18114 | CloudCrowd.Job.as_json | train | def as_json(opts={})
atts = {
'id' => id,
'color' => color,
'status' => display_status,
'percent_complete' => percent_complete,
'work_units' => work_units.count,
'time_taken' => time_taken
}
atts['outputs'] = JSON.parse(outputs) if outputs
atts['email'] = email if email
atts
end | ruby | {
"resource": ""
} |
q18115 | CloudCrowd.Job.gather_outputs_from_work_units | train | def gather_outputs_from_work_units
units = self.work_units.complete
outs = self.work_units.complete.map {|u| u.parsed_output }
self.work_units.complete.destroy_all
outs.to_json
end | ruby | {
"resource": ""
} |
q18116 | CloudCrowd.Job.queue_for_workers | train | def queue_for_workers(input=nil)
input ||= JSON.parse(self.inputs)
input.each {|i| WorkUnit.start(self, action, i, status) }
self
end | ruby | {
"resource": ""
} |
q18117 | CloudCrowd.Node.start | train | def start
FileUtils.mkdir_p(CloudCrowd.log_path) if @daemon && !File.exists?(CloudCrowd.log_path)
@server = Thin::Server.new('0.0.0.0', @port, self, :signals => false)
@server.tag = 'cloud-crowd-node'
@server.pid_file = CloudCrowd.pid_path('node.pid')
@server.log_file = CloudCrowd.log_path('node.log')
@server.daemonize if @daemon
trap_signals
asset_store
@server_thread = Thread.new { @server.start }
check_in(true)
check_in_periodically
monitor_system if @max_load || @min_memory
@server_thread.join
end | ruby | {
"resource": ""
} |
q18118 | CloudCrowd.Node.check_in | train | def check_in(critical=false)
@central["/node/#{@id}"].put(
:busy => @overloaded,
:tag => @tag,
:max_workers => CloudCrowd.config[:max_workers],
:enabled_actions => @enabled_actions.join(',')
)
rescue RestClient::Exception, Errno::ECONNREFUSED
CloudCrowd.log "Failed to connect to the central server (#{@central.to_s})."
raise SystemExit if critical
end | ruby | {
"resource": ""
} |
q18119 | CloudCrowd.Node.free_memory | train | def free_memory
case RUBY_PLATFORM
when /darwin/
stats = `vm_stat`
@mac_page_size ||= stats.match(SCRAPE_MAC_PAGE)[1].to_f / 1048576.0
stats.match(SCRAPE_MAC_MEMORY)[1].to_f * @mac_page_size
when /linux/
`cat /proc/meminfo`.match(SCRAPE_LINUX_MEMORY)[1].to_f / 1024.0
else
raise NotImplementedError, "'min_free_memory' is not yet implemented on your platform"
end
end | ruby | {
"resource": ""
} |
q18120 | CloudCrowd.Node.monitor_system | train | def monitor_system
@monitor_thread = Thread.new do
loop do
was_overloaded = @overloaded
@overloaded = overloaded?
check_in if was_overloaded && !@overloaded
sleep MONITOR_INTERVAL
end
end
end | ruby | {
"resource": ""
} |
q18121 | CloudCrowd.Node.check_in_periodically | train | def check_in_periodically
@check_in_thread = Thread.new do
loop do
check_on_workers
reply = ""
1.upto(5).each do | attempt_number |
# sleep for an ever increasing amount of time to prevent overloading the server
sleep CHECK_IN_INTERVAL * attempt_number
reply = check_in
# if we did not receive a reply, the server has went away; it
# will reply with an empty string if the check-in succeeds
if reply.nil?
CloudCrowd.log "Failed on attempt ##{attempt_number} to check in with server"
else
break
end
end
if reply.nil?
CloudCrowd.log "Giving up after repeated attempts to contact server"
raise SystemExit
end
end
end
end | ruby | {
"resource": ""
} |
q18122 | CloudCrowd.Node.trap_signals | train | def trap_signals
Signal.trap('QUIT') { shut_down }
Signal.trap('INT') { shut_down }
Signal.trap('TERM') { shut_down }
end | ruby | {
"resource": ""
} |
q18123 | TwitterJekyll.ApiClient.fetch | train | def fetch(api_request)
uri = api_request.to_uri
response = Net::HTTP.start(uri.host, use_ssl: api_request.ssl?) do |http|
http.read_timeout = 5
http.open_timeout = 5
http.get uri.request_uri, REQUEST_HEADERS
end
handle_response(api_request, response)
rescue Timeout::Error => e
ErrorResponse.new(api_request, e.class.name).to_h
end | ruby | {
"resource": ""
} |
q18124 | TwitterJekyll.TwitterTag.live_response | train | def live_response
if response = api_client.fetch(@api_request)
cache.write(@api_request.cache_key, response)
build_response(response)
end
end | ruby | {
"resource": ""
} |
q18125 | TwitterJekyll.TwitterTag.parse_params | train | def parse_params(params)
args = params.split(/\s+/).map(&:strip)
invalid_args!(args) unless args.any?
if args[0].to_s == OEMBED_ARG # TODO: remove after deprecation cycle
arguments_deprecation_warning(args)
args.shift
end
url, *api_args = args
ApiRequest.new(url, parse_args(api_args))
end | ruby | {
"resource": ""
} |
q18126 | Interactor.Contracts.enforce_contracts | train | def enforce_contracts(contracts)
outcome = contracts.call(context)
unless outcome.success?
contract.consequences.each do |handler|
instance_exec(outcome.breaches, &handler)
end
end
end | ruby | {
"resource": ""
} |
q18127 | ActiveRemote.Persistence.remote | train | def remote(endpoint, request_args = scope_key_hash)
response = remote_call(endpoint, request_args)
assign_attributes_from_rpc(response)
success?
end | ruby | {
"resource": ""
} |
q18128 | ActiveRemote.Attributes.read_attribute | train | def read_attribute(name)
name = name.to_s
if respond_to?(name)
attribute(name)
else
raise ::ActiveRemote::UnknownAttributeError, "unknown attribute: #{name}"
end
end | ruby | {
"resource": ""
} |
q18129 | ActiveRemote.Attributes.write_attribute | train | def write_attribute(name, value)
name = name.to_s
if respond_to?("#{name}=")
__send__("attribute=", name, value)
else
raise ::ActiveRemote::UnknownAttributeError, "unknown attribute: #{name}"
end
end | ruby | {
"resource": ""
} |
q18130 | ActiveRemote.Serialization.add_errors | train | def add_errors(errors)
errors.each do |error|
if error.respond_to?(:message)
self.errors.add(error.field, error.message)
elsif error.respond_to?(:messages)
error.messages.each do |message|
self.errors.add(error.field, message)
end
end
end
end | ruby | {
"resource": ""
} |
q18131 | ActiveRemote.Integration.cache_key | train | def cache_key
case
when new_record? then
"#{self.class.name.underscore}/new"
when ::ActiveRemote.config.default_cache_key_updated_at? && (timestamp = self[:updated_at]) then
timestamp = timestamp.utc.to_s(self.class.cache_timestamp_format)
"#{self.class.name.underscore}/#{self.to_param}-#{timestamp}"
else
"#{self.class.name.underscore}/#{self.to_param}"
end
end | ruby | {
"resource": ""
} |
q18132 | Rb1drv.OneDriveFile.save_as | train | def save_as(target_name=nil, overwrite: false, resume: true, &block)
target_name ||= @name
tmpfile = "#{target_name}.incomplete"
return if !overwrite && File.exist?(target_name)
if resume && File.size(tmpfile) > 0
from = File.size(tmpfile)
len = @size - from
fmode = 'ab'
headers = {
'Range': "bytes=#{from}-"
}
else
from = 0
len = @size
fmode = 'wb'
headers = {}
end
yield :new_segment, file: target_name, from: from if block_given?
File.open(tmpfile, mode: fmode, external_encoding: Encoding::BINARY) do |f|
Excon.get download_url, headers: headers, response_block: ->(chunk, remaining_bytes, total_bytes) do
f.write(chunk)
yield :progress, file: target_name, from: from, progress: total_bytes - remaining_bytes, total: total_bytes if block_given?
end
end
yield :finish_segment, file: target_name if block_given?
FileUtils.mv(tmpfile, filename)
end | ruby | {
"resource": ""
} |
q18133 | Rb1drv.OneDriveFile.set_mtime | train | def set_mtime(time)
attempt = 0
OneDriveFile.new(@od, @od.request(api_path, {fileSystemInfo: {lastModifiedDateTime: time.utc.iso8601}}, :patch))
rescue
sleep 10
attempt += 1
retry if attempt <= 3
end | ruby | {
"resource": ""
} |
q18134 | Rb1drv.OneDrive.request | train | def request(uri, data=nil, verb=:post)
@logger.info(uri) if @logger
auth_check
query = {
path: File.join('v1.0/me/', URI.escape(uri)),
headers: {
'Authorization': "Bearer #{@access_token.token}"
}
}
if data
query[:body] = JSON.generate(data)
query[:headers]['Content-Type'] = 'application/json'
@logger.info(query[:body]) if @logger
verb = :post unless [:post, :put, :patch, :delete].include?(verb)
response = @conn.send(verb, query)
else
response = @conn.get(query)
end
JSON.parse(response.body)
end | ruby | {
"resource": ""
} |
q18135 | Rb1drv.OneDriveDir.get_child | train | def get_child(path)
children.find { |child| child.name == path } || OneDrive404.new
end | ruby | {
"resource": ""
} |
q18136 | Rb1drv.OneDriveDir.mkdir | train | def mkdir(name)
return self if name == '.'
name = name[1..-1] if name[0] == '/'
newdir, *remainder = name.split('/')
subdir = get(newdir)
unless subdir.dir?
result = @od.request("#{api_path}/children",
name: newdir,
folder: {},
'@microsoft.graph.conflictBehavior': 'rename'
)
subdir = OneDriveDir.new(@od, result)
end
remainder.any? ? subdir.mkdir(remainder.join('/')) : subdir
end | ruby | {
"resource": ""
} |
q18137 | Rb1drv.OneDriveDir.upload_simple | train | def upload_simple(filename, overwrite:, target_name:)
target_file = get(target_name)
exist = target_file.file?
return if exist && !overwrite
path = nil
if exist
path = "#{target_file.api_path}/content"
else
path = "#{api_path}:/#{target_name}:/content"
end
query = {
path: File.join('v1.0/me/', path),
headers: {
'Authorization': "Bearer #{@od.access_token.token}",
'Content-Type': 'application/octet-stream'
},
body: File.read(filename)
}
result = @od.conn.put(query)
result = JSON.parse(result.body)
file = OneDriveFile.new(@od, result)
file.set_mtime(File.mtime(filename))
end | ruby | {
"resource": ""
} |
q18138 | EeePub.OCF.save | train | def save(output_path)
output_path = File.expand_path(output_path)
create_epub do
mimetype = Zip::ZipOutputStream::open(output_path) do |os|
os.put_next_entry("mimetype", nil, nil, Zip::ZipEntry::STORED, Zlib::NO_COMPRESSION)
os << "application/epub+zip"
end
zipfile = Zip::ZipFile.open(output_path)
Dir.glob('**/*').each do |path|
zipfile.add(path, path)
end
zipfile.commit
end
FileUtils.remove_entry_secure dir
end | ruby | {
"resource": ""
} |
q18139 | EeePub.ContainerItem.to_xml | train | def to_xml
out = ""
builder = Builder::XmlMarkup.new(:target => out, :indent => 2)
builder.instruct!
build_xml(builder)
out
end | ruby | {
"resource": ""
} |
q18140 | EeePub.ContainerItem.convert_to_xml_attributes | train | def convert_to_xml_attributes(hash)
result = {}
hash.each do |k, v|
key = k.to_s.gsub('_', '-').to_sym
result[key] = v
end
result
end | ruby | {
"resource": ""
} |
q18141 | Bitcoin.Tx.witness_commitment | train | def witness_commitment
return nil unless coinbase_tx?
outputs.each do |output|
commitment = output.script_pubkey.witness_commitment
return commitment if commitment
end
nil
end | ruby | {
"resource": ""
} |
q18142 | Bitcoin.Tx.serialize_old_format | train | def serialize_old_format
buf = [version].pack('V')
buf << Bitcoin.pack_var_int(inputs.length) << inputs.map(&:to_payload).join
buf << Bitcoin.pack_var_int(outputs.length) << outputs.map(&:to_payload).join
buf << [lock_time].pack('V')
buf
end | ruby | {
"resource": ""
} |
q18143 | Bitcoin.Tx.standard? | train | def standard?
return false if version > MAX_STANDARD_VERSION
return false if weight > MAX_STANDARD_TX_WEIGHT
inputs.each do |i|
# Biggest 'standard' txin is a 15-of-15 P2SH multisig with compressed keys (remember the 520 byte limit on redeemScript size).
# That works out to a (15*(33+1))+3=513 byte redeemScript, 513+1+15*(73+1)+3=1627
# bytes of scriptSig, which we round off to 1650 bytes for some minor future-proofing.
# That's also enough to spend a 20-of-20 CHECKMULTISIG scriptPubKey, though such a scriptPubKey is not considered standard.
return false if i.script_sig.size > 1650
return false unless i.script_sig.push_only?
end
data_count = 0
outputs.each do |o|
return false unless o.script_pubkey.standard?
data_count += 1 if o.script_pubkey.op_return?
# TODO add non P2SH multisig relay(permitbaremultisig)
# TODO add dust relay check
end
return false if data_count > 1
true
end | ruby | {
"resource": ""
} |
q18144 | Bitcoin.Tx.sighash_for_input | train | def sighash_for_input(input_index, output_script, hash_type: SIGHASH_TYPE[:all],
sig_version: :base, amount: nil, skip_separator_index: 0)
raise ArgumentError, 'input_index must be specified.' unless input_index
raise ArgumentError, 'does not exist input corresponding to input_index.' if input_index >= inputs.size
raise ArgumentError, 'script_pubkey must be specified.' unless output_script
raise ArgumentError, 'unsupported sig version specified.' unless SIG_VERSION.include?(sig_version)
if sig_version == :witness_v0 || Bitcoin.chain_params.fork_chain?
raise ArgumentError, 'amount must be specified.' unless amount
sighash_for_witness(input_index, output_script, hash_type, amount, skip_separator_index)
else
sighash_for_legacy(input_index, output_script, hash_type)
end
end | ruby | {
"resource": ""
} |
q18145 | Bitcoin.Tx.verify_input_sig | train | def verify_input_sig(input_index, script_pubkey, amount: nil, flags: STANDARD_SCRIPT_VERIFY_FLAGS)
script_sig = inputs[input_index].script_sig
has_witness = inputs[input_index].has_witness?
if script_pubkey.p2sh?
flags << SCRIPT_VERIFY_P2SH
redeem_script = Script.parse_from_payload(script_sig.chunks.last)
script_pubkey = redeem_script if redeem_script.p2wpkh?
end
if has_witness || Bitcoin.chain_params.fork_chain?
verify_input_sig_for_witness(input_index, script_pubkey, amount, flags)
else
verify_input_sig_for_legacy(input_index, script_pubkey, flags)
end
end | ruby | {
"resource": ""
} |
q18146 | Bitcoin.Tx.sighash_for_legacy | train | def sighash_for_legacy(index, script_code, hash_type)
ins = inputs.map.with_index do |i, idx|
if idx == index
i.to_payload(script_code.delete_opcode(Bitcoin::Opcodes::OP_CODESEPARATOR))
else
case hash_type & 0x1f
when SIGHASH_TYPE[:none], SIGHASH_TYPE[:single]
i.to_payload(Bitcoin::Script.new, 0)
else
i.to_payload(Bitcoin::Script.new)
end
end
end
outs = outputs.map(&:to_payload)
out_size = Bitcoin.pack_var_int(outputs.size)
case hash_type & 0x1f
when SIGHASH_TYPE[:none]
outs = ''
out_size = Bitcoin.pack_var_int(0)
when SIGHASH_TYPE[:single]
return "\x01".ljust(32, "\x00") if index >= outputs.size
outs = outputs[0...(index + 1)].map.with_index { |o, idx| (idx == index) ? o.to_payload : o.to_empty_payload }.join
out_size = Bitcoin.pack_var_int(index + 1)
end
if hash_type & SIGHASH_TYPE[:anyonecanpay] != 0
ins = [ins[index]]
end
buf = [[version].pack('V'), Bitcoin.pack_var_int(ins.size),
ins, out_size, outs, [lock_time, hash_type].pack('VV')].join
Bitcoin.double_sha256(buf)
end | ruby | {
"resource": ""
} |
q18147 | Bitcoin.Tx.verify_input_sig_for_legacy | train | def verify_input_sig_for_legacy(input_index, script_pubkey, flags)
script_sig = inputs[input_index].script_sig
checker = Bitcoin::TxChecker.new(tx: self, input_index: input_index)
interpreter = Bitcoin::ScriptInterpreter.new(checker: checker, flags: flags)
interpreter.verify_script(script_sig, script_pubkey)
end | ruby | {
"resource": ""
} |
q18148 | Bitcoin.Tx.verify_input_sig_for_witness | train | def verify_input_sig_for_witness(input_index, script_pubkey, amount, flags)
flags |= SCRIPT_VERIFY_WITNESS
flags |= SCRIPT_VERIFY_WITNESS_PUBKEYTYPE
checker = Bitcoin::TxChecker.new(tx: self, input_index: input_index, amount: amount)
interpreter = Bitcoin::ScriptInterpreter.new(checker: checker, flags: flags)
i = inputs[input_index]
script_sig = i.script_sig
witness = i.script_witness
interpreter.verify_script(script_sig, script_pubkey, witness)
end | ruby | {
"resource": ""
} |
q18149 | Bitcoin.ExtKey.ext_pubkey | train | def ext_pubkey
k = ExtPubkey.new
k.depth = depth
k.number = number
k.parent_fingerprint = parent_fingerprint
k.chain_code = chain_code
k.pubkey = key.pubkey
k.ver = priv_ver_to_pub_ver
k
end | ruby | {
"resource": ""
} |
q18150 | Bitcoin.ExtKey.to_payload | train | def to_payload
version.htb << [depth].pack('C') << parent_fingerprint.htb <<
[number].pack('N') << chain_code << [0x00].pack('C') << key.priv_key.htb
end | ruby | {
"resource": ""
} |
q18151 | Bitcoin.ExtKey.to_base58 | train | def to_base58
h = to_payload.bth
hex = h + Bitcoin.calc_checksum(h)
Base58.encode(hex)
end | ruby | {
"resource": ""
} |
q18152 | Bitcoin.ExtKey.derive | train | def derive(number, harden = false)
number += HARDENED_THRESHOLD if harden
new_key = ExtKey.new
new_key.depth = depth + 1
new_key.number = number
new_key.parent_fingerprint = fingerprint
if number > (HARDENED_THRESHOLD - 1)
data = [0x00].pack('C') << key.priv_key.htb << [number].pack('N')
else
data = key.pubkey.htb << [number].pack('N')
end
l = Bitcoin.hmac_sha512(chain_code, data)
left = l[0..31].bth.to_i(16)
raise 'invalid key' if left >= CURVE_ORDER
child_priv = (left + key.priv_key.to_i(16)) % CURVE_ORDER
raise 'invalid key ' if child_priv >= CURVE_ORDER
new_key.key = Bitcoin::Key.new(
priv_key: child_priv.to_even_length_hex.rjust(64, '0'), key_type: key_type)
new_key.chain_code = l[32..-1]
new_key.ver = version
new_key
end | ruby | {
"resource": ""
} |
q18153 | Bitcoin.ExtKey.priv_ver_to_pub_ver | train | def priv_ver_to_pub_ver
case version
when Bitcoin.chain_params.bip49_privkey_p2wpkh_p2sh_version
Bitcoin.chain_params.bip49_pubkey_p2wpkh_p2sh_version
when Bitcoin.chain_params.bip84_privkey_p2wpkh_version
Bitcoin.chain_params.bip84_pubkey_p2wpkh_version
else
Bitcoin.chain_params.extended_pubkey_version
end
end | ruby | {
"resource": ""
} |
q18154 | Bitcoin.ExtPubkey.to_payload | train | def to_payload
version.htb << [depth].pack('C') <<
parent_fingerprint.htb << [number].pack('N') << chain_code << pub.htb
end | ruby | {
"resource": ""
} |
q18155 | Bitcoin.ExtPubkey.derive | train | def derive(number)
new_key = ExtPubkey.new
new_key.depth = depth + 1
new_key.number = number
new_key.parent_fingerprint = fingerprint
raise 'hardened key is not support' if number > (HARDENED_THRESHOLD - 1)
data = pub.htb << [number].pack('N')
l = Bitcoin.hmac_sha512(chain_code, data)
left = l[0..31].bth.to_i(16)
raise 'invalid key' if left >= CURVE_ORDER
p1 = Bitcoin::Secp256k1::GROUP.generator.multiply_by_scalar(left)
p2 = Bitcoin::Key.new(pubkey: pubkey, key_type: key_type).to_point
new_key.pubkey = ECDSA::Format::PointOctetString.encode(p1 + p2, compression: true).bth
new_key.chain_code = l[32..-1]
new_key.ver = version
new_key
end | ruby | {
"resource": ""
} |
q18156 | Bitcoin.Script.p2pkh? | train | def p2pkh?
return false unless chunks.size == 5
[OP_DUP, OP_HASH160, OP_EQUALVERIFY, OP_CHECKSIG] ==
(chunks[0..1]+ chunks[3..4]).map(&:ord) && chunks[2].bytesize == 21
end | ruby | {
"resource": ""
} |
q18157 | Bitcoin.Script.push_only? | train | def push_only?
chunks.each do |c|
return false if !c.opcode.nil? && c.opcode > OP_16
end
true
end | ruby | {
"resource": ""
} |
q18158 | Bitcoin.Script.witness_program? | train | def witness_program?
return false if size < 4 || size > 42 || chunks.size < 2
opcode = chunks[0].opcode
return false if opcode != OP_0 && (opcode < OP_1 || opcode > OP_16)
return false unless chunks[1].pushdata?
if size == (chunks[1][0].unpack('C').first + 2)
program_size = chunks[1].pushed_data.bytesize
return program_size >= 2 && program_size <= 40
end
false
end | ruby | {
"resource": ""
} |
q18159 | Bitcoin.Script.witness_commitment | train | def witness_commitment
return nil if !op_return? || op_return_data.bytesize < 36
buf = StringIO.new(op_return_data)
return nil unless buf.read(4).bth == WITNESS_COMMITMENT_HEADER
buf.read(32).bth
end | ruby | {
"resource": ""
} |
q18160 | Bitcoin.Script.to_script_code | train | def to_script_code(skip_separator_index = 0)
payload = to_payload
if p2wpkh?
payload = Script.to_p2pkh(chunks[1].pushed_data.bth).to_payload
elsif skip_separator_index > 0
payload = subscript_codeseparator(skip_separator_index)
end
Bitcoin.pack_var_string(payload)
end | ruby | {
"resource": ""
} |
q18161 | Bitcoin.Script.witness_data | train | def witness_data
version = opcode_to_small_int(chunks[0].opcode)
program = chunks[1].pushed_data
[version, program]
end | ruby | {
"resource": ""
} |
q18162 | Bitcoin.Script.<< | train | def <<(obj)
if obj.is_a?(Integer)
push_int(obj)
elsif obj.is_a?(String)
append_data(obj)
elsif obj.is_a?(Array)
obj.each { |o| self.<< o}
self
end
end | ruby | {
"resource": ""
} |
q18163 | Bitcoin.Script.append_opcode | train | def append_opcode(opcode)
opcode = Opcodes.small_int_to_opcode(opcode) if -1 <= opcode && opcode <= 16
raise ArgumentError, "specified invalid opcode #{opcode}." unless Opcodes.defined?(opcode)
chunks << opcode.chr
self
end | ruby | {
"resource": ""
} |
q18164 | Bitcoin.Script.append_data | train | def append_data(data)
data = Encoding::ASCII_8BIT == data.encoding ? data : data.htb
chunks << Bitcoin::Script.pack_pushdata(data)
self
end | ruby | {
"resource": ""
} |
q18165 | Bitcoin.Script.include? | train | def include?(item)
chunk_item = if item.is_a?(Integer)
item.chr
elsif item.is_a?(String)
data = Encoding::ASCII_8BIT == item.encoding ? item : item.htb
Bitcoin::Script.pack_pushdata(data)
end
return false unless chunk_item
chunks.include?(chunk_item)
end | ruby | {
"resource": ""
} |
q18166 | Bitcoin.Script.find_and_delete | train | def find_and_delete(subscript)
raise ArgumentError, 'subscript must be Bitcoin::Script' unless subscript.is_a?(Script)
return self if subscript.chunks.empty?
buf = []
i = 0
result = Script.new
chunks.each do |chunk|
sub_chunk = subscript.chunks[i]
if chunk.start_with?(sub_chunk)
if chunk == sub_chunk
buf << chunk
i += 1
(i = 0; buf.clear) if i == subscript.chunks.size # matched the whole subscript
else # matched the part of head
i = 0
tmp = chunk.dup
tmp.slice!(sub_chunk)
result.chunks << tmp
end
else
result.chunks << buf.join unless buf.empty?
if buf.first == chunk
i = 1
buf = [chunk]
else
i = 0
result.chunks << chunk
end
end
end
result
end | ruby | {
"resource": ""
} |
q18167 | Bitcoin.Script.subscript_codeseparator | train | def subscript_codeseparator(separator_index)
buf = []
process_separator_index = 0
chunks.each{|chunk|
buf << chunk if process_separator_index == separator_index
if chunk.ord == OP_CODESEPARATOR && process_separator_index < separator_index
process_separator_index += 1
end
}
buf.join
end | ruby | {
"resource": ""
} |
q18168 | Bitcoin.Script.p2pkh_addr | train | def p2pkh_addr
return nil unless p2pkh?
hash160 = chunks[2].pushed_data.bth
return nil unless hash160.htb.bytesize == 20
Bitcoin.encode_base58_address(hash160, Bitcoin.chain_params.address_version)
end | ruby | {
"resource": ""
} |
q18169 | Bitcoin.Script.p2sh_addr | train | def p2sh_addr
return nil unless p2sh?
hash160 = chunks[1].pushed_data.bth
return nil unless hash160.htb.bytesize == 20
Bitcoin.encode_base58_address(hash160, Bitcoin.chain_params.p2sh_version)
end | ruby | {
"resource": ""
} |
q18170 | Bitcoin.Script.bech32_addr | train | def bech32_addr
segwit_addr = Bech32::SegwitAddr.new
segwit_addr.hrp = Bitcoin.chain_params.bech32_hrp
segwit_addr.script_pubkey = to_payload.bth
segwit_addr.addr
end | ruby | {
"resource": ""
} |
q18171 | Bitcoin.Mnemonic.to_entropy | train | def to_entropy(words)
word_master = load_words
mnemonic = words.map do |w|
index = word_master.index(w.downcase)
raise IndexError, 'word not found in words list.' unless index
index.to_s(2).rjust(11, '0')
end.join
entropy = mnemonic.slice(0, (mnemonic.length * 32) / 33)
checksum = mnemonic.gsub(entropy, '')
raise SecurityError, 'checksum mismatch.' unless checksum == checksum(entropy)
[entropy].pack('B*').bth
end | ruby | {
"resource": ""
} |
q18172 | Bitcoin.Mnemonic.to_mnemonic | train | def to_mnemonic(entropy)
raise ArgumentError, 'entropy is empty.' if entropy.nil? || entropy.empty?
e = entropy.htb.unpack('B*').first
seed = e + checksum(e)
mnemonic_index = seed.chars.each_slice(11).map{|i|i.join.to_i(2)}
word_master = load_words
mnemonic_index.map{|i|word_master[i]}
end | ruby | {
"resource": ""
} |
q18173 | Bitcoin.Mnemonic.to_seed | train | def to_seed(mnemonic, passphrase: '')
to_entropy(mnemonic)
OpenSSL::PKCS5.pbkdf2_hmac(mnemonic.join(' ').downcase,
'mnemonic' + passphrase, 2048, 64, OpenSSL::Digest::SHA512.new).bth
end | ruby | {
"resource": ""
} |
q18174 | Bitcoin.Mnemonic.checksum | train | def checksum(entropy)
b = Bitcoin.sha256([entropy].pack('B*')).unpack('B*').first
b.slice(0, (entropy.length/32))
end | ruby | {
"resource": ""
} |
q18175 | Bitcoin.Key.to_wif | train | def to_wif
version = Bitcoin.chain_params.privkey_version
hex = version + priv_key
hex += '01' if compressed?
hex += Bitcoin.calc_checksum(hex)
Base58.encode(hex)
end | ruby | {
"resource": ""
} |
q18176 | Bitcoin.Key.sign | train | def sign(data, low_r = true, extra_entropy = nil)
sig = secp256k1_module.sign_data(data, priv_key, extra_entropy)
if low_r && !sig_has_low_r?(sig)
counter = 1
until sig_has_low_r?(sig)
extra_entropy = [counter].pack('I*').bth.ljust(64, '0').htb
sig = secp256k1_module.sign_data(data, priv_key, extra_entropy)
counter += 1
end
end
sig
end | ruby | {
"resource": ""
} |
q18177 | Bitcoin.Key.verify | train | def verify(sig, origin)
return false unless valid_pubkey?
begin
sig = ecdsa_signature_parse_der_lax(sig)
secp256k1_module.verify_sig(origin, sig, pubkey)
rescue Exception
false
end
end | ruby | {
"resource": ""
} |
q18178 | Bitcoin.Key.to_point | train | def to_point
p = pubkey
p ||= generate_pubkey(priv_key, compressed: compressed)
ECDSA::Format::PointOctetString.decode(p.htb, Bitcoin::Secp256k1::GROUP)
end | ruby | {
"resource": ""
} |
q18179 | Bitcoin.Key.ecdsa_signature_parse_der_lax | train | def ecdsa_signature_parse_der_lax(sig)
sig_array = sig.unpack('C*')
len_r = sig_array[3]
r = sig_array[4...(len_r+4)].pack('C*').bth
len_s = sig_array[len_r + 5]
s = sig_array[(len_r + 6)...(len_r + 6 + len_s)].pack('C*').bth
ECDSA::Signature.new(r.to_i(16), s.to_i(16)).to_der
end | ruby | {
"resource": ""
} |
def encode(hex)
  # Base58-encode a hex string. Leading 0x00 bytes are not representable by
  # the positional conversion, so they are counted and emitted as '1' chars.
  zero_prefix = (hex.match(/^([0]+)/) ? $1 : '').size / 2
  number = hex.to_i(16)
  encoded = ''
  until number.zero?
    number, digit = number.divmod(SIZE)
    encoded = ALPHABET[digit] + encoded
  end
  ('1' * zero_prefix) + encoded
end
"resource": ""
} |
def decode(base58_val)
  # Base58-decode into a hex string; leading '1' characters map back to
  # leading 0x00 bytes.
  number = 0
  base58_val.reverse.each_char.with_index do |char, index|
    digit = ALPHABET.index(char)
    raise ArgumentError, 'Value passed not a valid Base58 String.' if digit.nil?
    number += digit * (SIZE ** index)
  end
  hex = number.to_even_length_hex
  hex = '' if hex == '00'
  ones = (base58_val.match(/^([1]+)/) ? $1 : '').size
  ones > 0 ? ('00' * ones) + hex : hex
end
"resource": ""
} |
def calculate_witness_commitment
  # Build the BIP141 witness commitment:
  # double-SHA256(witness merkle root || witness reserved value).
  # The coinbase's wtxid is defined as all zeros (COINBASE_WTXID).
  wtxids = [COINBASE_WTXID] + transactions[1..-1].map(&:witness_hash)
  witness_root = Bitcoin::MerkleTree.build_from_leaf(wtxids).merkle_root
  # The reserved value comes from the coinbase input's witness stack.
  reserved = transactions[0].inputs[0].script_witness.stack.map(&:bth).join
  Bitcoin.double_sha256([witness_root + reserved].pack('H*')).bth
end
"resource": ""
} |
def height
  # BIP34: from block version 2 onward, the coinbase scriptSig begins with
  # a push of the serialized block height. Returns nil when unavailable.
  return nil if header.version < 2
  coinbase = transactions.first
  return nil unless coinbase.coinbase_tx?
  io = StringIO.new(coinbase.inputs[0].script_sig.to_payload)
  push_len = Bitcoin.unpack_var_int_from_io(io)
  # Height bytes are little-endian; reverse to big-endian hex before parsing.
  io.read(push_len).reverse.bth.to_i(16)
end
"resource": ""
} |
def to_payload
  # Serialize this Open Assets marker payload:
  # MARKER || VERSION || varint(count) || LEB128 quantities || varint(len) || metadata bytes.
  payload = String.new
  payload << MARKER
  payload << VERSION
  payload << Bitcoin.pack_var_int(quantities.size) << quantities.map{|q| LEB128.encode_unsigned(q).read }.join
  # Fixed: use bytesize rather than length. For multi-byte (non-ASCII)
  # metadata, String#length counts characters while the body emitted below is
  # the raw bytes, so the declared length would disagree with the data.
  payload << Bitcoin.pack_var_int(metadata.bytesize) << metadata.bytes.map{|b| sprintf("%02x", b)}.join.htb
  payload
end
"resource": ""
} |
def hash160(hex)
  # HASH160 = RIPEMD160(SHA256(data)) of the hex-encoded input,
  # returned as a hex string.
  sha = Digest::SHA256.digest(hex.htb)
  Digest::RMD160.hexdigest(sha)
end
"resource": ""
} |
def encode_base58_address(hex, addr_version)
  # Base58Check address: version byte(s) || payload || 4-byte checksum.
  versioned = addr_version + hex
  Base58.encode(versioned + calc_checksum(versioned))
end
"resource": ""
} |
def decode_base58_address(addr)
  # Decode a Base58Check address into [payload_hex, version_hex].
  # 50 hex chars = 25 bytes: 1 version + 20 hash + 4 checksum.
  hex = Base58.decode(addr)
  raise 'Invalid address.' unless hex.size == 50 && calc_checksum(hex[0...-8]) == hex[-8..-1]
  version = hex[0..1]
  known = [Bitcoin.chain_params.address_version, Bitcoin.chain_params.p2sh_version]
  raise 'Invalid version bytes.' unless known.include?(version)
  [hex[2...-8], version]
end
"resource": ""
} |
# Context-free transaction sanity checks (mirrors Bitcoin Core's
# CheckTransaction). Returns true when the transaction passes; otherwise
# delegates to state.DoS with a ban score, reject code and reason —
# NOTE(review): presumably DoS records the failure and returns false; confirm
# ValidationState's contract.
q18188 | Bitcoin.Validation.check_tx | train | def check_tx(tx, state)
# Basic checks that don't depend on any context
if tx.inputs.empty?
return state.DoS(10, reject_code: Message::Reject::CODE_INVALID, reject_reason: 'bad-txns-vin-empty')
end
if tx.outputs.empty?
return state.DoS(100, reject_code: Message::Reject::CODE_INVALID, reject_reason: 'bad-txns-vout-empty')
end
# Size limits (this doesn't take the witness into account, as that hasn't been checked for malleability)
if tx.serialize_old_format.bytesize * Bitcoin::WITNESS_SCALE_FACTOR > Bitcoin::MAX_BLOCK_WEIGHT
return state.DoS(100, reject_code: Message::Reject::CODE_INVALID, reject_reason: 'bad-txns-oversize')
end
# Check for negative or overflow output values
amount = 0
tx.outputs.each do |o|
return state.DoS(100, reject_code: Message::Reject::CODE_INVALID, reject_reason: 'bad-txns-vout-negative') if o.value < 0
return state.DoS(100, reject_code: Message::Reject::CODE_INVALID, reject_reason: 'bad-txns-vout-toolarge') if MAX_MONEY < o.value
amount += o.value
# The running total is also bounded so the sum cannot exceed MAX_MONEY.
return state.DoS(100, reject_code: Message::Reject::CODE_INVALID, reject_reason: 'bad-txns-vout-toolarge') if MAX_MONEY < amount
end
# Check for duplicate inputs - note that this check is slow so we skip it in CheckBlock
out_points = tx.inputs.map{|i|i.out_point.to_payload}
unless out_points.size == out_points.uniq.size
return state.DoS(100, reject_code: Message::Reject::CODE_INVALID, reject_reason: 'bad-txns-inputs-duplicate')
end
# Coinbase scriptSig must be between 2 and 100 bytes; non-coinbase inputs
# must reference a valid previous outpoint.
if tx.coinbase_tx?
if tx.inputs[0].script_sig.size < 2 || tx.inputs[0].script_sig.size > 100
return state.DoS(100, reject_code: Message::Reject::CODE_INVALID, reject_reason: 'bad-cb-length')
end
else
tx.inputs.each do |i|
if i.out_point.nil? || !i.out_point.valid?
return state.DoS(10, reject_code: Message::Reject::CODE_INVALID, reject_reason: 'bad-txns-prevout-null')
end
end
end
true
end | ruby | {
"resource": ""
} |
# Top-level script verification: evaluate scriptSig then scriptPubkey, and —
# depending on flags — the bare-witness, P2SH, and P2SH-wrapped-witness paths.
# Returns true when spending is authorized; on failure calls set_error with a
# SCRIPT_ERR_* code and returns false.
q18189 | Bitcoin.ScriptInterpreter.verify_script | train | def verify_script(script_sig, script_pubkey, witness = ScriptWitness.new)
return set_error(SCRIPT_ERR_SIG_PUSHONLY) if flag?(SCRIPT_VERIFY_SIGPUSHONLY) && !script_sig.push_only?
stack_copy = nil
had_witness = false
return false unless eval_script(script_sig, :base)
# Snapshot the stack so the P2SH path below can restore the scriptSig result.
stack_copy = stack.dup if flag?(SCRIPT_VERIFY_P2SH)
return false unless eval_script(script_pubkey, :base)
# NOTE(review): here cast_to_bool receives raw bytes (.htb) but the P2SH path
# below passes the hex string unchanged — confirm which form cast_to_bool
# expects; the inconsistency looks suspicious.
return set_error(SCRIPT_ERR_EVAL_FALSE) if stack.empty? || !cast_to_bool(stack.last.htb)
# Bare witness programs
if flag?(SCRIPT_VERIFY_WITNESS) && script_pubkey.witness_program?
had_witness = true
return set_error(SCRIPT_ERR_WITNESS_MALLEATED) unless script_sig.size == 0
version, program = script_pubkey.witness_data
stack_copy = stack.dup
return false unless verify_witness_program(witness, version, program)
end
# Additional validation for spend-to-script-hash transactions
if flag?(SCRIPT_VERIFY_P2SH) && script_pubkey.p2sh?
return set_error(SCRIPT_ERR_SIG_PUSHONLY) unless script_sig.push_only?
# NOTE(review): tmp is never read afterwards — dead store? confirm.
tmp = stack
@stack = stack_copy
raise 'stack cannot be empty.' if stack.empty?
begin
redeem_script = Bitcoin::Script.parse_from_payload(stack.pop.htb)
# NOTE(review): rescuing Exception is very broad (catches SignalException
# etc.); StandardError would normally suffice — confirm upstream intent.
rescue Exception => e
return set_error(SCRIPT_ERR_BAD_OPCODE, "Failed to parse serialized redeem script for P2SH. #{e.message}")
end
return false unless eval_script(redeem_script, :base)
return set_error(SCRIPT_ERR_EVAL_FALSE) if stack.empty? || !cast_to_bool(stack.last)
# P2SH witness program
if flag?(SCRIPT_VERIFY_WITNESS) && redeem_script.witness_program?
had_witness = true
# The scriptSig must be _exactly_ a single push of the redeemScript. Otherwise we reintroduce malleability.
return set_error(SCRIPT_ERR_WITNESS_MALLEATED_P2SH) unless script_sig == (Bitcoin::Script.new << redeem_script.to_payload.bth)
version, program = redeem_script.witness_data
return false unless verify_witness_program(witness, version, program)
end
end
# The CLEANSTACK check is only performed after potential P2SH evaluation,
# as the non-P2SH evaluation of a P2SH script will obviously not result in a clean stack (the P2SH inputs remain).
# The same holds for witness evaluation.
if flag?(SCRIPT_VERIFY_CLEANSTACK)
# Disallow CLEANSTACK without P2SH, as otherwise a switch CLEANSTACK->P2SH+CLEANSTACK would be possible,
# which is not a softfork (and P2SH should be one).
raise 'assert' unless flag?(SCRIPT_VERIFY_P2SH)
return set_error(SCRIPT_ERR_CLEANSTACK) unless stack.size == 1
end
if flag?(SCRIPT_VERIFY_WITNESS)
raise 'assert' unless flag?(SCRIPT_VERIFY_P2SH)
# A witness supplied for a non-witness script is itself a malleability vector.
return set_error(SCRIPT_ERR_WITNESS_UNEXPECTED) if !had_witness && !witness.empty?
end
true
end | ruby | {
"resource": ""
} |
def pop_int(count = 1)
  # Pop +count+ elements off the interpreter stack, decoding each as a
  # script number. Returns a single value for count == 1, else an array.
  values = stack.pop(count).map { |element| cast_to_int(element) }
  count == 1 ? values.first : values
end
"resource": ""
} |
def cast_to_int(s, max_num_size = DEFAULT_MAX_NUM_SIZE)
  # Decode hex-encoded stack element +s+ as a script number (CScriptNum
  # rules), enforcing the size cap and, optionally, minimal encoding.
  data = s.htb
  raise '"script number overflow"' if data.bytesize > max_num_size
  if require_minimal && data.bytesize > 0
    # Minimal encoding: the top byte may be a bare sign byte (0x00/0x80)
    # only when the byte below it needs its own high bit for the sign.
    top = data.bytes[-1]
    if top & 0x7f == 0 && (data.bytesize <= 1 || data.bytes[data.bytesize - 2] & 0x80 == 0)
      raise 'non-minimally encoded script number'
    end
  end
  Script.decode_number(s)
end
"resource": ""
} |
def contains?(data)
  # A saturated (full) filter matches everything.
  return true if full?
  # Possible match only when every hash function lands on a set bit.
  hash_funcs.times.all? { |i| check_bit(to_hash(data, i)) }
end
"resource": ""
} |
def hash_to_range(element)
  # Map SipHash(key, element) uniformly into the range [0, f).
  map_into_range(SipHash.digest(key, element), f)
end
"resource": ""
} |
# Core GCS filter query: returns true when any of the first +size+ values in
# +hashes+ is present in this filter's Golomb-Rice coded element set.
# NOTE(review): the merge scan below relies on +hashes+ being sorted
# ascending — confirm callers guarantee this.
q18194 | Bitcoin.GCSFilter.match_internal? | train | def match_internal?(hashes, size)
n, payload = Bitcoin.unpack_var_int(encoded.htb)
bit_reader = Bitcoin::BitStreamReader.new(payload)
value = 0
hashes_index = 0
n.times do
# Elements are delta-encoded; reconstruct the running value.
delta = golomb_rice_decode(bit_reader, p)
value += delta
loop do
# Exhausted all query hashes without a match.
return false if hashes_index == size
return true if hashes[hashes_index] == value
# Current query hash exceeds this element; decode the next element.
break if hashes[hashes_index] > value
hashes_index += 1
end
end
false
end | ruby | {
"resource": ""
} |
# Golomb-Rice encode +x+ with Rice parameter +p+ into +bit_writer+:
# a unary-coded quotient (q one-bits terminated by a zero bit) followed by
# the p-bit binary remainder.
q18195 | Bitcoin.GCSFilter.golomb_rice_encode | train | def golomb_rice_encode(bit_writer, p, x)
q = x >> p
while q > 0
# Emit the unary part in chunks of at most 64 bits per write.
nbits = q <= 64 ? q : 64
# NOTE(review): -1 is presumably masked by BitStreamWriter#write to nbits
# set bits (mirroring ~0ULL in the C++ original) — confirm the writer's
# handling of negative values.
bit_writer.write(-1, nbits) # 18446744073709551615 is 2**64 - 1 = ~0ULL in cpp.
q -= nbits
end
# Unary terminator, then the low p bits of x as the remainder.
bit_writer.write(0, 1)
bit_writer.write(x, p)
end | ruby | {
"resource": ""
} |
def golomb_rice_decode(bit_reader, p)
  # Decode one Golomb-Rice value: count consecutive 1-bits up to the zero
  # terminator (unary quotient), then read the p-bit remainder.
  quotient = 0
  quotient += 1 while bit_reader.read(1) == 1
  remainder = bit_reader.read(p)
  (quotient << p) + remainder
end
"resource": ""
} |
def remote_file_content_same_as?(full_path, content)
  # Compare the MD5 of the local +content+ against the remote file's
  # md5sum (awk strips the filename column from md5sum's output).
  remote_md5 = top.capture("md5sum #{full_path} | awk '{ print $1 }'").strip
  Digest::MD5.hexdigest(content) == remote_md5
end
"resource": ""
} |
def remote_file_differs?(full_path, content)
  # The remote file "differs" when it is missing or its content differs.
  # Rewritten from `!A || A && !B` to the equivalent `!(A && B)`: the
  # original evaluated remote_file_exists? — a remote round-trip — twice on
  # the existing-file path.
  !(remote_file_exists?(full_path) && remote_file_content_same_as?(full_path, content))
end
"resource": ""
} |
def adapt_tag!(parsed_tag)
  # Snapshot an untouched deep copy of the parsed AST node before any
  # rekeying, so downstream consumers can access the original parse data.
  snapshot = Marshal.load(Marshal.dump(parsed_tag))
  parsed_tag['cuke_modeler_parsing_data'] = snapshot
  # Convert gherkin's symbol keys to the string keys CukeModeler expects,
  # flattening the location down to its line number.
  parsed_tag['name'] = parsed_tag.delete(:name)
  location = parsed_tag.delete(:location)
  parsed_tag['line'] = location[:line]
end
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.