_id stringlengths 2 6 | title stringlengths 9 130 | partition stringclasses 3 values | text stringlengths 66 10.5k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
q20100 | Gem.SourceIndex.gem_signature | train | def gem_signature(gem_full_name)
require 'digest'
# A SHA-256 digest of the spec's YAML form uniquely identifies its content.
yaml_spec = @gems[gem_full_name].to_yaml
Digest::SHA256.new.hexdigest(yaml_spec).to_s
end | ruby | {
"resource": ""
} |
q20101 | Gem.SourceIndex.find_name | train | def find_name(gem_name, version_requirement = Gem::Requirement.default)
# Build a dependency for the requested name/version and delegate to #search.
search Gem::Dependency.new(gem_name, version_requirement)
end | ruby | {
"resource": ""
} |
q20102 | Gem.SourceIndex.update | train | def update(source_uri, all)
# Refresh the index from a remote source, preferring an incremental
# (per-gem) fetch when few specs are missing and falling back to a bulk
# index download otherwise. Returns false when nothing was missing.
source_uri = URI.parse source_uri unless URI::Generic === source_uri
source_uri.path += '/' unless source_uri.path =~ /\/$/
use_incremental = false
begin
gem_names = fetch_quick_index source_uri, all
remove_extra gem_names
missing_gems = find_missing gem_names
return false if missing_gems.size.zero?
say "Missing metadata for #{missing_gems.size} gems" if
missing_gems.size > 0 and Gem.configuration.really_verbose
# Incremental fetch only pays off below the configured bulk threshold.
use_incremental = missing_gems.size <= Gem.configuration.bulk_threshold
rescue Gem::OperationNotSupportedError => ex
alert_error "Falling back to bulk fetch: #{ex.message}" if
Gem.configuration.really_verbose
use_incremental = false
end
if use_incremental then
update_with_missing(source_uri, missing_gems)
else
new_index = fetch_bulk_index(source_uri)
@gems.replace(new_index.gems)
end
true
end | ruby | {
"resource": ""
} |
q20103 | Gem.SourceIndex.fetch_quick_index | train | def fetch_quick_index(source_uri, all)
# Download and inflate the deflated quick index, returning gem names one
# per line. When the 'latest' index cannot be fetched, retry once with
# the full index before raising OperationNotSupportedError.
index = all ? 'index' : 'latest_index'
zipped_index = fetcher.fetch_path source_uri + "quick/#{index}.rz"
unzip(zipped_index).split("\n")
# NOTE(review): rescuing ::Exception is very broad (it also catches
# interrupts and exit) -- presumably intentional for legacy fetcher
# errors, but worth confirming.
rescue ::Exception => e
unless all then
say "Latest index not found, using quick index" if
Gem.configuration.really_verbose
fetch_quick_index source_uri, true
else
raise Gem::OperationNotSupportedError,
"No quick index found: #{e.message}"
end
end | ruby | {
"resource": ""
} |
q20104 | Gem.SourceIndex.find_missing | train | def find_missing(spec_names)
# Lazily build a lookup of known specs keyed by original (pre-normalised)
# full name, then report the requested names we have no spec for yet.
unless defined? @originals then
@originals = {}
each { |full_name, spec| @originals[spec.original_name] = spec }
end
spec_names.select { |full_name| @originals[full_name].nil? }
end | ruby | {
"resource": ""
} |
q20105 | Gem.SourceIndex.fetch_single_spec | train | def fetch_single_spec(source_uri, spec_name)
# Fetch one gemspec, trying the Marshal form first and falling back to
# YAML. Returns the loaded spec or nil; the last failure is kept in
# @fetch_error for the caller to report.
@fetch_error = nil
begin
marshal_uri = source_uri + "quick/Marshal.#{Gem.marshal_version}/#{spec_name}.gemspec.rz"
zipped = fetcher.fetch_path marshal_uri
return Marshal.load(unzip(zipped))
rescue => ex
@fetch_error = ex
if Gem.configuration.really_verbose then
say "unable to fetch marshal gemspec #{marshal_uri}: #{ex.class} - #{ex}"
end
end
begin
yaml_uri = source_uri + "quick/#{spec_name}.gemspec.rz"
zipped = fetcher.fetch_path yaml_uri
return YAML.load(unzip(zipped))
rescue => ex
@fetch_error = ex
if Gem.configuration.really_verbose then
say "unable to fetch YAML gemspec #{yaml_uri}: #{ex.class} - #{ex}"
end
end
nil
end | ruby | {
"resource": ""
} |
q20106 | Gem.SourceIndex.update_with_missing | train | def update_with_missing(source_uri, missing_names)
# Incrementally fetch and add each missing spec, reporting progress.
# Returns the number of specs the progress reporter counted.
progress = ui.progress_reporter(missing_names.size,
"Updating metadata for #{missing_names.size} gems from #{source_uri}")
missing_names.each do |spec_name|
gemspec = fetch_single_spec(source_uri, spec_name)
if gemspec.nil? then
ui.say "Failed to download spec #{spec_name} from #{source_uri}:\n" \
"\t#{@fetch_error.message}"
else
add_spec gemspec
progress.updated spec_name
end
# Clear the per-spec fetch error so a later failure reports its own cause.
@fetch_error = nil
end
progress.done
progress.count
end | ruby | {
"resource": ""
} |
q20107 | Specinfra::Backend.BeakerBase.ssh_exec! | train | def ssh_exec!(node, command)
# Run the command via beaker's `on`, accepting any exit code 0..127 so
# failures are reported through the result hash rather than raised.
result = on node, command, { :acceptable_exit_codes => (0..127) }
{
:exit_status => result.exit_code,
:stdout => result.stdout,
:stderr => result.stderr
}
end | ruby | {
"resource": ""
} |
q20108 | Specinfra::Backend.BeakerCygwin.run_command | train | def run_command(cmd, opt = {})
# Write the command to a remote PowerShell script and execute it,
# adapting the cleanup command and stdin redirection to whether the
# node is cygwin-based or native Windows.
node = get_working_node
script = create_script(cmd)
#when node is not cygwin rm -rf will fail so lets use native del instead
#There should be a better way to do this, but for now , this works
if node.is_cygwin?
delete_command = "rm -rf"
redirection = "< /dev/null"
else
delete_command = "del"
redirection = "< NUL"
end
on node, "#{delete_command} script.ps1"
create_remote_file(node, 'script.ps1', script)
#When using cmd on a pswindows node redirection should be set to < NUl
#when using a cygwing one, /dev/null should be fine
ret = ssh_exec!(node, "powershell.exe -File script.ps1 #{redirection}")
# Record what ran and what it printed on the current RSpec example.
if @example
@example.metadata[:command] = script
@example.metadata[:stdout] = ret[:stdout]
end
CommandResult.new ret
end | ruby | {
"resource": ""
} |
q20109 | Specinfra::Backend.BeakerExec.run_command | train | def run_command(cmd, opt = {})
node = get_working_node
# Wrap the raw command with specinfra's shell builder and any pre-commands.
full_cmd = add_pre_command(build_command(cmd))
ret = ssh_exec!(node, full_cmd)
# Attach command/stdout to the running RSpec example's metadata, if any.
if @example
@example.metadata[:command] = full_cmd
@example.metadata[:stdout] = ret[:stdout]
end
CommandResult.new ret
end | ruby | {
"resource": ""
} |
q20110 | BeakerRSpec.BeakerShim.setup | train | def setup(args = [])
# Parse beaker CLI-style options, force debug logging, and seed the
# global RSpec configuration with an empty host list plus the options.
options_parser = Beaker::Options::Parser.new
options = options_parser.parse_args(args)
options[:debug] = true
RSpec.configuration.logger = Beaker::Logger.new(options)
options[:logger] = logger
RSpec.configuration.hosts = []
RSpec.configuration.options = options
end | ruby | {
"resource": ""
} |
q20111 | M.Executor.suites | train | def suites
# Load the testable file and return a hash of suite class => non-empty
# array of its test method names; an unloadable file yields [].
# Since we're not using `ruby -Itest -Ilib` to run the tests, we need to add this directory to the `LOAD_PATH`
$:.unshift "./test", "./spec", "./lib"
begin
# Fire up this Ruby file. Let's hope it actually has tests.
require "./#{testable.file}"
rescue LoadError => e
# Fail with a happier error message instead of spitting out a backtrace from this gem
STDERR.puts "Failed loading test file:\n#{e.message}"
return []
end
suites = runner.suites
# Use some janky internal APIs to group test methods by test suite.
suites.each_with_object({}) do |suite_class, test_suites|
# End up with a hash of suite class name to an array of test methods, so we can later find them and ignore empty test suites
if runner.test_methods(suite_class).any?
test_suites[suite_class] = runner.test_methods(suite_class)
end
end
end | ruby | {
"resource": ""
} |
q20112 | PaypalAdaptive.Response.approve_paypal_payment_url | train | def approve_paypal_payment_url(opts = {})
# Build the PayPal approval URL for this payment. Accepts a legacy
# Symbol/String type argument (deprecated) or an options hash with
# :type ('mini'/'light' select the embedded flow) and optional :country
# path prefix. Returns nil when the response carries no payKey.
if opts.is_a?(Symbol) || opts.is_a?(String)
warn "[DEPRECATION] use approve_paypal_payment_url(:type => #{opts})"
opts = {:type => opts}
end
return nil if self['payKey'].nil?
if ['mini', 'light'].include?(opts[:type].to_s)
"#{@paypal_base_url}/webapps/adaptivepayment/flow/pay?expType=#{opts[:type]}&paykey=#{self['payKey']}"
else
base = @paypal_base_url
base = base + "/#{opts[:country]}" if opts[:country]
"#{base}/webscr?cmd=_ap-payment&paykey=#{self['payKey']}"
end
end | ruby | {
"resource": ""
} |
q20113 | Scraper.Base.scrape | train | def scrape()
# Drive the scraping pass: prepare the document, walk its element tree
# depth-first with an explicit stack, apply each class-level rule's
# selector/extractor pair to every element, honour skip/stop requests,
# then call #collect and return #result.
# Call prepare with the document, but before doing anything else.
prepare document
# Retrieve the document. This may raise HTTPError or HTMLParseError.
case document
when Array
stack = @document.reverse # see below
when HTML::Node
# If a root element is specified, start selecting from there.
# The stack is empty if we can't find any root element (makes
# sense). However, the node we're going to process may be
# a tag, or an HTML::Document.root which is the equivalent of
# a document fragment.
root_element = option(:root_element)
root = root_element ? @document.find(:tag=>root_element) : @document
stack = root ? (root.tag? ? [root] : root.children.reverse) : []
else
return
end
# @skip stores all the elements we want to skip (see #skip).
# rules stores all the rules we want to process with this
# scraper, based on the class definition.
@skip = []
@stop = false
rules = self.class.rules.clone
begin
# Process the document one node at a time. We process elements
# from the end of the stack, so each time we visit child elements,
# we add them to the end of the stack in reverse order.
while node = stack.pop
break if @stop
skip_this = false
# Only match nodes that are elements, ignore text nodes.
# Also ignore any element that's on the skip list, and if
# found one, remove it from the list (since we never visit
# the same element twice). But an element may be added twice
# to the skip list.
# Note: equal? is faster than == for nodes.
next unless node.tag?
@skip.delete_if { |s| skip_this = true if s.equal?(node) }
next if skip_this
# Run through all the rules until we process the element or
# run out of rules. If skip_this=true then we processed the
# element and we can break out of the loop. However, we might
# process (and skip) descedants so also watch the skip list.
rules.delete_if do |selector, extractor, rule_name, first_only|
break if skip_this
# The result of calling match (selected) is nil, element
# or array of elements. We turn it into an array to
# process one element at a time. We process all elements
# that are not on the skip list (we haven't visited
# them yet).
if selected = selector.match(node, first_only)
selected = [selected] unless selected.is_a?(Array)
selected = [selected.first] if first_only
selected.each do |element|
# Do not process elements we already skipped
# (see above). However, this time we may visit
# an element twice, since selected elements may
# be descendants of the current element on the
# stack. In rare cases two elements on the stack
# may pick the same descendants.
next if @skip.find { |s| s.equal?(element) }
# Call the extractor method with this element.
# If it returns true, skip the element and if
# the current element, don't process any more
# rules. Again, pay attention to descendants.
if extractor.bind(self).call(element)
@extracted = true
end
if @skip.delete(true)
if element.equal?(node)
skip_this = true
else
@skip << element
end
end
end
# Returning truthy from delete_if's block removes a
# first_only rule once it has matched something.
first_only if !selected.empty?
end
end
# If we did not skip the element, we're going to process its
# children. Reverse order since we're popping from the stack.
if !skip_this && children = node.children
stack.concat children.reverse
end
end
ensure
@skip = nil
end
collect
return result
end | ruby | {
"resource": ""
} |
q20114 | Scraper.Base.document | train | def document
# Lazily resolve @document: fetch it when it is still a URI, parse it
# when it is an HTML string, and return the resulting HTML::Node tree.
# Raises RuntimeError if no usable document results.
if @document.is_a?(URI)
# Attempt to read page. May raise HTTPError.
options = {}
READER_OPTIONS.each { |key| options[key] = option(key) }
request(@document, options)
end
if @document.is_a?(String)
# Parse the page. May raise HTMLParseError.
parsed = Reader.parse_page(@document, @page_info.encoding,
option(:parser_options), option(:parser))
@document = parsed.document
@page_info.encoding = parsed.encoding
end
return @document if @document.is_a?(HTML::Node)
raise RuntimeError, "No document to process"
end | ruby | {
"resource": ""
} |
q20115 | HTML.Node.next_sibling | train | def next_sibling()
# The node immediately after this one among the parent's children, or
# nil when this is the last child.
if siblings = parent.children
# Identity comparison, not ==, to find exactly this node.
me = siblings.index { |node| node.equal?(self) }
return siblings[me + 1] if me
end
nil
end | ruby | {
"resource": ""
} |
q20116 | HTML.Node.previous_sibling | train | def previous_sibling()
# Returns the node immediately before this one among the parent's
# children, or nil when this is the first child.
if siblings = parent.children
siblings.each_with_index do |node, i|
# Guard i > 0: previously `siblings[i - 1]` with i == 0 wrapped
# around via Ruby's negative indexing and wrongly returned the
# *last* sibling for a first child.
return(i > 0 ? siblings[i - 1] : nil) if node.equal?(self)
end
end
nil
end | ruby | {
"resource": ""
} |
q20117 | HTML.Node.previous_element | train | def previous_element(name = nil)
# The closest earlier sibling that is a tag (optionally restricted to
# tags called `name`), or nil when none precedes this node.
if siblings = parent.children
candidate = nil
siblings.each do |node|
return candidate if node.equal?(self)
candidate = node if node.tag? && (name.nil? || node.name == name)
end
end
nil
end | ruby | {
"resource": ""
} |
q20118 | HTML.Node.each | train | def each(value = nil, &block)
# Depth-first pre-order traversal: yield self first, then recurse into
# each child, threading `value` through every call and returning it.
block.call(self, value)
(@children || []).each { |child| child.each(value, &block) }
value
end | ruby | {
"resource": ""
} |
q20119 | HTML.Tokenizer.scan_tag | train | def scan_tag
# Consume one markup construct starting at '<': a comment, CDATA
# section, doctype or ordinary tag, returning the raw text consumed.
tag = @scanner.getch
if @scanner.scan(/!--/) # comment
tag << @scanner.matched
tag << (@scanner.scan_until(/--\s*>/) || @scanner.scan_until(/\Z/))
elsif @scanner.scan(/!\[CDATA\[/)
tag << @scanner.matched
# Fall back to the remainder of the input when the CDATA section is
# unterminated, mirroring the comment branch above; previously
# scan_until returned nil there and `tag << nil` raised TypeError.
tag << (@scanner.scan_until(/\]\]>/) || @scanner.scan_until(/\Z/))
elsif @scanner.scan(/!/) # doctype
tag << @scanner.matched
tag << consume_quoted_regions
else
tag << consume_quoted_regions
end
tag
end | ruby | {
"resource": ""
} |
q20120 | HTML.Selector.next_element | train | def next_element(element, name = nil)
# The first tag node (optionally named `name`) that follows `element`
# among its parent's children, or nil when there is none.
if siblings = element.parent.children
seen_self = false
siblings.each do |node|
if seen_self && node.tag? && (name.nil? || node.name == name)
return node
end
seen_self = true if node.equal?(element)
end
end
nil
end | ruby | {
"resource": ""
} |
q20121 | HTML.Selector.only_child | train | def only_child(of_type)
# Builds the :only-child / :only-of-type predicate: true when no other
# tag child (of the same name when of_type) shares this element's parent.
lambda do |element|
# Element must be inside parent element.
return false unless element.parent and element.parent.tag?
name = of_type ? element.name : nil
# Text nodes and comments are ignored; any other matching tag child
# disqualifies the element.
element.parent.children.none? do |child|
child.tag? and (name.nil? or child.name == name) and !child.equal?(element)
end
end
end | ruby | {
"resource": ""
} |
q20122 | HTML.Node.find_all | train | def find_all(conditions)
conditions = validate_conditions(conditions)
# Collect this node (when it matches) plus all matching descendants,
# preserving document (pre-order) order.
matches = match(conditions) ? [self] : []
@children.inject(matches) do |acc, child|
acc.concat(child.find_all(conditions))
end
end | ruby | {
"resource": ""
} |
q20123 | HTML.Tag.to_s | train | def to_s
# Serialise the tag back to HTML: closing tags render as </name>;
# otherwise emit attributes (single-quoted, quotes escaped), children,
# and a matching close tag unless self-closing.
if @closing == :close
"</#{@name}>"
else
s = "<#{@name}"
@attributes.each do |k,v|
s << " #{k}"
s << "='#{v.gsub(/'/,"\\\\'")}'" if String === v
end
s << " /" if @closing == :self
s << ">"
@children.each { |child| s << child.to_s }
# NOTE(review): a non-self-closing tag with no children gets no close
# tag -- confirm this asymmetry matches the tokenizer's model.
s << "</#{@name}>" if @closing != :self && !@children.empty?
s
end
end | ruby | {
"resource": ""
} |
q20124 | HTML.Tag.match_condition | train | def match_condition(value, condition)
# Compare an attribute value against one condition; the condition's
# class selects the comparison: String equality, Regexp match, Numeric
# string-compare, true/false/nil presence checks. Anything else never
# matches.
case condition
when String then value && value == condition
when Regexp then value && value.match(condition)
when Numeric then value == condition.to_s
when true then !value.nil?
when false, nil then value.nil?
else false
end
end | ruby | {
"resource": ""
} |
q20125 | Hoodoo.Client.resource | train | def resource( resource, version = 1, options = {} )
# Obtain an endpoint for the named resource/version. Propagates the
# discoverer, session ID and locale, copies any recognised header
# properties present in the options, and - when automatic session
# acquisition is configured - wraps the endpoint in an AutoSession proxy.
endpoint_options = {
:discoverer => @discoverer,
:session_id => @session_id,
:locale => options[ :locale ] || @locale
}
Hoodoo::Client::Headers::HEADER_TO_PROPERTY.each do | rack_header, description |
property = description[ :property ]
endpoint_options[ property ] = options[ property ] if options.has_key?( property )
end
endpoint = Hoodoo::Client::Endpoint.endpoint_for(
resource,
version,
endpoint_options
)
unless @auto_session_endpoint.nil?
# Wrap the discovered endpoint so sessions are acquired transparently.
remote_discovery_result = Hoodoo::Services::Discovery::ForRemote.new(
:resource => resource,
:version => version,
:wrapped_endpoint => endpoint
)
endpoint = Hoodoo::Client::Endpoint::AutoSession.new(
resource,
version,
:caller_id => @caller_id,
:caller_secret => @caller_secret,
:session_endpoint => @auto_session_endpoint,
:discovery_result => remote_discovery_result
)
end
return endpoint
end | ruby | {
"resource": ""
} |
q20126 | Hoodoo.Logger.remove | train | def remove( *writer_instances )
# Drop each writer: forget it and detach its pool communicator, if one
# was registered.
writer_instances.each do | writer_instance |
communicator = @writers.delete( writer_instance )
@pool.remove( communicator ) unless communicator.nil?
end
end | ruby | {
"resource": ""
} |
q20127 | Hoodoo.Logger.include_class? | train | def include_class?( writer_class )
# True when any registered writer instance is of (or derives from) the
# given class.
return @writers.keys.any? { | writer_instance | writer_instance.is_a?( writer_class ) }
end | ruby | {
"resource": ""
} |
q20128 | Hoodoo.Errors.add_error | train | def add_error( code, options = nil )
# Add an error by declared code. Validates the code and any required
# reference keys (raising UnknownCode / MissingReferenceData), sets the
# overall HTTP status from the first error added, and stores reference
# values as a comma-escaped, comma-joined string.
options = Hoodoo::Utilities.stringify( options || {} )
reference = options[ 'reference' ] || {}
message = options[ 'message' ]
# Make sure nobody uses an undeclared error code.
raise UnknownCode, "In \#add_error: Unknown error code '#{code}'" unless @descriptions.recognised?( code )
# If the error description specifies a list of required reference keys,
# make sure all are present and complain if not.
description = @descriptions.describe( code )
required_keys = description[ 'reference' ] || []
actual_keys = reference.keys
missing_keys = required_keys - actual_keys
unless missing_keys.empty?
raise MissingReferenceData, "In \#add_error: Reference hash missing required keys: '#{ missing_keys.join( ', ' ) }'"
end
# All good!
@http_status_code = ( description[ 'status' ] || 200 ).to_i if @errors.empty? # Use first in collection for overall HTTP status code
error = {
'code' => code,
'message' => message || description[ 'message' ] || code
}
# Required keys first (in declared order), then any extras supplied.
ordered_keys = required_keys + ( actual_keys - required_keys )
ordered_values = ordered_keys.map { | key | escape_commas( reference[ key ].to_s ) }
# See #unjoin_and_unescape_commas to undo the join below.
error[ 'reference' ] = ordered_values.join( ',' ) unless ordered_values.empty?
@errors << error
end | ruby | {
"resource": ""
} |
q20129 | Hoodoo.Errors.add_precompiled_error | train | def add_precompiled_error( code, message, reference, http_status = 500 )
# Add a pre-validated error without consulting the descriptions table.
# The first error added determines the overall HTTP status code.
@http_status_code = http_status.to_i if @errors.empty?
error = { 'code' => code, 'message' => message }
# Only attach the reference string when there is something to report.
unless reference.nil? || reference.empty?
error[ 'reference' ] = reference
end
@errors << error
end | ruby | {
"resource": ""
} |
q20130 | Hoodoo.Generator.run! | train | def run!
# Entry point for the shell generator CLI: parse arguments (service
# name plus --path/--from/--git et al), validate them, and either show
# usage/version text or create the new service from the chosen source.
git = nil
path = nil
return show_usage() if ARGV.length < 1
name = ARGV.shift() if ARGV.first[ 0 ] != '-'
opts = GetoptLong.new(
[ '--help', '-h', GetoptLong::NO_ARGUMENT ],
[ '--version', '-v', '-V', GetoptLong::NO_ARGUMENT ],
[ '--path', '-p', GetoptLong::REQUIRED_ARGUMENT ],
[ '--from', '-f', GetoptLong::REQUIRED_ARGUMENT ],
[ '--git', '-g', GetoptLong::REQUIRED_ARGUMENT ],
)
# GetoptLong prints its own errors to stderr; silence them since we
# report via usage_and_warning instead.
silence_stream( $stderr ) do
begin
opts.each do | opt, arg |
case opt
when '--help'
return show_usage()
when '--version'
return show_version()
when '--path'
path = arg
when '--from', '--git'
git = arg
end
end
rescue GetoptLong::InvalidOption, GetoptLong::MissingArgument => e
return usage_and_warning( e.message )
end
end
unless path.nil? || git.nil?
return usage_and_warning( 'Use the --path OR --from arguments, but not both' )
end
git ||= 'git@github.com:LoyaltyNZ/service_shell.git'
name = ARGV.shift() if name.nil?
return show_usage() if name.nil?
return usage_and_warning( "Unexpected extra arguments were given" ) if ARGV.count > 0
return usage_and_warning( "SERVICE_NAME must match #{ NAME_REGEX.inspect }" ) if naughty_name?( name )
return usage_and_warning( "'#{ name }' already exists" ) if File.exist?( "./#{ name }" )
return create_service( name, git, path )
end | ruby | {
"resource": ""
} |
q20131 | Hoodoo.TransientStore.set | train | def set( key:, payload:, maximum_lifespan: nil )
# Store a payload under a key for up to maximum_lifespan seconds
# (defaulting to the instance-wide maximum). Returns true/false from
# the engine, or the raised Exception object on engine failure; nil
# payloads and malformed keys raise immediately.
key = normalise_key( key, 'set' )
if payload.nil?
raise "Hoodoo::TransientStore\#set: Payloads of 'nil' are prohibited"
end
maximum_lifespan ||= @default_maximum_lifespan
begin
result = @storage_engine_instance.set(
key: key,
payload: payload,
maximum_lifespan: maximum_lifespan
)
# Engines must answer exactly true or false; anything else is a bug.
if result != true && result != false
raise "Hoodoo::TransientStore\#set: Engine '#{ @storage_engine }' returned an invalid response"
end
rescue => e
result = e
end
return result
end | ruby | {
"resource": ""
} |
q20132 | Hoodoo.TransientStore.normalise_key | train | def normalise_key( key, calling_method_name )
# Validate a storage key and return its String form; only non-empty
# Strings or Symbols are accepted. calling_method_name is interpolated
# into the exception text for caller-accurate messages.
case key
when String, Symbol
normalised = key.to_s
if normalised.empty?
raise "Hoodoo::TransientStore\##{ calling_method_name }: Empty String or Symbol keys are prohibited"
end
normalised
else
raise "Hoodoo::TransientStore\##{ calling_method_name }: Keys must be of String or Symbol class; you provided '#{ key.class }'"
end
end | ruby | {
"resource": ""
} |
q20133 | Gemfury.Client.push_gem | train | def push_gem(file, options = {})
ensure_ready!(:authorization)
# Uploads go to the dedicated push endpoint rather than the main API host.
push_api = connection(:url => self.pushpoint)
checked_response_body(push_api.post('uploads', options.merge(:file => file)))
end | ruby | {
"resource": ""
} |
q20134 | Gemfury.Client.versions | train | def versions(name, options = {})
ensure_ready!(:authorization)
# GET the version list for the named gem.
checked_response_body(connection.get("gems/#{escape(name)}/versions", options))
end | ruby | {
"resource": ""
} |
q20135 | Gemfury.Client.yank_version | train | def yank_version(name, version, options = {})
ensure_ready!(:authorization)
# Remove one specific version of the named gem.
checked_response_body(connection.delete("gems/#{escape(name)}/versions/#{escape(version)}", options))
end | ruby | {
"resource": ""
} |
q20136 | Gemfury.Client.add_collaborator | train | def add_collaborator(login, options = {})
ensure_ready!(:authorization)
# PUT creates the collaborator association for the given login.
checked_response_body(connection.put("collaborators/#{escape(login)}", options))
end | ruby | {
"resource": ""
} |
q20137 | Gemfury.Client.remove_collaborator | train | def remove_collaborator(login, options = {})
ensure_ready!(:authorization)
# DELETE removes the collaborator association for the given login.
checked_response_body(connection.delete("collaborators/#{escape(login)}", options))
end | ruby | {
"resource": ""
} |
q20138 | Gemfury.Client.git_repos | train | def git_repos(options = {})
ensure_ready!(:authorization)
# List all git repositories for the account.
checked_response_body(connection.get(git_repo_path, options))
end | ruby | {
"resource": ""
} |
q20139 | Gemfury.Client.git_update | train | def git_update(repo, options = {})
ensure_ready!(:authorization)
# PATCH updates repository attributes (e.g. rename).
checked_response_body(connection.patch(git_repo_path(repo), options))
end | ruby | {
"resource": ""
} |
q20140 | Gemfury.Client.git_reset | train | def git_reset(repo, options = {})
ensure_ready!(:authorization)
# DELETE wipes the repository's contents.
checked_response_body(connection.delete(git_repo_path(repo), options))
end | ruby | {
"resource": ""
} |
q20141 | Gemfury.Client.git_rebuild | train | def git_rebuild(repo, options = {})
ensure_ready!(:authorization)
# The builds endpoint replies with plain text rather than JSON.
text_api = connection(:api_format => :text)
checked_response_body(text_api.post("#{git_repo_path(repo)}/builds", options))
end | ruby | {
"resource": ""
} |
q20142 | Reel.Response.status= | train | def status=(status, reason=nil)
# Set the numeric response status. Integers are stored directly and the
# reason phrase defaulted from STATUS_CODES; Symbols are translated via
# SYMBOL_TO_STATUS_CODE and re-assigned recursively. NOTE(review): the
# `reason` parameter is unused here and only reachable via #send, since
# `x.status = y` passes exactly one argument -- confirm intent.
case status
when Integer
@status = status
@reason ||= STATUS_CODES[status]
when Symbol
if code = SYMBOL_TO_STATUS_CODE[status]
self.status = code
else
raise ArgumentError, "unrecognized status symbol: #{status}"
end
else
raise TypeError, "invalid status type: #{status.inspect}"
end
end | ruby | {
"resource": ""
} |
q20143 | Reel.Connection.request | train | def request
# Read the next request from the parser, transitioning the FSM to
# :headers. Keep-alive is dropped for explicit "Connection: close" or
# HTTP/1.0 requests. Socket errors close the connection and return nil.
raise StateError, "already processing a request" if current_request
req = @parser.current_request
@request_fsm.transition :headers
@keepalive = false if req[CONNECTION] == CLOSE || req.version == HTTP_VERSION_1_0
@current_request = req
req
rescue IOError, Errno::ECONNRESET, Errno::EPIPE
# The client disconnected mid-request; mark the connection closed.
@request_fsm.transition :closed
@keepalive = false
nil
end | ruby | {
"resource": ""
} |
q20144 | Reel.Spy.readpartial | train | def readpartial(maxlen, outbuf = "")
# Delegate to the wrapped socket, logging whatever was read.
@socket.readpartial(maxlen, outbuf).tap { |data| log :read, data }
end | ruby | {
"resource": ""
} |
q20145 | Reel.Spy.log | train | def log(type, str)
# Colour-code logged traffic by event type before appending to the logger.
coloured =
case type
when :connect then Colors.green(str)
when :close then Colors.red(str)
when :read then Colors.gold(str)
when :write then Colors.white(str)
else raise "unknown event type: #{type.inspect}"
end
@logger << coloured
end | ruby | {
"resource": ""
} |
q20146 | Reel.Request.read | train | def read(length = nil, buffer = nil)
# IO#read semantics: with a length, read up to that many bytes and
# return nil at EOF; with no length, read to EOF and return "" at EOF.
# An optional buffer string is cleared and filled in place.
raise ArgumentError, "negative length #{length} given" if length && length < 0
return '' if length == 0
res = buffer.nil? ? '' : buffer.clear
chunk_size = length.nil? ? @connection.buffer_size : length
begin
while chunk_size > 0
chunk = readpartial(chunk_size)
break unless chunk
res << chunk
# Shrink the next read to exactly what is still owed.
chunk_size = length - res.length unless length.nil?
end
rescue EOFError
end
return length && res.length == 0 ? nil : res
end | ruby | {
"resource": ""
} |
q20147 | Reel.Request.readpartial | train | def readpartial(length = nil)
# Return up to `length` bytes (or everything buffered/available when
# length is nil), pulling more data from the connection only when the
# buffer cannot satisfy the request. Returns nil instead of an empty
# string when nothing is available.
if length.nil? && @buffer.length > 0
slice = @buffer
@buffer = ""
else
unless finished_reading? || (length && length <= @buffer.length)
@connection.readpartial(length ? length - @buffer.length : @connection.buffer_size)
end
if length
slice = @buffer.slice!(0, length)
else
slice = @buffer
@buffer = ""
end
end
slice && slice.length == 0 ? nil : slice
end | ruby | {
"resource": ""
} |
q20148 | Pickle.Session.created_model | train | def created_model(name)
# Look up a model created earlier in this session. A blank qualifier
# returns the most recent of the factory; an Integer is a positional
# index; any other name is resolved via the named store, raising
# ModelNotKnownError when absent.
factory, name_or_index = *parse_model(name)
if name_or_index.blank?
models_by_index(factory).last
elsif name_or_index.is_a?(Integer)
models_by_index(factory)[name_or_index]
else
models_by_name(factory)[name_or_index] or raise ModelNotKnownError, name
end
end | ruby | {
"resource": ""
} |
q20149 | Pickle.Session.model | train | def model(name)
# Re-fetch a fresh copy of a previously created model from the adapter,
# or nil when no such model was recorded.
record = created_model(name)
record && Pickle::Adapter.get_model(record.class, record.id)
end | ruby | {
"resource": ""
} |
q20150 | Pickle.Session.store_model | train | def store_model(factory, name, record)
# Also index the record under its own class name when that differs from
# the factory name, so it can be referred to either way.
unless pickle_parser.canonical(factory) == pickle_parser.canonical(record.class.name)
store_record(record.class.name, name, record)
end
store_record(factory, name, record)
end | ruby | {
"resource": ""
} |
q20151 | Pickle.Path.path_to_pickle | train | def path_to_pickle(*pickle_names)
# Resolve a named route for the given pickle models. Each name becomes
# either a stored model or a Symbol segment; :extra text is split into
# words and tried as action/segment combinations. Raises when no route
# can be built.
options = pickle_names.extract_options!
resources = pickle_names.map{|n| model(n) || n.to_sym}
if options[:extra]
parts = options[:extra].underscore.gsub(' ','_').split("_")
find_pickle_path_using_action_segment_combinations(resources, parts)
else
pickle_path_for_resources_action_segment(resources, options[:action], options[:segment])
end or raise "Could not figure out a path for #{pickle_names.inspect} #{options.inspect}"
end | ruby | {
"resource": ""
} |
q20152 | Pickle.Email.emails | train | def emails(fields = nil)
# Cache and return delivered emails matching the given field constraints.
@emails = ActionMailer::Base.deliveries.select { |mail| email_has_fields?(mail, fields) }
end | ruby | {
"resource": ""
} |
q20153 | Rightscale.HttpConnection.error_add | train | def error_add(error)
message = error
message = "#{error.class.name}: #{error.message}" if error.is_a?(Exception)
@state[@server] = { :count => error_count+1, :time => Time.now, :message => message }
end | ruby | {
"resource": ""
} |
q20154 | Rightscale.HttpConnection.setup_streaming | train | def setup_streaming(request)
if(request.body && request.body.respond_to?(:read))
body = request.body
request.content_length = body.respond_to?(:lstat) ? body.lstat.size : body.size
request.body_stream = request.body
true
end
end | ruby | {
"resource": ""
} |
q20155 | Rightscale.HttpConnection.start | train | def start(request_params)
# (Re)open the Net::HTTP connection described by request_params: tears
# down any existing connection, records server/port/protocol/proxy and
# security parameters, then configures timeouts and - for https - the
# full TLS stack (CA verification and optional client cert/key).
# close the previous if exists
finish
# create new connection
@server = request_params[:server]
@port = request_params[:port]
@protocol = request_params[:protocol]
@proxy_host = request_params[:proxy_host]
@proxy_port = request_params[:proxy_port]
@proxy_username = request_params[:proxy_username]
@proxy_password = request_params[:proxy_password]
SECURITY_PARAMS.each do |param_name|
@params[param_name] = request_params[param_name]
end
@logger.info("Opening new #{@protocol.upcase} connection to #@server:#@port")
@logger.info("Connecting to proxy #{@proxy_host}:#{@proxy_port} with username" +
" #{@proxy_username.inspect}") unless @proxy_host.nil?
@http = Net::HTTP.new(@server, @port, @proxy_host, @proxy_port, @proxy_username,
@proxy_password)
@http.open_timeout = get_param(:http_connection_open_timeout, request_params)
@http.read_timeout = get_param(:http_connection_read_timeout, request_params)
if @protocol == 'https'
# Custom verification hook: only fail on CA mismatch when the caller
# asked for it via :fail_if_ca_mismatch.
verifyCallbackProc = Proc.new{ |ok, x509_store_ctx|
# List of error codes: http://www.openssl.org/docs/apps/verify.html
code = x509_store_ctx.error
msg = x509_store_ctx.error_string
if request_params[:fail_if_ca_mismatch] && code != 0
false
else
true
end
}
@http.use_ssl = true
ca_file = get_param(:ca_file)
if ca_file && File.exists?(ca_file)
# Documentation for 'http.rb':
# : verify_mode, verify_mode=((|mode|))
# Sets the flags for server the certification verification at
# beginning of SSL/TLS session.
# OpenSSL::SSL::VERIFY_NONE or OpenSSL::SSL::VERIFY_PEER is acceptable.
#
# KHRVI: looks like the constant VERIFY_FAIL_IF_NO_PEER_CERT is not acceptable
@http.verify_callback = verifyCallbackProc
@http.ca_file= ca_file
@http.verify_mode = get_param(:use_server_auth) ? OpenSSL::SSL::VERIFY_PEER : OpenSSL::SSL::VERIFY_NONE
# The depth count is 'level 0:peer certificate', 'level 1: CA certificate', 'level 2: higher level CA certificate', and so on.
# Setting the maximum depth to 2 allows the levels 0, 1, and 2. The default depth limit is 9, allowing for the peer certificate and additional 9 CA certificates.
@http.verify_depth = 9
else
@http.verify_mode = OpenSSL::SSL::VERIFY_NONE
end
# CERT
cert_file = get_param(:cert_file, request_params)
cert = File.read(cert_file) if cert_file && File.exists?(cert_file)
cert ||= get_param(:cert, request_params)
# KEY
key_file = get_param(:key_file, request_params)
key = File.read(key_file) if key_file && File.exists?(key_file)
key ||= get_param(:key, request_params)
if cert && key
begin
@http.verify_callback = verifyCallbackProc
@http.cert = OpenSSL::X509::Certificate.new(cert)
@http.key = OpenSSL::PKey::RSA.new(key)
rescue OpenSSL::PKey::RSAError, OpenSSL::X509::CertificateError => e
@logger.error "##### Error loading SSL client cert or key: #{e.message} :: backtrace #{e.backtrace}"
raise e
end
end
end
# open connection
@http.start
end | ruby | {
"resource": ""
} |
q20156 | Rightscale.HttpConnection.request | train | def request(request_params, &block)
# Perform an HTTP request with persistent-connection reuse and a retry
# loop: re-opens the connection when the endpoint or auth params change,
# backs off and retries on EOF, tracks other network errors per-server,
# and re-raises once the configured retry budget for a server is spent.
current_params = @params.merge(request_params)
exception = get_param(:exception, current_params) || RuntimeError
# Re-establish the connection if any of auth params has changed
same_auth_params_as_before = SECURITY_PARAMS.select do |param|
request_params[param] != get_param(param)
end.empty?
# We save the offset here so that if we need to retry, we can return the file pointer to its initial position
mypos = get_fileptr_offset(current_params)
loop do
current_params[:protocol] ||= (current_params[:port] == 443 ? 'https' : 'http')
# (re)open connection to server if none exists or params has changed
same_server_as_before = @server == current_params[:server] &&
@port == current_params[:port] &&
@protocol == current_params[:protocol] &&
same_auth_params_as_before
# if we are inside a delay between retries: no requests this time!
# (skip this step if the endpoint has changed)
if error_count > current_params[:http_connection_retry_count] &&
error_time + current_params[:http_connection_retry_delay] > Time.now &&
same_server_as_before
# store the message (otherwise it will be lost after error_reset and
# we will raise an exception with an empty text)
banana_message_text = banana_message
@logger.warn("#{err_header} re-raising same error: #{banana_message_text} " +
"-- error count: #{error_count}, error age: #{Time.now.to_i - error_time.to_i}")
raise exception.new(banana_message_text)
end
# try to connect server(if connection does not exist) and get response data
begin
request = current_params[:request]
request['User-Agent'] = get_param(:user_agent, current_params) || ''
unless @http &&
@http.started? &&
same_server_as_before
same_auth_params_as_before = true
start(current_params)
end
# Detect if the body is a streamable object like a file or socket. If so, stream that
# bad boy.
setup_streaming(request)
# update READ_TIMEOUT value (it can be passed with request_params hash)
@http.read_timeout = get_param(:http_connection_read_timeout, current_params)
response = @http.request(request, &block)
error_reset
eof_reset
return response
# We treat EOF errors and the timeout/network errors differently. Both
# are tracked in different statistics blocks. Note below that EOF
# errors will sleep for a certain (exponentially increasing) period.
# Other errors don't sleep because there is already an inherent delay
# in them; connect and read timeouts (for example) have already
# 'slept'. It is still not clear which way we should treat errors
# like RST and resolution failures. For now, there is no additional
# delay for these errors although this may change in the future.
# EOFError means the server closed the connection on us.
rescue EOFError => e
finish(e.message)
@logger.debug("#{err_header} server #{@server} closed connection")
# if we have waited long enough - raise an exception...
if raise_on_eof_exception?
@logger.warn("#{err_header} raising #{exception} due to permanent EOF being received from #{@server}, error age: #{Time.now.to_i - eof_time.to_i}")
raise exception.new("Permanent EOF is being received from #{@server}.")
else
# ... else just sleep a bit before new retry
sleep(add_eof)
# We will be retrying the request, so reset the file pointer
reset_fileptr_offset(request, mypos)
end
rescue ArgumentError => e
finish(e.message)
if e.message.include?('wrong number of arguments (5 for 4)')
# seems our net_fix patch was overriden...
raise exception.new('incompatible Net::HTTP monkey-patch')
else
raise e
end
rescue Timeout::Error, SocketError, SystemCallError, Interrupt => e # See comment at bottom for the list of errors seen...
finish(e.message)
if e.is_a?(Errno::ETIMEDOUT) || e.is_a?(Timeout::Error)
# Omit retries if it was explicitly requested
# #6481:
# ... When creating a resource in EC2 (instance, volume, snapshot, etc) it is undetermined what happened if the call times out.
# The resource may or may not have been created in EC2. Retrying the call may cause multiple resources to be created...
raise exception.new("#{e.class.name}: #{e.message}") if current_params[:raise_on_timeout]
elsif e.is_a?(Interrupt)
# if ctrl+c is pressed - we have to reraise exception to terminate proggy
@logger.debug( "#{err_header} request to server #{@server} interrupted by ctrl-c")
raise e
end
# oops - we got a banana: log it
error_add(e)
@logger.warn("#{err_header} request failure count: #{error_count}, exception: #{e.inspect}")
# We will be retrying the request, so reset the file pointer
reset_fileptr_offset(request, mypos)
end
end
end | ruby | {
"resource": ""
} |
q20157 | Princely.Pdf.pdf_from_string | train | def pdf_from_string(string, output_file = '-')
with_timeout do
pdf = initialize_pdf_from_string(string, output_file, {:output_to_log_file => false})
pdf.close_write
result = pdf.gets(nil)
pdf.close_read
result.force_encoding('BINARY') if RUBY_VERSION >= "1.9"
result
end
end | ruby | {
"resource": ""
} |
q20158 | Ruote.Dashboard.attach | train | def attach(fei_or_fe, definition, opts={})
fe = Ruote.extract_fexp(@context, fei_or_fe).to_h
fei = fe['fei']
cfei = fei.merge(
'expid' => "#{fei['expid']}_0",
'subid' => Ruote.generate_subid(fei.inspect))
tree = @context.reader.read(definition)
tree[0] = 'sequence'
fields = fe['applied_workitem']['fields']
if fs = opts[:fields] || opts[:workitem]
fields = fs
elsif fs = opts[:merge_fields]
fields.merge!(fs)
end
@context.storage.put_msg(
'launch', # "apply" is OK, but "launch" stands out better
'parent_id' => fei,
'fei' => cfei,
'tree' => tree,
'workitem' => { 'fields' => fields },
'attached' => true)
Ruote::FlowExpressionId.new(cfei)
end | ruby | {
"resource": ""
} |
q20159 | Ruote.Dashboard.apply_mutation | train | def apply_mutation(wfid, pdef)
Mutation.new(self, wfid, @context.reader.read(pdef)).apply
end | ruby | {
"resource": ""
} |
q20160 | Ruote.Dashboard.processes | train | def processes(opts={})
wfids = @context.storage.expression_wfids(opts)
opts[:count] ? wfids.size : ProcessStatus.fetch(@context, wfids, opts)
end | ruby | {
"resource": ""
} |
q20161 | Ruote.Dashboard.wait_for | train | def wait_for(*items)
opts = (items.size > 1 && items.last.is_a?(Hash)) ? items.pop : {}
@context.logger.wait_for(items, opts)
end | ruby | {
"resource": ""
} |
q20162 | Ruote.Dashboard.register_participant | train | def register_participant(regex, participant=nil, opts={}, &block)
if participant.is_a?(Hash)
opts = participant
participant = nil
end
pa = @context.plist.register(regex, participant, opts, block)
@context.storage.put_msg(
'participant_registered',
'regex' => regex.is_a?(Regexp) ? regex.inspect : regex.to_s)
pa
end | ruby | {
"resource": ""
} |
q20163 | Ruote::Exp.FlowExpression.has_attribute | train | def has_attribute(*args)
args.each { |a| a = a.to_s; return a if attributes[a] != nil }
nil
end | ruby | {
"resource": ""
} |
q20164 | Ruote::Exp.FlowExpression.attribute | train | def attribute(n, workitem=h.applied_workitem, options={})
n = n.to_s
default = options[:default]
escape = options[:escape]
string = options[:to_s] || options[:string]
v = attributes[n]
v = if v == nil
default
elsif escape
v
else
dsub(v, workitem)
end
v = v.to_s if v and string
v
end | ruby | {
"resource": ""
} |
q20165 | Ruote::Exp.FlowExpression.att | train | def att(keys, values, opts={})
default = opts[:default] || values.first
val = Array(keys).collect { |key| attribute(key) }.compact.first.to_s
values.include?(val) ? val : default
end | ruby | {
"resource": ""
} |
q20166 | Ruote::Exp.FlowExpression.lookup_val_prefix | train | def lookup_val_prefix(prefix, att_options={})
lval(
[ prefix ] + [ 'val', 'value' ].map { |s| "#{prefix}_#{s}" },
%w[ v var variable ].map { |s| "#{prefix}_#{s}" },
%w[ f fld field ].map { |s| "#{prefix}_#{s}" },
att_options)
end | ruby | {
"resource": ""
} |
q20167 | Ruote::Exp.FlowExpression.compile_atts | train | def compile_atts(opts={})
attributes.keys.each_with_object({}) { |k, r|
r[dsub(k)] = attribute(k, h.applied_workitem, opts)
}
end | ruby | {
"resource": ""
} |
q20168 | Ruote::Exp.FlowExpression.attribute_text | train | def attribute_text(workitem=h.applied_workitem)
text = attributes.keys.find { |k| attributes[k] == nil }
dsub(text.to_s, workitem)
end | ruby | {
"resource": ""
} |
q20169 | Ruote::Exp.FlowExpression.determine_tos | train | def determine_tos
to_v = attribute(:to_v) || attribute(:to_var) || attribute(:to_variable)
to_f = attribute(:to_f) || attribute(:to_fld) || attribute(:to_field)
if to = attribute(:to)
pre, key = to.split(':')
pre, key = [ 'f', pre ] if key == nil
if pre.match(/^f/)
to_f = key
else
to_v = key
end
end
[ to_v, to_f ]
end | ruby | {
"resource": ""
} |
q20170 | Ruote::Exp.FlowExpression.do_apply | train | def do_apply(msg)
if msg['state'] == 'paused'
return pause_on_apply(msg)
end
if msg['flavour'].nil? && (aw = attribute(:await))
return await(aw, msg)
end
unless Condition.apply?(attribute(:if), attribute(:unless))
return do_reply_to_parent(h.applied_workitem)
end
pi = h.parent_id
reply_immediately = false
if attribute(:scope).to_s == 'true'
h.variables ||= {}
end
if attribute(:forget).to_s == 'true'
h.variables = compile_variables
h.parent_id = nil
h.forgotten = true
reply_immediately = true
elsif attribute(:lose).to_s == 'true'
h.lost = true
elsif msg['flanking'] or (attribute(:flank).to_s == 'true')
h.flanking = true
reply_immediately = true
end
if reply_immediately and pi
@context.storage.put_msg(
'reply',
'fei' => pi,
'workitem' => Ruote.fulldup(h.applied_workitem),
'flanking' => h.flanking)
end
filter
consider_tag
consider_timers
apply
end | ruby | {
"resource": ""
} |
q20171 | Ruote::Exp.FlowExpression.do_reply_to_parent | train | def do_reply_to_parent(workitem, delete=true)
# propagate the cancel "flavour" back, so that one can know
# why a branch got cancelled.
flavour = if @msg.nil?
nil
elsif @msg['action'] == 'cancel'
@msg['flavour'] || 'cancel'
elsif h.state.nil?
nil
else
@msg['flavour']
end
# deal with the timers and the schedules
%w[ timeout_schedule_id job_id ].each do |sid|
@context.storage.delete_schedule(h[sid]) if h[sid]
end
#
# legacy schedule ids, to be removed for ruote 2.4.0
@context.storage.delete_schedule(h.schedule_id) if h.schedule_id
#
# time-driven exps like cron, wait and once now all use h.schedule_id
h.timers.each do |schedule_id, action|
@context.storage.delete_schedule(schedule_id)
end if h.timers
# cancel flanking expressions if any
cancel_flanks(h.state == 'dying' ? 'kill' : nil)
# trigger or vanilla reply
if h.state == 'failing' # on_error is implicit (#do_fail got called)
trigger('on_error', workitem)
elsif h.state == 'cancelling' && h.on_cancel
trigger('on_cancel', workitem)
elsif h.state == 'cancelling' && h.on_re_apply
trigger('on_re_apply', workitem)
elsif h.state == 'timing_out' && h.on_timeout
trigger('on_timeout', workitem)
elsif h.state == nil && h.on_reply
trigger('on_reply', workitem)
elsif h.flanking && h.state.nil?
#
# do vanish
do_unpersist
elsif h.lost && h.state.nil?
#
# do not reply, sit here (and wait for cancellation probably)
do_persist
elsif h.trigger && workitem['fields']["__#{h.trigger}__"]
#
# the "second take"
trigger(h.trigger, workitem)
else # vanilla reply
filter(workitem) if h.state.nil?
f = h.state.nil? && attribute(:vars_to_f)
Ruote.set(workitem['fields'], f, h.variables) if f
workitem['sub_wf_name'] = h.applied_workitem['sub_wf_name']
workitem['sub_wf_revision'] = h.applied_workitem['sub_wf_revision']
leave_tag(workitem) if h.tagname
(do_unpersist || return) if delete
# remove expression from storage
if h.parent_id && ! h.attached
@context.storage.put_msg(
'reply',
'fei' => h.parent_id,
'workitem' => workitem.merge!('fei' => h.fei),
'updated_tree' => h.updated_tree, # nil most of the time
'flavour' => flavour)
else
@context.storage.put_msg(
(h.forgotten || h.attached) ? 'ceased' : 'terminated',
'wfid' => h.fei['wfid'],
'fei' => h.fei,
'workitem' => workitem,
'variables' => h.variables,
'flavour' => flavour)
if
h.state.nil? &&
h.on_terminate == 'regenerate' &&
! (h.forgotten || h.attached)
then
@context.storage.put_msg(
'regenerate',
'wfid' => h.fei['wfid'],
'tree' => h.original_tree,
'workitem' => workitem,
'variables' => h.variables,
'flavour' => flavour)
#'stash' =>
end
end
end
end | ruby | {
"resource": ""
} |
q20172 | Ruote::Exp.FlowExpression.do_pause | train | def do_pause(msg)
return if h.state != nil
h.state = 'paused'
do_persist || return
h.children.each { |i|
@context.storage.put_msg('pause', 'fei' => i)
} unless msg['breakpoint']
end | ruby | {
"resource": ""
} |
q20173 | Ruote::Exp.FlowExpression.ancestor? | train | def ancestor?(fei)
fei = fei.to_h if fei.respond_to?(:to_h)
return false unless h.parent_id
return true if h.parent_id == fei
parent.ancestor?(fei)
end | ruby | {
"resource": ""
} |
q20174 | Ruote::Exp.FlowExpression.pre_apply_child | train | def pre_apply_child(child_index, workitem, forget)
child_fei = h.fei.merge(
'expid' => "#{h.fei['expid']}_#{child_index}",
'subid' => Ruote.generate_subid(h.fei.inspect))
h.children << child_fei unless forget
msg = {
'fei' => child_fei,
'tree' => tree.last[child_index],
'parent_id' => forget ? nil : h.fei,
'variables' => forget ? compile_variables : nil,
'workitem' => workitem
}
msg['forgotten'] = true if forget
msg
end | ruby | {
"resource": ""
} |
q20175 | Ruote::Exp.FlowExpression.apply_child | train | def apply_child(child_index, workitem, forget=false)
msg = pre_apply_child(child_index, workitem, forget)
persist_or_raise unless forget
# no need to persist the parent (this) if the child is to be forgotten
@context.storage.put_msg('apply', msg)
end | ruby | {
"resource": ""
} |
q20176 | Ruote::Exp.FlowExpression.leave_tag | train | def leave_tag(workitem)
unset_variable(h.tagname)
Ruote::Workitem.new(workitem).send(:remove_tag, h.tagname)
@context.storage.put_msg(
'left_tag',
'tag' => h.tagname,
'full_tag' => h.full_tagname,
'fei' => h.fei,
'workitem' => workitem)
return unless h.full_tagname # for backward compatibility
r = root
return unless r && r.variables # might happen
r.variables.delete(h.full_tagname)
state = case (h.trigger || h.state)
when 'on_cancel' then 'cancelled'
when 'on_error' then 'failed'
when 'on_timeout' then 'timed out'
when 'on_re_apply' then nil
when 'cancelling' then 'cancelled'
when 'dying' then 'killed'
else nil
end
(r.variables['__past_tags__'] ||= []) << [
h.full_tagname,
fei.sid,
state,
Ruote.now_to_utc_s,
Ruote.fulldup(h.variables)
# not fullduping here triggers a segfault at some point with YAJL
]
r.do_persist unless r.fei == self.fei
end | ruby | {
"resource": ""
} |
q20177 | Ruote.RuleSession.do_merge | train | def do_merge(field, target, value)
value = Rufus::Json.dup(value)
if target.is_a?(Array)
target.push(value)
elsif value.is_a?(Hash)
target.merge!(value)
else # deal with non Hash
target[field.split('.').last] = value
end
target.delete('~')
target.delete('~~')
end | ruby | {
"resource": ""
} |
q20178 | Ruote.ProcessStatus.tags | train | def tags
variables ? Hash[variables.select { |k, v| Ruote.is_a_fei?(v) }] : nil
end | ruby | {
"resource": ""
} |
q20179 | Ruote.ProcessStatus.all_tags | train | def all_tags
all_variables.remap do |(fei, vars), h|
vars.each { |k, v| (h[k] ||= []) << v if Ruote.is_a_fei?(v) }
end
end | ruby | {
"resource": ""
} |
q20180 | Ruote.ProcessStatus.wfid | train | def wfid
l = [ @expressions, @errors, @stored_workitems ].find { |l| l.any? }
l ? l.first.fei.wfid : nil
end | ruby | {
"resource": ""
} |
q20181 | Ruote.ProcessStatus.position | train | def position
workitems.collect { |wi|
r = [ wi.fei.sid, wi.participant_name ]
params = (wi.fields['params'] || {}).dup
params.delete('ref')
if err = errors.find { |e| e.fei == wi.fei }
params['error'] = err.message
end
r << params
r
}
end | ruby | {
"resource": ""
} |
q20182 | Ruote.ProcessStatus.leaves | train | def leaves
expressions.inject([]) { |a, exp|
a.select { |e| ! exp.ancestor?(e.fei) } + [ exp ]
}
end | ruby | {
"resource": ""
} |
q20183 | Ruote.ProcessStatus.workitems | train | def workitems
@expressions.select { |fexp|
#fexp.is_a?(Ruote::Exp::ParticipantExpression)
fexp.h.name == 'participant'
}.collect { |fexp|
Ruote::Workitem.new(fexp.h.applied_workitem)
}
end | ruby | {
"resource": ""
} |
q20184 | Ruote.ProcessStatus.to_dot | train | def to_dot(opts={})
s = [ "digraph \"process wfid #{wfid}\" {" ]
@expressions.each { |e| s.push(*e.send(:to_dot, opts)) }
@errors.each { |e| s.push(*e.send(:to_dot, opts)) }
s << '}'
s.join("\n")
end | ruby | {
"resource": ""
} |
q20185 | Ruote.TemplateMixin.render_template | train | def render_template(template, flow_expression, workitem)
template = (File.read(template) rescue nil) if is_a_file?(template)
return render_default_template(workitem) unless template
template = template.to_s
workitem = workitem.to_h if workitem.respond_to?(:to_h)
@context.dollar_sub.s(template, flow_expression, workitem)
end | ruby | {
"resource": ""
} |
q20186 | Ruote.TemplateMixin.render_default_template | train | def render_default_template(workitem)
workitem = workitem.to_h if workitem.respond_to?(:to_h)
s = []
s << "workitem for #{workitem['participant_name']}"
s << ''
s << Rufus::Json.pretty_encode(workitem['fei'])
s << ''
workitem['fields'].keys.sort.each do |key|
s << " - '#{key}' ==> #{Rufus::Json.encode(workitem['fields'][key])}"
end
s.join("\n")
end | ruby | {
"resource": ""
} |
q20187 | Ruote::Exp.ParticipantExpression.consider_participant_timers | train | def consider_participant_timers(p_info)
return if h.has_timers
# process definition takes precedence over participant defined timers.
timers = nil
[ :rtimers, :timers, :rtimeout ].each do |meth|
pa = @context.plist.instantiate(p_info, :if_respond_to? => meth)
next unless pa
timers = Ruote.participant_send(
pa, meth, 'workitem' => Ruote::Workitem.new(h.applied_workitem))
break if timers
end
return unless timers
timers = if timers.index(':')
timers.split(/,/)
else
[ "#{timers}: timeout" ]
end
schedule_timers(timers)
end | ruby | {
"resource": ""
} |
q20188 | Ruote.LocalParticipant.re_dispatch | train | def re_dispatch(wi=nil, opts=nil)
wi, opts = [ nil, wi ] if wi.is_a?(Hash) && opts.nil?
wi ||= workitem()
opts ||= {}
wi.h.re_dispatch_count = wi.h.re_dispatch_count.to_s.to_i + 1
msg = {
'action' => 'dispatch',
'fei' => wi.h.fei,
'workitem' => wi.to_h,
'participant_name' => wi.participant_name
}
if t = opts[:in] || opts[:at]
sched_id = @context.storage.put_schedule('at', wi.h.fei, t, msg)
exp = fexp(wi)
exp.h['re_dispatch_sched_id'] = sched_id
exp.try_persist
else
@context.storage.put_msg('dispatch', msg)
end
end | ruby | {
"resource": ""
} |
q20189 | Ruote.ErrorHandler.msg_handle | train | def msg_handle(msg, err)
fexp = Ruote::Exp::FlowExpression.fetch(
@context, msg['fei'] || msg['workitem']['fei']
) rescue nil
handle(msg, fexp, err)
end | ruby | {
"resource": ""
} |
q20190 | Ruote.ErrorHandler.msg_raise | train | def msg_raise(msg, err)
fei = msg['fei']
wfid = msg['wfid'] || msg.fetch('fei', {})['wfid']
@context.storage.put_msg(
'raise',
'fei' => fei,
'wfid' => wfid,
'msg' => msg,
'error' => deflate(err, fei))
end | ruby | {
"resource": ""
} |
q20191 | Ruote.ErrorHandler.handle | train | def handle(msg, fexp, err)
err = RaisedError.new(err) unless err.respond_to?(:backtrace)
meta = err.is_a?(Ruote::MetaError)
fei = msg['fei'] || (fexp.h.fei rescue nil)
wfid = msg['wfid'] || (fei || {})['wfid']
# on_error ?
return if ( ! meta) && fexp && fexp.handle_on_error(msg, err)
# emit 'msg'
#
# (this message might get intercepted by a tracker)
herr = deflate(err, fei, fexp)
# fill error in the error journal
@context.storage.put(
herr.merge(
'type' => 'errors',
'_id' => "err_#{Ruote.to_storage_id(fei)}",
'message' => err.inspect, # :-(
'trace' => (err.backtrace || []).join("\n"), # :-(
'msg' => msg)
) if fei
# advertise 'error_intercepted'
@context.storage.put_msg(
'error_intercepted',
'error' => herr, 'wfid' => wfid, 'fei' => fei, 'msg' => msg)
rescue => e
raise e unless @context.worker
@context.worker.send(
:handle_step_error,
e,
{ 'action' => 'error_intercepted',
'error' => deflate(err, fei),
'fei' => fei,
'wfid' => wfid,
'msg' => msg })
end | ruby | {
"resource": ""
} |
q20192 | Ruote.ErrorHandler.deflate | train | def deflate(err, fei, fexp=nil)
return err unless err.respond_to?(:backtrace)
fexp ||=
Ruote::Exp::FlowExpression.dummy('fei' => fei, 'original_tree' => nil)
fexp.deflate(err)
end | ruby | {
"resource": ""
} |
q20193 | Ruote::Exp.ConcurrenceExpression.expected_count | train | def expected_count
if h.ccount.nil?
count_list_size
elsif h.ccount >= 0
[ h.ccount, count_list_size ].min
else # all but 1, 2, ...
i = count_list_size + h.ccount
i < 1 ? 1 : i
end
end | ruby | {
"resource": ""
} |
q20194 | Ruote::Exp.ConcurrenceExpression.merge_workitems | train | def merge_workitems(workitems, merge_type)
workitems.inject(nil) do |t, wi|
Ruote.merge_workitem(workitem_index(wi), t, wi, merge_type)
end
end | ruby | {
"resource": ""
} |
q20195 | Ruote.Workitem.as_json | train | def as_json(pretty=false)
pretty ? Rufus::Json.pretty_encode(@h) : Rufus::Json.encode(@h)
end | ruby | {
"resource": ""
} |
q20196 | Ruote.Workitem.remove_tag | train | def remove_tag(tag)
# it's a bit convoluted... trying to cope with potential inconsistencies
#
# normally, it should only be a tags.pop(), but since user have
# access to the workitem and its fields... better be safe than sorry
tags = (h.fields['__tags__'] || [])
if index = tags.rindex(tag)
tags.delete_at(index)
end
h.fields['__left_tag__'] = tag
end | ruby | {
"resource": ""
} |
q20197 | Ruote.DispatchPool.dispatch | train | def dispatch(msg)
participant = @context.plist.lookup(
msg['participant'] || msg['participant_name'], msg['workitem'])
if
@context['participant_threads_enabled'] == false ||
do_not_thread?(participant, msg)
then
do_dispatch(participant, msg)
else
do_threaded_dispatch(participant, msg)
end
end | ruby | {
"resource": ""
} |
q20198 | Ruote.DispatchPool.do_dispatch | train | def do_dispatch(participant, msg)
do_raw_dispatch(participant, msg)
rescue => err
@context.error_handler.msg_handle(msg, err)
end | ruby | {
"resource": ""
} |
q20199 | Ruote.DispatchPool.do_threaded_dispatch | train | def do_threaded_dispatch(participant, msg)
msg = Rufus::Json.dup(msg)
#
# the thread gets its own copy of the message
# (especially important if the main thread does something with
# the message 'during' the dispatch)
# Maybe at some point a limit on the number of dispatch threads
# would be OK.
# Or maybe it's the job of an extension / subclass
Thread.new { do_dispatch(participant, msg) }
end | ruby | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.