_id stringlengths 2 6 | title stringlengths 9 130 | partition stringclasses 3 values | text stringlengths 66 10.5k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
q13200 | Mongo.Collection.update_one | train | def update_one(filter, update, options = {})
find(filter, options).update_one(update, options)
end | ruby | {
"resource": ""
} |
q13201 | Mongo.Collection.find_one_and_update | train | def find_one_and_update(filter, update, options = {})
find(filter, options).find_one_and_update(update, options)
end | ruby | {
"resource": ""
} |
q13202 | Mongo.Collection.find_one_and_replace | train | def find_one_and_replace(filter, replacement, options = {})
find(filter, options).find_one_and_update(replacement, options)
end | ruby | {
"resource": ""
} |
q13203 | Katello.ApplicationController.render_bad_parameters | train | def render_bad_parameters(*args)
default_message = if request.xhr?
_('Invalid parameters sent in the request for this operation. Please contact a system administrator.')
else
_('Invalid parameters sent. You may have mistyped the address. If you continue having trouble with this, please contact an Administrator.')
end
exception = args.find { |o| o.is_a? Exception }
message = args.find { |o| o.is_a? String } || exception.try(:message) || default_message
status = if exception && exception.respond_to?(:status_code)
exception.status_code
else
400
end
if exception
log_exception exception
else
Rails.logger.warn message
end
respond_to do |format|
format.html do
render :template => 'common/400', :layout => !request.xhr?, :status => status,
:locals => {:message => message}
end
format.atom { head status }
format.xml { head status }
format.json { head status }
end
User.current = nil
end | ruby | {
"resource": ""
} |
q13204 | Katello.Api::V2::SyncController.find_object | train | def find_object
if params.key?(:product_id)
@obj = find_product
elsif params.key?(:repository_id)
@obj = find_repository
end
fail HttpErrors::NotFound, N_("Couldn't find subject of synchronization") if @obj.nil?
@obj
end | ruby | {
"resource": ""
} |
q13205 | Katello.KTEnvironment.insert_successor | train | def insert_successor(create_params, path)
self.class.transaction do
new_successor = self.class.create!(create_params)
if library?
if path
old_successor = path.first
old_successor.prior = new_successor
end
save_successor new_successor
elsif successor.nil?
save_successor new_successor
else
old_successor = successor
old_successor.prior = new_successor
save_successor new_successor
end
fail HttpErrors::UnprocessableEntity, _('An environment is missing a prior') unless all_have_prior?
new_successor
end
end | ruby | {
"resource": ""
} |
q13206 | Katello.KTEnvironment.full_path | train | def full_path
p = self
until p.prior.nil? || p.prior.library
p = p.prior
end
p.prior.nil? ? p.path : [p.prior] + p.path
end | ruby | {
"resource": ""
} |
q13207 | Katello.HostsAndHostgroupsHelper.content_options | train | def content_options(host, selected_id, object_type, options = {})
include_blank = options.fetch(:include_blank, nil)
include_blank = '<option></option>' if include_blank == true #check for true specifically
orgs = relevant_organizations(host)
all_options = []
orgs.each do |org|
content_object_options = ""
accessible_content_objects = if object_type == :lifecycle_environment
accessible_lifecycle_environments(org, host)
elsif object_type == :content_source
accessible_content_proxies(host)
end
accessible_content_objects.each do |content_object|
selected = selected_id == content_object.id ? 'selected' : ''
content_object_options << %(<option value="#{content_object.id}" class="kt-env" #{selected}>#{h(content_object.name)}</option>)
end
if orgs.count > 1
all_options << %(<optgroup label="#{org.name}">#{content_object_options}</optgroup>)
else
all_options << content_object_options
end
end
all_options = all_options.join
all_options.insert(0, include_blank) if include_blank
all_options.html_safe
end | ruby | {
"resource": ""
} |
q13208 | Katello.SyncManagementController.sync_repos | train | def sync_repos(repo_ids)
collected = []
repos = Repository.where(:id => repo_ids).syncable
repos.each do |repo|
if latest_task(repo).try(:state) != 'running'
ForemanTasks.async_task(::Actions::Katello::Repository::Sync, repo)
end
collected << format_sync_progress(repo)
end
collected
end | ruby | {
"resource": ""
} |
q13209 | Katello.Repository.dynflow_handled_last_sync? | train | def dynflow_handled_last_sync?(pulp_task_id)
task = ForemanTasks::Task::DynflowTask.for_action(::Actions::Katello::Repository::Sync).
for_resource(self).order(:started_at).last
return task && task.main_action.pulp_task_id == pulp_task_id
end | ruby | {
"resource": ""
} |
q13210 | Katello.Repository.destroyable? | train | def destroyable?
if self.environment.try(:library?) && self.content_view.default?
if self.environment.organization.being_deleted?
return true
elsif self.custom? && self.deletable?
return true
elsif !self.custom? && self.redhat_deletable?
return true
else
errors.add(:base, _("Repository cannot be deleted since it has already been included in a published Content View. " \
"Please delete all Content View versions containing this repository before attempting to delete it."))
return false
end
end
return true
end | ruby | {
"resource": ""
} |
q13211 | Katello.ContentView.component_ids= | train | def component_ids=(component_version_ids_to_set)
content_view_components.destroy_all
component_version_ids_to_set.each do |content_view_version_id|
cvv = ContentViewVersion.find(content_view_version_id)
content_view_components.build(:content_view_version => cvv,
:latest => false,
:composite_content_view => self)
end
end | ruby | {
"resource": ""
} |
q13212 | Katello.ContentView.all_version_library_instances | train | def all_version_library_instances
all_repos = all_version_repos.where(:library_instance_id => nil).pluck("#{Katello::Repository.table_name}.id")
all_repos += all_version_repos.pluck(:library_instance_id)
Repository.where(:id => all_repos)
end | ruby | {
"resource": ""
} |
q13213 | Katello.ContentView.add_environment | train | def add_environment(env, version)
if self.content_view_environments.where(:environment_id => env.id).empty?
label = generate_cp_environment_label(env)
ContentViewEnvironment.create!(:name => label,
:label => label,
:cp_id => generate_cp_environment_id(env),
:environment_id => env.id,
:content_view => self,
:content_view_version => version
)
end
end | ruby | {
"resource": ""
} |
q13214 | Katello.ContentView.remove_environment | train | def remove_environment(env)
# Do not remove the content view environment, if there is still a view
# version in the environment.
if self.versions.in_environment(env).blank?
view_env = self.content_view_environments.where(:environment_id => env.id)
view_env.first.destroy unless view_env.blank?
end
end | ruby | {
"resource": ""
} |
q13215 | Katello.PulpSyncProgress.format_errors | train | def format_errors(details)
errors = {messages: [], details: []}
if details && !details.key?(:finished_count)
details.each do |step, report|
if step == "content"
parse_content(report, errors)
else
parse_generic(report, errors)
end
end
end
errors
end | ruby | {
"resource": ""
} |
q13216 | ThreeScaleToolbox.Configuration.read | train | def read
@store.transaction(true) do
@store.roots.each_with_object({}) do |key, obj|
obj[key] = @store[key]
end
end
end | ruby | {
"resource": ""
} |
q13217 | ThreeScaleToolbox.Remotes.update | train | def update
config.update(:remotes) do |rmts|
yield(rmts || {}).tap do |new_rmts|
raise_invalid unless validate(new_rmts)
end
end
end | ruby | {
"resource": ""
} |
q13218 | Rdkafka.Consumer.subscribe | train | def subscribe(*topics)
# Create topic partition list with topics and no partition set
tpl = TopicPartitionList.new_native_tpl(topics.length)
topics.each do |topic|
Rdkafka::Bindings.rd_kafka_topic_partition_list_add(
tpl,
topic,
-1
)
end
# Subscribe to topic partition list and check this was successful
response = Rdkafka::Bindings.rd_kafka_subscribe(@native_kafka, tpl)
if response != 0
raise Rdkafka::RdkafkaError.new(response, "Error subscribing to '#{topics.join(', ')}'")
end
end | ruby | {
"resource": ""
} |
q13219 | Rdkafka.Consumer.unsubscribe | train | def unsubscribe
response = Rdkafka::Bindings.rd_kafka_unsubscribe(@native_kafka)
if response != 0
raise Rdkafka::RdkafkaError.new(response)
end
end | ruby | {
"resource": ""
} |
q13220 | Rdkafka.Consumer.pause | train | def pause(list)
unless list.is_a?(TopicPartitionList)
raise TypeError.new("list has to be a TopicPartitionList")
end
tpl = list.to_native_tpl
response = Rdkafka::Bindings.rd_kafka_pause_partitions(@native_kafka, tpl)
if response != 0
list = TopicPartitionList.from_native_tpl(tpl)
raise Rdkafka::RdkafkaTopicPartitionListError.new(response, list, "Error pausing '#{list.to_h}'")
end
end | ruby | {
"resource": ""
} |
q13221 | Rdkafka.Consumer.resume | train | def resume(list)
unless list.is_a?(TopicPartitionList)
raise TypeError.new("list has to be a TopicPartitionList")
end
tpl = list.to_native_tpl
response = Rdkafka::Bindings.rd_kafka_resume_partitions(@native_kafka, tpl)
if response != 0
raise Rdkafka::RdkafkaError.new(response, "Error resume '#{list.to_h}'")
end
end | ruby | {
"resource": ""
} |
q13222 | Rdkafka.Consumer.subscription | train | def subscription
tpl = FFI::MemoryPointer.new(:pointer)
response = Rdkafka::Bindings.rd_kafka_subscription(@native_kafka, tpl)
if response != 0
raise Rdkafka::RdkafkaError.new(response)
end
tpl = tpl.read(:pointer).tap { |it| it.autorelease = false }
begin
Rdkafka::Consumer::TopicPartitionList.from_native_tpl(tpl)
ensure
Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
end
end | ruby | {
"resource": ""
} |
q13223 | Rdkafka.Consumer.assign | train | def assign(list)
unless list.is_a?(TopicPartitionList)
raise TypeError.new("list has to be a TopicPartitionList")
end
tpl = list.to_native_tpl
response = Rdkafka::Bindings.rd_kafka_assign(@native_kafka, tpl)
if response != 0
raise Rdkafka::RdkafkaError.new(response, "Error assigning '#{list.to_h}'")
end
end | ruby | {
"resource": ""
} |
q13224 | Rdkafka.Consumer.committed | train | def committed(list=nil, timeout_ms=1200)
if list.nil?
list = assignment
elsif !list.is_a?(TopicPartitionList)
raise TypeError.new("list has to be nil or a TopicPartitionList")
end
tpl = list.to_native_tpl
response = Rdkafka::Bindings.rd_kafka_committed(@native_kafka, tpl, timeout_ms)
if response != 0
raise Rdkafka::RdkafkaError.new(response)
end
TopicPartitionList.from_native_tpl(tpl)
end | ruby | {
"resource": ""
} |
q13225 | Rdkafka.Consumer.store_offset | train | def store_offset(message)
# rd_kafka_offset_store is one of the few calls that does not support
# a string as the topic, so create a native topic for it.
native_topic = Rdkafka::Bindings.rd_kafka_topic_new(
@native_kafka,
message.topic,
nil
)
response = Rdkafka::Bindings.rd_kafka_offset_store(
native_topic,
message.partition,
message.offset
)
if response != 0
raise Rdkafka::RdkafkaError.new(response)
end
ensure
if native_topic && !native_topic.null?
Rdkafka::Bindings.rd_kafka_topic_destroy(native_topic)
end
end | ruby | {
"resource": ""
} |
q13226 | Rdkafka.Consumer.commit | train | def commit(list=nil, async=false)
if !list.nil? && !list.is_a?(TopicPartitionList)
raise TypeError.new("list has to be nil or a TopicPartitionList")
end
tpl = if list
list.to_native_tpl
else
nil
end
response = Rdkafka::Bindings.rd_kafka_commit(@native_kafka, tpl, async)
if response != 0
raise Rdkafka::RdkafkaError.new(response)
end
end | ruby | {
"resource": ""
} |
q13227 | Rdkafka.Consumer.poll | train | def poll(timeout_ms)
message_ptr = Rdkafka::Bindings.rd_kafka_consumer_poll(@native_kafka, timeout_ms)
if message_ptr.null?
nil
else
# Create struct wrapper
native_message = Rdkafka::Bindings::Message.new(message_ptr)
# Raise error if needed
if native_message[:err] != 0
raise Rdkafka::RdkafkaError.new(native_message[:err])
end
# Create a message to pass out
Rdkafka::Consumer::Message.new(native_message)
end
ensure
# Clean up rdkafka message if there is one
if !message_ptr.nil? && !message_ptr.null?
Rdkafka::Bindings.rd_kafka_message_destroy(message_ptr)
end
end | ruby | {
"resource": ""
} |
q13228 | Rdkafka.Config.consumer | train | def consumer
opaque = Opaque.new
config = native_config(opaque)
if @consumer_rebalance_listener
opaque.consumer_rebalance_listener = @consumer_rebalance_listener
Rdkafka::Bindings.rd_kafka_conf_set_rebalance_cb(config, Rdkafka::Bindings::RebalanceCallback)
end
kafka = native_kafka(config, :rd_kafka_consumer)
# Redirect the main queue to the consumer
Rdkafka::Bindings.rd_kafka_poll_set_consumer(kafka)
# Return consumer with Kafka client
Rdkafka::Consumer.new(kafka)
end | ruby | {
"resource": ""
} |
q13229 | Rdkafka.Config.producer | train | def producer
# Create opaque
opaque = Opaque.new
# Create Kafka config
config = native_config(opaque)
# Set callback to receive delivery reports on config
Rdkafka::Bindings.rd_kafka_conf_set_dr_msg_cb(config, Rdkafka::Bindings::DeliveryCallback)
# Return producer with Kafka client
Rdkafka::Producer.new(native_kafka(config, :rd_kafka_producer)).tap do |producer|
opaque.producer = producer
end
end | ruby | {
"resource": ""
} |
q13230 | Rdkafka.Config.native_config | train | def native_config(opaque=nil)
Rdkafka::Bindings.rd_kafka_conf_new.tap do |config|
# Create config
@config_hash.merge(REQUIRED_CONFIG).each do |key, value|
error_buffer = FFI::MemoryPointer.from_string(" " * 256)
result = Rdkafka::Bindings.rd_kafka_conf_set(
config,
key.to_s,
value.to_s,
error_buffer,
256
)
unless result == :config_ok
raise ConfigError.new(error_buffer.read_string)
end
end
# Set opaque pointer that's used as a proxy for callbacks
if opaque
pointer = ::FFI::Pointer.new(:pointer, opaque.object_id)
Rdkafka::Bindings.rd_kafka_conf_set_opaque(config, pointer)
# Store opaque with the pointer as key. We use this approach instead
# of trying to convert the pointer to a Ruby object because there is
# no risk of a segfault this way.
Rdkafka::Config.opaques[pointer.to_i] = opaque
end
# Set log callback
Rdkafka::Bindings.rd_kafka_conf_set_log_cb(config, Rdkafka::Bindings::LogCallback)
# Set stats callback
Rdkafka::Bindings.rd_kafka_conf_set_stats_cb(config, Rdkafka::Bindings::StatsCallback)
end
end | ruby | {
"resource": ""
} |
q13231 | PDK.Util.find_upwards | train | def find_upwards(target, start_dir = nil)
previous = nil
current = File.expand_path(start_dir || Dir.pwd)
until !File.directory?(current) || current == previous
filename = File.join(current, target)
return filename if File.file?(filename)
previous = current
current = File.expand_path('..', current)
end
end | ruby | {
"resource": ""
} |
q13232 | PDK.Util.make_tmpdir_name | train | def make_tmpdir_name(base)
t = Time.now.strftime('%Y%m%d')
name = "#{base}#{t}-#{Process.pid}-#{rand(0x100000000).to_s(36)}"
File.join(Dir.tmpdir, name)
end | ruby | {
"resource": ""
} |
q13233 | PDK.Util.canonical_path | train | def canonical_path(path)
if Gem.win_platform?
unless File.exist?(path)
raise PDK::CLI::FatalError, _("Cannot resolve a full path to '%{path}', as it does not currently exist.") % { path: path }
end
PDK::Util::Windows::File.get_long_pathname(path)
else
File.expand_path(path)
end
end | ruby | {
"resource": ""
} |
q13234 | PDK.Util.cachedir | train | def cachedir
if Gem.win_platform?
File.join(ENV['LOCALAPPDATA'], 'PDK', 'cache')
else
File.join(Dir.home, '.pdk', 'cache')
end
end | ruby | {
"resource": ""
} |
q13235 | PDK.Util.module_root | train | def module_root
metadata_path = find_upwards('metadata.json')
if metadata_path
File.dirname(metadata_path)
elsif in_module_root?
Dir.pwd
else
nil
end
end | ruby | {
"resource": ""
} |
q13236 | PDK.Util.find_valid_json_in | train | def find_valid_json_in(text, opts = {})
break_on_first = opts.key?(:break_on_first) ? opts[:break_on_first] : true
json_result = break_on_first ? nil : []
text.scan(%r{\{(?:[^{}]|(?:\g<0>))*\}}x) do |str|
begin
if break_on_first
json_result = JSON.parse(str)
break
else
json_result.push(JSON.parse(str))
end
rescue JSON::ParserError
next
end
end
json_result
end | ruby | {
"resource": ""
} |
q13237 | PDK.Util.targets_relative_to_pwd | train | def targets_relative_to_pwd(targets)
targets.map do |t|
if Pathname.new(t).absolute?
Pathname.new(t).relative_path_from(Pathname.pwd)
else
t
end
end
end | ruby | {
"resource": ""
} |
q13238 | PDK.Report.write_junit | train | def write_junit(target = self.class.default_target)
# Open a File Object for IO if target is a string containing a filename or path
target = File.open(target, 'w') if target.is_a? String
document = REXML::Document.new
document << REXML::XMLDecl.new
testsuites = REXML::Element.new('testsuites')
id = 0
events.each do |testsuite_name, testcases|
testsuite = REXML::Element.new('testsuite')
testsuite.attributes['name'] = testsuite_name
testsuite.attributes['tests'] = testcases.length
testsuite.attributes['errors'] = testcases.select(&:error?).length
testsuite.attributes['failures'] = testcases.select(&:failure?).length
testsuite.attributes['skipped'] = testcases.select(&:skipped?).length
testsuite.attributes['time'] = 0
testsuite.attributes['timestamp'] = Time.now.strftime('%Y-%m-%dT%H:%M:%S')
testsuite.attributes['hostname'] = Socket.gethostname
testsuite.attributes['id'] = id
testsuite.attributes['package'] = testsuite_name
testsuite.add_element('properties')
testcases.each { |r| testsuite.elements << r.to_junit }
testsuite.add_element('system-out')
testsuite.add_element('system-err')
testsuites.elements << testsuite
id += 1
end
document.elements << testsuites
document.write(target, 2)
ensure
target.close if target.is_a? File
end | ruby | {
"resource": ""
} |
q13239 | PDK.Report.write_text | train | def write_text(target = self.class.default_target)
# Open a File Object for IO if target is a string containing a filename or path
target = File.open(target, 'w') if target.is_a? String
coverage_report = nil
events.each do |_tool, tool_events|
tool_events.each do |event|
if event.rspec_puppet_coverage?
coverage_report = event.to_text
else
target.puts(event.to_text) unless event.pass?
end
end
end
ensure
target.puts "\n#{coverage_report}" if coverage_report
target.close if target.is_a? File
end | ruby | {
"resource": ""
} |
q13240 | PDK.AnswerFile.update! | train | def update!(new_answers = {})
unless new_answers.is_a?(Hash)
raise PDK::CLI::FatalError, _('Answer file can be updated only with a Hash')
end
answers.merge!(new_answers)
save_to_disk
end | ruby | {
"resource": ""
} |
q13241 | PDK.AnswerFile.read_from_disk | train | def read_from_disk
return {} if !File.file?(answer_file_path) || File.zero?(answer_file_path)
unless File.readable?(answer_file_path)
raise PDK::CLI::FatalError, _("Unable to open '%{file}' for reading") % {
file: answer_file_path,
}
end
answers = JSON.parse(File.read(answer_file_path))
if answers.is_a?(Hash)
answers
else
PDK.logger.warn _("Answer file '%{path}' did not contain a valid set of answers, recreating it") % {
path: answer_file_path,
}
{}
end
rescue JSON::JSONError
PDK.logger.warn _("Answer file '%{path}' did not contain valid JSON, recreating it") % {
path: answer_file_path,
}
{}
end | ruby | {
"resource": ""
} |
q13242 | PDK.AnswerFile.save_to_disk | train | def save_to_disk
FileUtils.mkdir_p(File.dirname(answer_file_path))
write_file(answer_file_path, JSON.pretty_generate(answers))
rescue SystemCallError, IOError => e
raise PDK::CLI::FatalError, _("Unable to write '%{file}': %{msg}") % {
file: answer_file_path,
msg: e.message,
}
end | ruby | {
"resource": ""
} |
q13243 | PDK.TemplateFile.template_content | train | def template_content
if File.file?(@template_file) && File.readable?(@template_file)
return File.read(@template_file)
end
raise ArgumentError, _("'%{template}' is not a readable file") % { template: @template_file }
end | ruby | {
"resource": ""
} |
q13244 | PDK.TemplateFile.render_erb | train | def render_erb
renderer = ERB.new(template_content, nil, '-')
renderer.filename = @template_file
renderer.result(binding)
end | ruby | {
"resource": ""
} |
q13245 | Loofah.Scrubber.append_attribute | train | def append_attribute(node, attribute, value)
current_value = node.get_attribute(attribute) || ''
current_values = current_value.split(/\s+/)
updated_value = current_values | [value]
node.set_attribute(attribute, updated_value.join(' '))
end | ruby | {
"resource": ""
} |
q13246 | Loofah.TextBehavior.text | train | def text(options={})
result = serialize_root.children.inner_text rescue ""
if options[:encode_special_chars] == false
result # possibly dangerous if rendered in a browser
else
encode_special_chars result
end
end | ruby | {
"resource": ""
} |
q13247 | Commander.Command.option | train | def option(*args, &block)
switches, description = Runner.separate_switches_from_description(*args)
proc = block || option_proc(switches)
@options << {
args: args,
proc: proc,
switches: switches,
description: description,
}
end | ruby | {
"resource": ""
} |
q13248 | Commander.Command.call | train | def call(args = [])
object, meth = @when_called[0, 2]
meth ||= :call
options = proxy_option_struct
case object
when Proc then object.call(args, options)
when Class then meth != :call ? object.new.send(meth, args, options) : object.new(args, options)
else object.send(meth, args, options) if object
end
end | ruby | {
"resource": ""
} |
q13249 | Commander.Runner.run! | train | def run!
trace = @always_trace || false
require_program :version, :description
trap('INT') { abort program(:int_message) } if program(:int_message)
trap('INT') { program(:int_block).call } if program(:int_block)
global_option('-h', '--help', 'Display help documentation') do
args = @args - %w(-h --help)
command(:help).run(*args)
return
end
global_option('-v', '--version', 'Display version information') do
say version
return
end
global_option('-t', '--trace', 'Display backtrace when an error occurs') { trace = true } unless @never_trace || @always_trace
parse_global_options
remove_global_options options, @args
if trace
run_active_command
else
begin
run_active_command
rescue InvalidCommandError => e
abort "#{e}. Use --help for more information"
rescue \
OptionParser::InvalidOption,
OptionParser::InvalidArgument,
OptionParser::MissingArgument => e
abort e.to_s
rescue => e
if @never_trace
abort "error: #{e}."
else
abort "error: #{e}. Use --trace to view backtrace"
end
end
end
end | ruby | {
"resource": ""
} |
q13250 | Commander.Runner.program | train | def program(key, *args, &block)
if key == :help && !args.empty?
@program[:help] ||= {}
@program[:help][args.first] = args.at(1)
elsif key == :help_formatter && !args.empty?
@program[key] = (@help_formatter_aliases[args.first] || args.first)
elsif block
@program[key] = block
else
unless args.empty?
@program[key] = args.count == 1 ? args[0] : args
end
@program[key]
end
end | ruby | {
"resource": ""
} |
q13251 | Commander.Runner.alias_command | train | def alias_command(alias_name, name, *args)
@commands[alias_name.to_s] = command name
@aliases[alias_name.to_s] = args
end | ruby | {
"resource": ""
} |
q13252 | Commander.Runner.args_without_command_name | train | def args_without_command_name
removed = []
parts = command_name_from_args.split rescue []
@args.dup.delete_if do |arg|
removed << arg if parts.include?(arg) && !removed.include?(arg)
end
end | ruby | {
"resource": ""
} |
q13253 | Commander.Runner.remove_global_options | train | def remove_global_options(options, args)
# TODO: refactor with flipflop, please TJ ! have time to refactor me !
options.each do |option|
switches = option[:switches].dup
next if switches.empty?
if (switch_has_arg = switches.any? { |s| s =~ /[ =]/ })
switches.map! { |s| s[0, s.index('=') || s.index(' ') || s.length] }
end
switches = expand_optionally_negative_switches(switches)
past_switch, arg_removed = false, false
args.delete_if do |arg|
if switches.any? { |s| s[0, arg.length] == arg }
arg_removed = !switch_has_arg
past_switch = true
elsif past_switch && !arg_removed && arg !~ /^-/
arg_removed = true
else
arg_removed = true
false
end
end
end
end | ruby | {
"resource": ""
} |
q13254 | Commander.Runner.parse_global_options | train | def parse_global_options
parser = options.inject(OptionParser.new) do |options, option|
options.on(*option[:args], &global_option_proc(option[:switches], &option[:proc]))
end
options = @args.dup
begin
parser.parse!(options)
rescue OptionParser::InvalidOption => e
# Remove the offending args and retry.
options = options.reject { |o| e.args.include?(o) }
retry
end
end | ruby | {
"resource": ""
} |
q13255 | Commander.Runner.require_program | train | def require_program(*keys)
keys.each do |key|
fail CommandError, "program #{key} required" if program(key).nil? || program(key).empty?
end
end | ruby | {
"resource": ""
} |
q13256 | Commander.Runner.run_active_command | train | def run_active_command
require_valid_command
if alias? command_name_from_args
active_command.run(*(@aliases[command_name_from_args.to_s] + args_without_command_name))
else
active_command.run(*args_without_command_name)
end
end | ruby | {
"resource": ""
} |
q13257 | Commander.UI.ask_editor | train | def ask_editor(input = nil, preferred_editor = nil)
editor = available_editor preferred_editor
program = Commander::Runner.instance.program(:name).downcase rescue 'commander'
tmpfile = Tempfile.new program
begin
tmpfile.write input if input
tmpfile.close
system("#{editor} #{tmpfile.path.shellescape}") ? IO.read(tmpfile.path) : nil
ensure
tmpfile.unlink
end
end | ruby | {
"resource": ""
} |
q13258 | Commander.UI.enable_paging | train | def enable_paging
return unless $stdout.tty?
return unless Process.respond_to? :fork
read, write = IO.pipe
# Kernel.fork is not supported on all platforms and configurations.
# As of Ruby 1.9, `Process.respond_to? :fork` should return false on
# configurations that don't support it, but versions before 1.9 don't
# seem to do this reliably and instead raise a NotImplementedError
# (which is rescued below).
if Kernel.fork
$stdin.reopen read
write.close
read.close
Kernel.select [$stdin]
ENV['LESS'] = 'FSRX' unless ENV.key? 'LESS'
pager = ENV['PAGER'] || 'less'
exec pager rescue exec '/bin/sh', '-c', pager
else
# subprocess
$stdout.reopen write
$stderr.reopen write if $stderr.tty?
write.close
read.close
end
rescue NotImplementedError
ensure
write.close if write && !write.closed?
read.close if read && !read.closed?
end | ruby | {
"resource": ""
} |
q13259 | Commander.UI.progress | train | def progress(arr, options = {})
bar = ProgressBar.new arr.length, options
bar.show
arr.each { |v| bar.increment yield(v) }
end | ruby | {
"resource": ""
} |
q13260 | Commander.UI.replace_tokens | train | def replace_tokens(str, hash) #:nodoc:
hash.inject(str) do |string, (key, value)|
string.gsub ":#{key}", value.to_s
end
end | ruby | {
"resource": ""
} |
q13261 | Ufo.Tasks::Register.rubyize_format | train | def rubyize_format(original_data)
data = original_data.to_snake_keys.deep_symbolize_keys
definitions = data[:container_definitions]
definitions.each_with_index do |definition, i|
next unless definition[:log_configuration]
options = definition[:log_configuration][:options]
next unless options
# LogConfiguration options do not get transformed and keep their original
# structure:
# https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/ECS/Types/ContainerDefinition.html
original_definition = original_data["containerDefinitions"][i]
definition[:log_configuration][:options] = original_definition["logConfiguration"]["options"]
end
data
end | ruby | {
"resource": ""
} |
q13262 | Ufo.Ps.display_scale_help | train | def display_scale_help
return if service.running_count >= service.desired_count
events = service["events"][0..3] # only check most recent 4 messages
error_event = events.find do |e|
e.message =~ /was unable to place a task/
end
return unless error_event
puts "There is an issue scaling the #{@service.color(:green)} service to #{service.desired_count}. Here's the error:"
puts error_event.message.color(:red)
if service.launch_type == "EC2"
puts "If AutoScaling is set up for the container instances, it can take a little time to add additional instances. You'll see this message until the capacity is added."
end
end | ruby | {
"resource": ""
} |
q13263 | Ufo.Setting.ufo_env | train | def ufo_env
settings = YAML.load_file("#{Ufo.root}/.ufo/settings.yml")
env = settings.find do |_env, section|
section ||= {}
ENV['AWS_PROFILE'] && ENV['AWS_PROFILE'] == section['aws_profile']
end
ufo_env = env.first if env
ufo_env = ENV['UFO_ENV'] if ENV['UFO_ENV'] # highest precedence
ufo_env || 'development'
end | ruby | {
"resource": ""
} |
q13264 | Ufo.DSL.evaluate_template_definitions | train | def evaluate_template_definitions
source_code = IO.read(@template_definitions_path)
begin
instance_eval(source_code, @template_definitions_path)
rescue Exception => e
if e.class == SystemExit # allow exit to happen normally
raise
else
task_definition_error(e)
puts "\nFull error:"
raise
end
end
end | ruby | {
"resource": ""
} |
q13265 | Ufo.DSL.task_definition_error | train | def task_definition_error(e)
error_info = e.backtrace.first
path, line_no, _ = error_info.split(':')
line_no = line_no.to_i
puts "Error evaluating #{path}:".color(:red)
puts e.message
puts "Here's the line in #{path} with the error:\n\n"
contents = IO.read(path)
content_lines = contents.split("\n")
context = 5 # lines of context
top, bottom = [line_no-context-1, 0].max, line_no+context-1
spacing = content_lines.size.to_s.size
content_lines[top..bottom].each_with_index do |line_content, index|
line_number = top+index+1
if line_number == line_no
printf("%#{spacing}d %s\n".color(:red), line_number, line_content)
else
printf("%#{spacing}d %s\n", line_number, line_content)
end
end
end | ruby | {
"resource": ""
} |
q13266 | Ufo.Stack.template_body | train | def template_body
custom_template = "#{Ufo.root}/.ufo/settings/cfn/stack.yml"
path = if File.exist?(custom_template)
custom_template
else
# built-in default
File.expand_path("../cfn/stack.yml", File.dirname(__FILE__))
end
RenderMePretty.result(path, context: context.scope)
end | ruby | {
"resource": ""
} |
q13267 | Ufo.Stack.save_template | train | def save_template
path = "/tmp/ufo/#{@stack_name}/stack.yml"
FileUtils.mkdir_p(File.dirname(path))
IO.write(path, template_body)
puts "Generated template saved at: #{path}"
path = "/tmp/ufo/#{@stack_name}/parameters.yml"
IO.write(path, JSON.pretty_generate(parameters))
puts "Generated parameters saved at: #{path}"
end | ruby | {
"resource": ""
} |
q13268 | Ufo.Ship.stop_old_tasks | train | def stop_old_tasks
# only works when deployment is blocking
return unless @options[:wait]
Thread.new do
stop = Ufo::Stop.new(@service, @options.merge(mute: true))
while true
stop.log "checking for old tasks and waiting for 10 seconds"
stop.run
sleep 10
end
end
end | ruby | {
"resource": ""
} |
q13269 | Ufo.Info.load_balancer | train | def load_balancer(service)
load_balancer = service.load_balancers.first
return unless load_balancer
resp = elb.describe_target_groups(
target_group_arns: [load_balancer.target_group_arn]
)
target_group = resp.target_groups.first
load_balancer_arn = target_group.load_balancer_arns.first # assume first only
resp = elb.describe_load_balancers(load_balancer_arns: [load_balancer_arn])
resp.load_balancers.first
end | ruby | {
"resource": ""
} |
q13270 | Ufo.Completer.all_commands | train | def all_commands
commands = @command_class.all_commands.reject do |k,v|
v.is_a?(Thor::HiddenCommand)
end
commands.keys
end | ruby | {
"resource": ""
} |
q13271 | Ufo.Status.run | train | def run
unless stack_exists?(@stack_name)
puts "The stack #{@stack_name.color(:green)} does not exist."
return
end
resp = cloudformation.describe_stacks(stack_name: @stack_name)
stack = resp.stacks.first
puts "The current status for the stack #{@stack_name.color(:green)} is #{stack.stack_status.color(:green)}"
status_poller = Stack::Status.new(@stack_name)
if stack.stack_status =~ /_IN_PROGRESS$/
puts "Stack events (tailing):"
# tail all events until done
status_poller.hide_time_took = true
status_poller.wait
else
puts "Stack events:"
# show the last events that was user initiated
status_poller.refresh_events
status_poller.show_events(true)
end
end | ruby | {
"resource": ""
} |
q13272 | Ufo.Task.adjust_fargate_options | train | def adjust_fargate_options(options)
task_def = recent_task_definition
return options unless task_def[:network_mode] == "awsvpc"
awsvpc_conf = { subnets: network[:ecs_subnets] }
if task_def[:requires_compatibilities] == ["FARGATE"]
awsvpc_conf[:assign_public_ip] = "ENABLED"
options[:launch_type] = "FARGATE"
end
options[:network_configuration] = { awsvpc_configuration: awsvpc_conf }
options
end | ruby | {
"resource": ""
} |
q13273 | Ufo.Task.adjust_security_groups | train | def adjust_security_groups(options)
return options unless options[:network_configuration] &&
options[:network_configuration][:awsvpc_configuration]
awsvpc_conf = options[:network_configuration][:awsvpc_configuration]
security_groups = awsvpc_conf[:security_groups]
if [nil, '', 'nil'].include?(security_groups)
security_groups = []
end
if security_groups.empty?
fetch = Network::Fetch.new(network[:vpc])
sg = fetch.security_group_id
security_groups << sg
security_groups.uniq!
end
# override security groups
options[:network_configuration][:awsvpc_configuration][:security_groups] = security_groups
options
end | ruby | {
"resource": ""
} |
q13274 | ActiveDecorator.Decorator.decorate_association | train | def decorate_association(owner, target)
owner.is_a?(ActiveDecorator::Decorated) ? decorate(target) : target
end | ruby | {
"resource": ""
} |
q13275 | ActiveDecorator.Decorator.decorator_for | train | def decorator_for(model_class)
return @@decorators[model_class] if @@decorators.key? model_class
decorator_name = "#{model_class.name}#{ActiveDecorator.config.decorator_suffix}"
d = Object.const_get decorator_name, false
unless Class === d
d.send :include, ActiveDecorator::Helpers
@@decorators[model_class] = d
else
# Cache nil results
@@decorators[model_class] = nil
end
rescue NameError
if model_class.respond_to?(:base_class) && (model_class.base_class != model_class)
@@decorators[model_class] = decorator_for model_class.base_class
else
# Cache nil results
@@decorators[model_class] = nil
end
end | ruby | {
"resource": ""
} |
q13276 | Azure::Web::Mgmt::V2015_08_01.AppServiceCertificateOrders.delete | train | def delete(resource_group_name, certificate_order_name, custom_headers:nil)
response = delete_async(resource_group_name, certificate_order_name, custom_headers:custom_headers).value!
nil
end | ruby | {
"resource": ""
} |
q13277 | Azure::Web::Mgmt::V2015_08_01.AppServiceCertificateOrders.resend_email | train | def resend_email(resource_group_name, certificate_order_name, custom_headers:nil)
response = resend_email_async(resource_group_name, certificate_order_name, custom_headers:custom_headers).value!
nil
end | ruby | {
"resource": ""
} |
q13278 | Azure::Web::Mgmt::V2015_08_01.AppServiceCertificateOrders.retrieve_certificate_actions | train | def retrieve_certificate_actions(resource_group_name, name, custom_headers:nil)
response = retrieve_certificate_actions_async(resource_group_name, name, custom_headers:custom_headers).value!
response.body unless response.nil?
end | ruby | {
"resource": ""
} |
q13279 | Azure::Web::Mgmt::V2015_08_01.AppServiceCertificateOrders.retrieve_certificate_email_history | train | def retrieve_certificate_email_history(resource_group_name, name, custom_headers:nil)
response = retrieve_certificate_email_history_async(resource_group_name, name, custom_headers:custom_headers).value!
response.body unless response.nil?
end | ruby | {
"resource": ""
} |
q13280 | Azure::Web::Mgmt::V2015_08_01.AppServiceCertificateOrders.list_by_resource_group_next | train | def list_by_resource_group_next(next_page_link, custom_headers:nil)
response = list_by_resource_group_next_async(next_page_link, custom_headers:custom_headers).value!
response.body unless response.nil?
end | ruby | {
"resource": ""
} |
q13281 | Azure::Network::Mgmt::V2018_12_01.VirtualNetworkPeerings.get_with_http_info | train | def get_with_http_info(resource_group_name, virtual_network_name, virtual_network_peering_name, custom_headers:nil)
get_async(resource_group_name, virtual_network_name, virtual_network_peering_name, custom_headers:custom_headers).value!
end | ruby | {
"resource": ""
} |
q13282 | Azure::Network::Mgmt::V2018_12_01.VirtualNetworkPeerings.list | train | def list(resource_group_name, virtual_network_name, custom_headers:nil)
first_page = list_as_lazy(resource_group_name, virtual_network_name, custom_headers:custom_headers)
first_page.get_all_items
end | ruby | {
"resource": ""
} |
q13283 | MsRestAzure.AzureServiceClient.check_for_status_code_failure | train | def check_for_status_code_failure(azure_response)
fail MsRest::ValidationError, 'Azure response cannot be nil' if azure_response.nil?
fail MsRest::ValidationError, 'Azure response cannot have empty response object' if azure_response.response.nil?
fail MsRest::ValidationError, 'Azure response cannot have empty request object' if azure_response.request.nil?
status_code = azure_response.response.status
http_method = azure_response.request.method
fail AzureOperationError, "Unexpected polling status code from long running operation #{status_code}" unless status_code === 200 || status_code === 202 ||
(status_code === 201 && http_method === :put) ||
(status_code === 204 && (http_method === :delete || http_method === :post))
end | ruby | {
"resource": ""
} |
q13284 | MsRestAzure.AzureServiceClient.update_state_from_get_resource_operation | train | def update_state_from_get_resource_operation(request, polling_state, custom_deserialization_block)
result = get_async_with_custom_deserialization(request, custom_deserialization_block)
fail AzureOperationError, 'The response from long running operation does not contain a body' if result.response.body.nil? || result.response.body.empty?
# On non flattened resource, we should find provisioning_state inside 'properties'
if result.body.respond_to?(:properties) && result.body.properties.respond_to?(:provisioning_state) && !result.body.properties.provisioning_state.nil?
polling_state.status = result.body.properties.provisioning_state
# On flattened resource, we should find provisioning_state at the top level
elsif result.body.respond_to?(:provisioning_state) && !result.body.provisioning_state.nil?
polling_state.status = result.body.provisioning_state
else
polling_state.status = AsyncOperationStatus::SUCCESS_STATUS
end
error_data = CloudErrorData.new
error_data.code = polling_state.status
error_data.message = "Long running operation failed with status #{polling_state.status}"
polling_state.error_data = error_data
polling_state.update_response(result.response)
polling_state.request = result.request
polling_state.resource = result.body
end | ruby | {
"resource": ""
} |
q13285 | MsRestAzure.AzureServiceClient.update_state_from_location_header | train | def update_state_from_location_header(request, polling_state, custom_deserialization_block, final_state_via = FinalStateVia::DEFAULT)
result = get_async_with_custom_deserialization(request, custom_deserialization_block)
polling_state.update_response(result.response)
polling_state.request = result.request
status_code = result.response.status
http_method = request.method
if status_code === 202
polling_state.status = AsyncOperationStatus::IN_PROGRESS_STATUS
elsif status_code === 200 || (status_code === 201 && http_method === :put) ||
(status_code === 204 && (http_method === :delete || http_method === :post || http_method === :get))
polling_state.status = AsyncOperationStatus::SUCCESS_STATUS
error_data = CloudErrorData.new
error_data.code = polling_state.status
error_data.message = "Long running operation failed with status #{polling_state.status}"
polling_state.error_data = error_data
polling_state.resource = result.body
elsif final_state_via == FinalStateVia::LOCATION && status_code === 404 && http_method === :delete && !polling_state.azure_async_operation_header_link.nil? && !polling_state.location_header_link.nil?
polling_state.status = AsyncOperationStatus::SUCCESS_STATUS
else
fail AzureOperationError, "The response from long running operation does not have a valid status code. Method: #{http_method}, Status Code: #{status_code}"
end
end | ruby | {
"resource": ""
} |
q13286 | MsRestAzure.AzureServiceClient.update_state_from_azure_async_operation_header | train | def update_state_from_azure_async_operation_header(request, polling_state)
result = get_async_with_async_operation_deserialization(request)
fail AzureOperationError, 'The response from long running operation does not contain a body' if result.body.nil? || result.body.status.nil?
polling_state.status = result.body.status
polling_state.error_data = result.body.error
polling_state.response = result.response
polling_state.request = result.request
polling_state.resource = nil
polling_state
end | ruby | {
"resource": ""
} |
q13287 | Azure::RecoveryServicesSiteRecovery::Mgmt::V2016_08_10.ReplicationAlertSettings.create | train | def create(alert_setting_name, request, custom_headers:nil)
response = create_async(alert_setting_name, request, custom_headers:custom_headers).value!
response.body unless response.nil?
end | ruby | {
"resource": ""
} |
q13288 | Azure::PrivateDns::Mgmt::V2018_09_01.RecordSets.create_or_update_with_http_info | train | def create_or_update_with_http_info(resource_group_name, private_zone_name, record_type, relative_record_set_name, parameters, if_match:nil, if_none_match:nil, custom_headers:nil)
create_or_update_async(resource_group_name, private_zone_name, record_type, relative_record_set_name, parameters, if_match:if_match, if_none_match:if_none_match, custom_headers:custom_headers).value!
end | ruby | {
"resource": ""
} |
q13289 | Azure::PrivateDns::Mgmt::V2018_09_01.RecordSets.list_with_http_info | train | def list_with_http_info(resource_group_name, private_zone_name, top:nil, recordsetnamesuffix:nil, custom_headers:nil)
list_async(resource_group_name, private_zone_name, top:top, recordsetnamesuffix:recordsetnamesuffix, custom_headers:custom_headers).value!
end | ruby | {
"resource": ""
} |
q13290 | Azure::Postgresql::Mgmt::V2017_12_01_preview.Configurations.get_with_http_info | train | def get_with_http_info(resource_group_name, server_name, configuration_name, custom_headers:nil)
get_async(resource_group_name, server_name, configuration_name, custom_headers:custom_headers).value!
end | ruby | {
"resource": ""
} |
q13291 | Azure::StorageSync::Mgmt::V2018_07_01.ServerEndpoints.get_with_http_info | train | def get_with_http_info(resource_group_name, storage_sync_service_name, sync_group_name, server_endpoint_name, custom_headers:nil)
get_async(resource_group_name, storage_sync_service_name, sync_group_name, server_endpoint_name, custom_headers:custom_headers).value!
end | ruby | {
"resource": ""
} |
q13292 | Azure::StorageSync::Mgmt::V2018_07_01.ServerEndpoints.begin_delete_with_http_info | train | def begin_delete_with_http_info(resource_group_name, storage_sync_service_name, sync_group_name, server_endpoint_name, custom_headers:nil)
begin_delete_async(resource_group_name, storage_sync_service_name, sync_group_name, server_endpoint_name, custom_headers:custom_headers).value!
end | ruby | {
"resource": ""
} |
q13293 | Azure::CognitiveServices::ComputerVision::V2_0.ComputerVisionClient.make_request_with_http_info | train | def make_request_with_http_info(method, path, options = {})
result = make_request_async(method, path, options).value!
result.body = result.response.body.to_s.empty? ? nil : JSON.load(result.response.body)
result
end | ruby | {
"resource": ""
} |
q13294 | Azure::CognitiveServices::ComputerVision::V2_0.ComputerVisionClient.make_request_async | train | def make_request_async(method, path, options = {})
fail ArgumentError, 'method is nil' if method.nil?
fail ArgumentError, 'path is nil' if path.nil?
request_url = options[:base_url] || @base_url
if(!options[:headers].nil? && !options[:headers]['Content-Type'].nil?)
@request_headers['Content-Type'] = options[:headers]['Content-Type']
end
request_headers = @request_headers
request_headers.merge!({'accept-language' => @accept_language}) unless @accept_language.nil?
options.merge!({headers: request_headers.merge(options[:headers] || {})})
options.merge!({credentials: @credentials}) unless @credentials.nil?
super(request_url, method, path, options)
end | ruby | {
"resource": ""
} |
q13295 | Azure::CognitiveServices::ComputerVision::V2_0.ComputerVisionClient.get_text_operation_result | train | def get_text_operation_result(operation_id, custom_headers:nil)
response = get_text_operation_result_async(operation_id, custom_headers:custom_headers).value!
response.body unless response.nil?
end | ruby | {
"resource": ""
} |
q13296 | Azure::CognitiveServices::ComputerVision::V2_0.ComputerVisionClient.get_read_operation_result | train | def get_read_operation_result(operation_id, custom_headers:nil)
response = get_read_operation_result_async(operation_id, custom_headers:custom_headers).value!
response.body unless response.nil?
end | ruby | {
"resource": ""
} |
q13297 | Azure::Redis::Mgmt::V2017_02_01.Redis.update_with_http_info | train | def update_with_http_info(resource_group_name, name, parameters, custom_headers:nil)
update_async(resource_group_name, name, parameters, custom_headers:custom_headers).value!
end | ruby | {
"resource": ""
} |
q13298 | Azure::Redis::Mgmt::V2017_02_01.Redis.list_keys | train | def list_keys(resource_group_name, name, custom_headers:nil)
response = list_keys_async(resource_group_name, name, custom_headers:custom_headers).value!
response.body unless response.nil?
end | ruby | {
"resource": ""
} |
q13299 | Azure::Redis::Mgmt::V2017_02_01.FirewallRules.create_or_update_with_http_info | train | def create_or_update_with_http_info(resource_group_name, cache_name, rule_name, parameters, custom_headers:nil)
create_or_update_async(resource_group_name, cache_name, rule_name, parameters, custom_headers:custom_headers).value!
end | ruby | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.