| _id (string, 2-6 chars) | title (string, 9-130 chars) | partition (string, 3 classes) | text (string, 66-10.5k chars) | language (string, 1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q16600 | Algolia.Index.browse | train | def browse(page_or_query_parameters = nil, hits_per_page = nil, request_options = {}, &block)
params = {}
if page_or_query_parameters.is_a?(Hash)
params.merge!(page_or_query_parameters)
else
params[:page] = page_or_query_parameters unless page_or_query_parameters.nil?
end
i... | ruby | {
"resource": ""
} |
q16601 | Algolia.Index.browse_from | train | def browse_from(cursor, hits_per_page = 1000, request_options = {})
client.post(Protocol.browse_uri(name), { :cursor => cursor, :hitsPerPage => hits_per_page }.to_json, :read, request_options)
end | ruby | {
"resource": ""
} |
q16602 | Algolia.Index.get_object | train | def get_object(objectID, attributes_to_retrieve = nil, request_options = {})
attributes_to_retrieve = attributes_to_retrieve.join(',') if attributes_to_retrieve.is_a?(Array)
if attributes_to_retrieve.nil?
client.get(Protocol.object_uri(name, objectID, nil), :read, request_options)
else
... | ruby | {
"resource": ""
} |
q16603 | Algolia.Index.get_objects | train | def get_objects(objectIDs, attributes_to_retrieve = nil, request_options = {})
attributes_to_retrieve = attributes_to_retrieve.join(',') if attributes_to_retrieve.is_a?(Array)
requests = objectIDs.map do |objectID|
req = { :indexName => name, :objectID => objectID.to_s }
req[:attributesToRet... | ruby | {
"resource": ""
} |
q16604 | Algolia.Index.save_object | train | def save_object(object, objectID = nil, request_options = {})
client.put(Protocol.object_uri(name, get_objectID(object, objectID)), object.to_json, :write, request_options)
end | ruby | {
"resource": ""
} |
q16605 | Algolia.Index.save_object! | train | def save_object!(object, objectID = nil, request_options = {})
res = save_object(object, objectID, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end | ruby | {
"resource": ""
} |
q16606 | Algolia.Index.save_objects! | train | def save_objects!(objects, request_options = {})
res = save_objects(objects, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end | ruby | {
"resource": ""
} |
q16607 | Algolia.Index.replace_all_objects | train | def replace_all_objects(objects, request_options = {})
safe = request_options[:safe] || request_options['safe'] || false
request_options.delete(:safe)
request_options.delete('safe')
tmp_index = @client.init_index(@name + '_tmp_' + rand(10000000).to_s)
responses = []
scope = ['sett... | ruby | {
"resource": ""
} |
q16608 | Algolia.Index.partial_update_objects | train | def partial_update_objects(objects, create_if_not_exits = true, request_options = {})
if create_if_not_exits
batch(build_batch('partialUpdateObject', objects, true), request_options)
else
batch(build_batch('partialUpdateObjectNoCreate', objects, true), request_options)
end
end | ruby | {
"resource": ""
} |
q16609 | Algolia.Index.partial_update_objects! | train | def partial_update_objects!(objects, create_if_not_exits = true, request_options = {})
res = partial_update_objects(objects, create_if_not_exits, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end | ruby | {
"resource": ""
} |
q16610 | Algolia.Index.delete_object | train | def delete_object(objectID, request_options = {})
raise ArgumentError.new('objectID must not be blank') if objectID.nil? || objectID == ''
client.delete(Protocol.object_uri(name, objectID), :write, request_options)
end | ruby | {
"resource": ""
} |
q16611 | Algolia.Index.delete_object! | train | def delete_object!(objectID, request_options = {})
res = delete_object(objectID, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end | ruby | {
"resource": ""
} |
q16612 | Algolia.Index.delete_objects | train | def delete_objects(objects, request_options = {})
check_array(objects)
batch(build_batch('deleteObject', objects.map { |objectID| { :objectID => objectID } }, false), request_options)
end | ruby | {
"resource": ""
} |
q16613 | Algolia.Index.delete_objects! | train | def delete_objects!(objects, request_options = {})
res = delete_objects(objects, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end | ruby | {
"resource": ""
} |
q16614 | Algolia.Index.delete_by_query | train | def delete_by_query(query, params = nil, request_options = {})
raise ArgumentError.new('query cannot be nil, use the `clear` method to wipe the entire index') if query.nil? && params.nil?
params = sanitized_delete_by_query_params(params)
params[:query] = query
params[:hitsPerPage] = 1000
... | ruby | {
"resource": ""
} |
q16615 | Algolia.Index.delete_by_query! | train | def delete_by_query!(query, params = nil, request_options = {})
res = delete_by_query(query, params, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) if res
res
end | ruby | {
"resource": ""
} |
q16616 | Algolia.Index.clear! | train | def clear!(request_options = {})
res = clear(request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end | ruby | {
"resource": ""
} |
q16617 | Algolia.Index.set_settings | train | def set_settings(new_settings, options = {}, request_options = {})
client.put(Protocol.settings_uri(name, options), new_settings.to_json, :write, request_options)
end | ruby | {
"resource": ""
} |
q16618 | Algolia.Index.set_settings! | train | def set_settings!(new_settings, options = {}, request_options = {})
res = set_settings(new_settings, options, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end | ruby | {
"resource": ""
} |
q16619 | Algolia.Index.get_settings | train | def get_settings(options = {}, request_options = {})
options['getVersion'] = 2 if !options[:getVersion] && !options['getVersion']
client.get(Protocol.settings_uri(name, options).to_s, :read, request_options)
end | ruby | {
"resource": ""
} |
q16620 | Algolia.Index.get_api_key | train | def get_api_key(key, request_options = {})
client.get(Protocol.index_key_uri(name, key), :read, request_options)
end | ruby | {
"resource": ""
} |
q16621 | Algolia.Index.delete_api_key | train | def delete_api_key(key, request_options = {})
client.delete(Protocol.index_key_uri(name, key), :write, request_options)
end | ruby | {
"resource": ""
} |
q16622 | Algolia.Index.batch | train | def batch(request, request_options = {})
client.post(Protocol.batch_uri(name), request.to_json, :batch, request_options)
end | ruby | {
"resource": ""
} |
q16623 | Algolia.Index.batch! | train | def batch!(request, request_options = {})
res = batch(request, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end | ruby | {
"resource": ""
} |
q16624 | Algolia.Index.search_for_facet_values | train | def search_for_facet_values(facet_name, facet_query, search_parameters = {}, request_options = {})
params = search_parameters.clone
params['facetQuery'] = facet_query
client.post(Protocol.search_facet_uri(name, facet_name), params.to_json, :read, request_options)
end | ruby | {
"resource": ""
} |
q16625 | Algolia.Index.search_disjunctive_faceting | train | def search_disjunctive_faceting(query, disjunctive_facets, params = {}, refinements = {}, request_options = {})
raise ArgumentError.new('Argument "disjunctive_facets" must be a String or an Array') unless disjunctive_facets.is_a?(String) || disjunctive_facets.is_a?(Array)
raise ArgumentError.new('Argument "... | ruby | {
"resource": ""
} |
q16626 | Algolia.Index.get_synonym | train | def get_synonym(objectID, request_options = {})
client.get(Protocol.synonym_uri(name, objectID), :read, request_options)
end | ruby | {
"resource": ""
} |
q16627 | Algolia.Index.delete_synonym! | train | def delete_synonym!(objectID, forward_to_replicas = false, request_options = {})
res = delete_synonym(objectID, forward_to_replicas, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end | ruby | {
"resource": ""
} |
q16628 | Algolia.Index.save_synonym | train | def save_synonym(objectID, synonym, forward_to_replicas = false, request_options = {})
client.put("#{Protocol.synonym_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}", synonym.to_json, :write, request_options)
end | ruby | {
"resource": ""
} |
q16629 | Algolia.Index.save_synonym! | train | def save_synonym!(objectID, synonym, forward_to_replicas = false, request_options = {})
res = save_synonym(objectID, synonym, forward_to_replicas, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end | ruby | {
"resource": ""
} |
q16630 | Algolia.Index.clear_synonyms! | train | def clear_synonyms!(forward_to_replicas = false, request_options = {})
res = clear_synonyms(forward_to_replicas, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end | ruby | {
"resource": ""
} |
q16631 | Algolia.Index.replace_all_synonyms | train | def replace_all_synonyms(synonyms, request_options = {})
forward_to_replicas = request_options[:forwardToReplicas] || request_options['forwardToReplicas'] || false
batch_synonyms(synonyms, forward_to_replicas, true, request_options)
end | ruby | {
"resource": ""
} |
q16632 | Algolia.Index.replace_all_synonyms! | train | def replace_all_synonyms!(synonyms, request_options = {})
res = replace_all_synonyms(synonyms, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end | ruby | {
"resource": ""
} |
q16633 | Algolia.Index.export_synonyms | train | def export_synonyms(hits_per_page = 100, request_options = {}, &_block)
res = []
page = 0
loop do
curr = search_synonyms('', { :hitsPerPage => hits_per_page, :page => page }, request_options)['hits']
curr.each do |synonym|
res << synonym
yield synonym if block_given... | ruby | {
"resource": ""
} |
q16634 | Algolia.Index.get_rule | train | def get_rule(objectID, request_options = {})
client.get(Protocol.rule_uri(name, objectID), :read, request_options)
end | ruby | {
"resource": ""
} |
q16635 | Algolia.Index.delete_rule! | train | def delete_rule!(objectID, forward_to_replicas = false, request_options = {})
res = delete_rule(objectID, forward_to_replicas, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
return res
end | ruby | {
"resource": ""
} |
q16636 | Algolia.Index.save_rule | train | def save_rule(objectID, rule, forward_to_replicas = false, request_options = {})
raise ArgumentError.new('objectID must not be blank') if objectID.nil? || objectID == ''
client.put("#{Protocol.rule_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}", rule.to_json, :write, request_options)
end | ruby | {
"resource": ""
} |
q16637 | Algolia.Index.save_rule! | train | def save_rule!(objectID, rule, forward_to_replicas = false, request_options = {})
res = save_rule(objectID, rule, forward_to_replicas, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
return res
end | ruby | {
"resource": ""
} |
q16638 | Algolia.Index.clear_rules! | train | def clear_rules!(forward_to_replicas = false, request_options = {})
res = clear_rules(forward_to_replicas, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
return res
end | ruby | {
"resource": ""
} |
q16639 | Algolia.Index.replace_all_rules | train | def replace_all_rules(rules, request_options = {})
forward_to_replicas = request_options[:forwardToReplicas] || request_options['forwardToReplicas'] || false
batch_rules(rules, forward_to_replicas, true, request_options)
end | ruby | {
"resource": ""
} |
q16640 | Algolia.Index.replace_all_rules! | train | def replace_all_rules!(rules, request_options = {})
res = replace_all_rules(rules, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end | ruby | {
"resource": ""
} |
q16641 | Algolia.Index.export_rules | train | def export_rules(hits_per_page = 100, request_options = {}, &_block)
res = []
page = 0
loop do
curr = search_rules('', { :hits_per_page => hits_per_page, :page => page }, request_options)['hits']
curr.each do |rule|
res << rule
yield rule if block_given?
end... | ruby | {
"resource": ""
} |
q16642 | GraphQL::Models.AttributeLoader.hash_to_condition | train | def hash_to_condition(table, hash)
conditions = hash.map do |attr, value|
if value.is_a?(Array) && value.size > 1
table[attr].in(value)
elsif value.is_a?(Array)
table[attr].eq(value[0])
else
table[attr].eq(value)
end
end
conditions.reduce ... | ruby | {
"resource": ""
} |
q16643 | Squeel.DSL.method_missing | train | def method_missing(method_id, *args)
super if method_id == :to_ary
if args.empty?
Nodes::Stub.new method_id
elsif (args.size == 1) && (Class === args[0])
Nodes::Join.new(method_id, InnerJoin, args[0])
else
Nodes::Function.new method_id, args
end
end | ruby | {
"resource": ""
} |
q16644 | Dragonfly.Job.initialize_copy | train | def initialize_copy(other)
@steps = other.steps.map do |step|
step.class.new(self, *step.args)
end
@content = other.content.dup
@url_attributes = other.url_attributes.dup
end | ruby | {
"resource": ""
} |
q16645 | Dragonfly.Content.update | train | def update(obj, meta=nil)
meta ||= {}
self.temp_object = TempObject.new(obj, meta['name'])
self.meta['name'] ||= temp_object.name if temp_object.name
clear_analyser_cache
add_meta(obj.meta) if obj.respond_to?(:meta)
add_meta(meta)
self
end | ruby | {
"resource": ""
} |
q16646 | Dragonfly.Content.shell_eval | train | def shell_eval(opts={})
should_escape = opts[:escape] != false
command = yield(should_escape ? shell.escape(path) : path)
run command, :escape => should_escape
end | ruby | {
"resource": ""
} |
q16647 | Dragonfly.Content.shell_generate | train | def shell_generate(opts={})
ext = opts[:ext] || self.ext
should_escape = opts[:escape] != false
tempfile = Utils.new_tempfile(ext)
new_path = should_escape ? shell.escape(tempfile.path) : tempfile.path
command = yield(new_path)
run(command, :escape => should_escape)
update(temp... | ruby | {
"resource": ""
} |
q16648 | LogStashLogger.MultiLogger.method_missing | train | def method_missing(name, *args, &block)
@loggers.each do |logger|
if logger.respond_to?(name)
logger.send(name, *args, &block)
end
end
end | ruby | {
"resource": ""
} |
q16649 | LogStashLogger.Buffer.buffer_initialize | train | def buffer_initialize(options={})
if ! self.class.method_defined?(:flush)
raise ArgumentError, "Any class including Stud::Buffer must define a flush() method."
end
@buffer_config = {
:max_items => options[:max_items] || 50,
:max_interval => options[:max_interval] || 5,
... | ruby | {
"resource": ""
} |
q16650 | LogStashLogger.Buffer.buffer_receive | train | def buffer_receive(event, group=nil)
buffer_initialize if ! @buffer_state
# block if we've accumulated too many events
while buffer_full? do
on_full_buffer_receive(
:pending => @buffer_state[:pending_count],
:outgoing => @buffer_state[:outgoing_count]
) if @buffer_... | ruby | {
"resource": ""
} |
q16651 | LogStashLogger.Buffer.buffer_flush | train | def buffer_flush(options={})
force = options[:force] || options[:final]
final = options[:final]
# final flush will wait for lock, so we are sure to flush out all buffered events
if options[:final]
@buffer_state[:flush_mutex].lock
elsif ! @buffer_state[:flush_mutex].try_lock # fail... | ruby | {
"resource": ""
} |
q16652 | Axiom.Relation.each | train | def each
return to_enum unless block_given?
seen = {}
tuples.each do |tuple|
tuple = Tuple.coerce(header, tuple)
yield seen[tuple] = tuple unless seen.key?(tuple)
end
self
end | ruby | {
"resource": ""
} |
q16653 | Axiom.Relation.replace | train | def replace(other)
other = coerce(other)
delete(difference(other)).insert(other.difference(self))
end | ruby | {
"resource": ""
} |
q16654 | Axiom.Aliasable.define_inheritable_alias_method | train | def define_inheritable_alias_method(new_method, original_method)
define_method(new_method) do |*args, &block|
public_send(original_method, *args, &block)
end
end | ruby | {
"resource": ""
} |
q16655 | Axiom.Tuple.extend | train | def extend(header, extensions)
join(
header,
extensions.map { |extension| Function.extract_value(extension, self) }
)
end | ruby | {
"resource": ""
} |
q16656 | Axiom.Tuple.predicate | train | def predicate
header.reduce(Function::Proposition::Tautology.instance) do |predicate, attribute|
predicate.and(attribute.eq(attribute.call(self)))
end
end | ruby | {
"resource": ""
} |
q16657 | GeoPattern.GeoPatternTask.run_task | train | def run_task(_verbose)
data.each do |path, string|
opts = {}
path = File.expand_path(path)
if string.is_a?(Hash)
input = string[:input]
opts[:patterns] = string[:patterns] if string.key? :patterns
opts[:color] = string[:color] if string.key... | ruby | {
"resource": ""
} |
q16658 | GeoPattern.RakeTask.include | train | def include(modules)
modules = Array(modules)
modules.each { |m| self.class.include m }
end | ruby | {
"resource": ""
} |
q16659 | Kontena::Cli::Master.LoginCommand.authentication_path | train | def authentication_path(local_port: nil, invite_code: nil, expires_in: nil, remote: false)
auth_url_params = {}
if remote
auth_url_params[:redirect_uri] = "/code"
elsif local_port
auth_url_params[:redirect_uri] = "http://localhost:#{local_port}/cb"
else
raise ArgumentErro... | ruby | {
"resource": ""
} |
q16660 | Kontena::Cli::Master.LoginCommand.authentication_url_from_master | train | def authentication_url_from_master(master_url, auth_params)
client = Kontena::Client.new(master_url)
vspinner "Sending authentication request to receive an authorization URL" do
response = client.request(
http_method: :get,
path: authentication_path(auth_params),
expect... | ruby | {
"resource": ""
} |
q16661 | Kontena::Cli::Master.LoginCommand.select_a_server | train | def select_a_server(name, url)
# no url, no name, try to use current master
if url.nil? && name.nil?
if config.current_master
return config.current_master
else
exit_with_error 'URL not specified and current master not selected'
end
end
if name && url
... | ruby | {
"resource": ""
} |
q16662 | Kontena::NetworkAdapters.Weave.ensure_exposed | train | def ensure_exposed(cidr)
# configure new address
# these will be added alongside any existing addresses
if @executor_pool.expose(cidr)
info "Exposed host node at cidr=#{cidr}"
else
error "Failed to expose host node at cidr=#{cidr}"
end
# cleanup any old addresses
... | ruby | {
"resource": ""
} |
q16663 | Kontena::NetworkAdapters.Weave.get_containers | train | def get_containers
containers = { }
@executor_pool.ps() do |id, mac, *cidrs|
next if id == 'weave:expose'
containers[id] = cidrs
end
containers
end | ruby | {
"resource": ""
} |
q16664 | Kontena::NetworkAdapters.Weave.migrate_container | train | def migrate_container(container_id, cidr, attached_cidrs)
# first remove any existing addresses
# this is required, since weave will not attach if the address already exists, but with a different netmask
attached_cidrs.each do |attached_cidr|
if cidr != attached_cidr
warn "Migrate co... | ruby | {
"resource": ""
} |
q16665 | Kontena::NetworkAdapters.Weave.remove_container | train | def remove_container(container_id, overlay_network, overlay_cidr)
info "Remove container=#{container_id} from network=#{overlay_network} at cidr=#{overlay_cidr}"
@ipam_client.release_address(overlay_network, overlay_cidr)
rescue IpamError => error
# Cleanup will take care of these later on
... | ruby | {
"resource": ""
} |
q16666 | Kontena::Workers.WeaveWorker.start_container | train | def start_container(container)
overlay_cidr = container.overlay_cidr
if overlay_cidr
wait_weave_running?
register_container_dns(container) if container.service_container?
attach_overlay(container)
else
debug "skip start for container=#{container.name} without overlay_c... | ruby | {
"resource": ""
} |
q16667 | Kontena.PluginManager.init | train | def init
ENV["GEM_HOME"] = Common.install_dir
Gem.paths = ENV
Common.use_dummy_ui unless Kontena.debug?
plugins
true
end | ruby | {
"resource": ""
} |
q16668 | Kontena.Logging.debug | train | def debug(message = nil, &block)
logger.add(Logger::DEBUG, message, self.logging_prefix, &block)
end | ruby | {
"resource": ""
} |
q16669 | Kontena.Logging.info | train | def info(message = nil, &block)
logger.add(Logger::INFO, message, self.logging_prefix, &block)
end | ruby | {
"resource": ""
} |
q16670 | Kontena.Logging.warn | train | def warn(message = nil, &block)
logger.add(Logger::WARN, message, self.logging_prefix, &block)
end | ruby | {
"resource": ""
} |
q16671 | Kontena.Logging.error | train | def error(message = nil, &block)
logger.add(Logger::ERROR, message, self.logging_prefix, &block)
end | ruby | {
"resource": ""
} |
q16672 | Mutations.Command.add_error | train | def add_error(key, error, message = nil)
if error.is_a? Symbol
error = ErrorAtom.new(key, error, message: message)
elsif error.is_a?(Mutations::ErrorAtom) || error.is_a?(Mutations::ErrorArray) || error.is_a?(Mutations::ErrorHash)
else
raise ArgumentError.new("Invalid error of kind #{e... | ruby | {
"resource": ""
} |
q16673 | Kontena::Cli::Helpers.ExecHelper.websocket_exec_write_thread | train | def websocket_exec_write_thread(ws, tty: nil)
Thread.new do
begin
if tty
console_height, console_width = TTY::Screen.size
websocket_exec_write(ws, 'tty_size' => {
width: console_width, height: console_height
})
end
read_stdin(... | ruby | {
"resource": ""
} |
q16674 | Kontena::Cli::Helpers.ExecHelper.websocket_exec | train | def websocket_exec(path, cmd, interactive: false, shell: false, tty: false)
exit_status = nil
write_thread = nil
query = {}
query[:interactive] = interactive if interactive
query[:shell] = shell if shell
query[:tty] = tty if tty
server = require_current_master
url = web... | ruby | {
"resource": ""
} |
q16675 | Kontena::Launchers.Etcd.update_membership | train | def update_membership(node)
info 'checking if etcd previous membership needs to be updated'
etcd_connection = find_etcd_node(node)
return 'new' unless etcd_connection # No etcd hosts available, bootstrapping first node --> new cluster
weave_ip = node.overlay_ip
peer_url = "http://#{weave... | ruby | {
"resource": ""
} |
q16676 | Kontena::Launchers.Etcd.find_etcd_node | train | def find_etcd_node(node)
grid_subnet = IPAddr.new(node.grid['subnet'])
tries = node.grid['initial_size']
begin
etcd_host = "http://#{grid_subnet[tries]}:2379/v2/members"
info "connecting to existing etcd at #{etcd_host}"
connection = Excon.new(etcd_host)
members = JSON... | ruby | {
"resource": ""
} |
q16677 | Kontena::Launchers.Etcd.add_membership | train | def add_membership(connection, peer_url)
info "Adding new etcd membership info with peer URL #{peer_url}"
connection.post(:body => JSON.generate(peerURLs: [peer_url]),
:headers => { 'Content-Type' => 'application/json' })
end | ruby | {
"resource": ""
} |
q16678 | Kontena.Observable.update | train | def update(value)
raise RuntimeError, "Observable crashed: #{@value}" if crashed?
raise ArgumentError, "Update with nil value" if value.nil?
debug { "update: #{value}" }
set_and_notify(value)
end | ruby | {
"resource": ""
} |
q16679 | Kontena::Workers::Volumes.VolumeManager.volume_exist? | train | def volume_exist?(volume_name, driver)
begin
debug "volume #{volume_name} exists"
volume = Docker::Volume.get(volume_name)
if volume && volume.info['Driver'] == driver
return true
elsif volume && volume.info['Driver'] != driver
raise DriverMismatchError.new("Vol... | ruby | {
"resource": ""
} |
q16680 | Kontena::Workers.ServicePodWorker.on_container_die | train | def on_container_die(exit_code: )
cancel_restart_timers
return unless @service_pod.running?
# backoff restarts
backoff = @restarts ** 2
backoff = max_restart_backoff if backoff > max_restart_backoff
info "#{@service_pod} exited with code #{exit_code}, restarting (delay: #{backoff}s)... | ruby | {
"resource": ""
} |
q16681 | Kontena::Workers.ServicePodWorker.restart | train | def restart(at = Time.now, container_id: nil, started_at: nil)
if container_id && @container.id != container_id
debug "stale #{@service_pod} restart for container id=#{container_id}"
return
end
if started_at && @container.started_at != started_at
debug "stale #{@service_pod} re... | ruby | {
"resource": ""
} |
q16682 | Kontena::Workers.ServicePodWorker.check_starting! | train | def check_starting!(service_pod, container)
raise "service stopped" if !@service_pod.running?
raise "service redeployed" if @service_pod.deploy_rev != service_pod.deploy_rev
raise "container recreated" if @container.id != container.id
raise "container restarted" if @container.started_at != conta... | ruby | {
"resource": ""
} |
q16683 | GridServices.Helpers.document_changes | train | def document_changes(document)
(document.changed + document._children.select{|child| child.changed? }.map { |child|
"#{child.metadata_name.to_s}{#{child.changed.join(", ")}}"
}).join(", ")
end | ruby | {
"resource": ""
} |
q16684 | GridServices.Helpers.save_grid_service | train | def save_grid_service(grid_service)
if grid_service.save
return grid_service
else
grid_service.errors.each do |key, message|
add_error(key, :invalid, message)
end
return nil
end
end | ruby | {
"resource": ""
} |
q16685 | GridServices.Helpers.update_grid_service | train | def update_grid_service(grid_service, force: false)
if grid_service.changed? || force
grid_service.revision += 1
info "updating service #{grid_service.to_path} revision #{grid_service.revision} with changes: #{document_changes(grid_service)}"
else
debug "not updating service #{grid_s... | ruby | {
"resource": ""
} |
q16686 | Kontena.Observer.error | train | def error
@values.each_pair{|observable, value|
return Error.new(observable, value) if Exception === value
}
return nil
end | ruby | {
"resource": ""
} |
q16687 | Kontena.Observer.each | train | def each(timeout: nil)
@deadline = Time.now + timeout if timeout
while true
# prevent any intervening messages from being processed and discarded before we're back in Celluloid.receive()
Celluloid.exclusive {
if error?
debug { "raise: #{self.describe_observables}" }
... | ruby | {
"resource": ""
} |
q16688 | GridServices.Common.validate_secrets | train | def validate_secrets
validate_each :secrets do |s|
secret = self.grid.grid_secrets.find_by(name: s[:secret])
unless secret
[:not_found, "Secret #{s[:secret]} does not exist"]
else
nil
end
end
end | ruby | {
"resource": ""
} |
q16689 | GridServices.Common.validate_certificates | train | def validate_certificates
validate_each :certificates do |c|
cert = self.grid.certificates.find_by(subject: c[:subject])
unless cert
[:not_found, "Certificate #{c[:subject]} does not exist"]
else
nil
end
end
end | ruby | {
"resource": ""
} |
q16690 | Docker.StreamingExecutor.start | train | def start(ws)
@ws = ws
@ws.on(:message) do |event|
on_websocket_message(event.data)
end
@ws.on(:error) do |exc|
warn exc
end
@ws.on(:close) do |event|
on_websocket_close(event.code, event.reason)
end
started!
end | ruby | {
"resource": ""
} |
q16691 | Kontena::Cli::Services.UpdateCommand.parse_service_data_from_options | train | def parse_service_data_from_options
data = {}
data[:strategy] = deploy_strategy if deploy_strategy
data[:ports] = parse_ports(ports_list) unless ports_list.empty?
data[:links] = parse_links(link_list) unless link_list.empty?
data[:memory] = parse_memory(memory) if memory
data[:memory... | ruby | {
"resource": ""
} |
q16692 | Kontena::Cli::Helpers.TimeHelper.time_since | train | def time_since(time, terse: false)
return '' if time.nil? || time.empty?
dt = Time.now - Time.parse(time)
dt_s = dt.to_i
dt_m, dt_s = dt_s / 60, dt_s % 60
dt_h, dt_m = dt_m / 60, dt_m % 60
dt_d, dt_h = dt_h / 24, dt_h % 24
parts = []
parts << "%dd" % dt_d if dt_d > 0
... | ruby | {
"resource": ""
} |
q16693 | Kontena.WebsocketClient.connect! | train | def connect!
info "connecting to master at #{@api_uri}"
headers = {
'Kontena-Node-Id' => @node_id.to_s,
'Kontena-Node-Name' => @node_name,
'Kontena-Version' => Kontena::Agent::VERSION,
'Kontena-Node-Labels' => @node_labels.join(','),
'Kontena-Connected-At' =... | ruby | {
"resource": ""
} |
q16694 | Kontena.WebsocketClient.send_message | train | def send_message(msg)
ws.send(msg)
rescue => exc
warn exc
abort exc
end | ruby | {
"resource": ""
} |
q16695 | Kontena.WebsocketClient.on_error | train | def on_error(exc)
case exc
when Kontena::Websocket::SSLVerifyError
if exc.cert
error "unable to connect to SSL server with KONTENA_SSL_VERIFY=true: #{exc} (subject #{exc.subject}, issuer #{exc.issuer})"
else
error "unable to connect to SSL server with KONTENA_SSL_VERIFY=t... | ruby | {
"resource": ""
} |
q16696 | Kontena.WebsocketClient.on_close | train | def on_close(code, reason)
debug "Server closed connection with code #{code}: #{reason}"
case code
when 4001
handle_invalid_token
when 4010
handle_invalid_version(reason)
when 4040, 4041
handle_invalid_connection(reason)
else
warn "connection closed w... | ruby | {
"resource": ""
} |
q16697 | Kontena::Cli.SubcommandLoader.symbolize_path | train | def symbolize_path(path)
path.gsub(/.*\/cli\//, '').split('/').map do |path_part|
path_part.split('_').map{ |e| e.capitalize }.join
end.map(&:to_sym)
end | ruby | {
"resource": ""
} |
q16698 | Kontena.Client.authentication_ok? | train | def authentication_ok?(token_verify_path)
return false unless token
return false unless token['access_token']
return false unless token_verify_path
final_path = token_verify_path.gsub(/\:access\_token/, token['access_token'])
debug { "Requesting user info from #{final_path}" }
reque... | ruby | {
"resource": ""
} |
q16699 | Kontena.Client.exchange_code | train | def exchange_code(code)
return nil unless token_account
return nil unless token_account['token_endpoint']
response = request(
http_method: token_account['token_method'].downcase.to_sym,
path: token_account['token_endpoint'],
headers: { CONTENT_TYPE => token_account['token_post... | ruby | {
"resource": ""
} |
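
Taken together, the header describes a simple record shape: a short string `_id`, a dotted `title` naming the method, a `partition` split, the raw `text` of the snippet, its `language`, and a `meta_information` dict. Below is a minimal Ruby sketch of reading such rows, assuming the corpus is distributed as JSON Lines with exactly those keys; the file name `corpus.jsonl` and the helper names are placeholders, not part of the dataset.

```ruby
require 'json'

# One row of the corpus: a code snippet plus its retrieval metadata.
# Field names mirror the table header: _id, title, partition, text,
# language, meta_information.
Row = Struct.new(:id, :title, :partition, :text, :language, :meta) do
  # "Algolia.Index.browse" -> "browse"
  def method_name
    title.split('.').last
  end
end

# Stream rows from a JSON Lines dump (hypothetical layout; adjust to
# however the corpus is actually distributed).
def each_row(path)
  return enum_for(:each_row, path) unless block_given?
  File.foreach(path) do |line|
    h = JSON.parse(line)
    yield Row.new(h['_id'], h['title'], h['partition'],
                  h['text'], h['language'], h['meta_information'])
  end
end

# Example: print one line per Ruby training row.
each_row('corpus.jsonl') do |row|
  next unless row.partition == 'train' && row.language == 'ruby'
  puts "#{row.id} #{row.method_name} (#{row.text.lines.count} lines)"
end
```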