_id
stringlengths
2
6
title
stringlengths
9
130
partition
stringclasses
3 values
text
stringlengths
66
10.5k
language
stringclasses
1 value
meta_information
dict
q11400
Solargraph.Source.location
train
# A Location representing the entire document, from the first character
# to the last.
#
# @return [Location]
def location
  first = Position.new(0, 0)
  last = Position.from_offset(code, code.length)
  Location.new(filename, Range.new(first, last))
end
ruby
{ "resource": "" }
q11401
Solargraph.Source.associated_comments
train
# Map source lines to the comment blocks associated with them, memoized
# after the first call.
#
# @return [Hash{Integer => Array<Parser::Source::Comment>}]
def associated_comments
  @associated_comments ||= begin
    result = {}
    Parser::Source::Comment.associate_locations(node, comments).each_pair do |loc, all|
      block = all #.select{ |l| l.document? || code.lines[l.loc.line].strip.start_with?('#')}
      next if block.empty?
      result[loc.line] ||= []
      result[loc.line].concat block
    end
    result
  end
end
ruby
{ "resource": "" }
q11402
Solargraph.Source.stringify_comment_array
train
# Convert an array of comment objects into a plain documentation string,
# stripping leading `#` markers and the minimum shared indentation while
# preserving blank lines between comments.
#
# @param comments [Array] objects responding to #text and #loc
# @return [String]
def stringify_comment_array comments
  buffer = ''
  indent = nil
  seen_text = false
  prev_line = nil
  comments.each do |cmnt|
    # Trim the comment and track the minimum leading whitespace
    stripped = cmnt.text.gsub(/^#+/, '')
    if indent.nil? && !stripped.strip.empty?
      indent = stripped.index(/[^ ]/)
      seen_text = true
    elsif seen_text && !stripped.strip.empty?
      here = stripped.index(/[^ ]/)
      indent = here if here < indent
    end
    # Include blank lines between comments
    unless prev_line.nil? || cmnt.loc.first_line - prev_line <= 0
      buffer += "\n" * (cmnt.loc.first_line - prev_line - 1)
    end
    buffer += "#{stripped[indent..-1]}\n" if seen_text
    prev_line = cmnt.loc.last_line if prev_line.nil? || cmnt.loc.last_line > prev_line
  end
  buffer
end
ruby
{ "resource": "" }
q11403
Solargraph.Source.foldable_comment_block_ranges
train
# Find ranges of comment blocks that a client may fold: documentation
# comments fold individually; runs of 3+ consecutive `#` comment lines
# fold as a group.
#
# @return [Array<Range>]
def foldable_comment_block_ranges
  return [] unless synchronized?
  result = []
  grouped = []
  # @param cmnt [Parser::Source::Comment]
  @comments.each do |cmnt|
    if cmnt.document?
      result.push Range.from_expr(cmnt.loc.expression)
    elsif code.lines[cmnt.loc.expression.line].strip.start_with?('#')
      if grouped.empty? || cmnt.loc.expression.line == grouped.last.loc.expression.line + 1
        grouped.push cmnt
      else
        # A gap in line numbers ends the current group.
        result.push Range.from_to(grouped.first.loc.expression.line, 0, grouped.last.loc.expression.line, 0) unless grouped.length < 3
        grouped = [cmnt]
      end
    else
      unless grouped.length < 3
        result.push Range.from_to(grouped.first.loc.expression.line, 0, grouped.last.loc.expression.line, 0)
      end
      grouped.clear
    end
  end
  # Flush a trailing group at end of input.
  result.push Range.from_to(grouped.first.loc.expression.line, 0, grouped.last.loc.expression.line, 0) unless grouped.length < 3
  result
end
ruby
{ "resource": "" }
q11404
Solargraph.Workspace.merge
train
# Add or update a source in the workspace. Unknown files trigger a
# config reload to decide whether they belong in the workspace.
#
# @param source [Solargraph::Source]
# @return [Boolean] true if the source was merged
def merge source
  known = directory == '*' || source_hash.key?(source.filename)
  unless known
    # Reload the config to determine if a new source should be included
    @config = Solargraph::Workspace::Config.new(directory)
    return false unless config.calculated.include?(source.filename)
  end
  source_hash[source.filename] = source
  true
end
ruby
{ "resource": "" }
q11405
Solargraph.Workspace.would_merge?
train
# Check whether a file would be merged into the workspace, without
# merging it. Reloads the workspace config for unknown files.
#
# @param filename [String]
# @return [Boolean]
def would_merge? filename
  already_tracked = directory == '*' || source_hash.include?(filename)
  return true if already_tracked
  @config = Solargraph::Workspace::Config.new(directory)
  config.calculated.include?(filename)
end
ruby
{ "resource": "" }
q11406
Solargraph.Workspace.remove
train
# Remove a file from the workspace.
#
# @param filename [String]
# @return [Boolean] true if the file was present and removed
def remove filename
  if source_hash.key?(filename)
    source_hash.delete filename
    true
  else
    false
  end
end
ruby
{ "resource": "" }
q11407
Solargraph.Workspace.would_require?
train
# True if any of the workspace's require paths contains a Ruby file
# matching the given require path (e.g. `foo/bar` -> `<rp>/foo/bar.rb`).
# Uses Enumerable#any? instead of a manual each/return loop.
#
# @param path [String]
# @return [Boolean]
def would_require? path
  require_paths.any? { |rp| File.exist?(File.join(rp, "#{path}.rb")) }
end
ruby
{ "resource": "" }
q11408
Solargraph.Workspace.synchronize!
train
# Apply an updater to the stored source for its file, replacing the
# stored source with the synchronized result.
#
# @param updater [Source::Updater]
def synchronize! updater
  synced = source_hash[updater.filename].synchronize(updater)
  source_hash[updater.filename] = synced
end
ruby
{ "resource": "" }
q11409
Solargraph.Workspace.generate_require_paths
train
# Generate the workspace's require paths. Gemspec-declared paths take
# precedence; otherwise fall back to configured paths or `lib`.
#
# @return [Array<String>]
def generate_require_paths
  return configured_require_paths unless gemspec?
  result = []
  gemspecs.each do |file|
    base = File.dirname(file)
    # @todo Evaluating gemspec files violates the goal of not running
    #   workspace code, but this is how Gem::Specification.load does it
    #   anyway.
    begin
      spec = eval(File.read(file), binding, file)
      next unless Gem::Specification === spec
      result.concat(spec.require_paths.map { |path| File.join(base, path) })
    rescue Exception => e
      # Don't die if we have an error during eval-ing a gem spec.
      # Concat the default lib directory instead.
      Solargraph.logger.warn "Error reading #{file}: [#{e.class}] #{e.message}"
      result.push File.join(base, 'lib')
    end
  end
  result.concat config.require_paths
  result.push File.join(directory, 'lib') if result.empty?
  result
end
ruby
{ "resource": "" }
q11410
Solargraph.Workspace.configured_require_paths
train
# Absolute require paths from the workspace config, defaulting to the
# workspace's `lib` directory.
#
# @return [Array<String>]
def configured_require_paths
  return ['lib'] if directory.empty?
  return [File.join(directory, 'lib')] if config.require_paths.empty?
  config.require_paths.map { |rel| File.join(directory, rel) }
end
ruby
{ "resource": "" }
q11411
Solargraph.Library.create
train
# Create a source from text and merge it into the workspace if the
# workspace is configured to include it.
#
# @param filename [String]
# @param text [String]
# @return [Boolean] true if the file was merged
def create filename, text
  merged = false
  mutex.synchronize do
    next unless contain?(filename) || open?(filename) || workspace.would_merge?(filename)
    @synchronized = false
    workspace.merge Solargraph::Source.load_string(text, filename)
    merged = true
  end
  merged
end
ruby
{ "resource": "" }
q11412
Solargraph.Library.create_from_disk
train
# Create a source by reading a file from disk and merge it into the
# workspace if the workspace is configured to include it.
#
# @param filename [String]
# @return [Boolean] true if the file was merged
def create_from_disk filename
  merged = false
  mutex.synchronize do
    next if File.directory?(filename) || !File.exist?(filename)
    next unless contain?(filename) || open?(filename) || workspace.would_merge?(filename)
    @synchronized = false
    workspace.merge Solargraph::Source.load_string(File.read(filename), filename)
    merged = true
  end
  merged
end
ruby
{ "resource": "" }
q11413
Solargraph.Library.delete
train
# Detach and remove a file from the workspace.
#
# @param filename [String]
# @return [Boolean] true if the file was removed
def delete filename
  detach filename
  removed = false
  mutex.synchronize do
    removed = workspace.remove(filename)
    @synchronized = !removed if synchronized?
  end
  removed
end
ruby
{ "resource": "" }
q11414
Solargraph.Library.completions_at
train
# Get completion suggestions at the given source position.
#
# @return [ApiMap::Completion]
def completions_at filename, line, column
  cursor = Source::Cursor.new(checkout(filename), Position.new(line, column))
  api_map.clip(cursor).complete
end
ruby
{ "resource": "" }
q11415
Solargraph.Library.definitions_at
train
# Get definition pins for the symbol at the given source position.
#
# @return [Array<Pin::Base>]
def definitions_at filename, line, column
  cursor = Source::Cursor.new(checkout(filename), Position.new(line, column))
  api_map.clip(cursor).define
end
ruby
{ "resource": "" }
q11416
Solargraph.Library.signatures_at
train
# Get signature help pins for the call at the given source position.
#
# @return [Array<Pin::Base>]
def signatures_at filename, line, column
  cursor = Source::Cursor.new(checkout(filename), Position.new(line, column))
  api_map.clip(cursor).signify
end
ruby
{ "resource": "" }
q11417
Solargraph.Library.diagnose
train
# Run the configured diagnostics reporters against a file.
#
# @param filename [String]
# @return [Array<Hash>] diagnostic results
def diagnose filename
  # @todo Only open files get diagnosed. Determine whether anything or
  #   everything in the workspace should get diagnosed, or if there should
  #   be an option to do so.
  #
  # return [] unless open?(filename)
  catalog
  result = []
  source = read(filename)
  workspace.config.reporters.each do |name|
    reporter = Diagnostics.reporter(name)
    raise DiagnosticsError, "Diagnostics reporter #{name} does not exist" if reporter.nil?
    result.concat reporter.new.diagnose(source, api_map)
  end
  result
end
ruby
{ "resource": "" }
q11418
Solargraph.Library.catalog
train
# Synchronize the API map with the current workspace contents. No-op
# when the library is already synchronized.
def catalog
  @catalog_mutex.synchronize do
    break if synchronized?
    logger.info "Cataloging #{workspace.directory.empty? ? 'generic workspace' : workspace.directory}"
    api_map.catalog bundle
    @synchronized = true
    logger.info "Catalog complete (#{api_map.pins.length} pins)"
  end
end
ruby
{ "resource": "" }
q11419
Solargraph.Library.merge
train
# Merge a source into the workspace, marking the library out of sync
# when the merge succeeds.
#
# @param source [Solargraph::Source]
# @return [Boolean]
def merge source
  merged = nil
  mutex.synchronize do
    merged = workspace.merge(source)
    @synchronized = !merged if synchronized?
  end
  merged
end
ruby
{ "resource": "" }
q11420
Solargraph.Library.read
train
# Get the source for an open file, or fetch it from the workspace.
#
# @param filename [String]
# @raise [FileNotFoundError] if the workspace does not track the file
# @return [Solargraph::Source]
def read filename
  return @current if @current && @current.filename == filename
  # The error message previously contained a corrupted interpolation
  # ("#(unknown)"); report the actual filename.
  raise FileNotFoundError, "File not found: #{filename}" unless workspace.has_file?(filename)
  workspace.source(filename)
end
ruby
{ "resource": "" }
q11421
Vault.Request.extract_headers!
train
# Remove known header options from the given hash and return them as a
# map of HTTP header name => value. The options hash is mutated.
#
# @param options [Hash]
# @return [Hash]
def extract_headers!(options = {})
  mapping = {
    wrap_ttl: Vault::Client::WRAP_TTL_HEADER,
  }
  headers = {}
  mapping.each do |option_key, header_name|
    next unless options[option_key]
    headers[header_name] = options.delete(option_key)
  end
  headers
end
ruby
{ "resource": "" }
q11422
Vault.Sys.unseal
train
# Unseal the vault with a single master key shard.
#
# @param shard [String]
# @return [SealStatus]
def unseal(shard)
  payload = JSON.fast_generate(key: shard)
  SealStatus.decode(client.put("/v1/sys/unseal", payload))
end
ruby
{ "resource": "" }
q11423
Vault.Logical.list
train
# List the secret names at the given path. Returns an empty array when
# the path does not exist (HTTP 404).
#
# @param path [String]
# @return [Array<String>]
def list(path, options = {})
  headers = extract_headers!(options)
  response = client.list("/v1/#{encode_path(path)}", {}, headers)
  response[:data][:keys] || []
rescue HTTPError => e
  raise unless e.code == 404
  []
end
ruby
{ "resource": "" }
q11424
Vault.Logical.read
train
# Read the secret at the given path, or nil when it does not exist
# (HTTP 404).
#
# @param path [String]
# @return [Secret, nil]
def read(path, options = {})
  headers = extract_headers!(options)
  response = client.get("/v1/#{encode_path(path)}", {}, headers)
  Secret.decode(response)
rescue HTTPError => e
  raise unless e.code == 404
  nil
end
ruby
{ "resource": "" }
q11425
Vault.Logical.unwrap
train
# Unwrap a response-wrapped secret stored in the cubbyhole backend.
#
# @param wrapper [String] the wrapping token
# @return [Secret, nil] nil when the cubbyhole is empty or missing
def unwrap(wrapper)
  client.with_token(wrapper) do |client|
    json = client.get("/v1/cubbyhole/response")
    secret = Secret.decode(json)

    # If there is nothing in the cubbyhole, return early.
    if secret.nil? || secret.data.nil? || secret.data[:response].nil?
      return nil
    end

    # Extract the response and parse it into a new secret.
    json = JSON.parse(secret.data[:response], symbolize_names: true)
    return Secret.decode(json)
  end
rescue HTTPError => e
  return nil if e.code == 404
  raise
end
ruby
{ "resource": "" }
q11426
Vault.Logical.unwrap_token
train
# Unwrap a wrapped authentication token and return the client token.
# Accepts either a raw wrapping token or a Secret whose wrap_info
# carries the token.
#
# @param wrapper [String, Secret]
# @return [String, nil] nil when nothing was wrapped
def unwrap_token(wrapper)
  # If provided a secret, grab the token. This is really just to make the
  # API a bit nicer.
  wrapper = wrapper.wrap_info.token if wrapper.is_a?(Secret)

  response = unwrap(wrapper)

  # If nothing was there, return nil.
  return nil if response.nil? || response.auth.nil?

  # The original had a `rescue HTTPError => e; raise` clause that only
  # re-raised; it was a no-op and has been removed.
  response.auth.client_token
end
ruby
{ "resource": "" }
q11427
Vault.Sys.auths
train
# List the enabled authentication backends, keyed by mount point with
# the trailing slash removed.
#
# @return [Hash<Symbol, Auth>]
def auths
  json = client.get("/v1/sys/auth")
  json = json[:data] if json[:data]
  # Build the hash directly; the previous `Hash[*pairs.flatten]` idiom
  # flattens all levels and would corrupt array-valued entries.
  json.map { |k, v| [k.to_s.chomp("/").to_sym, Auth.decode(v)] }.to_h
end
ruby
{ "resource": "" }
q11428
Vault.Sys.enable_auth
train
# Enable an authentication backend at the given mount path.
#
# @return [true]
def enable_auth(path, type, description = nil)
  payload = { type: type }
  payload[:description] = description unless description.nil?
  client.post("/v1/sys/auth/#{encode_path(path)}", JSON.fast_generate(payload))
  true
end
ruby
{ "resource": "" }
q11429
Vault.Sys.auth_tune
train
# Read the tuning configuration for an auth mount, or nil when the
# mount does not exist (HTTP 404).
#
# @return [AuthConfig, nil]
def auth_tune(path)
  response = client.get("/v1/sys/auth/#{encode_path(path)}/tune")
  AuthConfig.decode(response)
rescue HTTPError => e
  raise unless e.code == 404
  nil
end
ruby
{ "resource": "" }
q11430
Vault.Sys.put_auth_tune
train
# Write tuning configuration for an auth mount. Returns the decoded
# secret when the server responds with a body, otherwise true.
#
# @return [Secret, true]
def put_auth_tune(path, config = {})
  response = client.put("/v1/sys/auth/#{encode_path(path)}/tune", JSON.fast_generate(config))
  response.nil? ? true : Secret.decode(response)
end
ruby
{ "resource": "" }
q11431
Vault.PersistentHTTP.expired?
train
# Whether a pooled connection should be discarded: either it has served
# the maximum number of requests, or it has sat idle past the timeout.
def expired? connection
  if @max_requests && connection.requests >= @max_requests
    true
  elsif !@idle_timeout
    false
  elsif @idle_timeout.zero?
    true
  else
    Time.now - connection.last_use > @idle_timeout
  end
end
ruby
{ "resource": "" }
q11432
Vault.PersistentHTTP.idempotent?
train
# True when the request method is idempotent (safe to retry); nil
# otherwise, matching the original case-without-else behavior.
def idempotent? req
  retry_safe = [Net::HTTP::Delete, Net::HTTP::Get, Net::HTTP::Head,
                Net::HTTP::Options, Net::HTTP::Put, Net::HTTP::Trace]
  true if retry_safe.any? { |klass| klass === req }
end
ruby
{ "resource": "" }
q11433
Vault.PersistentHTTP.pipeline
train
# Pipeline the given requests over a pooled connection for the URI,
# yielding responses to the block.
def pipeline uri, requests, &block # :yields: responses
  connection_for uri do |connection|
    connection.http.pipeline requests, &block
  end
end
ruby
{ "resource": "" }
q11434
Vault.PersistentHTTP.proxy_from_env
train
# Derive a proxy URI from the environment (http_proxy / no_proxy),
# attaching credentials from http_proxy_user / http_proxy_pass.
#
# @return [URI, nil] nil when no proxy applies
def proxy_from_env
  env_proxy = ENV['http_proxy'] || ENV['HTTP_PROXY']
  return nil if env_proxy.nil? or env_proxy.empty?

  uri = URI normalize_uri env_proxy

  env_no_proxy = ENV['no_proxy'] || ENV['NO_PROXY']

  # '*' is special case for always bypass
  return nil if env_no_proxy == '*'

  if env_no_proxy then
    uri.query = "no_proxy=#{escape(env_no_proxy)}"
  end

  unless uri.user or uri.password then
    uri.user     = escape ENV['http_proxy_user'] || ENV['HTTP_PROXY_USER']
    uri.password = escape ENV['http_proxy_pass'] || ENV['HTTP_PROXY_PASS']
  end

  uri
end
ruby
{ "resource": "" }
q11435
Vault.PersistentHTTP.proxy_bypass?
train
# True when the host (or host:port) matches a suffix in the no_proxy
# list and the proxy should therefore be bypassed.
def proxy_bypass? host, port
  host = host.downcase
  combined = [host, port].join ':'
  @no_proxy.any? do |name|
    host.end_with?(name) || combined.end_with?(name)
  end
end
ruby
{ "resource": "" }
q11436
Vault.PersistentHTTP.request_failed
train
# Raise a wrapped Error after repeated connection resets, closing the
# failed connection first and preserving the original backtrace.
def request_failed exception, req, connection # :nodoc:
  reason = "(due to #{exception.message} - #{exception.class})"
  full_message = "too many connection resets #{reason} #{error_message connection}"
  finish connection
  raise Error, full_message, exception.backtrace
end
ruby
{ "resource": "" }
q11437
Vault.PersistentHTTP.request_setup
train
# Normalize the argument to a request object (building a GET from a
# URI), apply default and override headers, and default to keep-alive.
def request_setup req_or_uri # :nodoc:
  req = URI === req_or_uri ? Net::HTTP::Get.new(req_or_uri.request_uri) : req_or_uri

  @headers.each { |pair| req.add_field(*pair) }
  @override_headers.each { |name, value| req[name] = value }

  unless req['Connection']
    req.add_field 'Connection', 'keep-alive'
    req.add_field 'Keep-Alive', @keep_alive
  end

  req
end
ruby
{ "resource": "" }
q11438
Vault.Authenticate.token
train
# Authenticate with an existing token, verifying it via a self lookup.
# Restores the previous client token if the lookup raises.
#
# @return [Secret]
def token(new_token)
  old_token = client.token
  client.token = new_token
  Secret.decode(client.get("/v1/auth/token/lookup-self"))
rescue
  client.token = old_token
  raise
end
ruby
{ "resource": "" }
q11439
Vault.Authenticate.app_id
train
# Authenticate via the (deprecated) app-id backend and store the
# resulting client token.
#
# @return [Secret]
def app_id(app_id, user_id, options = {})
  payload = { app_id: app_id, user_id: user_id }.merge(options)
  response = client.post("/v1/auth/app-id/login", JSON.fast_generate(payload))
  secret = Secret.decode(response)
  client.token = secret.auth.client_token
  secret
end
ruby
{ "resource": "" }
q11440
Vault.Authenticate.approle
train
# Authenticate via the AppRole backend and store the resulting client
# token. The secret_id is optional (bind_secret_id=false roles).
#
# @return [Secret]
def approle(role_id, secret_id = nil)
  payload = { role_id: role_id }
  payload[:secret_id] = secret_id if secret_id
  response = client.post("/v1/auth/approle/login", JSON.fast_generate(payload))
  secret = Secret.decode(response)
  client.token = secret.auth.client_token
  secret
end
ruby
{ "resource": "" }
q11441
Vault.Authenticate.userpass
train
# Authenticate via the userpass backend and store the resulting client
# token.
#
# @return [Secret]
def userpass(username, password, options = {})
  payload = { password: password }.merge(options)
  response = client.post("/v1/auth/userpass/login/#{encode_path(username)}", JSON.fast_generate(payload))
  secret = Secret.decode(response)
  client.token = secret.auth.client_token
  secret
end
ruby
{ "resource": "" }
q11442
Vault.Authenticate.github
train
# Authenticate via the GitHub backend and store the resulting client
# token.
#
# @return [Secret]
def github(github_token, path = "/v1/auth/github/login")
  response = client.post(path, JSON.fast_generate(token: github_token))
  secret = Secret.decode(response)
  client.token = secret.auth.client_token
  secret
end
ruby
{ "resource": "" }
q11443
Vault.Authenticate.aws_ec2
train
# Authenticate via the aws-ec2 backend using the instance PKCS7
# signature, and store the resulting client token.
#
# @return [Secret]
def aws_ec2(role, pkcs7, nonce = nil, route = nil)
  route ||= '/v1/auth/aws-ec2/login'
  payload = { role: role, pkcs7: pkcs7 }
  # Set a custom nonce if client is providing one
  payload[:nonce] = nonce if nonce
  response = client.post(route, JSON.fast_generate(payload))
  secret = Secret.decode(response)
  client.token = secret.auth.client_token
  secret
end
ruby
{ "resource": "" }
q11444
Vault.Authenticate.gcp
train
# Authenticate via the GCP backend using a signed JWT and store the
# resulting client token.
#
# @return [Secret]
def gcp(role, jwt, path = 'gcp')
  payload = { role: role, jwt: jwt }
  response = client.post("/v1/auth/#{CGI.escape(path)}/login", JSON.fast_generate(payload))
  secret = Secret.decode(response)
  client.token = secret.auth.client_token
  secret
end
ruby
{ "resource": "" }
q11445
Vault.Authenticate.tls
train
# Authenticate via TLS client certificate (optionally overriding the
# PEM contents on a duplicated client) and store the resulting token.
#
# @return [Secret]
def tls(pem = nil, path = 'cert')
  new_client = client.dup
  new_client.ssl_pem_contents = pem unless pem.nil?
  response = new_client.post("/v1/auth/#{CGI.escape(path)}/login")
  secret = Secret.decode(response)
  client.token = secret.auth.client_token
  secret
end
ruby
{ "resource": "" }
q11446
Vault.Sys.policy
train
# Fetch a policy by name, or nil when it does not exist (HTTP 404).
#
# @return [Policy, nil]
def policy(name)
  response = client.get("/v1/sys/policy/#{encode_path(name)}")
  Policy.decode(response)
rescue HTTPError => e
  raise unless e.code == 404
  nil
end
ruby
{ "resource": "" }
q11447
Vault.Sys.put_policy
train
# Create or update a policy with the given rules document.
#
# @return [true]
def put_policy(name, rules)
  payload = JSON.fast_generate(rules: rules)
  client.put("/v1/sys/policy/#{encode_path(name)}", payload)
  true
end
ruby
{ "resource": "" }
q11448
Vault.Sys.audits
train
# List the enabled audit backends, keyed by mount point with the
# trailing slash removed.
#
# @return [Hash<Symbol, Audit>]
def audits
  json = client.get("/v1/sys/audit")
  json = json[:data] if json[:data]
  # Build the hash directly; the previous `Hash[*pairs.flatten]` idiom
  # flattens all levels and would corrupt array-valued entries.
  json.map { |k, v| [k.to_s.chomp("/").to_sym, Audit.decode(v)] }.to_h
end
ruby
{ "resource": "" }
q11449
Vault.Sys.audit_hash
train
# Compute the HMAC of the given input as the audit device at path
# would record it.
#
# @return [String]
def audit_hash(path, input)
  response = client.post("/v1/sys/audit-hash/#{encode_path(path)}", JSON.fast_generate(input: input))
  response = response[:data] if response[:data]
  response[:hash]
end
ruby
{ "resource": "" }
q11450
Vault.AuthToken.accessors
train
# List token accessors.
#
# @return [Secret]
def accessors(options = {})
  headers = extract_headers!(options)
  Secret.decode(client.list("/v1/auth/token/accessors", options, headers))
end
ruby
{ "resource": "" }
q11451
Vault.AuthToken.create
train
# Create a new authentication token with the given options.
#
# @return [Secret]
def create(options = {})
  headers = extract_headers!(options)
  Secret.decode(client.post("/v1/auth/token/create", JSON.fast_generate(options), headers))
end
ruby
{ "resource": "" }
q11452
Vault.AuthToken.create_with_role
train
# Create a new authentication token against a named token role.
#
# @return [Secret]
def create_with_role(name, options = {})
  headers = extract_headers!(options)
  Secret.decode(client.post("/v1/auth/token/create/#{encode_path(name)}", JSON.fast_generate(options), headers))
end
ruby
{ "resource": "" }
q11453
Vault.AuthToken.lookup
train
# Look up metadata for the given token.
#
# @return [Secret]
def lookup(token, options = {})
  headers = extract_headers!(options)
  payload = JSON.fast_generate(token: token)
  Secret.decode(client.post("/v1/auth/token/lookup", payload, headers))
end
ruby
{ "resource": "" }
q11454
Vault.AuthToken.lookup_accessor
train
# Look up token metadata by accessor.
#
# @return [Secret]
def lookup_accessor(accessor, options = {})
  headers = extract_headers!(options)
  payload = JSON.fast_generate(accessor: accessor)
  Secret.decode(client.post("/v1/auth/token/lookup-accessor", payload, headers))
end
ruby
{ "resource": "" }
q11455
Vault.AuthToken.renew
train
# Renew the given token for the requested increment (seconds).
#
# @return [Secret]
def renew(token, increment = 0, options = {})
  headers = extract_headers!(options)
  payload = JSON.fast_generate(token: token, increment: increment)
  Secret.decode(client.put("/v1/auth/token/renew", payload, headers))
end
ruby
{ "resource": "" }
q11456
Vault.AuthToken.renew_self
train
# Renew the client's own token for the requested increment (seconds).
#
# @return [Secret]
def renew_self(increment = 0, options = {})
  headers = extract_headers!(options)
  payload = JSON.fast_generate(increment: increment)
  Secret.decode(client.put("/v1/auth/token/renew-self", payload, headers))
end
ruby
{ "resource": "" }
q11457
Vault.Client.pool
train
# Build (or return the memoized) persistent HTTP connection pool,
# configured for TLS 1.2, proxies, timeouts, and client certificates.
def pool
  @lock.synchronize do
    return @nhp if @nhp

    @nhp = PersistentHTTP.new("vault-ruby", nil, pool_size)

    if proxy_address
      proxy_uri = URI.parse "http://#{proxy_address}"
      proxy_uri.port = proxy_port if proxy_port
      if proxy_username
        proxy_uri.user = proxy_username
        proxy_uri.password = proxy_password
      end
      @nhp.proxy = proxy_uri
    end

    # Use a custom open timeout
    if open_timeout || timeout
      @nhp.open_timeout = (open_timeout || timeout).to_i
    end

    # Use a custom read timeout
    if read_timeout || timeout
      @nhp.read_timeout = (read_timeout || timeout).to_i
    end

    @nhp.verify_mode = OpenSSL::SSL::VERIFY_PEER

    # Vault requires TLS1.2
    @nhp.ssl_version = "TLSv1_2"

    # Only use secure ciphers
    @nhp.ciphers = ssl_ciphers

    # Custom pem files, no problem!
    pem = ssl_pem_contents || (ssl_pem_file ? File.read(ssl_pem_file) : nil)
    if pem
      @nhp.cert = OpenSSL::X509::Certificate.new(pem)
      @nhp.key = OpenSSL::PKey::RSA.new(pem, ssl_pem_passphrase)
    end

    # Use custom CA cert for verification
    if ssl_ca_cert
      @nhp.ca_file = ssl_ca_cert
    end

    # Use custom CA path that contains CA certs
    if ssl_ca_path
      @nhp.ca_path = ssl_ca_path
    end

    if ssl_cert_store
      @nhp.cert_store = ssl_cert_store
    end

    # Naughty, naughty, naughty! Don't blame me when someone hops in
    # and executes a MITM attack!
    if !ssl_verify
      @nhp.verify_mode = OpenSSL::SSL::VERIFY_NONE
    end

    # Use custom timeout for connecting and verifying via SSL
    if ssl_timeout || timeout
      @nhp.ssl_timeout = (ssl_timeout || timeout).to_i
    end

    @nhp
  end
end
ruby
{ "resource": "" }
q11458
Vault.Client.list
train
# Perform a LIST request: a GET with the `list=true` query parameter.
def list(path, params = {}, headers = {})
  request(:get, path, params.merge(list: true), headers)
end
ruby
{ "resource": "" }
q11459
Vault.Sys.renew
train
# Renew a lease by id for the requested increment (seconds).
# The lease id is now percent-encoded like every other path segment in
# this client (encode_path preserves `/`, so normal lease ids are
# unchanged).
#
# @return [Secret]
def renew(id, increment = 0)
  json = client.put("/v1/sys/renew/#{encode_path(id)}", JSON.fast_generate(
    increment: increment,
  ))
  Secret.decode(json)
end
ruby
{ "resource": "" }
q11460
Vault.Response.to_h
train
def to_h self.class.fields.inject({}) do |h, (k, opts)| if opts[:as].nil? h[k] = self.public_send(k) else h[k] = self.public_send(opts[:as]) end if !h[k].nil? && !h[k].is_a?(Array) && h[k].respond_to?(:to_h) h[k] = h[k].to_h end h end end
ruby
{ "resource": "" }
q11461
Vault.Sys.init
train
# Initialize a new vault, configuring key shares, thresholds, PGP keys,
# and recovery parameters.
#
# @return [InitResponse]
def init(options = {})
  payload = {
    root_token_pgp_key: options.fetch(:root_token_pgp_key, nil),
    secret_shares: options.fetch(:secret_shares, options.fetch(:shares, 5)),
    secret_threshold: options.fetch(:secret_threshold, options.fetch(:threshold, 3)),
    pgp_keys: options.fetch(:pgp_keys, nil),
    stored_shares: options.fetch(:stored_shares, nil),
    recovery_shares: options.fetch(:recovery_shares, nil),
    recovery_threshold: options.fetch(:recovery_threshold, nil),
    recovery_pgp_keys: options.fetch(:recovery_pgp_keys, nil),
  }
  InitResponse.decode(client.put("/v1/sys/init", JSON.fast_generate(payload)))
end
ruby
{ "resource": "" }
q11462
Vault.AppRole.set_role
train
# Create or update an AppRole role.
#
# @return [true]
def set_role(name, options = {})
  headers = extract_headers!(options)
  client.post("/v1/auth/approle/role/#{encode_path(name)}", JSON.fast_generate(options), headers)
  true
end
ruby
{ "resource": "" }
q11463
Vault.AppRole.role
train
# Fetch an AppRole role by name, or nil when it does not exist
# (HTTP 404).
#
# @return [Secret, nil]
def role(name)
  response = client.get("/v1/auth/approle/role/#{encode_path(name)}")
  Secret.decode(response)
rescue HTTPError => e
  raise unless e.code == 404
  nil
end
ruby
{ "resource": "" }
q11464
Vault.AppRole.role_id
train
# Fetch the role_id of an AppRole role, or nil when the role does not
# exist (HTTP 404).
#
# @return [String, nil]
def role_id(name)
  response = client.get("/v1/auth/approle/role/#{encode_path(name)}/role-id")
  Secret.decode(response).data[:role_id]
rescue HTTPError => e
  raise unless e.code == 404
  nil
end
ruby
{ "resource": "" }
q11465
Vault.AppRole.set_role_id
train
# Set a custom role_id on an AppRole role.
#
# @return [true]
def set_role_id(name, role_id)
  payload = JSON.fast_generate(role_id: role_id)
  client.post("/v1/auth/approle/role/#{encode_path(name)}/role-id", payload)
  true
end
ruby
{ "resource": "" }
q11466
Vault.AppRole.create_secret_id
train
# Generate a secret_id for an AppRole role. Uses the custom-secret-id
# endpoint when a :secret_id option is supplied.
#
# @return [Secret]
def create_secret_id(role_name, options = {})
  headers = extract_headers!(options)
  endpoint = options[:secret_id] ? "custom-secret-id" : "secret-id"
  json = client.post("/v1/auth/approle/role/#{encode_path(role_name)}/#{endpoint}", JSON.fast_generate(options), headers)
  Secret.decode(json)
end
ruby
{ "resource": "" }
q11467
Vault.AppRole.secret_id
train
# Look up an AppRole secret_id. Newer Vault versions expose a POST
# lookup endpoint; fall back to the legacy GET endpoint on 404/405.
#
# @return [Secret, nil]
def secret_id(role_name, secret_id)
  opts = { secret_id: secret_id }
  json = client.post("/v1/auth/approle/role/#{encode_path(role_name)}/secret-id/lookup", JSON.fast_generate(opts), {})
  return nil unless json
  return Secret.decode(json)
rescue HTTPError => e
  if e.code == 404 || e.code == 405
    # Legacy endpoint for older Vault servers.
    begin
      json = client.get("/v1/auth/approle/role/#{encode_path(role_name)}/secret-id/#{encode_path(secret_id)}")
      return Secret.decode(json)
    rescue HTTPError => e
      return nil if e.code == 404
      raise e
    end
  end
  raise
end
ruby
{ "resource": "" }
q11468
Vault.AppRole.secret_id_accessors
train
# List the secret_id accessors for an AppRole role, or an empty array
# when the role has none (HTTP 404).
#
# @return [Array<String>]
def secret_id_accessors(role_name, options = {})
  headers = extract_headers!(options)
  json = client.list("/v1/auth/approle/role/#{encode_path(role_name)}/secret-id", options, headers)
  Secret.decode(json).data[:keys] || []
rescue HTTPError => e
  raise unless e.code == 404
  []
end
ruby
{ "resource": "" }
q11469
Vault.EncodePath.encode_path
train
# Percent-encode every byte of the path that is outside the URL-safe
# set (alphanumerics, `_`, `.`, `-`, `/`).
#
# The hyphen is escaped (`\-`) so it is a literal class member: the
# previous class `[^a-zA-Z0-9_.-/]` parsed `.-/` as a range, silently
# excluding `-` from the safe set and needlessly encoding it as %2D
# (equivalent per RFC 3986, but noisy).
#
# @param path [String]
# @return [String]
def encode_path(path)
  path.b.gsub(%r{([^a-zA-Z0-9_.\-/]+)}) { |m|
    '%' + m.unpack('H2' * m.bytesize).join('%').upcase
  }
end
ruby
{ "resource": "" }
q11470
Vault.Sys.mounts
train
# List the mounted secret backends, keyed by mount point with the
# trailing slash removed.
#
# @return [Hash<Symbol, Mount>]
def mounts
  json = client.get("/v1/sys/mounts")
  json = json[:data] if json[:data]
  # Build the hash directly; the previous `Hash[*pairs.flatten]` idiom
  # flattens all levels and would corrupt array-valued entries.
  json.map { |k, v| [k.to_s.chomp("/").to_sym, Mount.decode(v)] }.to_h
end
ruby
{ "resource": "" }
q11471
Vault.Sys.mount_tune
train
# Tune configuration values for a mounted secret backend.
# The server response was assigned to an unused local in the original;
# the assignment has been dropped.
#
# @return [true]
def mount_tune(path, data = {})
  client.post("/v1/sys/mounts/#{encode_path(path)}/tune", JSON.fast_generate(data))
  true
end
ruby
{ "resource": "" }
q11472
Vault.Sys.remount
train
# Move a mounted secret backend to a new mount point.
#
# @return [true]
def remount(from, to)
  client.post("/v1/sys/remount", JSON.fast_generate(from: from, to: to))
  true
end
ruby
{ "resource": "" }
q11473
SimpleCov.Configuration.coverage_path
train
# Absolute path of the directory coverage reports are written to,
# created on first access and memoized.
#
# @return [String]
def coverage_path
  @coverage_path ||= begin
    dir = File.expand_path(coverage_dir, root)
    FileUtils.mkdir_p dir
    dir
  end
end
ruby
{ "resource": "" }
q11474
SimpleCov.Configuration.at_exit
train
# Store or retrieve the proc run when the process exits. Returns a
# no-op proc while SimpleCov is not running and no block is given.
def at_exit(&block)
  if running || block_given?
    @at_exit = block if block_given?
    @at_exit ||= proc { SimpleCov.result.format! }
  else
    proc {}
  end
end
ruby
{ "resource": "" }
q11475
SimpleCov.Configuration.project_name
train
# Get or set the project name; defaults to a humanized version of the
# root directory's basename.
def project_name(new_name = nil)
  return @project_name if defined?(@project_name) && @project_name && new_name.nil?
  @project_name = new_name if new_name.is_a?(String)
  @project_name ||= File.basename(root.split("/").last).capitalize.tr("_", " ")
end
ruby
{ "resource": "" }
q11476
SimpleCov.Configuration.parse_filter
train
# Build a filter from either the positional argument or the block.
#
# @raise [ArgumentError] when neither is given
def parse_filter(filter_argument = nil, &filter_proc)
  source = filter_argument || filter_proc
  raise ArgumentError, "Please specify either a filter or a block to filter with" unless source
  SimpleCov::Filter.build_filter(source)
end
ruby
{ "resource": "" }
q11477
SimpleCov.RawCoverage.merge_resultsets
train
# Merge two coverage result sets file-by-file, covering the union of
# their filenames.
def merge_resultsets(result1, result2)
  all_files = result1.keys | result2.keys
  all_files.each_with_object({}) do |filename, merged|
    merged[filename] = merge_file_coverage(result1[filename], result2[filename])
  end
end
ruby
{ "resource": "" }
q11478
SimpleCov.Profiles.load
train
# Apply a previously defined profile to the global SimpleCov
# configuration.
#
# @raise [RuntimeError] when the profile is unknown
def load(name)
  key = name.to_sym
  raise "Could not find SimpleCov Profile called '#{key}'" unless key?(key)
  SimpleCov.configure(&self[key])
end
ruby
{ "resource": "" }
q11479
Reek.DocumentationLink.build
train
# Build a documentation URL for the given smell/subject, pinned to the
# current Reek version.
def build(subject)
  Kernel.format(HELP_LINK_TEMPLATE,
                version: Version::STRING,
                item: name_to_param(subject))
end
ruby
{ "resource": "" }
q11480
Inline.C.load_cache
train
# Try to require a previously compiled inline extension from the cache.
# Returns true on success, false when no cached build could be loaded.
def load_cache
  begin
    file = File.join("inline", File.basename(so_name))
    if require file then
      dir = Inline.directory
      warn "WAR\NING: #{dir} exists but is not being used" if test ?d, dir and $VERBOSE
      return true
    end
  rescue LoadError
  end
  return false
end
ruby
{ "resource": "" }
q11481
Inline.C.add_type_converter
train
# Register Ruby-to-C and C-to-Ruby conversion snippets for a C type,
# warning when an existing registration is being replaced.
def add_type_converter(type, r2c, c2r)
  warn "WAR\NING: overridding #{type} on #{caller[0]}" if @@type_map.has_key? type
  @@type_map[type] = [r2c, c2r]
end
ruby
{ "resource": "" }
q11482
Inline.C.c
train
# Generate an instance method from C source, expanding Ruby types in
# the signature by default.
def c src, options = {}
  defaults = { :expand_types => true }
  self.generate src, defaults.merge(options)
end
ruby
{ "resource": "" }
q11483
Inline.C.c_singleton
train
# Generate a singleton (class-level) method from C source, expanding
# Ruby types in the signature by default.
def c_singleton src, options = {}
  defaults = { :expand_types => true, :singleton => true }
  self.generate src, defaults.merge(options)
end
ruby
{ "resource": "" }
q11484
Inline.C.c_raw_singleton
train
# Generate a singleton (class-level) method from raw C source, without
# type expansion.
def c_raw_singleton src, options = {}
  defaults = { :singleton => true }
  self.generate src, defaults.merge(options)
end
ruby
{ "resource": "" }
q11485
Reek.Spec.reek_of
train
# RSpec matcher: expect the subject to reek of the given smell type,
# optionally constrained by smell details.
def reek_of(smell_type, smell_details = {}, configuration = Configuration::AppConfiguration.default)
  ShouldReekOf.new(smell_type, smell_details, configuration)
end
ruby
{ "resource": "" }
q11486
Reek.Spec.reek_only_of
train
# RSpec matcher: expect the subject to reek of ONLY the given smell
# type.
def reek_only_of(smell_type, configuration = Configuration::AppConfiguration.default)
  ShouldReekOnlyOf.new(smell_type, configuration)
end
ruby
{ "resource": "" }
q11487
Reek.ContextBuilder.build
train
# Dispatch an AST node to its type-specific handler when one exists,
# otherwise traverse its children generically. Returns the context
# tree's current context.
def build(exp, parent_exp = nil)
  handler = "process_#{exp.type}"
  if context_processor_exists?(handler)
    send(handler, exp, parent_exp)
  else
    process exp
  end
  current_context
end
ruby
{ "resource": "" }
q11488
Reek.ContextBuilder.process
train
# Recursively build contexts for every AST-node child of the given
# expression.
def process(exp)
  exp.children.grep(AST::Node).each do |child|
    build(child, exp)
  end
end
ruby
{ "resource": "" }
q11489
Reek.ContextBuilder.process_def
train
# Handle a method definition node: open a method context, count its
# body statements, and traverse its children.
def process_def(exp, parent)
  inside_new_context(current_context.method_context_class, exp, parent) do
    increase_statement_count_by(exp.body)
    process(exp)
  end
end
ruby
{ "resource": "" }
q11490
Reek.ContextBuilder.process_send
train
# Handle a message-send node, delegating to module- or method-specific
# handling depending on the current context.
def process_send(exp, _parent)
  process(exp)
  case current_context
  when Context::ModuleContext
    handle_send_for_modules exp
  when Context::MethodContext
    handle_send_for_methods exp
  end
end
ruby
{ "resource": "" }
q11491
Reek.ContextBuilder.process_if
train
# Handle an `if` node: count both branches as statements, compensate
# for the node itself, then traverse children.
def process_if(exp, _parent)
  _condition, then_branch, else_branch = exp.children
  increase_statement_count_by(then_branch)
  increase_statement_count_by(else_branch)
  decrease_statement_count
  process(exp)
end
ruby
{ "resource": "" }
q11492
Reek.ContextBuilder.process_rescue
train
# Handle a `rescue` node: count the guarded body as statements,
# compensate for the node itself, then traverse children.
def process_rescue(exp, _parent)
  increase_statement_count_by(exp.children.first)
  decrease_statement_count
  process(exp)
end
ruby
{ "resource": "" }
q11493
Reek.ContextBuilder.inside_new_context
train
# Run the given block with a freshly appended context as the current
# context, restoring the previous context afterwards.
#
# The restoration now happens in an `ensure` clause so that an
# exception raised inside the block cannot leave a stale current
# context behind and corrupt the traversal of later siblings.
def inside_new_context(klass, *args)
  new_context = append_new_context(klass, *args)
  orig = current_context
  self.current_context = new_context
  begin
    yield
  ensure
    self.current_context = orig
  end
end
ruby
{ "resource": "" }
q11494
Reek.ContextBuilder.append_new_context
train
# Instantiate a new context of the given class and register it as a
# child of the current context.
def append_new_context(klass, *args)
  new_context = klass.new(*args)
  new_context.register_with_parent(current_context)
  new_context
end
ruby
{ "resource": "" }
q11495
Reek.SmellConfiguration.value
train
# Fetch the configured value for a key, preferring context-specific
# overrides over the default options.
def value(key, context)
  overrides_for(context).each do |conf|
    return conf[key] if conf.key?(key)
  end
  options.fetch(key)
end
ruby
{ "resource": "" }
q11496
Reek.Overrides.for_context
train
# Collect the override hashes whose keys match the given context.
def for_context(context)
  matching_keys = hash.keys.select { |ckey| context.matches?([ckey]) }
  matching_keys.map { |key| hash[key] }
end
ruby
{ "resource": "" }
q11497
Reek.TreeDresser.dress
train
# Recursively convert a raw parser AST into dressed Reek nodes,
# attaching the comments recorded for each node. Non-node leaves are
# returned unchanged.
def dress(sexp, comment_map)
  return sexp unless sexp.is_a? ::Parser::AST::Node
  dressed_children = sexp.children.map { |child| dress(child, comment_map) }
  klass_map.klass_for(sexp.type).new(sexp.type,
                                     dressed_children,
                                     location: sexp.loc,
                                     comments: comment_map[sexp])
end
ruby
{ "resource": "" }
q11498
Fluent::Plugin.ElasticsearchOutput.append_record_to_messages
train
# Append one bulk-API action (header line + body line) for a record.
# Returns true when a message pair was appended, false when the
# operation could not be built (update/create without an id).
def append_record_to_messages(op, meta, header, record, msgs)
  case op
  when UPDATE_OP, UPSERT_OP
    return false unless meta.has_key?(ID_FIELD)
    header[UPDATE_OP] = meta
    msgs << @dump_proc.call(header) << BODY_DELIMITER
    msgs << @dump_proc.call(update_body(record, op)) << BODY_DELIMITER
    true
  when CREATE_OP
    return false unless meta.has_key?(ID_FIELD)
    header[CREATE_OP] = meta
    msgs << @dump_proc.call(header) << BODY_DELIMITER
    msgs << @dump_proc.call(record) << BODY_DELIMITER
    true
  when INDEX_OP
    header[INDEX_OP] = meta
    msgs << @dump_proc.call(header) << BODY_DELIMITER
    msgs << @dump_proc.call(record) << BODY_DELIMITER
    true
  else
    false
  end
end
ruby
{ "resource": "" }
q11499
Fluent::Plugin.ElasticsearchOutput.send_bulk
train
# Send a prepared bulk payload to Elasticsearch, dispatching partial
# failures to the error handler and converting transport errors into
# retryable/unrecoverable failures.
def send_bulk(data, tag, chunk, bulk_message_count, extracted_values, info)
  begin
    log.on_trace { log.trace "bulk request: #{data}" }
    response = client(info.host).bulk body: data, index: info.index
    log.on_trace { log.trace "bulk response: #{response}" }
    if response['errors']
      error = Fluent::Plugin::ElasticsearchErrorHandler.new(self)
      error.handle_error(response, tag, chunk, bulk_message_count, extracted_values)
    end
  rescue RetryStreamError => e
    emit_tag = @retry_tag ? @retry_tag : tag
    router.emit_stream(emit_tag, e.retry_stream)
  rescue => e
    ignore = @ignore_exception_classes.any? { |clazz| e.class <= clazz }

    log.warn "Exception ignored in tag #{tag}: #{e.class.name} #{e.message}" if ignore

    @_es = nil if @reconnect_on_error
    @_es_info = nil if @reconnect_on_error

    raise UnrecoverableRequestFailure if ignore && @exception_backup

    # FIXME: identify unrecoverable errors and raise UnrecoverableRequestFailure instead
    raise RecoverableRequestFailure, "could not push logs to Elasticsearch cluster (#{connection_options_description(info.host)}): #{e.message}" unless ignore
  end
end
ruby
{ "resource": "" }