_id
stringlengths
2
6
title
stringlengths
9
130
partition
stringclasses
3 values
text
stringlengths
66
10.5k
language
stringclasses
1 value
meta_information
dict
q21400
ConfigVar.Context.optional_string
train
# Registers an optional string config var: reads the upcased env key and
# falls back to +default+ when it is not set.
def optional_string(name, default)
  optional_custom(name) do |env|
    value = env[name.to_s.upcase]
    value ? {name => value} : {name => default}
  end
end
ruby
{ "resource": "" }
q21401
ConfigVar.Context.optional_int
train
# Registers an optional integer config var: parses the upcased env key via
# parse_int, falling back to +default+ when absent.
def optional_int(name, default)
  optional_custom(name) do |env|
    value = env[name.to_s.upcase]
    value ? {name => parse_int(name, value)} : {name => default}
  end
end
ruby
{ "resource": "" }
q21402
ConfigVar.Context.optional_bool
train
# Registers an optional boolean config var: parses the upcased env key via
# parse_bool, falling back to +default+ when absent.
def optional_bool(name, default)
  optional_custom(name) do |env|
    value = env[name.to_s.upcase]
    value ? {name => parse_bool(name, value)} : {name => default}
  end
end
ruby
{ "resource": "" }
q21403
ConfigVar.Context.parse_bool
train
# Parses a boolean environment value. "1"/"true"/"enabled" (any case) are
# true; "0"/"false" are false; anything else raises.
#
# Fixes: downcased the value once instead of once per branch, and uses the
# idiomatic `raise Class, message` form.
#
# @param name [Symbol, String] config var name, used in the error message
# @param value [String] the raw value to parse
# @raise [ArgumentError] when the value is not a recognized boolean
def parse_bool(name, value)
  normalized = value.downcase
  if ['1', 'true', 'enabled'].include?(normalized)
    true
  elsif ['0', 'false'].include?(normalized)
    false
  else
    raise ArgumentError, "#{value} is not a valid boolean for #{name.to_s.upcase}"
  end
end
ruby
{ "resource": "" }
q21404
ConfigVar.Context.define_config
train
# Registers a config definition under +name+. The stored proc invokes the
# given block with the environment; non-Hash results are wrapped as
# {name => value} so every definition yields a Hash.
def define_config(name, &blk)
  raise ConfigError.new("#{name.to_s.upcase} is already registered") if @definitions.has_key?(name)

  @definitions[name] = Proc.new do |env|
    result = blk.call(env)
    result.kind_of?(Hash) ? result : {name => result}
  end
end
ruby
{ "resource": "" }
q21405
Churn.ChurnCalculator.analyze
train
# Normalizes the accumulated churn data: sorts each change set and converts
# it into an array of one-hash-per-entry records ready for reporting.
def analyze
  @changes = sort_changes(@changes).map do |file_path, times_changed|
    {:file_path => file_path, :times_changed => times_changed}
  end
  calculate_revision_changes
  @method_changes = sort_changes(@method_changes).map do |method, times_changed|
    {'method' => method, 'times_changed' => times_changed}
  end
  @class_changes = sort_changes(@class_changes).map do |klass, times_changed|
    {'klass' => klass, 'times_changed' => times_changed}
  end
end
ruby
{ "resource": "" }
q21406
Churn.ChurnCalculator.to_h
train
# Builds the churn report hash: overall changes plus class/method churn, and
# the files/classes/methods changed in the most recent revision.
def to_h
  hash = {:churn => {:changes => @changes}}
  hash[:churn][:class_churn] = @class_changes
  hash[:churn][:method_churn] = @method_changes

  # Detail the changes made in the most recent revision.
  first_revision = @revisions.first
  recent = @revision_changes[first_revision]
  if recent
    hash[:churn][:changed_files] = recent[:files]
    hash[:churn][:changed_classes] = recent[:classes]
    hash[:churn][:changed_methods] = recent[:methods]
  end

  # TODO: crappy place to do this, but while entirely under metric_fu this is
  # the only choice for persisting the hash to the revision file.
  ChurnHistory.store_revision_history(first_revision, hash, @churn_options.data_directory)
  hash
end
ruby
{ "resource": "" }
q21407
PBS.Batch.get_status
train
# Queries server status attributes via pbs_statserver, returning them as a
# Hash and freeing the native batch-status structure afterwards.
def get_status(filters: [])
  connect do |cid|
    attrl = PBS::Torque::Attrl.from_list(filters)
    batch_status = Torque.pbs_statserver(cid, attrl, nil)
    batch_status.to_h.tap { Torque.pbs_statfree batch_status }
  end
end
ruby
{ "resource": "" }
q21408
PBS.Batch.get_queues
train
# Queries queue attributes (optionally for a single queue id) via
# pbs_statque, returning them as a Hash and freeing the native structure.
def get_queues(id: '', filters: [])
  connect do |cid|
    attrl = PBS::Torque::Attrl.from_list(filters)
    batch_status = Torque.pbs_statque(cid, id.to_s, attrl, nil)
    batch_status.to_h.tap { Torque.pbs_statfree batch_status }
  end
end
ruby
{ "resource": "" }
q21409
PBS.Batch.get_nodes
train
# Queries node attributes (optionally for a single node id) via
# pbs_statnode, returning them as a Hash and freeing the native structure.
def get_nodes(id: '', filters: [])
  connect do |cid|
    attrl = PBS::Torque::Attrl.from_list(filters)
    batch_status = Torque.pbs_statnode(cid, id.to_s, attrl, nil)
    batch_status.to_h.tap { Torque.pbs_statfree batch_status }
  end
end
ruby
{ "resource": "" }
q21410
PBS.Batch.select_jobs
train
# Selects jobs matching the given attribute operations via pbs_selstat,
# returning them as a Hash and freeing the native structure.
def select_jobs(attribs: [])
  connect do |cid|
    attropl = PBS::Torque::Attropl.from_list(attribs.map(&:to_h))
    batch_status = Torque.pbs_selstat(cid, attropl, nil)
    batch_status.to_h.tap { Torque.pbs_statfree batch_status }
  end
end
ruby
{ "resource": "" }
q21411
PBS.Batch.get_jobs
train
# Queries job attributes (optionally for a single job id) via pbs_statjob,
# returning them as a Hash and freeing the native structure.
def get_jobs(id: '', filters: [])
  connect do |cid|
    attrl = PBS::Torque::Attrl.from_list(filters)
    batch_status = Torque.pbs_statjob(cid, id.to_s, attrl, nil)
    batch_status.to_h.tap { Torque.pbs_statfree batch_status }
  end
end
ruby
{ "resource": "" }
q21412
PBS.Batch.submit_script
train
# Submits a script, dispatching to the qsub-based or the C-API-based
# submitter depending on the +qsub+ flag.
def submit_script(script, queue: nil, headers: {}, resources: {}, envvars: {}, qsub: true)
  submitter = qsub ? :qsub_submit : :pbs_submit
  send(submitter, script.to_s, queue.to_s, headers, resources, envvars)
end
ruby
{ "resource": "" }
q21413
PBS.Batch.submit_string
train
# Writes the given content to a temp file and submits it as a script,
# forwarding all keyword options to submit_script.
def submit_string(string, **kwargs)
  Tempfile.open('qsub.') do |f|
    f.write(string.to_s)
    f.close
    submit_script(f.path, **kwargs)
  end
end
ruby
{ "resource": "" }
q21414
PBS.Batch.submit
train
# Submits job content on stdin to qsub and returns the stripped job id.
def submit(content, args: [], env: {}, chdir: nil)
  output = call(:qsub, *args, env: env, stdin: content, chdir: chdir)
  output.strip
end
ruby
{ "resource": "" }
q21415
PBS.Batch.pbs_submit
train
# Submits a script through the PBS C API: headers become plain attributes,
# resources become Resource_List entries, and env vars are packed into a
# single Variable_List attribute.
def pbs_submit(script, queue, headers, resources, envvars)
  attribs = headers.map { |name, value| { name: name, value: value } }
  resources.each do |rsc, value|
    attribs << { name: :Resource_List, resource: rsc, value: value }
  end
  unless envvars.empty?
    attribs << {
      name: :Variable_List,
      value: envvars.map { |k, v| "#{k}=#{v}" }.join(",")
    }
  end
  connect do |cid|
    attropl = Torque::Attropl.from_list(attribs)
    Torque.pbs_submit(cid, attropl, script, queue, nil)
  end
end
ruby
{ "resource": "" }
q21416
PBS.Batch.qsub_arg
train
# Maps a PBS attribute to its qsub command-line argument pair. Most keys are
# a simple "flag + value" lookup; a few are value-less flags or need special
# formatting; unknown keys fall through to `-W key=value`.
def qsub_arg(key, value)
  flag_map = {
    # common attributes
    Execution_Time: '-a', Checkpoint: '-c', Error_Path: '-e',
    Join_Path: '-j', Keep_Files: '-k', Mail_Points: '-m',
    Output_Path: '-o', Priority: '-p', Rerunable: '-r',
    job_array_request: '-t', User_List: '-u', Account_Name: '-A',
    Mail_Users: '-M', Job_Name: '-N', Shell_Path_List: '-S',
    # uncommon attributes
    job_arguments: '-F',
    init_work_dir: '-d' # sets PBS_O_INITDIR
  }

  case key
  when :fault_tolerant then ['-f'] # value-less flag
  when :Hold_Types     then ['-h'] # value-less flag
  when :reservation_id then ['-W', "x=advres:#{value}"] # Moab resource manager extension
  else
    flag = flag_map[key]
    flag ? [flag, value.to_s] : ['-W', "#{key}=#{value}"]
  end
end
ruby
{ "resource": "" }
q21417
PBS.Batch.call
train
# Runs a PBS binary with the given args/env/stdin and returns stdout.
# The PBS host and library path are forced into the environment.
#
# @raise [PBS::Error] with stderr as the message when the command fails
def call(cmd, *args, env: {}, stdin: "", chdir: nil)
  command = bin.join(cmd.to_s).to_s
  arguments = args.map(&:to_s)
  environment = env.to_h.each_with_object({}) { |(k, v), h| h[k.to_s] = v.to_s }
  environment = environment.merge(
    "PBS_DEFAULT" => host,
    "LD_LIBRARY_PATH" => %{#{lib}:#{ENV["LD_LIBRARY_PATH"]}}
  )
  workdir = chdir || "."
  o, e, s = Open3.capture3(environment, command, *arguments,
                           stdin_data: stdin.to_s, chdir: workdir.to_s)
  s.success? ? o : raise(PBS::Error, e)
end
ruby
{ "resource": "" }
q21418
Riml.IncludeCache.fetch
train
# Returns the cached source for +included_filename+, computing it from the
# block on a miss. The mutex guards concurrent compilation; @owns_lock lets
# a re-entrant fetch from the lock-holding thread bypass synchronize.
def fetch(included_filename)
  cached = @cache[included_filename]
  return cached if cached

  if @m.locked? && @owns_lock == Thread.current
    # Re-entrant call from the thread that already holds the lock.
    @cache[included_filename] = yield
  else
    result = nil
    @cache[included_filename] = @m.synchronize do
      begin
        @owns_lock = Thread.current
        result = yield
      ensure
        @owns_lock = nil
      end
    end
    result
  end
end
ruby
{ "resource": "" }
q21419
Riml.Compiler.compile
train
# Compiles an AST: wires this compiler into the root node, walks the tree
# with the nodes visitor, and returns the accumulated output.
def compile(root_node)
  root_node.extend(CompilerAccessible)
  root_node.current_compiler = self
  root_node.accept(NodesVisitor.new)
  root_node.compiled_output
end
ruby
{ "resource": "" }
q21420
Dockerspec.DockerExceptionParser.parse_exception
train
# Parses a Docker exception message as newline-separated JSON documents,
# stripping the "Couldn't find id: " prefix when present. Re-raises the
# original exception if any line is not valid JSON.
def parse_exception(e)
  message = e.to_s
  lines = message.to_s.sub(/^Couldn't find id: /, '').split("\n").map(&:chomp)
  lines.map { |line| JSON.parse(line) }
rescue JSON::ParserError
  raise e
end
ruby
{ "resource": "" }
q21421
Dockerspec.DockerExceptionParser.parse_streams
train
# Concatenates the 'stream' values of every Hash entry in the array.
#
# Fixes: the previous `map { |x| x.is_a?(Hash) && x['stream'] }.compact`
# produced a literal `false` for non-Hash entries, which `compact` does not
# remove, so `join` rendered the string "false" into the output. Using an
# else-less `if` yields nil for non-Hash entries instead, which compact drops.
def parse_streams(e_ary)
  e_ary.map { |x| x['stream'] if x.is_a?(Hash) }.compact.join
end
ruby
{ "resource": "" }
q21422
Dockerspec.Builder.source
train
# Determines (and memoizes) which kind of build source was supplied in the
# options, in priority order: string, template, id, path.
def source
  @source ||= %i(string template id path).find { |from| @options.key?(from) }
end
ruby
{ "resource": "" }
q21423
Dockerspec.Builder.image
train
# Getter/setter for the built image. With no argument returns the current
# image; with an image, registers it for garbage collection when :rm is set
# and stores it.
def image(img = nil)
  return @image if img.nil?

  ImageGC.instance.add(img.id) if @options[:rm]
  @image = img
end
ruby
{ "resource": "" }
q21424
Dockerspec.Builder.rspec_options
train
# Collects builder options defined through RSpec configuration, including
# only the settings that have actually been set.
def rspec_options
  config = ::RSpec.configuration
  opts = {}
  opts[:path] = config.dockerfile_path if config.dockerfile_path?
  opts[:rm] = config.rm_build if config.rm_build?
  opts[:log_level] = config.log_level if config.log_level?
  opts
end
ruby
{ "resource": "" }
q21425
Dockerspec.Builder.default_options
train
# Default builder options, overridden by anything set via RSpec config.
def default_options
  base = {
    path: ENV['DOCKERFILE_PATH'] || '.',
    # Autoremove images in all CIs except Travis (not supported):
    rm: ci? && !travis_ci?,
    # Avoid CI timeout errors:
    log_level: ci? ? :ci : :silent
  }
  base.merge(rspec_options)
end
ruby
{ "resource": "" }
q21426
Dockerspec.Builder.parse_options
train
# Normalizes builder arguments: bare values are treated as { path: value },
# then all hashes are merged left-to-right on top of the defaults.
def parse_options(opts)
  normalized = opts.map { |o| o.is_a?(Hash) ? o : { path: o } }
  normalized.inject(default_options) { |acc, hsh| acc.merge(hsh) }
end
ruby
{ "resource": "" }
q21427
Dockerspec.Builder.build_from_string
train
# Builds an image from Dockerfile content given as a string: copies the
# build context into a temp dir, writes the Dockerfile there, and builds.
def build_from_string(string, dir = '.')
  dir = @options[:string_build_path] if @options[:string_build_path]
  Dir.mktmpdir do |tmpdir|
    FileUtils.cp_r("#{dir}/.", tmpdir)
    File.write(File.join(tmpdir, 'Dockerfile'), string)
    build_from_dir(tmpdir)
  end
end
ruby
{ "resource": "" }
q21428
Dockerspec.Builder.build_from_dir
train
# Builds an image from a directory containing a Dockerfile, stores it and
# tags it. Docker errors are parsed into a readable exception.
def build_from_dir(dir)
  built = ::Docker::Image.build_from_dir(dir, &build_block)
  image(built)
  add_repository_tag
rescue ::Docker::Error::DockerError => e
  DockerExceptionParser.new(e)
end
ruby
{ "resource": "" }
q21429
Dockerspec.Builder.build_from_path
train
# Builds from a filesystem path. A path pointing at a file literally named
# "Dockerfile" is treated as its containing directory; directories build as
# contexts, other files as standalone Dockerfiles.
def build_from_path(path)
  path = File.dirname(path) if !File.directory?(path) && File.basename(path) == 'Dockerfile'
  File.directory?(path) ? build_from_dir(path) : build_from_file(path)
end
ruby
{ "resource": "" }
q21430
Dockerspec.Builder.build_from_template
train
# Renders an Erubis Dockerfile template with the configured context and
# builds from the resulting string, using the template's directory as the
# build context.
def build_from_template(file)
  context = @options[:context] || {}
  rendered = Erubis::Eruby.new(IO.read(file)).evaluate(context)
  build_from_string(rendered, File.dirname(file))
end
ruby
{ "resource": "" }
q21431
Dockerspec.Builder.build_from_id
train
# Resolves an image by id: uses the local image when present, otherwise
# pulls it from the registry. Docker errors are parsed into a readable
# exception.
def build_from_id(id)
  @image = ::Docker::Image.get(id)
  add_repository_tag
rescue ::Docker::Error::NotFoundError
  # Not available locally — pull from the registry instead.
  @image = ::Docker::Image.create('fromImage' => id)
  add_repository_tag
rescue ::Docker::Error::DockerError => e
  DockerExceptionParser.new(e)
end
ruby
{ "resource": "" }
q21432
Dockerspec.Builder.add_repository_tag
train
# Tags the built image when a :tag option ("repo" or "repo:tag") was given.
def add_repository_tag
  return unless @options.key?(:tag)

  repo, tag = @options[:tag].split(':', 2)
  @image.tag(repo: repo, tag: tag, force: true)
end
ruby
{ "resource": "" }
q21433
WorkingHours.Computation.in_config_zone
train
# Converts +time+ into the configured time zone. Time-like objects are
# converted directly; bare Dates become midnight in the zone; anything else
# raises TypeError.
def in_config_zone(time, config: nil)
  if time.respond_to?(:in_time_zone)
    time.in_time_zone(config[:time_zone])
  elsif time.is_a?(Date)
    config[:time_zone].local(time.year, time.month, time.day)
  else
    raise TypeError.new("Can't convert #{time.class} to a Time")
  end
end
ruby
{ "resource": "" }
q21434
ForeverStyleGuide.ApplicationHelper.is_active?
train
# True when the current controller or action name contains +page_name+, or
# when the current product matches one of the comma-separated product types
# (excluding any product named like 'Historian').
def is_active?(page_name, product_types = nil)
  return true if controller.controller_name.include?(page_name)
  return true if controller.action_name.include?(page_name)

  !@product.nil? &&
    !product_types.nil? &&
    product_types.split(',').include?(@product.product_type) &&
    !@product.name.include?('Historian')
end
ruby
{ "resource": "" }
q21435
Etsy.Listing.admirers
train
# Returns the users who favorited this listing. OAuth credentials are
# attached when available; an empty array is returned when nobody has
# favorited the listing.
def admirers(options = {})
  options = options.merge(:access_token => token, :access_secret => secret) if token && secret
  favorites = FavoriteListing.find_all_listings_favored_by(id, options)
  user_ids = favorites.map(&:user_id).uniq
  user_ids.empty? ? [] : Array(Etsy::User.find(user_ids, options))
end
ruby
{ "resource": "" }
q21436
Etsy.Shop.listings
train
# Returns this shop's listings, optionally filtered by state and merged
# with OAuth credentials.
def listings(state = nil, options = {})
  state_filter = state ? {:state => state} : {}
  Listing.find_all_by_shop_id(id, state_filter.merge(options).merge(oauth))
end
ruby
{ "resource": "" }
q21437
Etsy.User.addresses
train
# Returns (and memoizes) this user's addresses, passing OAuth credentials
# when available.
def addresses
  options = token && secret ? {:access_token => token, :access_secret => secret} : {}
  @addresses ||= Address.find(username, options)
end
ruby
{ "resource": "" }
q21438
Etsy.User.profile
train
# Returns (and memoizes) this user's profile. Uses the already-associated
# profile data when present; otherwise re-fetches the user with the Profile
# association included.
def profile
  return @profile if @profile

  @profile =
    if associated_profile
      Profile.new(associated_profile)
    else
      options = {:fields => 'user_id', :includes => 'Profile'}
      options = options.merge(:access_token => token, :access_secret => secret) if token && secret
      Profile.new(User.find(username, options).associated_profile)
    end
end
ruby
{ "resource": "" }
q21439
Etsy.Response.result
train
# Returns the API results: a single record when count == 1, otherwise the
# whole array. On failure, either returns [] (silent mode) or validates to
# raise the appropriate error.
def result
  return Etsy.silent_errors ? [] : validate! unless success?

  results = to_hash['results'] || []
  count == 1 ? results.first : results
end
ruby
{ "resource": "" }
q21440
Etsy.BasicClient.client
train
def client # :nodoc:
  # Memoized HTTP connection; SSL is enabled when the protocol is https.
  @client ||= begin
    connection = Net::HTTP.new(@host, Etsy.protocol == "http" ? 80 : 443)
    connection.use_ssl = true if Etsy.protocol == "https"
    connection
  end
end
ruby
{ "resource": "" }
q21441
Etsy.SecureClient.add_multipart_data
train
# Encodes +params+ as a multipart/form-data body on +req+. Values that
# respond to #read are embedded as file parts (read from value.path, sent
# as image/jpeg); everything else becomes a plain form field.
def add_multipart_data(req, params)
  crlf = "\r\n"
  boundary = Time.now.to_i.to_s(16)
  req["Content-Type"] = "multipart/form-data; boundary=#{boundary}"

  parts = params.map do |key, value|
    esc_key = CGI.escape(key.to_s)
    part = "--#{boundary}#{crlf}"
    if value.respond_to?(:read)
      part << "Content-Disposition: form-data; name=\"#{esc_key}\"; filename=\"#{File.basename(value.path)}\"#{crlf}"
      part << "Content-Type: image/jpeg#{crlf*2}"
      part << open(value.path, "rb") {|io| io.read}
    else
      part << "Content-Disposition: form-data; name=\"#{esc_key}\"#{crlf*2}#{value}"
    end
    part << crlf
  end

  req.body = parts.join << "--#{boundary}--#{crlf*2}"
  req["Content-Length"] = req.body.size
end
ruby
{ "resource": "" }
q21442
Multitenancy.Filter.fix_headers!
train
# Promotes X-* headers: every env key starting with "HTTP_X_" is re-added
# under the same name minus the leading "HTTP_", and the original key is
# deleted. Returns the (mutated) env.
#
# Fixes: the previous `gsub("HTTP_", "")` also stripped "HTTP_" occurring
# later in the key (e.g. "HTTP_X_HTTP_METHOD" became "X_METHOD"); the
# anchored sub removes only the leading prefix.
def fix_headers!(env)
  env.keys.select { |k| k =~ /^HTTP_X_/ }.each do |k|
    env[k.sub(/\AHTTP_/, "")] = env[k]
    env.delete(k)
  end
  env
end
ruby
{ "resource": "" }
q21443
StubShell.Shell.resolve
train
# Resolves a command string against this context's commands, walking up the
# parent-context chain. Raises when no context can resolve it.
def resolve(command_string)
  match = @commands.detect { |cmd| cmd.matches?(command_string) }
  return match if match
  return parent_context.resolve(command_string) if parent_context

  raise "Command #{command_string} could not be resolved from the current context."
end
ruby
{ "resource": "" }
q21444
Transit.Decoder.decode
train
# Decodes a transit-encoded node into its Ruby value.
#
# Strings may be cache references, escaped scalars, or tags; Arrays and
# Hashes are decoded recursively (note: Arrays are consumed destructively
# via shift, and multi-entry Hashes are rewritten in place).
#
# NOTE(review): relies on constants/ivars defined elsewhere (ESC, TAG,
# ESC_ESC, ESC_SUB, ESC_RES, MAP_AS_ARRAY, @handlers, @default_handler).
def decode(node, cache=RollingCache.new, as_map_key=false)
  case node
  when String
    if cache.has_key?(node)
      # Cache hit: the string is a cache code for a previously seen value.
      cache.read(node)
    else
      parsed = if !node.start_with?(ESC)
                 node
               elsif node.start_with?(TAG)
                 Tag.new(node[2..-1])
               elsif handler = @handlers[node[1]]
                 handler.from_rep(node[2..-1])
               elsif node.start_with?(ESC_ESC, ESC_SUB, ESC_RES)
                 # Escaped escape/sub/reserved character: strip the escape.
                 node[1..-1]
               else
                 @default_handler.from_rep(node[1], node[2..-1])
               end
      if cache.cacheable?(node, as_map_key)
        cache.write(parsed)
      end
      parsed
    end
  when Array
    return node if node.empty?
    # The first element may mark the whole array as a map or a tagged value.
    e0 = decode(node.shift, cache, false)
    if e0 == MAP_AS_ARRAY
      decode(Hash[*node], cache)
    elsif Tag === e0
      v = decode(node.shift, cache)
      if handler = @handlers[e0.value]
        handler.from_rep(v)
      else
        @default_handler.from_rep(e0.value,v)
      end
    else
      [e0] + node.map {|e| decode(e, cache, as_map_key)}
    end
  when Hash
    if node.size == 1
      # A single-entry hash may be a tagged value in map form.
      k = decode(node.keys.first, cache, true)
      v = decode(node.values.first, cache, false)
      if Tag === k
        if handler = @handlers[k.value]
          handler.from_rep(v)
        else
          @default_handler.from_rep(k.value,v)
        end
      else
        {k => v}
      end
    else
      # Decode keys and values in place (keys as map keys).
      node.keys.each do |k|
        node.store(decode(k, cache, true), decode(node.delete(k), cache))
      end
      node
    end
  else
    node
  end
end
ruby
{ "resource": "" }
q21445
DatabaseConsistency.Helper.parent_models
train
# For each table, picks the base model of an STI hierarchy: the model whose
# superclass is NOT part of the same table group.
def parent_models
  models.group_by(&:table_name).each_value.map do |group|
    group.min_by { |model| group.include?(model.superclass) ? 1 : 0 }
  end
end
ruby
{ "resource": "" }
q21446
VCAP.Subprocess.run
train
# Runs +command+ as a child process, capturing stdout/stderr, with an
# optional timeout enforced via a self-pipe + SIGCHLD handler.
# Returns [stdout, stderr, status]; raises on timeout, read error, or an
# unexpected exit status.
def run(command, expected_exit_status=0, timeout=nil, options={}, env={})
  # We use a pipe to ourself to time out long running commands (if desired) as follows:
  # 1. Set up a pipe to ourselves
  # 2. Install a signal handler that writes to one end of our pipe on SIGCHLD
  # 3. Select on the read end of our pipe and check if our process exited
  sigchld_r, sigchld_w = IO.pipe
  prev_sigchld_handler = install_sigchld_handler(sigchld_w)
  start = Time.now.to_i
  child_pid, stdin, stdout, stderr = POSIX::Spawn.popen4(env, command, options)
  stdin.close
  # Used to look up the name of an io object when an errors occurs while
  # reading from it, as well as to look up the corresponding buffer to
  # append to.
  io_map = {
    stderr    => { :name => 'stderr', :buf => '' },
    stdout    => { :name => 'stdout', :buf => '' },
    sigchld_r => { :name => 'sigchld_r', :buf => '' },
    sigchld_w => { :name => 'sigchld_w', :buf => '' },
  }
  status = nil
  time_left = timeout
  read_cands = [stdout, stderr, sigchld_r]
  error_cands = read_cands.dup
  begin
    while read_cands.length > 0
      active_ios = IO.select(read_cands, nil, error_cands, time_left)

      # Check if timeout was hit
      if timeout
        time_left = timeout - (Time.now.to_i - start)
        unless active_ios && (time_left > 0)
          raise VCAP::SubprocessTimeoutError.new(timeout, command, io_map[stdout][:buf], io_map[stderr][:buf])
        end
      end

      # Read as much as we can from the readable ios before blocking
      for io in active_ios[0]
        begin
          io_map[io][:buf] << io.read_nonblock(READ_SIZE)
        rescue IO::WaitReadable
          # Reading would block, so put ourselves back on the loop
        rescue EOFError
          # Pipe has no more data, remove it from the readable/error set
          # NB: We cannot break from the loop here, as the other pipes may have data to be read
          read_cands.delete(io)
          error_cands.delete(io)
        end

        # Our signal handler notified us that >= 1 children have exited;
        # check if our child has exited.
        if (io == sigchld_r) && Process.waitpid(child_pid, Process::WNOHANG)
          status = $?
          read_cands.delete(sigchld_r)
          error_cands.delete(sigchld_r)
        end
      end

      # Error reading from one or more pipes.
      unless active_ios[2].empty?
        io_names = active_ios[2].map {|io| io_map[io][:name] }
        raise SubprocessReadError.new(io_names.join(', '), command, io_map[stdout][:buf], io_map[stderr][:buf])
      end
    end
  rescue
    # A timeout or an error occurred while reading from one or more pipes.
    # Kill the process if we haven't reaped its exit status already.
    kill_pid(child_pid) unless status
    raise
  ensure
    # Make sure we reap the child's exit status, close our fds, and restore
    # the previous SIGCHLD handler
    unless status
      Process.waitpid(child_pid)
      status = $?
    end
    io_map.each_key {|io| io.close unless io.closed? }
    trap('CLD') { prev_sigchld_handler.call } if prev_sigchld_handler
  end

  unless status.exitstatus == expected_exit_status
    raise SubprocessStatusError.new(command, io_map[stdout][:buf], io_map[stderr][:buf], status)
  end

  [io_map[stdout][:buf], io_map[stderr][:buf], status]
end
ruby
{ "resource": "" }
q21447
GoogleApps.DocumentHandler.create_doc
train
# Builds an Atom document of the given registered type from +text+,
# delegating to unknown_type for unregistered types.
def create_doc(text, type = nil)
  return unknown_type(text) unless @documents.include?(type)

  doc_of_type(text, type)
end
ruby
{ "resource": "" }
q21448
GoogleApps.DocumentHandler.doc_of_type
train
# Instantiates the Atom document class for +type+ with the given text.
# Raises when the (stringified) type is not a registered document type.
def doc_of_type(text, type)
  unless @documents.include?(type.to_s)
    raise "No Atom document of type: #{type}"
  end
  GoogleApps::Atom.send(type, text)
end
ruby
{ "resource": "" }
q21449
GoogleApps.Client.export_status
train
# Fetches the status of a mailbox export request and wraps the response in
# an export_status Atom document.
def export_status(username, id)
  url = URI(export + "/#{username}" + build_id(id)).to_s
  response = make_request(:get, url, headers: {'content-type' => 'application/atom+xml'})
  create_doc(response.body, :export_status)
end
ruby
{ "resource": "" }
q21450
GoogleApps.Client.fetch_export
train
# Downloads a completed export for +username+, returning nil when the
# export is not ready yet.
def fetch_export(username, req_id, filename)
  status_doc = export_status(username, req_id)
  return nil unless export_ready?(status_doc)

  # NOTE(review): rewrites each URL to "#(unknown)<index>" in place — this
  # looks like placeholder/garbled logic; confirm intended behavior.
  download_export(status_doc, filename).each_with_index do |url, index|
    url.gsub!(/.*/, "#(unknown)#{index}")
  end
end
ruby
{ "resource": "" }
q21451
GoogleApps.Client.download
train
# Fetches +url+ and writes the response body to +filename+.
def download(url, filename)
  response = make_request(:get, url, headers: {'content-type' => 'application/atom+xml'})
  File.open(filename, "w") { |file| file.puts(response.body) }
end
ruby
{ "resource": "" }
q21452
GoogleApps.Client.get_groups
train
# Retrieves groups, following pagination up to options[:limit] entries
# (default: effectively unbounded), and returns all pages combined.
def get_groups(options = {})
  limit = options[:limit] || 1000000
  url = group + "#{options[:extra]}" + "?startGroup=#{options[:start]}"
  response = make_request(:get, url, headers: {'content-type' => 'application/atom+xml'})
  return_all(fetch_pages(response, limit, :feed))
end
ruby
{ "resource": "" }
q21453
GoogleApps.Client.get_next_page
train
# Fetches the next page of a paginated feed and parses it as an Atom feed.
# NOTE(review): +type+ is currently unused; every page is parsed as a feed.
def get_next_page(next_page_url, type)
  response = make_request(:get, next_page_url, headers: {'content-type' => 'application/atom+xml'})
  GoogleApps::Atom.feed(response.body)
end
ruby
{ "resource": "" }
q21454
GoogleApps.Client.fetch_pages
train
# Follows next-page links starting from +response+ until there are no more
# pages or the entry limit is reached; returns the collected pages.
def fetch_pages(response, limit, type)
  pages = [GoogleApps::Atom.feed(response.body)]
  # NOTE(review): the page size is always looked up under :user regardless
  # of +type+ — confirm this is intended.
  while pages.last.next_page && (pages.count * PAGE_SIZE[:user] < limit)
    pages << get_next_page(pages.last.next_page, type)
  end
  pages
end
ruby
{ "resource": "" }
q21455
Duckface.ParameterPair.argument_name_without_leading_underscore
train
# Returns the argument name as a symbol with any single leading underscore
# removed (e.g. "_foo" -> :foo, "bar" -> :bar).
#
# Fixes: replaced the roundabout `reverse.chop.reverse` (drop first char)
# with a direct slice.
def argument_name_without_leading_underscore
  name = argument_name_string
  name = name[1..-1] if name[FIRST_CHARACTER] == UNDERSCORE
  name.to_sym
end
ruby
{ "resource": "" }
q21456
Conduit.CLI.copy_files
train
# Renders each template in files_to_copy to its destination, overwriting
# existing files.
def copy_files
  files_to_copy.each do |src, dest|
    template(src, dest, force: true)
  end
end
ruby
{ "resource": "" }
q21457
Conduit.CLI.modify_files
train
# Post-processes the generated gemspec: injects the dependency block and
# rewrites the description/summary/homepage lines.
def modify_files
  gemspec_file = "#{@base_path}/conduit-#{@dasherized_name}.gemspec"

  # Add gemspec dependencies.
  dependency_block = [
    " # Dependencies\n",
    " #\n",
    " spec.add_dependency \"conduit\", \"~> 1.0.6\"\n",
    " # xml parser\n",
    " spec.add_dependency \"nokogiri\"\n\n",
    " # Development Dependencies\n",
    " #\n",
    " # to compare xml files in tests\n",
    " spec.add_development_dependency \"equivalent-xml\"\n",
    " spec.add_development_dependency \"rspec-its\"\n",
    " # for building CLI\n",
    " spec.add_development_dependency \"thor\"\n",
    " # for debugging\n",
    " spec.add_development_dependency \"byebug\"\n"
  ].join
  insert_into_file gemspec_file, dependency_block, after: "spec.require_paths = [\"lib\"]\n\n"

  # Remove description.
  gsub_file(gemspec_file, /spec\.description(.*)\n/, "")

  # Update summary.
  new_summary = "spec.summary = \"#{ActiveSupport::Inflector.humanize @underscored_name} Driver for Conduit\""
  gsub_file(gemspec_file, /spec\.summary(.*)/, new_summary)

  # Update homepage.
  new_homepage = "spec.homepage = \"http://www.github.com/conduit/conduit-#{@dasherized_name}\""
  gsub_file(gemspec_file, /spec\.homepage(.*)/, new_homepage)
end
ruby
{ "resource": "" }
q21458
Pipekit.Deal.update_by_person
train
# Updates the first deal belonging to the person with the given email.
def update_by_person(email, params, person_repo: Person.new)
  person_id = person_repo.find_exactly_by_email(email)[:id]
  first_deal = get_by_person_id(person_id, person_repo: person_repo).first
  update(first_deal[:id], params)
end
ruby
{ "resource": "" }
q21459
Bio.Tree.clone_subtree
train
# Returns a new tree containing +start_node+, all of its descendents, and
# every edge whose endpoints both survive.
def clone_subtree(start_node)
  new_tree = self.class.new
  ([start_node] + start_node.descendents).each { |n| new_tree.add_node(n) }
  each_edge do |node1, node2, edge|
    new_tree.add_edge(node1, node2, edge) if new_tree.include?(node1) && new_tree.include?(node2)
  end
  new_tree
end
ruby
{ "resource": "" }
q21460
Bio.Tree.clone_tree_without_branch
train
# Returns a copy of this tree with +node+ and its entire subtree removed.
# Edges are copied only when both endpoints survive.
#
# Fixes: removed the unused local `original` (only referenced by
# commented-out debug output) and the empty `else` branch.
def clone_tree_without_branch(node)
  new_tree = self.class.new
  skip = [node] + node.descendents
  nodes.each do |x|
    new_tree.add_node(x) unless skip.include?(x)
  end
  each_edge do |node1, node2, edge|
    if new_tree.include?(node1) and new_tree.include?(node2)
      new_tree.add_edge(node1, node2, edge)
    end
  end
  new_tree
end
ruby
{ "resource": "" }
q21461
Usable.ConfigMulti.+
train
# Merges another config into a clone of this one. Concrete spec values are
# copied directly; remaining singleton accessors are re-declared as lazy
# readers that delegate to the other spec and are queued for lazy loading.
def +(other)
  config = clone
  specs = other.spec.to_h
  specs.each { |key, val| config[key] = val }

  lazy_names = other.spec.singleton_methods
                    .map { |name| name.to_s.tr('=', '').to_sym }
                    .uniq - specs.keys
  lazy_names.each do |name|
    config.spec.define_singleton_method(name) do
      other.spec.public_method(name).call
    end
    config.instance_variable_get(:@lazy_loads) << name
  end
  config
end
ruby
{ "resource": "" }
q21462
Pipekit.Request.parse_body
train
# Converts a body hash of {field => value} into Pipedrive field-id keyed
# form, mapping configured field values to their ids as well.
#
# Fixes: replaced reduce + tap (whose block parameter shadowed the outer
# `result`) with the idiomatic each_with_object.
def parse_body(body)
  body.each_with_object({}) do |(field, value), result|
    # Resolve the value against the raw field name before converting the key.
    id_value = Config.field_value_id(resource.singular, field, value)
    id_field = Config.field_id(resource.singular, field)
    result[id_field] = id_value
  end
end
ruby
{ "resource": "" }
q21463
Liquigen::Scaffold.Config.process
train
# Ensures the .liquigen config file exists (seeding it with defaults when
# missing), then opens it in the vim editor.
def process
  unless File.exist?(CONFIG_FILE)
    File.write(CONFIG_FILE, prepare_default_content.join("\n"))
  end
  system('vi ' + CONFIG_FILE)
end
ruby
{ "resource": "" }
q21464
Callcredit.Request.perform
train
# Performs the requested checks against the Callcredit API and returns
# either the raw response or its body, depending on config. Client errors
# are wrapped in APIError.
def perform(checks, check_data = {})
  # check_data = Callcredit::Validator.clean_check_data(check_data)
  response = @connection.get do |request|
    request.path = @config[:api_endpoint]
    request.body = build_request_xml(checks, check_data).to_s
  end
  @config[:raw] ? response : response.body
rescue Faraday::Error::ClientError => e
  raise APIError.new if e.response.nil?
  raise APIError.new(e.response[:body], e.response[:status], e.response)
end
ruby
{ "resource": "" }
q21465
Callcredit.Request.build_request_xml
train
# Builds the callvalidate request XML document: authentication block, a
# single session carrying the check data, and the application name.
def build_request_xml(checks, check_data = {})
  Nokogiri::XML::Builder.new do |xml|
    xml.callvalidate do
      authentication(xml)
      xml.sessions do
        xml.session("RID" => Time.now.to_f) do
          xml.data do
            personal_data(xml, check_data[:personal_data])
            card_data(xml, check_data[:card_data])
            bank_data(xml, check_data[:bank_data])
            income_data(xml, check_data[:income_data])
            required_checks(xml, checks)
          end
        end
      end
      xml.application @config[:application_name]
    end
  end.doc
end
ruby
{ "resource": "" }
q21466
Callcredit.Request.required_checks
train
# Emits the ChecksRequired element: every known check is listed with "yes"
# when requested and "no" otherwise (names compared in underscored form).
def required_checks(xml, checks)
  requested = [*checks].map { |c| Util.underscore(c).to_sym }
  xml.ChecksRequired do
    Constants::CHECKS.each do |check|
      flag = requested.include?(Util.underscore(check).to_sym) ? "yes" : "no"
      xml.send(check, flag)
    end
  end
end
ruby
{ "resource": "" }
q21467
SongkickQueue.Worker.stop_if_signal_caught
train
# Spawns a watchdog thread that polls once a second for a trapped shutdown
# signal; on shutdown it stops consumers and the channel work pool, then
# clears the flag.
def stop_if_signal_caught
  Thread.new do
    loop do
      sleep 1
      next unless @shutdown

      logger.info "Recevied SIG#{@shutdown}, shutting down consumers"
      @consumer_instances.each(&:shutdown)
      @client.channel.work_pool.shutdown
      @shutdown = nil
    end
  end
end
ruby
{ "resource": "" }
q21468
SongkickQueue.Worker.subscribe_to_queue
train
# Declares the consumer's durable, HA-mirrored queue and subscribes to it
# with manual acknowledgement, routing deliveries to process_message.
def subscribe_to_queue(consumer_class)
  queue = channel.queue(consumer_class.queue_name,
                        durable: true,
                        arguments: { 'x-ha-policy' => 'all' })
  queue.subscribe(manual_ack: true) do |delivery_info, properties, message|
    process_message(consumer_class, delivery_info, properties, message)
  end
  logger.info "Subscribed #{consumer_class} to #{consumer_class.queue_name}"
end
ruby
{ "resource": "" }
q21469
SongkickQueue.Worker.process_message
train
# Decodes a delivered JSON message and hands its payload to a new consumer
# instance, with instrumentation. Successful processing acks the message;
# any failure (rescued broadly via Object) logs and rejects it, optionally
# requeueing per config. The process name reflects the in-flight message.
def process_message(consumer_class, delivery_info, properties, message)
  message = JSON.parse(message, symbolize_names: true)
  message_id = message.fetch(:message_id)
  produced_at = message.fetch(:produced_at)
  payload = message.fetch(:payload)
  logger.info "Processing message #{message_id} via #{consumer_class}, produced at #{produced_at}"
  set_process_name(consumer_class, message_id)
  consumer = consumer_class.new(delivery_info, logger)
  # Track the live consumer so a shutdown signal can stop it.
  @consumer_instances << consumer
  instrumentation_options = {
    consumer_class: consumer_class.to_s,
    queue_name: consumer_class.queue_name,
    message_id: message_id,
    produced_at: produced_at,
  }
  ActiveSupport::Notifications.instrument('consume_message.songkick_queue', instrumentation_options) do
    begin
      consumer.process(payload)
    ensure
      @consumer_instances.delete(consumer)
    end
  end
rescue Object => exception
  # Rescues Object (broader than StandardError) so any failure rejects the
  # message rather than killing the worker.
  logger.error(exception)
  channel.reject(delivery_info.delivery_tag, config.requeue_rejected_messages)
else
  channel.ack(delivery_info.delivery_tag, false)
ensure
  # Reset the process name back to idle.
  set_process_name
end
ruby
{ "resource": "" }
q21470
SongkickQueue.Worker.set_process_name
train
# Renames the running process to "<process_name>[<status>#<message_id>]",
# using only the last segment of a namespaced status and omitting the
# "#message_id" part when no id is given.
def set_process_name(status = 'idle', message_id = nil)
  short_status = String(status).split('::').last
  ident = [short_status, message_id].compact.join('#')
  $PROGRAM_NAME = "#{process_name}[#{ident}]"
end
ruby
{ "resource": "" }
q21471
SongkickQueue.Producer.publish
train
# Publishes +payload+ to the named queue as a JSON envelope containing a
# message id and production timestamp (both overridable via +options+).
# On a closed connection it retries, up to config.max_reconnect_attempts.
def publish(queue_name, payload, options = {})
  message_id = options.fetch(:message_id) { SecureRandom.hex(6) }
  produced_at = options.fetch(:produced_at) { Time.now.utc.iso8601 }
  message = { message_id: message_id, produced_at: produced_at, payload: payload }
  message = JSON.generate(message)
  exchange = client.default_exchange
  instrumentation_options = {
    queue_name: String(queue_name),
    message_id: message_id,
    produced_at: produced_at,
  }
  ActiveSupport::Notifications.instrument('produce_message.songkick_queue', instrumentation_options) do
    # Default exchange: the routing key is the queue name.
    exchange.publish(message, routing_key: String(queue_name))
  end
  # A successful publish resets the reconnect counter.
  self.reconnect_attempts = 0
  logger.info "Published message #{message_id} to '#{queue_name}' at #{produced_at}"
  exchange
rescue Bunny::ConnectionClosedError
  self.reconnect_attempts += 1
  if (reconnect_attempts > config.max_reconnect_attempts)
    fail TooManyReconnectAttemptsError, "Attempted to reconnect more than " +
      "#{config.max_reconnect_attempts} times"
  end
  logger.info "Attempting to reconnect to RabbitMQ, attempt #{reconnect_attempts} " +
    "of #{config.max_reconnect_attempts}"
  wait_for_bunny_session_to_reconnect
  retry
end
ruby
{ "resource": "" }
q21472
INotify.Watcher.close
train
# Stops this inotify watch and deregisters it from the notifier; raises a
# SystemCallError carrying FFI.errno when the native call fails.
def close
  unless Native.inotify_rm_watch(@notifier.fd, @id).zero?
    raise SystemCallError.new("Failed to stop watching #{path.inspect}", FFI.errno)
  end
  @notifier.watchers.delete(@id)
  nil
end
ruby
{ "resource": "" }
q21473
INotify.Event.absolute_name
train
# Full path of the event's file: the watcher's path itself when the event
# has no name, otherwise the name joined onto the watcher's path.
def absolute_name
  name.empty? ? watcher.path : File.join(watcher.path, name)
end
ruby
{ "resource": "" }
q21474
INotify.Notifier.process
train
# Reads pending inotify events, firing each event's callback and dropping
# watchers that the kernel reports as ignored (i.e. removed).
def process
  read_events.each do |event|
    event.callback!
    if event.flags.include?(:ignored)
      event.notifier.watchers.delete(event.watcher_id)
    end
  end
end
ruby
{ "resource": "" }
q21475
Fauxhai.Mocker.data
train
def data @fauxhai_data ||= lambda do # If a path option was specified, use it if @options[:path] filepath = File.expand_path(@options[:path]) unless File.exist?(filepath) raise Fauxhai::Exception::InvalidPlatform.new("You specified a path to a JSON file on the local system that does not exist: '#{filepath}'") end else filepath = File.join(platform_path, "#{version}.json") end if File.exist?(filepath) parse_and_validate(File.read(filepath)) elsif @options[:github_fetching] # Try loading from github (in case someone submitted a PR with a new file, but we haven't # yet updated the gem version). Cache the response locally so it's faster next time. begin response = open("#{RAW_BASE}/lib/fauxhai/platforms/#{platform}/#{version}.json") rescue OpenURI::HTTPError raise Fauxhai::Exception::InvalidPlatform.new("Could not find platform '#{platform}/#{version}' on the local disk and an HTTP error was encountered when fetching from Github. #{PLATFORM_LIST_MESSAGE}") end if response.status.first.to_i == 200 response_body = response.read path = Pathname.new(filepath) FileUtils.mkdir_p(path.dirname) begin File.open(filepath, 'w') { |f| f.write(response_body) } rescue Errno::EACCES # a pretty common problem in CI systems puts "Fetched '#{platform}/#{version}' from GitHub, but could not write to the local path: #{filepath}. Fix the local file permissions to avoid downloading this file every run." end return parse_and_validate(response_body) else raise Fauxhai::Exception::InvalidPlatform.new("Could not find platform '#{platform}/#{version}' on the local disk and an Github fetching returned http error code #{response.status.first.to_i}! #{PLATFORM_LIST_MESSAGE}") end else raise Fauxhai::Exception::InvalidPlatform.new("Could not find platform '#{platform}/#{version}' on the local disk and Github fetching is disabled! #{PLATFORM_LIST_MESSAGE}") end end.call end
ruby
{ "resource": "" }
q21476
Fauxhai.Mocker.parse_and_validate
train
def parse_and_validate(unparsed_data) parsed_data = JSON.parse(unparsed_data) if parsed_data['deprecated'] STDERR.puts "WARNING: Fauxhai platform data for #{parsed_data['platform']} #{parsed_data['platform_version']} is deprecated and will be removed in the 7.0 release 3/2019. #{PLATFORM_LIST_MESSAGE}" end parsed_data end
ruby
{ "resource": "" }
q21477
TheHelp.ServiceCaller.call_service
train
def call_service(service, **args, &block) service_args = { context: service_context, logger: service_logger }.merge(args) service_logger.debug("#{self.class.name}/#{__id__} called service " \ "#{service.name}") service.call(**service_args, &block) end
ruby
{ "resource": "" }
q21478
AwesomeSpawn.CommandLineBuilder.build
train
def build(command, params = nil) params = assemble_params(sanitize(params)) params.empty? ? command.to_s : "#{command} #{params}" end
ruby
{ "resource": "" }
q21479
AuthlogicConnect::Common::User.InstanceMethods.save
train
def save(options = {}, &block) self.errors.clear # log_state options = {} if options == false options[:validate] = true unless options.has_key?(:validate) save_options = ActiveRecord::VERSION::MAJOR < 3 ? options[:validate] : options # kill the block if we're starting authentication authenticate_via_protocol(block_given?, options) do |start_authentication| block = nil if start_authentication # redirecting # forces you to validate, only if a block is given result = super(save_options) # validate! unless block.nil? cleanup_authentication_session(options) yield(result) end result end end
ruby
{ "resource": "" }
q21480
AuthlogicConnect::Oauth::User.InstanceMethods.save_oauth_session
train
def save_oauth_session super auth_session[:auth_attributes] = attributes.reject!{|k, v| v.blank? || !self.respond_to?(k)} unless is_auth_session? end
ruby
{ "resource": "" }
q21481
AuthlogicConnect::Oauth::User.InstanceMethods.complete_oauth_transaction
train
def complete_oauth_transaction token = token_class.new(oauth_token_and_secret) old_token = token_class.find_by_key_or_token(token.key, token.token) token = old_token if old_token if has_token?(oauth_provider) self.errors.add(:tokens, "you have already created an account using your #{token_class.service_name} account, so it") else self.access_tokens << token end end
ruby
{ "resource": "" }
q21482
Spark.CommandBuilder.serialize_function
train
def serialize_function(func) case func when String serialize_function_from_string(func) when Symbol serialize_function_from_symbol(func) when Proc serialize_function_from_proc(func) when Method serialize_function_from_method(func) else raise Spark::CommandError, 'You must enter String, Symbol, Proc or Method.' end end
ruby
{ "resource": "" }
q21483
Spark.CommandBuilder.serialize_function_from_method
train
def serialize_function_from_method(meth) if pry? meth = Pry::Method.new(meth) end {type: 'method', name: meth.name, content: meth.source} rescue raise Spark::SerializeError, 'Method can not be serialized. Use full path or Proc.' end
ruby
{ "resource": "" }
q21484
Spark.Config.from_file
train
def from_file(file) check_read_only if file && File.exist?(file) file = File.expand_path(file) RubyUtils.loadPropertiesFile(spark_conf, file) end end
ruby
{ "resource": "" }
q21485
Spark.Config.get
train
def get(key) value = spark_conf.get(key.to_s) case TYPES[key] when :boolean parse_boolean(value) when :integer parse_integer(value) else value end rescue nil end
ruby
{ "resource": "" }
q21486
Spark.Config.load_executor_envs
train
def load_executor_envs prefix = 'SPARK_RUBY_EXECUTOR_ENV_' envs = ENV.select{|key, _| key.start_with?(prefix)} envs.each do |key, value| key = key.dup # ENV keys are frozen key.slice!(0, prefix.size) set("spark.ruby.executor.env.#{key}", value) end end
ruby
{ "resource": "" }
q21487
Worker.Base.compute
train
def compute before_start # Load split index @split_index = socket.read_int # Load files SparkFiles.root_directory = socket.read_string # Load broadcast count = socket.read_int count.times do Spark::Broadcast.register(socket.read_long, socket.read_string) end # Load command @command = socket.read_data # Load iterator @iterator = @command.deserializer.load_from_io(socket).lazy # Compute @iterator = @command.execute(@iterator, @split_index) # Result is not iterable @iterator = [@iterator] unless @iterator.respond_to?(:each) # Send result @command.serializer.dump_to_io(@iterator, socket) end
ruby
{ "resource": "" }
q21488
Spark.RDD.inspect
train
def inspect comms = @command.commands.join(' -> ') result = %{#<#{self.class.name}:0x#{object_id}} result << %{ (#{comms})} unless comms.empty? result << %{ (cached)} if cached? result << %{\n} result << %{ Serializer: "#{serializer}"\n} result << %{Deserializer: "#{deserializer}"} result << %{>} result end
ruby
{ "resource": "" }
q21489
Spark.RDD.take
train
def take(count) buffer = [] parts_count = self.partitions_size # No parts was scanned, yet last_scanned = -1 while buffer.empty? last_scanned += 1 buffer += context.run_job_with_command(self, [last_scanned], true, Spark::Command::Take, 0, -1) end # Assumption. Depend on batch_size and how Spark divided data. items_per_part = buffer.size left = count - buffer.size while left > 0 && last_scanned < parts_count parts_to_take = (left.to_f/items_per_part).ceil parts_for_scanned = Array.new(parts_to_take) do last_scanned += 1 end # We cannot take exact number of items because workers are isolated from each other. # => once you take e.g. 50% from last part and left is still > 0 then its very # difficult merge new items items = context.run_job_with_command(self, parts_for_scanned, true, Spark::Command::Take, left, last_scanned) buffer += items left = count - buffer.size # Average size of all parts items_per_part = [items_per_part, items.size].reduce(0){|sum, x| sum + x.to_f/2} end buffer.slice!(0, count) end
ruby
{ "resource": "" }
q21490
Spark.RDD.aggregate
train
def aggregate(zero_value, seq_op, comb_op) _reduce(Spark::Command::Aggregate, seq_op, comb_op, zero_value) end
ruby
{ "resource": "" }
q21491
Spark.RDD.coalesce
train
def coalesce(num_partitions) if self.is_a?(PipelinedRDD) deser = @command.serializer else deser = @command.deserializer end new_jrdd = jrdd.coalesce(num_partitions) RDD.new(new_jrdd, context, @command.serializer, deser) end
ruby
{ "resource": "" }
q21492
Spark.RDD.shuffle
train
def shuffle(seed=nil) seed ||= Random.new_seed new_rdd_from_command(Spark::Command::Shuffle, seed) end
ruby
{ "resource": "" }
q21493
Spark.RDD.reserialize
train
def reserialize(new_serializer) if serializer == new_serializer return self end new_command = @command.deep_copy new_command.serializer = new_serializer PipelinedRDD.new(self, new_command) end
ruby
{ "resource": "" }
q21494
Spark.RDD.intersection
train
def intersection(other) mapping_function = 'lambda{|item| [item, nil]}' filter_function = 'lambda{|(key, values)| values.size > 1}' self.map(mapping_function) .cogroup(other.map(mapping_function)) .filter(filter_function) .keys end
ruby
{ "resource": "" }
q21495
Spark.RDD.partition_by
train
def partition_by(num_partitions, partition_func=nil) num_partitions ||= default_reduce_partitions partition_func ||= 'lambda{|x| Spark::Digest.portable_hash(x.to_s)}' _partition_by(num_partitions, Spark::Command::PartitionBy::Basic, partition_func) end
ruby
{ "resource": "" }
q21496
Spark.RDD.take_sample
train
def take_sample(with_replacement, num, seed=nil) if num < 0 raise Spark::RDDError, 'Size have to be greater than 0' elsif num == 0 return [] end # Taken from scala num_st_dev = 10.0 # Number of items initial_count = self.count return [] if initial_count == 0 # Create new generator seed ||= Random.new_seed rng = Random.new(seed) # Shuffle elements if requested num if greater than array size if !with_replacement && num >= initial_count return self.shuffle(seed).collect end # Max num max_sample_size = Integer::MAX - (num_st_dev * Math.sqrt(Integer::MAX)).to_i if num > max_sample_size raise Spark::RDDError, "Size can not be greate than #{max_sample_size}" end # Approximate fraction with tolerance fraction = compute_fraction(num, initial_count, with_replacement) # Compute first samled subset samples = self.sample(with_replacement, fraction, seed).collect # If the first sample didn't turn out large enough, keep trying to take samples; # this shouldn't happen often because we use a big multiplier for their initial size. index = 0 while samples.size < num log_warning("Needed to re-sample due to insufficient sample size. Repeat #{index}") samples = self.sample(with_replacement, fraction, rng.rand(0..Integer::MAX)).collect index += 1 end samples.shuffle!(random: rng) samples[0, num] end
ruby
{ "resource": "" }
q21497
Spark.RDD.group_by_key
train
def group_by_key(num_partitions=nil) create_combiner = 'lambda{|item| [item]}' merge_value = 'lambda{|combiner, item| combiner << item; combiner}' merge_combiners = 'lambda{|combiner_1, combiner_2| combiner_1 += combiner_2; combiner_1}' combine_by_key(create_combiner, merge_value, merge_combiners, num_partitions) end
ruby
{ "resource": "" }
q21498
Spark.RDD.aggregate_by_key
train
def aggregate_by_key(zero_value, seq_func, comb_func, num_partitions=nil) _combine_by_key( [Spark::Command::CombineByKey::CombineWithZero, zero_value, seq_func], [Spark::Command::CombineByKey::Merge, comb_func], num_partitions ) end
ruby
{ "resource": "" }
q21499
Spark.RDD.cogroup
train
def cogroup(*others) unioned = self others.each do |other| unioned = unioned.union(other) end unioned.group_by_key end
ruby
{ "resource": "" }