query: string (lengths 7 to 9.5k)
document: string (lengths 10 to 1.07M)
negatives: list (length 19)
metadata: dict
Dataset used to retrieve index information
def _indexes_ds @_indexes_ds ||= begin if server_version >= 90500 order = [Sequel[:indc][:relname], Sequel.function(:array_position, Sequel[:ind][:indkey], Sequel[:att][:attnum])] # :nocov: else range = 0...32 order = [Sequel[:indc][:relname], SQ...
[ "def index\n @dataset_data = DatasetDatum.all\n end", "def index\n @dataset = Dataset.find( params[:dataset_id] )\n @dataset_header_props = @dataset.get_dataset_header_props\n end", "def dataset= index_dataset\n free!\n @dataset = index_dataset\n end", "def dataset(index)\n idx_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Dataset used to determine custom serial sequences for tables
def _select_custom_sequence_ds @_select_custom_sequence_ds ||= metadata_dataset. from{pg_class.as(:t)}. join(:pg_namespace, {:oid => :relnamespace}, :table_alias=>:name). join(:pg_attribute, {:attrelid => Sequel[:t][:oid]}, :table_alias=>:attr). join(:pg_attrdef, {:adreli...
[ "def _select_serial_sequence_ds\n @_serial_sequence_ds ||= metadata_dataset.\n from{[\n pg_class.as(:seq),\n pg_attribute.as(:attr),\n pg_depend.as(:dep),\n pg_namespace.as(:name),\n pg_constraint.as(:cons),\n pg_class.as(:t)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Dataset used to determine normal serial sequences for tables
def _select_serial_sequence_ds @_serial_sequence_ds ||= metadata_dataset. from{[ pg_class.as(:seq), pg_attribute.as(:attr), pg_depend.as(:dep), pg_namespace.as(:name), pg_constraint.as(:cons), pg_class.as(:t) ]}. ...
[ "def _select_custom_sequence_ds\n @_select_custom_sequence_ds ||= metadata_dataset.\n from{pg_class.as(:t)}.\n join(:pg_namespace, {:oid => :relnamespace}, :table_alias=>:name).\n join(:pg_attribute, {:attrelid => Sequel[:t][:oid]}, :table_alias=>:attr).\n join(:pg_attrdef...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Dataset used to determine primary keys for tables
def _select_pk_ds @_select_pk_ds ||= metadata_dataset. from(:pg_class, :pg_attribute, :pg_index, :pg_namespace). where{[ [pg_class[:oid], pg_attribute[:attrelid]], [pg_class[:relnamespace], pg_namespace[:oid]], [pg_class[:oid], pg_index[:indrelid]], ...
[ "def primary_key_columns\n @columns.values.find_all { |c| c.primary_key? }\n end", "def dataset_need_primary_key?\n true\n end", "def pk_and_sequence_for(table); end", "def dataset_need_primary_key?\n false\n end", "def primary_key\n unless @primary_key\n pk_colum...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If the :synchronous option is given and non-nil, set synchronous_commit appropriately. Valid values for the :synchronous option are true, :on, false, :off, :local, and :remote_write.
def begin_new_transaction(conn, opts) super if opts.has_key?(:synchronous) case sync = opts[:synchronous] when true sync = :on when false sync = :off when nil return end log_connection_execute(conn, "SET LOC...
[ "def synchronous_commit=(value)\n raise ArgumentError, \"argument must be true or false\" unless value == true || value == false\n\n execute \"SET #{'LOCAL' if open_transactions > 0} synchronous_commit TO #{value ? 'ON' : 'OFF'}\"\n end", "def synchronous!\n @asynchronous = false\n end", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the READ ONLY transaction setting per savepoint, as PostgreSQL supports that.
def begin_savepoint(conn, opts) super unless (read_only = opts[:read_only]).nil? log_connection_execute(conn, "SET TRANSACTION READ #{read_only ? 'ONLY' : 'WRITE'}") end end
[ "def set_transaction_isolation(conn, opts)\n level = opts.fetch(:isolation, transaction_isolation_level)\n read_only = opts[:read_only]\n deferrable = opts[:deferrable]\n if level || !read_only.nil? || !deferrable.nil?\n sql = String.new\n sql << \"SET TRANSACTION\"\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Literalize non-String collate options. This is because unquoted collations are folded to lowercase, and PostgreSQL uses mixed-case or capitalized collations.
def column_definition_collate_sql(sql, column) if collate = column[:collate] collate = literal(collate) unless collate.is_a?(String) sql << " COLLATE #{collate}" end end
[ "def add_column_options_with_string_collation!(sql, options)\n self.add_column_options_without_string_collation!(sql, options)\n sql << ' COLLATE NOCASE' if options[:column].type.eql?(:string)\n end", "def collations; end", "def collation\n nil\n end", "def column_definition_collate_sql(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Support identity columns, but only use the identity SQL syntax if no default value is given.
def column_definition_default_sql(sql, column) super if !column[:serial] && !['smallserial', 'serial', 'bigserial'].include?(column[:type].to_s) && !column[:default] if (identity = column[:identity]) sql << " GENERATED " sql << (identity == :always ? "ALWAYS" : "BY DEFA...
[ "def auto_increment_sql\n 'IDENTITY'\n end", "def auto_increment_sql\n 'IDENTITY(1,1)'\n end", "def type_literal(column)\n if column[:identity]\n sql = \"#{super} GENERATED BY DEFAULT AS IDENTITY\"\n if sw = column[:start_with]\n sql += \" (STA...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If the :prepare option is given and we aren't in a savepoint, prepare the transaction for a two-phase commit.
def commit_transaction(conn, opts=OPTS) if (s = opts[:prepare]) && savepoint_level(conn) <= 1 log_connection_execute(conn, "PREPARE TRANSACTION #{literal(s)}") else super end end
[ "def commit_transaction(conn, opts=OPTS)\n if (s = opts[:prepare]) && savepoint_level(conn) <= 1\n log_connection_execute(conn, \"PREPARE COMMIT #{s}\")\n else\n super\n end\n end", "def commit_transaction(conn, opts=OPTS)\n if (s = opts[:prepare]) &&...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SQL for doing a fast table insert from stdin.
def copy_into_sql(table, opts) sql = String.new sql << "COPY #{literal(table)}" if cols = opts[:columns] sql << literal(Array(cols)) end sql << " FROM STDIN" if opts[:options] || opts[:format] sql << " (" sql << "FORMAT #{opts[:format]}" if o...
[ "def copy_into_sql(table, opts)\n sql = \"COPY #{literal(table)} \"\n if cols = opts[:columns]\n sql << literal(Array(cols))\n end\n sql << \" FROM STDIN\"\n if opts[:options]\n sql << \" #{opts[:options]}\" if opts[:options]\n end\n sql\n end"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SQL statement to create a database function.
def create_function_sql(name, definition, opts=OPTS) args = opts[:args] if !opts[:args].is_a?(Array) || !opts[:args].any?{|a| Array(a).length == 3 and %w'OUT INOUT'.include?(a[2].to_s)} returns = opts[:returns] || 'void' end language = opts[:language] || 'SQL' <<-END ...
[ "def create_function(name, definition, opts=OPTS)\n self << create_function_sql(name, definition, opts)\n end", "def function(name, *args)\n SQL::Function.new(name, *args)\n end", "def create_function(function_name, returning, definition, options = {})\n\n function_name = full_functio...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SQL for creating a procedural language.
def create_language_sql(name, opts=OPTS) "CREATE#{' OR REPLACE' if opts[:replace] && server_version >= 90000}#{' TRUSTED' if opts[:trusted]} LANGUAGE #{name}#{" HANDLER #{opts[:handler]}" if opts[:handler]}#{" VALIDATOR #{opts[:validator]}" if opts[:validator]}" end
[ "def create_language(name, opts=OPTS)\n self << create_language_sql(name, opts)\n end", "def program\n stmt_list\n end", "def add_code(cheatsheet_db, language, type, code, comment)\n cheatsheet_db.execute(\"INSERT INTO #{language} (type, code, comment) VALUES (?, ?, ?)\", [type, code, comment])...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SQL for creating a partition of another table.
def create_partition_of_table_sql(name, generator, options) sql = create_table_prefix_sql(name, options).dup sql << " PARTITION OF #{quote_schema_table(options[:partition_of])}" case generator.partition_type when :range from, to = generator.range sql << " FOR VALUES...
[ "def create\n connection.execute %{\n CREATE TABLE IF NOT EXISTS #{table_name} (\n CONSTRAINT #{name}_#{partition_key}_check CHECK (\n #{partition_key} >= '#{date_from.to_s(:db)} 00:00:00+00'\n AND #{partition_key} < '#{date_to.to_s(:db)} 00:00:00+00'\n )\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DDL fragment for initial part of CREATE VIEW statement
def create_view_prefix_sql(name, options) sql = create_view_sql_append_columns("CREATE #{'OR REPLACE 'if options[:replace]}#{'TEMPORARY 'if options[:temp]}#{'RECURSIVE ' if options[:recursive]}#{'MATERIALIZED ' if options[:materialized]}VIEW #{quote_schema_table(name)}", options[:columns] || options[:recursive]...
[ "def create_view_prefix_sql(name, options)\n create_view_sql_append_columns(\"CREATE #{'OR REPLACE 'if options[:replace]}VIEW #{quote_schema_table(name)}\", options[:columns])\n end", "def create_view(view)\n repository.adapter.execute <<-SQL.compress_lines\n create or replace view #{quote_n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SQL for dropping a function from the database.
def drop_function_sql(name, opts=OPTS) "DROP FUNCTION#{' IF EXISTS' if opts[:if_exists]} #{name}#{sql_function_args(opts[:args])}#{' CASCADE' if opts[:cascade]}" end
[ "def drop_function(function)\n repository.adapter.execute <<-SQL.compress_lines\n drop function if exists #{quote_name(function.name)}(#{function.args}) cascade\n SQL\n end", "def drop_function(function_name, options = {})\n function_name = full_function_name(function_name, options)\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SQL for dropping a procedural language from the database.
def drop_language_sql(name, opts=OPTS) "DROP LANGUAGE#{' IF EXISTS' if opts[:if_exists]} #{name}#{' CASCADE' if opts[:cascade]}" end
[ "def drop_language(language, options = {})\n sql = 'DROP PROCEDURAL LANGUAGE '\n sql << 'IF EXISTS ' if options[:if_exists]\n sql << quote_language(language)\n sql << ' CASCADE' if options[:cascade]\n execute(\"#{sql};\")\n end", "def drop_language(name, opts=OPTS)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SQL for dropping a schema from the database.
def drop_schema_sql(name, opts=OPTS) "DROP SCHEMA#{' IF EXISTS' if opts[:if_exists]} #{quote_identifier(name)}#{' CASCADE' if opts[:cascade]}" end
[ "def drop_schema schema_name\n execute \"DROP SCHEMA #{schema_name} CASCADE\"\n end", "def drop_schema(schema_name)\n execute(\"DROP SCHEMA \\\"#{schema_name}\\\"\")\n end", "def drop_schema(schema)\n execute \"DROP SCHEMA #{schema} RESTRICT\", 'Drop Schema'\n end", "def drop_schema(*a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SQL for dropping a trigger from the database.
def drop_trigger_sql(table, name, opts=OPTS) "DROP TRIGGER#{' IF EXISTS' if opts[:if_exists]} #{name} ON #{quote_schema_table(table)}#{' CASCADE' if opts[:cascade]}" end
[ "def drop_trigger_sql( event )\n \"DROP TRIGGER IF EXISTS #{$options.schema}.#{base_name}_#{event}\"\n end", "def drop_trigger(table_name, trigger_name, options = {})\n SchemaMonkey::Middleware::Migration::CreateTrigger.start(connection: self, table_name: table_name, trigger_name: trigger_name, optio...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SQL for dropping a view from the database.
def drop_view_sql(name, opts=OPTS) "DROP #{'MATERIALIZED ' if opts[:materialized]}VIEW#{' IF EXISTS' if opts[:if_exists]} #{quote_schema_table(name)}#{' CASCADE' if opts[:cascade]}" end
[ "def drop_view(name)\n execute(\"DROP VIEW #{name}\")\n end", "def drop_view(name)\n execute \"DROP VIEW #{name};\"\n end", "def drop_view(name)\n execute \"DROP VIEW #{name}\"\n end", "def drop_view(view)\n repository.adapter.execute <<-SQL.compress_lines\n d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If opts includes a :schema option, use it; otherwise, restrict the filter to only the currently visible schemas.
def filter_schema(ds, opts) expr = if schema = opts[:schema] schema.to_s else Sequel.function(:any, Sequel.function(:current_schemas, false)) end ds.where{{pg_namespace[:nspname]=>expr}} end
[ "def filter(schema, options = {}, \n builder = DbAgile::Core::Schema::builder, \n &filter_block)\n schema!(schema, :schema, caller)\n hash!(options, :options, caller)\n builder!(builder, :builder, caller)\n options = Computations::Filter::DEFAULT_OPTIONS.m...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set up data structures shared by all postgres adapters.
def initialize_postgres_adapter @primary_keys = {} @primary_key_sequences = {} @supported_types = {} procs = @conversion_procs = CONVERSION_PROCS.dup procs[1184] = procs[1114] = method(:to_application_timestamp) end
[ "def setup_connection(conn)\n conn = super(conn)\n conn.extend(Sequel::Swift::Postgres::AdapterMethods)\n conn.db = self\n conn.apply_connection_settings\n conn\n end", "def setup_databases\n postgres_user = app_name\n postgres_pass = SecureRandom.urls...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return an expression for the oid of the table expr. Used by the metadata parsing code to disambiguate unqualified tables.
def regclass_oid(expr, opts=OPTS) if expr.is_a?(String) && !expr.is_a?(LiteralString) expr = Sequel.identifier(expr) end sch, table = schema_and_table(expr) sch ||= opts[:schema] if sch expr = Sequel.qualify(sch, table) end expr = if ...
[ "def table_expression_of(ast)\n\t\tquery = query_expression_of ast\n\t\tif query.nil? then\n\t\t\treturn nil\n\t\telse\n\t\t\treturn query.table_expression\n\t\tend\n\tend", "def unaliased_identifier(c)\n case c\n when Symbol\n table, column, aliaz = split_symbol(c)\n if aliaz\n t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove the cached entries for primary keys and sequences when a table is changed.
def remove_cached_schema(table) tab = quote_schema_table(table) Sequel.synchronize do @primary_keys.delete(tab) @primary_key_sequences.delete(tab) end super end
[ "def remove_cached_schema(table)\n tab = quote_schema_table(table)\n Sequel.synchronize do\n @primary_keys.delete(tab)\n end\n super\n end", "def clean\n tables_cache.keys.reverse_each do |table|\n ActiveRecord::Base.connection.execute %(\n DELE...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handle interval and citext types.
def schema_column_type(db_type) case db_type when /\Ainterval\z/io :interval when /\Acitext\z/io :string else super end end
[ "def deparse_interval_type(node)\n type = ['interval']\n\n if node['typmods']\n typmods = node['typmods'].map { |typmod| deparse_item(typmod) }\n type << Interval.from_int(typmods.first.to_i).map do |part|\n # only the `second` type can take an argument.\n if part == 'secon...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The schema :type entry to use for array types.
def schema_array_type(db_type) :array end
[ "def pg_array_schema_type(type)\n @pg_array_schema_types[type]\n end", "def render_field_type(type)\n array_type = type[/\\[(.+)\\]/, 1]\n\n if array_type\n \"#{array_type} => Array\"\n else\n type\n end\n end", "def schema_column_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The schema :type entry to use for row/composite types.
def schema_composite_type(db_type) :composite end
[ "def type\n @type ||= builder.column_type(object, attr)\n end", "def type_at(index)\n @schema[index][:type]\n end", "def type_schema\n steps.type_schema\n end", "def type()\n\t\t@db.hget('sgt-structure:' + @id, 'type')\n\tend", "def type_column\n arel_table[polymorphic_t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The schema :type entry to use for enum types.
def schema_enum_type(db_type) :enum end
[ "def parse_enum_type_definition\n description = parse_description\n expect_keyword('enum')\n ASTNode.new(kind: Kinds::ENUM_TYPE_DEFINITION, params: {\n description: description,\n name: parse_name,\n directives: parse_directives(true),\n values: parse_enum_values_definitio...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The schema :type entry to use for range types.
def schema_range_type(db_type) :range end
[ "def schema_range_type(db_type)\n @pg_range_schema_types[db_type] || super\n end", "def schema_multirange_type(db_type)\n :multirange\n end", "def range_key_type\n @table.attribute_definitions.each do |attr|\n if attr.attribute_name == range_key_name\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The schema :type entry to use for multirange types.
def schema_multirange_type(db_type) :multirange end
[ "def schema_multirange_type(db_type)\n @pg_multirange_schema_types[db_type] || super\n end", "def schema_range_type(db_type)\n :range\n end", "def schema_range_type(db_type)\n @pg_range_schema_types[db_type] || super\n end", "def register_multirange_type(db_type, op...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the transaction isolation level on the given connection
def set_transaction_isolation(conn, opts) level = opts.fetch(:isolation, transaction_isolation_level) read_only = opts[:read_only] deferrable = opts[:deferrable] if level || !read_only.nil? || !deferrable.nil? sql = String.new sql << "SET TRANSACTION" sql <<...
[ "def set_transaction_isolation(conn, opts)\n level = opts.fetch(:isolation, transaction_isolation_level)\n if (jdbc_level = JDBC_TRANSACTION_ISOLATION_LEVELS[level]) &&\n conn.getMetaData.supportsTransactionIsolationLevel(jdbc_level)\n _trans(conn)[:original_jdbc_isolation_level] =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Turns an array of argument specifiers into an SQL fragment used for function arguments. See create_function_sql.
def sql_function_args(args) "(#{Array(args).map{|a| Array(a).reverse.join(' ')}.join(', ')})" end
[ "def function_sql(f)\n args = f.args\n \"#{f.f}#{args.empty? ? '()' : literal(args)}\"\n end", "def irregular_function_sql(f)\n \"#{f.f}(#{literal(f.arg1)} #{f.joiner} #{literal(f.arg2)})\"\n end", "def sql_from_args\n # Remove all the flags.\n # Sample Regex: /\\b?\\\\-[xth]+\\s*/\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
PostgreSQL can combine multiple alter table ops into a single query.
def supports_combining_alter_table_ops? true end
[ "def apply_alter_table(name, ops)\n alter_table_sql_list(name, ops).each{|sql| execute_ddl(sql)}\n end", "def alter_table_sql(table, op)\n case op[:op]\n when :add_column\n \"ALTER TABLE #{quote_schema_table(table)} ADD #{column_definition_sql(op)}\"\n when :drop_column\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handle bigserial type if :serial option is present
def type_literal_generic_bignum_symbol(column) column[:serial] ? :bigserial : super end
[ "def is_serial\n @comm_type == CommType::SERIAL\n end", "def test_schema_dump_with_not_bigserial\n output = dump_table_schema \"postgresql_big_serials\"\n assert_match %r{t\\.bigserial\\s+\"serials_id\"$}, output\n end", "def test_not_bigserial_column\n column = PostgresqlBigSerial.col...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
PostgreSQL uses the bytea data type for blobs
def type_literal_generic_file(column) :bytea end
[ "def typecast_value_blob(value)\n value.is_a?(Sequel::SQL::Blob) ? value : Sequel::SQL::Blob.new(value)\n end", "def literal_blob_append(sql, v)\n sql << \"CAST(X'\" << v.unpack(\"H*\").first << \"' AS BLOB)\"\n end", "def type_literal_generic_file(column)\n :blob\n end", "def ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
PostgreSQL prefers the text datatype. If a fixed size is requested, the char type is used. If the text type is specifically disallowed or a size is specified, use the varchar type. Otherwise use the text type.
def type_literal_generic_string(column) if column[:text] :text elsif column[:fixed] "char(#{column[:size]||default_string_column_size})" elsif column[:text] == false || column[:size] "varchar(#{column[:size]||default_string_column_size})" else :tex...
[ "def type_literal_generic_string(column)\n if column[:text]\n uses_clob_for_text? ? :clob : :text\n elsif column[:fixed]\n \"char(#{column[:size]||default_string_column_size})\"\n else\n \"varchar(#{column[:size]||default_string_column_size})\"\n end\n end", "def revert...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the results of an EXPLAIN ANALYZE query as a string
def analyze explain(:analyze=>true) end
[ "def analyze_explain\n analyze_query_type\n analyze_key\n end", "def explain(opts=OPTS)\n with_sql((opts[:analyze] ? 'EXPLAIN ANALYZE ' : 'EXPLAIN ') + select_sql).map(:'QUERY PLAN').join(\"\\r\\n\")\n end", "def exec_explain(queries) # :nodoc:\n str = queries && queries.map do |sql, bin...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Disables automatic use of INSERT ... RETURNING. You can still use returning manually to force the use of RETURNING when inserting. This is designed for cases where INSERT RETURNING cannot be used, such as when you are using partitioning with trigger functions or conditional rules, or when you are using a PostgreSQL ver...
def disable_insert_returning clone(:disable_insert_returning=>true) end
[ "def sql_for_insert(sql, pk, binds)\n unless pk == false || pk.nil? || pk.is_a?(Array) || pk.is_a?(String)\n sql = \"#{sql} RETURNING #{quote_column_name(pk)} INTO :returning_id\"\n (binds = binds.dup) << ActiveRecord::Relation::QueryAttribute.new(\"returning_id\", nil, Type::OracleEn...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the results of an EXPLAIN query as a string
def explain(opts=OPTS) with_sql((opts[:analyze] ? 'EXPLAIN ANALYZE ' : 'EXPLAIN ') + select_sql).map(:'QUERY PLAN').join("\r\n") end
[ "def exec_explain(queries) # :nodoc:\n str = queries && queries.map do |sql, bind|\n [].tap do |msg|\n msg << \"EXPLAIN for: #{sql}\"\n unless bind.empty?\n bind_msg = bind.map {|col, val| [col.name, val]}.inspect\n msg.last << \" #{bind_msg}\"\n end\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run a full text search on PostgreSQL. By default, searches for the inclusion of any of the terms in any of the cols. Options: :headline :: Append an expression to the selected columns aliased to headline that contains an extract of the matched text. :language :: The language to use for the search (default: 'simple') :p...
def full_text_search(cols, terms, opts = OPTS) lang = Sequel.cast(opts[:language] || 'simple', :regconfig) unless opts[:tsvector] phrase_cols = full_text_string_join(cols) cols = Sequel.function(:to_tsvector, lang, phrase_cols) end unless opts[:tsquery] ph...
[ "def full_text_search(cols, terms, opts = {})\n filter(\"CONTAINS (#{literal(cols)}, #{literal(terms)})\")\n end", "def full_text_search(cols, terms, opts = OPTS)\n terms = \"\\\"#{terms.join('\" OR \"')}\\\"\" if terms.is_a?(Array)\n where(Sequel.lit(\"CONTAINS (?, ?)\", cols, terms))\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Support SQL::AliasedExpression as expr to set up a USING join with a table alias for the USING columns.
def join_table(type, table, expr=nil, options=OPTS, &block) if expr.is_a?(SQL::AliasedExpression) && expr.expression.is_a?(Array) && !expr.expression.empty? && expr.expression.all? options = options.merge(:join_using=>true) end super end
[ "def join_using_clause_sql_append(sql, jc)\n join_clause_sql_append(sql, jc)\n sql << ' USING ('\n column_list_append(sql, jc.using)\n sql << ')'\n end", "def join_using_clause_sql(jc)\n \"#{join_clause_sql(jc)} USING (#{column_list(jc.using)})\"\n end", "def join_table(type, tabl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Locks all tables in the dataset's FROM clause (but not in JOINs) with the specified mode (e.g. 'EXCLUSIVE'). If a block is given, starts a new transaction, locks the table, and yields. If a block is not given, just locks the tables. Note that PostgreSQL will probably raise an error if you lock the table outside of an e...
def lock(mode, opts=OPTS) if defined?(yield) # perform locking inside a transaction and yield to block @db.transaction(opts){lock(mode, opts); yield} else sql = 'LOCK TABLE '.dup source_list_append(sql, @opts[:from]) mode = mode.to_s.upcase.strip unless ...
[ "def lock(mode, &block)\n sql = LOCK % [source_list(@opts[:from]), mode]\n @db.synchronize do\n if block # perform locking inside a transaction and yield to block\n @db.transaction {@db.execute(sql); yield}\n else\n @db.execute(sql) # lock without a transaction\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DISTINCT ON is a PostgreSQL extension
def supports_distinct_on? true end
[ "def supports_distinct_on?\n false\n end", "def distinct(*args)\n raise(InvalidOperation, \"DISTINCT ON not supported\") if !args.empty? && !supports_distinct_on?\n clone(:distinct => args)\n end", "def select_distinct_sql(sql)\n if distinct = @opts[:distinct]\n sql << \" DISTIN...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
True unless insert returning has been disabled for this dataset.
def supports_insert_select? !@opts[:disable_insert_returning] end
[ "def allow_insert_rows\n return @allow_insert_rows\n end", "def insert_only?\n @ops.all?(&:insert?)\n end", "def insertable?\n true\n end", "def allow_insert_rows=(value)\n @allow_insert_rows = value\n end", "def allow_delete_rows\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
PostgreSQL supports modifying joined datasets
def supports_modifying_joins? true end
[ "def add_join(dataset)\n # FIXME: doesn't take any care to pick a unique alias for the table when joining to it\n # FIXME: doesn't use mapping to determine id_column\n dataset.join(target_repo.table_name, foreign_key_mapper.column_name => @repository.identity_mapper.column_name)\n end", "def wit...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
PostgreSQL supports timezones in literal timestamps
def supports_timestamp_timezones? true end
[ "def supports_timestamp_timezones?\n false\n end", "def ensure_utc_timestamps\n #ActiveRecord::Base.connection.execute(\"SET TIME_ZONE = '+00:00'\")\n end", "def convert_db_timestamp(ts, tz = nil)\n unless ts.to_s =~ TIME_ZONE_RE\n raise \"internal error: malformed database timestamp...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Append the INSERT sql used in a MERGE
def _merge_insert_sql(sql, data) sql << " THEN INSERT " columns, values = _parse_insert_sql_args(data[:values]) _insert_columns_sql(sql, columns) if override = data[:override] sql << override end _insert_values_sql(sql, values) end
[ "def merge_target_sql(sql)\n\t\t sql << \" INTO #{table_ref(@opts[:into])}\"\n\t\tend", "def insert_into_sql(sql)\n sql << \" INTO #{source_list(@opts[:from])}\"\n end", "def insert_trigger_sql\n trigger_body( \"insert\" )\n end", "def merge_statement(target, stage)\n <<-SQLMERGE\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Format TRUNCATE statement with PostgreSQL specific options.
def _truncate_sql(table) to = @opts[:truncate_opts] || OPTS "TRUNCATE TABLE#{' ONLY' if to[:only]} #{table}#{' RESTART IDENTITY' if to[:restart]}#{' CASCADE' if to[:cascade]}" end
[ "def truncate_sql\n if opts[:sql]\n static_sql(opts[:sql])\n else\n check_truncation_allowed!\n check_not_limited!(:truncate)\n raise(InvalidOperation, \"Can't truncate filtered datasets\") if opts[:where] || opts[:having]\n t = String.new\n source_list_append(t, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Use from_self for an aggregate dataset using VALUES.
def aggreate_dataset_use_from_self? super || @opts[:values] end
[ "def single_value_ds\n clone(:limit=>1).ungraphed.naked\n end", "def from_self(opts=OPTS)\n fs = {}\n @opts.keys.each{|k| fs[k] = nil unless non_sql_option?(k)}\n pr = proc do\n c = clone(fs).from(opts[:alias] ? as(opts[:alias], opts[:column_aliases]) : self)\n if cols = _colu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Use USING to specify additional tables in a delete query
def delete_using_sql(sql) join_from_sql(:USING, sql) end
[ "def drop_join_table(table_1, table_2, **options); end", "def unjoin(obj1, obj2, table)\n first, second = join_object_ordering(obj1, obj2)\n first_key, second_key = ordered_join_table_keys(obj1.class, obj2.class)\n exec \"DELETE FROM #{table} WHERE #{first_key}=#{quote(first.pk)} AND #{second_key}=#{quot...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Support OVERRIDING SYSTEM|USER VALUE in insert statements
def insert_override_sql(sql) case opts[:override] when :system sql << " OVERRIDING SYSTEM VALUE" when :user sql << " OVERRIDING USER VALUE" end end
[ "def mssql_current_user_escalation(opts={});\n\t\t\"declare @moo varchar(50); set @moo = (select SYSTEM_USER); exec master..sp_addsrvrolemember @moo, 'sysadmin'\"\n\tend", "def insert_createdby\nself.recordcreatedby = User.current_user.username\nend", "def set_insert_user(user)\n logger.info(\"Setting insert...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
PostgreSQL quotes NaN and Infinity.
def literal_float(value) if value.finite? super elsif value.nan? "'NaN'" elsif value.infinite? == 1 "'Infinity'" else "'-Infinity'" end end
[ "def allow_nan?\n @allow_nan\n end", "def allow_nan?\n @allow_nan\n end", "def allow_nan?()\n #This is a stub, used for indexing\n end", "def nan\n BigDecimal('0')/BigDecimal('0')\n end", "def handle_nan(result)\n result.nan? ? 0.0 : result\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handle Ruby integers outside the PostgreSQL bigint range specially.
def literal_integer(v) if v > 9223372036854775807 || v < -9223372036854775808 literal_integer_outside_bigint_range(v) else v.to_s end end
[ "def literal_integer_outside_bigint_range(v)\n raise IntegerOutsideBigintRange, \"attempt to literalize Ruby integer outside PostgreSQL bigint range: #{v}\"\n end", "def literal_integer_outside_bigint_range(v)\n case @opts[:integer_outside_bigint_range_strategy] || @db.opts[:integer_outside_big...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Raise IntegerOutsideBigintRange when attempting to literalize a Ruby integer outside the PostgreSQL bigint range, so PostgreSQL doesn't treat the value as numeric.
def literal_integer_outside_bigint_range(v) raise IntegerOutsideBigintRange, "attempt to literalize Ruby integer outside PostgreSQL bigint range: #{v}" end
[ "def literal_integer_outside_bigint_range(v)\n case @opts[:integer_outside_bigint_range_strategy] || @db.opts[:integer_outside_bigint_range_strategy]\n when :raise\n super\n when :raw\n v.to_s\n else # when :quote\n \"'#{v}'\"\n end\n end", "def l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Support FOR SHARE locking when using the :share lock style. Use SKIP LOCKED if skipping locked rows.
def select_lock_sql(sql) lock = @opts[:lock] if lock == :share sql << ' FOR SHARE' else super end if lock if @opts[:skip_locked] sql << " SKIP LOCKED" elsif @opts[:nowait] sql << " NOWAIT" end end ...
[ "def nolock\n clone(:table_options => \"(NOLOCK)\")\n end", "def supports_skip_locked?\n true\n end", "def skip_locked\n cached_dataset(:_skip_locked_ds) do\n raise(Error, 'This dataset does not support skipping locked rows') unless supports_skip_locked?\n clone(:skip_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Support a VALUES clause instead of a SELECT clause to return rows.
def select_values_sql(sql) sql << "VALUES " expression_list_append(sql, opts[:values]) end
[ "def select_values_sql(sql)\n sql << SELECT_VALUES\n expression_list_append(sql, opts[:values])\n end", "def test_values\n value = nil\n assert_nothing_raised do\n value = ActiveRecord::Base.connection.send(:select_rows, \"VALUES('ur', 'doin', 'it', 'right')\")\n end\n assert_e...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Support PostgreSQL 14+ CTE SEARCH/CYCLE clauses
def select_with_sql_cte(sql, cte) super select_with_sql_cte_search_cycle(sql, cte) end
[ "def select_with_sql_cte_search_cycle(sql,cte)\n if auto_param?(sql) && cte[:cycle]\n sql.skip_auto_param{super}\n else\n super\n end\n end", "def traverse_example_group_trees_until(&block); end", "def hierarchical_query_clause\n # -> uncomment the ne...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The version of the database server
def server_version db.server_version(@opts[:server]) end
[ "def version\n @version ||= exec('SHOW server_version')[0]['server_version'].split[0]\n end", "def get_database_version\n execute_sql 'SELECT version();'\n end", "def version\n @db['db_version'].to_i || 0\n end", "def get_server_version\n server_info[:server_version]\n end"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
PostgreSQL supports quoted function names.
def supports_quoted_function_names? true end
[ "def supports_quoted_function_names?\n true\n end", "def supports_quoted_function_names?\n false\n end", "def irregular_function_sql(f)\n \"#{f.f}(#{literal(f.arg1)} #{f.joiner} #{literal(f.arg2)})\"\n end", "def function_sql(f)\n args = f.args\n \"#{f.f}#{args.empty?...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Use FROM to specify additional tables in an update query
def update_from_sql(sql) join_from_sql(:FROM, sql) end
[ "def join_to_update(update, select) #:nodoc:\n if select.limit || select.offset || select.orders.any?\n subsubselect = select.clone\n subsubselect.projections = [update.key]\n\n subselect = Arel::SelectManager.new(select.engine)\n subselect.project Arel.sql(update.key.name...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Allows you to push an array/row to the 2-dimensional array. All rows must be the same width.
def << (val) # if this is first row set the width based on size of first array to be pushed if @h == 0 @w = val.size elsif val.size != @w or val.class.to_s != 'Array' raise "Only add rows/arrays the same size as the width(#{@w})" end @data[@h] = val @h += 1 end
[ "def import_array(rows)\n raise ArgumentError, \"Can only work with arrays\" unless rows.is_a?(Array)\n if self.labels.size > 1 and rows.dimensions == 1\n self.add_item(rows)\n else\n # self.items = self.items + rows\n rows.each do |row|\n sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Yields each row of the slice from position x,y of width w and height h.
def slice_rows(x,y,w,h) (y..y+h-1).each do |i| yield @data[i][x..x+w-1] end end
[ "def slice_tiles(xcount, ycount)\n tile_w = @width / xcount\n tile_h = @height / ycount\n return (0...ycount).flat_map{|v|\n (0...xcount).map{|u|\n slice(tile_w * u, tile_h * v, tile_w, tile_h)\n }\n }\n end", "def slice_row inimage, rowbeg, rowend, outimage\n m_be...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Simplified version of fill that only takes one argument.
def fill(x) @data.each do |a| a.fill x end end
[ "def fill(value)\n end", "def fill\n \t@fill\n end", "def fill(dims, value, name: nil)\n _op(:fill, dims, value, name: name)\n end", "def fills; end", "def fill!(subexpr, value)\n return self[size] = value if Frame === value\n return @value = value if empty?\n last.fill!(sube...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the res directory for the mapped locale. The base implementation does nothing.
def directory(_mapped_locale, _is_default) '' end
[ "def directory(locale, is_default)\n return ''\n end", "def get_locale_dir(prefix, locale)\n return '.' if locale == 'disable'\n\n with_locale(locale) do |str|\n path = \"#{prefix}/#{str}\"\n return path if File.directory?(path)\n end || '.'\n end", "def locale_path\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes the locale to a directory
def write_locale(directory, file, strings, plurals, locale, date); end
[ "def write!\n Dir.mkdir @directory unless File.directory? @directory\n\n @locales.each_with_index do |locale, index|\n strings = @strings[locale]\n plurals = @plurals[locale]\n\n l_directory = locale_directory locale, index.zero?\n\n file = @table.to_s.empty? ? @adapter.default...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save the contents of the source file into the master_hash with the given key, so long as the source file still has action entries in it. Otherwise, if the source file has been emptied, remove the key from master_hash.
def build_file_into_master_hash(master_hash, key, source_file) entries = grab_and_hashify(source_file, SIZE_OF_HASH_BLOCKS) if(!entries.blank?) master_hash.merge({ key.to_sym => entries }) else master_hash.delete(key.to_sym) master_hash end end
[ "def save( master_key = nil)\n @master_key = master_key if master_key\n if @file_store\n @file_store.save( @keys.to_yaml, @master_key )\n else\n false\n end\n end", "def process_key(key)\n if ::File.exists?(key)\n key\n else\n temp...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Grab the time of the next action after our current file position, then return to that position
def get_next_action_time(file) start_pos = file.pos next_time = get_next_result_time(file) file.seek(start_pos, IO::SEEK_SET) next_time end
[ "def get_time_of_last_file_action(file)\r\n\t\tresult_time = nil\r\n\t\tstarting_pos = file.pos\r\n\t\tfile.seek(0, IO::SEEK_END)\r\n\t\tfile.seek(file.pos-[10000, file.pos].min, IO::SEEK_SET)\r\n\t\t\r\n\t\twhile(!file.eof? && (this_line = file.readline))\r\n\t\t\tresult_time = Time.parse($1) if(this_line =~NEW_AC...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read backwards through file until we find the time of the last action written to it.
def get_time_of_last_file_action(file) result_time = nil starting_pos = file.pos file.seek(0, IO::SEEK_END) file.seek(file.pos-[10000, file.pos].min, IO::SEEK_SET) while(!file.eof? && (this_line = file.readline)) result_time = Time.parse($1) if(this_line =~NEW_ACTION_REGEX) end # Reset fi...
[ "def last_action\n state_file.read[:last_action]\n end", "def get_next_action_time(file)\r\n\t\tstart_pos = file.pos\r\n\t\tnext_time = get_next_result_time(file)\r\n\t\tfile.seek(start_pos, IO::SEEK_SET)\r\n\t\tnext_time\r\n\tend", "def get_last_activity\n if not File.exist?(MAIN_DIR + \"prev_activity...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Do a binary search until we find a file position that is less than MAX_TIME_BETWEEN_CONTROLLER_ACTIONS before the time we seek.
def seek_to_time_helper(file, target_time, start_pos, end_pos, logger) return nil unless file && target_time logger.info("Checking between file pos #{start_pos} and #{end_pos}.") rewind_pos = file.pos middle_pos = (start_pos+end_pos)/2 file.seek(middle_pos, IO::SEEK_SET) file.each do |line| if(l...
[ "def get_next_action_time(file)\r\n\t\tstart_pos = file.pos\r\n\t\tnext_time = get_next_result_time(file)\r\n\t\tfile.seek(start_pos, IO::SEEK_SET)\r\n\t\tnext_time\r\n\tend", "def wait(seconds = INFINITE)\n seconds *= 1000 unless seconds == INFINITE\n \n fni = 0.chr * 65536 # FILE_NOTIF...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Recursively builds children until it reaches the max_depth.
def build_children(node, parent = nil) node.children = get_all_moves(node) unless node.depth + 1 == @max_depth node.children.each {|child| build_children(child, node)} end end
[ "def build_tree(pages = nil)\n pages ||= all\n return pages if pages.empty? # Do not process empty array\n @tree_wrapper = OpenStruct.new(children: []) # Dummy container as traversing begin, contains roots as children\n old_tree_slice = @tree_wrapper.children\n new_tree_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns true if all of the coordinates fall on the board, based on BOARD_SIZE.
def on_board?(coords) coords.any? {|coord| coord < 1 || coord > @size } ? false : true end
[ "def ship_coords_are_all_on_board\n all_aboard = true\n @ship.coords.each do |coord|\n row, col = coord\n # if row & col are NOT between 0 and 9...\n unless row.between?(0,9) && col.between?(0,9)\n # ...then not all the coords are ok.\n all_aboard = false\n end\n break u...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Recursively gets the number of children of a node
def count_children(node) return 0 if node.children.nil? node.children.count + node.children.map {|child| count_children(child)}.inject(0, &:+) end
[ "def children_count\n i = 0\n children { |c| i+=1}\n return i\n end", "def nodeCount\n count = 1\n\n if @children.size\n @children.each do |key, val|\n count += val.nodeCount\n end\n end\n\n count\n end", "def count_children\n render :text => @node.children.cou...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given the game state as a string (0 represents an empty cell, 1 represents X and 2 represents O), determine if the game has a winner by connecting a row, column or diagonal. Outputs either the name of player1 or player2; if the game has no winner, returns nil.
def checkWinner(state, player1, player2) for i in 0..2 row_candidate = true column_candidate = true for j in 1..2 # check rows if state[i * 3 + j] == '0' || state[i * 3 + j] != state[i * 3 + j - 1] then row_candidate = false elsif row_candidate && state[i * 3 ...
[ "def winner\n index = won?\n if index && @board.cells[index[0]] == \"X\"\n return \"X\"\n elsif index && @board.cells[index[0]] == \"O\"\n return \"O\"\n else\n return nil\n end\n end", "def winner\n winner = \"no one\"\n board = state_of_piece\n black_moves = []\n red_m...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete the current ongoing game in the channel.
def del channel = params['channel_id'] existing = Board.find_by(:channel => channel) if existing then existing.destroy return render json: { :response_type => 'in_channel', :text => 'Removed the current game for the channel. It was between *' + existing.player1 + '* a...
[ "def finish()\n @@games.delete(@id)\n end", "def open_channel\n @@game_channels.delete(@event.channel)\n nil\n end", "def finish_game_session game_id\n craft_firebase_command(\"minesweeper/game#{game_id}.json\", \"DELETE\")\n\n # also, delete game id so that the queue will not contain...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
POST /dividas POST /dividas.json
def create @divida = Divida.new(divida_params) respond_to do |format| if @divida.save format.html { redirect_to @divida, notice: 'Divida was successfully created.' } format.json { render :show, status: :created, location: @divida } else format.html { render :new } fo...
[ "def create\n @divulgacao = Divulgacao.new(divulgacao_params)\n\n respond_to do |format|\n if @divulgacao.save\n format.html { redirect_to divulgacaos_path, notice: 'Divulgação foi criada com sucesso.' }\n format.json { render :show, status: :created, location: @divulgacao }\n else\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that required payout details are present (and create a MangoPay beneficiary if needed).
def required_payout_details_present?(person) return false unless person.bank_account_owner_name && person.bank_account_owner_address && person.iban && person.bic.present? return person.mangopay_beneficiary_id.present? || create_mangopay_beneficiary(person) end
[ "def create\n\t\tif missing_params\n\t\t\tif enough_money\n\t\t\t\tcheck_same_bank\n\t\t\telse\n\t\t\t\tcreate_failed_transaction\n\t\t\tend\n\t\telse\n\t\t\tnil\n\t\tend\n\tend", "def create_payment_profile_if_required\n if self.payment_profile.blank?\n project = self.invoice.project\n \n new_p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Lists ReservationInstance records from the API as a list. Unlike stream(), this operation is eager and will load `limit` records into memory before returning.
def list(reservation_status: :unset, limit: nil, page_size: nil) self.stream( reservation_status: reservation_status, limit: limit, page_size: page_size ).entries end
[ "def list(limit: nil, page_size: nil)\n self.stream(limit: limit, page_size: page_size).entries\n end", "def index\n @reservable_instances = ReservableInstance.all\n end", "def list(resource,limit=0,params={})\n uri = '/api/' + resource.to_s\n params.merge!({limit: limit.to_s})...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sends all the pages in this message. Returns the message.
def send_pages Msg.resolve(recipient).each do |user| next if !user.sms_validated? p = Page.new_page_to_user(self, user) end self end
[ "def send_page\n send_message(\"page\")\n end", "def sent\n @messages = current_user.messages.order('created_at DESC').unarchived.paginate(:page => params[:page], :per_page => 10)\n end", "def sent \n @messages = @user.sent_messages.paginate(:page => params[:page], :per_page => 10)\n render ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GET /projectareas/1 GET /projectareas/1.xml
def show @projectarea = Projectarea.find(params[:id]) respond_to do |format| format.html # show.html.erb format.xml { render :xml => @projectarea } end end
[ "def index\n @project_focus_area_associations = @project.project_focus_area_associations\n\n respond_to do |format|\n format.html # index.html.erb\n format.xml { render :xml => @project_focus_area_associations }\n end\n end", "def index\n @project_areas = ProjectArea.all\n end", "def in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GET /projectareas/new GET /projectareas/new.xml
def new @projectarea = Projectarea.new respond_to do |format| format.html # new.html.erb format.xml { render :xml => @projectarea } end end
[ "def create\n @projectarea = Projectarea.new(params[:projectarea])\n\n respond_to do |format|\n if @projectarea.save\n flash[:notice] = 'Projectarea was successfully created.'\n format.html { redirect_to(@projectarea) }\n format.xml { render :xml => @projectarea, :status => :created...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
POST /projectareas POST /projectareas.xml
def create @projectarea = Projectarea.new(params[:projectarea]) respond_to do |format| if @projectarea.save flash[:notice] = 'Projectarea was successfully created.' format.html { redirect_to(@projectarea) } format.xml { render :xml => @projectarea, :status => :created, :location ...
[ "def create\n @project_area = ProjectArea.new(project_area_params)\n\n respond_to do |format|\n if @project_area.save\n format.html { redirect_to @project_area, notice: 'Project area was successfully created.' }\n format.json { render :show, status: :created, location: @project_area }\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
PUT /projectareas/1 PUT /projectareas/1.xml
def update @projectarea = Projectarea.find(params[:id]) respond_to do |format| if @projectarea.update_attributes(params[:projectarea]) flash[:notice] = 'Projectarea was successfully updated.' format.html { redirect_to(@projectarea) } format.xml { head :ok } else for...
[ "def update\n respond_to do |format|\n if @project_area.update(project_area_params)\n format.html { redirect_to @project_area, notice: 'Project area was successfully updated.' }\n format.json { render :show, status: :ok, location: @project_area }\n else\n format.html { render :edit...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DELETE /projectareas/1 DELETE /projectareas/1.xml
def destroy @projectarea = Projectarea.find(params[:id]) @projectarea.destroy respond_to do |format| format.html { redirect_to(projectareas_url) } format.xml { head :ok } end end
[ "def destroy\n @project_focus_area_association = @project.project_focus_area_associations.find(params[:id])\n @project_focus_area_association.destroy\n\n respond_to do |format|\n format.html { redirect_to(admin_project_project_focus_area_associations_url(@project)) }\n format.xml { head :ok }\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
There are two situations where we want to avoid checking for an active (ongoing) production deployment: a caller of this trigger could set SKIP_DEPLOYMENT_CHECK to avoid the check, e.g. during feature flag activation; or we are checking the status during an ongoing deployment (deployment_check? == true).
def skip_active_deployment_check? ENV['SKIP_DEPLOYMENT_CHECK'] == 'true' || deployment_check? end
[ "def deploy_disabled?\n ENV['NO_DEPLOY'] == '1'\nend", "def deployed?\n deployed_at_least_once? && self.deployments.exists?(:conditions => {\"deployments.task\" => Deployment::DEPLOY_TASKS, \"deployments.status\" => Deployment::STATUS_SUCCESS})\n end", "def deploy_pending?\n self.grid_service_deploys.pe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GET /users/blocks GET /users/blocks.json
def index @users_blocks = Block.all.where(user_id: current_user.id) end
[ "def index\n @user_blocks = current_user.user_blocks\n end", "def user_blocks_path\n @user_blocks_path ||= '/api/v2/user-blocks'\n end", "def getBlocks(offset = nil)\n raise \"not logged in\" unless @logged_in\n paramstr = offset ? '&offset=' + offset.to_s : \"\"\n blockobj = cal...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
POST /users/blocks POST /users/blocks.json
def create @users_block = Block.new(blocked_id: params[:block][:blocked], user_id: current_user.id) @users = get_blockable_users respond_to do |format| if @users_block.save format.html { redirect_to block_path, notice: 'Block was successfully created.' } f...
[ "def create\n @user_block = UserBlock.new(params[:user_block])\n\n respond_to do |format|\n if @user_block.save\n format.html { redirect_to @user_block, notice: 'User block was successfully created.' }\n format.json { render json: @user_block, status: :created, location: @user_block }\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DELETE /users/blocks/1 DELETE /users/blocks/1.json
def destroy @users_block.destroy respond_to do |format| format.html { redirect_to block_path, notice: 'Block was successfully destroyed.' } format.json { head :no_content } end end
[ "def destroy\n @user_block = UserBlock.find(params[:id])\n @user_block.destroy\n\n respond_to do |format|\n format.html { redirect_to user_blocks_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @user_block = UserBlock.find(params[:id])\n @user_block.destroy\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a method that takes a full name and returns a fake full name.
def make_fake_full_name (full_name) # put names into an array and swap first and last (if you wanted a string instead: name.partition(' ').reverse.join) full_name = full_name.split(' ').reverse # for each name, replace it with a fake name full_name.map! {|name| make_fake_name(name)} # convert the full name b...
[ "def full_name\n name\n end", "def full_name(first, last)\n\tfirst + \" \" + last\nend", "def full_name\n \"#{prename} #{name}\"\n end", "def clean_full_name(first_name, last_name)\n puts \"#{first_name.downcase.capitalize}#{last_name.downcase.capitalize}\"\nend", "def full_name\n return Contact...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a hash of protocol => port.
def protocol_ports (self.overview.listeners || []). reduce(Hash.new) { |acc, lnr| acc[lnr.protocol] = lnr.port; acc } end
[ "def to_hash\n { host: host, port: port }\n end", "def network_interface_port_nums\n get_network_interface_configs.keys.sort\n end", "def protocol_name\n PROTOCOLS[protocol.to_s]\n end", "def hash_ip_address\n # server address refers to the server hosting this rails app\n (\"%02X%02X%02X...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return dependencies for an object file.
def obj_dep( o ) deps = [] inc_dirs = [ SRC_DIR, TEST_DIR ] cpp = o.sub(/\.o$/,'.cpp') \ .sub(/^obj\/src\//, 'src/') \ .sub(/^obj\/test\//, 'test/') # print cpp, "\n" headers = cpp_headers( cpp, inc_dirs ) # print headers.join(" "), "\n" deps << cpp deps << headers # ...
[ "def find_deps(object_file)\n base = File.basename(object_file, '.o')\n\n SRC.find_all do |source| \n File.basename(source, '.c') == base || \n File.basename(source, '.c') == base.sub(/_test/, '')\n end\nend", "def all_obj_dependencies obj\n\t\tcpp = resolve_obj_source_file(obj)\n\t\t# Build a list o...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create and enter a buffer with the given contents
def create_buffer(contents) end
[ "def newbuf(x)\n Hash === x or raise ArgumentError, \"argument must be a hash!\"\n x[:name] || x[:file] or raise ArgumentError, \"`:name' or `:file' key is mandatory!\"\n x[:name] and b = get_buffer_create(x[:name])\n x[:file] && !x[:name] and b = find_file_noselect(x[:f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Enter the given key sequence in the active buffer
def press(key_sequence) end
[ "def key_press(locator,keySequence)\n do_command(\"keyPress\", [locator,keySequence,])\n end", "def key_up(locator,keySequence)\n do_command(\"keyUp\", [locator,keySequence,])\n end", "def press(key); end", "def enter\n performAction 'send_key_enter'\n end", "...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes all entries with keys matching the regular expression. The +matcher+ must be a valid pattern for the N1QL +REGEXP_MATCHES+ function. Because the operation is performed on the query engine, it might take time to propagate changes from the key/value engine to the indexer. Therefore the keys that were created a m...
def delete_matched(matcher, _options = nil) pattern = case matcher when Regexp matcher.inspect[1..-2] when String matcher.tr("?", ".").gsub("*", ".*") else raise NotImplementedError, "Unable to convert #{matcher.inspect} to Regexp patte...
[ "def delete_matched(matcher, options = nil)\n options = merged_options(options)\n instrument(:delete_matched, matcher.inspect) do\n matcher = key_matcher(matcher, options)\n begin\n with do |store|\n keys = []\n if store.supports_sca...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Clears the entire cache. Be careful with this method since it could affect other processes if a shared cache is being used. When the +use_flush+ option is set to +true+, it will flush the bucket. Otherwise, it uses a N1QL query and relies on the default index.
def clear(use_flush: false, **_options) failsafe(:clear) do if use_flush cluster.buckets.flush_bucket(@couchbase_options[:bucket_name]) else operation_options = ::Couchbase::Options::Query.new operation_options.consistent_with(::Couchbase::MutationState.ne...
[ "def clear\n synchronize do\n @keys.clear\n @cache.clear\n end\n end", "def clear!\n NastyCache.instance.delete(\"#{name}#all\")\n NastyCache.instance.delete(\"#{name}#records\")\n end", "def clear\n @cache.clear\n entries.clear\n self\n end", "def cle...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes multiple entries in the cache. Returns the number of entries deleted.
def delete_multi_entries(entries, **_options) return if entries.empty? failsafe(:delete_multi_entries, returning: nil) do successful = collection.remove_multi(entries).select(&:success?) return 0 if successful.empty? @last_mutation_token = successful.max_by { |r| r.mutati...
[ "def delete_multi_entries(entries, **options)\n entries.count { |key| delete_entry(key, **options) }\n end", "def delete_multi_entries(entries, **_options)\n failsafe :delete_multi_entries, returning: 0 do\n redis.then { |c| c.del(entries) }\n end\n end", "def...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Connects to the Couchbase cluster
def build_cluster ::Couchbase::Cluster.connect( @couchbase_options[:connection_string], ::Couchbase::Options::Cluster(authenticator: ::Couchbase::PasswordAuthenticator.new( @couchbase_options[:username], @couchbase_options[:password] )) ) end
[ "def connect_cassandra\n @client = Cql::Client.connect(hosts: ['localhost'])\n @client.use('oink')\nend", "def connect\n @cluster.connect\n end", "def connect\n start_time = Time.now\n cluster_config = config.cluster\n cluster_config = cluster_config.merge(logger: logger) if logger\n clust...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Connects to the Couchbase cluster, opens specified bucket and returns collection object.
def build_collection bucket = cluster.bucket(@couchbase_options[:bucket]) if @couchbase_options[:scope] && @couchbase_options[:collection] bucket.scope(@couchbase_options[:scope]).collection(@couchbase_options[:collection]) else bucket.default_collection end end
[ "def build_cluster\n ::Couchbase::Cluster.connect(\n @couchbase_options[:connection_string],\n ::Couchbase::Options::Cluster(authenticator: ::Couchbase::PasswordAuthenticator.new(\n @couchbase_options[:username], @couchbase_options[:password]\n ))\n )\n end",...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a MageeTextEdit. Param parent, name: the QWidget's parent and name.
def initialize(parent = nil, name = nil) super(parent, name) viewport.set_w_flags(Qt::WNoAutoErase | Qt::WStaticContents) set_static_background(true) @invalid_rows = [] @model = TextModel.new() @model.set_first_line_in_view_handler { first_line_in_view } @model.set_last_line_in_view_handler { last_lin...
[ "def set_NewParent(value)\n set_input(\"NewParent\", value)\n end", "def parent_argument(name)\n name = name.to_s\n @parent_arguments << name.to_s\n end", "def t(name, *args)\n field = JTextField.new *args\n field.name = name\n field\n end", "def edit_window_component(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the display coordinate of line 'n'
def line_num_to_coord(n) (n + 1) * font_metrics.height end
[ "def line(n)\n @lines[n]\n end", "def coord_to_line_num(y)\n\t\ty / font_metrics.height - 1\n\tend", "def lvl_text_coordinates\n return 121, 5, 35, 16\n end", "def point_n(n)\n raise Error::UnsupportedOperation, \"Method LineString#point_n not defined.\"\n end", "def line\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the line number of the display y-coordinate 'y'.
def coord_to_line_num(y) y / font_metrics.height - 1 end
[ "def y_num\n return @y_num\n end", "def get_y\n if DEBUG_COORDS\n logger.debug \"get_y(), %s %s ## %s\" % [get_class(), get_id(), get_unique_id()]\n end\n y = 0\n if @use_uiautomator\n y = @attributes['boun...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the line number of the last visible line
def last_line_in_view() coord_to_line_num(contents_y + height) end
[ "def last_line\n @lines[-1]\n end", "def line_number\n if active?\n @session.request(:nvim_get_current_win).get_cursor[0]\n end\n end", "def line_number\n @current_line\n end", "def last_line_number\n line_number = 1\n @tokens[0..@token_pointer-1].each do |token, i|\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }