sql stringlengths 6 1.05M |
|---|
<reponame>windorg/app
-- Kinds of events a subscriber can be notified about; each value corresponds
-- to one of the nullable *_id columns on subscription_updates below.
CREATE TYPE subscription_update_kind AS ENUM ('suk_board', 'suk_card', 'suk_card_update', 'suk_reply');
-- Per-subscriber notification feed. Presumably exactly one of board_id /
-- card_id / card_update_id / reply_id is set, matching update_kind.
-- NOTE(review): that invariant is not enforced here — consider a CHECK constraint.
CREATE TABLE subscription_updates (
id UUID DEFAULT uuid_generate_v4() PRIMARY KEY NOT NULL,
subscriber_id UUID NOT NULL,
board_id UUID DEFAULT NULL,
card_id UUID DEFAULT NULL,
card_update_id UUID DEFAULT NULL,
reply_id UUID DEFAULT NULL,
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() NOT NULL,
update_kind subscription_update_kind NOT NULL,
is_read BOOLEAN DEFAULT false NOT NULL
);
-- One index per FK column used for lookups and for FK-cascade maintenance.
CREATE INDEX subscription_updates_card_update_id_index ON subscription_updates (card_update_id);
CREATE INDEX subscription_updates_subscriber_id_index ON subscription_updates (subscriber_id);
CREATE INDEX subscription_updates_card_id_index ON subscription_updates (card_id);
CREATE INDEX subscription_updates_board_id_index ON subscription_updates (board_id);
CREATE INDEX subscription_updates_reply_id_index ON subscription_updates (reply_id);
-- Deleting a board/card/card-update/user cascades to its notifications.
ALTER TABLE subscription_updates ADD CONSTRAINT subscription_updates_ref_board_id FOREIGN KEY (board_id) REFERENCES boards (id) ON DELETE CASCADE;
ALTER TABLE subscription_updates ADD CONSTRAINT subscription_updates_ref_card_id FOREIGN KEY (card_id) REFERENCES cards (id) ON DELETE CASCADE;
ALTER TABLE subscription_updates ADD CONSTRAINT subscription_updates_ref_card_update_id FOREIGN KEY (card_update_id) REFERENCES card_updates (id) ON DELETE CASCADE;
-- NOTE(review): reply deletions use NO ACTION while all sibling FKs CASCADE —
-- confirm this inconsistency is intentional.
ALTER TABLE subscription_updates ADD CONSTRAINT subscription_updates_ref_reply_id FOREIGN KEY (reply_id) REFERENCES replies (id) ON DELETE NO ACTION;
ALTER TABLE subscription_updates ADD CONSTRAINT subscription_updates_ref_subscriber_id FOREIGN KEY (subscriber_id) REFERENCES users (id) ON DELETE CASCADE;
-- is_read now lives on subscription_updates; drop the old per-reply flag.
alter table replies drop column is_read;
|
/*
con - Continuous block stats.
*/
create schema con;
-- Mean ERG age and transferred value for each block
create table con.block_stats(
height integer primary key,
circulating_supply bigint not null,
transferred_value bigint not null,
mean_age_ms bigint not null, -- milliseconds
transactions bigint not null
);
-- Initialise block stats
-- Genesis row: height 1, supply of 75 * 10^9 — presumably in the chain's base
-- unit; NOTE(review): confirm units match the rest of the pipeline.
insert into con.block_stats (height, circulating_supply, transferred_value, mean_age_ms, transactions)
values (1, 75 * 10^9, 0, 0, 1);
-- Daily time series, keyed by the timestamp of the first block of each day.
create table con.mean_age_series_daily (
timestamp bigint primary key, -- first of day block
mean_age_days float not null
);
create table con.aggregate_series_daily (
timestamp bigint primary key, -- first of day block
transferred_value bigint not null,
transactions bigint not null
);
-- Single-row cache of the latest stats; the CHECK pins the row count to one.
create table con.preview (
singleton integer primary key default 1 check(singleton = 1),
timestamp bigint, -- latest available
mean_age_days real not null, -- latest available
transferred_value_24h bigint not null, -- total transferred volume in last 24h
transactions_24h bigint not null -- total transactions in last 24h
);
|
-- Recreate the dev database from scratch.
drop database if exists project2_dev;
create database project2_dev;
use project2_dev;
-- NOTE(review): the database was just recreated empty, so the SELECTs below
-- will fail unless table-creation scripts run in between — confirm run order.
SELECT * FROM Users;
SELECT * FROM userInfos;
SELECT * FROM continents;
SELECT * FROM asia;
SELECT * FROM africa;
SELECT * FROM australia;
SELECT * FROM north;
SELECT * FROM south;
SELECT * FROM europe;
SELECT * FROM users;
SELECT * FROM trips;
-- NOTE(review): "recomendations" looks misspelled — confirm it matches the real table name.
SELECT * FROM recomendations;
|
<reponame>fanghe124/lemon
-- Add an optional STATUS column (up to 50 chars); existing rows get NULL.
ALTER TABLE STORE_INFO ADD COLUMN STATUS VARCHAR(50);
|
-- SCHEMA VERSION 4: 2020-02-14
-- NOTE(review): header says version 4 but the script sets schema_version = 5 —
-- presumably this migration upgrades 4 -> 5; confirm the numbering convention.
alter table servers add column log_cleanup_enabled bool not null default false;
-- Intentional whole-table update: info is a single-row metadata table (assumed).
update info set schema_version = 5; |
-- Pending 'Solicitud Deposito' requests captured by user SITTI in Cxc.
SELECT *
FROM dbo.Cxc AS c
WHERE c.Mov = 'Solicitud Deposito'
  AND c.Estatus = 'PENDIENTE'
  AND c.Usuario = 'SITTI';
-- Dinero rows of the same movement kind whose MovID has a matching pending
-- Cxc request (correlated EXISTS instead of an IN-list over the same filter).
SELECT *
FROM dbo.Dinero AS d
WHERE d.Mov = 'Solicitud Deposito'
  AND EXISTS (
      SELECT 1
      FROM dbo.Cxc AS c
      WHERE c.MovID = d.MovID
        AND c.Mov = 'Solicitud Deposito'
        AND c.Estatus = 'PENDIENTE'
        AND c.Usuario = 'SITTI'
  );
-- Pending SITTI deposit requests from Dinero enriched with invoice (CFD) and
-- payment info pulled from the remote SITTI database via linked server GTPSITTIDB.
SELECT ID,
d.Mov,
d.MovID,
CONVERT(VARCHAR, d.FechaEmision, 120) AS 'Fecha Emision',
d.Importe,
d.OrigenTipo,
d.Origen,
d.OrigenID,
-- 'Si' when the movement has an invoice registered in SITTI, else 'No'.
CASE
WHEN ISNULL(cii.IdCFD, 0) <> 0 THEN
'Si'
ELSE
'No'
END AS 'Factura en SITTI',
p.TipoPago,
p.Monto,
CONVERT(VARCHAR, p.FechaCancelacion, 120) AS 'Fecha de Cancelacion SITTI'
FROM dbo.Dinero AS d
-- Four-part names: [linked server].[database].[schema].[table] on GTPSITTIDB.
LEFT JOIN GTPSITTIDB.sitti.dbo.CFDIntelisisInfo AS cii
ON d.OrigenID = cii.MovimientoId
LEFT JOIN GTPSITTIDB.sitti.dbo.Pagos AS p
ON p.IdCFD = cii.IdCFD
WHERE d.Mov = 'Solicitud Deposito'
AND d.Estatus = 'PENDIENTE'
AND d.Usuario = 'SITTI'; |
-- Steampipe/Terraform compliance check: flag each azurerm_logic_app_workflow
-- that has no azurerm_monitor_diagnostic_setting targeting it.
-- The shared lookup (previously duplicated inline twice) is hoisted into a CTE.
with logged_workflows as (
  -- Workflow names referenced by some diagnostic setting's target_resource_id:
  -- "azurerm_logic_app_workflow.<name>.id" -> split part 2 = type, part 3 = name.
  select
    split_part((arguments ->> 'target_resource_id'), '.', 3) as workflow_name
  from
    terraform_resource
  where
    type = 'azurerm_monitor_diagnostic_setting'
    and split_part((arguments ->> 'target_resource_id'), '.', 2) = 'azurerm_logic_app_workflow'
)
select
  type || ' ' || name as resource,
  case
    when name in (select workflow_name from logged_workflows) then 'ok'
    else 'alarm'
  end as status,
  name || case
    when name in (select workflow_name from logged_workflows) then ' logging enabled'
    else ' logging disabled'
  end || '.' as reason,
  path || ':' || start_line
from
  terraform_resource
where
  type = 'azurerm_logic_app_workflow'; |
-- Add an optional per-run retry backoff schedule (serialized blob, max 200 bytes).
-- NOTE(review): VARBINARY implies an application-defined encoding — confirm the
-- format with the writing code before depending on it.
ALTER TABLE backfill_runs
ADD COLUMN backoff_schedule VARBINARY(200) NULL DEFAULT NULL; |
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Sample SQL Jinja template to extract customer conversions into an internal format.
# Args:
# analytics_table: input Google Analytics BigQuery tablename containing the conversions.
# conversions_table: output conversions BigQuery tablename.
#
# This is only a sample file. Override it with your own query that extracts conversions. Note that
# the input data is not limited to conversions contained in GA. You can rewrite this query to
# extract data from Firebase, or even your own CRM data. The only requirement is that the output of
# the query matches the following schema:
#
# Output schema:
# user_id: STRING
# conversion_ts: TIMESTAMP (timestamp of the conversion)
# label: ANY type
# e.g. BOOL for binary classification, STRING for multi-class, or INT64/FLOAT64 for regression.
CREATE OR REPLACE TABLE `{{conversions_table}}`
AS (
  SELECT DISTINCT
    -- Prefer clientId; treat '' as missing and fall back to fullVisitorId.
    IFNULL(NULLIF(GaTable.clientId, ''), GaTable.fullVisitorId) AS user_id,
    TIMESTAMP_SECONDS(GaTable.visitStartTime) AS conversion_ts,
    -- Every extracted row is a positive example (binary label).
    TRUE AS label
  FROM
    `{{analytics_table}}` AS GaTable, UNNEST(GaTable.hits) as hits
  WHERE
    hits.eCommerceAction.action_type = '6' -- Google Analytics code for "Completed purchase"
);
|
<filename>sed2.sql
-- Copyright 2018 <NAME>. All rights reserved. More info at http://tanelpoder.com
-- Licensed under the Apache License, Version 2.0. See LICENSE.txt for terms & conditions.
-- sed2.sql: list V$EVENT_NAME wait-event descriptions matching the &1 pattern,
-- with enqueue details attached where the event is an enqueue wait.
prompt Show wait event descriptions matching &1...
-- SQL*Plus column formatting for the report below.
col sed_name head EVENT_NAME for a55
col sed_p1 head PARAMETER1 for a25
col sed_p2 head PARAMETER2 for a25
col sed_p3 head PARAMETER3 for a25
col sed_event# head EVENT# for 99999
col sed_req_description HEAD REQ_DESCRIPTION for a100 WORD_WRAP
col sed_req_reason HEAD REQ_REASON for a32 WRAP
col sed_wait_class HEAD WAIT_CLASS for a20
col sed_eq_name HEAD ENQUEUE_NAME for a30
SELECT
e.event# sed_event#
, e.name sed_name
, e.wait_class sed_wait_class
, e.parameter1 sed_p1
, e.parameter2 sed_p2
, e.parameter3 sed_p3
, s.eq_name sed_eq_name
, s.req_reason sed_req_reason
, s.req_description sed_req_description
-- , e.display_name sed_display_name -- 12c
FROM
v$event_name e
, v$enqueue_statistics s
WHERE
-- Legacy Oracle (+) outer join: keep events that have no enqueue-statistics row.
e.event# = s.event# (+)
-- Case-insensitive match against the user-supplied pattern (&1).
AND lower(e.name) like lower('&1')
ORDER BY
sed_name
/
<filename>tests/queries/0_stateless/01837_database_memory_ddl_dictionaries.sql
-- Tags: no-parallel, no-fasttest
-- Regression test: DDL-created dictionaries work inside a Memory-engine database.
DROP DATABASE IF EXISTS 01837_db;
CREATE DATABASE 01837_db ENGINE = Memory;
-- Source table feeding the dictionary.
DROP TABLE IF EXISTS 01837_db.simple_key_dictionary_source;
CREATE TABLE 01837_db.simple_key_dictionary_source
(
id UInt64,
value String
) ENGINE = TinyLog;
INSERT INTO 01837_db.simple_key_dictionary_source VALUES (1, 'First');
INSERT INTO 01837_db.simple_key_dictionary_source VALUES (2, 'Second');
INSERT INTO 01837_db.simple_key_dictionary_source VALUES (3, 'Third');
-- DIRECT layout: every lookup goes straight to the source table; nothing cached.
DROP DICTIONARY IF EXISTS 01837_db.simple_key_direct_dictionary;
CREATE DICTIONARY 01837_db.simple_key_direct_dictionary
(
id UInt64,
value String
)
PRIMARY KEY id
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() DB '01837_db' TABLE 'simple_key_dictionary_source'))
LAYOUT(DIRECT());
SELECT * FROM 01837_db.simple_key_direct_dictionary;
-- Clean up all objects created by this test.
DROP DICTIONARY 01837_db.simple_key_direct_dictionary;
DROP TABLE 01837_db.simple_key_dictionary_source;
DROP DATABASE 01837_db;
|
<gh_stars>10-100
-- pgTAP test, batch 2: as a non-dblink, non-owner role, push a second wave of
-- inserts/updates/deletes into every mimeo source table over dblink so later
-- replication-refresh tests have fresh changes to pick up.
\set ON_ERROR_ROLLBACK 1
\set ON_ERROR_STOP true
SELECT set_config('search_path','mimeo, dblink, public',false);
-- Exactly two assertions: the is() connection check and the final pass().
SELECT plan(2);
-- Test that non-dblink, non-owner roles can still insert to source tables
SELECT dblink_connect('mimeo_test', 'host=localhost port=5432 dbname=mimeo_source user=mimeo_dumb_role password=<PASSWORD>');
SELECT is(dblink_get_connections() @> '{mimeo_test}', 't', 'Remote database connection established');
-- Insert new data
-- Rows 10001..20000 for every snapshot/inserter/updater/dml/logdel source table.
-- (diag() only emits progress output; these are not test assertions.)
SELECT diag('Inserting more data for: mimeo_source.snap_test_source');
SELECT dblink_exec('mimeo_test', 'INSERT INTO mimeo_source.snap_test_source VALUES (generate_series(10001,20000), ''test''||generate_series(10001,20000)::text)');
SELECT diag('Inserting more data for: mimeo_source.snap_test_source_change_col');
SELECT dblink_exec('mimeo_test', 'INSERT INTO mimeo_source.snap_test_source_change_col VALUES (generate_series(10001,20000), ''test''||generate_series(10001,20000)::text)');
SELECT diag('Inserting more data for: mimeo_source.Snap-test-Source');
SELECT dblink_exec('mimeo_test', 'INSERT INTO mimeo_source."Snap-test-Source" VALUES (generate_series(10001,20000), ''test''||generate_series(10001,20000)::text)');
SELECT diag('Inserting more data for: mimeo_source.inserter_test_source');
SELECT dblink_exec('mimeo_test', 'INSERT INTO mimeo_source.inserter_test_source VALUES (generate_series(10001,20000), ''test''||generate_series(10001,20000)::text)');
SELECT diag('Inserting more data for: mimeo_source."Inserter-Test-Source"');
SELECT dblink_exec('mimeo_test', 'INSERT INTO mimeo_source."Inserter-Test-Source" VALUES (generate_series(10001,20000), ''test''||generate_series(10001,20000)::text)');
SELECT diag('Inserting more data for: mimeo_source.updater_test_source');
SELECT dblink_exec('mimeo_test', 'INSERT INTO mimeo_source.updater_test_source VALUES (generate_series(10001,20000), ''test''||generate_series(10001,20000)::text)');
SELECT diag('Inserting more data for: mimeo_source."Updater-Test-Source"');
SELECT dblink_exec('mimeo_test', 'INSERT INTO mimeo_source."Updater-Test-Source" VALUES (generate_series(10001,20000), ''test''||generate_series(10001,20000)::text)');
SELECT diag('Inserting more data for: mimeo_source.dml_test_source');
SELECT dblink_exec('mimeo_test', 'INSERT INTO mimeo_source.dml_test_source VALUES (generate_series(10001,20000), ''test''||generate_series(10001,20000)::text)');
SELECT diag('Inserting more data for: mimeo_source.dml_test_source2');
SELECT dblink_exec('mimeo_test', 'INSERT INTO mimeo_source.dml_test_source2 VALUES (generate_series(10001,20000), ''test''||generate_series(10001,20000)::text)');
SELECT diag('Inserting more data for: mimeo_source.dml_test_source_nodata');
SELECT dblink_exec('mimeo_test', 'INSERT INTO mimeo_source.dml_test_source_nodata VALUES (generate_series(10001,20000), ''test''||generate_series(10001,20000)::text)');
SELECT diag('Inserting more data for: mimeo_source.dml_test_source_filter');
SELECT dblink_exec('mimeo_test', 'INSERT INTO mimeo_source.dml_test_source_filter VALUES (generate_series(10001,20000), ''test''||generate_series(10001,20000)::text)');
SELECT diag('Inserting more data for: mimeo_source.dml_test_source_condition');
SELECT dblink_exec('mimeo_test', 'INSERT INTO mimeo_source.dml_test_source_condition VALUES (generate_series(10001,20000), ''test''||generate_series(10001,20000)::text)');
SELECT diag('Inserting more data for: mimeo_source."Dml-Test-Source"');
SELECT dblink_exec('mimeo_test', 'INSERT INTO mimeo_source."Dml-Test-Source" VALUES (generate_series(10001,20000), ''test''||generate_series(10001,20000)::text)');
SELECT diag('Inserting more data for: mimeo_source.logdel_test_source');
SELECT dblink_exec('mimeo_test', 'INSERT INTO mimeo_source.logdel_test_source VALUES (generate_series(10001,20000), ''test''||generate_series(10001,20000)::text)');
SELECT diag('Inserting more data for: mimeo_source.logdel_test_source2');
SELECT dblink_exec('mimeo_test', 'INSERT INTO mimeo_source.logdel_test_source2 VALUES (generate_series(10001,20000), ''test''||generate_series(10001,20000)::text)');
SELECT diag('Inserting more data for: mimeo_source.logdel_test_source_nodata');
SELECT dblink_exec('mimeo_test', 'INSERT INTO mimeo_source.logdel_test_source_nodata VALUES (generate_series(10001,20000), ''test''||generate_series(10001,20000)::text)');
SELECT diag('Inserting more data for: mimeo_source.logdel_test_source_filter');
SELECT dblink_exec('mimeo_test', 'INSERT INTO mimeo_source.logdel_test_source_filter VALUES (generate_series(10001,20000), ''test''||generate_series(10001,20000)::text)');
SELECT diag('Inserting more data for: mimeo_source.logdel_test_source_condition');
SELECT dblink_exec('mimeo_test', 'INSERT INTO mimeo_source.logdel_test_source_condition VALUES (generate_series(10001,20000), ''test''||generate_series(10001,20000)::text)');
SELECT diag('Inserting more data for: mimeo_source."LogDel-Test-Source"');
SELECT dblink_exec('mimeo_test', 'INSERT INTO mimeo_source."LogDel-Test-Source" VALUES (generate_series(10001,20000), ''test''||generate_series(10001,20000)::text)');
-- Data for testing updater
SELECT diag('Updating data for: mimeo_source.updater_test_source');
SELECT dblink_exec('mimeo_test', 'UPDATE mimeo_source.updater_test_source SET col2 = ''changed'', col3 = clock_timestamp(), col4 = nextval(''mimeo_source.updater_test_source_col4_seq'') WHERE col1 between 9500 and 11500');
-- Data for testing dml
SELECT diag('Updating data for: mimeo_source.dml_test_source2');
SELECT dblink_exec('mimeo_test', 'UPDATE mimeo_source.dml_test_source2 SET col2 = ''changed'' WHERE col1 = 4 AND col2 = ''test4''');
SELECT diag('Updating data for: mimeo_source.dml_test_source2');
SELECT dblink_exec('mimeo_test', 'UPDATE mimeo_source.dml_test_source2 SET col2 = ''changed'' WHERE col1 between 8000 and 9000');
SELECT diag('Deleting data for: mimeo_source.dml_test_source2');
SELECT dblink_exec('mimeo_test', 'DELETE FROM mimeo_source.dml_test_source2 WHERE col1 between 9500 and 10500');
SELECT diag('Updating data for: mimeo_source.dml_test_source_condition');
SELECT dblink_exec('mimeo_test', 'UPDATE mimeo_source.dml_test_source_condition SET col2 = ''changed''||col1 WHERE col1 > 15000');
SELECT diag('Deleting data for: mimeo_source.dml_test_source_condition');
SELECT dblink_exec('mimeo_test', 'DELETE FROM mimeo_source.dml_test_source_condition WHERE col1 <= 10000');
-- Data for testing logdel
SELECT diag('Updating data for: mimeo_source.logdel_test_source2');
SELECT dblink_exec('mimeo_test', 'UPDATE mimeo_source.logdel_test_source2 SET col2 = ''changed'' WHERE col1 = 4 AND col2 = ''test4''');
SELECT diag('Updating data for: mimeo_source.logdel_test_source2');
SELECT dblink_exec('mimeo_test', 'UPDATE mimeo_source.logdel_test_source2 SET col2 = ''changed'' WHERE col1 between 9600 and 10200');
SELECT diag('Deleting data for: mimeo_source.logdel_test_source2');
SELECT dblink_exec('mimeo_test', 'DELETE FROM mimeo_source.logdel_test_source2 WHERE col1 between 12500 and 12520');
SELECT diag('Updating data for: mimeo_source.logdel_test_source_condition');
SELECT dblink_exec('mimeo_test', 'UPDATE mimeo_source.logdel_test_source_condition SET col2 = ''changed''||col1 WHERE col1 > 18000');
SELECT dblink_disconnect('mimeo_test');
--SELECT is_empty('SELECT dblink_get_connections() @> ''{mimeo_test}''', 'Close remote database connection');
SELECT pass('Completed 2nd batch of data inserts/updates/deletes for remote tables. Sleeping for 10 seconds to ensure gap for incremental tests...');
-- Sleep so time-based (incremental) replication tests see two distinct batches.
SELECT pg_sleep(10);
SELECT * FROM finish();
|
<filename>util/ajustes/03.sql
-- Add a display colour to movement types and seed the category catalogue.
ALTER TABLE TipoMovimiento
    ADD color varchar(15) NULL;
-- Type 1 (income) shown green, type 2 (expense) shown red.
UPDATE TipoMovimiento SET color = 'green' WHERE idTipoMovimiento = 1;
UPDATE TipoMovimiento SET color = 'red' WHERE idTipoMovimiento = 2;
-- Seed categories, grouped by movement type, in a single multi-row insert.
INSERT INTO CategoriaMovimiento (descripcion, idTipoMovimiento) VALUES
    ('Recurrente', 1),
    ('Extraordinario', 1),
    ('Obligatorio', 2),
    ('Necesario', 2),
    ('Ocasional', 2),
    ('Ahorro', 2); |
<reponame>ziafadhilah/yii2advancedmentah
-- phpMyAdmin SQL Dump
-- version 5.0.1
-- https://www.phpmyadmin.net/
--
-- Host: 127.0.0.1
-- Generation Time: Oct 27, 2020 at 11:16 AM
-- Server version: 10.4.11-MariaDB
-- PHP Version: 7.4.2
-- Standard phpMyAdmin preamble: defer autocommit and wrap the whole dump in a
-- single transaction (matching COMMIT at the end of the dump).
SET SQL_MODE = "NO_AUTO_VALUE_ON_ZERO";
SET AUTOCOMMIT = 0;
START TRANSACTION;
SET time_zone = "+00:00";
-- Save client charset/collation; restored by the /*!40101 ... */ lines at the end.
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8mb4 */;
--
-- Database: `yiiadvanced2`
--
-- --------------------------------------------------------
--
-- Table structure for table `admin`
--
-- Yii2 admin identity table (keys/indexes added later in the dump).
CREATE TABLE `admin` (
`id` int(11) NOT NULL,
`username` varchar(255) COLLATE utf8_unicode_ci NOT NULL,
`auth_key` varchar(32) COLLATE utf8_unicode_ci NOT NULL,
`password_hash` varchar(255) COLLATE utf8_unicode_ci NOT NULL,
`password_reset_token` varchar(255) COLLATE utf8_unicode_ci DEFAULT NULL,
`email` varchar(255) COLLATE utf8_unicode_ci NOT NULL,
`status` smallint(6) NOT NULL DEFAULT 10,
`created_at` int(11) NOT NULL,
`updated_at` int(11) NOT NULL,
`verification_token` varchar(255) COLLATE utf8_unicode_ci DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;
--
-- Dumping data for table `admin`
--
-- BUG FIX: the password_hash value was missing its opening quote (redaction
-- artifact), which made this INSERT a syntax error.
INSERT INTO `admin` (`id`, `username`, `auth_key`, `password_hash`, `password_reset_token`, `email`, `status`, `created_at`, `updated_at`, `verification_token`) VALUES
(1, 'admin', '<PASSWORD>', '<PASSWORD>', NULL, '<EMAIL>', 10, 1603294188, 1603294188, 'mpwqsgdoKIPxjUON0-cOS22FT4dUGpVz_1603294188');
-- --------------------------------------------------------
--
-- Table structure for table `category`
--
-- Product category lookup (referenced by `images`.category_id below,
-- though no FK constraint is declared in this dump).
CREATE TABLE `category` (
`id` int(11) NOT NULL,
`name` varchar(255) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
--
-- Dumping data for table `category`
--
INSERT INTO `category` (`id`, `name`) VALUES
(1, 'Kayu Olahan'),
(2, 'Deking'),
(3, 'Kusen'),
(4, 'Pintu'),
(5, 'Lis Profil'),
(6, 'Tangga');
-- --------------------------------------------------------
--
-- Table structure for table `images`
--
-- Product images. category_id / product_id look like foreign keys but no FK
-- constraints are declared anywhere in this dump — NOTE(review): confirm
-- referential integrity is enforced at the application layer.
CREATE TABLE `images` (
`id` int(11) NOT NULL,
`category_id` int(11) NOT NULL,
`product_id` int(11) NOT NULL,
`image` varchar(255) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
--
-- Dumping data for table `images`
--
INSERT INTO `images` (`id`, `category_id`, `product_id`, `image`) VALUES
(1, 1, 1, 'ini gambar'),
(2, 1, 1, 'gambar2'),
(3, 2, 2, 'deking gambar');
-- --------------------------------------------------------
--
-- Table structure for table `user`
--
-- Yii2 front-end user identity table; schema mirrors `admin` above.
CREATE TABLE `user` (
`id` int(11) NOT NULL,
`username` varchar(255) COLLATE utf8_unicode_ci NOT NULL,
`auth_key` varchar(32) COLLATE utf8_unicode_ci NOT NULL,
`password_hash` varchar(255) COLLATE utf8_unicode_ci NOT NULL,
`password_reset_token` varchar(255) COLLATE utf8_unicode_ci DEFAULT NULL,
`email` varchar(255) COLLATE utf8_unicode_ci NOT NULL,
`status` smallint(6) NOT NULL DEFAULT 10,
`created_at` int(11) NOT NULL,
`updated_at` int(11) NOT NULL,
`verification_token` varchar(255) COLLATE utf8_unicode_ci DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;
--
-- Dumping data for table `user`
--
-- BUG FIX: the password_hash value was missing its opening quote (redaction
-- artifact), which made this INSERT a syntax error.
INSERT INTO `user` (`id`, `username`, `auth_key`, `password_hash`, `password_reset_token`, `email`, `status`, `created_at`, `updated_at`, `verification_token`) VALUES
(1, 'user', '<PASSWORD>', '<PASSWORD>', NULL, '<EMAIL>', 10, 1603294188, 1603294188, 'mpwqsgdoKIPxjUON0-cOS22FT4dUGpVz_1603294188');
--
-- Indexes for dumped tables
--
--
-- Indexes for table `admin`
--
ALTER TABLE `admin`
ADD PRIMARY KEY (`id`),
ADD UNIQUE KEY `username` (`username`),
ADD UNIQUE KEY `email` (`email`),
ADD UNIQUE KEY `password_reset_token` (`password_reset_token`);
--
-- Indexes for table `category`
--
ALTER TABLE `category`
ADD PRIMARY KEY (`id`);
--
-- Indexes for table `images`
--
ALTER TABLE `images`
ADD PRIMARY KEY (`id`);
--
-- Indexes for table `user`
--
ALTER TABLE `user`
ADD PRIMARY KEY (`id`),
ADD UNIQUE KEY `username` (`username`),
ADD UNIQUE KEY `email` (`email`),
-- BUG FIX: the indexed column had been replaced by a `<PASSWORD>` redaction
-- placeholder; the `user` table's column (mirroring `admin`) is password_reset_token.
ADD UNIQUE KEY `password_reset_token` (`password_reset_token`);
--
-- AUTO_INCREMENT for dumped tables
--
-- Counters resume just past the highest dumped id of each table.
--
-- AUTO_INCREMENT for table `admin`
--
ALTER TABLE `admin`
MODIFY `id` int(11) NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=2;
--
-- AUTO_INCREMENT for table `category`
--
ALTER TABLE `category`
MODIFY `id` int(11) NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=7;
--
-- AUTO_INCREMENT for table `images`
--
ALTER TABLE `images`
MODIFY `id` int(11) NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=4;
--
-- AUTO_INCREMENT for table `user`
--
ALTER TABLE `user`
MODIFY `id` int(11) NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=3;
-- Close the transaction opened in the preamble and restore saved charset settings.
COMMIT;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
|
-- @testpoint: validate degrees() argument count and type checking — each of the
-- calls below (non-numeric input, zero arguments, two arguments) should fail
-- with a reasonable error message rather than crash.
select degrees('hello你好');
select degrees();
select degrees(1,99); |
-- Person details: every employee with name parts NULL-padded to ' '.
CREATE VIEW vwPersonDetails
AS
SELECT
COALESCE(p.Title, ' ') AS Title
,p.[FirstName]
,COALESCE(p.MiddleName, ' ') AS MiddleName
,p.[LastName]
,e.[JobTitle]
FROM [HumanResources].[Employee] e
INNER JOIN [Person].[Person] p
ON p.[BusinessEntityID] = e.[BusinessEntityID]
GO
-- BUG FIX: vwProductInfo was queried (and updated) before it was created, and
-- the CREATE VIEW shared a batch with other statements — T-SQL requires
-- CREATE VIEW to be the only statement in its batch. Create it first, alone.
CREATE VIEW vwProductInfo AS
SELECT ProductID, ProductNumber,Name,SafetyStockLevel,
ReOrderPoint
FROM Production.Product
WHERE SafetyStockLevel <=1000
WITH CHECK OPTION;
GO
select * from vwProductInfo
exec sp_helptext vwPersonDetails
-- WITH CHECK OPTION guarantees this update keeps the row visible via the view
-- (999 still satisfies SafetyStockLevel <= 1000).
UPDATE vwProductInfo SET SafetyStockLevel= 999
WHERE ProductID=321
|
<gh_stars>0
/*
Show every role (cargo) together with the number of vendedores holding it,
largest head-count first.
*/
SELECT cargo, COUNT(id)
FROM vendedores
GROUP BY cargo
ORDER BY COUNT(id) DESC; |
-- Organised DIM_ProductTable --
-- Curated product dimension: each product with its subcategory and category
-- names attached. Unused DimProduct columns are intentionally omitted.
SELECT
    p.[ProductKey],
    p.[ProductAlternateKey] AS ProductItemCode,
    p.[EnglishProductName] AS [Product Name],
    ps.EnglishProductSubcategoryName AS [Sub Category],  -- from DimProductSubcategory
    pc.EnglishProductCategoryName AS [Product Category], -- from DimProductCategory
    p.[Color] AS [Product Color],
    p.[Size] AS [Product Size],
    p.[ProductLine] AS [Product Line],
    p.[ModelName] AS [Product Model Name],
    p.[EnglishDescription] AS [Product Description],
    -- Products with NULL Status are treated as no-longer-current.
    ISNULL (p.Status, 'Outdated') AS [Product Status]
FROM
    [dbo].[DimProduct] AS p
    LEFT JOIN dbo.DimProductSubcategory AS ps
        ON ps.ProductSubcategoryKey = p.ProductSubcategoryKey
    -- BUG FIX: previously joined ps.ProductSubcategoryKey to pc.ProductCategoryKey,
    -- attaching the wrong category to most products; join on the category key.
    LEFT JOIN dbo.DimProductCategory AS pc
        ON pc.ProductCategoryKey = ps.ProductCategoryKey
ORDER BY
    p.ProductKey ASC |
-- Sessions holding an open transaction, longest-idle first (SQL Server).
-- 'age' = seconds since the session's last batch; 'blockedBy' = blocking spid (0 = none).
select datediff(ss,last_batch, getdate()) as age
, spid, blocked as blockedBy, db_name(sp.dbid) as dbname, loginame, hostname, program_name, text
, stmt_start, stmt_end, cmd, lastwaittype, waitresource, cpu as cpu_ms, physical_io, memusage, last_batch, open_tran, status
from sys.sysprocesses sp
-- text/stmt_start/stmt_end come from the session's most recent SQL batch.
CROSS APPLY sys.dm_exec_sql_text(sp.sql_handle) AS sq
where
open_tran > 0
order by datediff(ss,last_batch, getdate()) desc |
<reponame>yradsmikham/OHDSIonAzure
-- OMOP CDM-style vocabulary reference table: one row per source vocabulary
-- (e.g. SNOMED, RxNorm). NOTE(review): no primary key is declared here; the
-- OMOP DDL convention adds the PK/indexes in a separate constraint script —
-- confirm before relying on uniqueness of vocabulary_id.
CREATE TABLE [dbo].[vocabulary] (
[vocabulary_id] VARCHAR (20) NOT NULL,
[vocabulary_name] VARCHAR (255) NOT NULL,
[vocabulary_reference] VARCHAR (255) NOT NULL,
[vocabulary_version] VARCHAR (255) NULL,
[vocabulary_concept_id] INT NOT NULL
);
|
-- Rebuild the imkersleiden (Leiden beekeepers) schema from scratch.
DROP DATABASE IF EXISTS imkersleiden;
CREATE DATABASE imkersleiden;
USE imkersleiden;
-- Login accounts; each account belongs to one individual.
-- SECURITY NOTE(review): passwords are stored in plain text in varchar(20)
-- (see the 'admin'/'admin' seed row below); store a salted hash instead.
CREATE TABLE accounts (
id int(11) NOT NULL,
individualid int(11) NOT NULL,
username varchar(20) DEFAULT NULL,
password varchar(20) DEFAULT NULL
);
-- Permission level per account.
CREATE TABLE userRoles (
id int(11) NOT NULL,
accountid int(11) DEFAULT NULL,
permissions int(1) DEFAULT NULL
);
-- A person (member); may own an account and follow courses.
CREATE TABLE individual (
id int(11) NOT NULL,
firstname varchar(40) DEFAULT NULL,
lastname varchar(40) DEFAULT NULL,
dateofbirth DATE DEFAULT NULL
);
-- Junction table: which individual follows which course (m:n).
CREATE TABLE individual_cursussen (
individualid int(11) DEFAULT NULL,
cursusid int(11) DEFAULT NULL
);
-- A course (cursus) with a name, description and date range.
CREATE TABLE cursus (
id int(11) NOT NULL,
naam varchar(20) DEFAULT NULL,
descript text DEFAULT NULL,
startdate DATE DEFAULT NULL,
enddate DATE DEFAULT NULL
);
-- BUG FIX: add primary keys BEFORE the foreign keys that reference them —
-- InnoDB rejects a FK whose referenced column is not yet indexed, so the
-- original ordering (accounts FK before individual PK) failed.
ALTER TABLE `individual`
  ADD PRIMARY KEY (`id`);
ALTER TABLE `cursus`
  ADD PRIMARY KEY (`id`);
ALTER TABLE `accounts`
  ADD PRIMARY KEY (`id`),
  ADD FOREIGN KEY (`individualid`) REFERENCES `individual`(`id`);
-- BUG FIX: the FK below referenced non-existent table `account` (singular);
-- the table is named `accounts`.
ALTER TABLE `userRoles`
  ADD PRIMARY KEY (`id`),
  ADD FOREIGN KEY (`accountid`) REFERENCES `accounts`(`id`);
ALTER TABLE `individual_cursussen`
  ADD FOREIGN KEY (`individualid`) REFERENCES `individual`(`id`),
  ADD FOREIGN KEY (`cursusid`) REFERENCES `cursus`(`id`);
-- Seed data: one individual with an admin account.
INSERT INTO `individual` (`id`, `firstname`, `lastname`, `dateofbirth`) VALUES
(1627948226, 'Mohamed', 'Boukiour', '1969-07-05');
INSERT INTO `accounts` (`id`, `individualid`, `username`, `password`) VALUES
(2141233221, 1627948226, 'admin', 'admin');
|
<reponame>lenovike/StoreSyncer<filename>init.sql<gh_stars>0
-- Maps an entity (product, order, ...) in a source shop to its counterpart in
-- a destination shop, so the syncer can translate ids between stores.
CREATE TABLE `entity_mapping` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT,
`source_shop_id` varchar(200) DEFAULT NULL,
`destination_shop_id` varchar(200) DEFAULT NULL,
`source_entity_id` bigint(21) DEFAULT NULL,
`destination_entity_id` bigint(21) DEFAULT NULL,
`entity_type` varchar(200) DEFAULT NULL, -- kind of entity being mapped (e.g. product/order)
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=0 DEFAULT CHARSET=utf8 |
/*
SQLyog Community v13.1.6 (64 bit)
MySQL - 10.4.14-MariaDB : Database - ubl
*********************************************************************
*/
/*!40101 SET NAMES utf8 */;
/*!40101 SET SQL_MODE=''*/;
/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
CREATE DATABASE /*!32312 IF NOT EXISTS*/`ubl` /*!40100 DEFAULT CHARACTER SET utf8 COLLATE utf8_unicode_ci */;
USE `ubl`;
/*Table structure for table `doctors` */
-- Doctor profiles for the ubl application: one row per doctor, linked to a
-- user account via user_id. bmdc_number is the doctor's registration number.
DROP TABLE IF EXISTS `doctors`;
CREATE TABLE `doctors` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`user_id` int(11) NOT NULL,
`doctor_name` varchar(255) COLLATE utf8_unicode_ci DEFAULT NULL,
`designation` varchar(255) CHARACTER SET utf8 COLLATE utf8_unicode_nopad_ci DEFAULT NULL,
`department` varchar(55) CHARACTER SET utf8 COLLATE utf8_unicode_nopad_ci DEFAULT NULL,
`specialization` varchar(255) CHARACTER SET utf8 COLLATE utf8_unicode_nopad_ci DEFAULT NULL,
`bmdc_number` varchar(255) CHARACTER SET utf8 COLLATE utf8_unicode_nopad_ci DEFAULT NULL,
`chamber_name` varchar(255) CHARACTER SET utf8 COLLATE utf8_unicode_nopad_ci DEFAULT NULL,
`chamber_address` text CHARACTER SET utf8 COLLATE utf8_unicode_nopad_ci DEFAULT NULL,
`education` varchar(255) COLLATE utf8_unicode_ci DEFAULT NULL,
`location` varchar(255) COLLATE utf8_unicode_ci DEFAULT NULL,
`imagelink` varchar(255) COLLATE utf8_unicode_ci DEFAULT NULL,
`online_consultation` varchar(22) COLLATE utf8_unicode_ci DEFAULT NULL,
`created_at` datetime NOT NULL,
`updated_at` datetime NOT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=5 DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;
/*Data for the table `doctors` */
insert into `doctors`(`id`,`user_id`,`doctor_name`,`designation`,`department`,`specialization`,`bmdc_number`,`chamber_name`,`chamber_address`,`education`,`location`,`imagelink`,`online_consultation`,`created_at`,`updated_at`) values
(3,20,'Dr.Md.<NAME>','Oral And Dental Surgeon, President,Bangladesh Dental Society,Sylhet Division.','1','Oral And Dental Surgeon, President,Bangladesh Dental Society,Sylhet Division.','182','Adhunik Dental Surgery and Research Center.','Shapla Bhaban,Manikpir road,Nayasarak,Sylhet.','BDS,FCPS',NULL,'../ubl_laravel/public/images/doctor/image_2020_10_04T10_29_54_580Z (1).png','asdas','2020-09-30 11:06:12','2020-09-30 11:08:10'),
(4,22,'dsad','asdas','2','asdas','dsa','asd','asd','asd',NULL,'../ubl_laravel/public/images/doctor/3aa.v1.jpg','asd','2020-10-05 16:32:02','2020-10-05 16:32:02');
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
|
<reponame>blommish/familie-ef-sak
ALTER TABLE vilkar_vurdering RENAME TO vilkarsvurdering; |
<gh_stars>0
-- Add free-text descriptions to court addresses. The `_cy` column holds the
-- translated variant (presumably Welsh/Cymraeg — confirm with callers).
ALTER TABLE public.search_courtaddress
ADD COLUMN description text,
ADD COLUMN description_cy text;
|
<reponame>aibenStunner/HackerRank
SELECT AVG(POPULATION) FROM CITY WHERE DISTRICT="California"; |
<filename>CustomerCare/src/main/resources/sql/sql_scripts_v6.sql
-- MySQL Workbench Forward Engineering
-- Customer-care schema: users file complaints, analysts resolve them, admins
-- email both groups. Secret questions support password recovery.
-- NOTE(review): the `VISIBLE` index keyword below requires MySQL 8.0+.
SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0;
SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0;
SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION';
-- -----------------------------------------------------
-- Schema customer_care_5
-- -----------------------------------------------------
-- -----------------------------------------------------
-- Schema customer_care_5
-- -----------------------------------------------------
CREATE SCHEMA IF NOT EXISTS `customer_care_5` DEFAULT CHARACTER SET utf8 ;
USE `customer_care_5` ;
-- -----------------------------------------------------
-- Table `customer_care_5`.`user`
-- End customer who files complaints.
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS `customer_care_5`.`user` (
`user_id` INT NOT NULL AUTO_INCREMENT,
`first_name` VARCHAR(45) NOT NULL,
`last_name` VARCHAR(45) NOT NULL,
`phone_number` VARCHAR(15) NULL,
`email_id` VARCHAR(45) NOT NULL,
`date_of_birth` DATETIME NOT NULL,
`gender` VARCHAR(10) NOT NULL,
`password` VARCHAR(256) NOT NULL,
`temp_password` VARCHAR(256) NOT NULL,
PRIMARY KEY (`user_id`))
ENGINE = InnoDB;
-- -----------------------------------------------------
-- Table `customer_care_5`.`admin`
-- Administrator; sends emails to users and analysts.
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS `customer_care_5`.`admin` (
`admin_id` INT NOT NULL AUTO_INCREMENT,
`first_name` VARCHAR(45) NOT NULL,
`last_name` VARCHAR(45) NOT NULL,
`email_id` VARCHAR(45) NOT NULL,
`password` VARCHAR(256) NOT NULL,
`temp_password` VARCHAR(256) NOT NULL,
PRIMARY KEY (`admin_id`))
ENGINE = InnoDB;
-- -----------------------------------------------------
-- Table `customer_care_5`.`analyst`
-- Support analyst; complaints are assigned to analysts by support_level.
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS `customer_care_5`.`analyst` (
`analyst_id` INT NOT NULL AUTO_INCREMENT,
`first_name` VARCHAR(45) NOT NULL,
`last_name` VARCHAR(45) NOT NULL,
`phone_number` VARCHAR(15) NOT NULL,
`email_id` VARCHAR(45) NOT NULL,
`date_of_birth` DATETIME NOT NULL,
`gender` VARCHAR(45) NOT NULL,
`support_level` VARCHAR(2) NOT NULL,
`password` VARCHAR(256) NOT NULL,
`temp_password` VARCHAR(256) NOT NULL,
PRIMARY KEY (`analyst_id`))
ENGINE = InnoDB;
-- -----------------------------------------------------
-- Table `customer_care_5`.`complaint`
-- A complaint filed by a user and assigned to an analyst.
-- DEFAULT 1000 on the FK columns acts as an "unassigned" sentinel id.
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS `customer_care_5`.`complaint` (
`complaint_id` INT NOT NULL AUTO_INCREMENT,
`complaint_user_id` INT NOT NULL DEFAULT 1000,
`assigned_analyst_id` INT NOT NULL DEFAULT 1000,
`category` VARCHAR(45) NOT NULL,
`phone_number` VARCHAR(15) NOT NULL,
`status` VARCHAR(45) NOT NULL,
`date_of_complaint` DATETIME NOT NULL,
`description` VARCHAR(1024) NOT NULL,
`suggestions` VARCHAR(1024) NULL,
PRIMARY KEY (`complaint_id`),
INDEX `fk_complaint_user_idx` (`complaint_user_id` ASC) VISIBLE,
INDEX `fk_complaint_analyst1_idx` (`assigned_analyst_id` ASC) VISIBLE,
CONSTRAINT `fk_complaint_user`
FOREIGN KEY (`complaint_user_id`)
REFERENCES `customer_care_5`.`user` (`user_id`)
ON DELETE NO ACTION
ON UPDATE NO ACTION,
CONSTRAINT `fk_complaint_analyst1`
FOREIGN KEY (`assigned_analyst_id`)
REFERENCES `customer_care_5`.`analyst` (`analyst_id`)
ON DELETE NO ACTION
ON UPDATE NO ACTION)
ENGINE = InnoDB;
-- -----------------------------------------------------
-- Table `customer_care_5`.`secret_questions`
-- Catalogue of security questions for password recovery.
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS `customer_care_5`.`secret_questions` (
`question_id` INT NOT NULL AUTO_INCREMENT,
`question_description` VARCHAR(45) NOT NULL,
PRIMARY KEY (`question_id`))
ENGINE = InnoDB;
-- -----------------------------------------------------
-- Table `customer_care_5`.`u_sq_questions`
-- Junction: a user's chosen secret questions and answers.
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS `customer_care_5`.`u_sq_questions` (
`u_sq_id` INT NOT NULL AUTO_INCREMENT,
`user_id` INT NOT NULL,
`question_id` INT NOT NULL,
`answer` VARCHAR(1024) NULL,
PRIMARY KEY (`u_sq_id`),
INDEX `fk_u_sq_questions_user1_idx` (`user_id` ASC) VISIBLE,
INDEX `fk_u_sq_questions_secret_questions1_idx` (`question_id` ASC) VISIBLE,
CONSTRAINT `fk_u_sq_questions_user1`
FOREIGN KEY (`user_id`)
REFERENCES `customer_care_5`.`user` (`user_id`)
ON DELETE NO ACTION
ON UPDATE NO ACTION,
CONSTRAINT `fk_u_sq_questions_secret_questions1`
FOREIGN KEY (`question_id`)
REFERENCES `customer_care_5`.`secret_questions` (`question_id`)
ON DELETE NO ACTION
ON UPDATE NO ACTION)
ENGINE = InnoDB;
-- -----------------------------------------------------
-- Table `customer_care_5`.`email_analyst`
-- Email audit trail: admin -> analyst messages.
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS `customer_care_5`.`email_analyst` (
`email_id` INT NOT NULL AUTO_INCREMENT,
`admin_id` INT NOT NULL,
`analyst_id` INT NOT NULL,
`sent_date` DATETIME NOT NULL,
`received` TINYINT NOT NULL,
`description` VARCHAR(1024) NOT NULL,
PRIMARY KEY (`email_id`),
INDEX `fk_email_analyst_admin1_idx` (`admin_id` ASC) VISIBLE,
INDEX `fk_email_analyst_analyst1_idx` (`analyst_id` ASC) VISIBLE,
CONSTRAINT `fk_email_analyst_admin1`
FOREIGN KEY (`admin_id`)
REFERENCES `customer_care_5`.`admin` (`admin_id`)
ON DELETE NO ACTION
ON UPDATE NO ACTION,
CONSTRAINT `fk_email_analyst_analyst1`
FOREIGN KEY (`analyst_id`)
REFERENCES `customer_care_5`.`analyst` (`analyst_id`)
ON DELETE NO ACTION
ON UPDATE NO ACTION)
ENGINE = InnoDB;
-- -----------------------------------------------------
-- Table `customer_care_5`.`email_user`
-- Email audit trail: admin -> user messages.
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS `customer_care_5`.`email_user` (
`email_id` INT NOT NULL AUTO_INCREMENT,
`admin_id` INT NOT NULL,
`user_id` INT NOT NULL,
`sent_date` DATETIME NOT NULL,
`received` TINYINT NOT NULL,
`description` VARCHAR(1024) NOT NULL,
PRIMARY KEY (`email_id`),
INDEX `fk_email_user_admin1_idx` (`admin_id` ASC) VISIBLE,
INDEX `fk_email_user_user1_idx` (`user_id` ASC) VISIBLE,
CONSTRAINT `fk_email_user_admin1`
FOREIGN KEY (`admin_id`)
REFERENCES `customer_care_5`.`admin` (`admin_id`)
ON DELETE NO ACTION
ON UPDATE NO ACTION,
CONSTRAINT `fk_email_user_user1`
FOREIGN KEY (`user_id`)
REFERENCES `customer_care_5`.`user` (`user_id`)
ON DELETE NO ACTION
ON UPDATE NO ACTION)
ENGINE = InnoDB;
-- -----------------------------------------------------
-- Table `customer_care_5`.`feedback_response`
-- Per-complaint feedback survey answers.
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS `customer_care_5`.`feedback_response` (
`feedback_response_id` INT NOT NULL AUTO_INCREMENT,
`complaint_complaint_id` INT NOT NULL,
`question` VARCHAR(1024) NULL,
`answer` VARCHAR(1024) NULL,
PRIMARY KEY (`feedback_response_id`),
INDEX `fk_feedback_response_complaint1_idx` (`complaint_complaint_id` ASC) VISIBLE,
CONSTRAINT `fk_feedback_response_complaint1`
FOREIGN KEY (`complaint_complaint_id`)
REFERENCES `customer_care_5`.`complaint` (`complaint_id`)
ON DELETE NO ACTION
ON UPDATE NO ACTION)
ENGINE = InnoDB;
-- -----------------------------------------------------
-- Table `customer_care_5`.`login_details`
-- Credential store used for authentication/role lookup.
-- NOTE(review): `password` VARCHAR(45) here vs VARCHAR(256) elsewhere —
-- confirm whether this table stores hashes or legacy plaintext.
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS `customer_care_5`.`login_details` (
`login_details_id` INT NOT NULL AUTO_INCREMENT,
`user_name` VARCHAR(45) NOT NULL,
`password` VARCHAR(45) NOT NULL,
`roles` VARCHAR(45) NOT NULL,
PRIMARY KEY (`login_details_id`))
ENGINE = InnoDB;
-- -----------------------------------------------------
-- Table `customer_care_5`.`a_sq_questions`
-- Junction: an analyst's chosen secret questions and answers.
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS `customer_care_5`.`a_sq_questions` (
`a_sq_id` INT NOT NULL AUTO_INCREMENT,
`analyst_id` INT NOT NULL,
`question_id` INT NOT NULL,
`answer` VARCHAR(1024) NULL,
PRIMARY KEY (`a_sq_id`),
INDEX `fk_a_sq_questions_analyst1_idx` (`analyst_id` ASC) VISIBLE,
INDEX `fk_a_sq_questions_secret_questions1_idx` (`question_id` ASC) VISIBLE,
CONSTRAINT `fk_a_sq_questions_analyst1`
FOREIGN KEY (`analyst_id`)
REFERENCES `customer_care_5`.`analyst` (`analyst_id`)
ON DELETE NO ACTION
ON UPDATE NO ACTION,
CONSTRAINT `fk_a_sq_questions_secret_questions1`
FOREIGN KEY (`question_id`)
REFERENCES `customer_care_5`.`secret_questions` (`question_id`)
ON DELETE NO ACTION
ON UPDATE NO ACTION)
ENGINE = InnoDB;
-- Restore the session settings saved at the top of the script.
SET SQL_MODE=@OLD_SQL_MODE;
SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS;
SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS;
|
-- FIX: added the missing statement terminators; without them the script fails
-- when executed as a batch (each SELECT ran into the next comment/statement).
-- Sol 1 & 2: all departments.
select *
from departments;
-- Sol 3: all employees.
select *
from employees;
-- Sol 4: key hiring facts per employee.
select employee_id, last_name, job_id, hire_date
from employees;
-- Sol 5: job id of every employee (duplicates included).
select job_id
from employees;
-- Sol 6: name and title concatenated into one labelled column.
SELECT last_name || ' ,' || job_id AS "Employee and Title" FROM employees;
-- Sol 7: all requested fields folded into one comma-separated DETAILS column.
-- NOTE(review): the first six columns are listed twice, so each value appears
-- twice in the output — confirm the exercise really wants the repetition.
select
EMPLOYEE_ID|| ',' ||
FIRST_NAME|| ',' ||
LAST_NAME|| ',' ||
EMAIL|| ',' ||
PHONE_NUMBER || ',' ||
HIRE_DATE|| ',' ||
EMPLOYEE_ID|| ',' ||
FIRST_NAME|| ',' ||
LAST_NAME|| ',' ||
EMAIL|| ',' ||
PHONE_NUMBER || ',' ||
HIRE_DATE|| ',' ||
SALARY AS DETAILS FROM employees;
-- phpMyAdmin SQL Dump
-- https://www.phpmyadmin.net/
-- Host: localhost
-- Server version: 10.4.21-MariaDB
-- PHP version: 8.0.10
SET SQL_MODE = "NO_AUTO_VALUE_ON_ZERO";
START TRANSACTION;
SET time_zone = "+00:00";
-- Structure of table `EMPRESTIMO` (loan applications)
CREATE TABLE `EMPRESTIMO` (
  -- FIX: CPF is an 11-digit Brazilian tax id; the inserted values
  -- (e.g. 13289076421) overflow INT, so BIGINT is required.
  `CPF` bigint NOT NULL,
  `NOME` varchar(20) NOT NULL,
  `CREDITO` varchar(20) NOT NULL,
  -- FIX: `boolean (2)` is invalid MySQL/MariaDB syntax, and the data below
  -- stores 'S'/'N' characters, so CHAR(1) matches the actual values.
  `SPC` char(1) NOT NULL,
  -- FIX: monthly income is money — exact DECIMAL instead of FLOAT.
  `RENDA` decimal(12,2) NOT NULL,
  `PARCELAS` int NOT NULL
  -- FIX: removed the trailing comma that made the original CREATE a syntax error.
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
--
-- Data for table `EMPRESTIMO`
--
INSERT INTO `EMPRESTIMO` (
`CPF`, `NOME`, `CREDITO`, `SPC`, `RENDA`, `PARCELAS`
) VALUES
(13289076421, 'ALESSANDA', 'SIM', 'S', '1400' , '5'),
(21328907642, 'CLOTILDE' , 'SIM', 'N', '40000', '10'),
(32132890764, 'JOAO' , 'NAO', 'N', '7667' , '8'),
(43213289076, 'CALIGULA' , 'NAO', 'S', '8800' , '4'),
(54321328907, 'PEDRO' , 'SIM', 'S', '65000', '2'),
(65432132890, 'TOMAS' , 'SIM', 'S', '44000', '1');
--
-- Indexes for table `EMPRESTIMO`
--
ALTER TABLE `EMPRESTIMO`
ADD PRIMARY KEY (`CPF`);
--
-- AUTO_INCREMENT for table `EMPRESTIMO`
-- NOTE(review): auto-incrementing a CPF (a real-world national id) is
-- suspicious — confirm this is intended before relying on it.
--
ALTER TABLE `EMPRESTIMO`
MODIFY `CPF` bigint NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=7;
COMMIT;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
-- Revert seattleflu/schema:warehouse/encounter-location/triggers/update-modified-timestamp from pg
-- Sqitch revert script: removes the trigger added by the matching deploy step.
begin;
drop trigger update_modified_timestamp on warehouse.encounter_location;
commit;
|
<filename>src/test/resources/sql/truncate/b877581c.sql
-- file:alter_table.sql ln:1894 expect:true
-- Parser-regression fixture: the header above is machine-read metadata
-- (source file, line, expected parse result); the statement stays as-is.
TRUNCATE old_system_table
|
-- file:pg_lsn.sql ln:22 expect:true
-- Fixture: ordering comparison between a string (implicitly cast) and an
-- explicit pg_lsn literal; expected to parse and evaluate successfully.
SELECT '0/16AE7F7' < '0/16AE7F8'::pg_lsn
|
<gh_stars>10-100
-- Migration: recreate the image-association foreign keys with ON DELETE CASCADE
-- so deleting an image (or its owning scene/performer/studio) removes the join
-- rows automatically instead of failing with a constraint violation.
ALTER TABLE "scene_images"
DROP CONSTRAINT "scene_images_image_id_fkey",
DROP CONSTRAINT "scene_images_scene_id_fkey",
ADD CONSTRAINT "scene_images_image_id_fkey"
FOREIGN KEY ("image_id") REFERENCES "images"("id") ON DELETE CASCADE,
ADD CONSTRAINT "scene_images_scene_id_fkey"
FOREIGN KEY ("scene_id") REFERENCES "scenes"("id") ON DELETE CASCADE;
ALTER TABLE "performer_images"
DROP CONSTRAINT "performer_images_image_id_fkey",
DROP CONSTRAINT "performer_images_performer_id_fkey",
ADD CONSTRAINT "performer_images_image_id_fkey"
FOREIGN KEY ("image_id") REFERENCES "images"("id") ON DELETE CASCADE,
ADD CONSTRAINT "performer_images_performer_id_fkey"
FOREIGN KEY ("performer_id") REFERENCES "performers"("id") ON DELETE CASCADE;
ALTER TABLE "studio_images"
DROP CONSTRAINT "studio_images_image_id_fkey",
DROP CONSTRAINT "studio_images_studio_id_fkey",
ADD CONSTRAINT "studio_images_image_id_fkey"
FOREIGN KEY ("image_id") REFERENCES "images"("id") ON DELETE CASCADE,
ADD CONSTRAINT "studio_images_studio_id_fkey"
FOREIGN KEY ("studio_id") REFERENCES "studios"("id") ON DELETE CASCADE;
|
<gh_stars>0
-- ========================================================================
-- Copyright (C) 2005 <NAME> <<EMAIL>>
-- Copyright (C) 2005-2012 <NAME> <<EMAIL>>
--
-- This program is free software; you can redistribute it and/or modify
-- it under the terms of the GNU General Public License as published by
-- the Free Software Foundation; either version 3 of the License, or
-- (at your option) any later version.
--
-- This program is distributed in the hope that it will be useful,
-- but WITHOUT ANY WARRANTY; without even the implied warranty of
-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- GNU General Public License for more details.
--
-- You should have received a copy of the GNU General Public License
-- along with this program. If not, see <http://www.gnu.org/licenses/>.
--
-- ========================================================================
-- Mass-emailing campaign table. `statut` lifecycle values:
-- redaction : 0  (draft)
-- valide : 1     (validated)
-- approuve : 2   (approved)
-- envoye : 3     (sent)
create table llx_mailing
(
rowid integer AUTO_INCREMENT PRIMARY KEY,
statut smallint DEFAULT 0, -- lifecycle status, see mapping above
titre varchar(60), -- Ref of mailing
entity integer DEFAULT 1 NOT NULL, -- multi company id
sujet varchar(60), -- Subject of mailing
body mediumtext,
bgcolor varchar(8), -- Background color of mailing
bgimage varchar(255), -- Background image of mailing
cible varchar(60),
nbemail integer,
email_from varchar(160), -- Email address of sender
email_replyto varchar(160), -- Email address for reply
email_errorsto varchar(160), -- Email address for errors (bounces)
tag varchar(128) NULL,
date_creat datetime, -- creation date
date_valid datetime, -- validation date
date_appro datetime, -- approval date
date_envoi datetime, -- date sent
fk_user_creat integer, -- user creator
fk_user_valid integer, -- user validator
fk_user_appro integer, -- not used
extraparams varchar(255), -- stores extra parameters in JSON format
joined_file1 varchar(255),
joined_file2 varchar(255),
joined_file3 varchar(255),
joined_file4 varchar(255)
)ENGINE=innodb;
|
<filename>database/atualizador/src/main/resources/db/migration/postgresql/client/V0001/V0000/V0001_0000_00000257__CreateTable_Tax.sql<gh_stars>0
-- Tax-rate lookup table (one row per tax definition), with soft delete.
CREATE TABLE tax (
-- CHAVE is a project-defined domain/type for surrogate keys, declared in an
-- earlier migration — not visible in this file.
id CHAVE NOT NULL,
rate DECIMAL(10, 3) NOT NULL, -- tax rate, 3 decimal places
active BOOLEAN NOT NULL,
deleted BOOLEAN NOT NULL, -- soft-delete flag
CONSTRAINT pk_tax PRIMARY KEY (id)
); |
/*Please add ; after each select statement*/
-- Lists each mischief row ordered by weekday (MySQL WEEKDAY(): Monday = 0),
-- then by the fixed nephew order Huey, Dewey, Louie via FIELD(), then by
-- date and title as tie-breakers.
CREATE PROCEDURE mischievousNephews()
BEGIN
SELECT weekday(mischief_date) as weekday, mischief_date, author, title
FROM mischief
ORDER BY weekday, field(author, "Huey", "Dewey", "Louie"), mischief_date, title;
END
-- Replaces the EmployeeDNA rows for every employee present in the TVP:
-- deletes existing rows for those EmpIds, then inserts the new ones stamped
-- with the current date.
-- FIX: the delete+insert pair now runs atomically. Previously a failure after
-- the DELETE left employees with no DNA rows at all; XACT_ABORT guarantees
-- the transaction rolls back on any runtime error.
CREATE PROCEDURE [dbo].[InsertEmployeeDNA]
@employeeDNAData EmployeeDNAType READONLY
AS
BEGIN
    SET XACT_ABORT ON;  -- any error rolls back the whole transaction
    BEGIN TRANSACTION;
        DELETE FROM EmployeeDNA WHERE EmpId IN (SELECT EmpId FROM @employeeDNAData);
        INSERT INTO EmployeeDNA (EmpId, Area, Rating, CreatedDate)
        SELECT EmpId, Area, Rating, GETDATE() FROM @employeeDNAData;
    COMMIT TRANSACTION;
END
|
<gh_stars>1-10
-- Age histogram: number of people at each age.
-- COUNT(id) counts non-NULL ids only (same as COUNT(*) when id is the PK).
SELECT age,
COUNT(id) AS people_count
FROM people
GROUP BY age;
|
/*
Uebung FIRMA 11
<NAME>
21.1.2022
*/
create database if not exists 2022_3a_arbizhabjaku_Firma_11;
use 2022_3a_arbizhabjaku_Firma_11;
-- Drop in FK-dependency order: referencing tables before referenced ones.
drop table if exists FIRMA_EGN;
drop table if exists EGN;
drop table if exists WOHNUNG;
drop table if exists ARBEITER;
drop table if exists VERWALTER;
drop table if exists FIRMA;
-- EGN: company owner/entrepreneur entity.
create table EGN(
UnternehmKennung int primary key not null,
UnternehmName varchar(100) not null,
Anwalt varchar(100) not null );
create table FIRMA(
-- BUG FIX: the column was misspelled `FirmamNummer`, which broke every
-- foreign key below that references FIRMA(FirmaNummer).
FirmaNummer int primary key not null,
FirmaName varchar(76) not null
);
-- m:n relation between companies (FIRMA) and owners (EGN).
create table FIRMA_EGN(
FirmaNummer int not null,
UKennung int not null,
constraint FK_FIR foreign key(FirmaNummer) references FIRMA(FirmaNummer),
-- BUG FIX: EGN has no column UKennung; the key column is UnternehmKennung.
constraint FK_EIG foreign key(UKennung) references EGN(UnternehmKennung)
);
-- Worker employed by exactly one company.
create table ARBEITER(
sozialnummer int primary key not null,
age int not null,
FirmaNummer int not null,
constraint FK_FIR2 foreign key(FirmaNummer) references FIRMA(FirmaNummer)
);
create table VERWALTER (
Verwalter_Id int not null primary key
);
-- Housing unit occupied by a worker and managed by a Verwalter.
create table WOHNUNG(
adresse varchar(100) primary key not null,
sozialnummer int not null,
Verwalter_Id int not null,
constraint FK_VER foreign key(Verwalter_Id) references VERWALTER(Verwalter_id),
constraint FK_ARB foreign key(sozialnummer) references ARBEITER(sozialnummer)
);
-- Exercise alterations. BUG FIX: the original also ran
--   ALTER TABLE UNTERNEHMER RENAME TO EGN;   -- fails: no table UNTERNEHMER
--   ALTER TABLE ARBEITER ADD age int not null; -- fails: column already exists
-- Both were removed because the CREATE statements above already reflect the
-- renamed table and the added column.
ALTER TABLE EGN drop Anwalt ;
ALTER TABLE FIRMA MODIFY FirmaName varchar(76) ;
/*Ja es gibt ein problem. Mann kann nicht den table loschen*/
|
-- Four equivalent leaderboard solutions: per hacker, sum the best score of
-- each challenge, exclude hackers whose total is 0, and rank by total then id.
-- Contest Leaderboard (MySQL) [can use select alias in 'having']
select s.hacker_id, h.name, sum(s.max_score) ssum
from
(select hacker_id, challenge_id, max(score) max_score
from submissions
group by hacker_id, challenge_id) as s
left outer join hackers h on s.hacker_id = h.hacker_id
group by s.hacker_id, h.name
having ssum > 0
order by ssum desc, s.hacker_id asc;
-- MSSQL [cannot use 'select' alias in the 'having' clause]
select ha.hacker_id, ha.name, sum(mxs.maxscore) as sumscore
from
(
select hacker_id, challenge_id, max(score) as maxscore
from submissions
group by hacker_id, challenge_id
) as mxs
inner join hackers as ha
on mxs.hacker_id = ha.hacker_id
group by ha.hacker_id, ha.name
having sum(mxs.maxscore) > 0
order by sumscore desc, ha.hacker_id asc;
-- Contest Leaderboard (MSSQL) [can't use select aliases in 'having']
select md.hacker_id, h.name, sum(md.max_score) as sum_max_score
from hackers h
inner join
(select hacker_id, challenge_id, max(s.score) as max_score
from submissions s
group by hacker_id, challenge_id
) as md
on h.hacker_id = md.hacker_id
group by md.hacker_id, h.name
having sum(md.max_score) > 0
order by sum_max_score desc, md.hacker_id asc;
-- Second solution with partitioning (MSSQL) and CTE. Needed distinct row in the partition table
-- since we don't collapse the rows as we would if we had done a 'max' and group by
-- which would have collapsed the duplicate row!!
with ms as (select distinct hacker_id
, challenge_id
, score
, max(score) over (partition by hacker_id, challenge_id
order by hacker_id, challenge_id) as maxscore
from submissions)
select ha.hacker_id
, ha.name
, sum(ms.maxscore) as summaxscore
from ms
-- keep only the rows that ARE the per-challenge maximum, then total them
inner join hackers ha on ms.hacker_id = ha.hacker_id
where ms.score = ms.maxscore
group by ha.hacker_id, ha.name
having sum(ms.maxscore) > 0
order by sum(ms.maxscore) desc, ha.hacker_id asc; |
<reponame>mansimathpal/sql<filename>SQL/Page 4.39 of chapter-4.sql
use mansi
-- Partition function for INT keys, splitting rows into four ranges.
-- RANGE LEFT: each boundary value (1, 100, 1000) belongs to the partition on
-- its LEFT, i.e. partitions are (-inf..1], (2..100], (101..1000], (1001..+inf).
-- FIX: removed the stray trailing '|' residue after the semicolon, which made
-- the statement unparseable.
create partition function pfrange(int) as range left for values(1, 100, 1000);
<filename>Mall/Others/user.sql
-- Save the current session flags so they can be restored after the import,
-- then relax uniqueness/FK checks and allow invalid dates for a faster bulk load.
SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0;
SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0;
SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='TRADITIONAL,ALLOW_INVALID_DATES';
-- Create the target schema (utf8) and make it the current default database.
CREATE SCHEMA IF NOT EXISTS `Mall` DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci ;
USE `Mall` ;
-- -----------------------------------------------------
-- Table `Mall`.`manager`
-- -----------------------------------------------------
-- Administrators table (管理员表).
-- FIX: the primary-key column was declared `NULL AUTO_INCREMENT`; a PK must be
-- NOT NULL, and the original silently relied on MySQL coercing it.
-- NOTE(review): `password` CHAR(32) suggests an unsalted MD5 digest — consider
-- a modern password hash (bcrypt/argon2) with a wider column; confirm with app code.
-- NOTE(review): MyISAM has no transactions or FK enforcement — confirm intent.
CREATE TABLE IF NOT EXISTS `Mall`.`manager` (
`uid` INT UNSIGNED NOT NULL AUTO_INCREMENT COMMENT '主键 自增',
`username` CHAR(50) NOT NULL DEFAULT '' COMMENT '用户名 非重非空(字符串)',
`password` CHAR(32) NOT NULL DEFAULT '' COMMENT '密码 ',
`identification` CHAR(50) NOT NULL DEFAULT '' COMMENT '账号',
`loginTime` INT NOT NULL DEFAULT 0 COMMENT '登陆时间',
`loginIp` CHAR(100) NOT NULL DEFAULT '' COMMENT '登陆IP',
PRIMARY KEY (`uid`),
UNIQUE INDEX `username_UNIQUE` (`username` ASC))
ENGINE = MyISAM
COMMENT = '管理员表';
-- ----------------------------
-- Table structure for `band`
-- ----------------------------
DROP TABLE IF EXISTS `band`;
-- Brand lookup table (品牌表): one row per product brand, referenced by
-- goods.band_bid. `bsort` orders brands for display; `is_hot` flags featured ones.
-- NOTE(review): MyISAM does not enforce the implied FK from goods — confirm intent.
CREATE TABLE `band` (
`bid` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键自增',
`bname` char(50) NOT NULL DEFAULT '' COMMENT '品牌名称',
`logo` varchar(225) NOT NULL DEFAULT '' COMMENT 'logo图片路径',
`bsort` smallint(5) unsigned NOT NULL DEFAULT '0' COMMENT '排序',
`is_hot` tinyint(5) NOT NULL DEFAULT '0' COMMENT '是否热门',
PRIMARY KEY (`bid`)
) ENGINE=MyISAM AUTO_INCREMENT=7 DEFAULT CHARSET=utf8 COMMENT='品牌表';
-- ----------------------------
-- Records of band
-- ----------------------------
-- Seed rows for `band`. Explicit column lists so the INSERTs do not break if
-- the table gains columns; numeric columns use numeric literals (the original
-- quoted everything and relied on implicit coercion).
INSERT INTO `band` (`bid`, `bname`, `logo`, `bsort`, `is_hot`) VALUES (1, '小米', 'Upload/63971464764423.jpg', 0, 1);
INSERT INTO `band` (`bid`, `bname`, `logo`, `bsort`, `is_hot`) VALUES (2, '联想', 'Upload/64361464764439.jpg', 0, 0);
INSERT INTO `band` (`bid`, `bname`, `logo`, `bsort`, `is_hot`) VALUES (3, '苹果', 'Upload/19261464764495.jpg', 0, 0);
INSERT INTO `band` (`bid`, `bname`, `logo`, `bsort`, `is_hot`) VALUES (4, '戴尔', 'Upload/79621464941586.jpg', 0, 0);
INSERT INTO `band` (`bid`, `bname`, `logo`, `bsort`, `is_hot`) VALUES (5, '华硕', 'Upload/47351464941612.jpg', 0, 0);
INSERT INTO `band` (`bid`, `bname`, `logo`, `bsort`, `is_hot`) VALUES (6, '宏基', 'Upload/34071464941673.jpg', 0, 0);
-- ----------------------------
-- Table structure for `category`
-- ----------------------------
DROP TABLE IF EXISTS `category`;
-- Product category tree (分类表): `pid` is the parent category id (0 = root),
-- `sort` orders siblings, `type_tid` links the category to a property type.
CREATE TABLE `category` (
`cid` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键自增分类ID',
`cname` char(20) NOT NULL DEFAULT '' COMMENT '分类名称 非空-空字符串',
`pid` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '父级id 非空非负默认0',
`sort` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '排序非空非负默认0',
`type_tid` int(11) NOT NULL,
PRIMARY KEY (`cid`),
KEY `fk_category_type1_idx` (`type_tid`)
) ENGINE=MyISAM AUTO_INCREMENT=42 DEFAULT CHARSET=utf8 COMMENT='分类表';
-- ----------------------------
-- Records of category
-- ----------------------------
-- Seed rows for `category`. Explicit column lists and numeric literals for the
-- int columns (cid, pid, sort, type_tid); string values kept byte-identical.
INSERT INTO `category` (`cid`, `cname`, `pid`, `sort`, `type_tid`) VALUES (12, '手机', 0, 1, 0);
INSERT INTO `category` (`cid`, `cname`, `pid`, `sort`, `type_tid`) VALUES (13, '手机通讯', 12, 100, 1);
INSERT INTO `category` (`cid`, `cname`, `pid`, `sort`, `type_tid`) VALUES (14, '手机配件', 12, 100, 1);
INSERT INTO `category` (`cid`, `cname`, `pid`, `sort`, `type_tid`) VALUES (15, '电脑', 0, 3, 0);
INSERT INTO `category` (`cid`, `cname`, `pid`, `sort`, `type_tid`) VALUES (16, '笔记本', 15, 100, 1);
INSERT INTO `category` (`cid`, `cname`, `pid`, `sort`, `type_tid`) VALUES (17, '平板', 15, 100, 1);
INSERT INTO `category` (`cid`, `cname`, `pid`, `sort`, `type_tid`) VALUES (18, '品牌整机', 15, 100, 1);
INSERT INTO `category` (`cid`, `cname`, `pid`, `sort`, `type_tid`) VALUES (19, '摄影', 0, 2, 0);
INSERT INTO `category` (`cid`, `cname`, `pid`, `sort`, `type_tid`) VALUES (20, '摄影摄像', 19, 100, 1);
INSERT INTO `category` (`cid`, `cname`, `pid`, `sort`, `type_tid`) VALUES (21, '相机配件', 19, 100, 1);
INSERT INTO `category` (`cid`, `cname`, `pid`, `sort`, `type_tid`) VALUES (22, '电玩', 0, 4, 0);
INSERT INTO `category` (`cid`, `cname`, `pid`, `sort`, `type_tid`) VALUES (23, '游戏电玩', 22, 100, 1);
INSERT INTO `category` (`cid`, `cname`, `pid`, `sort`, `type_tid`) VALUES (24, '游戏本', 22, 100, 1);
INSERT INTO `category` (`cid`, `cname`, `pid`, `sort`, `type_tid`) VALUES (25, '硬件', 0, 5, 0);
INSERT INTO `category` (`cid`, `cname`, `pid`, `sort`, `type_tid`) VALUES (26, 'DIY硬件', 25, 100, 1);
INSERT INTO `category` (`cid`, `cname`, `pid`, `sort`, `type_tid`) VALUES (27, ' 外设配件', 25, 100, 1);
INSERT INTO `category` (`cid`, `cname`, `pid`, `sort`, `type_tid`) VALUES (28, '网络设备', 25, 100, 1);
INSERT INTO `category` (`cid`, `cname`, `pid`, `sort`, `type_tid`) VALUES (29, '辅助', 0, 6, 0);
INSERT INTO `category` (`cid`, `cname`, `pid`, `sort`, `type_tid`) VALUES (30, '智能生活', 29, 100, 1);
INSERT INTO `category` (`cid`, `cname`, `pid`, `sort`, `type_tid`) VALUES (31, '数码配件', 29, 100, 1);
INSERT INTO `category` (`cid`, `cname`, `pid`, `sort`, `type_tid`) VALUES (32, '平板配件', 29, 100, 1);
INSERT INTO `category` (`cid`, `cname`, `pid`, `sort`, `type_tid`) VALUES (33, '共用设备', 0, 7, 0);
INSERT INTO `category` (`cid`, `cname`, `pid`, `sort`, `type_tid`) VALUES (34, '家庭影音', 33, 100, 1);
INSERT INTO `category` (`cid`, `cname`, `pid`, `sort`, `type_tid`) VALUES (35, '办公设备', 33, 100, 1);
INSERT INTO `category` (`cid`, `cname`, `pid`, `sort`, `type_tid`) VALUES (36, '生活必备', 0, 8, 0);
INSERT INTO `category` (`cid`, `cname`, `pid`, `sort`, `type_tid`) VALUES (37, '生活家电', 36, 100, 2);
INSERT INTO `category` (`cid`, `cname`, `pid`, `sort`, `type_tid`) VALUES (38, '家庭影音', 36, 100, 2);
INSERT INTO `category` (`cid`, `cname`, `pid`, `sort`, `type_tid`) VALUES (39, '个护', 36, 100, 2);
INSERT INTO `category` (`cid`, `cname`, `pid`, `sort`, `type_tid`) VALUES (40, '华为', 13, 100, 1);
INSERT INTO `category` (`cid`, `cname`, `pid`, `sort`, `type_tid`) VALUES (41, '三星', 13, 100, 1);
-- ----------------------------
-- Table structure for `goods`
-- ----------------------------
DROP TABLE IF EXISTS `goods`;
-- Products table (商品表). `cprice` is the market price, `mprice` the shop
-- price (both DECIMAL(7,2)); `addtime` is a unix timestamp stored as INT.
-- The *_idx KEY columns mirror foreign keys to category/type/band/user,
-- but MyISAM cannot enforce them.
CREATE TABLE `goods` (
`gid` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键,自增',
`gname` char(50) NOT NULL DEFAULT '' COMMENT '商品名称',
`gcode` char(100) NOT NULL DEFAULT '' COMMENT '货号',
`gunit` varchar(45) NOT NULL DEFAULT '' COMMENT '单位',
`cprice` decimal(7,2) NOT NULL DEFAULT '0.00' COMMENT '市场价',
`mprice` decimal(7,2) NOT NULL DEFAULT '0.00' COMMENT '商城价',
`stock` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '总库存',
`listpic` varchar(225) NOT NULL DEFAULT '' COMMENT '列表图',
`click` smallint(5) unsigned NOT NULL DEFAULT '0' COMMENT '点击率',
`addtime` int(11) NOT NULL DEFAULT '0' COMMENT '上架时间',
`category_cid` int(11) NOT NULL,
`type_tid` int(11) NOT NULL,
`band_bid` int(11) NOT NULL,
`user_uid` int(10) unsigned NOT NULL,
PRIMARY KEY (`gid`),
KEY `fk_goods_category1_idx` (`category_cid`),
KEY `fk_goods_type1_idx` (`type_tid`),
KEY `fk_goods_band1_idx` (`band_bid`),
KEY `fk_goods_user1_idx` (`user_uid`)
) ENGINE=MyISAM AUTO_INCREMENT=8 DEFAULT CHARSET=utf8 COMMENT='商品表';
-- ----------------------------
-- Records of goods
-- ----------------------------
-- Seed rows for `goods`. Explicit column lists; numeric/decimal literals for
-- numeric columns; string values (names, paths, empty gcode) kept byte-identical.
INSERT INTO `goods` (`gid`, `gname`, `gcode`, `gunit`, `cprice`, `mprice`, `stock`, `listpic`, `click`, `addtime`, `category_cid`, `type_tid`, `band_bid`, `user_uid`) VALUES (2, '【顺丰包邮】联想 IdeaPad 710S(i5/4GB/128GB)轻薄时尚 13.3英寸 高分屏', '', '台', 5610.00, 4966.00, 56, 'Upload/Content/16/06/68421464868998.png', 545, 1464831442, 16, 1, 2, 1);
INSERT INTO `goods` (`gid`, `gname`, `gcode`, `gunit`, `cprice`, `mprice`, `stock`, `listpic`, `click`, `addtime`, `category_cid`, `type_tid`, `band_bid`, `user_uid`) VALUES (7, '【顺丰包邮】戴尔 Inspiron 灵越 14 3000(INS14CD-4518B)', '', '台', 3648.00, 3349.00, 0, 'Upload/Content/16/06/83651465011241.jpg', 243, 1464944313, 16, 1, 4, 1);
INSERT INTO `goods` (`gid`, `gname`, `gcode`, `gunit`, `cprice`, `mprice`, `stock`, `listpic`, `click`, `addtime`, `category_cid`, `type_tid`, `band_bid`, `user_uid`) VALUES (3, '【顺丰包邮】华硕 FL5500LD5500 15.6英吋笔记本 高清1920X1080P 大屏音影娱', '', '台', 4600.00, 4300.00, 0, 'Upload/Content/16/06/46241464942672.png', 323, 1464941912, 16, 1, 5, 1);
INSERT INTO `goods` (`gid`, `gname`, `gcode`, `gunit`, `cprice`, `mprice`, `stock`, `listpic`, `click`, `addtime`, `category_cid`, `type_tid`, `band_bid`, `user_uid`) VALUES (4, '【 顺丰包邮】Acer VN7-591G-56ZA i5-4210H 4G内存 1TB硬盘 GTX8', '', '台', 5320.00, 5100.00, 0, 'Upload/Content/16/06/22011464942719.png', 32, 1464942125, 16, 1, 6, 1);
INSERT INTO `goods` (`gid`, `gname`, `gcode`, `gunit`, `cprice`, `mprice`, `stock`, `listpic`, `click`, `addtime`, `category_cid`, `type_tid`, `band_bid`, `user_uid`) VALUES (5, '【顺丰包邮·官方授权】联想(Lenovo)N50-45-EON(双核E1-6010 4G 500G', '', '台', 5349.00, 5130.00, 0, 'Upload/Content/16/06/29371464942937.jpg', 243, 1464942942, 16, 1, 2, 1);
INSERT INTO `goods` (`gid`, `gname`, `gcode`, `gunit`, `cprice`, `mprice`, `stock`, `listpic`, `click`, `addtime`, `category_cid`, `type_tid`, `band_bid`, `user_uid`) VALUES (6, '【顺丰包邮】华硕 VM510L5200 15.6英寸笔记本 强悍性能 影音娱乐 五代酷睿i5-52', '', '台', 3800.00, 3300.00, 0, 'Upload/Content/16/06/39201464943085.png', 432, 1464943094, 16, 1, 5, 1);
-- ----------------------------
-- Table structure for `goodslist`
-- ----------------------------
DROP TABLE IF EXISTS `goodslist`;
-- SKU list (货品列表): one row per sellable variant of a goods row.
-- `grouppid` holds a comma-separated pair of goods_property ids (e.g. color +
-- bundle) identifying the variant — a denormalized CSV column by design.
-- NOTE(review): the key name `fk_ description_goods1_idx` contains a stray
-- space — likely a generator artifact; harmless but worth renaming.
CREATE TABLE `goodslist` (
`did` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键,自增',
`grouppid` char(100) NOT NULL DEFAULT '' COMMENT '组合属性id',
`dcode` char(100) NOT NULL DEFAULT '' COMMENT '货号',
`dstock` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '库存',
`goods_gid` int(11) NOT NULL,
`glname` char(200) NOT NULL,
PRIMARY KEY (`did`),
KEY `fk_ description_goods1_idx` (`goods_gid`)
) ENGINE=MyISAM AUTO_INCREMENT=32 DEFAULT CHARSET=utf8 COMMENT='货品列表';
-- ----------------------------
-- Records of goodslist
-- ----------------------------
-- Seed rows for `goodslist`. Explicit column lists; `grouppid`/`dcode` stay
-- quoted (char columns), `dstock`/`goods_gid` become numeric literals.
INSERT INTO `goodslist` (`did`, `grouppid`, `dcode`, `dstock`, `goods_gid`, `glname`) VALUES (14, '347,345', '54', 546, 2, '全新原装电脑+全国联保保修卡+说明书+电源适配器+电池(具体以厂家配置为准)');
INSERT INTO `goodslist` (`did`, `grouppid`, `dcode`, `dstock`, `goods_gid`, `glname`) VALUES (15, '347,343', '545', 533, 2, '官方标配+联想专用电脑包+联想专用鼠标+精美鼠标垫');
INSERT INTO `goodslist` (`did`, `grouppid`, `dcode`, `dstock`, `goods_gid`, `glname`) VALUES (16, '347,344', '657', 657, 2, '官方标配+联想专用电脑包+联想专用鼠标+高防炫光专用屏幕+防水硅胶键盘膜+内胆包+散热底座');
INSERT INTO `goodslist` (`did`, `grouppid`, `dcode`, `dstock`, `goods_gid`, `glname`) VALUES (17, '346,345', '65', 775, 2, '全新原装电脑+全国联保保修卡+说明书+电源适配器+电池(具体以厂家配置为准)');
INSERT INTO `goodslist` (`did`, `grouppid`, `dcode`, `dstock`, `goods_gid`, `glname`) VALUES (18, '346,343', '6567', 656, 2, '官方标配+联想专用电脑包+联想专用鼠标+精美鼠标垫');
INSERT INTO `goodslist` (`did`, `grouppid`, `dcode`, `dstock`, `goods_gid`, `glname`) VALUES (19, '346,344', '5454', 545, 2, '官方标配+联想专用电脑包+联想专用鼠标+高防炫光专用屏幕+防水硅胶键盘膜+内胆包+散热底座');
INSERT INTO `goodslist` (`did`, `grouppid`, `dcode`, `dstock`, `goods_gid`, `glname`) VALUES (31, '533,530', '546', 54654, 7, ' 官方标配+原装笔记电脑包+原装笔记本鼠标+原装屏幕保护膜+原装键盘保护膜+高级鼠标垫+笔记本专用清洁套装+游戏专用迷你便携耳机');
INSERT INTO `goodslist` (`did`, `grouppid`, `dcode`, `dstock`, `goods_gid`, `glname`) VALUES (30, '532,530', '6576', 657, 7, '笔记本电脑+笔记本充电器+笔记本保修卡+保修发票');
INSERT INTO `goodslist` (`did`, `grouppid`, `dcode`, `dstock`, `goods_gid`, `glname`) VALUES (29, '534,530', '657', 657, 7, ' 官方标配+原装笔记本电脑包+原装笔记本鼠标+高级鼠标垫');
INSERT INTO `goodslist` (`did`, `grouppid`, `dcode`, `dstock`, `goods_gid`, `glname`) VALUES (28, '533,531', '54', 6545, 7, '官方标配+原装笔记电脑包+原装笔记本鼠标+原装屏幕保护膜+原装键盘保护膜+高级鼠标垫+笔记本专用清洁套装+游戏专用迷你便携耳机');
INSERT INTO `goodslist` (`did`, `grouppid`, `dcode`, `dstock`, `goods_gid`, `glname`) VALUES (27, '534,531', '656', 765, 7, '官方标配+原装笔记本电脑包+原装笔记本鼠标+高级鼠标垫');
INSERT INTO `goodslist` (`did`, `grouppid`, `dcode`, `dstock`, `goods_gid`, `glname`) VALUES (26, '532,531', '6576', 76786, 7, '笔记本电脑+笔记本充电器+笔记本保修卡+保修发票');
-- ----------------------------
-- Table structure for `goods_property`
-- ----------------------------
DROP TABLE IF EXISTS `goods_property`;
-- Per-product property values (商品属性表): joins a goods row to a property
-- definition and carries the concrete value plus an optional price surcharge.
CREATE TABLE `goods_property` (
`gpid` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键,自增',
`gpvalue` char(50) NOT NULL DEFAULT '' COMMENT '属性值',
`appendprice` decimal(7,3) NOT NULL DEFAULT '0.000' COMMENT '附加价格',
`goods_gid` int(11) NOT NULL,
`property_pid` int(11) NOT NULL,
PRIMARY KEY (`gpid`),
KEY `fk_goods_property_goods1_idx` (`goods_gid`),
KEY `fk_goods_property_property1_idx` (`property_pid`)
) ENGINE=MyISAM AUTO_INCREMENT=535 DEFAULT CHARSET=utf8 COMMENT='商品属性表';
-- ----------------------------
-- Records of goods_property
-- ----------------------------
-- Seed rows for `goods_property`. Explicit column lists; ids and the
-- decimal(7,3) surcharge use numeric literals. gpvalue strings are kept
-- byte-identical, including their original leading spaces.
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (534, '套餐一', 100.000, 7, 14);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (533, '套餐二', 200.000, 7, 14);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (532, '官方套餐', 0.000, 7, 14);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (531, '皓月银', 100.000, 7, 2);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (530, '灰色', 80.000, 7, 2);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (529, '商城联保,享受三包', 0.000, 7, 13);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (528, '4GB(4GB×1)', 0.000, 7, 12);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (527, '双核心/二线程', 0.000, 7, 11);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (526, ' 2.9GHz', 0.000, 7, 10);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (525, '英特尔 酷睿i5 6代系列', 0.000, 7, 9);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (524, '预装Windows 10', 0.000, 7, 8);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (347, '香槟金', 100.000, 2, 2);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (346, '皓月银', 50.000, 2, 2);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (345, '官方套餐', 50.000, 2, 14);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (344, '套餐二', 100.000, 2, 14);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (343, '套餐一', 80.000, 2, 14);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (342, '全国联保,享受三包服务', 0.000, 2, 13);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (341, '4GB(4GB×1)', 0.000, 2, 12);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (340, '双核心/四线程', 0.000, 2, 11);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (339, ' 2.9GHz', 0.000, 2, 10);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (338, '英特尔 酷睿i5 6代系列', 0.000, 2, 9);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (337, '预装Windows 10', 0.000, 2, 8);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (336, '娱乐', 0.000, 2, 7);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (335, '时尚轻薄本', 0.000, 2, 6);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (334, '2016年04月', 0.000, 2, 5);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (333, '13寸', 0.000, 2, 1);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (387, '套餐一', 100.000, 3, 14);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (386, '官方套餐', 0.000, 3, 14);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (385, '钻石白', 80.000, 3, 2);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (384, '翡翠绿', 50.000, 3, 2);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (383, '全国联保,享受三包服务', 0.000, 3, 13);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (382, '4GB(4GB×1)', 0.000, 3, 12);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (381, '双核心/二线程', 0.000, 3, 11);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (380, ' 2.9GHz', 0.000, 3, 10);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (379, '英特尔 酷睿i5 6代系列', 0.000, 3, 9);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (378, ' 预装Windows 8', 0.000, 3, 8);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (377, '家用', 0.000, 3, 7);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (376, '时尚轻薄本', 0.000, 3, 6);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (375, '2016年04月', 0.000, 3, 5);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (401, '套餐一', 100.000, 4, 14);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (400, '官方套餐', 0.000, 4, 14);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (399, '白色', 10.000, 4, 2);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (398, '金色', 100.000, 4, 2);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (397, '全国联保,享受三包服务', 0.000, 4, 13);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (396, '8GB(8GB×1)', 0.000, 4, 12);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (395, '四核心/四线程', 0.000, 4, 11);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (394, ' 2.9GHz', 0.000, 4, 10);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (393, '英特尔 酷睿i5 6代系列', 0.000, 4, 9);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (392, '预装Windows 10', 0.000, 4, 8);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (391, '办公', 0.000, 4, 7);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (390, '影音娱乐本', 0.000, 4, 6);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (389, '2016年04月', 0.000, 4, 5);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (388, '13寸', 0.000, 4, 1);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (402, '14寸', 0.000, 5, 1);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (403, '2016年04月', 0.000, 5, 5);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (404, '影音娱乐本', 0.000, 5, 6);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (405, '家用', 0.000, 5, 7);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (406, ' 预装Windows 8', 0.000, 5, 8);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (407, '英特尔 酷睿i5 2代系列', 0.000, 5, 9);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (408, ' 1.8GHz', 0.000, 5, 10);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (409, '双核心/二线程', 0.000, 5, 11);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (410, '8GB(8GB×1)', 0.000, 5, 12);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (411, '商城联保,享受三包', 0.000, 5, 13);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (412, '皓月银', 80.000, 5, 2);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (413, '翡翠绿', 100.000, 5, 2);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (414, '官方套餐', 0.000, 5, 14);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (523, '家用', 0.000, 7, 7);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (522, '时尚轻薄本', 0.000, 7, 6);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (521, '2016年04月', 0.000, 7, 5);
INSERT INTO `goods_property` (`gpid`, `gpvalue`, `appendprice`, `goods_gid`, `property_pid`) VALUES (520, '14寸', 0.000, 7, 1);
-- ----------------------------
-- Table structure for `product_detail`
-- ----------------------------
DROP TABLE IF EXISTS `product_detail`;
-- Product detail page content (商品详细表): `gallery` is a comma-separated list
-- of image paths; `detail` and `service` hold raw HTML fragments rendered on
-- the product page. One row per goods row (`goods_gid`).
CREATE TABLE `product_detail` (
`pdid` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键,自增',
`gallery` varchar(225) NOT NULL DEFAULT '' COMMENT '图册',
`detail` text COMMENT '商品详细',
`service` text COMMENT '售后服务',
`goods_gid` int(11) NOT NULL,
PRIMARY KEY (`pdid`),
KEY `fk_product_detail_goods1_idx` (`goods_gid`)
) ENGINE=MyISAM AUTO_INCREMENT=8 DEFAULT CHARSET=utf8 COMMENT='商品详细表';
-- ----------------------------
-- Records of product_detail
-- ----------------------------
INSERT INTO `product_detail` VALUES ('7', 'Upload/Content/16/06/77431465011257.jpg,Upload/Content/16/06/68501465011256.jpg,Upload/Content/16/06/84221465011255.jpg,Upload/Content/16/06/33681465011254.jpg,', '<p><br/></p><p></p><p><img src=\"/Mall/Publi/Upload/ueditor/image/20160604/1465011601932326.jpg\" style=\"width: 340px; height: 261px;\" width=\"340\" height=\"261\"/></p><p><img src=\"/Mall/Publi/Upload/ueditor/image/20160604/1465011601186499.jpg\" style=\"width: 348px; height: 274px;\" width=\"348\" height=\"274\"/></p><p><br/></p><p><span class=\"title\">48小时发货</span><span class=\"english-title\">48 Hours</span>\r\n </p><p>购买带有"Z<span class=\"add-plus\">+</span>"标识店铺的商品时,商家承诺您所下的订单,将在48小时内进行发货(部分商家节假日不发货),让您尽快收到商品。</p><ul class=\" list-paddingleft-2\"><li><p>1、赔付保障权益</p></li><li><p>如您的收货地址在商家承诺的服务区域内,商家承诺在下单后的48小时内将商品发出,从您下单时间开始计算,如超时未发出,您可根据ZOL商城“先行赔付”进行维权、获得赔偿。</p></li></ul><ul class=\" list-paddingleft-2\"><li><p>2、如何跟踪配送信息</p></li><li><p>配送信息可直接在您购买的商品订单中查看。或直接与卖家联系确认。您\r\n可以在商品详细页面查看入驻卖家联系信息,订单状态变为"已发货"后,点击查询"物流状态"即可查询到您所购买的商品的在途情况。或请您点击对应的物流承\r\n运商网站进行查询,快递单号可以登陆ZOL商城账号的订单管理中获取,建议发货后48小时后进行查询。查询方式如下: <br/>顺丰快递:服务热线 95338 快递单号由"10"或"01"开头的12位数字组成,例如:10******8888或01******9999。</p></li></ul><p><span class=\"title\">发票保障</span><span class=\"english-title\">Invoice</span>\r\n </p><p>指买家在zol购买商品时,店铺内带有"Z<span class=\"add-plus\">+</span>"标识的店铺内所购买商品均带有正规商品发票。且使用该服务不向买家收取任何其他费用。</p><ul class=\" list-paddingleft-2\"><li><p>1、赔付保障权益</p></li><li><p>如商家未履行所承诺的发票保障服务,您可根据ZOL商城"先行赔付"进行维权、获得赔偿。</p></li></ul><ul class=\" list-paddingleft-2\"><li><p>2、发票的开具</p></li><li><p>1. 开具发票的金额以实际支付的金额为准。 <br/>2. ZOL提供的发票种类有为"普通发票"。</p></li></ul><ul class=\" list-paddingleft-2\"><li><p>3、普通发票</p></li><li><p>1. 个人及不具有一般纳税人资格的企业客户,均开具普通发票 <br/> 2. 开具普通发票时,抬头默认为收货人"个人姓名",请需要更改抬头的客户在修改信息中进行修改。 <br/> 3. 
普通发票信息与您输入的信息一致的情况下,发票一经开出,恕不退换。</p></li></ul><ul class=\" list-paddingleft-2\"><li><p>4、开发票的注意事项</p></li><li><p>1.发票金额不能高于订单金额。 <br/> 2.为了享受厂商提供的质保服务,请您将商品发票开具为明细。如果您购买的是数码类、手机及配件、笔记本、台式机、家电类商品,为了保证您能充分享受生产厂家提供的售后服务(售后服务需根据发票确认您的购买日期),发票内容默认为您订购的商品明细。 <br/> 3.不同物流中心开具的发票无法合并。 <br/> 4.使用优惠券、积分的金额不开具发票。 <br/> 5.一个包裹对应一张发票或多张发票。 <br/> 6、销售产品均可开具正规机打发票(普通增值税发票),无需加税点,但为了保证发票不遗漏和错开,请下订单时在补充说明(或者卖家留言),留言注明:需开发票抬头XXX公司或者XXX人名。如忘记注明,请及时联系客服帮助备注。</p></li></ul><ul class=\" list-paddingleft-2\"><li><p>5、发票的退换</p></li><li><p>1. 如果您收到的发票与您输入的开票信息、订单信息不一致,请及时联系我们的客服人员,我们会及时为您联系商家解决您的问题。 <br/> \r\n 2.未经易ZOL商城人员的允许,客服部门将不接受电话、传真、邮件、邮寄等形式的重开发票申请,如您擅自将发票寄到我公司的任一办公地址,在寄送过程中\r\n发生的发票遗失、缺失等情况,恕我们概不负责。 <br/></p></li></ul><p><span class=\"title\">物流配送</span><span class=\"english-title\">Logistics</span>\r\n </p><p>当您购买ZOL带有"Z<span class=\"add-plus\">+</span>"\r\n标识店铺,商家承诺全店商品满399元,如收货地址在顺丰速运(即顺丰速运(集团)有限公司及其子公司)所覆盖的派送区域内,均采用顺丰速运为消费者提供\r\n免费物流配送服务;如收货地址不在顺丰速运派送范围内时,商家需使用其它可送达快递为消费者提供免费物流配送服务。如在商家店铺未购买达到399元,除商\r\n家优惠外,物流费用由买家自己承担。</p><ul class=\" list-paddingleft-2\"><li><p>1、赔付保障权益</p></li><li><p>如商家向您收取物流费用,您可根据ZOL商城"先行赔付"进行维权、获得赔偿。</p></li></ul><ul class=\" list-paddingleft-2\"><li><p>2、因以下原因导致延误或退回的,不在此服务保障范围内:</p></li><li><p>1) 部份城市的偏远地区因交通等问题,配送时间可能在预计到达时间基础上延后1-2天。 <br/> 2) 因买家原因(更改收货地址、地址不详、地址错误、联系不上、拒收、无代收人等)导致包裹延误派送或无法送达的; <br/> 3) 因不可抗力造成延误的("不可抗力"指不可预见、不可避免或不可克服的客观情况以及其他影响配送时间、造成包裹配送延误的客观情况,包括但不\r\n限于全国性或区域性空中或地面交通系统管制或中断(如天气原因等)、或通讯系统干扰或故障、或政府行为、邮政主管部门政策变化、战争、地震、台风、洪水、\r\n火灾、大雨、大雾等其他类似事件) <br/> 4) 航空违禁品、手机、电子类产品、易碎品等因航空安检查验导致无法正常配载航班或改走陆路运输的; <br/> 5) 寄递物品违反禁、限寄规定或有关运输管理条例,经有关部门没收或依照有关法规、规定处理的; <br/> 6) 收件人地址为机关、单位等机构,而周六、周日和公众节假日不接收邮件、包裹,造成延误的; <br/> 7) 收件人学校、单位或住宅小区不允许投递人员入内,或买家代收方原因,导致造成延误的;</p></li></ul><ul class=\" list-paddingleft-2\"><li><p>3、买家须知</p></li><li><p>1) 如收货地址不在顺丰速运派送范围内时,且在商家店铺购买达到399元,商家需使用其它可送达快递为消费者提供免费物流配送服务; <br/> 2) 寄递物品违反禁、限寄规定或有关运输管理条例,经有关部门没收或依照有关法规、规定处理的,不在补偿范围内; <br/> 3) 
顺丰速运的配送范围以本页公示的范围为准。港澳台及海外地区,大陆地区的部队(含武警)或受部队(含武警)管制的等顺丰不予收送快件的机构不在配送范围内。</p></li></ul><p><span class=\"title\">签收与验货</span><span class=\"english-title\">Receipt and inspection</span>\r\n </p><ul class=\" list-paddingleft-2\"><li><p>1、开箱验货</p></li><li><p>签收时在付款后与配送人员当面核对:商品及配件、应付金额、商品数量\r\n及发货清单、发票(如有)、赠品(如有)等;如存在包装破损、商品错误、商品短缺、商品 \r\n存在质量问题等影响签收的因素,请您可以拒收全部或部分商品,相关的赠品,配件或捆绑商品应一起当场拒收;为了保护您的权益,建议您尽量不要委托他人代为\r\n签收;如由他人代为签收商品而没有在配送人员在场的情况下验货,则视为您所订购商品的包装无任何问题。</p></li></ul><ul class=\" list-paddingleft-2\"><li><p>2、签收与验货流程</p></li><li><p><img src=\"/Upload/ueditor/image/20160604/1465011453660021.jpg\" height=\"124\" width=\"624\"/>\r\n <br/> 1) 付款及签收完毕后,请在配送人员在场的情况下,参照顾客签收单对商品数量、型号、外观、配件等依次进行核对验收; <br/> 2) 若发现异常(如商品缺失、损坏、渗漏等),请务必让配送人员在顾客签收单上签字确认,并立即联系商家进行反馈和登记处理。 <br/> 3) 如您在配送人员离开后再提出包裹外包装或封带异常,以及商品的型号、数量或外观等存在异常而要求退换货,此时因无法确认责任,将无法为您做进一步处理,请您谅解。 <br/> 4) 货到付款的商品送达时,请您当面与配送员核对商品与款项,确保货、款两清;若您在配送员离开后发现款项有误,将无法为您核实处理。 <br/> 5) 如配送人员以公司没有规定或着急送货等借口,不配合您进行验货和签字确认,请及时联系ZOL商城客服进行投诉,客服人员将会在第一时间为您处理;一旦出现此情况,为了维护您的权益,建议您先不要拆开包裹。 <br/> 6) 为了保护您的权益,建议您尽量不要委托他人签收;如由他人代为签收商品而没有在配送人员在场的情况下验货,则视为您所订购商品的包装无任何问题。</p></li></ul><p><span class=\"title\">售后服务</span><span class=\"english-title\">After-sales Service</span>\r\n </p><p>购买带有"Z<span class=\"add-plus\">+</span>"标识店铺的商品,售出产品有非人为质量问题凭有效检测报告、不影响商家二次销售的情况下可享受商品在自售出之日(以实际收货日期为准)起7日内无理由退换货、15日内换货的服务。如商家未履行所承诺的售后服务,您可根据ZOL商城"先行赔付"进行维权、获得赔偿。</p><ul class=\" list-paddingleft-2\"><li><p>内容声明</p></li><li><p>ZOL商城为第三方交易平台及互联网信息服务提供者,ZOL商城(含\r\n网站、移动端等)所展示的商品的标题、价格、详情等信息内容系由店铺经营者发布,其真实性、准确性和合法性均由店铺经营者负责。ZOL商城提醒用户购买商\r\n品前注意谨慎核实。如用户对商品的标题、价格、详情等任何信息有任何疑问的,请在购买前与店铺经营者沟通确认;ZOL商城存在海量店铺,如用户发现店铺内\r\n有任何违法/侵权信息,请立即向ZOL商城举报并提供有效线索。</p></li></ul><ul class=\" list-paddingleft-2\"><li><p>权利声明</p></li><li><p>注:因厂家会在没有任何提前通知的情况下更改产品包装、产地或者一些附件,本司不能确保客户收到的货物与商城图片、产地、附件说明完全一致。只能确保为原厂正货!并且保证与当时市场上同样主流新品一致。若本商城没有及时更新,请大家谅解!</p></li></ul><p><br/></p><p><br/></p>', '<h4 
class=\"zbz-sevice-title\">售后服务</h4><p>购买带有"Z"标识店铺的商品,售出产品有非人为质量问题凭有效检测报告、不影响商家二次销售的情况下可享受商品在自售出之日(以实际收货日期为准)起7日内无理由退换货、15日内换货的服务。</p><ul class=\" list-paddingleft-2\"><li><p>1、赔付保障权益</p></li><li><p>如商家未履行所承诺的售后服务,您可根据ZOL商城"先行赔付"进行维权、获得赔偿。</p></li></ul><h4 class=\"zbz-sevice-title\" id=\"tuihuo\"><em class=\"ico-8\"></em>退换货说明</h4><ul class=\" list-paddingleft-2\"><li><p>1、退换货成立条件:</p></li><li><p>1) 商品在国家三包政策范围内并且不影响二次销售。 <br/> 2) 经由生产厂家认可的售后服务中心或国家认可的第三方质检平台检测确认的非人为商品质量问题,并出具检测报告(检测报告需由维权方出具,如维权方当地无检测条件的请联系卖家是否提供代检测服务)。 <br/> 3) 当您购买的商品需要办理退换货时,商家会根据退换货规则在24小时内为您审核。</p></li></ul><ul class=\" list-paddingleft-2\"><li><p>2、退换货服务流程:</p></li><li><p><img src=\"/Upload/ueditor/image/20160604/1465011890261418.png\" height=\"271\" width=\"650\"/></p></li></ul><ul class=\" list-paddingleft-2\"><li><p>3、退换货规则说明:</p></li><li><table class=\"return-instructions\"><tbody><tr class=\"firstRow\"><th class=\"cell-1\">申请退换货的基本条件</th><th class=\"cell-2\">不允许申请退换货的情况</th></tr><tr><td class=\"cell-1\">退换商品应保持你收到商品时候的原貌</td><td class=\"cell-2\">除商品本身原因外的个人原因,如不喜欢、产品降价等</td></tr><tr><td class=\"cell-1\">退换商品应保持全新,相关附属配件齐全</td><td class=\"cell-2\">商品自身携带的商品序列号与商户售出时约定的不符(商户售出的商品序列号应与售出时约定的相符)</td></tr><tr><td class=\"cell-1\">保修卡等随货的书面材料没有填写和任何的污损、折叠</td><td class=\"cell-2\">商品质保标签、机身、包装、保修卡条码(S/N码)被涂改、撕毁、移动或无法辨认</td></tr><tr><td class=\"cell-1\">退换商品本身原包装应保持完整</td><td class=\"cell-2\">商品购买凭证、保修卡被退改、撕毁或丢失</td></tr><tr><td class=\"cell-1\">所退换的商品要求具有完整的外包装、原商品、附带商品</td><td class=\"cell-2\">未经同意自行拆卸、修理或升级引起的机器损坏</td></tr><tr><td class=\"cell-1\">收到商品与订单产品颜色、尺码、型号等不一致情</td><td class=\"cell-2\">未按商品说明要求使用、维护、保管而引起的机器损坏</td></tr><tr><td class=\"cell-1\">经由品牌商认可的售后服务中心或国家认可的第三方质检平台检测确认的商品问题,并出具有效测凭证</td><td class=\"cell-2\">机器结构因移动、跌落、碰撞、挤压而造成的故障或破损等人为损坏痕迹</td></tr><tr><td class=\"cell-1\">收到商品存在外观变形、损伤、少件等情况。少件指缺失主件或配件。提供第三方物流有效凭证(证明签收货物时商品即存在破损、少件等情况)</td><td class=\"cell-2\">商品配件缺损或包装有污染和严重积压痕迹</td></tr><tr><td rowspan=\"3\" class=\"cell-1\"><br/></td><td 
class=\"cell-2\">商品返回商户的过程中由于包装或运输操作不当造成损坏</td></tr><tr><td class=\"cell-2\">商品出厂时外包装有封条(以店铺及商品描述内容为准)</td></tr><tr><td class=\"cell-2\">因市场原因导致商品价格变动(以商品价格以拍下价格为准)</td></tr><tr><td colspan=\"2\">附注:买产品所赠礼品,不在本店退换货商品之列,且所送赠品不予折扣现金,抵价与退换!</td></tr></tbody></table></li></ul><ul class=\" list-paddingleft-2\"><li><p>4、退换货商品引起的运费问题:</p></li><li><p>1) 非商品质量问题而由买家发起的七天无理由退换货行为,买家退货的商品应当完好,退回商品的来回所有运费由买家承担。如因商品质量问题而导致的7天\r\n退货,15天换货行为,退回商品的所有来回运费均由卖家承担。卖家和买家另有约定的,按照约定。 <br/> 2) 售后商品经过检测后无质量问题,可以正常使用,商家一律使用顺丰到付返回。 <br/> 3) 退、换商品经售后检测无质量问题寄回后,如在7天内问题依然出现,将按上次的申请生效,请联系商家说明并将商品寄回,商家承担来回运费。 <br/> 4) 商城会根据检测结果判定谁来支付检测费用。</p></li></ul><ul class=\" list-paddingleft-2\"><li><p>5、赔付保障权益</p></li><li><p>如商家未履行所承诺的无理由退换货服务,您可根据ZOL商城"先行赔付"进行维权、获得赔偿。</p></li></ul><h4 class=\"zbz-sevice-title\"><em class=\"ico-9\"></em>如何退款</h4><ul class=\" list-paddingleft-2\"><li><p>1、退款流程:</p></li><li><p><img src=\"/Upload/ueditor/image/20160604/1465011890883399.png\" height=\"346\" width=\"650\"/></p></li></ul><ul class=\" list-paddingleft-2\"><li><p>2、退款说明</p></li><li><p>取消订单退款:如果您完成支付后取消订单,ZOL商城会在订单取消完成后1天内处理您的退款。如果您只取消了部分商品,会在剩余商品发货完成后1天内处理您的退款。</p></li></ul><ul class=\" list-paddingleft-2\"><li><p>3、赔付保障权益</p></li><li><p>如商家未履行所承诺的退款服务,您可根据ZOL商城"先行赔付"进行维权、获得赔偿。</p></li></ul><ul class=\" list-paddingleft-2\"><li><p>4、退款方式</p></li><li><table class=\"refund-way\"><tbody><tr class=\"firstRow\"><th class=\"cell-1\">支付方式</th><th class=\"cell-2\">退款方式</th><th class=\"cell-3\">退款处理时限</th></tr><tr><td class=\"cell-1\">支付宝</td><td class=\"cell-2\">支付宝账户</td><td class=\"cell-3\">1-2个工作日</td></tr><tr><td class=\"cell-1\">网上银行</td><td class=\"cell-2\">原借记卡帐户</td><td class=\"cell-3\">5-7个工作日</td></tr><tr><td class=\"cell-1\">信用卡</td><td class=\"cell-2\">原借记卡帐户</td><td class=\"cell-3\">7-15个工作日</td></tr></tbody></table><p><strong>注意:</strong>银行退款处理时限仅供您参考,具体退款到账时间依各银行、支付机构等的具体操作处理时间而定。</p><p><strong>各银行退款限制</strong>\r\n </p></li><ul class=\"refund-limit list-paddingleft-2\" style=\"list-style-type: 
square;\"><li><p>支付宝不受理90天以前支付成功订单的退款申请</p></li><li><p>建设银行不受理148天以前支付成功订单的退款申请</p></li><li><p>中信银行信用卡不受理228天以前支付成功订单的退款申请</p></li><li><p>浦发银行不受理同一支付成功订单的多次退款申请</p></li><li><p>民生银行、浦发银行、中国邮政、中国农业银行不受理85天以前支付成功订单的退款申请</p></li><li><p>其他银行不受理365天前支付成功订单的退款申请</p></li><li><p>浦发银行、华夏银行、中国邮政不受理同一支付成功订单的多次退款申请</p></li></ul></ul><h4 class=\"zbz-sevice-title\"><em class=\"ico-10\"></em>交易条款</h4><p>Zol商城为增加买家与商家双方之间的信任与交流,规避交易过程中的风险和受骗可能,确保您和商家不受损失。ZOL商城采用统一收款方式来保障买家与卖家的双方利益。</p><p>1、客户在接受商品订购与送货的同时,在您消费之前有义务遵守以下交易条款。请您仔细阅读以下条款。</p><ul class=\" list-paddingleft-2\"><li><p>1) 变化性</p></li><li><p>由于价格波动不可预知,以订单提交付款时的价格为标准;</p></li></ul><ul class=\" list-paddingleft-2\"><li><p>2) 准确性</p></li><li><p>清楚准确地填写您的真实姓名、送货地址及联系方式;因如下情况造成订单延迟或无法配送等,ZOL商城将不承担责任; <br/> 1.客户提供错误信息和不详细的地址; <br/> 2.货物送达无人签收,由此造成的重复配送所产生的费用及相关的后果; <br/> 3.不可抗力,例如:自然灾害、交通戒严、突发战争等。</p></li></ul><ul class=\" list-paddingleft-2\"><li><p>3) 安全性</p></li><li><p>我们会保证交易信息的安全。</p></li></ul><ul class=\" list-paddingleft-2\"><li><p>4) 隐私权</p></li><li><p>ZOL商城尊重您的隐私权,在任何情况下,我们都不会将您的个人和订单信息出售或泄露给任何第三方(国家司法机关调取除外)。我们从线上得到的所有客户信息仅用来处理您的相关订单。</p></li></ul><ul class=\" list-paddingleft-2\"><li><p>5) 免责</p></li><li><p>如因不可抗力或其它ZOL商城无法控制的原因使本平台销售系统崩溃或无法正常使用导致网上交易无法完成或丢失有关的信息、记录等,ZOL商城会尽可能合理地协助处理善后事宜,并努力使客户免受经济损失。</p></li></ul><ul class=\" list-paddingleft-2\"><li><p>6) 客户监督</p></li><li><p>ZOL商城希望通过不懈努力,为客户提供优质服务,我们在给客户提供服务的全过程中接受客户的监督。</p></li></ul><ul class=\" list-paddingleft-2\"><li><p>7) 争议处理</p></li><li><p>如果客户与ZOL商家之间发生任何争议,可依据当时双方所认定的协议或相关法律来解决。</p></li></ul><h4 class=\"zbz-sevice-title\" id=\"chengnuo\"><em class=\"ico-11\"></em>服务承诺</h4><p>网站所售产品均为正品,如有任何问题可与我们客服人员联系,我们会在第一时间跟您沟通处\r\n理。我们将争取以低的价格、优质的服务来满足您的需求。 \r\n由于部分商品包装更换较为频繁,因此您收到的货品有可能与图片不完全一致,请您以收到的商品实物为准,同时我们会尽量做到及时更新,由此给您带来不便多多\r\n谅解,谢谢!</p><p><br/></p>', '7');
INSERT INTO `product_detail` VALUES ('2', 'Upload/Content/16/06/93801464868049.png,Upload/Content/16/06/44861464868049.png,Upload/Content/16/06/21451464868048.png,Upload/Content/16/06/93791464868047.png,Upload/Content/16/06/10061464868046.png', '<p><a href=\"http://go.zol.com/topic/5062097.html\" target=\"_blank\">\r\n </a> </p><p><br/></p><p><img src=\"http://i3.mercrt.fd.zol-img.com.cn/g5/M00/0E/02/ChMkJlclnV2IMzbYAATNOgsEhl0AAQ3FQMNSoEABM1S879.png\" style=\"width: 443px; height: 345px;\" height=\"345\" width=\"443\"/></p><p><span class=\"title\">48小时发货</span><span class=\"english-title\">48 Hours</span>\r\n </p><p>购买带有"Z<span class=\"add-plus\">+</span>"标识店铺的商品时,商家承诺您所下的订单,将在48小时内进行发货(部分商家节假日不发货),让您尽快收到商品。</p><ul class=\" list-paddingleft-2\"><li><p>1、赔付保障权益</p></li><li><p>如您的收货地址在商家承诺的服务区域内,商家承诺在下单后的48小时内将商品发出,从您下单时间开始计算,如超时未发出,您可根据ZOL商城“先行赔付”进行维权、获得赔偿。</p></li></ul><ul class=\" list-paddingleft-2\"><li><p>2、如何跟踪配送信息</p></li><li><p>配送信息可直接在您购买的商品订单中查看。或直接与卖家联系确认。您\r\n可以在商品详细页面查看入驻卖家联系信息,订单状态变为"已发货"后,点击查询"物流状态"即可查询到您所购买的商品的在途情况。或请您点击对应的物流承\r\n运商网站进行查询,快递单号可以登陆ZOL商城账号的订单管理中获取,建议发货后48小时后进行查询。查询方式如下: <br/>顺丰快递:服务热线 95338 快递单号由"10"或"01"开头的12位数字组成,例如:10******8888或01******9999。</p></li></ul><p><span class=\"title\">发票保障</span><span class=\"english-title\">Invoice</span>\r\n </p><p>指买家在zol购买商品时,店铺内带有"Z<span class=\"add-plus\">+</span>"标识的店铺内所购买商品均带有正规商品发票。且使用该服务不向买家收取任何其他费用。</p><ul class=\" list-paddingleft-2\"><li><p>1、赔付保障权益</p></li><li><p>如商家未履行所承诺的发票保障服务,您可根据ZOL商城"先行赔付"进行维权、获得赔偿。</p></li></ul><ul class=\" list-paddingleft-2\"><li><p>2、发票的开具</p></li><li><p>1. 开具发票的金额以实际支付的金额为准。 <br/>2. ZOL提供的发票种类有为"普通发票"。</p></li></ul><ul class=\" list-paddingleft-2\"><li><p>3、普通发票</p></li><li><p>1. 个人及不具有一般纳税人资格的企业客户,均开具普通发票 <br/> 2. 开具普通发票时,抬头默认为收货人"个人姓名",请需要更改抬头的客户在修改信息中进行修改。 <br/> 3. 
普通发票信息与您输入的信息一致的情况下,发票一经开出,恕不退换。</p></li></ul><ul class=\" list-paddingleft-2\"><li><p>4、开发票的注意事项</p></li><li><p>1.发票金额不能高于订单金额。 <br/> 2.为了享受厂商提供的质保服务,请您将商品发票开具为明细。如果您购买的是数码类、手机及配件、笔记本、台式机、家电类商品,为了保证您能充分享受生产厂家提供的售后服务(售后服务需根据发票确认您的购买日期),发票内容默认为您订购的商品明细。 <br/> 3.不同物流中心开具的发票无法合并。 <br/> 4.使用优惠券、积分的金额不开具发票。 <br/> 5.一个包裹对应一张发票或多张发票。 <br/> 6、销售产品均可开具正规机打发票(普通增值税发票),无需加税点,但为了保证发票不遗漏和错开,请下订单时在补充说明(或者卖家留言),留言注明:需开发票抬头XXX公司或者XXX人名。如忘记注明,请及时联系客服帮助备注。</p></li></ul><ul class=\" list-paddingleft-2\"><li><p>5、发票的退换</p></li><li><p>1. 如果您收到的发票与您输入的开票信息、订单信息不一致,请及时联系我们的客服人员,我们会及时为您联系商家解决您的问题。 <br/> \r\n 2.未经易ZOL商城人员的允许,客服部门将不接受电话、传真、邮件、邮寄等形式的重开发票申请,如您擅自将发票寄到我公司的任一办公地址,在寄送过程中\r\n发生的发票遗失、缺失等情况,恕我们概不负责。 <br/></p></li></ul><p><span class=\"title\">物流配送</span><span class=\"english-title\">Logistics</span>\r\n </p><p>当您购买ZOL带有"Z<span class=\"add-plus\">+</span>"\r\n标识店铺,商家承诺全店商品满399元,如收货地址在顺丰速运(即顺丰速运(集团)有限公司及其子公司)所覆盖的派送区域内,均采用顺丰速运为消费者提供\r\n免费物流配送服务;如收货地址不在顺丰速运派送范围内时,商家需使用其它可送达快递为消费者提供免费物流配送服务。如在商家店铺未购买达到399元,除商\r\n家优惠外,物流费用由买家自己承担。</p><ul class=\" list-paddingleft-2\"><li><p>1、赔付保障权益</p></li><li><p>如商家向您收取物流费用,您可根据ZOL商城"先行赔付"进行维权、获得赔偿。</p></li></ul><ul class=\" list-paddingleft-2\"><li><p>2、因以下原因导致延误或退回的,不在此服务保障范围内:</p></li><li><p>1) 部份城市的偏远地区因交通等问题,配送时间可能在预计到达时间基础上延后1-2天。 <br/> 2) 因买家原因(更改收货地址、地址不详、地址错误、联系不上、拒收、无代收人等)导致包裹延误派送或无法送达的; <br/> 3) 因不可抗力造成延误的("不可抗力"指不可预见、不可避免或不可克服的客观情况以及其他影响配送时间、造成包裹配送延误的客观情况,包括但不\r\n限于全国性或区域性空中或地面交通系统管制或中断(如天气原因等)、或通讯系统干扰或故障、或政府行为、邮政主管部门政策变化、战争、地震、台风、洪水、\r\n火灾、大雨、大雾等其他类似事件) <br/> 4) 航空违禁品、手机、电子类产品、易碎品等因航空安检查验导致无法正常配载航班或改走陆路运输的; <br/> 5) 寄递物品违反禁、限寄规定或有关运输管理条例,经有关部门没收或依照有关法规、规定处理的; <br/> 6) 收件人地址为机关、单位等机构,而周六、周日和公众节假日不接收邮件、包裹,造成延误的; <br/> 7) 收件人学校、单位或住宅小区不允许投递人员入内,或买家代收方原因,导致造成延误的;</p></li></ul><ul class=\" list-paddingleft-2\"><li><p>3、买家须知</p></li><li><p>1) 如收货地址不在顺丰速运派送范围内时,且在商家店铺购买达到399元,商家需使用其它可送达快递为消费者提供免费物流配送服务; <br/> 2) 寄递物品违反禁、限寄规定或有关运输管理条例,经有关部门没收或依照有关法规、规定处理的,不在补偿范围内; <br/> 3) 
顺丰速运的配送范围以本页公示的范围为准。港澳台及海外地区,大陆地区的部队(含武警)或受部队(含武警)管制的等顺丰不予收送快件的机构不在配送范围内。</p></li></ul><p><span class=\"title\">签收与验货</span><span class=\"english-title\">Receipt and inspection</span>\r\n </p><ul class=\" list-paddingleft-2\"><li><p>1、开箱验货</p></li><li><p>签收时在付款后与配送人员当面核对:商品及配件、应付金额、商品数量\r\n及发货清单、发票(如有)、赠品(如有)等;如存在包装破损、商品错误、商品短缺、商品 \r\n存在质量问题等影响签收的因素,请您可以拒收全部或部分商品,相关的赠品,配件或捆绑商品应一起当场拒收;为了保护您的权益,建议您尽量不要委托他人代为\r\n签收;如由他人代为签收商品而没有在配送人员在场的情况下验货,则视为您所订购商品的包装无任何问题。</p></li><li><p>2、签收与验货流程</p></li><li><p><img src=\"http://icon.zol-img.com.cn/newshop/shop/detail/youdian/img-2.jpg\" height=\"124\" width=\"624\"/>\r\n <br/> 1) 付款及签收完毕后,请在配送人员在场的情况下,参照顾客签收单对商品数量、型号、外观、配件等依次进行核对验收; <br/> 2) 若发现异常(如商品缺失、损坏、渗漏等),请务必让配送人员在顾客签收单上签字确认,并立即联系商家进行反馈和登记处理。 <br/> 3) 如您在配送人员离开后再提出包裹外包装或封带异常,以及商品的型号、数量或外观等存在异常而要求退换货,此时因无法确认责任,将无法为您做进一步处理,请您谅解。 <br/> 4) 货到付款的商品送达时,请您当面与配送员核对商品与款项,确保货、款两清;若您在配送员离开后发现款项有误,将无法为您核实处理。 <br/> 5) 如配送人员以公司没有规定或着急送货等借口,不配合您进行验货和签字确认,请及时联系ZOL商城客服进行投诉,客服人员将会在第一时间为您处理;一旦出现此情况,为了维护您的权益,建议您先不要拆开包裹。 <br/> 6) 为了保护您的权益,建议您尽量不要委托他人签收;如由他人代为签收商品而没有在配送人员在场的情况下验货,则视为您所订购商品的包装无任何问题。</p></li></ul>', '<p>购买带有"Z<span class=\"add-plus\">+</span>"标识店铺的商品,售出产品有非人为质量问题凭有效检测报告、不影响商家二次销售的情况下可享受商品在自售出之日(以实际收货日期为准)起7日内无理由退换货、15日内换货的服务。如商家未履行所承诺的售后服务,您可根据ZOL商城"先行赔付"进行维权、获得赔偿。</p><ul class=\" list-paddingleft-2\"><li><p>内容声明</p></li><li><p>ZOL商城为第三方交易平台及互联网信息服务提供者,ZOL商城(含\r\n网站、移动端等)所展示的商品的标题、价格、详情等信息内容系由店铺经营者发布,其真实性、准确性和合法性均由店铺经营者负责。ZOL商城提醒用户购买商\r\n品前注意谨慎核实。如用户对商品的标题、价格、详情等任何信息有任何疑问的,请在购买前与店铺经营者沟通确认;ZOL商城存在海量店铺,如用户发现店铺内\r\n有任何违法/侵权信息,请立即向ZOL商城举报并提供有效线索。</p></li></ul><ul class=\" list-paddingleft-2\"><li><p>权利声明</p></li><li><p>注:因厂家会在没有任何提前通知的情况下更改产品包装、产地或者一些附件,本司不能确保客户收到的货物与商城图片、产地、附件说明完全一致。只能确保为原厂正货!并且保证与当时市场上同样主流新品一致。若本商城没有及时更新,请大家谅解!</p></li></ul><p><br/></p>', '2');
-- Seed rows for `product_detail` (placeholder detail/after-sale HTML for
-- products 3-6).
-- NOTE(review): no column list is given, so these INSERTs depend on the
-- table's exact column order — presumably (id, ?, detail_html, service_html,
-- product_id); confirm against the `product_detail` CREATE TABLE earlier in
-- this dump.
INSERT INTO `product_detail` VALUES ('3', '', '<p>87987</p>', '<p>89789</p>', '3');
INSERT INTO `product_detail` VALUES ('4', '', '<p>878</p>', '<p>89</p>', '4');
INSERT INTO `product_detail` VALUES ('5', '', '<p>76</p>', '<p>7687</p>', '5');
INSERT INTO `product_detail` VALUES ('6', '', '<p>67</p>', '<p>76</p>', '6');
-- ----------------------------
-- Table structure for `property`
-- ----------------------------
-- Product-type attribute catalogue: each row is one attribute (e.g. screen
-- size) belonging to a product type (`type_tid` -> `type`.`tid`).
-- `value` packs the selectable options into a single '|'-separated string
-- (deliberately denormalized in this dump; see the records below).
-- NOTE(review): `ptype` stores '0'/'1' flags in a CHAR(100); its meaning is
-- not visible here — confirm semantics against the application code.
-- MyISAM does not enforce foreign keys, so `fk_property_type_idx` is an
-- ordinary index documenting the type_tid -> type.tid relationship.
DROP TABLE IF EXISTS `property`;
CREATE TABLE `property` (
`pid` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键自增',
`pname` char(50) NOT NULL DEFAULT '' COMMENT '属性名称',
`value` varchar(45) NOT NULL DEFAULT '' COMMENT '属性值',
`ptype` char(100) NOT NULL DEFAULT '',
`type_tid` int(11) NOT NULL,
PRIMARY KEY (`pid`),
KEY `fk_property_type_idx` (`type_tid`)
) ENGINE=MyISAM AUTO_INCREMENT=15 DEFAULT CHARSET=utf8 COMMENT='类型属性表';
-- ----------------------------
-- Records of property
-- ----------------------------
-- Attribute options for type 1; columns are (pid, pname, value, ptype, type_tid).
-- NOTE(review): row 5 ("上市时间") appears to be missing a '|' separator
-- between "2015年01月" and "2014年09月" — confirm against the source data
-- before fixing, since the string is live application data.
INSERT INTO `property` VALUES ('1', '屏幕尺寸', '4寸|4.5寸|5寸|5.3寸|5.7寸|7寸|13寸|14寸|20寸|29寸', '0', '1');
INSERT INTO `property` VALUES ('2', '颜色', '星空灰|钻石白|翡翠绿|金色|灰色|黄色|白色|香槟金|皓月银', '1', '1');
INSERT INTO `property` VALUES ('3', '手机类型', '3G手机|4G手机|拍照手机|音乐手机', '0', '1');
INSERT INTO `property` VALUES ('4', '触摸屏类型', '电容屏|多点触控', '0', '1');
INSERT INTO `property` VALUES ('5', '上市时间', '2016年04月|2015年01月2014年09月', '0', '1');
INSERT INTO `property` VALUES ('6', '产品定位 ', '时尚轻薄本|影音娱乐本|Ultrabook笔记本', '0', '1');
INSERT INTO `property` VALUES ('7', '产品类型', '家用|办公|娱乐', '0', '1');
INSERT INTO `property` VALUES ('8', '操作系统', '预装Windows 10| 预装Windows 8| 预装Windows 7', '0', '1');
INSERT INTO `property` VALUES ('9', 'CPU系列', '英特尔 酷睿i5 6代系列|英特尔 酷睿i5 2代系列|英特尔 酷睿i5 1代系列', '0', '1');
INSERT INTO `property` VALUES ('10', 'CPU主频', '2.3GHz| 2.9GHz| 1.3GHz| 1.8GHz', '0', '1');
INSERT INTO `property` VALUES ('11', '核心/线程数', '双核心/四线程|双核心/二线程|四核心/四线程', '0', '1');
INSERT INTO `property` VALUES ('12', '内存容量 ', '4GB(4GB×1)|8GB(8GB×1)', '0', '1');
INSERT INTO `property` VALUES ('13', '保修政策', '全国联保,享受三包服务|商城联保,享受三包', '0', '1');
INSERT INTO `property` VALUES ('14', '套餐', '官方套餐|套餐一|套餐二', '1', '1');
-- ----------------------------
-- Table structure for `type`
-- ----------------------------
-- Top-level product categories (digital / daily-life / other).
DROP TABLE IF EXISTS `type`;
CREATE TABLE `type` (
  `tid` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键,自增',
  `tname` char(30) NOT NULL DEFAULT '' COMMENT '分类名称',
  PRIMARY KEY (`tid`)
) ENGINE=MyISAM AUTO_INCREMENT=4 DEFAULT CHARSET=utf8 COMMENT='类型表';
-- ----------------------------
-- Records of type
-- ----------------------------
-- Explicit column lists keep these INSERTs valid if columns are ever added.
INSERT INTO `type` (`tid`, `tname`) VALUES ('1', '数码类');
INSERT INTO `type` (`tid`, `tname`) VALUES ('2', '生活类');
INSERT INTO `type` (`tid`, `tname`) VALUES ('3', '其他类');
-- -----------------------------------------------------
-- Table `Mall`.`client`
-- -----------------------------------------------------
-- Front-end customer account table.
-- Fixes over the original definition:
--   * `clid` was declared NULL despite being an AUTO_INCREMENT primary key;
--     it is now explicitly NOT NULL.
--   * `ceilphone`/`fixphone` were TINYINT (max value 255) and could not
--     store any real phone number; phone numbers are identifiers, not
--     quantities, so they are VARCHAR(20) (also preserves leading zeros
--     and a '+' prefix).
-- MyISAM does not enforce foreign keys or transactions — deliberate here,
-- matching the rest of this dump.
CREATE TABLE IF NOT EXISTS `Mall`.`client` (
  `clid` INT NOT NULL AUTO_INCREMENT COMMENT '主键',
  `cIdentification` CHAR(50) NOT NULL DEFAULT '' COMMENT '用户账号',
  `nickname` CHAR(50) NOT NULL DEFAULT '' COMMENT '昵称',
  `pwd` CHAR(32) NOT NULL DEFAULT '' COMMENT '密码',
  `mail` CHAR(50) NOT NULL DEFAULT '' COMMENT '邮箱',
  `address` CHAR(100) NOT NULL DEFAULT '' COMMENT '地址',
  `ceilphone` VARCHAR(20) NOT NULL DEFAULT '' COMMENT '手机号',
  `fixphone` VARCHAR(20) NOT NULL DEFAULT '' COMMENT '固话',
  PRIMARY KEY (`clid`))
ENGINE = MyISAM
COMMENT = '前台用户表';
-- -----------------------------------------------------
-- Table `Mall`.`order`
-- -----------------------------------------------------
-- Order header: one row per customer order; line items live in `orderList`.
-- Fix: `orid` was declared NULL despite being the AUTO_INCREMENT primary
-- key; it is now explicitly NOT NULL.
-- NOTE(review): `riseTime` is an INT — presumably a unix timestamp; confirm
-- with the application before migrating it to DATETIME/TIMESTAMP.
-- MyISAM does not enforce foreign keys; `fk_order_client1_idx` is a plain
-- index documenting the client_clid -> client.clid relationship.
CREATE TABLE IF NOT EXISTS `Mall`.`order` (
  `orid` INT NOT NULL AUTO_INCREMENT COMMENT '主键',
  `orderNumber` CHAR(50) NOT NULL DEFAULT '' COMMENT '订单号',
  `consignee` CHAR(50) NOT NULL DEFAULT '' COMMENT '收货人',
  `recieveAddress` VARCHAR(150) NOT NULL DEFAULT '' COMMENT '收货地址',
  `priceAggregate` DECIMAL(7,2) NOT NULL DEFAULT 0 COMMENT '价格总计',
  `riseTime` INT NOT NULL DEFAULT 0 COMMENT '生成时间',
  `remark` VARCHAR(150) NOT NULL DEFAULT '',
  `orderStatus` CHAR(50) NOT NULL DEFAULT '' COMMENT '订单状态',
  `client_clid` INT NOT NULL,
  PRIMARY KEY (`orid`),
  INDEX `fk_order_client1_idx` (`client_clid` ASC))
ENGINE = MyISAM
COMMENT = '订单表';
-- -----------------------------------------------------
-- Table `Mall`.`orderList`
-- -----------------------------------------------------
-- Order line items: quantity and subtotal for one product (`goods_gid`)
-- within one order (`order_orid`).
-- Fix: `oid` was declared NULL despite being the AUTO_INCREMENT primary
-- key; it is now explicitly NOT NULL.
-- MyISAM does not enforce foreign keys; the two indexes document the
-- goods_gid -> goods.gid and order_orid -> order.orid relationships.
CREATE TABLE IF NOT EXISTS `Mall`.`orderList` (
  `oid` INT NOT NULL AUTO_INCREMENT COMMENT '主键,自增',
  `amount` SMALLINT NOT NULL DEFAULT 0 COMMENT '数量',
  `subtotalPrice` DECIMAL(7,2) NOT NULL DEFAULT 0 COMMENT '价格小计',
  `descr` VARCHAR(300) NOT NULL DEFAULT '' COMMENT '备注说明',
  `goods_gid` INT NOT NULL,
  `order_orid` INT NOT NULL,
  PRIMARY KEY (`oid`),
  INDEX `fk_orderList_goods1_idx` (`goods_gid` ASC),
  INDEX `fk_orderList_order1_idx` (`order_orid` ASC))
ENGINE = MyISAM
COMMENT = '订单列表';
-- -----------------------------------------------------
-- Table `Mall`.`comment`
-- -----------------------------------------------------
-- Product reviews left by customers, with a moderation `status`.
-- Fix: `coid` was declared NULL despite being the AUTO_INCREMENT primary
-- key; it is now explicitly NOT NULL.
-- NOTE(review): `commentTime` is an INT — presumably a unix timestamp;
-- confirm with the application before migrating it to DATETIME/TIMESTAMP.
-- MyISAM does not enforce foreign keys; the indexes document the
-- goods_gid -> goods.gid and client_clid -> client.clid relationships.
CREATE TABLE IF NOT EXISTS `Mall`.`comment` (
  `coid` INT NOT NULL AUTO_INCREMENT COMMENT '主键',
  `title` CHAR(100) NOT NULL DEFAULT '' COMMENT '标题',
  `content` TEXT NULL COMMENT '内容',
  `stars` TINYINT UNSIGNED NOT NULL DEFAULT 0 COMMENT '星级',
  `commentTime` INT NOT NULL DEFAULT 0 COMMENT '评论时间',
  `status` CHAR(50) NOT NULL DEFAULT '' COMMENT '审核状态',
  `goods_gid` INT NOT NULL,
  `client_clid` INT NOT NULL,
  PRIMARY KEY (`coid`),
  INDEX `fk_comment_goods1_idx` (`goods_gid` ASC),
  INDEX `fk_comment_client1_idx` (`client_clid` ASC))
ENGINE = MyISAM
COMMENT = '评论表';
-- -----------------------------------------------------
-- Table `Mall`.`receiptAddress`
-- -----------------------------------------------------
-- Saved shipping addresses, several per customer (`client_clid`).
-- Fixes over the original definition:
--   * `rid` was declared NULL despite being the AUTO_INCREMENT primary key;
--     it is now explicitly NOT NULL.
--   * `rphone`/`rfixphone` were TINYINT UNSIGNED (max 255) and could not
--     store any real phone number; stored as VARCHAR(20) instead (keeps
--     leading zeros and a '+' prefix).
-- MyISAM does not enforce foreign keys; the index documents the
-- client_clid -> client.clid relationship.
CREATE TABLE IF NOT EXISTS `Mall`.`receiptAddress` (
  `rid` INT NOT NULL AUTO_INCREMENT COMMENT '主键自增',
  `consignee` CHAR(50) NOT NULL DEFAULT '' COMMENT '收货人',
  `inplace` CHAR(100) NOT NULL DEFAULT '' COMMENT '所在地区',
  `FullAddress` CHAR(150) NOT NULL DEFAULT '' COMMENT '详细地址',
  `rphone` VARCHAR(20) NOT NULL DEFAULT '' COMMENT '手机号码',
  `rfixphone` VARCHAR(20) NOT NULL DEFAULT '' COMMENT '固话',
  `client_clid` INT NOT NULL,
  PRIMARY KEY (`rid`),
  INDEX `fk_receiptAddress_client1_idx` (`client_clid` ASC))
ENGINE = MyISAM
COMMENT = '收货地址表';
-- Restore the session settings saved at the top of this dump (SQL mode,
-- foreign-key checks and unique checks are customarily disabled while the
-- data is loaded, then re-enabled here).
SET SQL_MODE=@OLD_SQL_MODE;
SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS;
SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS;
|
<filename>Hackerrank/SQL/S0030.sql
/*
https://www.hackerrank.com/challenges/weather-observation-station-14/problem
Greatest northern latitude below 137.2345, truncated (not rounded) to four
decimal places.
*/
SELECT TRUNCATE(MAX(LAT_N), 4)
FROM STATION
WHERE LAT_N < 137.2345;
|
<reponame>jonvestal/open-kilda
-- Schema-version bump plus reference data for the switch-store feature.
INSERT INTO "VERSION" (Version_ID, Version_Number, Version_Deployment_Date)
VALUES (10, 10, CURRENT_TIMESTAMP);
-- New store type: the switch store.
INSERT INTO "KILDA_STORE_TYPE" (store_type_id, store_type_name, store_type_code) VALUES
(2, 'Switch Store', 'SWITCH_STORE');
-- Audit activity types for switch-store configuration changes.
INSERT INTO "ACTIVITY_TYPE" (activity_type_id, activity_name) VALUES
(30, 'UPDATE_SWITCH_STORE_CONFIG'),
(31, 'DELETE_SWITCH_STORE_CONFIG');
|
<gh_stars>1000+
/* Copyright 2021 The Matrix.org Foundation C.I.C
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-- Add a table that keeps track of a list of users who should, upon their next
-- sync request, receive presence for all currently online users that they are
-- "interested" in.
-- The motivation for a DB table over an in-memory list is so that this list
-- can be added to and retrieved from by any worker. Specifically, we don't
-- want to duplicate work across multiple sync workers.
CREATE TABLE IF NOT EXISTS users_to_send_full_presence_to(
    -- The user ID to send full presence to.
    user_id TEXT PRIMARY KEY,
    -- A presence stream ID token - the current presence stream token when the row was last upserted.
    -- If a user calls /sync and this token is part of the update they're to receive, we also include
    -- full user presence in the response.
    -- This allows multiple devices for a user to receive full presence whenever they next call /sync.
    presence_stream_id BIGINT,
    -- NOTE(review): no ON DELETE action is declared, so deleting a row from
    -- `users` will fail while it is referenced here — confirm this is the
    -- intended behaviour.
    FOREIGN KEY (user_id)
        REFERENCES users (name)
); |
-- Optimisation of the views used to load the taxon list.
-- Small lookup table mapping the legacy French strings 'oui'/'non' (stored
-- in bib_taxons.filtre2/filtre3) onto real booleans.
-- Fixes over the original statement:
--   * the obsolete "WITH (OIDS=FALSE)" storage clause was dropped — it has
--     been the default for a long time and OID columns were removed in
--     PostgreSQL 12;
--   * the INSERTs now carry explicit column lists so they keep working if
--     columns are ever added.
CREATE TABLE cor_boolean
(
  expression character varying(25) NOT NULL,
  bool boolean,
  CONSTRAINT cor_boolean_pkey PRIMARY KEY (expression)
);
ALTER TABLE cor_boolean OWNER TO geonatuser;
INSERT INTO cor_boolean (expression, bool) VALUES ('oui', true);
INSERT INTO cor_boolean (expression, bool) VALUES ('non', false);
-- Rebuild the flat taxon view used by the synthesis module.
-- DROP + CREATE (rather than CREATE OR REPLACE alone) because the column
-- list changes, which CREATE OR REPLACE VIEW does not allow.
DROP VIEW synthese.v_taxons_synthese;
-- One row per (taxon, list): taxon identity plus heritage/protection flags
-- (decoded from the 'oui'/'non' filtre2/filtre3 strings via cor_boolean),
-- the aggregated protection articles, and the list the taxon belongs to.
-- Only taxa with at least one record in synthese.syntheseff are kept.
CREATE OR REPLACE VIEW synthese.v_taxons_synthese AS
SELECT DISTINCT
t.nom_francais,
txr.lb_nom AS nom_latin,
f2.bool AS patrimonial,
f3.bool AS protection_stricte,
txr.cd_ref,
txr.cd_nom,
txr.nom_valide,
txr.famille,
txr.ordre,
txr.classe,
txr.regne,
prot.protections,
l.id_liste,
l.picto
FROM taxonomie.taxref txr
JOIN taxonomie.bib_taxons t ON txr.cd_nom = t.cd_nom
JOIN taxonomie.cor_taxon_liste ctl ON ctl.id_taxon = t.id_taxon
-- Restrict to the lists displayed by the application (hard-coded ids).
JOIN taxonomie.bib_listes l ON l.id_liste = ctl.id_liste AND (l.id_liste = ANY (ARRAY[3, 101, 105, 106, 107, 108, 109, 110, 111, 112, 113]))
LEFT JOIN
(
-- Aggregate all protection articles of a taxon into one '#'-separated string
-- of "arrete article__url" entries (territory-relevant articles only).
SELECT cd_nom, STRING_AGG(((((arrete || ' '::text) || article::text) || '__'::text) || url::text), '#'::text) AS protections
FROM taxonomie.taxref_protection_especes tpe
JOIN taxonomie.taxref_protection_articles tpa ON tpa.cd_protection::text = tpe.cd_protection::text AND tpa.concerne_mon_territoire = true
GROUP BY cd_nom
) prot ON prot.cd_nom = t.cd_nom
JOIN public.cor_boolean f2 ON f2.expression = t.filtre2
JOIN public.cor_boolean f3 ON f3.expression = t.filtre3
JOIN (SELECT DISTINCT cd_nom FROM synthese.syntheseff) s ON s.cd_nom = t.cd_nom
ORDER BY t.nom_francais;
ALTER TABLE synthese.v_taxons_synthese
OWNER TO geonatuser;
GRANT ALL ON TABLE synthese.v_taxons_synthese TO geonatuser;
GRANT ALL ON TABLE synthese.v_taxons_synthese TO postgres;
-- NOTE(review): the statement below is a bare "WITH ... SELECT ...;" that is
-- not attached to any CREATE VIEW. It is nearly identical to the
-- v_tree_taxons_synthese definition recreated further down; when this
-- migration runs, it only produces a result set and has no effect on the
-- schema. It looks like a stray earlier draft — confirm whether it can be
-- removed.
WITH taxon AS (
SELECT
tx.id_taxon,
tx.nom_latin,
tx.nom_francais,
taxref.cd_nom,
taxref.id_statut,
taxref.id_habitat,
taxref.id_rang,
taxref.regne,
taxref.phylum,
taxref.classe,
taxref.ordre,
taxref.famille,
taxref.cd_taxsup,
taxref.cd_ref,
taxref.lb_nom,
taxref.lb_auteur,
taxref.nom_complet,
taxref.nom_valide,
taxref.nom_vern,
taxref.nom_vern_eng,
taxref.group1_inpn,
taxref.group2_inpn
FROM
(
SELECT tx_1.id_taxon,
taxref_1.cd_nom,
taxref_1.cd_ref,
taxref_1.lb_nom AS nom_latin,
CASE
WHEN tx_1.nom_francais IS NULL THEN taxref_1.lb_nom
WHEN tx_1.nom_francais::text = ''::text THEN taxref_1.lb_nom
ELSE tx_1.nom_francais
END AS nom_francais
FROM taxonomie.taxref taxref_1
LEFT JOIN taxonomie.bib_taxons tx_1 ON tx_1.cd_nom = taxref_1.cd_nom
WHERE (taxref_1.cd_nom IN (SELECT DISTINCT cd_nom FROM synthese.syntheseff))
) tx
JOIN taxonomie.taxref taxref ON taxref.cd_nom = tx.cd_ref
)
SELECT t.id_taxon,
t.cd_ref,
t.nom_latin,
t.nom_francais,
t.id_regne,
t.nom_regne,
COALESCE(t.id_embranchement, t.id_regne) AS id_embranchement,
COALESCE(t.nom_embranchement, ' Sans embranchement dans taxref'::character varying) AS nom_embranchement,
COALESCE(t.id_classe, t.id_embranchement) AS id_classe,
COALESCE(t.nom_classe, ' Sans classe dans taxref'::character varying) AS nom_classe,
COALESCE(t.desc_classe, ' Sans classe dans taxref'::character varying) AS desc_classe,
COALESCE(t.id_ordre, t.id_classe) AS id_ordre,
COALESCE(t.nom_ordre, ' Sans ordre dans taxref'::character varying) AS nom_ordre,
COALESCE(t.id_famille, t.id_ordre) AS id_famille,
COALESCE(t.nom_famille, ' Sans famille dans taxref'::character varying) AS nom_famille
FROM
(
SELECT DISTINCT t_1.id_taxon,
t_1.cd_ref,
t_1.nom_latin,
t_1.nom_francais,
( SELECT taxref.cd_nom FROM taxonomie.taxref WHERE taxref.id_rang = 'KD'::bpchar AND taxref.lb_nom::text = t_1.regne::text) AS id_regne,
t_1.regne AS nom_regne,
ph.cd_nom AS id_embranchement,
t_1.phylum AS nom_embranchement,
t_1.phylum AS desc_embranchement,
cl.cd_nom AS id_classe,
t_1.classe AS nom_classe,
t_1.classe AS desc_classe,
ord.cd_nom AS id_ordre,
t_1.ordre AS nom_ordre,
f.cd_nom AS id_famille,
t_1.famille AS nom_famille
FROM taxon t_1
LEFT JOIN ( SELECT taxref.cd_nom,
taxref.lb_nom
FROM taxonomie.taxref
WHERE taxref.id_rang = 'PH'::bpchar AND taxref.cd_nom = taxref.cd_ref) ph ON ph.lb_nom::text = t_1.phylum::text AND NOT t_1.phylum IS NULL
LEFT JOIN ( SELECT taxref.cd_nom,
taxref.lb_nom
FROM taxonomie.taxref
WHERE taxref.id_rang = 'CL'::bpchar AND taxref.cd_nom = taxref.cd_ref) cl ON cl.lb_nom::text = t_1.classe::text AND NOT t_1.classe IS NULL
LEFT JOIN ( SELECT taxref.cd_nom,
taxref.lb_nom
FROM taxonomie.taxref
WHERE taxref.id_rang = 'OR'::bpchar AND taxref.cd_nom = taxref.cd_ref) ord ON ord.lb_nom::text = t_1.ordre::text AND NOT t_1.ordre IS NULL
LEFT JOIN ( SELECT taxref.cd_nom,
taxref.id_rang,
taxref.lb_nom,
taxref.phylum,
taxref.famille
FROM taxonomie.taxref
WHERE taxref.id_rang = 'FM'::bpchar AND taxref.cd_nom = taxref.cd_ref) f ON f.lb_nom::text = t_1.famille::text AND f.phylum::text = t_1.phylum::text AND NOT t_1.famille IS NULL
) t;
-- Rebuild the hierarchical taxon view (kingdom > phylum > class > order >
-- family) used to display the taxon tree. DROP + CREATE because the column
-- list changes, which CREATE OR REPLACE VIEW does not allow.
DROP VIEW synthese.v_tree_taxons_synthese;
-- For each taxon present in synthese.syntheseff, resolve the cd_nom of each
-- ancestor rank from taxref; when a rank is missing, COALESCE falls back to
-- the parent rank's id and a " Sans ... dans taxref" placeholder label so
-- the tree stays navigable.
CREATE OR REPLACE VIEW synthese.v_tree_taxons_synthese AS
WITH taxon AS (
SELECT tx.id_taxon,
tx.nom_latin,
tx.nom_francais,
taxref.cd_nom,
taxref.id_statut,
taxref.id_habitat,
taxref.id_rang,
taxref.regne,
taxref.phylum,
taxref.classe,
taxref.ordre,
taxref.famille,
taxref.cd_taxsup,
taxref.cd_ref,
taxref.lb_nom,
taxref.lb_auteur,
taxref.nom_complet,
taxref.nom_valide,
taxref.nom_vern,
taxref.nom_vern_eng,
taxref.group1_inpn,
taxref.group2_inpn
FROM ( SELECT tx_1.id_taxon,
taxref_1.cd_nom,
taxref_1.cd_ref,
taxref_1.lb_nom AS nom_latin,
CASE
WHEN tx_1.nom_francais IS NULL THEN taxref_1.lb_nom
WHEN tx_1.nom_francais::text = ''::text THEN taxref_1.lb_nom
ELSE tx_1.nom_francais
END AS nom_francais
FROM taxonomie.taxref taxref_1
LEFT JOIN taxonomie.bib_taxons tx_1 ON tx_1.cd_nom = taxref_1.cd_nom
WHERE (taxref_1.cd_nom IN ( SELECT DISTINCT syntheseff.cd_nom
FROM synthese.syntheseff))) tx
JOIN taxonomie.taxref taxref ON taxref.cd_nom = tx.cd_ref
)
SELECT t.id_taxon,
t.cd_ref,
t.nom_latin,
t.nom_francais,
t.id_regne,
t.nom_regne,
COALESCE(t.id_embranchement, t.id_regne) AS id_embranchement,
COALESCE(t.nom_embranchement, ' Sans embranchement dans taxref'::character varying) AS nom_embranchement,
COALESCE(t.id_classe, t.id_embranchement) AS id_classe,
COALESCE(t.nom_classe, ' Sans classe dans taxref'::character varying) AS nom_classe,
COALESCE(t.desc_classe, ' Sans classe dans taxref'::character varying) AS desc_classe,
COALESCE(t.id_ordre, t.id_classe) AS id_ordre,
COALESCE(t.nom_ordre, ' Sans ordre dans taxref'::character varying) AS nom_ordre,
COALESCE(t.id_famille, t.id_ordre) AS id_famille,
COALESCE(t.nom_famille, ' Sans famille dans taxref'::character varying) AS nom_famille
FROM ( SELECT DISTINCT t_1.id_taxon,
t_1.cd_ref,
t_1.nom_latin,
t_1.nom_francais,
( SELECT taxref.cd_nom
FROM taxonomie.taxref
WHERE taxref.id_rang = 'KD'::bpchar AND taxref.lb_nom::text = t_1.regne::text) AS id_regne,
t_1.regne AS nom_regne,
ph.cd_nom AS id_embranchement,
t_1.phylum AS nom_embranchement,
t_1.phylum AS desc_embranchement,
cl.cd_nom AS id_classe,
t_1.classe AS nom_classe,
t_1.classe AS desc_classe,
ord.cd_nom AS id_ordre,
t_1.ordre AS nom_ordre,
f.cd_nom AS id_famille,
t_1.famille AS nom_famille
FROM taxon t_1
LEFT JOIN taxonomie.taxref ph ON ph.id_rang = 'PH'::bpchar AND ph.cd_nom = ph.cd_ref AND ph.lb_nom::text = t_1.phylum::text AND NOT t_1.phylum IS NULL
LEFT JOIN taxonomie.taxref cl ON cl.id_rang = 'CL'::bpchar AND cl.cd_nom = cl.cd_ref AND cl.lb_nom::text = t_1.classe::text AND NOT t_1.classe IS NULL
LEFT JOIN taxonomie.taxref ord ON ord.id_rang = 'OR'::bpchar AND ord.cd_nom = ord.cd_ref AND ord.lb_nom::text = t_1.ordre::text AND NOT t_1.ordre IS NULL
LEFT JOIN taxonomie.taxref f ON f.id_rang = 'FM'::bpchar AND f.cd_nom = f.cd_ref AND f.lb_nom::text = t_1.famille::text AND f.phylum::text = t_1.phylum::text AND NOT t_1.famille IS NULL) t;
ALTER TABLE synthese.v_tree_taxons_synthese
OWNER TO geonatuser;
GRANT ALL ON TABLE synthese.v_tree_taxons_synthese TO geonatuser;
-- Genericity: rename the SITPN-specific columns to neutral "public" names.
ALTER TABLE meta.bib_programmes RENAME sitpn TO programme_public;
ALTER TABLE meta.bib_programmes RENAME desc_programme_sitpn TO desc_programme_public;
-- Manage the content of the "Comment ?" (how?) panel in the synthesis:
-- add an "actif" flag and backfill every existing programme to active.
ALTER TABLE meta.bib_programmes ADD COLUMN actif boolean;
UPDATE meta.bib_programmes SET actif = true;
-- Dynamic management of the links to the data-entry forms shown on the home
-- page: five new optional columns on synthese.bib_sources.
ALTER TABLE synthese.bib_sources ADD COLUMN url character varying(255);
COMMENT ON COLUMN synthese.bib_sources.url IS 'Définir l''url d''accès au formulaire de saisie de cette source de données - optionnel';
-- BUG FIX: in the original script the four COMMENT ON COLUMN statements
-- below all targeted synthese.bib_sources.url, overwriting the url comment
-- each time and leaving target/picto/groupe/actif undocumented. Each
-- comment now targets the column its text actually describes.
ALTER TABLE synthese.bib_sources ADD COLUMN target character varying(10);
COMMENT ON COLUMN synthese.bib_sources.target IS 'Indiquer si le formulaire de saisie de cette source de données s''ouvre dans un nouvel onglet - optionnel';
ALTER TABLE synthese.bib_sources ADD COLUMN picto character varying(255);
COMMENT ON COLUMN synthese.bib_sources.picto IS 'Définir le chemin du pictogramme identifiant le protocole en lien avec la source de données - optionnel';
ALTER TABLE synthese.bib_sources ADD COLUMN groupe character varying(50);
COMMENT ON COLUMN synthese.bib_sources.groupe IS 'Placer cette source de données dans un groupe (exemple FAUNE ou FLORE) - optionnel';
ALTER TABLE synthese.bib_sources ADD COLUMN actif boolean;
COMMENT ON COLUMN synthese.bib_sources.actif IS 'Définir si le formulaire de saisie de cette source de données doit aparaitre ou non sur la page d''accueil - optionnel';
-- Caution: if a source with id 2 already exists, adapt the line below.
-- Register the "Mortalité" source, then backfill the new url/target/picto/
-- groupe/actif columns for the existing sources (ids 1-6).
INSERT INTO synthese.bib_sources (id_source, nom_source, desc_source, host, port, username, pass, db_name, db_schema, db_table, db_field, url, target, picto, groupe, actif) VALUES (2, 'Mortalité', 'contenu des tables t_fiche_cf et t_releves_cf de la base faune postgres', 'localhost', 22, NULL, NULL, 'geonaturedb', 'contactfaune', 't_releves_cf', 'id_releve_cf', 'mortalite', NULL, 'images/pictos/squelette.png', 'FAUNE', true);
-- All sources visible on the home page by default, except source 4.
UPDATE synthese.bib_sources SET actif = true;
UPDATE synthese.bib_sources SET actif = false WHERE id_source = 4;
UPDATE synthese.bib_sources SET groupe = 'FAUNE' WHERE id_source IN(1,2,3);
UPDATE synthese.bib_sources SET groupe = 'FLORE' WHERE id_source IN(4,5,6);
-- Per-source form routes.
UPDATE synthese.bib_sources SET url = 'cf' WHERE id_source = 1;
UPDATE synthese.bib_sources SET url = 'mortalite' WHERE id_source = 2;
UPDATE synthese.bib_sources SET url = 'invertebre' WHERE id_source = 3;
UPDATE synthese.bib_sources SET url = 'pda' WHERE id_source = 4;
UPDATE synthese.bib_sources SET url = 'fs' WHERE id_source = 5;
UPDATE synthese.bib_sources SET url = 'bryo' WHERE id_source = 6;
-- Per-source pictograms.
UPDATE synthese.bib_sources SET picto = 'images/pictos/amphibien.gif' WHERE id_source = 1;
UPDATE synthese.bib_sources SET picto = 'images/pictos/squelette.png' WHERE id_source = 2;
UPDATE synthese.bib_sources SET picto = 'images/pictos/insecte.gif' WHERE id_source = 3;
UPDATE synthese.bib_sources SET picto = 'images/pictos/plante.gif' WHERE id_source = 4;
UPDATE synthese.bib_sources SET picto = 'images/pictos/plante.gif' WHERE id_source = 5;
UPDATE synthese.bib_sources SET picto = 'images/pictos/mousse.gif' WHERE id_source = 6;
-- Update of trigger function contactfaune.synthese_insert_releve_cf.
-- Fired for each new row of contactfaune.t_releves_cf: copies the record
-- into synthese.synthesefaune, picking the data source from the observation
-- criterion ("mortality" vs regular fauna contact) and flagging whether the
-- record lies inside the core area.
-- Fix over the previous version: the unused local variables `test` and
-- `unite` were removed from the DECLARE block.
CREATE OR REPLACE FUNCTION contactfaune.synthese_insert_releve_cf()
RETURNS trigger AS
$BODY$
DECLARE
fiche RECORD;
mesobservateurs character varying(255);
criteresynthese integer;
idsource integer;
danslecoeur boolean;
BEGIN
-- Fetch the parent record (t_fiches_cf) and the synthesis criterion.
SELECT INTO fiche * FROM contactfaune.t_fiches_cf WHERE id_cf = new.id_cf;
SELECT INTO criteresynthese id_critere_synthese FROM contactfaune.bib_criteres_cf WHERE id_critere_cf = new.id_critere_cf;
-- Criterion 2 means a "mortality" observation (source 2); anything else is
-- a regular fauna contact (source 1).
IF criteresynthese = 2 THEN idsource = 2;
ELSE
	idsource = 1;
END IF;
-- Build the comma-separated observer list for this record.
SELECT INTO mesobservateurs o.observateurs FROM contactfaune.t_releves_cf r
JOIN contactfaune.t_fiches_cf f ON f.id_cf = r.id_cf
LEFT JOIN (
	SELECT id_cf, array_to_string(array_agg(r.nom_role || ' ' || r.prenom_role), ', ') AS observateurs
	FROM contactfaune.cor_role_fiche_cf c
	JOIN utilisateurs.t_roles r ON r.id_role = c.id_role
	GROUP BY id_cf
) o ON o.id_cf = f.id_cf
WHERE r.id_releve_cf = new.id_releve_cf;
-- Does the record fall inside the core area (zone 3249)?
IF st_intersects((SELECT the_geom FROM layers.l_zonesstatut WHERE id_zone = 3249), fiche.the_geom_2154) THEN
	danslecoeur = true;
ELSE
	danslecoeur = false;
END IF;
INSERT INTO synthese.synthesefaune (
id_source,
id_fiche_source,
code_fiche_source,
id_organisme,
id_protocole,
codeprotocole,
ids_protocoles,
id_precision,
cd_nom,
id_taxon,
insee,
dateobs,
observateurs,
altitude_retenue,
remarques,
derniere_action,
supprime,
the_geom_3857,
the_geom_2154,
the_geom_point,
id_lot,
id_critere_synthese,
effectif_total,
coeur
)
VALUES(
idsource,
new.id_releve_cf,
'f'||new.id_cf||'-r'||new.id_releve_cf,
fiche.id_organisme,
fiche.id_protocole,
1,
fiche.id_protocole,
1,
new.cd_ref_origine,
new.id_taxon,
fiche.insee,
fiche.dateobs,
mesobservateurs,
fiche.altitude_retenue,
new.commentaire,
'c',
false,
fiche.the_geom_3857,
fiche.the_geom_2154,
fiche.the_geom_3857,
fiche.id_lot,
criteresynthese,
new.am+new.af+new.ai+new.na+new.jeune+new.yearling+new.sai,
danslecoeur
);
RETURN NEW;
END;
$BODY$
LANGUAGE plpgsql VOLATILE
COST 100;
ALTER FUNCTION contactfaune.synthese_insert_releve_cf()
OWNER TO geonatuser;
GRANT EXECUTE ON FUNCTION contactfaune.synthese_insert_releve_cf() TO geonatuser;
-- Update of the fauna taxonomy views (original comment: "mise à jour des
-- vues taxonomiques faune").
-- Classes offered in the nomad fauna data-entry app: one row per taxon list
-- in the 100-199 id range whose reference taxon belongs to phylum Chordata.
CREATE OR REPLACE VIEW contactfaune.v_nomade_classes AS
 SELECT g.id_liste AS id_classe,
    g.nom_liste AS nom_classe_fr,
    g.desc_liste AS desc_classe
   FROM ( SELECT l.id_liste,
            l.nom_liste,
            l.desc_liste,
            min(taxonomie.find_cdref(tx.cd_nom)) AS cd_ref
           FROM taxonomie.bib_listes l
             JOIN taxonomie.cor_taxon_liste ctl ON ctl.id_liste = l.id_liste
             JOIN taxonomie.bib_taxons tx ON tx.id_taxon = ctl.id_taxon
          WHERE l.id_liste >= 100 AND l.id_liste < 200
          GROUP BY l.id_liste, l.nom_liste, l.desc_liste) g
     JOIN taxonomie.taxref t ON t.cd_nom = g.cd_ref
  WHERE t.phylum::text = 'Chordata'::text;
-- Classes offered in the nomad invertebrate app: taxon lists from id 100
-- upward whose reference taxon is an animal outside phylum Chordata.
-- NOTE(review): unlike the fauna view above there is no "< 200" upper bound
-- here — presumably so the new 201/202 lists added below are included;
-- confirm this is intended.
CREATE OR REPLACE VIEW contactinv.v_nomade_classes AS
 SELECT g.id_liste AS id_classe,
    g.nom_liste AS nom_classe_fr,
    g.desc_liste AS desc_classe
   FROM ( SELECT l.id_liste,
            l.nom_liste,
            l.desc_liste,
            min(taxonomie.find_cdref(tx.cd_nom)) AS cd_ref
           FROM taxonomie.bib_listes l
             JOIN taxonomie.cor_taxon_liste ctl ON ctl.id_liste = l.id_liste
             JOIN taxonomie.bib_taxons tx ON tx.id_taxon = ctl.id_taxon
          WHERE l.id_liste >= 100
          GROUP BY l.id_liste, l.nom_liste, l.desc_liste) g
     JOIN taxonomie.taxref t ON t.cd_nom = g.cd_ref
  WHERE t.phylum::text <> 'Chordata'::text AND t.regne::text = 'Animalia'::text;
-- Taxa selectable in the nomad fauna app (filtre1 = 'oui'), with per-taxon
-- UI metadata: counting mode, heritage flag (decoded via cor_boolean),
-- optional warning message, and whether the taxon can be recorded as a live
-- contact and/or as mortality.
-- NOTE(review): the hard-coded cd_nom arrays select special cases —
-- 61098/61119/61000 get counting mode 6 and 60577/60612 are mortality-only;
-- the species meanings are not visible here, confirm against taxref.
CREATE OR REPLACE VIEW contactfaune.v_nomade_taxons_faune AS
 SELECT DISTINCT t.id_taxon,
    taxonomie.find_cdref(tx.cd_nom) AS cd_ref,
    tx.cd_nom,
    t.nom_latin,
    t.nom_francais,
    g.id_classe,
        CASE
            WHEN tx.cd_nom = ANY (ARRAY[61098, 61119, 61000]) THEN 6
            ELSE 5
        END AS denombrement,
    f2.bool AS patrimonial,
    m.texte_message_cf AS message,
        CASE
            WHEN tx.cd_nom = ANY (ARRAY[60577, 60612]) THEN false
            ELSE true
        END AS contactfaune,
    true AS mortalite
   FROM taxonomie.bib_taxons t
     LEFT JOIN contactfaune.cor_message_taxon cmt ON cmt.id_taxon = t.id_taxon
     LEFT JOIN contactfaune.bib_messages_cf m ON m.id_message_cf = cmt.id_message_cf
     JOIN taxonomie.cor_taxon_liste ctl ON ctl.id_taxon = t.id_taxon
     JOIN contactfaune.v_nomade_classes g ON g.id_classe = ctl.id_liste
     JOIN taxonomie.taxref tx ON tx.cd_nom = t.cd_nom
     JOIN public.cor_boolean f2 ON f2.expression::text = t.filtre2::text
  WHERE t.filtre1::text = 'oui'::text
  ORDER BY t.id_taxon, taxonomie.find_cdref(tx.cd_nom), t.nom_latin, t.nom_francais, g.id_classe, f2.bool, m.texte_message_cf;
-- Taxa selectable in the nomad invertebrate app: same structure as the
-- fauna view above but without the filtre1 restriction, the counting-mode
-- special cases, or the contact/mortality flags.
CREATE OR REPLACE VIEW contactinv.v_nomade_taxons_inv AS
 SELECT DISTINCT t.id_taxon,
    taxonomie.find_cdref(tx.cd_nom) AS cd_ref,
    tx.cd_nom,
    t.nom_latin,
    t.nom_francais,
    g.id_classe,
    f2.bool AS patrimonial,
    m.texte_message_inv AS message
   FROM taxonomie.bib_taxons t
     LEFT JOIN contactinv.cor_message_taxon cmt ON cmt.id_taxon = t.id_taxon
     LEFT JOIN contactinv.bib_messages_inv m ON m.id_message_inv = cmt.id_message_inv
     JOIN taxonomie.cor_taxon_liste ctl ON ctl.id_taxon = t.id_taxon
     JOIN contactinv.v_nomade_classes g ON g.id_classe = ctl.id_liste
     JOIN taxonomie.taxref tx ON tx.cd_nom = t.cd_nom
     JOIN public.cor_boolean f2 ON f2.expression::text = t.filtre2::text;
-- Two extra invertebrate lists ("just for fun", per the original author).
INSERT INTO bib_listes (id_liste, nom_liste, desc_liste, picto)
VALUES (201, 'Bivalves', null, 'images/pictos/nopicto.gif');
INSERT INTO bib_listes (id_liste, nom_liste, desc_liste, picto)
VALUES (202, 'Gastéropodes', null, 'images/pictos/nopicto.gif');
------------ Fix for a bug at save time.
-- Returns the display colour for a taxon based on its last observation
-- date: heritage ("patrimonial", filtre2 = 'oui') taxa must be re-observed
-- every year, other taxa every 3 years. 'gray' = seen recently enough,
-- 'red' = a new observation is needed.
CREATE OR REPLACE FUNCTION contactfaune.couleur_taxon(
    id integer,
    maxdateobs date)
  RETURNS text AS
$BODY$
--Function returning the colour of a taxon from its last observation date.
--
--<NAME> March 2012
  DECLARE
  couleur text;
  -- filtre2 holds the strings 'oui'/'non', hence character(3).
  patri character(3);
  BEGIN
	SELECT filtre2 INTO patri
	FROM taxonomie.bib_taxons
	WHERE id_taxon = id;
	IF patri = 'oui' THEN
		IF date_part('year',maxdateobs)=date_part('year',now()) THEN couleur = 'gray';
		ELSE couleur = 'red';
		END IF;
	ELSIF patri = 'non' THEN
		IF date_part('year',maxdateobs)>=date_part('year',now())-3 THEN couleur = 'gray';
		ELSE couleur = 'red';
		END IF;
	ELSE
	    -- NOTE(review): for any other filtre2 value this returns the
	    -- boolean false, which plpgsql casts to the text 'false' — confirm
	    -- callers expect that rather than NULL.
	    return false;
	END IF;
  return couleur;
  END;
$BODY$
  LANGUAGE plpgsql VOLATILE
  COST 100;
-- Returns the display colour for a taxon based on its last observation
-- date: heritage ("patrimonial", filtre2 = 'oui') taxa must be re-observed
-- every year, other taxa every 3 years. 'gray' = seen recently enough,
-- 'red' = a new observation is needed.
-- BUG FIX: `patri` was declared boolean, but taxonomie.bib_taxons.filtre2
-- holds the strings 'oui'/'non' (see the cor_boolean lookups used by the
-- views above and the contactfaune twin of this function). Assigning 'oui'
-- into a boolean raises "invalid input syntax for type boolean" at runtime;
-- `patri` is now character(3), consistent with contactfaune.couleur_taxon.
CREATE OR REPLACE FUNCTION contactinv.couleur_taxon(
    id integer,
    maxdateobs date)
  RETURNS text AS
$BODY$
--Function returning the colour of a taxon from its last observation date.
--
--<NAME> March 2012
  DECLARE
  couleur text;
  patri character(3);
  BEGIN
	SELECT filtre2 INTO patri
	FROM taxonomie.bib_taxons
	WHERE id_taxon = id;
	IF patri = 'oui' THEN
		IF date_part('year',maxdateobs)=date_part('year',now()) THEN couleur = 'gray';
		ELSE couleur = 'red';
		END IF;
	ELSIF patri = 'non' THEN
		IF date_part('year',maxdateobs)>=date_part('year',now())-3 THEN couleur = 'gray';
		ELSE couleur = 'red';
		END IF;
	ELSE
	    -- Unknown filtre2 value: keep the legacy behaviour of the
	    -- contactfaune version (boolean false cast to the text 'false').
	    return false;
	END IF;
  return couleur;
  END;
$BODY$
  LANGUAGE plpgsql VOLATILE
  COST 100;
-- BEFORE-INSERT trigger function for contactfaune.t_releves_cf: stores the
-- taxon's cd_ref on the new row and refreshes the per-unit observation
-- cache (cor_unite_taxon: colour + record count for the taxon in the
-- geographic unit containing the record's location).
CREATE OR REPLACE FUNCTION contactfaune.insert_releve_cf()
  RETURNS trigger AS
$BODY$
DECLARE
cdnom integer;
re integer;
unite integer;
nbobs integer;
line record;
fiche record;
BEGIN
-- Fetch the taxon's cd_nom.
SELECT INTO cdnom cd_nom FROM taxonomie.bib_taxons WHERE id_taxon = new.id_taxon;
-- Resolve and store the cd_ref at save time (per INPN recommendation).
SELECT INTO re taxonomie.find_cdref(cd_nom) FROM taxonomie.bib_taxons WHERE id_taxon = new.id_taxon;
new.cd_ref_origine = re;
-- Update cor_unite_taxon: find the geographic unit containing the record's
-- location (from the parent t_fiches_cf row).
-- NOTE(review): SELECT INTO keeps only the first matching unit if the
-- geometry intersects several — assumes units do not overlap; confirm.
SELECT INTO fiche * FROM contactfaune.t_fiches_cf WHERE id_cf = new.id_cf;
SELECT INTO unite u.id_unite_geo FROM layers.l_unites_geo u WHERE ST_INTERSECTS(fiche.the_geom_2154,u.the_geom);
-- Only update cor_unite_taxon when the location falls inside a unit
-- (if no unit matched, `unite` is NULL and NULL > 0 is not true).
IF unite>0 THEN
	SELECT INTO line * FROM contactfaune.cor_unite_taxon WHERE id_unite_geo = unite AND id_taxon = new.id_taxon;
	-- Remove any existing row for this (unit, taxon) pair before recreating it.
	IF line IS NOT NULL THEN
		DELETE FROM contactfaune.cor_unite_taxon WHERE id_unite_geo = unite AND id_taxon = new.id_taxon;
	END IF;
	-- Count the existing synthesis records for this taxon in the unit.
	SELECT INTO nbobs count(*) from synthese.syntheseff s
	JOIN layers.l_unites_geo u ON ST_Intersects(u.the_geom, s.the_geom_2154) AND u.id_unite_geo = unite
	WHERE s.cd_nom = cdnom;
	-- (Re)create the row with the refreshed colour and count.
	INSERT INTO contactfaune.cor_unite_taxon VALUES(unite,new.id_taxon,fiche.dateobs,contactfaune.couleur_taxon(new.id_taxon,fiche.dateobs), nbobs+1);
END IF;
RETURN NEW;
END;
$BODY$
  LANGUAGE plpgsql VOLATILE
  COST 100;
-- BEFORE-INSERT trigger function for contactinv.t_releves_inv: invertebrate
-- counterpart of contactfaune.insert_releve_cf — stores the taxon's cd_ref
-- on the new row and refreshes the per-unit cache cor_unite_taxon_inv.
CREATE OR REPLACE FUNCTION contactinv.insert_releve_inv()
  RETURNS trigger AS
$BODY$
DECLARE
cdnom integer;
re integer;
unite integer;
nbobs integer;
line record;
fiche record;
BEGIN
-- Fetch the taxon's cd_nom.
SELECT INTO cdnom cd_nom FROM taxonomie.bib_taxons WHERE id_taxon = new.id_taxon;
-- Resolve and store the cd_ref at save time (per INPN recommendation).
SELECT INTO re taxonomie.find_cdref(cd_nom) FROM taxonomie.bib_taxons WHERE id_taxon = new.id_taxon;
new.cd_ref_origine = re;
-- Update cor_unite_taxon_inv: find the geographic unit containing the
-- record's location (from the parent t_fiches_inv row).
-- NOTE(review): SELECT INTO keeps only the first matching unit if the
-- geometry intersects several — assumes units do not overlap; confirm.
SELECT INTO fiche * FROM contactinv.t_fiches_inv WHERE id_inv = new.id_inv;
SELECT INTO unite u.id_unite_geo FROM layers.l_unites_geo u WHERE ST_INTERSECTS(fiche.the_geom_2154,u.the_geom);
-- Only update cor_unite_taxon_inv when the location falls inside a unit.
IF unite>0 THEN
	SELECT INTO line * FROM contactinv.cor_unite_taxon_inv WHERE id_unite_geo = unite AND id_taxon = new.id_taxon;
	-- Remove any existing row for this (unit, taxon) pair before recreating it.
	IF line IS NOT NULL THEN
		DELETE FROM contactinv.cor_unite_taxon_inv WHERE id_unite_geo = unite AND id_taxon = new.id_taxon;
	END IF;
	-- Count the existing synthesis records for this taxon in the unit.
	SELECT INTO nbobs count(*) from synthese.syntheseff s
	JOIN layers.l_unites_geo u ON ST_Intersects(u.the_geom, s.the_geom_2154) AND u.id_unite_geo = unite
	WHERE s.cd_nom = cdnom;
	-- (Re)create the row with the refreshed colour and count.
	INSERT INTO contactinv.cor_unite_taxon_inv VALUES(unite,new.id_taxon,fiche.dateobs,contactinv.couleur_taxon(new.id_taxon,fiche.dateobs), nbobs+1);
END IF;
RETURN NEW;
END;
$BODY$
  LANGUAGE plpgsql VOLATILE
  COST 100;
-- Trigger function keeping the per-unit taxon caches in sync with
-- synthese.cor_unite_synthese: on DELETE/INSERT it recomputes (or removes)
-- the cor_unite_taxon (vertebrates) or cor_unite_taxon_inv (invertebrates)
-- row for the affected (taxon, unit) pair. Non-animal records are ignored.
-- NOTE(review): there is no branch — and no final RETURN — for TG_OP =
-- 'UPDATE' or 'TRUNCATE'; if this function is ever attached to such a
-- trigger, control falls off the end and raises a runtime error. Confirm
-- the trigger is declared for INSERT and DELETE only.
CREATE OR REPLACE FUNCTION synthese.maj_cor_unite_taxon()
  RETURNS trigger AS
$BODY$
	DECLARE
	monembranchement varchar;
	monregne varchar;
	monidtaxon integer;
	BEGIN
	IF (TG_OP = 'DELETE') THEN
		-- Look up the internal id_taxon for the deleted cd_nom.
		SELECT INTO monidtaxon id_taxon FROM taxonomie.bib_taxons WHERE cd_nom = old.cd_nom LIMIT 1;
		-- Kingdom of the deleted taxon.
		SELECT INTO monregne tx.regne FROM taxonomie.taxref tx WHERE tx.cd_nom = old.cd_nom;
		IF monregne = 'Animalia' THEN
			-- Phylum of the deleted taxon.
			SELECT INTO monembranchement tx.phylum FROM taxonomie.taxref tx WHERE tx.cd_nom = old.cd_nom;
			-- Recompute the colours for (old.id_unite_geo, taxon), routing to the
			-- vertebrate cache (phylum Chordata) or the invertebrate cache.
			IF monembranchement = 'Chordata' THEN
				IF (SELECT count(*) FROM synthese.cor_unite_synthese WHERE cd_nom = old.cd_nom AND id_unite_geo = old.id_unite_geo)= 0 THEN
					DELETE FROM contactfaune.cor_unite_taxon WHERE id_taxon = monidtaxon AND id_unite_geo = old.id_unite_geo;
				ELSE
					PERFORM synthese.calcul_cor_unite_taxon_cf(monidtaxon, old.id_unite_geo);
				END IF;
			ELSE
				IF (SELECT count(*) FROM synthese.cor_unite_synthese WHERE cd_nom = old.cd_nom AND id_unite_geo = old.id_unite_geo)= 0 THEN
					DELETE FROM contactinv.cor_unite_taxon_inv WHERE id_taxon = monidtaxon AND id_unite_geo = old.id_unite_geo;
				ELSE
					PERFORM synthese.calcul_cor_unite_taxon_inv(monidtaxon, old.id_unite_geo);
				END IF;
			END IF;
		END IF;
		RETURN OLD;
	ELSIF (TG_OP = 'INSERT') THEN
		-- Look up the internal id_taxon for the inserted cd_nom.
		SELECT INTO monidtaxon id_taxon FROM taxonomie.bib_taxons WHERE cd_nom = new.cd_nom LIMIT 1;
		-- Kingdom of the inserted taxon.
		SELECT INTO monregne tx.regne FROM taxonomie.taxref tx WHERE tx.cd_nom = new.cd_nom;
		IF monregne = 'Animalia' THEN
			-- Phylum of the inserted taxon.
			SELECT INTO monembranchement tx.phylum FROM taxonomie.taxref tx WHERE tx.cd_nom = new.cd_nom;
			-- Recompute the colours for (new.id_unite_geo, taxon), routing to the
			-- vertebrate cache (phylum Chordata) or the invertebrate cache.
			IF monembranchement = 'Chordata' THEN
				PERFORM synthese.calcul_cor_unite_taxon_cf(monidtaxon, new.id_unite_geo);
			ELSE
				PERFORM synthese.calcul_cor_unite_taxon_inv(monidtaxon, new.id_unite_geo);
			END IF;
		END IF;
		RETURN NEW;
	END IF;
	END;
$BODY$
  LANGUAGE plpgsql VOLATILE
  COST 100;
CREATE OR REPLACE FUNCTION contactfaune.synthese_update_fiche_cf()
RETURNS trigger AS
$BODY$
-- Trigger function: propagate an update of contactfaune.t_fiches_cf to the
-- matching rows in synthese.syntheseff (one row per releve of the fiche).
DECLARE
releves RECORD;
test integer;
mesobservateurs character varying(255);
sources RECORD;
idsourcem integer;
idsourcecf integer;
BEGIN
-- Loop to find the id_source values: two are possible ('cf' and 'mortalite')
-- for the same schema.
FOR sources IN SELECT id_source, url FROM synthese.bib_sources WHERE db_schema='contactfaune' AND db_field = 'id_releve_cf' LOOP
IF sources.url = 'cf' THEN
idsourcecf = sources.id_source;
ELSIF sources.url = 'mortalite' THEN
idsourcem = sources.id_source;
END IF;
END LOOP;
-- Fetch the releves of the modified fiche. The OLD id_cf is used so the rows
-- are found even in the unlikely case where the primary key of t_fiches_cf
-- was changed; the UPDATE below then writes the NEW values.
FOR releves IN SELECT * FROM contactfaune.t_releves_cf WHERE id_cf = old.id_cf LOOP
-- Check the record exists in syntheseff before updating it.
SELECT INTO test id_fiche_source FROM synthese.syntheseff WHERE id_fiche_source = releves.id_releve_cf::text AND (id_source = idsourcecf OR id_source = idsourcem);
IF test IS NOT NULL THEN
-- Build the comma-separated list of observers attached to the fiche.
SELECT INTO mesobservateurs o.observateurs FROM contactfaune.t_releves_cf r
JOIN contactfaune.t_fiches_cf f ON f.id_cf = r.id_cf
LEFT JOIN (
SELECT id_cf, array_to_string(array_agg(r.nom_role || ' ' || r.prenom_role), ', ') AS observateurs
FROM contactfaune.cor_role_fiche_cf c
JOIN utilisateurs.t_roles r ON r.id_role = c.id_role
GROUP BY id_cf
) o ON o.id_cf = f.id_cf
WHERE r.id_releve_cf = releves.id_releve_cf;
-- FIX: the previous revision tested St_Equals(new/old geometries) and then ran
-- the exact same UPDATE in both branches; the dead geometry test has been
-- removed and the duplicated UPDATE collapsed into a single statement.
UPDATE synthese.syntheseff SET
code_fiche_source = 'f'||new.id_cf||'-r'||releves.id_releve_cf,
id_organisme = new.id_organisme,
id_protocole = new.id_protocole,
insee = new.insee,
dateobs = new.dateobs,
observateurs = mesobservateurs,
altitude_retenue = new.altitude_retenue,
derniere_action = 'u',
supprime = new.supprime,
the_geom_3857 = new.the_geom_3857,
the_geom_2154 = new.the_geom_2154,
the_geom_point = new.the_geom_3857,
id_lot = new.id_lot
WHERE id_fiche_source = releves.id_releve_cf::text AND (id_source = idsourcecf OR id_source = idsourcem);
END IF;
END LOOP;
RETURN NEW;
END;
$BODY$
LANGUAGE plpgsql VOLATILE
COST 100;
CREATE OR REPLACE FUNCTION contactfaune.synthese_update_releve_cf()
RETURNS trigger AS
$BODY$
-- Trigger function: propagate an update of contactfaune.t_releves_cf to the
-- matching row in synthese.syntheseff (matched on the OLD primary key and on
-- either of the two possible sources, 'cf' and 'mortalite').
DECLARE
test integer;
criteresynthese integer;
sources RECORD;
idsourcem integer;
idsourcecf integer;
BEGIN
-- Loop to find the id_source values: two are possible ('cf' and 'mortalite')
-- for the same schema.
FOR sources IN SELECT id_source, url FROM synthese.bib_sources WHERE db_schema='contactfaune' AND db_field = 'id_releve_cf' LOOP
IF sources.url = 'cf' THEN
idsourcecf = sources.id_source;
ELSIF sources.url = 'mortalite' THEN
idsourcem = sources.id_source;
END IF;
END LOOP;
-- Check the record exists in syntheseff before updating it.
SELECT INTO test id_fiche_source FROM synthese.syntheseff WHERE id_fiche_source = old.id_releve_cf::text AND (id_source = idsourcecf OR id_source = idsourcem);
IF test IS NOT NULL THEN
SELECT INTO criteresynthese id_critere_synthese FROM contactfaune.bib_criteres_cf WHERE id_critere_cf = new.id_critere_cf;
-- Update the matching syntheseff record.
-- FIX: new.id_releve_cf is now cast to text explicitly. id_fiche_source is a
-- text column (it is compared with ::text elsewhere in this function) and
-- PostgreSQL rejects an implicit integer-to-text assignment.
UPDATE synthese.syntheseff SET
id_fiche_source = new.id_releve_cf::text,
code_fiche_source = 'f'||new.id_cf||'-r'||new.id_releve_cf,
cd_nom = new.cd_ref_origine,
remarques = new.commentaire,
determinateur = new.determinateur,
derniere_action = 'u',
supprime = new.supprime,
id_critere_synthese = criteresynthese,
-- NOTE(review): if any of the count columns is NULL the whole sum is NULL --
-- confirm the columns are NOT NULL or wrap them in COALESCE.
effectif_total = new.am+new.af+new.ai+new.na+new.jeune+new.yearling+new.sai
-- The OLD id_releve_cf is used here so the row is found even in the unlikely
-- case where the primary key of t_releves_cf was changed; the SET above then
-- writes the NEW value.
WHERE id_fiche_source = old.id_releve_cf::text AND (id_source = idsourcecf OR id_source = idsourcem);
END IF;
RETURN NEW;
END;
$BODY$
LANGUAGE plpgsql VOLATILE
COST 100;
CREATE OR REPLACE FUNCTION contactinv.synthese_update_releve_inv()
RETURNS trigger AS
$BODY$
-- Trigger function: propagate an update of contactinv.t_releves_inv to the
-- matching row in synthese.syntheseff (matched on the OLD primary key and the
-- single 'contactinv' source).
DECLARE
test integer;
criteresynthese integer;
mesobservateurs character varying(255);
idsource integer;
BEGIN
-- Fetch the id_source from synthese.bib_sources.
SELECT INTO idsource id_source FROM synthese.bib_sources WHERE db_schema='contactinv' AND db_field = 'id_releve_inv';
-- Check the record exists in syntheseff before updating it.
SELECT INTO test id_fiche_source FROM synthese.syntheseff WHERE id_source = idsource AND id_fiche_source = old.id_releve_inv::text;
IF test IS NOT NULL THEN
-- Resolve the synthese criterion from the contactinv criterion.
SELECT INTO criteresynthese id_critere_synthese FROM contactinv.bib_criteres_inv WHERE id_critere_inv = new.id_critere_inv;
-- Update the matching syntheseff record.
-- FIX: new.id_releve_inv is now cast to text explicitly. id_fiche_source is a
-- text column (it is compared with ::text elsewhere in this function) and
-- PostgreSQL rejects an implicit integer-to-text assignment.
UPDATE synthese.syntheseff SET
id_fiche_source = new.id_releve_inv::text,
code_fiche_source = 'f'||new.id_inv||'-r'||new.id_releve_inv,
cd_nom = new.cd_ref_origine,
remarques = new.commentaire,
determinateur = new.determinateur,
derniere_action = 'u',
supprime = new.supprime,
id_critere_synthese = criteresynthese,
-- NOTE(review): if any of the count columns is NULL the whole sum is NULL --
-- confirm the columns are NOT NULL or wrap them in COALESCE.
effectif_total = new.am+new.af+new.ai+new.na
-- The OLD id_releve_inv is used here so the row is found even in the unlikely
-- case where the primary key of t_releves_inv was changed; the SET above then
-- writes the NEW value.
WHERE id_source = idsource AND id_fiche_source = old.id_releve_inv::text;
END IF;
RETURN NEW;
END;
$BODY$
LANGUAGE plpgsql VOLATILE
COST 100;
CREATE OR REPLACE FUNCTION public.application_aggregate_taxons_rang_sp(id integer)
RETURNS text AS
$BODY$
-- Groups into an array all cd_nom of a species plus its subspecies, varieties
-- and convarieties, starting from the cd_nom of a taxon.
-- If the given cd_nom has a rank other than species ('ES') -- genus, family,
-- subspecies, variety, ... -- the function simply returns the cd_ref of the
-- given cd_nom.
--
-- <NAME> September 2011
DECLARE
rang character(4);
cd integer;
r text;
BEGIN
-- Rank of the input taxon. (Unused declarations rangsup/ref/sup/tab and a
-- commented-out dead query were removed from the previous revision.)
SELECT INTO rang id_rang FROM taxonomie.taxref WHERE cd_nom = id;
IF(rang='ES') THEN
cd = taxonomie.find_cdref(id);
-- All names resolving to the species, plus direct SSES/VAR/CVAR children and
-- VAR/CVAR nested under subspecies. UNION (not UNION ALL) deduplicates rows
-- that match several branches.
SELECT INTO r array_agg(a.cd_nom) FROM (
SELECT cd_nom FROM taxonomie.taxref WHERE cd_ref = cd
UNION
SELECT cd_nom FROM taxonomie.taxref WHERE id_rang = 'SSES' AND cd_taxsup = cd
UNION
SELECT cd_nom FROM taxonomie.taxref WHERE id_rang = 'VAR' AND cd_taxsup = cd
UNION
SELECT cd_nom FROM taxonomie.taxref WHERE id_rang = 'CVAR' AND cd_taxsup = cd
UNION
SELECT cd_nom FROM taxonomie.taxref WHERE id_rang = 'VAR' AND cd_taxsup IN (SELECT cd_nom FROM taxonomie.taxref WHERE id_rang = 'SSES' AND cd_taxsup = cd)
UNION
SELECT cd_nom FROM taxonomie.taxref WHERE id_rang = 'CVAR' AND cd_taxsup IN (SELECT cd_nom FROM taxonomie.taxref WHERE id_rang = 'SSES' AND cd_taxsup = cd)
) a;
ELSE
SELECT INTO r array_agg(cd_ref) FROM taxonomie.taxref WHERE cd_nom = id;
END IF;
-- NOTE(review): the function only reads data, so it could be declared STABLE
-- instead of VOLATILE; kept VOLATILE to avoid any planner-visible change.
return r;
END;
$BODY$
LANGUAGE plpgsql VOLATILE
COST 100;
ALTER FUNCTION public.application_aggregate_taxons_rang_sp(integer)
OWNER TO geonatuser;
CREATE OR REPLACE FUNCTION public.application_aggregate_taxons_all_rang_sp(id integer)
RETURNS text AS
$BODY$
-- Groups into an array, at species rank, all cd_nom of a species plus its
-- subspecies, varieties and convarieties, starting from the cd_nom of a taxon
-- of any rank at or below species ('ES', 'SSES', 'VAR', 'CVAR'); the species
-- ancestor is resolved first.
-- If the given cd_nom has a rank above species (genus, family, ...) the
-- function simply returns the cd_ref of the given cd_nom.
--
-- <NAME> September 2011
DECLARE
rang character(4);
rangsup character(4);
sup integer;
cd integer;
r text;
BEGIN
-- Rank of the input taxon. (Unused declarations ref/tab were removed from the
-- previous revision.)
SELECT INTO rang id_rang FROM taxonomie.taxref WHERE cd_nom = id;
IF(rang='ES' OR rang='SSES' OR rang = 'VAR' OR rang = 'CVAR') THEN
-- Resolve cd to the species-rank reference taxon.
IF(rang = 'ES') THEN
cd = taxonomie.find_cdref(id);
END IF;
IF(rang = 'SSES') THEN
-- Parent of a subspecies is the species.
SELECT INTO cd cd_taxsup FROM taxonomie.taxref WHERE cd_nom = taxonomie.find_cdref(id);
END IF;
IF(rang = 'VAR' OR rang = 'CVAR') THEN
-- A variety's parent may be a species or a subspecies; climb one more level
-- in the latter case.
SELECT INTO sup cd_taxsup FROM taxonomie.taxref WHERE cd_nom = taxonomie.find_cdref(id);
SELECT INTO rangsup id_rang FROM taxonomie.taxref WHERE cd_nom = taxonomie.find_cdref(sup);
IF(rangsup = 'ES') THEN
cd = sup;
ELSE
SELECT INTO cd cd_taxsup FROM taxonomie.taxref WHERE cd_nom = taxonomie.find_cdref(sup);
END IF;
END IF;
-- All names resolving to the species, plus direct SSES/VAR/CVAR children and
-- VAR/CVAR nested under subspecies. UNION (not UNION ALL) deduplicates rows
-- that match several branches.
SELECT INTO r array_agg(a.cd_nom) FROM (
SELECT cd_nom FROM taxonomie.taxref WHERE cd_ref = cd
UNION
SELECT cd_nom FROM taxonomie.taxref WHERE id_rang = 'SSES' AND cd_taxsup = cd
UNION
SELECT cd_nom FROM taxonomie.taxref WHERE id_rang = 'VAR' AND cd_taxsup = cd
UNION
SELECT cd_nom FROM taxonomie.taxref WHERE id_rang = 'CVAR' AND cd_taxsup = cd
UNION
SELECT cd_nom FROM taxonomie.taxref WHERE id_rang = 'VAR' AND cd_taxsup IN (SELECT cd_nom FROM taxonomie.taxref WHERE id_rang = 'SSES' AND cd_taxsup = cd)
UNION
SELECT cd_nom FROM taxonomie.taxref WHERE id_rang = 'CVAR' AND cd_taxsup IN (SELECT cd_nom FROM taxonomie.taxref WHERE id_rang = 'SSES' AND cd_taxsup = cd)
) a;
ELSE
SELECT INTO r cd_ref FROM taxonomie.taxref WHERE cd_nom = id;
END IF;
return r;
END;
$BODY$
LANGUAGE plpgsql VOLATILE
COST 100;
ALTER FUNCTION public.application_aggregate_taxons_all_rang_sp(integer)
OWNER TO geonatuser;
--------------------------------
-- GUEST
--------------------------------
-- procedure to get an iso's default boot device if not the default setting
-- Returns the ISO's own boot device when it is set (> 0), otherwise the
-- global default from the settings table.
DELIMITER //
CREATE PROCEDURE get_iso_boot_device_id(IN iso_id BIGINT, OUT boot_device_id_out TINYINT)
BEGIN
    DECLARE default_device, iso_device TINYINT;

    -- Global default boot device.
    SELECT guest_default_boot_device_id INTO default_device FROM settings LIMIT 1;
    -- Per-ISO override, if any.
    SELECT boot_device_id INTO iso_device FROM iso WHERE id = iso_id;
    -- Prefer the ISO-specific device when set; NULL/0 falls through to default.
    SET boot_device_id_out = IF(iso_device > 0, iso_device, default_device);
END //
DELIMITER ;
-- procedure to get an iso's default nic driver if not the default setting
-- Returns the NIC driver of the ISO attached to the guest when it is set
-- (> 0), otherwise the global default from the settings table.
DELIMITER //
CREATE PROCEDURE get_iso_nic_driver_id(IN guest_id BIGINT, OUT nic_driver_id_out TINYINT)
BEGIN
    DECLARE setting, current TINYINT;

    -- Global default NIC driver.
    SELECT guest_default_nic_driver_id INTO setting FROM settings LIMIT 1;
    -- Driver of the ISO attached to this guest. FIX: the implicit
    -- INNER JOIN ... WHERE cross-join filter was rewritten as an explicit
    -- INNER JOIN ... ON with the guest filter in WHERE.
    SELECT iso.nic_driver_id INTO current
    FROM iso
    INNER JOIN guest_iso ON guest_iso.iso_id = iso.id
    WHERE guest_iso.guest_id = guest_id;
    -- Prefer the ISO-specific driver when set; NULL/0 falls through to default.
    SET nic_driver_id_out = IF(current > 0, current, setting);
END //
DELIMITER ;
-- create, iso os data
-- Create a guest booted from an ISO, with an OS disk and a separate data disk,
-- attached to a virtual router and given one NIC. Returns the new guest id in next_id.
-- NOTE(review): the @variables are session-scoped; they persist after the call and
-- are visible to everything else on the connection -- DECLAREd locals would be safer.
DELIMITER //
CREATE PROCEDURE create_guest__iso_os_data(IN guest_name VARCHAR(24), IN guest_ram VARCHAR(24), IN virtual_router_id BIGINT, IN iso_id BIGINT, IN os_disk_capacity FLOAT, IN data_disk_capacity FLOAT, OUT next_id BIGINT)
BEGIN
-- read the default settings for guest creation
SELECT create_guest_from_iso_default_machine_state_id FROM settings INTO @machine_state_id;
SELECT create_guest_from_iso_default_machine_substate_id FROM settings INTO @machine_substate_id;
CALL get_iso_boot_device_id(iso_id, @boot_device_id);
-- guest
INSERT INTO guest (name, ram_capacity, machine_state_id, machine_substate_id, boot_device_id) VALUES (guest_name, guest_ram, @machine_state_id, @machine_substate_id, @boot_device_id);
-- LAST_INSERT_ID() must be read immediately after the INSERT it refers to.
SELECT LAST_INSERT_ID() INTO @guest_id;
-- lab
INSERT INTO guest_virtual_router (guest_id, virtual_router_id) VALUES (@guest_id, virtual_router_id);
-- iso
INSERT INTO guest_iso (guest_id, iso_id) VALUES (@guest_id, iso_id);
-- os disk
INSERT INTO vdisk (capacity, primary_flag) VALUES (os_disk_capacity, true);
SELECT LAST_INSERT_ID() INTO @vdisk_id;
INSERT INTO guest_vdisk (guest_id, vdisk_id) VALUES (@guest_id, @vdisk_id);
-- data disk (primary_flag left to its column default)
INSERT INTO vdisk (capacity) VALUES (data_disk_capacity);
SELECT LAST_INSERT_ID() INTO @vdisk_id;
INSERT INTO guest_vdisk (guest_id, vdisk_id) VALUES (@guest_id, @vdisk_id);
-- nic (driver resolved from the ISO, falling back to the global default)
CALL get_iso_nic_driver_id(@guest_id, @nic_driver_id);
CALL create_nic(@nic_driver_id, @nic_id);
INSERT INTO guest_nic (guest_id, nic_id) VALUES (@guest_id, @nic_id);
-- output
SELECT @guest_id INTO next_id;
END //
DELIMITER ;
-- create, iso os
-- Create a guest booted from an ISO with a single OS disk (no data disk),
-- attached to a virtual router and given one NIC. Returns the new guest id in next_id.
-- NOTE(review): uses session-scoped @variables -- see create_guest__iso_os_data.
DELIMITER //
CREATE PROCEDURE create_guest__iso_os(IN guest_name VARCHAR(24), IN guest_ram VARCHAR(24), IN virtual_router_id BIGINT, IN iso_id BIGINT, IN os_disk_capacity FLOAT, OUT next_id BIGINT)
BEGIN
-- read the default settings for guest creation
SELECT create_guest_from_iso_default_machine_state_id FROM settings INTO @machine_state_id;
SELECT create_guest_from_iso_default_machine_substate_id FROM settings INTO @machine_substate_id;
CALL get_iso_boot_device_id(iso_id, @boot_device_id);
-- guest
INSERT INTO guest (name, ram_capacity, machine_state_id, machine_substate_id, boot_device_id) VALUES (guest_name, guest_ram, @machine_state_id, @machine_substate_id, @boot_device_id);
-- LAST_INSERT_ID() must be read immediately after the INSERT it refers to.
SELECT LAST_INSERT_ID() INTO @guest_id;
-- lab
INSERT INTO guest_virtual_router (guest_id, virtual_router_id) VALUES (@guest_id, virtual_router_id);
-- iso
INSERT INTO guest_iso (guest_id, iso_id) VALUES (@guest_id, iso_id);
-- os disk
INSERT INTO vdisk (capacity, primary_flag) VALUES (os_disk_capacity, true);
SELECT LAST_INSERT_ID() INTO @vdisk_id;
INSERT INTO guest_vdisk (guest_id, vdisk_id) VALUES (@guest_id, @vdisk_id);
-- nic (driver resolved from the ISO, falling back to the global default)
CALL get_iso_nic_driver_id(@guest_id, @nic_driver_id);
CALL create_nic(@nic_driver_id, @nic_id);
INSERT INTO guest_nic (guest_id, nic_id) VALUES (@guest_id, @nic_id);
-- output
SELECT @guest_id INTO next_id;
END //
DELIMITER ;
-- create, iso data
-- Create a guest booted from an ISO with a data disk only (no OS disk),
-- attached to a virtual router and given one NIC. Returns the new guest id in next_id.
-- NOTE(review): uses session-scoped @variables -- see create_guest__iso_os_data.
DELIMITER //
CREATE PROCEDURE create_guest__iso_data(IN guest_name VARCHAR(24), IN guest_ram VARCHAR(24), IN virtual_router_id BIGINT, IN iso_id BIGINT, IN data_disk_capacity FLOAT, OUT next_id BIGINT)
BEGIN
-- read the default settings for guest creation
SELECT create_guest_from_iso_default_machine_state_id FROM settings INTO @machine_state_id;
SELECT create_guest_from_iso_default_machine_substate_id FROM settings INTO @machine_substate_id;
CALL get_iso_boot_device_id(iso_id, @boot_device_id);
-- guest
INSERT INTO guest (name, ram_capacity, machine_state_id, machine_substate_id, boot_device_id) VALUES (guest_name, guest_ram, @machine_state_id, @machine_substate_id, @boot_device_id);
-- LAST_INSERT_ID() must be read immediately after the INSERT it refers to.
SELECT LAST_INSERT_ID() INTO @guest_id;
-- lab
INSERT INTO guest_virtual_router (guest_id, virtual_router_id) VALUES (@guest_id, virtual_router_id);
-- iso
INSERT INTO guest_iso (guest_id, iso_id) VALUES (@guest_id, iso_id);
-- data disk (primary_flag left to its column default)
INSERT INTO vdisk (capacity) VALUES (data_disk_capacity);
SELECT LAST_INSERT_ID() INTO @vdisk_id;
INSERT INTO guest_vdisk (guest_id, vdisk_id) VALUES (@guest_id, @vdisk_id);
-- nic (driver resolved from the ISO, falling back to the global default)
CALL get_iso_nic_driver_id(@guest_id, @nic_driver_id);
CALL create_nic(@nic_driver_id, @nic_id);
INSERT INTO guest_nic (guest_id, nic_id) VALUES (@guest_id, @nic_id);
-- output
SELECT @guest_id INTO next_id;
END //
DELIMITER ;
-- create, iso
-- Create a diskless guest booted from an ISO, attached to a virtual router and
-- given one NIC. Returns the new guest id in next_id.
-- NOTE(review): uses session-scoped @variables -- see create_guest__iso_os_data.
DELIMITER //
CREATE PROCEDURE create_guest__iso(IN guest_name VARCHAR(24), IN guest_ram VARCHAR(24), IN virtual_router_id BIGINT, IN iso_id BIGINT, OUT next_id BIGINT)
BEGIN
-- read the default settings for guest creation
SELECT create_guest_from_iso_default_machine_state_id FROM settings INTO @machine_state_id;
SELECT create_guest_from_iso_default_machine_substate_id FROM settings INTO @machine_substate_id;
CALL get_iso_boot_device_id(iso_id, @boot_device_id);
-- guest
INSERT INTO guest (name, ram_capacity, machine_state_id, machine_substate_id, boot_device_id) VALUES (guest_name, guest_ram, @machine_state_id, @machine_substate_id, @boot_device_id);
-- LAST_INSERT_ID() must be read immediately after the INSERT it refers to.
SELECT LAST_INSERT_ID() INTO @guest_id;
-- lab
INSERT INTO guest_virtual_router (guest_id, virtual_router_id) VALUES (@guest_id, virtual_router_id);
-- iso
INSERT INTO guest_iso (guest_id, iso_id) VALUES (@guest_id, iso_id);
-- nic (driver resolved from the ISO, falling back to the global default)
CALL get_iso_nic_driver_id(@guest_id, @nic_driver_id);
CALL create_nic(@nic_driver_id, @nic_id);
INSERT INTO guest_nic (guest_id, nic_id) VALUES (@guest_id, @nic_id);
-- output
SELECT @guest_id INTO next_id;
END //
DELIMITER ;
-- create, template cow data
-- template data, uses template cow data procedure, pass in false as cow_flag_in
-- Create a guest from a template: the OS disk is cloned from the template's
-- vdisk (copy-on-write when cow_flag_in is true), plus a fresh data disk.
-- Returns the new guest id in next_id.
-- NOTE(review): uses session-scoped @variables -- see create_guest__iso_os_data.
-- NOTE(review): the iso/vdisk lookups below use implicit INNER JOIN ... WHERE
-- filters; explicit INNER JOIN ... ON would be clearer.
DELIMITER //
CREATE PROCEDURE create_guest__template_cow_data(IN guest_name VARCHAR(24), IN guest_ram VARCHAR(24), IN virtual_router_id BIGINT, IN template_id_in BIGINT, IN cow_flag_in BOOLEAN, IN data_disk_capacity FLOAT, OUT next_id BIGINT)
BEGIN
-- determine iso id by template id
SELECT iso.id FROM iso INNER JOIN template WHERE (iso.id = template.iso_id) AND template.id = template_id_in INTO @iso_id;
-- determine vdisk capacity, vdisk used by template id
SELECT vdisk.capacity FROM vdisk INNER JOIN template WHERE (vdisk.id = template.vdisk_id) AND template.id = template_id_in INTO @os_capacity;
SELECT vdisk.used FROM vdisk INNER JOIN template WHERE (vdisk.id = template.vdisk_id) AND template.id = template_id_in INTO @os_used;
-- read the default settings for guest creation
SELECT create_guest_from_template_default_machine_state_id FROM settings INTO @machine_state_id;
SELECT create_guest_from_template_default_machine_substate_id FROM settings INTO @machine_substate_id;
CALL get_iso_boot_device_id(@iso_id, @boot_device_id);
-- guest
INSERT INTO guest (name, ram_capacity, machine_state_id, machine_substate_id, boot_device_id) VALUES (guest_name, guest_ram, @machine_state_id, @machine_substate_id, @boot_device_id);
-- LAST_INSERT_ID() must be read immediately after the INSERT it refers to.
SELECT LAST_INSERT_ID() INTO @guest_id;
-- lab
INSERT INTO guest_virtual_router (guest_id, virtual_router_id) VALUES (@guest_id, virtual_router_id);
-- iso
INSERT INTO guest_iso (guest_id, iso_id) VALUES (@guest_id, @iso_id);
-- template
INSERT INTO guest_template (guest_id, template_id) VALUES (@guest_id, template_id_in);
-- os disk (geometry copied from the template's vdisk)
INSERT INTO vdisk (capacity, used, primary_flag, cow_flag) VALUES (@os_capacity, @os_used, true, cow_flag_in);
SELECT LAST_INSERT_ID() INTO @vdisk_id;
INSERT INTO guest_vdisk (guest_id, vdisk_id) VALUES (@guest_id, @vdisk_id);
-- data disk
INSERT INTO vdisk (capacity, primary_flag) VALUES (data_disk_capacity, false);
SELECT LAST_INSERT_ID() INTO @vdisk_id;
INSERT INTO guest_vdisk (guest_id, vdisk_id) VALUES (@guest_id, @vdisk_id);
-- nic (driver resolved from the ISO, falling back to the global default)
CALL get_iso_nic_driver_id(@guest_id, @nic_driver_id);
CALL create_nic(@nic_driver_id, @nic_id);
INSERT INTO guest_nic (guest_id, nic_id) VALUES (@guest_id, @nic_id);
-- output
SELECT @guest_id INTO next_id;
END //
DELIMITER ;
-- create, template cow
-- template, uses template cow procedure, pass in false as cow_flag_in
-- Create a guest from a template: the OS disk is cloned from the template's
-- vdisk (copy-on-write when cow_flag_in is true); no data disk.
-- Returns the new guest id in next_id.
-- NOTE(review): uses session-scoped @variables -- see create_guest__iso_os_data.
-- NOTE(review): the iso/vdisk lookups below use implicit INNER JOIN ... WHERE
-- filters; explicit INNER JOIN ... ON would be clearer.
DELIMITER //
CREATE PROCEDURE create_guest__template_cow(IN guest_name VARCHAR(24), IN guest_ram VARCHAR(24), IN virtual_router_id BIGINT, IN template_id_in BIGINT, IN cow_flag_in BOOLEAN, OUT next_id BIGINT)
BEGIN
-- determine iso id by template id
SELECT iso.id FROM iso INNER JOIN template WHERE (iso.id = template.iso_id) AND template.id = template_id_in INTO @iso_id;
-- determine vdisk capacity, vdisk used by template id
SELECT vdisk.capacity FROM vdisk INNER JOIN template WHERE (vdisk.id = template.vdisk_id) AND template.id = template_id_in INTO @os_capacity;
SELECT vdisk.used FROM vdisk INNER JOIN template WHERE (vdisk.id = template.vdisk_id) AND template.id = template_id_in INTO @os_used;
-- read the default settings for guest creation
SELECT create_guest_from_template_default_machine_state_id FROM settings INTO @machine_state_id;
SELECT create_guest_from_template_default_machine_substate_id FROM settings INTO @machine_substate_id;
CALL get_iso_boot_device_id(@iso_id, @boot_device_id);
-- guest
INSERT INTO guest (name, ram_capacity, machine_state_id, machine_substate_id, boot_device_id) VALUES (guest_name, guest_ram, @machine_state_id, @machine_substate_id, @boot_device_id);
-- LAST_INSERT_ID() must be read immediately after the INSERT it refers to.
SELECT LAST_INSERT_ID() INTO @guest_id;
-- lab
INSERT INTO guest_virtual_router (guest_id, virtual_router_id) VALUES (@guest_id, virtual_router_id);
-- iso
INSERT INTO guest_iso (guest_id, iso_id) VALUES (@guest_id, @iso_id);
-- template
INSERT INTO guest_template (guest_id, template_id) VALUES (@guest_id, template_id_in);
-- os disk (geometry copied from the template's vdisk)
INSERT INTO vdisk (capacity, used, primary_flag, cow_flag) VALUES (@os_capacity, @os_used, true, cow_flag_in);
SELECT LAST_INSERT_ID() INTO @vdisk_id;
INSERT INTO guest_vdisk (guest_id, vdisk_id) VALUES (@guest_id, @vdisk_id);
-- nic (driver resolved from the ISO, falling back to the global default)
CALL get_iso_nic_driver_id(@guest_id, @nic_driver_id);
CALL create_nic(@nic_driver_id, @nic_id);
INSERT INTO guest_nic (guest_id, nic_id) VALUES (@guest_id, @nic_id);
-- output
SELECT @guest_id INTO next_id;
END //
DELIMITER ;
-- read, one
-- Fetch a single guest, its first NIC and first vdisk into OUT parameters, and
-- return the guest's template information as a result set.
-- FIXES over the previous revision: the two dangling `OUT ,` / `OUT` parameters
-- were removed (syntax error), VARCHAR lengths were added (MySQL requires them
-- in routine parameters), stray references to the undeclared `guest_id` were
-- corrected to the `guest_id_in` parameter, and the ISO name is returned via a
-- declared OUT parameter instead of the undeclared `iso_name` variable.
DELIMITER //
CREATE PROCEDURE read_guest__one(
    IN guest_id_in BIGINT,
    OUT name VARCHAR(64),
    OUT state VARCHAR(16),
    OUT substate VARCHAR(16),
    OUT cpu_capacity FLOAT,
    OUT cpu_use FLOAT,
    OUT ram_capacity VARCHAR(24),
    OUT ram_use FLOAT,
    OUT access_port BIGINT,
    OUT boot_device_id TINYINT,
    OUT boot_device_name VARCHAR(16),
    OUT nic_count_out BIGINT,
    OUT nic_id_out BIGINT,
    OUT ip_address_id_out BIGINT,
    OUT ip_address_out VARCHAR(16),
    OUT mac_address_id_out BIGINT,
    OUT mac_address_out VARCHAR(24),
    OUT vswitch_port_id_out BIGINT,
    OUT vswitch_port_out SMALLINT,
    OUT vlan_tag_id_out BIGINT,
    OUT vlan_tag_out SMALLINT,
    OUT nic_driver_id_out TINYINT,
    OUT nic_driver_out VARCHAR(16),
    OUT vdisk_count_out BIGINT,
    OUT vdisk_id_out BIGINT,
    OUT vdisk_capacity_out FLOAT,
    OUT vdisk_used_out FLOAT,
    OUT vdisk_primary_flag_out BOOLEAN,
    OUT vdisk_cow_flag_out BOOLEAN,
    OUT iso_name_out VARCHAR(64)
)
BEGIN
    -- guest: scalar attributes in one pass instead of one SELECT per column
    SELECT guest.name, guest.cpu_capacity, guest.cpu_use, guest.ram_capacity,
           guest.ram_use, guest.access_port_pool_id
    INTO name, cpu_capacity, cpu_use, ram_capacity, ram_use, access_port
    FROM guest
    WHERE guest.id = guest_id_in;
    SELECT machine_state.state INTO state
    FROM guest
    INNER JOIN machine_state ON guest.machine_state_id = machine_state.id
    WHERE guest.id = guest_id_in;
    SELECT machine_substate.state INTO substate
    FROM guest
    INNER JOIN machine_substate ON guest.machine_substate_id = machine_substate.id
    WHERE guest.id = guest_id_in;
    SELECT boot_device.id, boot_device.name INTO boot_device_id, boot_device_name
    FROM guest
    INNER JOIN boot_device ON guest.boot_device_id = boot_device.id
    WHERE guest.id = guest_id_in;
    -- nic (first NIC of the guest)
    CALL read_nic__first(guest_id_in, nic_count_out, nic_id_out, ip_address_id_out, ip_address_out, mac_address_id_out, mac_address_out, vswitch_port_id_out, vswitch_port_out, vlan_tag_id_out, vlan_tag_out, nic_driver_id_out, nic_driver_out);
    -- vdisk (first vdisk of the guest)
    CALL read_vdisk__first(guest_id_in, vdisk_count_out, vdisk_id_out, vdisk_capacity_out, vdisk_used_out, vdisk_primary_flag_out, vdisk_cow_flag_out);
    -- iso
    SELECT iso.name INTO iso_name_out
    FROM iso
    INNER JOIN guest_iso ON guest_iso.iso_id = iso.id
    WHERE guest_iso.guest_id = guest_id_in;
    -- template: returned as a result set to the caller
    SELECT template.name AS template_name, template.iso_id, template.vdisk_id, iso.name AS iso_name
    FROM template
    INNER JOIN guest_template ON guest_template.template_id = template.id
    INNER JOIN iso ON iso.id = template.iso_id
    WHERE guest_template.guest_id = guest_id_in;
END //
DELIMITER ;
-- read, one
-- Fetch a single guest, its first NIC and first vdisk into OUT parameters.
-- NOTE(review): this procedure has the same name as the read_guest__one defined
-- earlier in this file; MySQL forbids two procedures with the same name, so one
-- of the two definitions must be dropped or renamed before this script loads.
-- FIXES over the previous revision: the two dangling `OUT ,` / `OUT` parameters
-- were removed (syntax error), VARCHAR lengths were added, and the disk section
-- (copy-pasted from the hypervisor_server procedure, referencing the undeclared
-- disk_*_out variables and the unrelated hypervisor_server_disk table) was
-- replaced by the first-vdisk helper used elsewhere for guests.
DELIMITER //
CREATE PROCEDURE read_guest__one(
    IN guest_id_in BIGINT,
    OUT name VARCHAR(64),
    OUT state VARCHAR(16),
    OUT substate VARCHAR(16),
    OUT cpu_capacity FLOAT,
    OUT cpu_use FLOAT,
    OUT ram_capacity VARCHAR(24),
    OUT ram_use FLOAT,
    OUT access_port BIGINT,
    OUT boot_device_id TINYINT,
    OUT boot_device_name VARCHAR(16),
    OUT nic_count_out BIGINT,
    OUT nic_id_out BIGINT,
    OUT ip_address_id_out BIGINT,
    OUT ip_address_out VARCHAR(16),
    OUT mac_address_id_out BIGINT,
    OUT mac_address_out VARCHAR(24),
    OUT vswitch_port_id_out BIGINT,
    OUT vswitch_port_out SMALLINT,
    OUT vlan_tag_id_out BIGINT,
    OUT vlan_tag_out SMALLINT,
    OUT nic_driver_id_out TINYINT,
    OUT nic_driver_out VARCHAR(16),
    OUT vdisk_count_out BIGINT,
    OUT vdisk_id_out BIGINT,
    OUT vdisk_capacity_out FLOAT,
    OUT vdisk_used_out FLOAT,
    OUT vdisk_primary_flag_out BOOLEAN,
    OUT vdisk_cow_flag_out BOOLEAN
)
BEGIN
    -- guest: scalar attributes in one pass instead of one SELECT per column.
    -- access_port, boot_device_id and boot_device_name were never set by this
    -- version of the procedure; they remain NULL as before.
    SELECT guest.name, guest.cpu_capacity, guest.cpu_use, guest.ram_capacity, guest.ram_use
    INTO name, cpu_capacity, cpu_use, ram_capacity, ram_use
    FROM guest
    WHERE guest.id = guest_id_in;
    SELECT machine_state.state INTO state
    FROM guest
    INNER JOIN machine_state ON guest.machine_state_id = machine_state.id
    WHERE guest.id = guest_id_in;
    SELECT machine_substate.state INTO substate
    FROM guest
    INNER JOIN machine_substate ON guest.machine_substate_id = machine_substate.id
    WHERE guest.id = guest_id_in;
    -- nic (first NIC of the guest)
    CALL read_nic__first(guest_id_in, nic_count_out, nic_id_out, ip_address_id_out, ip_address_out, mac_address_id_out, mac_address_out, vswitch_port_id_out, vswitch_port_out, vlan_tag_id_out, vlan_tag_out, nic_driver_id_out, nic_driver_out);
    -- vdisk (first vdisk of the guest)
    CALL read_vdisk__first(guest_id_in, vdisk_count_out, vdisk_id_out, vdisk_capacity_out, vdisk_used_out, vdisk_primary_flag_out, vdisk_cow_flag_out);
END //
DELIMITER ;
-- read, all
-- Return one row per (guest, nic, vdisk) combination, with machine state
-- labels, NIC addressing and vdisk geometry resolved through their lookup
-- tables. All joins are inner, so guests lacking a NIC or a vdisk are omitted.
DELIMITER //
CREATE PROCEDURE read_guest__all()
BEGIN
    SELECT
        guest.id,
        guest.name,
        machine_state.state,
        machine_substate.state as substate,
        guest.cpu_capacity,
        guest.cpu_use,
        guest.ram_capacity,
        guest.ram_use,
        nic.id as nic_id,
        nic.ip_address_pool_id,
        ip_address_pool.name as ip_address,
        nic.mac_address_pool_id,
        mac_address_pool.name as mac_address,
        nic.vswitch_port_pool_id,
        vswitch_port_pool.name as vswitch_port,
        nic.vlan_tag_pool_id,
        vlan_tag_pool.name as vlan_tag,
        nic.nic_driver_id,
        nic_driver.name as nic_driver,
        vdisk.id as vdisk_id,
        vdisk.capacity as vdisk_capacity,
        vdisk.used as vdisk_used,
        vdisk.primary_flag as vdisk_primary_flag,
        vdisk.cow_flag as vdisk_cow_flag
    FROM guest
    INNER JOIN machine_state ON machine_state.id = guest.machine_state_id
    INNER JOIN machine_substate ON machine_substate.id = guest.machine_substate_id
    INNER JOIN guest_nic ON guest_nic.guest_id = guest.id
    INNER JOIN nic ON nic.id = guest_nic.nic_id
    INNER JOIN ip_address_pool ON ip_address_pool.id = nic.ip_address_pool_id
    INNER JOIN mac_address_pool ON mac_address_pool.id = nic.mac_address_pool_id
    INNER JOIN vswitch_port_pool ON vswitch_port_pool.id = nic.vswitch_port_pool_id
    INNER JOIN vlan_tag_pool ON vlan_tag_pool.id = nic.vlan_tag_pool_id
    INNER JOIN nic_driver ON nic_driver.id = nic.nic_driver_id
    INNER JOIN guest_vdisk ON guest_vdisk.guest_id = guest.id
    INNER JOIN vdisk ON vdisk.id = guest_vdisk.vdisk_id;
END //
DELIMITER ;
-- delete
-- Delete a guest together with its disks, NICs and the junction rows linking
-- them to the guest.
-- FIXES over the previous revision: the lookup queries filtered on a hardcoded
-- `guest_id = 5` instead of the guest_id_in parameter, and SELECT ... INTO
-- captured only one disk/nic id, leaving extra rows orphaned. Rewritten as
-- multi-table DELETEs so every disk and NIC of the guest is removed.
DELIMITER //
CREATE PROCEDURE delete_guest(
    IN guest_id_in BIGINT
)
BEGIN
    -- child rows first, then the junction rows, then the guest itself
    DELETE disk FROM disk
    INNER JOIN guest_disk ON guest_disk.disk_id = disk.id
    WHERE guest_disk.guest_id = guest_id_in;
    DELETE FROM guest_disk WHERE guest_id = guest_id_in;
    DELETE nic FROM nic
    INNER JOIN guest_nic ON guest_nic.nic_id = nic.id
    WHERE guest_nic.guest_id = guest_id_in;
    DELETE FROM guest_nic WHERE guest_id = guest_id_in;
    DELETE FROM guest WHERE id = guest_id_in;
END //
DELIMITER ;
--------------------------------
-- VIRTUAL ROUTER
--------------------------------
--------------------------------
-- HYPERVISOR SERVER
--------------------------------
-- create
-- Create a hypervisor server row plus an (initially empty) disk and one NIC,
-- using the physical-machine defaults from the settings table. The new server
-- id is returned in hypervisor_server_id_val.
DELIMITER //
CREATE PROCEDURE create_hypervisor_server(
IN name_in VARCHAR(64),
OUT hypervisor_server_id_val BIGINT
)
BEGIN
-- NOTE(review): storage_server_id_val is declared but never used here.
DECLARE machine_state_id_val, machine_substate_id_val, storage_server_id_val, disk_id_val, nic_id_val BIGINT;
DECLARE nic_driver_id_val TINYINT;
-- read the default settings for hypervisor_server creation
SELECT create_physical_default_machine_state_id FROM settings INTO machine_state_id_val;
SELECT create_physical_default_machine_substate_id FROM settings INTO machine_substate_id_val;
SELECT physical_default_nic_driver_id FROM settings INTO nic_driver_id_val;
-- hypervisor_server
INSERT INTO hypervisor_server (name, machine_state_id, machine_substate_id) VALUES (name_in, machine_state_id_val, machine_substate_id_val);
-- LAST_INSERT_ID() must be read immediately after the INSERT it refers to.
SELECT LAST_INSERT_ID() INTO hypervisor_server_id_val;
-- disk (capacity 0 -- presumably the real capacity is discovered later; TODO confirm)
INSERT INTO disk (capacity) VALUES (0);
SELECT LAST_INSERT_ID() INTO disk_id_val;
INSERT INTO hypervisor_server_disk (hypervisor_server_id, disk_id) VALUES (hypervisor_server_id_val, disk_id_val);
-- nic
CALL create_nic(nic_driver_id_val, nic_id_val);
INSERT INTO hypervisor_server_nic (hypervisor_server_id, nic_id) VALUES (hypervisor_server_id_val, nic_id_val);
END //
DELIMITER ;
-- read, one
-- COUNT(*) as nic_count,
-- Fetch a single hypervisor server, its first NIC and its disk into OUT
-- parameters. The seven per-column single-row scans of hypervisor_server were
-- consolidated into one SELECT, and the implicit INNER JOIN ... WHERE filters
-- were rewritten as explicit INNER JOIN ... ON.
DELIMITER //
CREATE PROCEDURE read_hypervisor_server__one(
    IN hypervisor_server_id_in BIGINT,
    OUT name VARCHAR(64),
    OUT state VARCHAR(16),
    OUT substate VARCHAR(16),
    OUT cpu_capacity FLOAT,
    OUT cpu_use FLOAT,
    OUT ram_capacity FLOAT,
    OUT ram_use FLOAT,
    OUT nic_count_out BIGINT,
    OUT nic_id_out BIGINT,
    OUT ip_address_id_out BIGINT,
    OUT ip_address_out VARCHAR(16),
    OUT mac_address_id_out BIGINT,
    OUT mac_address_out VARCHAR(24),
    OUT vswitch_port_id_out BIGINT,
    OUT vswitch_port_out SMALLINT,
    OUT vlan_tag_id_out BIGINT,
    OUT vlan_tag_out SMALLINT,
    OUT nic_driver_id_out TINYINT,
    OUT nic_driver_out VARCHAR(16),
    OUT disk_id_out BIGINT,
    OUT disk_capacity_out FLOAT,
    OUT disk_used_out FLOAT
)
BEGIN
    -- scalar attributes in one pass
    SELECT hypervisor_server.name, hypervisor_server.cpu_capacity, hypervisor_server.cpu_use,
           hypervisor_server.ram_capacity, hypervisor_server.ram_use
    INTO name, cpu_capacity, cpu_use, ram_capacity, ram_use
    FROM hypervisor_server
    WHERE hypervisor_server.id = hypervisor_server_id_in;
    -- state labels
    SELECT machine_state.state INTO state
    FROM hypervisor_server
    INNER JOIN machine_state ON hypervisor_server.machine_state_id = machine_state.id
    WHERE hypervisor_server.id = hypervisor_server_id_in;
    SELECT machine_substate.state INTO substate
    FROM hypervisor_server
    INNER JOIN machine_substate ON hypervisor_server.machine_substate_id = machine_substate.id
    WHERE hypervisor_server.id = hypervisor_server_id_in;
    -- nic (first NIC of the server)
    CALL read_nic__first(hypervisor_server_id_in, nic_count_out, nic_id_out, ip_address_id_out, ip_address_out, mac_address_id_out, mac_address_out, vswitch_port_id_out, vswitch_port_out, vlan_tag_id_out, vlan_tag_out, nic_driver_id_out, nic_driver_out);
    -- disk
    SELECT disk.id, disk.capacity, disk.used INTO disk_id_out, disk_capacity_out, disk_used_out
    FROM disk
    INNER JOIN hypervisor_server_disk ON hypervisor_server_disk.disk_id = disk.id
    WHERE hypervisor_server_disk.hypervisor_server_id = hypervisor_server_id_in;
END //
DELIMITER ;
-- read, all
DELIMITER //
-- List every hypervisor server together with its state names and its
-- linked disk and NIC details (pool ids resolved to display names).
CREATE PROCEDURE read_hypervisor_server__all()
BEGIN
SELECT
    hs.id,
    hs.name,
    ms.state,
    mss.state AS substate,
    hs.cpu_capacity,
    hs.cpu_use,
    hs.ram_capacity,
    hs.ram_use,
    d.id AS disk_id,
    d.capacity AS disk_capacity,
    d.used AS disk_used,
    n.id AS nic_id,
    n.ip_address_pool_id,
    ipp.name AS ip_address,
    n.mac_address_pool_id,
    mpp.name AS mac_address,
    n.vswitch_port_pool_id,
    vspp.name AS vswitch_port,
    n.vlan_tag_pool_id,
    vtp.name AS vlan_tag,
    n.nic_driver_id,
    nd.name AS nic_driver
FROM hypervisor_server AS hs
INNER JOIN machine_state AS ms ON ms.id = hs.machine_state_id
INNER JOIN machine_substate AS mss ON mss.id = hs.machine_substate_id
INNER JOIN hypervisor_server_disk AS hsd ON hsd.hypervisor_server_id = hs.id
INNER JOIN disk AS d ON d.id = hsd.disk_id
INNER JOIN hypervisor_server_nic AS hsn ON hsn.hypervisor_server_id = hs.id
INNER JOIN nic AS n ON n.id = hsn.nic_id
INNER JOIN ip_address_pool AS ipp ON ipp.id = n.ip_address_pool_id
INNER JOIN mac_address_pool AS mpp ON mpp.id = n.mac_address_pool_id
INNER JOIN vswitch_port_pool AS vspp ON vspp.id = n.vswitch_port_pool_id
INNER JOIN vlan_tag_pool AS vtp ON vtp.id = n.vlan_tag_pool_id
INNER JOIN nic_driver AS nd ON nd.id = n.nic_driver_id
;
END //
DELIMITER ;
-- update, agent interface
DELIMITER //
-- Agent-reported stats update: refreshes CPU/RAM figures on the hypervisor
-- server row and capacity/usage on its linked disk row.
CREATE PROCEDURE update_hypervisor_server__agent(
IN id_in BIGINT,
IN cpu_capacity_in FLOAT,
IN cpu_use_in FLOAT,
IN ram_capacity_in FLOAT,
IN ram_use_in FLOAT,
IN disk_capacity_in FLOAT,
IN disk_used_in FLOAT
)
BEGIN
DECLARE disk_id_val BIGINT;
-- Resolve the server's disk id. NOTE(review): SELECT ... INTO errors if the
-- server ever links more than one disk — a 1:1 relation is assumed.
SELECT disk.id INTO disk_id_val FROM disk
INNER JOIN hypervisor_server_disk ON hypervisor_server_disk.disk_id = disk.id
INNER JOIN hypervisor_server ON hypervisor_server_disk.hypervisor_server_id = hypervisor_server.id
AND hypervisor_server.id = id_in;
UPDATE disk SET disk.capacity = disk_capacity_in, disk.used = disk_used_in WHERE disk.id = disk_id_val;
UPDATE hypervisor_server SET cpu_capacity = cpu_capacity_in, cpu_use = cpu_use_in, ram_capacity = ram_capacity_in, ram_use = ram_use_in WHERE id = id_in;
END //
DELIMITER ;
-- delete
DELIMITER //
-- Delete a hypervisor server together with its disk, NIC, and link rows.
-- BUG FIX: the disk and NIC lookups used a hard-coded id of 5 instead of the
-- hypervisor_server_id_in parameter, so the wrong server's disk and NIC rows
-- were located (and then deleted).
-- NOTE(review): the NIC is deleted directly rather than via release_nic, so
-- its pooled IP/MAC/port entries stay marked taken — confirm intent.
CREATE PROCEDURE delete_hypervisor_server(
IN hypervisor_server_id_in BIGINT
)
BEGIN
DECLARE disk_id_val, nic_id_val BIGINT;
-- Resolve the owned disk and NIC ids for this server.
SELECT hypervisor_server_disk.disk_id INTO disk_id_val FROM hypervisor_server_disk INNER JOIN disk ON hypervisor_server_disk.disk_id = disk.id AND hypervisor_server_id = hypervisor_server_id_in;
SELECT hypervisor_server_nic.nic_id INTO nic_id_val FROM hypervisor_server_nic INNER JOIN nic ON hypervisor_server_nic.nic_id = nic.id AND hypervisor_server_id = hypervisor_server_id_in;
DELETE FROM disk WHERE id = disk_id_val;
DELETE FROM hypervisor_server_disk WHERE hypervisor_server_id = hypervisor_server_id_in;
DELETE FROM nic WHERE id = nic_id_val;
DELETE FROM hypervisor_server_nic WHERE hypervisor_server_id = hypervisor_server_id_in;
DELETE FROM hypervisor_server WHERE id = hypervisor_server_id_in;
END //
DELIMITER ;
--------------------------------
-- STORAGE SERVER
--------------------------------
-- create
DELIMITER //
-- Create a storage server with the default machine state, an empty disk
-- (the agent reports real capacity later), and one NIC.
-- BUG FIX: storage_server_id_val was also DECLAREd as a local variable,
-- shadowing the OUT parameter of the same name, so callers never received
-- the newly created server id.
CREATE PROCEDURE create_storage_server(
IN name_in VARCHAR(64),
OUT storage_server_id_val BIGINT
)
BEGIN
DECLARE machine_state_id_val, machine_substate_id_val, disk_id_val, nic_id_val BIGINT;
DECLARE nic_driver_id_val TINYINT;
-- read the default settings for storage_server creation
SELECT create_physical_default_machine_state_id FROM settings INTO machine_state_id_val;
SELECT create_physical_default_machine_substate_id FROM settings INTO machine_substate_id_val;
SELECT physical_default_nic_driver_id FROM settings INTO nic_driver_id_val;
-- storage_server
INSERT INTO storage_server (name, machine_state_id, machine_substate_id) VALUES (name_in, machine_state_id_val, machine_substate_id_val);
SELECT LAST_INSERT_ID() INTO storage_server_id_val;
-- disk
INSERT INTO disk (capacity) VALUES (0);
SELECT LAST_INSERT_ID() INTO disk_id_val;
INSERT INTO storage_server_disk (storage_server_id, disk_id) VALUES (storage_server_id_val, disk_id_val);
-- nic
CALL create_nic(nic_driver_id_val, nic_id_val);
INSERT INTO storage_server_nic (storage_server_id, nic_id) VALUES (storage_server_id_val, nic_id_val);
END //
DELIMITER ;
-- read, one
-- COUNT(*) as nic_count,
DELIMITER //
-- Fetch one storage server's scalar stats into OUT parameters, plus its
-- first NIC (via read_nic__first) and its single disk row.
-- NOTE(review): every scalar is fetched with its own SELECT ... INTO; one
-- joined query could populate them in a single pass.
CREATE PROCEDURE read_storage_server__one(
IN storage_server_id_in BIGINT,
OUT name VARCHAR(64),
OUT state VARCHAR(16),
OUT substate VARCHAR(16),
OUT cpu_capacity FLOAT,
OUT cpu_use FLOAT,
OUT ram_capacity FLOAT,
OUT ram_use FLOAT,
OUT volume_capacity FLOAT,
OUT volume_use FLOAT,
OUT nic_count_out BIGINT,
OUT nic_id_out BIGINT,
OUT ip_address_id_out BIGINT,
OUT ip_address_out VARCHAR(16),
OUT mac_address_id_out BIGINT,
OUT mac_address_out VARCHAR(24),
OUT vswitch_port_id_out BIGINT,
OUT vswitch_port_out SMALLINT,
OUT vlan_tag_id_out BIGINT,
OUT vlan_tag_out SMALLINT,
OUT nic_driver_id_out TINYINT,
OUT nic_driver_out VARCHAR(16),
OUT disk_id_out BIGINT,
OUT disk_capacity_out FLOAT,
OUT disk_used_out FLOAT
)
BEGIN
SELECT storage_server.name FROM storage_server WHERE storage_server.id = storage_server_id_in INTO name;
-- State names are resolved through the machine_state / machine_substate lookup tables.
SELECT machine_state.state FROM storage_server INNER JOIN machine_state WHERE (storage_server.machine_state_id = machine_state.id)
AND storage_server.id = storage_server_id_in INTO state;
SELECT machine_substate.state FROM storage_server INNER JOIN machine_substate WHERE (storage_server.machine_substate_id = machine_substate.id)
AND storage_server.id = storage_server_id_in INTO substate;
SELECT storage_server.cpu_capacity FROM storage_server WHERE storage_server.id = storage_server_id_in INTO cpu_capacity;
SELECT storage_server.cpu_use FROM storage_server WHERE storage_server.id = storage_server_id_in INTO cpu_use;
SELECT storage_server.ram_capacity FROM storage_server WHERE storage_server.id = storage_server_id_in INTO ram_capacity;
SELECT storage_server.ram_use FROM storage_server WHERE storage_server.id = storage_server_id_in INTO ram_use;
SELECT storage_server.volume_capacity FROM storage_server WHERE storage_server.id = storage_server_id_in INTO volume_capacity;
SELECT storage_server.volume_use FROM storage_server WHERE storage_server.id = storage_server_id_in INTO volume_use;
-- nic. NOTE(review): read_nic__first filters guest_nic by guest_id, but here
-- it is given a storage_server id; storage NICs live in storage_server_nic —
-- confirm this call returns the intended rows.
CALL read_nic__first(storage_server_id_in, nic_count_out, nic_id_out, ip_address_id_out, ip_address_out, mac_address_id_out, mac_address_out, vswitch_port_id_out,
vswitch_port_out, vlan_tag_id_out, vlan_tag_out, nic_driver_id_out, nic_driver_out);
-- disk. NOTE(review): SELECT ... INTO errors on multiple linked disks; a
-- 1:1 relation is assumed.
SELECT disk.id, disk.capacity, disk.used INTO disk_id_out, disk_capacity_out, disk_used_out FROM disk INNER JOIN storage_server_disk ON storage_server_disk.disk_id = disk.id
AND storage_server_disk.storage_server_id = storage_server_id_in;
END //
DELIMITER ;
-- read, all
DELIMITER //
-- List every storage server together with its state names, volume totals,
-- and its linked disk and NIC details (pool ids resolved to display names).
CREATE PROCEDURE read_storage_server__all()
BEGIN
SELECT
    ss.id,
    ss.name,
    ms.state,
    mss.state AS substate,
    ss.cpu_capacity,
    ss.cpu_use,
    ss.ram_capacity,
    ss.ram_use,
    d.id AS disk_id,
    d.capacity AS disk_capacity,
    d.used AS disk_used,
    ss.volume_capacity,
    ss.volume_use,
    n.id AS nic_id,
    n.ip_address_pool_id,
    ipp.name AS ip_address,
    n.mac_address_pool_id,
    mpp.name AS mac_address,
    n.vswitch_port_pool_id,
    vspp.name AS vswitch_port,
    n.vlan_tag_pool_id,
    vtp.name AS vlan_tag,
    n.nic_driver_id,
    nd.name AS nic_driver
FROM storage_server AS ss
INNER JOIN machine_state AS ms ON ms.id = ss.machine_state_id
INNER JOIN machine_substate AS mss ON mss.id = ss.machine_substate_id
INNER JOIN storage_server_disk AS ssd ON ssd.storage_server_id = ss.id
INNER JOIN disk AS d ON d.id = ssd.disk_id
INNER JOIN storage_server_nic AS ssn ON ssn.storage_server_id = ss.id
INNER JOIN nic AS n ON n.id = ssn.nic_id
INNER JOIN ip_address_pool AS ipp ON ipp.id = n.ip_address_pool_id
INNER JOIN mac_address_pool AS mpp ON mpp.id = n.mac_address_pool_id
INNER JOIN vswitch_port_pool AS vspp ON vspp.id = n.vswitch_port_pool_id
INNER JOIN vlan_tag_pool AS vtp ON vtp.id = n.vlan_tag_pool_id
INNER JOIN nic_driver AS nd ON nd.id = n.nic_driver_id
;
END //
DELIMITER ;
-- update, agent interface
DELIMITER //
-- Agent-reported stats update: refreshes CPU/RAM/volume figures on the
-- storage server row and capacity/usage on its linked disk row.
CREATE PROCEDURE update_storage_server__agent(
IN id_in BIGINT,
IN cpu_capacity_in FLOAT,
IN cpu_use_in FLOAT,
IN ram_capacity_in FLOAT,
IN ram_use_in FLOAT,
IN disk_capacity_in FLOAT,
IN disk_used_in FLOAT,
IN volume_capacity_in FLOAT,
IN volume_use_in FLOAT
)
BEGIN
DECLARE disk_id_val BIGINT;
-- Resolve the server's disk id. NOTE(review): SELECT ... INTO errors if the
-- server ever links more than one disk — a 1:1 relation is assumed.
SELECT disk.id INTO disk_id_val FROM disk
INNER JOIN storage_server_disk ON storage_server_disk.disk_id = disk.id
INNER JOIN storage_server ON storage_server_disk.storage_server_id = storage_server.id
AND storage_server.id = id_in;
UPDATE disk SET disk.capacity = disk_capacity_in, disk.used = disk_used_in WHERE disk.id = disk_id_val;
UPDATE storage_server SET cpu_capacity = cpu_capacity_in, cpu_use = cpu_use_in, ram_capacity = ram_capacity_in, ram_use = ram_use_in, volume_capacity = volume_capacity_in, volume_use = volume_use_in WHERE id = id_in;
END //
DELIMITER ;
-- delete
DELIMITER //
-- Delete a storage server together with its disk, NIC, and link rows.
-- BUG FIX: the disk and NIC lookups used a hard-coded id of 5 instead of the
-- storage_server_id_in parameter, so the wrong server's disk and NIC rows
-- were located (and then deleted).
-- NOTE(review): the NIC is deleted directly rather than via release_nic, so
-- its pooled IP/MAC/port entries stay marked taken — confirm intent.
CREATE PROCEDURE delete_storage_server(
IN storage_server_id_in BIGINT
)
BEGIN
DECLARE disk_id_val, nic_id_val BIGINT;
-- Resolve the owned disk and NIC ids for this server.
SELECT storage_server_disk.disk_id INTO disk_id_val FROM storage_server_disk INNER JOIN disk ON storage_server_disk.disk_id = disk.id AND storage_server_id = storage_server_id_in;
SELECT storage_server_nic.nic_id INTO nic_id_val FROM storage_server_nic INNER JOIN nic ON storage_server_nic.nic_id = nic.id AND storage_server_id = storage_server_id_in;
DELETE FROM disk WHERE id = disk_id_val;
DELETE FROM storage_server_disk WHERE storage_server_id = storage_server_id_in;
DELETE FROM nic WHERE id = nic_id_val;
DELETE FROM storage_server_nic WHERE storage_server_id = storage_server_id_in;
DELETE FROM storage_server WHERE id = storage_server_id_in;
END //
DELIMITER ;
--------------------------------
-- NIC
--------------------------------
-- create
DELIMITER //
-- Allocate a NIC: claims an IP address, MAC address, and vswitch port from
-- their pools, tags it with the default guest VLAN, and returns the new nic
-- row id through next_id.
-- FIX: uses DECLAREd local variables instead of session user variables
-- (@ip, @mac, @vswitch, @vlan, @driver), which persist across calls on the
-- same connection and can leak stale state; the copy of the nic_driver_id
-- parameter into @driver was also redundant.
CREATE PROCEDURE create_nic(IN nic_driver_id TINYINT, OUT next_id BIGINT)
BEGIN
DECLARE ip_id_val, mac_id_val, vswitch_id_val, vlan_id_val BIGINT;
SELECT settings.guest_default_vlan_id INTO vlan_id_val FROM settings;
CALL get_ip_address(ip_id_val);
CALL get_mac_address(mac_id_val);
CALL get_vswitch_port(vswitch_id_val);
INSERT INTO nic (ip_address_pool_id, mac_address_pool_id, vswitch_port_pool_id, vlan_tag_pool_id, nic_driver_id) VALUES (ip_id_val, mac_id_val, vswitch_id_val, vlan_id_val, nic_driver_id);
SELECT LAST_INSERT_ID() INTO next_id;
END //
DELIMITER ;
-- release
DELIMITER //
-- Return a NIC's pooled resources (IP, MAC, vswitch port) to their pools,
-- then remove the nic row and any guest linkage referencing it.
CREATE PROCEDURE release_nic(IN nic_id_in BIGINT)
BEGIN
DECLARE ip_pool_id, mac_pool_id, vswitch_pool_id BIGINT;
SELECT ip_address_pool_id, mac_address_pool_id, vswitch_port_pool_id
INTO ip_pool_id, mac_pool_id, vswitch_pool_id
FROM nic
WHERE id = nic_id_in;
CALL release_ip_address(ip_pool_id);
CALL release_mac_address(mac_pool_id);
CALL release_vswitch_port(vswitch_pool_id);
DELETE FROM nic WHERE nic.id = nic_id_in;
DELETE FROM guest_nic WHERE nic_id = nic_id_in;
END //
DELIMITER ;
-- add
DELIMITER //
-- Give an existing guest one additional NIC, reusing the driver of one of
-- its current NICs.
CREATE PROCEDURE add_nic(IN guest_id_in BIGINT)
BEGIN
DECLARE driver_id_val TINYINT;
DECLARE nic_id_val BIGINT;
-- Copy the driver id from an arbitrary existing NIC of this guest.
SELECT nic.nic_driver_id INTO driver_id_val
FROM guest_nic
INNER JOIN nic ON guest_nic.nic_id = nic.id
WHERE guest_nic.guest_id = guest_id_in
LIMIT 1;
CALL create_nic(driver_id_val, nic_id_val);
INSERT INTO guest_nic (guest_id, nic_id) VALUES (guest_id_in, nic_id_val);
END //
DELIMITER ;
-- read, first
DELIMITER //
-- Return the guest's total NIC count plus the full details (pool ids and
-- resolved display names) of an arbitrary first NIC.
CREATE PROCEDURE read_nic__first(
IN guest_id_in BIGINT,
OUT nic_count_out BIGINT,
OUT nic_id_out BIGINT,
OUT ip_address_id_out BIGINT,
OUT ip_address_out VARCHAR(16),
OUT mac_address_id_out BIGINT,
OUT mac_address_out VARCHAR(24),
OUT vswitch_port_id_out BIGINT,
OUT vswitch_port_out SMALLINT,
OUT vlan_tag_id_out BIGINT,
OUT vlan_tag_out SMALLINT,
OUT nic_driver_id_out TINYINT,
OUT nic_driver_out VARCHAR(16)
)
BEGIN
SELECT COUNT(*) FROM guest_nic WHERE (guest_id = guest_id_in) INTO nic_count_out;
-- Single-row fetch of the first NIC; the guest filter rides in the last
-- join's ON clause (equivalent to a WHERE for these inner joins), and
-- LIMIT 1 keeps SELECT ... INTO from failing on multi-NIC guests.
SELECT
nic.id,
nic.ip_address_pool_id,
ip_address_pool.name,
nic.mac_address_pool_id,
mac_address_pool.name,
nic.vswitch_port_pool_id,
vswitch_port_pool.name,
nic.vlan_tag_pool_id,
vlan_tag_pool.name,
nic.nic_driver_id,
nic_driver.name
INTO
nic_id_out,
ip_address_id_out,
ip_address_out,
mac_address_id_out,
mac_address_out,
vswitch_port_id_out,
vswitch_port_out,
vlan_tag_id_out,
vlan_tag_out,
nic_driver_id_out,
nic_driver_out
FROM nic
INNER JOIN guest_nic ON guest_nic.nic_id = nic.id
INNER JOIN nic_driver ON nic_driver.id = nic.nic_driver_id
INNER JOIN ip_address_pool ON ip_address_pool.id = nic.ip_address_pool_id
INNER JOIN mac_address_pool ON mac_address_pool.id = nic.mac_address_pool_id
INNER JOIN vswitch_port_pool ON vswitch_port_pool.id = nic.vswitch_port_pool_id
INNER JOIN vlan_tag_pool ON vlan_tag_pool.id = nic.vlan_tag_pool_id
AND guest_nic.guest_id = guest_id_in
LIMIT 1;
END //
DELIMITER ;
-- read, all (all which belong to a machine id)
DELIMITER //
-- List every NIC attached to the given guest, with each pool id resolved
-- to its display name.
CREATE PROCEDURE read_nic__all(IN guest_id_in BIGINT)
BEGIN
SELECT
nic.id,
nic.ip_address_pool_id,
ip_address_pool.name,
nic.mac_address_pool_id,
mac_address_pool.name,
nic.vswitch_port_pool_id,
vswitch_port_pool.name,
nic.vlan_tag_pool_id,
vlan_tag_pool.name,
nic.nic_driver_id,
nic_driver.name
FROM guest_nic
INNER JOIN nic ON nic.id = guest_nic.nic_id
INNER JOIN nic_driver ON nic_driver.id = nic.nic_driver_id
INNER JOIN ip_address_pool ON ip_address_pool.id = nic.ip_address_pool_id
INNER JOIN mac_address_pool ON mac_address_pool.id = nic.mac_address_pool_id
INNER JOIN vswitch_port_pool ON vswitch_port_pool.id = nic.vswitch_port_pool_id
INNER JOIN vlan_tag_pool ON vlan_tag_pool.id = nic.vlan_tag_pool_id
WHERE guest_nic.guest_id = guest_id_in;
END //
DELIMITER ;
--------------------------------
-- PNIC
--------------------------------
--------------------------------
-- DISK
--------------------------------
--------------------------------
-- VDISK
--------------------------------
-- add
DELIMITER //
-- Attach a new virtual disk of the requested capacity to a guest.
CREATE PROCEDURE add_vdisk(IN guest_id_in BIGINT, IN disk_capacity_in FLOAT, primary_flag_in BOOLEAN, cow_flag_in BOOLEAN)
BEGIN
DECLARE new_vdisk_id BIGINT;
INSERT INTO vdisk (capacity, primary_flag, cow_flag)
VALUES (disk_capacity_in, primary_flag_in, cow_flag_in);
SET new_vdisk_id = LAST_INSERT_ID();
INSERT INTO guest_vdisk (guest_id, vdisk_id) VALUES (guest_id_in, new_vdisk_id);
END //
DELIMITER ;
-- destroy
DELIMITER //
-- Remove a virtual disk row and its guest linkage.
CREATE PROCEDURE destroy_vdisk(IN vdisk_id_in BIGINT)
BEGIN
DELETE FROM guest_vdisk WHERE guest_vdisk.vdisk_id = vdisk_id_in;
DELETE FROM vdisk WHERE vdisk.id = vdisk_id_in;
END //
DELIMITER ;
-- read, first
DELIMITER //
-- Return the guest's total vdisk count plus the details of its primary
-- vdisk (primary_flag = true).
-- NOTE(review): SELECT ... INTO errors if a guest has more than one row
-- with primary_flag = true — a single primary disk is assumed.
CREATE PROCEDURE read_vdisk__first(IN guest_id_in BIGINT, OUT vdisk_count_out BIGINT, OUT vdisk_id_out BIGINT, OUT vdisk_capacity_out FLOAT, OUT vdisk_used_out FLOAT,
OUT vdisk_primary_flag_out BOOLEAN, OUT vdisk_cow_flag_out BOOLEAN)
BEGIN
-- Count covers ALL vdisks of the guest, not just the primary one.
SELECT COUNT(*) FROM guest_vdisk WHERE (guest_id = guest_id_in) INTO vdisk_count_out;
SELECT
vdisk.id,
vdisk.capacity,
vdisk.used,
vdisk.primary_flag,
vdisk.cow_flag
INTO
vdisk_id_out,
vdisk_capacity_out,
vdisk_used_out,
vdisk_primary_flag_out,
vdisk_cow_flag_out
FROM vdisk
INNER JOIN guest_vdisk
WHERE guest_vdisk.vdisk_id = vdisk.id
AND vdisk.primary_flag = true
AND guest_vdisk.guest_id = guest_id_in;
END //
DELIMITER ;
-- read, all (all which belong to a machine id)
DELIMITER //
-- List every virtual disk attached to the given guest.
CREATE PROCEDURE read_vdisk__all(IN guest_id_in BIGINT)
BEGIN
SELECT
vdisk.id,
vdisk.capacity,
vdisk.used,
vdisk.primary_flag,
vdisk.cow_flag
FROM guest_vdisk
INNER JOIN vdisk ON vdisk.id = guest_vdisk.vdisk_id
WHERE guest_vdisk.guest_id = guest_id_in;
END //
DELIMITER ;
--------------------------------
-- SNAPSHOT
--------------------------------
-- add
DELIMITER //
-- NOTE(review): this body is byte-for-byte the same as add_vdisk — it
-- inserts into vdisk and guest_vdisk and never touches a snapshot table.
-- Confirm snapshots are intentionally modeled as vdisk rows (the commented
-- queries below reference a vdisk_snapshot table that is never written).
CREATE PROCEDURE add_snapshot(IN guest_id_in BIGINT, IN disk_capacity_in FLOAT, primary_flag_in BOOLEAN, cow_flag_in BOOLEAN)
BEGIN
DECLARE vdisk_id_val BIGINT;
INSERT INTO vdisk (capacity, primary_flag, cow_flag) VALUES (disk_capacity_in, primary_flag_in, cow_flag_in);
SELECT LAST_INSERT_ID() INTO vdisk_id_val;
INSERT INTO guest_vdisk (guest_id, vdisk_id) VALUES (guest_id_in, vdisk_id_val);
END //
DELIMITER ;
-- destroy
DELIMITER //
-- Remove a snapshot's vdisk row and its guest linkage.
-- BUG FIX: the body referenced vdisk_id_in, which is not declared in this
-- procedure (the parameter is snapshot_id_in), so every call failed with
-- an unknown-variable error.
-- NOTE(review): like add_snapshot, this operates on the vdisk tables rather
-- than a snapshot table — confirm that is the intended model.
CREATE PROCEDURE destroy_snapshot(IN snapshot_id_in BIGINT)
BEGIN
DELETE FROM vdisk WHERE vdisk.id = snapshot_id_in;
DELETE FROM guest_vdisk WHERE guest_vdisk.vdisk_id = snapshot_id_in;
END //
DELIMITER ;
-- read, first
--SELECT COUNT(*) FROM vdisk_snapshot WHERE (vdisk_id = vdisk_id_in) INTO snapshot_count;
-- sub proc or php query --SELECT snapshot.id AS snapshot_id, snapshot.name, snapshot.snapshot_id AS parent_snapshot FROM snapshot INNER JOIN vdisk_snapshot WHERE (vdisk_snapshot.snapshot_id = snapshot.id) AND vdisk_snapshot.vdisk_id = guest_id_in;
-- read, all
--------------------------------
-- ACCESS PORT POOL
--------------------------------
-- get
DELIMITER //
-- Claim the first free access port from the pool and return its id.
-- NOTE(review): the SELECT and UPDATE are not atomic — two concurrent
-- callers can claim the same port; next_id stays NULL when the pool is
-- exhausted. Consider SELECT ... FOR UPDATE or a single guarded UPDATE.
CREATE PROCEDURE get_access_port(OUT next_id BIGINT)
BEGIN
SELECT id FROM access_port_pool WHERE (taken_flag = 0) LIMIT 1 INTO next_id;
UPDATE access_port_pool SET taken_flag = 1 WHERE (id = next_id);
END //
DELIMITER ;
-- release
DELIMITER //
-- Mark a previously claimed access port as free again.
CREATE PROCEDURE release_access_port(IN current_id BIGINT)
BEGIN
UPDATE access_port_pool SET taken_flag = 0 WHERE (id = current_id);
END //
DELIMITER ;
--------------------------------
-- IP ADDRESS POOL
--------------------------------
-- get
DELIMITER //
-- Claim the first free IP address from the pool and return its pool-row id.
CREATE PROCEDURE get_ip_address(OUT next_id BIGINT)
BEGIN
SELECT id INTO next_id FROM ip_address_pool WHERE taken_flag = 0 LIMIT 1;
UPDATE ip_address_pool SET taken_flag = 1 WHERE id = next_id;
END //
DELIMITER ;
-- release
DELIMITER //
-- Mark a previously claimed IP address as free again.
CREATE PROCEDURE release_ip_address(IN current_id BIGINT)
BEGIN
UPDATE ip_address_pool SET taken_flag = 0 WHERE id = current_id;
END //
DELIMITER ;
--------------------------------
-- MAC ADDRESS POOL
--------------------------------
-- function to generate
DELIMITER //
-- Build a random MAC address string of the form "aa:bb:cc:dd:ee:ff".
-- BUG FIX: FLOOR(RAND() * (256 - 0 + 1)) produced values 0..256 — one too
-- many, and 256 hexes to '100' which LPAD then truncates to the bogus octet
-- '10'. FLOOR(RAND() * 256) gives the correct 0..255 range per octet.
-- Also replaced session user variables (@a..@f) with a single expression so
-- no state leaks across calls on the connection.
CREATE FUNCTION generate_mac_address() RETURNS varchar(24) NOT DETERMINISTIC
BEGIN
DECLARE newmac varchar(24);
SET newmac = CONCAT_WS(':',
LPAD(HEX(FLOOR(RAND() * 256)), 2, '0'),
LPAD(HEX(FLOOR(RAND() * 256)), 2, '0'),
LPAD(HEX(FLOOR(RAND() * 256)), 2, '0'),
LPAD(HEX(FLOOR(RAND() * 256)), 2, '0'),
LPAD(HEX(FLOOR(RAND() * 256)), 2, '0'),
LPAD(HEX(FLOOR(RAND() * 256)), 2, '0'));
RETURN (newmac);
END //
DELIMITER ;
-- get, generating if necessary
DELIMITER //
-- Claim a MAC address pool-row id: reuse a free pooled entry if one exists,
-- otherwise generate a fresh random MAC and insert it as taken.
-- NOTE(review): the SELECT / UPDATE pair is not atomic — two concurrent
-- callers can claim the same pooled entry.
CREATE PROCEDURE get_mac_address(OUT mac_address_id BIGINT)
BEGIN
-- `value` holds the id of an existing free entry, or NULL when the pool is
-- exhausted.
DECLARE value, newmac varchar(24);
SELECT id FROM mac_address_pool WHERE taken_flag = 0 LIMIT 1 INTO value;
IF value IS NULL THEN
SELECT generate_mac_address() INTO newmac;
INSERT INTO mac_address_pool (name, taken_flag) VALUES (newmac, true);
SELECT LAST_INSERT_ID() INTO mac_address_id;
ELSE
UPDATE mac_address_pool SET taken_flag = 1 WHERE (id = value);
SELECT value INTO mac_address_id;
END IF;
END //
DELIMITER ;
-- release
DELIMITER //
-- Mark a pooled MAC address as free again.
CREATE PROCEDURE release_mac_address(IN current_id BIGINT)
BEGIN
UPDATE mac_address_pool SET taken_flag = 0 WHERE id = current_id;
END //
DELIMITER ;
-- create and get a specific mac address
DELIMITER //
-- Insert a caller-supplied MAC address into the pool, already marked taken,
-- and return its new pool-row id.
CREATE PROCEDURE create_mac_address(IN newmac VARCHAR(24), OUT next_id BIGINT)
BEGIN
INSERT INTO mac_address_pool (name, taken_flag) VALUES (newmac, true);
SET next_id = LAST_INSERT_ID();
END //
DELIMITER ;
-- delete a specific mac address
DELIMITER //
-- Permanently remove a MAC address entry from the pool.
CREATE PROCEDURE delete_mac_address(IN current_id BIGINT)
BEGIN
DELETE FROM mac_address_pool WHERE id = current_id;
END //
DELIMITER ;
--------------------------------
-- VSWITCH PORT POOL
--------------------------------
-- get
DELIMITER //
-- Claim the first free vswitch port from the pool and return its id.
-- NOTE(review): the SELECT / UPDATE pair is not atomic — two concurrent
-- callers can claim the same port; next_id stays NULL on pool exhaustion.
CREATE PROCEDURE get_vswitch_port(OUT next_id BIGINT)
BEGIN
SELECT id FROM vswitch_port_pool WHERE (taken_flag = 0) LIMIT 1 INTO next_id;
UPDATE vswitch_port_pool SET taken_flag = 1 WHERE (id = next_id);
END //
DELIMITER ;
-- release
DELIMITER //
-- Mark a previously claimed vswitch port as free again.
CREATE PROCEDURE release_vswitch_port(IN current_id BIGINT)
BEGIN
UPDATE vswitch_port_pool SET taken_flag = 0 WHERE (id = current_id);
END //
DELIMITER ;
--------------------------------
-- VLAN TAG POOL
--------------------------------
-- get
DELIMITER //
-- Claim the first free VLAN tag from the pool and return its pool-row id.
CREATE PROCEDURE get_vlan_tag(OUT next_id BIGINT)
BEGIN
SELECT id INTO next_id FROM vlan_tag_pool WHERE taken_flag = 0 LIMIT 1;
UPDATE vlan_tag_pool SET taken_flag = 1 WHERE id = next_id;
END //
DELIMITER ;
-- release
DELIMITER //
-- Mark a previously claimed VLAN tag as free again.
CREATE PROCEDURE release_vlan_tag(IN current_id BIGINT)
BEGIN
UPDATE vlan_tag_pool SET taken_flag = 0 WHERE id = current_id;
END //
DELIMITER ;
|
-- +migrate Up
-- Add a 'static-key' parameter to the built-in 'Serve Static Files' action
-- and a matching column on workflow_node_run_static_files, so a stable key
-- can pin the generated URL across runs.
INSERT into action_parameter (action_id, name, description, type, value, advanced) values((select id from action where name = 'Serve Static Files' and type = 'Builtin'), 'static-key', 'Indicate a static-key which will be a reference to keep the same generated URL. Example: {{.git.branch}}', 'string', '', true);
ALTER TABLE workflow_node_run_static_files ADD COLUMN static_key TEXT DEFAULT '';
-- +migrate Down
-- Revert: remove the action parameter and drop the column.
DELETE from action_parameter where name = 'static-key' and action_id = (select id from action where name = 'Serve Static Files' and type = 'Builtin');
ALTER TABLE workflow_node_run_static_files DROP COLUMN static_key;
|
--------------------------------------------------------
-- DDL for Synonym OBIS_ECNASAP_DATA
--------------------------------------------------------
-- Expose NWAGSCOL's OBIS_ECNASAP_DATA table under the GROUNDFISH schema.
CREATE OR REPLACE SYNONYM "GROUNDFISH"."OBIS_ECNASAP_DATA" FOR "NWAGSCOL"."OBIS_ECNASAP_DATA";
|
<reponame>CGGTeam/GGFlix<gh_stars>0
-- Film category lookup table; category numbers are assigned manually by the
-- seed inserts that follow.
CREATE TABLE BD5W6_424P.dbo.Categories
(
NoCategorie int PRIMARY KEY NOT NULL,
Description nvarchar(50) NOT NULL
);
-- Seed categories 1-50 in a single multi-row insert (category 51 follows).
INSERT INTO BD5W6_424P.dbo.Categories (NoCategorie, Description) VALUES
(1, 'Action'),
(2, 'Adolescent'),
(3, 'Biographie'),
(4, 'Cape et d''épée'),
(5, 'Catastrophe'),
(6, 'Chronique'),
(7, 'Comédie de moeurs'),
(8, 'Comédie d''horreur'),
(9, 'Comédie dramatique'),
(10, 'Comédie fantaisiste'),
(11, 'Comédie musicale'),
(12, 'Comédie policière'),
(13, 'Comédie satirique'),
(14, 'Comédie sentimentale'),
(15, 'Comédie'),
(16, 'Criminel'),
(17, 'Danse'),
(18, 'Dessins animés'),
(19, 'Documentaire'),
(20, 'Drame de guerre'),
(21, 'Drame de moeurs'),
(22, 'Drame d''horreur'),
(23, 'Drame judiciaire'),
(24, 'Drame musical'),
(25, 'Drame poétique'),
(26, 'Drame policier'),
(27, 'Drame psychologique'),
(28, 'Drame sentimental'),
(29, 'Drame social'),
(30, 'Drame'),
(31, 'Espionnage'),
(32, 'Expérimental'),
(33, 'Fantastique'),
(34, 'Film à sketches'),
(35, 'Film d''animation'),
(36, 'Film d''art martial'),
(37, 'Historique'),
(38, 'Horreur'),
(39, 'Humoristique'),
(40, 'Marionnettes'),
(41, 'Mélodrame'),
(42, 'Musical'),
(43, 'Road movie'),
(44, 'Romantique'),
(45, 'Science-fiction'),
(46, 'Série policière'),
(47, 'Série TV'),
(48, 'Spectacle d''humour'),
(49, 'Spectacle musical'),
(50, 'Suspense');
INSERT INTO BD5W6_424P.dbo.Categories (NoCategorie, Description) VALUES (51, 'Western'); |
<filename>2.sql
-- Rebuild the database from scratch (destructive: drops any existing data).
DROP DATABASE IF EXISTS school_sport_clubs;
CREATE DATABASE school_sport_clubs;
USE school_sport_clubs;
-- Sports on offer (lookup table).
CREATE TABLE school_sport_clubs.sports(
id INT AUTO_INCREMENT PRIMARY KEY ,
name VARCHAR(255) NOT NULL
);
-- Coaches. NOTE(review): "egn" presumably is a 10-digit national identity
-- number (hence UNIQUE) — confirm.
CREATE TABLE school_sport_clubs.coaches(
id INT AUTO_INCREMENT PRIMARY KEY ,
name VARCHAR(255) NOT NULL ,
egn VARCHAR(10) NOT NULL UNIQUE
);
-- Students eligible to join sport groups.
CREATE TABLE school_sport_clubs.students(
id INT AUTO_INCREMENT PRIMARY KEY ,
name VARCHAR(255) NOT NULL ,
egn VARCHAR(10) NOT NULL UNIQUE ,
address VARCHAR(255) NOT NULL ,
phone VARCHAR(20) NULL DEFAULT NULL ,
class VARCHAR(10) NULL DEFAULT NULL
);
-- Training groups: one sport, one coach, at a given location/day/time slot.
-- The UNIQUE key forbids two groups sharing the same slot at one location.
CREATE TABLE school_sport_clubs.sportGroups(
id INT AUTO_INCREMENT PRIMARY KEY ,
location VARCHAR(255) NOT NULL ,
dayOfWeek ENUM('Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday') ,
hourOfTraining TIME NOT NULL ,
sport_id INT NOT NULL ,
coach_id INT NOT NULL ,
UNIQUE KEY(location,dayOfWeek,hourOfTraining) ,
CONSTRAINT FOREIGN KEY(sport_id)
REFERENCES sports(id) ,
CONSTRAINT FOREIGN KEY (coach_id)
REFERENCES coaches(id)
);
-- Membership join table between students and sport groups.
CREATE TABLE school_sport_clubs.student_sport(
student_id INT NOT NULL ,
sportGroup_id INT NOT NULL ,
CONSTRAINT FOREIGN KEY (student_id)
REFERENCES students(id) ,
CONSTRAINT FOREIGN KEY (sportGroup_id)
REFERENCES sportGroups(id) ,
PRIMARY KEY(student_id,sportGroup_id)
);
-- Monthly membership-fee payments per student and group.
-- FIXES: paymentAmount is money, so DECIMAL replaces DOUBLE (binary floats
-- cannot represent currency amounts exactly); the group FK referenced
-- `sportgroups`, which fails on installations with case-sensitive table
-- names since the table is created as `sportGroups`.
CREATE TABLE taxesPayments(
id INT AUTO_INCREMENT PRIMARY KEY,
student_id INT NOT NULL,
group_id INT NOT NULL,
paymentAmount DECIMAL(10,2) NOT NULL,
month TINYINT,
year YEAR,
dateOfPayment DATETIME NOT NULL ,
CONSTRAINT FOREIGN KEY (student_id)
REFERENCES students(id),
CONSTRAINT FOREIGN KEY (group_id)
REFERENCES sportGroups(id)
);
-- Monthly salary payments per coach; at most one payment per coach/month/year.
-- FIX: salaryAmount is money, so DECIMAL replaces double (binary floats
-- cannot represent currency amounts exactly).
CREATE TABLE salaryPayments(
id INT AUTO_INCREMENT PRIMARY KEY,
coach_id INT NOT NULL,
month TINYINT,
year YEAR,
salaryAmount DECIMAL(10,2),
dateOfPayment DATETIME NOT NULL,
CONSTRAINT FOREIGN KEY (coach_id)
REFERENCES coaches(id),
UNIQUE KEY(`coach_id`,`month`,`year`)
);
-- Seed data. FIX: explicit column lists replace positional inserts that
-- passed NULL for the AUTO_INCREMENT id — positional INSERTs break silently
-- if column order ever changes.
INSERT INTO sports (name)
VALUES ('Football') ,
('Volleyball'),
('Tennis');
INSERT INTO coaches (name, egn)
VALUES ('<NAME>', '7509041245') ,
('<NAME>', '8010091245') ,
('<NAME>', '8407106352') ,
('<NAME>', '7010102045') ,
('<NAME>', '8302160980') ,
('<NAME>', '7106041278');
INSERT INTO students (name, egn, address, phone, class)
VALUES ('<NAME>', '9401150045', 'Sofia-Mladost 1', '0893452120', '10') ,
('<NAME>', '9510104512', 'Sofia-Liylin', '0894123456', '11') ,
('<NAME>', '9505052154', 'Sofia-Mladost 3', '0897852412', '11') ,
('<NAME>', '9510104542', 'Sofia-Mladost 3', '0894123457', '11') ,
('<NAME>', '9510104547', 'Sofia-Mladost 4', '0894123442', '11') ,
('<NAME>', '9411104547', 'Sofia-Krasno selo', '0874526235', '10');
-- Seed groups and memberships. FIXES: explicit column lists instead of
-- positional NULL-for-id inserts; the last group row passed its integer
-- foreign keys as quoted strings ('1', '1') — normalized to numeric
-- literals consistent with the other rows.
INSERT INTO sportGroups (location, dayOfWeek, hourOfTraining, sport_id, coach_id)
VALUES ('Sofia-Mladost 1', 'Monday', '08:00:00', 1, 1 ) ,
('Sofia-Mladost 1', 'Monday', '09:30:00', 1, 2 ) ,
('Sofia-Liylin 7', 'Sunday', '08:00:00', 2, 1) ,
('Sofia-Liylin 7', 'Sunday', '09:30:00', 2, 2) ,
('Plovdiv', 'Monday', '12:00:00', 1, 1);
INSERT INTO student_sport (student_id, sportGroup_id)
VALUES (1, 1),
(2, 1),
(3, 1),
(4, 2),
(5, 2),
(6, 2),
(1, 3),
(2, 3),
(3, 3);
-- Monthly fee payments, one row per student/group/month.
-- Assumed column order: (id, student_id, sportGroup_id, amount, month,
-- year, paid_at) -- TODO confirm against the CREATE TABLE. Rows cover the
-- full years 2015 and 2014, plus the first months of 2016.
INSERT INTO `school_sport_clubs`.`taxespayments`
VALUES (NULL, '1', '1', '200', '1', 2015, now()),
(NULL, '1', '1', '200', '2', 2015, now()),
(NULL, '1', '1', '200', '3', 2015, now()),
(NULL, '1', '1', '200', '4', 2015, now()),
(NULL, '1', '1', '200', '5', 2015, now()),
(NULL, '1', '1', '200', '6', 2015, now()),
(NULL, '1', '1', '200', '7', 2015, now()),
(NULL, '1', '1', '200', '8', 2015, now()),
(NULL, '1', '1', '200', '9', 2015, now()),
(NULL, '1', '1', '200', '10', 2015, now()),
(NULL, '1', '1', '200', '11', 2015, now()),
(NULL, '1', '1', '200', '12', 2015, now()),
(NULL, '2', '1', '250', '1', 2015, now()),
(NULL, '2', '1', '250', '2', 2015, now()),
(NULL, '2', '1', '250', '3', 2015, now()),
(NULL, '2', '1', '250', '4', 2015, now()),
(NULL, '2', '1', '250', '5', 2015, now()),
(NULL, '2', '1', '250', '6', 2015, now()),
(NULL, '2', '1', '250', '7', 2015, now()),
(NULL, '2', '1', '250', '8', 2015, now()),
(NULL, '2', '1', '250', '9', 2015, now()),
(NULL, '2', '1', '250', '10', 2015, now()),
(NULL, '2', '1', '250', '11', 2015, now()),
(NULL, '2', '1', '250', '12', 2015, now()),
(NULL, '3', '1', '250', '1', 2015, now()),
(NULL, '3', '1', '250', '2', 2015, now()),
(NULL, '3', '1', '250', '3', 2015, now()),
(NULL, '3', '1', '250', '4', 2015, now()),
(NULL, '3', '1', '250', '5', 2015, now()),
(NULL, '3', '1', '250', '6', 2015, now()),
(NULL, '3', '1', '250', '7', 2015, now()),
(NULL, '3', '1', '250', '8', 2015, now()),
(NULL, '3', '1', '250', '9', 2015, now()),
(NULL, '3', '1', '250', '10', 2015, now()),
(NULL, '3', '1', '250', '11', 2015, now()),
(NULL, '3', '1', '250', '12', 2015, now()),
(NULL, '1', '2', '200', '1', 2015, now()),
(NULL, '1', '2', '200', '2', 2015, now()),
(NULL, '1', '2', '200', '3', 2015, now()),
(NULL, '1', '2', '200', '4', 2015, now()),
(NULL, '1', '2', '200', '5', 2015, now()),
(NULL, '1', '2', '200', '6', 2015, now()),
(NULL, '1', '2', '200', '7', 2015, now()),
(NULL, '1', '2', '200', '8', 2015, now()),
(NULL, '1', '2', '200', '9', 2015, now()),
(NULL, '1', '2', '200', '10', 2015, now()),
(NULL, '1', '2', '200', '11', 2015, now()),
(NULL, '1', '2', '200', '12', 2015, now()),
(NULL, '4', '2', '200', '1', 2015, now()),
(NULL, '4', '2', '200', '2', 2015, now()),
(NULL, '4', '2', '200', '3', 2015, now()),
(NULL, '4', '2', '200', '4', 2015, now()),
(NULL, '4', '2', '200', '5', 2015, now()),
(NULL, '4', '2', '200', '6', 2015, now()),
(NULL, '4', '2', '200', '7', 2015, now()),
(NULL, '4', '2', '200', '8', 2015, now()),
(NULL, '4', '2', '200', '9', 2015, now()),
(NULL, '4', '2', '200', '10', 2015, now()),
(NULL, '4', '2', '200', '11', 2015, now()),
(NULL, '4', '2', '200', '12', 2015, now()),
-- Payments for the year 2014 (same students/groups as above).
/**2014**/
(NULL, '1', '1', '200', '1', 2014, now()),
(NULL, '1', '1', '200', '2', 2014, now()),
(NULL, '1', '1', '200', '3', 2014, now()),
(NULL, '1', '1', '200', '4', 2014, now()),
(NULL, '1', '1', '200', '5', 2014, now()),
(NULL, '1', '1', '200', '6', 2014, now()),
(NULL, '1', '1', '200', '7', 2014, now()),
(NULL, '1', '1', '200', '8', 2014, now()),
(NULL, '1', '1', '200', '9', 2014, now()),
(NULL, '1', '1', '200', '10', 2014, now()),
(NULL, '1', '1', '200', '11', 2014, now()),
(NULL, '1', '1', '200', '12', 2014, now()),
(NULL, '2', '1', '250', '1', 2014, now()),
(NULL, '2', '1', '250', '2', 2014, now()),
(NULL, '2', '1', '250', '3', 2014, now()),
(NULL, '2', '1', '250', '4', 2014, now()),
(NULL, '2', '1', '250', '5', 2014, now()),
(NULL, '2', '1', '250', '6', 2014, now()),
(NULL, '2', '1', '250', '7', 2014, now()),
(NULL, '2', '1', '250', '8', 2014, now()),
(NULL, '2', '1', '250', '9', 2014, now()),
(NULL, '2', '1', '250', '10', 2014, now()),
(NULL, '2', '1', '250', '11', 2014, now()),
(NULL, '2', '1', '250', '12', 2014, now()),
(NULL, '3', '1', '250', '1', 2014, now()),
(NULL, '3', '1', '250', '2', 2014, now()),
(NULL, '3', '1', '250', '3', 2014, now()),
(NULL, '3', '1', '250', '4', 2014, now()),
(NULL, '3', '1', '250', '5', 2014, now()),
(NULL, '3', '1', '250', '6', 2014, now()),
(NULL, '3', '1', '250', '7', 2014, now()),
(NULL, '3', '1', '250', '8', 2014, now()),
(NULL, '3', '1', '250', '9', 2014, now()),
(NULL, '3', '1', '250', '10', 2014, now()),
(NULL, '3', '1', '250', '11', 2014, now()),
(NULL, '3', '1', '250', '12', 2014, now()),
(NULL, '1', '2', '200', '1', 2014, now()),
(NULL, '1', '2', '200', '2', 2014, now()),
(NULL, '1', '2', '200', '3', 2014, now()),
(NULL, '1', '2', '200', '4', 2014, now()),
(NULL, '1', '2', '200', '5', 2014, now()),
(NULL, '1', '2', '200', '6', 2014, now()),
(NULL, '1', '2', '200', '7', 2014, now()),
(NULL, '1', '2', '200', '8', 2014, now()),
(NULL, '1', '2', '200', '9', 2014, now()),
(NULL, '1', '2', '200', '10', 2014, now()),
(NULL, '1', '2', '200', '11', 2014, now()),
(NULL, '1', '2', '200', '12', 2014, now()),
(NULL, '4', '2', '200', '1', 2014, now()),
(NULL, '4', '2', '200', '2', 2014, now()),
(NULL, '4', '2', '200', '3', 2014, now()),
(NULL, '4', '2', '200', '4', 2014, now()),
(NULL, '4', '2', '200', '5', 2014, now()),
(NULL, '4', '2', '200', '6', 2014, now()),
(NULL, '4', '2', '200', '7', 2014, now()),
(NULL, '4', '2', '200', '8', 2014, now()),
(NULL, '4', '2', '200', '9', 2014, now()),
(NULL, '4', '2', '200', '10', 2014, now()),
(NULL, '4', '2', '200', '11', 2014, now()),
(NULL, '4', '2', '200', '12', 2014, now()),
-- 2016 is intentionally partial: only the first months are paid.
/**2016**/
(NULL, '1', '1', '200', '1', 2016, now()),
(NULL, '1', '1', '200', '2', 2016, now()),
(NULL, '1', '1', '200', '3', 2016, now()),
(NULL, '2', '1', '250', '1', 2016, now()),
(NULL, '3', '1', '250', '1', 2016, now()),
(NULL, '3', '1', '250', '2', 2016, now()),
(NULL, '1', '2', '200', '1', 2016, now()),
(NULL, '1', '2', '200', '2', 2016, now()),
(NULL, '1', '2', '200', '3', 2016, now()),
(NULL, '4', '2', '200', '1', 2016, now()),
(NULL, '4', '2', '200', '2', 2016, now());
-- All sport/location pairs. The LEFT JOIN side keeps sports that have no
-- group, the RIGHT JOIN side keeps groups whose sport row is missing;
-- UNION merges the two halves into an emulated FULL OUTER JOIN (MySQL has
-- no FULL JOIN) and also deduplicates the rows matched by both halves.
-- Fixed: the ON clauses referenced non-existent identifiers `sport.id`
-- and `sportGroup.sport_id`; the actual tables are `sports`/`sportGroups`.
select sports.name, sportGroups.location
from sports
left join sportGroups on sports.id = sportGroups.sport_id
union
select sports.name, sportGroups.location
from sports
right join sportGroups on sports.id = sportGroups.sport_id;
-- Sport name and training location for every group that has a sport.
-- Fixed: the ON clause used `sport.id = sportGroups.sport_id`, which does
-- not resolve once the tables are aliased as `s` and `sg`.
select s.name as Sport, sg.location as Place
from sports as s
join sportGroups as sg
on s.id = sg.sport_id;
-- Pairs of distinct students (s1.id < s2.id avoids mirrored duplicates)
-- who train the same sport, together with that sport's name.
-- Rewritten: the original statement was syntactically invalid (unbalanced
-- parentheses, a trailing WHERE grafted onto the ON clause) and selected
-- `sport_id` from student_sport where `student_id` was required, so it
-- could never run. This version walks student_sport -> sportGroups to the
-- shared sport explicitly; DISTINCT collapses pairs that share a sport
-- through more than one group.
select distinct
    s1.name as Student1,
    s2.name as Student2,
    sp.name as Sport
from student_sport as ss1
join sportGroups as sg1 on sg1.id = ss1.sportGroup_id
join student_sport as ss2 on ss2.student_id > ss1.student_id
join sportGroups as sg2 on sg2.id = ss2.sportGroup_id
                       and sg2.sport_id = sg1.sport_id
join students as s1 on s1.id = ss1.student_id
join students as s2 on s2.id = ss2.student_id
join sports as sp on sp.id = sg1.sport_id
order by Sport;
|
-- Shard access layer: one foreign server plus a user mapping per physical
-- shard. IF NOT EXISTS makes re-running the script against an already
-- configured database harmless instead of erroring out.
CREATE EXTENSION IF NOT EXISTS postgres_fdw;
-- Shard 0.
CREATE SERVER books_0_server
FOREIGN DATA WRAPPER postgres_fdw
OPTIONS( host 'postgresql-shard0', port '5432', dbname 'postgres' );
CREATE USER MAPPING FOR postgres
SERVER books_0_server
-- NOTE(review): '<PASSWORD>' is a redacted placeholder; inject the real
-- credential at deploy time, never commit it to the repository.
OPTIONS (user 'postgres', password '<PASSWORD>');
-- Shard 1.
CREATE SERVER books_1_server
FOREIGN DATA WRAPPER postgres_fdw
OPTIONS( host 'postgresql-shard1', port '5432', dbname 'postgres' );
CREATE USER MAPPING FOR postgres
SERVER books_1_server
OPTIONS (user 'postgres', password '<PASSWORD>');
-- Local foreign-table proxies for the per-shard public.books tables.
-- Both shards share one column layout; the only difference is the server.
CREATE FOREIGN TABLE books_0 (
    id          bigint            NOT NULL,
    category_id int               NOT NULL,
    author      character varying NOT NULL,
    title       character varying NOT NULL,
    year        int               NOT NULL
) SERVER books_0_server OPTIONS (schema_name 'public', table_name 'books');
CREATE FOREIGN TABLE books_1 (
    id          bigint            NOT NULL,
    category_id int               NOT NULL,
    author      character varying NOT NULL,
    title       character varying NOT NULL,
    year        int               NOT NULL
) SERVER books_1_server OPTIONS (schema_name 'public', table_name 'books');
-- Unified read view over both shards. UNION ALL (not UNION) is deliberate:
-- shard keys are disjoint, so no dedup pass is needed.
CREATE VIEW books AS
SELECT * FROM books_0
UNION ALL
SELECT * FROM books_1;
-- Unconditional INSTEAD NOTHING rules make the view writable without
-- erroring; the conditional rules below then route INSERTs by shard key.
-- NOTE(review): UPDATE and DELETE against the view are silently discarded
-- by these rules -- confirm that is intended and not a missing feature.
CREATE RULE books_insert AS ON INSERT TO books DO INSTEAD NOTHING;
CREATE RULE books_update AS ON UPDATE TO books DO INSTEAD NOTHING;
CREATE RULE books_delete AS ON DELETE TO books DO INSTEAD NOTHING;
-- Route each inserted row to a shard by the parity of category_id.
CREATE RULE books_insert_to_0 AS ON INSERT TO books
WHERE ( category_id % 2 = 0 ) DO INSTEAD INSERT INTO books_0 VALUES (NEW.*);
CREATE RULE books_insert_to_1 AS ON INSERT TO books
WHERE ( category_id % 2 = 1 ) DO INSTEAD INSERT INTO books_1 VALUES (NEW.*);
|
-- Source repository: htookhantlinn/health
-- phpMyAdmin SQL Dump
-- version 5.1.1
-- https://www.phpmyadmin.net/
--
-- Host: localhost
-- Generation Time: Jan 02, 2022 at 12:23 PM
-- Server version: 10.4.20-MariaDB
-- PHP Version: 8.0.9
-- phpMyAdmin session setup: keep explicit zeros from triggering
-- auto-increment, run the whole dump in one transaction, and pin the
-- connection time zone to UTC so timestamp literals load unchanged.
SET SQL_MODE = "NO_AUTO_VALUE_ON_ZERO";
START TRANSACTION;
SET time_zone = "+00:00";
-- Save the client's charset/collation; a dump footer normally restores
-- them after switching the connection to utf8mb4.
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8mb4 */;
--
-- Database: `one_health`
--
-- --------------------------------------------------------
--
-- Table structure for table `addresses`
--
-- Cities referenced by students (see students.address_id below).
-- The primary key / auto-increment are added later in the dump's
-- index section, as phpMyAdmin emits DDL in two passes.
CREATE TABLE `addresses` (
  `id` bigint(20) UNSIGNED NOT NULL,
  `city` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
  `created_at` timestamp NULL DEFAULT NULL,
  `updated_at` timestamp NULL DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
-- --------------------------------------------------------
--
-- Table structure for table `blogs`
--
-- Blog posts; category_id and user_id get foreign-key-style KEYs in the
-- index section later in the dump.
-- NOTE(review): `id` is int(10) while the other tables use bigint(20)
-- surrogate keys -- confirm the inconsistency is intentional.
CREATE TABLE `blogs` (
  `id` int(10) UNSIGNED NOT NULL,
  `title` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
  `category_id` bigint(20) UNSIGNED NOT NULL,
  `user_id` bigint(20) UNSIGNED NOT NULL,
  `description` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
  `image` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
  `created_at` timestamp NULL DEFAULT NULL,
  `updated_at` timestamp NULL DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
--
-- Dumping data for table `blogs`
--
-- Faker-generated seed rows for blogs (ids are non-contiguous because some
-- seeded rows were deleted before the dump was taken).
INSERT INTO `blogs` (`id`, `title`, `category_id`, `user_id`, `description`, `image`, `created_at`, `updated_at`) VALUES
(2, 'Numquam', 6, 4, 'Mollitia laudantium ducimus ut quae laborum id est. Eveniet et ab aut iusto vel deserunt in qui. Eos nesciunt consequatur praesentium odit eum quo libero.', 'blog_5.jpg', '2022-01-02 03:57:46', '2022-01-02 03:57:46'),
(3, 'Dolorem', 9, 1, 'Voluptas ipsam praesentium eligendi quia quibusdam sequi. Voluptas eveniet delectus quam blanditiis. Non cupiditate cupiditate atque sint iste et reprehenderit. Est vitae odit aut sint quibusdam.', 'blog_5.jpg', '2022-01-02 03:57:46', '2022-01-02 03:57:46'),
(4, 'Porro', 7, 5, 'Consequatur eligendi sint qui itaque. Illo modi porro blanditiis temporibus. Porro perspiciatis autem aliquid repellendus accusantium fuga eveniet.', 'blog_5.jpg', '2022-01-02 03:57:46', '2022-01-02 03:57:46'),
(6, 'Eligendi', 5, 5, 'Necessitatibus eos dolores aut. Ipsam libero amet ea dicta quia qui dolore totam. Explicabo dignissimos rerum ex libero. Enim voluptas quas vitae dolorem architecto.', 'blog_5.jpg', '2022-01-02 03:57:46', '2022-01-02 03:57:46'),
(7, 'Magnam', 6, 3, 'Dolor iusto voluptatem magni ex tempore ullam nobis impedit. Dicta tenetur suscipit deserunt sed. Reiciendis culpa quo et atque quo praesentium.', 'blog_5.jpg', '2022-01-02 03:57:46', '2022-01-02 03:57:46'),
(8, 'Voluptas', 2, 3, 'Quia nam dolores et odio voluptatem. Libero officiis sequi ab sunt corrupti. Quam qui necessitatibus dolores nihil. Magni autem consequatur aut hic.', 'blog_5.jpg', '2022-01-02 03:57:46', '2022-01-02 03:57:46'),
(9, 'Est', 2, 6, 'Expedita qui sapiente consectetur qui sed consequatur doloribus. Veritatis aspernatur qui est ea. Magni illo soluta voluptatem. Saepe aut odio accusantium ut.', 'blog_5.jpg', '2022-01-02 03:57:46', '2022-01-02 03:57:46'),
(10, 'Nostrum', 2, 9, 'Nostrum nihil ut est ut ea doloremque. Sapiente voluptatem culpa velit ut adipisci sed. Consectetur sunt veniam qui officiis atque est veritatis. Minima mollitia fugit et aut facilis assumenda.', 'blog_5.jpg', '2022-01-02 03:57:46', '2022-01-02 03:57:46');
-- --------------------------------------------------------
--
-- Table structure for table `categories`
--
-- Lookup table of blog/item categories (referenced by blogs.category_id
-- and items.category_id).
CREATE TABLE `categories` (
  `id` bigint(20) UNSIGNED NOT NULL,
  `name` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
  `created_at` timestamp NULL DEFAULT NULL,
  `updated_at` timestamp NULL DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
--
-- Dumping data for table `categories`
--
-- Faker-generated seed rows for categories (ids 1-10).
INSERT INTO `categories` (`id`, `name`, `created_at`, `updated_at`) VALUES
(1, 'Soluta', '2022-01-02 03:53:24', '2022-01-02 03:53:24'),
(2, 'Quaerat', '2022-01-02 03:53:24', '2022-01-02 03:53:24'),
(3, 'Quis', '2022-01-02 03:53:24', '2022-01-02 03:53:24'),
(4, 'Repellat', '2022-01-02 03:53:24', '2022-01-02 03:53:24'),
(5, 'Voluptatem', '2022-01-02 03:53:24', '2022-01-02 03:53:24'),
(6, 'Explicabo', '2022-01-02 03:53:24', '2022-01-02 03:53:24'),
(7, 'Quo', '2022-01-02 03:53:24', '2022-01-02 03:53:24'),
(8, 'Porro', '2022-01-02 03:53:24', '2022-01-02 03:53:24'),
(9, 'Cum', '2022-01-02 03:53:24', '2022-01-02 03:53:24'),
(10, 'Et', '2022-01-02 03:53:24', '2022-01-02 03:53:24');
-- --------------------------------------------------------
--
-- Table structure for table `doctors`
--
-- Doctors; field_id points at the `fields` (speciality) lookup table and
-- gets a KEY in the index section. `image` is nullable: a doctor may be
-- created without a photo.
CREATE TABLE `doctors` (
  `id` int(10) UNSIGNED NOT NULL,
  `name` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
  `field_id` int(10) UNSIGNED NOT NULL,
  `phone` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
  `image` varchar(255) COLLATE utf8mb4_unicode_ci DEFAULT NULL,
  `created_at` timestamp NULL DEFAULT NULL,
  `updated_at` timestamp NULL DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
--
-- Dumping data for table `doctors`
--
-- Faker-generated seed rows for doctors; the final row (id 72) was entered
-- manually through the application, hence the uploaded image filename.
INSERT INTO `doctors` (`id`, `name`, `field_id`, `phone`, `image`, `created_at`, `updated_at`) VALUES
(1, 'Et', 9, '+1-936-436-9069', 'doctor_3.jpg', '2021-12-20 03:58:09', '2021-12-20 03:58:09'),
(3, 'Totam', 4, '+1.743.500.2195', 'doctor_3.jpg', '2021-12-20 03:58:09', '2021-12-20 03:58:09'),
(4, 'Est', 9, '+15402197342', 'doctor_3.jpg', '2021-12-20 03:58:09', '2021-12-20 03:58:09'),
(5, 'Id', 1, '1-914-539-3853', 'doctor_3.jpg', '2021-12-20 03:58:09', '2021-12-20 03:58:09'),
(6, 'Quibusdam', 8, '+12122895007', 'doctor_3.jpg', '2021-12-20 03:58:09', '2021-12-20 03:58:09'),
(8, 'Excepturi', 10, '+1-337-683-9500', 'doctor_3.jpg', '2021-12-20 03:58:09', '2021-12-20 03:58:09'),
(9, 'Dignissimos', 3, '+1-850-900-5827', 'doctor_3.jpg', '2021-12-20 03:58:09', '2021-12-20 03:58:09'),
(10, 'Architecto', 1, '1-930-328-5478', 'doctor_3.jpg', '2021-12-20 03:58:09', '2021-12-20 03:58:09'),
(11, 'Doctor1', 10, '1-256-247-7176', 'doctor_3.jpg', '2021-12-29 21:11:18', '2021-12-29 21:11:18'),
(12, 'Doctor2', 5, '+14148103857', 'doctor_3.jpg', '2021-12-29 21:11:18', '2021-12-29 21:11:18'),
(13, 'Doctor3', 10, '+1-559-459-4858', 'doctor_3.jpg', '2021-12-29 21:11:18', '2021-12-29 21:11:18'),
(14, 'Doctor4', 8, '938.972.6780', 'doctor_3.jpg', '2021-12-29 21:11:18', '2021-12-29 21:11:18'),
(15, 'Doctor5', 6, '239-768-6063', 'doctor_3.jpg', '2021-12-29 21:11:18', '2021-12-29 21:11:18'),
(16, 'Doctor6', 5, '801-818-8797', 'doctor_3.jpg', '2021-12-29 21:11:18', '2021-12-29 21:11:18'),
(17, 'Doctor7', 6, '272.783.6145', 'doctor_3.jpg', '2021-12-29 21:11:18', '2021-12-29 21:11:18'),
(18, 'Doctor8', 4, '+17815567455', 'doctor_3.jpg', '2021-12-29 21:11:18', '2021-12-29 21:11:18'),
(19, 'Doctor9', 6, '(680) 737-1625', 'doctor_3.jpg', '2021-12-29 21:11:18', '2021-12-29 21:11:18'),
(20, 'Doctor10', 2, '770-351-5259', 'doctor_3.jpg', '2021-12-29 21:11:18', '2021-12-29 21:11:18'),
(21, 'Doctor_10', 9, '678.651.4873', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(22, 'Doctor_11', 4, '(424) 990-0444', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(23, 'Doctor_12', 4, '+1.818.913.9747', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(24, 'Doctor_13', 7, '1-774-633-2752', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(25, 'Doctor_14', 7, '478-665-8884', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(26, 'Doctor_15', 6, '+1.820.987.9334', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(27, 'Doctor_16', 3, '(606) 839-7691', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(28, 'Doctor_17', 3, '+1-440-991-6810', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(29, 'Doctor_18', 1, '+1 (505) 873-8155', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(30, 'Doctor_19', 1, '+1-872-752-3200', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(31, 'Doctor_20', 10, '(617) 992-9533', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(32, 'Doctor_21', 6, '+1.309.621.5415', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(33, 'Doctor_22', 9, '(773) 887-5307', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(34, 'Doctor_23', 9, '+1.231.437.8236', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(35, 'Doctor_24', 5, '1-779-298-1960', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(36, 'Doctor_25', 7, '+1-386-649-1127', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(37, 'Doctor_26', 6, '+1 (283) 823-3644', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(38, 'Doctor_27', 3, '+1.606.614.2618', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(39, 'Doctor_28', 3, '+1-510-409-7041', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(40, 'Doctor_29', 2, '+1-830-333-5702', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(41, 'Doctor_30', 1, '+1-678-959-0086', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(42, 'Doctor_31', 6, '423.899.8399', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(43, 'Doctor_32', 7, '1-480-276-2219', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(44, 'Doctor_33', 7, '(575) 740-3551', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(45, 'Doctor_34', 5, '262.287.5524', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(46, 'Doctor_35', 6, '(901) 752-7292', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(47, 'Doctor_36', 1, '+1.860.498.1149', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(48, 'Doctor_37', 1, '+1-332-977-0525', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(49, 'Doctor_38', 3, '(678) 629-4597', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(50, 'Doctor_39', 5, '+1-863-347-3187', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(51, 'Doctor_40', 6, '1-440-290-2930', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(52, 'Doctor_41', 10, '+17756535006', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(53, 'Doctor_42', 5, '469-777-3606', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(54, 'Doctor_43', 4, '1-520-271-8186', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(55, 'Doctor_44', 4, '+12342586047', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(56, 'Doctor_45', 7, '+1-734-994-5488', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(57, 'Doctor_46', 9, '+1.239.532.9928', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(58, 'Doctor_47', 7, '323.309.9473', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(59, 'Doctor_48', 1, '+1-256-838-0638', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(60, 'Doctor_49', 4, '706-870-7302', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(61, 'Doctor_50', 2, '469.774.0490', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(62, 'Doctor_51', 1, '+1 (534) 712-3261', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(63, 'Doctor_52', 6, '+1-270-460-7564', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(64, 'Doctor_53', 7, '+1-954-887-9103', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(65, 'Doctor_54', 9, '(640) 714-5586', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(66, 'Doctor_55', 2, '424-304-5190', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(67, 'Doctor_56', 2, '+1 (989) 582-1090', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(68, 'Doctor_57', 9, '+1 (364) 613-7249', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(69, 'Doctor_58', 10, '+1.719.930.8552', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(70, 'Doctor_59', 2, '279-857-7482', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(71, 'Doctor_60', 6, '731-383-2146', 'doctor_3.jpg', '2021-12-29 21:11:58', '2021-12-29 21:11:58'),
(72, '<NAME>', 4, '123456789', '1640871653.jpg', '2021-12-30 07:10:53', '2021-12-30 07:10:53');
-- --------------------------------------------------------
--
-- Table structure for table `failed_jobs`
--
-- Laravel's standard failed-queue-jobs table (created by the stock
-- 2019_08_19 framework migration); no seed data is dumped for it.
CREATE TABLE `failed_jobs` (
  `id` bigint(20) UNSIGNED NOT NULL,
  `uuid` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
  `connection` text COLLATE utf8mb4_unicode_ci NOT NULL,
  `queue` text COLLATE utf8mb4_unicode_ci NOT NULL,
  `payload` longtext COLLATE utf8mb4_unicode_ci NOT NULL,
  `exception` longtext COLLATE utf8mb4_unicode_ci NOT NULL,
  `failed_at` timestamp NOT NULL DEFAULT current_timestamp()
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
-- --------------------------------------------------------
--
-- Table structure for table `fields`
--
-- Lookup table of medical specialities, referenced by doctors.field_id.
CREATE TABLE `fields` (
  `id` int(10) UNSIGNED NOT NULL,
  `name` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
  `created_at` timestamp NULL DEFAULT NULL,
  `updated_at` timestamp NULL DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
--
-- Dumping data for table `fields`
--
-- Faker-generated seed rows for fields (ids 1-10).
INSERT INTO `fields` (`id`, `name`, `created_at`, `updated_at`) VALUES
(1, 'Non', '2021-12-20 03:58:09', '2021-12-20 03:58:09'),
(2, 'In', '2021-12-20 03:58:09', '2021-12-20 03:58:09'),
(3, 'Illum', '2021-12-20 03:58:09', '2021-12-20 03:58:09'),
(4, 'Dolor', '2021-12-20 03:58:09', '2021-12-20 03:58:09'),
(5, 'Tenetur', '2021-12-20 03:58:09', '2021-12-20 03:58:09'),
(6, 'Voluptas', '2021-12-20 03:58:09', '2021-12-20 03:58:09'),
(7, 'Cumque', '2021-12-20 03:58:09', '2021-12-20 03:58:09'),
(8, 'Quo', '2021-12-20 03:58:09', '2021-12-20 03:58:09'),
(9, 'Sed', '2021-12-20 03:58:09', '2021-12-20 03:58:09'),
(10, 'Eos', '2021-12-20 03:58:09', '2021-12-20 03:58:09');
-- --------------------------------------------------------
--
-- Table structure for table `items`
--
-- Inventory items.
-- NOTE(review): category_id here is a plain signed int(11) with no KEY,
-- unlike blogs.category_id (bigint unsigned, indexed) -- confirm it is
-- meant to reference `categories`.
CREATE TABLE `items` (
  `id` bigint(20) UNSIGNED NOT NULL,
  `name` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
  `description` varchar(1000) COLLATE utf8mb4_unicode_ci NOT NULL,
  `quantity` int(11) NOT NULL,
  `category_id` int(11) NOT NULL,
  `created_at` timestamp NULL DEFAULT NULL,
  `updated_at` timestamp NULL DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
--
-- Dumping data for table `items`
--
-- Seed rows for items; ids 149, 167, 168 and 180 were edited or created
-- manually (their updated_at differs from created_at).
INSERT INTO `items` (`id`, `name`, `description`, `quantity`, `category_id`, `created_at`, `updated_at`) VALUES
(98, 'Rem', 'Est ab sed numquam et ducimus dolores magnam. Ducimus soluta consequuntur quaerat. Cum voluptatum culpa aliquam est.', 291, 1, '2021-12-21 09:13:10', '2021-12-21 09:13:10'),
(99, 'Vel', 'Velit nostrum nobis illo sunt tempore ut aliquid. Autem consequatur in delectus sapiente molestias repellendus. Error quae facilis totam facere eveniet non consectetur qui. Repellat in fugiat ratione a.', 206, 10, '2021-12-21 09:13:10', '2021-12-21 09:13:10'),
(127, 'In', 'Et cumque asperiores voluptates est veritatis iste aperiam et. Consequuntur unde quisquam dolorem ratione officiis. Distinctio illo impedit et est ea autem. Est omnis deleniti reprehenderit qui voluptas minus velit.', 294, 8, '2021-12-22 01:45:56', '2021-12-22 01:45:56'),
(128, 'Non', 'Provident aspernatur veritatis ipsum odit suscipit velit illo. Quaerat id quia minima dicta suscipit. Modi magnam illum sed dignissimos deleniti aut. Magni doloribus commodi in et. Consectetur eius et aut officia aut omnis praesentium.', 135, 7, '2021-12-22 01:45:56', '2021-12-22 01:45:56'),
(148, 'Sit', 'Doloribus quidem delectus temporibus animi soluta. A sunt exercitationem rerum et nulla amet cupiditate. Voluptas ex sit atque. Quo ad distinctio consectetur et nesciunt.', 215, 8, '2021-12-22 05:46:40', '2021-12-22 05:46:40'),
(149, 'Hello', 'This is <NAME>\'s Description.', 181, 3, '2021-12-22 05:46:40', '2021-12-31 21:23:26'),
(150, 'Sit', 'Fugit est vero voluptatem. Quia et ipsam laborum numquam consequatur est molestias neque. Aliquid sunt non consectetur quia aliquid.', 158, 6, '2021-12-22 05:46:40', '2021-12-22 05:46:40'),
(151, 'Et', 'Sunt architecto architecto consectetur. Quia aut occaecati quaerat in ea aut est accusantium.', 182, 5, '2021-12-22 05:46:40', '2021-12-22 05:46:40'),
(152, 'Sapiente', 'Quod enim quaerat labore sit consequatur. Maxime id sit ad cum. Eveniet eius ut accusamus dolores. Fugiat et cum ut est veritatis.', 184, 2, '2021-12-22 05:46:40', '2021-12-22 05:46:40'),
(153, 'Omnis', 'Tempora aliquid et similique ad non quos laudantium et. Ut asperiores et voluptatum suscipit ut quo. Cumque ea dignissimos et facilis.', 118, 8, '2021-12-22 05:46:40', '2021-12-22 05:46:40'),
(167, '<NAME>', 'Rerum quibusdam est nobis tenetur hic. Deleniti fugiat qui alias facilis neque nulla beatae pariatur. Quae quia quo doloribus vel in quidem. Vitae mollitia qui sapiente velit odio reprehenderit.', 900, 5, '2021-12-22 06:43:06', '2021-12-31 21:22:39'),
(168, 'a', 'Quisquam labore aut nisi reiciendis. Laborum corporis rem est excepturi quo dolor. Rerum voluptatem impedit ea doloremque.', 111, 2, '2021-12-22 06:43:06', '2021-12-29 10:00:15'),
(169, 'Quia', 'Et quo sit ratione voluptatem odio id. Voluptas placeat voluptate dicta officiis. Fugiat voluptas sequi doloremque aut vitae alias. Itaque voluptatum earum sint qui.', 264, 5, '2021-12-22 06:43:06', '2021-12-22 06:43:06'),
(170, 'Ad', 'Natus aut et dolore sunt id. Numquam amet rerum quibusdam est aut quidem nam dolores.', 202, 10, '2021-12-22 06:43:06', '2021-12-22 06:43:06'),
(171, 'Incidunt', 'Ex voluptate quam magnam laudantium. Voluptatem nobis voluptatem eveniet voluptatibus error ducimus. Iure error qui aliquid quaerat cum est atque.', 213, 3, '2021-12-22 06:43:06', '2021-12-22 06:43:06'),
(172, 'Ipsa', 'Tenetur quis non maiores voluptas praesentium. Quis iure quia nemo dignissimos quibusdam officiis. Quis ut fuga voluptas quia. Architecto deleniti vero rem ut omnis distinctio at nihil. Hic laudantium accusamus sit aspernatur enim aut.', 265, 9, '2021-12-22 06:43:06', '2021-12-22 06:43:06'),
(180, 'Lorem ispusm', 'Hello, This is description.', 1800, 5, '2021-12-31 21:27:30', '2021-12-31 21:27:53');
-- --------------------------------------------------------
--
-- Table structure for table `migrations`
--
-- Laravel migration bookkeeping: which migration files have run and in
-- which batch.
CREATE TABLE `migrations` (
  `id` int(10) UNSIGNED NOT NULL,
  `migration` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
  `batch` int(11) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
--
-- Dumping data for table `migrations`
--
-- Applied-migration history (gaps in id reflect rolled-back migrations).
INSERT INTO `migrations` (`id`, `migration`, `batch`) VALUES
(1, '2021_12_20_085412_create_addresses_table', 1),
(2, '2021_12_20_085235_create_students_table', 2),
(3, '2021_12_13_073655_create_fields_table', 3),
(4, '2021_12_10_101206_create_doctors_table', 4),
(6, '2014_10_12_100000_create_password_resets_table', 5),
(7, '2019_08_19_000000_create_failed_jobs_table', 5),
(8, '2019_12_14_000001_create_personal_access_tokens_table', 5),
(11, '2021_12_18_023215_create_items_table', 5),
(13, '2021_12_20_102052_create_subjects_table', 5),
(14, '2021_12_20_102118_create_student__subjects_table', 5),
(21, '2021_12_14_042705_create_categories_table', 10),
(22, '2014_10_12_000000_create_users_table', 11),
(23, '2021_12_10_095155_create_blogs_table', 12),
(24, '2021_12_19_094210_create_passports_table', 13);
-- --------------------------------------------------------
--
-- Table structure for table `passports`
--
-- One-to-one passport record per user (user_id gets a KEY in the index
-- section); no seed data is dumped for it.
CREATE TABLE `passports` (
  `id` bigint(20) UNSIGNED NOT NULL,
  `user_id` bigint(20) UNSIGNED NOT NULL,
  `passport_no` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
  `created_at` timestamp NULL DEFAULT NULL,
  `updated_at` timestamp NULL DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
-- --------------------------------------------------------
--
-- Table structure for table `password_resets`
--
-- Laravel's standard password-reset token table (keyed by email, not id).
CREATE TABLE `password_resets` (
  `email` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
  `token` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
  `created_at` timestamp NULL DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
-- --------------------------------------------------------
--
-- Table structure for table `personal_access_tokens`
--
-- Laravel Sanctum API tokens; tokenable_type/tokenable_id form the
-- polymorphic owner reference (indexed later in the dump).
CREATE TABLE `personal_access_tokens` (
  `id` bigint(20) UNSIGNED NOT NULL,
  `tokenable_type` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
  `tokenable_id` bigint(20) UNSIGNED NOT NULL,
  `name` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
  `token` varchar(64) COLLATE utf8mb4_unicode_ci NOT NULL,
  `abilities` text COLLATE utf8mb4_unicode_ci DEFAULT NULL,
  `last_used_at` timestamp NULL DEFAULT NULL,
  `created_at` timestamp NULL DEFAULT NULL,
  `updated_at` timestamp NULL DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
-- --------------------------------------------------------
--
-- Table structure for table `students`
--
-- Students; address_id references addresses.id (KEY added in the index
-- section); no seed data is dumped for it.
CREATE TABLE `students` (
  `id` bigint(20) UNSIGNED NOT NULL,
  `name` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
  `address_id` bigint(20) UNSIGNED NOT NULL,
  `created_at` timestamp NULL DEFAULT NULL,
  `updated_at` timestamp NULL DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
-- --------------------------------------------------------
--
-- Table structure for table `student__subjects`
--
-- Pivot table intended to link students to subjects (per the
-- create_student__subjects_table migration name).
-- NOTE(review): the dump shows no student_id/subject_id columns, so as
-- dumped it cannot relate the two tables -- verify the migration is
-- complete.
CREATE TABLE `student__subjects` (
  `id` bigint(20) UNSIGNED NOT NULL,
  `created_at` timestamp NULL DEFAULT NULL,
  `updated_at` timestamp NULL DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
-- --------------------------------------------------------
--
-- Table structure for table `subjects`
--
-- Subjects taken by students.
-- NOTE(review): only id + timestamps are dumped -- a subject has no name
-- or other payload column; verify the migration is complete.
CREATE TABLE `subjects` (
  `id` bigint(20) UNSIGNED NOT NULL,
  `created_at` timestamp NULL DEFAULT NULL,
  `updated_at` timestamp NULL DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
-- --------------------------------------------------------
--
-- Table structure for table `users`
--
-- Application accounts (Laravel's users table plus a `mobile` column);
-- email gets a UNIQUE KEY in the index section.
CREATE TABLE `users` (
  `id` bigint(20) UNSIGNED NOT NULL,
  `name` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
  `email` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
  `mobile` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
  `email_verified_at` timestamp NULL DEFAULT NULL,
  `password` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
  `remember_token` varchar(100) COLLATE utf8mb4_unicode_ci DEFAULT NULL,
  `created_at` timestamp NULL DEFAULT NULL,
  `updated_at` timestamp NULL DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
--
-- Dumping data for table `users`
--
-- Seed accounts: ids 1-10 are factory-generated (note they all share one
-- bcrypt hash); id 15 was registered through the app (bcrypt $2y$ hash,
-- unverified email).
INSERT INTO `users` (`id`, `name`, `email`, `mobile`, `email_verified_at`, `password`, `remember_token`, `created_at`, `updated_at`) VALUES
(1, '<NAME>', '<EMAIL>', '419-688-6723', '2022-01-02 03:55:53', '$2a$12$Ujlf69qcZTCfvtcLMD5Ug.i8i82FH3fLHpKjVDHNtYjdTLnsiWtGC', 'sT499Gs3TgzlJsuvIr97jmkvMmIvd8XzCsPPOYhXdlCWTuSIZ0UJoYf7jV1U', '2022-01-02 03:55:53', '2022-01-02 03:55:53'),
(2, '<NAME>', '<EMAIL>', '+1-667-578-5292', '2022-01-02 03:55:53', '$2a$12$Ujlf69qcZTCfvtcLMD5Ug.i8i82FH3fLHpKjVDHNtYjdTLnsiWtGC', 'o3JxZCehDd', '2022-01-02 03:55:53', '2022-01-02 03:55:53'),
(3, 'Moe Moe', '<EMAIL>', '1-341-364-7001', '2022-01-02 03:55:53', '$2a$12$Ujlf69qcZTCfvtcLMD5Ug.i8i82FH3fLHpKjVDHNtYjdTLnsiWtGC', 'K4npAHfULh', '2022-01-02 03:55:53', '2022-01-02 03:55:53'),
(4, 'Aye Aye', '<EMAIL>', '(564) 491-8431', '2022-01-02 03:55:53', '$2a$12$Ujlf69qcZTCfvtcLMD5Ug.i8i82FH3fLHpKjVDHNtYjdTLnsiWtGC', '4uw7Vn5ryd', '2022-01-02 03:55:53', '2022-01-02 03:55:53'),
(5, 'Aung Ko', '<EMAIL>', '+16416164409', '2022-01-02 03:55:53', '$2a$12$Ujlf69qcZTCfvtcLMD5Ug.i8i82FH3fLHpKjVDHNtYjdTLnsiWtGC', '4hb9iRBCRE', '2022-01-02 03:55:53', '2022-01-02 03:55:53'),
(6, '<NAME>', '<EMAIL>', '281-529-6647', '2022-01-02 03:55:53', '$2a$12$Ujlf69qcZTCfvtcLMD5Ug.i8i82FH3fLHpKjVDHNtYjdTLnsiWtGC', 'daY9QBfgEq', '2022-01-02 03:55:53', '2022-01-02 03:55:53'),
(7, '<NAME>', '<EMAIL>', '+1-929-913-9910', '2022-01-02 03:55:53', '$2a$12$Ujlf69qcZTCfvtcLMD5Ug.i8i82FH3fLHpKjVDHNtYjdTLnsiWtGC', 'yqmq4u7b3p', '2022-01-02 03:55:53', '2022-01-02 03:55:53'),
(8, 'Jack', '<EMAIL>', '651-610-8467', '2022-01-02 03:55:53', '$2a$12$Ujlf69qcZTCfvtcLMD5Ug.i8i82FH3fLHpKjVDHNtYjdTLnsiWtGC', 'PjaVcs4Rjk', '2022-01-02 03:55:53', '2022-01-02 03:55:53'),
(9, 'Alice', '<EMAIL>', '385.822.8259', '2022-01-02 03:55:53', '$2a$12$Ujlf69qcZTCfvtcLMD5Ug.i8i82FH3fLHpKjVDHNtYjdTLnsiWtGC', '9DtUZQlEaV', '2022-01-02 03:55:53', '2022-01-02 03:55:53'),
(10, 'John', '<EMAIL>', '+1-678-525-5654', '2022-01-02 03:55:53', '$2a$12$Ujlf69qcZTCfvtcLMD5Ug.i8i82FH3fLHpKjVDHNtYjdTLnsiWtGC', '8SjyySXZKL', '2022-01-02 03:55:53', '2022-01-02 03:55:53'),
(15, '<NAME>', '<EMAIL>', '09260965397', NULL, '$2y$10$ZbnchFOFh.3EBmpocGGUeefKgnpY68S52SBFavZ07lwjKAxl2zdf6', NULL, '2022-01-02 04:28:05', '2022-01-02 04:28:05');
--
-- Indexes for dumped tables
--
--
-- Indexes for dumped tables (phpMyAdmin emits keys separately from the
-- CREATE TABLE statements; *_foreign KEYs back Laravel FK constraints).
--
--
-- Indexes for table `addresses`
--
ALTER TABLE `addresses`
  ADD PRIMARY KEY (`id`);
--
-- Indexes for table `blogs`
--
ALTER TABLE `blogs`
  ADD PRIMARY KEY (`id`),
  ADD KEY `blogs_category_id_foreign` (`category_id`),
  ADD KEY `blogs_user_id_foreign` (`user_id`);
--
-- Indexes for table `categories`
--
ALTER TABLE `categories`
  ADD PRIMARY KEY (`id`);
--
-- Indexes for table `doctors`
--
ALTER TABLE `doctors`
  ADD PRIMARY KEY (`id`),
  ADD KEY `doctors_field_id_foreign` (`field_id`);
--
-- Indexes for table `failed_jobs`
--
ALTER TABLE `failed_jobs`
  ADD PRIMARY KEY (`id`),
  ADD UNIQUE KEY `failed_jobs_uuid_unique` (`uuid`);
--
-- Indexes for table `fields`
--
ALTER TABLE `fields`
  ADD PRIMARY KEY (`id`);
--
-- Indexes for table `items`
--
ALTER TABLE `items`
  ADD PRIMARY KEY (`id`);
--
-- Indexes for table `migrations`
--
ALTER TABLE `migrations`
  ADD PRIMARY KEY (`id`);
--
-- Indexes for table `passports`
--
ALTER TABLE `passports`
  ADD PRIMARY KEY (`id`),
  ADD KEY `passports_user_id_foreign` (`user_id`);
--
-- Indexes for table `password_resets`
--
ALTER TABLE `password_resets`
  ADD KEY `password_resets_email_index` (`email`);
--
-- Indexes for table `personal_access_tokens`
--
ALTER TABLE `personal_access_tokens`
  ADD PRIMARY KEY (`id`),
  ADD UNIQUE KEY `personal_access_tokens_token_unique` (`token`),
  ADD KEY `personal_access_tokens_tokenable_type_tokenable_id_index` (`tokenable_type`,`tokenable_id`);
--
-- Indexes for table `students`
--
ALTER TABLE `students`
  ADD PRIMARY KEY (`id`),
  ADD KEY `students_address_id_foreign` (`address_id`);
--
-- Indexes for table `student__subjects`
--
ALTER TABLE `student__subjects`
  ADD PRIMARY KEY (`id`);
--
-- Indexes for table `subjects`
--
ALTER TABLE `subjects`
  ADD PRIMARY KEY (`id`);
--
-- Indexes for table `users`
--
ALTER TABLE `users`
  ADD PRIMARY KEY (`id`),
  ADD UNIQUE KEY `users_email_unique` (`email`);
--
-- AUTO_INCREMENT for dumped tables
--
--
-- AUTO_INCREMENT for table `addresses`
--
ALTER TABLE `addresses`
MODIFY `id` bigint(20) UNSIGNED NOT NULL AUTO_INCREMENT;
--
-- AUTO_INCREMENT for table `blogs`
--
ALTER TABLE `blogs`
MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=12;
--
-- AUTO_INCREMENT for table `categories`
--
ALTER TABLE `categories`
MODIFY `id` bigint(20) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=11;
--
-- AUTO_INCREMENT for table `doctors`
--
ALTER TABLE `doctors`
MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=73;
--
-- AUTO_INCREMENT for table `failed_jobs`
--
ALTER TABLE `failed_jobs`
MODIFY `id` bigint(20) UNSIGNED NOT NULL AUTO_INCREMENT;
--
-- AUTO_INCREMENT for table `fields`
--
ALTER TABLE `fields`
MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=11;
--
-- AUTO_INCREMENT for table `items`
--
ALTER TABLE `items`
MODIFY `id` bigint(20) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=181;
--
-- AUTO_INCREMENT for table `migrations`
--
ALTER TABLE `migrations`
MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=25;
--
-- AUTO_INCREMENT for table `passports`
--
ALTER TABLE `passports`
MODIFY `id` bigint(20) UNSIGNED NOT NULL AUTO_INCREMENT;
--
-- AUTO_INCREMENT for table `personal_access_tokens`
--
ALTER TABLE `personal_access_tokens`
MODIFY `id` bigint(20) UNSIGNED NOT NULL AUTO_INCREMENT;
--
-- AUTO_INCREMENT for table `students`
--
ALTER TABLE `students`
MODIFY `id` bigint(20) UNSIGNED NOT NULL AUTO_INCREMENT;
--
-- AUTO_INCREMENT for table `student__subjects`
--
ALTER TABLE `student__subjects`
MODIFY `id` bigint(20) UNSIGNED NOT NULL AUTO_INCREMENT;
--
-- AUTO_INCREMENT for table `subjects`
--
ALTER TABLE `subjects`
MODIFY `id` bigint(20) UNSIGNED NOT NULL AUTO_INCREMENT;
--
-- AUTO_INCREMENT for table `users`
--
ALTER TABLE `users`
MODIFY `id` bigint(20) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=16;
--
-- Constraints for dumped tables
--
--
-- Constraints for table `blogs`
--
ALTER TABLE `blogs`
ADD CONSTRAINT `blogs_category_id_foreign` FOREIGN KEY (`category_id`) REFERENCES `categories` (`id`) ON DELETE CASCADE,
ADD CONSTRAINT `blogs_user_id_foreign` FOREIGN KEY (`user_id`) REFERENCES `users` (`id`) ON DELETE CASCADE;
--
-- Constraints for table `doctors`
--
ALTER TABLE `doctors`
ADD CONSTRAINT `doctors_field_id_foreign` FOREIGN KEY (`field_id`) REFERENCES `fields` (`id`) ON DELETE CASCADE;
--
-- Constraints for table `passports`
--
ALTER TABLE `passports`
ADD CONSTRAINT `passports_user_id_foreign` FOREIGN KEY (`user_id`) REFERENCES `users` (`id`) ON DELETE CASCADE;
--
-- Constraints for table `students`
--
ALTER TABLE `students`
ADD CONSTRAINT `students_address_id_foreign` FOREIGN KEY (`address_id`) REFERENCES `addresses` (`id`) ON DELETE CASCADE;
COMMIT;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
|
<reponame>abhim8/Programs
-- MySQL practice script: explore existing databases, then build and populate
-- a small `tasks` table exercising defaults, dates and multi-row inserts.
SHOW DATABASES;
USE college;
SHOW TABLES;
USE course;
SHOW TABLES;
SELECT * FROM students;
CREATE TABLE IF NOT EXISTS students (id VARCHAR(10), name CHAR(15));
-- Task list: auto-increment id, mandatory title, optional scheduling dates,
-- priority defaulting to 5, free-text description.
CREATE TABLE tasks (
    task_id INT AUTO_INCREMENT,
    title VARCHAR(200) NOT NULL,
    start_date DATE,
    due_date DATE,
    priority INT NOT NULL DEFAULT 5,
    description TEXT,
    PRIMARY KEY (task_id)
);
DESCRIBE tasks;
-- Single-row insert with an explicit priority.
INSERT INTO tasks (title, priority)
VALUES ("learning MySql insert stmt", 1);
SELECT * FROM tasks;
-- DEFAULT keyword picks up the declared column default (5).
INSERT INTO tasks (title, priority)
VALUES ("learning MySql insert stmt2", DEFAULT);
-- Literal dates.
INSERT INTO tasks (title, start_date, due_date)
VALUES ("insert date into table", "2020-09-01", "2021-01-02");
-- Server-side current date for both date columns.
INSERT INTO tasks (title, start_date, due_date)
VALUES ("use current date for the task", CURRENT_DATE(), CURRENT_DATE());
-- Multi-row insert in one statement.
INSERT INTO tasks (title, priority)
VALUES ("my first task", 1),
    ("my second task", 2),
    ("my third task", 3); |
<reponame>littlefxc/seed-security-auth<filename>docs/db.sql
-- Table backing Spring Security's remember-me (persistent token) feature.
create table persistent_logins (username varchar(64) not null,
series varchar(64) primary key,
token varchar(64) not null,
last_used timestamp not null);
-- Table backing Spring Social sign-in (standard UserConnection layout with an
-- imooc_ prefix); one row per (user, provider, provider account) link.
-- NOTE(review): "rank" is a reserved word in MySQL 8+ — may need quoting there.
create table imooc_UserConnection (userId varchar(255) not null,
providerId varchar(255) not null,
providerUserId varchar(255),
rank int not null,
displayName varchar(255),
profileUrl varchar(512),
imageUrl varchar(512),
accessToken varchar(512) not null,
secret varchar(512),
refreshToken varchar(512),
expireTime bigint,
primary key (userId, providerId, providerUserId));
create unique index UserConnectionRank on imooc_UserConnection(userId, providerId, rank); |
/*
SQLyog Community
MySQL - 5.7.26-log : Database - ppa
*********************************************************************
*/
/*!40101 SET NAMES utf8 */;
/*!40101 SET SQL_MODE=''*/;
/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
/*Data for the table `colors` */
-- Seed data: surface-finish color codes (Cr3+/Cr6+ are chromate coatings).
-- NOTE(review): 'Black EDP' appears twice (ids 2 and 8) — confirm intended.
insert into `colors`(`id`,`name`,`description`,`created_at`,`updated_at`) values
(1,'Black',NULL,NULL,NULL),
(2,'Black EDP',NULL,NULL,NULL),
(3,'Silver',NULL,NULL,NULL),
(4,'Yellow Cr6+',NULL,NULL,NULL),
(5,'Yellow Cr3+',NULL,NULL,NULL),
(6,'Blue Cr3+',NULL,NULL,NULL),
(7,'Blue Cr3 + Baking 4 Jam',NULL,NULL,NULL),
(8,'Black EDP',NULL,NULL,NULL),
(9,'Black Cr3+',NULL,NULL,NULL),
(10,'Yellow Cr3 + Baking 4 Jam',NULL,NULL,NULL),
(11,'Green Cr6+',NULL,NULL,NULL);
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
<filename>BANCO_SRESTAURANTE/sql10326340_FORNECEDOR.sql
-- MySQL dump 10.13  Distrib 5.7.29, for Linux (x86_64)
--
-- Host: sql10.freemysqlhosting.net    Database: sql10326340
-- ------------------------------------------------------
-- Server version	5.5.58-0ubuntu0.14.04.1
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8 */;
/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
/*!40103 SET TIME_ZONE='+00:00' */;
/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
--
-- Table structure for table `FORNECEDOR`
--
-- FORNECEDOR = supplier: keyed by formatted Brazilian CNPJ (tax id) string.
DROP TABLE IF EXISTS `FORNECEDOR`;
/*!40101 SET @saved_cs_client     = @@character_set_client */;
/*!40101 SET character_set_client = utf8 */;
CREATE TABLE `FORNECEDOR` (
`cnpj` varchar(18) NOT NULL,
`nome` varchar(65) NOT NULL,
`telefone` varchar(14) NOT NULL,
PRIMARY KEY (`cnpj`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
/*!40101 SET character_set_client = @saved_cs_client */;
--
-- Dumping data for table `FORNECEDOR`
--
LOCK TABLES `FORNECEDOR` WRITE;
/*!40000 ALTER TABLE `FORNECEDOR` DISABLE KEYS */;
INSERT INTO `FORNECEDOR` VALUES ('28.857.589/0001-68','COCA COLA','(71) 3303-3933'),('63.719.229/0001-09','ITAIPAVA','(71) 3655-9933'),('74.310.105/0001-83','CEBOLAS S.A','(71) 3399-3246'),('97.104.031/0001-24','SCHIN','(71) 3373-3766');
/*!40000 ALTER TABLE `FORNECEDOR` ENABLE KEYS */;
UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-- Dump completed on 2020-03-11  2:30:27
-- Multi-tenant double-entry bookkeeping schema (Postgres): every table is
-- keyed by OrganizationId plus a per-organization serial/bigserial id, and
-- all foreign keys are composite so rows can never cross tenants.
create type "TransactionCategoryType" as enum ('INCOME', 'EXPENSE', 'TRANSFER');
-- User-defined categories a transaction can be classified into.
create table "TransactionCategories"
(
"OrganizationId" uuid not null references "Organizations"("OrganizationId"),
"CategoryId" serial,
"Name" varchar(200) not null,
"CategoryType" "TransactionCategoryType" not null,
"CreatedOn" timestamp not null default now(),
primary key ("OrganizationId", "CategoryId")
);
-- Auto-classification rules: a content pattern plus a minimum match rank
-- that maps matching transactions onto a category.
create table "TransactionClassificationPatterns"
(
"OrganizationId" uuid not null references "Organizations"("OrganizationId"),
"ClassificationPatternId" serial,
"CategoryId" int not null,
"MatchContent" varchar(250) not null,
"MinRank" float not null,
"CreatedOn" timestamp not null default now(),
primary key ("OrganizationId", "ClassificationPatternId"),
constraint "TransactionClassificationPatterns_TransactionCategories_fkey" foreign key ("OrganizationId", "CategoryId")
references "TransactionCategories"("OrganizationId", "CategoryId")
);
-- One ledger per organization, anchored to its equity account.
create table "Ledger"
(
"OrganizationId" uuid not null references "Organizations"("OrganizationId"),
"EquityAccountId" int not null,
primary key ("OrganizationId"),
constraint "Ledger_Accounts_EquityAccount_fkey" foreign key ("OrganizationId", "EquityAccountId")
references "Accounts"("OrganizationId", "AccountId")
);
-- Double-entry rows: each ledger transaction pairs a debit-side and a
-- credit-side account transaction of the same amount and category.
-- NOTE(review): "Amount" is nullable while everything else is not null —
-- confirm that is intentional.
create table "LedgerTransactions"
(
"OrganizationId" uuid not null,
"LedgerTransactionId" bigserial,
"DebitAccountId" int not null,
"DebitTransactionId" bigint not null,
"CreditAccountId" int not null,
"CreditTransactionId" bigint not null,
"CategoryId" int not null,
"Amount" numeric(14,2),
"CreatedOn" timestamp not null default now(),
primary key ("OrganizationId", "LedgerTransactionId"),
constraint "LedgerTransactions_Transactions_DebitTransaction_fkey" foreign key ("OrganizationId", "DebitAccountId", "DebitTransactionId")
references "Transactions"("OrganizationId", "AccountId", "TransactionId"),
constraint "LedgerTransactions_Transactions_CreditTransaction_fkey" foreign key ("OrganizationId", "CreditAccountId", "CreditTransactionId")
references "Transactions"("OrganizationId", "AccountId", "TransactionId"),
constraint "LedgerTransactions_TransactionCategories_fkey" foreign key ("OrganizationId", "CategoryId")
references "TransactionCategories"("OrganizationId", "CategoryId")
);
|
<reponame>steven-cd/web-ui<gh_stars>1-10
-- Government intervention policies per region/subregion, with the policy's
-- full date lifecycle (issued -> start -> ease -> expiration -> end).
-- Only start_date is mandatory; the other milestones may be unknown.
CREATE TABLE intervention_data(
    id INT AUTO_INCREMENT PRIMARY KEY,
    region_id VARCHAR(32) NOT NULL,
    subregion_id VARCHAR(32),
    policy VARCHAR(255) NOT NULL,
    notes TEXT,
    source TEXT,
    issue_date DATE,
    start_date DATE NOT NULL,
    ease_date DATE,
    expiration_date DATE,
    end_date DATE,
    -- row audit columns maintained by MySQL itself
    created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
    INDEX (region_id),
    INDEX (subregion_id)
);  -- fix: the statement was missing its terminating semicolon
|
<reponame>Urbine/YohamGabrielB
-- Track listing joined out to its album, the album's artist, genre and year.
-- Fix: the original chained bare JOINs with a single combined trailing ON
-- (implicit-cross-join antipattern); each join now carries its own ON clause.
-- Result set is identical — all conditions were equality inner-join predicates.
SELECT Track.title, Artist.name, Album.title, Genre.name, Year.released
FROM Track
JOIN Album ON Track.album_id = Album.id
JOIN Artist ON Album.artist_id = Artist.id
JOIN Genre ON Track.genre_id = Genre.id
JOIN Year ON Track.year_id = Year.id |
<gh_stars>1-10
--
-- Name: kartoza_building_recode_mapper(); Type: FUNCTION; Schema: public; Owner: -
--
DROP FUNCTION IF EXISTS public.kartoza_building_recode_mapper CASCADE ;
-- Trigger function: derives building_type_score (a 0.3–1.0 importance weight)
-- from the incoming row's building_type before it is written.
CREATE FUNCTION public.kartoza_building_recode_mapper() RETURNS trigger
    LANGUAGE plpgsql
    AS $$
BEGIN
    -- Fix: the original used SELECT ... INTO new.building_type_score
    -- FROM osm_buildings, scanning an unrelated table even though the CASE
    -- depends only on NEW; with an empty osm_buildings the score silently
    -- became NULL. A direct assignment is correct and avoids the scan.
    new.building_type_score :=
        CASE
            WHEN new.building_type = 'Clinic/Doctor' THEN 0.7
            WHEN new.building_type = 'Commercial' THEN 0.7
            WHEN new.building_type = 'School' THEN 1
            WHEN new.building_type = 'Government' THEN 0.7
            WHEN new.building_type ILIKE 'Place of Worship%' THEN 0.5
            WHEN new.building_type = 'Residential' THEN 1
            WHEN new.building_type = 'Police Station' THEN 0.7
            WHEN new.building_type = 'Fire Station' THEN 0.7
            WHEN new.building_type = 'Hospital' THEN 0.7
            WHEN new.building_type = 'Supermarket' THEN 0.7
            WHEN new.building_type = 'Sports Facility' THEN 0.3
            WHEN new.building_type = 'University/College' THEN 1.0
            ELSE 0.3  -- unknown/unlisted building types get the floor score
        END;
    RETURN NEW;
END
$$;
|
# ************************************************************
# Sequel Pro SQL dump
# Version 4541
#
# http://www.sequelpro.com/
# https://github.com/sequelpro/sequelpro
#
# Host: 127.0.0.1 (MySQL 5.7.21)
# Database: glue
# Generation Time: 2018-08-09 10:14:33 +0000
# ************************************************************
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8 */;
/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
# Dump of table task
# ------------------------------------------------------------
# Per-organisation task records; optional assignee (userId) and status, with
# a soft-delete/visibility flag (`active`) and audit timestamps.
CREATE TABLE `task` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT,
`organisationId` int(11) unsigned NOT NULL,
`name` varchar(255) NOT NULL DEFAULT '',
`notes` varchar(255) DEFAULT NULL,
`userId` int(11) unsigned DEFAULT NULL,
`statusId` int(11) unsigned DEFAULT NULL,
`active` bit(1) NOT NULL,
`createdDate` datetime NOT NULL,
`modifiedDate` datetime DEFAULT NULL,
PRIMARY KEY (`id`),
KEY `organisationId` (`organisationId`),
KEY `userId` (`userId`),
CONSTRAINT `task_ibfk_1` FOREIGN KEY (`organisationId`) REFERENCES `organisation` (`id`),
CONSTRAINT `task_ibfk_2` FOREIGN KEY (`userId`) REFERENCES `user` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
-- Rebuild the (targeting_type, segment) index as UNIQUE to enforce one row
-- per combination. Not atomic: the table is briefly unindexed between the two
-- statements, and the create fails if duplicate rows already exist.
-- NOTE(review): the new name drops the plural ("infos" -> "info") — confirm
-- the rename is intentional and migrations/monitoring reference the new name.
drop index index_targeting_infos_on_targeting_and_segment;
create unique index index_targeting_info_on_targeting_and_segment on targeting_infos(targeting_type, segment);
|
-- Reports, for every user table, each index together with its filegroup,
-- physical data file, data space and owning schema (SQL Server, legacy
-- sys.sysindexes compatibility view).
-- Fix: sys.schemas was comma-joined with NO join predicate, producing a
-- cartesian product (every row repeated once per schema). The schema is now
-- correlated through sys.objects, and all comma joins are explicit ANSI joins.
use idi_sandpit
SELECT 'table_name' = OBJECT_NAME(i.id),
    i.indid,
    'index_name' = i.name,
    i.groupid,
    'filegroup' = f.name,
    'file_name' = d.physical_name,
    'dataspace' = s.name,
    sc.name
FROM sys.sysindexes i
    INNER JOIN sys.filegroups f ON f.data_space_id = i.groupid
    INNER JOIN sys.database_files d ON d.data_space_id = f.data_space_id
    INNER JOIN sys.data_spaces s ON s.data_space_id = f.data_space_id
    -- correlate the owning schema via the table object itself
    INNER JOIN sys.objects o ON o.object_id = i.id
    INNER JOIN sys.schemas sc ON sc.schema_id = o.schema_id
WHERE OBJECTPROPERTY(i.id, 'IsUserTable') = 1
ORDER BY sc.name,
    f.name,
    OBJECT_NAME(i.id),
    i.groupid |
/*
This script is monitoring the `EXA_ALL_SESSIONS` system table to identify "bad" sessions based on some criteria, and subsequently, kill/abort these sessions.
*/
CREATE SCHEMA IF NOT EXISTS EXA_TOOLBOX;
--/
CREATE OR REPLACE SCRIPT EXA_TOOLBOX.session_watchdog() RETURNS TABLE AS
-- requirement: KILL ANY SESSION
-- output: List of sessions that have been aborted/killed, including the respective reason
--[[
Configuration section
--]]
-- Table with user-specific limits
-- query_timeout / idle_timeout are in seconds; temp_ram is in MiB (same unit
-- as EXA_ALL_SESSIONS.TEMP_DB_RAM). Users not listed here are never killed.
local USER_LIMITS = {
USER1 = { query_timeout = 300, temp_ram = 3000, idle_timeout = 1800 },
USER2 = { query_timeout = 150, idle_timeout = 300 },
SYS = { temp_ram = 10000 }
}
-- Logging table, will be returned by script
local log_data = {}
--[[
Given maximal and current measure value, the given session is killed or the current statement is aborted.
--]]
-- session: one row of the session_list query below; measure_name/measure_value
-- describe the violated limit, max_value its configured maximum.
local function kill_session( session, measure_name, measure_value, max_value )
if measure_value / max_value <= 1.1 then
-- exceeded by 0-10% ... try to kill statement only
local success = pquery( [[kill statement ]] .. session.STMT_ID .. [[ in session ]] .. session.SESSION_ID )
if success then
log_data[1+#log_data] = { session.SESSION_ID, session.USER_NAME, 'soft ' .. measure_name, measure_value, max_value }
end
else
local success = pquery( [[kill session ]] .. session.SESSION_ID )
if success then
log_data[1+#log_data] = { session.SESSION_ID, session.USER_NAME, 'hard ' .. measure_name, measure_value, max_value }
end
end
end
--[[
Preparation section
--]]
-- get list of current sessions, excluding disconnected idle sessions
-- (DURATION is parsed from the 'H..H:MM:SS' string into seconds in-query).
-- NOTE(review): the temp_db_ram > 0 filter also drops any session using no
-- TEMP at all, so query/idle timeouts never fire for those — confirm intended.
local session_list = query([[
select
to_char(SESSION_ID) as SESSION_ID, STMT_ID, USER_NAME, STATUS, COMMAND_NAME,
right(duration,2) + 60*regexp_substr(duration, '(?<=:)[0-9]{2}(?=:)') + 3600 * regexp_substr(duration, '^[0-9]+(?=:)') as duration,
temp_db_ram
from
sys.exa_all_sessions
where
temp_db_ram > 0
]])
--[[
Action section
--]]
-- go through the list and check each session for its limits
for snum = 1, #session_list do
-- session information
local usession = session_list[snum]
-- looking up the session's user
local ulimit = USER_LIMITS[usession.USER_NAME]
-- ignore service process and unlimited users
if usession.SESSION_ID ~= '4' and ulimit ~= nil then
-- dummy loop allows us to use 'break' instead of cascading if/else/if
repeat
-- check TEMP
if ulimit.temp_ram ~= nil and usession.TEMP_DB_RAM > ulimit.temp_ram then
kill_session( usession, 'TEMP', usession.TEMP_DB_RAM, ulimit.temp_ram )
break
end
-- check query runtime
if usession.STATUS ~= 'IDLE' and ulimit.query_timeout ~= nil and usession.DURATION > ulimit.query_timeout then
kill_session( usession, 'QUERY TIMEOUT', usession.DURATION, ulimit.query_timeout )
break
end
-- check idle timeout
if usession.STATUS == 'IDLE' and ulimit.idle_timeout ~= nil and usession.DURATION > ulimit.idle_timeout then
kill_session( usession, 'IDLE TIMEOUT', usession.DURATION, ulimit.idle_timeout )
break
end
-- dummy loop exits after first iteration
until true
end
end
return log_data, "SESSION_ID decimal(20), USER_NAME varchar(128), MEASURE_TYPE varchar(20), MEASURE_VALUE decimal(9,3), MEASURE_LIMIT decimal(9,3)"
/
--EXECUTE SCRIPT EXA_TOOLBOX.SESSION_WATCHDOG() |
-- file:rolenames.sql ln:320 expect:true
-- Regression-test fragment: "user" is a quoted role name (not the keyword),
-- and the option value '"user"_alt' exercises special characters in a user
-- mapping option; the test expects the statement to succeed.
ALTER USER MAPPING FOR "user" SERVER sv4
OPTIONS (SET user '"user"_alt')
|
# Write your MySQL query statement below
# #Easy #Database
# Flip every row's sex in a single pass: 'm' becomes 'f', anything else 'm'
# (same mapping the original IF(sex = 'm', 'f', 'm') produced).
UPDATE Salary
SET sex = CASE sex WHEN 'm' THEN 'f' ELSE 'm' END;
|
<reponame>salihhdemirr/thingsboard
--
-- Copyright © 2016-2020 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- call create_partition_ts_kv_table();
-- Migration step: turns the legacy ts_kv table into a ts-range-partitioned
-- table. The legacy rows stay behind in ts_kv_old; the new ts_kv drops
-- entity_type, stores entity_id as uuid and key as an integer dictionary id.
-- Statement order matters: the PK is (re)created first so it can be renamed
-- together with the table, and the partitioned copy is built via LIKE.
CREATE OR REPLACE PROCEDURE create_partition_ts_kv_table()
LANGUAGE plpgsql AS
$$
BEGIN
ALTER TABLE ts_kv
DROP CONSTRAINT IF EXISTS ts_kv_unq_key;
ALTER TABLE ts_kv
DROP CONSTRAINT IF EXISTS ts_kv_pkey;
ALTER TABLE ts_kv
ADD CONSTRAINT ts_kv_pkey PRIMARY KEY (entity_type, entity_id, key, ts);
ALTER TABLE ts_kv
RENAME TO ts_kv_old;
ALTER TABLE ts_kv_old
RENAME CONSTRAINT ts_kv_pkey TO ts_kv_pkey_old;
-- New partitioned table with the old column layout (constraints not copied).
CREATE TABLE IF NOT EXISTS ts_kv
(
LIKE ts_kv_old
)
PARTITION BY RANGE (ts);
ALTER TABLE ts_kv
DROP COLUMN entity_type;
ALTER TABLE ts_kv
ALTER COLUMN entity_id TYPE uuid USING entity_id::uuid;
ALTER TABLE ts_kv
ALTER COLUMN key TYPE integer USING key::integer;
ALTER TABLE ts_kv
ADD CONSTRAINT ts_kv_pkey PRIMARY KEY (entity_id, key, ts);
-- Catch-all partition for timestamps outside all explicit ranges.
CREATE TABLE IF NOT EXISTS ts_kv_indefinite PARTITION OF ts_kv DEFAULT;
END;
$$;
-- call create_new_ts_kv_latest_table();
-- Migration step for the latest-values table, mirroring the ts_kv migration:
-- rename the legacy table to ts_kv_latest_old and rebuild ts_kv_latest with
-- uuid entity_id and integer dictionary key. Idempotent: if the _old table
-- already exists (re-run), it only creates ts_kv_latest when that is missing.
CREATE OR REPLACE PROCEDURE create_new_ts_kv_latest_table()
LANGUAGE plpgsql AS
$$
BEGIN
IF NOT EXISTS(SELECT FROM pg_tables WHERE schemaname = 'public' AND tablename = 'ts_kv_latest_old') THEN
ALTER TABLE ts_kv_latest
DROP CONSTRAINT IF EXISTS ts_kv_latest_unq_key;
ALTER TABLE ts_kv_latest
DROP CONSTRAINT IF EXISTS ts_kv_latest_pkey;
ALTER TABLE ts_kv_latest
ADD CONSTRAINT ts_kv_latest_pkey PRIMARY KEY (entity_type, entity_id, key);
ALTER TABLE ts_kv_latest
RENAME TO ts_kv_latest_old;
ALTER TABLE ts_kv_latest_old
RENAME CONSTRAINT ts_kv_latest_pkey TO ts_kv_latest_pkey_old;
CREATE TABLE IF NOT EXISTS ts_kv_latest
(
LIKE ts_kv_latest_old
);
ALTER TABLE ts_kv_latest
DROP COLUMN entity_type;
ALTER TABLE ts_kv_latest
ALTER COLUMN entity_id TYPE uuid USING entity_id::uuid;
ALTER TABLE ts_kv_latest
ALTER COLUMN key TYPE integer USING key::integer;
ALTER TABLE ts_kv_latest
ADD CONSTRAINT ts_kv_latest_pkey PRIMARY KEY (entity_id, key);
ELSE
RAISE NOTICE 'ts_kv_latest_old table already exists!';
-- Re-run path: create the new-layout table directly if it is absent.
IF NOT EXISTS(SELECT FROM pg_tables WHERE schemaname = 'public' AND tablename = 'ts_kv_latest') THEN
CREATE TABLE IF NOT EXISTS ts_kv_latest
(
entity_id uuid NOT NULL,
key int NOT NULL,
ts bigint NOT NULL,
bool_v boolean,
str_v varchar(10000000),
long_v bigint,
dbl_v double precision,
json_v json,
CONSTRAINT ts_kv_latest_pkey PRIMARY KEY (entity_id, key)
);
END IF;
END IF;
END;
$$;
-- Maps each distinct day/month/year present in ts_kv_old (ts is epoch millis)
-- to a partition label (e.g. '2020_01_15', '2020_01', '2020') and its
-- half-open [from_ts, to_ts) millisecond range. Raises for any other
-- partition_type value.
CREATE OR REPLACE FUNCTION get_partitions_data(IN partition_type varchar)
RETURNS
TABLE
(
partition_date text,
from_ts bigint,
to_ts bigint
)
AS
$$
BEGIN
CASE
WHEN partition_type = 'DAYS' THEN
RETURN QUERY SELECT day_date.day AS partition_date,
(extract(epoch from (day_date.day)::timestamp) * 1000)::bigint AS from_ts,
(extract(epoch from (day_date.day::date + INTERVAL '1 DAY')::timestamp) *
1000)::bigint AS to_ts
FROM (SELECT DISTINCT TO_CHAR(TO_TIMESTAMP(ts / 1000), 'YYYY_MM_DD') AS day
FROM ts_kv_old) AS day_date;
WHEN partition_type = 'MONTHS' THEN
RETURN QUERY SELECT SUBSTRING(month_date.first_date, 1, 7) AS partition_date,
(extract(epoch from (month_date.first_date)::timestamp) * 1000)::bigint AS from_ts,
(extract(epoch from (month_date.first_date::date + INTERVAL '1 MONTH')::timestamp) *
1000)::bigint AS to_ts
FROM (SELECT DISTINCT TO_CHAR(TO_TIMESTAMP(ts / 1000), 'YYYY_MM_01') AS first_date
FROM ts_kv_old) AS month_date;
WHEN partition_type = 'YEARS' THEN
RETURN QUERY SELECT SUBSTRING(year_date.year, 1, 4) AS partition_date,
(extract(epoch from (year_date.year)::timestamp) * 1000)::bigint AS from_ts,
(extract(epoch from (year_date.year::date + INTERVAL '1 YEAR')::timestamp) *
1000)::bigint AS to_ts
FROM (SELECT DISTINCT TO_CHAR(TO_TIMESTAMP(ts / 1000), 'YYYY_01_01') AS year
FROM ts_kv_old) AS year_date;
ELSE
RAISE EXCEPTION 'Failed to parse partitioning property: % !', partition_type;
END CASE;
END;
$$ LANGUAGE plpgsql;
-- call create_partitions();
-- Creates one ts_kv range partition per bucket reported by
-- get_partitions_data(); partition tables are named ts_kv_<label>
-- (e.g. ts_kv_2020_01) and cover [from_ts, to_ts) in epoch millis.
CREATE OR REPLACE PROCEDURE create_partitions(IN partition_type varchar)
LANGUAGE plpgsql AS
$$
DECLARE
partition_date varchar;
from_ts bigint;
to_ts bigint;
partitions_cursor CURSOR FOR SELECT *
FROM get_partitions_data(partition_type);
BEGIN
OPEN partitions_cursor;
LOOP
FETCH partitions_cursor INTO partition_date, from_ts, to_ts;
EXIT WHEN NOT FOUND;
-- DDL must be assembled as text; partition bounds come from trusted
-- system data, not user input.
EXECUTE 'CREATE TABLE IF NOT EXISTS ts_kv_' || partition_date ||
' PARTITION OF ts_kv FOR VALUES FROM (' || from_ts ||
') TO (' || to_ts || ');';
RAISE NOTICE 'A partition % has been created!',CONCAT('ts_kv_', partition_date);
END LOOP;
CLOSE partitions_cursor;
END;
$$;
-- usage: call create_ts_kv_dictionary_table();
-- Creates the key-name -> integer-id lookup used to shrink ts_kv rows:
-- each distinct textual telemetry key gets a serial key_id.
CREATE OR REPLACE PROCEDURE create_ts_kv_dictionary_table()
    LANGUAGE plpgsql AS
$$
BEGIN
    CREATE TABLE IF NOT EXISTS ts_kv_dictionary (
        key    varchar(255) NOT NULL,
        key_id serial UNIQUE,
        CONSTRAINT ts_key_id_pkey PRIMARY KEY (key)
    );
END;
$$;
-- call insert_into_dictionary();
-- Populates ts_kv_dictionary with every distinct key found in ts_kv_old,
-- skipping keys that are already present; emits a NOTICE per key either way.
-- NOTE(review): a set-based INSERT ... SELECT DISTINCT ... WHERE NOT EXISTS
-- would be faster, but would lose the per-key progress notices.
CREATE OR REPLACE PROCEDURE insert_into_dictionary()
LANGUAGE plpgsql AS
$$
DECLARE
insert_record RECORD;
key_cursor CURSOR FOR SELECT DISTINCT key
FROM ts_kv_old
ORDER BY key;
BEGIN
OPEN key_cursor;
LOOP
FETCH key_cursor INTO insert_record;
EXIT WHEN NOT FOUND;
IF NOT EXISTS(SELECT key FROM ts_kv_dictionary WHERE key = insert_record.key) THEN
INSERT INTO ts_kv_dictionary(key) VALUES (insert_record.key);
RAISE NOTICE 'Key: % has been inserted into the dictionary!',insert_record.key;
ELSE
RAISE NOTICE 'Key: % already exists in the dictionary!',insert_record.key;
END IF;
END LOOP;
CLOSE key_cursor;
END;
$$;
-- Rebuilds a canonical 8-4-4-4-12 UUID string from the legacy packed hex id
-- text stored in entity_id, then casts it to uuid via the OUT parameter.
-- NOTE(review): the substring offsets and the literal '-1' segment assume
-- ThingsBoard's legacy time-based id layout — confirm against the writer side
-- before reusing elsewhere; no length/format validation is performed.
CREATE OR REPLACE FUNCTION to_uuid(IN entity_id varchar, OUT uuid_id uuid) AS
$$
BEGIN
uuid_id := substring(entity_id, 8, 8) || '-' || substring(entity_id, 4, 4) || '-1' || substring(entity_id, 1, 3) ||
'-' || substring(entity_id, 16, 4) || '-' || substring(entity_id, 20, 12);
END;
$$ LANGUAGE plpgsql;
-- Bulk-migrates ts_kv_old into the partitioned ts_kv via a two-step COPY:
-- export the old rows (entity_id converted to uuid, key mapped to its
-- dictionary id) to path_to_file, then COPY the file back into ts_kv.
-- NOTE(review): COPY TO/FROM a path runs server-side — the file lands on the
-- database host and requires superuser (or pg_read/write_server_files).
CREATE OR REPLACE PROCEDURE insert_into_ts_kv(IN path_to_file varchar)
LANGUAGE plpgsql AS
$$
BEGIN
EXECUTE format('COPY (SELECT to_uuid(entity_id) AS entity_id,
ts_kv_records.key AS key,
ts_kv_records.ts AS ts,
ts_kv_records.bool_v AS bool_v,
ts_kv_records.str_v AS str_v,
ts_kv_records.long_v AS long_v,
ts_kv_records.dbl_v AS dbl_v
FROM (SELECT entity_id AS entity_id,
key_id AS key,
ts,
bool_v,
str_v,
long_v,
dbl_v
FROM ts_kv_old
INNER JOIN ts_kv_dictionary ON (ts_kv_old.key = ts_kv_dictionary.key)) AS ts_kv_records) TO %L;',
path_to_file);
EXECUTE format('COPY ts_kv FROM %L', path_to_file);
END
$$;
-- call insert_into_ts_kv_latest();
-- Same two-step COPY migration as insert_into_ts_kv, but for the
-- latest-values table: ts_kv_latest_old -> file -> ts_kv_latest, with
-- entity_id converted to uuid and key mapped through ts_kv_dictionary.
-- NOTE(review): the file path is server-side; see insert_into_ts_kv.
CREATE OR REPLACE PROCEDURE insert_into_ts_kv_latest(IN path_to_file varchar)
LANGUAGE plpgsql AS
$$
BEGIN
EXECUTE format('COPY (SELECT to_uuid(entity_id) AS entity_id,
ts_kv_latest_records.key AS key,
ts_kv_latest_records.ts AS ts,
ts_kv_latest_records.bool_v AS bool_v,
ts_kv_latest_records.str_v AS str_v,
ts_kv_latest_records.long_v AS long_v,
ts_kv_latest_records.dbl_v AS dbl_v
FROM (SELECT entity_id AS entity_id,
key_id AS key,
ts,
bool_v,
str_v,
long_v,
dbl_v
FROM ts_kv_latest_old
INNER JOIN ts_kv_dictionary ON (ts_kv_latest_old.key = ts_kv_dictionary.key)) AS ts_kv_latest_records) TO %L;',
path_to_file);
EXECUTE format('COPY ts_kv_latest FROM %L', path_to_file);
END;
$$;
-- call insert_into_ts_kv_cursor();
-- Cursor-based fallback for insert_into_ts_kv: migrates ts_kv_old rows into
-- the partitioned ts_kv one INSERT at a time (no server-side file needed),
-- logging progress every insert_size rows. Slower but works without
-- file-system privileges.
CREATE OR REPLACE PROCEDURE insert_into_ts_kv_cursor()
LANGUAGE plpgsql AS
$$
DECLARE
insert_size CONSTANT integer := 10000;
insert_counter integer DEFAULT 0;
insert_record RECORD;
insert_cursor CURSOR FOR SELECT to_uuid(entity_id) AS entity_id,
ts_kv_records.key AS key,
ts_kv_records.ts AS ts,
ts_kv_records.bool_v AS bool_v,
ts_kv_records.str_v AS str_v,
ts_kv_records.long_v AS long_v,
ts_kv_records.dbl_v AS dbl_v
FROM (SELECT entity_id AS entity_id,
key_id AS key,
ts,
bool_v,
str_v,
long_v,
dbl_v
FROM ts_kv_old
INNER JOIN ts_kv_dictionary ON (ts_kv_old.key = ts_kv_dictionary.key)) AS ts_kv_records;
BEGIN
OPEN insert_cursor;
LOOP
insert_counter := insert_counter + 1;
FETCH insert_cursor INTO insert_record;
IF NOT FOUND THEN
RAISE NOTICE '% records have been inserted into the partitioned ts_kv!',insert_counter - 1;
EXIT;
END IF;
INSERT INTO ts_kv(entity_id, key, ts, bool_v, str_v, long_v, dbl_v)
VALUES (insert_record.entity_id, insert_record.key, insert_record.ts, insert_record.bool_v, insert_record.str_v,
insert_record.long_v, insert_record.dbl_v);
IF MOD(insert_counter, insert_size) = 0 THEN
RAISE NOTICE '% records have been inserted into the partitioned ts_kv!',insert_counter;
END IF;
END LOOP;
CLOSE insert_cursor;
END;
$$;
-- call insert_into_ts_kv_latest_cursor();
-- Cursor-based fallback for insert_into_ts_kv_latest: migrates
-- ts_kv_latest_old rows into ts_kv_latest one INSERT at a time (no
-- server-side file needed), logging progress every insert_size rows.
CREATE OR REPLACE PROCEDURE insert_into_ts_kv_latest_cursor()
LANGUAGE plpgsql AS
$$
DECLARE
insert_size CONSTANT integer := 10000;
insert_counter integer DEFAULT 0;
insert_record RECORD;
insert_cursor CURSOR FOR SELECT to_uuid(entity_id) AS entity_id,
ts_kv_latest_records.key AS key,
ts_kv_latest_records.ts AS ts,
ts_kv_latest_records.bool_v AS bool_v,
ts_kv_latest_records.str_v AS str_v,
ts_kv_latest_records.long_v AS long_v,
ts_kv_latest_records.dbl_v AS dbl_v
FROM (SELECT entity_id AS entity_id,
key_id AS key,
ts,
bool_v,
str_v,
long_v,
dbl_v
FROM ts_kv_latest_old
INNER JOIN ts_kv_dictionary ON (ts_kv_latest_old.key = ts_kv_dictionary.key)) AS ts_kv_latest_records;
BEGIN
OPEN insert_cursor;
LOOP
insert_counter := insert_counter + 1;
FETCH insert_cursor INTO insert_record;
IF NOT FOUND THEN
RAISE NOTICE '% records have been inserted into the ts_kv_latest!',insert_counter - 1;
EXIT;
END IF;
INSERT INTO ts_kv_latest(entity_id, key, ts, bool_v, str_v, long_v, dbl_v)
VALUES (insert_record.entity_id, insert_record.key, insert_record.ts, insert_record.bool_v, insert_record.str_v,
insert_record.long_v, insert_record.dbl_v);
IF MOD(insert_counter, insert_size) = 0 THEN
RAISE NOTICE '% records have been inserted into the ts_kv_latest!',insert_counter;
END IF;
END LOOP;
CLOSE insert_cursor;
END;
$$;
|
-- Weekly order counts per status: buckets orders by ISO year-week
-- ('IYYY-IW') from the sat_order_details satellite. The {{ ... }} Jinja is
-- rendered by dbt at compile time; dbt_utils.group_by(n=2) expands to
-- GROUP BY on the first two select-list columns (week, status).
with sat_order_details as (
select * from {{ ref('sat_order_details') }}
)
SELECT to_char(order_date, 'IYYY-IW') week, status, count(*)
FROM sat_order_details
{{ dbt_utils.group_by(n=2) }}
|
<gh_stars>1-10
-- Test fixture: two structurally different university schemas describing
-- overlapping real-world entities (used for schema matching / integration).
-- CASCADE added so the script is idempotent: a plain DROP SCHEMA fails when
-- the schema still contains the tables created by a previous run.
DROP SCHEMA IF EXISTS "uni1" CASCADE;
DROP SCHEMA IF EXISTS "uni2" CASCADE;
CREATE SCHEMA "uni1";
-- Students enrolled at university 1.
CREATE TABLE "uni1"."student" (
"s_id" INT NOT NULL PRIMARY KEY,
"first_name" VARCHAR(40) NOT NULL,
"last_name" VARCHAR(40) NOT NULL
);
-- Academic staff; "position" is a coded rank (see seed data).
CREATE TABLE "uni1"."academic" (
"a_id" INT NOT NULL PRIMARY KEY,
"first_name" VARCHAR(40) NOT NULL,
"last_name" VARCHAR(40) NOT NULL,
"position" INT NOT NULL
);
CREATE INDEX ON "uni1"."academic" ("position");
CREATE TABLE "uni1"."course" (
"c_id" INT NOT NULL PRIMARY KEY,
"title" VARCHAR(100) NOT NULL
);
-- Junction table: which academic teaches which course.
CREATE TABLE "uni1"."teaching" (
"c_id" INT NOT NULL,
"a_id" INT NOT NULL
);
CREATE INDEX ON "uni1"."teaching" ("c_id");
CREATE INDEX ON "uni1"."teaching" ("a_id");
-- Junction table: which student is registered for which course.
-- (Hyphenated name kept: it is part of the fixture's public interface.)
CREATE TABLE "uni1"."course-registration" (
"c_id" INT NOT NULL,
"s_id" INT NOT NULL
);
CREATE INDEX ON "uni1"."course-registration" ("c_id");
CREATE INDEX ON "uni1"."course-registration" ("s_id");
ALTER TABLE "uni1"."teaching"
ADD FOREIGN KEY ("c_id") REFERENCES "uni1"."course"("c_id");
ALTER TABLE "uni1"."teaching"
ADD FOREIGN KEY ("a_id") REFERENCES "uni1"."academic"("a_id");
ALTER TABLE "uni1"."course-registration"
ADD FOREIGN KEY ("c_id") REFERENCES "uni1"."course"("c_id");
ALTER TABLE "uni1"."course-registration"
ADD FOREIGN KEY ("s_id") REFERENCES "uni1"."student"("s_id");
CREATE SCHEMA "uni2";
-- University 2 models students and staff in a single "person" table;
-- "status" is a coded role/occupation.
CREATE TABLE "uni2"."person" (
"pid" INT NOT NULL PRIMARY KEY,
"fname" VARCHAR(40) NOT NULL,
"lname" VARCHAR(40) NOT NULL,
"status" INT NOT NULL
);
CREATE INDEX ON "uni2"."person" ("status");
-- Courses carry their lecturer and lab teacher directly (denormalized
-- compared to uni1's "teaching" junction table).
CREATE TABLE "uni2"."course" (
"cid" INT NOT NULL PRIMARY KEY,
"lecturer" INT NOT NULL,
"lab_teacher" INT NOT NULL,
"topic" VARCHAR(100) NOT NULL
);
CREATE INDEX ON "uni2"."course" ("lecturer");
CREATE INDEX ON "uni2"."course" ("lab_teacher");
CREATE TABLE "uni2"."registration" (
"pid" INT NOT NULL,
"cid" INT NOT NULL
);
CREATE INDEX ON "uni2"."registration" ("pid");
CREATE INDEX ON "uni2"."registration" ("cid");
ALTER TABLE "uni2"."course"
ADD FOREIGN KEY ("lecturer") REFERENCES "uni2"."person"("pid");
ALTER TABLE "uni2"."course"
ADD FOREIGN KEY ("lab_teacher") REFERENCES "uni2"."person"("pid");
ALTER TABLE "uni2"."registration"
ADD FOREIGN KEY ("pid") REFERENCES "uni2"."person"("pid");
ALTER TABLE "uni2"."registration"
ADD FOREIGN KEY ("cid") REFERENCES "uni2"."course"("cid");
-- Seed data for "uni1".
INSERT INTO "uni1"."student"
("s_id","first_name","last_name") VALUES
(1,'Mary','Smith'),
(2,'John','Doe'),
(3, 'Franck', 'Combs'),
(4, 'Billy', 'Hinkley'),
(5, 'Alison', 'Robards');
-- "position" codes repeat across rows (1, 2, 3, 8, 9 each appear twice):
-- it is a rank code, not a unique attribute.
INSERT INTO "uni1"."academic"
("a_id","first_name","last_name", "position") VALUES
(1,'Anna','Chambers', 1),
(2,'Edward','May', 9),
(3, 'Rachel', 'Ward', 8),
(4, 'Priscilla', 'Hildr', 2),
(5, 'Zlata', 'Richmal', 3),
(6, 'Nathaniel', 'Abolfazl', 4),
(7, 'Sergei', 'Elian', 5),
(8, 'Alois', 'Jayant', 6),
(9, 'Torborg', 'Chernobog',7),
(10, 'Udi', 'Heinrike', 8),
(11, 'Alvena', 'Merry', 9),
(12, 'Kyler', 'Josephina', 1),
(13, 'Gerard', 'Cosimo', 2),
(14, 'Karine', 'Attilio', 3);
INSERT INTO "uni1"."course" ("c_id", "title") VALUES
(1234, 'Linear Algebra'),
(1235, 'Analysis'),
(1236, 'Operating Systems'),
(1500, 'Data Mining'),
(1501, 'Theory of Computing'),
(1502, 'Research Methods');
-- Courses may have several teachers (e.g. 1236 has three).
INSERT INTO "uni1"."teaching" ("c_id", "a_id") VALUES
(1234, 1),
(1234, 2),
(1235, 1),
(1235, 3),
(1236, 4),
(1236, 8),
(1236, 9),
(1500, 12),
(1500, 2),
(1501, 12),
(1501, 14),
(1501, 7),
(1502, 13);
INSERT INTO "uni1"."course-registration" ("c_id","s_id") VALUES
(1234, 1),
(1234, 2),
(1234, 3),
(1235, 1),
(1235, 2),
(1236, 1),
(1236, 3),
(1500, 4),
(1500, 5),
(1501, 4),
(1502, 5);
-- Seed data for "uni2". Several people intentionally overlap with uni1
-- (same names, different roles) to exercise entity matching.
INSERT INTO "uni2"."person"
("pid", "fname", "lname", "status") VALUES
(1, 'Zak', 'Lane', 8),
(2, 'Mattie', 'Moses', 1),
(3, 'Céline', 'Mendez', 2),
(4, 'Rachel', 'Ward', 9), -- external teacher uni1
(5, 'Alvena', 'Merry', 3), -- postDoc uni1
(6, 'Victor', 'Scott', 7),
(7, 'Kellie', 'Griffin', 8),
(8, 'Sueann', 'Samora', 9),
(9, 'Billy', 'Hinkley', 2), -- grad student uni1
(10, 'Larry', 'Alfaro', 1),
(11, 'John', 'Sims', 4);
INSERT INTO "uni2"."course"
("cid", "lecturer", "lab_teacher", "topic") VALUES
(1, 1, 3, 'Information security'),
(2, 8, 5, 'Software factory'),
(3, 7, 8, 'Software process management'),
(4, 7, 9, 'Introduction to programming'),
(5, 1, 8, 'Discrete mathematics and logic'),
(6, 7, 4, 'Intelligent Systems');
-- "registration" has no primary key, so duplicates are accepted.
INSERT INTO "uni2"."registration" ("pid","cid") VALUES
(2, 1),
(10, 4),
(2, 5),
(10, 4), -- NOTE(review): exact duplicate of (10, 4) two rows up — confirm whether intended
(3, 2),
(3, 3),
(9, 2);
|
<filename>NcaaMarchMadness/src/main/data/load_tournament_participants-2018.sql
--NEXT STATISTICAL CATEGORY FOLLOWS:
-- 2018 NCAA tournament field (68 teams), one INSERT per team, listed in
-- bracket order (South, West, East, Midwest regions).
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Virginia');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Cincinnati');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Tennessee');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Arizona');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Kentucky');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Miami FL');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Nevada');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Creighton');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Kansas St');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Texas');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Loyola Chicago');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Davidson');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Buffalo');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Wright St');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Georgia St');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'UMBC');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Xavier');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'North Carolina');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Michigan');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Gonzaga');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Ohio St');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Houston');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Texas AM');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Missouri');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Florida St');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Providence');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'San Diego St');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'South Dakota St');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'UNC Greensboro');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Montana');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Lipscomb');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'NC Central');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Texas Southern');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Villanova');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Purdue');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Texas Tech');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Wichita St');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'West Virginia');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Florida');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Arkansas');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Virginia Tech');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Alabama');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Butler');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'St Bonaventure');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'UCLA');
-- Fixed: anonymization placeholder '<NAME>' — the 2018 East #12 seed was Murray St.
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Murray St');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Marshall');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'SFA');
-- Fixed: anonymization placeholder '<NAME>' — the 2018 East #15 seed was Cal St Fullerton.
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'CS Fullerton');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'LIU Brooklyn');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Radford');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Kansas');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Duke');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Michigan St');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Auburn');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Clemson');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'TCU');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Rhode Island');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Seton Hall');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'NC State');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Oklahoma');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Arizona St');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Syracuse');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'New Mexico St');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Col of Charleston');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Bucknell');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Iona');
INSERT INTO tournament_participant(YEAR, TEAM_NAME) VALUES(2018, 'Penn');
|
-- Table structure for `w_log_login` (login audit trail).
DROP TABLE IF EXISTS `w_log_login`;
CREATE TABLE `w_log_login` (
-- Fixed: TINYINT UNSIGNED caps AUTO_INCREMENT ids at 255 — far too small for
-- a log table that only grows; widened to INT UNSIGNED (value-compatible).
`id` int(10) unsigned NOT NULL AUTO_INCREMENT,
-- NOTE(review): uid left as TINYINT to stay type-compatible with the users
-- table (not visible here); widen both together if users.id is widened.
`uid` tinyint(3) unsigned NOT NULL COMMENT '登录用户ID',
`t` datetime NOT NULL COMMENT '登录时间',
`shebie` varchar(100) NOT NULL COMMENT '登录设备',
-- Fixed: VARCHAR(45) holds the longest textual IPv6 form (incl. IPv4-mapped);
-- the previous VARCHAR(20) could only store IPv4 addresses.
`ip` varchar(45) NOT NULL COMMENT '登录IP',
PRIMARY KEY (`id`)
) ENGINE=MyISAM AUTO_INCREMENT=7 DEFAULT CHARSET=utf8 COMMENT='登陆记录';
-- table data `w_log_login`
-- Seed rows: five logins by uid 1 from 127.0.0.1 with a Chrome user-agent
-- string (truncated to 100 chars by the column). Note the id gap at 3.
LOCK TABLES `w_log_login` WRITE; -- lock the table during bulk load
/*!40000 ALTER TABLE `w_log_login` DISABLE KEYS */;
INSERT INTO `w_log_login` (`id`,`uid`,`t`,`shebie`,`ip`) VALUES ('1', '1', '2018-12-31 15:49:10', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safa', '127.0.0.1'),('2', '1', '2018-12-31 15:50:19', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safa', '127.0.0.1'),('4', '1', '2019-01-01 09:48:12', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safa', '127.0.0.1'),('5', '1', '2019-01-01 19:45:09', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safa', '127.0.0.1'),('6', '1', '2019-01-01 19:48:29', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safa', '127.0.0.1');
/*!40000 ALTER TABLE `w_log_login` ENABLE KEYS */;
UNLOCK TABLES;
-- --------------------------------------------------
-- Table structure for `w_log_operate` (admin operation audit trail).
DROP TABLE IF EXISTS `w_log_operate`;
CREATE TABLE `w_log_operate` (
-- Fixed: TINYINT UNSIGNED caps AUTO_INCREMENT ids at 255; an operation log
-- outgrows that quickly, so widened to INT UNSIGNED (value-compatible).
`id` int(10) unsigned NOT NULL AUTO_INCREMENT,
`uid` tinyint(3) unsigned NOT NULL COMMENT '管理员ID',
`t` datetime NOT NULL COMMENT '操作时间',
`behavior` tinyint(4) unsigned NOT NULL COMMENT '操作行为',
`details` varchar(255) NOT NULL COMMENT '操作详情',
PRIMARY KEY (`id`)
) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='操作记录';
-- --------------------------------------------------
-- Table structure for `w_sys_sset` (system settings key/value store).
DROP TABLE IF EXISTS `w_sys_sset`;
CREATE TABLE `w_sys_sset` (
-- Fixed: AUTO_INCREMENT is already at 114 and TINYINT UNSIGNED tops out at
-- 255 — the id column was close to silent overflow; widened to INT UNSIGNED.
`id` int(10) unsigned NOT NULL AUTO_INCREMENT,
`systid` tinyint(3) unsigned NOT NULL COMMENT '设置项类型',
`syskey` varchar(20) NOT NULL COMMENT '设置项名称',
`sysval` varchar(255) NOT NULL COMMENT '设置项值;多个值中间用英文逗号分隔',
`notes` varchar(50) DEFAULT NULL COMMENT '设置项说明',
`is_sys` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否系统内置;系统内置不可删除;1不删除,0可以删除;添加后不可修改',
PRIMARY KEY (`id`)
) ENGINE=MyISAM AUTO_INCREMENT=114 DEFAULT CHARSET=utf8 COMMENT='系统设置';
-- table data `w_sys_sset`
-- NOTE(review): row id 28 appears to have `sysval` and `notes` swapped (the
--   human-readable description sits in sysval and notes is empty) — confirm
--   against the application before fixing the data.
-- NOTE(review): row id 111 duplicates the 'user_only_sign' key of row id 1,
--   and rows 112/113 are empty placeholders — candidates for cleanup.
LOCK TABLES `w_sys_sset` WRITE; -- lock the table during bulk load
/*!40000 ALTER TABLE `w_sys_sset` DISABLE KEYS */;
INSERT INTO `w_sys_sset` (`id`,`systid`,`syskey`,`sysval`,`notes`,`is_sys`) VALUES ('1', '1', 'user_only_sign', '0', '是否允许同一账号多设备终端同时登陆1允许0不允许', '1'),('2', '1', 'back_top_nav', '10,13', '后台顶部导航ID', '1'),('6', '3', 'smtp_server', 'smtp.sina.com', '新浪SMTP邮箱服务器', '1'),('7', '3', 'smtp_server_port', '25', 'SMTP服务器端口', '1'),('8', '3', 'smtp_user_email', '', 'SMTP服务器的用户邮箱账号', '1'),('10', '3', 'smtp_pass', '', 'SMTP服务器的用户密码', '1'),('3', '1', 'pass_error_num', '5', '管理员登录密码错误次数,超过限制自动封锁,lock_t 时间后自动解锁', '1'),('17', '4', 'web_title', '智能小工具', 'AI 站点网站名称', '1'),('4', '2', 'email_interval_t', '120', '发送邮件时间间隔,时间单位秒', '1'),('19', '1', 'email_send_c', '50', 'email_t 时间内可发送的邮件次数', '1'),('20', '2', 'lock_t', '86400', '登录密码错误解封时间', '1'),('21', '2', 'email_t', '86400', '邮件发送超过最大限额多长时间后可以再次发送,单位是秒', '1'),('22', '2', 'email_activate_t', '1800', '发送邮件后邮件有效时间内激活,单位是秒', '1'),('18', '4', 'web_foot', '个人学习交流网站<br>有问题请联系 <a href=\"https://mail.qq.com\"><EMAIL></a>', '网站底部', '1'),('27', '4', 'img_word_imgsize', '4194304', '图片提取文字上传图片的大小限制,单位字节', '1'),('28', '4', 'img_word_imgtype', '图片提取文字允许的图片类型', '', '1'),('111', '1', 'user_only_sign', '0', '是否允许同一账号多设备终端同时登陆1允许0不允许', '1'),('112', '2', '', '', '', '0'),('113', '2', '', '', '', '0');
/*!40000 ALTER TABLE `w_sys_sset` ENABLE KEYS */;
UNLOCK TABLES;
-- --------------------------------------------------
-- table structure `w_sys_stype`
-- Lookup table of setting categories referenced by w_sys_sset.systid.
DROP TABLE IF EXISTS `w_sys_stype`;
CREATE TABLE `w_sys_stype` (
`id` tinyint(4) unsigned NOT NULL AUTO_INCREMENT,
`systype` varchar(30) NOT NULL COMMENT '分类名称',
PRIMARY KEY (`id`)
) ENGINE=MyISAM AUTO_INCREMENT=5 DEFAULT CHARSET=utf8 COMMENT='设置分类';
-- table data `w_sys_stype`
-- NOTE(review): AUTO_INCREMENT=5 but only category 1 is seeded — categories
--   2-4, which w_sys_sset.systid references, appear missing from this dump.
LOCK TABLES `w_sys_stype` WRITE; -- lock the table during bulk load
/*!40000 ALTER TABLE `w_sys_stype` DISABLE KEYS */;
INSERT INTO `w_sys_stype` (`id`,`systype`) VALUES ('1', '后端');
/*!40000 ALTER TABLE `w_sys_stype` ENABLE KEYS */;
UNLOCK TABLES; |
-- Regression test for JIT-compiled conditional functions (multiIf / if)
-- over non-nullable and Nullable columns.
SET compile_expressions = 1;
-- Compile on the first call instead of waiting for N invocations.
SET min_count_to_compile_expression = 0;
DROP TABLE IF EXISTS test_jit_nonnull;
CREATE TABLE test_jit_nonnull (value UInt8) ENGINE = TinyLog;
INSERT INTO test_jit_nonnull VALUES (0), (1);
SELECT 'test_jit_nonnull';
SELECT value, multiIf(value = 1, 2, value, 1, 0), if (value, 1, 0) FROM test_jit_nonnull;
DROP TABLE IF EXISTS test_jit_nullable;
CREATE TABLE test_jit_nullable (value Nullable(UInt8)) ENGINE = TinyLog;
INSERT INTO test_jit_nullable VALUES (0), (1), (NULL);
SELECT 'test_jit_nullable';
SELECT value, multiIf(value = 1, 2, value, 1, 0), if (value, 1, 0) FROM test_jit_nullable;
-- Added cleanup: stateless tests must drop their tables so reruns and
-- concurrently running tests start from a clean database (DROP emits no
-- output, so the expected results are unaffected).
DROP TABLE test_jit_nonnull;
DROP TABLE test_jit_nullable;
|
--
-- GEOMETRY
--
-- Back off displayed precision a little bit to reduce platform-to-platform
-- variation in results.
SET extra_float_digits TO - 3;
--
-- Points
--
SELECT
'' AS four,
center(f1) AS center
FROM
BOX_TBL;
SELECT
'' AS four,
(@@ f1) AS center
FROM
BOX_TBL;
SELECT
'' AS six,
point(f1) AS center
FROM
CIRCLE_TBL;
SELECT
'' AS six,
(@@ f1) AS center
FROM
CIRCLE_TBL;
SELECT
'' AS two,
(@@ f1) AS center
FROM
POLYGON_TBL
WHERE (# f1) > 2;
-- "is horizontal" function
SELECT
'' AS two,
p1.f1
FROM
POINT_TBL p1
WHERE
ishorizontal(p1.f1, point '(0,0)');
-- "is horizontal" operator
SELECT
'' AS two,
p1.f1
FROM
POINT_TBL p1
WHERE
p1.f1 ? - point '(0,0)';
-- "is vertical" function
SELECT
'' AS one,
p1.f1
FROM
POINT_TBL p1
WHERE
isvertical(p1.f1, point '(5.1,34.5)');
-- "is vertical" operator
SELECT
'' AS one,
p1.f1
FROM
POINT_TBL p1
WHERE
p1.f1 ? | point '(5.1,34.5)';
-- Slope
SELECT
p1.f1,
p2.f1,
slope(p1.f1, p2.f1)
FROM
POINT_TBL p1,
POINT_TBL p2;
-- Add point
SELECT
p1.f1,
p2.f1,
p1.f1 + p2.f1
FROM
POINT_TBL p1,
POINT_TBL p2;
-- Subtract point
SELECT
p1.f1,
p2.f1,
p1.f1 - p2.f1
FROM
POINT_TBL p1,
POINT_TBL p2;
-- Multiply with point
SELECT
p1.f1,
p2.f1,
p1.f1 * p2.f1
FROM
POINT_TBL p1,
POINT_TBL p2
WHERE
p1.f1[0] BETWEEN 1 AND 1000;
-- Underflow error
SELECT
p1.f1,
p2.f1,
p1.f1 * p2.f1
FROM
POINT_TBL p1,
POINT_TBL p2
WHERE
p1.f1[0] < 1;
-- Divide by point
SELECT
p1.f1,
p2.f1,
p1.f1 / p2.f1
FROM
POINT_TBL p1,
POINT_TBL p2
WHERE
p2.f1[0] BETWEEN 1 AND 1000;
-- Overflow error
SELECT
p1.f1,
p2.f1,
p1.f1 / p2.f1
FROM
POINT_TBL p1,
POINT_TBL p2
WHERE
p2.f1[0] > 1000;
-- Division by 0 error
SELECT
p1.f1,
p2.f1,
p1.f1 / p2.f1
FROM
POINT_TBL p1,
POINT_TBL p2
WHERE
p2.f1 ~= '(0,0)'::point;
-- Distance to line
SELECT
p.f1,
l.s,
p.f1 <-> l.s
FROM
POINT_TBL p,
LINE_TBL l;
-- Distance to line segment
SELECT
p.f1,
l.s,
p.f1 <-> l.s
FROM
POINT_TBL p,
LSEG_TBL l;
-- Distance to box
SELECT
p.f1,
b.f1,
p.f1 <-> b.f1
FROM
POINT_TBL p,
BOX_TBL b;
-- Distance to path
SELECT
p.f1,
p1.f1,
p.f1 <-> p1.f1
FROM
POINT_TBL p,
PATH_TBL p1;
-- Distance to polygon
SELECT
p.f1,
p1.f1,
p.f1 <-> p1.f1
FROM
POINT_TBL p,
POLYGON_TBL p1;
-- Closest point to line
SELECT
p.f1,
l.s,
p.f1 ## l.s
FROM
POINT_TBL p,
LINE_TBL l;
-- Closest point to line segment
SELECT
p.f1,
l.s,
p.f1 ## l.s
FROM
POINT_TBL p,
LSEG_TBL l;
-- Closest point to box
SELECT
p.f1,
b.f1,
p.f1 ## b.f1
FROM
POINT_TBL p,
BOX_TBL b;
-- On line
SELECT
p.f1,
l.s
FROM
POINT_TBL p,
LINE_TBL l
WHERE
p.f1 <@ l.s;
-- On line segment
SELECT
p.f1,
l.s
FROM
POINT_TBL p,
LSEG_TBL l
WHERE
p.f1 <@ l.s;
-- On path
SELECT
p.f1,
p1.f1
FROM
POINT_TBL p,
PATH_TBL p1
WHERE
p.f1 <@ p1.f1;
--
-- Lines
--
-- Vertical
SELECT
s
FROM
LINE_TBL
WHERE
? | s;
-- Horizontal
SELECT
s
FROM
LINE_TBL
WHERE
? - s;
-- Same as line
SELECT
l1.s,
l2.s
FROM
LINE_TBL l1,
LINE_TBL l2
WHERE
l1.s = l2.s;
-- Parallel to line
SELECT
l1.s,
l2.s
FROM
LINE_TBL l1,
LINE_TBL l2
WHERE
l1.s ? || l2.s;
-- Perpendicular to line
SELECT
l1.s,
l2.s
FROM
LINE_TBL l1,
LINE_TBL l2
WHERE
l1.s ? - | l2.s;
-- Distance to line
SELECT
l1.s,
l2.s,
l1.s <-> l2.s
FROM
LINE_TBL l1,
LINE_TBL l2;
-- Distance to box
SELECT
l.s,
b.f1,
l.s <-> b.f1
FROM
LINE_TBL l,
BOX_TBL b;
-- Intersect with line
SELECT
l1.s,
l2.s
FROM
LINE_TBL l1,
LINE_TBL l2
WHERE
l1.s ? # l2.s;
-- Intersect with box
SELECT
l.s,
b.f1
FROM
LINE_TBL l,
BOX_TBL b
WHERE
l.s ? # b.f1;
-- Intersection point with line
SELECT
l1.s,
l2.s,
l1.s # l2.s
FROM
LINE_TBL l1,
LINE_TBL l2;
-- Closest point to line segment
SELECT
l.s,
l1.s,
l.s ## l1.s
FROM
LINE_TBL l,
LSEG_TBL l1;
-- Closest point to box
SELECT
l.s,
b.f1,
l.s ## b.f1
FROM
LINE_TBL l,
BOX_TBL b;
--
-- Line segments
--
-- intersection
SELECT
'' AS count,
p.f1,
l.s,
l.s # p.f1 AS intersection
FROM
LSEG_TBL l,
POINT_TBL p;
-- Length
SELECT
s,
@-@ s
FROM
LSEG_TBL;
-- Vertical
SELECT
s
FROM
LSEG_TBL
WHERE
? | s;
-- Horizontal
SELECT
s
FROM
LSEG_TBL
WHERE
? - s;
-- Center
SELECT
s,
@@ s
FROM
LSEG_TBL;
-- To point
SELECT
s,
s::point
FROM
LSEG_TBL;
-- Has points less than line segment
SELECT
l1.s,
l2.s
FROM
LSEG_TBL l1,
LSEG_TBL l2
WHERE
l1.s < l2.s;
-- Has points less than or equal to line segment
SELECT
l1.s,
l2.s
FROM
LSEG_TBL l1,
LSEG_TBL l2
WHERE
l1.s <= l2.s;
-- Has points equal to line segment
SELECT
l1.s,
l2.s
FROM
LSEG_TBL l1,
LSEG_TBL l2
WHERE
l1.s = l2.s;
-- Has points greater than or equal to line segment
SELECT
l1.s,
l2.s
FROM
LSEG_TBL l1,
LSEG_TBL l2
WHERE
l1.s >= l2.s;
-- Has points greater than line segment
SELECT
l1.s,
l2.s
FROM
LSEG_TBL l1,
LSEG_TBL l2
WHERE
l1.s > l2.s;
-- Has points not equal to line segment
SELECT
l1.s,
l2.s
FROM
LSEG_TBL l1,
LSEG_TBL l2
WHERE
l1.s != l2.s;
-- Parallel with line segment
SELECT
l1.s,
l2.s
FROM
LSEG_TBL l1,
LSEG_TBL l2
WHERE
l1.s ? || l2.s;
-- Perpendicular with line segment
SELECT
l1.s,
l2.s
FROM
LSEG_TBL l1,
LSEG_TBL l2
WHERE
l1.s ? - | l2.s;
-- Distance to line
SELECT
l.s,
l1.s,
l.s <-> l1.s
FROM
LSEG_TBL l,
LINE_TBL l1;
-- Distance to line segment
SELECT
l1.s,
l2.s,
l1.s <-> l2.s
FROM
LSEG_TBL l1,
LSEG_TBL l2;
-- Distance to box
SELECT
l.s,
b.f1,
l.s <-> b.f1
FROM
LSEG_TBL l,
BOX_TBL b;
-- Intersect with line segment
SELECT
l.s,
l1.s
FROM
LSEG_TBL l,
LINE_TBL l1
WHERE
l.s ? # l1.s;
-- Intersect with box
SELECT
l.s,
b.f1
FROM
LSEG_TBL l,
BOX_TBL b
WHERE
l.s ? # b.f1;
-- Intersection point with line segment
SELECT
l1.s,
l2.s,
l1.s # l2.s
FROM
LSEG_TBL l1,
LSEG_TBL l2;
-- Closest point to line
SELECT
l.s,
l1.s,
l.s ## l1.s
FROM
LSEG_TBL l,
LINE_TBL l1;
-- Closest point to line segment
SELECT
l1.s,
l2.s,
l1.s ## l2.s
FROM
LSEG_TBL l1,
LSEG_TBL l2;
-- Closest point to box
SELECT
l.s,
b.f1,
l.s ## b.f1
FROM
LSEG_TBL l,
BOX_TBL b;
-- On line
SELECT
l.s,
l1.s
FROM
LSEG_TBL l,
LINE_TBL l1
WHERE
l.s <@ l1.s;
-- On box
SELECT
l.s,
b.f1
FROM
LSEG_TBL l,
BOX_TBL b
WHERE
l.s <@ b.f1;
--
-- Boxes
--
SELECT
'' AS six,
box(f1) AS box
FROM
CIRCLE_TBL;
-- translation
SELECT
'' AS twentyfour,
b.f1 + p.f1 AS translation
FROM
BOX_TBL b,
POINT_TBL p;
SELECT
'' AS twentyfour,
b.f1 - p.f1 AS translation
FROM
BOX_TBL b,
POINT_TBL p;
-- Multiply with point
SELECT
b.f1,
p.f1,
b.f1 * p.f1
FROM
BOX_TBL b,
POINT_TBL p
WHERE
p.f1[0] BETWEEN 1 AND 1000;
-- Overflow error
SELECT
b.f1,
p.f1,
b.f1 * p.f1
FROM
BOX_TBL b,
POINT_TBL p
WHERE
p.f1[0] > 1000;
-- Divide by point
SELECT
b.f1,
p.f1,
b.f1 / p.f1
FROM
BOX_TBL b,
POINT_TBL p
WHERE
p.f1[0] BETWEEN 1 AND 1000;
-- To box
SELECT
f1::box
FROM
POINT_TBL;
SELECT
bound_box (a.f1, b.f1)
FROM
BOX_TBL a,
BOX_TBL b;
-- Below box
SELECT
b1.f1,
b2.f1,
b1.f1 <^ b2.f1
FROM
BOX_TBL b1,
BOX_TBL b2;
-- Above box
SELECT
b1.f1,
b2.f1,
b1.f1 >^ b2.f1
FROM
BOX_TBL b1,
BOX_TBL b2;
-- Intersection point with box
SELECT
b1.f1,
b2.f1,
b1.f1 # b2.f1
FROM
BOX_TBL b1,
BOX_TBL b2;
-- Diagonal
SELECT
f1,
diagonal(f1)
FROM
BOX_TBL;
-- Distance to box
SELECT
b1.f1,
b2.f1,
b1.f1 <-> b2.f1
FROM
BOX_TBL b1,
BOX_TBL b2;
--
-- Paths
--
-- Points
SELECT
f1,
npoints(f1)
FROM
PATH_TBL;
-- Area
SELECT
f1,
area(f1)
FROM
PATH_TBL;
-- Length
SELECT
f1,
@-@ f1
FROM
PATH_TBL;
-- Center
SELECT
f1,
@@ f1
FROM
PATH_TBL;
-- To polygon
SELECT
f1,
f1::polygon
FROM
PATH_TBL
WHERE
isclosed(f1);
-- Open path cannot be converted to polygon error
SELECT
f1,
f1::polygon
FROM
PATH_TBL
WHERE
isopen(f1);
-- Has points less than path
SELECT
p1.f1,
p2.f1
FROM
PATH_TBL p1,
PATH_TBL p2
WHERE
p1.f1 < p2.f1;
-- Has points less than or equal to path
SELECT
p1.f1,
p2.f1
FROM
PATH_TBL p1,
PATH_TBL p2
WHERE
p1.f1 <= p2.f1;
-- Has points equal to path
SELECT
p1.f1,
p2.f1
FROM
PATH_TBL p1,
PATH_TBL p2
WHERE
p1.f1 = p2.f1;
-- Has points greater than or equal to path
SELECT
p1.f1,
p2.f1
FROM
PATH_TBL p1,
PATH_TBL p2
WHERE
p1.f1 >= p2.f1;
-- Has points greater than path
SELECT
p1.f1,
p2.f1
FROM
PATH_TBL p1,
PATH_TBL p2
WHERE
p1.f1 > p2.f1;
-- Add path
SELECT
p1.f1,
p2.f1,
p1.f1 + p2.f1
FROM
PATH_TBL p1,
PATH_TBL p2;
-- Add point
SELECT
p.f1,
p1.f1,
p.f1 + p1.f1
FROM
PATH_TBL p,
POINT_TBL p1;
-- Subtract point
SELECT
p.f1,
p1.f1,
p.f1 - p1.f1
FROM
PATH_TBL p,
POINT_TBL p1;
-- Multiply with point
SELECT
p.f1,
p1.f1,
p.f1 * p1.f1
FROM
PATH_TBL p,
POINT_TBL p1;
-- Divide by point
SELECT
p.f1,
p1.f1,
p.f1 / p1.f1
FROM
PATH_TBL p,
POINT_TBL p1
WHERE
p1.f1[0] BETWEEN 1 AND 1000;
-- Division by 0 error
SELECT
p.f1,
p1.f1,
p.f1 / p1.f1
FROM
PATH_TBL p,
POINT_TBL p1
WHERE
p1.f1 ~= '(0,0)'::point;
-- Distance to path
SELECT
p1.f1,
p2.f1,
p1.f1 <-> p2.f1
FROM
PATH_TBL p1,
PATH_TBL p2;
--
-- Polygons
--
-- containment
SELECT
'' AS twentyfour,
p.f1,
poly.f1,
poly.f1 @> p.f1 AS contains
FROM
POLYGON_TBL poly,
POINT_TBL p;
SELECT
'' AS twentyfour,
p.f1,
poly.f1,
p.f1 <@ poly.f1 AS contained
FROM
POLYGON_TBL poly,
POINT_TBL p;
SELECT
'' AS four,
npoints(f1) AS npoints,
f1 AS polygon
FROM
POLYGON_TBL;
SELECT
'' AS four,
polygon(f1)
FROM
BOX_TBL;
SELECT
'' AS four,
polygon(f1)
FROM
PATH_TBL
WHERE
isclosed(f1);
SELECT
'' AS four,
f1 AS open_path,
polygon(pclose(f1)) AS polygon
FROM
PATH_TBL
WHERE
isopen(f1);
-- To box
SELECT
f1,
f1::box
FROM
POLYGON_TBL;
-- To path
SELECT
f1,
f1::path
FROM
POLYGON_TBL;
-- Same as polygon
SELECT
p1.f1,
p2.f1
FROM
POLYGON_TBL p1,
POLYGON_TBL p2
WHERE
p1.f1 ~= p2.f1;
-- Contained by polygon
SELECT
p1.f1,
p2.f1
FROM
POLYGON_TBL p1,
POLYGON_TBL p2
WHERE
p1.f1 <@ p2.f1;
-- Contains polygon
SELECT
p1.f1,
p2.f1
FROM
POLYGON_TBL p1,
POLYGON_TBL p2
WHERE
p1.f1 @> p2.f1;
-- Overlap with polygon
SELECT
p1.f1,
p2.f1
FROM
POLYGON_TBL p1,
POLYGON_TBL p2
WHERE
p1.f1 && p2.f1;
-- Left of polygon
SELECT
p1.f1,
p2.f1
FROM
POLYGON_TBL p1,
POLYGON_TBL p2
WHERE
p1.f1 << p2.f1;
-- Overlap of left of polygon
SELECT
p1.f1,
p2.f1
FROM
POLYGON_TBL p1,
POLYGON_TBL p2
WHERE
p1.f1 &< p2.f1;
-- Right of polygon
SELECT
p1.f1,
p2.f1
FROM
POLYGON_TBL p1,
POLYGON_TBL p2
WHERE
p1.f1 >> p2.f1;
-- Overlap of right of polygon
SELECT
p1.f1,
p2.f1
FROM
POLYGON_TBL p1,
POLYGON_TBL p2
WHERE
p1.f1 &> p2.f1;
-- Below polygon
SELECT
p1.f1,
p2.f1
FROM
POLYGON_TBL p1,
POLYGON_TBL p2
WHERE
p1.f1 <<| p2.f1;
-- Overlap or below polygon
SELECT
p1.f1,
p2.f1
FROM
POLYGON_TBL p1,
POLYGON_TBL p2
WHERE
p1.f1 &<| p2.f1;
-- Above polygon
SELECT
p1.f1,
p2.f1
FROM
POLYGON_TBL p1,
POLYGON_TBL p2
WHERE
p1.f1 |>> p2.f1;
-- Overlap or above polygon
SELECT
p1.f1,
p2.f1
FROM
POLYGON_TBL p1,
POLYGON_TBL p2
WHERE
p1.f1 |&> p2.f1;
-- Distance to polygon
SELECT
p1.f1,
p2.f1,
p1.f1 <-> p2.f1
FROM
POLYGON_TBL p1,
POLYGON_TBL p2;
--
-- Circles
--
SELECT
'' AS six,
circle(f1, 50.0)
FROM
POINT_TBL;
SELECT
'' AS four,
circle(f1)
FROM
BOX_TBL;
SELECT
'' AS two,
circle(f1)
FROM
POLYGON_TBL
WHERE (# f1) >= 3;
SELECT
'' AS twentyfour,
c1.f1 AS circle,
p1.f1 AS point,
(p1.f1 <-> c1.f1) AS distance
FROM
CIRCLE_TBL c1,
POINT_TBL p1
WHERE (p1.f1 <-> c1.f1) > 0
ORDER BY
distance,
area(c1.f1),
p1.f1[0];
-- To polygon
SELECT
f1,
f1::polygon
FROM
CIRCLE_TBL
WHERE
f1 >= '<(0,0),1>';
-- To polygon with fewer points
SELECT
f1,
polygon(8, f1)
FROM
CIRCLE_TBL
WHERE
f1 >= '<(0,0),1>';
-- Too few points: expect an error
SELECT
f1,
polygon(1, f1)
FROM
CIRCLE_TBL
WHERE
f1 >= '<(0,0),1>';
-- Zero radius error
SELECT
f1,
polygon(10, f1)
FROM
CIRCLE_TBL
WHERE
f1 < '<(0,0),1>';
-- Same as circle
SELECT
c1.f1,
c2.f1
FROM
CIRCLE_TBL c1,
CIRCLE_TBL c2
WHERE
c1.f1 ~= c2.f1;
-- Overlap with circle
SELECT
c1.f1,
c2.f1
FROM
CIRCLE_TBL c1,
CIRCLE_TBL c2
WHERE
c1.f1 && c2.f1;
-- Overlap or left of circle
SELECT
c1.f1,
c2.f1
FROM
CIRCLE_TBL c1,
CIRCLE_TBL c2
WHERE
c1.f1 &< c2.f1;
-- Left of circle
SELECT
c1.f1,
c2.f1
FROM
CIRCLE_TBL c1,
CIRCLE_TBL c2
WHERE
c1.f1 << c2.f1;
-- Right of circle
SELECT
c1.f1,
c2.f1
FROM
CIRCLE_TBL c1,
CIRCLE_TBL c2
WHERE
c1.f1 >> c2.f1;
-- Overlap or right of circle
SELECT
c1.f1,
c2.f1
FROM
CIRCLE_TBL c1,
CIRCLE_TBL c2
WHERE
c1.f1 &> c2.f1;
-- Contained by circle
SELECT
c1.f1,
c2.f1
FROM
CIRCLE_TBL c1,
CIRCLE_TBL c2
WHERE
c1.f1 <@ c2.f1;
-- Contains circle
SELECT
c1.f1,
c2.f1
FROM
CIRCLE_TBL c1,
CIRCLE_TBL c2
WHERE
c1.f1 @> c2.f1;
-- Below circle
SELECT
c1.f1,
c2.f1
FROM
CIRCLE_TBL c1,
CIRCLE_TBL c2
WHERE
c1.f1 <<| c2.f1;
-- Above circle
SELECT
c1.f1,
c2.f1
FROM
CIRCLE_TBL c1,
CIRCLE_TBL c2
WHERE
c1.f1 |>> c2.f1;
-- Overlap or below circle
SELECT
c1.f1,
c2.f1
FROM
CIRCLE_TBL c1,
CIRCLE_TBL c2
WHERE
c1.f1 &<| c2.f1;
-- Overlap or above circle
SELECT
c1.f1,
c2.f1
FROM
CIRCLE_TBL c1,
CIRCLE_TBL c2
WHERE
c1.f1 |&> c2.f1;
-- Area equal with circle
SELECT
c1.f1,
c2.f1
FROM
CIRCLE_TBL c1,
CIRCLE_TBL c2
WHERE
c1.f1 = c2.f1;
-- Area not equal with circle
SELECT
c1.f1,
c2.f1
FROM
CIRCLE_TBL c1,
CIRCLE_TBL c2
WHERE
c1.f1 != c2.f1;
-- Area less than circle
SELECT
c1.f1,
c2.f1
FROM
CIRCLE_TBL c1,
CIRCLE_TBL c2
WHERE
c1.f1 < c2.f1;
-- Area greater than circle
SELECT
c1.f1,
c2.f1
FROM
CIRCLE_TBL c1,
CIRCLE_TBL c2
WHERE
c1.f1 > c2.f1;
-- Area less than or equal circle
SELECT
c1.f1,
c2.f1
FROM
CIRCLE_TBL c1,
CIRCLE_TBL c2
WHERE
c1.f1 <= c2.f1;
-- Area greater than or equal circle
SELECT
c1.f1,
c2.f1
FROM
CIRCLE_TBL c1,
CIRCLE_TBL c2
WHERE
c1.f1 >= c2.f1;
-- Area less than circle
-- NOTE(review): this "<"/">" pair duplicates the identical tests above —
-- presumably a copy/paste; harmless but confirm before removing.
SELECT
c1.f1,
c2.f1
FROM
CIRCLE_TBL c1,
CIRCLE_TBL c2
WHERE
c1.f1 < c2.f1;
-- Area greater than circle
SELECT
c1.f1,
c2.f1
FROM
CIRCLE_TBL c1,
CIRCLE_TBL c2
WHERE
-- Fixed: predicate was "c1.f1 < c2.f1", contradicting the comment and
-- re-running the "less than" test; ">" matches the earlier "greater" test.
c1.f1 > c2.f1;
-- Add point
SELECT
c.f1,
p.f1,
c.f1 + p.f1
FROM
CIRCLE_TBL c,
POINT_TBL p;
-- Subtract point
SELECT
c.f1,
p.f1,
c.f1 - p.f1
FROM
CIRCLE_TBL c,
POINT_TBL p;
-- Multiply with point
SELECT
c.f1,
p.f1,
c.f1 * p.f1
FROM
CIRCLE_TBL c,
POINT_TBL p;
-- Divide by point
SELECT
c.f1,
p.f1,
c.f1 / p.f1
FROM
CIRCLE_TBL c,
POINT_TBL p
WHERE
p.f1[0] BETWEEN 1 AND 1000;
-- Overflow error
SELECT
c.f1,
p.f1,
c.f1 / p.f1
FROM
CIRCLE_TBL c,
POINT_TBL p
WHERE
p.f1[0] > 1000;
-- Division by 0 error
SELECT
c.f1,
p.f1,
c.f1 / p.f1
FROM
CIRCLE_TBL c,
POINT_TBL p
WHERE
p.f1 ~= '(0,0)'::point;
-- Distance to polygon
SELECT
c.f1,
p.f1,
c.f1 <-> p.f1
FROM
CIRCLE_TBL c,
POLYGON_TBL p;
|
<gh_stars>0
-- Construction-value lookup: rows with a non-NULL `grupo` are category
-- headers; the rows that follow belong to that category. `abreviacion`
-- encodes quality/condition (E/M/B = economica/media/buena crossed with
-- M/N/P = mala/normal/de primera); `costo` is the unit cost.
INSERT INTO `tabla_valor_construccion` (`id_tabla_valor_construccion`, `abreviacion`, `grupo`, `caracteristicas`, `costo`) VALUES
(1, NULL, 'HABITACIONAL ', NULL, NULL),
(2, 'EM', NULL, 'ECONOMICA/MALA', 1750),
(3, 'EN', NULL, 'ECONOMICA/NORMAL', 3500),
(4, 'MM', NULL, 'MEDIA/MALA', 2187.5),
(5, 'MN', NULL, 'MEDIA/NORMAL', 4375),
(6, 'MP', NULL, 'MEDIA/DE PRIMERA', 5250),
(7, 'BM', NULL, 'BUENA/MALA', 2625),
(8, 'BN', NULL, 'BUENA/NORMAL', 5250),
(9, 'BP', NULL, 'BUENA/DE PRIMERA', 6562.5),
(10, NULL, 'CONDOMINIO', NULL, NULL),
(11, 'EM', NULL, 'ECONOMICA/MALA', 1750),
(12, 'EN', NULL, 'ECONOMICA/NORMAL', 3500),
(13, 'MM', NULL, 'MEDIA/MALA', 2187.5),
(14, 'MN', NULL, 'MEDIA/NORMAL', 4375),
(15, 'MP', NULL, 'MEDIA/DE PRIMERA', 5250),
(16, 'BM', NULL, 'BUENA/MALA', 2625),
(17, 'BN', NULL, 'BUENA/NORMAL', 5250),
(18, 'BP', NULL, 'BUENA/DE PRIMERA', 6562.5),
(19, NULL, 'CUARTOS DE RENTA', NULL, NULL),
(20, 'EM', NULL, 'ECONOMICA/MALA', 1500),
(21, 'EN', NULL, 'ECONOMICA/NORMAL', 3000),
(22, 'MM', NULL, 'MEDIA/MALA', 1875),
(23, 'MN', NULL, 'MEDIA/NORMAL', 3750),
(24, 'BM', NULL, 'BUENA/MALA', 2250),
(25, 'BN', NULL, 'BUENA/NORMAL', 4500),
(26, NULL, 'OFICINAS', NULL, NULL),
(27, 'EM', NULL, 'ECONOMICA/MALA', 1500),
(28, 'EN', NULL, 'ECONOMICA/NORMAL', 3000),
(29, 'MM', NULL, 'MEDIA/MALA', 1875),
(30, 'MN', NULL, 'MEDIA/NORMAL', 3750),
(31, 'MP', NULL, 'MEDIA/DE PRIMERA', 4500),
(32, 'BM', NULL, 'BUENA/MALA', 2250),
(33, 'BN', NULL, 'BUENA/NORMAL', 4500),
(34, 'BP', NULL, 'BUENA/DE PRIMERA', 5625),
(36, NULL, 'POSADAS/HOTELES 3 ESTRELLAS', NULL, NULL),
(38, 'EN', NULL, 'ECONOMICA/NORMAL', 3200),
(41, 'MN', NULL, 'MEDIA/NORMAL', 4000),
(42, 'MP', NULL, 'MEDIA/DE PRIMERA', 4800),
(44, 'BN', NULL, 'BUENA/NORMAL', 4800),
(45, 'BP', NULL, 'BUENA/DE PRIMERA', 6000),
(46, NULL, 'HOTELES 5 ESTRELLAS(G.TUR.)', NULL, NULL),
(47, 'EN', NULL, 'ECONOMICA/NORMAL', 7000),
(48, 'MN', NULL, 'MEDIA/NORMAL', 8750),
(49, 'MP', NULL, 'MEDIA/DE PRIMERA', 10500),
(50, 'BN', NULL, 'BUENA/NORMAL', 10500),
(51, 'BP', NULL, 'BUENA/DE PRIMERA', 13125),
(52, NULL, 'BANCOS', NULL, NULL),
(53, 'EN', NULL, 'ECONOMICA/NORMAL', 6000),
(54, 'MN', NULL, 'MEDIA/NORMAL', 7500),
(55, 'MP', NULL, 'MEDIA/DE PRIMERA', 9000),
-- Fixed: row 56 said 'MEDIA/NORMAL' — every other 'BN' row in the table is
-- 'BUENA/NORMAL', and the sibling 'MN' row already exists at id 54.
(56, 'BN', NULL, 'BUENA/NORMAL', 9000),
(57, 'BP', NULL, 'BUENA/DE PRIMERA', 13125),
(58, NULL, 'PLAZAS COMERCIALES', NULL, NULL),
(59, 'EN', NULL, 'ECONOMICA/NORMAL', 4000),
(60, 'MN', NULL, 'MEDIA/NORMAL', 5000),
(61, 'MP', NULL, 'MEDIA/DE PRIMERA', 6000),
(62, 'BN', NULL, 'BUENA/NORMAL', 6000),
(63, 'BP', NULL, 'BUENA/DE PRIMERA', 7500),
(64, NULL, 'LOCALES COMERCIALES', NULL, NULL),
(65, 'EN', NULL, 'ECONOMICA/NORMAL', 1500),
(66, 'EP', NULL, 'ECONOMICA/DE PRIMERA', 3000),
(67, 'MM', NULL, 'MEDIA/MALA', 1875),
(68, 'MN', NULL, 'MEDIA/NORMAL', 3750),
-- Fixed: abbreviation was 'MB' — the table-wide code for MEDIA/DE PRIMERA
-- is 'MP' (cf. ids 6, 15, 31, 42, 49, 55, 61).
(69, 'MP', NULL, 'MEDIA/DE PRIMERA', 4500),
(70, 'BM', NULL, 'BUENA/MALA', 2250),
(71, 'BN', NULL, 'BUENA/NORMAL', 4500),
(72, 'BP', NULL, 'BUENA/DE PRIMERA', 5625),
(73, NULL, 'RESTAURANTES', NULL, NULL),
(74, 'EN', NULL, 'ECONOMICA/NORMAL', 2000),
(75, 'MN', NULL, 'MEDIA/NORMAL', 2500),
(76, 'MP', NULL, 'MEDIA/DE PRIMERA', 3000),
(77, 'BN', NULL, 'BUENA/NORMAL', 3000),
(78, 'BP', NULL, 'BUENA/DE PRIMERA', 3750),
(79, NULL, 'NAVES INDUSTRIALES', NULL, NULL),
(80, 'EM', NULL, 'ECONOMICA/MALA', 600),
(81, 'EN', NULL, 'ECONOMICA/NORMAL', 1200),
(82, 'MM', NULL, 'MEDIA/MALA', 750),
(83, 'MN', NULL, 'MEDIA/NORMAL', 1500),
(84, 'MP', NULL, 'MEDIA/DE PRIMERA', 1800),
(85, 'BM', NULL, 'BUENA/MALA', 900),
(86, 'BN', NULL, 'BUENA/NORMAL', 1800),
(87, 'BP', NULL, 'BUENA/DE PRIMERA', 2250),
(88, NULL, 'BODEGAS COMERCIALES', NULL, NULL),
(89, 'EN', NULL, 'ECONOMICA/NORMAL', 2400),
(90, 'MN', NULL, 'MEDIA/NORMAL', 3000),
(91, 'MP', NULL, 'MEDIA/DE PRIMERA', 3600),
(92, 'BN', NULL, 'BUENA/NORMAL', 3600),
(93, 'BP', NULL, 'BUENA/DE PRIMERA', 4000),
(94, NULL, 'CLINICAS Y HOSPITALES', NULL, NULL),
(95, 'EN', NULL, 'ECONOMICA/NORMAL', 3500),
(96, 'MN', NULL, 'MEDIA/NORMAL', 4375),
(97, 'MP', NULL, 'MEDIA/DE PRIMERA', 5250),
(98, 'BN', NULL, 'BUENA/NORMAL', 5250),
(99, 'BP', NULL, 'BUENA/DE PRIMERA', 6562.50),
(100, NULL, 'PALAPAS', NULL, NULL),
(101, 'EM', NULL, 'ECONOMICA/MALA', 500),
(102, 'EN', NULL, 'ECONOMICA/NORMAL', 1000),
(103, 'MM', NULL, 'MEDIA/MALA', 625),
(104, 'MN', NULL, 'MEDIA/NORMAL', 1250),
(105, 'MP', NULL, 'MEDIA/DE PRIMERA', 1500),
(106, 'BM', NULL, 'BUENA/MALA', 750),
(107, 'BN', NULL, 'BUENA/NORMAL', 1500),
(108, 'BP', NULL, 'BUENA/DE PRIMERA', 1875),
-- NOTE(review): category name lost to anonymization scrubbing ('<NAME>');
-- restore the original group label from the source data.
(109, NULL, '<NAME>', NULL, NULL),
(110, 'EN', NULL, 'ECONOMICA/NORMAL', 2000),
(111, 'MN', NULL, 'MEDIA/NORMAL', 2500),
(112, 'MP', NULL, 'MEDIA/DE PRIMERA', 3000),
(113, 'BN', NULL, 'BUENA/NORMAL', 3000),
(114, 'BP', NULL, 'BUENA/DE PRIMERA', 3750),
(115, NULL, 'CAMPO DE GOLF', NULL, NULL),
(116, 'MN', NULL, 'MEDIA/NORMAL', 500),
(117, 'BP', NULL, 'BUENA/DE PRIMERA', 750),
(118, NULL, 'CANCHAS DEPORTIVAS', NULL, NULL),
(119, 'MN', NULL, 'MEDIA/NORMAL', 500),
(120, 'BP', NULL, 'BUENA/DE PRIMERA', 750),
(121, NULL, 'ESTACIONAMIENTOS', NULL, NULL),
(122, 'MN', NULL, 'MEDIA/NORMAL', 750),
-- NOTE(review): 375 breaks the otherwise-universal BP > MN pattern
-- (every other category prices BP at 1.5x MN, which would be 1125) —
-- confirm against the source tariff before changing the value.
(123, 'BP', NULL, 'BUENA/DE PRIMERA', 375);
|
-- Maps imported election results to the precinct and polling location they
-- came from. NOTE(review): the table has no primary/unique key, so repeated
-- loads can insert duplicate mappings — confirm whether that is intended.
CREATE TABLE precinct_polling_locations(
results_id BIGINT NOT NULL
-- Named constraint so FK violations and migrations are greppable.
CONSTRAINT precinct_polling_locations_results_id_fk REFERENCES results (id),
precinct_id BIGINT,
polling_location_id BIGINT);
|
<filename>openGaussBase/testcase/KEYWORDS/log/Opengauss_Function_Keyword_Log_Case0034.sql
-- @testpoint: openGauss keyword "log" (non-reserved) used as a cursor name; some cases are expected to raise errors
-- Setup
drop table if exists explain_test cascade;
create table explain_test(cid int,fid int);
-- Unquoted keyword - succeeds
start transaction;
cursor log for select * from explain_test order by 1;
close log;
end;
-- Keyword in double quotes - succeeds
start transaction;
cursor "log" for select * from explain_test order by 1;
close "log";
end;
-- Keyword in single quotes - expected error
start transaction;
cursor 'log' for select * from explain_test order by 1;
close 'log';
end;
-- Keyword in backquotes - expected error
start transaction;
cursor `log` for select * from explain_test order by 1;
close `log`;
end;
-- Cleanup
drop table if exists explain_test cascade;
|
-- Employee-tracker schema (MySQL): departments -> roles -> employees.
DROP DATABASE IF EXISTS employee_tracker;
CREATE DATABASE employee_tracker;
USE employee_tracker;

-- Lookup table of departments.
CREATE TABLE departments (
    department_id INT NOT NULL AUTO_INCREMENT,
    department VARCHAR(30) NOT NULL,
    PRIMARY KEY (department_id)
);

-- Job roles; each role belongs to one department.
CREATE TABLE roles (
    role_id INT NOT NULL AUTO_INCREMENT,
    title VARCHAR(100),
    -- Fixed: salary was VARCHAR(100); string salaries cannot be summed or
    -- compared correctly and admit bad data. MySQL still accepts numeric
    -- strings on insert, so existing callers keep working.
    salary DECIMAL(10,2),
    department_id INT,
    PRIMARY KEY (role_id),
    FOREIGN KEY (department_id) REFERENCES departments(department_id)
);

-- Employees; manager_id is a self-reference to another employee.
CREATE TABLE employees (
    employee_id INT NOT NULL AUTO_INCREMENT,
    first_name VARCHAR(30),
    last_name VARCHAR(30),
    role_id INT,
    manager_id INT NULL,
    PRIMARY KEY (employee_id),
    FOREIGN KEY (role_id) REFERENCES roles(role_id),
    -- Previously missing: ensure manager_id points at a real employee;
    -- deleting a manager orphans reports gracefully instead of erroring.
    FOREIGN KEY (manager_id) REFERENCES employees(employee_id) ON DELETE SET NULL
);
|
<gh_stars>0
-- Titles of movies released in 2008.
SELECT title
FROM movies
WHERE year = 2008;
<filename>SQL/odev4/odev3.sql
-- Count of G-rated films whose title starts with 'T'.
SELECT COUNT(title)
FROM film
WHERE title LIKE 'T%'
  AND rating = 'G';
-- Standard public schema, owned by postgres and open to all roles.
CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres;

GRANT ALL ON SCHEMA public TO postgres;
GRANT ALL ON SCHEMA public TO public;

COMMENT ON SCHEMA public IS 'standard public schema';
-- Tick events keyed by id.
-- WITH (OIDS=FALSE) dropped from both tables: OIDs are off by default and
-- the storage option was removed entirely in PostgreSQL 12.
CREATE TABLE ticks
(
  id bigint NOT NULL,
  time_stamp timestamp without time zone NOT NULL,  -- naive timestamp; presumably UTC — confirm
  CONSTRAINT ticks_pkey PRIMARY KEY (id)
);
ALTER TABLE ticks
  OWNER TO postgres;

-- Application users.
CREATE TABLE users
(
  id bigint NOT NULL,
  name character varying(255) NOT NULL,
  CONSTRAINT users_pkey PRIMARY KEY (id)
);
ALTER TABLE users
  OWNER TO postgres;
-- Schema mirroring Salesforce objects; the _hc_* columns look like
-- Heroku Connect sync bookkeeping — confirm with the sync pipeline.
-- WITH (OIDS=FALSE) dropped from all tables: OIDs are off by default and
-- the storage option was removed entirely in PostgreSQL 12.
CREATE SCHEMA IF NOT EXISTS salesforcedev
  AUTHORIZATION postgres;

-- Salesforce Account mirror.
CREATE TABLE salesforcedev.account
(
  id bigint NOT NULL,
  _hc_err character varying(255),     -- presumably last sync error — confirm
  _hc_lastop character varying(255),  -- presumably last sync operation — confirm
  createddate timestamp without time zone NOT NULL,
  lastmodifieddate timestamp without time zone,
  name character varying(255) NOT NULL,
  CONSTRAINT account_pkey PRIMARY KEY (id)
);
ALTER TABLE salesforcedev.account
  OWNER TO postgres;

-- Salesforce Profile mirror.
CREATE TABLE salesforcedev.profile
(
  id bigint NOT NULL,
  name character varying(255) NOT NULL,
  CONSTRAINT profile_pkey PRIMARY KEY (id)
);
ALTER TABLE salesforcedev.profile
  OWNER TO postgres;

-- Salesforce User mirror; "user" must stay quoted (reserved word).
CREATE TABLE salesforcedev."user"
(
  id bigint NOT NULL,
  last_change timestamp without time zone NOT NULL,
  name character varying(255) NOT NULL,
  CONSTRAINT user_pkey PRIMARY KEY (id)
);
ALTER TABLE salesforcedev."user"
  OWNER TO postgres;
|
-- Per-organization workflow configuration, keyed by (root_org, org, service).
CREATE TABLE IF NOT EXISTS bodhi.work_flow (
    root_org text,
    org      text,
    service  text,
    config   text,
    PRIMARY KEY (root_org, org, service)
);
|
<reponame>Akshaya869/Eternity-Beta
{"Money_871604407488348190":{"key":"Money_871604407488348190","value":"89590456316"},"Platinum_871604407488348190":{"key":"Platinum_871604407488348190","value":"3"},"Phone_871604407488348190":{"key":"Phone_871604407488348190","value":"2"},"muted_909433086654156872":{"key":"muted_909433086654156872","value":"931887897211179049"},"cooldown_botinfo_909433086654156872_871604407488348190":{"key":"cooldown_botinfo_909433086654156872_871604407488348190","value":1643369918852},"cooldown_postmeme_922830804793569344_871604407488348190":{"key":"cooldown_postmeme_922830804793569344_871604407488348190","value":1643530953635},"cooldown_botinfo_922830804793569344_871604407488348190":{"key":"cooldown_botinfo_922830804793569344_871604407488348190","value":1642741531898},"Bronze_871604407488348190":{"key":"Bronze_871604407488348190","value":"100100106"},"Bank_871604407488348190":{"key":"Bank_871604407488348190","value":"0"},"premium_871604407488348190":{"key":"premium_871604407488348190","value":"true"},"cooldown_shopreward_922830804793569344_871604407488348190":{"key":"cooldown_shopreward_922830804793569344_871604407488348190","value":1642760920084},"Utm_871604407488348190":{"key":"Utm_871604407488348190","value":"1"},"cooldown_weekly_922830804793569344_871604407488348190":{"key":"cooldown_weekly_922830804793569344_871604407488348190","value":1642761306564},"cooldown_monthly_922830804793569344_871604407488348190":{"key":"cooldown_monthly_922830804793569344_871604407488348190","value":1642761396303},"Cookie_871604407488348190":{"key":"Cookie_871604407488348190","value":"10202"},"cooldown_command_909433086654156872_871604407488348190":{"key":"cooldown_command_909433086654156872_871604407488348190","value":1642776285191},"Gold_871604407488348190":{"key":"Gold_871604407488348190","value":"0"},"Pizza_871604407488348190":{"key":"Pizza_871604407488348190","value":"197"},"SpaceCoin_871604407488348190":{"key":"SpaceCoin_871604407488348190","value":"11"},"SpaceTro_871604407488348190":{"key":"
SpaceTro_871604407488348190","value":"210"},"guess_the_number_channel_909433086654156872":{"key":"guess_the_number_channel_909433086654156872","value":"922112225626423317"},"winning_number_909433086654156872":{"key":"winning_number_909433086654156872","value":"Doesn't exist."},"cooldown_work_909433086654156872_871604407488348190":{"key":"cooldown_work_909433086654156872_871604407488348190","value":1643007468298},"giveawayguildid_935066877116424212":{"key":"giveawayguildid_935066877116424212","value":"909433086654156872"},"giveawaychannelid_935066877116424212":{"key":"giveawaychannelid_935066877116424212","value":"923939584872681472"},"giveawayprize_935066877116424212":{"key":"giveawayprize_935066877116424212","value":"1 Million Eternity Coins"},"giveawaydescription_935066877116424212":{"key":"giveawaydescription_935066877116424212","value":"SUS"},"giveawaytime_935066877116424212":{"key":"giveawaytime_935066877116424212","value":"1643011318"},"giveawayisgiveaway_935066877116424212":{"key":"giveawayisgiveaway_935066877116424212","value":"true"},"cooldown_beg_909433086654156872_866980635729592362":{"key":"cooldown_beg_909433086654156872_866980635729592362","value":1643009154533},"Money_866980635729592362":{"key":"Money_866980635729592362","value":"82"},"giveawayparticipants_935066877116424212":{"key":"giveawayparticipants_935066877116424212","value":"866980635729592362 852117500535242772"},"giveawayisfinished_935066877116424212":{"key":"giveawayisfinished_935066877116424212","value":"true"}} |
<filename>db/sqlite/sql/DataModels/INSERT/InsertRegionalAuthorityUpdates.sql<gh_stars>0
-- Copy budget-level-7 Rescission rows (excluding BOC 17) into RegionalAuthority.
-- NOTE(review): INSERT ... SELECT * with no column lists breaks silently if
-- either table's column set or order changes; prefer explicit column lists.
-- NOTE(review): ORDER BY here affects only insertion order, which carries no
-- guarantee for later retrieval order.
INSERT INTO RegionalAuthority
SELECT *
FROM Rescission
WHERE Rescission.BudgetLevel = '7'
AND Rescission.BocCode <> '17'
ORDER BY Rescission.BFY DESC , Rescission.FundCode, Rescission.AccountCode, Rescission.BocCode;
|
-- Create table example
CREATE TABLE stock.stockshareoutstanding (
    ticker           varchar(10),
    shareoutstanding bigint
);

-- Change table name
ALTER TABLE stock.pred_price_sp500base
    RENAME TO pred_fbp_price_sp500_base;

-- Delete table
-- delete from stock.stockprice;
-- Key/value cache table with a per-row expiration timestamp.
DROP TABLE IF EXISTS temp.main_cacher CASCADE;
CREATE TABLE temp.main_cacher
(
c_key BIGINT, -- cache key (primary key below)
c_value JSON, -- cached payload; NOTE(review): JSONB is usually preferable on PostgreSQL (indexable, faster reads) — confirm readers don't rely on exact JSON text
expiration_date TIMESTAMP, -- naive timestamp; presumably UTC — confirm
PRIMARY KEY (c_key)
);
-- Index supports efficient scans/purges of expired entries.
CREATE INDEX main_cacher_idx ON temp.main_cacher (expiration_date);
-- Tag dictionary: normalized form (lookup key) -> display form.
CREATE TABLE tags (
    normalized VARCHAR(30) NOT NULL PRIMARY KEY,
    tag        VARCHAR(30) NOT NULL
);

INSERT INTO tags (normalized, tag) VALUES
    ('nsfw', 'nsfw'),
    ('sfw', 'sfw');
-- Reports contiguous EMPTY/USED page ranges in the current database's data
-- files, using sys.dm_db_database_page_allocations.
-- Fix: output alias "databse_name" -> "database_name" (typo in two places).
DECLARE @DatabaseName sysname = DB_NAME(), @ObjectName sysname = '[CyberProfiles].[Profile]', @ObjectType sysname = 'Table'
SET NOCOUNT ON;
DECLARE @IndexId int = NULL, @TableId int = NULL;
IF @ObjectType = 'Index'
BEGIN
    SELECT @TableId = object_id
    FROM sys.indexes
    WHERE name = @ObjectName
    -- NOTE(review): index names are unique only per table, so duplicates across
    -- tables trip this check; severity-16 RAISERROR does not stop the batch.
    IF @@ROWCOUNT <> 1 RAISERROR(N'Unable to determine table to which index "%s" belongs', 16, 1, @ObjectName);
    SET @IndexId = INDEXPROPERTY(@TableId, @ObjectName, 'IndexID');
END
ELSE IF @ObjectType = 'Table'
BEGIN
    SET @TableId = OBJECT_ID(@ObjectName);
END
ELSE
BEGIN
    RAISERROR(N'Object Type "%s" is not supported', 16, 1, @ObjectType);
END
SELECT
    database_name = DB_NAME()
    , file_name
    , page_identifier = CONCAT(file_id,':',pt.from_page_id)
    , check_file_total_size = file_total_size
    , check_file_total_used_space = file_total_used_space
    , check_file_total_unused_pages = file_total_unused_pages
    , agg_file_total_reserved_pages = file_total_reserved_pages
    , agg_file_total_consecutive_unused_pages = SUM(pt.consecutive_unused_pages) OVER (PARTITION BY file_id)
    , pt.*
    , pages_in_range = pt.to_page_id - pt.from_page_id + 1
FROM
(
    -- One row per "gap start": an allocated page whose successor page id is
    -- not contiguous, plus the extent of the unused run that follows it.
    SELECT
        database_name = DB_NAME()
        , file_id
        , file_name
        , file_total_size
        , file_total_used_space
        , file_total_unused_pages = file_total_size - file_total_reserved_pages + 1
        , file_total_reserved_pages
        , prev_used_page
        , from_used_page_id = allocated_page_page_id
        , to_page_id = ISNULL(NULLIF(next_used_page,file_total_size-1) - 1, next_used_page)
        , consecutive_unused_pages = ISNULL(NULLIF(next_used_page,file_total_size-1) - 1, next_used_page) - allocated_page_page_id
        , next_used_page_id = LEAD(allocated_page_page_id,1,file_total_size-1) OVER(PARTITION BY file_id ORDER BY allocated_page_page_id ASC)
    FROM
    (
        -- One row per allocated page with the previous/next allocated page id
        -- in the same file; window-function defaults fill the file start/end.
        SELECT
            database_id = DB_ID()
            , f.file_id
            , f.file_name
            , f.file_total_used_space
            , f.file_total_size
            , file_total_reserved_pages = COUNT(*) OVER() + 9
            , p.allocated_page_page_id
            , prev_used_page = LAG(p.allocated_page_page_id,1,0) OVER (PARTITION BY f.file_id ORDER BY p.allocated_page_page_id ASC)
            , next_used_page = LEAD(p.allocated_page_page_id,1,f.file_total_size - 1) OVER (PARTITION BY f.file_id ORDER BY p.allocated_page_page_id ASC)
        FROM sys.dm_db_database_page_allocations(DB_ID(@DatabaseName),default,default,default,'DETAILED') AS p
        INNER JOIN (
            -- Data files only (type = 0), sized in pages.
            SELECT file_id, file_name = [name], size AS file_total_size
                , file_total_used_space = FILEPROPERTY([name], 'SpaceUsed')
            FROM sys.database_files AS f
            WHERE type = 0
        ) AS f
            ON f.file_id = p.allocated_page_file_id
    ) AS sub1
    WHERE sub1.next_used_page <> sub1.allocated_page_page_id + 1  -- keep only gap starts
) AS sub2
CROSS APPLY
(
    -- Expand each gap row into EMPTY and boundary USED range rows.
    SELECT usage = 'EMPTY'
        , from_page_id = from_used_page_id + 1
        , to_page_id = sub2.to_page_id
        , consecutive_unused_pages = sub2.consecutive_unused_pages
    UNION ALL
    SELECT
        usage = 'USED'
        , 0
        , sub2.from_used_page_id
        , 0
    WHERE prev_used_page = 0
    UNION ALL
    SELECT
        usage = 'USED'
        , sub2.to_page_id + 1
        , sub2.next_used_page_id
        , 0
    WHERE next_used_page_id < file_total_size-1
) AS pt
USE [NavIntegrationDB]
GO
/****** Object: StoredProcedure [dbo].[SWITCH_DetailsDelete] Script Date: 02/13/2012 17:17:21 ******/
SET ANSI_NULLS ON
GO
SET QUOTED_IDENTIFIER ON
GO
-- Deletes a single row from dbo.SwitchDetails by its SwitchDetailsID key.
-- No-op (no error) when the id does not exist.
CREATE PROCEDURE [dbo].[SWITCH_DetailsDelete]
@param_intSwitchDetailsID INT -- SwitchDetailsID of the row to remove
AS
BEGIN
SET NOCOUNT ON;
DELETE FROM dbo.SwitchDetails
WHERE SwitchDetailsID = @param_intSwitchDetailsID
END
GO
-- Idempotent drop: IF EXISTS avoids an error when the table is already gone
-- (e.g. on a re-run of this teardown script).
DROP TABLE IF EXISTS public.certificates;
-- +goose Up
-- SQL in this section is executed when the migration is applied.
-- One row per job: the last time its status changed (upserted by the
-- trigger functions defined below).
CREATE TABLE job_status_events (
job_id CHAR(36) NOT NULL PRIMARY KEY,
updated timestamp with time zone NOT NULL
);
-- +goose StatementBegin
-- Records a status-change event whenever a job row's status actually changes.
CREATE FUNCTION job_status_update_update() RETURNS trigger AS
$body$
BEGIN
    -- Fix: NEW.status <> OLD.status evaluates to NULL (treated as false) when
    -- either side is NULL, silently dropping NULL<->value transitions.
    -- IS DISTINCT FROM treats NULL as an ordinary comparable value.
    IF NEW.status IS DISTINCT FROM OLD.status THEN
        INSERT INTO job_status_events ( job_id, updated ) VALUES ( NEW.job_id, now() ) ON CONFLICT ( job_id ) DO UPDATE SET updated = now();
    END IF;
    RETURN NEW;
END;
$body$ LANGUAGE plpgsql;
-- +goose StatementEnd
-- +goose StatementBegin
-- Seeds/refreshes the event row whenever a new job row is inserted.
CREATE FUNCTION job_status_update_insert() RETURNS trigger AS
$body$
BEGIN
INSERT INTO job_status_events ( job_id, updated ) VALUES ( NEW.job_id, now() ) ON CONFLICT ( job_id ) DO UPDATE SET updated = now();
RETURN NEW;
END;
$body$ LANGUAGE plpgsql;
-- +goose StatementEnd
-- +goose StatementBegin
-- Fire on every new job row.
CREATE TRIGGER job_status_update_trigger_insert
AFTER
INSERT
ON jobs
FOR EACH ROW
EXECUTE PROCEDURE job_status_update_insert();
-- +goose StatementEnd
-- +goose StatementBegin
-- Fire on every job update; the function itself filters on status changes.
CREATE TRIGGER job_status_update_trigger_update
AFTER
UPDATE
ON jobs
FOR EACH ROW
EXECUTE PROCEDURE job_status_update_update();
-- +goose StatementEnd
-- +goose Down
-- SQL in this section is executed when the migration is rolled back.
-- Drop order: triggers first, then functions, then the table they write to.
DROP TRIGGER job_status_update_trigger_insert ON jobs;
DROP TRIGGER job_status_update_trigger_update ON jobs;
DROP FUNCTION job_status_update_update();
DROP FUNCTION job_status_update_insert();
DROP TABLE job_status_events;
|
-- MySQL dump 10.13 Distrib 5.7.19, for Linux (x86_64)
--
-- Host: localhost Database: javavirtual
-- ------------------------------------------------------
-- Server version 5.7.19-0ubuntu0.16.04.1
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8 */;
/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
/*!40103 SET TIME_ZONE='+00:00' */;
/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
--
-- Table structure for table `students`
--
DROP TABLE IF EXISTS `students`;
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!40101 SET character_set_client = utf8 */;
-- Student accounts table (mysqldump-generated; column COMMENTs are in Chinese).
-- NOTE(review): `pw` stores passwords in plain text (visible in the dump's
-- INSERT data) — should be hashed before this schema ships anywhere real.
-- NOTE(review): MySQL's `utf8` is 3-byte; `utf8mb4` is needed for full
-- Unicode — confirm whether that matters for these fields.
CREATE TABLE `students` (
`id` bigint(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`ln` varchar(255) NOT NULL COMMENT '账号',
`pw` varchar(255) NOT NULL COMMENT '密码',
`no` varchar(255) NOT NULL COMMENT '学号',
`role` varchar(255) NOT NULL DEFAULT 'student' COMMENT '角色',
`name` varchar(255) NOT NULL COMMENT '姓名',
`sex` varchar(255) DEFAULT NULL COMMENT '性别',
`cls` varchar(255) DEFAULT NULL COMMENT '班级',
`birthday` varchar(255) DEFAULT NULL COMMENT '生日',
`introduce` varchar(255) DEFAULT NULL COMMENT '个人简介',
`phone` varchar(255) DEFAULT NULL COMMENT '电话',
`email` varchar(255) DEFAULT NULL COMMENT '邮箱',
`grp` varchar(255) DEFAULT NULL COMMENT '所在组',
`learn_level` varchar(255) DEFAULT NULL COMMENT '学习等级',
`power` varchar(255) DEFAULT NULL COMMENT '权限',
`is_used` varchar(255) NOT NULL DEFAULT '-1' COMMENT '是否启用',
`reg_date` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '注册时间',
`pass_date` timestamp NULL DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP COMMENT '批准时间',
PRIMARY KEY (`id`),
UNIQUE KEY `ln` (`ln`),
UNIQUE KEY `no` (`no`)
) ENGINE=InnoDB AUTO_INCREMENT=29 DEFAULT CHARSET=utf8;
/*!40101 SET character_set_client = @saved_cs_client */;
/*!40101 SET character_set_client = @saved_cs_client */;
--
-- Dumping data for table `students`
--
LOCK TABLES `students` WRITE;
/*!40000 ALTER TABLE `students` DISABLE KEYS */;
INSERT INTO `students` VALUES (22,'test1','test1','201501','student','小明',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,'1','2016-05-06 14:27:55','2016-06-16 07:03:27'),(23,'test2','test2','201502','student','小红',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,'1','2016-05-06 14:27:57','2016-06-16 07:03:31'),(24,'test3','test3','201503','student','小王',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,'1','2016-05-06 14:27:58','2016-06-16 07:03:34'),(25,'test4','test4','201504','student','小杨',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,'-1','2016-05-06 14:23:10','2016-06-16 07:03:40'),(26,'test5','test5','201505','student','小二',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,'-1','2016-05-06 14:23:10','2016-06-16 07:03:44'),(27,'1','1','1','student','1','','','','','','','','','','-1','2016-08-23 03:50:27',NULL),(28,'SunnyLau','YD12300.','2015020800079','student','刘松','','','','','','','','','','-1','2016-09-17 12:02:08',NULL);
/*!40000 ALTER TABLE `students` ENABLE KEYS */;
UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-- Dump completed on 2020-08-09 17:37:19
|
-- Find, in every database, modules whose source mentions GET_VISIT_VISITOR_ID.
-- Rewritten against sys.sql_modules: the legacy syscomments view is deprecated
-- and splits module text into 4000-character chunks, so a LIKE pattern that
-- spans a chunk boundary is silently missed.
exec sp_MSforeachdb 'USE [?]; select distinct ''?'' AS DB, object_name(object_id) as SP from sys.sql_modules where definition like ''%GET_VISIT_VISITOR_ID%'''
SELECT
/*%expand*/*
FROM
staffs
WHERE
deleted_at IS NULL
/*%if staff.id != null */
AND staff_id = /* staff.id */1
/*%end*/
/*%if staff.email != null */
AND email = /* staff.email */'<EMAIL>'
/*%end*/
/*%if staff.passwordResetToken != null */
AND password_reset_token = /* staff.passwordResetToken */'xxxx'
AND token_expires_at > NOW()
/*%end*/
LIMIT 1
|
-- Test fixture reset: intentionally unfiltered DELETEs clear both tables,
-- children (TRANSFER) before parents (ACCOUNT) to satisfy any FK, then seed
-- a single account.
delete from TRANSFER;
delete from ACCOUNT;
insert into ACCOUNT(ID, NAME, BALANCE) values ( 7, '<NAME>' , 100000.00);
<filename>gpMgmt/test/behave/mgmt_utils/steps/data/gptransfer_setup/constraint.sql
--Coverage for all the table and column constraints.
-- Greenplum test fixture (DISTRIBUTED BY / DISTRIBUTED RANDOMLY clauses).
-- NOTE(review): the "primry" spelling in table names is baked into the test
-- expectations — do not "fix" it here.
-- NOTE(review): 3-column tables receive 2-value INSERTs on purpose; the
-- trailing column is left NULL.
--Drop tables
Drop table if exists tbl_unique_constraint;
Drop table if exists tbl_unique_constraint2;
Drop table if exists tbl_primry_constraint;
Drop table if exists tbl_primry_constraint2;
Drop table if exists tbl_check_constraint;
Drop table if exists col_unique_constraint;
Drop table if exists col_primry_constraint;
Drop table if exists col_check_constraint;
--Create table with table constraint -Unique
CREATE table tbl_unique_constraint (i int, t text, constraint tbl_unq1 unique(i)) distributed by (i);
INSERT into tbl_unique_constraint values (100,'text1');
INSERT into tbl_unique_constraint values (200,'text2');
INSERT into tbl_unique_constraint values (300,'text3');
--Create table with table constraint -Primary key
CREATE table tbl_primry_constraint (i int, t text, constraint tbl_primary1 primary key (i)) distributed by (i);
INSERT into tbl_primry_constraint values (100,'text1');
INSERT into tbl_primry_constraint values (200,'text2');
INSERT into tbl_primry_constraint values (300,'text3');
--Create table with table constraint -Check
CREATE TABLE tbl_check_constraint (
a1 integer,
a2 text,
a3 varchar(10),
CONSTRAINT tbl_chk_con1 CHECK (a1 > 25 AND a2 <> '')
)DISTRIBUTED RANDOMLY;
INSERT into tbl_check_constraint values (100,'text1');
INSERT into tbl_check_constraint values (200,'text2');
INSERT into tbl_check_constraint values (300,'text3');
--Create table with column constraint -Unique
CREATE table col_unique_constraint (i int constraint col_unique1 unique, t text) distributed by (i);
INSERT into col_unique_constraint values (100,'text1');
INSERT into col_unique_constraint values (200,'text2');
INSERT into col_unique_constraint values (300,'text3');
--Create table with column constraint -Primary Key
CREATE table col_primry_constraint (i int constraint col_primary1 primary key, t text) distributed by (i);
INSERT into col_primry_constraint values (100,'text1');
INSERT into col_primry_constraint values (200,'text2');
INSERT into col_primry_constraint values (300,'text3');
--Create table with column constraint -Check
CREATE TABLE col_check_constraint (
did integer,
name varchar(40) NOT NULL constraint col_chk1 CHECK (name <> '')
)DISTRIBUTED RANDOMLY;
INSERT into col_check_constraint values (100,'text1');
INSERT into col_check_constraint values (200,'text2');
INSERT into col_check_constraint values (300,'text3');
-- @testpoint: openGauss reserved keyword "select" used as a view name
-- Unquoted - expected to fail
CREATE or replace VIEW select AS
SELECT * FROM pg_tablespace WHERE spcname = 'pg_default';
-- Double-quoted - created successfully
CREATE or replace VIEW "select" AS
SELECT * FROM pg_tablespace WHERE spcname = 'pg_default';
-- Cleanup
drop VIEW "select";
-- Single-quoted - expected to fail
CREATE or replace VIEW 'select' AS
SELECT * FROM pg_tablespace WHERE spcname = 'pg_default';
-- Backquoted - expected to fail
CREATE or replace VIEW `select` AS
SELECT * FROM pg_tablespace WHERE spcname = 'pg_default';
<filename>UserFront/Export/Dump20220117 - Copie/tennis_categorie_billet.sql
-- MySQL dump 10.13 Distrib 8.0.23, for Win64 (x86_64)
--
-- Host: 127.0.0.1 Database: Tennis
-- ------------------------------------------------------
-- Server version 5.7.31
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!50503 SET NAMES utf8 */;
/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
/*!40103 SET TIME_ZONE='+00:00' */;
/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
--
-- Table structure for table `categorie_billet`
--
DROP TABLE IF EXISTS `categorie_billet`;
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!50503 SET character_set_client = utf8mb4 */;
-- Ticket-category rows (mysqldump-generated).
-- NOTE(review): ENGINE=MyISAM does not enforce the FK relationships the
-- `FK...` key names imply (billet_id, user_id) — confirm this is intentional.
-- NOTE(review): CHARSET=latin1 for a table whose data contains accented
-- French text — verify round-tripping through the dump is lossless.
CREATE TABLE `categorie_billet` (
`id` bigint(20) NOT NULL AUTO_INCREMENT,
`categorie_billet_enum_string` varchar(255) DEFAULT NULL,
`billet_id` bigint(20) DEFAULT NULL,
`user_id` bigint(20) DEFAULT NULL,
PRIMARY KEY (`id`),
KEY `FK4opgodmbt4890s7m4rijvwpc9` (`billet_id`),
KEY `FKneunxaid7ej6qtuqwcuu5u067` (`user_id`)
) ENGINE=MyISAM AUTO_INCREMENT=52 DEFAULT CHARSET=latin1;
/*!40101 SET character_set_client = @saved_cs_client */;
--
-- Dumping data for table `categorie_billet`
--
LOCK TABLES `categorie_billet` WRITE;
/*!40000 ALTER TABLE `categorie_billet` DISABLE KEYS */;
INSERT INTO `categorie_billet` VALUES (1,'Billet grand public licencié',1,3),(2,'Billet grand public licencié',2,3),(3,'Billet grand public licencié',3,3),(4,'Billet grand public licencié',4,3),(5,'Billet grand public licencié',5,3),(6,'Billet grand public licencié',6,3),(7,'Billet grand public licencié',7,3),(8,'Billet grand public licencié',8,3),(9,'Billet grand public licencié',9,3),(10,'Billet big match licencié',10,3),(11,'Billet grand public licencié',11,3),(12,'Billet grand public licencié',12,3),(13,'Billet big match non licencié',13,3),(14,'Billet grand public non licencié',14,3),(15,'Billet grand public licencié',15,3),(16,'Billet grand public licencié',16,3),(17,'Billet grand public licencié',17,3),(18,'Billet grand public licencié',18,3),(19,'Billet grand public licencié',19,3),(20,'Billet grand public licencié',20,3),(21,'Billet grand public licencié',21,3),(22,'Billet grand public licencié',22,3),(23,'Billet grand public licencié',23,3),(24,'Billet grand public non licencié',24,3),(25,'Billet grand public licencié',25,3),(26,'Billet grand public licencié',26,3),(27,'Billet grand public non licencié',27,3),(28,'Billet grand public licencié',28,3),(29,'Billet grand public licencié',29,3),(30,'Billet grand public licencié',30,3),(31,'Billet grand public licencié',31,3),(32,'Billet grand public licencié',32,3),(33,'Billet grand public licencié',33,3),(34,'Billet grand public non licencié',34,3),(35,'Billet big match licencié',35,3),(36,'Billet grand public licencié',36,3),(37,'Billet grand public licencié',37,3),(38,'Billet big match licencié',38,3),(39,'Billet grand public licencié',39,3),(40,'Billet grand public non licencié',40,3),(41,'Billet grand public licencié',41,3),(42,'Billet grand public licencié',42,3),(43,'Billet grand public licencié',43,3),(44,'Billet big match licencié',44,3),(45,'Billet big match licencié',45,3),(46,'Billet grand public licencié',46,3),(47,'Billet big match licencié',47,3),(48,'Billet big match licencié',48,3),(49,'Billet 
grand public non licencié',49,3),(50,'Billet big match non licencié',50,3),(51,'Billet big match licencié',51,3);
/*!40000 ALTER TABLE `categorie_billet` ENABLE KEYS */;
UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-- Dump completed on 2022-01-17 10:05:35
|
-- phpMyAdmin SQL Dump
-- version 4.8.5
-- https://www.phpmyadmin.net/
--
-- Host: localhost:8889
-- Generation Time: Jul 25, 2020 at 04:25 PM
-- Server version: 5.7.25
-- PHP Version: 7.3.8
SET SQL_MODE = "NO_AUTO_VALUE_ON_ZERO";
SET time_zone = "+00:00";
--
-- Database: `isyweb`
--
-- --------------------------------------------------------
--
-- Table structure for table `data`
--
-- Contact-form submissions (phpMyAdmin export). Note the export pattern:
-- the table is created without a key, then the PRIMARY KEY and
-- AUTO_INCREMENT are bolted on by the ALTER statements below.
CREATE TABLE `data` (
`id` int(11) NOT NULL,
`name` varchar(255) NOT NULL,
`email` varchar(255) NOT NULL,
`subject` varchar(255) NOT NULL,
`message` longtext NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
--
-- Dumping data for table `data`
--
INSERT INTO `data` (`id`, `name`, `email`, `subject`, `message`) VALUES
(1, 'ss', '<EMAIL>', 'tes', '22');
--
-- Indexes for dumped tables
--
--
-- Indexes for table `data`
--
ALTER TABLE `data`
ADD PRIMARY KEY (`id`);
--
-- AUTO_INCREMENT for dumped tables
--
--
-- AUTO_INCREMENT for table `data`
--
ALTER TABLE `data`
MODIFY `id` int(11) NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=2;
|
<filename>sqls/mysql/2020050102_create_app_user.up.sql<gh_stars>0
-- Application users, one per (organization, login). Supports both local
-- credentials (login_id/hashed_password) and external providers.
create table `app_user` (
`id` int auto_increment
,`version` int not null -- optimistic-locking version counter — confirm with the ORM
,`created_at` datetime not null default current_timestamp
,`updated_at` datetime not null default current_timestamp on update current_timestamp
,`created_by` int not null
,`updated_by` int not null
,`organization_id` int not null
,`login_id` varchar(200) character set ascii -- nullable: MySQL UNIQUE allows many NULLs, so provider-only accounts can omit it — confirm intended
,`hashed_password` varchar(200) character set ascii
,`username` varchar(40)
,`role` varchar(20) character set ascii
,`provider` varchar(40) character set ascii -- external auth provider name, if any
,`provider_id` varchar(40) character set ascii
,`provider_access_token` text character set ascii
,`provider_refresh_token` text character set ascii
,`removed` tinyint(1) not null -- soft-delete flag
,primary key(`id`)
,unique(`organization_id`, `login_id`)
,foreign key(`organization_id`) references `organization`(`id`) on delete cascade
);
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.