branch_name stringclasses 149 values | text stringlengths 23 89.3M | directory_id stringlengths 40 40 | languages listlengths 1 19 | num_files int64 1 11.8k | repo_language stringclasses 38 values | repo_name stringlengths 6 114 | revision_id stringlengths 40 40 | snapshot_id stringlengths 40 40 |
|---|---|---|---|---|---|---|---|---|
refs/heads/master | <file_sep>
# Vagrantfile for the lambda-architecture course VM
# (Spark + Kafka + Cassandra + Zeppelin running as Docker containers in the box).
Vagrant.configure("2") do |config|
  # Ports forwarded 1:1 (host port == guest port).
  identity_ports = [
    10008, 8080, 18080, 8988, 8989, 9000, 9092, 2181, 8082, 8081,
    # Cassandra
    7000, 7001, 7199, 9042, 9160,
    *(8030..8033), 8040, 8042, 8088, 49707,
    # NOTE: guest 50010 was previously forwarded to host 50011 — the only
    # mismatched pair in an otherwise identical list, so it is treated as a
    # typo and mapped 1:1 here.
    50010, 50020, 50070, 50075, 50090,
    4040
  ]
  identity_ports.each do |port|
    config.vm.network "forwarded_port", guest: port, host: port
  end
  # Guest SSH exposed on a non-default host port to avoid clashing with a
  # local sshd.
  config.vm.network "forwarded_port", guest: 22, host: 9922

  config.vm.box = "aalkilani/spark-kafka-cassandra-applying-lambda-architecture/lambda_arch"
  config.vm.hostname = "lambda-pluralsight"

  # Restart the course containers on every boot.
  config.vm.provision "docker-images", type: "shell", run: "always", inline: <<-SHELLPRE
docker restart zookeeper
docker restart spark-1.6.3
docker restart cassandra
docker restart zeppelin
docker restart kafka
  SHELLPRE
  # One-time fixes applied from fixes.sh.
  config.vm.provision "image-fixes", type: "shell", run: "once", path: "fixes.sh"

  config.vm.provider "virtualbox" do |v|
    v.memory = 4096
    v.cpus = 2
  end
  config.vm.provider "vmware_fusion" do |v|
    v.vmx["memsize"] = "4096"
    v.vmx["numvcpus"] = "2"
  end
end
<file_sep># lambda_arch
An Apache Kafka, Spark, Cassandra, Zeppelin application processing data in a lambda architecture (parallel batch & real-time data processing)
| c405daf71907fe41cdf0d20b06d8bddc5485dfd1 | [
"Markdown",
"Ruby"
] | 2 | Ruby | mateuszpierzchala/lambda_arch | b9f69dd6588bfda38160d9da6cc222a9fbe8ce5e | e5464a90591a3485b71c353eb09a31623306c11b |
refs/heads/master | <repo_name>undefinedroot/x0-TCNDC3rdEd-playground<file_sep>/8-promises.js
// Resolve with the sum of two numbers after a one-second delay.
// (The reject callback is never used: this promise always fulfills.)
const add = (a, b) =>
  new Promise(resolve => {
    setTimeout(() => resolve(a + b), 1000);
  });
// Earlier nested-callback style kept for comparison:
// add(1, 2)
//   .then(sum => {
//     console.log(sum);
//     add(sum, 1).then(sum2 => {
//       console.log(sum2);
//     }).catch(e => {
//       console.log('error', e);
//     });
//   }).catch(e => {
//     console.log('error', e);
//   });

// Promise chaining: each .then returns a new promise, so a single
// .catch at the end covers a failure anywhere in the chain.
add(1, 1)
  .then(firstSum => {
    console.log(firstSum);
    return add(firstSum, 1);
  })
  .then(secondSum => {
    console.log(secondSum);
  })
  .catch(err => {
    console.log('error', err);
  });

// Manual promise construction with an explicit failure path:
// const isFail = true;
// const doWorkPromise = new Promise((resolve, reject) => {
//   setTimeout(() => {
//     if (!isFail) {
//       resolve([4, 5, 3]);
//     } else {
//       reject('failed');
//     }
//   }, 1000);
// });
// doWorkPromise.then(result => {
//   console.log(result);
// }).catch(e => {
//   console.log(e);
// });

/**
 * Promise states:
 *        pending
 *          ||
 *          /\
 * fulfilled  rejected
 */
// Raw HTTPS GET against the Dark Sky forecast API using only the core
// https module (no request/axios wrapper).
const https = require('https');

const baseUrl = `https://api.darksky.net/forecast`;
const apiKey = `<KEY>`;
const lat = 40;
const lon = -75;
const url = `${baseUrl}/${apiKey}/${lat},${lon}?units=si`;

const req = https.request(url, res => {
  // The body arrives in chunks; accumulate them into one string.
  let body = '';
  res.on('data', chunk => (body += chunk.toString()));
  // 'end' fires once the complete payload has been received.
  res.on('end', () => console.log(JSON.parse(body)));
});
req.on('error', error => console.log('An Error', error));
req.end();
// Demonstrates the node-style (error-first) callback pattern.
const doWorkCallback = callback => {
  setTimeout(() => {
    // Swap these two lines to simulate the failure path:
    //callback('error', undefined);
    callback(undefined, [4, 5, 3]);
  }, 1000);
};

doWorkCallback((err, data) => {
  if (err) {
    return console.log(err);
  }
  console.log(data);
});

// #region old code
// setTimeout(() => {
//   console.log('Two seconds are up');
// }, 2000);
// const names = ['12345', '123', '1234'];
// const shortNames = names.filter(name => name.length <= 4);
// const geocode = (address, callback) => {
//   setTimeout(() => {
//     const data = {
//       latitude: 1,
//       longitude: 2
//     };
//     callback(data);
//   }, 2000);
// };
// geocode('Test', data => {
//   console.log(data);
// });
// const add = (n1, n2, callback) => {
//   setTimeout(() => {
//     callback(n1 + n2);
//   }, 2000);
// };
// add(1, 4, (sum) => {
//   console.log(sum);
// });
// #endregion
// ES6 object features: property shorthand and destructuring.

// Property shorthand: a bare `name` expands to `name: name`.
const name = 'Test';
const userAge = 40;
const user = {
  name,
  age: userAge,
  location: 'unknown'
};
console.log(user);

const product = {
  label: '<NAME>',
  price: 3,
  stock: 201,
  salePrice: undefined,
  rating: 4.2
};

// Plain destructuring form of the same idea:
// const { label: productLabel, stock, rating = 5 } = product;
// console.log(productLabel);
// console.log(stock);
// console.log(rating);

// Parameter destructuring with a rename (label -> itemLabel), a default
// (rating = 5, applied only when the property is undefined) and a `= {}`
// fallback so calling without a second argument does not throw.
const transaction = (type, { label: itemLabel, stock, rating = 5 } = {}) => {
  console.log(type, itemLabel, stock, rating);
};
transaction('order', product);
| 24b6180818b2a78315d7f07b8a1254ee4807638d | [
"JavaScript"
] | 4 | JavaScript | undefinedroot/x0-TCNDC3rdEd-playground | e9a6ae3cd487dce3b2a9c0199eec7605db48c6fc | b97209e4f7973317034a03b06f8901aca3bb536e |
HEAD | <repo_name>kosenconf/advent<file_sep>/Rakefile
# Default task: build all generated assets.
task :default => :compile

desc "Compile Assets"
task :compile do
  require "yaml"
  require "json"
  puts "Compile:"
  puts " coffee > js"
  system("coffee -c assets/advent.coffee")
  puts " scss > css"
  system("scss assets/advent.{scss,css}")
  puts " yaml > json"
  # YAML.load_file opens and closes the file itself; the previous
  # YAML.load(open(...)) left the file handle dangling until GC.
  entries = YAML.load_file("assets/entries.yml")
  # Emit each top-level YAML entry as a `var <key> = <json>;`-style line.
  File.write(
    "assets/entries.js",
    entries.map { |key, value| "var #{key} = #{value.to_json}" }.join("\n"),
    :encoding => Encoding::UTF_8
  )
  puts " haml > html"
  system("haml index.haml > index.html")
  puts "Done!"
end
<file_sep>/assets/advent.js
// Bootstrap for the advent-calendar page (compiled-CoffeeScript style):
// exposes the globally defined per-year entry lists to Knockout and
// registers two custom bindings that inject share buttons.
(function() {
  // The view model simply surfaces the global entry arrays.
  var AdventViewModel = function() {
    return {
      entries2012general: entries2012general,
      entries2012teachers: entries2012teachers,
      entries2011: entries2011,
      entries2010: entries2010
    };
  };

  // Custom binding: render a Facebook Like button for the bound URL.
  ko.bindingHandlers.likeButton = {
    init: function(element, valueAccessor) {
      var url = valueAccessor();
      return $(element).html("<div class='fb-like' data-href='" + url + "' data-send='false' data-layout='box_count' data-width='100' data-show-faces='false'></div>");
    }
  };

  // Custom binding: render a vertical-count Tweet button for the bound URL.
  ko.bindingHandlers.tweetButton = {
    init: function(element, valueAccessor) {
      var url = valueAccessor();
      return $(element).html("<a href='https://twitter.com/share' class='twitter-share-button' data-url='" + url + "' data-count='vertical'>Tweet</a>");
    }
  };

  // Apply bindings once the DOM is ready.
  $(function() {
    return ko.applyBindings(new AdventViewModel());
  });
}).call(this);
<file_sep>/README.md
http://kosenconf.github.com/advent/
| 921ab3d9609568eb38f9c8ad28a617ac53bcb944 | [
"JavaScript",
"Ruby",
"Markdown"
] | 3 | Ruby | kosenconf/advent | 1f1c370ddd4990d7ed08547bd34a883e08b3c406 | 1581dd8a0c88dd4dfd4c7efe49d123a5f052864a |
refs/heads/main | <repo_name>KristoffBlackman/SE-Homework<file_sep>/Scripts/main.js
alert("YUUUUUUR!!! Im so glad your taking a look at my website!");<file_sep>/README.md
# SE-Homework
HW assignments for <NAME>
| 50ec31a82c4cc27a496eee58433eb328689d36a1 | [
"JavaScript",
"Markdown"
] | 2 | JavaScript | KristoffBlackman/SE-Homework | 4c75484620329d5cab32d2b730d624d7aa599b31 | fa54f2633c547fa1a3a264574623f9aa083059e3 |
refs/heads/master | <file_sep># cod-of-Hafman<file_sep>#include "header.h"
/* Per-byte Huffman code table, filled in by make_Haf_sym() during dfs(). */
sym *Haf[256];
/* Occurrence count of every possible byte value in the input file. */
int abc[256] = { 0 };
/* Bit accumulator for the output byte being assembled, and its fill level. */
int arr_bit[8]; int num_bit;
/* Scratch buffer holding the code path of the tree node currently visited.
   NOTE(review): size 8 caps the code length at 8 bits; a deeper tree would
   overflow this buffer — confirm inputs keep the tree shallow. */
int haf_cod[8] = { 0 };
/*
 * Count byte frequencies of `in` from its current position to EOF.
 * Side effect: increments the global abc[] histogram.
 * Returns the number of distinct byte values seen.
 *
 * Bug fix: the original do/while incremented abc[] once more after
 * fread() hit EOF, counting the final byte twice (and, for an empty
 * stream, counting an uninitialized byte).
 */
int file_processing(FILE *in)
{
    int count = 0;
    unsigned char syms[1];
    while (fread(syms, sizeof(char), 1, in) == 1)
    {
        if (abc[syms[0]] == 0) count++;
        abc[syms[0]]++;
    }
    return count;
}
/*
 * Allocate a leaf node for byte value `num`, weighted by its frequency
 * in the supplied histogram (the global abc[] in practice).
 * The redundant memset after calloc was removed: calloc already zeroes.
 */
tree* create_branch(int *abc, int num)
{
    tree *root = (tree*)calloc(1, sizeof(tree));
    root->count = abc[num];
    root->val = (unsigned char)num;
    return root;
}
prio_q* build_queue(int num, prio_q *head)
{
if (!head)
{
prio_q *new_el = (prio_q*)calloc(1, sizeof(prio_q));
new_el->elem = create_branch(abc, num);
head = new_el;
return head;
}
if (head->elem->count < abc[num])
{
head->next = build_queue(num, head->next);
return head;
}
prio_q *new_el = (prio_q*)calloc(1, sizeof(prio_q));
new_el->elem = create_branch(abc, num);
new_el->next = head;
head = new_el;
return head;
}
/*
 * Detach and return the tree stored in the front queue element,
 * freeing the list cell and advancing *el to the next entry.
 */
tree* pop(prio_q **el)
{
    prio_q *front = *el;
    tree *payload = front->elem;
    *el = front->next;
    free(front);
    return payload;
}
/*
 * Combine two subtrees into a new internal node whose weight is the sum
 * of its children's weights (the classic Huffman merge step).
 */
tree* merge(tree *l, tree *r)
{
    tree *parent = (tree*)calloc(1, sizeof(tree));
    parent->count = l->count + r->count;
    parent->left = l;
    parent->right = r;
    return parent;
}
/*
 * Recursively insert tree `el` into the priority queue `head`, kept in
 * ascending order of node weight; returns the (possibly new) head.
 * Ties insert in front of existing equal-weight entries.
 */
prio_q* push(prio_q *head, tree *el)
{
    /* Empty list: el becomes the sole element. */
    if (!head)
    {
        prio_q *new_el = (prio_q*)calloc(1, sizeof(prio_q));
        new_el->elem = el;
        head = new_el;
        return head;
    }
    /* Head is lighter than el: keep it and insert further down. */
    if (head->elem->count < el->count)
    {
        head->next = push(head->next, el);
        return head;
    }
    /* Otherwise el belongs in front of the current head. */
    prio_q *new_el = (prio_q*)calloc(1, sizeof(prio_q));
    new_el->elem = el;
    new_el->next = head;
    head = new_el;
    return head;
}
/*
 * Collapse the weight-sorted queue into a single Huffman tree:
 * repeatedly take the two lightest nodes, merge them, and re-insert the
 * result until one tree remains. Returns NULL for an empty queue.
 */
tree* build_tree(prio_q* head)
{
    if (!head) return NULL;
    while (head->next)
    {
        tree *lightest = pop(&head);
        tree *next_lightest = pop(&head);
        head = push(head, merge(lightest, next_lightest));
    }
    return pop(&head);
}
/*
 * Append one bit to the global output accumulator; once 8 bits are
 * buffered, pack them MSB-first into a byte and write it to `out`.
 */
void make_byte(int num, FILE *out)
{
    arr_bit[num_bit] = num;
    num_bit++;
    if (num_bit == 8)
    {
        unsigned char c = 0;
        for (int j = 0; j < 8; j++)
            c = ((c << 1) | arr_bit[j]);
        fprintf(out, "%c", c);
        num_bit = 0;
    }
}
/*
 * Record the Huffman code for byte `symb`: the first `i` bits of the
 * global haf_cod[] path buffer.
 *
 * Bug fix: the copy loop previously ran with `j <= i`, writing one
 * element past the end of the freshly allocated i-element sym_Haf array
 * (and reading haf_cod[i], one past the valid code).
 */
void make_Haf_sym(unsigned char symb, int i)
{
    Haf[symb] = (sym*)calloc(1, sizeof(sym));
    Haf[symb]->count = i;
    Haf[symb]->sym_Haf = (int*)calloc(i, sizeof(int));
    for (int j = 0; j < i; j++)
        Haf[symb]->sym_Haf[j] = haf_cod[j];
}
/*
 * Depth-first walk of the Huffman tree that simultaneously
 *  - serializes the tree shape to `out` (0 = descend, 1 = ascend; a leaf
 *    is written as 1 followed by its 8-bit byte value, MSB first), and
 *  - records each leaf's code in the global Haf[] table.
 * `count_haf` is the current depth, i.e. the code length so far.
 *
 * Bug fix: the leaf test previously checked root->left twice
 * ("!(left || left)") instead of testing both children. Harmless for a
 * full Huffman tree (internal nodes always have both children), but
 * wrong as written.
 */
void dfs(FILE *out, tree *root, int count_haf)
{
    if (root->left)
    {
        make_byte(0, out);
        haf_cod[count_haf] = 0;
        dfs(out, root->left, count_haf + 1);
        make_byte(1, out);
    }
    if (root->right)
    {
        make_byte(0, out);
        haf_cod[count_haf] = 1;
        dfs(out, root->right, count_haf + 1);
        make_byte(1, out);
    }
    if (!(root->left || root->right))
    {
        make_byte(1, out);
        unsigned int c;
        for (int j = 0; j < 8; j++)
        {
            c = ((root->val) >> (7 - j)) & 1;
            make_byte(c, out);
        }
        make_Haf_sym(root->val, count_haf);
    }
}
/*
 * Second pass: re-read the input and emit each byte's Huffman code
 * through the global bit accumulator. A final partial byte is
 * zero-padded on the right and flushed.
 */
void coding_text(FILE *in, FILE *out)
{
    unsigned char c;
    int i = 0;
    c = fgetc(in);
    while (!feof(in))
    {
        for (i = 0; i < Haf[c]->count; i++)
            make_byte(Haf[c]->sym_Haf[i], out);
        c = fgetc(in);
    }
    /* Flush trailing bits, padded with zeros to a full byte. */
    if (num_bit != 0)
    {
        for (i = num_bit; i < 8; i++)
            arr_bit[i] = 0;
        c = 0;
        for (i = 0; i < 8; i++)
            c = ((c << 1) | arr_bit[i]);
        fprintf(out, "%c", c);
    }
}
/*
 * Write the compressed file: a "d" mode marker, a reserved slot for the
 * padding-bit count, the distinct-symbol count, the serialized tree,
 * then the encoded payload. Finally seek back to offset 3 and patch in
 * the number of padding bits ((8 - num_bit) % 8) that coding_text()
 * appended to the last byte.
 */
void create_new_file(FILE *in, FILE *out, tree *root, int num_cod_sym)
{
    fprintf(out, "d \r %d ", num_cod_sym);
    dfs(out, root, 0);
    fseek(in, 3, SEEK_SET);
    coding_text(in, out);
    fseek(out, 3, SEEK_SET);
    fprintf(out, "%d", (8 - num_bit) % 8);
}
/*
 * Entry point for compression: skip the 3-byte "c \r" marker, build the
 * frequency histogram and Huffman tree, then emit header + tree + data.
 */
void encoder(FILE *in, FILE *out)
{
    prio_q *head = NULL;
    tree *root;
    int num_cod_sym = 0;
    fseek(in, 3, SEEK_SET);
    num_cod_sym = file_processing(in);
    /* Queue one leaf per byte value that actually occurs. */
    for (int i = 0; i < 256; i++)
        if (abc[i] != 0)
            head = build_queue(i, head);
    root = build_tree(head);
    create_new_file(in, out, root, num_cod_sym);
}<file_sep>#include "header.h"
int verification(FILE *in);
/*
 * Usage: <program> <input file> <output file>
 * The input's 3-byte marker selects the mode: 'c' -> compress,
 * 'd' -> decompress.
 *
 * Fixes: require both file arguments (argv[2] was read after checking
 * only argc < 2) and verify that the output file opened.
 */
int main(int argc, char *argv[])
{
    FILE *in, *out;
    char c;
    if (argc < 3) return 0;
    in = fopen(argv[1], "rb");
    if (verification(in)) return 0;
    out = fopen(argv[2], "w+b");
    if (out == NULL)
    {
        fclose(in);
        return 0;
    }
    c = fgetc(in);
    if (c == 'c') encoder(in, out);
    if (c == 'd') decoder(in, out);
    fclose(in);
    fclose(out);
    return 0;
}
/*
 * Return 1 (after printing an error) when the input file failed to
 * open, otherwise 0.
 * Bug fix: the failure path previously called fclose(NULL), which is
 * undefined behavior.
 */
int verification(FILE *in)
{
    if (in == NULL)
    {
        printf("%s", "Error: file not found");
        return 1;
    }
    return 0;
}<file_sep>#include "header.h"
/* Current input byte expanded to individual bits (MSB first). */
int bits[8] = { 0 };
/* One byte of lookahead, also expanded to bits. */
int spare_bits[8] = { 0 };
/* Index of the next unread bit within bits[]. */
int num_bit = 0;
/*
 * Expand `byte` into its 8 bits, MSB first, storing 0/1 ints
 * in arr[0..7].
 */
void byte_to_bits(unsigned char byte, int *arr)
{
    int bit_index;
    for (bit_index = 0; bit_index < 8; bit_index++)
        arr[bit_index] = (byte >> (7 - bit_index)) & 1;
}
/*
 * Read the next byte from `in`, unpack it into arr[] (MSB first) and
 * return it.
 * NOTE(review): the EOF return of fgetc is not checked; at end of input
 * arr[] is filled from the truncated EOF value — confirm callers stop
 * before over-reading.
 */
unsigned char read_bytes(FILE *in, int *arr)
{
    unsigned char c;
    c = fgetc(in);
    byte_to_bits(c, arr);
    return c;
}
/*
 * During tree reconstruction, descend one level from `breanch`:
 * allocate the left child if it is still empty, otherwise the right
 * one, link it to its parent, and return the new child.
 * (Two dead assignments of the form "breanch = breanch->left" that were
 * immediately overwritten by the calloc result have been removed.)
 */
tree* chose_breanch(tree* breanch)
{
    tree *child = (tree*)calloc(1, sizeof(tree));
    if (!(breanch->left))
        breanch->left = child;
    else
        breanch->right = child;
    child->root = breanch;
    return child;
}
/*
 * Shift the lookahead byte into the active bit buffer and read the next
 * input byte into the lookahead.
 * (An unused local that captured read_bytes' return value was removed.)
 */
void replacement(FILE *in)
{
    for (int i = 0; i < 8; i++)
        bits[i] = spare_bits[i];
    read_bytes(in, spare_bits);
}
/*
 * Pull the next 8 bits — which straddle the current and lookahead
 * bytes — and assemble them into the literal byte value of a leaf
 * symbol. Uses and updates the global bit cursor num_bit.
 */
unsigned char read_char_sym(FILE *in)
{
    unsigned char elem = 0;
    int tmp = num_bit + 1;
    int j = 0;
    num_bit++;  /* step past the leaf-marker bit */
    /* Consume the remainder of the current byte... */
    for (num_bit; num_bit < 8; num_bit++)
        elem = (elem << 1) | bits[num_bit];
    num_bit = num_bit % 8;
    replacement(in);
    /* ...then the leading bits of the (now current) next byte. */
    for (j = 0; j < tmp; j++, num_bit++)
        elem = (elem << 1) | bits[num_bit];
    if (num_bit == 8) replacement(in);
    num_bit = num_bit % 8;
    return elem;
}
/*
 * While the current bit is 1 (the "ascend" markers in the serialized
 * tree), climb back towards the root, consuming one bit per level.
 * *lv tracks the remaining depth; at depth 0 we are at the root and
 * stop climbing.
 * NOTE(review): the final num_bit-- appears to step back one position
 * so the caller's loop increment re-reads the 0 bit just seen — confirm
 * against scanf_tree's loop.
 */
tree* tree_up(tree *breanch, FILE *in, int *lv)
{
    while (bits[num_bit])
    {
        if((*lv) == 0)
            return breanch;
        num_bit++; (*lv)--;
        breanch = breanch->root;
        if (num_bit == 8)
            replacement(in);
        num_bit = num_bit % 8;
    }
    num_bit--;
    return breanch;
}
/*
 * Rebuild the Huffman tree from its bit serialization: a 0 bit creates
 * and descends into a child; a 1 bit is either an ascend marker or, at
 * a leaf position, a marker followed by the 8-bit symbol value.
 * Stops after num_cod_sym leaves have been read and returns the node
 * reached after climbing back up (the root, for a well-formed stream).
 */
tree* scanf_tree(FILE *in, tree *breanch, int num_cod_sym)
{
    int lv = 0;
    unsigned char c, d;
    unsigned char elem = 0;
    c = read_bytes(in, bits);        /* prime the current byte */
    d = read_bytes(in, spare_bits);  /* prime the lookahead byte */
    while(num_cod_sym != 0)
    {
        for (num_bit; num_bit < 8; num_bit++)
        {
            if (bits[num_bit] == 0)
            {
                /* Descend, creating the next empty child. */
                breanch = chose_breanch(breanch);
                lv++;
            }
            else
            {
                /* Leaf: read its byte value, then climb back up. */
                elem = read_char_sym(in);
                num_cod_sym--;
                breanch->val = elem;
                breanch = tree_up(breanch, in, &lv);
                if (num_cod_sym == 0)
                    return breanch;
            }
        }
        replacement(in);
        num_bit = 0;
    }
    return breanch;
}
/*
 * Walk down the tree one bit at a time (advancing the global bit
 * cursor and refilling the buffer as needed) until a leaf is reached;
 * return its byte value.
 *
 * Bug fix: leaves were detected with "if (root->val)", which breaks for
 * a symbol whose byte value is 0 (every node starts with val == 0 from
 * calloc, so a 0-valued leaf would recurse into a NULL child). A leaf
 * is now identified structurally by having no children.
 */
unsigned char search_in_tree(tree* root, FILE *in)
{
    num_bit++;
    if (num_bit == 8)
    {
        replacement(in);
        num_bit = 0;
    }
    if (!root->left && !root->right) return root->val;
    if (bits[num_bit] == 0) return search_in_tree(root->left, in);
    return search_in_tree(root->right, in);
}
/*
 * Decode the payload bit stream into `out`. `byte_indent` is the number
 * of zero padding bits the encoder appended to the final byte.
 * NOTE(review): the "while (!feof(in))" pattern means the body runs one
 * extra time after the last byte has been consumed; the tail block then
 * re-walks the final partial byte while skipping the padding. The
 * "if (!num_bit)" guard fires only when the cursor wrapped to 0 —
 * confirm this matches the intended end-of-stream condition.
 */
void decoding_text(FILE *in, FILE *out, tree *root, int byte_indent)
{
    unsigned char c;
    while (!feof(in))
    {
        num_bit--;
        c = search_in_tree(root, in);
        fprintf(out, "%c", c);
    }
    /* Decode what remains of the last byte, minus the padding bits. */
    if (!num_bit)
    {
        for (int i = num_bit; i < 8 - byte_indent; i++)
            if (!bits[i]) root = root->left;
            else root = root->right;
        c = root->val;
        fprintf(out, "%c", c);
    }
}
/*
 * Entry point for decompression: skip the 3-byte marker, parse the
 * header (padding-bit count and distinct-symbol count), rebuild the
 * Huffman tree, write the "c \r" marker the encoder expects, then
 * decode the payload.
 */
void decoder(FILE *in, FILE *out)
{
    tree *root = (tree*)calloc(1, sizeof(tree));
    fseek(in, 3, SEEK_SET);
    int byte_indent;
    int num_cod_sym;
    fscanf(in, "%d", &byte_indent);
    fscanf(in, "%d", &num_cod_sym);
    fseek(in, 1, SEEK_CUR);
    root = scanf_tree(in, root, num_cod_sym);
    fprintf(out, "c \r");
    decoding_text(in, out, root, byte_indent);
}<file_sep>#ifndef __HEADER_H__
#define __HEADER_H__
#define _CRT_SECURE_NO_WARNINGS
#include <stdio.h>
#include <stdlib.h>
#include <memory.h>
#include "structs.h"
void encoder(FILE *in, FILE *out);
void decoder(FILE *in, FILE *out);
#endif | 558786b88e72aad6f1638e79114c7e599b2327a3 | [
"Markdown",
"C"
] | 5 | Markdown | jenya-zhuchok/cod-of-Hafman | a50f25ec4fee731a79c8b132a40ef56af035a63f | 0203ae3d1c2babf94e9e521bd96b77c3db9954c6 |
refs/heads/main | <repo_name>AndrewRook/polling_simulator<file_sep>/tests/test_sampling.py
import numpy as np
import pandas as pd
import pytest
from functools import partial
from polling_simulator import sampling
from polling_simulator import Variable, Demographic
from polling_simulator.distributions import truncated_gaussian_distribution
class TestPredefinedSample:
    """Tests for sampling.predefined_sample: contact a fixed pool, keep responders."""

    def test_fails_when_asked_for_too_many_people(self):
        data = pd.DataFrame({
            "response_likelihood": np.ones(10)
        })
        # Requesting 20 samples from a 10-person electorate must raise.
        with pytest.raises(ValueError):
            sampling.predefined_sample(1, False)(20, data)

    def test_fails_when_response_likelihood_is_too_low(self):
        data = pd.DataFrame({
            "response_likelihood": np.ones(1000) * 0.000001
        })
        # With essentially no responders the sampler raises rather than
        # returning an empty poll.
        with pytest.raises(ValueError):
            sampling.predefined_sample(1, False)(20, data)

    def test_fails_when_turnout_likelihood_is_too_low(self):
        data = pd.DataFrame({
            "response_likelihood": np.ones(1000),
            "turnout_likelihood": np.ones(1000) * 0.000001
        })
        # Without the likely-voter screen this succeeds...
        sampling.predefined_sample(1, False)(500, data)
        # ...but the screen rejects (virtually) everyone, so it raises.
        with pytest.raises(ValueError):
            sampling.predefined_sample(1, True)(500, data)

    def test_returns_fewer_than_desired_when_response_rates_are_low(self):
        np.random.seed(123)  # deterministic draws
        data = pd.DataFrame({
            "response_likelihood": np.ones(20) * 0.5
        })
        poll_responders, poll_non_responders = sampling.predefined_sample(1, False)(10, data)
        # Responders plus non-responders must equal the number contacted.
        assert len(poll_responders) < 10
        assert len(poll_responders) + len(poll_non_responders) == 10

    def test_reaches_more_people_when_makes_multiple_attempts(self):
        np.random.seed(123)
        data = pd.DataFrame({
            "response_likelihood": np.ones(40) * 0.1
        })
        single_call_responders, _ = sampling.predefined_sample(1, False)(20, data)
        multi_call_responders, non_responders = sampling.predefined_sample(5, False)(20, data)
        # Retrying non-responders should raise the response count without
        # exceeding the number of people contacted.
        assert len(multi_call_responders) > len(single_call_responders)
        assert len(multi_call_responders) < 20
        assert len(multi_call_responders) + len(non_responders) == 20

    def test_applies_likely_voter_screen_correctly(self):
        data = pd.DataFrame({
            "response_likelihood": np.ones(1000),
            "turnout_likelihood": np.ones(1000) * 0.1
        })
        responders, nonresponders = sampling.predefined_sample(1, True)(50, data)
        # Screened-out respondents land in neither output frame.
        assert len(responders) + len(nonresponders) < 50
class TestGuaranteedSample:
    """Tests for sampling.guaranteed_sample: keep contacting until the quota is met."""

    def test_fails_when_asked_for_too_many_people(self):
        np.random.seed(123)
        data = pd.DataFrame({
            "response_likelihood": np.ones(10) * 0.05
        })
        # Tiny electorate with a tiny response rate cannot fill the quota.
        with pytest.raises(ValueError):
            sampling.guaranteed_sample(1, False)(5, data)

    @pytest.mark.parametrize("num_people", [10, 50, 100, 500])
    def test_always_returns_the_asked_for_number_of_people(self, num_people):
        np.random.seed(123)
        data = pd.DataFrame({
            "response_likelihood": np.ones(10000) * 0.1
        })
        responders, non_responders = sampling.guaranteed_sample(1, False)(num_people, data)
        # The defining property of the guaranteed sampler: exact sample size.
        assert len(responders) == num_people
        assert len(non_responders) > 0

    def test_works_with_multiple_attempts(self):
        np.random.seed(123)
        data = pd.DataFrame({
            "response_likelihood": np.ones(10000) * 0.1
        })
        single_attempt_responders, single_attempt_non_responders = sampling.guaranteed_sample(1, False)(100, data)
        multiple_attempt_responders, multiple_attempt_non_responders = sampling.guaranteed_sample(5, False)(100, data)
        # Sample size is unchanged, but fewer people had to be written off.
        assert len(single_attempt_responders) == len(multiple_attempt_responders)
        assert len(multiple_attempt_non_responders) < len(single_attempt_non_responders)

    def test_works_with_likely_voter_screen(self):
        np.random.seed(123)
        data = pd.DataFrame({
            "response_likelihood": np.ones(10000) * 0.5,
            "turnout_likelihood": np.ones(10000) * 0.1
        })
        _, no_screen_non_responders = sampling.guaranteed_sample(1, False)(200, data)
        responders, screened_non_responders = sampling.guaranteed_sample(1, True)(200, data)
        # Screening forces many more contacts to hit the same quota.
        assert len(responders) == 200
        assert len(screened_non_responders) > 5 * len(no_screen_non_responders)
class TestPreStratifiedSample:
    """Tests for sampling.stratified_sample (proportional allocation by demographic)."""

    def test_freezes_assumed_demographics(self):
        age = Variable("age", truncated_gaussian_distribution(25, 25, 18, 110))
        young_people = Demographic(0.5, 0.1, {"a": 1}, age < 40)
        old_people = Demographic(0.5, 0.1, {"b": 1}, age >= 40)
        demographics = [
            young_people, old_people
        ]
        sampler = sampling.stratified_sample(demographics, sampling.guaranteed_sample(1, False))
        # Mutating the caller's list after construction must not affect the
        # deep-copied list captured in the sampler's closure.
        demographics.pop(0)
        assert len(demographics) == 1
        for item in sampler.__closure__:
            if type(item.cell_contents) == list:
                assert len(item.cell_contents) == 2

    def test_errors_when_demographics_are_not_mece(self):
        age = Variable("age", truncated_gaussian_distribution(25, 25, 18, 110))
        # Gap: nobody aged 40-59 belongs to either demographic.
        young_people = Demographic(0.5, 0.1, {"a": 1}, age < 40)
        old_people = Demographic(0.5, 0.1, {"b": 1}, age >= 60)
        sampler = sampling.stratified_sample([young_people, old_people], sampling.guaranteed_sample(1, False))
        electorate = pd.DataFrame({
            "age": np.arange(100)
        })
        with pytest.raises(ValueError) as e:
            sampler(100, electorate)
        assert "Demographics are not mutually exclusive" in e.value.args[0]

    def test_stratification_works_as_expected(self):
        np.random.seed(123)
        gender = Variable("gender", partial(
            np.random.choice, np.array(["M", "F"]), replace=True, p=np.array([0.5, 0.5])
        ))
        men = Demographic(0.5, 0.1, {"a": 1}, (gender == "M"))
        women = Demographic(0.5, 0.2, {"b": 1}, (gender == "F"))
        demographics = [men, women]
        sampler = sampling.stratified_sample(demographics, sampling.guaranteed_sample(1, False))
        male_electorate = pd.DataFrame({
            "turnout_likelihood": np.ones(50000) * men.turnout_likelihood,
            "response_likelihood": men.response_likelihood,
            "candidate_preference": "a",
            "gender": "M"
        })
        female_electorate = pd.DataFrame({
            "turnout_likelihood": np.ones(50000) * women.turnout_likelihood,
            "response_likelihood": women.response_likelihood,
            "candidate_preference": "b",
            "gender": "F"
        })
        # Shuffle to show the allocation does not depend on row order.
        shuffled_electorate = pd.concat([male_electorate, female_electorate]).sample(frac=1)
        responders, non_responders = sampler(1000, shuffled_electorate)
        # A 50/50 electorate should yield an exactly even 1000-person sample.
        assert np.sum(responders["gender"] == "F") == 500
        assert np.sum(responders["gender"] == "M") == 500
        assert len(non_responders) > 3000
        # Women respond twice as readily, so men dominate the non-responders.
        assert np.sum(non_responders["gender"] == "M") > np.sum(non_responders["gender"] == "F")
assert np.sum(non_responders["gender"] == "M") > np.sum(non_responders["gender"] == "F")
class TestInternalGetResponses:
    """Tests for the private sampling._get_responses helper."""

    def test_fails_when_passed_zero_attempts(self):
        with pytest.raises(ValueError):
            sampling._get_responses(None, 0)

    def test_works_with_single_attempt(self):
        np.random.seed(123)
        response_likelihoods = np.ones(40) * 0.5
        did_respond, num_attempts_required = sampling._get_responses(response_likelihoods, 1)
        assert np.sum(did_respond) < len(response_likelihoods)
        # Attempt counts are either 1 (responded) or -1 (never responded).
        np.testing.assert_allclose(np.unique(num_attempts_required), np.array([-1, 1]))
        assert np.min(num_attempts_required[did_respond]) == 1

    def test_works_with_pandas_series(self):
        np.random.seed(123)
        response_likelihoods = pd.Series(np.ones(40) * 0.5)
        did_respond, num_attempts_required = sampling._get_responses(response_likelihoods, 1)
        assert np.sum(did_respond) < len(response_likelihoods)
        np.testing.assert_allclose(np.unique(num_attempts_required), np.array([-1, 1]))
        assert np.min(num_attempts_required[did_respond]) == 1

    def test_works_with_multiple_attempts(self):
        np.random.seed(123)
        response_likelihoods = np.ones(40) * 0.25
        did_respond_single, _ = sampling._get_responses(response_likelihoods, 1)
        did_respond_multi, num_attempts_required = sampling._get_responses(response_likelihoods, 3)
        # Extra attempts can only add responders, never remove them.
        assert np.sum(did_respond_multi) > np.sum(did_respond_single)
        np.testing.assert_allclose(np.unique(num_attempts_required), np.array([-1, 1, 2, 3]))
        assert np.min(num_attempts_required[did_respond_multi]) == 1
<file_sep>/README.md
# Polling Simulator
This is a lightweight Python tool for simulating political polls, based
on my understanding of how pollsters handle real data. It comes with
several methods for sampling electorates and aggregating polling
data built-in, but is designed in a way to easily allow for customization
as necessary.
# Installation
_Note: currently the only documented way to install this tool
is by cloning the repo and installing an environment via `conda`.
It is, however, written as a Python package, so making it `pip`-installable
via GitHub/PyPI would not be a heavy lift. If you'd like to see that, please
feel free to post an issue on the repo (or better yet, make a pull request);
if there's enough interest I'm happy to consider doing it._
## Assumptions:
* You are using a Linux-like OS (e.g. Ubuntu, Mac OSX)
* You are familiar with the command line
* You have [`conda`](https://docs.conda.io/en/latest/) installed
```bash
$ git clone https://github.com/AndrewRook/polling_simulator.git
$ cd polling_simulator
$ conda env create -f environment.yml
$ conda activate polling_simulator
```
# Usage:
For usage examples, check out the notebooks in this repository, and
see my blog series starting with [this post](https://andrewrook.github.io/2020/12/24/polling-part-0.html).
There are also docstrings for core functions if you want general guidance.<file_sep>/polling_simulator/sampling.py
import numpy as np
import pandas as pd
from copy import deepcopy
from typing import Callable, Union, Tuple
def predefined_sample(max_num_attempts, screen_likely_voters):
    """
    Build a sampler that contacts a fixed number of people and keeps whoever
    responds (i.e. "call 1000 people at random and use those who answer").

    Parameters
    ----------
    max_num_attempts: How many times each person is contacted before giving up.
    screen_likely_voters: When ``True``, drop respondents stochastically in
        proportion to their turnout likelihood (a 0.7 turnout likelihood means
        a 30% chance of being screened out).

    Returns
    -------
    A sampling function, to be used as ``sampling_strategy`` in
    ``simulate.run_polls`` or ``sampling.stratified_sample``.
    """
    def _sampler(n_sample, shuffled_electorate):
        if n_sample > len(shuffled_electorate):
            raise ValueError(f"number of samples ({n_sample}) greater than electorate ({len(shuffled_electorate)})")
        contacted = shuffled_electorate.head(n_sample)
        responded, attempts_needed = _get_responses(
            contacted["response_likelihood"], max_num_attempts
        )
        # Everyone passes when not screening; otherwise draw against turnout.
        if screen_likely_voters:
            passes_screen = np.random.random(len(contacted)) < contacted["turnout_likelihood"]
        else:
            passes_screen = np.ones(len(contacted)).astype(bool)
        keep = responded & passes_screen
        responders = contacted[keep].reset_index(drop=True)
        responders["num_contact_attempts"] = attempts_needed[keep]
        # People who never answered (screened-out respondents go to neither frame).
        non_responders = contacted[responded == False].reset_index(drop=True)
        non_responders["num_contact_attempts"] = max_num_attempts
        if len(responders) == 0:
            raise ValueError("Poll returned no valid responses")
        return responders, non_responders

    return _sampler
def guaranteed_sample(max_num_attempts, screen_likely_voters):
    """
    Build a sampler that keeps contacting people until a specific number of
    valid responses is collected, guaranteeing the final sample size
    (i.e. "call people at random until I get 1000 responses").

    Parameters
    ----------
    max_num_attempts: How many times each person is contacted before giving up.
    screen_likely_voters: When ``True``, drop respondents stochastically in
        proportion to their turnout likelihood.

    Returns
    -------
    A sampling function, to be used as ``sampling_strategy`` in
    ``simulate.run_polls`` or ``sampling.stratified_sample``.
    """
    def _sampler(n_sample, shuffled_electorate):
        responded, attempts_needed = _get_responses(
            shuffled_electorate["response_likelihood"], max_num_attempts
        )
        if screen_likely_voters:
            passes_screen = (
                np.random.random(len(shuffled_electorate)) < shuffled_electorate["turnout_likelihood"]
            ).values
        else:
            passes_screen = np.ones(len(shuffled_electorate)).astype(bool)
        # Running total of valid responses in contact order.
        running_valid = np.cumsum(responded & passes_screen)
        if running_valid[-1] < n_sample:
            raise ValueError(
                f"number of samples ({n_sample}) greater than number of valid poll responders ({running_valid[-1]})"
            )
        # Keep everyone contacted up to (and including) the n-th valid response.
        within_quota = running_valid <= n_sample
        contacted = shuffled_electorate[within_quota]
        responded = responded[within_quota]
        passes_screen = passes_screen[within_quota]
        attempts_needed = attempts_needed[within_quota]

        keep = responded & passes_screen
        responders = contacted.loc[keep, :].reset_index(drop=True)
        responders["num_contact_attempts"] = attempts_needed[keep]
        non_responders = contacted.loc[responded == False, :].reset_index(drop=True)
        non_responders["num_contact_attempts"] = max_num_attempts
        return responders, non_responders

    return _sampler
def stratified_sample(
        assumed_demographics,
        sampling_strategy: Callable
):
    """
    Build a sampler that allocates the poll across demographic groups in
    proportion to their share of the electorate: a demographic holding 10% of
    the population is targeted for 10% of the sample.

    How close the result gets to those targets depends on
    ``sampling_strategy``: with ``predefined_sample`` a low-response
    demographic may come up short, whereas ``guaranteed_sample`` hits the
    quota exactly.

    Parameters
    ----------
    assumed_demographics: The demographics to stratify by; they must be
        mutually exclusive and cover the whole electorate.
    sampling_strategy: The sampler applied within each demographic
        (e.g. the output of ``guaranteed_sample``).

    Returns
    -------
    A sampling function, to be used as ``sampling_strategy`` in
    ``simulate.run_polls``.
    """
    # Freeze the demographics at construction time so later mutation of the
    # caller's list cannot leak into the closure.
    assumed_demographics = deepcopy(assumed_demographics)

    def _sampler(n_sample, shuffled_electorate):
        segments = []
        for demographic in assumed_demographics:
            in_segment = demographic.population_segmentation.segment(shuffled_electorate)
            segments.append(shuffled_electorate.loc[in_segment, :].copy(deep=True))

        total_segmented = sum(len(segment) for segment in segments)
        if total_segmented != len(shuffled_electorate):
            raise ValueError(f"""
            Demographics are not mutually exclusive and completely exhaustive. A {len(shuffled_electorate)}-person
            electorate was split into demographic groups totaling {total_segmented}.
            """)
        quotas = [
            round(n_sample * len(segment) / total_segmented)
            for segment in segments
        ]
        responder_frames = []
        non_responder_frames = []
        for quota, segment in zip(quotas, segments):
            segment_responders, segment_non_responders = sampling_strategy(quota, segment)
            responder_frames.append(segment_responders)
            non_responder_frames.append(segment_non_responders)
        return (
            pd.concat(responder_frames).reset_index(drop=True),
            pd.concat(non_responder_frames).reset_index(drop=True),
        )

    return _sampler
def _get_responses(
response_likelihoods: Union[pd.Series, np.float64], max_num_attempts: int
):
# -> Tuple[np.ndarray[np.bool_], np.ndarray[np.int64]]: # Not working, not sure why
"""Figure out whether or not someone responds after N attempts. Return both whether or
not they responded but also the number of attempts made."""
if max_num_attempts < 1:
raise ValueError("max_num_attempts must be a positive integer")
realized_response_matrix = np.random.random((max_num_attempts, len(response_likelihoods)))
if isinstance(response_likelihoods, pd.Series):
response_likelihoods = response_likelihoods.values
did_respond_matrix = realized_response_matrix < response_likelihoods
did_respond = did_respond_matrix.any(axis=0)
num_attempts_required = did_respond_matrix.argmax(axis=0) + 1
num_attempts_required[did_respond == False] = -1
return did_respond, num_attempts_required
<file_sep>/tests/test_simulate.py
import numpy as np
import pandas as pd
import pytest
from functools import partial
from polling_simulator.core import Demographic, Variable
from polling_simulator.distributions import truncated_gaussian_distribution
from polling_simulator import simulate
@pytest.fixture(scope="function")
def age():
    # Age variable: gaussian centered at 25 (sd 25), truncated to [18, 110].
    return Variable("age", truncated_gaussian_distribution(25, 25, 18, 110))
@pytest.fixture(scope="function")
def gender():
    # Gender variable drawn M/F with a 49/51 split.
    return Variable("gender", partial(
        np.random.choice, np.array(["M", "F"]), replace=True, p=np.array([0.49, 0.51])
    ))
class TestGenerateElectorate:
    """Tests for simulate.generate_electorate."""

    def test_works_in_normal_case(self, gender):
        np.random.seed(123)
        demographics = [
            Demographic(1, 1, {"a": 1}, gender == "M"),
            Demographic(0.5, 0.4, {"a": 0.25, "b": 0.75}, gender == "F")
        ]
        electorate = simulate.generate_electorate(10000, demographics)
        assert len(electorate) == 10000
        # Gender split matches the 49/51 generator within Monte Carlo noise.
        assert abs((electorate["gender"] == "M").sum() / len(electorate) - 0.49) < 1e-2
        assert abs((electorate["gender"] == "F").sum() / len(electorate) - 0.51) < 1e-2
        # Turnout/response means are the population-weighted demographic means.
        assert abs(electorate["turnout_likelihood"].mean() - (1 * 0.49 + 0.5 * 0.51)) < 1e-2
        assert abs(electorate["response_likelihood"].mean() - (1 * 0.49 + 0.4 * 0.51)) < 1e-2
        # Candidate preference mixes each demographic's preference weights.
        assert abs(
            (electorate["candidate_preference"] == "a").sum() / len(electorate) - (1 * 0.49 + 0.25 * 0.51)
        ) < 1e-2
        assert abs(
            (electorate["candidate_preference"] == "b").sum() / len(electorate) - (0.75 * 0.51)
        ) < 1e-2
class TestGenerateDemographicFeaturesOfPopulation:
    def test_fails_when_demographics_overlap(self, age):
        """People aged 35-40 fall in both demographics, which must raise."""
        np.random.seed(123)
        population = pd.DataFrame({"age": age.data_generator(10000)})
        overlapping_demographics = [
            Demographic(1, 1, {"a": 1}, age < 40),
            Demographic(1, 1, {"b": 1}, age >= 35)
        ]
        with pytest.raises(ValueError):
            simulate.generate_demographic_features_of_population(
                population, overlapping_demographics, ["a", "b"]
            )

    def test_fails_when_demographics_miss_people(self, age):
        """People aged 40-45 belong to neither demographic, which must raise."""
        np.random.seed(123)
        population = pd.DataFrame({"age": age.data_generator(10000)})
        gapped_demographics = [
            Demographic(1, 1, {"a": 1}, age < 40),
            Demographic(1, 1, {"b": 1}, age > 45)
        ]
        with pytest.raises(ValueError):
            simulate.generate_demographic_features_of_population(
                population, gapped_demographics, ["a", "b"]
            )

    def test_segments_appropriately(self, gender):
        """Feature columns should match population-weighted demographic averages."""
        np.random.seed(123)
        population = pd.DataFrame({"gender": gender.data_generator(10000)})
        demographics = [
            Demographic(1, 1, {"a": 1}, gender == "M"),
            Demographic(0.5, 0.4, {"a": 0.25, "b": 0.75}, gender == "F")
        ]
        features = simulate.generate_demographic_features_of_population(
            population, demographics, ["a", "b"]
        )
        num_people = len(features)
        preference_a = (features["candidate_preference"] == "a").sum() / num_people
        preference_b = (features["candidate_preference"] == "b").sum() / num_people
        assert abs(features["turnout_likelihood"].mean() - (1 * 0.49 + 0.5 * 0.51)) < 0.1
        assert abs(features["response_likelihood"].mean() - (1 * 0.49 + 0.4 * 0.51)) < 0.1
        assert abs(preference_a - (1 * 0.49 + 0.25 * 0.51)) < 0.1
        assert abs(preference_b - (0.75 * 0.51)) < 0.1
class TestRunElection:
    def test_applies_turnout_correctly(self, gender):
        """Vote totals should scale with each demographic's turnout likelihood."""
        men_rarely_vote = Demographic(
            0.1, 1, {"a": 1},
            (gender == "M")
        )
        women_usually_vote = Demographic(
            0.9, 1, {"b": 1},
            (gender == "F")
        )
        np.random.seed(123)
        electorate = simulate.generate_electorate(
            20000, [men_rarely_vote, women_usually_vote]
        )
        result = simulate.run_election(electorate)
        # Expected votes = electorate size * gender share * turnout likelihood.
        assert abs(20000 * 0.49 * 0.1 - result["a"]) < 100
        assert abs(20000 * 0.51 * 0.9 - result["b"]) < 100
class TestRunMultipleElections:
    def test_handles_low_vote_candidates(self, gender):
        """Candidates with near-zero turnout should still appear as integer zero-vote columns."""
        low_turnout = Demographic(
            0.0001, 1, {"a": 1},
            (gender == "M")
        )
        high_turnout = Demographic(
            0.9, 1, {"b": 1},
            (gender == "F")
        )
        np.random.seed(123)
        electorate = simulate.generate_electorate(
            2000, [low_turnout, high_turnout]
        )
        results = simulate.run_elections(10, electorate)
        assert "a" in results.columns
        assert results["a"].min() == 0
        # ``np.int`` was removed in NumPy 1.24; check against the abstract integer
        # type instead, which also works regardless of platform int width.
        assert np.issubdtype(results.dtypes["a"], np.integer)
<file_sep>/polling_simulator/__init__.py
from .core import Demographic, Variable
from .simulate import generate_electorate, run_elections, run_polls<file_sep>/demo.py
import numpy as np
from functools import partial
from scipy import stats
from polling_simulator import Variable, Demographic
from polling_simulator import aggregation, sampling
from polling_simulator.distributions import truncated_gaussian_distribution
from polling_simulator import generate_electorate, run_elections, run_polls
if __name__ == "__main__":
    # A trivial variable that is True for every person, so a single demographic
    # can cover the entire electorate.
    everyone = Variable("everyone", lambda x: np.ones(x).astype(bool))
    demographics = [
        # NOTE: ``Demographic`` accepts exactly these four fields. The previous
        # ``population_percentage=1.0`` keyword is not a field of the dataclass
        # and raised a TypeError on construction.
        Demographic(
            turnout_likelihood=1.0,
            response_likelihood=1.0,
            candidate_preference={"Dem": 0.51, "Rep": 0.47, "Ind": 1 - 0.51 - 0.47},
            population_segmentation=(everyone == True)
        )
    ]
    electorate = generate_electorate(15500000, demographics)
    breakpoint()  # intentional: drop into the debugger to explore ``electorate``
    # np.random.seed(123)
    # age = Variable("age", truncated_gaussian_distribution(25, 25, 18, 110))
    # gender = Variable("gender", partial(
    #     np.random.choice, np.array(["M", "F"]), replace=True, p=np.array([0.49, 0.51])
    # ))
    # young_men = Demographic(
    #     0.25,
    #     0.5,
    #     0.1,
    #     {"a": 0.5, "b": 0.5},
    #     (age < 40) & (gender == "M")
    # )
    # old_men = Demographic(
    #     0.25,
    #     0.7,
    #     0.2,
    #     {"b": 1},
    #     (age >= 40) & (gender == "M")
    # )
    # young_women = Demographic(
    #     0.25,
    #     0.6,
    #     0.05,
    #     {"a": 1},
    #     (age < 40) & (gender == "F")
    # )
    # old_women = Demographic(
    #     0.25,
    #     0.8,
    #     0.2,
    #     {"a": 1},
    #     (age >= 40) & (gender == "F")
    # )
    # np.random.seed(123)
    # electorate = generate_electorate(
    #     100000,
    #     [
    #         young_men, old_men, young_women, old_women
    #     ]
    # )
    # polls = run_polls(
    #     10,
    #     1000, electorate, [young_men, old_men, young_women, old_women],
    #     sampling.guaranteed_sample(1, False), aggregation.naive_aggregation()
    # )
    # results = run_elections(10, electorate)
    breakpoint()  # intentional: second stop after the (commented-out) experiments
<file_sep>/tests/test_aggregation.py
import numpy as np
import pandas as pd
from functools import partial
from polling_simulator import aggregation, Variable, Demographic
class TestNaiveAggregation:
def test_works(self):
data = pd.DataFrame({
"candidate_preference": ["a", "b", "a", "b", "a"]
})
aggregate = aggregation.naive_aggregation()(data, None).sort_values()
expected_aggregate = pd.Series([3.0, 2.0], index=["a", "b"]).sort_values()
pd.testing.assert_series_equal(aggregate, expected_aggregate, check_names=False)
class TestStratifiedAggregation:
def test_works_no_weighting(self):
gender = Variable("gender", partial(
np.random.choice, np.array(["M", "F"]), replace=True, p=np.array([0.5, 0.5])
))
men = Demographic(0.5, 0.5, {"a": 1}, (gender == "M"))
women = Demographic(0.5, 1, {"b": 1}, (gender == "F"))
male_poll = pd.DataFrame({
"turnout_likelihood": np.ones(1000) * men.turnout_likelihood,
"response_likelihood": men.response_likelihood,
"candidate_preference": "a",
"gender": "M"
})
female_poll = pd.DataFrame({
"turnout_likelihood": np.ones(2000) * women.turnout_likelihood,
"response_likelihood": women.response_likelihood,
"candidate_preference": "b",
"gender": "F"
})
poll_results = pd.concat([male_poll, female_poll]).sample(frac=1)
naive_aggregate = aggregation.naive_aggregation()(poll_results, None).sort_values()
stratified_aggregate = aggregation.stratified_aggregation(
[men, women], [0.5, 0.5]
)(poll_results, None).sort_values()
pd.testing.assert_series_equal(
pd.Series([1000.0, 2000.0], index=["a", "b"]).sort_values(),
naive_aggregate,
check_names=False
)
pd.testing.assert_series_equal(
pd.Series([1500.0, 1500.0], index=["a", "b"]).sort_values(),
stratified_aggregate,
check_names=False
)
<file_sep>/polling_simulator/simulate.py
import warnings
from typing import Iterable, Callable, List
import numpy as np
import pandas as pd
from polling_simulator.core import Demographic, Variable, _uniquefy_variables
def generate_electorate(num_people: int, demographics: Iterable[Demographic]):
    """
    Build a simulated electorate of ``num_people`` rows from a demographic spec.

    Every ``Variable`` referenced by any demographic's segmentation gets a column
    of randomly generated values (via its data generator); the demographic-driven
    columns (turnout likelihood, response likelihood, candidate preference) are
    then filled in for each person.

    Parameters
    ----------
    num_people: The number of people you want to be in the electorate.
    demographics: The demographics you want to use to generate the population.

    Returns
    -------
    A pandas DataFrame with one row per person, containing their demographic
    variables and voting-related attributes.
    """
    variables_used = []
    candidates = set()
    for demographic in demographics:
        variables_used += demographic.population_segmentation.variables
        candidates.update(list(demographic.candidate_preference.keys()))
    candidates = list(candidates)  # converting to list allows for easier indexing later
    variables_used = _uniquefy_variables(variables_used)

    # Randomly generate the raw variable columns (age, gender, ...).
    base_columns = {
        variable.name: variable.data_generator(num_people)
        for variable in variables_used
    }
    electorate = pd.DataFrame(base_columns)

    # Attach turnout, response, and candidate-preference columns derived from
    # the demographics.
    demographic_columns = generate_demographic_features_of_population(
        electorate, demographics, candidates
    )
    return pd.concat([electorate, demographic_columns], axis=1)
def generate_demographic_features_of_population(
    population: pd.DataFrame, demographics: Iterable["Demographic"], candidates: List[str]
):
    """
    A helper function, used in ``generate_electorate``, to fill in the
    demographic-driven columns (turnout likelihood, response likelihood, and
    candidate preference) for every person in ``population``. Should not need
    to be used independently.

    Raises
    ------
    ValueError: if the demographics overlap (someone is in more than one) or
        fail to cover the whole population.
    """
    # Start with dummy values for features.
    # NOTE: ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24;
    # use the explicit ``np.float64`` dtype instead.
    demographic_features = pd.DataFrame({
        "turnout_likelihood": np.ones(len(population), dtype=np.float64) * -1,
        "response_likelihood": np.ones(len(population), dtype=np.float64) * -1,
        "candidate_preference": pd.Categorical([candidates[0]] * len(population), categories=candidates)
    })
    population_already_in_demographic = np.zeros(len(population), dtype=np.bool_)
    for demographic in demographics:
        population_in_demographic = demographic.population_segmentation.segment(population)
        if np.sum(population_in_demographic & population_already_in_demographic) != 0:
            # If someone is in multiple demographics, bail out
            raise ValueError(
                f"""
                Some demographics overlap. Examples include:
                {population[population_already_in_demographic & population_in_demographic]}
                """
            )
        demographic_features.loc[population_in_demographic, "turnout_likelihood"] = demographic.turnout_likelihood
        demographic_features.loc[population_in_demographic, "response_likelihood"] = demographic.response_likelihood
        # Draw each person's preferred candidate according to the demographic's
        # candidate-preference probabilities.
        demographic_features.loc[population_in_demographic, "candidate_preference"] = np.random.choice(
            np.array(list(demographic.candidate_preference.keys())),
            np.sum(population_in_demographic),
            replace=True,
            p=np.array(list(demographic.candidate_preference.values()))
        )
        population_already_in_demographic = population_in_demographic | population_already_in_demographic
    if np.sum(population_already_in_demographic) != len(population_already_in_demographic):
        # If someone is in NO demographics, bail out
        raise ValueError(f"""
        Demographics do not cover entire population. Examples include:
        {population[~population_already_in_demographic].head()}
        """)
    return demographic_features
def run_election(population: pd.DataFrame):
    """
    Simulate a single election over the given population.

    Parameters
    ----------
    population: A dataframe, likely created by ``generate_electorate``, which contains
        (at minimum) turnout likelihoods and candidate preferences.

    Returns
    -------
    A pandas Series mapping each candidate to the number of votes received.
    """
    # Each person votes when their turnout likelihood beats a uniform draw.
    turnout_draws = np.random.random(len(population))
    voter_mask = population["turnout_likelihood"] > turnout_draws
    ballots_cast = population.loc[voter_mask, "candidate_preference"]
    return ballots_cast.value_counts()
def run_elections(num_elections: int, population: pd.DataFrame):
    """
    Run multiple elections, using the same population. Good for understanding what
    the distribution of possible election outcomes can be.

    Parameters
    ----------
    num_elections: Number of elections to simulate.
    population: A dataframe, likely created by ``generate_electorate``, which contains
        (at minimum) turnout likelihoods and candidate preferences.

    Returns
    -------
    A pandas DataFrame, where each row represents an election and the columns show the
    votes received for each candidate.
    """
    election_results = pd.concat([
        run_election(population).to_frame().T
        for _ in range(num_elections)
    ])
    # Handle the edge case where sometimes a candidate gets zero votes.
    # NOTE: ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24; use the
    # explicit ``np.int64`` dtype instead.
    election_results = election_results.fillna(0).astype(np.int64)
    # Remove categorical column indexer as it's not needed (and makes life harder)
    election_results.columns = election_results.columns.astype("str")
    return election_results.reset_index(drop=True)
def run_poll(
        num_to_poll: int,
        electorate: pd.DataFrame,
        sampling_strategy: Callable, aggregation_strategy: Callable):
    """
    Simulate one poll: shuffle the electorate, draw a sample, and aggregate the
    responses into per-candidate support fractions.

    Parameters
    ----------
    num_to_poll: Target number of people to poll (the realized count depends on
        the ``sampling_strategy``).
    electorate: A dataframe, likely created by ``generate_electorate``, describing
        every voter available to be polled.
    sampling_strategy: Callable of (num_to_poll, shuffled_electorate) returning two
        DataFrames of electorate rows — responders and contacted non-responders.
        Usually the output of one of the functions in the ``sampling`` module.
    aggregation_strategy: Callable of (responders, nonresponders) returning
        per-candidate support totals. Usually the output of one of the functions
        in the ``aggregation`` module.

    Returns
    -------
    Per-candidate support, normalized to fractions that sum to 1.
    """
    shuffled_electorate = electorate.sample(frac=1).reset_index(drop=True)
    responders, nonresponders = sampling_strategy(num_to_poll, shuffled_electorate)
    raw_support = aggregation_strategy(responders, nonresponders)
    return raw_support / raw_support.sum()
def run_polls(
        num_polls: int,
        num_to_poll: int,
        electorate: pd.DataFrame,
        sampling_strategy: Callable,
        aggregation_strategy: Callable):
    """
    Run ``num_polls`` independent polls over the same electorate, using one
    sampling strategy and one aggregation strategy throughout.

    Parameters
    ----------
    num_polls: Number of polls to run.
    num_to_poll: Target number of people per poll (the realized count depends on
        the ``sampling_strategy``).
    electorate: A dataframe, likely created by ``generate_electorate``, describing
        every voter available to be polled.
    sampling_strategy: Callable of (num_to_poll, shuffled_electorate) returning two
        DataFrames of electorate rows — responders and contacted non-responders.
        Usually the output of one of the functions in the ``sampling`` module.
    aggregation_strategy: Callable of (responders, nonresponders) returning
        per-candidate support totals. Usually the output of one of the functions
        in the ``aggregation`` module.

    Returns
    -------
    A pandas DataFrame with one row per poll and one column per candidate, holding
    the fraction of participants who supported that candidate.
    """
    individual_polls = [
        run_poll(num_to_poll, electorate, sampling_strategy, aggregation_strategy)
        for _ in range(num_polls)
    ]
    combined = pd.concat(individual_polls, axis=1)
    return combined.T.reset_index(drop=True)
<file_sep>/polling_simulator/distributions.py
from scipy import stats
def convert_generic_scipy_distribution(distribution, *args, **kwargs):
    """
    Adapt any standard ``scipy.stats`` distribution into the callable format that
    the ``Variable`` class demands for its ``data_generator`` attribute.

    Parameters
    ----------
    distribution: The distribution class (e.g. ``scipy.stats.truncnorm``)
    args: positional arguments used to instantiate ``distribution``
    kwargs: keyword arguments used to instantiate ``distribution``

    Returns
    -------
    A function ready for use as a ``Variable.data_generator``
    """
    # Freeze the distribution with its parameters, then expose its sampler.
    frozen_distribution = distribution(*args, **kwargs)
    return frozen_distribution.rvs
def truncated_gaussian_distribution(mean, sigma, lower_clip, upper_clip):
    """
    Helper function to generate a truncated gaussian distribution.
    Parameters
    ----------
    mean: The mean of the (untruncated) distribution
    sigma: The standard deviation of the (untruncated) distribution
    lower_clip: The minimum allowable value
    upper_clip: The maximum allowable value
    Returns
    -------
    A function ready for use as a ``Variable.data_generator``
    """
    # scipy's truncnorm expects the clip points expressed in standard deviations
    # relative to the mean, so convert from raw values first.
    a = (lower_clip - mean) / sigma
    b = (upper_clip - mean) / sigma
    return convert_generic_scipy_distribution(stats.truncnorm, a, b, loc=mean, scale=sigma)<file_sep>/polling_simulator/aggregation.py
import numpy as np
import pandas as pd
from copy import deepcopy
from typing import Iterable
def _no_turnout_weighting():
def _weighting(poll_responses):
return np.ones(len(poll_responses))
return _weighting
def weight_by_self_reported_turnout(response_modifier=lambda x: x):
    """
    Weight the results based on the vote likelihood directly obtained from
    the poll respondents, optionally modified by a simple function.

    Parameters
    ----------
    response_modifier (optional): A function which takes in a pandas Series and
        returns a new Series. If set, it is applied as a correction to the
        reported turnout (e.g. to downweight responders that claim to be 100%
        likely to vote).

    Returns
    -------
    A weighting function that maps a poll-response DataFrame to the (modified)
    ``turnout_likelihood`` column.
    """
    def _weighting(poll_responses):
        return response_modifier(poll_responses["turnout_likelihood"])
    return _weighting
def weight_by_assumed_turnout(assumed_demographics: Iterable["Demographic"]):
    """
    Weight by the turnout likelihood assumed by a demographic model (rather than
    the actual reported likelihood as in ``weight_by_self_reported_turnout``).

    This strategy is a placeholder and currently always raises
    ``NotImplementedError``.
    """
    raise NotImplementedError("Not yet implemented")
def naive_aggregation(turnout_weighting=_no_turnout_weighting()):
    """
    Build an aggregation strategy that simply totals the (optionally weighted)
    support for each candidate, ignoring all demographic information.

    Parameters
    ----------
    turnout_weighting (optional): If set, apply a turnout weighting model before
        aggregation.

    Returns
    -------
    A function taking (poll_responses, poll_nonresponses) and returning a pandas
    Series of per-candidate (weighted) vote totals, indexed by candidate.
    """
    def _aggregation(poll_responses, poll_nonresponses):
        # Pair each response's candidate with its weight, then total per candidate.
        per_response_weights = turnout_weighting(poll_responses)
        weighted = pd.DataFrame({
            "candidate_preference": poll_responses["candidate_preference"].values,
            "weight": per_response_weights
        })
        return weighted.groupby("candidate_preference")["weight"].sum()
    return _aggregation
def stratified_aggregation(
        assumed_demographics: Iterable["Demographic"],
        population_fraction_per_demographic: Iterable[float],
        turnout_weighting=_no_turnout_weighting()):
    """
    Sum up candidate preferences, controlling for the expected prevalence of each
    demographic in the population. For instance, if you expect a certain demographic
    to be 50% of the electorate, but for whatever reason they are only 10% of the
    respondents to your poll, then this aggregation method will upweight them
    accordingly.

    Parameters
    ----------
    assumed_demographics: The demographic segmentations to use for the reweighting
    population_fraction_per_demographic: The expected population fraction for each
        demographic; the fractions must sum to 1.
    turnout_weighting (optional): If set, apply a turnout weighting model before
        aggregation.

    Returns
    -------
    A function which takes in poll responses, does any necessary weighting, then
    sums up the (weighted) support for each candidate and returns the results as a
    pandas Series indexed by candidate.

    Raises
    ------
    ValueError: at construction time if the fractions do not sum to 1; at
        aggregation time if the demographics are not mutually exclusive and
        completely exhaustive over the poll responders.
    """
    if abs(sum(population_fraction_per_demographic) - 1) > 1e-4:
        raise ValueError(f"demographic populations do not sum to 1: {population_fraction_per_demographic}")
    # Snapshot the inputs so later mutation by the caller can't change behavior.
    assumed_demographics = deepcopy(assumed_demographics)
    population_fraction_per_demographic = deepcopy(population_fraction_per_demographic)
    def _aggregation(poll_responses, _):
        stratified_votes = []
        responses_in_demographic = [
            demographic.population_segmentation.segment(poll_responses)
            for demographic in assumed_demographics
        ]
        num_responses_per_demographic = [
            population.sum()
            for population in responses_in_demographic
        ]
        if sum(num_responses_per_demographic) != len(poll_responses):
            raise ValueError(f"""
            Demographics are not mutually exclusive and completely exhaustive. {len(poll_responses)}
            poll responders were split into demographic groups totaling {sum(num_responses_per_demographic)}.
            """)
        # NOTE: the loop variable used to shadow the ``responses_in_demographic``
        # list it iterates over; renamed to avoid the shadowing.
        for demographic_mask, num_responses, population_fraction in zip(
                responses_in_demographic, num_responses_per_demographic, population_fraction_per_demographic
        ):
            raw_votes = naive_aggregation(turnout_weighting)(poll_responses[demographic_mask], None)
            stratified_votes.append(
                raw_votes  # raw aggregation of polled people in the demographic
                * population_fraction  # the bigger the demo is in the whole population, the higher the weight
                / (num_responses / len(poll_responses))  # relative to the prevalence of the demo in the poll
            )
        # Don't need to worry about the denominator of the weights since we'll scale to
        # percentages anyway
        stratified_votes = pd.concat(stratified_votes).reset_index().groupby("candidate_preference")["weight"].sum()
        return stratified_votes
    return _aggregation
<file_sep>/polling_simulator/core.py
import operator
from dataclasses import dataclass
from abc import ABC
from typing import Callable, Union, Dict
import numpy as np
class _Base(ABC):
    """
    Mixin providing the operator overloads shared by ``Variable`` and
    ``Segmentation``. Every comparison (``==``, ``<``, ...) or boolean
    combination (``&``, ``|``) of these objects builds a new ``Segmentation``
    describing the expression rather than evaluating anything immediately.
    """

    # --- boolean combinators ---
    def __and__(self, other: Union["Variable", "Segmentation"]):
        return Segmentation(self, other, operator.and_)

    def __or__(self, other: Union["Variable", "Segmentation"]):
        return Segmentation(self, other, operator.or_)

    # --- equality comparisons (string operands allowed) ---
    def __eq__(self, other: Union[int, float, str, "Variable", "Segmentation"]):
        return Segmentation(self, other, operator.eq)

    def __ne__(self, other: Union[int, float, str, "Variable", "Segmentation"]):
        return Segmentation(self, other, operator.ne)

    # --- ordering comparisons (numeric operands) ---
    def __gt__(self, other: Union[int, float, "Variable", "Segmentation"]):
        return Segmentation(self, other, operator.gt)

    def __ge__(self, other: Union[int, float, "Variable", "Segmentation"]):
        return Segmentation(self, other, operator.ge)

    def __lt__(self, other: Union[int, float, "Variable", "Segmentation"]):
        return Segmentation(self, other, operator.lt)

    def __le__(self, other: Union[int, float, "Variable", "Segmentation"]):
        return Segmentation(self, other, operator.le)
class Variable(_Base):
    """
    A named random variable that can appear in ``Segmentation`` expressions.

    Parameters
    ----------
    name: The name of the variable (also used as the DataFrame column name).
    data_generator: A function which takes a single integer -- the number of
        data points to generate -- and returns that many appropriately randomly
        generated values. This allows for arbitrary generation strategies, from
        simple coin flips to complex distributions.
    """

    def __init__(
            self,
            name: str,
            data_generator: Callable[[int], Union[np.int64, np.float64, np.object_]]
    ):
        self.name = name
        self.data_generator = data_generator

    def __str__(self):
        return self.name
class Segmentation(_Base):
    """
    An object that represents a comparison between any of a Variable, a constant
    (e.g. int or string), and/or another Segmentation. Nothing is evaluated until
    ``segment`` is called with a concrete DataFrame.

    Parameters
    ----------
    left: The left side of the comparison (i.e. the "age" of "age > 21")
    right: The right side of the comparison (i.e. the "21" of "age > 21")
    comparator: The comparison (i.e. the ">" of "age > 21"). Can be one of:
        >, >=, <, <=, ==, !=, &, |
    """
    def __init__(
            self,
            left: Union[int, float, str, "Variable", "Segmentation"],
            right: Union[int, float, str, "Variable", "Segmentation"],
            comparator
    ):
        self.left = left
        self.right = right
        self.comparator = comparator

    def segment(self, df):
        """
        Perform the segmentation on a pandas DataFrame.

        Parameters
        ----------
        df: A pandas DataFrame, which at least has columns with names corresponding
            to all Variables used in the Segmentation (and any sub-segmentations).

        Returns
        -------
        A boolean mask where ``True`` indicates the row of the input ``df`` is in
        the segmentation and ``False`` means it is not.
        """
        # Recursively evaluate nested Segmentations first. ``isinstance`` is the
        # idiomatic equivalent of the previous ``issubclass(x.__class__, ...)``.
        left = self.left.segment(df) if isinstance(self.left, Segmentation) else self.left
        right = self.right.segment(df) if isinstance(self.right, Segmentation) else self.right
        # Then substitute the matching DataFrame column for any Variable operand;
        # plain constants pass through unchanged.
        left = df[left.name] if isinstance(left, Variable) else left
        right = df[right.name] if isinstance(right, Variable) else right
        return self.comparator(left, right)

    @property
    def variables(self):
        """
        Parse the Segmentation, recursively if necessary, to obtain a list of all
        Variables used in it (in first-appearance, left-to-right order). This is
        useful for things like building a DataFrame containing these variables, or
        validating that a DataFrame has the right variables in it.

        Returns
        -------
        A list of all unique Variable instances used in the Segmentation.
        """
        all_variables = []
        for operand in (self.left, self.right):
            if isinstance(operand, Variable):
                all_variables.append(operand)
            elif isinstance(operand, Segmentation):
                all_variables += operand.variables
        return _uniquefy_variables(all_variables)

    def __str__(self):
        comparator_map = {
            operator.gt: ">",
            operator.ge: ">=",
            operator.lt: "<",
            operator.le: "<=",
            operator.eq: "==",
            operator.ne: "!=",
            operator.and_: "&",
            operator.or_: "|"
        }
        # Parenthesize nested Segmentations so operator precedence is unambiguous.
        left = f"({self.left})" if isinstance(self.left, Segmentation) else f"{self.left}"
        right = f"({self.right})" if isinstance(self.right, Segmentation) else f"{self.right}"
        return f"{left} {comparator_map[self.comparator]} {right}"
def _uniquefy_variables(non_unique_variables):
# Have to use this crazy explicit nested loop because Variable
# overrides the __eq__ method
unique_variables = []
for variable in non_unique_variables:
already_used = False
for unique_variable in unique_variables:
if variable is unique_variable:
already_used = True
break
if not already_used:
unique_variables.append(variable)
return unique_variables
@dataclass
class Demographic:
    """
    A simple dataclass that contains all the necessary information about a demographic.

    Parameters
    ----------
    turnout_likelihood: a number between 0 and 1 that corresponds to the fractional
        probability that someone in this demographic will vote.
    response_likelihood: a number between 0 and 1 that corresponds to the fractional
        probability that someone in this demographic will respond when contacted by
        a pollster. (Note: If you set up a poll with multiple contact attempts, this
        likelihood governs each attempt; if you have multiple contact attempts you
        will have a higher chance of a response than would be indicated by this
        variable.)
    candidate_preference: A mapping between candidate identifier and the fraction of
        people in the demographic who would vote for them. For example, if a Democrat
        has 60% of the support in this demographic compared to 40% for a Republican,
        you'd enter ``{"Democrat": 0.6, "Republican": 0.4}``
    population_segmentation: The Segmentation needed to identify who is in this
        demographic.
    """
    turnout_likelihood: float
    response_likelihood: float
    candidate_preference: Dict[str, float]  # TODO: ensure these sum to 1
    population_segmentation: Segmentation
    def get_population_in_demographic(self, population):
        """
        A small helper function to identify how prevalent the demographic is in a
        population.

        Parameters
        ----------
        population: A DataFrame containing all individuals in the population.

        Returns
        -------
        The number of individuals in that population which are in this demographic.
        """
        in_demographic = self.population_segmentation.segment(population)
        return np.sum(in_demographic)
<file_sep>/tests/test_core.py
import numpy as np
import pandas as pd
from polling_simulator import core
class TestVariable:
    def test_instantiates_ok(self):
        """A Variable should store the name it was constructed with."""
        variable = core.Variable("woo", lambda x: np.ones(x))
        assert variable.name == "woo"
class TestSegmentationVariable:
    def test_general_working(self):
        """``variables`` should return each Variable exactly once, in first-use order."""
        var1 = core.Variable("var1", lambda x: np.ones(x))
        var2 = core.Variable("var2", lambda x: np.ones(x))
        var3 = core.Variable("var3", lambda x: np.ones(x))
        seg = (
            ((var1 > 3) & (var2 == 5)) |
            (
                (var1 == 10) &
                ((var2 < var1) | (var3 > 5))
            )
        )
        extracted = seg.variables
        assert len(extracted) == 3
        assert extracted[0] is var1
        assert extracted[1] is var2
        assert extracted[2] is var3
class TestSegmentationSegment:
    def test_general_working(self):
        """A simple comparison should yield a boolean mask over the dataframe."""
        var = core.Variable("var", lambda x: np.ones(x))
        seg = (var >= 3)
        frame = pd.DataFrame({"var": [1, 2, 3, 4, 5]})
        mask = seg.segment(frame)
        pd.testing.assert_series_equal(
            mask,
            pd.Series([False, False, True, True, True], name="var")
        )

    def test_multiple_segments(self):
        """Combining two segmentations with ``&`` should AND their masks."""
        var1 = core.Variable("var1", lambda x: np.ones(x))
        var2 = core.Variable("var2", lambda x: np.ones(x))
        combined = (var1 >= 3) & (var2 < 5)
        frame = pd.DataFrame({
            "var1": [1, 2, 3, 4, 5],
            "var2": [1, 5, 1, 5, 1]
        })
        mask = combined.segment(frame)
        pd.testing.assert_series_equal(
            mask,
            pd.Series([False, False, True, False, True])
        )

    def test_order_of_operation(self):
        """Explicit parentheses vs Python's native ``&``-over-``|`` precedence."""
        frame = pd.DataFrame({
            "var1": [1, 2, 3, 4, 5],
            "var2": [1, 5, 1, 5, 1]
        })
        var1 = core.Variable("var1", lambda x: np.ones(x))
        var2 = core.Variable("var2", lambda x: np.ones(x))
        seg1 = var1 >= 4
        seg2 = var2 < 5
        seg3 = (var1 == 2)
        explicit_order = (seg3 | seg1) & seg2
        pd.testing.assert_series_equal(
            explicit_order.segment(frame),
            pd.Series([False, False, False, False, True])
        )
        implicit_order = seg3 | seg1 & seg2
        pd.testing.assert_series_equal(
            implicit_order.segment(frame),
            pd.Series([False, True, False, False, True])
        )
class TestSegmentationStr:
    def test_works_complex_case(self):
        """Nested segmentations should stringify with parentheses around sub-expressions."""
        var1 = core.Variable("var1", lambda x: np.ones(x))
        var2 = core.Variable("var2", lambda x: np.ones(x))
        at_least_four = var1 >= 4
        differs = var2 != var1
        negated = differs == False
        combined = at_least_four & (differs | negated)
        assert str(combined) == "(var1 >= 4) & ((var2 != var1) | ((var2 != var1) == False))"
"Markdown",
"Python"
] | 12 | Python | AndrewRook/polling_simulator | df390b26bb7f2f6fd019813de1ab188489a68d61 | 6c06b4ddd004950269de129782c0b680a9da947a |
refs/heads/main | <file_sep><div align="center">
<h1>Computational statistics with python</h1>
</div>
<div align="center">
<img src="images/python.png" width="220">
</div>
The content of this document is based on the course of the same name taught by [<NAME>](https://github.com/jdaroesti) on [Platzi](https://platzi.com/r/michel_brmdz/).
# Table of contents
- [Course objectives](#Objectives)
- [Dynamic Programming](#Dynamic-Programming)
- [Introduction to dynamic programming](#Introduction-to-dynamic-programming)
- [Fibonacci optimization](#Fibonacci-optimization)
- [Random Paths](#Random-Paths)
- [What are random paths?](#What-are-random-paths?)
- [Stochastic Programs](#Stochastic-Programs)
- [Introduction to Stochastic Programming](#Introduction-to-Stochastic-Programming)
- [Probability calculation](#Probability-calculation)
- [Statistical inference](#Statistical-inference)
- [Mean](#Mean)
- [Variance and Standard Deviation](#Variance-and-Standard-Deviation)
- [Normal Distribution](#Normal-Distribution)
- [Monte Carlo simulations](#Monte-Carlo-simulations)
- [What are Monte Carlo Simulations?](#What-are-Monte-Carlo-Simulations?)
- [Sampling and Confidence Intervals](#Sampling-and-Confidence-Intervals)
- [Sampling](#Sampling)
- [Central limit theorem](#Central-limit-theorem)
- [Experimental Data](#Experimental-Data)
- [How to work with experimental data?](#How-to-work-with-experimental-data?)
- [Linear regression](#Linear-regression)
- [Certificate](#Certificate)
# Objectives
- Learn when to use dynamic programming and its benefits.
- Understand the difference between deterministic and stochastic programs.
- Learn to use Stochastic Programming.
- Learn to create valid computer simulations.
# Dynamic Programming
## Introduction to dynamic programming
<div align="center">
<img src="images/Bellman.jpg" width="200">
<p><NAME>man</p>
</div>
In the 1950s, <NAME> needed government funding to continue his research, so he needed a bombastic name so they wouldn't be able to reject his application, so he chose **dynamic programming**. Bellman's own words were:
_"[The name] Dynamic Programming was chosen to hide the fact that he was actually doing Math from government sponsors. The phrase Dynamic Programming is something no congressman can object to."_ - <NAME>.
Knowing that **Dynamic Programming** is not related to its name, the truth is that it is one of the most powerful techniques to optimize certain types of problems.
The problems that you can optimize are those that have an **optimal substructure**, this means that a **global optimal solution** can be found by combining **optimal solutions of local subproblems.**
We can also find **spliced problems**, which involve solving the same problem several times to find an optimal solution.
One technique to obtain high speed in our program is **Memorization**, which consists of saving previous computations and avoiding doing them again. Normally a dictionary is used, where the queries can be made in `O (1)`, and for this we make a change of _time by space._
## Fibonacci optimization
The _Fibonacci_ series is defined by the recurrence `Fn = Fn-1 + Fn-2` and is very simple to implement in code recursively. However, it is very inefficient to simply do it recursively, since we repeat the same computation several times.
<div align="center">
<img src="images/fibonnaci-algorithm.jpeg" width="80%">
<p>Fibonacci algorithm</p>
</div>
If you look at the image you will notice that we repeat the calculation several times for `f (4), f (3), f (2), f (1) and f (0)`, this means that our algorithm grows from form **exponential** `O (2 ^ n)`.
To optimize our algorithm we will first implement the **recursive function** and then give way to **memorization**, with this the improvements will be really surprising.
# Random Paths
## What are random paths?
**Random paths** are a type of simulation that randomly chooses a decision from a set of valid decisions. It is used in many fields of knowledge when systems **are not deterministic** and include **elements of randomness**.
# Stochastic Programs
## Introduction to Stochastic Programming
A program is **deterministic** when it is run with the same _input_ it produces the same _output_. **Deterministic** programs are very important, but there are problems that cannot be solved that way.
The **stochastic programming** allows us to introduce randomness to our programs to create simulations that allow us to solve other types of problems. **Stochastic programs** take advantage of the fact that the **probability distributions** of a problem are known or can be estimated.
## Probability calculation
The **probability** is a measure of the certainty associated with a future event or event and is usually expressed as a number between 0 and 1. A **probability** of 0 means that an event will never happen, and in its counterpart a **probability** of 1 means that it is guaranteed to happen.
When talking about **probability** we ask what fraction of all possible events the property we are looking for has, that is why it is important to be able to calculate all the possibilities of an event to understand its probability. The probability that an **event happens** and that **does not happen** is always **1**.
- Complement rule:
- P(A) + P(~A) = 1
- Multiplication rule:
  - P(A and B) = P(A) * P(B)
- Addition rule:
  - Mutually exclusive: P(A or B) = P(A) + P(B)
  - Non exclusive: P(A or B) = P(A) + P(B) - P(A and B)
To see a practical example of the previous laws, we are going to carry out an exercise of rolling a 6-sided die:
- The probability that the number **1** will appear:
We have **6** possibilities and the number **1** is one of them, so the probability is **1/6**.
- The probability that we get the number **1 or 2:**
We have **6** possibilities and the number **1** is one of them and the **2** is another. The fact that we get a number is **mutually exclusive**, since we cannot obtain 2 numbers at the same time. Under this premise we will use the **mutually exclusive additive law.**
`P(1 o 2) = P(1) + P(2) `
`P(1 o 2) = 1/6 + 1/6`
`P(1 o 2) = 2/6`
- The probability that we get the number **1** at least **1 time** in **10 tosses**:

  On each toss there is a **5/6** chance of *not* rolling a **1**, so by the **multiplicative law** the probability of never rolling a 1 in 10 tosses is `(5/6)^10`. Applying the complement rule:

  `1 - (5/6)^10 ≈ 0.8385`
## Statistical inference
With simulations we can calculate the probabilities of complex events knowing the probabilities of simple events.
What happens when we don't know the probabilities of simple events? **Statistical inference** techniques allow us to infer / conclude the properties of a population from a **random sample.**
_"The guiding principle of **statistical inference** is that a random sample tends to exhibit the same properties as the population from which it was drawn."_ - <NAME>
<div align="center">
<img src="images/sample-population.png" width="50%">
</div>
### Law of large numbers
With the **law of large numbers** we can see that in repeated independent trials, each with the same probability p of an outcome, the fraction of trials in which that outcome occurs converges to p — that is, its deviation from p converges to zero — as the number of trials approaches infinity.
<div align="center">
<img src="images/large-numbers.png" width="30%">
</div>
### Gambler's fallacy
The **gambler's fallacy** points out that after an extreme event, less extreme events will occur to level the mean.
The _regression to the mean_ indicates that after an extreme random event, the next event will likely be less extreme.
## Mean
The **mean** is a measure of central tendency, commonly known as the average. The mean of a population is denoted by the symbol μ and the mean of a sample is defined by X̄.
<div align="center">
<img src="images/mean.png" width="30%">
</div>
One way to calculate the mean with Python would be the following.
```py
import random
def mean(X):
return sum(X) / len(X)
if __name__ == '__main__':
X = [random.randint(9, 12) for i in range(20)]
mu = mean(X)
print(f'Array X: {X}')
print(f'Mean = {mu}')
```
## Variance and Standard Deviation
### Variance
The **variance** measures how spread out a set of random values is from its mean. While the **mean** gives us an idea of where the values are, the **variance** tells us how scattered they are. The **variance** must always be understood with respect to the mean.
<div align="center">
<img src="images/variance.png" width="30%">
</div>
### Standard Deviation
The **standard deviation** is the square root of the **variance**. It also allows us to understand propagation and it must always be understood in relation to the **mean**.
The advantage over **variance** is that the standard deviation is in the same units as the **mean**.
<div align="center">
<img src="images/standard-deviation.png" width="30%">
</div>
We are going to implement the **variance** and **standard deviation** functions in our ready-made script for the **mean.**
```py
import random
import math
def mean(X):
return sum(X) / len(X)
def variance(X):
mu = mean(X)
collector = 0
for x in X:
collector += (x - mu)**2
return collector / len(X)
def standard_deviation(X):
return math.sqrt(variance(X))
if __name__ == '__main__':
X = [random.randint(9, 12) for i in range(20)]
mu = mean(X)
Var = variance(X)
sigma = standard_deviation(X)
print(f'Array X: {X}')
print(f'Mean = {mu}')
print(f'Variance = {Var}')
print(f'Standard deviation = {sigma}')
```
## Normal Distribution
The **normal distribution** is one of the most recurrent distributions in any field. It is fully defined by its **mean** and its **standard deviation**. It allows calculating **confidence intervals** with the empirical rule.
<div align="center">
<img src="images/normal-distribution.png" width="30%">
</div>
In the following example we are going to create a distribution with standard deviation 1 and 3. When the deviation is low it means the variability of the data is less.
<div align="center">
<img src="images/variation-samples.webp" width="60%">
</div>
### Empirical rule
Also known as the 68-95-99.7 rule. Indicate the dispersion of the data in a normal distribution at one, two and three sigmas.
Allows you to calculate probabilities with the density of the normal distribution.
<div align="center">
<img src="images/empirical.png" width="30%">
</div>
<div align="center">
<img src="images/normal-distribution-plot.png" width="70%">
</div>
# Monte Carlo simulations
## What are Monte Carlo Simulations?
It allows you to create simulations to predict the outcome of a problem, in addition to converting deterministic problems into stochastic problems.
It is used in a wide variety of areas, from engineering to biology and law.
# Sampling and Confidence Intervals
## Sampling
**Sampling** is very important when we do not have access to the entire population we want to explore. One of the great discoveries of statistics is that **random samples** tend to show the same properties of the target population. Up to this point all the **samplings** that we have done are of the **probabilistic** type.
In a **random sampling** any member of the population has the same probability of being chosen.
In a **stratified sampling** we take into account the characteristics of the population to divide it into subgroups and then we take samples from each subgroup, this increases the probability that the sample is representative of the population.
## Central limit theorem
The **central limit theorem** is one of the most important theorems in statistics. It states that **random samples** from any distribution will have a **normal distribution**. This allows us to understand any distribution as the **normal distribution of its means** and that allows us to apply everything we know about **normal distributions.**
The more samples we obtain, the greater the similarity with the normal distribution. The larger the sample, the smaller the standard deviation.
<div align="center">
<img src="images/central-limit.png" width="70%">
</div>
# Experimental Data
## How to work with experimental data?
The **experimental data** are those that are generated through the **scientific method**.
- With the **scientific method** it is necessary to start with a _theory_ or _hypothesis_ about the result you want to reach.
- Based on the _hypothesis_ an experiment must be created to **validate** or **falsify** the _hypothesis_.
- A _hypothesis_ is **validated** or **falsified** by measuring the difference between the experimental measurements and those measurements predicted by the _hypothesis_.
## Linear regression
**Linear regression** allows us to approximate a function to a set of data obtained experimentally. It does not necessarily allow to approximate linear functions, but its variants allow to approximate any **polynomial function.**
To see an example of linear regressions in _Python_ in the following link you can access to see an example: [Collab - Regresión Lineal.](https://colab.research.google.com/drive/1vX4JFHd0gpAYdBNllH93O7QpYKywD4sb)
# Certificate
<div align="center">
<img src="images/diploma.jpg" width="90%">
</div>
<file_sep><div align="center">
<h1>Drunk road</h1>
</div>
This is an exercise where starting from a point 0 we can randomly decide which direction to take, depending on the established options.
<div align="center">
<img src="../images/random-walk.gif" width="220">
</div>
To carry out an example of randomness, we are going to create a program that will represent the "Drunk Road" problem. For this we will create 3 classes: one that represents the walking agent, one that generates an abstraction of the coordinates and one that represents the plane in which we are moving, and we are going to graph the distance in which our agent ends as we define a greater number of steps you can take.<file_sep>import sys
def recursive_fibonacci(n):
    """Naive recursive Fibonacci with F(0) == F(1) == 1.

    Exponential time; kept as the baseline to contrast with the
    memoized version below.
    """
    if n in (0, 1):
        return 1
    return recursive_fibonacci(n - 2) + recursive_fibonacci(n - 1)
def dynamic_fibonacci(n, memo=None):
    """Memoized Fibonacci with F(0) == F(1) == 1, linear time.

    Fix: the original used a mutable default argument (``memo={}``),
    the classic Python pitfall — the same dict silently persisted
    across all calls. The cache is now created per top-level call;
    callers may still pass their own dict to share a cache explicitly.

    :param n: non-negative index into the sequence
    :param memo: optional cache mapping index -> value
    :return: the n-th Fibonacci number
    """
    if memo is None:
        memo = {}
    if n == 0 or n == 1:
        return 1
    try:
        return memo[n]
    except KeyError:
        # Compute once, remember, and reuse for the sibling recursion.
        result = dynamic_fibonacci(n - 1, memo) + dynamic_fibonacci(n - 2, memo)
        memo[n] = result
        return result
if __name__ == '__main__':
    # Raise the recursion limit so the memoized version can recurse
    # roughly n frames deep for large inputs without RecursionError.
    sys.setrecursionlimit(10002)
    n = int(input('Choose a number: '))
    result = dynamic_fibonacci(n)
    print(result)
"Markdown",
"Python"
] | 3 | Markdown | mich7095/computational-statistics-with-python | 6e6f3c647785ef91a74ee49db40a332bf29063a3 | 17f83d4993afbaec94d437a69b892f9368b071a4 |
refs/heads/master | <repo_name>Briaoeuidhtns/messenger-demo<file_sep>/server/routes/register.js
const asyncHandler = require('./asyncHandler')
const express = require('express')
const mongoose = require('mongoose')
const { User } = require('../schema/User')
const { hash } = require('bcrypt')
const sendUserInfo = require('../middleware/sendUserInfo')
const addJwtCookie = require('../middleware/addJwtCookie')
/** The bcrypt default salt difficulty */
const saltRounds = 10
/**
 * Validate a plaintext password before it is hashed.
 * On failure, throws a mongoose ValidationError under the fake path
 * `safepassword` so the route handler can process it uniformly with
 * schema validation errors (and filter it from mongoose's own errors).
 * Returns the password unchanged when acceptable.
 */
const validPassword = (pw) => {
  if (pw.length < 6) {
    // Use mongoose errors for consistent handling
    const err = new mongoose.Error.ValidationError()
    const detail = new mongoose.Error.ValidatorError({
      type: 'custom',
      // Fake path so we can filter out password errors from mongoose
      path: 'safepassword',
      message: 'Password too short',
    })
    err.errors = { safepassword: detail }
    throw err
  }
  return pw
}
/**
 * Build the /register router.
 * POST /register creates a user from `req.body.user`, validating and
 * bcrypt-hashing the password, then issues a JWT cookie and responds
 * with the user info (201). Mongoose validation failures become a
 * 400 `{ error: { kind: 'validation', items } }` payload keyed by field.
 * @param {object} config forwarded to addJwtCookie (cookie/key/jwt opts)
 */
const router = (config) =>
  express.Router().post(
    '/register',
    asyncHandler(async (req, res, next) => {
      const {
        user: { password, ...userRegistration },
      } = req.body
      try {
        // Validate the raw password first; only the hash is persisted.
        req.user.data = await User.create({
          ...userRegistration,
          password: await hash(validPassword(password), saltRounds),
        })
        res.status(201)
        next()
      } catch (err) {
        if (err instanceof mongoose.Error.ValidationError) {
          const { password, ...errors } = err.errors
          // Shouldn't ever be errors from mongoose validation in pw,
          // but if there are it's a server error
          if (password) throw password
          res.status(400).send({
            error: {
              kind: 'validation',
              items: Object.fromEntries(
                Object.entries(errors).map(([p, e]) => [p, e.message])
              ),
            },
          })
        } else throw err
      }
    }),
    addJwtCookie(config),
    sendUserInfo
  )
module.exports = router
<file_sep>/client/src/storybookData.jsx
import SocketMock from 'socket.io-mock'
import { callbackify } from 'util'
/**
 * Generate a pseudo-random lowercase hex string resembling an ObjectId.
 * @param {number} [size=16] number of hex characters to produce
 * @returns {string} hex string of length `size`
 */
export const randomObjectId = (size = 16) => {
  let id = ''
  while (id.length < size) {
    id += Math.floor(Math.random() * 16).toString(16)
  }
  return id
}
/** Resolve after `ms` milliseconds; used to simulate network latency. */
export const sleep = (ms) =>
  new Promise((resolve) => {
    setTimeout(resolve, ms)
  })
// Fixture users: each gets a random id and a deterministic avatar URL
// (pravatar serves the same image for the same `u=` seed).
export const users = [
  'thomas',
  'santiago',
  'chiumbo',
  'hualing',
  'ashanti',
  'julia',
  'cheng',
].map((name) => ({
  _id: randomObjectId(),
  name,
  img: `https://i.pravatar.cc/?u=${name}`,
}))

// Convention: the first fixture user is "me", the second is the partner.
export const [me, other] = users
/** Look up a fixture user by id, delayed to exercise loading states. */
export const resolveUsers = async (k) => {
  // to show loading state
  await sleep(1000)
  for (const user of users) {
    if (user._id === k) return user
  }
  return undefined
}
// Long lorem-ipsum message used to exercise wrapping/overflow in previews
// and chat bubbles.
export const longMsg =
  'Lorem ipsum dolor sit amet, consectetur adipiscing elit. Ut in voluptate sit, qui epuletur, in dolore, qui torqueatur. Cuius similitudine perspecta in formarum specie ac dignitate transitum est ad honestatem dictorum atque factorum. Atque haec coniunctio confusioque virtutum tamen a philosophis ratione quadam distinguitur. Ait enim se, si uratur, Quam hoc suave! dicturum. Duo Reges: constructio interrete. Hoc mihi cum tuo fratre convenit. Inscite autem medicinae et gubernationis ultimum cum ultimo sapientiae comparatur. Aliter enim explicari, quod quaeritur, non potest.'
// Chat transcript fixture: [content, sender id, [hour, minute]] tuples
// expanded into message objects stamped with today's date at that time.
export const messages = [
  ['Where are you from?', other._id, [10, 45]],
  ["I'm from New York", me._id, [10, 51]],
  ['Share photo of your city, please', other._id, [10, 55]],
  [longMsg, me._id, [10, 58]],
].map(([content, from, [h, m]]) => {
  const createdAt = new Date()
  createdAt.setHours(h)
  createdAt.setMinutes(m)
  return {
    content,
    from,
    createdAt,
  }
})
// Conversation-list fixture: [partner name, last message, online, unread].
export const conversations = [
  ['santiago', 'Share photo of your city, please', true],
  ['chiumbo', 'Sure! what time?', true, 1],
  ['hualing', '😅😅😅', false, 12],
  ['ashanti', 'Sent photo', false],
  ['julia', 'Do you have any plans?', false],
  ['cheng', 'Message', false],
].map(([nameq, lastMessage, online, unread = 0]) => ({
  _id: randomObjectId(),
  // Only the other participant is listed; "me" is implicit.
  members: [users.find(({ name }) => name === nameq)],
  online,
  lastMessage,
  unread,
}))
/**
 * Filter fixture conversations by a username substring.
 * A falsy/empty query returns every conversation.
 * NOTE(review): conversation fixtures have `members`, not `user`, so the
 * destructured `user` below is always undefined and a non-empty query
 * appears to match nothing — confirm whether that is intended.
 */
export const searchConversations = (query) => {
  if (!query) return conversations
  const matchingIds = new Set()
  for (const { name, _id } of users) {
    if (name.includes(query)) matchingIds.add(_id)
  }
  return conversations.filter(({ user }) => matchingIds.has(user))
}
// Build a mock socket with canned handlers for the events the app emits.
export const createSocketMock = () =>
  // Normally socket.io queues emits until there's a connection, but the mock
  // is always connected, so there's a race condition between registering this
  // handler and calling search in the component.
  // Instead of changing the code to work with a mock with incorrect behavior,
  // register when creating to ensure it happens first.
  new SocketMock()
    .on(
      'get_conversations',
      callbackify(async () => conversations)
    )
    .on(
      'get_conversation_messages',
      // NOTE(review): ignores the requested conversation id and always
      // returns the same fixture transcript — confirm intended for stories.
      callbackify(async (id) => {
        return messages
      })
    )
<file_sep>/client/src/pages/Messages/Sidebar/Conversation.stories.jsx
import { List } from '@material-ui/core'
import { longMsg } from 'storybookData'
import Conversation from './Conversation'
export default {
  title: 'Messages/Sidebar/Conversation',
  component: Conversation,
  // Expose `unread` as a numeric control in the Storybook UI.
  argTypes: { unread: { control: { type: 'number', min: 0, max: 100 } } },
  parameters: { backgrounds: { default: 'light' } },
}

// Render inside a List so ListItem spacing matches real usage.
const Template = (args) => (
  <List>
    <Conversation {...args} />
  </List>
)

Template.args = {
  user: { name: 'santiago', img: 'https://i.pravatar.cc' },
  unread: 0,
  lastMessage: 'Last message',
  online: false,
}

// Baseline story, plus variants for presence, emoji, unread badge
// digit widths (1 / 12 / 100), and preview-text overflow.
export { Template as Conversation }

export const Online = Template.bind({})
Online.args = { ...Template.args, online: true }

export const Emoji = Template.bind({})
Emoji.args = { ...Template.args, lastMessage: '😅😅😅' }

export const OneUnread = Template.bind({})
OneUnread.args = { ...Template.args, unread: 1 }

export const SomeUnread = Template.bind({})
SomeUnread.args = { ...Template.args, unread: 12 }

export const ManyUnread = Template.bind({})
ManyUnread.args = { ...Template.args, unread: 100 }

export const LongLastMessage = Template.bind({})
LongLastMessage.args = { ...Template.args, lastMessage: longMsg }
<file_sep>/client/src/pages/Messages/index.jsx
import { AppBar, Box, makeStyles, Typography } from '@material-ui/core'
import { useState } from 'react'
import Chat from './Chat'
import Sidebar from './Sidebar'
// Full-viewport-height two-column layout; the app bar gets a soft shadow.
const useStyles = makeStyles((theme) => ({
  root: { display: 'flex', height: '100vh' },
  appBar: {
    boxShadow: '0px 2px 20px rgba(88, 133, 196, 0.1)',
    padding: theme.spacing(4, 0, 4, 3),
  },
}))

/**
 * Messages page: the conversation sidebar beside the chat pane.
 * `active` holds the selected conversation (undefined until one is
 * chosen); the header shows the first member's name.
 */
const Messages = () => {
  const classes = useStyles()
  const [active, setActive] = useState()
  return (
    <Box className={classes.root}>
      <Sidebar active={active} setActive={setActive} />
      <Box display="flex" flex={1} flexDirection="column">
        <AppBar
          position="static"
          color="inherit"
          elevation={0}
          className={classes.appBar}
        >
          <Typography variant="h2">{active?.members?.[0]?.name}</Typography>
        </AppBar>
        <Chat conversation={active} />
      </Box>
    </Box>
  )
}
<file_sep>/client/src/pages/Messages/Sidebar/UserAvatar.jsx
import { Avatar, Badge } from '@material-ui/core'
import { makeStyles } from '@material-ui/core/styles'
// Presence dot styling: green when online, grey when offline, with a
// white ring so it stands out against the avatar.
const useStyles = makeStyles((theme) => {
  const borderWidth = 2
  const size = theme.spacing(1.5)
  return {
    badge: {
      backgroundColor: ({ status }) =>
        ({ online: '#1CED84', offline: '#D0DAE9' }[status]),
      border: 'solid #FFF',
      borderWidth,
      height: size,
      minWidth: size,
      borderRadius: '50%',
    },
  }
})

/**
 * Avatar with an online/offline presence dot at the bottom-right.
 * Expects `user` with `img` (avatar URL) and a boolean `online` flag.
 */
const UserAvatar = ({ user }) => {
  const status = user.online ? 'online' : 'offline'
  const classes = useStyles({ status })
  return (
    <Badge
      variant="dot"
      overlap="circle"
      anchorOrigin={{
        vertical: 'bottom',
        horizontal: 'right',
      }}
      aria-label={status}
      classes={{ badge: classes.badge }}
    >
      <Avatar src={user.img} />
    </Badge>
  )
}
<file_sep>/client/src/pages/Messages/Sidebar/index.jsx
import { Box, InputBase, List, SvgIcon, Typography } from '@material-ui/core'
import { makeStyles } from '@material-ui/core/styles'
import { useFetcher } from 'context/SocketContext'
import { useUser } from 'context/UserContext'
import { useState } from 'react'
import useSWR from 'swr'
import Conversation from './Conversation'
import Header from './Header'
import { ReactComponent as SearchSvg } from './Search.svg'
// Wrap the raw SVG so it behaves like a Material-UI icon (sizing + a11y title).
const SearchIcon = (props) => (
  <SvgIcon
    {...props}
    component={SearchSvg}
    viewBox="0 0 12 12"
    titleAccess="Search"
  />
)

// Sidebar column, section title, and search field styles. The search icon
// is absolutely positioned inside the input's left padding, which is why
// searchInput pads left by icon font size + spacing.
const useStyles = makeStyles((theme) => ({
  root: {
    backgroundColor: '#f5f7fb',
    padding: theme.spacing(3),
    paddingBottom: 0,
    // TODO probably shouldn't be fixed, maybe collapse on small screens?
    width: 300,
  },
  title: { marginBottom: theme.spacing(1.5) },
  search: {
    position: 'relative',
    width: '100%',
    marginBottom: theme.spacing(2.5),
    backgroundColor: '#e9eef9',
    borderRadius: 5,
    color: '#adc0de',
  },
  searchRoot: {
    color: 'inherit',
    width: '100%',
  },
  searchInput: {
    padding: theme.spacing(2),
    // vertical padding + font size from searchIcon
    paddingLeft: `calc(1em + ${theme.spacing(4)}px)`,
    color: '#000',
    '&::placeholder': {
      color: '#adc0de',
      opacity: 1,
    },
  },
  searchIcon: {
    padding: theme.spacing(0, 2),
    height: '100%',
    position: 'absolute',
    pointerEvents: 'none',
    display: 'flex',
    alignItems: 'center',
    justifyContent: 'center',
  },
}))
/**
 * Conversation sidebar: signed-in user header, user search box, and the
 * conversation list. While the search box is non-empty the list shows
 * user search results instead of conversations; selecting a result
 * fetches (or creates) the conversation with that user and activates it.
 */
const Sidebar = ({ active, setActive }) => {
  const classes = useStyles()
  const fetcher = useFetcher()
  const { user: me } = useUser()
  const [search, setSearch] = useState('')
  // Only query the server while there is a search term.
  const { data: searchResults } = useSWR(search && ['find_user', search])
  const { data: conversations, mutate: mutateConversations } = useSWR(
    'get_conversations',
    // No change events for this data yet, reenable to keep fresher
    { revalidateOnFocus: true }
  )

  // Clear the search box, resolve the conversation with the selected
  // user, activate it, and move it to the front of the cached list
  // (removing any previous entry for the same conversation).
  const onSelectSearchResult = async (selected) =>
    await mutateConversations(async (prev = []) => {
      setSearch('')
      const conversation = await fetcher('get_conversation_by_users', [
        selected,
      ])
      setActive(conversation)
      return [conversation, ...prev.filter((c) => c._id !== conversation._id)]
    })

  return (
    <Box className={classes.root}>
      <Header user={me} />
      <Typography variant="h2" className={classes.title}>
        Chats
      </Typography>
      <Box className={classes.search}>
        <Box className={classes.searchIcon}>
          <SearchIcon fontSize="inherit" />
        </Box>
        <InputBase
          placeholder="Search"
          classes={{ root: classes.searchRoot, input: classes.searchInput }}
          value={search}
          onChange={(e) => setSearch(e.target.value)}
        />
      </Box>
      <List disablePadding>
        {search
          ? searchResults?.map((user, idx) => (
              <Conversation
                user={user}
                key={idx}
                onClick={() => onSelectSearchResult(user._id)}
              />
            ))
          : conversations?.map((c, idx) => (
              <Conversation
                user={c.members[0]}
                lastMessage={c.lastMessage}
                key={idx}
                onClick={() => setActive(c)}
              />
            ))}
      </List>
    </Box>
  )
}
<file_sep>/client/src/pages/Messages/Sidebar/Conversation.jsx
import {
Badge,
ListItem,
ListItemAvatar,
ListItemText,
} from '@material-ui/core'
import { makeStyles, withStyles } from '@material-ui/core/styles'
import UserAvatar from './UserAvatar'
// A Badge stripped of its absolute positioning so it can sit inline at
// the end of the list row instead of overlapping an anchor element.
const StandaloneBadge = withStyles({
  badge: {
    transform: 'initial',
    transformOrigin: 'initial',
    position: 'initial',
  },
})(Badge)

const useStyles = makeStyles((theme) => ({
  root: {
    backgroundColor: '#fff',
    borderRadius: theme.spacing(1),
    '&:not(:last-child)': { marginBottom: theme.spacing(1) },
  },
  unread: {
    right: theme.spacing(2.5),
    paddingLeft: theme.spacing(4),
  },
}))

/**
 * One row in the conversation list: avatar with presence dot, user name,
 * last-message preview, and an unread-count badge (hidden when 0).
 * When there are unread messages the preview uses the primary text colour.
 */
const Conversation = ({ user, unread = 0, lastMessage, onClick }) => {
  const classes = useStyles()
  return (
    <ListItem className={classes.root} button {...{ onClick }}>
      <ListItemAvatar>
        <UserAvatar {...{ user }} />
      </ListItemAvatar>
      <ListItemText
        primary={user.name}
        primaryTypographyProps={{ noWrap: true }}
        secondary={lastMessage}
        secondaryTypographyProps={{
          noWrap: true,
          color: unread ? 'textPrimary' : 'textSecondary',
        }}
      />
      <StandaloneBadge
        badgeContent={unread}
        color="primary"
        className={classes.unread}
      />
    </ListItem>
  )
}
<file_sep>/client/src/pages/welcome/Sidebar.jsx
import { Box, Grid, Hidden, SvgIcon, Typography } from '@material-ui/core'
import { makeStyles } from '@material-ui/core/styles'
import { ReactComponent as MessageSvg } from './Message.svg'
// Wrap the raw SVG message-bubble graphic as a Material-UI icon.
const MessageIcon = (props) => (
  <SvgIcon {...props} component={MessageSvg} viewBox="0 0 68 67" />
)

// Background photo with a translucent blue gradient overlay; hero
// content is centred below a fixed-height flexible spacer.
const useStyles = makeStyles({
  image: {
    backgroundImage: 'url(./images/bg-img.png)',
    backgroundSize: 'cover',
    backgroundPosition: 'center',
  },
  overlay: {
    backgroundImage:
      'linear-gradient(rgb(58, 141, 255, 0.75), rgb(134, 185, 255, 0.75))',
  },
  heroText: {
    textAlign: 'center',
    marginTop: 30,
    maxWidth: 300,
    fontWeight: 400,
    color: '#fff',
  },
  icon: {
    fontSize: 67,
  },
})

/**
 * Decorative sidebar for the login/signup pages. Collapses progressively:
 * hidden entirely on xs screens, icon only up to md, icon + tagline above.
 */
const Sidebar = () => {
  const classes = useStyles()
  return (
    <Grid item xs={false} sm={4} md={5} className={classes.image}>
      <Box
        className={classes.overlay}
        height="100%"
        display="flex"
        flexDirection="column"
        alignItems="center"
      >
        <Box height="30vh" flexShrink={1} />
        <Hidden xsDown>
          <MessageIcon className={classes.icon} />
          <Hidden smDown>
            <Typography className={classes.heroText} variant="h1">
              Converse with anyone with any language
            </Typography>
          </Hidden>
        </Hidden>
      </Box>
    </Grid>
  )
}
<file_sep>/client/src/pages/Messages/Sidebar/UserAvatar.stories.jsx
import { me as user } from 'storybookData'
import UserAvatar from './UserAvatar'
export default {
  title: 'Messages/UserAvatar',
  component: UserAvatar,
}

const Template = (props) => <UserAvatar {...props} />
// Base args: the "me" fixture user (imported as `user`).
Template.args = { user }

// One story per presence state to verify the dot colour.
export const Online = Template.bind({})
Online.args = { user: { ...user, online: true } }

export const Offline = Template.bind({})
Offline.args = { user: { ...user, online: false } }
<file_sep>/client/src/schema.js
import * as Yup from 'yup'
// Client-side validation schemas shared by the login and signup forms.
// Fix: the password messages contained "<PASSWORD>" placeholder artifacts
// instead of human-readable text; restored sensible messages matching the
// phrasing of the neighbouring rules.
export const loginSchema = Yup.object().shape({
  email: Yup.string()
    .ensure()
    .required('Email is required')
    .email('Email is not valid'),
  password: Yup.string()
    .ensure()
    .required('Password is required')
    .max(100, 'Password is too long')
    .min(6, 'Password is too short'),
})

// Signup requires everything login does, plus a username.
export const signupSchema = loginSchema.shape({
  username: Yup.string()
    .ensure()
    .required('Username is required')
    .max(40, 'Username is too long'),
})
<file_sep>/client/src/pages/Messages/Sidebar/Header.jsx
import { Typography } from '@material-ui/core'
import { makeStyles } from '@material-ui/core/styles'
import { MoreHoriz as MenuIcon } from '@material-ui/icons'
import UserAvatar from './UserAvatar'
// Row layout: avatar, name (grows and truncates), overflow-menu icon.
const useStyles = makeStyles((theme) => ({
  root: {
    display: 'flex',
    alignItems: 'center',
    marginBottom: theme.spacing(4),
  },
  name: {
    flexGrow: 1,
    marginLeft: theme.spacing(2),
  },
  icon: { color: '#95a7c4' },
}))

/**
 * Sidebar header showing the signed-in user's avatar and name.
 * NOTE(review): the menu icon is currently decorative — no click handler.
 */
const Header = ({ user }) => {
  const classes = useStyles()
  return (
    <div className={classes.root}>
      <UserAvatar user={user} />
      <Typography variant="h3" noWrap className={classes.name}>
        {user.name}
      </Typography>
      <MenuIcon className={classes.icon} />
    </div>
  )
}
<file_sep>/client/src/pages/Messages/Sidebar/Header.stories.jsx
import Header from './Header'
export default {
  title: 'Messages/Sidebar/Header',
  component: Header,
  parameters: { backgrounds: { default: 'light' } },
}

// Single story: the header rendered with a sample user.
const Template = (props) => <Header {...props} />
Template.args = {
  user: { name: 'thomas', img: 'https://i.pravatar.cc' },
}

export { Template as Header }
<file_sep>/server/schema/Conversation.js
const { Schema, model, ObjectId } = require('mongoose')
const { User } = require('./User')
const beautifyUnique = require('mongoose-beautiful-unique-validation')
const ConversationSchema = new Schema(
{
members: {
type: [{ type: ObjectId, ref: User }],
required: true,
// Ensure the members list is unique and predictable
validate: [
{
validator: (val) =>
val.reduce(
([isSorted, prev], cur) => [
isSorted == null || (isSorted && prev <= cur),
cur,
],
[]
)[0],
msg: '{PATH} is not sorted in ascending order',
},
{
validator: (val) => new Set(val).size === val.length,
msg: '{PATH} values are not unique',
},
{
validator: (val) => val.length >= 2,
msg: '{PATH} must have at least two members',
},
],
set: (val) => [...new Set(val)].sort(),
},
},
{ strictQuery: 'throw' }
)
.plugin(beautifyUnique)
.loadClass(
class {
static findOrCreate(query) {
return this.findOneAndUpdate(query, {}, { upsert: true, new: true })
}
}
)
const Conversation = model('Conversation', ConversationSchema)
module.exports = { Conversation, ConversationSchema }
<file_sep>/client/src/App.jsx
import { MuiThemeProvider } from '@material-ui/core'
import CssBaseline from '@material-ui/core/CssBaseline'
import { BrowserRouter as Router, Redirect, Switch } from 'react-router-dom'
import SWRSocketConfig from 'SWRSocketConfig'
import AuthorizedRoute, { unknown } from './AuthorizedRoute'
import { SocketManager } from './context/SocketContext'
import { UserManager } from './context/UserContext'
import Messages from './pages/Messages'
import Login from './pages/welcome/Login'
import Signup from './pages/welcome/Signup'
import { theme } from './themes/theme.js'
/**
 * Application root: theme + CSS baseline, then user/session context,
 * socket connection, and SWR wiring around the router. /login and
 * /signup are reachable only while unauthenticated (otherwise they
 * redirect to /messages); /messages requires an authenticated user,
 * and the bare root redirects to /signup.
 */
const App = () => (
  <MuiThemeProvider theme={theme}>
    <CssBaseline />
    <UserManager>
      <SocketManager>
        <SWRSocketConfig>
          <Router>
            <Switch>
              <AuthorizedRoute
                path="/login"
                allow={unknown}
                fallbackPath="/messages"
              >
                <Login />
              </AuthorizedRoute>
              <AuthorizedRoute
                path="/signup"
                allow={unknown}
                fallbackPath="/messages"
              >
                <Signup />
              </AuthorizedRoute>
              <AuthorizedRoute path="/messages">
                <Messages />
              </AuthorizedRoute>
              <Redirect exact from="/" to="/signup" />
            </Switch>
          </Router>
        </SWRSocketConfig>
      </SocketManager>
    </UserManager>
  </MuiThemeProvider>
)
<file_sep>/client/src/pages/welcome/SubmitButton.jsx
import { Button } from '@material-ui/core'
import { makeStyles } from '@material-ui/core/styles'
// Styling for the welcome-page submit button (wide padding, slight radius).
const useStyles = makeStyles((theme) => ({
  submit: {
    padding: theme.spacing(2, 7.25),
    borderRadius: 3,
    marginTop: theme.spacing(6.5),
  },
}))
const SubmitButton = ({ children }) => {
const classes = useStyles()
return (
<Button
type="submit"
variant="contained"
size="large"
color="primary"
className={classes.submit}
>
{children}
</Button>
)
}
export default SubmitButton
<file_sep>/server/app.js
const createError = require('http-errors')
const express = require('express')
const cookieParser = require('cookie-parser')
const logger = require('morgan')
const { jwtCookieParser } = require('./middleware/auth')
const { requireUser } = require('./middleware/auth')
const sendUserInfo = require('./middleware/sendUserInfo')
const clearCookies = require('./middleware/clearCookies')
const { User } = require('./schema/User')
const registerRouter = require('./routes/register')
const loginRouter = require('./routes/login')
const staticFiles = require('@messenger/client/route')
// NOTE(review): leftover startup debugging — logs the static-files path and
// its full directory listing on every boot. Presumably safe to remove once
// deploys are verified.
console.log({ staticFiles })
const fs = require('fs')
fs.readdir(staticFiles, (err, files) => {
  if (err) {
    throw err
  }
  files.forEach((file) => console.log({ file }))
})
const { json, urlencoded, Router } = express
const app = express()
const JWT_COOKIE_NAME = 'SESSION_TOKEN'

// Middleware order matters: static assets are served before the JWT parser,
// so asset requests skip token verification.
app.use(logger('dev'))
app.use(json())
app.use(urlencoded({ extended: false }))
app.use(cookieParser())
app.use(express.static(staticFiles))
// Decode the session cookie into req.user on every request; the decoded
// claim is converted back into a User document via User.fromClaim.
app.use(
  jwtCookieParser({
    cookie: JWT_COOKIE_NAME,
    key: process.env.JWT_PRIVATE_KEY,
    jwtOpts: { algorithms: [process.env.JWT_ALG] },
    convert: User.fromClaim.bind(User),
  })
)

// Settings used when ISSUING tokens (login/register); tokens expire in 1h.
const tokenSettings = {
  cookie: JWT_COOKIE_NAME,
  key: process.env.JWT_PRIVATE_KEY,
  jwtOpts: { algorithm: process.env.JWT_ALG, expiresIn: '1h' },
}
app.use(
  '/user',
  loginRouter(tokenSettings),
  registerRouter(tokenSettings),
  Router().get('/info', requireUser, sendUserInfo),
  // logout clears both the httpOnly session cookie and the
  // client-readable AUTHENTICATED flag cookie
  Router().post(
    '/logout',
    clearCookies(JWT_COOKIE_NAME, 'AUTHENTICATED'),
    (_, res) => res.status(200).send()
  )
)
// catch 404 and forward to error handler
app.use((req, res, next) => next(createError(404)))
// error handler
app.use((err, req, res, next) => {
  // set locals, only providing error details in development
  res.locals.message = err.message
  res.locals.error = req.app.get('env') === 'development' ? err : {}
  console.error(err)
  // Fix: previously the raw `err` object was serialized into the JSON
  // response regardless of environment (res.locals.error was computed but
  // never used), which both leaked internals in production and — since
  // Error#message is non-enumerable — often produced an empty {} for the
  // client. Expose the message always, and the stack only in development.
  res.status(err.status || 500)
  res.json({
    error: {
      message: err.message,
      ...(req.app.get('env') === 'development' ? { stack: err.stack } : {}),
    },
  })
})
module.exports = app
<file_sep>/client/src/AuthorizedRoute.jsx
import { Redirect, Route, useLocation } from 'react-router-dom'
import { useUser } from './context/UserContext'
// Route-guard predicates: each takes the current user (possibly undefined)
// and answers whether the route should render.
export const known = (user) => Boolean(user)
export const unknown = (user) => !known(user)
export const all = () => true
export const none = () => false
/**
* A `Route` that redirects to `fallbackPath` if `allow` returns false.
*
* Defaults to allowing all known users, and redirecting to `/signup` if not authorized
*
* Only intended to handle children as children elements.
*/
const AuthorizedRoute = ({
allow = known,
fallbackPath = '/signup',
children,
...props
}) => {
const location = useLocation()
const { user } = useUser()
return (
<Route {...props}>
{allow(user) ? (
children
) : (
<Redirect
to={{
pathname: fallbackPath,
state: { referrer: location },
}}
/>
)}
</Route>
)
}
export default AuthorizedRoute
<file_sep>/server/middleware/sendUserInfo.js
const express = require('express')
// Respond with the authenticated user's profile, or a 404 envelope when
// req.user carries no data (e.g. the JWT failed to resolve to a user).
const handler = (req, res) => {
  const userDoc = req.user?.data
  if (!userDoc) {
    res.status(404).send({
      error: {
        kind: 'no user',
        message: "Couldn't find user",
        cause: req.user?.error,
      },
    })
    return
  }
  res.send({
    data: {
      kind: 'user',
      user: userDoc.toObject(),
    },
  })
}
module.exports = handler
<file_sep>/client/src/themes/theme.js
import { createMuiTheme } from '@material-ui/core'
// Global Material-UI theme: "Open Sans" everywhere, blue primary color,
// and a fixed type scale. Buttons keep their natural casing.
export const theme = createMuiTheme({
  overrides: {
    MuiButton: {
      // large buttons bump to 1rem (base button size is 0.875rem below)
      sizeLarge: {
        fontSize: '1rem',
      },
    },
  },
  typography: {
    fontFamily: '"Open Sans"',
    button: {
      textTransform: 'none',
      fontSize: '0.875rem',
      fontWeight: 400,
    },
    body1: {
      fontSize: '0.875rem',
      fontWeight: 600,
    },
    body2: {
      fontSize: '0.875rem',
      fontWeight: 400,
    },
    caption: {
      fontSize: '0.6875rem',
      fontWeight: 600,
    },
    h1: {
      fontSize: '1.625rem',
      fontWeight: 600,
    },
    h2: {
      fontSize: '1.25rem',
      fontWeight: 600,
    },
    h3: {
      fontSize: '1rem',
      fontWeight: 600,
    },
  },
  palette: {
    primary: { main: '#3a8dff' },
    background: { default: '#fff' },
    text: { primary: '#000' },
  },
})
<file_sep>/client/src/context/UserContext.jsx
import cookie from 'cookie'
import {
createContext,
useCallback,
useContext,
useEffect,
useState,
} from 'react'
// Holds the logged-in user plus the auth actions (login/logout/register)
// and the most recent auth error.
export const UserContext = createContext()
export const UserManager = ({ children }) => {
  const [user, setUserRaw] = useState()
  // The current user should always be online, but we may fetch user info before socketio has connected
  // So the api will mark us as offline, which is correct, but not how we should appear in app
  const setUser = useCallback(
    (user) => setUserRaw(user && { ...user, online: true }),
    [setUserRaw]
  )
  const [error, setError] = useState()
  // POST credentials; on success stores the returned user.
  // NOTE(review): a stale `error` from a previous failure is not cleared on
  // a later success — confirm whether that is intended.
  const login = async (email, password) => {
    try {
      const res = await (
        await fetch('/user/login', {
          method: 'POST',
          headers: {
            'Content-Type': 'application/json',
          },
          body: JSON.stringify({ user: { email, password } }),
        })
      ).json()
      if (res.error) throw res.error
      setUser(res.data.user)
    } catch (err) {
      setError(err)
    }
  }
  // Create an account; same response envelope handling as login.
  const register = async (name, email, password) => {
    try {
      const res = await (
        await fetch('/user/register', {
          method: 'POST',
          headers: { 'Content-Type': 'application/json' },
          body: JSON.stringify({ user: { name, email, password } }),
        })
      ).json()
      if (res.error) throw res.error
      setUser(res.data.user)
    } catch (err) {
      setError(err)
    }
  }
  // Fetch the current user from the session cookie; throws the API error.
  const infoRequest = async () => {
    const res = await (await fetch('/user/info')).json()
    if (res.error) throw res.error
    return res.data.user
  }
  useEffect(() => {
    // Should have gotten the user from the login response,
    // but may have been logged in before, or in another tab
    // (AUTHENTICATED is the client-readable flag cookie set by the server).
    const isAuthed = cookie.parse(document.cookie).AUTHENTICATED
    if (isAuthed && !user) infoRequest().then(setUser, setError)
  }, [user, setUser])
  // Clear the server session, then drop the local user.
  const logout = async () => {
    await fetch('/user/logout', {
      method: 'POST',
    })
    setUser()
  }
  return (
    <UserContext.Provider value={{ user, error, login, logout, register }}>
      {children}
    </UserContext.Provider>
  )
}
export const useUser = () => useContext(UserContext)
<file_sep>/client/src/context/SocketContext.jsx
import { createContext, useContext, useEffect, useMemo, useState } from 'react'
import { io } from 'socket.io-client'
import { promisify } from 'util'
import { useUser } from './UserContext'
export const SocketContext = createContext()
export const SocketManager = ({ children }) => {
const { user } = useUser()
const [socket] = useState(() => io({ autoConnect: false }))
useEffect(() => {
if (user) {
socket.connect()
const connected = ['connected', (i) => console.log('connected', i)]
socket.once(...connected)
const connect_error = ['connect_error', (err) => console.error(err)]
socket.on(...connect_error)
return () => {
socket.off(...connected)
socket.off(...connect_error)
}
}
}, [user, socket])
return (
<SocketContext.Provider value={socket}>{children}</SocketContext.Provider>
)
}
// Access the shared socket.io client instance.
export const useSocket = () => useContext(SocketContext)
// Promisified socket.emit — fetcher(event, ...args) resolves with the ack
// callback's result; presumably consumed as the SWR fetcher (see
// SWRSocketConfig) — TODO confirm.
export const useFetcher = () => {
  const socket = useSocket()
  return useMemo(() => promisify(socket.emit.bind(socket)), [socket])
}
<file_sep>/server/routes/login.js
const { User } = require('../schema/User')
const asyncHandler = require('./asyncHandler')
const express = require('express')
const { compare } = require('bcrypt')
const sendUserInfo = require('../middleware/sendUserInfo')
const addJwtCookie = require('../middleware/addJwtCookie')
// Build the /login router. On valid credentials the user doc is attached
// to req.user, a JWT cookie is issued, and the user info is returned;
// otherwise a 401 with an 'invalid credentials' envelope is sent.
const configure = (config) =>
  express.Router().post(
    '/login',
    asyncHandler(async (req, res, next) => {
      const { email, password } = req.body.user
      const account = await User.findOne({ email }).exec()
      const storedHash = account?.password
      // Guard: unknown email or mismatched password → 401.
      if (!storedHash || !(await compare(password, storedHash))) {
        return res.status(401).send({ error: { kind: 'invalid credentials' } })
      }
      req.user = { data: account }
      next()
    }),
    addJwtCookie(config),
    sendUserInfo
  )
module.exports = configure
<file_sep>/client/.storybook/preview.js
import { theme } from '../src/themes/theme'
import ScopedCssBaseline from '@material-ui/core/ScopedCssBaseline'
import { MuiThemeProvider } from '@material-ui/core'
// Storybook global config: auto-generate actions for `on*` props, and wrap
// every story in the app theme plus a scoped CSS baseline.
export const parameters = {
  actions: { argTypesRegex: '^on[A-Z].*' },
}
const muiSetup = (storyfn) => (
  <MuiThemeProvider theme={theme}>
    <ScopedCssBaseline>{storyfn()}</ScopedCssBaseline>
  </MuiThemeProvider>
)
export const decorators = [muiSetup]
<file_sep>/server/schema/Message.js
const { Schema, model, ObjectId } = require('mongoose')
const beautifyUnique = require('mongoose-beautiful-unique-validation')
const { Conversation } = require('./Conversation')
const { User } = require('./User')
// A chat message: freeform content, sent by one User into one Conversation.
// createdAt/updatedAt are managed by the timestamps option.
const MessageSchema = new Schema(
  {
    content: String,
    from: { type: ObjectId, ref: User, required: true },
    to: { type: ObjectId, ref: Conversation, required: true },
  },
  // Fix: Schema(definition, options) takes only two arguments — the third
  // object previously passed here ({ strictQuery: 'throw' }) was silently
  // ignored, so strictQuery never applied. Merge both into one options
  // object, matching the intent of the Conversation schema.
  { timestamps: true, strictQuery: 'throw' }
).plugin(beautifyUnique)
const Message = model('Message', MessageSchema)
module.exports = { Message, MessageSchema }
<file_sep>/client/src/pages/welcome/Login.jsx
import { Typography } from '@material-ui/core'
import { makeStyles } from '@material-ui/core/styles'
import { useUser } from 'context/UserContext'
import { Form, Formik } from 'formik'
import { useCallback, useEffect, useState } from 'react'
import { loginSchema } from 'schema'
import Base from './Base'
import ButtonHeader from './ButtonHeader'
import Error from './Error'
import Input from './Input'
import SubmitButton from './SubmitButton'
const useStyles = makeStyles((theme) => ({
  // "Forgot?" link inside the password field's end adornment
  forgot: {
    paddingRight: theme.spacing(1.25),
    color: '#3a8dff',
    fontSize: '0.75rem',
    fontWeight: 400,
    textDecoration: 'none',
  },
  form: {
    display: 'flex',
    flexDirection: 'column',
    alignItems: 'center',
  },
}))
// Login page: controlled Formik form validated by loginSchema; auth errors
// from UserContext surface in a dismissable snackbar.
const Login = () => {
  const classes = useStyles()
  const { login, error } = useUser()
  const [errorOpen, setErrorOpen] = useState(false)
  // (re)open the error snackbar whenever a new auth error arrives
  useEffect(() => setErrorOpen(!!error), [error])
  const handleErrorClose = useCallback((_event, reason) => {
    // keep the snackbar open on clicks elsewhere on the page
    if (reason === 'clickaway') return
    setErrorOpen(false)
  }, [])
  return (
    <>
      <Error
        open={errorOpen}
        onClose={handleErrorClose}
        message="Login failed"
      />
      <Base
        welcomeMsg="Welcome back!"
        buttonHeader={
          <ButtonHeader
            label="Don't have an account?"
            buttonText="Create account"
            to="/signup"
          />
        }
      >
        <Formik
          initialValues={loginSchema.getDefault()}
          validationSchema={loginSchema}
          onSubmit={async ({ email, password }) => {
            await login(email, password)
          }}
        >
          <Form className={classes.form}>
            <Input
              label="E-mail address"
              name="email"
              autoComplete="email"
              autoFocus
            />
            <Input
              label="Password"
              name="password"
              endAdornment={
                // TODO this should link to a reset page, currently blank but included for appearance
                <Typography className={classes.forgot} component="a" href="#">
                  Forgot?
                </Typography>
              }
              type="password"
              autoComplete="current-password"
            />
            <SubmitButton>Login</SubmitButton>
          </Form>
        </Formik>
      </Base>
    </>
  )
}
export default Login
<file_sep>/client/src/pages/Messages/Chat/index.jsx
import { makeStyles } from '@material-ui/core'
import { Box, InputBase } from '@material-ui/core'
import { useSocket } from 'context/SocketContext'
import { useEffect, useRef, useState } from 'react'
import useSWR from 'swr'
import Bubble from './Bubble'
const useStyles = makeStyles((theme) => ({
  // compose-box input (light blue placeholder, generous padding)
  inputInput: {
    color: '#000000',
    '&::placeholder': {
      color: '#ADC0DE',
      opacity: 1,
    },
    padding: theme.spacing(4.5, 3.5),
  },
  scroll: { overflowY: 'auto' },
  // lets the flex child shrink so the message list can actually scroll
  noMinHeight: { minHeight: 0 },
}))
const Chat = ({ conversation }) => {
const classes = useStyles()
const socket = useSocket()
const [msgValue, setMsgValue] = useState('')
const { data: messages } = useSWR(
conversation && ['get_conversation_messages', conversation._id]
)
const onSendMessage = () => {
const msg = { content: msgValue, to: conversation }
socket.emit('send_message', msg)
}
const msgEndScrollMarker = useRef()
// Don't smooth scroll while initially loading a conversation
const prevConversation = useRef()
const scrollToBottom = () => {
// Don't scroll until done rerendering from everything loading
if (msgEndScrollMarker.current && conversation && messages) {
msgEndScrollMarker.current.scrollIntoView({
behavior: prevConversation.current === conversation ? 'smooth' : 'auto',
})
prevConversation.current = conversation
}
}
useEffect(scrollToBottom, [messages, conversation])
return (
<Box
display="flex"
flexDirection="column"
m={5}
mt={4}
flex={1}
justifyContent="flex-end"
className={classes.noMinHeight}
>
<Box className={classes.scroll} component="ol" p={0} m={0}>
{messages?.map(({ content, from, createdAt }, idx) => (
<Bubble
key={idx}
value={content}
sender={conversation.members.find((u) => u._id === from)}
sendTime={createdAt}
/>
))}
<Box ref={msgEndScrollMarker} />
</Box>
<Box
bgcolor="#E9EEF9"
borderRadius={5}
component="form"
onSubmit={(e) => {
e.preventDefault()
const m = msgValue.trim()
if (m) {
onSendMessage(m)
setMsgValue('')
}
}}
mt={5}
>
<InputBase
value={msgValue}
fullWidth
onChange={(e) => setMsgValue(e.target.value)}
placeholder="Type something..."
classes={{ input: classes.inputInput }}
disabled={conversation == null}
/>
</Box>
</Box>
)
}
export default Chat
<file_sep>/server/bin/www.js
#!/usr/bin/env node
require('dotenv').config({ debug: process.env.DEBUG })
const app = require('../app')
const http = require('http')
const mongoose = require('mongoose')
const { User } = require('../schema/User')
const io = require('../io')
/**
* Normalize a port into a number, string, or false.
*/
const normalizePort = (val) => {
  const parsed = Number.parseInt(val, 10)
  if (Number.isNaN(parsed)) return val // not numeric: treat as a named pipe
  return parsed >= 0 ? parsed : false // negative numbers are invalid
}
/**
* Adapt Server.listen into a promise
*/
const listen = (server, port) =>
  new Promise((resolve, reject) => {
    // Resolve with the server once it is listening; reject on the first
    // error (e.g. EADDRINUSE) so the caller can handle it.
    server.once('listening', () => resolve(server))
    server.once('error', reject)
    server.listen(port)
  })
// Boot sequence: connect mongoose, ensure User indexes exist, start the
// HTTP server, then attach socket.io to it.
const run = async () => {
  // Mongoose init
  // Disable all deprecated behavior
  mongoose
    .set('useNewUrlParser', true)
    .set('useFindAndModify', false)
    .set('useCreateIndex', true)
    .set('useUnifiedTopology', true)
  await mongoose.connect(process.env.MONGO_URL)
  // Ensure indexes are created first
  await User.init()
  mongoose.connection.on('error', (err) => console.error(err))
  // Server init
  const port = normalizePort(process.env.PORT || '3001')
  app.set('port', port)
  let server
  try {
    server = await listen(http.createServer(app), port)
    const addr = server.address()
    const bind = typeof addr === 'string' ? 'pipe ' + addr : 'port ' + addr.port
    console.log('Listening on ' + bind)
  } catch (error) {
    // Only translate listen() failures; anything else is unexpected.
    if (error.syscall !== 'listen') {
      throw error
    }
    const bind = typeof port === 'string' ? 'Pipe ' + port : 'Port ' + port
    // handle specific listen errors with friendly messages
    // (process.exit terminates, so fallthrough past each case is unreachable)
    switch (error.code) {
      case 'EACCES':
        console.error(bind + ' requires elevated privileges')
        process.exit(1)
      case 'EADDRINUSE':
        console.error(bind + ' is already in use')
        process.exit(1)
      default:
        throw error
    }
  }
  io.attach(server)
}
run()
<file_sep>/client/src/pages/Messages/Chat/index.stories.jsx
import { SocketContext } from 'context/SocketContext'
import { UserContext } from 'context/UserContext'
import { useEffect, useState } from 'react'
import { conversations, createSocketMock, me, messages } from 'storybookData'
import SWRSocketConfig from 'SWRSocketConfig'
import Chat from '.'
export default {
  title: 'Messages/Chat/Chat',
  component: Chat,
  argTypes: { onSendMessage: { action: 'sendMessage' } },
}
// Story harness: wires a mock socket into the real providers, forwards
// 'send_message' to the storybook action, and replays the fixture
// messages as 'new_message' events.
const Template = ({ onSendMessage, messages, ...props }) => {
  const [socket] = useState(createSocketMock)
  useEffect(() => {
    const send = ['send_message', onSendMessage]
    socket.on(...send)
    return () => {
      socket.off(...send)
    }
  }, [socket, onSendMessage])
  useEffect(() => {
    // feed the fixture messages through the mock as incoming events
    messages.forEach((msg) => socket.emit('new_message', msg))
  }, [messages, socket])
  return (
    <SocketContext.Provider value={socket.socketClient}>
      <UserContext.Provider value={{ user: me }}>
        <SWRSocketConfig>
          <Chat {...props} />
        </SWRSocketConfig>
      </UserContext.Provider>
    </SocketContext.Provider>
  )
}
Template.args = { messages, conversation: conversations[0] }
export { Template as Chat }
<file_sep>/server/middleware/addJwtCookie.js
const { User } = require('../schema/User')
const express = require('express')
const { sign } = require('jsonwebtoken')
const { compare } = require('bcrypt')
// Middleware factory: signs a JWT containing only the user's _id and sets
// two cookies — the httpOnly session token, and a client-readable
// AUTHENTICATED flag the SPA uses to know a session exists.
const addJwtCookie = ({ key, jwtOpts, cookie }) => (req, res, next) => {
  const baseOptions = { httpOnly: true, secure: true, sameSite: 'Strict' }
  const token = sign({ data: { _id: req.user.data._id } }, key, jwtOpts)
  res
    .cookie(cookie, token, baseOptions)
    .cookie('AUTHENTICATED', true, { ...baseOptions, httpOnly: false })
  next()
}
module.exports = addJwtCookie
<file_sep>/client/src/pages/welcome/Input.jsx
import { TextField, Typography } from '@material-ui/core'
import { makeStyles } from '@material-ui/core/styles'
import { useField } from 'formik'
import { uniqueId } from 'lodash'
import { useState } from 'react'
const useStyles = makeStyles((theme) => ({
  label: {
    color: 'rgb(0, 0, 0, 0.4)',
    paddingLeft: 5,
  },
  inputs: {
    paddingLeft: 5,
  },
  inputRoot: {
    paddingTop: theme.spacing(2.5),
  },
  underline: {
    '&::before': {
      borderBottom: '1.2px solid rgba(0, 0, 0, 0.2)',
    },
  },
  // keep the label static instead of MUI's default shrink transform
  formControlLabel: { transform: 'none' },
}))
// Formik-aware text field for the welcome forms: wires useField into a MUI
// TextField and shows the validation message once the field is touched.
const Input = ({ label, endAdornment, ...props }) => {
  const classes = useStyles()
  const [field, { touched, error }] = useField(props)
  // stable unique DOM id per mounted field (needed for label association)
  const [id] = useState(() => uniqueId(props.name))
  return (
    <TextField
      id={id}
      label={
        <Typography variant="body2" component="span" className={classes.label}>
          {label}
        </Typography>
      }
      fullWidth
      margin="normal"
      InputLabelProps={{
        classes: { formControl: classes.formControlLabel },
      }}
      InputProps={{
        classes: {
          root: classes.inputRoot,
          input: classes.inputs,
          underline: classes.underline,
        },
        endAdornment,
      }}
      {...props}
      {...field}
      error={!!(touched && error)}
      helperText={touched && error?.toString()}
    />
  )
}
export default Input
<file_sep>/server/io.js
const { Server } = require('socket.io')
const cookieParser = require('cookie-parser')
const { jwtCookieParser, requireUser } = require('./middleware/auth')
const { User, registerOnlineGetter } = require('./schema/User')
const { Message } = require('./schema/Message')
const util = require('util')
const { Conversation } = require('./schema/Conversation')
// Socket.io server; attached to the HTTP server in bin/www.js.
const io = new Server(undefined, {
  cors: {
    origin: 'http://localhost:3000',
  },
})
// Connection count per (stringified) user id — a user is reported online
// while at least one of their sockets is connected (count truthy).
const online = new Map()
io.online = online
registerOnlineGetter((id) => !!online.get(id.toString()))
const JWT_COOKIE_NAME = 'SESSION_TOKEN'
// Adapt an Express-style (req, res, next) middleware to socket.io's
// (socket, next) signature; a minimal res stand-in with locals is supplied.
const wrap = (middleware) => {
  return (socket, next) => middleware(socket.request, { locals: {} }, next)
}
// Reuse the Express auth pipeline for sockets: parse cookies, decode the
// JWT session cookie into request.user, then reject sockets that carry no
// valid user.
io.use(wrap(cookieParser()))
io.use(
  wrap(
    jwtCookieParser({
      cookie: JWT_COOKIE_NAME,
      key: process.env.JWT_PRIVATE_KEY,
      jwtOpts: { algorithms: [process.env.JWT_ALG] },
      convert: User.fromClaim.bind(User),
    })
  )
)
io.use(wrap(requireUser))
// Map helper: replace m[k] with f(current value); f sees undefined for a
// missing key (callers rely on default parameters for that case).
const update = (m, k, f) => {
  const nextValue = f(m.get(k))
  m.set(k, nextValue)
}
io.on('connection', (socket) => {
  // requireUser middleware guarantees request.user.data exists here
  const user = socket.request.user.data
  // Each user joins a room named after their id so all of their open
  // sockets can be addressed at once; bump the presence counter.
  socket.join(user._id.toString())
  update(online, user._id.toString(), (x = 0) => x + 1)
  // Persist a message, then fan it out to every member of the conversation.
  socket.on('send_message', async ({ content, to }) => {
    // NOTE(review): execPopulate() is the mongoose <6 populate API —
    // confirm the pinned mongoose version before upgrading.
    const msg = await (await Message.create({ content, to, from: user._id }))
      .populate('to')
      .execPopulate()
    const msgObject = msg.toObject({ depopulate: true })
    msg.to.members.forEach((u) =>
      io.to(u.toString()).emit('new_message', msgObject)
    )
  })
  /**
   * Get all conversations this user is in
   */
  socket.on(
    'get_conversations',
    util.callbackify(
      async () =>
        await Conversation.find()
          .where('members')
          .in([user._id])
          // exclude the requesting user from the populated member list
          .populate({ path: 'members', match: { _id: { $ne: user._id } } })
          .lean({ virtuals: true })
    )
  )
  /**
   * Get messages in a conversation by the conversation id
   */
  socket.on(
    'get_conversation_messages',
    util.callbackify(
      async (conversationId) =>
        await Message.find({ to: conversationId }).sort('createdAt').lean()
    )
  )
  /**
   * Get a conversation by the user(s) involved
   *
   * The authorized user is implicitly included in the query.
   *
   * The query will only match conversations exactly matching the user list.
   *
   * If the conversation doesn't exist, it will be created
   */
  socket.on(
    'get_conversation_by_users',
    util.callbackify(
      async (users) =>
        await Conversation.findOrCreate({
          members: [...users, user._id.toString()],
        })
          .populate({ path: 'members', match: { _id: { $ne: user._id } } })
          .lean({ virtuals: true })
    )
  )
  /**
   * Find another user by fuzzy text search
   */
  socket.on(
    'find_user',
    util.callbackify(async (query) =>
      query == null
        ? []
        : await User.find(
            { $text: { $search: query }, _id: { $ne: user._id } },
            { score: { $meta: 'textScore' } }
          )
            .sort({ score: { $meta: 'textScore' } })
            .limit(10)
            .lean({ virtuals: true })
    )
  )
  socket.on('disconnect', () => {
    // Decrement presence. The count reaching 0 (falsy) makes the online
    // getter report the user offline; the map key itself is never deleted.
    update(online, user._id.toString(), (x) => x - 1)
  })
})
module.exports = io
<file_sep>/client/src/pages/welcome/Signup.jsx
import { makeStyles } from '@material-ui/core/styles'
import { useUser } from 'context/UserContext'
import { Form, Formik } from 'formik'
import { useCallback, useEffect, useState } from 'react'
import { signupSchema } from 'schema'
import Base from './Base'
import ButtonHeader from './ButtonHeader'
import Error from './Error'
import Input from './Input'
import SubmitButton from './SubmitButton'
// Center the signup form fields in a column.
const useStyles = makeStyles((theme) => ({
  form: {
    display: 'flex',
    flexDirection: 'column',
    alignItems: 'center',
  },
}))
const Signup = () => {
const classes = useStyles()
const { register, error } = useUser()
const [errorOpen, setErrorOpen] = useState(false)
useEffect(() => setErrorOpen(!!error), [error])
const handleErrorClose = useCallback((_event, reason) => {
if (reason === 'clickaway') return
setErrorOpen(false)
}, [])
return (
<>
<Error
open={errorOpen}
onClose={handleErrorClose}
message="Registration failed"
/>
<Base
welcomeMsg="Create an account."
buttonHeader={
<ButtonHeader
label="Already have an account?"
buttonText="Login"
to="/login"
/>
}
>
<Formik
initialValues={signupSchema.getDefault()}
validationSchema={signupSchema}
onSubmit={async ({ username, email, password }) => {
await register(username, email, password)
}}
>
<Form className={classes.form}>
<Input
name="username"
label="Username"
autoComplete="username"
autoFocus
/>
<Input name="email" label="E-mail address" autoComplete="email" />
<Input
name="password"
label="Password"
type="password"
autoComplete="current-password"
/>
<SubmitButton>Create</SubmitButton>
</Form>
</Formik>
</Base>
</>
)
}
export default Signup
<file_sep>/client/src/pages/Messages/Sidebar/index.stories.jsx
import { SocketContext } from 'context/SocketContext'
import { UserContext } from 'context/UserContext'
import { useState } from 'react'
import { createSocketMock, me } from 'storybookData'
import SWRSocketConfig from 'SWRSocketConfig'
import Sidebar from '.'
export default {
  title: 'Messages/Sidebar',
  component: Sidebar,
  argTypes: {
    onSelect: { action: 'select' },
  },
  parameters: { backgrounds: { default: 'dark' } },
}
// Story harness: mounts the Sidebar inside the real providers, backed by a
// mock socket, with local active-conversation state.
const Template = ({ user }) => {
  const [socket] = useState(createSocketMock)
  const [active, setActive] = useState()
  return (
    <SocketContext.Provider value={socket.socketClient}>
      <UserContext.Provider value={{ user }}>
        <SWRSocketConfig>
          <Sidebar {...{ active, setActive }} />
        </SWRSocketConfig>
      </UserContext.Provider>
    </SocketContext.Provider>
  )
}
Template.args = {
  user: me,
}
export { Template as Sidebar }
<file_sep>/client/src/pages/Messages/Chat/Bubble.jsx
import { Avatar, Box, ListItem, Typography } from '@material-ui/core'
import { makeStyles } from '@material-ui/core/styles'
// Bubble styling keyed on the isSender prop: the current user's own
// messages and other participants' messages get mirrored layouts/colors.
const useStyles = makeStyles((theme) => {
  return {
    container: {
      // own messages flow right-to-left (avatar side flipped)
      flexDirection: ({ isSender }) => (isSender ? 'row-reverse' : 'row'),
      alignItems: 'flex-start',
    },
    background: {
      background: ({ isSender }) =>
        isSender ? '#F4F6FA' : 'linear-gradient(225deg, #6CC1FF, #3A8DFF)',
      borderRadius: 10,
      // square off the corner nearest the speaker
      borderTopLeftRadius: ({ isSender }) => (isSender ? undefined : 0),
      borderBottomRightRadius: ({ isSender }) => (isSender ? 0 : undefined),
      padding: theme.spacing(1, 2),
    },
    text: {
      color: ({ isSender }) => (isSender ? '#91A3C0' : '#FFFFFF'),
    },
    avatar: {
      width: theme.spacing(4),
      height: theme.spacing(4),
      marginRight: theme.spacing(1.5),
      marginTop: theme.spacing(1.5),
    },
    content: {
      display: 'flex',
      flexDirection: 'column',
      alignItems: ({ isSender }) => (isSender ? 'flex-end' : 'flex-start'),
      [theme.breakpoints.up('sm')]: {
        maxWidth: 'min(75%, 100ch)',
      },
    },
    timestamp: {
      color: '#BECCE2',
    },
  }
})
// True when the two Dates fall on the same calendar day (local time);
// `b` defaults to "now".
const isSameDate = (a, b = new Date()) =>
  a.getFullYear() === b.getFullYear() &&
  a.getMonth() === b.getMonth() &&
  a.getDate() === b.getDate()
// Locale-aware message timestamps: time-only when the message was sent
// today, date + time otherwise.
const todayFormat = Intl.DateTimeFormat(undefined, { timeStyle: 'short' })
const exactFormat = Intl.DateTimeFormat(undefined, {
  dateStyle: 'short',
  timeStyle: 'short',
})
const formatTimestamp = (date) => {
  const formatter = isSameDate(date) ? todayFormat : exactFormat
  return formatter.format(date)
}
// A single chat message. `sender` is the other participant's user object;
// when absent the message is treated as the current user's own
// (isSender: true) and styled/aligned accordingly.
const Bubble = ({ sender, value, sendTime }) => {
  const classes = useStyles({ isSender: !sender })
  return (
    <ListItem className={classes.container} disableGutters>
      {sender && <Avatar src={sender.img} className={classes.avatar} />}
      <Box className={classes.content}>
        <Typography className={classes.timestamp} variant="caption">
          {sender && (sender.name ?? 'unknown') + ' '}
          {formatTimestamp(new Date(sendTime))}
        </Typography>
        <Box className={classes.background}>
          <Typography className={classes.text} variant="body1">
            {value}
          </Typography>
        </Box>
      </Box>
    </ListItem>
  )
}
export default Bubble
| 65e901d2d38fa36aeeeae81c15aa92c3f1bf35b4 | [
"JavaScript"
] | 34 | JavaScript | Briaoeuidhtns/messenger-demo | 706b90b2a566b7f18d27ae72187f3495aa22760f | b6cf1b00985c5de8d2aaf9f83d55a2e32f367153 |
refs/heads/master | <repo_name>asktami/noteful-app<file_sep>/src/FolderList/FolderList.js
import React, { useContext } from 'react';
import PropTypes from 'prop-types';
import { NavLink } from 'react-router-dom';
import FolderError from './FolderError';
import NotefulContext from '../NotefulContext';
import { config } from '../config';
// Sidebar list of folders with per-folder note counts. Add is always
// available; edit/delete appear only when a folder is selected via the
// route (:id_folder).
const FolderList = (props) => {
  const contextType = useContext(NotefulContext);
  const { notes, folders, handleClickDeleteFolder } = contextType;

  // route params are strings; postgresql ids are numeric, so coerce to
  // match the id type the API returns
  let folderId;
  if (config.DATASOURCE === 'postgresql') {
    folderId = parseInt(props.match.params.id_folder);
  } else {
    folderId = props.match.params.id_folder;
  }
  return (
    <>
      <header>
        <>
          <h2>Folders</h2>
          <NavLink to={'/add-folder'}>
            <button className="btn-add">+</button>
          </NavLink>
          {props.match.params.id_folder !== undefined ? (
            <>
              <NavLink to={`/edit-folder/${props.match.params.id_folder}`}>
                <button className="btn-edit">&#9998;</button>
              </NavLink>
              <button
                className="btn-delete"
                onClick={() => handleClickDeleteFolder(folderId, props)}
              >
                -
              </button>
            </>
          ) : null}
        </>
      </header>
      <ul>
        {folders.map((folder) => (
          <li
            key={folder.id}
            className={folder.id === folderId ? ' active' : null}
          >
            <FolderError>
              <NavLink to={`/folders/${folder.id}`}>
                <span role="img" aria-label="Folder">
                  &#128194;
                </span>
                {folder.name} (
                {notes.filter((note) => note.id_folder === folder.id).length})
              </NavLink>
            </FolderError>
          </li>
        ))}
      </ul>
    </>
  );
};
export default FolderList;
// to catch bugs
// check that get a folders array that has id and name
// this array is the "folders" variable coming from context
FolderList.propTypes = {
  folders: PropTypes.arrayOf(
    PropTypes.shape({
      // Fix: ids are strings from the json datasource but numbers when
      // DATASOURCE === 'postgresql' (the component itself parseInt's the
      // route param to match), so accept both instead of string-only.
      id: PropTypes.oneOfType([PropTypes.string, PropTypes.number]).isRequired,
      name: PropTypes.string.isRequired,
    })
  ),
};
<file_sep>/src/AddNote/AddNote.test.js
import React from 'react';
import ReactDOM from 'react-dom';
import AddNote from './AddNote';
// Smoke test: AddNote mounts with the minimal router-ish props it reads
// (location.state.id_folder for the preselected folder; history.push stub).
it('renders without crashing', () => {
  const div = document.createElement('div');
  const props = {
    name: 'test-folder',
    content: 'note content',
    modified: new Date(),
    location: {
      state: {
        id_folder: 99
      }
    },
    history: {
      push: () => {}
    }
  };
  ReactDOM.render(<AddNote {...props} />, div);
  ReactDOM.unmountComponentAtNode(div);
});
<file_sep>/src/EditNote/EditNote.js
import React from 'react';
import { config } from '../config';
import NotefulContext from '../NotefulContext';
import ValidationError from '../ValidationError';
/**
 * Controlled form for editing an existing note.
 *
 * Loads the note on mount, validates fields on every change, PATCHes the
 * result to the API on submit, then navigates back to the note's folder.
 */
class EditNote extends React.Component {
  static contextType = NotefulContext;

  state = {
    apiError: null,
    formValid: true,
    errorCount: null,
    id: '',
    id_folder: '',
    name: '',
    content: '',
    // per-field validation messages; empty string means the field is valid
    errors: {
      id_folder: '',
      name: '',
      content: '',
    },
  };

  // to see addNote apiError in ui:
  /*
  state = {
  apiError: 'addNote apiError errorMessage',
  ...
  */

  // Fetch the note being edited and seed the form fields with it.
  componentDidMount() {
    const { noteId } = this.props.match.params;
    fetch(config.NOTES_ENDPOINT + `/${noteId}`, {
      method: 'GET',
      headers: {
        'content-type': 'application/json',
        authorization: `Bearer ${config.API_KEY}`,
      },
    })
      .then((res) => {
        if (!res.ok) return res.json().then((error) => Promise.reject(error));
        return res.json();
      })
      .then((responseData) => {
        this.setState({
          id: responseData.id,
          name: responseData.name,
          id_folder: responseData.id_folder,
          content: responseData.content,
        });
      })
      .catch((error) => {
        this.setState({ apiError: error });
      });
  }

  // Return the validation message for one field ('' when valid).
  // Fix: the previous version wrote into `{ ...this.state }.errors`, which
  // is the SAME object reference as this.state.errors (shallow copy), so it
  // mutated state directly; the separate updateErrorCount then depended on
  // reading that mutation synchronously, before React applied setState.
  validateField = (name, value) => {
    let err = '';
    if (name === 'name') {
      if (value.length === 0) {
        err = 'Note title is required';
      } else if (value.length < 3) {
        err = 'Note title must be at least 3 characters long';
      }
    }
    if (name === 'id_folder') {
      if (value.length === 0) {
        err = 'You must select a folder';
      }
    }
    if (name === 'content') {
      if (value.length === 0) {
        err = 'You must enter a description';
      } else if (value.length < 5) {
        err = 'The description must be at least 5 characters long';
      }
    }
    return err;
  };

  // Store the new field value and recompute all validation state in a
  // single, non-mutating setState.
  handleChange = (event) => {
    const { name, value } = event.target;
    const errors = {
      ...this.state.errors,
      [name]: this.validateField(name, value),
    };
    const errorCount = Object.values(errors).filter((msg) => msg.length > 0)
      .length;
    this.setState({
      [name]: value,
      errors,
      errorCount,
      formValid: errorCount === 0,
    });
  };

  handleClickCancel = () => {
    this.props.history.push('/');
  };

  // Reset the form fields from the (just-saved) note.
  resetFields = (newFields) => {
    this.setState({
      id: newFields.id || '',
      name: newFields.name || '',
      content: newFields.content || '',
      id_folder: newFields.id_folder || '',
    });
  };

  handleSubmit = (e) => {
    e.preventDefault();

    // do NOT submit form if any errors
    if (this.state.errorCount > 0) return;

    const { noteId } = this.props.match.params;

    // ids are numbers in postgresql but strings in the json datasource
    let correct_type_noteId;
    if (config.DATASOURCE === 'postgresql') {
      correct_type_noteId = parseInt(this.state.id);
    } else {
      correct_type_noteId = this.state.id;
    }

    let correct_type_folderId;
    if (config.DATASOURCE === 'postgresql') {
      correct_type_folderId = parseInt(this.state.id_folder);
    } else {
      correct_type_folderId = this.state.id_folder;
    }

    const newNote = {
      id: correct_type_noteId,
      id_folder: correct_type_folderId,
      name: this.state.name,
      content: this.state.content,
      modified: new Date(),
    };

    this.setState({ apiError: null });
    fetch(config.NOTES_ENDPOINT + `/${noteId}`, {
      method: 'PATCH',
      body: JSON.stringify(newNote),
      headers: {
        'content-type': 'application/json',
        authorization: `Bearer ${config.API_KEY}`,
      },
    })
      .then((res) => {
        if (!res.ok) return res.json().then((error) => Promise.reject(error));
      })
      .then(() => {
        this.resetFields(newNote);
        this.context.updateNotes(newNote);

        // return to note folder
        this.props.history.push(`/folders/${this.state.id_folder}`);
      })
      .catch((error) => {
        // Fix: this previously called error(error) — invoking the caught
        // value as a function, which itself threw a TypeError and skipped
        // setState below.
        console.error(error);
        this.setState({ apiError: error });
      });
  };

  render() {
    const { errors, name, content, id_folder } = this.state;
    const folders = this.context.folders;

    if (this.state.apiError) {
      return <p className="error">{this.state.apiError}</p>;
    }

    return (
      <form onSubmit={this.handleSubmit}>
        <fieldset>
          <legend>Edit Note</legend>
          <label htmlFor="id_folder">Folder</label>
          <select
            id="id_folder"
            name="id_folder"
            aria-label="Folder Id"
            required
            aria-required="true"
            aria-describedby="id_folderError"
            aria-invalid="true"
            value={id_folder}
            onChange={this.handleChange}
          >
            <option value="">Select a folder</option>
            {folders.map((folder) => (
              <option key={folder.id} value={folder.id}>
                {folder.name}
              </option>
            ))}
          </select>
          {errors.id_folder.length > 0 && (
            <ValidationError id={'id_folderError'} message={errors.id_folder} />
          )}

          <label htmlFor="name">Title</label>
          <input
            type="text"
            id="name"
            name="name"
            value={name}
            onChange={this.handleChange}
            aria-label="Note Title"
            required
            aria-required="true"
            aria-describedby="noteTitleError"
            aria-invalid="true"
          />
          {errors.name.length > 0 && (
            <ValidationError id={'noteTitleError'} message={errors.name} />
          )}

          <label htmlFor="content">Description</label>
          <textarea
            id="content"
            name="content"
            value={content}
            onChange={this.handleChange}
            aria-label="Note Description"
            required
            aria-required="true"
            aria-describedby="noteDescriptionError"
            aria-invalid="true"
          />
          {errors.content.length > 0 && (
            <ValidationError
              id={'noteDescriptionError'}
              message={errors.content}
            />
          )}
          <br />
          <button className="btn-cancel" onClick={this.handleClickCancel}>
            Cancel
          </button>{' '}
          <button
            className="btn-save"
            disabled={this.state.formValid === false}
          >
            Save Note
          </button>
        </fieldset>
        {this.state.errorCount !== null ? (
          <p className="form-status">
            Form is {this.state.formValid ? 'complete &#9989;' : 'incomplete &#10060;'}
          </p>
        ) : null}
      </form>
    );
  }
}
export default EditNote;
<file_sep>/src/Footer/Footer.js
import React from 'react';
const Footer = props => {
return (
<footer>
© 2019{' '}
<a
href="http://www.asktami.com"
target="_blank"
rel="noopener noreferrer"
>
<NAME>
</a>
</footer>
);
};
export default Footer;
<file_sep>/src/Note/Note.test.js
import React from 'react';
import { shallow } from 'enzyme';
import toJson from 'enzyme-to-json';
import Note from './Note';
describe('Dummy Note test', () => {
	// Placeholder smoke test; the assertion now matches its description
	// (it previously checked 2 * 2 === 4 under an "adding 1 + 2" label).
	test('adding 1 + 2 should return 3', () => {
		expect(1 + 2).toBe(3);
	});
});
<file_sep>/src/AddFolder/AddFolder.js
import React from 'react';
import { config } from '../config';
import NotefulContext from '../NotefulContext';
import ValidationError from '../ValidationError';
/**
 * Controlled form for creating a new folder.
 *
 * Per-field validation messages live in `state.errors`; `errorCount` and
 * `formValid` are re-derived after every change. On submit the folder is
 * POSTed to the API, added to context, and the app navigates to it.
 */
class AddFolder extends React.Component {
	static contextType = NotefulContext;
	state = {
		apiError: null,
		formValid: false,
		errorCount: null,
		name: '',
		errors: {
			// seeded so an untouched form counts as invalid
			name: 'You must enter a folder name',
		},
	};
	// Recount non-empty error messages and derive formValid from the total.
	updateErrorCount = () => {
		let errors = this.state.errors;
		let count = 0;
		Object.values(errors).forEach((val) => {
			if (val.length > 0) {
				count++;
			}
		});
		this.setState({ errorCount: count });
		let valid = count === 0 ? true : false;
		this.setState({ formValid: valid });
	};
	// Validate one field and store its (possibly empty) error message.
	validateField = (name, value) => {
		let err = '';
		if (name === 'name') {
			if (value.trim().length === 0) {
				err = 'Folder name is required';
			} else if (value.trim().length < 3) {
				err = 'Folder name must be at least 3 characters long';
			}
		}
		const { errors } = { ...this.state };
		errors[name] = err;
		this.setState({ errors });
	};
	handleChange = (event) => {
		const { name, value } = event.target;
		// NOTE(review): trimming on every keystroke keeps interior spaces out
		// of state — confirm this is intended for folder names.
		this.setState({ [name]: value.trim() });
		this.validateField(name, value);
		this.updateErrorCount();
	};
	handleClickCancel = () => {
		this.props.history.push('/');
	};
	// POST the new folder, add it to context, then select it.
	handleSubmit = (e) => {
		e.preventDefault();
		// do NOT submit form if any errors
		if (this.state.errorCount > 0) return;
		// get the form fields from the event
		const { name } = e.target;
		const folder = {
			name: name.value,
		};
		this.setState({ apiError: null });
		fetch(config.FOLDERS_ENDPOINT, {
			method: 'POST',
			body: JSON.stringify(folder),
			headers: {
				'content-type': 'application/json',
				Authorization: `Bearer ${config.API_KEY}`,
			},
		})
			.then((res) => {
				if (!res.ok) {
					// get the error message from the response,
					return res.json().then((error) => {
						// then throw it
						throw error;
					});
				}
				return res.json();
			})
			.then((data) => {
				// clear form values
				name.value = '';
				this.context.addFolder(data);
				// select newly created folder:
				this.props.history.push(`/folders/${data.id}`);
			})
			.catch((error) => {
				this.setState({ apiError: error });
			});
	};
	render() {
		const { errors } = this.state;
		if (this.state.apiError) {
			return <p className="error">{this.state.apiError}</p>;
		}
		return (
			<form className="addFolderForm" onSubmit={this.handleSubmit} noValidate>
				<fieldset>
					<legend>New Folder</legend>
					<label htmlFor="name">Name</label>
					<input
						type="text"
						id="name"
						name="name"
						aria-label="Folder Name"
						required
						aria-required="true"
						aria-describedby="folderNameError"
						aria-invalid="true"
						onChange={this.handleChange}
					/>
					{errors.name.length > 0 && (
						<ValidationError message={errors.name} id={'folderNameError'} />
					)}
					<br />
					{/* BUG FIX: buttons inside a form default to type="submit";
					    without type="button" the Cancel button also submitted the form. */}
					<button
						type="button"
						className="btn-cancel"
						onClick={this.handleClickCancel}
					>
						Cancel
					</button>{' '}
					<button
						type="submit"
						className="btn-save"
						disabled={this.state.formValid === false}
					>
						Save Folder
					</button>
				</fieldset>
				{this.state.errorCount !== null ? (
					<p className="form-status">
						Form is {this.state.formValid ? 'complete ✅' : 'incomplete ❌'}
					</p>
				) : null}
			</form>
		);
	}
}
<file_sep>/src/NoteItem/NoteItem.test.js
import React from 'react';
import { shallow } from 'enzyme';
import toJson from 'enzyme-to-json';
import NoteItem from './NoteItem';
describe('Dummy NoteItem test', () => {
	// Placeholder smoke test; the assertion now matches its description
	// (it previously checked 2 * 2 === 4 under an "adding 1 + 2" label).
	test('adding 1 + 2 should return 3', () => {
		expect(1 + 2).toBe(3);
	});
});
<file_sep>/src/NoteItem/NoteItemAsFunctionalComponent.js
import React, { useContext } from 'react';
import { NavLink } from 'react-router-dom';
import { config } from '../config';
import NotefulContext from '../NotefulContext';
// this function 1st deletes via the API, then from state
// context.deleteNote = the update function, to update state in context
// must pass in PROPS to get history, location and match (AND props.note.id) AND pass in CONTEXT for deleteNote function because there is no other way for the handleClickDelete function
// - to know what is in Context
// - to know history, location and match
// DELETE the note via the API, then remove it from context state.
// props supplies note.id plus router history/location; context supplies
// deleteNote and addErrorNotes.
function handleClickDelete(props, context) {
	const noteId = props.note.id;
	fetch(config.NOTES_ENDPOINT + `/${noteId}`, {
		method: 'DELETE',
		headers: {
			'content-type': 'application/json',
			// CONSISTENCY FIX: every other API call in this app sends the
			// bearer token; the DELETE previously omitted it and would be
			// rejected by a protected API.
			Authorization: `Bearer ${config.API_KEY}`,
		},
	})
		.then((res) => {
			if (!res.ok) {
				// get the error message from the response,
				return res.json().then((error) => {
					// then throw it
					throw error;
				});
			}
			return res.json();
		})
		.then((data) => {
			// call the callback function when the request is successful
			// this is where the App component can remove it from state
			// ie. update the notes stored in state
			// which also updates the notes stored in context
			context.deleteNote(noteId);
			// if in Note detail, return to show all notes list
			if (props.location.pathname.includes('/notes/')) {
				props.history.push(`/`);
			}
		})
		.catch((error) => {
			context.addErrorNotes(error);
		});
}
// Renders one note row: a link to the note detail, its modified date,
// and a delete button. NOTE(review): this component grabs context twice —
// once via useContext (contextType, actually used by the delete handler)
// and once via NotefulContext.Consumer (whose `context` render-prop
// parameter is unused); confirm whether the Consumer wrapper can be dropped.
const NoteItem = (props) => {
	// need to grab NotefulContext (globals)
	const contextType = useContext(NotefulContext);
	return (
		<NotefulContext.Consumer>
			{/*
		Use the Consumer to grab values from context
		--- the value we're grabbing from context is the deleteNote function, we're passing it to the delete button
		QUESTION: what is context?
		ANSWER:
		is it equal to the object inside NotefulContext.js?
		an anonymous function with the parameter context automatically defined by {context => ... }; context is a variable containing the Provider's value object; this function is called by NotefulContext.Consumer
		NOTE: context could be any word since its just the parameter label
	*/}
			{(context) => (
				<div className="note-item">
					{/*
				THIS CAUSED A staticContent ERROR:
				<NavLink to={`/notes/${note.id}`} {...props}>
					<h3>{note.title}</h3>
				</NavLink> */}
					<NavLink to={{ pathname: `/notes/${props.note.id}`, props: props }}>
						<h3>{props.note.name}</h3>
					</NavLink>
					<div className="button-container">
						<span>
							Modified on{' '}
							<span className="note-datemod">
								{props.note.modified
									? props.note.modified.split('T', 1)[0]
									: ''}
							</span>
						</span>
						<span>
							<button
								className="btn-delete"
								onClick={() => {
									handleClickDelete(props, contextType);
								}}
							>
								-
							</button>
						</span>
					</div>
				</div>
			)}
		</NotefulContext.Consumer>
	);
};
export default NoteItem;
<file_sep>/README.md
# noteful-react-client

[View Live](https://noteful-app-asktami.vercel.app/)
## Description
Works with [https://github.com/asktami/noteful-api](https://github.com/asktami/noteful-api).
There are 3 routes: the main route, the dynamic folder route and a dynamic note route.
- Each route should have a header, main section, and a sidebar section
- Every route will have the same header section, the app's title should be a link to the main route
- The main route: - Should be displayed when the path is / - The main section will display all of the available notes - - Each note should show its name and modified date - The sidebar will display a list of folders with none selected
- The dynamic folder route: - Should be displayed when the path is /folder/<with-a-folder-id-here> - The folder-id will reference an id of one of the folders in state - The main section should display only the notes that are "in" the selected folder - The sidebar should display the folder list with the selected folder highlighted
- The dynamic note route: - Should be displayed when the path is /notes/<with-a-note-id-here> - The note-id will reference an id of one of the notes in state - The main section should display the currently selected notes name, modified date and content - The sidebar should display the folder of the currently selected note as well as a "back" button
- Use the React Context API instead of prop drilling
- Implement fetch requests to two endpoints when the application mounts: /folders and /notes. Store the response from these requests using a setState inside the main App component
- The API calls can be made to either a **local json server** or a **PostgreSQL database**
- Implement the delete button for each note in the list in the main route and folder route
- Implement the delete button on the note page, if the delete is successful, redirect to the / path
- Implement "add-folder" and "add-note" controlled component forms
- Implement "edit-folder" and "edit-note" controlled component forms
- A folder is a _parent_ record to _child_ note records
## Hosted on
- Zeit
## Setup
1. See [https://github.com/asktami/noteful-api](https://github.com/asktami/noteful-api) for instructions on installing the backend API
2. Clone this repo
3. In Terminal, change to the directory on your computer that contains this repo
4. Install dependencies: `npm install`
5. Environment:
- Prepare the environment file: `cp example.env .env`
- Replace values in `.env` with your custom values
- Replace the value for `REACT_APP_API_KEY` with the same API token value you use in your backend API
6. Start the app in a web browser: `npm start`
### To use a Noteful JSON server
1. To get your local copy of the Noteful JSON API, clone this project into your local projects folder:
```
git clone https://github.com/tomatau/noteful-json-server
cd ./noteful-json-server
npm install
npm start
Use db BACKUP.json, from this repo, as the datasource (renamed as db.json, replacing the existing db.json).
Ctrl-c to close the server
```
2. Change the backend API endpoints in `./src/config.js`
You can see documentation for the JSON server once it's started by visiting http://localhost:9090.
You can see all of the data currently stored in the server by visiting http://localhost:9090/db.
More info is at [https://github.com/typicode/json-server](https://github.com/typicode/json-server) - a fake REST API for the database.
---
## Create React App
This project was bootstrapped with [Create React App](https://github.com/facebook/create-react-app).
## Available Scripts
In the project directory, you can run:
### `npm start`
Runs the app in the development mode.<br>
Open [http://localhost:3000](http://localhost:3000) to view it in the browser.
The page will reload if you make edits.<br>
You will also see any lint errors in the console.
### `npm test`
Launches the test runner in the interactive watch mode.<br>
See the section about [running tests](https://facebook.github.io/create-react-app/docs/running-tests) for more information.
### `npm run build`
Builds the app for production to the `build` folder.<br>
It correctly bundles React in production mode and optimizes the build for the best performance.
The build is minified and the filenames include the hashes.<br>
Your app is ready to be deployed!
See the section about [deployment](https://facebook.github.io/create-react-app/docs/deployment) for more information.
### `npm run eject`
**Note: this is a one-way operation. Once you `eject`, you can’t go back!**
If you aren’t satisfied with the build tool and configuration choices, you can `eject` at any time. This command will remove the single build dependency from your project.
Instead, it will copy all the configuration files and the transitive dependencies (Webpack, Babel, ESLint, etc) right into your project so you have full control over them. All of the commands except `eject` will still work, but they will point to the copied scripts so you can tweak them. At this point you’re on your own.
You don’t have to ever use `eject`. The curated feature set is suitable for small and middle deployments, and you shouldn’t feel obligated to use this feature. However we understand that this tool wouldn’t be useful if you couldn’t customize it when you are ready for it.
## Learn More
You can learn more in the [Create React App documentation](https://facebook.github.io/create-react-app/docs/getting-started).
To learn React, check out the [React documentation](https://reactjs.org/).
### Code Splitting
This section has moved here: https://facebook.github.io/create-react-app/docs/code-splitting
### Analyzing the Bundle Size
This section has moved here: https://facebook.github.io/create-react-app/docs/analyzing-the-bundle-size
### Making a Progressive Web App
This section has moved here: https://facebook.github.io/create-react-app/docs/making-a-progressive-web-app
### Advanced Configuration
This section has moved here: https://facebook.github.io/create-react-app/docs/advanced-configuration
### Deployment
This section has moved here: https://facebook.github.io/create-react-app/docs/deployment
### `npm run build` fails to minify
This section has moved here: https://facebook.github.io/create-react-app/docs/troubleshooting#npm-run-build-fails-to-minify
<file_sep>/src/NotefulContext.js
import React from 'react';
// nothing else needs to be here because the context is defined in App.js,as contextObj - THIS IS ONLY TRUE IF NOT RUNNING TESTS
// IF RUNNING TESTS then need to copy over references to objects and methods:
// Default context shape. At runtime App.js supplies the real values via
// NotefulContext.Provider; these no-op defaults exist so components
// rendered without a Provider (e.g. in tests) can still call the
// context methods without crashing.
const NotefulContext = React.createContext({
	folders: [],
	notes: [],
	deleteNote: () => {},
	addNote: () => {},
	addFolder: () => {},
	addErrorNotes: () => {},
	addErrorFolders: () => {},
	notesError: () => {},
	updateFolders: () => {},
	updateNotes: () => {},
	handleClickDeleteFolder: () => {}
});
export default NotefulContext;
<file_sep>/src/AddNote/AddNote.js
import React from 'react';
import { config } from '../config';
import NotefulContext from '../NotefulContext';
import ValidationError from '../ValidationError';
/**
 * Controlled form for creating a new note inside a folder.
 *
 * The folder may be pre-selected via router location state. Per-field
 * validation messages live in `state.errors`; `errorCount` and `formValid`
 * are re-derived after every change. On submit the note is POSTed to the
 * API, added to context, and the app navigates to its folder.
 */
class AddNote extends React.Component {
	static contextType = NotefulContext;
	state = {
		apiError: null,
		formValid: false,
		errorCount: null,
		id_folder: this.props.location.state.id_folder || '', // use selected id_folder passed in via NavLink
		name: '',
		content: '',
		errors: {
			id_folder:
				!this.props.location.state.id_folder && 'You must select a folder', // error only if no id_folder passed in via NavLink
			name: 'You must enter a note title',
			content: 'You must enter a description',
		},
	};
	// to see addNote apiError in ui:
	/*
	state = {
		apiError: 'addNote apiError errorMessage',
		...
	*/
	// Recount non-empty error messages and derive formValid from the total.
	updateErrorCount = () => {
		let errors = this.state.errors;
		let count = 0;
		Object.values(errors).forEach((val) => {
			if (val.length > 0) {
				count++;
			}
		});
		this.setState({ errorCount: count });
		let valid = count === 0 ? true : false;
		this.setState({ formValid: valid });
	};
	// Validate one field and store its (possibly empty) error message.
	validateField = (name, value) => {
		let err = '';
		if (name === 'name') {
			if (value.length === 0) {
				err = 'Note title is required';
			} else if (value.length < 3) {
				err = 'Note title must be at least 3 characters long';
			}
		}
		if (name === 'id_folder') {
			if (value.length === 0) {
				err = 'You must select a folder';
			}
		}
		if (name === 'content') {
			if (value.length === 0) {
				err = 'You must enter a description';
			} else if (value.length < 5) {
				err = 'The description must be at least 5 characters long';
			}
		}
		const { errors } = { ...this.state };
		errors[name] = err;
		this.setState({ errors });
	};
	handleChange = (event) => {
		const { name, value } = event.target;
		// NOTE(review): trimming on every keystroke keeps interior spaces out
		// of state; the submit reads the raw inputs, so this only affects the
		// controlled <select>. Confirm intended.
		this.setState({ [name]: value.trim() });
		this.validateField(name, value.trim());
		this.updateErrorCount();
	};
	handleClickCancel = () => {
		this.props.history.push('/');
	};
	// POST the new note, add it to context, then show its folder.
	handleSubmit = (e) => {
		e.preventDefault();
		// do NOT submit form if any errors
		if (this.state.errorCount > 0) return;
		// get the form fields from the event
		const { id_folder, name, content } = e.target;
		const note = {
			id_folder: id_folder.value,
			name: name.value,
			content: content.value,
			// BUG FIX: was `new Date(Date.UTC())` — Date.UTC() with no
			// arguments is NaN, producing an Invalid Date that serializes
			// to null. Matches EditNote, which uses `new Date()`.
			modified: new Date(),
		};
		this.setState({ apiError: null });
		fetch(config.NOTES_ENDPOINT, {
			method: 'POST',
			body: JSON.stringify(note),
			headers: {
				'content-type': 'application/json',
				Authorization: `Bearer ${config.API_KEY}`,
			},
		})
			.then((res) => {
				if (!res.ok) {
					// get the error message from the response,
					return res.json().then((error) => {
						// then throw it
						throw error;
					});
				}
				return res.json();
			})
			.then((data) => {
				// BUG FIX: capture the folder id BEFORE the field is cleared
				// below — the redirect previously used the already-cleared
				// input and navigated to `/folders/`.
				const folderId = id_folder.value;
				// clear form values
				id_folder.value = '';
				name.value = '';
				content.value = '';
				this.context.addNote(data);
				// return to list:
				this.props.history.push(`/folders/${folderId}`);
			})
			.catch((error) => {
				this.setState({ apiError: error });
			});
	};
	render() {
		const { errors } = this.state;
		const folders = this.context.folders;
		if (this.state.apiError) {
			return <p className="error">{this.state.apiError}</p>;
		}
		return (
			<form onSubmit={this.handleSubmit}>
				<fieldset>
					<legend>New Note</legend>
					<label htmlFor="id_folder">Folder</label>
					<select
						id="id_folder"
						name="id_folder"
						aria-label="Folder Id"
						required
						aria-required="true"
						aria-describedby="id_folderError"
						aria-invalid="true"
						value={this.state.id_folder}
						onChange={this.handleChange}
					>
						<option value="">Select a folder</option>
						{folders.map((folder) => (
							<option key={folder.id} value={folder.id}>
								{folder.name}
							</option>
						))}
					</select>
					{errors.id_folder.length > 0 && (
						<ValidationError id={'id_folderError'} message={errors.id_folder} />
					)}
					<label htmlFor="name">Title</label>
					<input
						type="text"
						id="name"
						name="name"
						onChange={this.handleChange}
						aria-label="Note Title"
						required
						aria-required="true"
						aria-describedby="noteTitleError"
						aria-invalid="true"
					/>
					{errors.name.length > 0 && (
						<ValidationError id={'noteTitleError'} message={errors.name} />
					)}
					<label htmlFor="content">Description</label>
					<textarea
						id="content"
						name="content"
						onChange={this.handleChange}
						aria-label="Note Description"
						required
						aria-required="true"
						aria-describedby="noteDescriptionError"
						aria-invalid="true"
					/>
					{errors.content.length > 0 && (
						<ValidationError
							id={'noteDescriptionError'}
							message={errors.content}
						/>
					)}
					<br />
					{/* BUG FIX: buttons inside a form default to type="submit";
					    without type="button" the Cancel button also submitted the form. */}
					<button
						type="button"
						className="btn-cancel"
						onClick={this.handleClickCancel}
					>
						Cancel
					</button>{' '}
					<button
						type="submit"
						className="btn-save"
						disabled={this.state.formValid === false}
					>
						Save Note
					</button>
				</fieldset>
				{this.state.errorCount !== null ? (
					<p className="form-status">
						Form is {this.state.formValid ? 'complete ✅' : 'incomplete ❌'}
					</p>
				) : null}
			</form>
		);
	}
}
export default AddNote;
<file_sep>/src/App-ORIGINAL.js
import React, { useState, useEffect } from 'react';
import { Route } from 'react-router-dom';
import './App.css';
import { config } from './config';
// when using dummyStore text datafile:
// import dummyStore from './dummy-store';
// using React.Context:
import NotefulContext from './NotefulContext';
import Header from './Header/Header';
import Footer from './Footer/Footer';
import FolderList from './FolderList/FolderList';
import FolderItem from './FolderItem/FolderItem';
import NoteList from './NoteList/NoteList';
import Note from './Note/Note';
import AddFolder from './AddFolder/AddFolder';
import AddNote from './AddNote/AddNote';
import EditFolder from './EditFolder/EditFolder';
import EditNote from './EditNote/EditNote';
// Route table: each entry pairs a path with the components rendered into
// the header, aside (sidebar), and main section slots in App's layout.
// The final `/:any/:any/:any` entry is a catch-all for hand-edited URLs;
// NOTE(review): it reuses the param name `:any` three times — confirm the
// router version tolerates duplicate param names.
const routes = [
	{
		path: '/',
		exact: true,
		header: Header,
		aside: FolderList,
		section: NoteList,
	},
	{
		path: '/folders/:id_folder',
		exact: true,
		header: Header,
		aside: FolderList,
		section: NoteList,
	},
	{
		path: '/notes/:noteId',
		exact: true,
		header: Header,
		aside: FolderItem,
		section: Note,
	},
	{
		path: '/add-folder',
		exact: true,
		header: Header,
		aside: null,
		section: AddFolder,
	},
	{
		path: '/add-note',
		exact: true,
		header: Header,
		aside: null,
		section: AddNote,
	},
	{
		path: '/edit-note/:noteId',
		exact: true,
		header: Header,
		aside: null,
		section: EditNote,
	},
	{
		path: '/edit-folder/:id_folder',
		exact: true,
		header: Header,
		aside: null,
		section: EditFolder,
	},
	{
		path: '/delete-folder/:id_folder',
		exact: true,
		header: Header,
		aside: FolderList,
		section: NoteList,
	},
	{
		path: '/:any/:any/:any',
		exact: true,
		header: Header,
		aside: () => null,
		section: () => 'Do not edit the url!',
	},
];
const App = (props) => {
// set default state variable values with hooks
const [folders, setFolders] = useState([]);
const [notes, setNotes] = useState([]);
const [foldersError, setFoldersError] = useState(null);
const [notesError, setNotesError] = useState(null);
// to stop submit of EditFolder form since deleteFolder button is inside the form
const [deletedFolderId, setDeletedFolderId] = useState(null);
// to see foldersError in ui:
// const [foldersError, setFoldersError] = useState({ value: 'foldersAPI errorMessage' });
// to see notesError in ui:
// const [notesError, setNotesError] = useState({value: 'notesAPI errorMessage'});
// deleteNotes updates state
// and inside render context is updated with values from state
// then context is used to display values in FolderList and NoteList
/*
After making successful a DELETE API request, you can use a this.state.notes.filter method along with setState to remove a note from state and update context.
*/
const deleteNote = (noteId) => {
const newNotes = notes.filter((note) => note.id !== noteId);
setNotes(newNotes);
};
const deleteFolder = (id_folder) => {
const newFolders = folders.filter((folder) => folder.id !== id_folder);
setFolders(newFolders);
};
const addNote = (note) => {
setNotes([...notes, note]);
};
const addFolder = (folder) => {
setFolders([...folders, folder]);
};
const addErrorNotes = (error) => {
setNotesError(error);
};
const addErrorFolders = (error) => {
setFoldersError(error);
};
/*
// NOTE NOTE NOTE
// Pattern: every route is responsible for loading the data it needs from scratch
// So the component rendering the /detail/:id route needs to fetch data for itself, including the correct id to use from the url, via the props React Router provides.
*/
const getFolders = () => {
fetch(config.FOLDERS_ENDPOINT, {
method: 'GET',
headers: {
'content-type': 'application/json',
Authorization: `Bearer ${config.API_KEY}`,
},
})
.then((res) => {
if (!res.ok) {
throw new Error(res.status);
}
return res.json();
})
.then(setFolders)
// passes res to setFolders function
// shortcut which equals .then(res => this.setFolders(res))
.catch((error) => setFoldersError(error));
};
const getNotes = () => {
fetch(config.NOTES_ENDPOINT, {
method: 'GET',
headers: {
'content-type': 'application/json',
Authorization: `Bearer ${config.API_KEY}`,
},
})
.then((res) => {
if (!res.ok) {
throw new Error(res.status);
}
return res.json();
})
.then(setNotes)
// passes res to setNotes function
// shortcut which equals .then(res => this.setNotes(res))
.catch((error) => setNotesError(error));
};
const updateFolders = (updatedFolder) => {
const newFolders = folders.map((folder) =>
folder.id !== updatedFolder.id ? folder : updatedFolder
);
setFolders(newFolders);
};
const updateNotes = (updatedNote) => {
const newNotes = notes.map((note) =>
note.id !== updatedNote.id ? note : updatedNote
);
setNotes(newNotes);
};
// to stop submit of EditFolder form since deleteFolder button is inside the form
const clearDeletedFolderId = () => {
setDeletedFolderId(null);
};
const handleClickDeleteFolder = (id_folder, props) => {
// to stop submit of EditFolder form since deleteFolder button is inside the form
setDeletedFolderId(id_folder);
fetch(config.FOLDERS_ENDPOINT + `/${id_folder}`, {
method: 'DELETE',
headers: {
'content-type': 'application/json',
Authorization: `Bearer ${config.API_KEY}`,
},
})
.then((res) => {
// I think b/c cors, typecode gives a res.status = 404 and an EMPTY error object when try to delete note so,
if (!res.ok || res.status === '404') {
// get the error message from the response,
return res.json().then((error) => {
// then throw it
// throw res.status instead of error b/c error is an empty object
throw res.status;
});
}
return res.json();
})
.then((data) => {
// call the callback function when the request is successful
// this is where the App component can remove it from state
// ie. update the folders stored in state
// which also updates the folders stored in context
deleteFolder(id_folder);
// remove id_folder from URL
props.history.push(`/`);
})
.catch((error) => {
// WORKAROUND to handle EMPTY error object and res.status = 404
if (error !== 404) {
addErrorFolders(error);
}
if (error === 404) {
deleteFolder(id_folder);
// remove id_folder from URL
props.history.push(`/`);
}
});
};
// only load ONCE, to fetch initial API data
useEffect(() => {
getFolders();
getNotes();
}, []); /* add an empty array as the 2nd argument to have this run only 1x after the initial render */
// create object to update the values stored in NotefulContext
const contextObj = {
notes: notes,
folders: folders,
deleteNote: deleteNote,
addNote: addNote,
addFolder: addFolder,
addErrorNotes: addErrorNotes,
addErrorFolders: addErrorFolders,
notesError: notesError,
updateFolders: updateFolders,
updateNotes: updateNotes,
handleClickDeleteFolder: handleClickDeleteFolder,
deletedFolderId: deletedFolderId,
clearDeletedFolderId: clearDeletedFolderId,
};
return (
<>
<Header />
{/* actually update the values stored in NotefulContext by passing contextObj into value
Use the Provider to make values available to all children/grandchildren/subcomponents
See: https://reactjs.org/docs/context.html#caveats
-- the code below will re-render all consumers every time the Provider re-renders because a new object is always created for value
*/}
<NotefulContext.Provider value={contextObj}>
<main>
<div className="aside">
{foldersError && <p className="error">{foldersError.value}</p>}
{routes.map(({ path, exact, aside: A }) => (
<Route key={path} path={path} exact={exact} component={A} />
))}
</div>
<article>
{/* NOTE:
CAN use render props to pass unfinishedMessage prop via route
AND
to pass location, match and history props to the component so that in the component I have access to the history object to push a new location into
render={props => (
<S
{...props}
unfinishedMessage={unfinishedMessage} />
)}
can also pass unfinishedMessage via Context and do:
component={S}
---- ALTERNATIVE:
{routes.map(({ path, exact, section: S }) => (
<Route
key={path}
path={path}
exact={exact}
render={props => <S {...props} />}
/>
))}
*/}
{notesError && <p className="error">{notesError.value}</p>}
{routes.map(({ path, exact, section: S }) => (
<Route key={path} path={path} exact={exact} component={S} />
))}
</article>
</main>
</NotefulContext.Provider>
<Footer />
</>
);
};
export default App;
| 0aac7d3c2753dff55b8b345cea3c27527cffc247 | [
"JavaScript",
"Markdown"
] | 12 | JavaScript | asktami/noteful-app | 76ea8c5a60289b7c68f3f9d4e21dfdd18ac017f6 | 7498afa68a2c2ece8349870312e622fa4d847abe |
refs/heads/master | <file_sep>package com.azoft.json2dart.view;
import com.intellij.uiDesigner.core.GridConstraints;
import com.intellij.uiDesigner.core.GridLayoutManager;
import com.intellij.uiDesigner.core.Spacer;
import org.fife.ui.rsyntaxtextarea.RSyntaxTextArea;
import org.fife.ui.rsyntaxtextarea.SyntaxConstants;
import org.fife.ui.rsyntaxtextarea.Theme;
import javax.swing.*;
import java.awt.*;
import java.io.IOException;
/**
 * Swing form for the JSON-to-Dart generator dialog: a syntax-highlighted
 * JSON editor, a root-file-name field, a "final fields" checkbox, and a
 * Generate button. Layout code is generated by the IntelliJ GUI Designer
 * and must not be edited by hand.
 */
public class Json2DartForm {
    public JPanel rootView;
    public RSyntaxTextArea editor;
    public JButton generateButton;
    public JCheckBox finalFields;
    public JTextField fileName;
    public JLabel fileNameLabel;
    // Callback invoked when the Generate button is pressed.
    private OnGenerateClicked listener;
    /**
     * Registers the Generate-button callback. On click the listener
     * receives the root file name (defaulting to "response" when the field
     * is absent), the editor's JSON text, and the checkbox state.
     */
    public void setOnGenerateListener(OnGenerateClicked listener) {
        this.listener = listener;
        generateButton.addActionListener(action -> {
            if (this.listener != null) {
                this.listener.onClicked(
                        fileName != null ? fileName.getText() : "response",
                        editor != null ? editor.getText() : "",
                        finalFields != null && finalFields.isSelected()
                );
            }
        });
    }
    /**
     * Creates the custom-instantiated components referenced by the GUI
     * Designer form: the JSON editor with code folding and (best-effort)
     * the bundled monokai theme.
     */
    private void createUIComponents() {
        editor = new RSyntaxTextArea();
        editor.setSyntaxEditingStyle(SyntaxConstants.SYNTAX_STYLE_JSON);
        editor.setCodeFoldingEnabled(true);
        try {
            Theme theme = Theme.load(getClass().getResourceAsStream(
                    "/org/fife/ui/rsyntaxtextarea/themes/monokai.xml"));
            theme.apply(editor);
        } catch (IOException ioe) {
            ioe.printStackTrace();
        }
    }
    {
        // GUI initializer generated by IntelliJ IDEA GUI Designer
        // >>> IMPORTANT!! <<<
        // DO NOT EDIT OR ADD ANY CODE HERE!
        $$$setupUI$$$();
    }
    /**
     * Method generated by IntelliJ IDEA GUI Designer
     * >>> IMPORTANT!! <<<
     * DO NOT edit this method OR call it in your code!
     *
     * @noinspection ALL
     */
    private void $$$setupUI$$$() {
        createUIComponents();
        rootView = new JPanel();
        rootView.setLayout(new GridLayoutManager(2, 4, new Insets(0, 0, 0, 0), -1, -1));
        rootView.setPreferredSize(new Dimension(500, 500));
        final JScrollPane scrollPane1 = new JScrollPane();
        rootView.add(scrollPane1, new GridConstraints(0, 0, 1, 4, GridConstraints.ANCHOR_CENTER, GridConstraints.FILL_BOTH, GridConstraints.SIZEPOLICY_CAN_SHRINK | GridConstraints.SIZEPOLICY_WANT_GROW, GridConstraints.SIZEPOLICY_CAN_SHRINK | GridConstraints.SIZEPOLICY_WANT_GROW, null, null, null, 0, false));
        scrollPane1.setViewportView(editor);
        generateButton = new JButton();
        generateButton.setText("Generate");
        rootView.add(generateButton, new GridConstraints(1, 3, 1, 1, GridConstraints.ANCHOR_CENTER, GridConstraints.FILL_HORIZONTAL, GridConstraints.SIZEPOLICY_CAN_SHRINK | GridConstraints.SIZEPOLICY_CAN_GROW, GridConstraints.SIZEPOLICY_FIXED, null, null, null, 0, false));
        finalFields = new JCheckBox();
        finalFields.setText("Make fields final");
        rootView.add(finalFields, new GridConstraints(1, 0, 1, 1, GridConstraints.ANCHOR_WEST, GridConstraints.FILL_NONE, GridConstraints.SIZEPOLICY_CAN_SHRINK | GridConstraints.SIZEPOLICY_CAN_GROW, GridConstraints.SIZEPOLICY_FIXED, null, null, null, 0, false));
        fileName = new JTextField();
        rootView.add(fileName, new GridConstraints(1, 2, 1, 1, GridConstraints.ANCHOR_WEST, GridConstraints.FILL_HORIZONTAL, GridConstraints.SIZEPOLICY_WANT_GROW, GridConstraints.SIZEPOLICY_FIXED, null, new Dimension(150, -1), null, 0, false));
        fileNameLabel = new JLabel();
        fileNameLabel.setText("Root file name:");
        rootView.add(fileNameLabel, new GridConstraints(1, 1, 1, 1, GridConstraints.ANCHOR_WEST, GridConstraints.FILL_NONE, GridConstraints.SIZEPOLICY_FIXED, GridConstraints.SIZEPOLICY_FIXED, null, null, null, 0, false));
    }
    /**
     * @noinspection ALL
     */
    public JComponent $$$getRootComponent$$$() {
        return rootView;
    }
    /** Callback contract for the Generate button. */
    public interface OnGenerateClicked {
        void onClicked(String fileName, String json, Boolean finalFields);
    }
}
<file_sep>package com.azoft.json2dart.delegates
import com.intellij.notification.*
import com.intellij.openapi.application.ApplicationManager
import com.intellij.openapi.project.ProjectManager
import com.intellij.openapi.ui.Messages
class MessageDelegate {
companion object {
private const val GROUP_LOG = "JSON2DART_GENERATOR_LOG"
}
private val logGroup =
NotificationGroup(GROUP_LOG, NotificationDisplayType.NONE, true)
fun onException(throwable: Throwable) {
val message = throwable.message ?: "Something went wrong"
sendNotification(
logGroup.createNotification(message , NotificationType.INFORMATION)
)
showMessage(message, "Error")
}
fun showMessage(message: String) {
showMessage(message, "")
}
fun log(message: String) {
sendNotification(
logGroup.createNotification(message, NotificationType.INFORMATION)
)
}
private fun sendNotification(notification: Notification) {
ApplicationManager.getApplication().invokeLater {
val projects = ProjectManager.getInstance().openProjects
Notifications.Bus.notify(notification, projects[0])
}
}
    // Shows a modal dialog with a single "OK" button, scheduled on the EDT.
    private fun showMessage(message: String, header: String) {
        ApplicationManager.getApplication().invokeLater {
            Messages.showDialog(message, header, arrayOf("OK"), -1, null)
        }
    }
}<file_sep>package com.azoft.json2dart.delegates.generator
import com.azoft.json2dart.delegates.generator.data.NodeInfo
import com.azoft.json2dart.delegates.generator.data.NodeWrapper
import com.fasterxml.jackson.databind.JsonNode
import com.fasterxml.jackson.databind.node.ArrayNode
import com.fasterxml.jackson.module.kotlin.jacksonObjectMapper
import java.io.*
import java.util.*
class DartClassGenerator {
    /**
     * Generates Dart model classes from the [source] JSON string into the
     * [destiny] directory. [rootName] names the root file/class; [isFinal]
     * marks generated fields as `final`.
     *
     * Works iteratively: nested objects are pushed onto a stack and each is
     * written to its own `<snake_case>.dart` file.
     *
     * @throws SyntaxException if [source] is not valid JSON.
     */
    fun generateFromJson(source: String, destiny: File, rootName: String, isFinal: Boolean) {
        val nodesToProcessStack = Stack<NodeWrapper>()
        try {
            nodesToProcessStack.add(
                NodeWrapper(
                    node = jacksonObjectMapper().readTree(source),
                    fieldName = rootName,
                    sneakCaseName = rootName,
                    className = extractRootClassName(rootName)
                )
            )
        } catch (e: Exception) {
            throw SyntaxException()
        }
        val packageTemplate = extractPackageName(destiny)
        val finalMode = if (isFinal) "final " else ""
        var nodeWrapper: NodeWrapper
        var nodeInfo: NodeInfo
        var buffer: FileOutputStream
        var target: FileOutputStream
        var constructorStringBuilder: StringBuilder
        var serializatorStringBuilder: StringBuilder
        val importsList = mutableListOf<String>()
        var bufferFile: File
        while (nodesToProcessStack.isNotEmpty()) {
            nodeWrapper = nodesToProcessStack.pop()
            // The class body goes to a temporary "__<name>.dart" buffer so the
            // import lines (discovered while walking fields) can be written to
            // the top of the real file before the buffer is appended.
            bufferFile = File(destiny, "__${nodeWrapper.sneakCaseName}.dart")
            buffer = FileOutputStream(bufferFile)
            target = FileOutputStream(File(destiny, "${nodeWrapper.sneakCaseName}.dart"))
            constructorStringBuilder = createConstructorStart(nodeWrapper)
            serializatorStringBuilder = createSerializatorStart()
            buffer.writeText("\nclass ${nodeWrapper.className} {\n\n")
            try {
                nodeWrapper.node?.fields()?.forEach {
                    nodeInfo = processNode(buffer, it.value, it.key, finalMode)
                    nodeInfo.node?.apply {
                        // Nested object: queue it for generation and import its file.
                        nodesToProcessStack.add(this)
                        target.writeText("import '$packageTemplate$sneakCaseName.dart';\n")
                    }
                    serializatorStringBuilder.append(nodeInfo.mapSerialization)
                    constructorStringBuilder.append("\t\t${it.key} = ${nodeInfo.mapDeserialization}")
                }
                // Strip the trailing ",\n" from the last initializer, close with ";".
                constructorStringBuilder.apply {
                    deleteCharAt(length - 1).deleteCharAt(length - 1).append(";\n")
                }
                serializatorStringBuilder
                    .append("\t\treturn data;\n")
                    .append("\t}\n")
                buffer.writeText(constructorStringBuilder.toString()).writeText("\n")
                buffer.writeText(serializatorStringBuilder.toString())
                buffer.writeText("}")
                buffer.close()
                mergeBufferAndTarget(target, bufferFile)
            } finally {
                buffer.close()
                target.close()
            }
            importsList.clear()
        }
    }
private fun processNode(
fout: FileOutputStream, node: JsonNode, name: String, finalMode: String
): NodeInfo {
val nodeInfo = extractNodeInfo(node, name)
fout.writeText(" $finalMode${nodeInfo.stringRepresentation} $name;\n")
return nodeInfo
}
    /**
     * Maps a Jackson [node] to its Dart type plus (de)serialization snippets.
     * The branches are mutually exclusive Jackson type predicates; unknown
     * node types fall back to plain `Object`.
     */
    private fun extractNodeInfo(node: JsonNode, name: String): NodeInfo {
        return when {
            node.isDouble || node.isFloat || node.isBigDecimal ->
                NodeInfo("double", name)
            node.isShort || node.isInt || node.isLong || node.isBigInteger ->
                NodeInfo("int", name)
            node.isBoolean ->
                NodeInfo("bool", name)
            node.isTextual ->
                NodeInfo("String", name)
            node.isArray ->
                extractArrayData(node as ArrayNode, name)
            node.isObject ->
                NodeWrapper(node, name).toObjectNodeInfo()
            else -> NodeInfo("Object", name)
        }
    }
    /**
     * Builds the NodeInfo for a JSON array field.
     * The element type is inferred from the FIRST element only — assumes arrays
     * are homogeneous (NOTE(review): heterogeneous arrays are not detected).
     * An empty array falls back to `List<Object>`.
     */
    private fun extractArrayData(node: ArrayNode, name: String): NodeInfo {
        val iterator = node.iterator()
        if (!iterator.hasNext()) {
            return NodeInfo("List<Object>", name)
        }
        val elementInfo = extractNodeInfo(iterator.next(), name)
        return NodeInfo(
            "List<${elementInfo.stringRepresentation}>",
            elementInfo.node,
            elementInfo.buildListDeserialization(name),
            elementInfo.buildListSerialization(name)
        )
    }
private fun createConstructorStart(nodeWrapper: NodeWrapper) =
StringBuilder()
.append("\n\t${nodeWrapper.className}.fromJsonMap(Map<String, dynamic> map): \n")
private fun createSerializatorStart() =
StringBuilder()
.append("\tMap<String, dynamic> toJson() {\n")
.append("\t\tfinal Map<String, dynamic> data = new Map<String, dynamic>();\n")
private fun mergeBufferAndTarget(targetStream: FileOutputStream, bufferFile: File) {
BufferedReader(FileReader(bufferFile)).useLines { lines ->
lines.forEach {
targetStream.writeText(it).writeText("\n")
}
}
bufferFile.delete()
}
private fun extractPackageName(dir: File): String {
val absolutePath = dir.absolutePath
val splitted = absolutePath.split(if (isWindows()) "\\" else "/")
val libIndex = splitted.indexOf("lib")
if (libIndex == -1) {
throw NotAFlutterProject()
}
val fold = splitted
.subList(libIndex + 1, splitted.size)
.fold(StringBuilder()) { builder, s -> builder.append(s).append("/") }
return "package:${splitted[libIndex - 1]}/$fold"
}
private fun FileOutputStream.writeText(text: String): FileOutputStream {
write(text.toByteArray(Charsets.UTF_8))
return this
}
private fun NodeWrapper.toObjectNodeInfo(): NodeInfo {
val field = this.fieldName
return NodeInfo(
className,
this,
"if(map['$fieldName'] == null ? null : $className.fromJsonMap(map[\'$fieldName\']),\n",
"\t\tdata['$field'] = $field == null ? null : $field.toJson();\n"
)
}
    // Dart expression that rebuilds a list field from the JSON map.
    // Object lists guard against a missing key; primitive lists assume the key
    // exists and is non-null (NOTE(review): no null guard here — verify intended).
    private fun NodeInfo.buildListDeserialization(rawName: String) =
        if (node != null) {
            "map[\'${node.fieldName}\'] == null ? [] : List<${node.className}>.from(map[\"${node.fieldName}\"]" +
                    ".map((it) => ${node.className}.fromJsonMap(it))),\n"
        } else {
            "List<$stringRepresentation>.from(map[\"$rawName\"]),\n"
        }
    // Dart statement that serializes a list field into the `data` map.
    // Object lists map each element through toJson(); primitive lists are
    // assigned directly.
    private fun NodeInfo.buildListSerialization(rawName: String) =
        if (node != null) {
            "\t\tdata['$rawName'] = ${node.fieldName} != null ? \n" +
                    "\t\t\tthis.${node.fieldName}.map((v) => v.toJson()).toList()\n" +
                    "\t\t\t: null;\n"
        } else {
            "\t\tdata['$rawName'] = $rawName;\n"
        }
private fun extractRootClassName(rootFileName: String): String {
var needUp = true
val builder = StringBuilder()
val i = rootFileName.iterator()
var element: Char
while (i.hasNext()) {
element = i.nextChar()
if (element == '_') {
needUp = true
continue
}
if (needUp) {
element = element.toUpperCase()
needUp = false
}
builder.append(element)
}
return builder.toString()
}
}<file_sep>package com.azoft.json2dart
import com.azoft.json2dart.delegates.generator.GeneratorDelegate
import com.azoft.json2dart.view.Json2DartForm
import com.intellij.openapi.actionSystem.AnAction
import com.intellij.openapi.actionSystem.AnActionEvent
import com.intellij.openapi.actionSystem.CommonDataKeys
import com.intellij.openapi.ui.DialogBuilder
class JsonToDartAction(
private val generatorDelegate: GeneratorDelegate = GeneratorDelegate()
) : AnAction("Convert json to dart") {
    // Opens the JSON input form in a dialog. Generation starts when the user
    // presses "Generate"; the listener disposes the dialog first so the UI is
    // released before the background task runs.
    override fun actionPerformed(event: AnActionEvent) {
        DialogBuilder().apply {
            val form = Json2DartForm()
            form.setOnGenerateListener { fileName, json, finalFields ->
                window.dispose()
                generatorDelegate.runGeneration(event, fileName, json, finalFields)
            }
            setCenterPanel(form.rootView)
            setTitle("Json2Dart")
            removeAllActions()
            show()
        }
    }
override fun update(e: AnActionEvent) {
super.update(e)
e.presentation.isEnabledAndVisible = e.getData(CommonDataKeys.VIRTUAL_FILE)?.isDirectory ?: false
}
}<file_sep>package com.azoft.json2dart.delegates.generator
/**
 * Upper-cases the first character of [value] ("user" -> "User").
 * Returns [value] unchanged when it is empty — the original indexed value[0]
 * unconditionally and threw StringIndexOutOfBoundsException on empty input.
 */
fun toClassName(value: String) =
    if (value.isEmpty()) value
    else value[0].toUpperCase() + value.slice(1 until value.length)
/**
 * Converts camelCase/PascalCase [value] to snake_case ("userName" -> "user_name").
 * A leading upper-case letter no longer produces a leading underscore — the
 * original turned "UserName" into "_user_name", yielding files named
 * "_user_name.dart".
 */
fun toSneakCase(value: String): String {
    return value.fold(StringBuilder()) { builder, c ->
        if (c.isUpperCase()) {
            if (builder.isNotEmpty()) {
                builder.append("_")
            }
            builder.append(c.toLowerCase())
        } else {
            builder.append(c)
        }
    }.toString()
}
/** True when the JVM reports a Windows operating system. */
fun isWindows(): Boolean =
    "Windows" in System.getProperty("os.name")
import java.io.IOException
/** Thrown when the supplied JSON text cannot be parsed. */
class SyntaxException: Exception("Wrong json syntax")
/** Thrown when a generated file cannot be read or written. */
class FileIOException: IOException("Cannot read or write file")
/** Thrown when no 'lib' folder is found, i.e. the target is not a Flutter project. */
class NotAFlutterProject: Exception("Oooops! Looks like plugin cannot find 'lib' folder. This is a flutter project, isn't it?")
import com.azoft.json2dart.delegates.MessageDelegate
import com.intellij.ide.projectView.ProjectView
import com.intellij.openapi.actionSystem.AnActionEvent
import com.intellij.openapi.actionSystem.CommonDataKeys
import com.intellij.openapi.actionSystem.LangDataKeys
import com.intellij.openapi.progress.ProgressIndicator
import com.intellij.openapi.progress.ProgressManager
import com.intellij.openapi.progress.Task
import java.io.File
import java.io.IOException
class GeneratorDelegate(
private val messageDelegate: MessageDelegate = MessageDelegate()
) {
    /**
     * Runs Dart class generation as a non-cancellable background task.
     * IO failures are reported as [FileIOException]; any other error is
     * forwarded as-is. The project view and target directory are refreshed
     * afterwards regardless of outcome.
     */
    fun runGeneration(event: AnActionEvent, fileName: String, json: String, finalFields: Boolean) {
        ProgressManager.getInstance().run(
            object : Task.Backgroundable(
                event.project, "Dart file generating", false
            ) {
                override fun run(indicator: ProgressIndicator) {
                    try {
                        DartClassGenerator().generateFromJson(
                            json,
                            File(event.getData(CommonDataKeys.VIRTUAL_FILE)?.path),
                            // Fall back to "response" when the user left the name blank.
                            fileName.takeIf { it.isNotBlank() } ?: "response",
                            finalFields
                        )
                        messageDelegate.showMessage("Dart class has been generated")
                    } catch (e: Throwable) {
                        when(e) {
                            is IOException -> messageDelegate.onException(FileIOException())
                            else -> messageDelegate.onException(e)
                        }
                    } finally {
                        indicator.stop()
                        ProjectView.getInstance(event.project).refresh()
                        event.getData(LangDataKeys.VIRTUAL_FILE)?.refresh(false, true)
                    }
                }
            }
        )
    }
}<file_sep>package com.azoft.json2dart.delegates.generator.data
/**
 * Describes how a single JSON node maps to Dart: the Dart type name, an
 * optional nested object ([node]), and the generated (de)serialization lines.
 */
data class NodeInfo(
    val stringRepresentation: String,
    val node: NodeWrapper?,
    val mapDeserialization: String?,
    val mapSerialization: String?
) {
    // Convenience constructor for primitive fields: plain map access on read,
    // direct assignment on write.
    constructor(stringRepresentation: String, name: String):
            this(
                stringRepresentation,
                null,
                "map[\'$name\'],\n",
                "\t\tdata['$name'] = $name;\n"
            )
}<file_sep># json2dart-converter
This plugin allows you to generate Dart classes from raw JSON.
Just right-click on the target package, pick "New", and choose "Convert json to dart".
Link to the jetbrains repository:
https://plugins.jetbrains.com/plugin/11460-json2dart
<file_sep>package com.azoft.json2dart.delegates.generator.data
import com.azoft.json2dart.delegates.generator.toClassName
import com.azoft.json2dart.delegates.generator.toSneakCase
import com.fasterxml.jackson.databind.JsonNode
/**
 * Wraps a JSON object node queued for class generation, together with the
 * field name it came from and the derived snake_case / PascalCase variants.
 */
data class NodeWrapper(
    val node: JsonNode?,
    val fieldName: String,
    val sneakCaseName: String = toSneakCase(fieldName),
    val className: String = toClassName(fieldName)
)
"Markdown",
"Java",
"Kotlin"
] | 10 | Java | cuong292/json2dart-converter | ad412873499758b176fa1a1c2ccb062b1eea0755 | 577f24b7b395e1f3aa7fe20d52cf8023fd3c8e2b |
refs/heads/main | <repo_name>ahsai001/custom-speaker-murottal<file_sep>/src/main.cpp
// #include <sdkconfig.h>
// #define CONFIG_FREERTOS_IDLE_TIME_BEFORE_SLEEP 2 // must be > 1 to compile!!
// #define CONFIG_FREERTOS_USE_TICKLESS_IDLE 1
// #define CONFIG_PM_ENABLE 1
// #define CONFIG_PM_USE_RTC_TIMER_REF 1
#include <Arduino.h>
//#include <SPI.h>
//#include <FS.h>
#include <WiFi.h>
#include <SPIFFS.h>
//#include <SD.h>
#include <WiFiClientSecure.h>
//#include <WiFiClient.h>
#include <WebServer.h>
#include <WebSocketsServer.h>
#include <ESPmDNS.h>
#include <DMD32.h>
#include "fonts/SystemFont5x7.h"
#include "fonts/Arial_black_16.h"
#include "HTTPClient.h"
#include "ArduinoJson.h"
//#include <nvs_flash.h>
#include <Preferences.h>
//#include <FirebaseFS.h>
#include <FirebaseESP32.h>
// Provide the token generation process info.
#include "addons/TokenHelper.h"
// Provide the RTDB payload printing info and other helper functions.
//#include "addons/RTDBHelper.h"
//#include "esp_pm.h"
#include "OneButton.h"
//=========================================================================
//============================= Global Declarations =======================
//=========================================================================
// section code : DMD, toggle led, wifi alive, web server, Clock, JWS
TaskHandle_t taskLEDHandle = NULL;
TaskHandle_t taskWebHandle = NULL;
TaskHandle_t taskKeepWiFiHandle = NULL;
TaskHandle_t taskDMDHandle = NULL;
TaskHandle_t taskClockHandle = NULL;
TaskHandle_t taskDateHandle = NULL;
TaskHandle_t taskJWSHandle = NULL;
TaskHandle_t taskCountdownJWSHandle = NULL;
TaskHandle_t taskButtonTouchHandle = NULL;
TaskHandle_t taskFirebaseHandle = NULL;
TaskHandle_t taskWebSocketHandle = NULL;
SemaphoreHandle_t mutex_con = NULL;
SemaphoreHandle_t mutex_dmd = NULL;
SemaphoreHandle_t mutex_clock = NULL;
SemaphoreHandle_t mutex_date = NULL;
Preferences preferences;
bool isClockManual = false;
volatile int h24 = 12; // hours in 24 format
volatile int h = 12; // hours in 12 format
volatile int m = 0; // minutes
volatile int s = 0; // seconds
char str_clock_full[9] = "--:--:--"; // used by dmd task
char str_date[26] = "------, -- --------- ----"; // used by dmd task
char str_hijri_date[30] = "-- ------- ----- ----";
char str_date_full[55] = "";
// char timeDay[3];
// char timeMonth[10];
// char timeYear[5];
volatile int day = -1;
volatile int month = -1;
volatile int year = -1;
volatile int weekday = -1;
volatile int hijri_day = -1;
volatile int hijri_month = -1;
volatile int hijri_year = -1;
volatile bool isWiFiReady = false;
volatile bool isClockReady = false;
volatile bool isDateReady = false;
volatile bool isJWSReady = false;
volatile bool isSPIFFSReady = false;
volatile bool isWebSocketReady = false;
volatile bool isFirebaseReady = false;
const uint8_t built_in_led = 2;
const uint8_t relay = 26;
const uint8_t marquee_speed = 27;
char data_jadwal_subuh[9];
char data_jadwal_syuruk[9];
char data_jadwal_dhuha[9];
char data_jadwal_dzuhur[9];
char data_jadwal_ashar[9];
char data_jadwal_maghrib[9];
char data_jadwal_isya[9];
char type_jws[8] = "sholat"; // subuh, dzuhur, ashar, maghrib, isya
char count_down_jws[9] = "--:--:--"; // 04:30:00
// 22.30 - 23.45 : 1 jam + 15 menit
// 22.30 - 23.15 : 1 jam + -15 menit
// 22.30 - 22.45 : 0 jam + 15 menit
// 22.30 - 22.15 : 0 jam + -15 menit + 24 jam
// 22.30 - 01.45 : -21 jam + 15 menit + 24 jam
// 22.30 - 01.15 : -21 jam + -15 menit + 24 jam
void taskKeepWiFiAlive(void *parameter);
void taskFirebase(void *parameter);
void taskClock(void *parameter);
void taskJadwalSholat(void *parameter);
void taskDate(void *parameter);
void stopTaskToggleLED();
void startTaskToggleLED();
void startTaskWebSocketServer();
void startTaskCountdownJWS();
//================================================================================
//================================== Task Web Socket Server ===================
//================================================================================
WebSocketsServer webSocket = WebSocketsServer(81);
// Writes message to the serial console and, once the web-socket server is up,
// broadcasts it to every connected client as well.
void log(const char *message)
{
  Serial.print(message);
  if (isWebSocketReady)
  {
    webSocket.broadcastTXT(message, strlen(message));
  }
}
// Like log(), but terminates the line: newline on serial, "<br>" on the socket.
void logln(const char *message)
{
  log(message);
  Serial.println();
  if (!isWebSocketReady)
  {
    return;
  }
  webSocket.broadcastTXT("<br>", 4);
}
// printf-style logging: formats into a 128-byte stack buffer, falling back to
// a heap buffer for longer messages, then forwards the result to logln().
void logf(const char *format, ...)
{
  char loc_buf[128];
  char *temp = loc_buf;
  va_list arg;
  va_list copy;
  va_start(arg, format);
  va_copy(copy, arg);
  int len = vsnprintf(temp, sizeof(loc_buf), format, copy);
  va_end(copy);
  if (len < 0)
  {
    va_end(arg);
    return;
  }
  if (len >= (int)sizeof(loc_buf))
  {
    // Message did not fit: allocate exactly len + 1 bytes and re-format.
    temp = (char *)malloc(len + 1);
    if (temp == NULL)
    {
      // Fixed: the original wrote temp[len] = '\0' BEFORE this NULL check,
      // dereferencing a possibly-NULL pointer on allocation failure.
      va_end(arg);
      return;
    }
    len = vsnprintf(temp, len + 1, format, arg); // vsnprintf NUL-terminates
  }
  va_end(arg);
  logln(temp);
  if (temp != loc_buf)
  {
    free(temp);
  }
}
// Dispatches events from the WebSocketsServer library.
// num is the client slot; payload/length hold the frame content.
void webSocketEvent(uint8_t num, WStype_t type, uint8_t *payload, size_t length)
{
  switch (type)
  {
  case WStype_DISCONNECTED:
    Serial.printf("[%u] Disconnected!\n", num);
    break;
  case WStype_CONNECTED:
  {
    IPAddress ip = webSocket.remoteIP(num);
    Serial.printf("[%u] Connected from %d.%d.%d.%d url: %s\n", num, ip[0], ip[1], ip[2], ip[3], payload);
    // send message to client
    webSocket.sendTXT(num, "Connected");
  }
  break;
  case WStype_TEXT:
    Serial.printf("[%u] get Text: %s\n", num, payload);
    // send message to client
    webSocket.sendTXT(num, "Command Received : OK");
    // send data to all connected clients
    // webSocket.broadcastTXT("message here");
    break;
  case WStype_BIN:
    Serial.printf("[%u] get binary length: %u\n", num, length);
    // hexdump(payload, length);
    // send message to client
    // webSocket.sendBIN(num, payload, length);
    break;
  // Remaining frame/fragment types are intentionally ignored.
  case WStype_ERROR:
  case WStype_FRAGMENT_TEXT_START:
  case WStype_FRAGMENT_BIN_START:
  case WStype_FRAGMENT:
  case WStype_FRAGMENT_FIN:
    break;
  }
}
// FreeRTOS task: starts the web-socket server on port 81 and services it once
// per second. Sets isWebSocketReady so log()/logln() begin broadcasting.
void taskWebSocketServer(void *paramater)
{
  isWebSocketReady = false;
  webSocket.begin();
  webSocket.onEvent(webSocketEvent);
  isWebSocketReady = true;
  logln("Web Socket server started");
  logf("Web socket stack size : %d", uxTaskGetStackHighWaterMark(NULL));
  for (;;)
  {
    webSocket.loop();
    delay(1000);
  }
}
// Copies an Arduino String into a newly malloc'd, NUL-terminated C string.
// The caller owns the buffer and must free() it. Returns NULL on allocation
// failure — the original called sprintf_P on the unchecked malloc result.
char *getAllocatedString(String text)
{
  size_t length = text.length() + 1;
  char *allocatedString = (char *)malloc(sizeof(char) * (length));
  if (allocatedString == NULL)
  {
    return NULL;
  }
  memcpy(allocatedString, text.c_str(), length - 1);
  allocatedString[length - 1] = '\0';
  return allocatedString;
}
// Seconds from the current wall clock (volatile globals h24/m/s, maintained by
// the clock task) until the next occurrence of hours:minutes:seconds.
// Always returns a value in (0, 86400]: a non-positive delta means "tomorrow".
long long sDistanceFromNowToTime(uint8_t hours, uint8_t minutes, uint8_t seconds)
{
  long long deltaInSecond = ((hours - h24) * 3600) + ((minutes - m) * 60) + seconds - s;
  if (deltaInSecond <= 0)
  {
    deltaInSecond += 24 * 3600;
  }
  return deltaInSecond;
}
// Milliseconds from "now" until the next occurrence of hours:minutes:seconds.
long long msDistanceFromNowToTime(uint8_t hours, uint8_t minutes, uint8_t seconds)
{
  const long long seconds_away = sDistanceFromNowToTime(hours, minutes, seconds);
  return seconds_away * 1000;
}
// Seconds on a 24-hour clock from the "from" time to the "to" time.
// Equal times count as a full day; the result is always in (0, 86400].
long long sDistanceFromTimeToTime(uint8_t fhours, uint8_t fminutes, uint8_t fseconds, uint8_t thours, uint8_t tminutes, uint8_t tseconds)
{
  const long long from = (long long)fhours * 3600 + (long long)fminutes * 60 + fseconds;
  const long long to = (long long)thours * 3600 + (long long)tminutes * 60 + tseconds;
  long long delta = to - from;
  if (delta <= 0)
  {
    delta += 24 * 3600;
  }
  return delta;
}
// Millisecond variant of sDistanceFromTimeToTime().
long long msDistanceFromTimeToTime(uint8_t fhours, uint8_t fminutes, uint8_t fseconds, uint8_t thours, uint8_t tminutes, uint8_t tseconds)
{
  const long long distance_s = sDistanceFromTimeToTime(fhours, fminutes, fseconds, thours, tminutes, tseconds);
  return distance_s * 1000;
}
// Returns two distances (in seconds), relative to a "day 0 = today" scheme:
//   result[0] = distance from the current clock to (fdays, f time)
//   result[1] = distance from (fdays, f time) to (tdays, t time)
// NOTE(review): reads the volatile clock globals h24/m/s without locking.
std::array<long long, 2> sDistanceFromDayTimeToDayTime(int16_t fdays, uint8_t fhours, uint8_t fminutes, uint8_t fseconds, int16_t tdays, uint8_t thours, uint8_t tminutes, uint8_t tseconds)
{
  // now is day 0, fdays=1 means tomorrow, fdays=2 means the day after tomorrow
  std::array<long long, 2> result;
  // Worked examples (day, time):
  // 1, 9:00:00 ==> 5, 8:30:00
  // 5, 9:00:00 ==> 5, 8:30:00
  // 2, 9:00:00 ==> 2, 8:00:00
  //-1, 9:00:00 ==> 0, 22:00:00 ==> 0, 23:00:00
  long long deltaInSecond = 0;
  deltaInSecond += (tdays - fdays - 1) * 24 * 3600;
  deltaInSecond += sDistanceFromTimeToTime(fhours, fminutes, fseconds, 24, 0, 0);
  deltaInSecond += sDistanceFromTimeToTime(0, 0, 0, thours, tminutes, tseconds);
  result[1] = deltaInSecond; // distance from 'from' to 'to'
  deltaInSecond = 0;
  deltaInSecond += (fdays - 0 - 1) * 24 * 3600; //-48 jam + 2 jam + 9 jam = -37
  deltaInSecond += sDistanceFromTimeToTime(h24, m, s, 24, 0, 0);
  deltaInSecond += sDistanceFromTimeToTime(0, 0, 0, fhours, fminutes, fseconds);
  result[0] = deltaInSecond; // distance from 'now' to 'from'
  return result;
}
// Millisecond variant of sDistanceFromDayTimeToDayTime().
std::array<long long, 2> msDistanceFromDayTimeToDayTime(int16_t fdays, uint8_t fhours, uint8_t fminutes, uint8_t fseconds, int16_t tdays, uint8_t thours, uint8_t tminutes, uint8_t tseconds)
{
  std::array<long long, 2> result = sDistanceFromDayTimeToDayTime(fdays, fhours, fminutes, fseconds, tdays, thours, tminutes, tseconds);
  result[0] *= 1000;
  result[1] *= 1000;
  return result;
}
// Blocks the calling task until the wall clock next reaches hours:minutes:seconds.
void delayMSUntilAtTime(uint8_t hours, uint8_t minutes, uint8_t seconds)
{
  const long long wait_ms = msDistanceFromNowToTime(hours, minutes, seconds);
  delay(wait_ms);
}
// void eraseNVS(){
// nvs_flash_erase(); // erase the NVS partition and...
// nvs_flash_init(); // initialize the NVS partition.
// ESP.restart();
// while(true);
// }
// Parses "HH:MM" or "HH:MM:SS" into {hours, minutes, seconds, total_seconds}.
// Short forms get ":00" seconds appended before parsing.
// Fixes vs. original: snprintf bounds the copy (sprintf_P could overflow the
// 9-byte buffer for 6-char inputs like "4:30:5"); strtok_r replaces the
// non-reentrant strtok, which is unsafe with multiple FreeRTOS tasks; the
// token index is bounded to the array size.
std::array<unsigned long, 4> getArrayOfTime(const char *time)
{
  char copied_time[9] = {'\0'};
  if (strlen(time) <= 6)
  {
    snprintf(copied_time, sizeof(copied_time), "%s:00", time);
  }
  else
  {
    snprintf(copied_time, sizeof(copied_time), "%s", time);
  }
  std::array<unsigned long, 4> as = {0, 0, 0, 0};
  char *save = NULL;
  char *token = strtok_r(copied_time, ":", &save);
  int index = 0;
  while (token != NULL && index < 3)
  {
    as[index] = atoi(token);
    index++;
    token = strtok_r(NULL, ":", &save);
  }
  as[3] = (as[0] * 3600) + (as[1] * 60) + as[2];
  return as;
}
// Parses "YYYY-MM-DD" into {year, month, day}.
// Fixes vs. original: snprintf bounds the copy; strtok_r replaces the
// non-reentrant strtok (unsafe across FreeRTOS tasks); the token index is
// bounded so malformed input cannot write past the 3-element array.
std::array<uint16_t, 3> getArrayOfDate(const char *date)
{
  char copied_date[11] = {'\0'};
  snprintf(copied_date, sizeof(copied_date), "%s", date);
  std::array<uint16_t, 3> as = {0, 0, 0};
  char *save = NULL;
  char *token = strtok_r(copied_date, "-", &save);
  int index = 0;
  while (token != NULL && index < 3)
  {
    as[index] = atoi(token);
    index++;
    token = strtok_r(NULL, "-", &save);
  }
  return as;
}
//=========================================================================
//================================== Task DMD ==========================
//=========================================================================
#define DISPLAYS_ACROSS 2
#define DISPLAYS_DOWN 1
#define DMD_DATA_SIZE 30
#define DMD_DATA_FLASH_INDEX 0
#define DMD_DATA_IMPORTANT_INDEX 1
#define DMD_DATA_REGULER_INDEX 6
#define DMD_DATA_FREE_INDEX DMD_DATA_SIZE
// Rendering mode of a DMD message slot: scrolling text, scrolling + static
// line, two static lines, or scrolling text with a count-down/count-up timer.
enum DMDType
{
  DMD_TYPE_INIT, // slot is free / uninitialized
  DMD_TYPE_SCROLL,
  DMD_TYPE_SCROLL_STATIC,
  DMD_TYPE_STATIC_STATIC,
  DMD_TYPE_SCROLL_COUNTDOWN,
  DMD_TYPE_SCROLL_COUNTUP
};
// One scheduled message for the DMD panel display loop.
struct DMD_Data
{
  DMDType type = DMD_TYPE_INIT;
  char *text1 = NULL;
  uint8_t speed1 = 0;
  bool need_free_text1 = false; // free(text1) when this slot is reset
  char *text2 = NULL;
  uint8_t speed2 = 0;
  bool need_free_text2 = false; // free(text2) when this slot is reset
  const uint8_t *font = NULL;
  unsigned long delay_inMS = 0; // refresh delay within each appearance
  unsigned long duration_inMS = 0; // duration of each appearance
  int max_count = 1; // number of appearances, -1 for unlimited
  int count = 0; // set by code (appearances shown so far)
  unsigned long life_time_inMS = 0; // total lifetime in ms (0 = unlimited)
  long long start_time_inMS = 0; // set by code (millis()-based start time)
};
// How a single display line moves across the panel.
enum DMD_Data_Line_Type
{
  DMD_Data_Line_Type_Init,
  DMD_Data_Line_Type_Static,
  DMD_Data_Line_Type_Bounce,
  DMD_Data_Line_Type_Scroll
};
// Per-line animation state tracked by the display loop.
struct DMD_Data_Line {
  DMD_Data_Line_Type type = DMD_Data_Line_Type_Init;
  int width = -1; // rendered pixel width of the text (-1 = not measured yet)
  int8_t step = 0;
  int8_t posY = 0;
  int posX = 0;
  bool message_full_displayed = false;
  unsigned long start = 0; // millis() when the current pass started
};
bool need_reset_dmd_loop_index = false;
bool allowed_dmd_loop = true;
int dmd_loop_index = 0; // we can change this runtime
struct DMD_Data dmd_data_list[DMD_DATA_SIZE]; // index 0 - 5 for important message
DMD dmd(DISPLAYS_ACROSS, DISPLAYS_DOWN);
hw_timer_t *timer = NULL;
// Hardware-timer ISR (kept in IRAM): refreshes one DMD scan row over SPI.
void IRAM_ATTR triggerScan()
{
  dmd.scanDisplayBySPI();
}
void marqueeText(const uint8_t *font, const char *text, int top)
{
dmd.selectFont(font);
dmd.drawMarquee(text, strlen(text), (32 * DISPLAYS_ACROSS) - 1, top);
unsigned long start = millis();
unsigned long timer = start;
boolean ret = false;
while (!ret)
{
if ((timer + marquee_speed) < millis())
{
ret = dmd.stepMarquee(-1, 0);
timer = millis();
}
}
}
void resetDMDLoopIndex()
{ // use this function to make show important message right now
  need_reset_dmd_loop_index = true;
}
// Pauses the DMD display loop (flag polled by the display task).
void stopDMDLoop(){
  allowed_dmd_loop = false;
}
// Resumes the DMD display loop.
void startDMDLoop(){
  allowed_dmd_loop = true;
}
// Finds the first free DMD message slot, scanning forward from reservedIndex
// (or from the important/regular region when reservedIndex is out of range).
// Returns DMD_DATA_SIZE when every slot is taken; callers check for that.
uint8_t getAvailableDMDIndex(bool isImportant, uint8_t reservedIndex)
{
  uint8_t choosenIndex;
  if (reservedIndex >= DMD_DATA_SIZE)
  {
    choosenIndex = isImportant ? DMD_DATA_IMPORTANT_INDEX : DMD_DATA_REGULER_INDEX;
  }
  else
  {
    choosenIndex = reservedIndex;
  }
  // Fixed: the original's loop condition evaluated
  // dmd_data_list[choosenIndex].type BEFORE its "full" flag, reading
  // dmd_data_list[DMD_DATA_SIZE] out of bounds when the array was full.
  while (choosenIndex < DMD_DATA_SIZE && dmd_data_list[choosenIndex].type > DMD_TYPE_INIT)
  {
    choosenIndex++;
  }
  return choosenIndex;
}
// show with custom
// Fills the first free DMD slot (chosen by getAvailableDMDIndex) with a
// message, under mutex_dmd. Drops the message with a log line when every
// slot is taken. start_time_inMS is a millis()-based absolute start time.
void setupDMDdata(bool isImportant, uint8_t reservedIndex, DMDType type, const char *text1, uint8_t speed1, bool need_free_text1, const char *text2, uint8_t speed2, bool need_free_text2, const uint8_t *font, unsigned long delay_inMS, unsigned long duration_inMS, int max_count, unsigned long life_time_inMS, long long start_time_inMS)
{
  logln("dmd wait.....");
  xSemaphoreTake(mutex_dmd, portMAX_DELAY);
  logln("dmd start.....");
  uint8_t index = getAvailableDMDIndex(isImportant, reservedIndex);
  if (index >= DMD_DATA_SIZE)
  {
    logln("DMD slot is full");
    xSemaphoreGive(mutex_dmd);
    return;
  }
  dmd_data_list[index].type = type;
  dmd_data_list[index].text1 = (char *)text1;
  dmd_data_list[index].speed1 = speed1;
  dmd_data_list[index].need_free_text1 = need_free_text1;
  dmd_data_list[index].text2 = (char *)text2;
  dmd_data_list[index].speed2 = speed2;
  dmd_data_list[index].need_free_text2 = need_free_text2;
  dmd_data_list[index].font = font;
  dmd_data_list[index].delay_inMS = delay_inMS;
  dmd_data_list[index].duration_inMS = duration_inMS;
  dmd_data_list[index].max_count = max_count;
  dmd_data_list[index].count = 0;
  dmd_data_list[index].life_time_inMS = life_time_inMS;
  dmd_data_list[index].start_time_inMS = start_time_inMS;
  logf("%s : %s,index : %d,type : %d,max_count : %d,life_time : %ld", text1, text2, index, type, max_count, life_time_inMS);
  logln("dmd done .....");
  xSemaphoreGive(mutex_dmd);
}
void stopTaskWebSocketServer();
void stopTaskCountdownJWS();
// Clears DMD slot [index], freeing any heap-owned text buffers first
// (need_free_text1/2 flag ownership). Leaves the slot in DMD_TYPE_INIT.
void resetDMDData(uint8_t index)
{
  DMD_Data *item = dmd_data_list + index;
  logf("reset 0 %d", index);
  if (item->type > DMD_TYPE_INIT && item->need_free_text1)
  {
    logf("reset 1 %d", index);
    free(item->text1);
    item->text1 = NULL;
    logf("reset 1 end %d", index);
  }
  if (item->type > DMD_TYPE_INIT && item->need_free_text2)
  {
    logf("reset 2 %d", index);
    free(item->text2);
    item->text2 = NULL;
    logf("reset 2 end %d", index);
  }
  logf("reset x end %d", index);
  item->type = DMD_TYPE_INIT;
  item->speed1 = 0;
  item->need_free_text1 = false;
  item->speed2 = 0;
  item->need_free_text2 = false;
  item->font = 0;
  item->delay_inMS = 0;
  item->duration_inMS = 0;
  item->max_count = 0;
  item->count = 0;
  item->life_time_inMS = 0;
  item->start_time_inMS = 0;
}
// Replaces the dedicated flash slot with text (shown once, scrolling, 4 s)
// and resets the loop index so it is displayed immediately.
void showFlashMessage(const char *text, bool need_free_text)
{
  resetDMDData(DMD_DATA_FLASH_INDEX);
  setupDMDdata(true, DMD_DATA_FLASH_INDEX, DMD_TYPE_SCROLL, text, 0, need_free_text, "", 0, false, Arial_Black_16, 1000, 4000, 1, 0, 0);
  resetDMDLoopIndex();
}
// show at exact range time
// Schedules a message visible from (start_day, start_time) until
// (end_day, end_time); day 0 = today. Time strings are "HH:MM[:SS]".
void setupDMDAtExactRangeTime(bool isImportant, uint8_t reservedIndex, DMDType type, const char *text1, bool need_free_text1, const char *text2, bool need_free_text2, const uint8_t *font, unsigned long delay_inMS, unsigned long duration_inMS, int16_t start_day, const char *start_time, int16_t end_day, const char *end_time /*09:10:23*/)
{
  std::array<unsigned long, 4> start_time_info = getArrayOfTime(start_time);
  std::array<unsigned long, 4> end_time_info = getArrayOfTime(end_time);
  std::array<long long, 2> distance_info = msDistanceFromDayTimeToDayTime(start_day, start_time_info[0], start_time_info[1], start_time_info[2], end_day, end_time_info[0], end_time_info[1], end_time_info[2]);
  setupDMDdata(isImportant, reservedIndex, type, text1, 0, need_free_text1, text2, 0, need_free_text2, font, delay_inMS, duration_inMS, -1, distance_info[1], millis() + distance_info[0]);
}
// show at exact time for iteration
// Schedules a message starting at (day, exact_time), shown max_count times.
void setupDMDAtExactTimeForIteration(bool isImportant, uint8_t reservedIndex, DMDType type, const char *text1, bool need_free_text1, const char *text2, bool need_free_text2, const uint8_t *font, unsigned long delay_inMS, unsigned long duration_inMS, int max_count, int16_t day, const char *exact_time /*09:10:23*/)
{
  std::array<unsigned long, 4> timeInfo = getArrayOfTime(exact_time);
  std::array<long long, 2> distance_info = msDistanceFromDayTimeToDayTime(day, timeInfo[0], timeInfo[1], timeInfo[2], day, timeInfo[0], timeInfo[1], timeInfo[2]);
  setupDMDdata(isImportant, reservedIndex, type, text1, 0, need_free_text1, text2, 0, need_free_text2, font, delay_inMS, duration_inMS, max_count, 0, millis() + distance_info[0]);
}
// show at exact time for some life time
// Schedules a message starting at (day, exact_time), alive for life_time_inMS.
void setupDMDAtExactTimeForLifeTime(bool isImportant, uint8_t reservedIndex, DMDType type, const char *text1, bool need_free_text1, const char *text2, bool need_free_text2, const uint8_t *font, unsigned long delay_inMS, unsigned long duration_inMS, unsigned long life_time_inMS, int16_t day, const char *exact_time /*09:10:23*/)
{
  std::array<unsigned long, 4> timeInfo = getArrayOfTime(exact_time);
  std::array<long long, 2> distance_info = msDistanceFromDayTimeToDayTime(day, timeInfo[0], timeInfo[1], timeInfo[2], day, timeInfo[0], timeInfo[1], timeInfo[2]);
  setupDMDdata(isImportant, reservedIndex, type, text1, 0, need_free_text1, text2, 0, need_free_text2, font, delay_inMS, duration_inMS, -1, life_time_inMS, millis() + distance_info[0]);
}
// show at exact time forever
// Schedules a message starting at (day, exact_time) with no end.
void setupDMDAtExactTimeForever(bool isImportant, uint8_t reservedIndex, DMDType type, const char *text1, bool need_free_text1, const char *text2, bool need_free_text2, const uint8_t *font, unsigned long delay_inMS, unsigned long duration_inMS, int16_t day, const char *exact_time /*09:10:23*/)
{
  std::array<unsigned long, 4> timeInfo = getArrayOfTime(exact_time);
  std::array<long long, 2> distance_info = msDistanceFromDayTimeToDayTime(day, timeInfo[0], timeInfo[1], timeInfo[2], day, timeInfo[0], timeInfo[1], timeInfo[2]);
  setupDMDdata(isImportant, reservedIndex, type, text1, 0, need_free_text1, text2, 0, need_free_text2, font, delay_inMS, duration_inMS, -1, 0, millis() + distance_info[0]);
}
// show at now for some iteration
// Shows a message starting now, repeated max_count times.
void setupDMDAtNowForIteration(bool isImportant, uint8_t reservedIndex, DMDType type, const char *text1, bool need_free_text1, const char *text2, bool need_free_text2, const uint8_t *font, unsigned long delay_inMS, unsigned long duration_inMS, int max_count)
{
  setupDMDdata(isImportant, reservedIndex, type, text1, 0, need_free_text1, text2, 0, need_free_text2, font, delay_inMS, duration_inMS, max_count, 0, 0);
}
// show at now for some life time
// Shows a message starting now, alive for life_time_inMS.
void setupDMDAtNowForLifeTime(bool isImportant, uint8_t reservedIndex, DMDType type, const char *text1, bool need_free_text1, const char *text2, bool need_free_text2, const uint8_t *font, unsigned long delay_inMS, unsigned long duration_inMS, unsigned long life_time_inMS)
{
  setupDMDdata(isImportant, reservedIndex, type, text1, 0, need_free_text1, text2, 0, need_free_text2, font, delay_inMS, duration_inMS, -1, life_time_inMS, 0);
}
// show at now forever
// Shows a message starting now with no end.
void setupDMDAtNowForever(bool isImportant, uint8_t reservedIndex, DMDType type, const char *text1, bool need_free_text1, const char *text2, bool need_free_text2, const uint8_t *font, unsigned long delay_inMS, unsigned long duration_inMS)
{
  setupDMDdata(isImportant, reservedIndex, type, text1, 0, need_free_text1, text2, 0, need_free_text2, font, delay_inMS, duration_inMS, -1, 0, 0);
}
// One-time DMD initialization: starts the scan timer ISR, configures panel
// brightness via PWM, registers the default messages for the current WiFi
// mode (clock/JWS in STA mode, setup instructions in AP mode), then shows a
// greeting marquee.
void setupDMD()
{
  // Prescale by the CPU MHz so the timer ticks at ~1 MHz; alarm every 300
  // ticks drives one scan row (NOTE(review): confirm tick rate assumption).
  uint8_t cpuClock = ESP.getCpuFreqMHz();
  timer = timerBegin(0, cpuClock, true);
  timerAttachInterrupt(timer, &triggerScan, false);
  timerAlarmWrite(timer, 300, true);
  timerAlarmEnable(timer);
  // control brightness DMD
  ledcSetup(0, 5000, 8);
  ledcAttachPin(4, 0);
  ledcWrite(0, 20);
  // setup clock
  wifi_mode_t mode = WiFi.getMode();
  if (mode == WIFI_MODE_STA)
  {
    //setupDMDAtNowForever(false, DMD_DATA_FREE_INDEX, DMD_TYPE_SCROLL_COUNTUP, "up up up", false, "test", false, System5x7, 1000, 10000);
    //setupDMDAtNowForever(false, DMD_DATA_FREE_INDEX, DMD_TYPE_SCROLL_COUNTDOWN, "down down down", false, "test", false, System5x7, 1000, 10000);
    setupDMDAtNowForever(false, DMD_DATA_FREE_INDEX, DMD_TYPE_SCROLL_STATIC, str_date_full, false, str_clock_full, false, System5x7, 1000, 15000);
    setupDMDAtNowForever(false, DMD_DATA_FREE_INDEX, DMD_TYPE_SCROLL_STATIC, type_jws, false, count_down_jws, false, System5x7, 1000, 10000);
  }
  else if (mode == WIFI_MODE_AP)
  {
    setupDMDAtNowForever(false, DMD_DATA_FREE_INDEX, DMD_TYPE_SCROLL_STATIC, "1. silakan connect ke wifi 'Speaker Murottal AP' dengan password '<PASSWORD>'", false, "Cara Setup", false, System5x7, 1000, 5000);
    setupDMDAtNowForever(false, DMD_DATA_FREE_INDEX, DMD_TYPE_SCROLL_STATIC, "2. Akses website http://speaker-murottal.local", false, "Cara Setup", false, System5x7, 1000, 5000);
    setupDMDAtNowForever(false, DMD_DATA_FREE_INDEX, DMD_TYPE_SCROLL_STATIC, "3. Masuk menu 'Wifi manager' dan set wifi akses anda yg terkoneksi ke internet", false, "Cara Setup", false, System5x7, 1000, 5000);
    setupDMDAtNowForever(false, DMD_DATA_FREE_INDEX, DMD_TYPE_SCROLL_STATIC, "4. silakan restart device anda", false, "Cara Setup", false, System5x7, 1000, 5000);
  }
  dmd.clearScreen(true);
  marqueeText(Arial_Black_16, "Assalamu'alaikum", 1);
  dmd.clearScreen(true);
  marqueeText(Arial_Black_16, "Developed by AhsaiLabs", 1);
  logln("DMD is coming");
}
// Pixel width of `str` rendered in `font`, including one pixel of spacing
// between glyphs (but not after the last one). Zero-width glyphs contribute
// nothing. Side effect: selects `font` on the global dmd instance.
unsigned int stringWidth(const uint8_t *font, const char *str)
{
  dmd.selectFont(font);
  unsigned int total = 0;
  for (const char *p = str; *p != '\0'; ++p)
  {
    int glyphWidth = dmd.charWidth(*p);
    if (glyphWidth > 0)
    {
      total += glyphWidth + 1; // glyph plus one spacing pixel
    }
  }
  // Drop the trailing spacing pixel so the width ends at the last glyph.
  if (total)
  {
    total--;
  }
  return total;
}
// Glyph height (in pixels) of a PROGMEM font, read from its header.
uint8_t stringHeight(const uint8_t *font)
{
  return pgm_read_byte(font + FONT_HEIGHT);
}
// Draw `str` horizontally centred on the panel at row `top` in normal
// graphics mode; returns the x position the text was drawn at.
int drawTextCenter(const uint8_t *font, const char *str, int top)
{
  unsigned int w = stringWidth(font, str);
  // NOTE: the unsigned subtraction is kept as-is to preserve the original
  // behaviour for strings wider than the panel.
  int x = ((32 * DISPLAYS_ACROSS) - w) / 2;
  dmd.drawString(x, top, str, strlen(str), GRAPHICS_NORMAL);
  return x;
}
// Draw `str` horizontally centred on the panel at row `top`, using the caller
// supplied graphics mode; returns the x position the text was drawn at.
int drawTextCenter(const uint8_t *font, const char *str, int top, byte bGraphicsMode)
{
  unsigned int w = stringWidth(font, str);
  // Unsigned subtraction kept as in the 3-argument overload.
  int x = ((32 * DISPLAYS_ACROSS) - w) / 2;
  dmd.drawString(x, top, str, strlen(str), bGraphicsMode);
  return x;
}
// Erase a line by redrawing it in inverse (pixels off).
void clearLine(int x1, int y1, int x2, int y2)
{
  dmd.drawLine(x1, y1, x2, y2, GRAPHICS_INVERSE);
}
// Erase a box outline by redrawing it in inverse (pixels off).
void clearBox(int x1, int y1, int x2, int y2)
{
  dmd.drawBox(x1, y1, x2, y2, GRAPHICS_INVERSE);
}
// Erase a filled rectangle by redrawing it in inverse (pixels off).
void clearFilledBox(int x1, int y1, int x2, int y2)
{
  dmd.drawFilledBox(x1, y1, x2, y2, GRAPHICS_INVERSE);
}
// Slide an item's text2 line into view from above the panel down to row 0.
// Blocking: loops until the animation finishes or need_reset_dmd_loop_index
// asks the display loop to restart. Only the scroll-type items animate;
// other types fall through and the loop exits immediately.
void anim_in(DMD_Data *item)
{
  int posy = 0;
  int old_posy = 0;
  int target = 0;
  unsigned long start = millis();
  unsigned long start2 = start;
  // text1 is not animated here, so it is "done" from the start.
  bool isText1Done = true;
  bool isText2Done = true;
  dmd.selectFont(item->font);
  switch (item->type)
  {
  case DMD_TYPE_SCROLL_STATIC:
  case DMD_TYPE_SCROLL_COUNTDOWN:
  case DMD_TYPE_SCROLL_COUNTUP:
    // Start one glyph height (7px + 1) above the panel; target is row 0.
    posy = 0 - 7 - 1;
    old_posy = posy - 1;
    target = 0;
    isText2Done = false;
    break;
  default:
    break;
  }
  while (true)
  {
    if (need_reset_dmd_loop_index)
    {
      break;
    }
    if (isText1Done && isText2Done)
    {
      break;
    }
    switch (item->type)
    {
    case DMD_TYPE_SCROLL_STATIC:
    case DMD_TYPE_SCROLL_COUNTDOWN:
    case DMD_TYPE_SCROLL_COUNTUP:
      // Advance one row per marquee_speed milliseconds.
      if (millis() - start2 > marquee_speed)
      {
        int posx = drawTextCenter(item->font, item->text2, posy, GRAPHICS_NORMAL);
        int strWidth = stringWidth(item->font, item->text2);
        // Erase the row the text occupied on the previous step.
        clearLine(posx, old_posy, posx + strWidth - 1, old_posy);
        start2 = millis();
        if (!isText2Done)
        {
          old_posy = posy;
          posy++;
          if (posy > target)
          {
            isText2Done = true;
          }
        }
      }
      break;
    default:
      break;
    }
  }
}
// Slide an item's text2 line out of view, upwards past the top of the panel.
// Blocking mirror of anim_in(); aborts early when need_reset_dmd_loop_index
// is set. Only the scroll-type items animate.
void anim_out(DMD_Data *item)
{
  int posy = 0;
  int old_posy = 0;
  int target = 0;
  unsigned long start = millis();
  unsigned long start2 = start;
  bool isText1Done = true;
  bool isText2Done = true;
  dmd.selectFont(item->font);
  switch (item->type)
  {
  case DMD_TYPE_SCROLL_STATIC:
  case DMD_TYPE_SCROLL_COUNTDOWN:
  case DMD_TYPE_SCROLL_COUNTUP:
    // From row 0 up to one glyph height above the panel.
    target = 0 - 7 - 1;
    posy = 0;
    old_posy = posy + 1;
    isText2Done = false;
    break;
  default:
    break;
  }
  while (true)
  {
    if (need_reset_dmd_loop_index)
    {
      break;
    }
    if (isText1Done && isText2Done)
    {
      break;
    }
    switch (item->type)
    {
    case DMD_TYPE_SCROLL_STATIC:
    case DMD_TYPE_SCROLL_COUNTDOWN:
    case DMD_TYPE_SCROLL_COUNTUP:
      if (millis() - start2 > marquee_speed)
      {
        int posx = drawTextCenter(item->font, item->text2, posy, GRAPHICS_NORMAL);
        int strWidth = stringWidth(item->font, item->text2);
        int strHeight = stringHeight(item->font);
        // Erase the row just below the text as it moves up.
        // NOTE(review): unlike anim_in, old_posy is updated but never used
        // for clearing here — confirm whether that is intentional.
        clearLine(posx, posy + strHeight, posx + strWidth - 1, posy + strHeight);
        start2 = millis();
        if (!isText2Done)
        {
          old_posy = posy;
          posy--;
          if (posy < target)
          {
            isText2Done = true;
          }
        }
      }
      break;
    default:
      break;
    }
  }
}
// Redraw `text` centred at row posY once per item->delay_inMS; `start` holds
// the timestamp of the last redraw and is updated in place.
void showStaticLine(DMD_Data * item, unsigned long * start, const char * text, int8_t posY){
  // Nothing to do until the item's refresh delay has elapsed.
  if (millis() - (*start) <= item->delay_inMS)
    return;
  drawTextCenter(item->font, text, posY);
  *start = millis();
}
// Step a bouncing line once per marquee_speed ms: draw `text` at *posX and
// move by *step, reversing direction at either panel edge. All cursor state
// (*posX, *step, *start) is updated in place.
void showBounceLine(DMD_Data * item, unsigned long * start, char * text, int8_t posY, int * posX, int * width, int8_t * step, bool * message_full_displayed){
  if (millis() - (*start) <= marquee_speed)
    return;
  log("*");
  dmd.drawString(*posX, posY, text, strlen(text), GRAPHICS_NORMAL);
  *posX += (*step);
  const int rightLimit = (32 * DISPLAYS_ACROSS) - (*width);
  if (*posX >= rightLimit)
  {
    *step = -1; // hit the right edge: bounce leftwards
  }
  else if (*posX <= 0)
  {
    *step = 1; // hit the left edge: bounce rightwards
  }
  *start = millis();
}
// Step a right-to-left scrolling line once per marquee_speed ms. When the
// text has scrolled fully off the left edge it wraps back to the right edge
// and *message_full_displayed is latched true.
void showScrollLine(DMD_Data * item, unsigned long * start, char * text, int8_t posY, int * posX, int * width, int8_t * step, bool * message_full_displayed){
  if (millis() - (*start) <= marquee_speed)
    return;
  log("*");
  dmd.drawString(*posX, posY, text, strlen(text), GRAPHICS_NORMAL);
  if (*posX < -(*width))
  {
    // Fully off-screen: wrap to just inside the right edge.
    *posX = (32 * DISPLAYS_ACROSS) - 1;
    *message_full_displayed = true;
  }
  --(*posX);
  *start = millis();
}
// Dispatch one animation step for `line` to the renderer matching its type
// (static / bounce / scroll). Unknown types are ignored.
void showDMDDataLine(DMD_Data * item, char * text, DMD_Data_Line * line){
  switch (line->type)
  {
  case DMD_Data_Line_Type_Static:
    showStaticLine(item, &line->start, text, line->posY);
    break;
  case DMD_Data_Line_Type_Bounce:
    showBounceLine(item, &line->start, text, line->posY, &line->posX, &line->width, &line->step, &line->message_full_displayed);
    break;
  case DMD_Data_Line_Type_Scroll:
    showScrollLine(item, &line->start, text, line->posY, &line->posX, &line->width, &line->step, &line->message_full_displayed);
    break;
  default:
    break;
  }
}
// Initialise one display line. Text wider than the panel cannot be shown
// statically or bounced, so it is forced into scroll mode starting just
// inside the right edge, with message_full_displayed cleared until the
// whole text has scrolled past once.
void setupDMDDataLine(DMD_Data_Line * line, DMD_Data_Line_Type type, unsigned long start, int8_t posY, const uint8_t * font, char * text){
  line->type = type;
  line->start = start;
  line->posY = posY;
  line->width = stringWidth(font, text);
  line->step = 1;
  line->posX = 0;
  const bool tooWide = line->width > (32 * DISPLAYS_ACROSS);
  line->message_full_displayed = !tooWide;
  if (tooWide)
  {
    line->type = DMD_Data_Line_Type_Scroll;
    line->posX = (32 * DISPLAYS_ACROSS) - 1;
  }
}
// Main display task. Endlessly scans dmd_data_list and renders each active
// item for its duration: two-line static/bounce text, single scrolling text,
// or countdown/count-up timers. Items expire after max_count showings or
// after life_time_inMS. need_reset_dmd_loop_index aborts the current item and
// restarts the scan from slot 0; allowed_dmd_loop gates the scan entirely.
void taskDMD(void *parameter)
{
  setupDMD();
  logf("DMD stack size : %d", uxTaskGetStackHighWaterMark(NULL));
  for (;;)
  {
    // byte b;
    // 10 x 14 font clock, including demo of OR and NOR modes for pixels so that the flashing colon can be overlayed
    // dmd.drawBox(0, 0, (32 * DISPLAYS_ACROSS) - 1, (16 * DISPLAYS_DOWN) - 1, GRAPHICS_TOGGLE);
    for (dmd_loop_index = 0; dmd_loop_index < DMD_DATA_SIZE && allowed_dmd_loop; dmd_loop_index++)
    {
      // A pending reset restarts the scan from slot 0 (-1 is incremented to 0).
      if (need_reset_dmd_loop_index)
      {
        need_reset_dmd_loop_index = false;
        dmd_loop_index = -1;
        continue;
      }
      DMD_Data *item = dmd_data_list + dmd_loop_index;
      // Skip empty / uninitialised slots.
      if (item->type <= DMD_TYPE_INIT)
      {
        // logln("no type");
        continue;
      }
      unsigned long start = millis();
      // Scheduled in the future: not yet due.
      if (item->start_time_inMS > 0 && start < item->start_time_inMS)
      {
        // logln("dont go now");
        continue;
      }
      // Logic to destroy DMDData
      bool deleteData = false;
      if (item->max_count > 0 && item->count >= item->max_count)
      {
        // logln("max_count > 0");
        deleteData = true;
      }
      if (item->life_time_inMS > 0 && (millis() - item->start_time_inMS) > item->life_time_inMS)
      {
        // logln("life_time_inMS > 0");
        deleteData = true;
      }
      if (deleteData)
      {
        // reset struct to stop drawing in dmd
        resetDMDData(dmd_loop_index);
        // logln("delete");
        continue;
      }
      logf("index : %d, type : %d, text1 : %s, text2 : %s, start_time : %lld, max_count : %d, life_time : %ld", dmd_loop_index, item->type, item->text1, item->text2, item->start_time_inMS, item->max_count, item->life_time_inMS);
      dmd.clearScreen(true);
      anim_in(item);
      // logln("go.................");
      item->count++;
      // Render this item until its time slice elapses or a reset is requested.
      while (start + item->duration_inMS > millis())
      {
        if (need_reset_dmd_loop_index)
        {
          break;
        }
        log("go");
        switch (item->type)
        {
        case DMD_TYPE_SCROLL_STATIC:
        case DMD_TYPE_STATIC_STATIC:
        {
          // Two lines: text1 on the bottom row (bounced for SCROLL_STATIC,
          // static otherwise), text2 static on the top row.
          int counter = item->duration_inMS / item->delay_inMS;
          unsigned long start = millis();
          dmd.selectFont(item->font);
          //setup line 1
          struct DMD_Data_Line line1;
          if(item->type == DMD_TYPE_SCROLL_STATIC){
            setupDMDDataLine(&line1,DMD_Data_Line_Type_Bounce,start,8,item->font, item->text1);
          } else {
            setupDMDDataLine(&line1,DMD_Data_Line_Type_Static,start,8,item->font, item->text1);
          }
          //setup line 2
          struct DMD_Data_Line line2;
          setupDMDDataLine(&line2,DMD_Data_Line_Type_Static,start,0,item->font, item->text2);
          // Run until the slice is used up AND both lines have been shown fully.
          while (counter >= 0 || !line1.message_full_displayed || !line2.message_full_displayed)
          {
            if (need_reset_dmd_loop_index)
            {
              break;
            }
            if (millis() - start > item->delay_inMS)
            {
              counter--;
              start = millis();
            }
            showDMDDataLine(item,item->text1,&line1);
            showDMDDataLine(item,item->text2,&line2);
          }
        }
        break;
        case DMD_TYPE_SCROLL: // single scrolling text
        {
          int counter = item->duration_inMS / item->delay_inMS;
          unsigned long start = millis();
          dmd.selectFont(item->font);
          //setup line 1
          struct DMD_Data_Line line;
          setupDMDDataLine(&line,DMD_Data_Line_Type_Bounce,start,1,item->font, item->text1);
          while (counter >= 0 || !line.message_full_displayed)
          {
            if (need_reset_dmd_loop_index)
            {
              break;
            }
            if (millis() - start > item->delay_inMS)
            {
              counter--;
              start = millis();
            }
            showDMDDataLine(item,item->text1,&line);
          }
        }
        break;
        case DMD_TYPE_SCROLL_COUNTDOWN: // count down timer
        {
          // Each tick is one delay_inMS period; HH:MM:SS is derived from the
          // remaining tick count (assumes delay_inMS is 1000 — TODO confirm).
          int counter = item->duration_inMS / item->delay_inMS;
          unsigned long start = millis();
          dmd.selectFont(item->font);
          int leftSeconds = counter;
          int hours = leftSeconds / 3600;
          int minutes = 0;
          int seconds = 0;
          if (hours > 0)
          {
            leftSeconds = leftSeconds % 3600;
          }
          minutes = leftSeconds / 60;
          if (minutes > 0)
          {
            leftSeconds = leftSeconds % 60;
          }
          seconds = leftSeconds;
          //setup line 1
          struct DMD_Data_Line line;
          setupDMDDataLine(&line,DMD_Data_Line_Type_Bounce,start,8,item->font, item->text1);
          while (counter >= 0 || !line.message_full_displayed)
          {
            if (need_reset_dmd_loop_index)
            {
              break;
            }
            if (millis() - start > item->delay_inMS)
            {
              // Borrow from the next unit when a field underflows.
              if (seconds == -1)
              {
                seconds = 59;
                minutes--;
              }
              if (minutes == -1)
              {
                minutes = 59;
                hours--;
              }
              // display
              char count_down[9];
              sprintf_P(count_down, (PGM_P)F("%02d:%02d:%02d"), hours, minutes, seconds);
              drawTextCenter(item->font, count_down, 0);
              seconds--;
              counter--;
              start = millis();
            }
            showDMDDataLine(item,item->text1,&line);
          }
        }
        break;
        case DMD_TYPE_SCROLL_COUNTUP: // count up timer
        {
          int counter = item->duration_inMS / 1000;
          unsigned long start = millis();
          dmd.selectFont(item->font);
          int hours = 0;
          int minutes = 0;
          int seconds = 0;
          int countup = 0;
          //setup line 1
          struct DMD_Data_Line line;
          setupDMDDataLine(&line,DMD_Data_Line_Type_Bounce,start,8,item->font, item->text1);
          while (countup <= counter || !line.message_full_displayed)
          {
            if (need_reset_dmd_loop_index)
            {
              break;
            }
            if (millis() - start > item->delay_inMS)
            {
              // NOTE(review): rollover at 61 with reset to 1 looks off by one
              // (rolling at 60 back to 0 would be conventional) — confirm.
              if (seconds == 61)
              {
                seconds = 1;
                minutes++;
              }
              if (minutes == 61)
              {
                minutes = 1;
                hours++;
              }
              // display
              char count_up[9];
              sprintf_P(count_up, (PGM_P)F("%02d:%02d:%02d"), hours, minutes, seconds);
              drawTextCenter(item->font, count_up, 0);
              seconds++;
              countup++;
              start = millis();
            }
            showDMDDataLine(item,item->text1,&line);
          }
        }
        break;
        default:
          break;
        }
        logln("===");
      } // end while
      anim_out(item);
    } // end for
    /*
    dmd.drawChar(0, 3, '2', GRAPHICS_NORMAL);
    dmd.drawChar(7, 3, '3', GRAPHICS_NORMAL);
    dmd.drawChar(17, 3, '4', GRAPHICS_NORMAL);
    dmd.drawChar(25, 3, '5', GRAPHICS_NORMAL);
    dmd.drawChar(15, 3, ':', GRAPHICS_OR); // clock colon overlay on
    delay(1000);
    dmd.drawChar(15, 3, ':', GRAPHICS_NOR); // clock colon overlay off
    delay(1000);
    dmd.drawChar(15, 3, ':', GRAPHICS_OR); // clock colon overlay on
    delay(1000);
    dmd.drawChar(15, 3, ':', GRAPHICS_NOR); // clock colon overlay off
    delay(1000);
    dmd.drawChar(15, 3, ':', GRAPHICS_OR); // clock colon overlay on
    delay(1000);*/
    // half the pixels on
    // dmd.drawTestPattern(PATTERN_ALT_0);
    // delay(1000);
    // the other half on
    // dmd.drawTestPattern(PATTERN_ALT_1);
    // delay(1000);
    // display some text
    // dmd.clearScreen(true);
    // dmd.selectFont(System5x7);
    // for (byte x = 0; x < DISPLAYS_ACROSS; x++)
    // {
    // for (byte y = 0; y < DISPLAYS_DOWN; y++)
    // {
    // dmd.drawString(2 + (32 * x), 1 + (16 * y), "freet", 5, GRAPHICS_NORMAL);
    // dmd.drawString(2 + (32 * x), 9 + (16 * y), "ronic", 5, GRAPHICS_NORMAL);
    // }
    // }
    // delay(2000);
    // draw a border rectangle around the outside of the display
    // dmd.clearScreen(true);
    // dmd.drawBox(0, 0, (32 * DISPLAYS_ACROSS) - 1, (16 * DISPLAYS_DOWN) - 1, GRAPHICS_NORMAL);
    // delay(1000);
    // for (byte y = 0; y < DISPLAYS_DOWN; y++)
    // {
    // for (byte x = 0; x < DISPLAYS_ACROSS; x++)
    // {
    // // draw an X
    // int ix = 32 * x;
    // int iy = 16 * y;
    // dmd.drawLine(0 + ix, 0 + iy, 11 + ix, 15 + iy, GRAPHICS_NORMAL);
    // dmd.drawLine(0 + ix, 15 + iy, 11 + ix, 0 + iy, GRAPHICS_NORMAL);
    // delay(1000);
    // // draw a circle
    // dmd.drawCircle(16 + ix, 8 + iy, 5, GRAPHICS_NORMAL);
    // delay(1000);
    // // draw a filled box
    // dmd.drawFilledBox(24 + ix, 3 + iy, 29 + ix, 13 + iy, GRAPHICS_NORMAL);
    // delay(1000);
    // }
    // }
    // // stripe chaser
    // for (b = 0; b < 20; b++)
    // {
    // dmd.drawTestPattern((b & 1) + PATTERN_STRIPE_0);
    // delay(200);
    // }
    // delay(200);
  }
}
// Spawn the DMD display task pinned to the Arduino core.
void startTaskDMD()
{
  xTaskCreatePinnedToCore(
      taskDMD,        // Function that should be called
      "Display DMD",  // Name of the task (for debugging)
      4500,           // Stack size (bytes)
      NULL,           // Parameter to pass
      1,              // Task priority
      &taskDMDHandle, // Task handle
      CONFIG_ARDUINO_RUNNING_CORE);
}
// Stop the DMD display task: halt the display loop, clear every data slot so
// nothing keeps drawing, then delete the task. Safe to call when not running.
void stopTaskDMD()
{
  if (taskDMDHandle == NULL)
    return;
  stopDMDLoop();
  for (int i = 0; i < DMD_DATA_SIZE; ++i)
  {
    logf("reset %d", i);
    resetDMDData(i);
  }
  vTaskDelete(taskDMDHandle);
  taskDMDHandle = NULL;
}
// Spawn the WiFi keep-alive task pinned to the Arduino core.
void startTaskKeepWifi(){
  xTaskCreatePinnedToCore(
      taskKeepWiFiAlive,   // Function that should be called
      "Keep WiFi Alive",   // Name of the task (for debugging)
      3200,                // Stack size (bytes)
      NULL,                // Parameter to pass
      1,                   // Task priority
      &taskKeepWiFiHandle, // Task handle
      CONFIG_ARDUINO_RUNNING_CORE);
}
// Delete the WiFi keep-alive task if it is running; no-op otherwise.
void stopTaskKeepWifi(){
  if (taskKeepWiFiHandle == NULL)
    return;
  vTaskDelete(taskKeepWiFiHandle);
  taskKeepWiFiHandle = NULL;
}
// Spawn the Firebase task pinned to the Arduino core.
// Note the unusually large stack (65000 bytes) for the Firebase client.
void startTaskFirebase(){
  xTaskCreatePinnedToCore(
      taskFirebase,        // Function that should be called
      "Firebase",          // Name of the task (for debugging)
      65000,               // Stack size (bytes)
      NULL,                // Parameter to pass
      1,                   // Task priority
      &taskFirebaseHandle, // Task handle
      CONFIG_ARDUINO_RUNNING_CORE);
}
// Delete the Firebase task if it is running; no-op otherwise.
void stopTaskFirebase(){
  if (taskFirebaseHandle == NULL)
    return;
  vTaskDelete(taskFirebaseHandle);
  taskFirebaseHandle = NULL;
}
// Spawn the wall-clock task, pinned to core 0 (unlike most other tasks here,
// which use CONFIG_ARDUINO_RUNNING_CORE).
void startTaskClock(){
  xTaskCreatePinnedToCore(
      taskClock,        // Function that should be called
      "Clock",          // Name of the task (for debugging)
      3400,             // Stack size (bytes)
      NULL,             // Parameter to pass
      1,                // Task priority
      &taskClockHandle, // Task handle
      0);
}
// Delete the clock task if it is running; no-op otherwise.
void stopTaskClock(){
  if (taskClockHandle == NULL)
    return;
  vTaskDelete(taskClockHandle);
  taskClockHandle = NULL;
}
// Spawn the prayer-schedule (Jadwal Sholat) task pinned to the Arduino core.
void startTaskJWS(){
  xTaskCreatePinnedToCore(
      taskJadwalSholat, // Function that should be called
      "<NAME>",         // Name of the task (for debugging)
      5500,             // Stack size (bytes)
      NULL,             // Parameter to pass
      1,                // Task priority
      &taskJWSHandle,   // Task handle
      CONFIG_ARDUINO_RUNNING_CORE);
}
// Spawn the masehi/hijri date task, pinned to core 0.
void startTaskDate(){
  xTaskCreatePinnedToCore(
      taskDate,        // Function that should be called
      "Date",          // Name of the task (for debugging)
      7000,            // Stack size (bytes)
      NULL,            // Parameter to pass
      1,               // Task priority
      &taskDateHandle, // Task handle
      0);
}
// Delete the prayer-schedule task if it is running; no-op otherwise.
void stopTaskJWS(){
  if (taskJWSHandle == NULL)
    return;
  vTaskDelete(taskJWSHandle);
  taskJWSHandle = NULL;
}
// Stop all tasks that read from Preferences/shared state before settings are
// rewritten (see startTasksAfterPreferencesChanged for the mirror-image restart).
void stopTasksBeforePreferencesChanged(){
  stopTaskDMD();
  stopTaskFirebase();
  stopTaskCountdownJWS();
  stopTaskWebSocketServer();
  stopTaskToggleLED();
}
// Restart the tasks stopped by stopTasksBeforePreferencesChanged, in the
// reverse order they were stopped.
void startTasksAfterPreferencesChanged(){
  startTaskToggleLED();
  startTaskWebSocketServer();
  startTaskCountdownJWS();
  startTaskFirebase();
  startTaskDMD();
}
//================================================================================
//================================== Task Toggle LED ==========================
//================================================================================
// Status-LED blink timing (milliseconds on / off) used by taskToggleLED.
uint32_t led_on_delay = 500;
uint32_t led_off_delay = 500;
// Blink the built-in status LED forever, using the configurable on/off
// delays. Runs until the task is deleted externally.
void taskToggleLED(void *parameter)
{
  // logf("LED stack size : %d", uxTaskGetStackHighWaterMark(NULL));
  while (true)
  {
    digitalWrite(built_in_led, HIGH);
    delay(led_on_delay);
    digitalWrite(built_in_led, LOW);
    delay(led_off_delay);
  }
}
// Spawn the LED-blink task (no core affinity, small stack).
void startTaskToggleLED()
{
  xTaskCreate(
      taskToggleLED, // Function that should be called
      "Toggle LED",  // Name of the task (for debugging)
      1000,          // Stack size (bytes)
      NULL,          // Parameter to pass
      1,             // Task priority
      &taskLEDHandle // Task handle
  );
}
// Delete the LED-blink task (if running) and leave the LED driven HIGH.
// Note: the digitalWrite runs even when no task was active.
void stopTaskToggleLED()
{
  if (taskLEDHandle != NULL)
  {
    vTaskDelete(taskLEDHandle);
    taskLEDHandle = NULL;
  }
  digitalWrite(built_in_led, HIGH);
}
//=====================================================================================
//================================== Task Keep WiFi Alive ==========================
//=====================================================================================
// Station-mode WiFi credentials; populated from Preferences elsewhere before
// taskKeepWiFiAlive uses them.
String ssid = "";
String password = "";
#define WIFI_TIMEOUT_MS 20000      // 20 second WiFi connection timeout
#define WIFI_RECOVER_TIME_MS 30000 // Wait 30 seconds after a failed connection attempt
// Keeps the station-mode WiFi connection alive: reconnects whenever the link
// drops, blinks the status LED while reconnecting, and re-registers mDNS
// (speaker-murottal.local) after each successful connection. Sets the global
// isWiFiReady flag other tasks wait on.
void taskKeepWiFiAlive(void *parameter)
{
  for (;;)
  {
    if (WiFi.status() == WL_CONNECTED)
    {
      delay(10000); // connection healthy; check again in 10 s
      continue;
    }
    isWiFiReady = false;
    startTaskToggleLED();
    logln("[WIFI] Connecting");
    WiFi.mode(WIFI_STA);
    WiFi.begin(ssid.c_str(), password.c_str());
    unsigned long startAttemptTime = millis();
    // Keep looping while we're not connected and haven't reached the timeout.
    // FIX: the original loop body was empty, busy-spinning the CPU for up to
    // WIFI_TIMEOUT_MS and starving other same-priority tasks; yield instead.
    while (WiFi.status() != WL_CONNECTED &&
           millis() - startAttemptTime < WIFI_TIMEOUT_MS)
    {
      delay(100);
    }
    // When we couldn't make a WiFi connection (or the timeout expired)
    // sleep for a while and then retry.
    if (WiFi.status() != WL_CONNECTED)
    {
      logln("[WIFI] FAILED");
      delay(WIFI_RECOVER_TIME_MS);
      continue;
    }
    isWiFiReady = true;
    log("Connected to ");
    logln(ssid.c_str());
    log("IP address: ");
    logln(WiFi.localIP().toString().c_str());
    if (MDNS.begin("speaker-murottal"))
    {
      logln("speaker-murottal.local is available");
    }
    logf("Keep Wifi stack size : %d", uxTaskGetStackHighWaterMark(NULL));
    stopTaskToggleLED();
  }
}
//================================================================================
//================================== Task Web Server ==========================
//================================================================================
// WiFi-manager page served at /wifi (SSID/password form + "forget wifi").
// FIX: the /forgetwifi form was never closed — added the missing </form>.
const char index_html_wifi[] PROGMEM = R"rawliteral(
<!DOCTYPE HTML>
<html>
<head>
<meta content="text/html; charset=ISO-8859-1" http-equiv="content-type">
<meta name = "viewport" content = "width = device-width, initial-scale = 1.0, maximum-scale = 1.0, user-scalable=0">
<title>AhsaiLabs Speaker Qur'an</title>
<style>
body {
background-color: #e6d8d5;
text-align: center;
}
</style>
</head>
<body>
<h1>Setting WiFi Speaker Qur'an Ahsailabs</h1>
<form action="/wifi" method="post">
<p>
<label>SSID: </label>
<input maxlength="30" name="ssid"><br>
<label>Key: </label><input maxlength="30" name="password"><br><br>
<input type="submit" value="Save">
</p>
</form>
<form action="/forgetwifi" method="post">
<p>
<input type="submit" value="Forget wifi">
</p>
</form>
</body>
</html>
)rawliteral";
// Settings page served at /setting: flash message, manual time/date (masehi
// and hijri) entry, and a brightness slider that calls /brightness via AJAX.
const char index_html_setting[] PROGMEM = R"rawliteral(
<!DOCTYPE HTML><html><head>
<title>AhsaiLabs Speaker Qur'an</title>
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="icon" href="data:,">
<style>
body {
background-color: #e6d8d5;
text-align: center;
}
.slider {
width: 300px;
}
</style>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>
</head><body>
<h1>Setting Speaker Qur'an Ahsailabs</h1>
<form action="/get-setting">
Flash Message: <input type="text" name="scrolltext" required>
<input type="submit" value="Notify">
</form><br><br><br>
<form action="/get-setting">
Current Time : <input type="time" name="time" required>
<input type="submit" value="Set Time">
</form><br><br><br>
<form action="/get-setting">
<label for="day">Current day :</label>
<select name="day" required>
<option value="0">Ahad</option>
<option value="1">Senin</option>
<option value="2">Selasa</option>
<option value="3">Rabu</option>
<option value="4">Kamis</option>
<option value="5">Jum'at</option>
<option value="6">Sabtu</option>
</select>
<br>
Current Date : <input type="date" name="date" required>
<br>
Current Hijri Day : <input type="number" name="hijri_day" min="1" max="30" required>
<br>
<label for="hijri_month">Current Hijri Month:</label>
<select name="hijri_month" required>
<option value="0">Muharram</option>
<option value="1">Shafar</option>
<option value="2">Rabiul Awwal</option>
<option value="3">Rabiul Akhir</option>
<option value="4">Jumadil Awwal</option>
<option value="5">Jumadil Akhir</option>
<option value="6">Rajab</option>
<option value="7">Sya'ban</option>
<option value="8">Ramadhan</option>
<option value="9">Syawal</option>
<option value="10">Dzulqo'dah</option>
<option value="11">Dzulhijjah</option>
</select>
<br>
Current Hijri Year: <input type="number" id="hijri_year" min="1440" name="hijri_year" required>
<br>
<input type="submit" value="Set Date">
</form><br>
<p>Brightness: <span id="brightnessPos"></span> %</p>
<input type="range" min="0" max="255" value="20" class="slider" id="brightnessSlider" onchange="brightnessChange(this.value)"/>
<script>
$.ajaxSetup({timeout:1000});
var slider = document.getElementById("brightnessSlider");
var brightnessP = document.getElementById("brightnessPos");
brightnessP.innerHTML = Math.round((slider.value/255)*100);
$.get("/brightness?level=" + slider.value);
slider.oninput = function() {
slider.value = this.value;
brightnessP.innerHTML = Math.round((this.value/255)*100);
}
function brightnessChange(pos) {
$.get("/brightness?level=" + pos);
}
</script>
</body></html>
)rawliteral";
// Log-viewer page served at /logs: opens a WebSocket to port 81 and appends
// every received message; includes a pause toggle and a reset button.
const char index_html_ws[] PROGMEM = R"rawliteral(
<!DOCTYPE html>
<html>
<head>
<title>AhsaiLabs Speaker Qur'an</title>
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<meta charset="UTF-8" />
<style>
body {
background-color: #e6d8d5;
}
h1 {
text-align: center;
}
p {
background-color: #a59999;
word-wrap: break-word;
color: #020000;
}
/* The switch - the box around the slider */
.switch {
position: relative;
display: inline-block;
width: 60px;
height: 34px;
}
/* Hide default HTML checkbox */
.switch input {
opacity: 0;
width: 0;
height: 0;
}
/* The slider */
.slider {
position: absolute;
cursor: pointer;
top: 0;
left: 0;
right: 0;
bottom: 0;
background-color: #ccc;
-webkit-transition: 0.4s;
transition: 0.4s;
}
.slider:before {
position: absolute;
content: "";
height: 26px;
width: 26px;
left: 4px;
bottom: 4px;
background-color: white;
-webkit-transition: 0.4s;
transition: 0.4s;
}
input:checked + .slider {
background-color: #2196f3;
}
input:focus + .slider {
box-shadow: 0 0 1px #2196f3;
}
input:checked + .slider:before {
-webkit-transform: translateX(26px);
-ms-transform: translateX(26px);
transform: translateX(26px);
}
/* Rounded sliders */
.slider.round {
border-radius: 34px;
}
.slider.round:before {
border-radius: 50%;
}
</style>
</head>
<body>
<h1 id="heading">Received Logs: <small>active</small></h1>
<p id="message"></p>
<button type="button" id="btn_reset">reset</button>
<label class="switch">
<input id="cb_on" type="checkbox" onclick="handleClick(this);" checked/>
<span class="slider round"></span>
</label>
</body>
<script>
var Socket;
var heading = document.getElementById("heading");
var p_message = document.getElementById("message");
var btn_reset = document.getElementById("btn_reset");
var cb_on = document.getElementById("cb_on");
btn_reset.addEventListener("click", button_reset_pressed);
function init() {
Socket = new WebSocket("ws://" + window.location.hostname + ":81/");
Socket.onmessage = function (event) {
processCommand(event);
};
}
function handleClick(cb) {
console.log("Clicked, new value = " + cb.checked);
if(cb.checked){
heading.innerHTML = "Received Logs: <small>active</small>";
} else {
heading.innerHTML = "Received Logs: <small>inactive</small>";
}
}
function processCommand(event) {
if (cb_on.checked) {
var log = event.data;
p_message.innerHTML = p_message.innerHTML + log;
console.log(log);
}
}
function button_reset_pressed() {
//Socket.send("on");
p_message.innerHTML = "";
}
window.onload = function (event) {
init();
};
</script>
</html>
)rawliteral";
// Landing page served at /: navigation links to the WiFi manager, settings,
// log viewer, and restart endpoints.
const char index_html_root[] PROGMEM = R"rawliteral(
<!DOCTYPE html>
<html>
<head>
<title>AhsaiLabs Speaker Qur'an</title>
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<meta charset="UTF-8" />
<style>
body {
background-color: #e6d8d5;
text-align: center;
}
div {
display: block;
width: 50%;
line-height: 30px;
margin: 10px auto;
background-color: aqua;
}
div a:hover {
color: white;
background-color: blueviolet;
}
div a {
display: block;
width: 100%;
color: black;
text-decoration: none;
}
</style>
</head>
<body>
<h1>Selamat datang sahabat pengguna Speaker Murottal by AhsaiLabs</h1>
<div><a href="/wifi">Wifi Manager</a></div>
<div><a href="/setting">Settings</a></div>
<div><a href="/logs">Show Logs</a></div>
<div><a href="/restart">Restart</a></div>
</body>
<script>
function init() {
}
window.onload = function (event) {
init();
};
</script>
</html>
)rawliteral";
// HTTP server instance on the default port (80).
WebServer server(80);
// Serve the landing page at /.
void handleWebRoot()
{
  server.send(200, "text/html", index_html_root);
}
void handleWebNotFound()
{
digitalWrite(built_in_led, 0);
String message = "File Not Found\n\n";
message += "URI: ";
message += server.uri();
message += "\nMethod: ";
message += (server.method() == HTTP_GET) ? "GET" : "POST";
message += "\nArguments: ";
message += server.args();
message += "\n";
for (uint8_t i = 0; i < server.args(); i++)
{
message += " " + server.argName(i) + ": " + server.arg(i) + "\n";
}
server.send(404, "text/plain", message);
digitalWrite(built_in_led, 1);
}
// Poll the HTTP server for pending clients, yielding to other tasks between
// polls.
// FIX: the original delayed 1000 ms per poll, which made every HTTP request
// take up to a second (or stall). A short delay still yields the CPU while
// keeping the server responsive.
void handleServerClient()
{
  server.handleClient();
  delay(10);
}
// Web-server task: registers all HTTP routes (/, /setting, /get-setting,
// /brightness, /restart, /wifi, /forgetwifi, /logs), starts the server, then
// polls for clients forever.
// FIX 1: /brightness replied with HTTP 404 on success — now 200.
// FIX 2: restored the `password` variable in the /wifi handler (the source
//        contained a redacted "<PASSWORD>" token where the local variable
//        obviously belongs).
void taskWebServer(void *parameter)
{
  server.on("/", handleWebRoot);
  server.on("/setting", []()
            { server.send(200, "text/html", index_html_setting); });
  // Handles the three /setting forms; each submits a different argument set.
  server.on("/get-setting", []()
            {
    if(server.hasArg("scrolltext")){
      // Flash message: heap-copied so it outlives this handler.
      String scrolltext = server.arg("scrolltext");
      char * info = getAllocatedString(scrolltext);
      showFlashMessage(info,true);
    } else if(server.hasArg("time")){
      // Manual clock set (guarded by the clock mutex).
      String time = server.arg("time");
      std::array<unsigned long, 4> timeInfo = getArrayOfTime(time.c_str());
      xSemaphoreTake(mutex_clock, portMAX_DELAY);
      h24 = timeInfo[0]; // 24 hours
      h = h24 > 12 ? h24-12 : h24;
      m = timeInfo[1];
      s = timeInfo[2];
      xSemaphoreGive(mutex_clock);
      if(!isClockReady){
        isClockManual = true;
      }
    } else if(server.hasArg("date")){
      // Manual masehi + hijri date set (guarded by the date mutex).
      String date = server.arg("date");
      std::array<uint16_t, 3> dateInfo = getArrayOfDate(date.c_str());
      xSemaphoreTake(mutex_date, portMAX_DELAY);
      day = dateInfo[2];
      month = dateInfo[1];
      year = dateInfo[0];
      weekday = server.arg("day").toInt();
      hijri_day = server.arg("hijri_day").toInt();
      hijri_month = server.arg("hijri_month").toInt();
      hijri_year = server.arg("hijri_year").toInt();
      String hijri_month_names[] = {"Muharam", "Safar", "Rabiul Awal", "Rabiul Akhir", "Jumadil Awal", "Jumadil Akhir", "Rajab", "Sya'ban", "Ramadhan", "Syawal", "Dzulqo'dah", "Dzulhijjah"};
      sprintf_P(str_hijri_date, (PGM_P)F("%d %s %d"), hijri_day,hijri_month_names[hijri_month].c_str(), hijri_year);
      xSemaphoreGive(mutex_date);
    }
    // Redirect back to the settings page.
    server.sendHeader("Location", "/setting", true);
    server.send(302, "text/plain", ""); });
  server.on("/brightness", []()
            {
    String level = server.arg("level");
    ledcWrite(0, level.toInt());
    // FIX: was 404 despite the success message.
    server.send(200, "text/plain", "ubah brigtness berhasil"); });
  server.on("/restart", []()
            {
    server.send(200, "text/plain", "restart ESP");
    ESP.restart(); });
  server.on("/wifi", []()
            {
    if (server.hasArg("ssid")&& server.hasArg("password")) {
      String ssid = server.arg("ssid");
      String password = server.arg("password");
      stopTasksBeforePreferencesChanged();
      delay(1000);
      //preferences.begin("settings", false);
      preferences.putString("ssid", ssid);
      preferences.putString("password", password);
      //preferences.end();
      server.send(200, "text/plain", "setting wifi berhasil, silakan restart");
      //ESP.restart();
    } else {
      server.send(200, "text/html", index_html_wifi);
    } });
  server.on("/forgetwifi", []()
            {
    stopTasksBeforePreferencesChanged();
    delay(1000);
    // preferences.begin("settings", false);
    preferences.remove("ssid");
    preferences.remove("password");
    // preferences.end(); });
    server.send(200, "text/plain", "forget wifi berhasil, silakan restart");
    // ESP.restart();
  });
  server.on("/logs", []()
            { server.send(200, "text/html", index_html_ws); });
  server.onNotFound(handleWebNotFound);
  server.begin();
  logln("HTTP server started");
  logf("Web Server stack size : %d", uxTaskGetStackHighWaterMark(NULL));
  for (;;)
  {
    handleServerClient();
  }
}
// Spawn the WebSocket (log streaming) task pinned to the Arduino core.
void startTaskWebSocketServer()
{
  xTaskCreatePinnedToCore(
      taskWebSocketServer,  // Function that should be called
      "Web Socket",         // Name of the task (for debugging)
      5000,                 // Stack size (bytes)
      NULL,                 // Parameter to pass
      1,                    // Task priority
      &taskWebSocketHandle, // Task handle
      CONFIG_ARDUINO_RUNNING_CORE);
}
// Delete the WebSocket task if it is running; no-op otherwise.
void stopTaskWebSocketServer()
{
  if (taskWebSocketHandle == NULL)
    return;
  vTaskDelete(taskWebSocketHandle);
  taskWebSocketHandle = NULL;
}
// Spawn the HTTP server task pinned to the Arduino core.
void startTaskWebServer()
{
  xTaskCreatePinnedToCore(
      taskWebServer,  // Function that should be called
      "Web Server",   // Name of the task (for debugging)
      5000,           // Stack size (bytes)
      NULL,           // Parameter to pass
      1,              // Task priority
      &taskWebHandle, // Task handle
      CONFIG_ARDUINO_RUNNING_CORE);
}
// Delete the HTTP server task if it is running; no-op otherwise.
void stopTaskWebServer()
{
  if (taskWebHandle == NULL)
    return;
  vTaskDelete(taskWebHandle);
  taskWebHandle = NULL;
}
// Delete the date task if it is running; no-op otherwise.
void stopTaskDate(){
  if (taskDateHandle == NULL)
    return;
  vTaskDelete(taskDateHandle);
  taskDateHandle = NULL;
}
//===========================================================================
//================================== Task Clock ==========================
//===========================================================================
// NTP configuration used by taskClock/taskDate. Google's public NTP server
// is used instead of the pool.
// const char * ntpServer = "pool.ntp.org";
const char *ntpServer = "time.google.com";
const uint8_t timezone = 7;                 // jakarta GMT+7
const long gmtOffset_sec = timezone * 3600; // in seconds
const int daylightOffset_sec = 0;
// Wall-clock task. Syncs once from NTP when WiFi is available (skipped when
// the time was set manually via the web UI), then free-runs by incrementing
// the seconds counter every ~1 s. Updates the shared h24/h/m/s globals and
// the str_clock_full display string under mutex_clock, and sets isClockReady.
void taskClock(void *parameter)
{
  isClockReady = false;
  // Wait for WiFi unless the user has already set the time manually.
  while (!isWiFiReady && !isClockManual)
  {
    logln("Task clock waiting for wifi...");
    delay(5000);
  }
  if (!isClockManual)
  {
    // Init and get the time
    configTime(gmtOffset_sec, daylightOffset_sec, ntpServer);
    struct tm timeinfo;
    while (!getLocalTime(&timeinfo) && !isClockManual)
    {
      logln("Clock : Failed to obtain time");
      delay(2000);
    }
    if (!isClockManual)
    {
      Serial.println(&timeinfo, "%A, %B %d %Y %H:%M:%S");
      // log("Day of week: ");
      // logln(&timeinfo, "%A");
      // log("Month: ");
      // logln(&timeinfo, "%B");
      // log("Day of Month: ");
      // logln(&timeinfo, "%d");
      // log("Year: ");
      // logln(&timeinfo, "%Y");
      // log("Hour: ");
      // logln(&timeinfo, "%H");
      // log("Hour (12 hour format): ");
      // logln(&timeinfo, "%I");
      // log("Minute: ");
      // logln(&timeinfo, "%M");
      // log("Second: ");
      // logln(&timeinfo, "%S");
      // strftime(timeDay,3, "%d", &timeinfo);
      // strftime(timeMonth,10, "%B", &timeinfo);
      // strftime(timeYear,5, "%Y", &timeinfo);
      h24 = timeinfo.tm_hour; // 24 hours
      h = timeinfo.tm_hour > 12 ? timeinfo.tm_hour - 12 : timeinfo.tm_hour;
      m = timeinfo.tm_min;
      s = timeinfo.tm_sec;
    }
  }
  // Manual mode is a one-shot flag: clear it once the clock is running.
  if (isClockManual)
  {
    isClockManual = false;
  }
  logf("Clock stack size : %d", uxTaskGetStackHighWaterMark(NULL));
  String type = "AM";
  for (;;)
  {
    // NOTE(review): s is incremented (and delay taken) before the mutex is
    // acquired, so a concurrent manual time-set could race this update —
    // confirm whether that window is acceptable.
    s = s + 1;
    delay(1000);
    xSemaphoreTake(mutex_clock, portMAX_DELAY);
    if (s == 60)
    {
      s = 0;
      m = m + 1;
    }
    if (m == 60)
    {
      m = 0;
      h = h + 1;
      h24 = h24 + 1;
    }
    if (h == 13)
    {
      h = 1;
    }
    if (h24 == 24)
      h24 = 0;
    // if (h24 < 12)
    // type = "AM";
    // if (h24 == 12)
    // type = "PM";
    // if (h24 > 12)
    // type = "PM";
    // log("Time : ");
    // log(timeinfo.tm_hour);
    // log(":");
    // log(m);
    // log(":");
    // logln(s);
    sprintf_P(str_clock_full, (PGM_P)F("%02d:%02d:%02d"), h24, m, s);
    isClockReady = true;
    xSemaphoreGive(mutex_clock);
  }
}
const char *getJsonData(const char *link)
{
return NULL;
}
bool isKabisat(int year)
{
bool isKabisat = false;
if (year % 4 == 0)
{
if (year % 100 == 0)
{
if (year % 400 == 0)
{
isKabisat = true;
}
else
{
isKabisat = false;
}
}
else
{
isKabisat = true;
}
}
else
{
isKabisat = false;
}
return isKabisat;
}
//===========================================================================
//================================== Task Masehi Date & Hijri Date =======
//===========================================================================
void taskDate(void *parameter)
{
isDateReady = false;
for (;;)
{
xSemaphoreTake(mutex_con, portMAX_DELAY);
{
bool isMasehiOfflineMode = false;
bool isHijriOfflineMode = false;
if (isWiFiReady)
{
// ONLINE MODE
// get masehi date
configTime(gmtOffset_sec, daylightOffset_sec, ntpServer);
struct tm timeinfo;
if (!getLocalTime(&timeinfo))
{
logln("Date : Failed to obtain time");
isMasehiOfflineMode = true;
isHijriOfflineMode = true;
}
else
{
Serial.println(&timeinfo, "%A, %B %d %Y %H:%M:%S");
xSemaphoreTake(mutex_date, portMAX_DELAY);
day = timeinfo.tm_mday;
month = timeinfo.tm_mon; // 0-11 since januari
year = timeinfo.tm_year + 1900;
weekday = timeinfo.tm_wday; // 0-6 since sunday
xSemaphoreGive(mutex_date);
// get hijri date
char link[140] = {'\0'};
sprintf_P(link, (PGM_P)F("https://www.al-habib.info/utils/calendar/pengubah-kalender-hijriyah-v7.php?the_y=%04d&the_m=%02d&the_d=%02d&the_conv=ctoh&lg=1"), year, month + 1, day);
logln(link);
WiFiClientSecure client;
HTTPClient http;
client.setInsecure();
// Your Domain name with URL path or IP address with path
http.begin(client, link);
int httpResponseCode = http.GET();
if (httpResponseCode == 200)
{
logf("Date HTTP Response code: %d", httpResponseCode);
String jsonData = http.getString();
DynamicJsonDocument doc(512);
DeserializationError error = deserializeJson(doc, jsonData);
if (error)
{
log("Date deserializeJson() failed: ");
logln(reinterpret_cast<const char *>(error.f_str()));
isHijriOfflineMode = true;
}
else
{
const char *hijri_date = doc["tanggal_hijriyah"];
xSemaphoreTake(mutex_date, portMAX_DELAY);
sprintf_P(str_hijri_date, (PGM_P)F("%s"), hijri_date);
hijri_day = doc["hijri_tanggal"];
hijri_month = doc["hijri_bulan"];
hijri_year = doc["hijri_tahun"];
xSemaphoreGive(mutex_date);
}
doc.clear();
}
else
{
logf("Date Error code: %d", httpResponseCode);
isHijriOfflineMode = true;
}
// Free resources
http.end();
}
}
else
{
isMasehiOfflineMode = true;
isHijriOfflineMode = true;
}
if (day > -1 && hijri_day > -1)
{
isDateReady = true;
}
if (isDateReady)
{
if (isMasehiOfflineMode)
{
// MASEHI OFFLINE MODE
// 31: 0,2,4,6,7,9,11
// 30: 3,5,8,10,12
// 28/29: 1
xSemaphoreTake(mutex_date, portMAX_DELAY);
day++;
if (day >= 29)
{
if (month == 1)
{
if (isKabisat(year))
{
if (day > 29)
{
day = 1;
month++;
}
}
else
{
if (day > 28)
{
day = 1;
month++;
}
}
}
else if (month == 3 || month == 5 || month == 8 || month == 10 || month == 12)
{
if (day > 30)
{
day = 1;
month++;
}
}
else if (month == 0 || month == 2 || month == 4 || month == 6 || month == 7 || month == 9 || month == 11)
{
if (day > 31)
{
day = 1;
month++;
}
}
}
if (month > 11)
{
month = 0;
year++;
}
weekday++;
if (weekday >= 7)
{
weekday = 0;
}
xSemaphoreGive(mutex_date);
}
if (isHijriOfflineMode)
{
// HIJRI OFFLINE MODE
// dont adjust here
}
// calculation
String day_names[] = {"Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jum'at", "Sabtu"};
String month_names[] = {"Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"};
memset(str_date, '\0', sizeof(char) * 26);
sprintf_P(str_date, (PGM_P)F("%s, %02d %s %02d"), day_names[weekday].c_str(), day, month_names[month].c_str(), year);
sprintf_P(str_date_full, (PGM_P)F("%s / %s"), str_date, str_hijri_date);
if (weekday == 0)
{
setupDMDAtExactRangeTime(true, DMD_DATA_FREE_INDEX, DMD_TYPE_SCROLL_STATIC, "Besok adalah puasa hari senin, silakan dipersiapkan semuanya", false, "Info PUASA", false, System5x7, 1000, 5000, 0, "09:00:00", 0, "23:59:00");
}
else if (weekday == 3)
{
setupDMDAtExactRangeTime(true, DMD_DATA_FREE_INDEX, DMD_TYPE_SCROLL_STATIC, "Besok adalah puasa hari kamis, silakan dipersiapkan semuanya", false, "Info PUASA", false, System5x7, 1000, 5000, 0, "09:00:00", 0, "23:59:00");
}
else if (weekday == 4)
{
setupDMDAtExactRangeTime(true, DMD_DATA_FREE_INDEX, DMD_TYPE_SCROLL_STATIC, "Waktunya Al Kahfi, Sholawat Nabi, Doa penghujung jumat", false, "Info", false, System5x7, 1000, 5000, 0, "18:30:00", 1, "17:30:00");
}
else if (weekday == 5)
{
setupDMDAtExactRangeTime(true, DMD_DATA_FREE_INDEX, DMD_TYPE_SCROLL_STATIC, "Waktunya Al Kahfi, Sh<NAME>abi, Doa penghujung jumat", false, "Info", false, System5x7, 1000, 5000, 0, "00:01:00", 0, "17:30:00");
}
if (hijri_day == 12 || hijri_day == 13 || hijri_day == 14)
{
setupDMDAtExactRangeTime(true, DMD_DATA_FREE_INDEX, DMD_TYPE_SCROLL_STATIC, "Besok adalah puasa ayyamul bidh, silakan dipersiapkan semuanya", false, "Info PUASA", false, System5x7, 1000, 5000, 0, "09:00:00", 0, "23:59:00");
}
}
logf("Date stack size : %d", uxTaskGetStackHighWaterMark(NULL));
}
xSemaphoreGive(mutex_con);
if (!isDateReady)
{
logln("Task date waiting for wifi...");
delay(35000); // 35 seconds
}
else
{
delayMSUntilAtTime(0, 1, 0);
}
}
}
void startTaskCountdownJWS();
void stopTaskCountdownJWS();
//=========================================================================
//================================== Task Jadwal Sholat =================
//=========================================================================
void taskJadwalSholat(void *parameter)
{
for (;;)
{
if (!isDateReady)
{
logln("Task JWS waiting for date...");
delay(10000);
continue;
}
bool isFetchSuccess = false;
xSemaphoreTake(mutex_con, portMAX_DELAY);
{
char link[100] = {'\0'};
sprintf_P(link, (PGM_P)F("https://api.myquran.com/v1/sholat/jadwal/1301/%02d/%02d/%02d"), year, month + 1, day);
logln(link);
WiFiClientSecure client;
HTTPClient http;
client.setInsecure();
// Your Domain name with URL path or IP address with path
http.begin(client, link);
int httpResponseCode = http.GET();
if (httpResponseCode == 200)
{
logf("JWS HTTP Response code: %d", httpResponseCode);
String jsonData = http.getString();
DynamicJsonDocument doc(768);
DeserializationError error = deserializeJson(doc, jsonData);
if (error)
{
log("JWS deserializeJson() failed: ");
logln(reinterpret_cast<const char *>(error.f_str()));
delay(20000);
}
else
{
JsonObject data_jadwal = doc["data"]["jadwal"];
// for testing only
// sprintf_P(data_jadwal_subuh, (PGM_P)F("%s:00"), "02:37");// "04:37"
// sprintf_P(data_jadwal_syuruk, (PGM_P)F("%s:00"), "02:42");
// sprintf_P(data_jadwal_dhuha, (PGM_P)F("%s:00"), "03:30");
// sprintf_P(data_jadwal_dzuhur, (PGM_P)F("%s:00"), "04:30");
// sprintf_P(data_jadwal_ashar, (PGM_P)F("%s:00"), "05:50");
// sprintf_P(data_jadwal_maghrib, (PGM_P)F("%s:00"), "06:39");
// sprintf_P(data_jadwal_isya, (PGM_P)F("%s:00"), "07:58");
sprintf_P(data_jadwal_subuh, (PGM_P)F("%s:00"), data_jadwal["subuh"].as<const char *>()); // "04:37"
sprintf_P(data_jadwal_syuruk, (PGM_P)F("%s:00"), data_jadwal["terbit"].as<const char *>()); // "04:37"
sprintf_P(data_jadwal_dhuha, (PGM_P)F("%s:00"), data_jadwal["dhuha"].as<const char *>()); // "04:37"
sprintf_P(data_jadwal_dzuhur, (PGM_P)F("%s:00"), data_jadwal["dzuhur"].as<const char *>());
sprintf_P(data_jadwal_ashar, (PGM_P)F("%s:00"), data_jadwal["ashar"].as<const char *>());
sprintf_P(data_jadwal_maghrib, (PGM_P)F("%s:00"), data_jadwal["maghrib"].as<const char *>());
sprintf_P(data_jadwal_isya, (PGM_P)F("%s:00"), data_jadwal["isya"].as<const char *>());
isJWSReady = true;
isFetchSuccess = true;
log("Subuh : ");
logln(data_jadwal_subuh);
log("Syuruk : ");
logln(data_jadwal_syuruk);
log("Dhuha : ");
logln(data_jadwal_dhuha);
log("Dzuhur : ");
logln(data_jadwal_dzuhur);
log("Ashar : ");
logln(data_jadwal_ashar);
log("Magrib : ");
logln(data_jadwal_maghrib);
log("Isya : ");
logln(data_jadwal_isya);
}
doc.clear();
}
else
{
logf("JWS Error code: %d", httpResponseCode);
}
// Free resources
http.end();
}
xSemaphoreGive(mutex_con);
logf("JWS stack size : %d", uxTaskGetStackHighWaterMark(NULL));
if (isFetchSuccess)
{
stopTaskCountdownJWS();
startTaskCountdownJWS();
delayMSUntilAtTime(0, 30, 0);
}
else
{
delay(180000); // 3 minutes
}
}
}
#define ALERT_COUNTUP_SHOLAT 5 * 60 * 1000 /*5 menit*/
#define ALERT_COUNTDOWN_DZIKIR 1 * 60 * 1000 /*5 menit*/
void updateHijriForFirstHalfNight()
{
// it's time to update hijri date
if (hijri_day + 1 <= 29)
{
sprintf_P(str_hijri_date, (PGM_P)F("%d%s"), hijri_day + 1, (hijri_day >= 10 ? str_hijri_date + 2 : str_hijri_date + 1));
log("New Hijri Date :");
logln(str_hijri_date);
sprintf_P(str_date_full, (PGM_P)F("%s / %s"), str_date, str_hijri_date);
}
}
void taskCountDownJWS(void *parameter)
{
for (;;)
{
if (!isJWSReady)
{
logln("Task countdown-jws waiting for jws...");
delay(10000);
continue;
}
std::array<unsigned long, 4> clock = getArrayOfTime(str_clock_full);
std::array<unsigned long, 4> subuh = getArrayOfTime(data_jadwal_subuh);
std::array<unsigned long, 4> syuruk = getArrayOfTime(data_jadwal_syuruk);
std::array<unsigned long, 4> dhuha = getArrayOfTime(data_jadwal_dhuha);
std::array<unsigned long, 4> dzuhur = getArrayOfTime(data_jadwal_dzuhur);
std::array<unsigned long, 4> ashar = getArrayOfTime(data_jadwal_ashar);
std::array<unsigned long, 4> maghrib = getArrayOfTime(data_jadwal_maghrib);
std::array<unsigned long, 4> isya = getArrayOfTime(data_jadwal_isya);
int counter = 0;
memset(type_jws, '\0', sizeof(char) * 8);
if ((clock[3] < subuh[3] && clock[3] >= 0) || (clock[3] >= isya[3] && clock[3] <= 86400))
{
sprintf_P(type_jws, (PGM_P)F("subuh"));
counter = sDistanceFromTimeToTime(clock[0], clock[1], clock[2], subuh[0], subuh[1], subuh[2]);
if (clock[3] >= isya[3] && clock[3] <= 86400)
{
updateHijriForFirstHalfNight();
}
}
else if (clock[3] < syuruk[3])
{
sprintf_P(type_jws, (PGM_P)F("syuruk"));
counter = syuruk[3] - clock[3];
// it's time to dzikir in the morning
setupDMDAtNowForLifeTime(true, DMD_DATA_FREE_INDEX, DMD_TYPE_SCROLL_STATIC, "Dzikir Pagi", false, count_down_jws, false, System5x7, 1000, ALERT_COUNTDOWN_DZIKIR, msDistanceFromNowToTime(syuruk[0], syuruk[1], syuruk[2]));
// resetDMDLoopIndex();
}
else if (clock[3] < dhuha[3])
{
sprintf_P(type_jws, (PGM_P)F("dhuha"));
counter = dhuha[3] - clock[3];
// it's time to sholat dhuha
setupDMDAtNowForLifeTime(true, DMD_DATA_FREE_INDEX, DMD_TYPE_SCROLL_STATIC, "Waktu Sholat Dhuha", false, str_clock_full, false, System5x7, 1000, 10000, (dzuhur[3] - dhuha[3] - (15 * 60)) * 1000);
// resetDMDLoopIndex();
}
else if (clock[3] < dzuhur[3])
{
sprintf_P(type_jws, (PGM_P)F("dzuhur"));
counter = dzuhur[3] - clock[3];
}
else if (clock[3] < ashar[3])
{
sprintf_P(type_jws, (PGM_P)F("ashar"));
counter = ashar[3] - clock[3];
}
else if (clock[3] < maghrib[3])
{
sprintf_P(type_jws, (PGM_P)F("maghrib"));
counter = maghrib[3] - clock[3];
// it's time to dzikir in the afternoon
setupDMDAtNowForLifeTime(true, DMD_DATA_FREE_INDEX, DMD_TYPE_SCROLL_STATIC, "Dzikir Petang", false, count_down_jws, false, System5x7, 1000, ALERT_COUNTDOWN_DZIKIR, msDistanceFromNowToTime(maghrib[0], maghrib[1], maghrib[2]));
// resetDMDLoopIndex();
if (weekday == 5)
{
setupDMDAtNowForLifeTime(true, DMD_DATA_FREE_INDEX, DMD_TYPE_SCROLL_STATIC, "Doa di akhir hari jumat", false, count_down_jws, false, System5x7, 1000, ALERT_COUNTDOWN_DZIKIR, msDistanceFromNowToTime(maghrib[0], maghrib[1], maghrib[2]));
}
}
else if (clock[3] < isya[3])
{
sprintf_P(type_jws, (PGM_P)F("isya"));
counter = isya[3] - clock[3];
updateHijriForFirstHalfNight();
}
int leftSeconds = counter;
int hours = leftSeconds / 3600;
int minutes = 0;
int seconds = 0;
if (hours > 0)
{
leftSeconds = leftSeconds % 3600;
}
minutes = leftSeconds / 60;
if (minutes > 0)
{
leftSeconds = leftSeconds % 60;
}
seconds = leftSeconds;
logf("Counter Countdown for %s : %d ==> %d - %d - %d", type_jws, counter, hours, minutes, seconds);
logf("Countdown JWS stack size : %d", uxTaskGetStackHighWaterMark(NULL));
while (counter >= 0)
{
if (seconds == -1)
{
seconds = 59;
minutes--;
}
if (minutes == -1)
{
minutes = 59;
hours--;
}
sprintf_P(count_down_jws, (PGM_P)F("%02d:%02d:%02d"), hours, minutes, seconds);
seconds--;
counter--;
delay(1000);
}
// show alert
char count_sholat_alert[30] = {0};
sprintf_P(count_sholat_alert, (PGM_P)F("Sudah masuk waktu %s"), type_jws);
setupDMDAtNowForIteration(true, DMD_DATA_FREE_INDEX, DMD_TYPE_SCROLL_STATIC, getAllocatedString(count_sholat_alert), true, str_clock_full, false, System5x7, 1000, ALERT_COUNTUP_SHOLAT, 5);
resetDMDLoopIndex();
delay(5000);
}
}
void startTaskCountdownJWS()
{
xTaskCreatePinnedToCore(
taskCountDownJWS, // Function that should be called
"Countdown Jadwal Sholat", // Name of the task (for debugging)
4500, // Stack size (bytes)
NULL, // Parameter to pass
1, // Task priority
&taskCountdownJWSHandle, // Task handle
CONFIG_ARDUINO_RUNNING_CORE);
}
void stopTaskCountdownJWS()
{
if (taskCountdownJWSHandle != NULL)
{
vTaskDelete(taskCountdownJWSHandle);
taskCountdownJWSHandle = NULL;
}
}
//=========================================================================
//================================== SPIFFS ============================
//=========================================================================
#define NASEHAT_COUNT_MAX 10
boolean appendFile(const char *text, const char *fileName, boolean overWrite)
{
size_t result = 0;
File file;
if (fileName != NULL)
{
if (!SPIFFS.exists(fileName))
{
file = SPIFFS.open(fileName, FILE_WRITE);
}
else
{
if (overWrite)
{
if (SPIFFS.remove(fileName))
{
file = SPIFFS.open(fileName, FILE_WRITE);
}
else
{
logln("file cannot be removed, why?");
}
}
else
{
file = SPIFFS.open(fileName, FILE_APPEND);
}
}
if (file)
{
if (text != NULL)
{
result = file.println(text);
}
file.close();
}
}
return result;
}
std::array<String, NASEHAT_COUNT_MAX> readFile(const char *fileName)
{
File file;
std::array<String, NASEHAT_COUNT_MAX> stringResult;
if (fileName != NULL)
{
if (SPIFFS.exists(fileName))
{
file = SPIFFS.open(fileName, FILE_READ);
if (file)
{
int count = 0;
while (file.available() && count < NASEHAT_COUNT_MAX)
{
String line = file.readStringUntil('\n');
stringResult[count] = line;
count++;
}
file.close();
}
}
}
return stringResult;
}
void listAllFiles()
{
File root = SPIFFS.open("/");
File file = root.openNextFile();
while (file)
{
log("FILE: ");
logln(file.name());
file = root.openNextFile();
}
}
//=========================================================================
//================================== Task Firebase Scheduler ===========
//=========================================================================
void setupDMDNasehat(const char *info)
{
setupDMDAtNowForLifeTime(false, DMD_DATA_FREE_INDEX, DMD_TYPE_SCROLL_STATIC, info, true, str_clock_full, false, System5x7, 1000, 10000, msDistanceFromNowToTime(23, 59, 0));
setupDMDAtNowForLifeTime(false, DMD_DATA_FREE_INDEX, DMD_TYPE_SCROLL_STATIC, str_date_full, false, str_clock_full, false, System5x7, 1000, 10000, msDistanceFromNowToTime(23, 59, 0));
setupDMDAtNowForLifeTime(false, DMD_DATA_FREE_INDEX, DMD_TYPE_SCROLL_STATIC, type_jws, false, count_down_jws, false, System5x7, 1000, 10000, msDistanceFromNowToTime(23, 59, 0));
}
// Your Firebase Project Web API Key
#define FB_API_KEY "<KEY>"
// Your Firebase Realtime database URL
#define FB_DATABASE_URL "https://custom-speaker-murottal-default-rtdb.asia-southeast1.firebasedatabase.app/"
void taskFirebase(void *parameter)
{
while (!isWiFiReady)
{
logln("Task Firebase nasehat waiting for wifi...");
delay(20000);
}
FirebaseData fbdo;
FirebaseAuth auth;
FirebaseConfig config;
std::array<String, 10> nasehatVector;
String nasehatListPath = "/app/nasehat/list";
String fuid = "";
bool isAuthenticated = false;
config.api_key = FB_API_KEY;
config.database_url = FB_DATABASE_URL;
Firebase.enableClassicRequest(fbdo, true);
fbdo.setResponseSize(8192); // minimum size is 4096 bytes
logln("------------------------------------");
logln("Firebase Sign up new user...");
xSemaphoreTake(mutex_con, portMAX_DELAY);
// Sign in to firebase Anonymously
if (Firebase.signUp(&config, &auth, "", ""))
{
logln("Firebase signup Success");
isAuthenticated = true;
fuid = auth.token.uid.c_str();
}
else
{
logf("Firebase signup Failed, %s\n", config.signer.signupError.message.c_str());
isAuthenticated = false;
}
// Assign the user sign in credentials
// auth.user.email = "<EMAIL>";
// auth.user.password = "<PASSWORD>";
// isAuthenticated = true;
// Assign the callback function for the long running token generation task, see addons/TokenHelper.h
config.token_status_callback = tokenStatusCallback;
// config.signer.tokens.legacy_token = "<KEY>";
// Initialise the firebase library
Firebase.begin(&config, &auth);
xSemaphoreGive(mutex_con);
// Firebase.reconnectWiFi(true);
logf("Firebase stack size : %d", uxTaskGetStackHighWaterMark(NULL));
// int test = 0;
for (;;)
{
boolean isFbReady = false;
xSemaphoreTake(mutex_con, portMAX_DELAY);
{
if (isWiFiReady)
{
isFbReady = Firebase.ready();
logf("Firebase ready or not ? %d", isFbReady);
if (isAuthenticated && isFbReady)
{
logln("------------------------------------");
logln("Firebase get data...");
if (Firebase.getArray(fbdo, nasehatListPath))
{
FirebaseJsonArray fbja = fbdo.jsonArray();
// appendFile(NULL,"/nasehat_firebase.txt",true);
for (size_t i = 0; i < fbja.size(); i++)
{
FirebaseJsonData result;
// result now used as temporary object to get the parse results
fbja.get(result, i);
// Print its value
logf("Array index: %d, type: %d, value: %s", i, result.type, result.to<String>().c_str());
const char *info = getAllocatedString(result.to<String>());
setupDMDNasehat(info);
// appendFile(info,"/nasehat_firebase.txt",false);
}
isFirebaseReady = true;
logln("Firebase get process...");
}
else
{
isFbReady = false;
}
logln("Firebase done data...");
}
}
if (!isFbReady && isFirebaseReady)
{
nasehatVector = readFile("/nasehat_firebase.txt");
for (int x = 0; x < NASEHAT_COUNT_MAX; x++)
{
String info = nasehatVector.at(x);
Serial.println(info);
setupDMDNasehat(info.c_str());
}
}
}
xSemaphoreGive(mutex_con);
if (!isFbReady)
{
if (!isWiFiReady)
{
delay(10000);
}
else
{
delay(60000);
}
}
else
{
delayMSUntilAtTime(1, 20, 0);
}
}
}
//=========================================================================
//================================== Task Button / Touch Handle ========
//=========================================================================
OneButton resetBtn(33, true);
void clickPressBtn(){
//stopTaskDMD();
}
void longPressBtn()
{
// remove ssid & password in preferences setting
stopTasksBeforePreferencesChanged();
//preferences.begin("settings", false);
preferences.remove("ssid");
preferences.remove("password");
//preferences.end();
logln("Please restart, remove wifi credential success");
// delay(15000);
// ESP.restart();
}
void taskButtonTouch(void *parameter)
{
logf("Button Touch stack size : %d", uxTaskGetStackHighWaterMark(NULL));
resetBtn.attachDuringLongPress(longPressBtn);
resetBtn.attachClick(clickPressBtn);
for (;;)
{
resetBtn.tick();
delay(500);
}
}
// void taskButtonTouch(void * parameter){
// logf("Button Touch stack size : %d",uxTaskGetStackHighWaterMark(NULL));
// for(;;){
// uint16_t touchValue = touchRead(33);
// bool isTouched = touchValue < 8;
// logf("Touch Value : %d", touchValue);
// if(isTouched){
// //preferences.begin("settings", false);
// preferences.remove("ssid");
// preferences.remove("password");
// //preferences.end();
// logln("Please restart, remove wifi credential success");
// // delay(15000);
// //ESP.restart();
// }
// delay(5000);
// }
// }
//=========================================================================
//================================== Main App ==========================
//=========================================================================
void setup()
{
Serial.begin(115200);
while (!Serial)
{
delay(1000);
}
// esp_pm_config_esp32_t pmConfig;
// pmConfig.light_sleep_enable = true;
// pmConfig.max_freq_mhz = 240;
// pmConfig.min_freq_mhz = 80;
// esp_err_t result = esp_pm_configure(&pmConfig);
// switch (result)
// {
// case ESP_OK:
// Serial.println("configure pm success");
// break;
// default:
// Serial.print("configure pm : ");
// Serial.println(result);
// break;
// }
// setCpuFrequencyMhz(240);
logf("Modem Sleep : %d", WiFi.getSleep());
logf("Freq CPU : %d", ESP.getCpuFreqMHz());
logf("Cores : %d", ESP.getChipCores());
logf("Chip Model : %s", ESP.getChipModel());
logf("CC : %d", ESP.getCycleCount());
logf("Free Heap : %d", ESP.getFreeHeap());
logf("Free Ram : %d", ESP.getFreePsram());
logf("SDK Version : %s", ESP.getSdkVersion());
logf("Sketch Size : %d", ESP.getSketchSize());
pinMode(built_in_led, OUTPUT);
mutex_con = xSemaphoreCreateMutex();
if (mutex_con == NULL)
{
logln("Mutex con can not be created");
}
mutex_dmd = xSemaphoreCreateMutex();
if (mutex_dmd == NULL)
{
logln("Mutex dmd can not be created");
}
mutex_clock = xSemaphoreCreateMutex();
if (mutex_clock == NULL)
{
logln("Mutex clock can not be created");
}
mutex_date = xSemaphoreCreateMutex();
if (mutex_date == NULL)
{
logln("Mutex date can not be created");
}
preferences.begin("settings", false);
// ssid = preferences.getString("ssid","3mbd3vk1d-2");
// password = preferences.getString("password","<PASSWORD>");
ssid = preferences.getString("ssid", "");
password = preferences.getString("password", "");
//preferences.end();
while (!SPIFFS.begin(true))
{
logln("An Error has occurred while mounting SPIFFS");
return;
}
isSPIFFSReady = true;
if (ssid.length() <= 0 || password.length() <= 0)
{
WiFi.mode(WIFI_AP);
IPAddress IP = {192, 168, 48, 81};
IPAddress NMask = {255, 255, 255, 0};
WiFi.softAPConfig(IP, IP, NMask);
WiFi.softAP("Speaker Murottal AP", "qwerty654321");
if (MDNS.begin("speaker-murottal"))
{
logln("speaker-murottal.local is available");
}
startTaskToggleLED();
delay(5000);
startTaskWebServer();
delay(5000);
startTaskWebSocketServer();
delay(5000);
startTaskDMD();
delay(6000);
}
else
{
startTaskKeepWifi();
delay(5000);
startTaskWebServer();
delay(5000);
startTaskWebSocketServer();
delay(5000);
startTaskClock();
delay(5000);
startTaskDate();
delay(5000);
startTaskJWS();
delay(5000);
startTaskDMD();
delay(10000);
startTaskFirebase();
delay(5000);
xTaskCreatePinnedToCore(
taskButtonTouch, // Function that should be called
"Button/Touch Action", // Name of the task (for debugging)
1500, // Stack size (bytes)
NULL, // Parameter to pass
1, // Task priority
&taskButtonTouchHandle, // Task handle
CONFIG_ARDUINO_RUNNING_CORE);
delay(5000);
}
// vTaskDelete(NULL);
}
void loop()
{
// do nothing, everything is doing in task
}<file_sep>/readme.md
# Custom Speaker Qur'an
## Config in code :
- PIN_DMD_nOE 4
- PIN_DMD_A 26
- PIN_DMD_B 19
- PIN_DMD_CLK 18
- PIN_DMD_SCLK 15
- PIN_DMD_R_DATA 23
## Todo List :
- countdown after sholat reminder fire, error => done, 11 sept 2021 01:56
- munculkan hari bulan tahun dalam clock => done, 11 sept 2021 01:56
- bug ketika setting wifi dan forget wifi dan reset wifi, saat prefereces.remove => ?
- bug ketika SPIFFS => done?
- dmd mati setelah durasi tertentu atau ketika wifi mati => ??
- animation in & out => done
| 5b3f2af90034c8df658a5e76c440f560b0493880 | [
"Markdown",
"C++"
] | 2 | C++ | ahsai001/custom-speaker-murottal | da0328012cb5dcb16dd21f5210d215bb20296341 | 86c30aa58433a229055b97037c329beacf6521c1 |
refs/heads/main | <repo_name>mtejaswi97/gitCommit<file_sep>/ChessProject/src/main/java/com/ncr/chess/ChessBoard.java
package com.ncr.chess;
public class ChessBoard {
public static int MAX_BOARD_WIDTH = 7;
public static int MAX_BOARD_HEIGHT = 7;
public static int MIN_INDEX = 0;
public static int MAX_INDEX = 6;
private Pawn[][] pieces;
public ChessBoard() {
pieces = new Pawn[MAX_BOARD_WIDTH][MAX_BOARD_HEIGHT];
}
public void addPiece(Pawn pawn, int xCoordinate, int yCoordinate, PieceColor pieceColor) {
boolean res = isLegalBoardPosition(xCoordinate, yCoordinate);
if(res) {
if(pieces[xCoordinate][yCoordinate] != null) {
int getX = pieces[xCoordinate][yCoordinate].getXCoordinate();
int getY = pieces[xCoordinate][yCoordinate].getYCoordinate();
if(getX == xCoordinate && getY == yCoordinate) {
pawn.setXCoordinate(-1);
pawn.setYCoordinate(-1);
}
}else {
pieces[xCoordinate][yCoordinate] = new Pawn(PieceColor.BLACK);
pieces[xCoordinate][yCoordinate].setXCoordinate(xCoordinate);
pieces[xCoordinate][yCoordinate].setYCoordinate(yCoordinate);
pawn.setXCoordinate(xCoordinate);
pawn.setYCoordinate(yCoordinate);
}
}else {
pawn.setXCoordinate(-1);
pawn.setYCoordinate(-1);
}
//throw new UnsupportedOperationException("Need to implement ChessBoard.add()");
}
public boolean isLegalBoardPosition(int xCoordinate, int yCoordinate)throws UnsupportedOperationException {
boolean result = true;
if(xCoordinate < MIN_INDEX || xCoordinate > MAX_INDEX) {
result = false;
}
if(yCoordinate < MIN_INDEX || yCoordinate > MAX_INDEX) {
result = false;
}
return result;
// throw new UnsupportedOperationException("Need to implement ChessBoard.IsLegalBoardPosition()");
}
}
| 090f97eae61db2c44c757ffb353cf2a1fe146508 | [
"Java"
] | 1 | Java | mtejaswi97/gitCommit | b813c559c02aeddd94c4e18d16e5daa6ab5f9673 | 23b54a0b35612fcb20cfdce4850195939454fc8f |
refs/heads/master | <repo_name>Iamdheeraj22/ConnectionTesting<file_sep>/app/src/main/java/com/example/connectiontesting/Connection.java
package com.example.connectiontesting;
import android.content.Context;
import android.net.ConnectivityManager;
import android.net.Network;
import android.net.NetworkInfo;
public class Connection {
public static boolean isNetwork(Context context) {
ConnectivityManager connectivityManager = (ConnectivityManager)
context.getSystemService(Context.CONNECTIVITY_SERVICE);
if (connectivityManager != null) {
NetworkInfo[] networkInfo = connectivityManager.getAllNetworkInfo();
if (networkInfo != null) {
for (NetworkInfo info : networkInfo) {
if (info.getState() == NetworkInfo.State.CONNECTED) {
return true;
}
}
}
}
return false;
}
}
<file_sep>/app/src/main/java/com/example/connectiontesting/NetworkListener.java
package com.example.connectiontesting;
import android.app.AlertDialog;
import android.content.BroadcastReceiver;
import android.content.Context;
import android.content.DialogInterface;
import android.content.Intent;
import android.content.IntentFilter;
import android.preference.DialogPreference;
import android.view.LayoutInflater;
import android.view.View;
import android.view.WindowManager;
import android.widget.Button;
public class NetworkListener extends BroadcastReceiver {
@Override
public void onReceive(Context context, Intent intent) {
if (!Connection.isNetwork(context)) {
AlertDialog.Builder dialog = new AlertDialog.Builder(context);
View view = LayoutInflater.from(context).inflate(R.layout.network_view, null);
dialog.setView(view);
AlertDialog alertDialog=dialog.create();
alertDialog.getWindow().setType(WindowManager.LayoutParams.
TYPE_SYSTEM_ALERT);
alertDialog.show();
dialog.setCancelable(false);
Button button = view.findViewById(R.id.retry);
button.setOnClickListener(view1 -> {
alertDialog.dismiss();
onReceive(context, intent);
});
// AlertDialog.Builder dialog = new AlertDialog.Builder(context);
// dialog.setMessage("network error");
// dialog.setPositiveButton("Retry", new DialogInterface.OnClickListener() {
// @Override
// public void onClick(DialogInterface dialogInterface, int i) {
// onReceive(context, intent);
// }
// });
//dialog.show();
}
}
}
| ddebfe50d5e7f5f78b47713294afa80a9f876385 | [
"Java"
] | 2 | Java | Iamdheeraj22/ConnectionTesting | d727e1953566c616333213e18e05f07420565bec | f2d91f58648a5d0098c4bd081be2881dbdf5a7f1 |
refs/heads/master | <file_sep>Peppered Password Hashing
=========================
Secure password hashing using HMAC before (BCrypt) Hash.
---
MIT Licence
Unless required by applicable law or agreed to in writing, software
distributed under the Licence is distributed on an "AS IS" basis,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
Contact: <EMAIL>
Latest version available at: https://gitlab.com/Netsilik/PepperedPasswords
Installation
------------
```
composer require netsilik/peppered-passwords
```
Usage
-----
**Hashing new passwords**
```
<?php
namespace My\Name\Space;
use Netsilik\Lib\PepperedPasswords;
$pepper = hex2bin(env('PEPPER')); // The binary pepper value, stored as a hexadecimal string
$hasher = new PepperedPasswords($pepper);
$hash = $hasher->hash($new_plaintext_password); // Story $hash in the user's record
```
**Verifying passwords**
```
<?php
namespace My\Name\Space;
use Netsilik\Lib\PepperedPasswords;
$pepper = hex2bin(env('PEPPER')); // The binary pepper value, stored as a hexadecimal string
$hasher = new PepperedPasswords($pepper);
if ($hasher->verify($new_plaintext_password, $hash)) { // $hash retrieved from the user's record
echo 'Password ok.';
} else {
echo 'Wrong credentials.';
}
```
<file_sep><?php
namespace Netsilik\Lib;
/**
* @package Scepino\Lib
* @copyright (c) 2010-2021 Netsilik (http://netsilik.nl)
* @license MIT
*/
/**
* Handle peppered password hashes
*/
final class PepperedPasswords
{
/**
* The algorithm to use for calculating the HMac of the password
*/
const HMAC_ALGORITHM = 'sha256';
/**
* @var string $_pepper The pepper value
*/
private $_pepper;
/**
* @var array The options for the PASSWORD_DEFAULT hashing algorithm
* See {@link https://www.php.net/password_hash} for more details
*/
private $_options;
/**
* Constructor
*
* @param string $pepper The pepper to use as the HMac key
* @param array $options The options for the PASSWORD_DEFAULT hashing algorithm
*/
public function __construct(string $pepper, array $options = [])
{
$this->_pepper = $pepper;
$this->_options = $options;
}
/**
* Calculate the peppered hash of a password
*
* @param string $password The password to calculate the hash for
*
* @return string The peppered hash of the password supplied
*/
public function hash(string $password) : string
{
return password_hash($this->_hmac($password), PASSWORD_DEFAULT, $this->_options);
}
/**
* Verify a password against its peppered hash
*
* @param string $password The password to verify
* @param string $passwordHash The password hash to verify the password against
*
* @return bool True if the password is correct, false otherwise
*/
public function verify(string $password, string $passwordHash) : bool
{
return password_verify($this->_hmac($password), $passwordHash);
}
/**
* Compute the HMac for the password
*
* @return string the HMac for the supplied password
*/
private function _hmac(string $password)
{
return hash_hmac(self::HMAC_ALGORITHM, $password, $this->_pepper, true);
}
}
| c77e9024018d05e1ec141ff0640d00731ad92ca7 | [
"Markdown",
"PHP"
] | 2 | Markdown | Netsilik/PepperedPasswords | b7d0075bc1cbcae0e6298ffa6e6a5a38930fe810 | 558810332c1125f90059204ef82a053c421129bb |
refs/heads/master | <repo_name>GustavoHCruz/PipelineSimulator<file_sep>/Pipeline.py
# Simulator state shared (as module globals) by all pipeline stages.
# "inst": number of instructions loaded, "mem"/"reg": data-memory / register-file
# sizes, "PC": program counter, "IR": instruction register (raw text),
# "clock": cycle counter, "hazard": data-hazard flag (1 stalls decode),
# "pause": set by execute() for multi-cycle ops (lw/sw/add/sub).
dictionary = {"inst":0,"mem":20,"reg":11,"PC":0,"IR":"","clock":1,"hazard":0,"pause":0}
# Data memory, register file, and the loaded program (one raw string per instruction).
memory,registers,instructions = [],[],[]
# pipeline: raw instruction text per stage slot; IBR: decoded operands passed
# between stages.  The string "NIL" marks an empty slot.
pipeline,IBR = ["NIL","NIL","NIL","NIL"],["NIL","NIL","NIL"]
# Maps label name -> index of the instruction it precedes (branch/jump targets).
labels = {}
def initialize():
    """Set up simulator state and load the program from standard input.

    Reads one instruction per line until a blank line (or end of input, so
    piped program files work too).  A line of the form ``name:`` registers a
    jump/branch label pointing at the index of the next instruction; every
    other line is stored verbatim in ``instructions``.  Memory and the
    register file are zero-filled to the sizes configured in ``dictionary``,
    and ``dictionary["inst"]`` is set to the instruction count.
    """
    global memory, registers, instructions
    memory = [0 for x in range(dictionary["mem"])]
    registers = [0 for x in range(dictionary["reg"])]
    inst = 0
    while True:
        try:
            aux = input()
        except EOFError:
            # Bug fix: a piped program file usually ends without a trailing
            # blank line; the original crashed with an uncaught EOFError here.
            break
        if len(aux) == 0:
            break
        if aux.find(":") == -1:
            inst += 1
            instructions.append(aux)
        else:
            # Label line, e.g. "loop:" -> labels["loop"] = index of the
            # instruction that follows it.
            aux = aux.replace(" ","")
            aux = aux.split(":")
            labels[aux[0]] = inst
    dictionary["inst"] = inst
def hazardControl():
cmd,dep = pipeline[1],pipeline[2]
if cmd.find(":") != -1 or cmd.find("j") != -1:
return False
cmd = cmd.replace(",","")
cmd = cmd.split(" ")
r1,r2 = cmd[2],cmd[-1]
if r1.find("r") == -1:
r1 = "NULL"
if r2.find("r") == -1:
r2 = "NULL"
if dep != "NIL" and dep.find(":") == -1:
dep = dep.replace(",","")
dep = dep.split(" ")
dep = dep[0] + dep[1]
if dep.find(r2) != -1 or dep.find(r1) != -1:
return True
return False
def fetch():
pipeline[0] = instructions[dictionary["PC"]]
dictionary["IR"] = instructions[dictionary["PC"]]
dictionary["PC"] += 1
def decode():
if dictionary["hazard"] == 0:
IBR[0] = dictionary["IR"]
if IBR[0].find(":") == -1:
IBR[0] = IBR[0].replace(",","")
IBR[0] = IBR[0].replace("\\","")
IBR[0] = IBR[0].replace("$","")
IBR[0] = IBR[0].replace("r","")
IBR[0] = IBR[0].replace("[","")
IBR[0] = IBR[0].replace("]","")
IBR[0] = IBR[0].split(" ")
if len(IBR[0]) != 2:
IBR[0][1] = int(IBR[0][1])
IBR[0][2] = int(IBR[0][2])
if IBR[0][0] != "beq":
IBR[0][-1] = int(IBR[0][-1])
else:
IBR[0] = IBR[0].split(":")
IBR[0] = IBR[0][1]
if hazardControl():
dictionary["hazard"] = 1
else:
dictionary["hazard"] = 0
def execute():
result = 0
if len(IBR[1]) == 2:
result = labels[IBR[1][1]]
elif len(IBR[1]) > 2:
r1,r2,r3 = IBR[1][1],IBR[1][2],IBR[1][-1]
if IBR[1][0] == "lw":
result = memory[r3]
dictionary["pause"] = 1
elif IBR[1][0] == "sw":
result = registers[r1]
dictionary["pause"] = 1
elif IBR[1][0] == "li":
result = r3
elif IBR[1][0] == "move":
result = registers[r2]
elif IBR[1][0] == "add":
result = registers[r2] + registers[r3]
dictionary["pause"] = 1
elif IBR[1][0] == "addi":
result = registers[r2] + r3
elif IBR[1][0] == "sub":
result = registers[r2] - registers[r3]
dictionary["pause"] = 1
elif IBR[1][0] == "subi":
result = registers[r2] - r3
elif IBR[1][0] == "beq":
if registers[r1] == registers[r2]:
result = labels[r3]
else:
result = -1
return result
def write(result):
global IBR,pipeline
if len(IBR[2]) > 1:
if len(IBR[2]) == 2:
dictionary["PC"] = result
pipeline,IBR = ["NIL","NIL","NIL","NIL"],["NIL","NIL","NIL"]
dictionary["pause"],dictionary["hazard"] = 0,0
else:
r1,r2,r3 = IBR[2][1],IBR[2][2],IBR[2][-1]
if IBR[2][0] == "sw":
memory[r2] = result
elif IBR[2][0] == "beq":
if result != -1:
dictionary["PC"] = result
pipeline,IBR = ["NIL","NIL","NIL","NIL"],["NIL","NIL","NIL"]
dictionary["pause"],dictionary["hazard"] = 0,0
else:
registers[r1] = result
def printPipeline():
print("\n============Current Clock:",dictionary["clock"],"============")
print("PC:",dictionary["PC"])
print("Data Memory:\n",memory)
print("Registers Memory:\n",registers[1:])
print("Fetch:\t",pipeline[0])
print("Decode:\t",pipeline[1])
print("Execute:",pipeline[2])
print("Write:\t",pipeline[3])
print("==========================================")
dictionary["clock"] += 1
def simulate():
while True:
printPipeline()
if dictionary["pause"] == 0:
IBR[2] = IBR[1]
pipeline[3] = pipeline[2]
if pipeline[3] != "NIL":
write(result)
if dictionary["hazard"] == 0:
IBR[1] = IBR[0]
pipeline[2] = pipeline[1]
if pipeline[2] != "NIL":
result = execute()
if dictionary["hazard"] == 0:
pipeline[1] = pipeline[0]
if pipeline[1] != "NIL":
decode()
if dictionary["PC"] < len(instructions):
fetch()
else:
pipeline[0] = "NIL"
else:
pipeline[1] = "NIL"
decode()
else:
pipeline[2] = "NIL"
if pipeline[1] != "NIL":
decode()
else:
pipeline[3] = "NIL"
dictionary["pause"] = 0
if (pipeline[3] == "NIL") and (pipeline[2] == "NIL") and (pipeline[1] == "NIL") and (pipeline[0] == "NIL") and (dictionary["PC"] >= len(instructions)):
break
printPipeline()
initialize()
simulate()<file_sep>/README.md
Implementation of a simple Pipeline simulator in Python. | 859e7b2bd2f9b6fe6b3f86b8a471f9179bc98a92 | [
"Markdown",
"Python"
] | 2 | Python | GustavoHCruz/PipelineSimulator | 927097990e436f3bc7171d22b40c51c2ae6bb593 | d11aa02ce7cb545a1ca795261b79a58f62a92c44 |
refs/heads/master | <repo_name>uralnoyanozcan/ElectronicShopProject<file_sep>/WebApplication1/Pages/ShoppingCart.aspx.cs
using Microsoft.AspNet.Identity;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Web;
using System.Web.UI;
using System.Web.UI.WebControls;
using WebApplication1.App_Code;
using WebApplication1.App_Code.Models;
namespace WebApplication1.Pages
{
public partial class ShoppingCart : System.Web.UI.Page
{
protected void Page_Load(object sender, EventArgs e)
{
string userId = User.Identity.GetUserId();
GetPurchacesInCart(userId);
}
private void GetPurchacesInCart(string userId)
{
CartModel model = new CartModel();
double subTotal = 0;
List<Cart> pList = model.GetOrdesInCart(userId);
CreateShopTable(pList,out double subTotal);
double vat = subTotal*0.21;
}
}
}<file_sep>/WebApplication1/App_Code/Models/ProductModel.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Web;
namespace WebApplication1.App_Code.Models
{
public class ProductModel
{
public string InsertProduct(Products product)
{
try
{
Model db = new Model();
db.Product.Add(product);
db.SaveChanges();
return product.Name + " successfully inserted";
}
catch (Exception e)
{
return "Error:" + e;
}
}
public string UpdateProduct(int id,Products product)
{
try
{
Model db = new Model();
Products p = db.Product.Find(id);
p.Name = product.Name;
p.Price = product.Price;
p.Description = product.Description;
p.Image = product.Image;
db.SaveChanges();
return product.Name + " successfully updated";
}
catch (Exception e)
{
return "Error:" + e;
}
}
public string DeleteProduct(int id, Products product)
{
try
{
Model db = new Model();
Products p = db.Product.Find(id);
db.Product.Attach(product);
db.Product.Remove(product);
db.SaveChanges();
return product.Name + " successfully deleted";
}
catch (Exception e)
{
return "Error:" + e;
}
}
public Products GetProduct(int id)
{
try
{
using (Model db = new Model())
{
Products product = db.Product.Find(id);
return product;
}
}
catch (Exception)
{
return null;
}
}
public List<Products> getAllProducts()
{
try
{
using (Model db = new Model())
{
List<Products> products = (from x in db.Product select x).ToList();
return products;
}
}
catch (Exception)
{
return null;
}
}
public List<Products> GetProductsByType(int typeId)
{
try
{
using (Model db = new Model())
{
List<Products> products = (from x in db.Product where x.TypeID == typeId select x).ToList();
return products;
}
}
catch (Exception)
{
return null;
}
}
}
}<file_sep>/WebApplication1/App_Code/Models/CartModel.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Web;
namespace WebApplication1.App_Code.Models
{
public class CartModel
{
public string InsertCart(Cart cart)
{
try
{
Model db = new Model();
db.Cart.Add(cart);
db.SaveChanges();
return cart.Date + " successfully inserted";
}
catch (Exception e)
{
return "Error:" + e;
}
}
public string UpdateCart(int id, Cart cart)
{
try
{
Model db = new Model();
Cart p = db.Cart.Find(id);
p.Date = cart.Date;
p.ID = cart.ID;
p.IsInCart = cart.IsInCart;
p.Product = cart.Product;
p.ProductID = cart.ProductID;
p.ClientID = cart.ClientID;
p.Amount = cart.Amount;
db.SaveChanges();
return cart.Date + " successfully updated";
}
catch (Exception e)
{
return "Error:" + e;
}
}
public string DeleteCart(int id, Cart cart)
{
try
{
Model db = new Model();
Cart p = db.Cart.Find(id);
db.Cart.Attach(cart);
db.Cart.Remove(cart);
db.SaveChanges();
return cart.Date + " successfully deleted";
}
catch (Exception e)
{
return "Error:" + e;
}
}
}
}<file_sep>/WebApplication1/App_Code/Model.cs
namespace WebApplication1.App_Code
{
using System;
using System.Data.Entity;
using System.ComponentModel.DataAnnotations.Schema;
using System.Linq;
public partial class Model : DbContext
{
public Model()
: base("name=Model1")
{
}
public virtual DbSet<Cart> Cart { get; set; }
public virtual DbSet<Products> Product { get; set; }
public virtual DbSet<ProductType> ProductType { get; set; }
protected override void OnModelCreating(DbModelBuilder modelBuilder)
{
modelBuilder.Entity<Products>()
.Property(e => e.Price)
.HasPrecision(18, 0);
modelBuilder.Entity<Products>()
.Property(e => e.Description)
.IsUnicode(false);
modelBuilder.Entity<ProductType>()
.HasMany(e => e.Product)
.WithRequired(e => e.ProductType)
.HasForeignKey(e => e.TypeID);
}
}
}
<file_sep>/WebApplication1/Pages/Account/Register.aspx.cs
using System;
using System.Linq;
using System.Web;
using Microsoft.AspNet.Identity;
using Microsoft.AspNet.Identity.EntityFramework;
using Microsoft.Owin.Security;
using WebApplication1.App_Code;
using WebApplication1.App_Code.Models;
namespace WebApplication1.Pages.Account
{
public partial class Login : System.Web.UI.Page
{
protected void Page_Load(object sender, EventArgs e)
{
}
protected void Button1_Click(object sender, EventArgs e)
{
if (txtPassword.Text == txtConfirmPassword.Text)
{
UserModel1 db = new UserModel1();
UserTable user = new UserTable();
user.userName = txtUserName.Text;
user.password = <PASSWORD>;
db.UserTable.Add(user);
db.SaveChanges();
lblResult.Text = "succesfully registered";
Response.Write("You can log in now");
Response.Redirect("/Pages/Account/Login.aspx");
}
else
{
lblResult.Text = "Passwords do not match";
}
}
}
}<file_sep>/WebApplication1/App_Code/Cart.cs
namespace WebApplication1.App_Code
{
using System;
using System.Collections.Generic;
using System.ComponentModel.DataAnnotations;
using System.ComponentModel.DataAnnotations.Schema;
using System.Data.Entity.Spatial;
//[Table("Cart")]
public partial class Cart
{
public int ID { get; set; }
public int ClientID { get; set; }
public int ProductID { get; set; }
public int Amount { get; set; }
public DateTime Date { get; set; }
public bool IsInCart { get; set; }
public virtual Products Product { get; set; }
}
}
<file_sep>/WebApplication1/App_Code/Models/ProductTypeModel.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Web;
namespace WebApplication1.App_Code.Models
{
public class ProductTypeModel
{
public string InsertProductType(ProductType productType)
{
try
{
Model db = new Model();
db.ProductType.Add(productType);
db.SaveChanges();
return productType.Name + " successfully inserted";
}
catch (Exception e)
{
return "Error:" + e;
}
}
public string UpdateProductType(int id, ProductType productType)
{
try
{
Model db = new Model();
ProductType p = db.ProductType.Find(id);
p.Name = productType.Name;
db.SaveChanges();
return productType.Name + " successfully updated";
}
catch (Exception e)
{
return "Error:" + e;
}
}
public string DeleteProductType(int id, ProductType productType)
{
try
{
Model db = new Model();
ProductType p = db.ProductType.Find(id);
db.ProductType.Attach(productType);
db.ProductType.Remove(productType);
db.SaveChanges();
return productType.Name + " successfully deleted";
}
catch (Exception e)
{
return "Error:" + e;
}
}
}
}<file_sep>/WebApplication1/Pages/Account/Login.aspx.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Web;
using System.Web.UI;
using System.Web.UI.WebControls;
using WebApplication1.App_Code;
namespace WebApplication1.Pages.Account
{
public partial class Register : System.Web.UI.Page
{
protected void Page_Load(object sender, EventArgs e)
{
}
protected void btnSubmit_Click(object sender, EventArgs e)
{
UserModel1 db = new UserModel1();
List<UserTable> users = getAllUsers();
foreach(UserTable user in users)
{
if (user.userName == txtUsername.Text)
{
string enteredPassword = "" + txtPasswordLog.Text;
string userPassword = "" + user.password.TrimEnd();
if(string.Equals(userPassword, enteredPassword))
{
lblResult.Text = "Log in successful!";
Session["user"] = user.userID;
Response.Redirect("/Index.aspx");
return;
}
else
{
lblResult.Text = userPassword.Length +" "+ enteredPassword.Length;
return;
}
}
}
lblResult.Text = "user not found";
}
public List<UserTable> getAllUsers()
{
try
{
using (UserModel1 db = new UserModel1())
{
List<UserTable> users = (from x in db.UserTable select x).ToList();
return users;
}
}
catch (Exception)
{
return null;
}
}
}
}<file_sep>/WebApplication1/App_Code/UserModel1.cs
namespace WebApplication1.App_Code
{
using System;
using System.Data.Entity;
using System.ComponentModel.DataAnnotations.Schema;
using System.Linq;
public partial class UserModel1 : DbContext
{
public UserModel1()
: base("name=UserModel1")
{
}
public virtual DbSet<UserTable> UserTable { get; set; }
protected override void OnModelCreating(DbModelBuilder modelBuilder)
{
modelBuilder.Entity<UserTable>()
.Property(e => e.password)
.IsFixedLength();
}
}
}
<file_sep>/README.txt
The first time you open the program you will see the products on the page.
You can add them to the basket, you can change the amount.
There are Login and Register sections on this page. If you are a member you are logged in or you can create a new record. | 02b18d432f3f1b581b109d9e61c1021c4f837244 | [
"C#",
"Text"
] | 10 | C# | uralnoyanozcan/ElectronicShopProject | 68363027192dc8e499291d3aa99138ecdf9a58d3 | 58a6157e77a85d1a773559b64489c18d943fb4f2 |
refs/heads/main | <repo_name>w-hosinski/Programming-Challenges<file_sep>/leetcode/Two Sum.js
const twoSum = (nums,target) => {
let deleteCounter = 0
while (nums.length>1){
for(i=1;i<nums.length;i++){
if(nums[0]+nums[i] == target) return [deleteCounter,i+deleteCounter]
}
deleteCounter++
nums.shift()
}
}<file_sep>/leetcode/First Bad Version.js
var solution = function(isBadVersion) {
return function(n) {
let first = 1
let last = n
let middle = ~~n/2
while (first <= last) {
middle = ~~((first+last)/2)
if(isBadVersion(middle)) last = middle - 1
else first = middle + 1
}
return first
}
}<file_sep>/leetcode/Rotate Array.js
const rotate = (nums, k) => {
let endPiece = nums.splice(-k,k)
nums.splice(0,0,...endPiece)
return nums
}
console.log(rotate([1,2,3,4,5,6,7],2))<file_sep>/leetcode/Maximum Subarray.js
const maxSubArray = nums => {
let tempSum = 0;
let largestSum = nums[0];
for(let n of nums) {
tempSum = Math.max(tempSum + n, n)
largestSum = Math.max(largestSum, tempSum);
}
return largestSum
}<file_sep>/Project Euler/Multiples of 3 and 5.js
const multiplesOf3and5 = n => {
let result = 0
for(let i = 3; i<n; i+=3) i%15 ? result+=i: null
for(let j = 5; j<n; j+=5) result+=j
return result
}
console.log(`multiplesOf3and5 für 1000 ist ${multiplesOf3and5(1000)}`)<file_sep>/Codeinterview-Übungen/index.js
let list = document.getElementById("list")
function counter() {
for(let i=1; i<101;i++) {
if(i%15===0) list.innerHTML += `<div>fizzbuzz</div>`
else if(i%5===0) list.innerHTML += `<div>buzz</div>`
else if(i%3===0) list.innerHTML += `<div>fizz</div>`
else list.innerHTML += `<div>${i}</div>`
}
}
function add(...args){
return args.reduce((a,b) => a+b,0)
}
function stringIncludes(a,b){
return a.toLowerCase().includes(b.toLowerCase())
}
function getNames(...objects) {
let nameArr = []
let names = objects.forEach((el) => {
if (el.name != undefined) nameArr.push(el.name)
})
return nameArr
}
function getLargestNumberIndex(arr) {
let largestNum = arr.reduce((a,b) => Math.max(a,b),0)
return arr.indexOf(largestNum)
}
function greatestCommonDivisor(a,b) {
for (let i = Math.max(a,b)/2; i>1; i--) {
if(a%i==0 && b%i==0) return i
}
return 1
}
<file_sep>/Project Euler/Even Fibonacci Numbers.js
const fiboEvenSum = n => {
let i = 1
let j = 2
let result = 2
if(n<2) return 0
while (i+j<=n) {
(i+j)%2 == 0 ? result+=i+j : null
i<j ? i = i+j : j = i+j
}
return result
}
console.log(`fiboEvenSum für 1000 ist ${fiboEvenSum(1000)}`)<file_sep>/Algorithms/Find the Symmetric Difference.js
/* Symmetrische Differenz ist wie ein XOR, es kommen nur die Elemente durch, welche genau ein Mal in zwei Arrays vorkommen.
Da es eine binäre operation ist, werden bei mehr als 2 (n) Arrays als input n-1 operationen gebraucht
1. Die ersten zwei Arrays variblen zuweisen
2. Duplikate entfernen
3. Alles in ein array combinieren und sortieren
4. Nach duplikaten suchen und diese entfernen
5. Falls es mehr als 2 Arrays sind: Gefiltertes Array der ersten Array variable zuweisen und dem nächsten Array die zweite varible
*/
const sym = arrs => {
let firstArr = arrs[0]
let sortedArr = []
for (let arrCounter=1; arrCounter<arrs.length; arrCounter++) {
let secondArr = arrs[arrCounter]
firstArr = [...new Set(firstArr)]
secondArr = [...new Set(secondArr)]
let combinedArrs = firstArr.concat(secondArr)
sortedArr = combinedArrs.sort()
for(let i=0; i<sortedArr.length-2; i++) {
if(sortedArr[i] == sortedArr[i+1]) {
sortedArr.splice(i,2)
i--
}
}
firstArr = sortedArr
}
return sortedArr
}
console.log("sym von [[1,2,3,2],[1,2,2,2,2,5],[1,2,3]] ist:")
console.log(sym([[1,2,3,2],[1,2,2,2,2,5],[1,2,3]]))
<file_sep>/leetcode/Fibonacci Number.js
const fib = n => {
let i = 0
let j = 1
if(n==0) return 0
if(n==1) return 1
for(k=1;k<n;k++){
if(i<j) i = i+j
else j = i+j
}
return Math.max(i,j)
}<file_sep>/Project Euler/Largest prime factor.js
const isPrime = n => {
for(let i = 2, s = Math.sqrt(n); i<=s; i++)
if(n%i == 0) return false;
return true;
}
const largestPrimeFactor = n => {
if (n==1) return null
if (isPrime(n)) return n
let primeArr = []
for(let i=2; i<=n/2; i++)
if (isPrime(i)) primeArr.unshift(i)
for (let prime of primeArr)
if (n%prime == 0) return prime
}
console.log(`largestPrimeFactor für 1000 ist ${largestPrimeFactor(1000)}`)<file_sep>/leetcode/Merge Sorted Array.js
const merge = (nums1, m, nums2, n) => {
nums1.splice(m,n,...nums2)
nums1.sort((a,b) => a-b)
return nums1
}
console.log(merge([1,2,3,0,0,0],3,[7,8,9],3))<file_sep>/leetcode/N-th Tribonacci Number.js
const tribonacci = n => {
let i = 0
let j = 1
let k = 1
if(n==0) return 0
if(n==1) return 1
for(let l=2;l<n;l++) {
if(i==Math.min(i,j,k)) i = i+j+k
else if(j==Math.min(i,j,k)) j = i+j+k
else k = i+j+k
}
return Math.max(i,j,k)
}
console.log(tribonacci(4))<file_sep>/Codeinterview-Übungen/clickCounter.js
document.getElementById("counterButton").addEventListener("click",counter)
let incrementer = 0
function counter() {
incrementer++
document.getElementById("display").innerText = incrementer
}<file_sep>/leetcode/Search Insert Position.js
const searchInsert = (nums,target) => {
let first = 0
let last = nums.length - 1
while (first <= last) {
let middle = ~~((first+last)/2)
if(nums[middle] == target) return middle
if(nums[middle]<target) first = middle+1
else last = middle-1
}
return first
}<file_sep>/leetcode/Roman to Integer.js
const romanToInt = s => {
let arr = s.split("")
let sum = 0
let specChars = ["IV","IX","XL","XC","CD","CM",4,9,40,90,400,900]
for(let i = 0; i<6; i++) {
if(s.indexOf(specChars[i]) != -1){
arr.splice(s.indexOf(specChars[i]),2)
console.log(arr)
sum += specChars[i+6]
}
}
for(num of arr) {
switch(num) {
case "I": sum++
break
case "V": sum+=5
break
case "X": sum+=10
break
case "L": sum+=50
break
case "C": sum+=100
break
case "D": sum+=500
break
case "M": sum+=1000
break
}
}
return sum
}
romanToInt("IV") | 9db73192d96962454f8fa8684143cec7116b3b36 | [
"JavaScript"
] | 15 | JavaScript | w-hosinski/Programming-Challenges | 26994d802db2f7727e770b0daeb56aece3fad58c | e0bc3f0cf3973221d0c951aa2ec814e954b4fce3 |
refs/heads/master | <repo_name>jhonybasha/Prism_Xamarin_Sample<file_sep>/ISuitePro.ERP.Digital.CxUI/ISuitePro.ERP.Digital.CxUI/NavigationItem.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Input;
namespace ISuitePro.ERP.Digital.CxUI
{
public class NavigationItem
{
public string Text { get; private set; }
public string Icon { get; private set; }
public ICommand Command { get; private set; }
public NavigationItem(string text, string icon, ICommand command)
{
Text = text;
Icon = icon;
Command = command;
}
}
}
<file_sep>/ISuitePro.ERP.Digital.CxUI/ISuitePro.ERP.Digital.CxUI/Views/MainPage.xaml.cs
using System;
using Xamarin.Forms;
using ISuitePro.ERP.Digital.CxUI;
namespace ISuitePro.ERP.Digital.CxUI.Views
{
public partial class MainPage : MasterDetailPage
{
public MainPage()
{
InitializeComponent();
}
async void OnToolbarItemClicked(object sender, EventArgs e)
{
//await DisplayAlert(WindowsPlatformSpecificsHelpers.Title, WindowsPlatformSpecificsHelpers.Message, WindowsPlatformSpecificsHelpers.Dismiss);
await DisplayAlert("Alert", "Toolbar item clicked", "Ok");
}
}
}<file_sep>/ISuitePro.ERP.Digital.CxUI/ISuitePro.ERP.Digital.CxUI/Views/CollapseWidthAdjusterContentView.xaml.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using Xamarin.Forms;
using Xamarin.Forms.Xaml;
using Xamarin.Forms.PlatformConfiguration;
using Xamarin.Forms.PlatformConfiguration.WindowsSpecific;
namespace ISuitePro.ERP.Digital.CxUI.Views
{
[XamlCompilation(XamlCompilationOptions.Compile)]
public partial class CollapseWidthAdjusterContentView : ContentView
{
public static readonly BindableProperty ParentPageProperty = BindableProperty.Create("ParentPage", typeof(Xamarin.Forms.MasterDetailPage), typeof(CollapseWidthAdjusterContentView), null, propertyChanged: OnParentPagePropertyChanged);
public Xamarin.Forms.MasterDetailPage ParentPage
{
get { return (Xamarin.Forms.MasterDetailPage)GetValue(ParentPageProperty); }
set { SetValue(ParentPageProperty, value); }
}
public CollapseWidthAdjusterContentView()
{
InitializeComponent();
}
void OnChangeButtonClicked(object sender, EventArgs e)
{
double width;
if (double.TryParse(entry.Text, out width))
{
ParentPage.On<Windows>().CollapsedPaneWidth(width);
}
}
static void OnParentPagePropertyChanged(BindableObject element, object oldValue, object newValue)
{
if (newValue != null)
{
var instance = element as CollapseWidthAdjusterContentView;
instance.entry.Text = instance.ParentPage.On<Windows>().CollapsedPaneWidth().ToString();
}
}
}
}<file_sep>/ISuitePro.ERP.Digital.CxUI/ISuitePro.ERP.Digital.CxUI/Views/ToolbarPlacementChangerContentView.xaml.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using Xamarin.Forms;
using Xamarin.Forms.Xaml;
using Xamarin.Forms.PlatformConfiguration;
using Xamarin.Forms.PlatformConfiguration.WindowsSpecific;
namespace ISuitePro.ERP.Digital.CxUI.Views
{
[XamlCompilation(XamlCompilationOptions.Compile)]
public partial class ToolbarPlacementChangerContentView : ContentView
{
public static readonly BindableProperty ParentPageProperty = BindableProperty.Create("ParentPage", typeof(Xamarin.Forms.Page), typeof(ToolbarPlacementChangerContentView), null, propertyChanged: OnParentPagePropertyChanged);
public Xamarin.Forms.Page ParentPage
{
get { return (Xamarin.Forms.Page)GetValue(ParentPageProperty); }
set { SetValue(ParentPageProperty, value); }
}
public ToolbarPlacementChangerContentView()
{
InitializeComponent();
PopulatePicker();
}
void PopulatePicker()
{
var enumType = typeof(ToolbarPlacement);
var placementOptions = Enum.GetNames(enumType);
foreach (string option in placementOptions)
{
picker.Items.Add(option);
}
}
void OnPickerSelectedIndexChanged(object sender, EventArgs e)
{
ParentPage.On<Windows>().SetToolbarPlacement((ToolbarPlacement)Enum.Parse(typeof(ToolbarPlacement), picker.Items[picker.SelectedIndex]));
}
static void OnParentPagePropertyChanged(BindableObject element, object oldValue, object newValue)
{
if (newValue != null)
{
var enumType = typeof(ToolbarPlacement);
var instance = element as ToolbarPlacementChangerContentView;
instance.picker.SelectedIndex = Array.IndexOf(Enum.GetNames(enumType), Enum.GetName(enumType, instance.ParentPage.On<Windows>().GetToolbarPlacement()));
}
}
}
}<file_sep>/ISuitePro.ERP.Digital.CxUI/ISuitePro.ERP.Digital.CxUI/Views/MenuPage.xaml.cs
using System;
using System.Collections.Generic;
using System.Collections.ObjectModel;
using System.Linq;
using Xamarin.Forms;
namespace ISuitePro.ERP.Digital.CxUI.Views
{
public partial class MenuPage : ContentPage
{
public static ObservableCollection<string> items { get; set; }
public MenuPage()
{
items = new ObservableCollection<string>() { "speaker", "pen", "lamp", "monitor", "bag", "book", "cap", "tote", "floss", "phone" };
InitializeComponent();
var colors = new List<object>();
for (int i = 0; i < 16; i++)
{
var c = 200 - 10 * i;
colors.Add(new { Color = Color.FromRgb(c, c, c) });
};
listView.ItemsSource = colors;
}
private void Button1_OnClicked(object sender, EventArgs e)
{
DisplayAlert("Button Clicked", "Button1", "Ok");
}
private void Button2_OnClicked(object sender, EventArgs e)
{
DisplayAlert("Button Clicked", "Button2", "Ok");
}
private void Button3_OnClicked(object sender, EventArgs e)
{
DisplayAlert("Button Clicked", "Button3", "Ok");
}
void OnSelection(object sender, SelectedItemChangedEventArgs e)
{
if (e.SelectedItem == null)
{
return; //ItemSelected is called on deselection, which results in SelectedItem being set to null
}
DisplayAlert("Item Selected", e.SelectedItem.ToString(), "Ok");
//comment out if you want to keep selections
ListView lst = (ListView)sender;
lst.SelectedItem = null;
}
void OnRefresh(object sender, EventArgs e)
{
var list = (ListView)sender;
//put your refreshing logic here
var itemList = items.Reverse().ToList();
items.Clear();
foreach (var s in itemList)
{
items.Add(s);
}
//make sure to end the refresh state
list.IsRefreshing = false;
}
void OnTap(object sender, ItemTappedEventArgs e)
{
DisplayAlert("Item Tapped", e.Item.ToString(), "Ok");
}
void OnMore(object sender, EventArgs e)
{
var item = (MenuItem)sender;
DisplayAlert("More Context Action", item.CommandParameter + " more context action", "OK");
}
void OnDelete(object sender, EventArgs e)
{
var item = (MenuItem)sender;
items.Remove(item.CommandParameter.ToString());
}
}
}
<file_sep>/ISuitePro.ERP.Digital.CxUI/ViewModels/HomePageViewModel.cs
using Prism.Commands;
using Prism.Mvvm;
using Prism.Navigation;
using System;
using System.Collections.Generic;
using System.Collections.ObjectModel;
using System.Linq;
using Xamarin.Forms;
namespace ISuitePro.ERP.Digital.CxUI.ViewModels
{
public class MenuLink
{
public string Title { get; set; }
public string IconName { get; set; }
}
public class HomePageViewModel : BindableBase, INavigationAware
{
private ObservableCollection<MenuLink> _menuItems;
private INavigationService _navigationService;
public ObservableCollection<MenuLink> MenuItems
{
get { return _menuItems; }
set { SetProperty(ref _menuItems, value); }
}
public DelegateCommand HomeCommand { get; set; }
private DelegateCommand<ItemTappedEventArgs> _goToDetailPage;
public DelegateCommand<ItemTappedEventArgs> GoToDetailPage
{
get
{
if (_goToDetailPage == null)
{
_goToDetailPage = new DelegateCommand<ItemTappedEventArgs>(async selected =>
{
NavigationParameters param = new NavigationParameters();
param.Add("show", selected.Item);
await _navigationService.NavigateAsync("DetailsPage", param);
});
}
return _goToDetailPage;
}
}
public HomePageViewModel(INavigationService navigationService)
{
_navigationService = navigationService;
HomeCommand = new DelegateCommand(Home);
}
private void Home()
{
//TODO: call service
}
public void OnNavigatingTo(NavigationParameters parameters)
{
}
public void OnNavigatedFrom(NavigationParameters parameters)
{
}
public void OnNavigatedTo(NavigationParameters parameters)
{
if (MenuItems == null || MenuItems.Count == 0)
{
//var result = _apiService.GetMainMenu();
var menu = new List<MenuLink>();
menu.Add(new MenuLink { Title = "Apprisal Year" });
menu.Add(new MenuLink { Title = "Employee Level" });
menu.Add(new MenuLink { Title = "Leave Type" });
menu.Add(new MenuLink { Title = "Claim Types" });
menu.Add(new MenuLink { Title = "Salary Component" });
menu.Add(new MenuLink { Title = "Off-Day" });
MenuItems = new ObservableCollection<MenuLink>(menu);
}
}
}
}
<file_sep>/ISuitePro.ERP.Digital.CxUI/ViewModels/DetailsPageViewModel.cs
using Prism.Mvvm;
using Prism.Navigation;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace ISuitePro.ERP.Digital.CxUI.ViewModels
{
public class DetailsPageViewModel: BindableBase
{
public DetailsPageViewModel(INavigationService navigationService)
{
}
}
}
| 53ffe4f9434be9fe99020628417768f5248cfe5a | [
"C#"
] | 7 | C# | jhonybasha/Prism_Xamarin_Sample | 2b0ac1aa820154f2609e12f79eb60d9c1a4cb9b0 | e0043f00ce0dfb4392af33af9947c188027db3df |
refs/heads/master | <file_sep>function temperatureConverter(){
var tempinput = document.getElementById("inputFahrenheit").value;
//alert(tempinput);
var inverse=document.getElementById("inverseSelect").checked;
calc(tempinput,inverse)
}
function calc(tempinput,inverse) {
if((inverse!=true&&inverse!=false)||tempinput==null){
throw Error('input cannot be null')
}
valNum = parseFloat(tempinput);
var result = null;
if(inverse==true){
//alert("sai");
$("#OutputLabel").html('Celsius:');
$("#Inputlabel").html('Fahrenheit');
// document.getElementById("outputCelcius").innerHTML='<p>'+((valNum-32)/1.8)+'</p>';
result = Math.ceil((valNum-32)/1.8)
$("#outputCelcius").html(result);
return result;
}
else{
result=Math.ceil(valNum*1.8+32);
$("#Inputlabel").html('Celsius:');
$("#OutputLabel").html('Fahrenheit');
$("#outputCelcius").html(result);
// console.log(valNum);
return result;
}
}<file_sep>I created 3 pages and the info in them is as described below:
Page 1 :In the home page I added a new button without any function and named as A02.
Page 2: Created a temp.js file for calculating the temperature in required units and added a inverse button as we must have a 2 inputs and atleast one calculated value.
Page 3: I attached my linkedin, bitbucket and github profile to the contact page. | 2a6907ee11b17777d1bf223e49ce851dfd9a9e85 | [
"JavaScript",
"Markdown"
] | 2 | JavaScript | RajeshMidde-source/A02Nadakudhiti | ec2f3eb204b8ca7852477eb712cae742ef02a091 | c3cbac3cfdee530fcb7d268be2d950a15f314410 |
refs/heads/master | <repo_name>Elaine-AL/German-population<file_sep>/germpop.R
library(ggplot2)
gerpop = read.csv("C:/Users/Elaine/Documents/german population/Results.csv", header = TRUE, nrow = 19)
View(gerpop)
names(gerpop)
gerpop = subset(gerpop, select = c(period, data))
gerpop$period = gsub("YEAR", "", as.character(gerpop$period))
gerpop$period = as.numeric(gerpop$period)
plot = ggplot(data = gerpop, aes(x = period, y = data, group = 1)) + geom_line(size = 1.5, color = "blue") + scale_x_continuous(breaks = seq(2000, 2018, 2))
plot
plot = plot + labs(title = "German mid year population estimate", x = "Year", y = "Population in '000", caption = "https://unstats.un.org") + theme_dark()
<file_sep>/germpop.Rmd
---
title: "germpop"
author: "<NAME>."
date: "April 15, 2019"
output: html_document
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
```
## R Markdown
```{r germpop}
library(ggplot2)
gerpop = read.csv("C:/Users/Elaine/Documents/german population/Results.csv",
header = TRUE,
nrow = 19
)
View(gerpop)
names(gerpop)
gerpop = subset(gerpop,
select = c(period, data
)
)
gerpop$period = gsub("YEAR",
"",
as.character(gerpop$period
)
)
gerpop$period = as.numeric(gerpop$period)
plot = ggplot(data = gerpop,
aes(x = period,
y = data,
group = 1
)
) +
geom_line(size = 1.5,
color = "blue"
) + scale_x_continuous(breaks = seq(2000,
2018,
2
)
)
plot
plot = plot + labs(title = "German mid year population estimate",
x = "Year",
y = "Population in '000",
caption = "https://unstats.un.org") + theme_dark()
```
## Including Plots
```{r plot, echo=FALSE}
plot(plot)
```
<file_sep>/README.md
# German-population
Analysis of the population disparity in German from 2002
| 9db4523d6c047c47dda93f923a86e0c4ccfac129 | [
"Markdown",
"R",
"RMarkdown"
] | 3 | R | Elaine-AL/German-population | 32373dcec6ad8d3659e900417309830283c95e74 | 779c5b6ec89a864b7cb31438e5ef3dcecc1a9a61 |
refs/heads/master | <repo_name>prathamesh07/Bank_sms_analysis_1.1<file_sep>/function_definitions/keyphrases.py
import os
# this script basically reads from various text files and
# adds the data to variables in the form of list or dict
# so that checkers can use it later to compare
#
Declined_keyphrases_list = []
fp = open("data_files/sms_classification_level2_keywords/financial/bank_level2_classification/message_type_keywords/declined",'r')
for key in fp.read().split('\n'):
if key !="":
Declined_keyphrases_list.append(str(key))
ATM_keyphrases_list = []
fp = open("data_files/sms_classification_level2_keywords/financial/bank_level2_classification/message_type_keywords/atm",'r')
for key in fp.read().split('\n'):
if key !="":
ATM_keyphrases_list.append(str(key))
Debit_keyphrases_list = []
fp = open("data_files/sms_classification_level2_keywords/financial/bank_level2_classification/message_type_keywords/spent_online",'r')
for key in fp.read().split('\n'):
if key !="":
Debit_keyphrases_list.append(str(key))
Debit_2_keyphrases_list = []
fp = open( "data_files/sms_classification_level2_keywords/financial/bank_level2_classification/message_type_keywords/deposited_to_others_account",'r')
for key in fp.read().split('\n'):
if key !="":
Debit_2_keyphrases_list.append(str(key))
Balance_keyphrases_list = []
fp = open( "data_files/sms_classification_level2_keywords/financial/bank_level2_classification/message_type_keywords/Balance",'r')
for key in fp.read().split('\n'):
if key !="":
Balance_keyphrases_list.append(str(key))
Credit_keyphrases_list = []
fp = open( "data_files/sms_classification_level2_keywords/financial/bank_level2_classification/message_type_keywords/Credited",'r')
for key in fp.read().split('\n'):
if key !="":
Credit_keyphrases_list.append(str(key))
OTP_keyphrases_list = []
fp = open( "data_files/sms_classification_level2_keywords/financial/bank_level2_classification/message_type_keywords/OTP",'r')
for key in fp.read().split('\n'):
if key !="":
OTP_keyphrases_list.append(str(key))
Payment_Due_keyphrases_list = []
fp = open( "data_files/sms_classification_level2_keywords/financial/bank_level2_classification/message_type_keywords/payment_due",'r')
for key in fp.read().split('\n'):
if key !="":
Payment_Due_keyphrases_list.append(str(key))
Info_keyphrases_list = []
fp = open( "data_files/sms_classification_level2_keywords/financial/bank_level2_classification/message_type_keywords/Info_messages",'r')
for key in fp.read().split('\n'):
if key !="":
Info_keyphrases_list.append(str(key))
Minimum_balance_keyphrases_list = []
fp = open( "data_files/sms_classification_level2_keywords/financial/bank_level2_classification/message_type_keywords/min_balance",'r')
for key in fp.read().split('\n'):
if key !="":
Minimum_balance_keyphrases_list.append(str(key))
Warning_keyphrases_list = []
fp = open( "data_files/sms_classification_level2_keywords/financial/bank_level2_classification/message_type_keywords/warning",'r')
for key in fp.read().split('\n'):
if key !="":
Warning_keyphrases_list.append(str(key))
Acknowledge_keyphrases_list = []
fp = open( "data_files/sms_classification_level2_keywords/financial/bank_level2_classification/message_type_keywords/acknowledge",'r')
for key in fp.read().split('\n'):
if key !="":
Acknowledge_keyphrases_list.append(str(key))
Advert_keyphrases_list = []
fp = open( "data_files/sms_classification_level2_keywords/financial/bank_level2_classification/message_type_keywords/Advert",'r')
for key in fp.read().split('\n'):
if key !="":
Advert_keyphrases_list.append(str(key))
CASA_keyphrases_list = []
fp = open( "data_files/sms_classification_level2_keywords/financial/bank_level2_classification/account_type_keywords/CASA",'r')
for key in fp.read().split('\n'):
if key !="":
CASA_keyphrases_list.append(str(key))
Debit_Card_keyphrases_list = []
fp = open( "data_files/sms_classification_level2_keywords/financial/bank_level2_classification/account_type_keywords/Debit_card",'r')
for key in fp.read().split('\n'):
if key !="":
Debit_Card_keyphrases_list.append(str(key))
Credit_Card_keyphrases_list = []
fp = open( "data_files/sms_classification_level2_keywords/financial/bank_level2_classification/account_type_keywords/Credit_card",'r')
for key in fp.read().split('\n'):
if key !="":
Credit_Card_keyphrases_list.append(str(key))
Wallet_keyphrases_list = []
fp = open( "data_files/sms_classification_level2_keywords/financial/bank_level2_classification/account_type_keywords/Wallet",'r')
for key in fp.read().split('\n'):
if key !="":
Wallet_keyphrases_list.append(str(key))
Prepaid_Card_keyphrases_list = []
fp = open( "data_files/sms_classification_level2_keywords/financial/bank_level2_classification/account_type_keywords/Prepaid_card",'r')
for key in fp.read().split('\n'):
if key !="":
Prepaid_Card_keyphrases_list.append(str(key))
Loan_keyphrases_list = []
fp = open( "data_files/sms_classification_level2_keywords/financial/bank_level2_classification/account_type_keywords/Loan",'r')
for key in fp.read().split('\n'):
if key !="":
Loan_keyphrases_list.append(str(key))
NEFT_keyphrases_list = []
fp = open( "data_files/sms_classification_level2_keywords/financial/bank_level2_classification/transaction_instrument_keywords/NEFT",'r')
for key in fp.read().split('\n'):
if key !="":
NEFT_keyphrases_list.append(str(key))
IMPS_keyphrases_list = []
fp = open( "data_files/sms_classification_level2_keywords/financial/bank_level2_classification/transaction_instrument_keywords/IMPS",'r')
for key in fp.read().split('\n'):
if key !="":
IMPS_keyphrases_list.append(str(key))
NetBanking_keyphrases_list = []
fp = open( "data_files/sms_classification_level2_keywords/financial/bank_level2_classification/transaction_instrument_keywords/Net_banking",'r')
for key in fp.read().split('\n'):
if key !="":
NetBanking_keyphrases_list.append(str(key))
Cheque_keyphrases_list = []
fp = open( "data_files/sms_classification_level2_keywords/financial/bank_level2_classification/transaction_instrument_keywords/Cheque",'r')
for key in fp.read().split('\n'):
if key !="":
Cheque_keyphrases_list.append(str(key))
Account_Number_False_Alarm_keyphrases_list = []
fp = open( "data_files/sms_classification_level2_keywords/financial/bank_level2_classification/false_account_number_keywords/Ac_No_False_keyphrases_list",'r')
for key in fp.read().split('\n'):
if key !="":
Account_Number_False_Alarm_keyphrases_list.append(str(key))
<file_sep>/function_definitions/bank_sms_preperation_function_definitions/balance_error_based_dummy_entry_generation_script.py
import pandas as pd
from datetime import datetime, timedelta
def balance_error_based_dummy_entry_generation_func(bank_sms_df):
bank_sms_df['DummyFlag'] = 0
#Creating empty dataframe to store dummy sms
#dummy_sms = pd.DataFrame()
dummy_sms_df = pd.DataFrame()
for idx, row in bank_sms_df.iterrows():
print 8 , '\t\t' , idx
if idx == 0 or row['Error'] == '_NA_' or str(row['Error']) == '0':
continue
else:
#Storing row attributes in variables so that we can use them to fill dummy entry
SmsID = -1*int(row['SmsID'])
CustomerID = row['CustomerID']
BankName = row['BankName']
SENDER_PARENT = row['SENDER_PARENT']
SENDER_CHILD_1 = row['SENDER_CHILD_1']
SENDER_CHILD_2 = row['SENDER_CHILD_2']
SENDER_CHILD_3 = row['SENDER_CHILD_3']
AccountNo = row['AccountNo']
LinkedDebitCardNumber = row['LinkedDebitCardNumber']
AccountType = row['AccountType']
MessageSource = row['MessageSource']
ReferenceNumber = '_NA_'
TxnInstrument = '_NA_'
MessageType = 'Credit' if float(row['Error']) >=0 else 'Debit'
Currency_1 = row['Currency_1']
Amt_1 = abs(float(row['Error']))
Message = 'Dummy entry of '+Currency_1+str(Amt_1)+' is added.'
Currency_2 = row['Currency_2']
Amt_2 = float(row['Error']) + float(bank_sms_df.at[idx-1, 'Amt_2'])
Amt_2_calculated = Amt_2
Error = 0
ConsecutiveTxnTimespan = '_NA_'
Currency_3 = '-'
Amt_3 = -1
Vendor = '_NA_'
TxnAmount = float(row['Error'])
RepeatedTxnFlag = 0
BulkTxnFlag = 0
current_date = row['MessageTimestamp']
prev_date = bank_sms_df.at[idx-1, 'MessageTimestamp']
date_difference = (current_date-prev_date).days
if date_difference > 1:
dummy_entry_datetime = current_date - timedelta(1)
else:
dummy_entry_datetime = (current_date - prev_date)/2 + prev_date
to_be_appended = pd.DataFrame({'SmsID':SmsID, 'CustomerID':CustomerID, 'BankName':pd.Series(BankName), 'SENDER_PARENT':pd.Series(SENDER_PARENT), 'SENDER_CHILD_1':pd.Series(SENDER_CHILD_1), 'SENDER_CHILD_2':pd.Series(SENDER_CHILD_2), 'SENDER_CHILD_3':pd.Series(SENDER_CHILD_3), 'AccountNo':AccountNo, 'LinkedDebitCardNumber':pd.Series(LinkedDebitCardNumber), \
'AccountType':pd.Series(AccountType), 'MessageSource':pd.Series(MessageSource), 'Message':pd.Series(Message), 'MessageTimestamp':dummy_entry_datetime, 'ReferenceNumber':pd.Series(ReferenceNumber), 'TxnInstrument':TxnInstrument, \
'MessageType':pd.Series(MessageType), 'Currency_1':pd.Series(Currency_1), 'Amt_1':Amt_1, 'Currency_2':pd.Series(Currency_2), 'Amt_2_calculated':Amt_2_calculated, 'Error':Error, \
'ConsecutiveTxnTimespan':pd.Series(ConsecutiveTxnTimespan), 'Currency_3':pd.Series(Currency_3), 'Amt_3':Amt_3, 'Vendor':pd.Series(Vendor), 'TxnAmount':TxnAmount, \
'RepeatedTxnFlag':RepeatedTxnFlag, 'BulkTxnFlag':BulkTxnFlag , 'DummyFlag':1})
#Appending dummy sms to main dataframe
bank_sms_df = bank_sms_df.append(to_be_appended)
#making a seperate df of dummy entries
dummy_sms_df = dummy_sms_df.append(to_be_appended)
#Sorting the dataframe
bank_sms_df.sort_values(['CustomerID', 'BankName', 'AccountNo', 'MessageTimestamp'], inplace=True)
bank_sms_df.to_csv('data_files/intermediate_output_files/banks/bank_sms_with_dummy_entries.csv', index=False)
bank_sms_df.index = range(len(bank_sms_df.index.values))
dummy_sms_df.to_csv('data_files/intermediate_output_files/banks/only_dummy_entries.csv', index=False)
return bank_sms_df<file_sep>/function_definitions/bank_sms_preperation_function_definitions/parameter_calculation_script.py
from datetime import datetime
import pandas as pd
#---------------------------------------------------------------------------------------------------
bank_sms_filtered_flaged = pd.DataFrame()
def getNumberOfTxns(l):
global bank_sms_filtered_flaged
TotalDebitTxns = 0
TotalCreditTxns = 0
for i in l:
messageType = bank_sms_filtered_flaged.at[i, 'MessageType']
#print messageType
if messageType == 'Debit':
TotalDebitTxns += 1
elif messageType == 'Credit':
TotalCreditTxns += 1
#print messageType, TotalCreditTxns
return (TotalCreditTxns + TotalDebitTxns , TotalDebitTxns, TotalCreditTxns)
def getNumberOfBulkTxns(l):
global bank_sms_filtered_flaged
TotalBulkTxns = 0
for i in l:
if bank_sms_filtered_flaged.at[i, 'BulkTxnFlag']:
TotalBulkTxns += 1
return TotalBulkTxns
def getPercentOfTxns(TotalDebitTxns, TotalCreditTxns):
try:
PercentOfDebitTxns = (float(TotalDebitTxns)/(TotalDebitTxns+TotalCreditTxns))*100
PercentOfCreditTxns = (float(TotalCreditTxns)/(TotalDebitTxns+TotalCreditTxns))*100
except ZeroDivisionError:
PercentOfDebitTxns = 0
PercentOfCreditTxns = 0
return (PercentOfDebitTxns, PercentOfCreditTxns)
def getNetTxnAmt(l):
global bank_sms_filtered_flaged
NetTxnAmt = 0
for i in l:
NetTxnAmt += float(bank_sms_filtered_flaged.at[i, 'TxnAmount'])
return NetTxnAmt
def getMaxMinBalance(l):
global bank_sms_filtered_flaged
MaxBalance = -99999999
MinBalance = +99999999
amount_to_consider = 0
for i in l:
Amt_2 = float(bank_sms_filtered_flaged.at[i, 'Amt_2'])
Amt_2_calculated = float(bank_sms_filtered_flaged.at[i, 'Amt_2_calculated'])
BulkTxnFlag = bank_sms_filtered_flaged.at[i, 'BulkTxnFlag']
if BulkTxnFlag in [1,2] :
if Amt_2_calculated != -1:
amount_to_consider = Amt_2_calculated
else :
continue
else :
if Amt_2 != -1 :
amount_to_consider = Amt_2
elif Amt_2_calculated != -1:
amount_to_consider = Amt_2_calculated
else :
continue
if amount_to_consider > MaxBalance :
MaxBalance = amount_to_consider
if amount_to_consider < MinBalance :
MinBalance = amount_to_consider
if abs(MaxBalance + 99999999 ) < 0.001 :
MaxBalance = "_NA_"
if abs(MinBalance - 99999999 ) < 0.001 :
MinBalance = "_NA_"
return (MaxBalance, MinBalance)
def parameterCalculationFunc(l):
TotalNumberOfTxns, TotalDebitTxns, TotalCreditTxns = getNumberOfTxns(l)
TotalBulkTxns = getNumberOfBulkTxns(l)
PercentOfDebitTxns, PercentOfCreditTxns = getPercentOfTxns(TotalDebitTxns, TotalCreditTxns)
NetTxnAmt= getNetTxnAmt(l)
MaxBalance, MinBalance = getMaxMinBalance(l)
return (TotalNumberOfTxns, TotalDebitTxns, TotalCreditTxns, TotalBulkTxns, PercentOfDebitTxns, PercentOfCreditTxns, NetTxnAmt, MaxBalance, MinBalance)
#-------------------------------------------------------------------------------------------------
def parameter_calculation_func(bank_sms_df,account_type):
global bank_sms_filtered_flaged
bank_sms_filtered_flaged = bank_sms_df[bank_sms_df['AccountType'] == account_type ]
bank_sms_filtered_flaged = bank_sms_filtered_flaged.reset_index(drop=True)
bank_sms_df['DummySMSCount'] = -1
bank_sms_df['TotalSMSCount'] = -1
#Creating list to store distinct user-bank-account-day combination's indexes
user_bank_acc_day_combination_idx_list=[0]
#Creating empty dataframe to store calculated parameters for each user-bank-account-day combination
parameters = pd.DataFrame()
DummyFlag_current = 0
flag = 1
for i in range(len(bank_sms_filtered_flaged)-1):
print 9 , '\t\t' , i
#print bank_sms_filtered_flaged.at[i,'AccountType']
if flag == 1 :
AllSMSOnADayCounter = 1
DummySMSCounter = 0
CustomerID_current = int(bank_sms_filtered_flaged.at[i, 'CustomerID'])
BankName_current = bank_sms_filtered_flaged.at[i, 'BankName']
SENDER_PARENT_current = bank_sms_filtered_flaged.at[i, 'SENDER_PARENT']
SENDER_CHILD_1_current = bank_sms_filtered_flaged.at[i, 'SENDER_CHILD_1']
SENDER_CHILD_2_current = bank_sms_filtered_flaged.at[i, 'SENDER_CHILD_2']
SENDER_CHILD_3_current = bank_sms_filtered_flaged.at[i, 'SENDER_CHILD_3']
AccountType = bank_sms_filtered_flaged.at[i, 'AccountType']
AccountNo_current = int(bank_sms_filtered_flaged.at[i, 'AccountNo'])
Date_current = bank_sms_filtered_flaged.at[i, 'MessageTimestamp'].strftime('%Y-%m-%d')
CustomerID_next = int(bank_sms_filtered_flaged.at[i+1, 'CustomerID'])
BankName_next = bank_sms_filtered_flaged.at[i+1, 'BankName']
AccountNo_next = int(bank_sms_filtered_flaged.at[i+1, 'AccountNo'])
Date_next = bank_sms_filtered_flaged.at[i+1, 'MessageTimestamp'].strftime('%Y-%m-%d')
if CustomerID_current == CustomerID_next and BankName_current == BankName_next and AccountNo_current == AccountNo_next and Date_current == Date_next:
AllSMSOnADayCounter += 1
user_bank_acc_day_combination_idx_list.append(i+1)
if bank_sms_filtered_flaged.at[i+1, 'DummyFlag'] == 1:
DummyFlag_current = 1
DummySMSCounter += 1
flag = 0
continue
else:
flag = 1
TotalNumberOfTxns, TotalDebitTxns, TotalCreditTxns, TotalBulkTxns, PercentOfDebitTxns, PercentOfCreditTxns, NetTxnAmt, MaxBalance, MinBalance = parameterCalculationFunc(user_bank_acc_day_combination_idx_list)
user_bank_acc_day_combination_idx_list = [i+1]
Date = datetime.strptime(Date_current, '%Y-%m-%d')
percentOfDummyEntries = (float(DummySMSCounter)/float(AllSMSOnADayCounter))*100
to_be_appended = pd.DataFrame({'TotalSMSCount':AllSMSOnADayCounter,'DummySMSCount':DummySMSCounter,'PercentOfDummyEntries':percentOfDummyEntries,'DummyFlag':DummyFlag_current,'CustomerID':CustomerID_current, 'BankName':pd.Series(BankName_current), 'SENDER_PARENT':pd.Series(SENDER_PARENT_current), 'SENDER_CHILD_1':pd.Series(SENDER_CHILD_1_current), 'SENDER_CHILD_2':pd.Series(SENDER_CHILD_2_current), 'SENDER_CHILD_3':pd.Series(SENDER_CHILD_3_current), 'AccountNumber':AccountNo_current, 'AccountType':AccountType, 'Date':Date, 'TotalNumberOfTxns':TotalNumberOfTxns, 'TotalDebitTxns':TotalDebitTxns, \
'TotalCreditTxns':TotalCreditTxns, 'TotalBulkTxns':TotalBulkTxns, 'PercentOfDebitTxns':PercentOfDebitTxns, 'PercentOfCreditTxns':PercentOfCreditTxns, 'NetTxnAmt':NetTxnAmt, 'MaxBalance':MaxBalance, 'MinBalance':MinBalance})
DummyFlag_current = 0
parameters = parameters.append(to_be_appended)
parameters.index = range(len(parameters.index.values))
parameters = parameters[['AccountNumber','AccountType','BankName','CustomerID','Date','DummyFlag','DummySMSCount','TotalSMSCount','PercentOfDummyEntries','MaxBalance','MinBalance','NetTxnAmt','PercentOfCreditTxns','PercentOfDebitTxns','SENDER_CHILD_1','SENDER_CHILD_2','SENDER_CHILD_3','SENDER_PARENT','TotalBulkTxns','TotalCreditTxns','TotalDebitTxns','TotalNumberOfTxns']]
parameters.to_csv('data_files/intermediate_output_files/banks/'+account_type+'_parameters.csv', index=False)
return parameters
<file_sep>/function_definitions/bank_sms_preperation_function_definitions/balance_sms_adjustment_script.py
from datetime import datetime, timedelta
import pandas as pd
"""
This script adjusts first balance sms of each day as first sms of that day by truncating time part to 00-00-00.
So that this sms can be used to compute opening balance. Also, there is entry of same sms but with prev day date
and time 23-59-59. So that we can compute prev day's closing balance.
"""
def balance_sms_adjustment_func(bank_sms_df):
bank_sms_df_original = bank_sms_df
bank_sms_df = bank_sms_df[bank_sms_df['MessageType'] == 'Balance']
#Truncating time part of datetime to 00:00:00
bank_sms_df['MessageTimestamp'] = bank_sms_df['MessageTimestamp'].apply(pd.datetools.normalize_date)
#print len(bank_sms_df.index.values)
#Considering only first balance sms from each day
bank_sms_df.drop_duplicates(['CustomerID', 'BankName', 'AccountNo', 'MessageTimestamp'], inplace=True)
#print len(bank_sms_df.index.values)
for idx, row in bank_sms_df.iterrows():
print 4 , '\t\t' , idx
SmsID = row['SmsID']
CustomerID = row['CustomerID']
Message = row["Message"]
MessageSource = row["MessageSource"]
MessageDate = row["MessageDate"]
MessageTimestamp = row["MessageTimestamp"]
MessageType = row["MessageType"]
Currency_1 = row["Currency_1"]
Amt_1 = row["Amt_1"]
Currency_2 = row["Currency_2"]
Amt_2 = row["Amt_2"]
Currency_3 = row["Currency_3"]
Amt_3 = row["Amt_3"]
Vendor = row["Vendor"]
AccountNo = row["AccountNo"]
AccountType = row["AccountType"]
ReferenceNumber = row["ReferenceNumber"]
TxnInstrument = '_NA_'
BankName = row["BankName"]
SENDER_PARENT = row['SENDER_PARENT']
SENDER_CHILD_1 = row['SENDER_CHILD_1']
SENDER_CHILD_2 = row['SENDER_CHILD_2']
SENDER_CHILD_3 = row['SENDER_CHILD_3']
LinkedDebitCardNumber = row['LinkedDebitCardNumber']
timestamp_for_new_row = MessageTimestamp - timedelta(seconds=1)
to_be_appended = pd.DataFrame({'SmsID':SmsID, 'CustomerID':CustomerID, 'Message':pd.Series(Message), 'MessageSource':pd.Series(MessageSource), 'MessageDate':pd.Series(MessageDate), \
'MessageTimestamp':timestamp_for_new_row, 'MessageType':pd.Series(MessageType), 'Currency_1':pd.Series(Currency_1), 'Amt_1':Amt_1, 'Currency_2':pd.Series(Currency_2), 'Amt_2':Amt_2, 'Currency_3':pd.Series(Currency_3), \
'Amt_3':Amt_3, 'Vendor':pd.Series(Vendor), 'AccountNo':pd.Series(AccountNo), 'AccountType':pd.Series(AccountType), 'ReferenceNumber':pd.Series(ReferenceNumber), 'TxnInstrument':TxnInstrument, 'BankName':pd.Series(BankName), 'SENDER_PARENT':pd.Series(SENDER_PARENT), 'SENDER_CHILD_1':pd.Series(SENDER_CHILD_1), 'SENDER_CHILD_2':pd.Series(SENDER_CHILD_2), 'SENDER_CHILD_3':pd.Series(SENDER_CHILD_3), 'LinkedDebitCardNumber':pd.Series(LinkedDebitCardNumber)})
bank_sms_df = bank_sms_df.append(to_be_appended)
bank_sms_df.sort_values(['CustomerID', 'BankName', 'AccountNo', 'MessageTimestamp'], inplace=True)
#print len(bank_sms_df.index.values)
#Reading orginal dataframe again and dropping balance sms rows
bank_sms_df_without_balance = bank_sms_df_original[bank_sms_df_original['MessageType'] != 'Balance']
#Appending adjusted balance sms dates dataframe to orginal dataframe
bank_sms_df = bank_sms_df.append(bank_sms_df_without_balance)
#Sorting according to 'CustomerID', 'BankName', 'AccountNo', 'MessageTimestamp'
bank_sms_df.sort_values(['CustomerID', 'BankName', 'AccountNo', 'MessageTimestamp'], inplace=True)
bank_sms_df.to_csv('data_files/intermediate_output_files/banks/bank_sms_with_balance_sms_adjusted.csv', index=False)
bank_sms_df.index = range(len(bank_sms_df.index.values))
return bank_sms_df<file_sep>/utility_sms_analysis_main.py
import pandas as pd
from time import time
from function_definitions.sms_level1_classification_func import utility_sms_filtering_func
ti = time()*1000
#Filtering utility sms from all sms
bank_sms_df = utility_sms_filtering_func("user_sms_pipe.csv") # creates a 'bank_sms_raw.csv' file
tf = time()*1000
print 'THE END',(tf-ti)<file_sep>/function_definitions/bank_sms_preperation_function_definitions/duplicate_flag_generation_script.py
import pandas as pd
"""
This script generates RepeatedTxnFlag column which indicates duplicate sms based on txn reference number.
If txn is uniuque -> flag 0
If txn is duplicate but balance (Amt_2) is not given by bank -> flag 1
If txn is duplicate but balance (Amt_2) is given by bank -> flag 2
"""
def duplicate_flag_generation_func(bank_sms_df):
#Creating RepeatedTxnFlag column and initializing with zero.
bank_sms_df['RepeatedTxnFlag'] = 0
temp = -1 #This variable is used to store index of last duplicate sms.
count = 0 #To store count of total number of exceptions
for i in range(len(bank_sms_df.index.values) -1):
print 5 , "\t\t", i
if i <= temp :
continue
current_row_ref_number = bank_sms_df.at[i, 'ReferenceNumber']
next_row_ref_number = bank_sms_df.at[i+1, 'ReferenceNumber']
if current_row_ref_number != next_row_ref_number or current_row_ref_number == '_NA_':
continue
else :
j = 0
try :
while bank_sms_df.at[i+j, 'ReferenceNumber'] == bank_sms_df.at[i+j+1, 'ReferenceNumber'] and bank_sms_df.at[i+j, 'BankName'] == bank_sms_df.at[i+j+1, 'BankName']:
j+=1
except :
count += 1
pass
#This list holds the indexes of duplicate sms i.e sms for each user-bank-account having same reference number
duplicate_sms_idx_list = []
for k in range(j+1):
duplicate_sms_idx_list.append(i + k)
flag = 0
for idx in duplicate_sms_idx_list :
if abs(float(bank_sms_df.at[idx, 'Amt_2']) + 1) < 0.001 : #float(bank_sms_df.at[idx, 'Amt_2']) == -1
bank_sms_df.at[idx,"RepeatedTxnFlag"] = 1
elif flag == 0 :
bank_sms_df.at[idx,"RepeatedTxnFlag"] = 2
flag = 1
else :
bank_sms_df.at[idx,"RepeatedTxnFlag"] = 1
temp = idx
bank_sms_df = bank_sms_df[['SmsID', 'CustomerID', 'BankName', 'SENDER_PARENT' , 'SENDER_CHILD_1' , 'SENDER_CHILD_2' , 'SENDER_CHILD_3' ,'AccountNo', 'LinkedDebitCardNumber', 'AccountType', 'MessageSource', 'Message', 'MessageTimestamp', 'ReferenceNumber', 'TxnInstrument', 'MessageType', 'Currency_1', 'Amt_1', 'Currency_2', 'Amt_2', 'Currency_3', 'Amt_3', 'Vendor', 'RepeatedTxnFlag']]
#print 'count =', count
bank_sms_df.to_csv('data_files/intermediate_output_files/banks/bank_sms_flaged.csv',index = False)
bank_sms_df.index = range(len(bank_sms_df.index.values))
return bank_sms_df<file_sep>/regex/store_regex.py
import re
import pickle
debit_vendor_re_list = []
debit_vendor_re_list.append(re.compile(r' \d{1,2}-\d{1,2}-\d{4} ON ACCOUNT OF(.+?)\. COMBINED AVAILABLE '))
debit_vendor_re_list.append(re.compile(r' \d{1,2}-\d{1,2}-\d{4} TOWARDS (.+?)\. COMBINED AVAILABLE '))
debit_vendor_re_list.append(re.compile(r' AT (.+?) ON '))
debit_vendor_re_list.append(re.compile(r'AT (.+?) IN USING CARD NO'))
debit_vendor_re_list.append(re.compile(r' AT (.+?)\. AVBL'))
debit_vendor_re_list.append(re.compile(r' AT (.+?)\. AVAILABLE'))
debit_vendor_re_list.append(re.compile(r' AT (.+?) OF KARNATAKA'))
debit_vendor_re_list.append(re.compile(r' AT (.+?) ?\.AVL BAL'))
debit_vendor_re_list.append(re.compile(r' AT (.+?) TXN'))
debit_vendor_re_list.append(re.compile(r'\d+\.\d+ AT (.+?) WITH YOUR '))
debit_vendor_re_list.append(re.compile(r'\d{1,2}[A-Z]{3} AT (.+?) \. CALL '))
debit_vendor_re_list.append(re.compile(r'\d+AT(.+?) ?TXN#'))
debit_vendor_re_list.append(re.compile(r' FOR (.+?) ?TXN'))
debit_vendor_re_list.append(re.compile(r' FOR (.+?) ?PAYMENT '))
debit_vendor_re_list.append(re.compile(r' FOR (.+?)\.MAINTAIN'))
debit_vendor_re_list.append(re.compile(r' FOR (.+?) - ACCOUNT '))
debit_vendor_re_list.append(re.compile(r' INFO ?[\.-](.+?)\. YOUR'))
debit_vendor_re_list.append(re.compile(r' AT (.+?)\. FINAL'))
debit_vendor_re_list.append(re.compile(r' -(.+?)\. AVL'))
debit_vendor_re_list.append(re.compile(r' FROM (.+?)\. AVAILABLE '))
debit_vendor_re_list.append(re.compile(r' AT (.+?)[\.:] ?COMBINED'))
debit_vendor_re_list.append(re.compile(r' (?:TOWARDS|ACCOUNT OF) (.+?)[\.:] ?COMBINED'))
debit_vendor_re_list.append(re.compile(r' AT (.+?)\. CALL'))
debit_vendor_re_list.append(re.compile(r' FROM (.+?) ON '))
debit_vendor_re_list.append(re.compile(r' AT (.+?) USING'))
debit_vendor_re_list.append(re.compile(r' AT (.+?)\. TOLLFREE '))
debit_vendor_re_list.append(re.compile(r' DET:(.+?)\. ?A/C'))
debit_vendor_re_list.append(re.compile(r' DETAILS: (.+?)TOT BAL'))
debit_vendor_re_list.append(re.compile(r' DETAILS: (.+?) TXN'))
debit_vendor_re_list.append(re.compile(r' DETAILS: (.+?) TO ACCOUNT'))
debit_vendor_re_list.append(re.compile(r' DET:(.+?)\. ?IF NOT '))
debit_vendor_re_list.append(re.compile(r' AT (.+?) ?IS APP'))
debit_vendor_re_list.append(re.compile(r'/-(.+?)-CLEAR'))
debit_vendor_re_list.append(re.compile(r'/(.+?)CLEAR BALANCE'))
debit_vendor_re_list.append(re.compile(r'-POS-(.+?)-BALANCE'))
debit_vendor_re_list.append(re.compile(r'-ECOM-(.+?)-BALANCE'))
debit_vendor_re_list.append(re.compile(r'ECOM/(.+?)\. GIVE A '))
debit_vendor_re_list.append(re.compile(r'-NFS-(.+?)-BALANCE'))
debit_vendor_re_list.append(re.compile(r'\. (.+?)\. CLEAR BAL '))
debit_vendor_re_list.append(re.compile(r' AT(.+?)\.AVAILABLE'))
debit_vendor_re_list.append(re.compile(r' ON ACCOUNT OF (.+?) @'))
debit_vendor_re_list.append(re.compile(r' (CHQ PAID .+?) VALUE'))
debit_vendor_re_list.append(re.compile(r' TOWARDS (.+?) VALUE'))
debit_vendor_re_list.append(re.compile(r' AT (.+?)\. ?THIS'))
debit_vendor_re_list.append(re.compile(r'TXN:(.+?)\. ?A/C'))
debit_vendor_re_list.append(re.compile(r' ON(.+?) FOR RS\.'))
debit_vendor_re_list.append(re.compile(r' TRANSACTION ON (.+?) IS'))
debit_vendor_re_list.append(re.compile(r' AT(.+?) IND '))
debit_vendor_re_list.append(re.compile(r' TRANSACTION (.+?) IS:'))
debit_vendor_re_list.append(re.compile(r' TO PAY YOUR (.+?) BILL FOR\. TOTAL '))
debit_vendor_re_list.append(re.compile(r' \d{2}-\d{2}-\d{4} \d{2}:\d{2}:\d{2} [Ii]nfo: ?(.+?)$'))
debit_vendor_re_list.append(re.compile(r' RS \d+\.?\d+ INFO: ?(.+?)$'))
debit_vendor_re_list.append(re.compile(r'RS \d+\.\d+ ON \d+-\w+-\d+ AT (.+?)$'))
debit_vendor_re_list.append(re.compile(r'^YOUR (.+?) OF '))
debit_vendor_re_list.append(re.compile(r' INFO: ?(.+?)\. TOTAL'))
debit_vendor_re_list.append(re.compile(r'\d{1,2}:\d{1,2}:\d{1,2} INFO: ?(.+?)$'))
debit_vendor_re_list.append(re.compile(r'RS \d+\.?\d{0,2} INFO: ?(.+?)$'))
debit_vendor_re_list.append(re.compile(r'\d{1,2}-\w{3}-\d{1,2} AT (.+?).$'))
debit_vendor_re_list.append(re.compile(r'-DD ISSUE,(.+?)TOT AVBL '))
# debit_vendor_re_list.append(re.compile(r' at (.+?)\.$'))
# debit_vendor_re_list.append(re.compile(r' at (.+?)$'))
# debit_vendor_re_list.append(re.compile(r' for ?(.+?)\.$'))
debit_2_vendor_re_list = []
debit_2_vendor_re_list.append(re.compile(r'TOWARDS ?(.+?) ?VAL'))
debit_2_vendor_re_list.append(re.compile(r' BENEFICIARY : ?(.+?) ?ON'))
debit_2_vendor_re_list.append(re.compile(r'ISSUE,(.+?) TOT '))
debit_2_vendor_re_list.append(re.compile(r' FOR (.+?) TXN '))
debit_2_vendor_re_list.append(re.compile(r'^(.+?) TRANSACTION '))
# debit_2_vendor_re_list.append(re.compile(r''))
# debit_2_vendor_re_list.append(re.compile(r''))
# debit_2_vendor_re_list.append(re.compile(r''))
# debit_2_vendor_re_list.append(re.compile(r''))
# debit_2_vendor_re_list.append(re.compile(r''))
# debit_2_vendor_re_list.append(re.compile(r''))
# debit_2_vendor_re_list.append(re.compile(r''))
credit_vendor_re_list = []
credit_vendor_re_list.append(re.compile(r'TOWARDS ?(.+?) ?VAL'))
credit_vendor_re_list.append(re.compile(r'INFO[\.:](.+?) .YOUR'))
credit_vendor_re_list.append(re.compile(r'BY (.+?) ON'))
#credit_vendor_re_list.append(re.compile(r'Det:(.+?)\. Ac'))
credit_vendor_re_list.append(re.compile(r'DET:(.+?)\. A/?C'))
credit_vendor_re_list.append(re.compile(r'BY (.+?)'))
#credit_vendor_re_list.append(re.compile(r'Info:(.+?)$'))
credit_vendor_re_list.append(re.compile(r'RS \d+\.?\d{0,2} INFO: ?(.+?)$'))
account_number_re_list = []
account_number_re_list.append(re.compile(r'A/C ?\d+-\d+[\*-]+(\d+)'))
account_number_re_list.append(re.compile(r' \*\*\d+\.\.\.(\d+)'))
account_number_re_list.append(re.compile(r'A/C \d+-\d+[X\*\.]-(\d+)'))
account_number_re_list.append(re.compile(r'ACCOUNT [X\*\.]+\d+[X\*\.]+(\d+)'))
account_number_re_list.append(re.compile(r'A/C NO [X\*\.]+\d+[X\*\.]+(\d+)'))
account_number_re_list.append(re.compile(r'CARD ACCOUNT \d+[X\*\.]+(\d+)'))
account_number_re_list.append(re.compile(r'YOUR A?/?C? [X\*\.]+\d+[X\*\.]+(\d+)'))
account_number_re_list.append(re.compile(r'ACCT XX+(\d+)'))
account_number_re_list.append(re.compile(r'CARD ENDING -?(\d+)'))
account_number_re_list.append(re.compile(r'CARD ENDING WITH -?(\d+)'))
account_number_re_list.append(re.compile(r'CREDIT CARD \d+[NX\*\.]+(\d+)'))
account_number_re_list.append(re.compile(r'AC NO \d+[NX\*\.]+(\d+)'))
account_number_re_list.append(re.compile(r'A/C \d+[NX\*\.]+(\d+)'))
account_number_re_list.append(re.compile(r'A/C\.? NO[\.:]? ?XX(\d+)'))
account_number_re_list.append(re.compile(r'CREDIT CARD XX+(\d+)'))
account_number_re_list.append(re.compile(r'CREDITCARD NUMBER XX+(\d+)'))
account_number_re_list.append(re.compile(r' A/C NO\.[X]+(\d+)'))
account_number_re_list.append(re.compile(r'A/C NO\. [NXx\*\.]+(\d+)'))
account_number_re_list.append(re.compile(r'A/C [NXx\*\.]*(\d+)'))
account_number_re_list.append(re.compile(r'CARD NO [NXx\*\.]+(\d+)'))
account_number_re_list.append(re.compile(r'CARD [NX\*]+ ?(\d+)'))
account_number_re_list.append(re.compile(r'CARD NO\. [Nx\*X]+(\d+)'))
account_number_re_list.append(re.compile(r'CREDITCARD NUMBER X+(\d+)'))
account_number_re_list.append(re.compile(r' CARD \d+[NX\*/]+(\d+)'))
account_number_re_list.append(re.compile(r' CARD (\d+)'))
account_number_re_list.append(re.compile(r'AC :? ?[NX\*/]+(\d+)'))
account_number_re_list.append(re.compile(r'ACCOUNT NUMBER [NX\*]*(\d+)'))
account_number_re_list.append(re.compile(r' LOAN A/C [A-Z]{3}(\d+)'))
account_number_re_list.append(re.compile(r' ACCOUNT (\d+)'))
account_number_re_list.append(re.compile(r'A/C-X(\d+)X'))
account_number_re_list.append(re.compile(r'[Ss][bB]-(\d+)'))
account_number_re_list.append(re.compile(r'ENDING IN (\d+) IS'))
account_number_re_list.append(re.compile(r'ENDING IN (\d+) WAS'))
account_number_re_list.append(re.compile(r'[X\*]{2,16} ? ?(\d+)'))
account_number_re_list.append(re.compile(r'[N\*]{2,16} ? ?(\d+)'))
# account_number_re_list.append(re.compile(r''))
# account_number_re_list.append(re.compile(r''))
# account_number_re_list.append(re.compile(r''))
# account_number_re_list.append(re.compile(r''))
# account_number_re_list.append(re.compile(r''))
# account_number_re_list.append(re.compile(r''))
junk_re_list = []
junk_re_list.append(re.compile(r' ?HTTP ?'))
junk_re_list.append(re.compile(r' ?WWW ?'))
junk_re_list.append(re.compile(r'^WWW ?'))
junk_re_list.append(re.compile(r' ?COM '))
junk_re_list.append(re.compile(r' ?COM$'))
junk_re_list.append(re.compile(r'^VIN'))
junk_re_list.append(re.compile(r'^VPS'))
junk_re_list.append(re.compile(r'^ ? ?TPT'))
junk_re_list.append(re.compile(r'^ ? ?IIN'))
junk_re_list.append(re.compile(r'^ ? ?INB'))
junk_re_list.append(re.compile(r'^ ? ?IPS'))
junk_re_list.append(re.compile(r'^ ? ?VISAPOS'))
junk_re_list.append(re.compile(r'^ +'))
junk_re_list.append(re.compile(r'^.{1,2}$'))
junk_re_list.append(re.compile(r' PVT '))
junk_re_list.append(re.compile(r' INDI?A?(?: |$)'))
junk_re_list.append(re.compile(r' SELLE?R?(?: |$)'))
junk_re_list.append(re.compile(r' SERVICES?(?: |$)'))
junk_re_list.append(re.compile(r' PRIVATE?(?: |$)'))
junk_re_list.append(re.compile(r' PVT '))
junk_re_list.append(re.compile(r' LIMITED '))
junk_re_list.append(re.compile(r' LTD '))
junk_re_list.append(re.compile(r' IN '))
junk_re_list.append(re.compile(r' .+'))
junk_re_list.append(re.compile(r' INTERNET '))
# junk_re_list.append(re.compile(r''))
# junk_re_list.append(re.compile(r''))
# junk_re_list.append(re.compile(r''))
# junk_re_list.append(re.compile(r''))
# junk_re_list.append(re.compile(r''))
# junk_re_list.append(re.compile(r''))
# junk_re_list.append(re.compile(r''))
# junk_re_list.append(re.compile(r''))
# junk_re_list.append(re.compile(r''))
# junk_re_list.append(re.compile(r''))
# junk_re_list.append(re.compile(r''))
money_re_list = []
money_re_list.append(re.compile(r"(?:^| |:|\W)(?:INR|RS|USD|SGD)\W?\W?\d+\.?\d{0,2}")) #proper
money_re_list.append(re.compile(r"(?:BALANCE ?I?S?:? | IS:? ?)\W?\W?\d+\.?\d{0,2}"))
money_re_list.append(re.compile(r'(?:^| |:|\W)\d+\.?\d{0,2} (?:INR|RS|USD|SGD)'))# proper
money_re_list.append(re.compile(r"BALANCE ?I?S?:? \W?\W?\d+\.?\d{0,2}"))
money_re_list.append(re.compile(r'X\d+: (\d+.?\d*) \* USE'))
money_re_list.append(re.compile(r"LEDG BAL \d+\.?\d{0,2}"))
money_re_list.append(re.compile(r'BAL ?\W ?\d+\.?\d{0,2}'))
reference_number_re_list = []
reference_number_re_list.append(re.compile(r"REFERENCE NUMBER IS:([A-Z0-9]+)",re.IGNORECASE))
reference_number_re_list.append(re.compile(r"REF NO- XXXX(\d+)"))
reference_number_re_list.append(re.compile(r"REFERENCE NUMBER ?([A-Z0-9]+)"))
reference_number_re_list.append(re.compile(r"NEFT IN UTR ([A-Z0-9]+)"))
reference_number_re_list.append(re.compile(r"REF\.? NO\.? ([A-Z0-9]+)"))
reference_number_re_list.append(re.compile(r"REF\.? NO ?\.?:? ?([A-Z0-9/-]+)"))
reference_number_re_list.append(re.compile(r"REF(\d+)"))
reference_number_re_list.append(re.compile(r"REFNO\. IS (\d+)"))
reference_number_re_list.append(re.compile(r"REF ID ([A-Z0-9]+)"))
reference_number_re_list.append(re.compile(r"REF ?# ([A-Z0-9]+)"))
reference_number_re_list.append(re.compile(r"([A-Z0-9]+)IS YOUR REFERENCE NUMBER"))
reference_number_re_list.append(re.compile(r"REFERENCE NO IS ([A-Z0-9-]+)"))
reference_number_re_list.append(re.compile(r"REF\.? NO\.? ?IS ([A-Z0-9]+)"))
reference_number_re_list.append(re.compile(r" REF\. ([A-Z0-9-]+)"))
reference_number_re_list.append(re.compile(r'REF COMP #? ([A-Z0-9-]+)'))
reference_number_re_list.append(re.compile(r"REF\. No\. [A-Za-z]+ IS:(\d+)"))
reference_number_re_list.append(re.compile(r"REF ID ([A-Z0-9-]+)"))
reference_number_re_list.append(re.compile(r"REF:? ?([A-Z0-9-]+)"))
reference_number_re_list.append(re.compile(r"MMT\*(\d+)\*\*"))
# reference_number_re_list.append(re.compile(r""))
# reference_number_re_list.append(re.compile(r""))
# reference_number_re_list.append(re.compile(r""))
# reference_number_re_list.append(re.compile(r""))
credit_card_limit_re_list = []
credit_card_limit_re_list.append(re.compile(r'TOTAL CRE?D?I?T? LI?MI?T ?:? ?((?:INR|RS|USD|SGD)\W{0,2}\d+\.?\d{0,2})'))
regex = {}
regex['account_number_regex'] = account_number_re_list
regex['debit_vendor_regex'] = debit_vendor_re_list
regex['debit_2_vendor_regex'] = debit_2_vendor_re_list
regex['credit_vendor_regex'] = credit_vendor_re_list
regex['money_regex'] = money_re_list
regex['junk_regex'] = junk_re_list
regex['reference_number_regex'] = reference_number_re_list
regex['credit_card_limit_regex'] = credit_card_limit_re_list
try :
fileobject = open('../function_definitions/regex.pkl','wb')
except :
fileobject = open('regex.pkl','wb')
pickle.dump(regex,fileobject)
fileobject.close()
<file_sep>/function_definitions/getters.py
import re
from checkers import checkers_func
from regex_extractor_from_pickle import account_number_re_list
from regex_extractor_from_pickle import debit_vendor_re_list
from regex_extractor_from_pickle import debit_2_vendor_re_list
from regex_extractor_from_pickle import credit_vendor_re_list
from regex_extractor_from_pickle import money_re_list
from regex_extractor_from_pickle import junk_re_list
from regex_extractor_from_pickle import reference_number_re_list
from regex_extractor_from_pickle import credit_card_limit_re_list
from all_dict_generator import bank_dict
# Shared low-level regexes used by the extractors below.
number = re.compile(r'\d') # regular expression for a single digit
# BUG FIX: the decimal point was an unescaped '.' which matches ANY character,
# so a string like "123X4" parsed as a single amount "123X4". Escape it so only
# a literal '.' can separate the integer and fractional parts.
amount_re = re.compile(r'\d+\.?\d{0,2}')
currency_re = re.compile(r'[A-Za-z]+') # regular expression for 1 or more upper or lower case leter
nonalpha = re.compile(r'[^a-zA-Z ]+') # regular expression for 1 or more non alpha character
#------------------------------------------------------------------------------------------------------
def getMoney(message,category): # returns upto 3 sets of amount and currency if present
    # Extract up to three (currency, amount) pairs from an SMS body.
    # Returns a 6-element list [Cur1, Cur2, Cur3, Amt1, Amt2, Amt3] with
    # '-' / '-1' placeholders where nothing was found. The regexes tried
    # depend on the already-detected message category.
    global money_re_list
    global amount_re
    global currency_re
    # Strip thousands separators and spaces so the regexes see e.g. "INR1234.56".
    message = message.replace(',','').replace(' ','')
    Amount = ['-1','-1','-1']
    Currency = ['-','-','-']
    RS = []
    RS += re.findall(money_re_list[0],message) # checks with a particular regex to get ammount
    RS += re.findall(money_re_list[2],message) # checks with other
    if category in ['Balance'] :
        RS += re.findall(money_re_list[1],message) # if category is balance, checks with 1 more
    if category in ['Debit'] :
        RS += re.findall(money_re_list[3],message) # if category is debit, checks with 1 more
    if RS == [] :
        try :
            # Fallback pattern: assume INR when the currency was not captured.
            RS = ['INR ' + re.search(money_re_list[4],message).group(1)] # if after above tries still nothing is found
        except : # it tries another regex; AttributeError when nothing matched
            pass
    # if RS == [] :
    # for moneypattern in money_re_list[3:]:
    if len(RS) < 3 : # if after above process still we have less then 3 amounts, try remaining regex
        for i in range(5, len(money_re_list)):
            RS += re.findall(money_re_list[i],message)
    RS = RS[:3] # pick only 1st 3 amounts from them
    credit_limit = [] # replce the last amount with the 1st amount found from the credit card limit re list
    for credit_limit_re in credit_card_limit_re_list :
        #print credit_limit_re, '*********', type(credit_limit_re)
        credit_limit += re.findall(credit_limit_re,message)
    try :
        RS[2] = credit_limit[0]
    except IndexError :
        # NOTE(review): IndexError fires both when credit_limit is empty AND when
        # RS has fewer than 3 entries; when no credit limit was found this
        # silently DROPS a legitimate third amount (RS = RS[:2]) -- confirm
        # this truncation is intended.
        RS = RS[:2]
        pass
    for i in range(len(RS)): # for each amount, split it into its currency and the actual amount
        AMT = re.search(amount_re,RS[i]).group()
        #print RS[i],'-------------------------------'
        CUR = re.search(currency_re,RS[i]).group()
        Amount[i] = str(AMT)
        # Normalise stray words captured as the "currency" token to INR.
        Currency[i] = str(CUR).replace("BALANCE","INR").replace('IS','INR').replace('X','INR').replace('LEDG','INR').replace('BAL','INR')
    return Currency+Amount # returns a list of currency and corresponding actual amounts
#--------------------------------------------------------------------------------------------
def getCategory(message): # classify one SMS body into a message category
    """Classify an SMS body into one of the known message categories.

    Checks are ordered: earlier categories win, and several categories are
    suppressed when the message also matches Credit / Debit keyphrases.
    Returns "None" when nothing matches.
    """
    text = str(message)
    # checkers_func only inspects the message text, so the checks that are
    # consulted several times below can be evaluated once up front.
    is_credit = checkers_func(text, 'Credit')
    is_debit = checkers_func(text, 'Debit')
    is_debit_2 = checkers_func(text, 'Debit_2')
    if checkers_func(text, 'Declined'):
        return "Declined"
    if checkers_func(text, 'ATM') and not is_credit:
        return "ATM"
    if checkers_func(text, 'OTP'):
        return "OTP"
    if checkers_func(text, 'Balance') and not (is_credit or is_debit or is_debit_2):
        return "Balance"
    if is_debit_2 and not is_credit:
        return "Debit"
    if is_credit:
        return "Credit"
    if is_debit:
        return "Debit"
    # Remaining categories map one keyword group to one label; first hit wins.
    for keyword, label in (('Min_balance', "Minimum_balance"),
                           ('Payment_due', "Payment_due"),
                           ('Warning', "Warning"),
                           ('Acknowledge', "Acknowledge"),
                           ('Advert', "Advert"),
                           ('Info', "Info")):
        if checkers_func(text, keyword):
            return label
    return "None"
#-------------------------------------------------------------------------------------------------
def getAccountType(message): # returns the Account_type of the message
    """Return the account type detected in the message, or "_NA_".

    For every type below the checker keyword and the returned label are the
    same string, so one ordered scan replaces the original if-chain; the
    first matching type wins.
    """
    for account_type in ('Debit_Card', 'Credit_Card', 'CASA',
                         'Wallet', 'Prepaid_Card', 'Loan'):
        if checkers_func(message, account_type):
            return account_type
    return "_NA_"
#-------------------------------------------------------------------------------------------------------
def getTxnInstrument(message): # returns the transaction instrument
    """Return the transaction instrument detected in the message, or '_NA_'.

    Each checker keyword doubles as the returned label; ordering matters
    (Debit_Card is preferred over ATM, etc.).
    """
    for instrument in ('Debit_Card', 'ATM', 'NEFT', 'IMPS', 'NetBanking'):
        if checkers_func(message, instrument):
            return instrument
    return '_NA_'
#-------------------------------------------------------------------------------
def getAccountNumber(message): # returns the account number
    """Extract the last 4 characters of the account number from the message.

    Returns "_NA_" when no pattern matches or when the message is a known
    false alarm (e.g. it mentions a payee's account number). Captures shorter
    than 4 characters are left-padded with zeros.
    """
    account_number = "_NA_"
    # IMPROVEMENT: the false-alarm check does not depend on which regex matched,
    # so evaluate it once instead of re-running it for every matching pattern
    # inside the loop (same result, fewer keyphrase scans).
    if not checkers_func(message, 'Account_Number_False_Alarm'):
        for account_number_re in account_number_re_list:
            search_object = re.search(account_number_re, message)
            if search_object:
                account_number = str(search_object.group(1))
                break # only the first matching account number is taken
    if len(account_number) < 4: # pad short captures to 4 characters
        account_number = "0" * (4 - len(account_number)) + account_number
    else: # otherwise keep only the last 4 characters
        account_number = account_number[-4:]
    return account_number
#--------------------------------------------------------------------------------
def _vendor_from_patterns(message, vendor_re_list):
    # Helper: return the cleaned capture of the first matching vendor regex, else None.
    for pattern in vendor_re_list:
        match = re.search(pattern, message)
        if match:
            # Drop non-alphabetic characters, then strip known junk words.
            vendor = re.sub(nonalpha, '', match.group(1))
            for junk_pattern in junk_re_list:
                vendor = re.sub(junk_pattern, '', vendor)
            return vendor
    return None

def getTransactionSource(message , category): # tries to return the transaction source, i.e. vendor , etc
    """Return the transaction counterparty (vendor/payee) for the message.

    IMPROVEMENT: the Debit_2 / Credit / Debit branches duplicated the same
    extract-and-clean loop and differed only in which regex list was used;
    that shared logic now lives in _vendor_from_patterns. The Credit-only
    rejection of bare currency tokens ("RS"/"R") is preserved.
    """
    category_re_lists = {
        'Debit_2': debit_2_vendor_re_list,
        'Credit': credit_vendor_re_list,
        'Debit': debit_vendor_re_list,
    }
    if category not in category_re_lists:
        return 'ERROR/Not_Specified'
    vendor = _vendor_from_patterns(message, category_re_lists[category])
    if vendor is None:
        return 'ERROR/Not_Specified'
    # A leftover currency token after cleaning is not a real vendor name.
    if category == 'Credit' and vendor in ("RS", "R"):
        return 'ERROR/Not_Specified'
    return vendor
#---------------------------------------------------------------------------------
def getBankName(message_source): # returns the bank name from the sms senders ID
    """Map an SMS sender ID to its bank entry, or "_NA_" when unknown."""
    # Drop the routing prefix (first 3 chars), upper-case, keep the 6-char code.
    sender_code = str(message_source)[3:].upper()[:6]
    return bank_dict.get(sender_code, "_NA_")
#--------------------------------------------------------------------------------------
def getReferenceNumber(message): # returns the reference number in the message if present
    """Return the first pattern capture containing at least one digit, else "_NA_"."""
    #message = message.replace('/','')
    for pattern in reference_number_re_list:
        match = re.search(pattern, message)
        if match:
            candidate = match.group(1)
            # A valid reference number must contain at least one digit;
            # otherwise keep trying the remaining patterns.
            if re.search(number, candidate):
                return candidate
    return "_NA_"
#---------------------------------------------------------------------------------------
def getData(message): # main entry point: runs every extractor over one SMS body
    """Collect all extracted attributes for one SMS body.

    Returns a 12-element list: [category, currency_1, amount_1, currency_2,
    amount_2, currency_3, amount_3, account_number, transaction_source,
    account_type, reference_number, txn_instrument].
    """
    text = str(message).replace('\n','')
    category = getCategory(text)
    # getMoney returns [cur1, cur2, cur3, amt1, amt2, amt3]; interleave them.
    money_currency = getMoney(text, category)
    return [category,
            money_currency[0], money_currency[3],
            money_currency[1], money_currency[4],
            money_currency[2], money_currency[5],
            getAccountNumber(text),
            getTransactionSource(text, category),
            getAccountType(text),
            getReferenceNumber(text),
            getTxnInstrument(text)]
#-----------------------------------------------------------------------------------------------------
def getBankDetails(message_source): # returns the bank details from the sms senders ID
    """Same lookup as getBankName: sender ID -> bank details entry or "_NA_"."""
    sender_code = str(message_source)[3:].upper()[:6]
    return bank_dict.get(sender_code, "_NA_")
<file_sep>/function_definitions/bank_sms_preperation_function_definitions/bank_sms_attributes_generation_script.py
from datetime import datetime
import pandas as pd
import re
from function_definitions.getters import getData
from function_definitions.getters import getBankName
from function_definitions.getters import getBankDetails
"""
This script generates different attribute columns like account number, account type, message type etc, for the filtered out bank sms.
"""
message_id_re = re.compile(r'\w+-\w+')
def bank_sms_attributes_generation_func(bank_sms_df):
    # Enrich each bank-SMS row with extracted attributes (category, up to three
    # currency/amount pairs, account number, vendor, sender details ...),
    # log rows that fail validation, and write classified / non-classified CSVs.
    # Returns the sorted, re-indexed dataframe.
    fp = open('data_files/Logs/exception_logs', 'a')
    ExceptionCounter = 0
    for idx, row in bank_sms_df.iterrows():
        print 1 , idx
        # Each validation below logs the failure to the exception log and
        # skips the row (`continue`) instead of aborting the whole run.
        try:
            CustomerID = int(row['CustomerID'])
        except Exception as e:
            ExceptionCounter += 1
            fp.write(str(ExceptionCounter) + '\t' + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + '\t' + e.message + " >>> " + 'Found for CustomerID ' + str(row['CustomerID']) + '\n')
            continue
        try:
            SmsID = int(row['SmsID'])
            #print SmsID, '$$$$$', type(SmsID)
        except Exception as e:
            ExceptionCounter += 1
            fp.write(str(ExceptionCounter) + '\t' + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + '\t' + e.message + " >>> " + 'Found for CustomerID ' + str(row['CustomerID']) + ' at SmsID ' + str(row['SmsID']) + '\n')
            continue
        try:
            # Upper-case the body so the (upper-case) keyphrases and regexes match.
            row['Message'] = str(row['Message']).upper()
        except Exception as e:
            ExceptionCounter += 1
            fp.write(str(ExceptionCounter) + '\t' + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + '\t' + e.message + " >>> " + 'Found for CustomerID ' + str(row['CustomerID']) + ' at row having SmsID '+str(row['SmsID'])+' and column "Message"' + '\n')
            continue
        try:
            # MessageDate is epoch milliseconds -- hence the /1000.
            bank_sms_df.at[idx,"MessageTimestamp"] = datetime.fromtimestamp(row['MessageDate']/1000)
        except Exception as e:
            ExceptionCounter += 1
            fp.write(str(ExceptionCounter) + '\t' + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + '\t' + e.message + " >>> " + 'Found for CustomerID ' + str(row['CustomerID']) + ' at row having SmsID '+str(row['SmsID'])+' and column "MessageDate"' + '\n')
            continue
        # Sender ID must look like "XX-YYYY"; anything else is logged and skipped.
        if re.search(message_id_re,row['MessageSource']):
            pass
        else :
            ExceptionCounter += 1
            fp.write(str(ExceptionCounter) + '\t' + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + '\t' + "MessageSource ID was not having format xx-xxx... for CustomerID " + str(row['CustomerID']) + ' at row having SmsID '+str(row['SmsID']) + '\n')
            continue
        # extracted_data layout: [category, cur1, amt1, cur2, amt2, cur3, amt3,
        # account_no, vendor, account_type, reference_no, txn_instrument].
        extracted_data = getData(row['Message'])
        bank_name = getBankName(row['MessageSource'])
        bank_details = getBankDetails(row['MessageSource'])
        bank_sms_df.at[idx,"MessageType"] = extracted_data[0]
        bank_sms_df.at[idx,"Currency_1"] = extracted_data[1]
        bank_sms_df.at[idx,"Amt_1"] = extracted_data[2]
        bank_sms_df.at[idx,"Currency_2"] = extracted_data[3]
        bank_sms_df.at[idx,"Amt_2"] = extracted_data[4]
        bank_sms_df.at[idx,"Currency_3"] = extracted_data[5]
        bank_sms_df.at[idx,"Amt_3"] = extracted_data[6]
        bank_sms_df.at[idx,"Vendor"] = extracted_data[8]
        bank_sms_df.at[idx,"AccountNo"] = extracted_data[7]
        bank_sms_df.at[idx,"AccountType"] = extracted_data[9]
        bank_sms_df.at[idx,"ReferenceNumber"] = extracted_data[10]
        bank_sms_df.at[idx, "TxnInstrument"] = extracted_data[11]
        #bank_sms_df.at[idx,"BankName"] = bank_name
        # NOTE(review): for unknown senders getBankDetails returns the string
        # "_NA_", so bank_details[0..4] index single characters -- confirm
        # this is intended before relying on these columns.
        bank_sms_df.at[idx,"BankName"] = bank_details[0]
        bank_sms_df.at[idx,"SENDER_PARENT"] = bank_details[1]
        bank_sms_df.at[idx,"SENDER_CHILD_1"] = bank_details[2]
        bank_sms_df.at[idx,"SENDER_CHILD_2"] = bank_details[3]
        bank_sms_df.at[idx,"SENDER_CHILD_3"] = bank_details[4]
        #Replacing Message type from ATM to debit
        if extracted_data[0] == 'ATM':
            bank_sms_df.at[idx, "MessageType"] = 'Debit'
    # Sort so that each account's rows are contiguous and chronological
    # (downstream scripts rely on this ordering).
    bank_sms_df = bank_sms_df.sort_values(by=["CustomerID", "AccountNo", "MessageTimestamp"], ascending=[True, True, True])
    #Storing none type sms type to a another csv as non-classified
    non_classified_sms = bank_sms_df[bank_sms_df['MessageType'] == 'None']
    non_classified_sms = non_classified_sms[['SmsID', 'CustomerID', 'Message', 'MessageSource', 'MessageDate']]
    non_classified_sms.to_csv('data_files/Non_classified/non_classified_sms.csv', index=False)
    bank_sms_df.to_csv('data_files/intermediate_output_files/banks/bank_sms_classified.csv',index = False)
    bank_sms_df.index = range(len(bank_sms_df.index.values))
    #Closing log file
    fp.close()
    return bank_sms_df<file_sep>/function_definitions/bank_sms_preperation_function_definitions/user_bank_account_combiation_generation_script.py
import pandas as pd
from parameter_calculation_func_definition_script import get_net_transaction
# Look-back windows (in months) and transaction directions used when
# generating the per-account summary parameters below.
Months = [1, 3, 6, 12]
TxnTypes = ['Credit', 'Debit', 'Both']
def user_bank_account_combination_generation_func(df):
    """Build per-(customer, bank, account) summary rows over several look-back windows.

    NOTE(review): this function looks unfinished -- `result` is never appended
    to or returned, and helpers such as getAvgSpendPerMonth /
    get_maximum_balance / getAvgMaxBal / get_minimum_balance / getAvgMinBal /
    getUtilization are not imported in this module (only get_net_transaction
    is). Confirm its status before relying on it.
    """
    result = pd.DataFrame()
    # BUG FIX: drop_duplicates(..., inplace=True) returns None, so `df_unique`
    # was None and the loop below crashed. Take the de-duplicated copy instead
    # and leave the caller's frame untouched.
    df_unique = df.drop_duplicates(['CustomerID', 'BankName', 'AccountNo'])
    for idx, row in df_unique.iterrows():
        CustomerID = row['CustomerID']
        BankName = row['BankName']
        AccountNo = row['AccountNo']
        AccountType = row['AccountType']
        for Month in Months:
            for TxnType in TxnTypes:
                AvgSpendPerMonth = getAvgSpendPerMonth(df,TransactionType=TxnType,AccountType=AccountType,Month=Month,AccountNumber=AccountNo,SenderName=BankName)
                MaxBal = get_maximum_balance(df,TransactionType=TxnType,AccountType=AccountType,Month=Month,AccountNumber=AccountNo,SenderName=BankName)
                AvgMaxBal = getAvgMaxBal(df,TransactionType=TxnType,AccountType=AccountType,Month=Month,AccountNumber=AccountNo,SenderName=BankName)
                MinBal = get_minimum_balance(df,TransactionType=TxnType,AccountType=AccountType,Month=Month,AccountNumber=AccountNo,SenderName=BankName)
                AvgMinBal = getAvgMinBal(df,TransactionType=TxnType,AccountType=AccountType,Month=Month,AccountNumber=AccountNo,SenderName=BankName)
                Utilization = getUtilization(df,TransactionType=TxnType,AccountType=AccountType,Month=Month,AccountNumber=AccountNo,SenderName=BankName)
                # BUG FIX: Month is an int, so the original 'prefix'+Month+'suffix'
                # concatenation raised TypeError; wrap the pieces in str().
                to_be_appended = pd.DataFrame({'AvgMonthlySpendInLast'+str(Month)+'MonthsForAccountType'+str(AccountType):pd.Series(AvgSpendPerMonth), \
                    'MaxBalInLast'+str(Month)+'MonthsForAccountType'+str(AccountType):pd.Series(MaxBal)})
<file_sep>/bank_sms_analysis_main.py
import pandas as pd
from time import time
from function_definitions.sms_level1_classification_func import bank_sms_filtering_func
from function_definitions.bank_sms_preperation_function_definitions.bank_sms_attributes_generation_script import bank_sms_attributes_generation_func
from function_definitions.bank_sms_preperation_function_definitions.classified_vs_nonclassified_tally_generation_script import classified_vs_nonclassified_tally_generation_func
from function_definitions.bank_sms_preperation_function_definitions.account_type_rectification_script import account_type_rectification_func
from function_definitions.bank_sms_preperation_function_definitions.casa_to_debit_card_mapping_script import casa_to_debit_card_mapping_func
from function_definitions.bank_sms_preperation_function_definitions.balance_sms_adjustment_script import balance_sms_adjustment_func
from function_definitions.bank_sms_preperation_function_definitions.duplicate_flag_generation_script import duplicate_flag_generation_func
from function_definitions.bank_sms_preperation_function_definitions.bank_sms_error_tally_generation_script import bank_sms_error_tally_generation_func
from function_definitions.bank_sms_preperation_function_definitions.balance_rectification_script import balance_rectification_func
from function_definitions.bank_sms_preperation_function_definitions.balance_error_based_dummy_entry_generation_script import balance_error_based_dummy_entry_generation_func
from function_definitions.bank_sms_preperation_function_definitions.parameter_calculation_script import parameter_calculation_func
from function_definitions.bank_sms_preperation_function_definitions.post_parameter_calculation_processing_script import post_parameter_calculation_func
# End-to-end bank-SMS analysis pipeline: filter -> attribute extraction ->
# clean-up / reconciliation -> parameter calculation. Each stage reads and
# returns the working dataframe; elapsed time is reported in milliseconds.
ti = time()*1000
#Filtering bank sms from all sms
bank_sms_df = bank_sms_filtering_func("user_sms_pipe.csv") # creates a 'bank_sms_raw.csv' file
#Creating some new attributes in dataframe
bank_sms_df = bank_sms_attributes_generation_func(bank_sms_df)
#Creating data classified vs nonclassified tally
classified_vs_nonclassified_tally_generation_func(bank_sms_df)
#Rectifying the account type tag for each sms
bank_sms_df = account_type_rectification_func(bank_sms_df)
#Mapping of CASA account to its linked debit card
bank_sms_df = casa_to_debit_card_mapping_func(bank_sms_df)
#Adjusting balance sms so that we can consider them to calculate daily opening and closing balance
bank_sms_df = balance_sms_adjustment_func(bank_sms_df)
#Generating duplicate transaction flag
bank_sms_df = duplicate_flag_generation_func(bank_sms_df)
#Calculating error tally between consecutive transactions
bank_sms_df = bank_sms_error_tally_generation_func(bank_sms_df)
#Identifying bulk transactions and rectifying balance for those transactions
bank_sms_df = balance_rectification_func(bank_sms_df)
#Generating dummy sms entries based on balance error so as to compensate error.
bank_sms_df = balance_error_based_dummy_entry_generation_func(bank_sms_df)
#Calculating some CASA parameters
casa_parameter_data = parameter_calculation_func(bank_sms_df,'CASA')
#Calculating rest of the CASA parameters
casa_parameter_data = post_parameter_calculation_func(casa_parameter_data,'CASA')
#Calculating some Credit_Card parameters
credit_parameter_data = parameter_calculation_func(bank_sms_df,'Credit_Card')
#Calculating rest of the Credit_Card parameters
credit_parameter_data = post_parameter_calculation_func(credit_parameter_data,'Credit_Card')
tf = time()*1000
print 'THE END',(tf-ti)
<file_sep>/function_definitions/bank_sms_preperation_function_definitions/casa_to_debit_card_mapping_script.py
import pandas as pd
from time import sleep
"""
This script maps the debit card with its corresponding linked casa account if a user has only single casa account and a single debit card
in one bank.
"""
def casa_to_debit_card_mapping_func(bank_sms_df):
#Creating a new column LinkedDebitCardNumber and intializing with _NA_
bank_sms_df['LinkedDebitCardNumber'] = '_NA_'
#Storing count of each account type for each user-bank-account combination in a dictionary
user_bank_combinations_idx_dict = {}
for idx, row in bank_sms_df.iterrows():
print 3 , '\t\t' , idx
CustomerID = row['CustomerID']
BankName = row['BankName']
key = str(CustomerID)+'*'+str(BankName)
try:
user_bank_combinations_idx_dict[key].append(idx)
except KeyError:
user_bank_combinations_idx_dict[key] = [idx]
#Mapping of account number with account type
account_type_account_number_dict = {}
for key in user_bank_combinations_idx_dict.keys():
print 3 , '\t\t' , key
for idx in user_bank_combinations_idx_dict[key]:
account_number = bank_sms_df.at[idx, 'AccountNo']
account_type = bank_sms_df.at[idx, 'AccountType']
if account_number != '_NA_' :
try:
account_type_account_number_dict[account_type] = [account_number]
except KeyError:
account_type_account_number_dict[account_type].append(account_number)
try:
account_type_account_number_dict['CASA'] = list(set(account_type_account_number_dict['CASA']))
account_type_account_number_dict['Debit_Card'] = list(set(account_type_account_number_dict['Debit_Card']))
if len(account_type_account_number_dict['CASA']) == 1 and len(account_type_account_number_dict['Debit_Card']) == 1:
#print "no_key_error"
for idx in user_bank_combinations_idx_dict[key]:
account_type = bank_sms_df.at[idx, 'AccountType']
if account_type == 'CASA' :
bank_sms_df.at[idx, 'LinkedDebitCardNumber'] = account_type_account_number_dict['Debit_Card'][0]
if account_type == 'Debit_Card':
bank_sms_df.at[idx, 'AccountType'] = 'CASA'
bank_sms_df.at[idx, 'AccountNo'] = account_type_account_number_dict['CASA'][0]
bank_sms_df.at[idx, 'LinkedDebitCardNumber'] = account_type_account_number_dict['Debit_Card'][0]
bank_sms_df.at[idx, 'TxnInstrument'] = 'Debit_Card'
except KeyError:
pass
account_type_account_number_dict = {}
bank_sms_df.to_csv('data_files/intermediate_output_files/banks/bank_sms_classified_account_type_rectified2.csv', index=False)
bank_sms_df.index = range(len(bank_sms_df.index.values))
return bank_sms_df
<file_sep>/function_definitions/all_dict_generator.py
import pandas
# Sender-ID lookup tables built once at import time.
# bank_dict:      sender code -> [name, parent, child_1, child_2, child_3]
# utilities_dict: sender code -> list of mapped values
bank_dict = {}
utilities_dict = {}
# Legacy loader for the old bank-ID -> bank-name mapping file, kept for reference.
# fp = open('data_files/sms_classification_level1_keywords/financial/bank_id_to_bank_name_mapping','r')
# for bank in fp.read().split('\n'):
#     if bank != '':
#         bank = bank.split('\t')
#         bank_dict[bank[0]] = [ value for value in bank[1:] ]
df = pandas.read_csv('data_files/sms_classification_level1_keywords/financial/TABLE_20160704_SENDER_CLASSIFICATION_v02.csv')
df = df[df['SENDER_PARENT'] == 'FINANCIAL' ]
df.fillna('_NA_',inplace=True)
for i , row in df.iterrows():
    bank_dict[row['SENDER']] = [row['SENDER_NAME'],row['SENDER_PARENT'],row['SENDER_CHILD_1'],row['SENDER_CHILD_2'],row['SENDER_CHILD_3']]
#print bank_dict
# BUG FIX: the utilities mapping file handle was never closed; close it once
# the dictionary has been built (try/finally keeps Python 2 compatibility).
fp = open('data_files/sms_classification_level1_keywords/utilities/sender_id_to_sender_name_mapping','r')
try:
    for utility in fp.read().split('\n'):
        if utility != '':
            utility = utility.split('\t')
            utilities_dict[utility[0]] = [ value for value in utility[1:]]
finally:
    fp.close()
<file_sep>/function_definitions/bank_sms_preperation_function_definitions/post_parameter_calculation_processing_script.py
import pandas
from numpy import timedelta64
from datetime import datetime,timedelta
def post_parameter_calculation_func(bank_sms_df,account_type):
bank_sms_df['TransactionDirectionFlag'] = 'Equal'
bank_sms_df['TransactionDirectionIndicator'] = 'Multidirectional'
bank_sms_df['OpeningBalance'] = '_NA_'
bank_sms_df['ClosingBalance'] = '_NA_'
for idx,row in bank_sms_df.iterrows():
print 10 , '\t\t' , idx
if int(row['TotalCreditTxns']) > int(row['TotalDebitTxns']) :
bank_sms_df.at[idx,'TransactionDirectionFlag'] = 'Net_Credit'
elif int(row['TotalCreditTxns']) < int(row['TotalDebitTxns']) :
bank_sms_df.at[idx,'TransactionDirectionFlag'] = 'Net_Debit'
else :
pass
if float(row['PercentOfCreditTxns']) in [float(0),float(100)] :
bank_sms_df.at[idx,'TransactionDirectionIndicator'] = 'Unidirectional'
elif float(row['PercentOfCreditTxns']) == float(50) :
bank_sms_df.at[idx,'TransactionDirectionIndicator'] = 'Bidirectional'
else :
pass
#print bank_sms_df.index.values
#raw_input()
for idx,row in bank_sms_df.iterrows():
print 10 , '\t\t' , idx
if ( ( row['TransactionDirectionFlag'] == 'Net_Credit' ) and (row['TransactionDirectionIndicator'] == 'Unidirectional') ):
try :
if float(row['MaxBalance']) != '_NA_' :
bank_sms_df.at[idx,'OpeningBalance'] = float(row['MaxBalance']) - float(row['NetTxnAmt'])
except :
pass
elif( ( row['TransactionDirectionFlag'] == 'Net_Debit' ) and (row['TransactionDirectionIndicator'] == 'Unidirectional') ):
try :
if float(row['MinBalance']) != '_NA_' :
bank_sms_df.at[idx,'OpeningBalance'] = float(row['MinBalance']) - float(row['NetTxnAmt'])
except :
pass
else :
pass
if ( ( row['TransactionDirectionFlag'] == 'Net_Credit' ) and (row['TransactionDirectionIndicator'] == 'Unidirectional') ):
try :
if float(row['MaxBalance']) != '_NA_':
bank_sms_df.at[idx,'ClosingBalance'] = float(row['MaxBalance'])
except ValueError :
pass
elif( ( row['TransactionDirectionFlag'] == 'Net_Debit' ) and (row['TransactionDirectionIndicator'] == 'Unidirectional') ):
try :
if float(row['MinBalance']) != '_NA_' :
bank_sms_df.at[idx,'ClosingBalance'] = float(row['MinBalance'])
except ValueError :
pass
else :
pass
bank_sms_df.to_csv('data_files/intermediate_output_files/banks/Post_'+account_type+'_parameters.csv',index = False)
return bank_sms_df<file_sep>/function_definitions/bank_sms_preperation_function_definitions/account_type_rectification_script.py
import pandas as pd
from time import sleep
import operator
"""
This script replaces the account type of an account with the account type under which the majority of the sms for that account are tagged, \
if the same account is tagged with more than one type.
"""
def account_type_rectification_func(bank_sms_df):
    # For each (customer, account) combination that was tagged with more than
    # one account type, re-tag every row with the majority type. Rows with an
    # unknown account number ('_NA_') are passed through untouched.
    #Here, we are not considering _NA_ account number because their type cannot be rectified
    account_type_rectified = bank_sms_df[bank_sms_df['AccountNo'] != '_NA_']
    account_type_rectified.index = range(len(account_type_rectified.index))
    #Storing count of each account type for each user-bank-account combination in dictionary
    user_account_combinations_dict = {}
    user_account_combinations_idx_dict = {}
    for idx, row in account_type_rectified.iterrows():
        print 2 , "\t\t" , idx
        CustomerID = row['CustomerID']
        AccountNo = row['AccountNo']
        AccountType = row['AccountType']
        key = str(CustomerID)+'*'+str(AccountNo)
        #print CustomerID, type(CustomerID), '^^^^^^^'
        #print AccountNo, type(AccountNo), '%%%%%%%%'
        #print key, '############'
        try:
            user_account_combinations_idx_dict[key].append(idx)
        except KeyError:
            user_account_combinations_idx_dict[key] = [idx]
        # Increment the per-combination count for this account type.
        if user_account_combinations_dict.get(key) == None:
            user_account_combinations_dict[key] = {AccountType:1}
        else:
            if user_account_combinations_dict[key].get(AccountType) == None:
                user_account_combinations_dict[key][AccountType] = 1
            else:
                user_account_combinations_dict[key][AccountType] = user_account_combinations_dict[key].get(AccountType)+1
    #print len(user_account_combinations_dict), '@@@@@@@@@@@@@@@@@'
    #Eliminating dictionary keys which has length = 1 because they are correctly classified
    # NOTE(review): deleting while iterating .keys() is safe in Python 2 (list
    # copy) but would raise in Python 3 -- wrap in list() when porting.
    for key in user_account_combinations_dict.keys():
        if len(user_account_combinations_dict[key]) == 1 :
            del user_account_combinations_dict[key]
    #Sorting each dictionary key based on its values
    # After this, each value is a list of (AccountType, count) pairs, most frequent first.
    for dict_key in user_account_combinations_dict.keys():
        user_account_combinations_dict[dict_key] = sorted(user_account_combinations_dict[dict_key].items(), key=operator.itemgetter(1), reverse=True)
    #print len(user_account_combinations_dict), '**********'
    #Account type correction logic
    # Prefer the most frequent concrete type; skip '_NA_' if it happens to lead.
    for key in user_account_combinations_dict.keys():
        actual_account_type = ""
        if user_account_combinations_dict[key][0][0] == '_NA_':
            actual_account_type = user_account_combinations_dict[key][1][0]
        else:
            actual_account_type = user_account_combinations_dict[key][0][0]
        for idx in user_account_combinations_idx_dict[key]:
            account_type_rectified.at[idx, 'AccountType'] = actual_account_type
    #Appending accounts having account number _NA_ (which were previously filtered out above) to rectified account dataframe
    bank_sms_df_NA = bank_sms_df[bank_sms_df['AccountNo'] == '_NA_']
    account_type_rectified = account_type_rectified.append(bank_sms_df_NA)
    #Sorting dataframe according to user-bank-account-timestamp
    account_type_rectified.sort_values(['CustomerID', 'BankName', 'AccountNo', 'MessageTimestamp'], inplace=True)
    account_type_rectified.to_csv('data_files/intermediate_output_files/banks/bank_sms_classified_account_type_rectified.csv', index=False)
    account_type_rectified.index = range(len(account_type_rectified.index.values))
    return account_type_rectified
<file_sep>/function_definitions/bank_sms_preperation_function_definitions/bank_sms_error_tally_generation_script.py
import pandas as pd
import time
from time import sleep
"""
This script calculates error between consecutive sms for each user-bank-account combination. It also calculates the timespan (in days) over which
the error has occurred.
"""
def bank_sms_error_tally_generation_func(bank_sms_df):
    # Walk each user-bank-account's Debit/Credit/Balance messages in order,
    # compute the expected running balance (Amt_2_calculated) and record the
    # discrepancy ('Error') between the stated and expected balance of each
    # consecutive pair, plus the timespan in days over which it occurred.
    # Sentinel -1 means "no balance available"; float comparisons use a 0.001
    # tolerance because the amounts pass through float parsing.
    #Filtering out RepeatedTxnFlag = 1 i.e duplicate sms
    bank_sms_df = bank_sms_df[bank_sms_df['RepeatedTxnFlag'] != 1]
    bank_sms_df = bank_sms_df[( ( (bank_sms_df['MessageType'] == 'Debit') & (bank_sms_df['Amt_1'] != -1) ) | (bank_sms_df['MessageType'] == 'Balance') | (bank_sms_df['MessageType'] == 'Credit') ) & (bank_sms_df['AccountNo'] != "_NA_")]
    bank_sms_df.index = range(len(bank_sms_df))
    #Creating Amt_2_calculated, Error,ConsecutiveTxnTimespan columns and initializing them with -1, _NA_ and _NA_ resp.
    bank_sms_df['Amt_2_calculated'] = -1
    bank_sms_df['Error'] = '_NA_'
    bank_sms_df['ConsecutiveTxnTimespan'] = '_NA_'
    #To consider balance sms in our calculations, we are overwriting Amt_2 of balance message with Amt_1 and making Amt_1 as 0.
    for idx, row in bank_sms_df.iterrows():
        print 6, '\t\t',idx
        if row['MessageType'] == 'Balance':
            bank_sms_df.at[idx, "Amt_2"] = float(row['Amt_1'])
            bank_sms_df.at[idx, "Amt_1"] = 0
    # Group row indexes per user-bank-account combination.
    user_account_combinations_dict = {}
    for idx, row in bank_sms_df.iterrows():
        print 6, '\t\t',idx
        key = str(row['CustomerID'])+str(row['BankName'])+str(row['AccountNo'])
        try :
            user_account_combinations_dict[key].append(idx)
        except KeyError :
            user_account_combinations_dict[key] = [idx]
    #----------------------------------------------------------
    # NOTE(review): the loop below reads row idx+1 directly, which relies on
    # each account's rows being contiguous in the frame (upstream sort by
    # CustomerID/AccountNo/MessageTimestamp) -- confirm that ordering holds
    # whenever this function is called.
    for key in user_account_combinations_dict.keys():
        print 6 , '\t\t' , key
        indexes = user_account_combinations_dict[key]
        flag = 0
        for idx in indexes[:-1] :
            # Skip leading rows until the first known balance is seen.
            if abs(float(bank_sms_df.at[idx,'Amt_2']) + 1) < 0.001 and flag == 0 : #float(bank_sms_df.at[idx,'Amt_2']) == -1
                continue
            else:
                flag = 1
            # Prefer the balance stated in the sms; fall back to the running
            # calculated balance; skip the row if neither is available.
            current_bal_given = float(bank_sms_df.at[idx, 'Amt_2'])
            current_bal_calculated = float(bank_sms_df.at[idx, 'Amt_2_calculated'])
            current_bal = 0
            if abs(current_bal_given + 1) > 0.001: #current_bal_given != -1:
                current_bal = current_bal_given
            elif abs(current_bal_calculated + 1) > 0.001: #current_bal_calculated != -1
                current_bal = current_bal_calculated
            else:
                continue
            # Project the next row's balance from this row's balance.
            next_amt1 = float(bank_sms_df.at[idx+1, 'Amt_1'])
            if bank_sms_df.at[idx+1, 'MessageType'] in ['Debit', 'Balance']:
                Amt_2_calculated = current_bal - next_amt1
            elif bank_sms_df.at[idx+1, 'MessageType'] == 'Credit':
                Amt_2_calculated = current_bal + next_amt1
            bank_sms_df.at[idx+1, 'Amt_2_calculated'] = Amt_2_calculated
            # Record the discrepancy when both stated and projected balances exist;
            # differences under 1 unit are treated as rounding noise.
            if abs(float(bank_sms_df.at[idx+1, 'Amt_2']) + 1) > 0.001 and abs(float(bank_sms_df.at[idx+1, 'Amt_2_calculated']) + 1) > 0.001:
                #(float(bank_sms_df.at[idx+1, 'Amt_2']) != -1) and (float(bank_sms_df.at[idx+1, 'Amt_2_calculated']) != -1)
                # and (bank_sms_df.at[idx+1, 'MessageType'] != 'Balance'):
                error_timespan = (bank_sms_df.at[idx+1,'MessageTimestamp'] - bank_sms_df.at[idx,'MessageTimestamp']).days
                error = float(bank_sms_df.at[idx+1, 'Amt_2']) - float(bank_sms_df.at[idx+1, 'Amt_2_calculated'])
                if abs(error) < 1.0 :
                    error = 0
                bank_sms_df.at[idx+1, 'Error'] = error
                bank_sms_df.at[idx+1, 'ConsecutiveTxnTimespan'] = error_timespan
    bank_sms_df.to_csv('data_files/intermediate_output_files/banks/bank_sms_filtered_flaged.csv', index=False)
    bank_sms_df.index = range(len(bank_sms_df.index.values))
    return bank_sms_df
<file_sep>/function_definitions/checkers.py
from keyphrases import Declined_keyphrases_list
from keyphrases import ATM_keyphrases_list
from keyphrases import Debit_keyphrases_list
from keyphrases import Debit_2_keyphrases_list
from keyphrases import Balance_keyphrases_list
from keyphrases import Credit_keyphrases_list
from keyphrases import OTP_keyphrases_list
from keyphrases import Payment_Due_keyphrases_list
from keyphrases import Info_keyphrases_list
from keyphrases import Minimum_balance_keyphrases_list
from keyphrases import Warning_keyphrases_list
from keyphrases import Acknowledge_keyphrases_list
from keyphrases import Advert_keyphrases_list
from keyphrases import CASA_keyphrases_list
from keyphrases import Debit_Card_keyphrases_list
from keyphrases import Credit_Card_keyphrases_list
from keyphrases import Wallet_keyphrases_list
from keyphrases import Prepaid_Card_keyphrases_list
from keyphrases import Loan_keyphrases_list
from keyphrases import NEFT_keyphrases_list
from keyphrases import IMPS_keyphrases_list
from keyphrases import NetBanking_keyphrases_list
from keyphrases import Cheque_keyphrases_list
from keyphrases import Account_Number_False_Alarm_keyphrases_list
# Basically all the functions use keys from 'keyphrases' file and return true if the message has any of those key phrases
# Maps a classification label to its keyphrase patterns. Within a single
# pattern, '|'-separated parts must ALL occur in the message (AND semantics —
# see checkers_func, which consumes this dict).
keyphrases_dict = {'ATM':ATM_keyphrases_list, 'Declined':Declined_keyphrases_list, 'Debit':Debit_keyphrases_list, 'Debit_2':Debit_2_keyphrases_list, \
                    'Credit':Credit_keyphrases_list, 'Balance':Balance_keyphrases_list, 'OTP':OTP_keyphrases_list, 'Min_balance':Minimum_balance_keyphrases_list, \
                    'Info':Info_keyphrases_list, 'Payment_due':Payment_Due_keyphrases_list, 'Advert':Advert_keyphrases_list, 'Warning':Warning_keyphrases_list, \
                    'Acknowledge':Acknowledge_keyphrases_list, 'CASA':CASA_keyphrases_list, 'Debit_Card':Debit_Card_keyphrases_list, 'Credit_Card':Credit_Card_keyphrases_list, \
                    'Wallet':Wallet_keyphrases_list, 'Prepaid_Card':Prepaid_Card_keyphrases_list, 'Loan':Loan_keyphrases_list, 'Account_Number_False_Alarm':Account_Number_False_Alarm_keyphrases_list, \
                    'NEFT':NEFT_keyphrases_list, 'IMPS':IMPS_keyphrases_list, 'NetBanking':NetBanking_keyphrases_list, 'Cheque':Cheque_keyphrases_list}
def checkers_func(message, word):
    """Return True if `message` matches any keyphrase pattern registered under `word`.

    Each pattern in `keyphrases_dict[word]` may contain '|'-separated parts;
    the pattern matches only when *every* part occurs as a substring of the
    message (here '|' means AND, not OR).

    :param message: SMS text to test.
    :param word: classification label, e.g. 'ATM' or 'Debit' (KeyError if unknown).
    :return: True on the first matching pattern, otherwise falls through to False.
    """
    for key in keyphrases_dict[word]:
        # Idiom: all() replaces building a list of booleans and testing
        # `False not in truthvalue`.
        if all(keyword in message for keyword in key.split('|')):
            return True
return False <file_sep>/function_definitions/bank_sms_preperation_function_definitions/parameter_calculation_func_definition_script.py
import pandas as pd
def get_relevant_dataframe(df,TransactionType='both',AccountType='CASA',Month=1,AccountNumber=0,SenderName=''):
    """Filter `df` down to one account's rows over the last `Month` months.

    :param df: bank-SMS dataframe with MessageType/AccountType/AccountNo/BankName/Date columns.
    :param TransactionType: 'Credit', 'Debit' or 'both'.
    :param AccountType: account category to match (e.g. 'CASA').
    :param AccountNumber: account to match; a negative value means "any account".
    :param SenderName: bank name to match.
    :return: the filtered dataframe.
    """
    from numpy import timedelta64  # local import: this module's header does not import numpy
    if TransactionType == 'both' :
        TransactionTypelist = ['Credit','Debit']
    else :
        TransactionTypelist = [TransactionType]
    # Bug fixes: the original referenced the undefined names `at` and `m`
    # (NameError at runtime) and the column 'AccounTransactionTypeype' — a
    # search-and-replace artifact of 'AccountType', the column name used by the
    # rest of this project (see the column list in balance_rectification).
    if AccountNumber >= 0 :
        new_df = df[ (df['MessageType'].map(lambda x : x in TransactionTypelist ) ) & (df['AccountType'] == AccountType) & (df['AccountNo'] == AccountNumber) & (df['BankName'] == SenderName) ]
    else :
        new_df = df[ (df['MessageType'].map(lambda x : x in TransactionTypelist ) ) & (df['AccountType'] == AccountType) & (df['BankName'] == SenderName) ]
    # NOTE(review): tail(1)['Date'] is a one-row Series, not a scalar; the
    # subtraction and comparison below rely on pandas alignment/broadcasting —
    # confirm this behaves as intended on the real data.
    last_date = new_df.tail(1)['Date']
    last_valid_date = last_date - timedelta64(Month,'M')
    new_df = new_df[ new_df['Date'] > last_valid_date ]
    return new_df
def get_no_of_transactions(df,TransactionType='both',AccountType='CASA',Month=1,AccountNumber=0,SenderName=''):
    """Count the transactions that survive the relevance filter."""
    relevant = get_relevant_dataframe(df, TransactionType=TransactionType, AccountType=AccountType,
                                      Month=Month, AccountNumber=AccountNumber, SenderName=SenderName)
    return len(relevant)
def getAvgTransactions(df,TransactionType='both',AccountType='CASA',Month=1,AccountNumber=0,SenderName=''):
    """Mean number of transactions per month over the selected window."""
    total_txns = get_no_of_transactions(df, TransactionType=TransactionType, AccountType=AccountType,
                                        Month=Month, AccountNumber=AccountNumber, SenderName=SenderName)
    return float(total_txns) / Month
#Intermediate func
def get_net_transaction(df,TransactionType='both',AccountType='CASA',Month=1,AccountNumber=0,SenderName=''):
    """Sum of 'NetTxnAmt' over the relevant rows (helper for the Avg* functions)."""
    df = get_relevant_dataframe(df,TransactionType=TransactionType,AccountType=AccountType,Month=Month,AccountNumber=AccountNumber,SenderName=SenderName)
    # Idiomatic accumulation; float() tolerates string-typed amount cells.
    return sum(float(row['NetTxnAmt']) for _, row in df.iterrows())
def getAvgSpendPerMonth(df,TransactionType='both',AccountType='CASA',Month=1,AccountNumber=0,SenderName=''):
    """Mean net amount transacted per month over the selected window."""
    net_amount = get_net_transaction(df, TransactionType=TransactionType, AccountType=AccountType,
                                     Month=Month, AccountNumber=AccountNumber, SenderName=SenderName)
    return float(net_amount) / Month
def getAvgSpendPerTxn(df,TransactionType='both',AccountType='CASA',Month=1,AccountNumber=0,SenderName=''):
    """Mean amount per transaction; 0 when there were no transactions."""
    filter_kwargs = dict(TransactionType=TransactionType, AccountType=AccountType,
                         Month=Month, AccountNumber=AccountNumber, SenderName=SenderName)
    net_amount = get_net_transaction(df, **filter_kwargs)
    txn_count = get_no_of_transactions(df, **filter_kwargs)
    try:
        return float(net_amount) / txn_count
    except ZeroDivisionError:
        return 0
def get_maximum_balance(df,TransactionType='both',AccountType='CASA',Month=1,AccountNumber=0,SenderName=''):
    """Largest digit-valued 'MaxBalance' among the relevant rows, or '_NA_'."""
    df = get_relevant_dataframe(df,TransactionType=TransactionType,AccountType=AccountType,Month=Month,AccountNumber=AccountNumber,SenderName=SenderName)
    maxbal = -99999999  # sentinel: no usable balance seen yet
    for idx,row in df.iterrows():
        # NOTE(review): the comparison runs before the isdigit() guard, so it
        # may compare str against int — lenient in Python 2, TypeError in
        # Python 3. Other modules here use Python 2 print statements; confirm
        # the target interpreter.
        if row['MaxBalance'] > maxbal and str(row['MaxBalance']).isdigit():
            maxbal = row['MaxBalance']
    if maxbal == -99999999:
        return '_NA_'
    return maxbal
def getAvgMaxBal(df,TransactionType='both',AccountType='CASA',Month=1,AccountNumber=0,SenderName=''):
    """Average of the per-month maximum 'MaxBalance' over the last `Month` months.

    Months without a digit-valued balance are skipped; returns '_NA_' when no
    month produced a value.
    """
    from numpy import timedelta64  # local import: this module's header does not import numpy
    MaxBalList = []
    df = get_relevant_dataframe(df,TransactionType=TransactionType,AccountType=AccountType,Month=Month,AccountNumber=AccountNumber,SenderName=SenderName)
    last_date = df.tail(1)['Date']
    for i in range(Month):
        last_valid_date = last_date - timedelta64(1,'M')
        # Bug fix: take a per-month *view* instead of overwriting `df`. The
        # original shrank `df` to the first window, so every later window's
        # intersection was empty. It also skipped the `last_date` update via
        # `continue`, freezing the window whenever a month had no usable value.
        month_df = df[ (df['Date'] > last_valid_date) & (df['Date'] < last_date) ]
        maxbal = -99999999  # sentinel: no usable balance seen this month
        for idx,row in month_df.iterrows():
            if row['MaxBalance'] > maxbal and str(row['MaxBalance']).isdigit():
                maxbal = row['MaxBalance']
        if maxbal != -99999999:
            MaxBalList.append(maxbal)
        last_date = last_valid_date
    try:
        AvgMaxBal = float(sum(MaxBalList))/len(MaxBalList)
    except ZeroDivisionError:
        AvgMaxBal = '_NA_'
    return AvgMaxBal
def get_minimum_balance(df,TransactionType='both',AccountType='CASA',Month=1,AccountNumber=0,SenderName=''):
    """Smallest digit-valued 'MinBalance' among the relevant rows, or '_NA_'."""
    df = get_relevant_dataframe(df,TransactionType=TransactionType,AccountType=AccountType,Month=Month,AccountNumber=AccountNumber,SenderName=SenderName)
    minbal = 99999999  # sentinel: no usable balance seen yet
    for idx,row in df.iterrows():
        if row['MinBalance'] < minbal and str(row['MinBalance']).isdigit():
            # Bug fix: the original assigned row['MaxBalance'] here, recording
            # the wrong column after comparing against 'MinBalance'.
            minbal = row['MinBalance']
    if minbal == 99999999:
        return '_NA_'
    return minbal
def getAvgMinBal(df,TransactionType='both',AccountType='CASA',Month=1,AccountNumber=0,SenderName=''):
    """Average of the per-month minimum 'MinBalance' over the last `Month` months.

    Months without a digit-valued balance are skipped; returns '_NA_' when no
    month produced a value.
    """
    from numpy import timedelta64  # local import: this module's header does not import numpy
    MinBalList = []
    df = get_relevant_dataframe(df,TransactionType=TransactionType,AccountType=AccountType,Month=Month,AccountNumber=AccountNumber,SenderName=SenderName)
    last_date = df.tail(1)['Date']
    for i in range(Month):
        last_valid_date = last_date - timedelta64(1,'M')
        # Bug fix: per-month view instead of overwriting `df` (the original
        # made every window after the first empty), and `last_date` now always
        # advances (the original's `continue` skipped the update).
        month_df = df[ (df['Date'] > last_valid_date) & (df['Date'] < last_date) ]
        minbal = 99999999  # sentinel: no usable balance seen this month
        for idx,row in month_df.iterrows():
            if row['MinBalance'] < minbal and str(row['MinBalance']).isdigit():
                # Bug fix: the original recorded row['MaxBalance'] here.
                minbal = row['MinBalance']
        if minbal != 99999999:
            MinBalList.append(minbal)
        last_date = last_valid_date
    try:
        AvgMinBal = float(sum(MinBalList))/len(MinBalList)
    except ZeroDivisionError:
        AvgMinBal = '_NA_'
    return AvgMinBal
def getUtilization(df,TransactionType='both',AccountType='Credit_Card',Month=1,AccountNumber=0,SenderName=''):
    """Credit-card utilization: (limit - minimum balance) / limit, or '_NA_'.

    'Amt_3' is treated as the card limit; `minbal` comes from
    get_minimum_balance. Non-numeric values propagate as '_NA_'.
    """
    df = get_relevant_dataframe(df,TransactionType=TransactionType,AccountType=AccountType,Month=Month,AccountNumber=AccountNumber,SenderName=SenderName)
    # NOTE(review): this call hard-codes the default arguments instead of
    # forwarding this function's parameters, and it passes the already-filtered
    # df (so the frame is filtered twice) — confirm intended.
    minbal = get_minimum_balance(df,TransactionType='both',AccountType='Credit_Card',Month=1,AccountNumber=0,SenderName='')
    Amt_3 = -99999999  # sentinel: no usable limit seen yet
    for idx, row in df.iterrows():
        # NOTE(review): assumes 'Amt_3' cells are strings (.isdigit()) and then
        # compares str > int — lenient in Python 2 only; confirm interpreter.
        if row['Amt_3'].isdigit() and row['Amt_3'] > Amt_3 :
            Amt_3 = row['Amt_3']
    if Amt_3 == -99999999:
        Amt_3 = '_NA_'
    try:
        Utilization = (float(Amt_3)-float(minbal))/float(Amt_3)
    # '_NA_' sentinels make float() raise ValueError; a zero limit raises
    # ZeroDivisionError — both collapse to '_NA_'.
    except ValueError:
        Utilization = '_NA_'
    except ZeroDivisionError:
        Utilization = '_NA_'
return Utilization<file_sep>/function_definitions/bank_sms_preperation_function_definitions/balance_rectification_script.py
import pandas as pd
from datetime import datetime
"""
In bulk transaction usually the balance is constant which is final balance this script identifies this type of transaction
and tries to make sure if it is really a bulk txn. This script creates BulkTxnFlag column. Here,
If txn is not a bulk txn -> flag 0
If suspected bulk txn -> flag 1
If confirmed bulk txn -> flag 2
"""
def balance_rectification_func(bank_sms_df):
    """Populate TxnAmount (signed) and BulkTxnFlag, then persist the frame.

    Groups same-day messages that share customer/bank/account/balance; a group
    whose per-row errors sum to ~0 is a confirmed bulk transaction (flag 2),
    otherwise suspected (flag 1). Python 2 module (print statements below).
    """
    #Creating bulk_transaction_flag
    bank_sms_df['BulkTxnFlag'] = 0
    #print bank_sms_df.head()
    user_account_amt2_combination_dict = {}
    prev_key = ''
    for idx, row in bank_sms_df.iterrows():
        print 7, '\t\t' , idx
        # -1 is the "missing amount" sentinel; the float tolerance check stands
        # in for an exact != -1 comparison.
        if row['MessageType'] == 'Debit' and abs(float(row['Amt_1']) + 1) >= 0.001 : #float(row['Amt_1']) != -1
            bank_sms_df.at[idx, 'TxnAmount'] = -1 * float(row['Amt_1'])
        else:
            bank_sms_df.at[idx, 'TxnAmount'] = float(row['Amt_1'])
        if abs(float(row['Amt_2']) + 1 ) >= 0.001: #float(row['Amt_2']) != -1
            date = row['MessageTimestamp'].strftime('%Y-%m-%d')
            # Group key: same customer, bank, account, balance and calendar day.
            key = str(row['CustomerID'])+str(row['BankName'])+str(row['AccountNo'])+str(row['Amt_2'])+str(date)
            try:
                user_account_amt2_combination_dict[key].append(idx)
            except KeyError:
                user_account_amt2_combination_dict[key] = [idx]
            # Prune the previous group if it stayed a singleton once the stream
            # moved to a new key. NOTE(review): this assumes rows arrive grouped
            # (sorted by time per account) — confirm upstream ordering.
            try:
                if len(user_account_amt2_combination_dict[prev_key]) == 1:
                    del user_account_amt2_combination_dict[prev_key]
            except KeyError:
                pass
            prev_key = key
        else:
            continue
    for key in user_account_amt2_combination_dict.keys() :
        print 7, '\t\t' , key
        indexes = user_account_amt2_combination_dict[key]
        error_total = 0
        for idx in indexes:
            try:
                error_total += float(bank_sms_df.at[idx, 'Error'])
            # Any non-numeric Error cell poisons the total so the group cannot
            # be "confirmed" below.
            except TypeError:
                #print 'TypeException'
                error_total = 1
            except ValueError:
                #print 'ValueException'
                error_total = 1
        # Near-zero accumulated error confirms the bulk transaction.
        if abs(error_total) < 0.001:
            for idx in indexes:
                bank_sms_df.at[idx, 'BulkTxnFlag'] = 2
        else:
            for idx in indexes:
                bank_sms_df.at[idx, 'BulkTxnFlag'] = 1
    bank_sms_df = bank_sms_df[['SmsID', 'CustomerID', 'BankName', 'SENDER_PARENT' , 'SENDER_CHILD_1' , 'SENDER_CHILD_2' , 'SENDER_CHILD_3' , 'AccountNo', 'LinkedDebitCardNumber', 'AccountType', 'MessageSource', 'Message', 'MessageTimestamp', 'ReferenceNumber', 'TxnInstrument', 'MessageType', 'Currency_1', 'Amt_1', 'Currency_2', 'Amt_2', 'Amt_2_calculated', 'Error', 'ConsecutiveTxnTimespan', 'Currency_3', 'Amt_3', 'Vendor', 'TxnAmount', 'RepeatedTxnFlag', 'BulkTxnFlag']]
    bank_sms_df.to_csv('data_files/intermediate_output_files/banks/bank_sms_filtered_flaged.csv', index=False)
    bank_sms_df.index = range(len(bank_sms_df.index.values))
return bank_sms_df<file_sep>/function_definitions/bank_sms_preperation_function_definitions/classified_vs_nonclassified_tally_generation_script.py
import pandas as pd
def classified_vs_nonclassified_tally_generation_func(bank_sms_df):
    """Write two CSV tallies of classified vs non-classified SMS.

    One tally is per customer/account (accounts equal to '_NA_' are skipped),
    the other is per bank. A row counts as classified when its MessageType is
    anything other than the string 'None'. Returns nothing; output goes to
    data_files/intermediate_output_files/banks/.
    """
    #Creating following two dictionaries-
    #1. User-bank-account as key and its indexes as values.
    #2. BankName as key and its indexes as values.
    user_bank_account_combination_idx_dict = {}
    bank_name_idx_dict = {}
    for idx, row in bank_sms_df.iterrows():
        CustomerID = row['CustomerID']
        AccountNo = row['AccountNo']
        key1 = str(CustomerID) + '*' + str(AccountNo)
        key2 = str(row['BankName'])
        try:
            user_bank_account_combination_idx_dict[key1].append(idx)
        except KeyError:
            user_bank_account_combination_idx_dict[key1] = [idx]
        try:
            bank_name_idx_dict[key2].append(idx)
        except KeyError:
            bank_name_idx_dict[key2] = [idx]
    #Creating empty dataframe to store the result
    df = pd.DataFrame()
    for key in user_bank_account_combination_idx_dict:
        CustomerID = key.split('*')[0]
        AccountNo = key.split('*')[1]
        if AccountNo != '_NA_':
            index_list = user_bank_account_combination_idx_dict[key]
            ClassifiedCounter = 0
            NonClassifiedCounter = 0
            for index in index_list:
                MessageType = bank_sms_df.at[index, 'MessageType']
                # The classifier stores the literal string 'None' for
                # unclassified messages.
                if MessageType == 'None':
                    NonClassifiedCounter += 1
                else:
                    ClassifiedCounter += 1
            PercentOfSmsClassified = (float(ClassifiedCounter)/(float(ClassifiedCounter)+float(NonClassifiedCounter)))*100
            to_be_appended = pd.DataFrame({'CustomerID':pd.Series(CustomerID), 'AccountNo':pd.Series(AccountNo), 'TotalSmsClassified':ClassifiedCounter, \
                                            'TotalSmsNonClassified':NonClassifiedCounter, 'PercentOfSmsClassified':PercentOfSmsClassified})
            df = df.append(to_be_appended)
    df = df[['CustomerID', 'AccountNo', 'TotalSmsClassified', 'TotalSmsNonClassified', 'PercentOfSmsClassified']]
    df.to_csv('data_files/intermediate_output_files/banks/classified_vs_nonclassified_userwise_tally.csv', index=False)
    #Creating empty dataframe to store the result
    df = pd.DataFrame()
    for key in bank_name_idx_dict:
        index_list = bank_name_idx_dict[key]
        ClassifiedCounter = 0
        NonClassifiedCounter = 0
        for index in index_list:
            MessageType = bank_sms_df.at[index, 'MessageType']
            if MessageType == 'None':
                NonClassifiedCounter += 1
            else:
                ClassifiedCounter += 1
        PercentOfSmsClassified = (float(ClassifiedCounter)/(float(ClassifiedCounter)+float(NonClassifiedCounter)))*100
        to_be_appended = pd.DataFrame({'BankName':pd.Series(key), 'TotalSmsClassified':ClassifiedCounter, \
                                        'TotalSmsNonClassified':NonClassifiedCounter, 'PercentOfSmsClassified':PercentOfSmsClassified})
        df = df.append(to_be_appended)
    df = df[['BankName', 'TotalSmsClassified', 'TotalSmsNonClassified', 'PercentOfSmsClassified']]
    df.to_csv('data_files/intermediate_output_files/banks/classified_vs_nonclassified_bankwise_tally.csv', index=False)
<file_sep>/function_definitions/sms_level1_classification_func.py
import pandas as pd
from all_dict_generator import bank_dict
from all_dict_generator import utilities_dict
"""
This scirpt contains functions wich are used to filter out the desired sms(for e.g. Bank sms, Utility sms, etc.) from given sms.
"""
def new_line_eliminator_func(Message): # gets rid of white spaces
    """Coerce the value to str and trim surrounding whitespace/newlines.

    Used as a pandas read_csv converter for the 'Message' column.
    """
    return str(Message).strip()
def utf_8_encoder_func(sms): # encodes the message text with 'utf-8' encoding and returns, if at all some other encoding was used
    # NOTE(review): Python 2 semantics — unicode.encode('utf-8') then str().
    # Under Python 3, str(bytes) would yield "b'...'"; confirm the interpreter.
    try :
        SMS = str(sms.encode('utf-8'))
        #print SMS
    # Bare except is deliberate best-effort: any encode failure (no .encode
    # attribute, already-encoded text, ...) falls back to plain str().
    except :
        SMS = str(sms)
    return SMS
def isbank(message_source): # uses the dictionary provided by all_dict_generator and returns true if the message is from bank
    """Return True when the sender id encodes a known bank.

    The sender code is characters 3..8 of the id, uppercased, and is looked up
    in `bank_dict` from all_dict_generator.
    """
    sender_code = message_source[3:9].upper()
    return sender_code in bank_dict
def isutility(message_source): # uses the dictionary provided by all_dict_generator and returns true if the message is from utility
    """Return True when the sender id encodes a known utility provider.

    The sender code is characters 3..8 of the id, uppercased, and is looked up
    in `utilities_dict` from all_dict_generator.
    """
    sender_code = message_source[3:9].upper()
    return sender_code in utilities_dict
def bank_sms_filtering_func(filename): # filters out the bank messages
    """Read the raw SMS dump and keep only messages whose sender is a bank.

    The dump is '|'-separated with '~' line terminators; messages are stripped
    via the converter and deduplicated on text. The result is written to
    data_files/intermediate_output_files/banks/bank_sms_raw.csv and returned.
    """
    user_sms_raw_df = pd.read_csv(filename,sep='|', lineterminator='~', converters = {'Message':new_line_eliminator_func})
    #user_sms_raw_df = pd.read_csv(filename)
    user_sms_raw_df.drop_duplicates('Message',inplace=True)
    #user_sms_raw_df['Message'] = user_sms_raw_df['Message'].map(lambda x: str(x).strip())
    #user_sms_raw_df['Message'] = user_sms_raw_df['Message'].apply(utf_8_encoder_func)
    bank_sms_raw_df = user_sms_raw_df[user_sms_raw_df['MessageSource'].map(isbank) == True ]
    bank_sms_raw_df.to_csv('data_files/intermediate_output_files/banks/bank_sms_raw.csv', index = False)
    return bank_sms_raw_df
def utility_sms_filtering_func(filename): # filters out the utility messages
    """Read the raw SMS dump and keep only messages whose sender is a utility.

    Mirrors bank_sms_filtering_func. The result is written to
    data_files/intermediate_output_files/utility/utility_sms_raw.csv and returned.
    """
    user_sms_raw_df = pd.read_csv(filename,sep='|', lineterminator='~', converters = {'Message':new_line_eliminator_func})
    user_sms_raw_df.drop_duplicates('Message',inplace=True)
    #user_sms_raw_df['Message'] = user_sms_raw_df['Message'].map(lambda x: str(x).strip())
    #user_sms_raw_df['Message'] = user_sms_raw_df['Message'].apply(utf_8_encoder_func)
    # Consistency fix: this frame holds *utility* rows; the original reused the
    # misleading name `bank_sms_raw_df` copied from bank_sms_filtering_func.
    utility_sms_raw_df = user_sms_raw_df[user_sms_raw_df['MessageSource'].map(isutility) == True ]
    utility_sms_raw_df.to_csv('data_files/intermediate_output_files/utility/utility_sms_raw.csv', index = False)
    return utility_sms_raw_df
<file_sep>/function_definitions/regex_extractor_from_pickle.py
import pickle
# reads a pkl file created by 'storeregex.py' and extracts all the
# regular expressions , so that getters.py can use it later to
# extract data from the messages
#
# Idiom fix: use a context manager so the file handle is released even if
# unpickling raises, instead of open()/load()/close().
with open('function_definitions/regex.pkl','rb') as fileobject:
    regex = pickle.load(fileobject)
account_number_re_list = regex['account_number_regex']
debit_vendor_re_list = regex['debit_vendor_regex']
debit_2_vendor_re_list = regex['debit_2_vendor_regex']
credit_vendor_re_list = regex['credit_vendor_regex']
money_re_list = regex['money_regex']
junk_re_list = regex['junk_regex']
reference_number_re_list = regex['reference_number_regex']
credit_card_limit_re_list = regex['credit_card_limit_regex'] | 38fcb741dae1d0f5805dd5493106ee11c282af7a | [
"Python"
] | 22 | Python | prathamesh07/Bank_sms_analysis_1.1 | 1f1328bc8e54d27a0ce08acf3bd4b57699442fae | 8ace8136f7502689615515b7c3fccbc298802a1b |
refs/heads/master | <repo_name>barskykd/netronix_test_task<file_sep>/src/sensor-data.ts
/**
 * Sensor measurements for single moment in time
 */
type Datum<T> = {
    /**
     * timestamp
     */
    t: number,
    /**
     * measurement values
     */
    v: T[],
    // aggregates for number data (left undefined for non-numeric values)
    /**
     * sum of values
     */
    sum?: number | null
    /**
     * average of values
     */
    avg?: number | null,
    /**
     * last value
     */
    last?: T,
    /**
     * minimum value
     */
    min?: T,
    /**
     * maximum value
     */
    max?: T,
}
/**
 * Find the Datum with timestamp t in a time-ordered array, or create an empty
 * one and insert it at the correct position (the input array is not mutated;
 * a new array is returned on insertion).
 * @param data - array of Datum's, ordered by ascending t
 * @param t - timestamp
 * @returns the (possibly new) datum, its index in the returned array, and the array
 */
function findOrCreateDatum<T>(data: Datum<T>[], t: number) {
    for (let idx = data.length - 1; idx >= 0; --idx) {
        if (data[idx].t == t) {
            return {
                idx,
                datum: data[idx],
                data
            };
        }
        if (data[idx].t < t) {
            let newDatum: Datum<T> = {t, v: []};
            let newData = [...data.slice(0, idx + 1), newDatum, ...data.slice(idx + 1)];
            // Bug fix: report the index of the *inserted* datum (idx + 1);
            // the original returned the index of its predecessor.
            return {
                idx: idx + 1,
                datum: newDatum,
                data: newData
            }
        }
    }
    // t is older than everything present: prepend.
    let newDatum: Datum<T> = {t, v: []};
    let newData = [newDatum, ...data];
    // Bug fix: the prepended datum sits at index 0; the original returned
    // newData.length - 1, the index of the newest entry.
    return {
        idx: 0,
        datum: newDatum,
        data: newData
    }
}
/**
 * Measurements grouped by sensor name.
 */
type DataBySensorName = {
    [key:string]: Datum<any>[];
}
/**
 * Collects and keeps measurements.
 */
export default class SensorData {
    eventSource?: any;
    // last reported unit per sensor name
    private unitsBySensor: {[key:string]:string} = {};
    // time-ordered measurement history per sensor name
    private dataBySensor: DataBySensorName = {}
    // sorted cache of Object.keys(dataBySensor), refreshed on every event
    private sensorNames: string[] = [];
    // subscribers notified (with the updated sensor names) after each event
    private listeners: ((sensorNames: string[])=>void)[] = [];
    /**
     * @param url - event source url. If undefined - object will not collect data by itself.
     */
    constructor(private url?: string) {
        if (url) {
            this.eventSource = new window.EventSource(url);
            this.eventSource.onmessage = this.onevent.bind(this);
            this.eventSource.onerror = (e:any) => console.log(e);
        }
    }
    /**
     * returns list of all sensor names found so far
     */
    public getSensorNames(): string[] {
        return this.sensorNames;
    }
    /**
     * returns last measurement unit for sensor
     * @param sensorName - name of sensor;
     */
    public getUnit(sensorName: string): string {
        return this.unitsBySensor[sensorName] || "";
    }
    /**
     * Returns specified number of last measurements
     * @param sensorName - name of the sensor
     * @param count - number of measurements to return
     */
    public getLastValues(sensorName: string, count: number): Datum<any>[] {
        let values = this.dataBySensor[sensorName] || [];
        if (values.length > count) {
            return values.slice(values.length - count);
        }
        return [...values];
    }
    /**
     * Add callbacks on arrival of new data.
     * @param f
     */
    public addListener(f: ()=>void) {
        this.listeners.push(f);
    }
    /**
     * Remove callbacks on arrival of new data
     * @param f
     */
    public removeListener(f: ()=>void) {
        this.listeners = this.listeners.filter(x => x!==f);
    }
    /**
     * EventSource.onmessage handler. Can be used to add data manually;
     * expects e.data to be JSON: [{name, unit, measurements: [[seconds, value], ...]}, ...]
     * @param e
     */
    public onevent(e: any) {
        let data = JSON.parse(e.data);
        let updatedSensors: string[] = [];
        for (let d of data) {
            if (!this.dataBySensor[d.name]) {
                this.dataBySensor[d.name] = [];
            }
            this.unitsBySensor[d.name] = d.unit || "";
            // NOTE(review): the same name can be pushed twice when one event
            // carries two entries for a sensor — listeners should tolerate it.
            updatedSensors.push(d.name);
            for (let m of d.measurements) {
                let time = m[0]*1000;  // feed sends seconds; stored as milliseconds
                let v = m[1];
                let values = this.dataBySensor[d.name];
                let {idx, datum, data:newData} = findOrCreateDatum(values, time);
                this.dataBySensor[d.name] = newData;
                datum.v.push(v);
                datum.last = v;
                // numeric aggregates are only maintained for number values
                if (typeof(v) == 'number') {
                    datum.sum = (datum.sum || 0) + v;
                    datum.avg = datum.sum / datum.v.length;
                    if (datum.max === null || datum.max === undefined || datum.max < v) {
                        datum.max = v;
                    }
                    if (datum.min === null || datum.min === undefined || datum.min > v) {
                        datum.min = v;
                    }
                }
            }
        }
        this.sensorNames = Object.keys(this.dataBySensor).sort();
        for (let listener of this.listeners) {
            listener(updatedSensors);
        }
    }
}<file_sep>/README.md
# Netronix Test Task
Simple SPA that displays data from EventSource implemented with TypeScript
## Building
1. Make sure node.js 7+ is installed
1. Run
> npm install && npm run build
## Testing
Open public/index.html with [browser supporting EventSource](http://caniuse.com/#feat=eventsource).
## Unit tests
To run unit tests run
> npm test
## [Online demo](http://barskykd.github.io/netronix/)
<file_sep>/src/index.d.ts
// Ambient globals this app relies on in the browser environment.
interface Window {
    EventSource: any,          // native Server-Sent Events constructor (typed loosely)
    google: any,               // Google Maps namespace, set once the Maps script loads
    maps_loaded_callback: any  // JSONP-style callback invoked by the Maps loader
}
declare module 'd3';<file_sep>/src/test/test.ts
import SensorData from '../sensor-data';
import { expect } from 'chai';
// Fixtures: sensor identity and two timestamps (in seconds, as the feed sends them).
const TEST_SENSOR1_NAME = 'Sensor1';
const TEST_SENSOR1_UNIT = 'test_unit';
const TEST_SENSOR1_MEASUREMENT_TIME = Math.round(new Date().valueOf() / 1000);
const TEST_SENSOR1_MEASUREMENT_TIME2 = Math.round(new Date().valueOf() / 1000 - 50);
// Event payload with a single measurement.
const TEST_MEASUREMENT = [{
    name: TEST_SENSOR1_NAME,
    unit: TEST_SENSOR1_UNIT,
    measurements: [[TEST_SENSOR1_MEASUREMENT_TIME, 100]]
}]
// Same timestamp measured twice — exercises per-moment aggregation.
const TEST_MEASUREMENT2 = [...TEST_MEASUREMENT,
    {
        name: TEST_SENSOR1_NAME,
        unit: TEST_SENSOR1_UNIT,
        measurements: [[TEST_SENSOR1_MEASUREMENT_TIME, 200]]
    }
]
// Adds an *older* measurement — exercises time-ordering of returned data.
const TEST_MEASUREMENT3 = [...TEST_MEASUREMENT2, {
    name: TEST_SENSOR1_NAME,
    unit: TEST_SENSOR1_UNIT,
    measurements: [[TEST_SENSOR1_MEASUREMENT_TIME2, 300]]
}]
// Unit tests for SensorData (constructed without a url, so no EventSource is
// opened; events are injected directly via onevent).
describe('SensorData', () => {
    it ('should call listeners', () => {
        let sd = new SensorData();
        let timesListenerCalled = 0;
        sd.addListener(() => timesListenerCalled++);
        sd.onevent({
            data: JSON.stringify(TEST_MEASUREMENT)
        })
        expect(timesListenerCalled).to.equal(1);
        sd.onevent({
            data: JSON.stringify(TEST_MEASUREMENT)
        })
        expect(timesListenerCalled).to.equal(2);
    })
    it('should return correct sensor names and units', ()=> {
        let sd = new SensorData();
        sd.onevent({
            data: JSON.stringify(TEST_MEASUREMENT)
        })
        let sensorNames = sd.getSensorNames();
        let unit = sd.getUnit(TEST_SENSOR1_NAME);
        expect(sensorNames).to.be.a('array').that.have.lengthOf(1).and.eqls([TEST_SENSOR1_NAME]);
        expect(unit).to.equal(TEST_SENSOR1_UNIT);
    })
    it('should return correct data for single measurement', () => {
        let sd = new SensorData();
        sd.onevent({
            data: JSON.stringify(TEST_MEASUREMENT)
        })
        let lastValues = sd.getLastValues(TEST_SENSOR1_NAME, 10);
        let datum = lastValues[0];
        expect(lastValues.length).to.equal(1);
        expect(datum.v).to.be.a('array').that.have.lengthOf(1).and.eqls([100]);
        // timestamps are stored in milliseconds, hence * 1000
        expect(datum).to.include({
            t: TEST_SENSOR1_MEASUREMENT_TIME * 1000,
            last: 100,
            max: 100,
            min: 100,
            avg: 100
        });
    });
    it('should return correct data for multiple values in single moment', () => {
        let sd = new SensorData();
        sd.onevent({
            data: JSON.stringify(TEST_MEASUREMENT2)
        })
        let lastValues = sd.getLastValues(TEST_SENSOR1_NAME, 10);
        let datum = lastValues[0];
        expect(lastValues).to.have.lengthOf(1);
        expect(datum.v).to.be.a('array').that.have.lengthOf(2)
            .and.eqls([100, 200]);
        expect(datum).to.include({
            t: TEST_SENSOR1_MEASUREMENT_TIME * 1000,
            last: 200,
            max: 200,
            min: 100,
            avg: 150
        });
    });
    it('should return correct data for multiple values', () => {
        let sd = new SensorData();
        sd.onevent({
            data: JSON.stringify([TEST_MEASUREMENT2[0]])
        })
        sd.onevent({
            data: JSON.stringify([TEST_MEASUREMENT2[1]])
        })
        let lastValues = sd.getLastValues(TEST_SENSOR1_NAME, 10);
        let datum = lastValues[0];
        expect(lastValues).to.have.lengthOf(1);
        expect(datum.v).to.be.a('array').that.have.lengthOf(2)
            .and.eqls([100, 200]);
        expect(datum).to.include({
            t: TEST_SENSOR1_MEASUREMENT_TIME * 1000,
            last: 200,
            max: 200,
            min: 100,
            avg: 150
        });
    });
    it ('should return data sorted by time', () => {
        let sd =new SensorData();
        sd.onevent({
            data: JSON.stringify(TEST_MEASUREMENT3)
        })
        let lastValues = sd.getLastValues(TEST_SENSOR1_NAME, 10);
        let datum1 = lastValues[0];
        let datum2 = lastValues[1];
        expect(lastValues).to.have.lengthOf(2);
        expect(datum1.v).to.be.a('array').that.have.lengthOf(1)
            .and.eqls([300]);
        expect(datum1).to.include({
            t: TEST_SENSOR1_MEASUREMENT_TIME2 * 1000,
            last: 300,
            max: 300,
            min: 300,
            avg: 300
        });
        expect(datum2.v).to.be.a('array').that.have.lengthOf(2)
            .and.eqls([100, 200]);
        expect(datum2).to.include({
            t: TEST_SENSOR1_MEASUREMENT_TIME * 1000,
            last: 200,
            max: 200,
            min: 100,
            avg: 150
        });
    })
});<file_sep>/src/google_maps.ts
// NOTE(review): '<KEY>' looks like a redacted placeholder; a real Maps API key
// must be supplied at deploy time.
const API_KEY = '<KEY>';
// is google maps loaded
let _isLoaded = false;
// is google maps currently loading
let _isLoading = false;
// callbacks to call on load finish
let callbacks: (()=>void)[] = [];
const GOOGLE_MAPS_SCRIPT_TAG_ID = 'google-MAPS-script-tag-id';
// The loader calls window.maps_loaded_callback (JSONP-style) when ready.
const GOOGLE_MAPS_URL = 'https://maps.googleapis.com/maps/api/js?key=' + API_KEY + '&callback=maps_loaded_callback';
/** Inject script tag for google maps api. Calls callback on finish.
 * If the API is already fully loaded, the callback is invoked immediately. */
export function load(callback: ()=>void) {
    if (_isLoaded) {
        // Bug fix: the callback was previously dropped silently when the API
        // had already finished loading; invoke it right away instead.
        callback();
        return;
    }
    if (_isLoading) {
        // A load is in flight: queue the callback for maps_loaded_callback.
        callbacks.push(callback);
        return;
    }
    _isLoading = true;
    callbacks.push(callback);
    let script = document.createElement('script');
    script.type = 'text/javascript';
    script.async = true;
    script.src = GOOGLE_MAPS_URL;
    script.id = GOOGLE_MAPS_SCRIPT_TAG_ID;
    document.getElementsByTagName('head')[0].appendChild(script);
}
/**
 * Callback invoked by the Google Maps loader. See GOOGLE_MAPS_URL above.
 */
window.maps_loaded_callback = function() {
    _isLoaded = true;
    // Flush every queued callback; failures are isolated so one bad callback
    // cannot prevent the rest from running.
    for (let cb of callbacks) {
        try {
            cb();
        } catch (e) {
            console.error(e.stack);
        }
    }
}
/**
 * Returns true once the Google Maps API has fully loaded.
 */
export function isLoaded() {
    return _isLoaded;
} | f89e3c4763dde19505f7dec412b4708b3b0bd760 | [
"Markdown",
"TypeScript"
] | 5 | TypeScript | barskykd/netronix_test_task | 666a9d7a8daa72a1baf3ed3e29902f21e6030f5b | efed277e2141c4fb14d570ffe4e46b0d51392be2 |
refs/heads/master | <file_sep>namespace BizzareBazaar
{
    /// <summary>
    /// Decorator base: wraps an <see cref="IItem"/> and forwards every call to it;
    /// subclasses override individual members to add description text and price.
    /// </summary>
    abstract class ItemDecorator : IItem
    {
        // The wrapped item that all calls delegate to.
        private readonly IItem _originalItem;
        protected ItemDecorator(IItem item)
        {
            _originalItem = item;
        }
        /// <summary>Price of the wrapped item (subclasses add surcharges).</summary>
        public virtual double GetPrice()
        {
            return _originalItem.GetPrice();
        }
        /// <summary>Description of the wrapped item (subclasses append suffixes).</summary>
        public virtual string GetDescription()
        {
            return _originalItem.GetDescription();
        }
        /// <summary>Item number of the wrapped item (never altered by decorators here).</summary>
        public virtual int GetItemNumber()
        {
            return _originalItem.GetItemNumber();
        }
    }
}<file_sep>namespace BizzareBazaar
{
class StribogRsDecorator : ItemDecorator
{
public StribogRsDecorator(IItem item) : base(item)
{
}
public override string GetDescription()
{
return base.GetDescription() + " with Stribog runestone";
}
public override double GetPrice()
{
return base.GetPrice() + 10;
}
}
}<file_sep>namespace BizzareBazaar
{
    /// <summary>Contract for anything sellable at the bazaar.</summary>
    public interface IItem
    {
        /// <summary>Total price of the item.</summary>
        double GetPrice();
        /// <summary>Human-readable description of the item.</summary>
        string GetDescription();
        /// <summary>Unique sequential item number assigned at production time.</summary>
        int GetItemNumber();
    }
}<file_sep>namespace BizzareBazaar
{
public class BasicItem : IItem
{
private readonly double _price;
private readonly string _description;
private readonly int _itemNumber;
public BasicItem(double price, string description, int itemNumber)
{
_itemNumber = itemNumber;
_price = price;
_description = description;
}
public string GetDescription()
{
return _description;
}
public double GetPrice()
{
return _price;
}
public int GetItemNumber()
{
return _itemNumber;
}
}
}<file_sep>namespace BizzareBazaar
{
class DevanaRsDecorator : ItemDecorator
{
public DevanaRsDecorator(IItem item) : base(item)
{
}
public override string GetDescription()
{
return base.GetDescription() + " with Devana runestone";
}
public override double GetPrice()
{
return base.GetPrice() + 10;
}
}
}<file_sep>namespace BizzareBazaar
{
    /// <summary>Customer archetypes the factory below can instantiate.</summary>
    enum CustomerClass
    {
        Peasant,
        Wizard,
        Warrior
    }
class CustomerFactory
{
private CustomerFactory()
{
}
public static Customer CreateCustomer(CustomerClass customerClass, string customerName)
{
Customer customer = null;
switch (customerClass)
{
case CustomerClass.Peasant:
customer = new PeasantCustomer(customerName);
break;
case CustomerClass.Warrior:
customer = new WarriorCustomer(customerName);
break;
case CustomerClass.Wizard:
customer = new WizardCustomer(customerName);
break;
}
return customer;
}
}
}<file_sep>namespace BizzareBazaar
{
class EpicMagicDamageDecorator : ItemDecorator
{
public EpicMagicDamageDecorator(IItem item) : base(item)
{
}
public override string GetDescription()
{
return base.GetDescription() + " with EPIC magic damage";
}
public override double GetPrice()
{
return base.GetPrice() + 10;
}
}
}<file_sep>using BizzareBazaar;
using NUnit.Framework;
namespace BazaarUnitTest
{
    /// <summary>Unit tests around ItemProduction's shared Singleton storage.</summary>
    /// NOTE(review): these tests mutate the *static* Singleton.Inventory, which
    /// NUnit does not reset between tests — StorageIsEmpty only passes when it
    /// runs before the producing tests. Consider clearing the inventory in a
    /// [SetUp] method to make the fixture order-independent.
    [TestFixture]
    public class ItemProductionTest
    {
        // Testing unit testing itself with positivetest
        [Test]
        public void PositiveTest()
        {
            int x = 7;
            int y = 7;
            Assert.AreEqual(x, y);
        }
        // Tests if passed object is not null in storage
        [Test]
        public void StorageIsNotNull()
        {
            for (int i = 0; i < 10; i++)
            {
                ItemProduction.ProduceItem();
            }
            for (int i = 0; i < Singleton.Inventory.Count; i++)
            {
                Assert.NotNull(Singleton.Inventory[i]);
            }
        }
        //Tests if storage is not empty when storage is supposed to not be empty
        [Test]
        public void StorageIsNotEmpty()
        {
            for (int i = 0; i < 10; i++)
            {
                ItemProduction.ProduceItem();
            }
            Assert.IsNotEmpty(Singleton.Inventory);
        }
        //Tests if storage is already empty
        [Test]
        public void StorageIsEmpty()
        {
            Assert.IsEmpty(Singleton.Inventory);
        }
    }
}<file_sep>using System;
namespace BizzareBazaar
{
    /// <summary>Customer belonging to the 'Peasant' group.</summary>
    class PeasantCustomer : Customer
    {
        public PeasantCustomer(string customerCustomerGroup)
        {
            CustomerGroup = customerCustomerGroup;
        }
        // NOTE(review): despite its name this writes the customer-group string,
        // not a booth number — confirm intent against the Customer base class.
        public override void PrintBoothNumber()
        {
            Console.Write(CustomerGroup);
        }
    }
}<file_sep>using System;
using System.Collections.Generic;
using System.Timers;
namespace BizzareBazaar
{
    /// <summary>
    /// Produces items on a 200 ms timer and stores them in the shared
    /// Singleton inventory until booths fetch them.
    /// </summary>
    class ItemProduction
    {
        private static Timer _timer;
        // NOTE(review): held only to force singleton creation; items are read
        // and written through the static Singleton.Inventory below — confirm
        // this field is still needed.
        public static Singleton Storage = Singleton.Instance;
        // Monotonically increasing item id; writes are guarded by Lock, reads
        // rely on volatile.
        private static volatile int _itemNumber;
        private static readonly Object Lock = new Object();
        /// <summary>Writes every stored item to the console.</summary>
        public void PrintStorage()
        {
            foreach (IItem item in Singleton.Inventory)
            {
                PrintItem(item);
            }
        }
        /// <summary>Returns the shared inventory list (live reference, not a copy).</summary>
        public List<IItem> GetInventory()
        {
            return Singleton.Inventory;
        }
        /// <summary>Writes one item's number, description and price to the console.</summary>
        public void PrintItem(IItem item)
        {
            Console.WriteLine("item #" + item.GetItemNumber() + " " +
                              item.GetDescription() + " | Price: " + item.GetPrice());
        }
        /// <summary>Starts a repeating 200 ms timer that produces items until the quota is hit.</summary>
        public static void SetTimerAndProduceItems()
        {
            _timer = new Timer {Interval = 200};
            _timer.Elapsed += OnTimedEvent;
            _timer.AutoReset = true;
            _timer.Enabled = true;
        }
        // Timer tick: produce one item, or stop and dispose the timer once
        // production is done (more than 100 items).
        public static void OnTimedEvent(Object source, ElapsedEventArgs e)
        {
            if (!StopProduction())
            {
                ProduceItem();
            }
            else
            {
                _timer.Stop();
                _timer.Dispose();
            }
        }
        /// <summary>Creates one random item and appends it to the shared inventory.</summary>
        public static void ProduceItem()
        {
            // Timer callbacks run on thread-pool threads, so the add/increment
            // pair is serialized under Lock.
            lock (Lock)
            {
                ItemCreator item = new ItemCreator();
                Singleton.Inventory.Add(item.CreateRndItem(_itemNumber));
                _itemNumber++;
            }
        }
        // Production stops once more than 100 items have been created.
        private static bool StopProduction()
        {
            return _itemNumber > 100;
        }
    }
}<file_sep>namespace BizzareBazaar
{
class GreaterDevanaRsDecorator : ItemDecorator
{
public GreaterDevanaRsDecorator(IItem item) : base(item)
{
}
public override string GetDescription()
{
return base.GetDescription() + " with Greater Devana Runestone";
}
public override double GetPrice()
{
return base.GetPrice() + 10;
}
}
}<file_sep>using BizzareBazaar;
using NUnit.Framework;
namespace BazaarUnitTest
{
    [TestFixture]
    public class BoothTest
    {
        // NOTE(review): this test contains no assertions — it only verifies
        // that the Booth constructor does not throw, and the local variable is
        // unused. Consider asserting on the booth's observable state as well.
        [Test]
        public void Boothtest()
        {
            Booth booth = new Booth(1, 10);
        }
    }
}<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading;
namespace BizzareBazaar
{
class Program
{
public static void Main(string[] args)
{
//PRODUCES ITEM IN ITEMPRODUCTION EVERY SECOND | Stores ALL items in ItemProduction until fetched by booth
ItemProduction.ProduceItem();
ItemProduction.SetTimerAndProduceItems();
// Creates customer from CustomerFactory (Customers are waking up from a good night sleep)
Customer wizardCustomer = CustomerFactory.CreateCustomer(CustomerClass.Wizard, "WizardCustomer");
Customer peasantCustomer = CustomerFactory.CreateCustomer(CustomerClass.Peasant, "PeasantCustomer");
Customer warriorCustomer = CustomerFactory.CreateCustomer(CustomerClass.Warrior, "WarriorCustomer");
// Booths at Bazaar preparing for a new day
Booth booth1 = new Booth(10, 0);
Booth booth2 = new Booth(5, 1);
List<Booth> boothList = new List<Booth> {booth1, booth2};
// The customers arrives at the Bazaar
List<Person> customers = new List<Person> {wizardCustomer, peasantCustomer, warriorCustomer};
Controller controller = new Controller();
// The Booths at the Bazaar begins to get items from the supplier
controller.InitiateBoothFetch(boothList);
Thread[] itemForSaleThread = new Thread[boothList.Count];
<<<<<<< HEAD
//for (int i = 0; i < 5; i++)
//{
// // Lamda
// Thread t = new Thread(() => controller.MakeTransactionsOnList(boothList, customers));
// transactionThreads[i] = t;
//}
Console.WriteLine("The Bazaar Of The Bizaare is now OPEN!");
=======
Console.WriteLine("The Bazaar Of The Bizaare is now OPEN!");
Console.WriteLine("Press any key to start shopping");
>>>>>>> e0600c9f5aa31cc5ab4ed556b64a6e7ec3e3292d
Console.ReadKey();
while (!BoothClosed(boothList))
{
for (int i = 0; i < itemForSaleThread.Length; i++)
{
Thread th = new Thread(() => new Thread(() => controller.PutItemUpForSale(boothList.ElementAt(i))));
itemForSaleThread[i] = th;
th.Start();
}
//foreach (var booth in boothList)
//{
// controller.PutItemUpForSale(booth);
//}
Thread t = new Thread(() => controller.MakeTransactionsOnList(boothList, customers));
t.Start();
t.Join();
}
Console.WriteLine("The Bazaare is now closed. Please come again tomorrow!");
Console.ReadKey();
}
public static bool BoothClosed(List<Booth> boothList)
{
for (int i = 0; i < boothList.Count; i++)
{
if (boothList.ElementAt(i).DailyQuota > 0)
{
return false;
}
}
return true;
}
}
}<file_sep>using System;
using System.Linq;
using System.Text;
namespace BizzareBazaar
{
abstract class Customer : Person
{
protected string CustomerGroup { get; set; }
public void BuyItem(Booth booth)
{
if (booth.Inventory.Count != 0)
{
Inventory.Add(booth.Inventory.First());
}
}
protected string GetCustomerGroup()
{
return CustomerGroup;
}
public virtual void ShowInventory()
{
foreach (var item in Inventory)
{
Console.WriteLine(item.GetDescription());
}
}
public virtual void PrintInformation(string customerClass)
{
StringBuilder builder = new StringBuilder();
builder.Append("CustomerGroup: " + GetCustomerGroup() + "\n");
}
public override void PrintBoothNumber()
{
Console.WriteLine();
}
public override string GetDescription()
{
return CustomerGroup;
}
public IItem GetLastItem()
{
return Inventory.Last();
}
}
}<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
namespace BizzareBazaar
{
class Controller
{
private readonly View _view;
private readonly Object _lock = new Object();
public Controller()
{
_view = new View();
}
public void InitiateBoothFetch(List<Booth> boothModel)
{
foreach (var booth in boothModel)
{
booth.SetTimerAndFetchItems();
}
}
public void PutItemUpForSale(Booth booth)
{
if (booth.Inventory.Count != 0)
{
lock (_lock)
{
IItem item = booth.Inventory.First();
_view.ItemForSale(item, booth);
}
}
}
public void MakeTransactionsOnList(List<Booth> booths, List<Person> customers)
{
Random randomBooth = new Random();
Random randomCustomer = new Random();
lock (_lock)
{
MakeTransaction(booths.ElementAt(randomBooth.Next(0, booths.Count)),
(Customer) customers.ElementAt(randomCustomer.Next(0, customers.Count)));
}
}
public void MakeTransaction(Booth booth, Customer customer)
{
if (booth.Inventory.Count != 0)
{
customer.BuyItem(booth);
booth.RemoveFirstItemFromInventory();
lock (_lock)
{
_view.ItemBought(customer.GetLastItem(), customer, booth);
}
booth.DailyQuota--;
}
}
}
}<file_sep>using System;
namespace BizzareBazaar
{
class ItemCreator
{
string[] decorators =
{
"Basic item", "Magic item",
"Epic Magic item", "Stribog Runestone",
"Greater Stribog Runestone", "Devana Runestone",
"Greater Devana Runestone", "Sannesh Runestone", "Greater Sannesh Runestone"
};
public IItem CreateRndItem(int itemNumber)
{
IItem basicItem = new BasicItem(10, "BasicItem", itemNumber);
Random rnd = new Random();
int rndNum = rnd.Next(0, 10);
for (int i = 0; i < decorators.Length; i++)
{
if (i == rndNum)
{
switch (decorators[i])
{
case "Basic item":
break;
case "Magic item":
basicItem = new MagicDecorator(basicItem);
break;
case "Epic Magic item":
basicItem = new EpicMagicDamageDecorator(basicItem);
break;
case "Devana Runestone":
basicItem = new DevanaRsDecorator(basicItem);
break;
case "Greater Devana Runestone":
basicItem = new GreaterDevanaRsDecorator(basicItem);
break;
case "Stribog Runestone":
basicItem = new StribogRsDecorator(basicItem);
break;
case "Greater Stribog Runestone":
basicItem = new GreaterStribogRsDecorator(basicItem);
break;
case "Sannesh Runestone":
basicItem = new SanneshRsDecorator(basicItem);
break;
case "Greater Sannesh Runestone":
basicItem = new GreaterSanneshRsDecorator(basicItem);
break;
}
}
}
return basicItem;
}
}
}<file_sep>using System;
namespace BizzareBazaar
{
class View
{
public void PrintItem(IItem item)
{
Console.WriteLine(item.GetDescription() + " | Price: " + item.GetPrice());
}
public void ItemBought(IItem item, Person customerName, Booth booth)
{
Console.WriteLine("\t\t\t" + customerName.GetDescription() + " bought " + "Item#"
+ item.GetItemNumber() + item.GetDescription() + " from " + booth.GetDescription());
}
public void ItemForSale(IItem item, Booth booth)
{
Console.WriteLine("item#" + item.GetItemNumber() + " " + item.GetDescription() + " can now be bought from " +
booth.GetDescription());
}
public void PrintPersonDescription(Person person)
{
person.PrintBoothNumber();
}
}
}<file_sep>namespace BizzareBazaar
{
class GreaterSanneshRsDecorator : ItemDecorator
{
public GreaterSanneshRsDecorator(IItem item) : base(item)
{
}
public override string GetDescription()
{
return base.GetDescription() + " with Greater Sannesh Runestone(Kel`Thuzad quest item ;)";
}
public override double GetPrice()
{
return base.GetPrice() + 10;
}
}
}<file_sep>namespace BizzareBazaar
{
class WarriorCustomer : Customer
{
public WarriorCustomer(string customerCustomerGroup)
{
CustomerGroup = customerCustomerGroup;
}
public override void PrintBoothNumber()
{
PrintInformation(GetCustomerGroup());
}
}
}<file_sep>using System.Collections.Generic;
using System.Linq;
namespace BizzareBazaar
{
public static class Global
{
public static IItem GetFirstItem(List<IItem> inventory)
{
return inventory.First();
}
public static void AddOneItemToInventory(List<IItem> inventory, IItem item)
{
inventory.Add(item);
}
public static void AddItemsToInventory(List<IItem> inventory, List<IItem> itemList)
{
inventory.AddRange(itemList);
}
public static void RemoveFirstItemFromInventory(List<IItem> inventory)
{
inventory.RemoveAt(0);
}
public static void RemoveLastItemFromInventory(List<IItem> inventory)
{
inventory.RemoveAt(inventory.Count);
}
public static string GetDescription(IItem item)
{
return item.GetDescription();
}
}
}
<file_sep>namespace BazaarUnitTest
{
    class MainTest
    {
        // Intentionally empty entry point so the test project can be built as
        // an executable; the actual tests live in the [TestFixture] classes.
        public static void Main(string[] args)
        {
        }
    }
}<file_sep>namespace BizzareBazaar
{
class GreaterStribogRsDecorator : ItemDecorator
{
public GreaterStribogRsDecorator(IItem item) : base(item)
{
}
public override string GetDescription()
{
return base.GetDescription() + " with Greater Stribog runestone";
}
public override double GetPrice()
{
return base.GetPrice() + 10;
}
}
}<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Timers;
namespace BizzareBazaar
{
    /// <summary>
    /// A sales booth at the bazaar. It periodically fetches items from the
    /// shared supplier inventory (Singleton.Inventory) on its own timer and
    /// sells them until its daily quota is used up.
    /// </summary>
    class Booth : IManipulateInventory
    {
        // Items this booth may still sell today; decremented by the Controller on each sale.
        public int DailyQuota { get; set; }
        public List<IItem> Inventory { get; set; } = new List<IItem>();
        public int BoothNumber { get; set; }
        // Drives the periodic fetch from the supplier (every 500 ms).
        private readonly Timer _timer = new Timer {Interval = 500};
        public bool SoldQuota = false;
        public Booth(int quota, int boothNumber)
        {
            DailyQuota = quota;
            BoothNumber = boothNumber;
        }
        // First item of this booth's inventory. Throws when empty.
        public IItem GetFirstItem()
        {
            return Inventory.First();
        }
        public void AddOneItemToInventory(IItem item)
        {
            Inventory.Add(item);
        }
        public void AddItemsToInventory(List<IItem> itemList)
        {
            Inventory.AddRange(itemList);
        }
        public void RemoveFirstItemFromInventory()
        {
            Inventory.RemoveAt(0);
        }
        /// <summary>Hooks the timer so this booth repeatedly fetches supplier items.</summary>
        public void SetTimerAndFetchItems()
        {
            _timer.Elapsed += OnTimedEvent;
            _timer.AutoReset = true;
            _timer.Enabled = true;
        }
        /// <summary>
        /// Moves the first supplier item into this booth's inventory and stops
        /// the fetch timer once the daily quota is exhausted.
        /// NOTE(review): Singleton.Inventory is read and mutated here from each
        /// booth's timer thread without synchronization — looks racy; verify
        /// against the producer's lock.
        /// </summary>
        public void FetchFirstItem()
        {
            if (Singleton.Inventory.Count != 0)
            {
                Inventory.Add(Singleton.Inventory.First());
                Singleton.Inventory.Remove(Singleton.Inventory.First());
            }
            if (DailyQuota <= 0)
            {
                _timer.Stop();
                _timer.Dispose();
            }
        }
        // Timer callback: fetch while the supplier has items and quota remains.
        public void OnTimedEvent(Object source, ElapsedEventArgs e)
        {
            if (Singleton.Inventory.Count != 0 && DailyQuota > 0)
            {
                FetchFirstItem();
            }
        }
        // The item currently offered for sale (front of the inventory).
        public IItem ItemUpForSale()
        {
            return Inventory.First();
        }
        public string GetDescription()
        {
            return "Booth#" + BoothNumber;
        }
        // "La stå" (Norwegian: leave this in place)
        public void PrintItem(IItem item)
        {
            Console.WriteLine("Item# " + item.GetItemNumber() + " " + item.GetDescription() + "Price is: " + item.GetPrice());
        }
    }
}<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace BizzareBazaar
{
    /// <summary>Common inventory operations shared by booths and persons.</summary>
    interface IManipulateInventory
    {
        // The backing item list.
        List<IItem> Inventory {get; set;}
        // First item of the inventory.
        IItem GetFirstItem();
        // Appends a single item.
        void AddOneItemToInventory(IItem item);
        // Appends all items of the given list.
        void AddItemsToInventory(List<IItem> itemList);
        // Removes the item at index 0.
        void RemoveFirstItemFromInventory();
        // Short human-readable description of the implementer.
        string GetDescription();
    }
}
<file_sep>using System.Collections.Generic;
using System.Linq;
namespace BizzareBazaar
{
internal abstract class Person : IManipulateInventory
{
public List<IItem> Inventory { get; set; } = new List<IItem>();
protected Person()
{
}
public IItem GetFirstItem()
{
return Inventory.FirstOrDefault();
}
public void AddOneItemToInventory(IItem item)
{
Inventory.Add(item);
}
public void AddItemsToInventory(List<IItem> itemList)
{
Inventory.AddRange(itemList);
}
public void RemoveFirstItemFromInventory()
{
Inventory.RemoveAt(0);
}
public virtual string GetDescription()
{
return "Person.GetDescription: A basic, boring person";
}
public abstract void PrintBoothNumber();
}
}<file_sep>using System.Collections.Generic;
namespace BizzareBazaar
{
    /// <summary>
    /// Thread-safe singleton holding the shared supplier inventory.
    /// NOTE(review): Inventory is a *static* property, so callers never need
    /// Instance to reach it — the instance itself carries no state; confirm
    /// whether the singleton machinery is still required.
    /// </summary>
    public sealed class Singleton
    {
        // volatile + double-checked locking ensure exactly one instance is created.
        private static volatile Singleton _instance;
        private static readonly object Lock = new object();
        public static List<IItem> Inventory { get; set; } = new List<IItem>();
        private Singleton()
        {
        }
        /// <summary>Lazily created single instance (double-checked locking).</summary>
        public static Singleton Instance
        {
            get
            {
                if (_instance == null)
                {
                    lock (Lock)
                    {
                        if (_instance == null)
                            _instance = new Singleton();
                    }
                }
                return _instance;
            }
        }
    }
}<file_sep>namespace BizzareBazaar
{
class WizardCustomer : Customer
{
public WizardCustomer(string customerCustomerGroup)
{
CustomerGroup = customerCustomerGroup;
}
public override void PrintBoothNumber()
{
PrintInformation(GetCustomerGroup());
}
}
} | 224209cccbc27f33a12a7b5bbb3c6985bddc6f54 | [
"C#"
] | 27 | C# | kottelett/BizzareBazaar | c8402cae298c963022f59c8eafc1fa502b2952c8 | ce93184214076e4900746a9141e9279752996136 |
refs/heads/master | <repo_name>off-by-0point5/paper-snowball<file_sep>/src/main/java/com/github/offby0point5/mc/plugin/paper/snowball/SnowballEvents.java
package com.github.offby0point5.mc.plugin.paper.snowball;
import com.destroystokyo.paper.event.server.ServerTickStartEvent;
import net.kyori.adventure.text.Component;
import net.kyori.adventure.text.format.TextColor;
import org.bukkit.*;
import org.bukkit.block.BlockFace;
import org.bukkit.enchantments.Enchantment;
import org.bukkit.entity.Snowball;
import org.bukkit.entity.*;
import org.bukkit.event.Event;
import org.bukkit.event.EventHandler;
import org.bukkit.event.Listener;
import org.bukkit.event.block.Action;
import org.bukkit.event.entity.ProjectileHitEvent;
import org.bukkit.event.inventory.InventoryClickEvent;
import org.bukkit.event.inventory.InventoryDragEvent;
import org.bukkit.event.player.*;
import org.bukkit.event.world.WorldInitEvent;
import org.bukkit.inventory.EquipmentSlot;
import org.bukkit.inventory.ItemFlag;
import org.bukkit.inventory.ItemStack;
import org.bukkit.inventory.meta.ItemMeta;
import org.bukkit.projectiles.ProjectileSource;
import org.bukkit.scoreboard.DisplaySlot;
import org.bukkit.scoreboard.Objective;
import org.bukkit.scoreboard.Score;
import org.bukkit.scoreboard.Scoreboard;
import org.bukkit.util.Vector;
import java.util.*;
public class SnowballEvents implements Listener {
    // Whether a player is waiting for the next round or actively playing.
    private enum PlayerState {
        WAITING,
        PLAYING
    }
    // Whether a round is currently running or the plugin waits for players.
    private enum GameState {
        RUNNING,
        WAITING
    }
    // Win condition of a round; NEUTRAL means a player has not voted yet.
    private enum GameTarget {
        KILL_LIMIT,
        TIME_LIMIT,
        NEUTRAL
    }
    // State variables
    private final Map<Player, PlayerState> playerStateMap = new HashMap<>(); // if player is waiting for next game or playing
    private final Map<Player, GameTarget> playerGameTargetVotingMap = new HashMap<>(); // mode voting per player
    private final Map<Player, Integer> playerKillsMap = new HashMap<>(); // hits scored (kills) per player
    private final Map<Player, Integer> playerDeathsMap = new HashMap<>(); // times hit (deaths) per player
    private final Map<Player, Integer> playerSnowballsMap = new HashMap<>(); // snowballs per player
    private final Map<Player, Integer> playerPickupTicksMap = new HashMap<>(); // ticks spent sneaking on a frame, per player
    private final Map<Player, Integer> catchModeTicks = new HashMap<>(); // ticks remaining until player leaves catch mode
    private final Map<Player, Scoreboard> playerScoreboardMap = new HashMap<>(); // scoreboard display for each player
    private final Map<Player, List<Objective>> playerObjectiveListMap = new HashMap<>(); // display objectives for each player
    private final Map<String, Boolean> clickAction = new HashMap<>(); // if player issued a click action in this tick
    private GameTarget gameTarget; // either playing until time is up or one player has reached certain amount of kills
    private int gameTargetAmount; // how much of the target needs to be reached
    private int stateTicks; // how long the current state lasts or will last
    private GameState gameState; // if the game is running or waiting for players
    private int lastRemaining; // the last remaining-target value for which a title was shown to the players
    // Game rules
    private World world; // World/Dimension the game takes place
    private final Material snowballOnGroundItem; // item to represent snowballs on ground (in item frames)
    private final Material snowballItem; // item to represent snowballs in inventory
    private final Material emptyItem; // item to represent zero snowballs in inventory (for right click detection)
    private final ItemStack voteTime; // displayed in wait mode to vote for TIME_LIMIT mode in next round
    private final ItemStack voteKills; // displayed in wait mode to vote for KILL_LIMIT mode in next round
    private final int minPlayers; // Players needed to start waiting counter
    private final int waitTicks; // Ticks to wait before each game
    private final int standardTimeLimit; // When GameTarget TIME_LIMIT is set (in seconds, despite the field name)
    private final int standardKillLimit; // When GameTarget KILL_LIMIT is set (in kills/hits)
    private final int snowballPickupTicks; // how long players have to sneak to pick up snowballs
    private final int maxSnowballs; // maximum of snowballs players can obtain
    private final int startSnowballs; // start amount of snowballs
    private final float catchableAngle; // only balls coming in less than this angle can be caught
    private final int catchTicks; // when right click was issued, player gets catch mode for this many ticks
    /**
     * Configures all game rules (item types, vote items, timings and limits)
     * and puts the plugin into the waiting state.
     */
    public SnowballEvents() {
        this.gameTarget = GameTarget.TIME_LIMIT;
        this.snowballOnGroundItem = Material.SNOW_BLOCK;
        this.snowballItem = Material.COAL;
        this.emptyItem = Material.FLINT_AND_STEEL;
        // Item for kill limit
        this.voteKills = new ItemStack(Material.TOTEM_OF_UNDYING);
        ItemMeta itemMeta = this.voteKills.getItemMeta();
        itemMeta.displayName(Component.text("Trefferlimit"));
        itemMeta.addItemFlags(ItemFlag.HIDE_ENCHANTS);
        this.voteKills.setItemMeta(itemMeta);
        // Item for time limit
        this.voteTime = new ItemStack(Material.CLOCK);
        itemMeta = this.voteTime.getItemMeta();
        itemMeta.displayName(Component.text("Zeitlimit"));
        itemMeta.addItemFlags(ItemFlag.HIDE_ENCHANTS);
        this.voteTime.setItemMeta(itemMeta);
        this.minPlayers = 2; // two players needed as it is competitive
        this.waitTicks = 200; // 10 seconds between rounds
        this.standardKillLimit = 20; // one player needs 20 kills to win kill limited mode
        this.standardTimeLimit = 60*2+30; // play two and a half minutes in time limited mode
        this.snowballPickupTicks = 30; // 1.5 seconds to pick up snowballs
        this.maxSnowballs = 3; // one can have three snowballs, but not more
        this.startSnowballs = 1; // each player gets one snowball to start with
        this.catchableAngle = 0.3f; // angle in which one can catch snowballs
        this.catchTicks = 6; // as right clicks come approximately every each 5 ticks
        this.startWaiting();
    }
@EventHandler
public void onWorldInit(WorldInitEvent event) {
World world = event.getWorld();
this.world = world;
if (!world.getName().equals("world")) return;
world.setDifficulty(Difficulty.PEACEFUL);
world.setViewDistance(5);
world.setAutoSave(false); // disable auto saving
world.setPVP(false);
world.setGameRule(GameRule.KEEP_INVENTORY, true);
world.setGameRule(GameRule.DO_IMMEDIATE_RESPAWN, true);
world.setGameRule(GameRule.FALL_DAMAGE, false);
world.setGameRule(GameRule.RANDOM_TICK_SPEED, 0); // may change if using ticks to spawn snowballs
world.setGameRule(GameRule.DO_WEATHER_CYCLE, false);
world.setGameRule(GameRule.DO_DAYLIGHT_CYCLE, false);
world.setTime(600);
// Set up item frames
Collection<ItemFrame> frames = world.getEntitiesByClass(ItemFrame.class);
for (ItemFrame frame : frames) {
if (frame.getAttachedFace() != BlockFace.DOWN) continue;
frame.setRotation(Rotation.CLOCKWISE_45);
frame.setInvulnerable(true);
frame.setFixed(true);
frame.setVisible(false);
frame.setGlowing(true);
}
}
// Make inventories unmodifiable
    // Players must not rearrange their inventory; the hotbar is managed by the plugin.
    @EventHandler
    public void onInventoryClick(InventoryClickEvent event) { event.setCancelled(true); }
    // Dragging items across inventory slots is likewise blocked.
    @EventHandler
    public void onInventoryDrag(InventoryDragEvent event) { event.setCancelled(true); }
    // Dropping managed items (snowballs, vote items) on the ground is blocked.
    @EventHandler
    public void onItemDrop(PlayerDropItemEvent event) { event.setCancelled(true); }
// do not interact with anything and always throw snowballs
@EventHandler
public void onInteract(PlayerInteractEvent event){
event.setCancelled(true);
// Protect environment
if (event.getAction() == Action.PHYSICAL) return;
// Ignore offhand
if (event.getHand() != EquipmentSlot.HAND) return;
// Allow to use doors
if (event.getClickedBlock() != null && (event.getClickedBlock().getType() == Material.OAK_DOOR
|| event.getClickedBlock().getType() == Material.DARK_OAK_DOOR
|| event.getClickedBlock().getType() == Material.ACACIA_DOOR
|| event.getClickedBlock().getType() == Material.BIRCH_DOOR
|| event.getClickedBlock().getType() == Material.JUNGLE_DOOR
|| event.getClickedBlock().getType() == Material.SPRUCE_DOOR
|| event.getClickedBlock().getType() == Material.CRIMSON_DOOR
|| event.getClickedBlock().getType() == Material.WARPED_DOOR)) {
event.setUseInteractedBlock(Event.Result.ALLOW);
this.clickAction.put(event.getPlayer().getName(), false); // cancel all events after this
return;
}
if (this.gameState == GameState.RUNNING) {
// Ignore if there was an interaction in this tick before
if (!this.clickAction.getOrDefault(event.getPlayer().getName(), true)) return;
// Right click to catch balls
if (event.getAction() == Action.RIGHT_CLICK_BLOCK || event.getAction() == Action.RIGHT_CLICK_AIR) {
this.clickAction.put(event.getPlayer().getName(), false); // cancel all events after this
this.catchModeTicks.put(event.getPlayer(), this.catchTicks); // set player into catch mode
return;
}
// Left click to throw balls
if (event.getItem() != null && event.getItem().getType() == this.snowballItem
&& (event.getAction() == Action.LEFT_CLICK_BLOCK || event.getAction() == Action.LEFT_CLICK_AIR)
&& this.playerSnowballsMap.getOrDefault(event.getPlayer(), 0) >= 1) {
this.playerSnowballsMap.putIfAbsent(event.getPlayer(), 0);
this.playerSnowballsMap.computeIfPresent(event.getPlayer(), (k, v) -> v - 1);
updatePlayerInventory(event.getPlayer());
event.getPlayer().launchProjectile(Snowball.class);
this.catchModeTicks.put(event.getPlayer(), 0); // reset player catch mode
}
} else { // if game is waiting
if (event.getItem() != null
&& (event.getAction() == Action.RIGHT_CLICK_AIR || event.getAction() == Action.RIGHT_CLICK_BLOCK)) {
switch (event.getItem().getType()) {
case CLOCK:
this.playerGameTargetVotingMap.put(event.getPlayer(), GameTarget.TIME_LIMIT);
event.getPlayer().sendActionBar(Component.text("Du stimmst für ein Zeitlimit"));
this.updatePlayerInventory(event.getPlayer());
break;
case TOTEM_OF_UNDYING:
this.playerGameTargetVotingMap.put(event.getPlayer(), GameTarget.KILL_LIMIT);
event.getPlayer().sendActionBar(Component.text("Du stimmst für ein Trefferlimit"));
this.updatePlayerInventory(event.getPlayer());
break;
}
event.getPlayer().playSound(event.getPlayer().getLocation(), Sound.BLOCK_DISPENSER_FAIL, 1.0f, 1.0f);
}
}
}
    /**
     * Right-clicking an entity also counts as a catch attempt: the interaction
     * itself is cancelled, further click events this tick are suppressed, and
     * the player enters catch mode.
     */
    @EventHandler
    public void onInteractEntity(PlayerInteractEntityEvent event) {
        this.clickAction.put(event.getPlayer().getName(), false); // cancel all events after this
        this.catchModeTicks.put(event.getPlayer(), this.catchTicks); // enable catch mode for player
        event.setCancelled(true);
    }
    /**
     * Greets a joining player and initialises all per-player state: waiting
     * state, neutral vote, zeroed stats, and a personal sidebar scoreboard.
     */
    @EventHandler
    public void onPlayerJoin(PlayerJoinEvent event) {
        Player player = event.getPlayer();
        // Display welcome message
        player.sendTitle("", "§6Schneeballschlacht!", 30, 60, 30);
        // Set up player
        this.playerStateMap.put(player, PlayerState.WAITING);
        // todo add setPlayerState()
        this.playerGameTargetVotingMap.put(player, GameTarget.NEUTRAL);
        this.playerDeathsMap.put(player, 0);
        this.playerKillsMap.put(player, 0);
        this.playerScoreboardMap.put(player, Bukkit.getScoreboardManager().getNewScoreboard());
        this.playerScoreboardMap.get(player).registerNewObjective("display", "dummy",
                Component.text("Schneeballschlacht")).setDisplaySlot(DisplaySlot.SIDEBAR);
        player.setScoreboard(this.playerScoreboardMap.get(player));
        updateScoreboards();
        updatePlayerInventory(event.getPlayer());
    }
    /**
     * Cleans up a leaving player's state and aborts the running round when the
     * remaining player count drops below the minimum.
     * NOTE(review): the player's catchModeTicks and playerObjectiveListMap
     * entries are not removed here — minor leak; confirm intent.
     */
    @EventHandler
    public void onPlayerLeave(PlayerQuitEvent event) {
        // clear player data
        Player player = event.getPlayer();
        this.playerStateMap.remove(player);
        this.playerSnowballsMap.remove(player);
        this.playerKillsMap.remove(player);
        this.playerDeathsMap.remove(player);
        this.playerPickupTicksMap.remove(player);
        this.playerScoreboardMap.remove(player);
        this.playerGameTargetVotingMap.remove(player);
        // End round if not enough players are in game ( and a round is running)
        if (this.gameState == GameState.RUNNING) {
            List<Player> players = this.world.getPlayers();
            int playersInGame = 0;
            for (Player statePlayer : players) {
                if (this.playerStateMap.get(statePlayer) == PlayerState.PLAYING) playersInGame++;
            }
            if (playersInGame < this.minPlayers) {
                this.startWaiting();
                for (Player notifyPlayer : players) {
                    notifyPlayer.sendTitle("Zu wenig Spieler!", "Runde vorbei!", 10, 30, 20);
                    notifyPlayer.playSound(notifyPlayer.getLocation(), Sound.ENTITY_GHAST_SCREAM, 1.0f, 1.0f);
                }
            }
        }
    }
@EventHandler // Snowball hit and catch
public void onProjectileHit(ProjectileHitEvent event) {
if (event.getEntity().getType() == EntityType.SNOWBALL){
Entity hitEntity = event.getHitEntity();
ProjectileSource projectileSource = event.getEntity().getShooter();
if (hitEntity instanceof Player && projectileSource instanceof Player) {
// Snowball hitting logic
Player source = (Player) projectileSource;
Player target = (Player) hitEntity;
if (!(this.playerStateMap.get(source) == PlayerState.PLAYING
&& this.playerStateMap.get(target) == PlayerState.PLAYING)) return;
// If player is in catch mode
if (this.catchModeTicks.getOrDefault(target,0) > 0) {
// get angle of ball to catch
Vector targetDirection = target.getLocation().getDirection();
Vector ballDirection = event.getEntity().getLocation().getDirection();
float angle = targetDirection.angle(ballDirection);
System.out.println(angle);
// If player looks in right direction
if (angle <= this.catchableAngle) {
if (this.playerSnowballsMap.getOrDefault(target, 0) < this.maxSnowballs) {
this.playerSnowballsMap.putIfAbsent(target, 0);
this.playerSnowballsMap.computeIfPresent(target, (k, v) -> v+1);
updatePlayerInventory(target);
}
target.sendActionBar(Component.text(
"Du hast " + source.getName() + "'s Ball gefangen!",
TextColor.color(50, 255, 50)));
source.sendActionBar(Component.text(
target.getName() + " hat gefangen!",
TextColor.color(255, 50, 50)));
return;
}
}
// Kill hit player =======================================
this.playerDeathsMap.computeIfPresent(target, (k, v) -> v+1);
target.damage(1000000); // todo use teleportation and make player spawns
target.sendActionBar(Component.text(
"Du wurdest von " + source.getName() + " getroffen!",
TextColor.color(255, 50, 50)));
// Honor shooting player ======================================
this.playerKillsMap.computeIfPresent(source, (k, v) -> v+1);
source.sendActionBar(Component.text(
"Du hast " + target.getName() + " getroffen!",
TextColor.color(50, 255, 50)));
// Update Scoreboard
this.updateScoreboards();
}
}
}
@EventHandler
public void onTick(ServerTickStartEvent event) {
switch (this.gameState) {
case WAITING:
if (this.world.getPlayers().size() >= this.minPlayers) this.stateTicks--;
else {
this.stateTicks = this.waitTicks;
break;
}
if (this.stateTicks == 0) {
for (Player player : this.world.getPlayers()) {
player.sendTitle("", "§aStart!", 0, 10, 10);
player.playSound(player.getLocation(), Sound.BLOCK_NOTE_BLOCK_PLING, 1.0f, 1.0f);
}
this.startGame();
break;
}
// show titles with remaining time
if (this.stateTicks % (5*20) == 0 || (this.stateTicks <= 60 && this.stateTicks % 20 == 0)) {
for (Player player : this.world.getPlayers()) {
player.sendTitle("", ""+this.stateTicks / 20, 0, 10, 10);
player.playSound(player.getLocation(), Sound.BLOCK_NOTE_BLOCK_BASS, 1.0f, 1.0f);
}
}
break;
case RUNNING:
this.clickAction.clear(); // reset disallowed interaction
// count down ticks of catch mode per player
for (Player player : this.catchModeTicks.keySet()) {
this.catchModeTicks.computeIfPresent(player, (k, v) -> (v>0) ? v-1 : 0);
}
// Every tick a snowball is spawned in some of the item frames (~10 seconds -> chance 0.005/tick)
Collection<ItemFrame> frames = world.getEntitiesByClass(ItemFrame.class);
for (ItemFrame frame : frames) {
if (frame.getAttachedFace() != BlockFace.DOWN) continue;
if (frame.getItem().getType().equals(Material.SNOW_BLOCK)) continue;
if (new Random().nextInt(200) == 0) { // 0.5%
frame.setItem(new ItemStack(this.snowballOnGroundItem), false);
}
}
// If player sneaks on item frame, picks up snowball
List<Player> worldPlayers = world.getPlayers();
for (Player player : worldPlayers) {
if (!player.isSneaking()) {
if (this.playerPickupTicksMap.getOrDefault(player, 0) > 0) {
player.sendActionBar(Component.text("Schneeball liegen gelassen!",
TextColor.color(255, 0, 100)));
}
this.playerPickupTicksMap.put(player, 0);
continue;
}
List<Entity> entities = player.getNearbyEntities(0.5, 0.5, 0.5);
boolean isNearItemFrame = false;
for (Entity entity : entities) {
if (entity instanceof ItemFrame
&& ((ItemFrame) entity).getAttachedFace() == BlockFace.DOWN
&& ((ItemFrame) entity).getItem().getType() == this.snowballOnGroundItem) {
if (this.playerSnowballsMap.getOrDefault(player, 0) >= this.maxSnowballs) {
player.sendActionBar(Component.text("Mehr kannst du nicht tragen!",
TextColor.color(255, 100, 50)));
this.playerPickupTicksMap.put(player, 0);
break;
}
if (this.playerPickupTicksMap.getOrDefault(player, 0) >= this.snowballPickupTicks) {
((ItemFrame) entity).setItem(null);
this.playerSnowballsMap.putIfAbsent(player, 0);
this.playerSnowballsMap.computeIfPresent(player, (k, v) -> v+1);
updatePlayerInventory(player);
player.sendActionBar(Component.text("Schneeball aufgehoben!",
TextColor.color(0, 255, 100)));
this.playerPickupTicksMap.put(player, 0);
} else {
this.playerPickupTicksMap.putIfAbsent(player, 0);
this.playerPickupTicksMap.computeIfPresent(player, (k, v) -> v=v+1);
if (this.playerPickupTicksMap.getOrDefault(player, 0) % 2 == 0) {
player.sendActionBar(Component.text(String.format("Schneeball aufheben: %.0f%%",
100.0 * this.playerPickupTicksMap.getOrDefault(player, 0)
/ this.snowballPickupTicks),
TextColor.color(0, 170, 255)));
}
}
isNearItemFrame = true;
break;
}
}
if (!isNearItemFrame) {
if (this.playerPickupTicksMap.getOrDefault(player, 0) > 0) {
player.sendActionBar(Component.text("Schneeball liegen gelassen!",
TextColor.color(255, 0, 100)));
}
this.playerPickupTicksMap.put(player, 0);
}
}
this.stateTicks++;
int targetRemaining = 0;
switch (this.gameTarget) {
case TIME_LIMIT:
targetRemaining = this.gameTargetAmount - (this.stateTicks/20);
// titles show remaining time
if (targetRemaining != this.lastRemaining) {
if ((targetRemaining % 60 == 0)
|| (targetRemaining <= 30 && targetRemaining % 10 == 0)
|| targetRemaining < 5) {
String timeString = String.format("§c%02d:%02d verbleibend",
targetRemaining/60, targetRemaining%60);
for (Player player : this.world.getPlayers()) {
player.sendTitle("", timeString,
0, 20, 5);
player.playSound(player.getLocation(), Sound.BLOCK_NOTE_BLOCK_BASS, 1.0f, 1.0f);
}
}
this.lastRemaining = targetRemaining;
}
break;
case KILL_LIMIT:
targetRemaining = this.gameTargetAmount - getHighestKills();
// titles show remaining kills
if (targetRemaining != this.lastRemaining) {
if (targetRemaining == 10 || targetRemaining <= 3)
for (Player player : this.world.getPlayers()) {
player.sendTitle("", targetRemaining + " verbleibend!",
0, 20, 5);
player.playSound(player.getLocation(), Sound.BLOCK_NOTE_BLOCK_BASS, 1.0f, 1.0f);
}
this.lastRemaining = targetRemaining;
}
break;
}
if (targetRemaining == 0) {
// show titles with placement
this.startWaiting(); // end this round
}
// Update time in side display
if (this.stateTicks % 20 == 0) this.updateScoreboards();
break;
default: this.startWaiting();
}
}
    /**
     * Starts a round: applies the vote result, sets the matching target amount,
     * resets per-player stats, hands out the start snowballs and drops every
     * player onto the playing field (y = 5).
     */
    private void startGame() {
        this.gameTarget = getTargetVoting();
        this.gameState = GameState.RUNNING;
        this.stateTicks = 0;
        switch (this.gameTarget) {
            case KILL_LIMIT:
                this.gameTargetAmount = this.standardKillLimit;
                break;
            case TIME_LIMIT:
                this.gameTargetAmount = this.standardTimeLimit;
                break;
        }
        for (Player player : this.playerStateMap.keySet()) {
            if (this.playerStateMap.get(player) == PlayerState.WAITING)
                this.playerStateMap.put(player, PlayerState.PLAYING);
            this.playerSnowballsMap.put(player, this.startSnowballs);
            this.playerKillsMap.put(player, 0);
            this.playerDeathsMap.put(player, 0);
            // NOTE(review): clears the whole map once per player — presumably
            // meant to run once before the loop; harmless, but confirm.
            this.playerPickupTicksMap.clear();
            Location playerLocation = player.getLocation();
            playerLocation.setY(5.0);
            player.teleport(playerLocation);
            this.updatePlayerInventory(player);
            // todo spawn a the farthest point away from players possible
        }
    }
    /**
     * Puts the plugin into the waiting state: resets the countdown, moves all
     * playing players up to the lobby level (y = 20) and restores their
     * vote-item hotbars.
     */
    private void startWaiting() {
        this.gameState = GameState.WAITING;
        this.stateTicks = this.waitTicks;
        for (Player player : this.playerStateMap.keySet()) {
            if (this.playerStateMap.get(player) == PlayerState.PLAYING)
                this.playerStateMap.put(player, PlayerState.WAITING);
            Location playerLocation = player.getLocation();
            playerLocation.setY(20.0);
            player.teleport(playerLocation);
            this.updatePlayerInventory(player);
        }
    }
private void updatePlayerInventory(Player player) {
ItemStack[] hotbar = new ItemStack[9];
switch (this.playerStateMap.get(player)) {
case PLAYING:
int snowballsAmount = this.playerSnowballsMap.get(player);
if (snowballsAmount == 0) Arrays.fill(hotbar, new ItemStack(this.emptyItem, 1));
else Arrays.fill(hotbar, new ItemStack(this.snowballItem, snowballsAmount));
break;
case WAITING:
ItemStack timeItem = this.voteTime.clone();
ItemStack killItem = this.voteKills.clone();
ItemMeta itemMeta;
switch (this.playerGameTargetVotingMap.get(player)) {
case TIME_LIMIT:
itemMeta = timeItem.getItemMeta();
itemMeta.addEnchant(Enchantment.DURABILITY, 1, true);
timeItem.setItemMeta(itemMeta);
break;
case KILL_LIMIT:
itemMeta = killItem.getItemMeta();
itemMeta.addEnchant(Enchantment.DURABILITY, 1, true);
killItem.setItemMeta(itemMeta);
case NEUTRAL: break;
}
hotbar[3] = timeItem;
hotbar[5] = killItem;
break;
}
player.getInventory().setContents(hotbar);
}
private void updateScoreboards() {
// === Get lines displayed to all players ==================
// remaining time/kills until game ends
String remainingString = "";
switch (this.gameTarget) {
case TIME_LIMIT:
int targetRemaining = this.gameTargetAmount - (this.stateTicks/20);
remainingString = String.format("§bVerbleibende Zeit: %02d:%02d", targetRemaining/60, targetRemaining%60);
break;
case KILL_LIMIT:
targetRemaining = this.gameTargetAmount - this.getHighestKills();
remainingString = String.format("§bDer/Dem Besten fehlen Treffer: %d", targetRemaining);
}
// time the game is running
String timeString = String.format("§dZeit vergangen: %02d:%02d", this.stateTicks/1200, (this.stateTicks/20)%60);
// get players ranks sorted by kills
List<String> playerRanksLines = new ArrayList<>();
List<Map.Entry<Player, Integer>> entryList = new ArrayList<>(this.playerKillsMap.entrySet());
int playersNum = entryList.size();
int currentPlace = playersNum - 1;
int lastPlace = playersNum - currentPlace;
int lastScore = getHighestKills();
entryList.sort(Map.Entry.comparingByValue((integer, t1) -> t1.equals(integer) ? 0 : (t1 > integer) ? 1 : -1));
for (Map.Entry<Player, Integer> entry : entryList) {
if (entry.getValue() < lastScore) {
lastPlace = playersNum - currentPlace;
lastScore = entry.getValue();
}
currentPlace--;
playerRanksLines.add(String.format("§6%d. §b%s§r: §a%d§r/§c%d", lastPlace, entry.getKey().getName(),
entry.getValue(), this.playerDeathsMap.get(entry.getKey())));
}
// === Display lines ============================================
for (Player player : this.world.getPlayers()) {
Scoreboard scoreboard = this.playerScoreboardMap.get(player);
int objectiveNum = 0;
this.playerObjectiveListMap.computeIfAbsent(player, k -> new ArrayList<>());
while (this.playerObjectiveListMap.get(player).size() < 2) {
this.playerObjectiveListMap.get(player).add(scoreboard.registerNewObjective(
"display"+objectiveNum++, "dummy", Component.text("")));
}
Objective display = this.playerObjectiveListMap.get(player).get(0);
Objective render = this.playerObjectiveListMap.get(player).get(1);
String objectiveName = render.getName();
render.unregister();
render = scoreboard.registerNewObjective(objectiveName, "dummy", Component.text(""));
render.displayName(Component.text("§e"+player.getName()+" spielt Schneeballschlacht!"));
// use two objectives to "render" one in background, then switch -> mitigating flicker
// Set objectives in reversed order into list
this.playerObjectiveListMap.get(player).set(0, render);
this.playerObjectiveListMap.get(player).set(1, display);
// display remaining time/kills
// List<String> contents = new ArrayList<>(this.playerSideContentMap.get(player));
List<String> contents = new ArrayList<>();
// game state info
contents.add(timeString);
contents.add(remainingString);
contents.add("§6==============================");
contents.addAll(playerRanksLines);
contents.add("§6==============================");
int lineCount = contents.size();
for (String line : contents) {
Score score = render.getScore(line);
int occurrence = 0;
while (score.isScoreSet()) {
score = render.getScore(line+("§"+occurrence++));
}
score.setScore(lineCount--);
}
render.setDisplaySlot(DisplaySlot.SIDEBAR);
}
}
private int getHighestKills() {
int highestKills = 0;
for (Player player : this.playerKillsMap.keySet()) {
if (this.playerKillsMap.get(player) > highestKills)
highestKills = this.playerKillsMap.get(player);
}
return highestKills;
}
private GameTarget getTargetVoting() {
int timeVotes = 0;
int killVotes = 0;
for (GameTarget target : this.playerGameTargetVotingMap.values()) {
switch (target) {
case TIME_LIMIT:
timeVotes++;
break;
case KILL_LIMIT:
killVotes++;
}
}
if (timeVotes > killVotes) return GameTarget.TIME_LIMIT;
else if (killVotes > timeVotes) return GameTarget.KILL_LIMIT;
else if (this.gameTarget == null || this.gameTarget == GameTarget.NEUTRAL) return GameTarget.KILL_LIMIT;
else return this.gameTarget;
}
}
| dbb9594fa85ded61539f2bd8caed804692a6effe | [
"Java"
] | 1 | Java | off-by-0point5/paper-snowball | 2aa25a9c72418506b91c0e0aa33906ef2bd5c817 | 803cc762863a76988b9f56946fab7737aaadf1e9 |
refs/heads/master | <repo_name>Scrum-Snaccs/Scrum-Snaccs-A1<file_sep>/first.js
module.exports = function() {
return "Hello Scrum Snaccs"
}<file_sep>/test/firstTest.js
const expect = require('chai').expect;
const nock = require('nock');
const BlogPost = require('../models/blogPost');
const request = require("request");
// import * as app from "./app"
const axios = require('axios');
// const {getBlogPost} = require('../client/src/Submitter')
const chai = require('chai')
, chaiHttp = require('chai-http');
chai.use(chaiHttp);
chai.should();
const response = require('./response');
//const app = require("../server").app;
// const getUser = require('./index').getUser;
let app;
let mongoose;
let blog = axios.get('getBlogPost');
// Set up a json object properly based on what you are expecting.
before(() => {
app = require("../server").app;
mongoose = require("../server").mongoose;
// blog = require("../client/src/Submitter").getBlogPost();
});
describe('Status and content', function() {
describe ('Main page', function() {
it('Main page content', function() {
axios.get('http://localhost:8080' , function(error, response, body) {
expect(body).to.equal('Title');
});
});
}) })
// it('Main page status', function(done) {
// request('http://localhost:8080' , function(error, response, body) {
// expect(response.statusCode).to.equal(200);
// done();
// });
// });
// it('About page content', function(done) {
// request('http://localhost:8080/about' , function(error, response, body) {
// expect(response.statusCode).to.equal(404);
// done();
// });
// });
// before(() => {
// app = require("../server").app;
// mongoose = require("../server").mongoose;
// });
// it('Main page content', function(done) {
// request('http://localhost:8080' , function(error, response, body) {
// expect(body).to.equal('Hello World');
// done();
// });
// });
// it('Main page status', function(done) {
// request('http://localhost:8080' , function(error, response, body) {
// expect(response.statusCode).to.equal(200);
// done();
// });
// });
// it('About page content', function(done) {
// request('http://localhost:8080/about' , function(error, response, body) {
// expect(response.statusCode).to.equal(404);
// done();
// });
// });
const mockBlogpost = {
blogId: 1,
title: 'Software Engineering',
author: 'South',
type: 'Book',
pages: '10',
volume: '67',
method: 'Yellow',
participants: 'hdha',
year: '2020',
};
describe('Get Blogspot tests', () => {
it("Should add a new article called test Article", (done) => {
chai
.request('http://localhost:8080')
//.request(mongoose)
.post("/BlogPost")
.send(mockBlogpost)
.end((err, response) => {
response.should.have.status(200);
assert.equal(response.body, "Entry added!");
done();
});
}).timeout(5000);
it("Should return a article called test article", (done) => {
chai
.request('http://localhost:8080')
//.request(mongoose)
.get('/router/api')
.end((err, response) => {
response.should.have.status(200);
response.body.should.be.a.property("id");
response.body.should.be.a.property("title");
response.body.id.should.equal(blogId);
done();
});
}).timeout(5000);
})
<file_sep>/client/src/App.js
import React from "react";
import "../node_modules/bootstrap/dist/css/bootstrap.min.css";
import { BrowserRouter, BrowserRouter as Router, Route ,Switch} from "react-router-dom";
import HomePage from "./HomePage";
import Submitter from "./Submitter";
import Moderator from "./Moderator"
import NavigationBar from "./NavigationBar";
// import Switch from "react-bootstrap/esm/Switch";
// import Calendar from "./Calendar";
function App() {
return (
<Router>
<div className="container">
<NavigationBar/>
<br />
<Route path="/" exact component={HomePage} />
<Route path ="/submitter" exact component={Submitter}/>
<Route path ="/moderator" exact component ={Moderator}/>
</div>
</Router>
);
}
export default App;
<file_sep>/routes/api.js
const express = require('express');
const router = express.Router();
const BlogPost = require('../models/blogPost');
// Routes
router.post('/delete', async (req, res, next) => {
var id = req.body.id;
const result = await BlogPost.deleteOne( {"_id": id});
res.redirect('/');
});
router.post('/update', async (req, res, next) => {
var id = req.body.id;
const result = await BlogPost.update( {"_id": id}, { $unset: { statuss: "awaiting"} } );
res.redirect('/');
});
//get all
router.get('/search/:field_1/:field_2/:field_3', async(req, res, next) => {
var field_1 =req.params.field_1;
var field_2 =req.params.field_2;
var field_3 =req.params.field_3;
switch (field_2) {
case 'is':
try {
var query = { [field_1] : field_3 };
const result = await BlogPost.find(query, query2)
res.send(result);
} catch (error) {
console.log(error.message);
}
break;
case 'contains':
try {
var query = { [field_1]: new RegExp( field_3 , 'i') };
const result = await BlogPost.find(query, query2)
res.send(result);
} catch (error) {
console.log(error.message);
}
break;
case 'smaller':
try {
var query = { [field_1]: { $lt: Number(field_3) } };
const result = await BlogPost.find(query, query2)
res.send(result);
} catch (error) {
console.log(error.message);
}
break;
case 'greater':
try {
var query = { [field_1]: { $gt: Number(field_3 )} };
var query2 = { "statuss" : "awaiting"};
const result = await BlogPost.find(query, query2)
res.send(result);
} catch (error) {
console.log(error.message);
}
break;
case 'any':
try {
const result = await BlogPost.find({},{__v:0});
res.send(result);
} catch (error) {
console.log(error.message);
}
case 'mod':
try {
const result = await BlogPost.find({"statuss" : "awaiting"});
res.send(result);
} catch (error) {
console.log(error.message);
}
default:
try {
const result = await BlogPost.find({},{__v:0});
res.send(result);
} catch (error) {
console.log(error.message);
}
}
});
router.get('/', async(req, res, next) => {
try {
const result = await BlogPost.find({},{__v:0});
res.send(result);
} catch (error) {
console.log(error.message);
}
});
router.get('/search/title/any', async(req, res, next) => {
try {
const result = await BlogPost.find({},{__v:0});
res.send(result);
} catch (error) {
console.log(error.message);
}
});
router.get('/mod', async(req, res, next) => {
try {
const result = await BlogPost.find({"statuss" : "awaiting"});
res.send(result);
} catch (error) {
console.log(error.message);
}
});
//create new
router.post('/save', async (req, res, next) => {
try {
const newBlogPost = new BlogPost(req.body);
const result = await newBlogPost.save();
res.send(result);
} catch (error) {
console.log(error.message);
}
});
module.exports = router;<file_sep>/client/src/firstTest.js
var assert = require('chai').assert;
var first = require('./first');
describe('First', function () {
it('first should return Hello Scrum Snaccs', function (){
assert.equal(first(), 'Hello Scrum Snaccs');
});
});<file_sep>/README.md
# Scrum-Snaccs-A1<file_sep>/client/src/Article.js
import React, { Component } from "react";
import axios from "axios";
//import "react-datepicker/dist/react-datepicker.css";
import Button from "@material-ui/core/Button";
export default class Article extends Component {
constructor(props) {
super(props);
this.state = {
title: '',
author: '',
type: '',
pages: 0,
volume: '',
method: '',
participants: '',
year: 2020,
field_1: 'title',
field_2: 'any',
field_3: '',
posts: [],
role: props.location.state !== undefined ? props.location.state.role : "",
};
}
componentDidMount() {
switch (this.state.role) {
case "moderator":
axios
.get("/" + this.state.role + "/id" + this.props.match.params.id)
.then((response) => {
console.log(response.data);
this.setState(response.data);
})
.catch(function (error) {
console.log(error);
});
break;
case "analyst":
axios
.get("/" + this.state.role + "/id" + this.props.match.params.id)
.then((response) => {
console.log(response.data);
this.setState(response.data);
})
.catch(function (error) {
console.log(error);
});
break;
default:
axios
.get("/entries/id" + this.props.match.params.id)
.then((response) => {
console.log(response.data);
this.setState(response.data);
})
.catch(function (error) {
console.log(error);
});
}
}
handleAccept = (e) => {
e.preventDefault();
axios
.post("/" + this.state.role + "/move", { id: this.props.match.params.id })
.then((res) => {
alert(res.data);
window.location = "/article/role=" + this.state.role;
})
.catch((error) => {
alert(error);
});
};
handleReject = (e) => {
e.preventDefault();
axios
.delete("/" + this.state.role + "/delete/id" + this.props.match.params.id)
.then((res) => {
alert(res.data.title + " Deleted");
console.log(res);
window.location = "/article/role=" + this.state.role;
})
.catch((error) => {
alert(error);
});
};
render() {
return (
<div>
<p>{this.state.article}</p>
<h3>{this.state.title} </h3>
<h6>{this.state.author} </h6>
<hr />
<p style={{ display: this.state.type ? "" : "none" }}>
<b>Type: </b>
{this.state.type}
</p>
<p style={{ display: this.state.pages ? "" : "none" }}>
<b>Pages: </b>
{this.state.pages}
</p>
<p style={{ display: this.state.volume ? "" : "none" }}>
<b>volume: </b>
{this.state.volume}
</p>
<p style={{ display: this.state.method ? "" : "none" }}>
<b>Method: </b>
{this.state.method}
</p>
<p style={{ display: this.state.participants ? "" : "none" }}>
<b>Participants: </b>
{this.state.participants}
</p>
<p style={{ display: this.state.year ? "" : "none" }}>
<b>Year: </b>
{this.state.year}
</p>
<hr />
{this.state.role === "moderator" && (
<div>
<Button variant="contained" color="primary" onClick={this.handleAccept}>
Accept
</Button>
<Button variant="contained" color="secondary" onClick={this.handleReject}>
Reject
</Button>
</div>
)}
{this.state.role === "analyst" && (
<div>
<Button variant="contained" color="primary" onClick={this.handleAccept}>
Accept
</Button>
<Button variant="contained" color="secondary" onClick={this.handleReject}>
Reject
</Button>
</div>
)}
<hr />
</div>
);
}
}
<file_sep>/client/src/Moderator.js
import React from 'react';
import axios from 'axios';
import NavigationBar from "./NavigationBar";
import "../node_modules/bootstrap/dist/css/bootstrap.min.css";
import { BrowserRouter as Router, Route } from "react-router-dom";
import './App.css';
class Moderator extends React.Component {
state = {
statuss: 'awaiting',
title: '',
author: '',
type: '',
pages: '',
volume: '',
method: '',
participants: '',
year: '',
field_1: 'title',
field_2: 'any',
field_3: '',
posts: []
};
componentDidMount = () => {
this.getBlogPost();
};
getBlogPost = () => {
axios.get('/api/search/statuss/is/awaiting')
.then((response) => {
const data = response.data;
this.setState({ posts: data });
console.log('Data has been received!!');
})
.catch(() => {
alert('Error retrieving data!!!');
});
};
handleChange = ({ target }) => {
const { name, value } = target;
this.setState({ [name]: value });
};
submit = (event) => {
event.preventDefault();
const payload = {
statuss: this.state.statuss,
title: this.state.title,
author: this.state.author,
type: this.state.type,
pages: this.state.pages,
volume: this.state.volume,
method: this.state.method,
participants: this.state.participants,
year: this.state.year
};
axios({
url: '/api/save',
method: 'POST',
data: payload
})
.then(() => {
console.log('Data has been sent to the server');
this.resetUserInputs();
//this.getBlogPost();
alert("Success!");
})
.catch(() => {
console.log('Internal server error');
});
};
search = (event) => {
event.preventDefault();
axios.get('/api/search/statuss/is/awaiting')
.then((response) => {
const data = response.data;
this.setState({ posts: data });
console.log('Data has been received!!');
})
.catch(() => {
alert('Error retrieving data!!!');
});
};
findall = (event) => {
event.preventDefault();
this.getBlogPost();
};
resetUserInputs = () => {
this.setState({
statuss: 'awaiting',
title: '',
author: '',
type: '',
pages: '',
volume: '',
method: '',
participants: '',
year: ''
});
};
displayBlogPost = (posts) => {
if (!posts.length) return null;
return posts.map((post, index) => (
<div key={index} className="blog-post__display">
<p>{post.statuss}</p>
<p>{post.title}</p>
<p>{post.author}</p>
<p>{post.type}</p>
<p>{post.pages}</p>
<p>{post.volume}</p>
<p>{post.method}</p>
<p>{post.participants}</p>
<p>{post.year}</p>
<form action="/api/delete" method="post">
<input type="hidden" name="id" value = {post._id}/>
<button type="submit">DELETE post</button>
</form>
<form action="/api/update" method="post">
<input type="hidden" name="id" value = {post._id}/>
<button type="submit">approve post</button>
</form>
</div>
));
};
render() {
console.log('State: ', this.state);
//JSX
return(
<div className="moderator">
<h2>SCRUM SEER </h2>
{/*
<form onSubmit={this.submit}>
<div className="form-input">
<input
type="text"
name="title"
placeholder="title"
value={this.state.title}
onChange={this.handleChange}
/>
</div>
<div className="form-input">
<input
type="text"
name="author"
placeholder="author"
value={this.state.author}
onChange={this.handleChange}
/>
</div>
<div className="form-input">
<input
type="text"
name="type"
placeholder="type"
value={this.state.type}
onChange={this.handleChange}
/>
</div>
<div className="form-input">
<input
type="text"
name="pages"
placeholder="pages"
value={this.state.pages}
onChange={this.handleChange}
/>
</div>
<div className="form-input">
<input
type="text"
name="volume"
placeholder="volume"
value={this.state.volume}
onChange={this.handleChange}
/>
</div>
<div className="form-input">
<input
type="text"
name="method"
placeholder="method"
value={this.state.method}
onChange={this.handleChange}
/>
</div>
<div className="form-input">
<input
type="text"
name="participants"
placeholder="participants"
value={this.state.participants}
onChange={this.handleChange}
/>
</div>
<div className="form-input">
<input
type="text"
name="year"
placeholder="year"
value={this.state.year}
onChange={this.handleChange}
/>
</div>
<button>Submit</button>
</form>
*/}
<form onSubmit={this.search}>
<button>refresh</button>
</form>
<div className="blog-">
{this.displayBlogPost(this.state.posts)}
</div>
</div>
);
};
}
export default Moderator;<file_sep>/models/blogPost.js
const mongoose = require('mongoose');
// Schema
const Schema = mongoose.Schema;
const BlogPostSchema = new Schema({
statuss: String,
title: String,
author: String,
type: String,
pages: Number,
volume: Number,
method: String,
participants: String,
year: Number,
postdate: {
type: Date,
default: Date.now()
}
});
// Model
const BlogPost = mongoose.model('BlogPost', BlogPostSchema);
module.exports = BlogPost; | fb3f80f58bd3286d7bb29575c4b2d3567cbe1647 | [
"JavaScript",
"Markdown"
] | 9 | JavaScript | Scrum-Snaccs/Scrum-Snaccs-A1 | ac795210547460f81b92180ef71e7f8efcc355d3 | f684b02ab7644f0c115d7cd125b506398e87448c |
refs/heads/master | <file_sep># JSLab1-Part2
<file_sep>var player1 = 40; //declaring a variable for player start points
var grantChirpus = 10; // declaring a variable for Grant Chirpus start points
startGame(); //calls starGame function
function startGame() {
var greeting = prompt("Hello, Would You Like To Play a Game? Please Enter 1 for YES and 2 for NO.");
if (greeting === "1") {
var userName = prompt("OK. Please Type Your Name").toUpperCase();
var startGreeting = prompt("Thanks " + userName + " !! To Start, Please Press ENTER.");
} else {
console.log("Ok, have a nice day......");
}
}
startCombat(); //calls startCombat function
function startCombat() {
var choice = prompt("Would you like to ATTACK or Quit?");
if (choice === "attack") {
getDamage(); // calls getDamage function
function getDamage() {
grantChirpus -= (Math.floor(Math.random() * 5) + 1);
player1 -= (Math.floor(Math.random() * 5) + 1);
console.log("The Chirpinator's Score is " + grantChirpus);
console.log("Your Score is " + player1);
}
}
if (choice === "quit") { // your a quitter message is logged if user chooses quit
console.log("OK Quitter. Foolishness is giving up without any effort or not giving up after putting in all your effort. Secret of freedom is putting all your effort and giving up!.");
}
}
| 87a621b25313780c249059c1855271159a38f7e8 | [
"Markdown",
"JavaScript"
] | 2 | Markdown | ChristopherNamyst/JSLab1-Part2 | 9c90b335cdab250c078c0ddadc5f1d2914e8076f | 0e2fb659681b76d2e5d963443b54e1dac15be279 |
refs/heads/master | <repo_name>jbrandao99/LTW<file_sep>/Projeto/templates/tpl_search.php
<?php function draw_search()
{
/**
* Draws the search section.
*/ ?>
<title>Search Properties</title>
<section class="search">
<header><h2>Search a Place</h2></header>
<form method="post" action="../pages/rental.php">
<input type="search" name="location" placeholder="Location">
<input type="range" name="price" min="0" max="500" value="250" id="range_slider_input">
<p>Price per night: <span id="price_range"></span>€</p>
<input type="text" placeholder="Check-In" onchange="updateCheckout()" onfocus="(this.type='date')" oninput="this.className = ''" id="begin_date" name="begin_date" min="<?php echo date('Y-m-d'); ?>" required></<input>
<input type="text" placeholder="Check-Out" onfocus="(this.type='date')" oninput="this.className = ''"name="end_date" id="end_date" required></<input>
<input type="submit" value="Search">
</form>
</section>
<script src="../javascript/script.js" defer></script>
<script src="../javascript/range_slider.js"></script>
<script src="../javascript/datefield.js"></script>
<?php
} ?><file_sep>/Projeto/actions/action_reservation.php
<?php
include_once('../includes/session.php');
include_once('../database/db_properties.php');
include_once('../database/db_user.php');
include_once('../database/db_reservations.php');
// Verify if user is logged in
if (!isset($_SESSION['username'])) {
die(header('Location: ../pages/login.php'));
}
$property_id = $_POST['id'];
$priceperday = $_POST['price'];
$start = $_POST['checkIn'];
$end = $_POST['checkOut'];
$property = getProperty($property_id);
if ($start>$end) {
$temp = $start;
$start = $end;
$end = $temp;
}
$date1 = new DateTime($start);
$date2 = new DateTime($end);
$interval = $date1->diff($date2);
$price = $interval->days * $priceperday ;
if (addReservation($property_id, $start, $end, $price)) {
$_SESSION['messages'][] = array('type' => 'success', 'content' => 'You have succesfully reserved '.$property['title']);
die(header('Location: ../pages/reservations.php'));
} else {
$_SESSION['messages'][] = array('type' => 'error', 'content' => $property['title'].' is not available during those days!');
die(header('Location: ../pages/property.php?id='.$property_id));
}
<file_sep>/Projeto/templates/tpl_property.php
<?php
include_once('../database/db_properties.php');
include_once('../database/db_user.php');
include_once('../database/db_reservations.php');
include_once('../includes/session.php');
function draw_property($property)
{
$photos = getPropertyPhotos($property['id']); ?>
<title><?=$property['title']?></title>
<article class="property">
<header>
<h2><?=$property['title']?></a></h2>
</header>
<main>
<div class="row" >
<div class="column" name="profilePicture" >
<img src="../images/properties/<?php echo $photos[0]['path']; ?>" onclick="openModal();currentSlide(1)" class="hover-shadow">
</div>
</div>
<div id="myModal" class="modal">
<span class="close cursor" onclick="closeModal()">×</span>
<div class="modal-content">
<div class="mySlides">
<div class="numbertext">1 / 3</div>
<img src="../images/properties/<?php echo $photos[0]['path']; ?>" style="width:100%">
</div>
<div class="mySlides">
<div class="numbertext">2 / 3</div>
<img src="../images/properties/<?php echo $photos[1]['path']; ?>" style="width:100%">
</div>
<div class="mySlides">
<div class="numbertext">3 / 3</div>
<img src="../images/properties/<?php echo $photos[2]['path']; ?>" style="width:100%">
</div>
<a class="prev" onclick="plusSlides(-1)">❮</a>
<a class="next" onclick="plusSlides(1)">❯</a>
</div>
</div>
<h3>Description: <?=$property['description']?></h3>
<h3>Location: <?=$property['location']?></h3>
<h4>Price per night: <?=$property['price']?>€</h4>
<?php
$reservations = getPropertyReservations($property['id']); ?>
<h3>Available from: <?=$property['availabilityStart']?> to: <?=$property['availabilityEnd']?></h3>
<div id="listReservations">
<h3 id="reservations">List of Reservations</h3>
<?php if (count($reservations) == 0) { ?>
<h4> No reservations yet. </h4>
<?php } else { ?>
<div id="columnIdentifiers">
<h4 id="datesWord">Check-in -> Check-Out</h4>
</div>
<?php foreach ($reservations as $reservation) {
draw_reservation($reservation);
}
} ?>
</div>
<div id="reservation">
<form id="reservationForm" method="post" action="../actions/action_reservation.php">
<input id="id" type='hidden' name='id' value='<?= $property['id'] ?>' />
<input id="price" type='hidden' name='price' value='<?= $property['price'] ?>' />
<input type="text" placeholder="Check-In" onchange="updateCheckout()" onfocus="(this.type='date')" oninput="this.className = ''" name="checkIn" id="begin_date" min="<?php echo date('Y-m-d'); ?>" required></<input>
<input type="text" placeholder="Check-Out" onfocus="(this.type='date')" oninput="this.className = ''" name="checkOut" id="end_date" required></<input>
<p id="totalPrice"></p>
<p id="message"></p>
<?php ?>
<input id="button" name="bookButton" type="submit" value="Book">
</form>
</div>
<div id="comments">
<h3>Comments </h3>
<div id="messages"></div>
<input type="hidden" name="username" value="<?= $_SESSION['username'] ?> ">
<input type="hidden" name="place_id" value="<?= $property['id']?> ">
<input type="text" name="message" placeholder="Say something nice about this place" pattern="[a-zA-Z\s.\-'!\?/]+">
<button id = 'send'>Send Message</button>
</div>
<?php
if ((checkIsPropertyOwner($property['id']))) {
?>
<button onclick="window.location.href='../pages/editproperty.php?id=' + '<?= $property['id']?>'" ><i class="far fa-edit fa-2x"></i></button>
<button onclick="window.location.href='../actions/action_delete_property.php?id=' + '<?= $property['id']?>'" ><i class="fas fa-trash fa-2x"></i></button>
<?php
} ?>
</main>
</article>
<script src="../javascript/comments.js" defer></script>
<script src="../javascript/datefield.js"></script>
<script src="../javascript/pictures.js"></script>
<?php
} ?>
<?php function add_property()
{
?>
<title>Add Property</title>
<section class="add_property">
<form id="add_prop" method="post" action="../actions/action_add_property.php" enctype="multipart/form-data">
<h1>Add Property</h1>
<!-- One "tab" for each step in the form: -->
<div class="tab"><h2>Information</h2>
<input type="text" placeholder="title" oninput="this.className = ''" name="title" required></<input>
<input type="text" placeholder="description" oninput="this.className = ''" name="description" required></<input>
<input type="text" placeholder="location" oninput="this.className = ''" name="location" required></<input>
</div>
<div class="tab"><h2>Dates Available</h2>
<input type="text" placeholder="from" onfocus="(this.type='date')" oninput="this.className = ''" name="start_date" min="<?php echo date('Y-m-d'); ?>" id="begin_date" onchange="updateCheckout()" required></<input>
<input type="text" placeholder="until" onfocus="(this.type='date')" oninput="this.className = ''" id="end_date" name="end_date" required></<input>
</div>
<div class="tab"><h2>Pricing</h2>
<input type="number" placeholder="price" oninput="this.className = ''" name="price" required></<input>
</div>
<div class="tab"><h2>Images</h2>
<div id="imageProp">
<img src="../images/site/image-placeholder.jpg" onclick="pictureClick()" id="profileDisplay"/>
<input type="file" oninput="this.className = ''" onchange=" createImageP(this)" style="display:none;" name="profilePicture" id="profilePicture"
accept = "image/jpeg" required></<input>
</div>
</div>
<div class="button_prop" style="overflow:auto;">
<div id="btn_prop" style="float:right;">
<button type="button" id="prevBtn" onclick="nextPrev(-1)">Previous</button>
<button type="button" id="nextBtn" onclick="nextPrev(1)">Next</button>
</div>
</div>
<!-- Circles which indicates the steps of the form: -->
<div style="text-align:center;margin-top:10px; padding-bottom:10px; background-color: #0088a9;">
<span class="step"></span>
<span class="step"></span>
<span class="step"></span>
<span class="step"></span>
</div>
</form>
</section>
<script src="../javascript/add_property.js"></script>
<script src="../javascript/datefield.js"></script>
<script src="../javascript/pictures.js"></script>
<?php
} ?>
<?php function draw_reservation($reservation)
{
?>
<div class="reservation">
<h4 id="dates"> <?= $reservation['startDate'] ?> | <?= $reservation['endDate'] ?> </h4>
</div>
<?php
} ?>
<?php function edit_property($property)
{
?>
<title>Edit Property</title>
<section class="add_property">
<form id="add_prop" method="post" action="../actions/action_edit_property.php" enctype="multipart/form-data">
<h1>Edit Property</h1>
<!-- One "tab" for each step in the form: -->
<div class="tab"><h2>Information</h2>
<input type='hidden' placeholder="id" oninput="this.className = ''" name="id" value = "<?= $property['id'] ?>" display:none required></<input>
<input type="text" placeholder="title" oninput="this.className = ''" name="title" value = "<?= $property['title'] ?>"required></<input>
<input type="text" placeholder="description" oninput="this.className = ''" name="description" value = "<?= $property['description'] ?>" required></<input>
<input type="text" placeholder="location" oninput="this.className = ''" name="location" value = "<?= $property['location'] ?>" required></<input>
</div>
<div class="tab"><h2>Dates Available</h2>
<input type="text" placeholder="from" onfocus="(this.type='date')" oninput="this.className = ''" name="start_date" value = "<?= $property['availabilityStart'] ?>" required></<input>
<input type="text" placeholder="until" onfocus="(this.type='date')" oninput="this.className = ''" name="end_date" value = "<?= $property['availabilityEnd'] ?>" required></<input>
</div>
<div class="tab"><h2>Pricing</h2>
<input type="number" placeholder="price" oninput="this.className = ''" name="price" value = "<?= $property['price'] ?>" required></<input>
</div>
<div class="button_prop" style="overflow:auto;">
<div id="btn_prop" style="float:right;">
<button type="button" id="prevBtn" onclick="nextPrev(-1)">Previous</button>
<button type="button" id="nextBtn" onclick="nextPrev(1)">Next</button>
</div>
</div>
<!-- Circles which indicates the steps of the form: -->
<div style="text-align:center;margin-top:10px; padding-bottom:10px; background-color: #0088a9;">
<span class="step"></span>
<span class="step"></span>
<span class="step"></span>
</div>
</form>
</section>
<script src="../javascript/add_property.js"></script>
<script src="../javascript/pictures.js"></script>
<?php
} ?>
<file_sep>/Projeto/documentation/README.txt
<NAME> - up201700127 - effort: (100/3)%
<NAME> - up201705573 - effort: (100/3)%
<NAME> - up201700132 - effort: (100/3)%
Credentials:
You can either register or login with an existing account, for example:
mpinho 123;
admin 123;
We have not used any external libraries.<file_sep>/Projeto/actions/action_contact.php
<?php
include_once('../includes/session.php');
$name = $_POST['name'];
$subject = $_POST['subject'];
$mail = $_POST['email'];
$message = $_POST['message'];
$mailTo = "<EMAIL>";
$headers= "Rent-a-Place: " . $mail;
$txt = "You have received an e-mail from " . $name . ".\n" . $message;
mail($mailTo, $subject, $txt, $headers);
$_SESSION['messages'][] = array('type' => 'success', 'content' => 'Thanks for your contact '.$_POST['name']);
die(header('Location: ../pages/contact.php'));
<file_sep>/Projeto/pages/rental.php
<?php
// Search-results page: lists properties matching the submitted criteria.
include_once('../includes/session.php');
include_once('../templates/tpl_common.php');
include_once('../templates/tpl_rental.php');
include_once('../database/db_properties.php');
// Verify if user is logged in
if (!isset($_SESSION['username'])) {
    die(header('Location: login.php'));
}
// Read the search criteria, tolerating missing fields (no PHP notices
// when the form is submitted incomplete or the page is hit directly).
$location = $_POST['location'] ?? '';
$price = $_POST['price'] ?? '';
$start = $_POST['begin_date'] ?? '';
$end = $_POST['end_date'] ?? '';
// Normalise the interval so $start is always the earlier date.
if ($start > $end) {
    list($start, $end) = array($end, $start);
}
$rentals = searchProperties($price, $location, $start, $end);
if (empty($rentals)) {
    $_SESSION['messages'][] = array('type' => 'error', 'content' => 'No properties found');
    die(header('Location: search.php'));
}
draw_header($_SESSION['username']);
draw_rentals($rentals);
draw_footer();
<file_sep>/Projeto/templates/tpl_aboutus.php
<?php function draw_aboutus()
{
    /**
     * Draws the aboutus section.
     *
     * Emits the page <title> and a static "About Us" <section> that
     * presents the three co-founders with portrait images. Writes
     * output directly; takes no parameters and returns nothing.
     */ ?>
<title>About Us</title>
<section class="aboutus">
    <header><h2>About Us</h2></header>
    <p></p>
    <div class="authors">
        <img src="../images/site/fellipe.jpg" alt="author1" height="250" width="224">
        <h3>Fellipe</h3>
        <p>Co-founder</p>
    </div>
    <div class="authors">
        <img src="../images/site/muriel.jpg" alt="author2" height="250" width="224">
        <h3>Muriel</h3>
        <p>Co-founder</p>
    </div>
    <div class="authors">
        <img src="../images/site/brandao.jpg" alt="author3" height="250" width="224">
        <h3>Brandão</h3>
        <p>Co-founder</p>
    </div>
</section>
<?php
} ?><file_sep>/Projeto/templates/tpl_reservations.php
<?php
include_once('../database/db_properties.php');
function draw_userReservations($reservations)
{
    /**
     * Draws the "My Reservations" page body.
     *
     * Flushes queued flash messages from $_SESSION['messages'], shows a
     * "Get Started" hint when the list is empty, and renders one card
     * per row via draw_reservations().
     *
     * @param array $reservations Rows from the Reservations table.
     */
    ?>
<title>My Reservations</title>
<h2>My reservations</h2>
<?php if (isset($_SESSION['messages'])) { ?>
    <h4 id="messages">
    <?php foreach ($_SESSION['messages'] as $message) { ?>
        <div class="<?= $message['type'] ?>"><?= $message['content'] ?></div>
    <?php } ?>
    </h4>
    <?php unset($_SESSION['messages']);
} ?>
<?php if (count($reservations) == 0) { ?>
    <h3> You haven't made a reservation yet. <a id="reservation_label" href="../pages/search.php">Get Started</a></h3>
<?php } ?>
<section id="properties">
<?php
foreach ($reservations as $reservation) { ?>
    <div id="container">
    <?php draw_reservations($reservation); ?>
    </div>
<?php } ?>
</section>
<?php
} ?>
<?php function draw_reservations($reservation)
{
    /**
     * Draws one reservation card: property photo, title, location,
     * price, check-in/check-out dates and a CSRF-protected cancel form.
     *
     * Fixes over the previous version: closing tags were mis-ordered
     * (</main> appeared after </article></a>), <h3> elements were
     * invalidly nested inside <p>, and the href/hidden-id attributes
     * carried trailing spaces that polluted the URL and POSTed id.
     *
     * @param array $reservation Row from the Reservations table.
     */
    $rental = getProperty($reservation['propertyID']);
    $photos = getPropertyPhotos($rental['id']); ?>
<a href="../pages/property.php?id=<?=$reservation['propertyID']?>" class="rental">
    <article>
        <main>
            <div class="row">
                <div class="column">
                    <img src="../images/properties/<?php echo $photos[0]['path']; ?>" alt="Property Image"/>
                </div>
                <div class="column">
                    <h2><?=$rental['title']?></h2>
                    <h3>Location: <?=$rental['location']?></h3>
                    <h3>Price: <?=$reservation['price']?>€</h3>
                    <h3 id="checkIn">Check-In: <?= $reservation['startDate'] ?></h3>
                    <h3 id="checkOut">Check-Out: <?= $reservation['endDate'] ?></h3>
                    <form id="deleteReservation" action="../actions/action_removeReservation.php" method="post">
                        <input type="hidden" id="reservation_id" name="reservation_id" value="<?= $reservation['id'] ?>">
                        <input type="hidden" id="csrf" name="csrf" value="<?=$_SESSION['csrf']?>">
                        <input id="button" type="submit" value="Cancel">
                    </form>
                </div>
            </div>
        </main>
    </article>
</a>
<?php
} ?><file_sep>/Projeto/javascript/pictures.js
// Global counter used to give each property-picture <input> a unique
// POST field name ("picture0", "picture1", ...) — see displayImageP.
var i = 0;
// Forward a click on the preview image to the hidden file input so the
// browser's file picker opens.
function pictureClick() {
    var fileInput = document.querySelector('#profilePicture');
    fileInput.click();
}
// Preview the file chosen in the given <input type="file"> inside the
// #profileDisplay image; does nothing when no file was selected.
function displayImage(input) {
    var chosen = input.files[0];
    if (!chosen) {
        return;
    }
    var reader = new FileReader();
    reader.onload = function (loadEvent) {
        var preview = document.querySelector('#profileDisplay');
        preview.setAttribute('src', loadEvent.target.result);
    };
    reader.readAsDataURL(chosen);
}
// Clone the upload widget containing `element` and append the copy to
// the pictures tab (the fourth .tab section) so another image can be added.
function createImage(element) {
    var widget = element.parentElement;
    var copy = widget.cloneNode(true);
    var picturesTab = document.getElementsByClassName("tab")[3];
    picturesTab.appendChild(copy);
}
// Render the most recently chosen property picture. Of the cloned
// upload widgets, the second-to-last preview receives the selected
// file, while the last (new, empty) widget gets the placeholder image.
function displayImageP(e) {
    if (e.files[0]) {
        var reader = new FileReader();
        // Capture the preview nodes before the asynchronous load fires.
        var nodes = document.querySelectorAll('#profileDisplay');
        var target = nodes[nodes.length - 2];
        var target2 = nodes[nodes.length - 1];
        reader.onload = function (e) {
            target.setAttribute('src', e.target.result);
            // Rename the sibling file input so each picture is POSTed
            // under a distinct field name (global counter `i`).
            target.nextElementSibling.setAttribute('name', 'picture' + i++);
            target2.setAttribute('src', "../images/site/image-placeholder.jpg");
        }
        reader.readAsDataURL(e.files[0]);
    }
}
// Clone the current upload widget, append it to the pictures tab, and
// immediately preview the chosen file via displayImageP.
function createImageP(element) {
    var picturesTab = document.getElementsByClassName("tab")[3];
    var clone = element.parentElement.cloneNode(true);
    picturesTab.append(clone);
    displayImageP(element);
}
// Open the Modal
function openModal() {
    var modal = document.getElementById("myModal");
    modal.style.display = "block";
}
// Close the Modal
function closeModal() {
    var modal = document.getElementById("myModal");
    modal.style.display = "none";
}
// 1-based index of the gallery slide currently shown.
var slideIndex = 1;
// Render the initial slide as soon as the script loads.
showSlides(slideIndex);
// Next/previous controls
function plusSlides(n) {
    slideIndex += n;
    showSlides(slideIndex);
}
// Thumbnail image controls
function currentSlide(n) {
    slideIndex = n;
    showSlides(slideIndex);
}
function showSlides(n) {
var i;
var slides = document.getElementsByClassName("mySlides");
if (n > slides.length) { slideIndex = 1 }
if (n < 1) { slideIndex = slides.length }
for (i = 0; i < slides.length; i++) {
slides[i].style.display = "none";
}
slides[slideIndex - 1].style.display = "block";
}<file_sep>/Projeto/templates/tpl_common.php
<?php function draw_header($username)
{
    /**
     * Draws the header for all pages. Receives an username
     * if the user is logged in in order to draw the logout
     * link.
     *
     * Also emits the shared <head> (stylesheets, favicon, Font Awesome),
     * the navigation bar (full link set only when logged in), and then
     * flushes any queued flash messages from $_SESSION['messages'].
     *
     * @param string|null $username Logged-in username, or null for guests.
     */?>
<!DOCTYPE html>
<html>
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <meta http-equiv="X-UA-Compatible" content="ie=edge">
    <link rel="stylesheet" type="text/css" href="../css/header.css">
    <link rel="stylesheet" type="text/css" href="../css/style.css">
    <link rel="stylesheet" type="text/css" href="../css/footer.css">
    <link rel="stylesheet" type="text/css" href="../css/auth.css">
    <link rel="stylesheet" type="text/css" href="../css/profile.css">
    <link rel="stylesheet" type="text/css" href="../css/aboutus.css">
    <link rel="stylesheet" type="text/css" href="../css/rental.css">
    <link rel="stylesheet" type="text/css" href="../css/property.css">
    <link rel="stylesheet" type="text/css" href="../css/contact.css">
    <link rel="stylesheet" type="text/css" href="../css/search.css">
    <link rel="stylesheet" type="text/css" href="../css/reservations.css">
    <link rel="shortcut icon" href="../images/site/accusoft.png">
    <script src="https://kit.fontawesome.com/bb66e67d26.js" crossorigin="anonymous"></script>
</head>
<body>
<header class="main_header">
<nav>
<?php if ($username != null) { ?>
    <ul id="header_links">
        <li><h1><a href="login.php" ><i class="fab fa-accusoft"></i> Rent-a-Place</a></h1></li>
        <li class="right_links"><a id="logout" href="../actions/action_logout.php">Logout</a></li>
        <li class="right_links"><a href="profile.php"><?=$username?></a></li>
        <li class="right_links"><a href="reservations.php">Reservations</a></li>
        <li class="right_links"><a href="manage.php">My Places</a></li>
    </ul>
<?php } else { ?>
    <ul id="header_links">
        <li><h1><a href="login.php" ><i class="fab fa-accusoft"></i> Rent-a-Place</a></h1></li>
    </ul>
<?php } ?>
</nav>
</header>
<?php if (isset($_SESSION['messages'])) {?>
    <section id="messages">
    <?php foreach ($_SESSION['messages'] as $message) { ?>
        <div class="<?=$message['type']?>"><?=$message['content']?></div>
    <?php } ?>
    </section>
<?php unset($_SESSION['messages']); } ?>
<?php
} ?>
<?php function draw_footer()
{
    /**
     * Draws the footer for all pages.
     *
     * Emits the copyright line plus About Us / Contact links, and closes
     * the <body>/<html> elements opened by draw_header().
     */ ?>
<footer class="main_footer">
    <ul id="footer_links">
        <li>
            © 2019 Rent-a-Place
        </li>
        <li class="footer_right">
            <a href="aboutus.php">About Us</a>
        </li>
        <li class="footer_right">
            <a href="contact.php">Contact</a>
        </li>
    </ul>
</footer>
</body>
</html>
<?php
} ?><file_sep>/Projeto/pages/sendmessage.php
<?php
// AJAX endpoint for the property comment box. Optionally stores a new
// comment (when `username` and a non-empty `text` are supplied) and
// always returns, as JSON, up to five comments for `place_id` newer
// than `last_id`, oldest first.
include_once('../includes/database.php');
$timestamp = time();
// Cast the ids so only integers ever reach the queries.
$last_id = (int)($_GET['last_id'] ?? 0);
$place_id = (int)($_GET['place_id'] ?? 0);
$db = Database::instance()->db();
if (isset($_GET['username']) && !empty($_GET['text'])) {
    $username = $_GET['username'];
    $text = $_GET['text'];
    // NOTE(review): assumes a lowercase `comments` table with columns
    // (id, date, username, text, place_id) created outside createDB.sql
    // — confirm against the deployed schema.
    $stmt = $db->prepare("INSERT INTO comments VALUES (null, ?, ?, ?,?)");
    $stmt->execute(array($timestamp, $username, $text,$place_id));
}
$stmt = $db->prepare("SELECT * FROM comments WHERE place_id = ? AND id > ? ORDER BY date DESC LIMIT 5");
$stmt->execute(array($place_id,$last_id));
$comments = $stmt->fetchAll();
$comments = array_reverse($comments);
// Attach a printable date next to each raw unix timestamp.
foreach ($comments as $index => $comment) {
    $comments[$index]['time'] = date('Y-m-d', $comment['date']);
}
header('Content-Type: application/json');
echo json_encode($comments);
?>
<file_sep>/Projeto/database/db_properties.php
<?php
include_once('../includes/database.php');
include_once('../database/db_user.php');
include_once('../includes/session.php');
/** Returns every row of the Properties table. */
function getAllProperties()
{
    $db = Database::instance()->db();
    $stmt = $db->prepare('SELECT * FROM Properties');
    $stmt->execute();
    return $stmt->fetchAll();
}
/** Returns all properties owned by the user with the given username. */
function getProperties($username)
{
    $db = Database::instance()->db();
    $user = getUser($username);
    $stmt = $db->prepare('SELECT * FROM Properties WHERE ownerID = ?');
    $stmt->execute(array($user['id']));
    return $stmt->fetchAll();
}
/**
 * Returns properties priced at or under $price whose availability
 * window covers [$start, $end]; an empty $location matches anywhere,
 * otherwise the location is compared case-insensitively.
 */
function searchProperties($price, $location, $start, $end)
{
    $db = Database::instance()->db();
    if (empty($location)) {
        $stmt = $db->prepare('SELECT * FROM Properties WHERE (price <= ? AND availabilityStart <= ? AND availabilityEnd >= ?)');
        $stmt->execute(array($price,$start,$end));
    } else {
        $stmt = $db->prepare('SELECT * FROM Properties WHERE (price <= ? AND UPPER(location) = UPPER(?) AND availabilityStart <= ? AND availabilityEnd >= ?)');
        $stmt->execute(array($price,$location,$start,$end));
    }
    return $stmt->fetchAll();
}
/** True when the session user owns the property with id $property_id. */
function checkIsPropertyOwner($property_id)
{
    $db = Database::instance()->db();
    $user = getUser($_SESSION['username']);
    $stmt = $db->prepare('SELECT * FROM Properties WHERE ownerID = ? AND id = ?');
    $stmt->execute(array($user['id'], $property_id));
    return $stmt->fetch()?true:false;
}
/** Inserts a new property and returns its freshly assigned id. */
function addProperty($ownerID, $price, $title, $location, $description, $start, $end)
{
    $db = Database::instance()->db();
    $stmt = $db->prepare('INSERT INTO Properties VALUES(NULL, ?, ?, ?, ?, ?, ?, ?)');
    $stmt->execute(array($ownerID, $price, $title, $location, $description,$start,$end));
    $property_id = $db->lastInsertId();
    return $property_id;
}
/** Updates every editable column of a property; always returns 1. */
function editProperty($id, $price, $title, $location, $description, $start, $end)
{
    $db = Database::instance()->db();
    $stmt = $db->prepare('UPDATE Properties SET price = ? , title = ? , location = ? , description = ? , availabilityStart = ? , availabilityEnd = ? WHERE id = ?');
    $stmt->execute(array($price, $title, $location, $description,$start,$end,$id));
    return 1;
}
/** Fetches one property row by id (false when not found). */
function getProperty($property_id)
{
    $db = Database::instance()->db();
    $stmt = $db->prepare('SELECT * FROM Properties WHERE id = ?');
    $stmt->execute(array($property_id));
    return $stmt->fetch();
}
/** Deletes the property row with the given id. Returns nothing. */
function deleteProperty($property_id)
{
    $db = Database::instance()->db();
    $stmt = $db->prepare('DELETE FROM Properties WHERE id = ?');
    $stmt->execute(array($property_id));
}
/**
 * Registers a photo file name for a property; always returns 1.
 *
 * Fixed: the Photos table (see createDB.sql) has four columns
 * (id, description, propertyID, path), but the old statement supplied
 * only three values, so the INSERT always failed. Columns are now
 * named explicitly and the description defaults to an empty string.
 */
function addPropertyPhoto($property_id, $photo)
{
    $db = Database::instance()->db();
    $stmt = $db->prepare('INSERT INTO Photos (id, description, propertyID, path) VALUES(NULL, ?, ?, ?)');
    $stmt->execute(array('', $property_id, $photo));
    return 1;
}
/** Removes a single photo row by its id; always returns 1. */
function removePropertyPhoto($photoID)
{
    $db = Database::instance()->db();
    $stmt = $db->prepare('DELETE FROM Photos WHERE id =?');
    $stmt->execute(array($photoID));
    return 1;
}
/** Returns every photo row attached to the given property. */
function getPropertyPhotos($property_id)
{
    $db = Database::instance()->db();
    $stmt = $db->prepare('SELECT * FROM Photos WHERE propertyID = ?');
    $stmt->execute(array($property_id));
    return $stmt->fetchAll();
}
<file_sep>/Projeto/pages/editproperty.php
<?php
// Edit-property page: shows the pre-filled form for one of the
// logged-in user's own properties.
include_once('../includes/session.php');
include_once('../templates/tpl_common.php');
include_once('../templates/tpl_property.php');
include_once('../database/db_properties.php');
include_once('../database/db_user.php');
// Verify if user is logged in
if (!isset($_SESSION['username'])) {
    die(header('Location: login.php'));
}
$userid = getUser($_SESSION['username'])['id'];
// Cast the id so only an integer reaches the query layer.
$property = getProperty((int)($_GET['id'] ?? 0));
// Only the owner may edit; also bail out when the id does not exist
// (the old code dereferenced a `false` fetch result).
if (!$property || $userid != $property['ownerID']) {
    die(header('Location: search.php'));
}
draw_header($_SESSION['username']);
edit_property($property);
draw_footer();
<file_sep>/Projeto/actions/action_edit_property.php
<?php
// Persists an edit to one of the logged-in user's properties.
include_once('../includes/session.php');
include_once('../database/db_properties.php');
include_once('../database/db_user.php');
// Verify if user is logged in
if (!isset($_SESSION['username'])) {
    die(header('Location: ../pages/login.php'));
}
$id = $_POST['id'] ?? 0;
$title = $_POST['title'] ?? '';
$description = $_POST['description'] ?? '';
$location = $_POST['location'] ?? '';
$price = $_POST['price'] ?? '';
$start = $_POST['start_date'] ?? '';
$end = $_POST['end_date'] ?? '';
$userid = getUser($_SESSION['username'])['id'];
$property = getProperty($id);
// Only the owner may edit. Fixed: the old redirect used the absolute
// path "/pages/search.php", unlike every other action in this project.
if (!$property || $userid != $property['ownerID']) {
    die(header('Location: ../pages/search.php'));
}
// Normalise the availability interval so $start <= $end.
if ($start > $end) {
    list($start, $end) = array($end, $start);
}
$pattern = "/^[a-z A-Z]+$/";
if (!preg_match($pattern, $title)) {
    $_SESSION['messages'][] = array('type' => 'error', 'content' => 'Title can only contain letters!');
    die(header('Location: ../pages/manage.php'));
}
if (!preg_match($pattern, $location)) {
    $_SESSION['messages'][] = array('type' => 'error', 'content' => 'Location can only contain letters!');
    die(header('Location: ../pages/manage.php'));
}
if (!preg_match("/^[a-z A-Z0-9]+$/", $description)) {
    $_SESSION['messages'][] = array('type' => 'error', 'content' => 'Description can only contain letters and numbers!');
    die(header('Location: ../pages/manage.php'));
}
if (editProperty($id, $price, $title, $location, $description, $start, $end)) {
    $_SESSION['messages'][] = array('type' => 'success', 'content' => 'Edited Property successfully!');
    die(header('Location: ../pages/manage.php'));
} else {
    // Fixed: the failure branch used to repeat the description-validation
    // message instead of reporting the actual failure.
    $_SESSION['messages'][] = array('type' => 'error', 'content' => 'Failed to edit property!');
    die(header('Location: ../pages/manage.php'));
}
<file_sep>/Projeto/templates/tpl_profile.php
<?php function draw_profile()
{
    include_once('../includes/session.php');
    include_once('../database/db_user.php');
    $user = getUser($_SESSION['username']);
    /**
     * Draws the edit-profile section: a multipart form that lets the
     * logged-in user change picture, username and password (the current
     * password is required to confirm). Posts to action_profile.php.
     */ ?>
<title>Edit Profile</title>
<section class="profile">
    <header><h2>Edit Profile</h2></header>
    <form method="post" action="../actions/action_profile.php" enctype="multipart/form-data">
        <img src="../images/users/<?php echo $user['profilePicture']; ?>" onclick="pictureClick()" id="profileDisplay"/>
        <label for="profilePicture">Profile Picture</label>
        <input type="file" name="profilePicture" onchange="displayImage(this)" id="profilePicture" accept = "image/jpeg" style = "display:none;">
        <input type="<PASSWORD>" name="password" placeholder="<PASSWORD>" required>
        <input type="text" name="newusername" placeholder="new username">
        <input type="<PASSWORD>" name="newpassword" placeholder="<PASSWORD>">
        <input type="submit" value="Change">
    </form>
</section>
<script src="../javascript/pictures.js"></script>
<?php
} ?><file_sep>/Projeto/actions/action_delete_property.php
<?php
// Deletes one of the logged-in user's properties.
include_once('../includes/session.php');
include_once('../database/db_properties.php');
include_once('../database/db_user.php');
// Verify if user is logged in
if (!isset($_SESSION['username'])) {
    die(header('Location: ../pages/login.php'));
}
$property_id = (int)($_GET['id'] ?? 0);
$userid = getUser($_SESSION['username'])['id'];
$property = getProperty($property_id);
// Only the owner may delete. Fixed: the old redirect "search.php" was
// relative to /actions/, a path that does not exist.
if (!$property || $userid != $property['ownerID']) {
    die(header('Location: ../pages/search.php'));
}
// deleteProperty() returns nothing, so the previous
// `if (!(deleteProperty(...)))` check was always true and the "error"
// branch (with its broken property.php redirect) was unreachable.
deleteProperty($property_id);
$_SESSION['messages'][] = array('type' => 'success', 'content' => 'Succesfully deleted property');
die(header('Location: ../pages/manage.php'));
<file_sep>/Projeto/pages/search.php
<?php
// Search page: renders the property search form for logged-in users.
include_once('../includes/session.php');
include_once('../templates/tpl_common.php');
include_once('../templates/tpl_search.php');
// Anonymous visitors are redirected to the login page.
if (!isset($_SESSION['username'])) {
    die(header('Location: login.php'));
}
draw_header($_SESSION['username']);
draw_search();
draw_footer();
<file_sep>/Projeto/database/db_reservations.php
<?php
include_once('../includes/database.php');
include_once('../database/db_user.php');
include_once('../database/db_properties.php');
include_once('../includes/session.php');
/**
 * Books [$start, $end] on a property for the logged-in session user if
 * the interval does not overlap an existing reservation.
 * Returns 1 on success, 0 on a conflict.
 */
function addReservation($property_id, $start, $end, $price)
{
    $db = Database::instance()->db();
    if (checkReservationConflict($property_id, $start, $end)) {
        $stmt = $db->prepare('INSERT INTO Reservations VALUES(NULL, ?, ?, ?,?,?)');
        $touristID = getUser($_SESSION['username'])['id'];
        $stmt->execute(array($touristID,$property_id,$start,$end,$price));
        return 1;
    }
    return 0;
}
/** Returns every reservation made by the logged-in session user. */
function getReservations()
{
    $db = Database::instance()->db();
    $touristID = getUser($_SESSION['username'])['id'];
    $stmt = $db->prepare('SELECT * FROM Reservations WHERE touristID = ? ');
    $stmt->execute(array($touristID));
    return $stmt->fetchAll();
}
/** Returns a property's reservations, one row per distinct startDate. */
function getPropertyReservations($property_id)
{
    $db = Database::instance()->db();
    $stmt = $db->prepare('SELECT * FROM Reservations WHERE propertyID = ? GROUP BY startDate');
    $stmt->execute(array($property_id));
    return $stmt->fetchAll();
}
/**
 * Despite the name, returns true when [$start, $end] is FREE on the
 * property: the query fetches all reservations overlapping the
 * interval and the function reports whether that result set is empty.
 */
function checkReservationConflict($property_id, $start, $end)
{
    $db = Database::instance()->db();
    $stmt = $db->prepare('SELECT *
FROM Reservations
WHERE propertyID = ? AND
(startDate >= ? AND
startDate <= ? OR
endDate >= ? AND
endDate <= ? OR
startDate <= ? AND
endDate >= ?
)');
    $stmt->execute(array($property_id,$start,$end,$start,$end,$start,$end));
    return empty($stmt->fetchAll());
}
/** Fetches a single reservation row by id (false when not found). */
function getReservation($reservationID)
{
    $db = Database::instance()->db();
    $stmt = $db->prepare('SELECT * FROM Reservations WHERE id = ? ');
    $stmt->execute(array($reservationID));
    return $stmt->fetch();
}
/**
 * Cancels a reservation, allowed only when its check-in date is at
 * least 14 days in the future. Returns 1 on deletion, 0 otherwise.
 *
 * Fixed: the old code compared DateInterval->d, which is only the
 * day COMPONENT of the interval (0-30) — a check-in 1 month + 2 days
 * away was refused. ->days is the total day count, and ->invert now
 * rejects check-in dates already in the past.
 */
function removeReservation($reservationID)
{
    $db = Database::instance()->db();
    $reservation = getReservation($reservationID);
    if (!$reservation) {
        return 0;
    }
    $today = date_create(date('Y-m-d'));
    $start = date_create($reservation['startDate']);
    $diff = date_diff($today, $start);
    if (!$diff->invert && $diff->days >= 14) {
        $stmt = $db->prepare('DELETE FROM Reservations WHERE id =?');
        $stmt->execute(array($reservationID));
        return 1;
    }
    return 0;
}
<file_sep>/Projeto/actions/action_profile.php
<?php
// Updates the logged-in user's account (username, password, picture).
// The current password must be supplied to authorise any change.
include_once('../includes/session.php');
include_once('../database/db_user.php');
$newusername = $_POST['newusername'];
$password = $_POST['<PASSWORD>'];
$newpassword = $_POST['newpassword'];
// Blank fields mean "keep the current value".
if (empty($newusername)) {
    $newusername = $_SESSION['username'];
}
if (empty($newpassword)) {
    $newpassword = $password;
}
if (!preg_match("/^[a-zA-Z0-9]+$/", $newusername)) {
    $_SESSION['messages'][] = array('type' => 'error', 'content' => 'Username can only contain letters and numbers!');
    die(header('Location: ../pages/profile.php'));
}
// Profile pictures are stored under a hash of the (new) username.
$hash = sha1($newusername);
$profilePicture = $hash.".jpg";
$target = '../images/users/' . $profilePicture;
if (editUser($newusername, $password, $newpassword)) {
    if ($_FILES['profilePicture']['error'] == 0) {
        editProfilePicture($password, $profilePicture);
        move_uploaded_file($_FILES['profilePicture']['tmp_name'], $target);
    }
    $_SESSION['username'] = $newusername;
    $_SESSION['messages'][] = array('type' => 'success', 'content' => 'Succesfully edited your profile');
    die(header('Location: ../pages/search.php'));
} else {
    // Fixed: the failure branch assigned an undefined $username to the
    // session, wiping the login; the session must stay untouched here.
    $_SESSION['messages'][] = array('type' => 'error', 'content' => 'Failed to edit your profile!');
    die(header('Location: ../pages/profile.php'));
}
<file_sep>/Projeto/actions/action_add_property.php
<?php
// Creates a new property for the logged-in user, stores any uploaded
// photos, then redirects to the "My Places" page.
include_once('../includes/session.php');
include_once('../database/db_properties.php');
include_once('../database/db_user.php');
// Verify if user is logged in
if (!isset($_SESSION['username'])) {
    die(header('Location: ../pages/login.php'));
}
$user = getUser($_SESSION['username']);
$userID = $user['id'];
$title = $_POST['title'] ?? '';
$description = $_POST['description'] ?? '';
$location = $_POST['location'] ?? '';
$price = $_POST['price'] ?? '';
$start = $_POST['start_date'] ?? '';
$end = $_POST['end_date'] ?? '';
// Normalise the availability interval so $start <= $end.
if ($start > $end) {
    list($start, $end) = array($end, $start);
}
$pattern = "/^[a-z A-Z]+$/";
if (!preg_match($pattern, $title)) {
    $_SESSION['messages'][] = array('type' => 'error', 'content' => 'Title can only contain letters!');
    die(header('Location: ../pages/property.php'));
}
if (!preg_match($pattern, $location)) {
    $_SESSION['messages'][] = array('type' => 'error', 'content' => 'Location can only contain letters!');
    die(header('Location: ../pages/property.php'));
}
if (!preg_match("/^[a-z A-Z0-9]+$/", $description)) {
    $_SESSION['messages'][] = array('type' => 'error', 'content' => 'Description can only contain letters and numbers!');
    die(header('Location: ../pages/property.php'));
}
$property_id = addProperty($userID, $price, $title, $location, $description, $start, $end);
foreach ($_FILES as $file) {
    // Skip failed or empty uploads instead of registering broken photo
    // rows (the old loop never checked the upload error code).
    if ($file['error'] != 0) {
        continue;
    }
    $hash = sha1($file['name']).'.jpg';
    addPropertyPhoto($property_id, $hash);
    $target = '../images/properties/' . $hash;
    move_uploaded_file($file['tmp_name'], $target);
}
$_SESSION['messages'][] = array('type' => 'success', 'content' => 'Succesfully added property');
die(header('Location: ../pages/manage.php'));
<file_sep>/Projeto/javascript/datefield.js
function updateCheckout() {
var date1 = document.querySelector('#begin_date');
var date2 = document.querySelector('#end_date');
date2.setAttribute('min',date1.value);
}<file_sep>/Projeto/templates/tpl_rental.php
<?php
include_once('../database/db_properties.php');
include_once('../database/db_user.php');
/**
 * Draws the list of property cards for the given rows.
 *
 * @param array $rentals Rows from the Properties table.
 */
function draw_rentals($rentals)
{
    ?>
<title>Properties</title>
<section class="rentals">
<?php
foreach ($rentals as $rental) {
    draw_rental($rental);
} ?>
</section>
<?php
} ?>
<?php function draw_add()
{
    /**
     * Draws the "Add Property" button that navigates to the
     * add-property page (used on the "My Places" listing).
     */
    ?>
<section class="add_menu">
    <button type="button" onclick="window.location.href='../pages/addproperty.php'" >Add Property</button>
</section>
<?php
} ?>
<?php function draw_rental($rental)
{
    /**
     * Draws one property card (first photo, title, description,
     * location, nightly price) linking to the property detail page.
     *
     * @param array $rental Row from the Properties table.
     */
    $photos = getPropertyPhotos($rental['id']);
    // NOTE(review): $username is fetched but never used below — confirm
    // whether the owner's name was meant to appear on the card.
    $username = getUserbyID($rental['ownerID']); ?>
<a href="property.php?id=<?=$rental['id']?> " class="rental">
    <article>
        <main>
            <div class="row">
                <div class="column">
                    <img alt="Property Image" src="../images/properties/<?php echo $photos[0]['path']; ?>"/>
                </div>
                <div class="column">
                    <h2><?=$rental['title']?></h2>
                    <h3>Description: <?=$rental['description']?></h3>
                    <h3>Location: <?=$rental['location']?></h3>
                    <h4>Price per night: <?=$rental['price']?>€</h4>
                </div>
            </div>
        </main>
    </article>
</a>
<?php
} ?><file_sep>/Projeto/pages/manage.php
<?php
// "My Places" page: lists every property owned by the logged-in user
// together with the "Add Property" button.
include_once('../includes/session.php');
include_once('../templates/tpl_common.php');
include_once('../templates/tpl_rental.php');
include_once('../database/db_properties.php');
if (isset($_SESSION['username'])) {
    $rentals = getProperties($_SESSION['username']);
    draw_header($_SESSION['username']);
    draw_rentals($rentals);
    draw_add();
    draw_footer();
} else {
    // Anonymous visitors are sent to the login page.
    die(header('Location: login.php'));
}
<file_sep>/Projeto/database/db_user.php
<?php
include_once('../includes/database.php');
/**
 * Verifies that the given username / password combination exists in
 * the database. Passwords are stored as password_hash() digests and
 * checked with password_verify().
 */
function checkUserPassword($username, $password)
{
    $db = Database::instance()->db();
    $stmt = $db->prepare('SELECT * FROM Users WHERE username = ?');
    $stmt->execute(array($username));
    $user = $stmt->fetch();
    return $user !== false && password_verify($password, $user['password']);
}
/** True when a user with this username already exists. */
function checkIfUsernameExists($username)
{
    $db = Database::instance()->db();
    $stmt = $db->prepare('SELECT * FROM Users WHERE username = ?');
    $stmt->execute(array($username));
    $user = $stmt->fetch();
    return $user !== false;
}
/** Inserts a new user; the password is stored as a bcrypt hash (cost 12). */
function insertUser($username, $email, $password, $name, $profilePicture)
{
    $db = Database::instance()->db();
    $options = ['cost' => 12];
    $stmt = $db->prepare('INSERT INTO Users VALUES(NULL, ?, ?, ?, ?, ?)');
    $stmt->execute(array($username,$email, password_hash($password, PASSWORD_DEFAULT, $options),$name, $profilePicture));
}
/**
 * Changes the session user's username and password after verifying the
 * current password. Returns 1 on success, 0 when the check fails.
 */
function editUser($newusername, $password, $newpassword)
{
    $db = Database::instance()->db();
    $options = ['cost' => 12];
    $user = getUser($_SESSION['username']);
    if (checkUserPassword($user['username'], $password)) {
        $stmt = "UPDATE Users SET password = ? , username = ? WHERE id = ?";
        $db->prepare($stmt)->execute([password_hash($newpassword, PASSWORD_DEFAULT, $options),$newusername,$user['id']]);
        return 1;
    }
    return 0;
}
/**
 * Replaces the session user's profile picture file name, after
 * re-checking the password. Returns 1 on success, 0 otherwise.
 *
 * Fixed: the old picture file was unlinked BEFORE the password check,
 * so a wrong password still destroyed the user's existing picture.
 * Also removed the unused $options array.
 */
function editProfilePicture($password, $profilePicture)
{
    $db = Database::instance()->db();
    $user = getUser($_SESSION['username']);
    if (checkUserPassword($user['username'], $password)) {
        $old = '../images/users/'.$user['profilePicture'];
        if ($user['profilePicture'] && file_exists($old)) {
            unlink($old);
        }
        $stmt = "UPDATE Users SET profilePicture = ? WHERE id = ?";
        $db->prepare($stmt)->execute([$profilePicture,$user['id']]);
        return 1;
    }
    return 0;
}
/** Fetches one user row by username (false when not found). */
function getUser($username)
{
    $db = Database::instance()->db();
    $stmt = $db->prepare('SELECT * FROM Users WHERE username = ?');
    $stmt->execute(array($username));
    return $stmt->fetch();
}
/** Fetches one user row by numeric id (false when not found). */
function getUserbyID($id)
{
    $db = Database::instance()->db();
    $stmt = $db->prepare('SELECT * FROM Users WHERE id = ?');
    $stmt->execute(array($id));
    return $stmt->fetch();
}
/** Returns every message sent by the given username. */
function getSentMessages($username)
{
    $db = Database::instance()->db();
    $stmt = $db->prepare('SELECT * FROM Messages WHERE senderUsername = ? ');
    $stmt->execute(array($username));
    return $stmt->fetchAll();
}
/** Returns every message received by the given username. */
function getReceivedMessages($username)
{
    $db = Database::instance()->db();
    $stmt = $db->prepare('SELECT * FROM Messages WHERE recipientUsername = ? ');
    $stmt->execute(array($username));
    return $stmt->fetchAll();
}
<file_sep>/Projeto/templates/tpl_contact.php
<?php function draw_contact()
{
    /**
     * Draws the contact section: a form posting name, e-mail, subject
     * and message to action_contact.php.
     *
     * Fixed: a duplicate <title> element was emitted inside the
     * <section>, which is invalid HTML (title belongs in <head> and
     * one was already written just above).
     */ ?>
<title>Contact Us</title>
<section class="contact">
    <header><h2>Contact Us</h2></header>
    <form method="post" action="../actions/action_contact.php">
        <input type="text" name="name" placeholder="<NAME>" required>
        <input type="email" name="email" placeholder="email address" required>
        <input type="text" name="subject" placeholder="subject" required>
        <textarea name="message" placeholder="write something" required></textarea>
        <input type="submit" value="submit">
    </form>
</section>
<?php
} ?>
<file_sep>/Projeto/templates/tpl_auth.php
<?php function draw_login()
{
    /**
     * Draws the login section.
     *
     * Renders the username/password form posting to action_login.php,
     * plus a link to the signup page.
     */ ?>
<title>Login</title>
<section class="login">
    <header><h2>Welcome Back</h2></header>
    <form method="post" action="../actions/action_login.php">
        <input type="text" name="username" placeholder="username" required>
        <input type="<PASSWORD>" name="password" placeholder="<PASSWORD>" required>
        <input type="submit" value="Login">
    </form>
    <footer>
        <p>Don't have an account? <a href="signup.php">Signup!</a></p>
    </footer>
</section>
<?php
} ?>
<?php function draw_signup()
{
    /**
     * Draws the signup section.
     *
     * Renders the multipart registration form (picture, full name,
     * username, password with strength pattern, e-mail) posting to
     * action_signup.php, and loads pictures.js for the live preview.
     */ ?>
<title>Sign Up</title>
<section class="signup">
    <header><h2>New Account</h2></header>
    <form method="post" action="../actions/action_signup.php" enctype="multipart/form-data">
        <img src="../images/site/placeholder.jpg" onclick="pictureClick()" id="profileDisplay"/>
        <label for="profilePicture">Profile Picture</label>
        <input type="file" name="profilePicture" onchange="displayImage(this)" id="profilePicture" style="display:none;">
        <input type="text" name="name" placeholder="full name" required>
        <input type="text" name="username" placeholder="username" required>
        <input type="<PASSWORD>" name="password" placeholder="<PASSWORD>" pattern="(?=.*\d)(?=.*[a-z])(?=.*[A-Z]).{8,}" title="Must contain at least one number and one uppercase and lowercase letter, and at least 8 or more characters" required>
        <input type="email" name="email" placeholder="email address" pattern="[a-z0-9._%+-]+@[a-z0-9.-]+\.[a-z]{2,}$" required>
        <input type="submit" value="Signup">
    </form>
    <footer>
        <p>Already have an account? <a href="login.php">Login!</a></p>
    </footer>
</section>
<script src="../javascript/pictures.js"></script>
<?php
} ?>
<file_sep>/Projeto/database/createDB.sql
--
-- File generated with SQLiteStudio v3.2.1 on Wed Dec 18 12:37:31 2019
--
-- Text encoding used: UTF-8
--
-- Disable FK enforcement while tables are dropped/recreated in bulk.
PRAGMA foreign_keys = off;
BEGIN TRANSACTION;
-- Table: Comments
-- Property reviews: free text plus a 0-5 rating per tourist.
DROP TABLE IF EXISTS Comments;
CREATE TABLE Comments (
    id         INTEGER PRIMARY KEY,
    text       VARCHAR NOT NULL,
    rating     INTEGER CHECK (rating >= 0 AND
                              rating <= 5)
                       NOT NULL,
    propertyID INTEGER NOT NULL,
    touristID  INTEGER NOT NULL
);
-- Table: Owners
-- Role table: user ids that may list properties.
DROP TABLE IF EXISTS Owners;
CREATE TABLE Owners (
    id INTEGER PRIMARY KEY
);
INSERT INTO Owners (id) VALUES (1);
INSERT INTO Owners (id) VALUES (2);
INSERT INTO Owners (id) VALUES (3);
INSERT INTO Owners (id) VALUES (4);
-- Table: Photos
-- Image files attached to properties; `path` is the file name under
-- images/properties/.
DROP TABLE IF EXISTS Photos;
CREATE TABLE Photos (
    id          INTEGER PRIMARY KEY,
    description VARCHAR NOT NULL,
    propertyID  INTEGER,
    path        TEXT    NOT NULL
);
INSERT INTO Photos (id, description, propertyID, path) VALUES (1, 'Living Room', 1, '30aa4c6fa8249b9d8e3ab0954c443f2f0a92ec79.jpg');
INSERT INTO Photos (id, description, propertyID, path) VALUES (2, 'Bedroom', 1, '7decab1e2fd610fad0b85ac70d214143937dedd3.jpg');
INSERT INTO Photos (id, description, propertyID, path) VALUES (3, 'Kitchen', 1, '98304c010b0c4a8ae7d3bce58b80126a15b375a3.jpg');
INSERT INTO Photos (id, description, propertyID, path) VALUES (4, 'Garden', 5, '87ef72c4943cab1a15dcf73b4e6642b7335c8158.jpg');
INSERT INTO Photos (id, description, propertyID, path) VALUES (5, 'Bedroom', 5, '0675a2ad891f8814846931458b95004c37d1b855.jpg');
INSERT INTO Photos (id, description, propertyID, path) VALUES (6, 'Living Room', 5, '02e2c7887ddd1ae783ba24dba95ade15c2dc0dec.jpg');
INSERT INTO Photos (id, description, propertyID, path) VALUES (7, 'Kitchen', 3, '0969d0da5d8d1bc7ef93d52a8c81f3aef79677f6.jpg');
INSERT INTO Photos (id, description, propertyID, path) VALUES (8, 'Living Room', 3, 'd178e9652bac44f834c9446698b82c10019ecbbe.jpg');
INSERT INTO Photos (id, description, propertyID, path) VALUES (9, 'Bedroom', 3, '637f994ad2e7cf948ac9d6e4acd23b688d4705c4.jpg');
INSERT INTO Photos (id, description, propertyID, path) VALUES (10, 'Bedroom', 4, 'd74e80e1312ee7b55eac9f3e9816670511c0585b.jpg');
INSERT INTO Photos (id, description, propertyID, path) VALUES (11, 'Kitchen', 4, '2e8c602af5ddb4f72e20d7c532b9577ecea50d66.jpg');
INSERT INTO Photos (id, description, propertyID, path) VALUES (12, 'Living Room', 4, '85db0aedfbc8b50dd4fe40def5b60124708b1b03.jpg');
INSERT INTO Photos (id, description, propertyID, path) VALUES (13, 'Skylight', 2, 'f092f7491f15ec2b5199dbdc3187ca76fb1cbf70.jpg');
INSERT INTO Photos (id, description, propertyID, path) VALUES (14, 'Bedroom', 2, '1bb6dbdca449efac35a6f2aff38f321bb2166e84.jpg');
INSERT INTO Photos (id, description, propertyID, path) VALUES (15, 'Living Room', 2, '69f74787ee1c561894d6e01563c5ae2cf9656c28.jpg');
-- Table: Properties
-- Listings with a nightly price and an availability window.
DROP TABLE IF EXISTS Properties;
CREATE TABLE Properties (
    id                INTEGER PRIMARY KEY,
    ownerID           INTEGER NOT NULL,
    price             FLOAT   NOT NULL,
    title             VARCHAR NOT NULL,
    location          VARCHAR NOT NULL,
    description       VARCHAR NOT NULL,
    availabilityStart DATE,
    availabilityEnd   DATE
);
INSERT INTO Properties (id, ownerID, price, title, location, description, availabilityStart, availabilityEnd) VALUES (1, 4, 30.0, 'Downtown Apartment', 'Porto', '3 Rooms', '2019-12-25', '2020-12-31');
INSERT INTO Properties (id, ownerID, price, title, location, description, availabilityStart, availabilityEnd) VALUES (2, 1, 43.0, 'Ribeira Vintage', 'Porto', '1 Room', '2020-01-01', '2020-12-31');
INSERT INTO Properties (id, ownerID, price, title, location, description, availabilityStart, availabilityEnd) VALUES (3, 3, 97.0, 'Infante Apartment', 'Porto', '2 Rooms', '2020-02-01', '2020-12-31');
INSERT INTO Properties (id, ownerID, price, title, location, description, availabilityStart, availabilityEnd) VALUES (4, 2, 102.0, 'Charming Loft', 'Gaia', '2 Rooms', '2020-01-01', '2020-07-31');
-- Fixed: row 5 ended on '2020-06-31', which is not a real date (June
-- has 30 days) and never matches SQLite date comparisons.
INSERT INTO Properties (id, ownerID, price, title, location, description, availabilityStart, availabilityEnd) VALUES (5, 2, 70.0, 'Studio Garden', 'Gaia', '1 Room', '2020-05-01', '2020-06-30');
-- Table: Reservations
-- Bookings of a property interval by a tourist at a total price.
DROP TABLE IF EXISTS Reservations;
CREATE TABLE Reservations (
    id         INTEGER PRIMARY KEY,
    touristID  INTEGER,
    propertyID INTEGER,
    startDate  DATE  NOT NULL,
    endDate    DATE  NOT NULL,
    price      FLOAT NOT NULL
);
-- Fixed: the first three rows were seeded as 'DD/MM/YYYY' strings.
-- SQLite compares DATE values lexically, so mixed formats broke the
-- overlap/search queries; all dates are now ISO-8601 (YYYY-MM-DD),
-- matching row 5 and every value written by the application.
INSERT INTO Reservations (id, touristID, propertyID, startDate, endDate, price) VALUES (1, 4, 3, '2019-01-22', '2019-01-24', 194.0);
INSERT INTO Reservations (id, touristID, propertyID, startDate, endDate, price) VALUES (2, 2, 1, '2019-07-22', '2019-07-30', 240.0);
INSERT INTO Reservations (id, touristID, propertyID, startDate, endDate, price) VALUES (4, 3, 4, '2019-04-17', '2019-05-22', 510.0);
INSERT INTO Reservations (id, touristID, propertyID, startDate, endDate, price) VALUES (5, 2, 4, '2020-01-01', '2020-01-15', 1428.0);
-- Table: Tourists
-- Role table: user ids that may book reservations.
DROP TABLE IF EXISTS Tourists;
CREATE TABLE Tourists (
    id INTEGER PRIMARY KEY
);
INSERT INTO Tourists (id) VALUES (1);
INSERT INTO Tourists (id) VALUES (2);
INSERT INTO Tourists (id) VALUES (3);
INSERT INTO Tourists (id) VALUES (5);
-- Table: Users
-- Accounts; passwords are password_hash() bcrypt digests.
-- NOTE(review): the <EMAIL>/<PASSWORD> placeholders below (including two
-- rows missing an opening quote) look like redaction damage in this
-- dump, not original SQL — restore real literals before running.
DROP TABLE IF EXISTS Users;
CREATE TABLE Users (
    id             INTEGER PRIMARY KEY,
    username       VARCHAR NOT NULL
                           UNIQUE,
    email          VARCHAR NOT NULL
                           UNIQUE,
    password       VARCHAR NOT NULL,
    name           VARCHAR NOT NULL,
    profilePicture TEXT
);
INSERT INTO Users (id, username, email, password, name, profilePicture) VALUES (1, 'admin', '<EMAIL>', '$2y$12$XyUbUN0YHX1p0dKn.uFyqeCouwja7Kaj7mWUGPWNL8FQT2gZAFdhW', 'admin', 'd033e22ae348aeb5660fc2140aec35850c4da997.jpg');
INSERT INTO Users (id, username, email, password, name, profilePicture) VALUES (2, 'mpinho', '<EMAIL>', '$2y$12$8d1DDkLNFqW9qKnxyn8vVuZOfMmHB7OtMeDoodaNFYgZ6kitwjLOK', '<NAME>', '5c54bbe60aaf5ef4114e5fa836209fed8ce267a4.jpg');
INSERT INTO Users (id, username, email, password, name, profilePicture) VALUES (3, 'ranheri', '<EMAIL>', '$2y$12$1x1q20CBxWmHgkVYVDfxnO5F52cjSgCZ9v/kwox5wTBex/JA52ED2', '<NAME>', '<PASSWORD>65112d6a0185afba745.jpg');
INSERT INTO Users (id, username, email, password, name, profilePicture) VALUES (4, 'brandao', '<EMAIL>', <PASSWORD>', '<NAME>', '8e87a2ff3f3506026253fdd68bd03d38e70c148e.jpg');
INSERT INTO Users (id, username, email, password, name, profilePicture) VALUES (5, 'tourist', '<EMAIL>', <PASSWORD>', 'tourist', '<PASSWORD>4.jpg');
COMMIT TRANSACTION;
PRAGMA foreign_keys = on;
| 9a406c2d73ab9b2a4fa513ec8d4daf6ebd054c97 | [
"JavaScript",
"SQL",
"Text",
"PHP"
] | 27 | PHP | jbrandao99/LTW | 9c177d4e0cc2b8f674d2db5d022e721150019990 | 3fb2e35dc98912af29550856aa9be1033cb48bfe |
refs/heads/master | <file_sep># Zadania_okresowe
Do Raspberry Pi został podłączony cyfrowy czujnik temperatury oraz wilgotności (DHT11). Temperatura jest odczytywana za pomocą gotowego programu udostępnionego przez producenta czujnika. W uzyskanym programie należało tylko odpowiednio zainicjalizować zmienne. Użyty czujnik jest dosyć wolny, więc czujnik zwraca czasami wartość dopiero po kilku próbach, aby mieć pewność, że będą dodawane do bazy poprawne wartości napisałem program, który sprawdza odpowiedź programu pobierającego dane z czujnika. Temperatura jest sprawdzana w pętli dopóki nie zostanie otrzymana prawidłowa wartość, jeśli otrzymujemy prawidłową wartość pięć razy jest obliczana z nich średnia, jednak jeśli wartość nie jest prawidłowa, program czeka trzy sekundy i sprawdza jeszcze raz.
Test.py – program producenta czujnika Adafruit DHT11 umożliwiający odczyt z czujnika.
Test1.py- Program sprawdzający poprawność odpowiedzi programu test.py oraz dodający wartości do bazy danych.
Program test.py jest wywoływany przez program test1.py.
Następnie został ustawiony Cron, aby uruchamiał program co 10 minut.
*/10 * * * * sudo python /home/pi/libraries/Adafruit-Raspberry-Pi-Python-Code/Adafruit_DHT_Driver/test1.py > /dev/null 2>&1
<file_sep>#!/usr/bin/python
import subprocess
import re
import sys
import time
import MySQLdb
# Sensor configuration: identifier stored with each DB reading, and the
# GPIO pin the DHT11 is wired to (the pin itself is hard-coded in test.py).
id_thermometer = "DHT110000000001"
gpio_thermometer = 4
def DHT_read(pin):
#define lists:
temp_list=[]
hum_list=[]
#define the number of times toread the sensor
read_count=5
#set counters to zero
i=0
sensor_error=0
#begin the loop for reading the sensor
while i<read_count:
#run DHT driver to read data from sensor
output = subprocess.check_output(["sudo","python","/home/pi/libraries/Adafruit-Raspberry-Pi-Python-Code/Adafruit_DHT_Driver/test.py"]);
#get the temperature out of the 'output' string
matches_temp = re.search("Temp =\s+([0-9.]+)", output)
matches_hum = re.search("Hum =\s+([0-9.]+)", output)
#check for an error
if (not matches_temp):
time.sleep(3)
print "error detected"
sensor_error=sensor_error+1
continue
temp = (float(matches_temp.group(1)))
#add to temp list
temp_list.append(temp)
i=i+1
#calculate humidity
humidity = float(matches_hum.group(1))
#add to humidity list
hum_list.append(humidity)
#calculate average temp
sum_temp=0
w= len(temp_list)
while w > 0:
sum_temp=sum_temp+temp_list[w-1]
w=w-1
temp_avg=sum_temp / len(temp_list)
#calculate average humidity
sum_hum=0
w= len(hum_list)
while w > 0:
sum_hum=sum_hum+hum_list[w-1]
w=w-1
hum_avg=sum_hum / len(hum_list)
return float(round(temp_avg,1)), float(round(hum_avg,1))
# adds a record to the database
def insert_temp_reading (thermometer,temperature,humidity):
    # Insert one (thermometer, temperature, humidity) reading into the
    # temperature_monitor table; MySQL fills the timestamp via NOW().
    conn = MySQLdb.connect("localhost","user","Pass","basename" )
    cursor = conn.cursor()
    params = [thermometer,temperature,humidity]
    try:
        cursor.execute("INSERT INTO temperature_monitor (counter,thermometer,date,temperature,humidity) VALUE (NULL,%s,NOW(),%s,%s)",params)
        conn.commit()
    except MySQLdb.Error, e:
        # DB errors are logged, not raised, so the cron job keeps running
        print "An error has occurred. %s" %e
    finally:
        # always release the DB handles, even on failure
        cursor.close()
        conn.close()
# main section
def main():
    # Read averaged sensor values, persist them, then log to stdout.
    current_temp,current_humidity = DHT_read(gpio_thermometer)
    insert_temp_reading (id_thermometer,current_temp,current_humidity)
    print "Thermometer:" + id_thermometer + "|Temperature:" + `current_temp` + "|Humidity:" + `current_humidity`
if __name__ == '__main__':
    main()
| 02afd61758ad2b8b71e4970c8fab3106e1592dfc | [
"Markdown",
"Python"
] | 2 | Markdown | przemek207/Zadania_okresowe | 9fa1c16cba1c8188faf9d4057666494d4f567554 | 7f2d72a9a1dc6e4228b158de6df34c74c481aa04 |
refs/heads/master | <file_sep>import os
from itertools import combinations_with_replacement, product
import threading
import numpy as np
from scipy.spatial import distance
from scipy.stats import entropy
from scipy.stats import pearsonr
from sklearn.model_selection import KFold
from FeatureExtractionMode.utils.utils_write import FormatWrite
from MachineLearningAlgorithm.utils.utils_read import files2vectors_seq
SEED = 42


# Euclidean (L2) distance between two feature vectors.
def euclidean_distance(vec1, vec2):
    """Return the L2-norm of vec1 - vec2, rounded to 4 decimals."""
    difference = vec1 - vec2
    l2 = np.linalg.norm(difference, ord=2)
    return round(l2, 4)
# Manhattan (L1) distance between two feature vectors.
def manhattan_distance(vec1, vec2):
    """Return the L1-norm of vec1 - vec2, rounded to 4 decimals."""
    l1 = np.linalg.norm(vec1 - vec2, ord=1)
    return round(l1, 4)
# Chebyshev (L-infinity) distance between two feature vectors.
def chebyshev_distance(vec1, vec2):
    """Return the infinity-norm of vec1 - vec2, rounded to 4 decimals."""
    linf = np.linalg.norm(vec1 - vec2, ord=np.inf)
    return round(linf, 4)
# Hamming distance: number of positions where the vectors differ.
def hamming_distance(vec1, vec2):
    """Count differing positions (intended for binary-encoded vectors)."""
    return int(np.count_nonzero(vec1 - vec2))
# Jaccard distance (intended for binary-encoded vectors).
def jaccard_similarity_coefficient(vec1, vec2):
    """Return the Jaccard distance between the two vectors, rounded to 4 decimals."""
    score = distance.jaccard(vec1, vec2)
    return round(score, 4)
# Cosine similarity of the two vectors.
def cosine_similarity(vec1, vec2):
    """Return dot(v1, v2) / (|v1| * |v2|), rounded to 4 decimals.

    Note: like the original, a zero vector yields a division by zero.
    """
    denom = np.linalg.norm(vec1) * np.linalg.norm(vec2)
    return round(np.dot(vec1, vec2) / denom, 4)
# Pearson correlation coefficient.
def pearson_correlation_coefficient(vec1, vec2):
    """Return Pearson's r between the two vectors, rounded to 4 decimals."""
    r, _p_value = pearsonr(vec1, vec2)
    return round(r, 4)
# Relative entropy (Kullback-Leibler divergence). KL is not symmetric, so it
# is not a distance; the two directions are averaged (symmetric KL) so the
# score can be used uniformly in the parallel pairwise computation.
def kl_divergence(vec1, vec2):
    """Return the symmetrised KL divergence, rounded to 4 decimals."""
    forward = entropy(vec1, vec2)
    backward = entropy(vec2, vec1)
    return round((forward + backward) / 2.0, 4)
def score_func_one(method, vectors1, vectors2, index1, index2, return_dict, sem):
    """Compute one pairwise score between vectors1[index1] and vectors2[index2].

    The result is stored in return_dict under the key (index1, index2).
    `sem` bounds the number of concurrently running worker threads.

    Fix: the semaphore is now released in a finally block. Previously an
    unrecognised `method` returned early without releasing it, which starved
    the remaining worker threads waiting on the semaphore.

    :param method: one of 'ED', 'MD', 'CD', 'HD', 'JSC', 'CS', 'PCC', 'KLD'.
    :return: False on an unknown method (an error is printed), else None.
    """
    sem.acquire()
    try:
        if method == 'ED':
            score = euclidean_distance(vectors1[index1], vectors2[index2])
        elif method == 'MD':
            score = manhattan_distance(vectors1[index1], vectors2[index2])
        elif method == 'CD':
            score = chebyshev_distance(vectors1[index1], vectors2[index2])
        elif method == 'HD':
            score = hamming_distance(vectors1[index1], vectors2[index2])
        elif method == 'JSC':
            score = jaccard_similarity_coefficient(vectors1[index1], vectors2[index2])
        elif method == 'CS':
            score = cosine_similarity(vectors1[index1], vectors2[index2])
        elif method == 'PCC':
            score = pearson_correlation_coefficient(vectors1[index1], vectors2[index2])
        elif method == 'KLD':
            score = kl_divergence(vectors1[index1], vectors2[index2])
        else:
            print('Semantic Similarity method error!')
            return False
        return_dict[(index1, index2)] = score
    finally:
        sem.release()
def score4train_vec(method, train_vectors, process_num):
    """Build the symmetric pairwise score matrix for the training vectors.

    One thread per (i, j) pair with i <= j is created; `process_num` bounds
    how many run concurrently via a semaphore inside score_func_one. Scores
    are mirrored across the diagonal when the matrix is assembled.
    """
    threads = []
    sem = threading.Semaphore(process_num)
    return_dict = {}
    row = train_vectors.shape[0]
    # upper triangle (including the diagonal) only; mirrored below
    for i, j in combinations_with_replacement(list(range(len(train_vectors))), 2):
        threads.append(threading.Thread(target=score_func_one,
                                        args=(method, train_vectors, train_vectors, i, j, return_dict, sem)))
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    score_mat = np.zeros([row, row])
    for i in range(row):
        for j in range(row):
            if i <= j:
                score_mat[i][j] = return_dict[(i, j)]
            else:
                score_mat[i][j] = return_dict[(j, i)]
    return score_mat


def score4test_vec(method, train_vectors, test_vectors, process_num):
    """Build the (test x train) pairwise score matrix.

    Same threading scheme as score4train_vec, but over the full Cartesian
    product of test and train indices (the matrix is not symmetric).
    """
    threads = []
    sem = threading.Semaphore(process_num)
    return_dict = {}
    train_row = train_vectors.shape[0]
    test_row = test_vectors.shape[0]
    for i, j in product(list(range(test_row)), list(range(train_row))):
        threads.append(threading.Thread(target=score_func_one,
                                        args=(method, test_vectors, train_vectors, i, j, return_dict, sem)))
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    score_mat = np.zeros([test_row, train_row])
    for i in range(test_row):
        for j in range(train_row):
            score_mat[i][j] = return_dict[(i, j)]
    return score_mat
def partition_vectors(vectors, folds_num):
    """Split sample indices into `folds_num` shuffled cross-validation folds.

    Uses the module-level SEED so the partition is reproducible across runs.

    :return: list of (train_index, test_index) array pairs.
    """
    fold = KFold(folds_num, shuffle=True, random_state=np.random.RandomState(SEED))
    folds_temp = list(fold.split(vectors))
    folds = []
    for i in range(folds_num):
        test_index = folds_temp[i][1]
        train_index = folds_temp[i][0]
        folds.append((train_index, test_index))
    return folds
def get_partition(vectors, labels, train_index, val_index):
    """Index the feature matrix and labels into (x_train, y_train, x_val, y_val)."""
    train_features = vectors[train_index]
    train_labels = labels[train_index]
    val_features = vectors[val_index]
    val_labels = labels[val_index]
    return train_features, train_labels, val_features, val_labels
def score_process(method, vec_files, labels, cv, out_format, process_num):
    """Cross-validated pairwise-score generation.

    For each CV fold, computes the train/train and test/train score matrices
    with `method` and writes matrices, labels and test indices under
    <first_vec_file_without_ext>/score/Fold<n>/.

    :param cv: 'j' for jackknife (leave-one-out), '10' for 10-fold,
        anything else for 5-fold.
    """
    vectors = files2vectors_seq(vec_files, out_format)
    # output root is the first vector file's path minus its extension
    dir_name, _ = os.path.splitext(vec_files[0])
    score_dir = dir_name + '/score/'
    if cv == 'j':
        folds_num = len(vectors)
    elif cv == '10':
        folds_num = 10
    else:
        folds_num = 5
    folds = partition_vectors(vectors, folds_num)
    count = 0
    for train_index, test_index in folds:
        x_train, y_train, x_test, y_test = get_partition(vectors, labels, train_index, test_index)
        train_mat = score4train_vec(method, x_train, process_num)
        test_mat = score4test_vec(method, x_train, x_test, process_num)
        count += 1
        temp_dir = score_dir + 'Fold%d/' % count
        if not os.path.exists(temp_dir):
            os.makedirs(temp_dir)
        FormatWrite(train_mat, out_format, temp_dir + 'train_score.txt').write_to_file()
        FormatWrite(test_mat, out_format, temp_dir + 'test_score.txt').write_to_file()
        np.savetxt(temp_dir + 'train_label.txt', y_train)
        np.savetxt(temp_dir + 'test_label.txt', y_test)
        np.savetxt(temp_dir + 'test_index.txt', test_index)


def ind_score_process(method, vectors, ind_vec_file, labels, ind_labels, out_format, process_num):
    """Independent-test pairwise-score generation.

    Scores the training vectors against themselves and the independent-test
    vectors against the training vectors, writing results under
    <ind_vec_file_dir>/ind_score/.

    NOTE(review): this uses os.path.split (directory part) whereas
    score_process uses os.path.splitext (filename minus extension) to build
    the output root -- confirm the asymmetry is intentional.
    """
    ind_vectors = files2vectors_seq(ind_vec_file, out_format)
    dir_name, _ = os.path.split(ind_vec_file[0])
    score_dir = dir_name + '/ind_score/'
    train_mat = score4train_vec(method, vectors, process_num)
    test_mat = score4test_vec(method, vectors, ind_vectors, process_num)
    if not os.path.exists(score_dir):
        os.makedirs(score_dir)
    FormatWrite(train_mat, out_format, score_dir + 'train_score.txt').write_to_file()
    FormatWrite(test_mat, out_format, score_dir + 'test_score.txt').write_to_file()
    np.savetxt(score_dir + 'train_label.txt', labels)
    np.savetxt(score_dir + 'test_label.txt', ind_labels)
<file_sep>import heapq
import itertools
import os
import pickle
import sys
from .utils_fasta import get_seqs
from ..utils.utils_const import PROTEIN
from ..utils.utils_pssm import sep_file, produce_all_frequency
# Alphabets extended with 'X', the padding/unknown symbol that
# seq_length_fixed appends to short sequences.
PROTEIN_X = "ACDEFGHIKLMNPQRSTVWYX"
DNA_X = "ACGTX"
RNA_X = "ACGUX"
def make_km_list(k, alphabet):
    """Return every k-mer over `alphabet`, in itertools.product order.

    Prints a hint and re-raises TypeError/ValueError on invalid arguments.
    """
    try:
        return list(map("".join, itertools.product(alphabet, repeat=k)))
    except TypeError:
        print("TypeError: k must be an inter and larger than 0, alphabet must be a string.")
        raise TypeError
    except ValueError:
        print("TypeError: k must be an inter and larger than 0")
        raise ValueError
def seq_length_fixed(seq_list, fixed_len):
    """Pad (with 'X') or truncate each sequence so all have length fixed_len."""
    return [seq[:fixed_len].ljust(fixed_len, 'X') for seq in seq_list]
def km_words(input_file, alphabet, fixed_len, word_size, fixed=True):
    """ convert sequence to corpus """
    # Read sequences, optionally pad/truncate them to fixed_len, then slide a
    # window of `word_size` over each sequence to produce its word list.
    with open(input_file, 'r') as f:
        seq_list = get_seqs(f, alphabet)
    if fixed is True:
        # fixed_len is typically 400 for protein and 1001 for RNA
        seq_list = seq_length_fixed(seq_list, fixed_len)
    corpus = []
    for sequence in seq_list:
        word_list = []
        # windows slide along sequence to generate gene/protein words
        for i in range(len(sequence) - word_size + 1):
            word = sequence[i:i + word_size]
            word_list.append(word)
        corpus.append(word_list)
    return corpus
def get_rev_comp_dict(k):
    """Map every DNA k-mer (over ACGTX) to its reverse complement.

    :return: e.g. {'ACG': 'CGT', ...}; 'X' maps to itself.
    """
    complement = {"A": "T", "C": "G", "G": "C", "T": "A", "X": "X"}
    rev_comp_dict = {}
    # DNA only: the alphabet is fixed to ACGTX
    for km in make_km_list(k, "ACGTX"):
        revcomp = ""
        # walk the k-mer backwards, complementing each base
        for letter in reversed(km):
            if letter not in complement:
                error_info = ("Unknown DNA character (%s)\n" % letter)
                sys.exit(error_info)
            revcomp += complement[letter]
        # cache the result for future use
        rev_comp_dict[km] = revcomp
    return rev_comp_dict
def rev_km_words(input_file, alphabet, fixed_len, word_size, fixed=True):
    """ convert sequence to corpus of reverse-complement k-mer words """
    with open(input_file, 'r') as f:
        seq_list = get_seqs(f, alphabet)
    if fixed is True:
        # fixed_len is typically 400 for protein and 1001 for RNA
        seq_list = seq_length_fixed(seq_list, fixed_len)
    rev_comp_dict = get_rev_comp_dict(word_size)
    # one word list per sequence, for downstream corpus operations
    corpus = []
    for sequence in seq_list:
        word_list = []
        # windows slide along sequence to generate gene/protein words
        for i in range(len(sequence) - word_size + 1):
            word = sequence[i:i + word_size]
            rev_km_word = rev_comp_dict[word]
            word_list.append(rev_km_word)
        corpus.append(word_list)
    return corpus


def get_mismatch_dict(k, alphabet):
    """ Attention: Fixed m = 1
    :return: {'AA': ['AA', 'TA', 'GA', 'CA', 'AT', 'AG', 'AC'], ...}"""
    # m is fixed at 1 here; otherwise the generated corpus would be too large
    # and not meaningful (e.g. with k=3, m=2 there are 4*4 - 3*3 = 37 3-mers
    # that would count as occurrences of 'ACT').
    mismatch_dict = {}
    km_list = make_km_list(k, alphabet)
    for km in km_list:
        sub_list = [''.join(km)]
        for j in range(k):
            # substitute position j with every alphabet letter except km[j]
            # (set(alphabet) ^ set(km[j]) == alphabet minus that letter here)
            for letter in list(set(alphabet) ^ set(km[j])):
                substitution = list(km)
                substitution[j] = letter
                substitution = ''.join(substitution)
                sub_list.append(substitution)
        mismatch_dict[km] = sub_list
    return mismatch_dict


def mismatch_words(input_file, alphabet, fixed_len, word_size, fixed=True):
    """ convert sequence to corpus, expanding each k-mer to its 1-mismatch set """
    with open(input_file, 'r') as f:
        seq_list = get_seqs(f, alphabet)
    if fixed is True:
        # fixed_len is typically 400 for protein and 1001 for RNA
        seq_list = seq_length_fixed(seq_list, fixed_len)
    mismatch_dict = get_mismatch_dict(word_size, alphabet)
    corpus = []
    for sequence in seq_list:
        word_list = []
        # windows slide along sequence to generate gene/protein words
        for i in range(len(sequence) - word_size + 1):
            word = sequence[i:i + word_size]
            mis_words = mismatch_dict[word]
            word_list += mis_words
        corpus.append(word_list)
    return corpus


def subsequence_words(input_file, alphabet, fixed_len, word_size, fixed=True):
    """ convert sequence to corpus of distance-controlled subsequences """
    with open(input_file, 'r') as f:
        seq_list = get_seqs(f, alphabet)
    if fixed is True:
        # fixed_len is typically 400 for protein and 1001 for RNA
        seq_list = seq_length_fixed(seq_list, fixed_len)
    corpus = []
    for seq in seq_list:
        # dc=5 is deliberately hard-coded; see combination_dck
        corpus.append(combination_dck(seq, word_size, dc=5))
    return corpus
def combination_dck(s, k, dc):
    """Enumerate length-k subsequences of s whose consecutively chosen
    positions are less than `dc` apart (dc is the distance-control parameter).

    Note: iterating an empty slice/range simply skips the loop body, which is
    what terminates the recursion at the string boundaries.
    """
    if k == 0:
        return ['']
    results = []
    for start in range(len(s)):
        head = s[start]
        # the next character may only come from the following dc-1 positions
        for tail in combination_dck(s[start + 1: start + dc], k - 1, dc):
            results.append(head + tail)
    return results
def produce_one_top_n_gram(pssm_file, n):
    """Produce top-n-gram for one pssm file.
    :param pssm_file: the pssm file used to generate top-n-gram.
    :param n: the top n most frequency amino acids in the corresponding column of a frequency profile
    """
    tng_list = []
    # amino-acid order of the 20 PSSM columns as emitted by PSI-BLAST
    new_alpha_list = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F',
                      'P', 'S', 'T', 'W', 'Y', 'V']
    with open(pssm_file, 'r') as f:
        count = 0
        for line in f:
            count += 1
            # skip the 3-line header plus any footer/blank lines
            if count <= 3 or len(line.strip().split()) < 20:
                pass
            else:
                # fields 22..-3 hold the 20 frequency-percentage values
                line = line.strip().split()[22:-2]
                line = list(map(eval, line))
                # pick the n highest-frequency columns for this residue
                data = heapq.nlargest(n, enumerate(line), key=lambda x: x[1])
                if n == 1:
                    new_alpha = new_alpha_list[data[0][0]]
                else:
                    new_alpha = ''
                    indices, val = list(zip(*data))
                    for i in indices:
                        new_alpha += new_alpha_list[i]
                tng_list.append(new_alpha)
    return tng_list


def produce_tng_blosum(seq_file, blosum_dict, n):
    """Generate top-n-gram by blosum62 matrix.
    :param seq_file: the sequence file containing one sequence.
    :param blosum_dict: the dict which stores the blosum62 matrix.
    :param n: the top n most frequency amino acids in the corresponding column of a frequency profile.
    """
    tng_list = []
    with open(seq_file, 'r') as f:
        for line in f:
            # skip FASTA header lines
            if line.strip().startswith('>'):
                continue
            else:
                line = line.strip()
                for amino in line:
                    amino_blosum = blosum_dict[amino]
                    # top n scoring substitutes from the BLOSUM62 row
                    data = heapq.nlargest(n, enumerate(amino_blosum), key=lambda x: x[1])
                    if n == 1:
                        index = data[0][0]
                        new_alpha = blosum_dict['alphas'][index]
                    else:
                        new_alpha = ''
                        indices, val = list(zip(*data))
                        for i in indices:
                            new_alpha += blosum_dict['alphas'][i]
                    tng_list.append(new_alpha)
    return tng_list


def produce_top_n_gram(pssm_dir, seq_name, n, sw_dir):
    """Produce top-n-gram for all the pssm files.
    :param pssm_dir: the directory used to store pssm files.
    :param seq_name: the name of sequences.
    :param n: the top n most frequency amino acids in the corresponding column of a frequency profile
    :param sw_dir: the main dir of software.
    """
    dir_name = os.path.split(pssm_dir)[0]
    dir_list = os.listdir(pssm_dir)
    index_list = []
    # collect the numeric names of the .pssm files actually produced
    for elem in dir_list:
        pssm_full_path = ''.join([pssm_dir, '/', elem])
        name, suffix = os.path.splitext(elem)
        if os.path.isfile(pssm_full_path) and suffix == '.pssm':
            index_list.append(int(name))
    index_list.sort()
    # PSI-BLAST may fail for some sequences; load BLOSUM62 as a fallback
    if len(index_list) != len(seq_name):
        BLOSUM62 = sw_dir + 'psiblast/blosum62.pkl'
        with open(BLOSUM62, 'rb') as f:
            blosum_dict = pickle.load(f)
    for i in range(1, len(seq_name) + 1):
        if i in index_list:
            pssm_full_path = ''.join([pssm_dir, '/', str(i), '.pssm'])
            tng = produce_one_top_n_gram(pssm_full_path, n)
        else:
            # no PSSM for this sequence: derive the top-n-gram from BLOSUM62
            seq_file = ''.join([dir_name, '/', str(i), '.txt'])
            tng = produce_tng_blosum(seq_file, blosum_dict, n)
        yield tng


def convert_tng_to_fasta(pssm_dir, seq_name, origin_file_name, n, sw_dir):
    """Convert top-n-gram to fasta format.
    :param n: the top n most frequency amino acids in the corresponding column of a frequency profile
    :param pssm_dir: pssm directory.
    :param seq_name: the name of sequences.
    :param origin_file_name: the name of the input file in FASTA format.
    :param sw_dir: the main dir of software.
    :return: path of the written '<name>_tng<ext>' file.
    """
    file_name, suffix = os.path.splitext(origin_file_name)
    tng_file = ''.join([file_name, '_tng', suffix])
    with open(tng_file, 'w') as f:
        for index, tng in enumerate(produce_top_n_gram(pssm_dir, seq_name, n, sw_dir)):
            f.write('>')
            f.write(seq_name[index])
            f.write('\n')
            for elem in tng:
                f.write(elem)
            f.write('\n')
    return tng_file


def tng_words(input_file, fixed_len, word_size, n, process_num, cur_dir, fixed=True):
    """Generate DT words.
    tng: By replacing all the amino acids
    in a protein with their corresponding Top-n-grams,
    a protein sequence can be represented as a sequence of
    Top-n-grams instead of a sequence of amino acids.
    """
    dir_name, seq_name = sep_file(input_file)
    sw_dir = cur_dir + '/software/'
    pssm_dir = produce_all_frequency(dir_name, sw_dir, process_num)
    tng_seq_file = convert_tng_to_fasta(pssm_dir, seq_name, input_file, n, sw_dir)
    # each residue expands to n letters, so the fixed length scales by n
    fixed_len = fixed_len*n
    return km_words(tng_seq_file, PROTEIN, fixed_len, word_size, fixed)


def dr_words(input_file, alphabet, fixed_len, max_dis, fixed=True):
    """
    The Distance Residue method.
    :param input_file: sequence file in the expected FASTA-like format
    :param alphabet: the DNA/RNA/Protein alphabet
    :param fixed: fix the sequence length or not (some bag-of-words models do not)
    :param fixed_len: fixed length for protein sequence
    :param max_dis: the value of the maximum distance.
    """
    assert int(max_dis) > 0
    with open(input_file, 'r') as f:
        seq_list = get_seqs(f, alphabet)
    if fixed is True:
        # fixed_len is typically 400 for protein and 1001 for RNA
        seq_list = seq_length_fixed(seq_list, fixed_len)
    # NOTE(review): the loops below index sequence[0..fixed_len-1]; if
    # fixed=False and a sequence is shorter than fixed_len this raises
    # IndexError -- confirm callers always use fixed=True.
    corpus = []
    for sequence in seq_list:
        corpus_temp = []
        for i in range(fixed_len):
            corpus_temp.append(sequence[i])
            # Paper: Using distances between Top-n-gram and residue pairs for protein remote homology detection
            # This code follows the paper above; it deliberately differs from
            # the dr_method part of BioSeq-Analysis2.0.
            for j in range(1, max_dis + 1):
                if i + j < fixed_len:
                    corpus_temp.append(sequence[i] + sequence[i + j])
        corpus.append(corpus_temp)
    return corpus


def dt_words(input_file, fixed_len, max_dis, process_num, cur_dir, fixed=True):
    """Generate DT words.
    DT: replacing all the amino acids in a protein with their corresponding Top-n-grams.
    """
    dir_name, seq_name = sep_file(input_file)
    sw_dir = cur_dir + '/software/'
    pssm_dir = produce_all_frequency(dir_name, sw_dir, process_num)
    # n=1: each residue maps to its single most frequent substitute
    tng_seq_file = convert_tng_to_fasta(pssm_dir, seq_name, input_file, 1, sw_dir)
    return dr_words(tng_seq_file, PROTEIN, fixed_len, max_dis, fixed)
<file_sep>from ..utils.utils_words import km_words
from ..utils.utils_algorithm import tf_idf
def km_tf_idf(input_file, alphabet, fixed_len, word_size, fixed=True):
    """Build a k-mer word corpus from the sequences and return its TF-IDF feature matrix."""
    corpus = km_words(input_file, alphabet, fixed_len, word_size, fixed)
    return tf_idf(corpus)
<file_sep>import os
import re
import subprocess
import sys
import numpy as np
from .pse import get_phyche_list, get_extra_index, get_phyche_value, get_aaindex, extend_aaindex
from ..utils.utils_const import DNA, RNA, PROTEIN
from ..utils.utils_fasta import get_seqs
from ..utils.utils_words import seq_length_fixed
def get_values(prop, sup_info):
    """Extract the comma-separated value string for property `prop` from the
    supporting-information text; returns '' when the property is absent.

    NOTE: `prop` is interpolated into a regex, so property names containing
    regex metacharacters would be matched as patterns.
    """
    if re.search(prop, sup_info) is None:
        return ""
    match = re.search(prop + "\s*\,(.+)", sup_info)
    return match.group(1) if match else ""


def sep_sequence(seq, k):
    """Slide a width-k window along seq and return the list of its k-mers."""
    sub_seqs = []
    for end in range(k - 1, len(seq)):
        sub_seqs.append(seq[end - k + 1:end + 1])
    return sub_seqs


def get_specific_value(olinuc, olinucs, prop, sup_info):
    """Look up property `prop`'s value for oligonucleotide `olinuc`.

    `olinucs` is the comma-separated header naming the columns of the
    property value rows inside `sup_info`.
    """
    names = olinucs.strip().split(",")
    values = get_values(prop, sup_info).strip().split(",")
    position = names.index(olinuc)
    return float(values[position])
def ave_p(seq, olinucs, length, k, prop, sup_info):
    """Average value of property `prop` over all k-mers of the sequence.

    `seq` is the list of k-mers produced by sep_sequence; indices here are
    1-based conceptually, hence seq[i - 1].
    """
    limit = length - k + 1
    i = 1
    s = 0
    while i < limit or i == limit:
        value = get_specific_value(seq[i - 1], olinucs, prop, sup_info)
        s = s + value
        i = i + 1
    s = s / limit
    return s


# geary
# --------------------------------------
# inputs: seq = string, length = int, k = int, lamada = int, prop = string,
# SupFileName = string
# output: final = int
def geary(seq, olinucs, length, k, lamada, prop, sup_info):
    """Geary autocorrelation of property `prop` at lag `lamada`.

    NOTE(review): `float((top / bottom) * 1000) / 1000.0` looks like an
    attempted 3-decimal rounding but is a no-op (no int truncation) --
    confirm intent.
    """
    lim = length - k + 1
    limit = length - k - lamada + 1
    b = 1
    sqr = 0
    # numerator: squared differences between values `lamada` positions apart
    while b < limit or b == limit:
        cur_value = get_specific_value(seq[b - 1], olinucs, prop, sup_info)
        next_value = get_specific_value(seq[b + lamada - 1], olinucs, prop, sup_info)
        sqr = sqr + ((cur_value - next_value) * (cur_value - next_value))
        b = b + 1
    top = sqr * lim
    limit2 = (length - k - lamada + 1)
    c = 1
    sqr2 = 0
    # denominator: squared deviations from the overall property mean
    while c < limit2 or c == limit2:
        current = get_specific_value(seq[c - 1], olinucs, prop, sup_info)
        avg = ave_p(seq, olinucs, length, k, prop, sup_info)
        sqr2 = sqr2 + (current - avg) * (current - avg)
        c = c + 1
    bottom = sqr2 * limit * 2
    final = float((top / bottom) * 1000) / 1000.0
    return final


# Moreau
# -------------------------------------
# inputs: seq = string, length = int, k = int, lamada = int, prop = string,
# supFileName = string
# output: final = int
def moreau(seq, olinucs, length, k, lamada, prop, sup_info):
    """Normalized Moreau-Broto autocorrelation at lag `lamada`."""
    limit = length - k - lamada + 1
    d = 1
    prod = 0
    # sum of products of property values `lamada` positions apart
    while d < limit or d == limit:
        cur_value = get_specific_value(seq[d - 1], olinucs, prop, sup_info)
        next_value = get_specific_value(seq[d + lamada - 1], olinucs, prop, sup_info)
        prod = prod + (cur_value * next_value)
        d = d + 1
    final = prod / limit
    return final


# moran
# --------------------------------------
# inputs: seq = string, length = int, k = int, lamada = int, prop = string,
# SupFileName = string
# output: final = int
def moran(seq, olinucs, length, k, lamada, prop, sup_info):
    """Moran autocorrelation of property `prop` at lag `lamada`."""
    limit = length - k - lamada + 1
    j = 1
    top = 0
    avg = ave_p(seq, olinucs, length, k, prop, sup_info)
    # numerator: covariance-like sum of mean-centered value pairs
    while j < limit or j == limit:
        cur_value = get_specific_value(seq[j - 1], olinucs, prop, sup_info)
        partOne = cur_value - avg
        next_value = get_specific_value(seq[j + lamada - 1], olinucs, prop, sup_info)
        partTwo = next_value - avg
        top = top + (partOne * partTwo)
        j = j + 1
    top = top / limit
    limit2 = length - k + 1
    bottom = 0
    b = 1
    # denominator: variance of the property values over all k-mers
    while b < limit2 or b == limit2:
        current = get_specific_value(seq[b - 1], olinucs, prop, sup_info)
        bottom = bottom + ((current - avg) * (current - avg))
        b = b + 1
    bottom = bottom / limit2
    final = top / bottom
    return final
def auto_correlation(auto_method, input_file, props, k, lamada, alphabet):
    """Compute autocorrelation features for every sequence in `input_file`.

    :param auto_method: 'MAC' (Moran), 'GAC' (Geary) or 'NMBAC' (Moreau-Broto),
        case-insensitive.
    :param props: list of physicochemical property names; must be non-empty.
    :param k: oligonucleotide size (2 for DNA/RNA dinucleotides, 3 for DNA
        trinucleotides -- other combinations are unsupported).
    :param lamada: the autocorrelation lag.
    :return: numpy array of shape (num_sequences, len(props)), or False when
        no supporting-information file exists for (k, alphabet).
    """
    if not props:
        error_info = 'Error, The phyche_list, extra_index_file and all_prop can\'t be all False.'
        raise ValueError(error_info)
    # NOTE(review): input_data is never closed -- consider a with-statement.
    input_data = open(input_file, 'r')
    sequences = get_seqs(input_data, alphabet)
    # Getting supporting info from files shipped next to this module
    full_path = os.path.realpath(__file__)
    if k == 2 and alphabet == RNA:
        sup_file_name = '%s/data/Supporting_Information_S1_RNA.txt' % os.path.dirname(full_path)
    elif k == 2 and alphabet == DNA:
        sup_file_name = '%s/data/Supporting_Information_S1_DNA.txt' % os.path.dirname(full_path)
    elif k == 3 and alphabet == DNA:
        sup_file_name = '%s/data/Supporting_Information_S3_DNA.txt' % os.path.dirname(full_path)
    else:
        print('Supporting Information error!')
        return False
    sup_file = open(sup_file_name, 'r')
    sup_info = sup_file.read()
    # header row naming the oligonucleotide columns of each property line
    # o = re.search('Physicochemical properties\,(.+)\n', sup_info)
    o = re.search('Physicochemical properties,(.+)\n', sup_info)
    olinucs = ''
    if o:
        olinucs = o.group(1).rstrip()
    sup_file.close()
    # one feature vector per sequence: one value per requested property
    m = 0
    vectors = []
    for sequence in sequences:
        length = len(sequence)
        seq = sep_sequence(sequence, k)
        values = []
        for prop in props:
            if auto_method.upper() == 'MAC':
                value = float("%.3f" % moran(seq, olinucs, length, k, lamada, prop, sup_info))
                values.append(value)
            elif auto_method.upper() == 'GAC':
                value = float("%.3f" % geary(seq, olinucs, length, k, lamada, prop, sup_info))
                values.append(value)
            elif auto_method.upper() == 'NMBAC':
                value = float("%.3f" % moreau(seq, olinucs, length, k, lamada, prop, sup_info))
                values.append(value)
        vectors.append(values)
        m += 1
    return np.array(vectors)


# ====================================================================================================
def acc(input_data, k, lag, phyche_list, alphabet, extra_index_file=None, all_prop=False, theta_type=1):
    """This is a complete acc in PseKNC.
    :param alphabet: DNA, RNA or PROTEIN alphabet constant.
    :param lag: maximum lag for the auto/cross covariance.
    :param input_data: open file object with the input sequences.
    :param k: int, the value of k-tuple.
    :param phyche_list: list, the input physicochemical properties list.
    :param extra_index_file: a file path includes the user-defined phyche_index.
    :param all_prop: bool, choose all physicochemical properties or not.
    :param theta_type: the value 1, 2 and 3 for ac, cc or acc.
    """
    phyche_list = get_phyche_list(k, phyche_list,
                                  extra_index_file=extra_index_file, alphabet=alphabet, all_prop=all_prop)
    phyche_vals = None
    if alphabet == DNA or alphabet == RNA:
        if extra_index_file is not None:
            # merge the user-supplied (normalized) indices with the built-ins
            extra_phyche_index = get_extra_index(extra_index_file)
            from .util_sr import normalize_index
            phyche_vals = get_phyche_value(k, phyche_list, alphabet,
                                           normalize_index(extra_phyche_index, alphabet, is_convert_dict=True))
        else:
            phyche_vals = get_phyche_value(k, phyche_list, alphabet)
    elif alphabet == PROTEIN:
        phyche_vals = get_aaindex(phyche_list)
        if extra_index_file is not None:
            phyche_vals.extend(extend_aaindex(extra_index_file))
    seqs = get_seqs(input_data, alphabet)
    if alphabet == PROTEIN:
        # Transform the data format to dict {acid: [phyche_vals]}.
        phyche_keys = list(phyche_vals[0].index_dict.keys())
        phyche_vals = [list(e.index_dict.values()) for e in phyche_vals]
        new_phyche_vals = list(zip(*[e for e in phyche_vals]))
        phyche_vals = {key: list(val) for key, val in zip(phyche_keys, new_phyche_vals)}
    if theta_type == 1:
        return make_ac_vec(seqs, lag, phyche_vals, k)
    elif theta_type == 2:
        return make_cc_vec(seqs, lag, phyche_vals, k)
    elif theta_type == 3:
        return make_acc_vec(seqs, lag, phyche_vals, k)
def make_ac_vec(sequence_list, lag, phyche_value, k):
    """Auto-covariance feature vectors: one value per (lag, property) pair
    for each sequence.

    NOTE(review): only the first factor is mean-centered here
    (x1 - avg) * x2, whereas make_cc_vec centers both factors -- confirm the
    asymmetry is intentional.
    """
    # number of physicochemical properties per k-mer
    phyche_values = list(phyche_value.values())
    len_phyche_value = len(phyche_values[0])
    vec_ac = []
    for sequence in sequence_list:
        len_seq = len(sequence)
        each_vec = []
        for temp_lag in range(1, lag + 1):
            for j in range(len_phyche_value):
                # Calculate average phyche_value for a nucleotide.
                ave_phyche_value = 0.0
                for i in range(len_seq - k):
                    nucleotide = sequence[i: i + k]
                    ave_phyche_value += float(phyche_value[nucleotide][j])
                ave_phyche_value /= (len_seq - k)
                # Calculate the vector.
                temp_sum = 0.0
                for i in range(len_seq - temp_lag - k + 1):
                    nucleotide1 = sequence[i: i + k]
                    nucleotide2 = sequence[i + temp_lag: i + temp_lag + k]
                    temp_sum += (float(phyche_value[nucleotide1][j]) - ave_phyche_value) * (
                        float(phyche_value[nucleotide2][j]))
                each_vec.append(round(temp_sum / (len_seq - temp_lag - k + 1), 8))
        vec_ac.append(each_vec)
    return np.array(vec_ac)


def make_cc_vec(sequence_list, lag, phyche_value, k):
    """Cross-covariance feature vectors: one value per (lag, property-pair)
    combination (i1 != i2) for each sequence; both factors are mean-centered.
    """
    phyche_values = list(phyche_value.values())
    len_phyche_value = len(phyche_values[0])
    vec_cc = []
    for sequence in sequence_list:
        len_seq = len(sequence)
        each_vec = []
        for temp_lag in range(1, lag + 1):
            for i1 in range(len_phyche_value):
                for i2 in range(len_phyche_value):
                    if i1 != i2:
                        # Calculate average phyche_value for a nucleotide.
                        ave_phyche_value1 = 0.0
                        ave_phyche_value2 = 0.0
                        for j in range(len_seq - k):
                            nucleotide = sequence[j: j + k]
                            ave_phyche_value1 += float(phyche_value[nucleotide][i1])
                            ave_phyche_value2 += float(phyche_value[nucleotide][i2])
                        ave_phyche_value1 /= (len_seq - k)
                        ave_phyche_value2 /= (len_seq - k)
                        # Calculate the vector.
                        temp_sum = 0.0
                        for j in range(len_seq - temp_lag - k + 1):
                            nucleotide1 = sequence[j: j + k]
                            nucleotide2 = sequence[j + temp_lag: j + temp_lag + k]
                            temp_sum += (float(phyche_value[nucleotide1][i1]) - ave_phyche_value1) * \
                                        (float(phyche_value[nucleotide2][i2]) - ave_phyche_value2)
                        each_vec.append(round(temp_sum / (len_seq - temp_lag - k + 1), 8))
        vec_cc.append(each_vec)
    return np.array(vec_cc)
def make_acc_vec(seqs, lag, phyche_values, k):
    """Concatenate the auto-covariance and cross-covariance features column-wise."""
    ac_part = make_ac_vec(seqs, lag, phyche_values, k)
    cc_part = make_cc_vec(seqs, lag, phyche_values, k)
    return np.hstack((ac_part, cc_part))
# --------------------------------------------------------------------------
# PDT method
# --------------------------------------------------------------------------
def pdt_cmd_(input_file, lamada, sw_dir):
    """Concatenation of pdt command.
    :param input_file: the input sequence file in FASTA format.
    :param lamada: the value of parameter lamada.
    :param sw_dir: the main dir of software.
    :return: absolute path of the '<name>_pdt<ext>' output file.
    """
    # pick the platform-specific bundled binary
    if sys.platform.startswith('win'):
        pdt_cmd = sw_dir + 'pdt/pdt.exe'
    else:
        pdt_cmd = sw_dir + 'pdt/pdt'
    # make sure the bundled binary is executable
    os.chmod(pdt_cmd, 0o777)
    aaindex_file = sw_dir + 'pdt/aaindex_norm.txt'
    file_path, suffix = os.path.splitext(input_file)
    output_file = ''.join([file_path, '_pdt', suffix])
    cmd = ''.join([pdt_cmd, ' ', input_file, ' ', aaindex_file, ' ', str(lamada), ' ', output_file])
    subprocess.call(cmd, shell=True)
    return os.path.abspath(output_file)


def pdt(input_file, lamada, sw_dir):
    """Execute pdt command and generate feature vectors.
    :param input_file: the input sequence file in FASTA format.
    :param lamada: the value of parameter lamada.
    :param sw_dir: the main dir of software.
    :return: numpy array of the tab-separated values the pdt binary wrote.
    """
    output_file = pdt_cmd_(input_file, lamada, sw_dir)
    vector_list = []
    with open(output_file, 'r') as f:
        for line in f:
            temp_list = line.strip().split('\t')
            vector = [round(float(elem), 3) for elem in temp_list]
            vector_list.append(vector)
    return np.array(vector_list)
def nd(input_file, alphabet, fixed_len):
    """Nucleotide density encoding for gene sequences.

    For each position j of each (padded/truncated) sequence, the feature is
    the cumulative frequency of the nucleotide at j within the prefix
    seq[0:j+1]; positions holding anything other than A/U/C/G/T (e.g. the
    'X' padding) are 0.

    Bug fix: values are now written into the row of the sequence being
    processed (nd_list[idx][j]); the original wrote nd_list[j], which
    overwrote entire rows and left every sequence with the values computed
    for the last one.

    :return: numpy array of shape (num_sequences, fixed_len).
    """
    with open(input_file, 'r') as f:
        seq_list = get_seqs(f, alphabet)
    seq_list = seq_length_fixed(seq_list, fixed_len)
    nd_list = np.zeros((len(seq_list), fixed_len))
    bases = ('A', 'U', 'C', 'G', 'T')
    for idx, seq in enumerate(seq_list):
        for j in range(min(len(seq), fixed_len)):
            base = seq[j].upper()
            if base in bases:
                # count occurrences of the (uppercase) base in the prefix,
                # matching the original's case handling
                nd_list[idx][j] = round(seq[0:j + 1].count(base) / (j + 1), 3)
            else:
                nd_list[idx][j] = 0.0
    return nd_list
<file_sep>import multiprocessing
import os
import time
from CheckAll import ml_params_check, dl_params_check, make_params_dicts, Classification, DeepLearning, \
Method_Semantic_Similarity, prepare4train_seq
from FeatureExtractionMode.utils.utils_write import opt_params2file, gen_label_array, fixed_len_control
from MachineLearningAlgorithm.Classification.dl_machine import dl_cv_process, dl_ind_process
from MachineLearningAlgorithm.Classification.ml_machine import ml_cv_process, ml_cv_results, ml_ind_results, \
ml_score_cv_process, ml_score_cv_results, ml_score_ind_results
from MachineLearningAlgorithm.utils.utils_read import files2vectors_info, seq_label_read, read_dl_vec4seq
from SemanticSimilarity import ind_score_process
def ml_process(args):
# 从输入的向量文件获取特征向量,标签数组和样本数目
vectors, sp_num_list, vec_files = files2vectors_info(args.vec_file, args.format)
# 生成标签数组
label_array = gen_label_array(sp_num_list, args.label)
# 对SVM或RF的参数进行检查并生成参数字典集合
all_params_list_dict = {}
all_params_list_dict = ml_params_check(args, all_params_list_dict)
# 列表字典 ---> 字典列表
params_dict_list = make_params_dicts(all_params_list_dict)
# 并行参数筛选
pool = multiprocessing.Pool(args.cpu)
# 在参数便利前进行一系列准备工作: 1. 固定划分;2.设定指标;3.指定任务类型
args = prepare4train_seq(args, label_array, dl=False)
params_dict_list_pro = []
for i in range(len(params_dict_list)):
params_dict = params_dict_list[i]
params_dict['out_files'] = vec_files
params_dict_list_pro.append(pool.apply_async(one_ml_process, (args, vectors, label_array, args.folds, vec_files,
params_dict)))
pool.close()
pool.join()
params_selected = params_select(params_dict_list_pro, args.results_dir)
ml_results(args, vectors, label_array, args.folds, params_selected['out_files'], params_selected)
if args.ind_vec_file is not None:
# 从输入的独立测试向量文件获取特征向量,标签数组和样本数目
ind_vectors, ind_sp_num_list, ind_vec_files = files2vectors_info(args.ind_vec_file, args.format)
# 生成标签数组
ind_label_array = gen_label_array(ind_sp_num_list, args.label)
ind_ml_results(args, vectors, label_array, ind_vectors, ind_label_array, params_selected)
def one_ml_process(args, vectors, labels, folds, vec_files, params_dict):
    """Evaluate one parameter combination via cross-validation.

    Routes to the semantic-similarity scoring variant when a score method is
    set; otherwise runs plain ML cross-validation. Returns params_dict with
    its metric filled in.
    """
    if args.score != 'none':
        return ml_score_cv_process(args.ml, vec_files, args.folds_num, args.metric_index,
                                   args.sp, args.multi, args.format, params_dict)
    return ml_cv_process(args.ml, vectors, labels, folds, args.metric_index, args.sp, args.multi, args.res,
                         params_dict)
def ml_results(args, vectors, labels, folds, vec_files, params_selected):
    """Run the final cross-validation with the selected parameters and report results."""
    if args.score == 'none':
        ml_cv_results(args.ml, vectors, labels, folds, args.sp, args.multi, args.res,
                      args.results_dir, params_selected)
        return params_selected
    ml_score_cv_results(args.ml, vec_files, labels, args.folds_num, args.sp, args.multi,
                        args.format, args.results_dir, params_selected)
    return params_selected
def ind_ml_results(args, vectors, labels, ind_vectors, ind_labels, params_selected):
    """Evaluate the selected model on the independent test dataset."""
    if args.score != 'none':
        # scoring path: compute semantic-similarity scores for the independent set first
        ind_score_process(args.score, vectors, args.ind_vec_file, labels, ind_labels, args.format, args.cpu)
        ml_score_ind_results(args.ml, args.ind_vec_file[0], args.sp, args.multi, args.format,
                             args.results_dir, params_selected)
    else:
        ml_ind_results(args.ml, ind_vectors, ind_labels, args.multi, args.res, args.results_dir, params_selected)
def params_select(params_list, out_dir):
    """Pick the best parameter dict from a list of multiprocessing AsyncResults.

    :param params_list: list of AsyncResult objects whose .get() yields a
        params dict containing a 'metric' entry.
    :param out_dir: directory where the optimal parameters are written.
    :return: the best params dict, with its 'metric' entry removed.
    """
    # Fetch each AsyncResult payload exactly once; the original called .get()
    # repeatedly inside the loop and compared element 0 against itself.
    results = [res.get() for res in params_list]
    params_list_selected = results[0]
    for candidate in results[1:]:
        if candidate['metric'] > params_list_selected['metric']:
            params_list_selected = candidate
    del params_list_selected['metric']
    # params_list_selected['PSI'] = metric
    # table_params(params_list_selected, True)  # print the selected parameters
    opt_params2file(params_list_selected, out_dir)  # write the optimal parameters to file
    return params_list_selected
def dl_process(args):
    """Train/evaluate a deep-learning model on sequence-level feature vectors.

    Reads the vector file(s), checks DL hyper-parameters, then either runs
    cross-validation (no independent set given) or trains on the benchmark
    data and tests on the independent set.
    """
    # read vectors, per-file sample counts and per-sequence lengths
    # fixed_seq_len_list: sequence lengths with fixed_len as the maximum
    vectors, sp_num_list, fixed_seq_len_list = read_dl_vec4seq(args, args.vec_file, return_sp=True)
    # build the label array
    label_array = gen_label_array(sp_num_list, args.label)
    # clamp/choose the fixed sequence length
    args.fixed_len = fixed_len_control(fixed_seq_len_list, args.fixed_len)
    # validate the deep-learning parameters and collect them
    all_params_list_dict = {}
    all_params_list_dict = dl_params_check(args, all_params_list_dict)
    # dict of lists ---> list of dicts (DL uses a single combination)
    params_dict = make_params_dicts(all_params_list_dict)[0]
    # split data set according to cross validation approach
    if args.ind_vec_file is None:
        # preparation before training: 1. fixed folds; 2. metric; 3. task type
        args = prepare4train_seq(args, label_array, dl=True)
        dl_cv_process(args.ml, vectors, label_array, fixed_seq_len_list, args.fixed_len, args.folds, args.results_dir,
                      params_dict)
    else:
        # BUG FIX: read the *independent* vector file (was args.vec_file, which
        # re-read the training vectors)
        ind_vectors, ind_sp_num_list, ind_fixed_seq_len_list = read_dl_vec4seq(args, args.ind_vec_file, return_sp=True)
        ind_label_array = seq_label_read(ind_sp_num_list, args.label)
        # BUG FIX: pass the local ind_vectors (args.ind_vectors is never set and
        # raised AttributeError at runtime)
        dl_ind_process(args.ml, vectors, label_array, fixed_seq_len_list, ind_vectors, ind_label_array,
                       ind_fixed_seq_len_list, args.fixed_len, args.results_dir, params_dict)
def main(args):
    """Entry point: route sequence-level analysis to deep learning or classic ML."""
    print("\nStep into analysis...\n")
    begin = time.time()
    # results live next to the first input vector file
    args.results_dir = os.path.dirname(os.path.abspath(args.vec_file[0])) + '/'
    args.res = False
    handler = dl_process if args.ml in DeepLearning else ml_process
    handler(args)
    print("Done.")
    print(("Used time: %.2fs" % (time.time() - begin)))
# CLI definition: parse ML/DL hyper-parameters and I/O options, then run main().
if __name__ == '__main__':
    import argparse
    parse = argparse.ArgumentParser(prog='BioSeq-NLP', description="Step into analysis, please select parameters ")
    parse.add_argument('-ml', type=str, choices=Classification, required=True,
                       help="The machine learning algorithm, for example: Support Vector Machine(SVM).")
    # parameters for scoring
    parse.add_argument('-score', type=str, choices=Method_Semantic_Similarity, default='none',
                       help="Choose whether calculate semantic similarity score and what method for calculation.")
    # ----------------------- parameters for MachineLearning---------------------- #
    parse.add_argument('-cpu', type=int, default=1,
                       help="The number of CPU cores used for multiprocessing during parameter selection process."
                            "(default=1).")
    parse.add_argument('-grid', type=int, nargs='*', choices=[0, 1], default=0,
                       help="grid = 0 for rough grid search, grid = 1 for meticulous grid search.")
    # parameters for svm
    parse.add_argument('-cost', type=int, nargs='*', help="Regularization parameter of 'SVM'.")
    parse.add_argument('-gamma', type=int, nargs='*', help="Kernel coefficient for 'rbf' of 'SVM'.")
    # parameters for rf
    parse.add_argument('-tree', type=int, nargs='*', help="The number of trees in the forest for 'RF'.")
    # ----------------------- parameters for DeepLearning---------------------- #
    parse.add_argument('-lr', type=float, default=0.99, help="The value of learning rate for deep learning.")
    parse.add_argument('-epochs', type=int, help="The epoch number for train deep model.")
    parse.add_argument('-batch_size', type=int, default=50, help="The size of mini-batch for deep learning.")
    parse.add_argument('-dropout', type=float, default=0.6, help="The value of dropout prob for deep learning.")
    # parameters for LSTM, GRU
    parse.add_argument('-hidden_dim', type=int, default=256,
                       help="The size of the intermediate (a.k.a., feed forward) layer.")
    parse.add_argument('-n_layer', type=int, default=2, help="The number of units for 'LSTM' and 'GRU'.")
    # parameters for CNN
    parse.add_argument('-out_channels', type=int, default=256, help="The number of output channels for 'CNN'.")
    parse.add_argument('-kernel_size', type=int, default=5, help="The size of stride for 'CNN'.")
    # parameters for Transformer and Weighted-Transformer
    parse.add_argument('-d_model', type=int, default=256,
                       help="The dimension of multi-head attention layer for Transformer or Weighted-Transformer.")
    parse.add_argument('-d_ff', type=int, default=1024,
                       help="The dimension of fully connected layer of Transformer or Weighted-Transformer.")
    parse.add_argument('-n_heads', type=int, default=4,
                       help="The number of heads for Transformer or Weighted-Transformer.")
    # parameters for Reformer
    parse.add_argument('-n_chunk', type=int, default=8,
                       help="The number of chunks for processing lsh attention.")
    parse.add_argument('-rounds', type=int, default=1024,
                       help="The number of rounds for multiple rounds of hashing to reduce probability that similar "
                            "items fall in different buckets.")
    parse.add_argument('-bucket_length', type=int, default=64,
                       help="Average size of qk per bucket, 64 was recommended in paper")
    # parameters for ML parameter selection and cross validation
    parse.add_argument('-metric', type=str, choices=['Acc', 'MCC', 'AUC', 'BAcc', 'F1'], default='Acc',
                       help="The metric for parameter selection")
    parse.add_argument('-cv', choices=['5', '10', 'j'], default='5',
                       help="The cross validation mode.\n"
                            "5 or 10: 5-fold or 10-fold cross validation.\n"
                            "j: (character 'j') jackknife cross validation.")
    parse.add_argument('-sp', type=str, choices=['none', 'over', 'under', 'combine'], default='none',
                       help="Select technique for oversampling.")
    # ----------------------- parameters for input and output ---------------------- #
    parse.add_argument('-vec_file', nargs='*', required=True, help="The input feature vector files.")
    parse.add_argument('-label', type=int, nargs='*', required=True,
                       help="The corresponding label of input sequence files. For deep learning method, the label can "
                            "only set as positive integer")
    parse.add_argument('-ind_vec_file', nargs='*', help="The input feature vector files of independent test dataset.")
    parse.add_argument('-fixed_len', type=int,
                       help="The length of sequence will be fixed via cutting or padding. If you don't set "
                            "value for 'fixed_len', it will be the maximum length of all input sequences. ")
    parse.add_argument('-format', default='csv', choices=['tab', 'svm', 'csv', 'tsv'],
                       help="The output format (default = csv).\n"
                            "tab -- Simple format, delimited by TAB.\n"
                            "svm -- The libSVM training data format.\n"
                            "csv, tsv -- The format that can be loaded into a spreadsheet program.")
    argv = parse.parse_args()
    main(argv)
<file_sep>from .kmer_bow import km_bow
from .mismatch_bow import mismatch_bow
from .subsequence_bow import subsequence_bow
from .tng_bow import tng_bow
from .dr_bow import dr_bow
from .dt_bow import dt_bow
from ..utils.utils_write import vectors2files
from ..utils.utils_const import DNA, RNA, PROTEIN
def bow(input_file, category, words, sample_num_list, out_format, out_file_list, cur_dir, tm=False, **param_dict):
    """Bag-of-words feature extraction dispatcher.

    Picks the residue alphabet from `category`, builds BOW vectors with the
    word-segmentation method named by `words`, then either returns the raw
    vectors (tm not False) or writes them to the output files.
    Returns False when `words` names an unknown method.
    """
    alphabet = {'DNA': DNA, 'RNA': RNA}.get(category, PROTEIN)
    if words == 'Kmer':
        vectors = km_bow(input_file, k=param_dict['word_size'], alphabet=alphabet, rev_comp=False)
    elif words == 'RevKmer':
        vectors = km_bow(input_file, k=param_dict['word_size'], alphabet=alphabet, rev_comp=True)
    elif words == 'Mismatch':
        vectors = mismatch_bow(input_file, alphabet, k=param_dict['word_size'], m=param_dict['mis_num'])
    elif words == 'Subsequence':
        vectors = subsequence_bow(input_file, alphabet, k=param_dict['word_size'], delta=param_dict['delta'])
    elif words == 'Top-N-Gram':
        vectors = tng_bow(input_file, n=param_dict['top_n'], cur_dir=cur_dir, process_num=param_dict['cpu'])
    elif words == 'DR':
        vectors = dr_bow(input_file, max_dis=param_dict['max_dis'])
    elif words == 'DT':
        vectors = dt_bow(input_file, max_dis=param_dict['max_dis'], cur_dir=cur_dir, process_num=param_dict['cpu'])
    else:
        print('word segmentation method error!')
        return False
    if tm is not False:
        return vectors
    vectors2files(vectors, sample_num_list, out_format, out_file_list)
<file_sep>from ..utils.utils_words import km_words
from ..utils.utils_algorithm import text_rank
def km_text_rank(input_file, alphabet, fixed_len, word_size, alpha, fixed=True):
    """Run TextRank over the k-mer word corpus built from the input sequences."""
    words_corpus = km_words(input_file, alphabet, fixed_len, word_size, fixed)
    return text_rank(words_corpus, alpha)
<file_sep>import os
import numpy as np
from ..utils.utils_words import make_km_list
from ..utils.utils_const import PROTEIN
from ..utils.utils_pssm import sep_file, produce_all_frequency
from ..utils.utils_words import produce_top_n_gram
def tng_bow(input_file, n, cur_dir, process_num):
    """Generate top-n-gram bag-of-words vectors from PSSM frequency profiles.

    :param input_file: input sequence file.
    :param n: the n most frequent amino acids in the amino acid frequency profiles.
    :param cur_dir: the main dir of code.
    :param process_num: the number of processes used for multiprocessing.
    :return: numpy array of per-sequence top-n-gram occurrence-frequency vectors.
    """
    # split the multi-FASTA into per-sequence files and run PSI-BLAST profiles
    pssm_path, seq_name = sep_file(input_file)
    sw_dir = cur_dir + '/software/'
    pssm_dir = produce_all_frequency(pssm_path, sw_dir, process_num)
    print('pssm_dir: ', pssm_dir)
    # debug mode on/off: uncomment to reuse a previously generated pssm dir
    # pssm_dir = cur_dir + "/data/results/Protein/sequence/OHE/SVM/PSSM/all_seq/pssm"
    dir_name = os.path.split(pssm_dir)[0]
    fasta_name = os.path.split(dir_name)[1]
    final_result = ''.join([dir_name, '/final_result'])
    print('final_result: ', final_result)
    if not os.path.isdir(final_result):
        os.mkdir(final_result)
    # dump the generated top-n-grams to a FASTA-like text file for inspection
    tng_file_name = ''.join([final_result, '/', fasta_name, '_new.txt'])
    with open(tng_file_name, 'w') as f:
        for index, tng in enumerate(produce_top_n_gram(pssm_dir, seq_name, n, sw_dir)):
            f.write('>')
            f.write(seq_name[index])
            f.write('\n')
            for elem in tng:
                f.write(elem)
                f.write(' ')
            f.write('\n')
    # count the occurrence frequency of each possible n-gram per sequence
    gram_list = make_km_list(n, PROTEIN)
    vector_list = []
    for tng in produce_top_n_gram(pssm_dir, seq_name, n, sw_dir):
        vec_len = len(tng)
        # print vec_len
        vector = []
        for elem in gram_list:
            gram_count = tng.count(elem)
            occur_freq = round((gram_count * 1.0) / vec_len, 4)
            vector.append(occur_freq)
        vector_list.append(vector)
    return np.array(vector_list)
<file_sep>import sys
import re
import numpy as np
from ..utils.utils_words import make_km_list
from ..utils.utils_bow import frequency
from ..utils.utils_fasta import get_seqs
def find_rev_comp(sequence, rev_comp_dictionary):
    """Return the reverse complement of a DNA sequence.

    Results are memoised in the caller-supplied dict to save time on repeated
    lookups. Exits the program on any character outside A/C/G/T/N.
    """
    cached = rev_comp_dictionary.get(sequence)
    if cached is not None:
        return cached
    complement = {"A": "T", "C": "G", "G": "C", "T": "A", "N": "N"}
    return_value = ""
    for letter in reversed(sequence):
        if letter not in complement:
            error_info = ("Unknown DNA character (%s)\n" % letter)
            sys.exit(error_info)
        return_value += complement[letter]
    # Store this value for future use.
    rev_comp_dictionary[sequence] = return_value
    return return_value
def _cmp(a, b):
return (a > b) - (a < b)
def make_rev_comp_km_list(km_list):
    """Keep only canonical k-mers: those not lexicographically greater than their reverse complement."""
    cache = {}
    canonical = []
    for km in km_list:
        if _cmp(km, find_rev_comp(km, cache)) <= 0:
            canonical.append(km)
    return canonical
def km_bow(input_file, k, alphabet, rev_comp=False):
    """Generate the k-mer bag-of-words frequency matrix.

    :param input_file: input FASTA sequence file.
    :param k: k-mer size.
    :param alphabet: residue alphabet; only DNA may use rev_comp.
    :param rev_comp: collapse each k-mer with its reverse complement.
    :return: numpy array, one normalised k-mer frequency vector per sequence.
    """
    if rev_comp and re.search(r'[^acgtACGT]', ''.join(alphabet)) is not None:
        sys.exit("Error, Only DNA sequence can be reverse compliment.")
    with open(input_file, 'r') as f:
        seq_list = get_seqs(f, alphabet)
    km_list = make_km_list(k, alphabet)
    # FIX: share one memoisation dict across all sequences/k-mers; the original
    # passed a fresh {} on every call, defeating find_rev_comp's caching.
    rev_comp_cache = {}
    # FIX: the canonical k-mer list is invariant, so build it once instead of
    # once per sequence inside the loop.
    rev_comp_km_list = make_rev_comp_km_list(km_list) if rev_comp else None
    vector = []
    non = 1
    for seq in seq_list:
        print(non)  # progress indicator (one line per sequence)
        non += 1
        count_sum = 0
        # Generate the km frequency dict.
        km_count = {}
        for km in km_list:
            temp_count = frequency(seq, km)
            if not rev_comp:
                km_count[km] = km_count.get(km, 0) + temp_count
            else:
                rev_km = find_rev_comp(km, rev_comp_cache)
                # fold each k-mer into its canonical (lexicographically smaller) form
                key = km if km <= rev_km else rev_km
                km_count[key] = km_count.get(key, 0) + temp_count
            count_sum += temp_count
        # Normalize.
        if not rev_comp:
            count_vec = [km_count[km] for km in km_list]
        else:
            count_vec = [km_count[km] for km in rev_comp_km_list]
        count_vec = [round(float(e) / count_sum, 8) for e in count_vec]
        vector.append(count_vec)
    return np.array(vector)
<file_sep>import multiprocessing
import os
import time
from CheckAll import Machine_Learning_Algorithm, DeepLearning, prepare4train_res
from FeatureExtractionMode.utils.utils_write import opt_params2file
from MachineLearningAlgorithm.Classification.ml_machine import ml_cv_process
from MachineLearningAlgorithm.Classification.ml_machine import ml_cv_results, ml_ind_results
from MachineLearningAlgorithm.SequenceLabelling.dl_machine import dl_cv_process as res_dcp
from MachineLearningAlgorithm.SequenceLabelling.dl_machine import dl_ind_process as res_dip
from MachineLearningAlgorithm.utils.utils_read import files2vectors_res, read_base_mat4res, res_label_read
def res_cl_process(args):
    """Parameter-select, train and evaluate SVM/RF for residue-level classification."""
    # read the feature vector files
    vectors, sp_num_list = files2vectors_res(args.vec_file, args.format)
    # build the label array from the per-label sample counts
    label_array = res_label_read(sp_num_list, args.label)
    # ** residue-level feature extraction and label generation finished ** #

    # preparation before the parameter sweep: 1. fixed folds; 2. metric; 3. task type
    args = prepare4train_res(args, label_array, dl=False)
    # ** sweep the SVM/RF parameter dict list to select parameters ** #
    # SVM/RF parameter dictionaries
    params_dict_list = args.params_dict_list
    # multiprocessing control
    pool = multiprocessing.Pool(args.cpu)
    params_dict_list_pro = []
    for i in range(len(params_dict_list)):
        params_dict = params_dict_list[i]
        params_dict_list_pro.append(pool.apply_async(one_cl_process, (args, vectors, label_array, args.folds,
                                                                      params_dict)))
    pool.close()
    pool.join()
    # ** sweep finished ** #

    # select the parameter combination with the best metric
    params_selected = params_select(params_dict_list_pro, args.results_dir)
    # build the final classifier with the selected parameters
    ml_cv_results(args.ml, vectors, label_array, args.folds, args.sp, args.multi, args.res, args.results_dir,
                  params_selected)
    # -------- independent test -------- #
    # i.e. evaluate the independent dataset on the best model
    if args.ind_vec_file is not None:
        # read the independent feature vector files
        ind_vectors, ind_sp_num_list = files2vectors_res(args.ind_vec_file, args.format)
        # build the independent label array from the per-label sample counts
        ind_label_array = res_label_read(ind_sp_num_list, args.label)
        # ** residue-level feature extraction and label generation finished ** #
        ml_ind_results(args.ml, ind_vectors, ind_label_array, args.multi, args.res, args.results_dir, params_selected)
    # -------- independent test -------- #
def res_dl_process(args):
    """Train/evaluate a deep-learning sequence-labelling model at residue level."""
    # deep-learning parameter dictionary (single combination)
    params_dict = args.params_dict_list[0]
    # read the base feature matrix and the capped per-sequence lengths
    vec_mat, fixed_seq_len_list = read_base_mat4res(args.fea_file, args.fixed_len)
    # unlike SVM/RF, deep learning consumes the whole matrix directly
    if args.ind_vec_file is None:
        # preparation before training: 1. fixed folds; 2. metric; 3. task type
        # FIX: this is the deep-learning path, so dl=True (was dl=False,
        # apparently copy-pasted from the classic-ML path; the sequence-level
        # dl_process uses dl=True for the same preparation step)
        args = prepare4train_res(args, args.res_labels_list, dl=True)
        res_dcp(args.ml, vec_mat, args.res_labels_list, fixed_seq_len_list, args.fixed_len, args.folds,
                args.results_dir, params_dict)
    else:
        ind_res_dl_fe_process(args, vec_mat, args.res_labels_list, fixed_seq_len_list, params_dict)
def ind_res_dl_fe_process(args, vec_mat, res_labels_list, fixed_seq_len_list, params_dict):
    """Train on the benchmark residue data and evaluate on the independent set."""
    print('########################## Independent Test Begin ##########################\n')
    ind_mat, ind_len_list = read_base_mat4res(args.ind_fea_file, args.fixed_len)
    res_dip(args.ml, vec_mat, res_labels_list, fixed_seq_len_list, ind_mat, args.ind_res_labels_list,
            ind_len_list, args.fixed_len, args.results_dir, params_dict)
    print('########################## Independent Test Finish ##########################\n')
def one_cl_process(args, vectors, labels, folds, params_dict):
    """Run one SVM/RF cross-validation round for a single parameter combination."""
    return ml_cv_process(args.ml, vectors, labels, folds, args.metric_index, args.sp, args.multi, args.res,
                         params_dict)
def params_select(params_list, out_dir):
    """Pick the best parameter dict from a list of multiprocessing AsyncResults.

    :param params_list: list of AsyncResult objects whose .get() yields a
        params dict containing a 'metric' entry.
    :param out_dir: directory where the optimal parameters are written.
    :return: the best params dict, with its 'metric' entry removed.
    """
    # Fetch each AsyncResult payload exactly once; the original called .get()
    # repeatedly inside the loop and compared element 0 against itself.
    results = [res.get() for res in params_list]
    params_list_selected = results[0]
    for candidate in results[1:]:
        if candidate['metric'] > params_list_selected['metric']:
            params_list_selected = candidate
    del params_list_selected['metric']
    opt_params2file(params_list_selected, out_dir)  # write the optimal parameters to file
    return params_list_selected
def main(args):
    """Entry point: route residue-level analysis to deep learning or classic ML."""
    print("\nStep into analysis...\n")
    begin = time.time()
    # results live next to the first input vector file
    args.results_dir = os.path.dirname(os.path.abspath(args.vec_file[0])) + '/'
    args.res = True
    handler = res_dl_process if args.ml in DeepLearning else res_cl_process
    handler(args)
    print("Done.")
    print(("Used time: %.2fs" % (time.time() - begin)))
# CLI definition for residue-level analysis: parse hyper-parameters and I/O
# options, then run main().
if __name__ == '__main__':
    import argparse
    parse = argparse.ArgumentParser(prog='BioSeq-NLP', description="Step into analysis, please select parameters ")
    parse.add_argument('-ml', type=str, choices=Machine_Learning_Algorithm, required=True,
                       help="The machine-learning algorithm for constructing predictor, "
                            "for example: Support Vector Machine (SVM).")
    # ----------------------- parameters for MachineLearning---------------------- #
    parse.add_argument('-cpu', type=int, default=1,
                       help="The number of CPU cores used for multiprocessing during parameter selection process."
                            "(default=1).")
    parse.add_argument('-grid', type=int, nargs='*', choices=[0, 1], default=0,
                       help="grid = 0 for rough grid search, grid = 1 for meticulous grid search.")
    # parameters for svm
    parse.add_argument('-cost', type=int, nargs='*', help="Regularization parameter of 'SVM'.")
    parse.add_argument('-gamma', type=int, nargs='*', help="Kernel coefficient for 'rbf' of 'SVM'.")
    # parameters for rf
    parse.add_argument('-tree', type=int, nargs='*', help="The number of trees in the forest for 'RF'.")
    # ----------------------- parameters for DeepLearning---------------------- #
    parse.add_argument('-lr', type=float, default=0.99, help="The value of learning rate for deep learning.")
    parse.add_argument('-epochs', type=int, help="The epoch number for train deep model.")
    parse.add_argument('-batch_size', type=int, default=50, help="The size of mini-batch for deep learning.")
    parse.add_argument('-dropout', type=float, default=0.6, help="The value of dropout prob for deep learning.")
    # parameters for LSTM, GRU
    parse.add_argument('-hidden_dim', type=int, default=256,
                       help="The size of the intermediate (a.k.a., feed forward) layer.")
    parse.add_argument('-n_layer', type=int, default=2, help="The number of units for 'LSTM' and 'GRU'.")
    # parameters for CNN
    parse.add_argument('-out_channels', type=int, default=256, help="The number of output channels for 'CNN'.")
    parse.add_argument('-kernel_size', type=int, default=5, help="The size of stride for 'CNN'.")
    # parameters for Transformer and Weighted-Transformer
    parse.add_argument('-d_model', type=int, default=256,
                       help="The dimension of multi-head attention layer for Transformer or Weighted-Transformer "
                            "or Reformer.")
    parse.add_argument('-d_ff', type=int, default=1024,
                       help="The dimension of feed forward layer of Transformer or Weighted-Transformer "
                            "or Reformer.")
    parse.add_argument('-n_heads', type=int, default=4,
                       help="The number of heads for multi-head attention.")
    # parameters for Reformer
    parse.add_argument('-n_chunk', type=int, default=8,
                       help="The number of chunks for processing lsh attention.")
    parse.add_argument('-rounds', type=int, default=1024,
                       help="The number of rounds for multiple rounds of hashing to reduce probability that similar "
                            "items fall in different buckets.")
    parse.add_argument('-bucket_length', type=int, default=64,
                       help="Average size of qk per bucket, 64 was recommended in paper")
    # parameters for ML parameter selection and cross validation
    parse.add_argument('-metric', type=str, choices=['Acc', 'MCC', 'AUC', 'BAcc', 'F1'], default='Acc',
                       help="The metric for parameter selection")
    parse.add_argument('-cv', choices=['5', '10', 'j'], default='5',
                       help="The cross validation mode.\n"
                            "5 or 10: 5-fold or 10-fold cross validation.\n"
                            "j: (character 'j') jackknife cross validation.")
    parse.add_argument('-sp', type=str, choices=['none', 'over', 'under', 'combine'], default='none',
                       help="Select technique for oversampling.")
    # ----------------------- parameters for input and output ---------------------- #
    parse.add_argument('-vec_file', nargs='*', required=True, help="The input feature vector file(s).")
    parse.add_argument('-label_file', required=True, help="The corresponding label file is required.")
    parse.add_argument('-ind_vec_file', nargs='*', help="The feature vector files of independent test dataset.")
    parse.add_argument('-ind_label_file', help="The corresponding label file of independent test dataset.")
    parse.add_argument('-fixed_len', type=int,
                       help="The length of sequence will be fixed via cutting or padding. If you don't set "
                            "value for 'fixed_len', it will be the maximum length of all input sequences. ")
    # parameters for output
    parse.add_argument('-format', default='csv', choices=['tab', 'svm', 'csv', 'tsv'],
                       help="The output format (default = csv).\n"
                            "tab -- Simple format, delimited by TAB.\n"
                            "svm -- The libSVM training data format.\n"
                            "csv, tsv -- The format that can be loaded into a spreadsheet program.")
    argv = parse.parse_args()
    main(argv)
<file_sep>import os
import time
from CheckAll import FE_PATH_Res, FE_BATCH_PATH_Res
from CheckAll import results_dir_check, check_contain_chinese, seq_sys_check, res_feature_check, Method_Res
from FeatureExtractionMode.OHE.OHE4vec import ohe2res_base, sliding_win2files, mat_list2frag_array
from FeatureExtractionMode.utils.utils_write import read_res_seq_file, read_res_label_file, fixed_len_control, \
res_file_check, out_res_file, out_dl_frag_file, res_base2frag_vec
from MachineLearningAlgorithm.utils.utils_read import read_base_vec_list4res
def create_results_dir(args, cur_dir):
    """Build (and ensure existence of) the results directory.

    Batch mode (bp == 1) nests the directory by category and method.
    """
    if args.bp == 1:
        out_dir = cur_dir + FE_BATCH_PATH_Res + str(args.category) + "/" + str(args.method) + "/"
    else:
        out_dir = cur_dir + FE_PATH_Res
    results_dir_check(out_dir)
    return out_dir
def res_fe_process(args, fragment):
    """Generate residue-level feature files as SVM/RF input.

    :param args: parsed CLI namespace (labels, paths, format, window size...).
    :param fragment: 0 = sliding-window per-residue features, 1 = flattened fragments.
    """
    # ** residue-level feature extraction and label generation start ** #
    # name the files that will hold the SVM/RF input features
    out_files = out_res_file(args.label, args.results_dir, args.format, args.fragment, ind=False)
    # read the base feature file, to be written out per residue
    # NOTE(review): main() sets args.fea_file (singular); confirm args.fea_files
    # is populated by the caller before this runs
    vectors_list = read_base_vec_list4res(args.fea_files)
    # fragment switch: generate the matching feature vectors
    if fragment == 0:
        # FIX: typo in the assertion message (was "lease set window size!")
        assert args.window is not None, "If -fragment is 0, please set window size!"
        # with fragment=0, the sliding-window trick gives every residue its own features
        sliding_win2files(vectors_list, args.res_labels_list, args.window, args.format, out_files)
    else:
        # with fragment=1, flatten the base features of each residue fragment
        mat_list2frag_array(vectors_list, args.res_labels_list, args.fixed_len, args.format, out_files)
def frag_fe_process(args):
    """Generate deep-learning feature files for fragment mode (fragment == 1)."""
    # output file names for the deep-learning fragment vectors
    dl_out_files = out_dl_frag_file(args.label, args.results_dir, ind=False)
    # write the fragment-level deep feature vectors
    res_base2frag_vec(args.fea_file, args.res_labels_list, args.fixed_len, dl_out_files)
def main(args):
    """Entry point for residue-level feature extraction: validate inputs, extract base features, write vector files."""
    print("\nStep into analysis...\n")
    start_time = time.time()
    current_path = os.path.dirname(os.path.realpath(__file__))
    args.current_dir = os.path.dirname(os.getcwd())
    # reject paths containing Chinese characters (breaks external tools)
    check_contain_chinese(current_path)
    # check that the mode/ml combination is valid
    args.mode = 'OHE'
    args.score = 'none'
    seq_sys_check(args, True)
    # create the results directory
    args.results_dir = create_results_dir(args, args.current_dir)
    # read the length of every sequence in the input file
    seq_len_list = read_res_seq_file(args.seq_file, args.category)
    # read label lists and per-label lengths; res_labels_list --> list[list1, list2,..]
    args.res_labels_list, label_len_list = read_res_label_file(args.label_file)
    # fragment=0: check no labels are missing and min sequence length > 5; fragment=1: check the label is unique
    res_file_check(seq_len_list, label_len_list, args.fragment)
    # residue-level problems use a fixed binary label set
    args.label = [1, 0]
    # fix the sequence length (only needs to happen once, on the benchmark dataset)
    args.fixed_len = fixed_len_control(seq_len_list, args.fixed_len)
    # validate the chosen residue-level method
    res_feature_check(args)
    # base output file for all residue features on the benchmark dataset
    args.fea_file = args.results_dir + 'res_features.txt'
    # extract residue-level features and write the base vector file
    ohe2res_base(args.seq_file, args.category, args.method, args.current_dir, args.pp_file, args.rss_file,
                 args.fea_file, args.cpu)
    if args.fragment == 1:
        if args.dl == 1:
            frag_fe_process(args)
        else:
            res_fe_process(args, 1)
    else:
        if args.dl == 1:
            # NOTE(review): deep-learning + sliding-window path is intentionally a no-op here — confirm
            pass
        else:
            res_fe_process(args, 0)
    print("Done.")
    print(("Used time: %.2fs" % (time.time() - start_time)))
# CLI definition for residue-level feature extraction, then run main().
if __name__ == '__main__':
    import argparse
    parse = argparse.ArgumentParser(prog='BioSeq-NLP', description="Step into analysis, please select parameters ")
    # parameters for whole framework
    parse.add_argument('-category', type=str, choices=['DNA', 'RNA', 'Protein'], required=True,
                       help="The category of input sequences.")
    parse.add_argument('-method', type=str, required=True, choices=Method_Res,
                       help="Please select feature extraction method for residue level analysis")
    # parameters for residue
    parse.add_argument('-dl', type=int, default=0, choices=[0, 1],
                       help="Select whether use sliding window technique to transform sequence-labelling question "
                            "to classification question")
    parse.add_argument('-window', type=int,
                       help="The window size when construct sliding window technique for allocating every "
                            "label a short sequence")
    parse.add_argument('-fragment', type=int, default=0, choices=[0, 1],
                       help="Please choose whether use the fragment method, 1 is yes while 0 is no.")
    # parameters for one-hot encoding
    parse.add_argument('-cpu', type=int, default=1,
                       help="The maximum number of CPU cores used for multiprocessing in generating frequency profile")
    parse.add_argument('-pp_file', type=str,
                       help="The physicochemical properties file user input.\n"
                            "if input nothing, the default physicochemical properties is:\n"
                            "DNA dinucleotide: Rise, Roll, Shift, Slide, Tilt, Twist.\n"
                            "DNA trinucleotide: Dnase I, Bendability (DNAse).\n"
                            "RNA: Rise, Roll, Shift, Slide, Tilt, Twist.\n"
                            "Protein: Hydrophobicity, Hydrophilicity, Mass.")
    parse.add_argument('-rss_file', type=str,
                       help="The second structure file for all input sequences.(The order of a specific sequence "
                            "should be corresponding to the order in 'all_seq_file.txt' file")
    # parameters for input
    parse.add_argument('-seq_file', required=True, help="The input file in FASTA format.")
    parse.add_argument('-label_file', required=True, help="The corresponding label file.")
    # parameters for output
    parse.add_argument('-format', default='csv', choices=['tab', 'svm', 'csv', 'tsv'],
                       help="The output format (default = csv).\n"
                            "tab -- Simple format, delimited by TAB.\n"
                            "svm -- The libSVM training data format.\n"
                            "csv, tsv -- The format that can be loaded into a spreadsheet program.")
    parse.add_argument('-bp', type=int, choices=[0, 1], default=0,
                       help="Select use batch mode or not, the parameter will change the directory for generating file "
                            "based on the method you choose.")
    argv = parse.parse_args()
    main(argv)
<file_sep>from ..utils.utils_words import dr_words
from ..utils.utils_algorithm import tf_idf
def dr_tf_idf(input_file, alphabet, fixed_len, max_dis, fixed=True):
    """TF-IDF vectors over the distance-residue (DR) word corpus.

    :param input_file: input sequence file.
    :param alphabet: residue alphabet used to parse the sequences.
    :param fixed_len: fixed sequence length used during word generation.
    :param max_dis: maximum residue distance for DR word generation.
    :param fixed: whether sequences are fixed to fixed_len.
    :return: TF-IDF representation of the corpus.
    """
    corpus = dr_words(input_file, alphabet, fixed_len, max_dis, fixed)
    # (removed leftover `print(corpus)` debug statement that dumped the whole
    # corpus to stdout on every call)
    return tf_idf(corpus)
<file_sep>import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LinearRegression
from pytorch_pretrained_bert import BertTokenizer
import torch.nn as nn
import torch
# Scratch/experiment script: loads the BERT vocabulary (downloads on first
# use; requires network access) and sanity-checks BCEWithLogitsLoss.
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
# with open("vocabulary.txt", "w", encoding='utf-8') as f:
#     for value in tokenizer.vocab.keys():
#         f.write(str(value) + '\n')
# iris = datasets.load_iris()
# iris_X = iris['data']
# iris_Y = iris['target']
#
# # print(iris_X[:2, :])
# # print(iris_Y[:2])
#
# X_train, X_test, Y_train, Y_test = train_test_split(iris_X, iris_Y, test_size=0.3)
#
# # print(Y_test)
#
# # knn = KNeighborsClassifier()
# # knn.fit(X_train, Y_train)
# # print(knn.predict(X_test))
# # print(Y_test)
#
# boston = datasets.load_boston()
# data_X = boston['data']
# data_y = boston['target']
#
# model = LinearRegression()
# model.fit(data_X, data_y)
# print(model.predict(data_X[:4, :]))
# print(data_y[:4])
# print(model.coef_)
# print(model.intercept_)
# BCEWithLogitsLoss demo on random data.
# FIX: renamed `input` -> `logits` so the builtin input() is not shadowed.
loss = nn.BCEWithLogitsLoss()
logits = torch.randn(3, requires_grad=True)
target = torch.empty(3).random_(2)
output = loss(logits, target)
print(output)
import numpy as np
import torch
import torch.nn.functional as F
import torch.nn.init as init
from torch import nn
from torch.autograd import Function
# TODO: Transforemr
# Reference
# **Paper**
# - Vaswani et al., "Attention is All You Need", NIPS 2017
# - Ahmed et al., "Weighted Transformer Network for Machine Translation", Arxiv 2017
# **Code**
# https://github.com/jayparks/transformer
class Linear(nn.Module):
    """nn.Linear wrapper with Xavier-normal weight init and zero-initialised bias.

    :param in_features: input feature dimension.
    :param out_features: output feature dimension.
    :param bias: whether the layer has a bias term.
    """
    def __init__(self, in_features, out_features, bias=True):
        super(Linear, self).__init__()
        self.linear = nn.Linear(in_features, out_features, bias=bias)
        init.xavier_normal_(self.linear.weight)
        # FIX: nn.Linear(..., bias=False) leaves .bias as None; the original
        # unconditional init.zeros_ call crashed in that case.
        if self.linear.bias is not None:
            init.zeros_(self.linear.bias)
    def forward(self, inputs):
        return self.linear(inputs)
class ScaledDotProductAttention(nn.Module):
    """Scaled dot-product attention: softmax(q·kᵀ / √d_k)·v, with dropout on the attention weights.

    :param d_k: per-head key/query dimension (used for the √d_k scale).
    :param n_heads: number of attention heads (used to broadcast the mask).
    :param dropout: dropout probability applied to the attention weights.
    """
    def __init__(self, d_k, n_heads, dropout=.1):
        super(ScaledDotProductAttention, self).__init__()
        # divide scores by sqrt(d_k) to keep softmax inputs in a stable range
        self.scale_factor = np.sqrt(d_k)
        self.softmax = nn.Softmax(dim=-1)
        self.dropout = nn.Dropout(dropout)
        self.n_heads = n_heads
    def forward(self, q, k, v, attn_mask=None):
        # q: [b_size x n_heads x len_q x d_k]
        # k: [b_size x n_heads x len_k x d_k]
        # v: [b_size x n_heads x len_v x d_v] note: (len_k == len_v)
        # attn: [b_size x n_heads x len_q x len_k]
        scores = torch.matmul(q, k.transpose(-1, -2)) / self.scale_factor
        if attn_mask is not None:
            # attn_mask: [b_size x len_q x len_k]
            attn_mask = attn_mask.unsqueeze(1).repeat(1, self.n_heads, 1, 1)  # [b_size x n_heads x len_q x len_k]
            assert attn_mask.size() == scores.size()
            # in-place fill: masked positions get -1e9 so their softmax weight is ~0
            scores.masked_fill_(attn_mask, -1e9)
        attn = self.dropout(self.softmax(scores))
        # outputs: [b_size x n_heads x len_q x d_v]
        context = torch.matmul(attn, v)
        # returns both the attended values and the attention weights
        return context, attn
class LayerNormalization(nn.Module):
    """Layer normalisation over the last dimension with learnable gain (gamma) and offset (beta).

    Note: this variant normalises by (std + eps) rather than sqrt(var + eps);
    the two differ slightly but both serve to stabilise training.
    """
    def __init__(self, d_hid, eps=1e-6):
        super(LayerNormalization, self).__init__()
        self.gamma = nn.Parameter(torch.ones(d_hid))
        self.beta = nn.Parameter(torch.zeros(d_hid))
        self.eps = eps
    def forward(self, z):
        # statistics per position over the feature (last) dimension
        mean = z.mean(dim=-1, keepdim=True, )
        std = z.std(dim=-1, keepdim=True, )
        ln_out = (z - mean) / (std + self.eps)
        # learnable affine transform restores representational capacity
        ln_out = self.gamma * ln_out + self.beta
        return ln_out
class _MultiHeadAttention(nn.Module):
    """Core multi-head attention: projects q/k/v, runs scaled dot-product attention per head, concatenates heads.

    :param d_k: per-head key/query dimension.
    :param d_v: per-head value dimension.
    :param d_model: model (input/output) dimension.
    :param n_heads: number of attention heads.
    :param dropout: dropout probability for the attention weights.
    """
    def __init__(self, d_k, d_v, d_model, n_heads, dropout):
        super(_MultiHeadAttention, self).__init__()
        self.d_k = d_k
        self.d_v = d_v
        self.d_model = d_model
        self.n_heads = n_heads
        # one fused projection per q/k/v covering all heads at once
        self.w_q = Linear(d_model, d_k * n_heads)
        self.w_k = Linear(d_model, d_k * n_heads)
        self.w_v = Linear(d_model, d_v * n_heads)
        self.attention = ScaledDotProductAttention(d_k, n_heads, dropout)
    def forward(self, q, k, v, attn_mask):
        # q: [b_size x len_q x d_model] -> e.g. torch.Size([50, 20, 64])
        # k: [b_size x len_k x d_model]
        # v: [b_size x len_k x d_model]
        b_size = q.size(0)
        # q_s: [b_size x n_heads x len_q x d_k]
        # k_s: [b_size x n_heads x len_k x d_k]
        # v_s: [b_size x n_heads x len_k x d_v]
        q_s = self.w_q(q).view(b_size, -1, self.n_heads, self.d_k).transpose(1, 2)
        k_s = self.w_k(k).view(b_size, -1, self.n_heads, self.d_k).transpose(1, 2)
        v_s = self.w_v(v).view(b_size, -1, self.n_heads, self.d_v).transpose(1, 2)
        # context: [b_size x n_heads x len_q x d_v], attn: [b_size x n_heads x len_q x len_k]
        context, attn = self.attention(q_s, k_s, v_s, attn_mask=attn_mask)
        # context: [b_size x len_q x n_heads * d_v]
        context = context.transpose(1, 2).contiguous().view(b_size, -1, self.n_heads * self.d_v)
        # concatenate the per-head context vectors from scaled dot-product attention;
        # contiguous() is required after transpose/permute before calling view()
        # return the context and attention weights
        return context, attn
class MultiHeadAttention(nn.Module):
    """Multi-head attention sublayer: attention, output projection, dropout,
    then residual connection and layer normalisation."""

    def __init__(self, d_k, d_v, d_model, n_heads, dropout):
        super(MultiHeadAttention, self).__init__()
        self.n_heads = n_heads
        self.multi_head_attn = _MultiHeadAttention(d_k, d_v, d_model, n_heads, dropout)
        self.projects = Linear(n_heads * d_v, d_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = LayerNormalization(d_model)

    def forward(self, q, k, v, attn_mask):
        """q/k/v: [b_size x len x d_model] (len_k == len_v).

        Returns (normalised output [b_size x len_q x d_model], attention weights).
        """
        residual = q
        # context: [b_size x len_q x n_heads * d_v]
        context, attn = self.multi_head_attn(q, k, v, attn_mask=attn_mask)
        # Project back to d_model so input and output shapes match exactly.
        projected = self.dropout(self.projects(context))
        return self.layer_norm(residual + projected), attn
class MultiBranchAttention(nn.Module):
    """Multi-branch attention (Weighted Transformer): each branch's context is
    projected, scaled by a learned kappa, run through its own position-wise
    FFN, scaled by a learned alpha, and the branches are summed.

    Fixes over the original:
      * ``forward`` referenced the nonexistent ``self.multih_attn``
        (AttributeError) — the attribute is ``self.multihead_attn``.
      * ``init.xavier_normal(self.w_o)`` passed an ``nn.ModuleList`` (not a
        Tensor) and crashed at construction time; it is removed because each
        ``Linear`` already Xavier-initialises its own weights.
    """
    def __init__(self, d_k, d_v, d_model, d_ff, n_branches, dropout):
        super(MultiBranchAttention, self).__init__()
        self.d_k = d_k
        self.d_v = d_v
        self.d_model = d_model
        self.d_ff = d_ff
        self.n_branches = n_branches
        self.multihead_attn = _MultiHeadAttention(d_k, d_v, d_model, n_branches, dropout)
        # additional parameters for BranchedAttention; Linear self-initialises
        # (Xavier-normal), so no extra init call is needed here.
        self.w_o = nn.ModuleList([Linear(d_v, d_model) for _ in range(n_branches)])
        # kappa / alpha branch weights, normalised to sum to 1 at init
        self.w_kp = torch.rand(n_branches)
        self.w_kp = nn.Parameter(self.w_kp / self.w_kp.sum())
        self.w_a = torch.rand(n_branches)
        self.w_a = nn.Parameter(self.w_a / self.w_a.sum())
        self.pos_ffn = nn.ModuleList([
            PoswiseFeedForwardNet(d_model, d_ff // n_branches, dropout) for _ in range(n_branches)])
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = LayerNormalization(d_model)
    def forward(self, q, k, v, attn_mask):
        # q: [b_size x len_q x d_model]
        # k: [b_size x len_k x d_model]
        # v: [b_size x len_v x d_model] note (len_k == len_v)
        residual = q
        # context: a tensor of shape [b_size x len_q x n_branches * d_v]
        context, attn = self.multihead_attn(q, k, v, attn_mask=attn_mask)
        # context: a list of tensors of shape [b_size x len_q x d_v] len: n_branches
        context = context.split(self.d_v, dim=-1)
        # outputs: a list of tensors of shape [b_size x len_q x d_model] len: n_branches
        outputs = [self.w_o[i](context[i]) for i in range(self.n_branches)]
        outputs = [kappa * output for kappa, output in zip(self.w_kp, outputs)]
        outputs = [pos_ffn(output) for pos_ffn, output in zip(self.pos_ffn, outputs)]
        outputs = [alpha * output for alpha, output in zip(self.w_a, outputs)]
        # output: [b_size x len_q x d_model]
        output = self.dropout(torch.stack(outputs).sum(dim=0))
        return self.layer_norm(residual + output), attn
class PoswiseFeedForwardNet(nn.Module):
    """Position-wise feed-forward net built from two kernel-size-1 Conv1d
    layers, with dropout, a residual connection and layer normalisation."""

    def __init__(self, d_model, d_ff, dropout=0.1):
        super(PoswiseFeedForwardNet, self).__init__()
        self.relu = nn.ReLU()
        self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1)
        self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = LayerNormalization(d_model)

    def forward(self, inputs):
        """inputs: [b_size x len_q x d_model] -> same shape."""
        residual = inputs
        # Conv1d expects channels first: swap (len, d_model) -> (d_model, len).
        hidden = self.relu(self.conv1(inputs.transpose(1, 2)))
        # Project back to d_model and restore (len, d_model) layout.
        projected = self.conv2(hidden).transpose(1, 2)
        projected = self.dropout(projected)
        return self.layer_norm(residual + projected)
class EncoderLayer(nn.Module):
    """One Transformer encoder layer: multi-head self-attention followed by a
    position-wise feed-forward network."""

    def __init__(self, d_k, d_v, d_model, d_ff, n_heads, dropout=0.1):
        super(EncoderLayer, self).__init__()
        self.enc_self_attn = MultiHeadAttention(d_k, d_v, d_model, n_heads, dropout)
        self.pos_ffn = PoswiseFeedForwardNet(d_model, d_ff, dropout)

    def forward(self, enc_inputs, self_attn_mask):
        """Self-attention (q = k = v = enc_inputs), then the FFN.

        Returns (enc_outputs [b x len x d_model], self-attention weights).
        """
        attended, attn = self.enc_self_attn(
            enc_inputs, enc_inputs, enc_inputs, attn_mask=self_attn_mask)
        return self.pos_ffn(attended), attn
class WeightedEncoderLayer(nn.Module):
    """Encoder layer variant that performs self-attention with
    MultiBranchAttention (branch-weighted heads) instead of MultiHeadAttention."""

    def __init__(self, d_k, d_v, d_model, d_ff, n_branches, dropout=0.1):
        super(WeightedEncoderLayer, self).__init__()
        self.enc_self_attn = MultiBranchAttention(d_k, d_v, d_model, d_ff, n_branches, dropout)

    def forward(self, enc_inputs, self_attn_mask):
        """Self-attention with q = k = v = enc_inputs; returns (outputs, attn)."""
        return self.enc_self_attn(enc_inputs, enc_inputs, enc_inputs, attn_mask=self_attn_mask)
def get_attn_pad_mask(seq_q, seq_k):
    """Build a padding mask that is True where the key token id equals 0.

    seq_q, seq_k: [b_size x len] integer id tensors.
    Returns a bool tensor of shape [b_size x len_q x len_k].
    """
    assert seq_q.dim() == 2 and seq_k.dim() == 2
    len_q = seq_q.size(1)
    b_size, len_k = seq_k.size()
    # Mark padding positions (id 0) along the key axis, then broadcast the
    # same row over every query position.
    pad_mask = seq_k.data.eq(0).unsqueeze(1)  # b_size x 1 x len_k
    return pad_mask.expand(b_size, len_q, len_k)  # b_size x len_q x len_k
class Transformer(nn.Module):
    """Encoder-only Transformer: an input projection followed by ``n_layers``
    encoder layers (standard, or branch-weighted when ``weighted=True``)."""
    def __init__(self, enc_inputs_len, n_layers, d_k, d_v, d_model, d_ff, n_heads,
                 dropout=0.1, weighted=False):
        super(Transformer, self).__init__()
        self.d_model = d_model
        self.projects = Linear(enc_inputs_len, d_model)
        self.layer_type = EncoderLayer if not weighted else WeightedEncoderLayer
        self.layers = nn.ModuleList(
            [self.layer_type(d_k, d_v, d_model, d_ff, n_heads, dropout) for _ in range(n_layers)])
        # The original paper stacks 6 encoder and 6 decoder layers, each layer
        # feeding the next; the decoder stack is intentionally dropped here.
    def forward(self, enc_inputs, seq_mask, mask=False, return_attn=False):
        # enc_inputs.size() -> torch.Size([50, 20, 4]) (batch_size, seq_len, d_model)
        if mask is False:
            enc_self_attn_mask = None
        else:
            enc_self_attn_mask = get_attn_pad_mask(seq_mask, seq_mask)
        # Masking only matters for padded token sequences (trailing zeros added
        # by padding); with dense matrix inputs there is nothing to hide.
        enc_outputs = self.projects(enc_inputs)
        # print('After project operation:')
        # print(enc_outputs.size())  # torch.Size([50, 20, 64])
        enc_self_attns = []
        for layer in self.layers:
            enc_outputs, enc_self_attn = layer(enc_outputs, enc_self_attn_mask)
            if return_attn:
                enc_self_attns.append(enc_self_attn)
        # enc_outputs: [b_size x len_q x d_model], attn: [b_size x n_heads x len_q x len_k]
        return enc_outputs, enc_self_attns
# TODO: Reformer
# Reference
# **Paper**
# https://openreview.net/pdf?id=rkgNKkHtvB
# **Code**
# https://github.com/lucidrains/reformer-pytorch
def deterministic_dropout(x: torch.Tensor, seed=0, dropout=0):
    """Inverted dropout whose mask is derived from an explicit seed, so the
    same (x, seed, dropout) always yields the same mask (needed so reversible
    layers can reproduce identical dropout in the backward recomputation).

    :param x: input tensor.
    :param seed: seed for the mask generator.
    :param dropout: drop probability in [0, 1).
    :return: ``x`` with elements zeroed and survivors scaled by 1/(1-dropout).

    Fix: the original built the generator from ``x.get_device()``, which
    returns -1 for CPU tensors and made the call fail off-GPU; ``x.device``
    works for both CPU and CUDA tensors.
    """
    generator = torch.Generator(device=x.device)
    generator.manual_seed(seed)
    # Keep each element with probability 1 - dropout, then rescale so the
    # expected value of the output matches the input (inverted dropout).
    dropout_mask = torch.bernoulli(x, p=1 - dropout, generator=generator)
    return dropout_mask * x / (1 - dropout)
def look_back(input_tensor: torch.Tensor) -> torch.Tensor:
    """
    Concatenate every bucket with the bucket preceding it (cyclically), so
    each bucket can also attend to its predecessor.

    input_tensor: [batch * head, n_buckets, bucket_length, ...]
    returns:      [batch * head, n_buckets, bucket_length * 2, ...]
    """
    # Roll the bucket axis by one: bucket i sees bucket i - 1 (bucket 0 sees the last).
    previous_bucket = torch.cat([input_tensor[:, -1:], input_tensor[:, :-1]], dim=1)
    # Prepend the rolled buckets along the in-bucket axis.
    return torch.cat([previous_bucket, input_tensor], dim=2)
def reverse_sort(indice: torch.Tensor, dim: int) -> torch.Tensor:
    """
    Compute the inverse permutation of ``indice`` along ``dim`` (i.e. the
    index tensor that undoes a previous sort).
    """
    shape = [1] * indice.dim()
    shape[dim] = indice.size(dim)
    # 0..n-1 laid out along `dim`, then broadcast to indice's full shape.
    arange = indice.new_empty(size=shape)
    torch.arange(shape[dim], out=arange)
    positions = arange.expand_as(indice)
    # inverse[indice[j]] = j, scattered along `dim`.
    inverse = torch.empty_like(indice)
    inverse.scatter_(dim=dim, index=indice, src=positions)
    return inverse
def expand(input_tensor: torch.Tensor, dim=0, num=1) -> torch.Tensor:
    """
    Insert a new axis at ``dim`` and expand it to size ``num``
    (shortcut for unsqueeze + expand; no data copy is made).
    """
    target_shape = [-1] * (input_tensor.dim() + 1)
    target_shape[dim] = num
    return input_tensor.unsqueeze(dim=dim).expand(target_shape)
def expand_gather(input_tensor: torch.Tensor, dim: int, index: torch.Tensor, expand_dim=0, num=1) -> torch.Tensor:
    """Gather along ``dim`` after broadcasting ``index`` with a new axis of
    size ``num`` at ``expand_dim`` (see ``expand``), so a single index tensor
    can address an extra dimension of ``input_tensor``."""
    expanded_index = expand(index, dim=expand_dim, num=num)
    return input_tensor.gather(dim=dim, index=expanded_index)
def get_dup_keys(input_tensor: torch.Tensor, rounds=0) -> torch.Tensor:
    """Count, per entry, how many duplicate keys appear nearby across rounds
    (used to down-weight keys that occur in several hash rounds).

    input_tensor: [batch * head, length, bucket_length * 2 * rounds] flattened keys.
    Returns a tensor of the same shape where each entry holds the multiplicity
    of its key, counted within a window of ``rounds - 1`` sorted neighbours.
    """
    sorted_flat_key, flat_key_indice = torch.sort(input_tensor, dim=-1)
    # [batch * head, length, bucket_length * 2 * rounds]
    count_shift_keys = torch.ones_like(sorted_flat_key)
    # [batch * head, length, bucket_length * 2 * rounds]
    for i in range(1, rounds):
        # Equal keys that are i apart in sorted order increment both sides.
        equiv_flat_key = (sorted_flat_key[..., i:] == sorted_flat_key[..., :-i]).int()
        count_shift_keys[..., i:] += equiv_flat_key
        count_shift_keys[..., :-i] += equiv_flat_key
    # Undo the sort so the counts line up with the original key positions.
    count_key_indice = reverse_sort(flat_key_indice, dim=2)
    # [batch * head, length, bucket_length * 2 * rounds]
    return torch.gather(count_shift_keys, dim=-1, index=count_key_indice)
def top_p_sample(prob: torch.Tensor, perc=0.5) -> torch.Tensor:
    """Nucleus (top-p) sampling: draw one index from the smallest set of
    entries whose cumulative probability reaches ``perc``.

    :param prob: [..., vocab] probability tensor (rows assumed to sum to 1).
    :param perc: cumulative-probability threshold in (0, 1].
    :return: sampled index tensor of shape [..., 1] (``torch.multinomial``
        output; the previous ``-> np.array`` annotation was incorrect).
    """
    sorted_prob, sorted_indices = torch.sort(prob, dim=-1, descending=True)
    cumsum = torch.cumsum(sorted_prob, dim=-1)
    # Keep everything strictly below the threshold...
    mask = cumsum < perc
    # ...then include one more entry so the kept mass is >= perc.
    one_more_indice = mask.long().sum(dim=-1, keepdim=True)
    mask.scatter_(dim=-1, index=one_more_indice, value=True)
    sorted_prob.masked_fill_(~mask, value=0.0)
    # Undo the sort so probabilities line up with the original indices again.
    masked_prob = sorted_prob.gather(dim=-1, index=reverse_sort(sorted_indices, dim=-1))
    return torch.multinomial(masked_prob, num_samples=1)
class LocalitySensitiveHash(nn.Module):
    """
    Implements Locality Sensitive Hash
    class is used to save random matrix used for hashing
    """
    def __init__(self, d_model, n_heads, rounds):
        super(LocalitySensitiveHash, self).__init__()
        self.d_k = d_model // n_heads   # per-head feature size
        self.rounds = rounds            # number of independent hash rounds
        self.rand_matrix = None         # cached projection, reused when random=False
    def forward(self, inp: torch.Tensor, n_buckets=0, random=True):
        """Hash every position into one of ``n_buckets`` buckets, per round.

        inp: [batch * head // chunk, length, d_k]
        Returns int32 keys of shape [batch * head // chunk, length, rounds]
        where key = bucket_id * length + position, so sorting by key groups
        positions by bucket while keeping them in sequence order.

        Fix: the random matrix is now allocated on ``inp.device`` — the
        original used ``inp.get_device()``, which returns -1 for CPU tensors
        and made the call fail off-GPU.
        """
        batch_size = inp.size(0)
        length = inp.size(1)
        # L2-normalise so the projections below depend only on direction.
        inp = F.normalize(inp, p=2, dim=-1)
        # [batch * head, length, d_k]
        if random:
            self.rand_matrix = torch.randn(
                [batch_size, self.d_k, self.rounds, n_buckets // 2], device=inp.device)
            # [batch * head, d_k, rounds, n_buckets // 2]
            self.rand_matrix /= torch.norm(self.rand_matrix, dim=1, keepdim=True)
            # [batch * head, d_k, rounds, n_buckets // 2]
        matmul = torch.einsum('...ij,...jkl->...ikl', inp, self.rand_matrix)
        # [batch * head, length, rounds, n_buckets // 2]
        # paper: h(x) = arg max([xR; -xR])
        hashes = torch.argmax(torch.cat([matmul, -matmul], dim=-1), dim=-1).int()
        # [batch * head, length, rounds]
        # Offset each bucket id by the position index so that a sort on the
        # combined key is stable within a bucket.
        positions = torch.arange(length, device=hashes.device, dtype=hashes.dtype).reshape(1, length, 1)
        # [batch * head, length, rounds]
        return hashes * length + positions
class LSHAttention(nn.Module):
    """
    Implements LSHAttention
    class is used to save LocalitySensitiveHash
    """
    def __init__(self, d_model, n_heads, rounds, bucket_length, dropout_prob):
        super(LSHAttention, self).__init__()
        self.d_k = d_model // n_heads
        self.rounds = rounds                 # independent hash rounds averaged at the end
        self.bucket_length = bucket_length   # positions per hash bucket
        self.dropout = dropout_prob
        self.lsh = LocalitySensitiveHash(d_model, n_heads, rounds)
    def forward(self, query, value, seed, random=True):
        """Shared-QK LSH attention over one chunk (``query`` doubles as key).

        query/value: [batch * head // chunk, length, d_k]; ``length`` must be a
        multiple of ``bucket_length``.  Returns [batch * head // chunk, length, d_k].
        """
        # size of query and value: [batch * head // chunk, length, d_k]
        # In the shape comments below, read "batch * head" as "batch * head // chunk".
        length = query.size(1)
        n_buckets = length // self.bucket_length
        # Sort positions by (bucket, position) key so same-bucket items are adjacent.
        sorted_hashes, hash_indice = torch.sort(self.lsh(query, n_buckets, random), dim=1)
        # [batch * head, length, rounds]
        original_indice = reverse_sort(hash_indice, dim=1)
        # [batch * head, length, rounds]
        reordered_query = expand_gather(
            expand(query, dim=3, num=self.rounds), dim=1,
            index=hash_indice, expand_dim=2, num=self.d_k
        )
        # [batch * head, length, d_k, rounds]
        reordered_query = reordered_query.reshape(
            -1, n_buckets, self.bucket_length, self.d_k, self.rounds
        )
        # [batch * head, n_buckets, bucket_length, d_k, rounds]
        # Keys = normalised queries of this bucket plus the previous bucket.
        lookback_key = F.normalize(look_back(reordered_query), p=2, dim=-2)
        # [batch * head, n_buckets, bucket_length * 2, d_k, rounds]
        matmul_qk = torch.einsum(
            '...ijk,...ljk->...ilk', reordered_query, lookback_key
        ) / math.sqrt(self.d_k)
        # [batch * head, n_buckets, bucket_length, bucket_length * 2, rounds]
        # Recover bucket ids (the position offset added by the LSH is divided away).
        sorted_hashes = sorted_hashes.reshape(
            -1, n_buckets, self.bucket_length, self.rounds
        ) // length
        # [batch * head, n_buckets, bucket_length, rounds]
        # Mask attention across different buckets.
        matmul_qk.masked_fill_(
            mask=(sorted_hashes[..., None, :] != look_back(sorted_hashes)[..., None, :, :]),
            value=-1e9
        )
        query_indice = hash_indice.reshape(
            -1, n_buckets, self.bucket_length, self.rounds
        ).int()
        # [batch * head, n_buckets, bucket_length, rounds]
        key_indice = look_back(query_indice)
        # [batch * head, n_buckets, bucket_length * 2, rounds]
        # Causal mask: a position may not attend to later positions...
        matmul_qk.masked_fill_(
            mask=(query_indice[..., None, :] < key_indice[..., None, :, :]), value=-1e9
        )
        # ...and self-attention is discouraged (but not fully forbidden).
        matmul_qk.masked_fill_(
            mask=(query_indice[..., None, :] == key_indice[..., None, :, :]), value=-1e5
        )
        key_indice = expand(key_indice, dim=2, num=self.bucket_length).flatten(1, 2)
        # [batch * head, length, bucket_length * 2, rounds]
        key_indice = expand_gather(
            key_indice,
            dim=1, index=original_indice,
            expand_dim=2, num=self.bucket_length * 2
        )
        # [batch * head, length, bucket_length * 2, rounds]
        # Count duplicate keys across rounds to normalise multiply-counted pairs.
        count_key = get_dup_keys(
            key_indice.flatten(-2, -1), self.rounds
        ).reshape(-1, length, self.bucket_length * 2, self.rounds)
        # [batch * head, length, bucket_length * 2, rounds]
        count_key = expand_gather(
            count_key, dim=1, index=hash_indice, expand_dim=2, num=self.bucket_length * 2
        )
        # [batch * head, length, bucket_length * 2, rounds]
        matmul_qk = matmul_qk.flatten(1, 2)
        # [batch * head, length, bucket_length * 2, rounds]
        logsumexp_qk = torch.logsumexp(matmul_qk, dim=2)
        # [batch * head, length, rounds]
        # Softmax in log space, dividing by the duplicate-key counts.
        softmax_qk = torch.exp(matmul_qk - count_key.float().log_() - logsumexp_qk[..., None, :])
        # [batch * head, length, bucket_length * 2, rounds]
        if self.training:
            softmax_qk = deterministic_dropout(softmax_qk, seed=seed, dropout=self.dropout)
            # [batch * head, length, bucket_length * 2, rounds]
        reordered_value = expand_gather(
            expand(value, dim=3, num=self.rounds), dim=1,
            index=hash_indice, expand_dim=2, num=self.d_k
        )
        # [batch * head, length, d_k, rounds]
        reordered_value = reordered_value.reshape(
            -1, n_buckets, self.bucket_length, self.d_k, self.rounds
        )
        # [batch * head, n_buckets, bucket_length, d_k, rounds]
        softmax_qk = softmax_qk.reshape(
            -1, n_buckets, self.bucket_length, self.bucket_length * 2, self.rounds
        )
        # [batch * head, n_buckets, bucket_length, bucket_length * 2, rounds]
        attention = torch.einsum('...ijl,...jkl->...ikl', softmax_qk, look_back(reordered_value))
        # [batch * head, n_buckets, bucket_length, d_k, rounds]
        attention = attention.flatten(1, 2)
        # [batch * head, length, d_k, rounds]
        # Scatter rows back into the original sequence order.
        attention = expand_gather(
            attention, dim=1, index=original_indice, expand_dim=2, num=self.d_k
        )
        # [batch * head, length, d_k, rounds]
        logsumexp_qk = torch.gather(logsumexp_qk, dim=1, index=original_indice)
        # [batch * head, length, rounds]
        # Combine the rounds, weighting each round by its normaliser.
        logsumexp_qk = F.softmax(logsumexp_qk, dim=1)
        # [batch * head, length, rounds]
        attention = torch.einsum('...ij,...j->...i', attention, logsumexp_qk)
        # [batch * head, length, d_k]
        return attention
class MultiRoundLSHAttention(nn.Module):
    """
    Implements Multi Round LSH Attention
    class is defined to save LSHAttention
    """
    def __init__(self, d_model, n_heads, n_chunk, rounds, bucket_length, dropout_prob):
        super(MultiRoundLSHAttention, self).__init__()
        self.d_k = d_model // n_heads
        self.head = n_heads
        self.chunk = n_chunk  # number of chunks the (batch * head) axis is split into
        self.linear_query = nn.Linear(d_model, d_model)
        self.linear_value = nn.Linear(d_model, d_model)
        self.linear_out = nn.Linear(d_model, d_model)
        self.lshattention = LSHAttention(d_model, n_heads, rounds, bucket_length, dropout_prob)
    def forward(self, input_tensor, seed, random=True):
        # input_tensor: [batch, length, d_model]
        # NOTE(review): the original comment said [batch, head, d_model], but
        # size(1) is used as the sequence length throughout — confirm callers.
        length = input_tensor.size(1)
        query = self.linear_query(input_tensor).reshape(-1, length, self.head, self.d_k).transpose_(1, 2)
        # [batch, head, length, d_k]
        value = self.linear_value(input_tensor).reshape(-1, length, self.head, self.d_k).transpose_(1, 2)
        # [batch, head, length, d_k]
        chunked_query = torch.chunk(query.flatten(0, 1), chunks=self.chunk, dim=0)  # flatten merges the given dims
        # [batch * head // chunk, length, d_k]
        chunked_value = torch.chunk(value.flatten(0, 1), chunks=self.chunk, dim=0)
        # [batch * head // chunk, length, d_k] -> shape of each individual chunk
        # Each chunk gets its own derived seed so dropout masks differ per chunk
        # yet stay reproducible.
        attention = torch.cat([
            self.lshattention(q, v, seed + i, random) for q, v, i
            in zip(chunked_query, chunked_value, range(self.chunk))
        ], dim=0).reshape(-1, self.head, length, self.d_k)
        # [batch, head, length, d_k]
        attention = attention.transpose(1, 2).flatten(-2, -1)
        # [batch, length, d_model]
        return self.linear_out(attention)
class Block(nn.Module):
    """Pre-norm wrapper: LayerNorm -> wrapped sublayer -> seeded dropout (training only)."""
    def __init__(self, d_model, dropout_prob, func):
        super(Block, self).__init__()
        self.func = func  # the wrapped sublayer (attention or chunked feed-forward)
        self.norm = nn.LayerNorm(d_model)
        self.dropout = dropout_prob
    def forward(self, x, seed, random=True):
        norm = self.norm(x)
        # Derive the sublayer's seed from the block seed so forward and the
        # recomputation in Reversible.backward draw identical dropout masks.
        out = self.func(norm, (1 << 63) - seed, random)
        if self.training:
            return deterministic_dropout(out, seed=seed, dropout=self.dropout)
        return out
class ChunkFeedForward(nn.Module):
    """Feed-forward sublayer that processes the sequence in chunks along the
    length axis to reduce peak memory (Reformer-style)."""

    def __init__(self, d_model, d_ff, dropout_prob):
        super(ChunkFeedForward, self).__init__()
        self.chunk = d_ff // d_model  # number of chunks along the length axis
        self.linear1 = nn.Linear(d_model, d_ff)
        self.linear2 = nn.Linear(d_ff, d_model)
        self.dropout = dropout_prob

    def forward(self, input_tensor, seed, random=True):
        """input_tensor: [batch, length, d_model] -> same shape."""
        pieces = torch.chunk(input_tensor, chunks=self.chunk, dim=1)
        # per piece: [batch, length // chunk, d_ff]
        hidden = [F.gelu(self.linear1(piece)) for piece in pieces]
        if self.training:
            # Seed-offset dropout keeps each chunk's mask reproducible.
            hidden = [
                deterministic_dropout(piece, seed + offset, dropout=self.dropout)
                for offset, piece in enumerate(hidden)]
        # Project each chunk back to d_model and stitch the sequence together.
        return torch.cat([self.linear2(piece) for piece in hidden], dim=1)
class Reversible(Function):
    """Custom autograd Function for a reversible residual layer (RevNet-style):
    activations are not stored; the layer inputs are recomputed from its
    outputs during backward.

    NOTE(review): intermediate outputs are stashed on the class attribute
    ``Reversible.outputs``, shared process-wide — works for a single
    sequential stack but is not thread-safe; verify before reuse.
    NOTE(review): ``backward`` returns 4 gradients while ``forward`` receives
    3 positional args — confirm the trailing ``None`` against the torch
    version in use.
    """
    def __init__(self):
        super(Reversible, self).__init__()
    @staticmethod
    def forward(ctx, *args):
        # args: (layer, x1, x2); run the layer without building a graph.
        layer, input_1, input_2 = args
        ctx.layer = layer
        with torch.no_grad():
            output_1, output_2 = layer(input_1, input_2)
        Reversible.outputs = (output_1.detach(), output_2.detach())
        return output_1, output_2
    @staticmethod
    def backward(ctx, *grad_outputs):
        # Reconstruct inputs from the saved outputs, re-running the f/g blocks
        # under enable_grad to obtain their local gradients.
        output_1_grad, output_2_grad = grad_outputs
        output_1, output_2 = Reversible.outputs
        output_1.requires_grad = True
        output_2.requires_grad = True
        with torch.enable_grad():
            g_output_1 = ctx.layer.g_block(output_1, ctx.layer.g_seed)
            g_output_1.backward(output_2_grad)
        with torch.no_grad():
            # Invert y2 = x2 + g(y1)  ->  x2 = y2 - g(y1).
            input_2 = output_2 - g_output_1
            del output_2, g_output_1
            input_1_grad = output_1_grad + output_1.grad
            del output_1_grad
            output_1.grad = None
        with torch.enable_grad():
            input_2.requires_grad = True
            f_input_2 = ctx.layer.f_block(input_2, ctx.layer.f_seed, False)
            f_input_2.backward(input_1_grad)
        with torch.no_grad():
            # Invert y1 = x1 + f(x2)  ->  x1 = y1 - f(x2).
            input_1 = output_1 - f_input_2
            del output_1, f_input_2
            input_2_grad = output_2_grad + input_2.grad
            del output_2_grad
            input_2.grad = None
        # Hand the reconstructed inputs to the next (earlier) layer's backward.
        Reversible.outputs = (input_1.detach(), input_2.detach())
        return None, input_1_grad, input_2_grad, None
class ReversibleDecoderLayer(nn.Module):
    """One reversible layer: f = LSH self-attention block, g = chunked FFN block.

    ``f_seed`` / ``g_seed`` are not set here — ``Reformer.forward`` assigns
    fresh seeds to every layer right before calling it.
    """
    def __init__(self, d_model, d_ff, n_heads, n_chunk, rounds, bucket_length, dropout_prob):
        super(ReversibleDecoderLayer, self).__init__()
        self.attn = MultiRoundLSHAttention(d_model, n_heads, n_chunk, rounds, bucket_length, dropout_prob)
        self.feed_forward = ChunkFeedForward(d_model, d_ff, dropout_prob)
        self.f_block = Block(d_model, dropout_prob, self.attn)
        self.g_block = Block(d_model, dropout_prob, self.feed_forward)
    def forward(self, x1, x2):
        # Reversible coupling: y1 = x1 + f(x2); y2 = x2 + g(y1).
        y1 = x1 + self.f_block(x2, self.f_seed)
        y2 = x2 + self.g_block(y1, self.g_seed)
        return y1, y2
# This is effectively the decoder (a decoder-only reversible stack).
class Reformer(nn.Module):
    """Stack of reversible decoder layers; fresh dropout seeds are drawn for
    every layer on each forward pass so backward can replay identical masks."""

    def __init__(self, d_model, d_ff, n_heads, n_chunk, rounds, bucket_length, n_layer, dropout_prob):
        super(Reformer, self).__init__()
        self.layers = nn.ModuleList([
            ReversibleDecoderLayer(d_model, d_ff, n_heads, n_chunk, rounds,
                                   bucket_length, dropout_prob)
            for _ in range(n_layer)])

    def forward(self, x1, x2):
        """Run the two reversible streams through every layer; return the second."""
        for layer in self.layers:
            # Per-layer random seeds, stored on the layer for Reversible.backward.
            layer.f_seed = int(np.random.randint(0, 1 << 63, dtype=np.int64))
            layer.g_seed = int(np.random.randint(0, 1 << 63, dtype=np.int64))
            x1, x2 = Reversible.apply(layer, x1, x2)
        return x2
<file_sep>from ..utils.utils_topic import lsa, PLsa, lda
from ..TR.TR4vec import text_rank
def text_rank_tm(tm_method, input_file, labels, category, words, fixed_len, sample_num_list, out_format, out_file_list,
                 cur_dir, **param_dict):
    """Build document vectors with TextRank, then reduce them with a topic model.

    :param tm_method: one of 'LSA', 'PLSA', 'LDA', 'Labeled-LDA'.
    :param labels: per-document labels (used only by 'Labeled-LDA').
    :param param_dict: forwarded to ``text_rank``; must contain 'com_prop'
        (proportion of components kept by the topic model).
    :return: the topic-model vectors, or False when ``tm_method`` is unknown.
    """
    vectors = text_rank(input_file, category, words, fixed_len, sample_num_list, out_format, out_file_list, cur_dir,
                        True, **param_dict)
    if tm_method == 'LSA':
        tm_vectors = lsa(vectors, com_prop=param_dict['com_prop'])
    elif tm_method == 'PLSA':
        _, tm_vectors = PLsa(vectors, com_prop=param_dict['com_prop']).em_algorithm()
    elif tm_method == 'LDA':
        tm_vectors = lda(vectors, labels=None, com_prop=param_dict['com_prop'])
    elif tm_method == 'Labeled-LDA':
        tm_vectors = lda(vectors, labels=labels, com_prop=param_dict['com_prop'])
    else:
        print('Topic model method error!')
        return False
    return tm_vectors
<file_sep>from ..utils.utils_words import dr_words
from ..utils.utils_algorithm import text_rank
def dr_text_rank(input_file, alphabet, fixed_len, max_dis, alpha, fixed=True):
    """Build a distance-residue (DR) word corpus from the sequences, then score
    it with TextRank.

    :param alpha: TextRank damping factor, forwarded to ``text_rank``.
    :return: TextRank vectors for the corpus.
    """
    corpus = dr_words(input_file, alphabet, fixed_len, max_dis, fixed)
    return text_rank(corpus, alpha)
<file_sep>from ..utils.utils_words import tng_words
from ..utils.utils_algorithm import text_rank
def tng_text_rank(input_file, fixed_len, word_size, n, process_num, alpha, cur_dir, fixed=True):
    """Build a topic-n-gram (TNG) word corpus from the sequences, then score it
    with TextRank.

    :param process_num: worker processes used by ``tng_words``.
    :param alpha: TextRank damping factor, forwarded to ``text_rank``.
    :return: TextRank vectors for the corpus.
    """
    corpus = tng_words(input_file, fixed_len, word_size, n, process_num, cur_dir, fixed)
    return text_rank(corpus, alpha)
<file_sep>__author__ = '<NAME>'
import math
import os
import pickle
class AAIndex:
    """One AAIndex entry: a header string plus per-amino-acid values."""

    def __init__(self, head, index_dict):
        self.head = head              # entry name / description line
        self.index_dict = index_dict  # {one-letter AA code: float value}

    def __str__(self):
        return f"{self.head}\n{self.index_dict}"
def extra_aaindex(filename):
    """Parse an AAIndex-format file and return a list of AAIndex objects.

    Every entry contributes a head line (``H ...``) and an index marker line
    (``I``) followed by two rows of 10 values ordered as ``index_list``.
    ``NA`` values are replaced by the mean of the available values, rounded to
    3 decimals.

    Fixes over the original: the bare ``except:`` (which swallowed every
    exception) now catches only ``ValueError`` from ``float()``, and the inner
    loop no longer shadows the outer ``ind`` variable.

    :param filename: path to the AAIndex text file.
    :return: list of AAIndex instances.
    """
    index_list = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I',
                  'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V']
    aaindex = []
    with open(filename, 'r') as f:
        temp_h = ""
        lines = f.readlines()
        for ind, line in enumerate(lines):
            if line[0] == 'H':
                temp_h = line[2:].rstrip()
            elif line[0] == 'I':
                # The two lines after the 'I' marker carry the 20 values.
                vals = lines[ind + 1].rstrip().split() + lines[ind + 2].rstrip().split()
                index_val = {}
                try:
                    index_val = {index: float(val) for index, val in zip(index_list, vals)}
                except ValueError:
                    # Entry contains 'NA'; substitute the mean of the rest.
                    _sum = sum(float(val) for val in vals if val != 'NA')
                    for pos, val in enumerate(vals):
                        if val != 'NA':
                            index_val[index_list[pos]] = float(val)
                        else:
                            index_val[index_list[pos]] = round(_sum / 20, 3)
                aaindex.append(AAIndex(temp_h, index_val))
    return aaindex
def norm_index_vals(index_vals):
    """Normalize index_vals to zero mean and unit (population) spread.

    Each value maps to ``round((val - avg) / population_std, 2)``, with the
    mean and std taken over the 20 amino-acid values.

    Fix: the denominator is loop-invariant; it is now computed once instead of
    once per entry (the original was O(n^2)).

    :param index_vals: dict, {index: vals}.
    :return: dict with the same keys and normalized, 2-decimal values.
    """
    n = 20  # AAIndex entries always cover the 20 standard amino acids
    avg = float(sum(index_vals.values())) / n
    denominator = math.sqrt(sum(pow(val - avg, 2) for val in index_vals.values()) / n)
    return {index: round((val - avg) / denominator, 2) for index, val in index_vals.items()}
def write_aaindex(aaindex, filename):
    """Pickle ``aaindex`` to ``filename`` (protocol 2 for legacy compatibility)."""
    with open(filename, 'wb') as handle:
        pickle.dump(aaindex, handle, protocol=2)
if __name__ == '__main__':
    # Parse the raw AAIndex file, append three hand-coded scales
    # (hydrophobicity h1, hydrophilicity h2, residue mass m), normalize every
    # entry, pickle the result, and read it back as a sanity check.
    h1 = {'A': 0.620, 'R': -2.530, 'N': -0.780, 'D': -0.090, 'C': 0.290, 'Q': -0.850, 'E': -0.740, 'G': 0.480,
          'H': -0.400, 'I': 1.380, 'L': 1.530, 'K': -1.500, 'M': 0.640, 'F': 1.190, 'P': 0.120, 'S': -0.180,
          'T': -0.050, 'W': 0.810, 'Y': 0.260, 'V': 1.800}
    h2 = {'A': -0.5, 'R': 3.0, 'N': 0.2, 'D': 3.0, 'C': -1.0, 'Q': 0.2, 'E': 3.0, 'G': 0.0, 'H': -0.5, 'I': -1.8,
          'L': -1.8, 'K': 3.0, 'M': -1.3, 'F': -2.5, 'P': 0.0, 'S': 0.3, 'T': -0.4, 'W': -3.4, 'Y': -2.3, 'V': -1.5}
    m = {'A': 71.079, 'R': 156.188, 'N': 114.104, 'D': 115.086, 'C': 103.145, 'Q': 128.131, 'E': 129.116, 'G': 57.0521,
         'H': 137.141, 'I': 113.160, 'L': 113.160, 'K': 128.170, 'M': 131.99, 'F': 147.177, 'P': 97.177, 'S': 87.078,
         'T': 101.105, 'W': 186.123, 'Y': 163.176, 'V': 99.133}
    file_path = os.path.abspath('..') + "/data/aaindex3.txt"
    print(file_path)
    aaindex = extra_aaindex(file_path)
    aaindex.extend([AAIndex('Hydrophobicity', h1), AAIndex('Hydrophilicity', h2), AAIndex('Mass', m)])
    # Replace every entry with its normalized counterpart.
    for ind, e in enumerate(aaindex):
        aaindex[ind] = AAIndex(e.head, norm_index_vals(e.index_dict))
    for e in aaindex:
        if e.head == 'Hydrophobicity':
            print((e.index_dict))
    # Persist the normalized entries, then reload them to verify the pickle.
    file_path = os.path.abspath('..') + "/data/aaindex.data"
    write_aaindex(aaindex, file_path)
    file_path = os.path.abspath('..') + "/data/aaindex.data"
    with open(file_path, 'rb') as f:
        norm_aaindex = pickle.load(f)
    print('\n')
    heads = [e.head for e in norm_aaindex]
    # print(heads)
    print((len(norm_aaindex)))
    norm_h1 = norm_index_vals(h1)
    norm_h2 = norm_index_vals(h2)
norm_m = norm_index_vals(m)<file_sep>from ..utils.utils_words import rev_km_words
from ..utils.utils_algorithm import tf_idf
def rev_km_tf_idf(input_file, alphabet, fixed_len, word_size, fixed=True):
    """Build a reverse-complement k-mer word corpus from the sequences, then
    weight it with TF-IDF.

    :param word_size: k-mer length used by ``rev_km_words``.
    :return: TF-IDF vectors for the corpus.
    """
    corpus = rev_km_words(input_file, alphabet, fixed_len, word_size, fixed)
    return tf_idf(corpus)
<file_sep>from abc import ABC
import torch
import torch.nn as nn
from imblearn.combine import SMOTETomek
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import TomekLinks
from numpy import random
from sklearn.model_selection import StratifiedKFold, KFold
START_TAG = "<START>"
STOP_TAG = "<STOP>"
# tag_to_ix = {"B": 0, "O": 1, START_TAG: 2, STOP_TAG: 3}
SEED = 42
# TODO: split the dataset into training and test sets
def construct_partition2two(labels, folds_num, stratified=True):
    """Split the dataset indices into ``folds_num`` (train, test) partitions.

    Only labels are needed: a dummy feature matrix is generated to satisfy
    scikit-learn's split API.  (If similarity scoring is required, the split
    must instead happen before feature extraction.)

    :param labels: sequence of class labels, one per sample.
    :param folds_num: number of folds.
    :param stratified: preserve class ratios per fold via StratifiedKFold.
    :return: list of (train_index, test_index) tuples, one per fold.
    """
    # Dummy vectors: the splitters only use the first axis (and the labels).
    vectors = random.normal(loc=0.0, scale=1, size=(len(labels), 64))
    if stratified:
        fold = StratifiedKFold(n_splits=folds_num, shuffle=True, random_state=random.RandomState(SEED))
        folds_temp = list(fold.split(vectors, labels))
    else:
        fold = KFold(n_splits=folds_num, shuffle=True, random_state=random.RandomState(SEED))
        folds_temp = list(fold.split(vectors))
    # Each element already is a (train_index, test_index) pair; the original
    # rebuilt the identical list element by element.
    return [(train_index, test_index) for train_index, test_index in folds_temp]
def sampling(mode, x_train, y_train):
    """Re-balance the training split: 'over' = SMOTE oversampling, 'under' =
    TomekLinks undersampling, anything else = SMOTETomek combination.

    NOTE(review): ``fit_sample`` was renamed ``fit_resample`` in
    imbalanced-learn 0.4 and removed in 0.8 — confirm the pinned version.
    """
    # Resample only the training data, never the test split.
    if mode == 'over':
        # print('|*** Technique for sampling : oversampling ***|\n')
        x_train, y_train = SMOTE(random_state=42).fit_sample(x_train, y_train)
    elif mode == 'under':
        # print('|*** Technique for sampling : under sampling ***|\n')
        x_train, y_train = TomekLinks().fit_sample(x_train, y_train)
    else:
        # print('|*** Technique for sampling : combine oversampling and under sampling ***|\n')
        x_train, y_train = SMOTETomek(random_state=42).fit_sample(x_train, y_train)
    # print(sorted(Counter(y_train).items()))
    return x_train, y_train
def argmax(vec):
    """Return the column index of the maximum of a 1 x N tensor as a Python int."""
    _, best = torch.max(vec, 1)
    return best.item()
class CRF(nn.Module, ABC):
def __init__(self, hidden_dim, tag_to_ix):
super(CRF, self).__init__()
self.tag_to_ix = tag_to_ix
self.tag_set_size = len(tag_to_ix)
# Maps the output of the LSTM into tag space.
self.hidden2tag = nn.Linear(hidden_dim, self.tag_set_size)
# Matrix of transition parameters. Entry i,j is the score of
# transitioning *to* i *from* j.
self.transitions = nn.Parameter(torch.normal(0.5, 0.167, [self.tag_set_size, self.tag_set_size]),
requires_grad=True)
# self.transitions = nn.Parameter(torch.randn(self.tag_set_size, self.tag_set_size))
# These two statements enforce the constraint that we never transfer
# to the start tag and we never transfer from the stop tag
self.transitions.data[tag_to_ix[START_TAG], :] = -10000
self.transitions.data[:, tag_to_ix[STOP_TAG]] = -10000
# self.transitions.data[tag_to_ix[START_TAG], :] = 1e-3
# self.transitions.data[:, tag_to_ix[STOP_TAG]] = 1e-3
# self.hidden = self.init_hidden()
#
# def init_hidden(self):
# return (torch.randn(2, 1, self.hidden_dim // 2),
# torch.randn(2, 1, self.hidden_dim // 2))
def _forward_alg_new_parallel(self, feats):
# Do the forward algorithm to compute the partition function
init_alphas = torch.full([feats.shape[0], self.tag_set_size], -10000.) # .to('cuda')
# init_alphas = torch.full([feats.shape[0], self.tag_set_size], 1e-3)
# START_TAG has all of the score.
init_alphas[:, self.tag_to_ix[START_TAG]] = 0.
# Wrap in a variable so that we will get automatic back-prop
# Iterate through the sentence
forward_var_list = [init_alphas]
for feat_index in range(feats.shape[1]): # -1
gamar_r_l = torch.stack([forward_var_list[feat_index]] * feats.shape[2]).transpose(0, 1)
t_r1_k = torch.unsqueeze(feats[:, feat_index, :], 1).transpose(1, 2) # +1
aa = gamar_r_l + t_r1_k + torch.unsqueeze(self.transitions, 0)
forward_var_list.append(torch.logsumexp(aa, dim=2))
terminal_var = forward_var_list[-1] + self.transitions[self.tag_to_ix[STOP_TAG]].repeat([feats.shape[0], 1])
alpha = torch.logsumexp(terminal_var, dim=1)
return alpha
def _score_sentence_parallel(self, feats, tags):
# Gives the score of provided tag sequences
# feats = feats.transpose(0,1)
score = torch.zeros(tags.shape[0]) # .to('cuda')
tags = torch.cat([torch.full([tags.shape[0], 1], self.tag_to_ix[START_TAG], dtype=torch.long), tags], dim=1)
for i in range(feats.shape[1]):
feat = feats[:, i, :]
# 明天把这一部分打印出来看看****
score = score + self.transitions[tags[:, i + 1], tags[:, i]] + feat[range(feat.shape[0]), tags[:, i + 1]]
score = score + self.transitions[self.tag_to_ix[STOP_TAG], tags[:, -1]]
return score
def neg_log_likelihood_parallel(self, feats, tags):
feats = self.hidden2tag(feats)
forward_score = self._forward_alg_new_parallel(feats)
gold_score = self._score_sentence_parallel(feats, tags)
return torch.sum(forward_score - gold_score)
def _viterbi_decode_new(self, feats):
back_pointers = []
# Initialize the viterbi variables in log space
init_vvars = torch.full((1, self.tag_set_size), -10000.) # .to('cuda')
# init_vvars = torch.full((1, self.tag_set_size), 1e-3) # .to('cuda')
init_vvars[0][self.tag_to_ix[START_TAG]] = 0
# forward_var at step i holds the viterbi variables for step i-1
forward_var_list = [init_vvars]
for feat_index in range(feats.shape[0]):
gamma_r_l = torch.stack([forward_var_list[feat_index]] * feats.shape[1])
gamma_r_l = torch.squeeze(gamma_r_l)
next_tag_var = gamma_r_l + self.transitions
# bptrs_t=torch.argmax(next_tag_var,dim=0)
viterbi_vars_t, bptrs_t = torch.max(next_tag_var, dim=1)
t_r1_k = torch.unsqueeze(feats[feat_index], 0)
forward_var_new = torch.unsqueeze(viterbi_vars_t, 0) + t_r1_k
forward_var_list.append(forward_var_new)
back_pointers.append(bptrs_t.tolist())
# Transition to STOP_TAG
terminal_var = forward_var_list[-1] + self.transitions[self.tag_to_ix[STOP_TAG]]
best_tag_id = torch.argmax(terminal_var).tolist()
path_score = terminal_var[0][best_tag_id]
# path_scores = terminal_var[0][:2].tolist()
# Follow the back pointers to decode the best path.
best_path = [best_tag_id]
for bptrs_t in reversed(back_pointers):
best_tag_id = bptrs_t[best_tag_id]
best_path.append(best_tag_id)
# Pop off the start tag (we dont want to return that to the caller)
start = best_path.pop()
assert start == self.tag_to_ix[START_TAG] # Sanity check
best_path.reverse()
return path_score, best_path
def calculate_pro_new_(self, feat, tags_hat):
# prob_batch = torch.zeros([feats.size()[0], feats.size()[1]])
# print(self.transitions)
self.transitions.data[self.tag_to_ix[START_TAG], :] = 1e-3
self.transitions.data[:, self.tag_to_ix[STOP_TAG]] = 1e-3
feat = feat.unsqueeze(0)
feat = self.hidden2tag(feat).squeeze() # [10, 4]
feat.data[:, self.tag_to_ix[START_TAG]] = 1e-3
feat.data[:, self.tag_to_ix[STOP_TAG]] = 1e-3
trans_mat = self.transitions.data - torch.min(self.transitions.data, dim=0)[0].expand_as(self.transitions.data)
state_mat = feat - torch.min(feat, dim=1, keepdim=True)[0].expand_as(feat)
tags_hat = torch.cat([torch.tensor([self.tag_to_ix[START_TAG]], dtype=torch.long), tags_hat.view(-1)])
# [3, 1, .., 0] 共11个标签
prob_list = []
for i in range(feat.shape[0]):
# print(self.transitions[:, tags_hat[i]]) #
# print(feat[i, :])
score = trans_mat[tags_hat[i + 1], tags_hat[i]] + state_mat[i, tags_hat[i + 1]]
score_total = trans_mat[:2, tags_hat[i]] + state_mat[i, :2]
# print('score', score)
# print('score_total', score_total)
prob = score / torch.sum(score_total)
# print('prob', prob)
if tags_hat[i + 1] == 0:
prob_list.append(1 - prob.item())
# print('prob', 1 - prob.item())
else:
prob_list.append(prob.item())
# print('prob', prob.item())
return prob_list
def calculate_pro_new(self, feat, tags_hat):
    """Per-position probability list for a predicted tag path (v2).

    Unlike ``calculate_pro_new_`` this version restricts both matrices to
    the first two (real) tags up front, handles the first position without
    a transition term, and does NOT mutate ``self.transitions``.

    :param feat: hidden features for one sequence; projected through
        ``self.hidden2tag`` to emission scores.
        # assumes shape (seq_len, hidden_dim) — TODO confirm
    :param tags_hat: predicted tag indices (LongTensor).
    :return: list of floats, one probability per position.
    """
    # prob_batch = torch.zeros([feats.size()[0], feats.size()[1]])
    # print(self.transitions)
    # self.transitions.data[self.tag_to_ix[START_TAG], :] = 1e-3
    # self.transitions.data[:, self.tag_to_ix[STOP_TAG]] = 1e-3
    # Keep only the 2x2 transition block of the real tags and shift it to
    # be non-negative (column-wise minimum subtracted).
    trans_mat = self.transitions.data[:2, :2]
    # print(trans_mat)
    trans_mat = trans_mat - torch.min(trans_mat, dim=0, keepdim=True)[0].expand_as(trans_mat)
    # print(trans_mat)
    feat = feat.unsqueeze(0)
    feat = self.hidden2tag(feat).squeeze()  # emission scores, e.g. [10, 4]
    # Emission scores of the two real tags, shifted to be non-negative.
    state_mat = feat[:, :2]
    state_mat = state_mat - torch.min(state_mat, dim=1, keepdim=True)[0].expand_as(state_mat)
    # print(state_mat[0])
    # state_mat = state_mat / torch.sum(state_mat, dim=0)
    # print(state_mat[0])
    # exit()
    # feat.data[:, self.tag_to_ix[START_TAG]] = 1e-3
    # feat.data[:, self.tag_to_ix[STOP_TAG]] = 1e-3
    # Prepend START so tags_hat[i] -> tags_hat[i + 1] enumerates every transition.
    tags_hat = torch.cat([torch.tensor([self.tag_to_ix[START_TAG]], dtype=torch.long), tags_hat.view(-1)])
    # tags_hat = tags_hat.view(-1)
    # print(tags_hat)
    # e.g. [3, 1, .., 0] — START followed by the predicted tags.
    prob_list = []
    for i in range(feat.shape[0]):
        # print(self.transitions[:, tags_hat[i]])
        # print(feat[i, :])
        if i == 0:
            # First position: no incoming transition, emission score only.
            score = state_mat[i, tags_hat[i + 1]]
            score_total = state_mat[i, ]
        else:
            score = trans_mat[tags_hat[i + 1], tags_hat[i]] + state_mat[i, tags_hat[i + 1]]
            score_total = trans_mat[:, tags_hat[i]] + state_mat[i, ]
        # print('score', score)
        # print('score_total', score_total)
        prob = score / torch.sum(score_total)
        # print('prob', prob)
        # Report the probability of the positive tag (index 1); invert when
        # the prediction is tag 0.
        if tags_hat[i + 1] == 0:
            prob_list.append(1 - prob.item())
            # print('prob', 1 - prob.item())
        else:
            prob_list.append(prob.item())
            # print('prob', prob.item())
    return prob_list
def forward(self, feats):
    """Decode the best tag path (do not confuse with ``_forward_alg``).

    Projects the BiLSTM features to emission scores, then runs Viterbi
    decoding over them.

    :param feats: BiLSTM output features for one sequence.
    :return: ``(score, tag_seq)`` from ``self._viterbi_decode_new``.
    """
    emissions = self.hidden2tag(feats).squeeze(0)
    return self._viterbi_decode_new(emissions)
<file_sep>import torch
from torch import nn
class CNNBiLSTM(torch.nn.Module):
    """CNN front-end + bidirectional LSTM encoder with a linear classifier.

    A 1x1 convolution (followed by ReLU and 2x2 max-pooling) extracts local
    features, the BiLSTM summarises the resulting sequence (last time step
    only), and two linear layers produce a ``fea_dim``-dimensional feature
    that is finally mapped to ``n_class`` scores.
    """

    def __init__(self, in_dim, hidden_dim, n_layer, fea_dim, n_class, prob=0.6):
        super(CNNBiLSTM, self).__init__()
        self.in_dim = in_dim
        self.cnn = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=16, kernel_size=1, stride=1),
            # torch.nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self.rnn = nn.LSTM(in_dim, hidden_dim, n_layer,
                           bidirectional=True, batch_first=True, dropout=prob)
        self.fc = nn.Sequential(
            nn.Linear(hidden_dim * 2, 1024),
            nn.Linear(1024, fea_dim),
        )
        self.classifier = nn.Linear(fea_dim, n_class)

    def extract_feature(self, x):
        """Map a batch ``x`` to ``fea_dim`` features (last LSTM step only)."""
        features = self.cnn(x.unsqueeze(1))
        # Re-fold the pooled CNN output into a sequence of in_dim-sized steps.
        features = features.view(features.size(0), -1, self.in_dim)
        outputs, _ = self.rnn(features)
        return self.fc(outputs[:, -1, :])

    def forward(self, x):
        """Classify each sample in ``x``; returns raw class scores."""
        return self.classifier(self.extract_feature(x))
<file_sep>import numpy as np
import torch
import torch.nn.functional as func
from torch import nn
from torch.utils.data import Dataset, DataLoader
from pytorch_pretrained_bert import BertModel, BertTokenizer
from .utils_former import Transformer, Reformer
# Use cuda:0 when a GPU is required; due to limited GPU memory on a personal laptop, computation falls back to CPU. by wzb at 3.24
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # 让torch判断是否使用GPU,建议使用GPU环境,因为会快很多
FORMER = ['Transformer', 'Weighted-Transformer', 'Reformer']
torch.set_printoptions(threshold=np.inf)
# print(DEVICE)
# 新增Bert 暂无法使用 by wzb at 3.25
# class BertRes(nn.Module):
# def __init__(self, hidden_dim, n_classes):
# super(BertRes, self).__init__()
# self.bert = BertModel.from_pretrained("./bert_pretrain")
# for param in self.bert.parameters():
# param.requires_grad = True
# self.fc = nn.Linear(hidden_dim, n_classes)
#
# def forward(self, inputs, seq_mask):
# _, pooled = self.bert(inputs, attention_mask=seq_mask, output_all_encoded_layers=False)
# out = self.fc(pooled)
# return out
# 新增FastText 无法使用 by wzb at 3.26
# class FastTextRes(nn.Module):
# def __init__(self, in_dim, hidden_dim, n_classes, prob=0.6):
# super(FastTextRes, self).__init__()
# self.hidden_dim = hidden_dim
# self.fc1 = nn.Linear(in_dim, hidden_dim)
# self.fc2 = nn.Linear(hidden_dim, n_classes)
# # self.classifier = nn.Sequential(
# # nn.Linear(2 * hidden_dim, hidden_dim),
# # nn.ReLU(),
# # nn.Linear(hidden_dim, n_classes)
# # )
# # self.softmax = nn.Softmax()
#
# def forward(self, x):
# h = self.fc1(x.mean(1))
# z = self.fc2(h)
# return z
# 定义 Recurrent Network 模型
# class LSTMSeq(nn.Module):
# def __init__(self, in_dim, hidden_dim, n_layer, n_classes, prob=0.6):
# super(LSTMSeq, self).__init__()
# self.n_layer = n_layer
# self.hidden_dim = hidden_dim
# self.rnn = nn.LSTM(in_dim, hidden_dim, n_layer, batch_first=True, dropout=prob, bidirectional=True)
# self.classifier = nn.Sequential(
# nn.ReLU(),
# nn.Linear(2 * hidden_dim, n_classes)
# )
#
# def forward(self, x): # torch.Size([50, 20, 4]) (batch, seq_len, input_size) --> batch_first=True
# out, _ = self.rnn(x) # [b_size, len, 2*hidden_dim]
# out = out[:, -1, :] # [b_size, 2*hidden_dim]
# out = self.classifier(out) # [b_size, num_classes]
# return out
# my lstm_seq by wzb at 3.29
class LSTMSeq(nn.Module):
    """Sequence-level classifier — currently a single linear layer.

    The LSTM path is commented out; the model simply flattens the fixed
    275 x 4 input and applies one fully connected layer.
    """

    def __init__(self, in_size, hidden_size, num_of_layers, out_size, dropout=0.6):
        super(LSTMSeq, self).__init__()
        # self.lstm = nn.LSTM(in_size, hidden_size, num_of_layers, batch_first=True, dropout=dropout, bidirectional=True)
        # self.classifier = nn.Sequential(
        #     nn.ReLU(),
        #     nn.Linear(2*hidden_size, out_size)
        # )
        # NOTE(review): the input size is hard-coded to 275 * 4
        # (fixed_len * fea_dim); in_size / hidden_size / num_of_layers /
        # dropout are currently unused.
        self.l1 = nn.Linear(275 * 4, out_size)

    def forward(self, x):
        """Flatten ``x`` to (batch, 275*4) and classify."""
        flat = x.view((x.size()[0], -1))
        return self.l1(flat)
# Residue-level logic differs from sequence level, so it is written as a separate class
class LSTMRes(nn.Module):
    """Residue-level classifier: BiLSTM + per-position MLP head.

    Unlike the sequence-level models, the classifier is applied to every
    time step, producing one prediction per residue.
    """

    def __init__(self, in_dim, hidden_dim, n_layer, n_classes, prob=0.5):
        super(LSTMRes, self).__init__()
        self.n_layers = n_layer
        self.hidden_dim = hidden_dim
        self.rnn = nn.LSTM(in_dim, hidden_dim, n_layer, batch_first=True,
                           dropout=prob, bidirectional=True)
        self.classifier = nn.Sequential(
            nn.Linear(2 * hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, n_classes),
        )

    def forward(self, x):
        # x: [batch_size, seq_len, in_dim]
        encoded, _ = self.rnn(x)           # [batch_size, seq_len, 2*hidden_dim]
        return self.classifier(encoded)    # [batch_size, seq_len, n_classes]
class GRUSeq(nn.Module):
    """Sequence-level classifier: bidirectional GRU, last time step only."""

    def __init__(self, in_dim, hidden_dim, n_layer, n_classes, prob=0.6):
        super(GRUSeq, self).__init__()
        self.n_layer = n_layer
        self.hidden_dim = hidden_dim
        self.rnn = nn.GRU(in_dim, hidden_dim, n_layer, batch_first=True,
                          dropout=prob, bidirectional=True)
        self.classifier = nn.Sequential(
            nn.ReLU(),
            nn.Linear(2 * hidden_dim, n_classes),
        )

    def forward(self, x):
        # x: (batch, seq_len, in_dim) because batch_first=True.
        states, _ = self.rnn(x)
        last_state = states[:, -1, :]        # [batch, 2*hidden_dim]
        return self.classifier(last_state)   # [batch, n_classes]
# Residue-level logic differs, written as a separate class
class GRURes(nn.Module):
    """Residue-level classifier: bidirectional GRU + per-position MLP head."""

    def __init__(self, in_dim, hidden_dim, n_layer, n_classes, prob=0.5):
        super(GRURes, self).__init__()
        self.n_layers = n_layer
        self.hidden_dim = hidden_dim
        self.rnn = nn.GRU(in_dim, hidden_dim, n_layer, batch_first=True,
                          dropout=prob, bidirectional=True)
        self.classifier = nn.Sequential(
            nn.Linear(2 * hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, n_classes),
        )

    def forward(self, x):
        # x: [batch_size, seq_len, in_dim]
        encoded, _ = self.rnn(x)           # [batch_size, seq_len, 2*hidden_dim]
        return self.classifier(encoded)    # [batch_size, seq_len, n_classes]
# The CNN here originally used 2-D convolution, but biological sequences resemble text; the classic text model is textCNN with 1-D convolution, so a textCNN-like model is tried here as well
class CNNSeq(nn.Module):
    """Sequence-level 1-D CNN classifier (textCNN-like).

    Batch-normalises the raw input over the fixed sequence length (275),
    applies a same-padded Conv1d over the feature dimension, global
    max-pools over the sequence, then classifies the pooled vector.
    """

    def __init__(self, inchannels, outchannels, kernel_size, n_classes, prob=0.5):
        super(CNNSeq, self).__init__()
        padding = (kernel_size - 1) // 2  # "same" padding for odd kernels
        # BatchNorm over the fixed input length (275 positions).
        self.bn_input = nn.BatchNorm1d(275, momentum=0.5)
        self.cnn = nn.Conv1d(in_channels=inchannels,
                             out_channels=outchannels,
                             kernel_size=kernel_size,
                             padding=padding,
                             stride=1)
        self.dropout = nn.Dropout(prob)
        self.bn = nn.BatchNorm1d(outchannels, momentum=0.5)
        self.classifier = nn.Sequential(
            nn.Linear(outchannels, 2 * outchannels),
            nn.ReLU(),
            nn.Linear(2 * outchannels, n_classes),
        )

    def forward(self, x):
        normed = self.bn_input(x)
        conv_in = normed.permute(0, 2, 1)          # [batch, inchannels, seq]
        conv_out = self.bn(self.cnn(conv_in))
        # Global max-pool over the whole sequence -> one vector per sample.
        pooled = func.max_pool1d(conv_out, kernel_size=conv_out.shape[2])
        pooled = pooled.transpose(1, 2).contiguous().view(pooled.shape[0], -1)
        return self.classifier(self.dropout(pooled))
# Define CNN (note: this CNN implementation has some issues)
class CNNRes(nn.Module):
    """Residue-level 1-D CNN: same-padded convolution + per-position MLP."""

    def __init__(self, inchannels, outchannels, kernel_size, n_classes, prob):
        super(CNNRes, self).__init__()
        padding = (kernel_size - 1) // 2  # keeps the sequence length unchanged
        self.cnn = nn.Conv1d(in_channels=inchannels,
                             out_channels=outchannels,
                             kernel_size=kernel_size,
                             padding=padding,
                             stride=1)
        self.dropout = nn.Dropout(prob)
        self.classifier = nn.Sequential(
            nn.Linear(outchannels, 2 * outchannels),
            nn.ReLU(),
            nn.Linear(2 * outchannels, n_classes),
        )

    def forward(self, x):
        # x: [batch_size, seq_len, inchannels]
        conv_out = self.cnn(x.permute(0, 2, 1))
        per_residue = conv_out.transpose(1, 2).contiguous()
        return self.classifier(self.dropout(per_residue))
# Sequence-level classes for Transformer and WeightedTransformer
class TransformerSeq(nn.Module):
    """Sequence-level (Weighted-)Transformer classifier.

    Note: unlike the LSTM models, the encoder output for the whole
    fixed-length sequence is flattened rather than taking the last
    position — hence the extra ``fixed_len`` constructor argument.
    """

    def __init__(self, fixed_len, feature_dim, n_layers, d_k, d_v, d_model, d_ff, n_heads, n_classes, dropout=0.1,
                 weighted=False):
        super(TransformerSeq, self).__init__()
        self.transformer = Transformer(feature_dim, n_layers, d_k, d_v, d_model, d_ff, n_heads, dropout, weighted)
        self.classifier = nn.Sequential(
            nn.Linear(d_model * fixed_len, d_model),
            nn.ReLU(),
            nn.Linear(d_model, n_classes),
        )

    def forward(self, inputs, seq_mask, mask=False, return_attn=False):
        encoded, _ = self.transformer(inputs, seq_mask, mask, return_attn)
        # Flatten all positions into one vector before classification.
        flattened = encoded.view(encoded.shape[0], -1)
        return self.classifier(flattened)
# Residue-level classes for Transformer and WeightedTransformer
class TransformerRes(nn.Module):
    """Residue-level (Weighted-)Transformer: per-position classification head."""

    def __init__(self, feature_dim, n_layers, d_k, d_v, d_model, d_ff, n_heads, n_classes, dropout=0.1, weighted=False):
        super(TransformerRes, self).__init__()
        self.transformer = Transformer(feature_dim, n_layers, d_k, d_v, d_model, d_ff, n_heads, dropout, weighted)
        self.classifier = nn.Sequential(
            nn.Linear(d_model, 2 * d_model),
            nn.ReLU(),
            nn.Linear(2 * d_model, n_classes),
        )

    def forward(self, inputs, seq_mask, mask=True, return_attn=False):
        encoded, _ = self.transformer(inputs, seq_mask, mask, return_attn)
        # Classify every position independently.
        return self.classifier(encoded)
# Sequence-level class for Reformer
class ReformerSeq(nn.Module):
    """Sequence-level Reformer classifier.

    The encoder output for the whole fixed-length sequence is flattened
    (instead of taking the last position), hence the ``fixed_len`` argument.
    """

    def __init__(self, n_classes, fixed_len, d_model, d_ff, n_heads, n_chunk, rounds, bucket_length, n_layer,
                 dropout_prob=0.1):
        super(ReformerSeq, self).__init__()
        self.reformer = Reformer(d_model, d_ff, n_heads, n_chunk, rounds, bucket_length, n_layer, dropout_prob)
        self.classifier = nn.Sequential(
            nn.Linear(d_model * fixed_len, d_model),
            nn.ReLU(),
            nn.Linear(d_model, n_classes),
        )

    def forward(self, inputs, seq_mask, mask=False, return_attn=False):
        # seq_mask / mask / return_attn are accepted for interface
        # compatibility with the Transformer models but are unused here;
        # the Reformer is fed the inputs twice directly.
        encoded = self.reformer(inputs, inputs)
        flattened = encoded.view(encoded.shape[0], -1)
        return self.classifier(flattened)
# Residue-level class for Reformer (comment was previously mis-copied from the Transformer section)
class ReformerRes(nn.Module):
    """Residue-level Reformer: per-position classification head."""

    def __init__(self, n_classes, d_model, d_ff, n_heads, n_chunk, rounds, bucket_length, n_layer, dropout_prob=0.1):
        super(ReformerRes, self).__init__()
        self.reformer = Reformer(d_model, d_ff, n_heads, n_chunk, rounds, bucket_length, n_layer, dropout_prob)
        self.classifier = nn.Sequential(
            nn.Linear(d_model, 2 * d_model),
            nn.ReLU(),
            nn.Linear(2 * d_model, n_classes),
        )

    def forward(self, inputs):
        encoded = self.reformer(inputs, inputs)
        # Classify every position independently.
        return self.classifier(encoded)
class MyDataset(Dataset):
    """Wraps padded feature/label arrays plus the true sequence lengths.

    Each item is ``(feature, target, length_array, max_len)`` where
    ``length_array`` is a 1-element int64 numpy array holding the real
    (unpadded) length of the sequence.
    """

    def __init__(self, feature, target, length, max_len):
        self.feature = feature
        self.target = target
        self.length = length
        self.max_len = max_len

    def __getitem__(self, index):
        # BUG FIX: the deprecated np.int alias was removed in NumPy 1.24;
        # use the explicit int64 dtype instead.
        return self.feature[index], self.target[index], np.array([self.length[index]], dtype=np.int64), self.max_len

    def __len__(self):
        return len(self.feature)
def batch_seq(data):
    """Collate function for sequence-level tasks.

    :param data: iterable of ``(feature, target, length, max_len)`` tuples
        as produced by ``MyDataset``.
    :return: ``(inputs, inputs_length, input_mask, targets)`` batched
        tensors; ``targets`` holds ONE label per sequence.
    """
    inputs = []
    inputs_length = []
    input_mask = []
    targets = []
    for feature, target, length, max_len in data:
        inputs.append(torch.FloatTensor(feature).unsqueeze(0))  # [1, fixed_len, fea_dim]
        # BUG FIX: np.int was removed in NumPy 1.24 — use explicit int64.
        inputs_length.append(torch.LongTensor(np.array(length, dtype=np.int64)))  # [1]
        mask = sequence_mask(torch.LongTensor(np.array(length, dtype=np.int64)), max_len)
        input_mask.append(torch.FloatTensor(mask))  # [1, max_len]
        targets.append(torch.LongTensor(np.array([target], dtype=np.int64)))  # [1]
    inputs = torch.cat(inputs)
    inputs_length = torch.cat(inputs_length)
    input_mask = torch.cat(input_mask)
    targets = torch.cat(targets)
    return inputs, inputs_length, input_mask, targets
def batch_res(data):
    """Collate function for residue-level tasks.

    Like ``batch_seq``, but ``target`` is a per-position label sequence,
    so targets are stacked as [batch, fixed_len].

    :param data: iterable of ``(feature, target, length, max_len)`` tuples.
    :return: ``(inputs, inputs_length, input_mask, targets)`` batched tensors.
    """
    inputs = []
    inputs_length = []
    input_mask = []
    targets = []
    for feature, target, length, max_len in data:
        inputs.append(torch.FloatTensor(feature).unsqueeze(0))  # [1, fixed_len, fea_dim]
        # BUG FIX: np.int was removed in NumPy 1.24 — use explicit int64.
        inputs_length.append(torch.LongTensor(np.array(length, dtype=np.int64)))  # [1]
        mask = sequence_mask(torch.LongTensor(np.array(length, dtype=np.int64)), max_len)
        input_mask.append(torch.FloatTensor(mask))  # [1, max_len]
        targets.append(torch.LongTensor(np.array(target, dtype=np.int64)).unsqueeze(0))  # [1, fixed_len]
    inputs = torch.cat(inputs)
    inputs_length = torch.cat(inputs_length)
    input_mask = torch.cat(input_mask)
    targets = torch.cat(targets)
    return inputs, inputs_length, input_mask, targets
def sequence_mask(lengths, max_len):
    """Build a float mask of shape [batch_size, max_len] from lengths.

    :param lengths: 1-D tensor of true sequence lengths, one per sample.
    :param max_len: the padded (fixed) length.
    :return: tensor where position (i, j) is 1.0 iff j < lengths[i].
    """
    batch = lengths.numel()
    positions = torch.arange(0, max_len).type_as(lengths)
    grid = positions.unsqueeze(0).expand(batch, max_len)
    return grid.lt(lengths.unsqueeze(1)).float()
def criterion_func(inputs, targets, seq_length, mask):
    """Masked cross-entropy loss normalised by the true sequence lengths.

    :param inputs: raw scores [batch, seq_len, n_classes].
    :param targets: gold labels [batch, seq_len].
    :param seq_length: true lengths [batch]; used as the denominator.
    :param mask: [batch, seq_len]; 1 for real positions, 0 for padding.
    :return: scalar loss tensor.
    """
    probs = func.softmax(inputs, dim=-1)             # [batch, seq_len, n_classes]
    probs = torch.clamp(probs, min=1e-8, max=1)      # avoid log(0)
    gold = func.one_hot(targets, num_classes=inputs.size()[-1]).float()
    log_likelihood = torch.sum(torch.log(probs) * gold, dim=-1)  # [batch, seq_len]
    masked_nll = (-log_likelihood * mask).view(-1)   # drop padded positions
    return torch.sum(masked_nll) / torch.sum(seq_length)
class TorchNetSeq(object):
    """Training/evaluation harness for the sequence-level deep models.

    :param net: one of 'LSTM', 'GRU', 'CNN', 'Transformer',
        'Weighted-Transformer'; any other value selects the Reformer.
    :param max_len: fixed (padded) sequence length.
    :param criterion: loss callable used as ``criterion(output, target)``.
    :param params_dict: hyper-parameters ('dropout', 'batch_size', plus the
        model-specific keys read in :meth:`net_type`).
    """

    def __init__(self, net, max_len, criterion, params_dict):
        super(TorchNetSeq, self).__init__()
        self.net = net
        self.dropout = params_dict['dropout']
        self.batch_size = params_dict['batch_size']
        self.max_len = max_len
        self.criterion = criterion
        self.params_dict = params_dict

    def prepare(self, data, labels, input_length, shuffle=True):
        """Build a DataLoader over (data, labels, lengths) with the
        sequence-level collate function ``batch_seq``."""
        dataset = MyDataset(data, labels, input_length, self.max_len)
        data_iter = DataLoader(dataset, batch_size=self.batch_size, shuffle=shuffle, collate_fn=batch_seq)
        return data_iter

    def net_type(self, in_dim, n_classes):
        """Instantiate the model selected by ``self.net`` on DEVICE.

        :param in_dim: per-position feature dimension.
        :param n_classes: number of output classes.
        """
        if self.net == 'LSTM':
            hidden_dim = self.params_dict['hidden_dim']
            n_layer = self.params_dict['n_layer']
            model = LSTMSeq(in_dim, hidden_dim, n_layer, n_classes, self.dropout).to(DEVICE)
        elif self.net == 'GRU':
            hidden_dim = self.params_dict['hidden_dim']
            n_layer = self.params_dict['n_layer']
            model = GRUSeq(in_dim, hidden_dim, n_layer, n_classes, self.dropout).to(DEVICE)
        elif self.net == 'CNN':
            out_channels = self.params_dict['out_channels']
            kernel_size = self.params_dict['kernel_size']
            model = CNNSeq(in_dim, out_channels, kernel_size, n_classes, self.dropout).to(DEVICE)
        elif self.net == 'Transformer':
            n_layer = self.params_dict['n_layer']
            d_model = self.params_dict['d_model']
            d_ff = self.params_dict['d_ff']
            n_heads = self.params_dict['n_heads']
            model = TransformerSeq(self.max_len, in_dim, n_layer, d_model, d_model, d_model,
                                   d_ff, n_heads, n_classes, self.dropout, False).to(DEVICE)
        elif self.net == 'Weighted-Transformer':
            n_layer = self.params_dict['n_layer']
            d_model = self.params_dict['d_model']
            d_ff = self.params_dict['d_ff']
            n_heads = self.params_dict['n_heads']
            model = TransformerSeq(self.max_len, in_dim, n_layer, d_model, d_model, d_model,
                                   d_ff, n_heads, n_classes, self.dropout, True).to(DEVICE)
        else:
            # Reformer branch.
            d_model = self.params_dict['d_model']
            d_ff = self.params_dict['d_ff']
            n_heads = self.params_dict['n_heads']
            n_chunk = self.params_dict['n_chunk']
            rounds = self.params_dict['rounds']
            bucket_length = self.params_dict['bucket_length']
            n_layer = self.params_dict['n_layer']
            # BUG FIX: this branch used to instantiate TransformerSeq with
            # ReformerSeq's argument list; it now builds the intended model.
            model = ReformerSeq(n_classes, self.max_len, d_model, d_ff, n_heads, n_chunk, rounds, bucket_length,
                                n_layer, self.dropout).to(DEVICE)
        return model

    def train(self, model, optimizer, train_x, train_y, train_len_list, epoch):
        """Run one training epoch; prints the loss every 20 batches."""
        model.train()
        train_loader = self.prepare(train_x, train_y, train_len_list)
        for batch_idx, (inputs, inputs_length, input_mask, target) in enumerate(train_loader):
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            # Use .to(DEVICE) instead of unconditional .cuda() so the code
            # also runs on CPU-only machines (DEVICE already falls back).
            inputs = inputs.to(DEVICE)
            inputs_length = inputs_length.to(DEVICE)
            input_mask = input_mask.to(DEVICE)
            target = target.to(DEVICE)
            if self.net in FORMER:
                output = model(inputs, input_mask)
            else:
                output = model(inputs)
            loss = self.criterion(output, target)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if (batch_idx + 1) % 20 == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch, batch_idx * len(inputs), len(train_loader.dataset), 100. * batch_idx / len(train_loader),
                    loss.item()))

    def test(self, model, test_x, test_y, test_len_list):
        """Evaluate ``model`` on the test split.

        :return: ``(predict, target_list, prob_list, test_loss)`` — flat
            per-sequence predictions, gold labels, positive-class
            probabilities and the average loss.
        """
        model.eval()
        test_loss = 0
        correct = 0
        all_num = 0
        predict = []
        prob_list = []
        target_list = []
        test_loader = self.prepare(test_x, test_y, test_len_list, shuffle=False)
        for inputs, inputs_length, input_mask, target in test_loader:
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            inputs = inputs.to(DEVICE)
            inputs_length = inputs_length.to(DEVICE)
            input_mask = input_mask.to(DEVICE)
            target = target.to(DEVICE)
            if self.net in FORMER:
                output = model(inputs, input_mask)
            else:
                output = model(inputs)
            test_loss += self.criterion(output, target)
            output = torch.softmax(output, dim=-1)
            predict_label = torch.max(output, dim=-1)[1]
            num = 0
            for i in range(len(input_mask)):
                prob_list.append(float(output[i][1]))  # probability of class 1
                predict.append(int(predict_label[i]))
                target_list.append(int(target[i]))
                if predict_label[i] == target[i]:
                    num += 1
            correct += num
            all_num += len(input_mask)
        test_loss /= len(test_loader.dataset)
        print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
            test_loss, correct, all_num,
            100. * correct / all_num))
        return predict, target_list, prob_list, test_loss
class TorchNetRes(object):
    """Training/evaluation harness for the residue-level deep models.

    Mirrors ``TorchNetSeq``, but the models emit one prediction per
    sequence position, so the loss and the accuracy are masked/normalised
    by the true (unpadded) sequence lengths.

    :param net: model name; see :meth:`net_type` for supported values.
    :param max_len: fixed (padded) sequence length.
    :param criterion: loss callable used as
        ``criterion(output, target, inputs_length, input_mask)``.
    :param params_dict: hyper-parameters ('dropout', 'batch_size', plus the
        model-specific keys read in :meth:`net_type`).
    """

    def __init__(self, net, max_len, criterion, params_dict):
        super(TorchNetRes, self).__init__()
        self.net = net
        self.dropout = params_dict['dropout']
        self.batch_size = params_dict['batch_size']
        self.max_len = max_len
        self.criterion = criterion
        self.params_dict = params_dict

    def prepare(self, data, labels, input_length, shuffle=True):
        """Build a DataLoader with the residue-level collate function ``batch_res``."""
        dataset = MyDataset(data, labels, input_length, self.max_len)
        data_iter = DataLoader(dataset, batch_size=self.batch_size, shuffle=shuffle, collate_fn=batch_res)
        return data_iter

    def net_type(self, in_dim, n_classes):
        """Instantiate the residue-level model selected by ``self.net``.

        :param in_dim: per-position feature dimension.
        :param n_classes: number of output classes per position.
        """
        if self.net == 'LSTM':
            hidden_dim = self.params_dict['hidden_dim']
            n_layer = self.params_dict['n_layer']
            model = LSTMRes(in_dim, hidden_dim, n_layer, n_classes, self.dropout).to(DEVICE)
        elif self.net == 'GRU':
            hidden_dim = self.params_dict['hidden_dim']
            n_layer = self.params_dict['n_layer']
            model = GRURes(in_dim, hidden_dim, n_layer, n_classes, self.dropout).to(DEVICE)
        elif self.net == 'CNN':
            out_channels = self.params_dict['out_channels']
            kernel_size = self.params_dict['kernel_size']
            model = CNNRes(in_dim, out_channels, kernel_size, n_classes, self.dropout).to(DEVICE)
        elif self.net == 'Transformer':
            n_layer = self.params_dict['n_layer']
            d_model = self.params_dict['d_model']
            d_ff = self.params_dict['d_ff']
            n_heads = self.params_dict['n_heads']
            model = TransformerRes(in_dim, n_layer, d_model, d_model, d_model,
                                   d_ff, n_heads, n_classes, self.dropout, False).to(DEVICE)
        elif self.net == 'Weighted-Transformer':
            n_layer = self.params_dict['n_layer']
            d_model = self.params_dict['d_model']
            d_ff = self.params_dict['d_ff']
            n_heads = self.params_dict['n_heads']
            model = TransformerRes(in_dim, n_layer, d_model, d_model, d_model,
                                   d_ff, n_heads, n_classes, self.dropout, True).to(DEVICE)
        # A Bert branch existed here but its model class is commented out
        # at module level (added 3.25, "not usable yet").
        elif self.net == 'FastText':
            hidden_dim = self.params_dict['hidden_dim']
            # NOTE(review): FastTextRes is commented out at module level, so
            # selecting 'FastText' currently raises NameError — confirm intent.
            model = FastTextRes(in_dim, hidden_dim, n_classes, self.dropout)
        else:
            # Reformer branch.
            d_model = self.params_dict['d_model']
            d_ff = self.params_dict['d_ff']
            n_heads = self.params_dict['n_heads']
            n_chunk = self.params_dict['n_chunk']
            rounds = self.params_dict['rounds']
            bucket_length = self.params_dict['bucket_length']
            n_layer = self.params_dict['n_layer']
            # BUG FIX: this branch used to instantiate TransformerRes with
            # ReformerRes's argument list; it now builds the intended model.
            model = ReformerRes(n_classes, d_model, d_ff, n_heads, n_chunk, rounds, bucket_length, n_layer,
                                self.dropout).to(DEVICE)
        return model

    def train(self, model, optimizer, train_x, train_y, train_len_list, epoch):
        """Run one training epoch; prints the loss every 20 batches."""
        model.train()
        train_loader = self.prepare(train_x, train_y, train_len_list)
        for batch_idx, (inputs, inputs_length, input_mask, target) in enumerate(train_loader):
            # GPU transfer was deliberately disabled by the author (3.24);
            # tensors stay on CPU. Re-enable with .to(DEVICE) if needed.
            if self.net in ['LSTM', 'GRU', 'CNN', 'FastText']:
                output = model(inputs)
            else:
                output = model(inputs, input_mask)
            # Length-aware masked loss: one label per residue.
            loss = self.criterion(output, target, inputs_length, input_mask)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if (batch_idx + 1) % 20 == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch, batch_idx * len(inputs), len(train_loader.dataset), 100. * batch_idx / len(train_loader),
                    loss.item()))

    def test(self, model, test_x, test_y, test_len_list):
        """Evaluate ``model`` on the test split.

        :return: flat prediction list, flat gold-label list, flat
            positive-class probability list, average loss, plus per-sequence
            probability and prediction lists (padded positions excluded).
        """
        model.eval()
        test_loss = 0
        correct = 0
        all_num = 0
        predict = []
        prob_list = []
        target_list = []
        prob_list_format = []
        predict_list_format = []
        test_loader = self.prepare(test_x, test_y, test_len_list, shuffle=False)
        for inputs, inputs_length, input_mask, target in test_loader:
            # GPU transfer disabled by the author; tensors stay on CPU.
            if self.net in FORMER:
                output = model(inputs, input_mask)
            else:
                output = model(inputs)
            test_loss += self.criterion(output, target, inputs_length, input_mask)
            output = func.softmax(output, dim=-1)
            predict_label = torch.max(output, dim=-1)[1]
            num = 0
            for i in range(len(input_mask)):
                pred_list = []
                prob = []
                for j in range(len(input_mask[i])):
                    if input_mask[i][j] > 0:  # skip padded positions
                        prob_list.append(float(output[i][j][1]))
                        prob.append(float(output[i][j][1]))
                        pred_list.append(int(predict_label[i][j]))
                        predict.append(int(predict_label[i][j]))
                        target_list.append(int(target[i][j]))
                        if predict_label[i][j] == target[i][j]:
                            num += 1
                prob_list_format.append(prob)
                predict_list_format.append(pred_list)
            correct += num
            all_num += torch.sum(inputs_length)
        test_loss /= len(test_loader.dataset)
        print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
            test_loss, correct, all_num,
            100. * correct / all_num))
        return predict, target_list, prob_list, test_loss, prob_list_format, predict_list_format
<file_sep>from ..utils.utils_words import mismatch_words
from ..utils.utils_algorithm import text_rank
def mismatch_text_rank(input_file, alphabet, fixed_len, word_size, alpha, fixed=True):
    """Tokenise sequences into mismatch words, then rank them with TextRank.

    :param input_file: sequence input forwarded to ``mismatch_words``.
    :param alphabet: allowed characters for the sequences.
    :param fixed_len: fixed sequence length used for tokenisation.
    :param word_size: mismatch word size.
    :param alpha: damping factor forwarded to ``text_rank``.
    :param fixed: whether sequences are treated as fixed-length.
    """
    words = mismatch_words(input_file, alphabet, fixed_len, word_size, fixed)
    return text_rank(words, alpha)
<file_sep>import sys
class Seq:
    """A single named biological sequence record from a FASTA file.

    The raw sequence is normalised to upper case; ``length`` is taken from
    the raw (pre-upper-cased) string.
    """

    def __init__(self, name, seq, no):
        self.name = name
        self.no = no
        self.length = len(seq)
        self.seq = seq.upper()

    def __str__(self):
        """Output seq when 'print' method is called."""
        return "%s\tNo:%s\tlength:%s\n%s" % (self.name, str(self.no), str(self.length), self.seq)
def is_under_alphabet(s, alphabet):
    """Check that every character of ``s`` belongs to ``alphabet``.

    :param s: the string to validate.
    :param alphabet: iterable of allowed characters.
    :return: True when all characters are legal, otherwise the FIRST
        offending character (note: truthy — compare with ``is True``).
    """
    for ch in s:
        if ch not in alphabet:
            return ch
    return True
def is_fasta(seq):
    """Validate that a Seq object is a legal FASTA record.

    A record is rejected when: it has no name, the name still contains a
    '>' character, or the sequence body is empty.

    :param seq: Seq object.
    :return: True when valid; False (with a message on stderr) otherwise.
    """
    if not seq.name:
        error_info = 'Error, sequence ' + str(seq.no) + ' has no sequence name.'
        print(seq)
        sys.stderr.write(error_info)
        return False
    if seq.name.find('>') != -1:
        sys.stderr.write('Error, sequence ' + str(seq.no) + ' name has > character.')
        return False
    if seq.length == 0:
        sys.stderr.write('Error, sequence ' + str(seq.no) + ' is null.')
        return False
    return True
def read_fasta(f):
    """Read a whole fasta file into a list of Seq objects.

    :param f: HANDLE to input. e.g. sys.stdin, or open(<file>)
    :return: Seq obj list; the process exits if any record fails is_fasta.

    NOTE(review): ``count`` is incremented for header lines AND for every
    sequence line, so ``Seq.no`` is a line-based counter here, while
    ``read_fasta_yield`` counts records only — confirm which is intended.
    """
    name, seq = '', ''
    count = 0
    seq_list = []
    lines = f.readlines()
    for line in lines:
        if not line:
            break
        if '>' == line[0]:
            # Flush the previous record (skipped only before the very first header).
            if 0 != count or (0 == count and seq != ''):
                if is_fasta(Seq(name, seq, count)):
                    seq_list.append(Seq(name, seq, count))
                else:
                    sys.exit(0)
            seq = ''
            name = line[1:].strip()
            count += 1
        else:
            seq += line.strip()
            count += 1
    # Flush the final record.
    if is_fasta(Seq(name, seq, count)):
        seq_list.append(Seq(name, seq, count))
    else:
        sys.exit(0)
    return seq_list
def read_fasta_yield(f):
    """Lazily yield one Seq object per FASTA record.

    :param f: HANDLE to input. e.g. sys.stdin, or open(<file>)

    Exits the process (via sys.exit) when a record fails ``is_fasta``.
    NOTE(review): here ``count`` counts header lines only, whereas
    ``read_fasta`` counts every line — the two give different ``Seq.no``
    values for the same file; confirm which is intended.
    """
    name, seq = '', ''
    count = 0
    while True:
        line = f.readline()
        if not line:
            break
        if '>' == line[0]:
            # Flush the previous record (skipped only before the first header).
            if 0 != count or (0 == count and seq != ''):
                if is_fasta(Seq(name, seq, count)):
                    yield Seq(name, seq, count)
                else:
                    sys.exit(0)
            seq = ''
            name = line[1:].strip()
            count += 1
        else:
            seq += line.strip()
    # Flush the final record.
    if is_fasta(Seq(name, seq, count)):
        yield Seq(name, seq, count)
    else:
        sys.exit(0)
def read_fasta_check_dna(f, alphabet):
    """Read a FASTA file and verify every sequence is within ``alphabet``.

    :param alphabet: string of allowed characters.
    :param f: HANDLE to input. e.g. sys.stdin, or open(<file>)
    :return: list of Seq objects; exits with an error message on an
        illegal character.
    """
    seq_list = []
    for e in read_fasta_yield(f):
        res = is_under_alphabet(e.seq, alphabet)
        # BUG FIX: is_under_alphabet returns the offending character
        # (truthy) on failure, so the old `if res:` accepted every
        # sequence; compare identity with True instead.
        if res is True:
            seq_list.append(e)
        else:
            error_info = 'Sorry, sequence ' + str(e.no) \
                         + ' has character ' + str(res) + '.(The character must be ' + alphabet + ').'
            sys.exit(error_info)
    return seq_list
def get_sequence_check_dna(f, alphabet):
    """Read a FASTA file and return the raw sequence strings.

    Exits with an error message when any sequence contains a character
    outside ``alphabet``.

    :param f: HANDLE to input. e.g. sys.stdin, or open(<file>)
    :param alphabet: string of allowed characters.
    :return: list of sequence strings.
    """
    sequence_list = []
    for record in read_fasta_yield(f):
        res = is_under_alphabet(record.seq, alphabet)
        if res is not True:
            print(record.name)
            error_info = 'Error, sequence ' + str(record.no) \
                         + ' has character ' + str(res) + '.(The character must be ' + alphabet + ').'
            sys.exit(error_info)
        # print(record.no) / print(record.name) / print(record.seq)
        sequence_list.append(record.seq)
    return sequence_list
def is_sequence_list(sequence_list, alphabet):
    """Validate raw sequences against ``alphabet`` and upper-case them.

    :param sequence_list: list of raw sequence strings.
    :param alphabet: string of allowed characters.
    :return: the upper-cased list, or False (message on stderr) when an
        illegal character is found.
    """
    count = 0
    new_sequence_list = []
    for e in sequence_list:
        e = e.upper()
        count += 1
        res = is_under_alphabet(e, alphabet)
        if res is not True:
            # BUG FIX: the message used to hard-code "A, C, G or T" even for
            # RNA/protein alphabets; report the actual alphabet, matching
            # the sibling check functions.
            error_info = 'Sorry, sequence ' + str(count) \
                         + ' has illegal character ' + str(res) + '.(The character must be ' + alphabet + ').'
            sys.stderr.write(error_info)
            return False
        else:
            new_sequence_list.append(e)
    return new_sequence_list
def get_seqs(input_file, alphabet, desc=False):
    """Get checked sequence data from a file handle or a list.

    :param alphabet: DNA, RNA or Protein alphabet string.
    :param input_file: file-like object or list of raw sequences.
    :param desc: with this option the return value is a list of Seq
        objects (file input only) instead of plain strings.
    :return: sequence data, or the process exits on invalid input.
    """
    # modified at 2020/05/10
    if hasattr(input_file, 'read'):
        if desc is False:
            return get_sequence_check_dna(input_file, alphabet)
        return read_fasta_check_dna(input_file, alphabet)  # Seq(name, seq, count) objects
    if isinstance(input_file, list):
        checked = is_sequence_list(input_file, alphabet)
        if checked is not False:
            return checked
        sys.exit(0)
    error_info = 'Sorry, the parameter in get_data method must be list or file type.'
    sys.exit(error_info)
<file_sep>from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
import joblib
import os
import numpy as np
from ..utils.utils_results import performance, final_results_output, prob_output, print_metric_dict
from ..utils.utils_plot import plot_roc_curve, plot_pr_curve, plot_roc_ind, plot_pr_ind
from ..utils.utils_math import sampling
from ..utils.utils_read import FormatRead
Metric_List = ['Acc', 'MCC', 'AUC', 'BAcc', 'Sn', 'Sp', 'Pr', 'Rc', 'F1']
def ml_cv_process(ml, vectors, labels, folds, metric, sp, multi, res, params_dict):
    """Run one cross-validation pass for a given SVM/RF parameter setting.

    :param ml: 'SVM' selects an RBF SVC; anything else selects RandomForest.
    :param vectors: feature matrix, indexable by fold index arrays.
    :param labels: label array aligned with ``vectors``.
    :param folds: iterable of (train_index, val_index) pairs.
    :param metric: index into Metric_List selecting the reported metric.
    :param sp: sampling strategy name, or 'none' to skip resampling.
    :param multi: multi-class flag forwarded to ``performance``.
    :param res: residue-level flag forwarded to ``performance``.
    :param params_dict: hyper-parameters — 'cost'/'gamma' (SVM, log2 scale)
        or 'tree' (RF); the averaged metric is written back under 'metric'.
    :return: ``params_dict`` with the cross-validated metric value added.
    """
    results = []
    print_len = 40
    if ml == 'SVM':
        temp_str1 = ' cost = 2 ** ' + str(params_dict['cost']) + ' | ' + 'gamma = 2 ** ' + \
                    str(params_dict['gamma']) + ' '
    else:
        temp_str1 = ' tree = ' + str(params_dict['tree']) + ' '
    print(temp_str1.center(print_len, '+'))
    for train_index, val_index in folds:
        x_train, y_train, x_val, y_val = get_partition(vectors, labels, train_index, val_index)
        if sp != 'none':
            # Optional resampling on the training fold only.
            x_train, y_train = sampling(sp, x_train, y_train)
        if ml == 'SVM':
            clf = svm.SVC(C=2 ** params_dict['cost'], gamma=2 ** params_dict['gamma'], probability=True)
        else:
            clf = RandomForestClassifier(random_state=42, n_estimators=params_dict['tree'])
        clf.fit(x_train, y_train)
        # Column 1 of predict_proba = probability of the positive class.
        y_val_prob = clf.predict_proba(x_val)[:, 1]
        y_val_ = clf.predict(x_val)
        result = performance(y_val, y_val_, y_val_prob, multi, res)
        # acc, mcc, auc, balance_acc, sn, sp, p, r, f1
        results.append(result)
    cv_results = np.array(results).mean(axis=0)
    params_dict['metric'] = cv_results[metric]
    temp_str2 = ' metric value: ' + Metric_List[metric] + ' = ' + '%.3f ' % cv_results[metric]
    print(temp_str2.center(print_len, '*'))
    print('\n')
    return params_dict
def get_partition(vectors, labels, train_index, val_index):
    """Split the feature matrix and labels into train/validation parts.

    :param vectors: feature matrix indexable by index arrays.
    :param labels: label array aligned with ``vectors``.
    :param train_index: indices of the training fold.
    :param val_index: indices of the validation fold.
    :return: (x_train, y_train, x_val, y_val).
    """
    return (vectors[train_index], labels[train_index],
            vectors[val_index], labels[val_index])
def ml_cv_results(ml, vectors, labels, folds, sp, multi, res, out_dir, params_dict):
    """Cross-validate with the tuned hyper-parameters, save plots/metrics/probabilities,
    then re-fit on the whole data set and persist the model to ``out_dir``.

    :param ml: 'SVM' selects an SVC, anything else a random forest.
    :param folds: iterable of (train_index, test_index) pairs covering all samples.
    :param sp: sampling strategy; 'none' disables re-sampling.
    :param res: residue-level flag forwarded to performance().
    :param out_dir: directory receiving the plots, metric files and saved model.
    :param params_dict: tuned hyper-parameters ('cost'/'gamma' or 'tree').
    """
    results = []
    print_len = 60
    print('\n')
    if ml == 'SVM':
        print(' The optimal parameters for SVM are as follows '.center(print_len, '*'))
        temp_str1 = ' cost = 2 ** ' + str(params_dict['cost']) + ' | ' + 'gamma = 2 ** ' + \
                    str(params_dict['gamma']) + ' '
    else:
        print('The optimal parameters for RF is as follows'.center(print_len, '*'))
        temp_str1 = ' tree = ' + str(params_dict['tree']) + ' '
    print(temp_str1.center(print_len, '*'))
    print('\n')
    cv_labels = []
    cv_prob = []
    # Per-sample predictions gathered across folds (folds must partition the samples).
    predicted_labels = np.zeros(len(labels))
    predicted_prob = np.zeros(len(labels))
    for train_index, test_index in folds:
        x_train, y_train, x_test, y_test = get_partition(vectors, labels, train_index, test_index)
        if sp != 'none':
            x_train, y_train = sampling(sp, x_train, y_train)
        if ml == 'SVM':
            clf = svm.SVC(C=2 ** params_dict['cost'], gamma=2 ** params_dict['gamma'], probability=True)
        else:
            clf = RandomForestClassifier(random_state=42, n_estimators=params_dict['tree'])
        clf.fit(x_train, y_train)
        y_test_prob = clf.predict_proba(x_test)[:, 1]  # positive-class probability
        y_test_ = clf.predict(x_test)
        result = performance(y_test, y_test_, y_test_prob, multi, res)
        # acc, mcc, auc, balance_acc, sn, sp, p, r, f1
        results.append(result)
        cv_labels.append(y_test)
        cv_prob.append(y_test_prob)
        predicted_labels[test_index] = y_test_
        predicted_prob[test_index] = y_test_prob
    plot_roc_curve(cv_labels, cv_prob, out_dir)  # plot the ROC curve
    plot_pr_curve(cv_labels, cv_prob, out_dir)  # plot the PR curve
    final_results = np.array(results).mean(axis=0)
    print_metric_dict(final_results, ind=False)
    print('\n')
    final_results_output(final_results, out_dir, ind=False, multi=multi)  # write the metrics to file
    prob_output(labels, predicted_labels, predicted_prob, out_dir)  # write labels and their probabilities to file
    # Train on the whole data set and save the model.
    if ml == 'SVM':
        model = svm.SVC(C=2 ** params_dict['cost'], gamma=2 ** params_dict['gamma'], probability=True)
        model_path = out_dir + 'cost_[' + str(params_dict['cost']) + ']_gamma_[' + str(
            params_dict['gamma']) + ']_svm.model'
    else:
        model = RandomForestClassifier(random_state=42, n_estimators=params_dict['tree'])
        model_path = out_dir + 'tree_' + str(params_dict['tree']) + '_rf.model'
    if sp != 'none':
        vectors, labels = sampling(sp, vectors, labels)
    model.fit(vectors, labels)
    joblib.dump(model, model_path)  # persist the model with joblib
def ml_ind_results(ml, ind_vectors, ind_labels, multi, res, out_dir, params_dict):
    """Evaluate the model saved by ml_cv_results() on an independent test set.

    The model path is reconstructed from ``params_dict``, so it must match the
    parameters used when the model was trained and dumped.
    """
    if ml == 'SVM':
        model_path = out_dir + 'cost_[' + str(params_dict['cost']) + ']_gamma_[' + str(
            params_dict['gamma']) + ']_svm.model'
    else:
        model_path = out_dir + 'tree_' + str(params_dict['tree']) + '_rf.model'
    model = joblib.load(model_path)
    ind_prob = model.predict_proba(ind_vectors)[:, 1]  # positive-class probability
    pre_labels = model.predict(ind_vectors)
    final_result = performance(ind_labels, pre_labels, ind_prob, multi, res)
    print_metric_dict(final_result, ind=True)
    plot_roc_ind(ind_labels, ind_prob, out_dir)  # plot the ROC curve
    plot_pr_ind(ind_labels, ind_prob, out_dir)  # plot the PR curve
    final_results_output(final_result, out_dir, ind=True, multi=multi)  # write the metrics to file
    prob_output(ind_labels, pre_labels, ind_prob, out_dir, ind=True)  # write labels and their probabilities to file
def ml_score_cv_process(ml, vec_files, folds_num, metric, sp, multi, in_format, params_dict):
    """Cross-validate a classifier on pre-computed per-fold score files.

    Expects <vec_files[0] minus extension>/score/Fold<i>/ directories containing
    train_score.txt / test_score.txt (feature vectors in ``in_format``) and
    train_label.txt / test_label.txt written by an earlier pipeline stage.

    :param metric: index into Metric_List selecting the metric to optimise.
    :param params_dict: hyper-parameters; the averaged CV metric is stored back
        under key 'metric'.
    :return: params_dict.
    """
    dir_name, _ = os.path.splitext(vec_files[0])
    score_dir = dir_name + '/score/'
    print('\n')
    print('Cross Validation Processing...')
    print('\n')
    print_len = 40
    if ml == 'SVM':
        temp_str1 = ' cost = 2 ** ' + str(params_dict['cost']) + ' | ' + 'gamma = 2 ** ' + \
                    str(params_dict['gamma']) + ' '
    else:
        temp_str1 = ' tree = ' + str(params_dict['tree']) + ' '
    print(temp_str1.center(print_len, '+'))
    results = []
    for i in range(folds_num):
        tar_dir = score_dir + 'Fold%d/' % (i+1)
        x_train = FormatRead(tar_dir + 'train_score.txt', in_format).write_to_file()
        x_val = FormatRead(tar_dir + 'test_score.txt', in_format).write_to_file()
        y_train = np.loadtxt(tar_dir + 'train_label.txt')
        y_train = y_train.astype(int)
        y_val = np.loadtxt(tar_dir + 'test_label.txt')
        y_val = y_val.astype(int)
        if sp != 'none':
            x_train, y_train = sampling(sp, x_train, y_train)
        if ml == 'SVM':
            clf = svm.SVC(C=2 ** params_dict['cost'], gamma=2 ** params_dict['gamma'], probability=True)
        else:
            clf = RandomForestClassifier(random_state=42, n_estimators=params_dict['tree'])
        clf.fit(x_train, y_train)
        y_val_prob = clf.predict_proba(x_val)[:, 1]  # column 1 = probability of the positive class
        y_val_ = clf.predict(x_val)
        # NOTE(review): unlike ml_cv_process, performance() is called here without
        # the `res` argument — confirm it has a suitable default.
        result = performance(y_val, y_val_, y_val_prob, multi)
        # acc, mcc, auc, balance_acc, sn, sp, p, r, f1
        results.append(result)
    cv_results = np.array(results).mean(axis=0)
    params_dict['metric'] = cv_results[metric]
    temp_str2 = ' metric value: ' + Metric_List[metric] + ' = ' + '%.3f ' % cv_results[metric]
    print(temp_str2.center(print_len, '*'))
    print('\n')
    return params_dict
def ml_score_cv_results(ml, vec_files, labels, folds_num, sp, multi, in_format, out_dir, params_dict):
    """Final cross-validation over pre-computed per-fold score files: evaluate,
    plot ROC/PR curves and write metrics plus per-sample probabilities.

    Reads the same Fold<i>/ layout as ml_score_cv_process, plus test_index.txt
    mapping each fold's samples back to positions in ``labels``.
    """
    dir_name, _ = os.path.splitext(vec_files[0])
    score_dir = dir_name + '/score/'
    results = []
    cv_labels = []
    cv_prob = []
    predicted_labels = np.zeros(len(labels))
    predicted_prob = np.zeros(len(labels))
    for i in range(folds_num):
        tar_dir = score_dir + 'Fold%d/' % (i + 1)
        x_train = FormatRead(tar_dir + 'train_score.txt', in_format).write_to_file()
        x_test = FormatRead(tar_dir + 'test_score.txt', in_format).write_to_file()
        # NOTE(original): passing dtype directly to loadtxt reportedly failed on
        # Linux, hence the separate astype(int) conversions below.
        y_train = np.loadtxt(tar_dir + 'train_label.txt')
        y_train = y_train.astype(int)
        y_test = np.loadtxt(tar_dir + 'test_label.txt')
        y_test = y_test.astype(int)
        test_index = np.loadtxt(tar_dir + 'test_index.txt')
        test_index = list(test_index.astype(int))
        # test_index = np.array(test_index, dtype=int)
        if sp != 'none':
            x_train, y_train = sampling(sp, x_train, y_train)
        if ml == 'SVM':
            clf = svm.SVC(C=2 ** params_dict['cost'], gamma=2 ** params_dict['gamma'], probability=True)
        else:
            clf = RandomForestClassifier(random_state=42, n_estimators=params_dict['tree'])
        clf.fit(x_train, y_train)
        y_test_prob = clf.predict_proba(x_test)[:, 1]  # positive-class probability
        y_test_ = clf.predict(x_test)
        result = performance(y_test, y_test_, y_test_prob, multi)
        # acc, mcc, auc, balance_acc, sn, sp, p, r, f1
        results.append(result)
        cv_labels.append(y_test)
        cv_prob.append(y_test_prob)
        predicted_labels[test_index] = y_test_
        predicted_prob[test_index] = y_test_prob
    plot_roc_curve(cv_labels, cv_prob, out_dir)  # plot the ROC curve
    plot_pr_curve(cv_labels, cv_prob, out_dir)  # plot the PR curve
    final_results = np.array(results).mean(axis=0)
    print_metric_dict(final_results, ind=False)
    final_results_output(final_results, out_dir, ind=False, multi=multi)  # write the metrics to file
    prob_output(labels, predicted_labels, predicted_prob, out_dir)  # write labels and their probabilities to file
def ml_score_ind_results(ml, ind_vec_file, sp, multi, in_format, out_dir, params_dict):
    """Train on the ind_score training split, evaluate on its test split,
    write plots/metrics/probabilities, and persist the fitted model.

    Expects <dir of ind_vec_file>/ind_score/ to contain train_score.txt,
    test_score.txt, train_label.txt and test_label.txt.
    """
    dir_name, _ = os.path.split(ind_vec_file)
    tar_dir = dir_name + '/ind_score/'
    x_train = FormatRead(tar_dir + 'train_score.txt', in_format).write_to_file()
    x_test = FormatRead(tar_dir + 'test_score.txt', in_format).write_to_file()
    y_train = np.loadtxt(tar_dir + 'train_label.txt')
    y_train = y_train.astype(int)
    y_test = np.loadtxt(tar_dir + 'test_label.txt')
    y_test = y_test.astype(int)
    if sp != 'none':
        x_train, y_train = sampling(sp, x_train, y_train)
    if ml == 'SVM':
        model_path = out_dir + 'cost_[' + str(params_dict['cost']) + ']_gamma_[' + str(
            params_dict['gamma']) + ']_svm_score.model'
        clf = svm.SVC(C=2 ** params_dict['cost'], gamma=2 ** params_dict['gamma'], probability=True)
    else:
        clf = RandomForestClassifier(random_state=42, n_estimators=params_dict['tree'])
        model_path = out_dir + 'tree_' + str(params_dict['tree']) + '_rf_score.model'
    clf.fit(x_train, y_train)
    ind_prob = clf.predict_proba(x_test)[:, 1]  # positive-class probability
    pre_labels = clf.predict(x_test)
    final_result = performance(y_test, pre_labels, ind_prob, multi)
    print_metric_dict(final_result, ind=True)
    plot_roc_ind(y_test, ind_prob, out_dir)  # plot the ROC curve
    plot_pr_ind(y_test, ind_prob, out_dir)  # plot the PR curve
    final_results_output(final_result, out_dir, ind=True, multi=multi)  # write the metrics to file
    prob_output(y_test, pre_labels, ind_prob, out_dir, ind=True)  # write labels and their probabilities to file
    joblib.dump(clf, model_path)  # persist the model with joblib
<file_sep>from itertools import combinations_with_replacement, permutations
import numpy as np
def get_km_dict(k, alphabet):
    """Build the k-mer -> index dictionary, e.g. {'AA': 0, 'AC': 1, ..., 'TT': 15}.

    All length-k strings over ``alphabet`` are generated, sorted, and numbered.
    """
    kmers = []
    for combo in combinations_with_replacement(alphabet, k):
        # All distinct orderings of this multiset of characters.
        kmers.extend(''.join(p) for p in set(permutations(combo, k)))
    ordered = np.sort(kmers)
    return {kmer: pos for pos, kmer in enumerate(ordered)}
def frequency(tol_str, tar_str):
    """Count (overlapping) occurrences of tar_str inside tol_str.

    :param tol_str: mother string.
    :param tar_str: substring.
    :return: number of occurrences; 0 when tar_str is empty or longer than tol_str.
    """
    window = len(tar_str)
    if window == 0:
        # The original scan never matched an empty pattern; keep that behavior.
        return 0
    return sum(1 for start in range(len(tol_str) - window + 1)
               if tol_str[start:start + window] == tar_str)
<file_sep>from ..utils.utils_words import mismatch_words
from ..utils.utils_algorithm import tf_idf
def mismatch_tf_idf(input_file, alphabet, fixed_len, word_size, fixed=True):
    """Build a mismatch-word corpus from the sequences and return its TF-IDF matrix.

    :param input_file: input sequence file.
    :param alphabet: residue alphabet used to generate the words.
    :param fixed_len: fixed sequence length used when ``fixed`` is True.
    :param word_size: size of the words produced by mismatch_words().
    :param fixed: whether sequences are cut/padded to ``fixed_len`` (forwarded).
    :return: whatever tf_idf() returns for the generated corpus.
    """
    corpus = mismatch_words(input_file, alphabet, fixed_len, word_size, fixed)
    return tf_idf(corpus)
<file_sep>import os
import numpy as np
class FormatRead(object):
    """Read a feature-vector file in csv/tsv/svm/tab format into a numpy array.

    :param filename: path of a single vector file.
    :param in_format: one of 'csv', 'tsv', 'svm', 'tab'.
    """
    def __init__(self, filename, in_format):
        self.in_file = filename
        self.in_format = in_format

    def read_csv(self):
        """Transform a csv format file to a 2-D float32 array (one row per line)."""
        vectors = []
        with open(self.in_file, 'r') as f:
            for line in f:
                vectors.append(list(map(float, line.strip().split(','))))
        return np.array(vectors, dtype=np.float32)

    def read_tsv(self):
        """Transform a tsv format file to a 2-D float32 array (one row per line)."""
        vectors = []
        with open(self.in_file, 'r') as f:
            for line in f:
                vectors.append(list(map(float, line.strip().split('\t'))))
        return np.array(vectors, dtype=np.float32)

    def read_svm(self):
        """Transform a libSVM format file ('label idx:val idx:val ...') to a
        2-D float32 array, one row per sample.

        Bug fix: the original did ``vectors += vector``, flattening every sample
        into a single 1-D array; rows are now appended like the other readers.
        """
        vectors = []
        with open(self.in_file, 'r') as f:
            for line in f:
                tokens = line.split()
                # tokens[0] is the label; the rest are 'index:value' pairs.
                vectors.append([float(token.split(':')[1]) for token in tokens[1:]])
        return np.array(vectors, dtype=np.float32)

    def read_tab(self):
        """Transform a tab format file to a 2-D float32 array.

        Returns False (after printing a message) when the file looks like a
        different format or when row lengths are inconsistent.
        """
        lens = 'flag'  # sentinel until the first row fixes the expected length
        vectors = []
        with open(self.in_file, 'r') as f:
            for line in f:
                if ':' in line or ',' in line:
                    print('The format of the input file should be tab format.')
                    return False
                else:
                    lst = line.strip().split()
                    tmp = len(lst)
                    if lens == 'flag':
                        lens = tmp
                    elif tmp != lens:
                        print('The lengths of the feature vectors are not same. Please check.')
                        return False
                    vectors.append(list(map(float, lst)))
        return np.array(vectors, dtype=np.float32)

    def write_to_file(self):
        """Dispatch to the reader matching ``self.in_format``.

        (Historical name: despite 'write', this method only reads and returns
        the vectors.) Returns False for an unknown format.
        """
        readers = {'svm': self.read_svm, 'tab': self.read_tab,
                   'csv': self.read_csv, 'tsv': self.read_tsv}
        if self.in_format in readers:
            return readers[self.in_format]()
        print('Output file format error! Please check.')
        return False
def files2vectors_seq(file_list, in_format):
    """Read every feature-vector file in ``file_list`` and stack all rows into one array.

    :param file_list: paths of vector files (must all exist).
    :param in_format: format understood by FormatRead ('csv'/'tsv'/'svm'/'tab').
    :return: stacked 2-D array of all samples, or None for an empty file list.
    """
    in_files = []
    for in_file_name in file_list:
        in_file_path = os.path.abspath(in_file_name)
        assert os.path.isfile(in_file_path), 'The feature vector file: ' + in_file_path + ' is not exist!'
        in_files.append(in_file_path)
    vectors = None
    for in_file in in_files:
        temp_vec = FormatRead(in_file, in_format).write_to_file()
        vectors = temp_vec if vectors is None else np.vstack((vectors, temp_vec))
    return vectors
def files2vectors_info(file_list, in_format):
    """Read the feature files and also report per-file sample counts and paths.

    :param file_list: paths of vector files (must all exist).
    :param in_format: format understood by FormatRead.
    :return: (stacked vectors, list of row counts per file, list of absolute paths)
    """
    in_files = []
    for in_file_name in file_list:
        in_file_path = os.path.abspath(in_file_name)
        assert os.path.isfile(in_file_path), 'The feature vector file: ' + in_file_path + ' is not exist!'
        in_files.append(in_file_path)
    vec_num_list = []
    vectors = None
    for in_file in in_files:
        temp_vec = FormatRead(in_file, in_format).write_to_file()
        vectors = temp_vec if vectors is None else np.vstack((vectors, temp_vec))
        vec_num_list.append(len(temp_vec))
    return vectors, vec_num_list, in_files
def files2vectors_res(file_list, in_format):
    """Read the feature files and report per-file sample counts.

    Identical to files2vectors_info() except that the absolute paths are not
    returned.

    :return: (stacked vectors, list of row counts per file)
    """
    in_files = []
    for in_file_name in file_list:
        in_file_path = os.path.abspath(in_file_name)
        assert os.path.isfile(in_file_path), 'The feature vector file: ' + in_file_path + ' is not exist!'
        in_files.append(in_file_path)
    vec_num_list = []
    vectors = None
    for in_file in in_files:
        temp_vec = FormatRead(in_file, in_format).write_to_file()
        vectors = temp_vec if vectors is None else np.vstack((vectors, temp_vec))
        vec_num_list.append(len(temp_vec))
    return vectors, vec_num_list
def read_dl_vec4seq(fixed_len, in_files, return_sp):
    """Read FASTA-like vector files ('>' headers followed by tab-separated float
    rows) and return a padded 3-D matrix for deep-learning input.

    :param fixed_len: row count each per-sequence matrix is padded/truncated to.
    :param in_files: list of input file paths.
    :param return_sp: when True, also return the per-file record counts.
    :return: (vec_mat, fixed_seq_len_list) or (vec_mat, sp_num_list, fixed_seq_len_list).

    NOTE(review): a record is only flushed when the NEXT '>' header is seen;
    rows after the final header are dropped unless the file ends with a
    sentinel '>' line — verify against the writer of these files.
    """
    vectors_list = []
    seq_len_list = []
    sp_num_list = []
    # print(in_files)
    for in_file in in_files:
        count = 0
        f = open(in_file, 'r')
        lines = f.readlines()
        vectors = []
        flag = 0  # 1 while rows for the current record have been collected
        for line in lines:
            if len(line.strip()) != 0:
                if line[0] != '>':
                    vector = line.strip().split('\t')
                    vector = list(map(float, vector))
                    vectors.append(vector)
                    flag = 1
                else:
                    if flag == 1:
                        seq_len_list.append(len(vectors))
                        vectors_list.append(np.array(vectors))
                        vectors = []
                        count += 1
                        flag = 0
        f.close()
        sp_num_list.append(count)
    # print(len(vectors_list))
    vec_mat, fixed_seq_len_list = fixed_opt(fixed_len, vectors_list, seq_len_list)
    if return_sp is True:
        return vec_mat, sp_num_list, fixed_seq_len_list
    else:
        return vec_mat, fixed_seq_len_list
def read_base_mat4res(in_file, fixed_len):
    """Read one FASTA-like vector file and return a padded 3-D matrix plus
    the clipped per-record lengths (residue-level variant of read_dl_vec4seq).

    NOTE(review): like read_dl_vec4seq, a record is only flushed when the next
    '>' header is seen — confirm the writer terminates the file with one.
    """
    vectors_list = []
    seq_len_list = []
    f = open(in_file, 'r')
    lines = f.readlines()
    vectors = []
    flag = 0  # 1 while rows for the current record have been collected
    for line in lines:
        if len(line.strip()) != 0:
            if line[0] != '>':
                vector = line.strip().split('\t')
                vector = list(map(float, vector))
                vectors.append(vector)
                flag = 1
            else:
                if flag == 1:
                    seq_len_list.append(len(vectors))
                    vectors_list.append(np.array(vectors))
                    vectors = []
                    flag = 0
    f.close()
    vec_mat, fixed_seq_len_list = fixed_opt(fixed_len, vectors_list, seq_len_list)
    return vec_mat, fixed_seq_len_list
def read_base_vec_list4res(in_file):
    """Parse a FASTA-like vector file into a list of per-record 2-D arrays.

    Rows between '>' header lines are tab-separated floats; each block that is
    followed by another '>' header becomes one numpy array in the result.
    NOTE(review): a block at end-of-file (not followed by a '>' line) is not
    appended — confirm the writer terminates the file with a header line.
    """
    records = []
    current = []
    has_rows = False
    with open(in_file, 'r') as handle:
        for raw in handle:
            stripped = raw.strip()
            if not stripped:
                continue
            if raw[0] != '>':
                current.append([float(value) for value in stripped.split('\t')])
                has_rows = True
            elif has_rows:
                records.append(np.array(current))
                current = []
                has_rows = False
    return records
def fixed_opt(fixed_len, vectors_list, seq_len_list):
    """Zero-pad or truncate every per-sequence matrix to ``fixed_len`` rows.

    :param fixed_len: target number of rows for every matrix.
    :param vectors_list: list of 2-D numpy arrays (seq_len x feature_dim).
    :param seq_len_list: per-sequence lengths; entries longer than ``fixed_len``
        are clipped in place.
    :return: (3-D array of shape (n, fixed_len, feature_dim), clipped length list)
    """
    vec_mat = []
    for i, vec in enumerate(vectors_list):
        temp_arr = np.zeros((fixed_len, len(vec[0])))
        seq_len = len(vec)
        if seq_len > fixed_len:
            seq_len_list[i] = fixed_len
        temp_len = min(seq_len, fixed_len)
        temp_arr[:temp_len, :] = vec[:temp_len, :]
        vec_mat.append(temp_arr)
    # A leftover debug print of the stacked shape was removed here.
    return np.array(vec_mat), seq_len_list
def seq_label_read(vec_num_list, label_list):
    """Expand per-file labels: label_list[i] is repeated vec_num_list[i] times.

    (Note: res_label_read below is an identical duplicate of this function.)
    """
    expanded = []
    for i, label in enumerate(label_list):
        expanded.extend([label] * vec_num_list[i])
    return np.array(expanded)
def res_label_read(vec_num_list, label_list):
    """Expand per-file labels: label_list[i] is repeated vec_num_list[i] times.

    (Note: byte-for-byte the same logic as seq_label_read — kept as a separate
    name for the residue-level call sites.)
    """
    expanded = []
    for i, label in enumerate(label_list):
        expanded.extend([label] * vec_num_list[i])
    return np.array(expanded)
def res_dl_label_read(res_label_list, fixed_len):
    """Zero-pad or truncate each residue label sequence to length ``fixed_len``.

    :param res_label_list: iterable of per-sequence label sequences.
    :param fixed_len: target length of every output row.
    :return: 2-D float array of shape (n_sequences, fixed_len).
    """
    padded = []
    for labels in res_label_list:
        row = np.zeros(fixed_len)
        keep = min(len(labels), fixed_len)
        row[:keep] = labels[:keep]
        padded.append(row)
    return np.array(padded)
<file_sep>from ..utils.utils_pssm import sep_file, produce_all_frequency
from ..utils.utils_words import convert_tng_to_fasta
from .dr_bow import dr_bow
def dt_bow(inputfile, max_dis, cur_dir, process_num):
    """Generate DT method feature vectors.

    Pipeline: split the FASTA file into per-sequence files, run psiblast to
    build frequency profiles (PSSMs), convert the top-n-gram profiles back to a
    FASTA-like file, then delegate to dr_bow().

    :param inputfile: input sequence file in FASTA format.
    :param max_dis: the maximum distance between top-1-gram pairs.
    :param cur_dir: the main dir of code (used to locate bundled software).
    :param process_num: the number of processes used for multiprocessing.
    :return: whatever dr_bow() returns for the converted top-n-gram file.
    """
    dir_name, seq_name = sep_file(inputfile)
    sw_dir = cur_dir + '/software/'
    pssm_dir = produce_all_frequency(dir_name, sw_dir, process_num)
    tng_seq_file = convert_tng_to_fasta(pssm_dir, seq_name, inputfile, 1, sw_dir)
    return dr_bow(tng_seq_file, max_dis)
<file_sep>import sys
from ..utils.utils_const import DNA, RNA, PROTEIN
from .acc import acc, auto_correlation, pdt, nd
from .pse import zcpseknc
from .profile import make_acc_pssm_vector, pssm_dt_method, pssm_rt_method, pdt_profile
from .motif_pssm import motif_pssm
from ..utils.utils_write import vectors2files
# --- Method-name groups used for dispatch in syntax_rules() ---
METHODS_ACC_P = ['DAC', 'DCC', 'DACC', 'TAC', 'TCC', 'TACC', 'MAC', 'GAC', 'NMBAC', 'AC', 'CC', 'ACC', 'PDT']
METHODS_ACC_S = ['DAC', 'DCC', 'DACC', 'TAC', 'TCC', 'TACC', 'AC', 'CC', 'ACC']
# Auto-covariance / cross-covariance / combined variants (map to theta_type 1/2/3).
METHODS_AC = ['DAC', 'TAC', 'AC']
METHODS_CC = ['DCC', 'TCC', 'CC']
METHODS_ACC = ['DACC', 'TACC', 'ACC']
# DNA methods implying k = 2 (dinucleotide) or k = 3 (trinucleotide); see read_k().
K_2_DNA_METHODS = ['DAC', 'DCC', 'DACC']
K_3_DNA_METHODS = ['TAC', 'TCC', 'TACC']
# Default physicochemical index sets per sequence type.
DI_IND_6_DNA = ['Rise', 'Roll', 'Shift', 'Slide', 'Tilt', 'Twist']
TRI_IND_DNA = ['Dnase I', 'Bendability (DNAse)']
DI_IND_RNA = ['Rise (RNA)', 'Roll (RNA)', 'Shift (RNA)', 'Slide (RNA)', 'Tilt (RNA)', 'Twist (RNA)']
IND_3_PROTEIN = ['Hydrophobicity', 'Hydrophilicity', 'Mass']
# Full catalogue of dinucleotide DNA physicochemical indices (used with args.a).
ALL_DI_DNA_IND = ['Base stacking', 'Protein induced deformability', 'B-DNA twist',
                  'Dinucleotide GC Content', 'A-philicity', 'Propeller twist',
                  'Duplex stability-free energy', 'Duplex stability-disrupt energy', 'DNA denaturation',
                  'Bending stiffness', 'Protein DNA twist', 'Stabilising energy of Z-DNA',
                  'Aida_BA_transition', 'Breslauer_dG', 'Breslauer_dH', 'Breslauer_dS',
                  'Electron_interaction', 'Hartman_trans_free_energy', 'Helix-Coil_transition',
                  'Ivanov_BA_transition', 'Lisser_BZ_transition', 'Polar_interaction', 'SantaLucia_dG',
                  'SantaLucia_dH', 'SantaLucia_dS', 'Sarai_flexibility', 'Stability', 'Stacking_energy',
                  'Sugimoto_dG', 'Sugimoto_dH', 'Sugimoto_dS', 'Watson-Crick_interaction', 'Twist',
                  'Tilt', 'Roll', 'Shift', 'Slide', 'Rise', 'Stacking energy', 'Bend', 'Tip',
                  'Inclination', 'Major Groove Width', 'Major Groove Depth', 'Major Groove Size',
                  'Major Groove Distance', 'Minor Groove Width', 'Minor Groove Depth',
                  'Minor Groove Size', 'Minor Groove Distance', 'Persistance Length',
                  'Melting Temperature', 'Mobility to bend towards major groove',
                  'Mobility to bend towards minor groove', 'Propeller Twist', 'Clash Strength',
                  'Enthalpy', 'Free energy', 'Twist_twist', 'Tilt_tilt', 'Roll_roll', 'Twist_tilt',
                  'Twist_roll', 'Tilt_roll', 'Shift_shift', 'Slide_slide', 'Rise_rise', 'Shift_slide',
                  'Shift_rise', 'Slide_rise', 'Twist_shift', 'Twist_slide', 'Twist_rise', 'Tilt_shift',
                  'Tilt_slide', 'Tilt_rise', 'Roll_shift', 'Roll_slide', 'Roll_rise', 'Slide stiffness',
                  'Shift stiffness', 'Roll stiffness', 'Rise stiffness', 'Tilt stiffness',
                  'Twist stiffness', 'Wedge', 'Direction', 'Flexibility_slide', 'Flexibility_shift',
                  'Entropy']
DEFAULT_DI_DNA_IND = ['Twist', 'Tilt', 'Roll', 'Shift', 'Slide', 'Rise']
# Trinucleotide DNA indices (full catalogue and defaults).
ALL_TRI_DNA_IND = ['Bendability-DNAse', 'Bendability-consensus', 'Trinucleotide GC Content',
                   'Nucleosome positioning', 'Consensus_roll', 'Consensus_Rigid', 'Dnase I',
                   'Dnase I-Rigid', 'MW-Daltons', 'MW-kg', 'Nucleosome', 'Nucleosome-Rigid']
DEFAULT_TRI_DNA_IND = ['Nucleosome positioning', 'Dnase I']
# RNA dinucleotide indices (full catalogue and defaults).
ALL_RNA_IND = ['Shift', 'Slide', 'Rise', 'Tilt', 'Roll', 'Twist', 'Stacking energy', 'Enthalpy', 'Entropy',
               'Free energy', 'Hydrophilicity']
DEFAULT_RNA_IND = ['Shift', 'Slide', 'Rise', 'Tilt', 'Roll', 'Twist']
def read_k(alphabet, _method, k):
    """Resolve the k-mer size implied by the sequence type and method name.

    Protein -> 1, RNA -> 2; DNA depends on the method group (di- vs tri-
    nucleotide), and 'ZCPseKNC' passes the caller-supplied ``k`` through.
    Unknown combinations print an error and implicitly return None.
    """
    if alphabet == 'Protein':
        return 1
    if alphabet == 'RNA':
        return 2
    if _method in K_2_DNA_METHODS:
        return 2
    if _method in K_3_DNA_METHODS:
        return 3
    if _method == 'ZCPseKNC':
        return k
    print("Error in read_k.")
def read_index(index_file):
    """Read one physicochemical index name per line, stripped of trailing whitespace.

    :param index_file: path of the text file listing index names.
    :return: list of index-name strings.
    """
    with open(index_file) as f_ind:
        return [line.rstrip() for line in f_ind]
def syntax_rules(method, input_file, category, sample_num_list, out_format, out_file_list, cur_dir, args, **param_dict):
    """Dispatch a syntax-rule feature-extraction method and write the vectors to files.

    :param method: feature method name ('DAC', 'MAC', 'PDT', 'ZCPseKNC',
        'ACC-PSSM', 'ND', 'PSSM-DT', 'PSSM-RT', 'PDT-Profile', 'Motif-PSSM', ...).
    :param input_file: input sequence file.
    :param category: sequence type, 'DNA', 'RNA' or 'Protein'.
    :param sample_num_list: per-file sample counts used when splitting the output.
    :param out_format: output vector-file format.
    :param out_file_list: destination files for the vectors.
    :param cur_dir: project root (used to locate the bundled software/ dir).
    :param args: parsed command-line namespace (pp_file, ui_file, a, oli,
        fixed_len, motif_file, motif_database, ...).
    :param param_dict: method hyper-parameters (lag, lamada, k, w, n, cpu,
        batch_size, ...).
    :return: False on error; otherwise None after vectors2files() has written
        the results.
    """
    res = None
    sw_dir = cur_dir + '/software/'
    if category == 'DNA':
        alphabet = DNA
    elif category == 'RNA':
        alphabet = RNA
    else:
        alphabet = PROTEIN
    if method in METHODS_ACC_S:
        # Auto-/cross-covariance family on raw sequences.
        with open(input_file) as f:
            k = read_k(category, method, 0)
            # Get index_list (user-supplied physicochemical properties).
            if args.pp_file is not None:
                ind_list = read_index(args.pp_file)
                # print(ind_list)
            else:
                ind_list = []
            default_e = []
            # Set the default physicochemical index list per sequence type.
            if category == 'DNA':
                if k == 2:
                    default_e = DI_IND_6_DNA
                elif k == 3:
                    default_e = TRI_IND_DNA
            elif category == 'RNA':
                default_e = DI_IND_RNA
            else:
                default_e = IND_3_PROTEIN
            # theta_type selects auto (1), cross (2) or combined (3) covariance.
            if method in METHODS_AC:
                theta_type = 1
            elif method in METHODS_CC:
                theta_type = 2
            else:
                theta_type = 3
            # Fall back to the default index set only when the user supplied nothing.
            if args.ui_file is None and len(ind_list) == 0 and args.a is False:
                lag = param_dict['lag']
                res = acc(f, k, lag, default_e, alphabet,
                          extra_index_file=args.ui_file, all_prop=args.a, theta_type=theta_type)
            else:
                lag = param_dict['lag']
                res = acc(f, k, lag, ind_list, alphabet,
                          extra_index_file=args.ui_file, all_prop=args.a, theta_type=theta_type)
    elif method in ['MAC', 'GAC', 'NMBAC']:
        # Moran/Geary/normalized Moreau-Broto autocorrelation (DNA/RNA only).
        lamada = param_dict['lamada']
        assert 0 < lamada < 16, 'The value of -lamada should be larger than 0 and smaller than 16.'
        if args.a is None:
            args.a = False
        if category == 'DNA':
            # args.oli: 0 = dinucleotide indices, 1 = trinucleotide indices.
            if args.oli == 0:
                if args.a is True:
                    res = auto_correlation(method, input_file, props=ALL_DI_DNA_IND, k=2,
                                           lamada=lamada, alphabet=alphabet)
                else:
                    res = auto_correlation(method, input_file, props=DEFAULT_DI_DNA_IND, k=2,
                                           lamada=lamada, alphabet=alphabet)
            if args.oli == 1:
                if args.a is True:
                    res = auto_correlation(method, input_file, props=ALL_TRI_DNA_IND, k=3,
                                           lamada=lamada, alphabet=alphabet)
                else:
                    res = auto_correlation(method, input_file, props=DEFAULT_TRI_DNA_IND, k=3,
                                           lamada=lamada, alphabet=alphabet)
        elif category == 'RNA':
            if args.a is True:
                res = auto_correlation(method, input_file, props=ALL_RNA_IND, k=2,
                                       lamada=lamada, alphabet=alphabet)
            else:
                res = auto_correlation(method, input_file, props=DEFAULT_RNA_IND, k=2,
                                       lamada=lamada, alphabet=alphabet)
        else:
            error_info = "'MAC', 'GAC', 'NMBAC' method only for RNA and DNA sequence, please read manual"
            sys.stderr.write(error_info)
            return False
    elif method == 'PDT':
        lamada = param_dict['lamada']
        assert 0 < lamada < 16, 'The value of -lamada should be larger than 0 and smaller than 16.'
        res = pdt(input_file, lamada, sw_dir)
    elif method == 'ZCPseKNC':
        lamada = param_dict['lamada']
        assert 0 < lamada < 16, 'The value of -lamada should be larger than 0 and smaller than 16.'
        res = zcpseknc(input_file, k=param_dict['k'], w=param_dict['w'], lamada=lamada, alphabet=DNA)
    elif method in ['ACC-PSSM', 'AC-PSSM', 'CC-PSSM']:
        # Covariance features computed on psiblast PSSM profiles.
        if method == 'ACC-PSSM':
            vec_type = 'acc'
        elif method == 'AC-PSSM':
            vec_type = 'ac'
        else:
            vec_type = 'cc'
        lag = param_dict['lag']
        if lag < 1:
            print('The value of -lag should be larger than 0.')
            return False
        else:
            res = make_acc_pssm_vector(input_file, lag, vec_type, sw_dir, process_num=param_dict['cpu'])
    elif method == 'ND':
        res = nd(input_file, alphabet, fixed_len=args.fixed_len)
    elif method == 'PSSM-DT':
        res = pssm_dt_method(input_file, param_dict['cpu'], sw_dir)
    elif method == 'PSSM-RT':
        res = pssm_rt_method(input_file, param_dict['cpu'], sw_dir, fixed_len=args.fixed_len)
    elif method == 'PDT-Profile':
        lamada = param_dict['lamada']
        assert 0 < lamada < 16, 'The value of -lamada should be larger than 0 and smaller than 16.'
        res = pdt_profile(input_file, param_dict['n'], lamada, sw_dir, process_num=param_dict['cpu'])
    elif method == 'Motif-PSSM':
        # TODO(original note): all_data here should be the PSSM matrix; needs later revision.
        res = motif_pssm(input_file, PROTEIN, process_num=param_dict['cpu'],
                         batch_size=param_dict['batch_size'],
                         motif_file=args.motif_file, motif_database=args.motif_database,
                         fixed_len=args.fixed_len, cur_dir=cur_dir)
    else:
        error_info = 'The method of syntax rules is wrong, please check!'
        sys.stderr.write(error_info)
        return False
    vectors2files(res, sample_num_list, out_format, out_file_list)
<file_sep>import numpy as np
from torch import nn, optim
from ..utils.utils_net import TorchNetRes, criterion_func
from ..utils.utils_plot import plot_roc_curve, plot_pr_curve, plot_roc_ind, plot_pr_ind
from ..utils.utils_results import performance, final_results_output, prob_output_res, print_metric_dict
def get_partition(feature, target, length, train_index, val_index):
    """Split features, targets and per-sequence lengths into train/validation parts.

    Inputs are coerced to numpy arrays so plain lists are accepted.

    :return: (x_train, x_val, y_train, y_val, train_length, test_length)
    """
    feature = np.array(feature)
    target = np.array(target)
    length = np.array(length)
    return (feature[train_index], feature[val_index],
            target[train_index], target[val_index],
            length[train_index], length[val_index])
def dl_cv_process(ml, vectors, labels, seq_length_list, max_len, folds, out_dir, params_dict):
    """Cross-validate a residue-level deep model; per fold the predictions from
    the lowest-test-loss epoch are kept, then ROC/PR curves and averaged
    metrics are written to ``out_dir``.

    :param ml: network type name understood by TorchNetRes.
    :param vectors: 3-D padded feature matrix (n, max_len, in_dim).
    :param labels: residue-level label matrix aligned with ``vectors``.
    :param seq_length_list: true (unpadded) length of each sequence.
    :param max_len: padded sequence length.
    :param folds: iterable of (train_index, val_index) pairs.
    :param params_dict: training hyper-parameters ('lr', 'epochs', ...).
    """
    results = []
    cv_labels = []
    cv_prob = []
    # predicted_labels = np.zeros(len(seq_length_list))
    # predicted_prob = np.zeros(len(seq_length_list))
    count = 0
    criterion = criterion_func
    in_dim = vectors.shape[-1]
    num_class = 2  # binary classification is assumed here
    multi = True if num_class > 2 else False
    for train_index, val_index in folds:
        x_train, x_val, y_train, y_val, train_length, test_length = get_partition(vectors, labels, seq_length_list,
                                                                                  train_index, val_index)
        # print(y_train)
        # print(y_val)
        # exit()
        model = TorchNetRes(ml, max_len, criterion, params_dict).net_type(in_dim, num_class)
        optimizer = optim.Adam(model.parameters(), lr=params_dict['lr'])
        epochs = params_dict['epochs']
        # Keep the predictions from the epoch with the lowest test loss.
        min_loss = float('inf')
        final_predict_list = []
        final_target_list = []
        final_prob_list = []
        # final_prob_list_format = []
        # final_predict_list_format = []
        for epoch in range(1, epochs+1):
            TorchNetRes(ml, max_len, criterion, params_dict).train(model, optimizer, x_train, y_train, train_length,
                                                                   epoch)
            predict_list, target_list, prob_list, test_loss, prob_list_format, predict_list_format = \
                TorchNetRes(ml, max_len, criterion, params_dict).test(model, x_val, y_val, test_length)
            if test_loss < min_loss:
                min_loss = test_loss
                final_predict_list = predict_list
                final_target_list = target_list
                final_prob_list = prob_list
        result = performance(final_target_list, final_predict_list, final_prob_list, multi, True)
        results.append(result)
        cv_labels.append(final_target_list)
        cv_prob.append(final_prob_list)
        # Bookkeeping originally intended for writing the probability file.
        count += 1
        print("Round[%d]: Accuracy = %.3f" % (count, result[0]))
        print('\n')
    plot_roc_curve(cv_labels, cv_prob, out_dir)  # plot the ROC curve
    plot_pr_curve(cv_labels, cv_prob, out_dir)  # plot the PR curve
    final_results = np.array(results).mean(axis=0)
    # table_metric(final_results, True)
    print_metric_dict(final_results, ind=False)
    final_results_output(final_results, out_dir, ind=False, multi=multi)  # write the metrics to file
def dl_ind_process(ml, vectors, labels, seq_length_list, ind_vectors, ind_labels, ind_seq_length_list, max_len, out_dir,
                   params_dict):
    """Train a residue-level deep model on the full training data and evaluate it
    on an independent test set; the epoch with the lowest test loss provides the
    reported predictions.

    NOTE(review): this uses nn.CrossEntropyLoss() while dl_cv_process uses
    criterion_func — confirm the difference is intentional.
    """
    criterion = nn.CrossEntropyLoss()
    in_dim = vectors.shape[-1]
    num_class = 2  # binary classification is assumed here
    multi = True if num_class > 2 else False
    model = TorchNetRes(ml, max_len, criterion, params_dict).net_type(in_dim, num_class)
    optimizer = optim.Adam(model.parameters(), lr=params_dict['lr'])
    epochs = params_dict['epochs']
    # Keep the predictions from the epoch with the lowest test loss.
    min_loss = float('inf')
    final_predict_list = []
    final_target_list = []
    final_prob_list = []
    for epoch in range(1, epochs+1):
        TorchNetRes(ml, max_len, criterion, params_dict).train(model, optimizer, vectors, labels, seq_length_list,
                                                               epoch)
        predict_list, target_list, prob_list, test_loss, prob_list_format, predict_list_format = \
            TorchNetRes(ml, max_len, criterion, params_dict).test(model, ind_vectors, ind_labels, ind_seq_length_list)
        if test_loss < min_loss:
            min_loss = test_loss
            final_predict_list = predict_list
            final_target_list = target_list
            final_prob_list = prob_list
    final_result = performance(final_target_list, final_predict_list, final_prob_list, multi, True)
    # table_metric(final_result, True)
    print_metric_dict(final_result, ind=True)
    plot_roc_ind(final_target_list, final_prob_list, out_dir)  # plot the ROC curve
    plot_pr_ind(final_target_list, final_prob_list, out_dir)  # plot the PR curve
    final_results_output(final_result, out_dir, ind=True, multi=multi)  # write the metrics to file
    prob_output_res(final_target_list, final_predict_list, final_prob_list, out_dir)
<file_sep>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import time
import subprocess
import threading
from numpy import array
from .utils_const import PROTEIN
from .utils_fasta import Seq
def generate_pssm(pssm):
    """Drop the leading (residue) column of each PSSM row and return a float matrix.

    :param pssm: iterable of rows where row[0] is a residue label and the rest
        are numeric strings/values.
    :return: 2-D numpy array of floats.
    """
    return array([[float(value) for value in row[1:]] for row in pssm])
def is_fasta_and_protein(seq):
    """Judge if the seq is in fasta format and protein sequences.
    :param seq: Seq object (fields used: name, no, length, seq)
    Return True or False; on failure an error message is written to stderr.
    """
    if not seq.name:
        error_info = 'Error, sequence ' + str(seq.no) + ' has no sequence name.'
        print(seq)
        sys.stderr.write(error_info)
        return False
    if 0 == seq.length:
        error_info = 'Error, sequence ' + str(seq.no) + ' is null.'
        sys.stderr.write(error_info)
        return False
    for elem in seq.seq:
        # 'x' is tolerated here; check_and_save() strips 'x'/'X' from the raw
        # lines before building the Seq, so this is a defensive allowance.
        if elem not in PROTEIN and elem != 'x':
            error_info = 'Sorry, sequence ' + str(seq.no) \
                         + ' has character ' + str(elem) + '.(The character must be ' + PROTEIN + ').'
            sys.stderr.write(error_info)
            return False
    return True
def check_and_save(file_name):
    """Read the input file and store as Seq objects.
    :param file_name: the input protein sequence file.
    return an iterator (generator) of validated Seq objects; exits the process
    via sys.exit(0) when a record fails is_fasta_and_protein().
    """
    name, seq = '', ''
    count = 0
    with open(file_name) as f:
        for line in f:
            if not line:
                break
            if '>' == line[0]:
                # A new header: validate and yield the previously accumulated record.
                if 0 != count or (0 == count and seq != ''):
                    if is_fasta_and_protein(Seq(name, seq, count)):
                        yield Seq(name, seq, count)
                    else:
                        sys.exit(0)
                seq = ''
                name = line[1:].strip()
                count += 1
            else:
                # Unknown residues 'x'/'X' are stripped before the sequence is stored.
                if 'x' in line or 'X' in line:
                    line = line.replace('x', '')
                    line = line.replace('X', '')
                seq += line.strip()
                # count += 1
    # Flush the final record (the loop only yields when the next '>' is seen).
    if is_fasta_and_protein(Seq(name, seq, count)):
        yield Seq(name, seq, count)
    else:
        sys.exit(0)
def sep_file(file_name):
    """separate the input file. One sequence in one file.
    :param file_name: the input file.
    :return: (absolute path of the per-sequence directory, list of sequence names)
    """
    dir_name, suffix = os.path.splitext(file_name)
    # print(dir_name)
    # print(suffix)
    # exit()
    if not os.path.exists(dir_name):
        try:
            os.makedirs(dir_name)
        except OSError:
            pass
    # Note: an existing directory is reused on purpose — in batch mode this
    # saves regenerating the PSSM files every run.
    # else:
    #     rand_str = str(random.randint(0, 99999))
    #     dir_name = dir_name + '_' + rand_str
    #     os.mkdir(dir_name)
    seq_name = []
    for seq in check_and_save(file_name):
        seq_name.append(seq.name)
        # Each sequence is written to <dir>/<seq.no>.txt in FASTA format.
        seq_file = dir_name + '/' + str(seq.no) + '.txt'
        with open(seq_file, 'w') as f:
            f.write('>')
            f.write(str(seq.name))
            f.write('\n')
            f.write(str(seq.seq))
    return os.path.abspath(dir_name), seq_name
def produce_one_frequency(fasta_file, xml_file, pssm_file, sw_dir, sem):
    """Produce a frequency profile for one sequence using psiblast.
    :param fasta_file: the file storing one sequence.
    :param xml_file: the generated xml file by psiblast.
    :param pssm_file: the generated pssm file by psiblast.
    :param sw_dir: the main dir of software.
    :param sem: the semaphore used to bound the number of concurrent psiblast runs.
    """
    sem.acquire()
    if sys.platform.startswith('win'):
        psiblast_cmd = sw_dir + 'psiblast/psiblast.exe'
    else:
        psiblast_cmd = sw_dir + 'psiblast/psiblast'
        # Ensure the bundled binary is executable on POSIX systems.
        os.chmod(psiblast_cmd, 0o777)
    evalue_threshold = 0.001
    num_iter = 3
    outfmt_type = 5  # psiblast output format 5 = BLAST XML
    BLAST_DB = sw_dir + 'psiblast/nrdb90/nrdb90'
    # print('BLAST_DB:', BLAST_DB)
    cmd = ' '.join([psiblast_cmd,
                    '-query ' + fasta_file,
                    '-db ' + BLAST_DB,
                    '-out ' + xml_file,
                    '-evalue ' + str(evalue_threshold),
                    '-num_iterations ' + str(num_iter),
                    '-num_threads ' + '5',
                    '-out_ascii_pssm ' + pssm_file,
                    '-outfmt ' + str(outfmt_type)
                    ]
                   )
    subprocess.call(cmd, shell=True)
    time.sleep(2)
    sem.release()
def produce_all_frequency(pssm_path, sw_dir, process_num):
    """Produce frequency profiles for every sequence file under pssm_path.

    Creates ``xml`` and ``pssm`` sub-directories for psiblast output and runs
    one thread per sequence, throttled by a semaphore.

    :param pssm_path: directory holding the per-sequence input files.
    :param sw_dir: the main dir of the bundled software.
    :param process_num: maximum number of concurrent psiblast workers.
    :return: path of the directory containing the generated .pssm files.
    """
    seq_files = []
    for entry in os.listdir(pssm_path):
        candidate = pssm_path + '/' + entry
        if os.path.isfile(candidate):
            seq_files.append(candidate)
    sem = threading.Semaphore(process_num)
    xml_dir = pssm_path + '/xml'
    pssm_dir = pssm_path + '/pssm'
    for directory in (xml_dir, pssm_dir):
        if not os.path.isdir(directory):
            os.mkdir(directory)
    workers = []
    for seq_file in seq_files:
        base = os.path.splitext(os.path.split(seq_file)[1])[0]
        xml_file = xml_dir + '/' + base + '.xml'
        pssm_file = pssm_dir + '/' + base + '.pssm'
        workers.append(threading.Thread(target=produce_one_frequency,
                                        args=(seq_file, xml_file, pssm_file, sw_dir, sem)))
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    return pssm_dir
<file_sep>import os
import shutil
import sys
import numpy as np
from ..utils.utils_const import DNA, RNA, PROTEIN
from ..utils.utils_fasta import get_seqs
class FormatWrite(object):
    """Serialise a matrix of feature vectors to disk in one of the supported
    formats: 'svm' (libSVM-like), 'tab', 'csv' or 'tsv'."""

    def __init__(self, vectors, out_format, out_name):
        self.vectors = vectors
        self.out_format = out_format
        self.out_name = out_name

    def write_svm(self):
        """Write the vectors into disk in libSVM format."""
        vector_count = len(self.vectors)
        label_count = len(self.vectors)
        if vector_count == 0:
            sys.exit("The vector is none.")
        if label_count == 0:
            sys.exit("The label is none.")
        if vector_count != label_count:
            sys.exit("The length of vector and label is different.")
        with open(self.out_name, 'w') as handle:
            for vec in self.vectors:
                # First field is the vector's own repr, then 1-based index:value pairs.
                fields = [str(vec)]
                for position, value in enumerate(vec):
                    fields.append(str(position + 1) + ':' + str(value))
                handle.write(' '.join(fields))
                handle.write('\n')

    def write_tab(self):
        """Write the vectors into disk as TAB-delimited rows."""
        with open(self.out_name, 'w') as handle:
            for vec in self.vectors:
                handle.write(str(vec[0]))
                for value in vec[1:]:
                    handle.write('\t' + str(value))
                handle.write('\n')

    def write_csv(self):
        """Write the vectors into disk in csv format."""
        import csv
        with open(self.out_name, 'w', newline='') as stream:
            writer = csv.writer(stream, delimiter=',',
                                quotechar='|', quoting=csv.QUOTE_MINIMAL)
            writer.writerows(self.vectors)

    def write_tsv(self):
        """Write the vectors into disk in tsv format."""
        import csv
        with open(self.out_name, 'w', newline='') as stream:
            writer = csv.writer(stream, delimiter='\t',
                                quotechar='|', quoting=csv.QUOTE_MINIMAL)
            writer.writerows(self.vectors)

    def write_to_file(self):
        """Dispatch on out_format; returns False for an unknown format."""
        dispatch = {'svm': self.write_svm, 'tab': self.write_tab,
                    'csv': self.write_csv, 'tsv': self.write_tsv}
        writer = dispatch.get(self.out_format)
        if writer is None:
            print('Output file format error! Please check.')
            return False
        writer()
def vectors2files(vectors, sample_num_list, out_format, out_file_list):
    """Slice the stacked vector matrix per class and write each slice to its
    own file, then echo the resulting paths.

    :param vectors: 2-D array; rows of class i occupy a contiguous slice.
    :param sample_num_list: number of rows belonging to each output file.
    :param out_format: one of 'svm' / 'tab' / 'csv' / 'tsv'.
    :param out_file_list: one output path per class.
    """
    start = 0
    for i in range(len(sample_num_list)):
        end = start + sample_num_list[i]
        FormatWrite(vectors[start:end, :], out_format, out_file_list[i]).write_to_file()
        start = end
    for index, output_file in enumerate(out_file_list):
        resolved = os.path.abspath(output_file)
        if os.path.isfile(resolved):
            if index == 0:
                print('The output files of feature vectors for (ind) dataset can be found here:')
            print(resolved)
            print('\n')
def res_vectors2file(vec, out_format, out_file):
    """Write one residue-level vector block to disk and echo its location."""
    FormatWrite(vec, out_format, out_file).write_to_file()
    resolved = os.path.abspath(out_file)
    if os.path.isfile(resolved):
        print(resolved)
def dl_vec2file(res_mats, sample_num_list, out_files):
    """Write deep-learning feature matrices to one file per class.

    ``res_mats`` holds the matrices of all classes concatenated in order;
    ``sample_num_list[i]`` says how many of them belong to ``out_files[i]``.
    Each sample is written as a '>vec of sequence: i' header followed by one
    tab-prefixed row per residue; each file ends with a '> end' marker.

    :param res_mats: sequence of 2-D matrices, one per sample.
    :param sample_num_list: per-class sample counts, aligned with out_files.
    :param out_files: one output path per class.
    """
    offset = 0  # start index of the current class inside res_mats
    for count, out_file in enumerate(out_files):
        with open(out_file, 'w') as f:
            for i in range(sample_num_list[count]):
                # Fix: index with the running offset -- the original always
                # started at res_mats[0], so the second and later files
                # re-wrote the first class's samples instead of their own.
                mat = res_mats[offset + i]
                f.write('>vec of sequence: %d\n' % (i + 1))
                for j in range(len(mat)):
                    for val in list(mat[j]):
                        f.write('\t' + str(val))
                    f.write('\n')
            f.write('> end')
        offset += sample_num_list[count]
def write_res_base_vec(res_mats, out_file):
    """Dump per-residue base vectors: each sample starts with a
    '>vec of sequence: i' header and has one tab-prefixed row per residue;
    a '> end' marker terminates the output."""
    with open(out_file, 'w') as handle:
        for index, mat in enumerate(res_mats):
            handle.write('>vec of sequence: %d\n' % (index + 1))
            for row in mat:
                for value in list(row):
                    handle.write('\t' + str(value))
                handle.write('\n')
        handle.write('> end')
def res_base2frag_vec(in_file, res_labels, fixed_len, out_files):
    """Re-read per-residue base vectors, pad/cut each sample to fixed_len,
    split by label into positive/negative sets and write them back out.

    :param in_file: file produced by write_res_base_vec (header lines start
        with '>', data rows are tab-separated floats).
    :param res_labels: list of per-sequence label lists; flattened here.
    :param fixed_len: target row count; longer samples are truncated,
        shorter ones are zero-padded.
    :param out_files: [positive_out_file, negative_out_file].
    """
    vectors_list = []
    vectors = []
    flag = 0  # 1 while we are inside a data block that has not been flushed
    # Fix: use a context manager instead of a bare open/close pair, and drop
    # the leftover debug print that spammed stdout for every data row.
    with open(in_file, 'r') as f:
        for line in f:
            line = line.strip()
            if len(line) != 0:
                if line[0] != '>':
                    vector = list(map(float, line.split('\t')))
                    vectors.append(vector)
                    flag = 1
                else:
                    # A '>' line terminates the current sample, if any.
                    if flag == 1:
                        vectors_list.append(np.array(vectors))
                        vectors = []
                        flag = 0
    label_array = []
    for res_label in res_labels:
        label_array += res_label
    pos_vec_mat = []
    neg_vec_mat = []
    for i in range(len(vectors_list)):
        temp_arr = np.zeros((fixed_len, len(vectors_list[i][0])))
        seq_len = len(vectors_list[i])
        temp_len = min(seq_len, fixed_len)
        temp_arr[:temp_len, :] = vectors_list[i][:temp_len, :]
        if label_array[i] == 1:
            pos_vec_mat.append(temp_arr)
        else:
            neg_vec_mat.append(temp_arr)
    vec_mat_list = pos_vec_mat + neg_vec_mat
    sp_num_list = [len(pos_vec_mat), len(neg_vec_mat)]
    dl_vec2file(vec_mat_list, sp_num_list, out_files)
def fa_vectors2files(vectors, sample_num_list, out_format, file_list):
    """Write feature vectors using output names derived from the input files:
    each input basename gets an 'fa_' prefix in the same directory."""
    renamed = []
    for source in file_list:
        directory, base = os.path.split(source)
        renamed.append(os.path.join(directory, 'fa_' + base))
    vectors2files(vectors, sample_num_list, out_format, renamed)
# def table_sample(level, ml, sample_num_list, label_list, fixed_len, ind):
# tb = pt.PrettyTable()
# print('+---------------------------------------------------+')
# if ind is True:
# print('| The information of independent test dataset |')
# else:
# print('| The information of benchmark dataset |')
# print('+---------------------------------------------------+')
# tb.field_names = ["label of sample", "number of sample"]
# if ml in ['CNN', 'LSTM', 'GRU', 'Transformer', 'Weighted-Transformer', 'Reformer'] and level == 'residue':
# tb.add_row([label_list, sample_num_list[0]])
# else:
# for label, sample_num in zip(label_list, sample_num_list):
# tb.add_row([label, sample_num, fixed_len])
# print(tb)
# print('\n')
#
#
# def table_params(params_dict, opt=False):
# tb = pt.PrettyTable()
#
# if opt is False:
# print('Parameter details'.center(21, '*'))
# tb.field_names = ["parameter", "value"]
# else:
# print('\n')
# print('\n')
# print('\n')
# print('\n')
# print('+---------------------------+')
# print('| Optimal parameter details |')
# print('+---------------------------+')
# tb.field_names = ["parameter", "optimal value"]
# for item in list(params_dict.items()):
# if item[0] not in ['out_files', 'ind_out_files']:
# tb.add_row(item)
# print(tb)
# print('\n')
def create_all_seq_file(seq_files, tgt_dir, ind=False):
    """Build the path of the merged sequence file inside tgt_dir, keeping the
    extension of the first input file; independent test sets get an 'ind_'
    prefix."""
    extension = os.path.splitext(seq_files[0])[-1]
    prefix = 'ind_' if ind else ''
    return tgt_dir + '/' + prefix + 'all_seq_file' + extension
def seq_file2one(category, seq_files, label_list, out_file):
    """Merge several sequence files into one labelled FASTA-style file.

    :param category: 'DNA', 'RNA' or anything else (treated as protein).
    :param seq_files: one input file per label.
    :param label_list: labels aligned with seq_files.
    :param out_file: path of the merged output file.
    :return: (samples per label, lengths of all sequences in file order).
    """
    alphabet = {'DNA': DNA, 'RNA': RNA}.get(category, PROTEIN)
    sp_num_list = []   # number of sequences per label
    seq_all = []       # one list of sequences per label
    seq_len_list = []  # length of every sequence, in input order
    for path in seq_files:
        with open(path, 'r') as in_f:
            seq_list = get_seqs(in_f, alphabet)
            for seq in seq_list:
                seq_len_list.append(len(seq))
            sp_num_list.append(len(seq_list))
            seq_all.append(seq_list)
    # Write every sequence with a header encoding its position and label.
    with open(out_file, 'w') as out_f:
        for i in range(len(label_list)):
            for j in range(len(seq_all[i])):
                out_f.write('>Sequence[' + str(j + 1) + '] | ' + 'Label[' + str(label_list[i]) + ']')
                out_f.write('\n')
                out_f.write(seq_all[i][j])
                out_f.write('\n')
    return sp_num_list, seq_len_list
def gen_label_array(sp_num_list, label_list):
    """Expand per-class sample counts into one flat numpy array of labels."""
    labels = []
    for count, label in zip(sp_num_list, label_list):
        labels.extend([int(label)] * count)
    return np.array(labels)
def fixed_len_control(seq_len_list, fixed_len):
    """Return fixed_len unchanged, or the maximum observed sequence length
    when the caller did not set one (fixed_len is None)."""
    if fixed_len is None:
        return max(seq_len_list)
    return fixed_len
def opt_params2file(selected_params, result_path):
    """Persist the selected (optimal) parameters to 'Opt_params.txt' under
    result_path and echo the file location; the bookkeeping keys
    'out_files'/'ind_out_files' are skipped."""
    lines = ['Optimal value of all parameters:\n']
    for key, value in selected_params.items():
        if key not in ('out_files', 'ind_out_files'):
            lines.append(str(key) + ' = ' + str(value) + '\n')
    filename = result_path + 'Opt_params.txt'
    with open(filename, 'w') as f:
        f.write(''.join(lines))
    full_path = os.path.abspath(filename)
    if os.path.isfile(full_path):
        print('The output file for final results can be found:')
        print(full_path)
        print('\n')
def out_seq_file(label_list, out_format, results_dir, params_dict, params_list_dict):
    """Build one feature-file path per label.

    When every parameter in params_list_dict has a single candidate value the
    files go straight into results_dir; otherwise a per-combination directory
    is created under 'all_fea_files/' whose name encodes only the parameters
    that actually vary (e.g. {k: [1, 2], w: [0.7, 0.8], n: [3]} contributes
    just k and w to the name).
    """
    output_file_list = []
    multi_fea = any(len(values) > 1 for values in params_list_dict.values())
    for label in label_list:
        if not multi_fea:
            path = results_dir + '[' + str(label) + ']_' + str(out_format) + '.txt'
        else:
            path = results_dir + 'all_fea_files/'
            for key, values in params_list_dict.items():
                if len(values) >= 2:
                    path += str(key) + '_' + str(params_dict[key]) + '_'  # e.g. k_2_lag_5_
            path += '/'
            if not os.path.exists(path):
                try:
                    os.makedirs(path)
                except OSError:
                    pass  # may already exist or be racing another worker
            path += 'cv_features[' + str(label) + ']_' + str(out_format) + '.txt'
        output_file_list.append(path)
    return output_file_list
def out_ind_file(label, out_format, results_dir):
    """Build independent-test feature file paths, one per label."""
    return [results_dir + 'ind_features[' + str(lab) + ']_' + str(out_format) + '.txt'
            for lab in label]
def out_dl_seq_file(label, results_dir, ind=False):
    """Build deep-learning feature file paths, one per label; independent
    test sets use the 'ind_' prefix instead of 'cv_'."""
    prefix = 'ind_dl_features[' if ind is True else 'cv_dl_features['
    return [results_dir + prefix + str(lab) + ']_.txt' for lab in label]
def out_res_file(label, results_dir, out_format, fragment, ind):
    """Build residue-level feature file paths; names encode whether this is
    the cv or independent set and whether fragment mode is active."""
    stage = 'ind' if ind is True else 'cv'
    middle = '_res_features[' if fragment == 0 else '_res_frag_features['
    return [results_dir + stage + middle + str(lab) + ']_' + str(out_format) + '.txt'
            for lab in label]
def out_dl_frag_file(label, results_dir, ind=False):
    """Build deep-learning fragment feature file paths, one per label."""
    prefix = 'ind_dl_frag_features[' if ind is True else 'cv_dl_frag_features['
    return [results_dir + prefix + str(lab) + ']_.txt' for lab in label]
def opt_file_copy(source_files, results_dir):
    """Copy the optimal feature files into results_dir.

    The target name is 'opt_' plus the last three '_'-separated pieces of the
    source basename. Returns the list of target paths, or False if any copy
    failed.
    """
    target_files = []
    for source_file in source_files:
        base_name = os.path.split(source_file)[1]
        target_file = results_dir + 'opt_' + '_'.join(base_name.split('_')[-3:])
        target_files.append(target_file)
        try:
            shutil.copyfile(source_file, target_file)
        except IOError as e:
            print("Unable to copy file. %s\n" % e)
            return False
    for index, output_file in enumerate(target_files):
        resolved = os.path.abspath(output_file)
        if os.path.isfile(resolved):
            if index == 0:
                print('+----------------------------------------------------------------+')
                print('| The output files of optimal feature vectors can be found here: |')
                print('+----------------------------------------------------------------+')
            print(resolved)
            print('\n')
    return target_files
def read_res_seq_file(seq_file, category):
    """Read all sequences from seq_file and return their lengths.

    :param seq_file: input sequence file.
    :param category: 'DNA', 'RNA' or anything else (treated as protein).
    :return: list with the length of every sequence, in file order.
    """
    alphabet = {'DNA': DNA, 'RNA': RNA}.get(category, PROTEIN)
    with open(seq_file, 'r') as in_f:
        return [len(seq) for seq in get_seqs(in_f, alphabet)]
def read_res_label_file(label_file):
    """Read per-residue labels from label_file.

    Lines starting with '>' are headers and skipped; every other non-empty
    line is a whitespace-separated list of integer labels for one sequence.

    :param label_file: path of the label file.
    :return: (list of per-sequence label lists, list of label counts).
    """
    res_labels_list = []
    label_len_list = []
    # Fix: use a context manager instead of a bare open/close pair, and skip
    # blank lines -- the original turned a lone '\n' into an empty label list.
    with open(label_file, 'r') as f:
        for line in f:
            line = line.strip()
            if not line or line[0] == '>':
                continue
            labels = list(map(int, line.split()))
            label_len_list.append(len(labels))
            res_labels_list.append(labels)
    return res_labels_list, label_len_list
def res_file_check(seq_len_list, label_len_list, fragment):
    """Validate that residue-level labels are consistent with the sequences.

    :param seq_len_list: length of each input sequence.
    :param label_len_list: number of labels supplied for each sequence.
    :param fragment: 0 for per-residue labels (lengths must match and be
        >= 5), otherwise each sequence must carry exactly one label.
    :raises AssertionError: when counts or lengths disagree.
    """
    assert len(seq_len_list) == len(label_len_list), "The number of sequence should be equal to it's label!"
    # Fix: the original initialised `count = 0` but never incremented it, so
    # every failure message blamed sequence[1]; enumerate reports the real index.
    for count, (seq_len, label_len) in enumerate(zip(seq_len_list, label_len_list)):
        if fragment == 0:
            assert seq_len == label_len, 'The length of sequence[' + str(count+1) + '] is not equal to corresponding ' \
                                                                                    'labels'
            assert label_len >= 5, 'The number of labels for sequence[' + str(count+1) + '] should not less than 5'
        else:
            assert label_len == 1, 'If -fragment is 1, each sequence should have only one label!'
<file_sep>from pylab import zeros, random, log
from sklearn.decomposition import LatentDirichletAllocation, TruncatedSVD
# TODO: LSA
def lsa(vectors, com_prop=0.8):
    """Latent semantic analysis via truncated SVD; the number of retained
    components is com_prop times the input feature dimension."""
    keep = int(len(vectors[0]) * com_prop)
    model = TruncatedSVD(keep)
    return model.fit_transform(vectors)
# TODO: LDA and Label LDA
def lda(vectors, labels=None, com_prop=0.8):
    """Latent Dirichlet allocation (optionally supervised by labels); the
    topic count is com_prop times the input feature dimension."""
    keep = int(len(vectors[0]) * com_prop)
    model = LatentDirichletAllocation(n_components=keep, max_iter=5,
                                      learning_method='batch',
                                      learning_offset=50.,
                                      random_state=0)
    if labels is not None:
        return model.fit_transform(vectors, labels)
    return model.fit_transform(vectors)
# TODO: PLSA
class PLsa(object):
    """Probabilistic latent semantic analysis (pLSA) trained with EM.

    The input is a document-word matrix X of shape [N, M]; the model learns
    K = int(M * com_prop) latent topics. After em_algorithm() the doc-topic
    matrix `lamda` ([N, K]) is the reduced representation of the documents.
    """
    def __init__(self, vectors, com_prop=0.8, max_iter=20):
        self.X = vectors
        self.N, self.M = vectors.shape  # document-word matrix: N documents, vocabulary of size M
        self.K = int(self.M * com_prop)
        self.p = zeros([self.N, self.M, self.K])  # posterior probabilities of the latent topic variable
        self.max_iter = max_iter
    def init_lamda(self):
        # Random doc-topic matrix [N, K], each row normalised to sum to 1.
        lamda = random([self.N, self.K])
        for i in range(0, self.N):
            normalization = sum(lamda[i, :])
            for j in range(0, self.K):
                lamda[i, j] /= normalization
        return lamda
    def init_theta(self):
        # Random topic-word matrix [K, M], each row normalised to sum to 1.
        theta = random([self.K, self.M])
        for i in range(0, self.K):
            normalization = sum(theta[i, :])
            for j in range(0, self.M):
                theta[i, j] /= normalization
        return theta
    # E-Step
    def e_step(self, theta, lamda):
        # Update p[i, j, k] = P(topic k | doc i, word j), normalised over k.
        for i in range(0, self.N):
            for j in range(0, self.M):
                denominator = 0
                for k in range(0, self.K):
                    self.p[i, j, k] = theta[k, j] * lamda[i, k]
                    denominator += self.p[i, j, k]
                if denominator == 0:
                    # Degenerate case: no topic explains this (doc, word) pair.
                    for k in range(0, self.K):
                        self.p[i, j, k] = 0
                else:
                    for k in range(0, self.K):
                        self.p[i, j, k] /= denominator
        return theta, lamda
    # M-Step
    def m_step(self, theta, lamda):
        # Update the theta parameter (topic-word distributions).
        for k in range(0, self.K):
            denominator = 0
            for j in range(0, self.M):
                theta[k, j] = 0
                for i in range(0, self.N):
                    theta[k, j] += self.X[i, j] * self.p[i, j, k]
                denominator += theta[k, j]
            if denominator == 0:
                # Topic never used: fall back to a uniform distribution.
                for j in range(0, self.M):
                    theta[k, j] = 1.0 / self.M
            else:
                for j in range(0, self.M):
                    theta[k, j] /= denominator
        # Update the lamda parameter (doc-topic distributions).
        for i in range(0, self.N):
            for k in range(0, self.K):
                lamda[i, k] = 0
                denominator = 0
                for j in range(0, self.M):
                    lamda[i, k] += self.X[i, j] * self.p[i, j, k]
                    denominator += self.X[i, j]
                if denominator == 0:
                    # Empty document: fall back to a uniform topic mixture.
                    lamda[i, k] = 1.0 / self.K
                else:
                    lamda[i, k] /= denominator
        return theta, lamda
    def log_likelihood(self, theta, lamda):
        # Log-likelihood of X under the current parameters (diagnostic only).
        loglikelihood = 0
        for i in range(0, self.N):
            for j in range(0, self.M):
                tmp = 0
                for k in range(0, self.K):
                    tmp += theta[k, j] * lamda[i, k]
                if tmp > 0:
                    loglikelihood += self.X[i, j] * log(tmp)
        print('log likelihood : ', loglikelihood)
    # EM algorithm
    # ==============================================================================
    def em_algorithm(self):
        theta = self.init_theta()  # topic-word matrix: [K, M]
        lamda = self.init_lamda()  # doc-topic matrix: [N, K] -> the dimensionality-reduced representation we need
        self.log_likelihood(theta, lamda)
        for i in range(0, self.max_iter):
            theta, lamda = self.e_step(theta, lamda)
            theta, lamda = self.m_step(theta, lamda)
        return theta, lamda
<file_sep>import multiprocessing
import os
import time
from CheckAll import Method_One_Hot_Enc, Feature_Extract_Mode, All_Words, FE_PATH_Seq, FE_BATCH_PATH_Seq, \
Method_Semantic_Similarity
from CheckAll import check_contain_chinese, seq_feature_check, mode_params_check, results_dir_check, \
make_params_dicts, print_fe_dict
from FeatureExtractionMode.utils.utils_write import seq_file2one, gen_label_array, out_seq_file, out_dl_seq_file, \
create_all_seq_file, fixed_len_control
from SemanticSimilarity import score_process
def create_results_dir(args, cur_dir):
    """Build (and validate) the directory where generated feature files go.

    In batch mode (args.bp == 1) the path encodes category, mode and any of
    method / in_tm / in_af / words that were supplied; otherwise the plain
    sequence-feature path is used.
    """
    if args.bp == 1:
        results_dir = cur_dir + FE_BATCH_PATH_Seq + str(args.category) + "/" + str(args.mode) + "/"
        for component in (args.method, args.in_tm, args.in_af, args.words):
            if component is not None:
                results_dir += str(component) + "/"
    else:
        results_dir = cur_dir + FE_PATH_Seq
    results_dir_check(results_dir)
    return results_dir
def seq_fe_process(args):
    """Top-level driver for sequence-level feature extraction.

    Merges the input files, checks mode/method parameters, then extracts one
    feature set per parameter combination -- in parallel for classic
    features (args.dl == 0) or once for deep-learning features.
    """
    current_path = os.path.dirname(os.path.realpath(__file__))
    args.current_dir = os.path.dirname(os.getcwd())
    # Refuse to run from a path containing Chinese characters.
    check_contain_chinese(current_path)
    # Create the results directory.
    args.results_dir = create_results_dir(args, args.current_dir)
    # Merge all input sequence files into one.
    input_one_file = create_all_seq_file(args.seq_file, args.results_dir)
    # Count samples per class and collect sequence lengths.
    sp_num_list, seq_len_list = seq_file2one(args.category, args.seq_file, args.label, input_one_file)
    # Build the flat label array.
    label_array = gen_label_array(sp_num_list, args.label)
    # Decide the fixed sequence length (defaults to the maximum length seen).
    args.fixed_len = fixed_len_control(seq_len_list, args.fixed_len)
    # Feature vectors are obtained by traversing the parameter dictionaries
    # and written to file (scoring features excepted).
    # Check the method chosen for the selected mode.
    seq_feature_check(args)
    # Check words and method parameters for each mode.
    all_params_list_dict = {}  # fits the framework
    # params_list_dict contains only the feature-extraction parameters.
    params_list_dict, all_params_list_dict = mode_params_check(args, all_params_list_dict)
    params_dict_list = make_params_dicts(all_params_list_dict)
    # Strategy: traverse every numeric-parameter combination in parallel.
    if args.dl == 0:
        pool = multiprocessing.Pool(args.cpu)
        for i in range(len(params_dict_list)):
            params_dict = params_dict_list[i]
            vec_files = out_seq_file(args.label, args.format, args.results_dir, params_dict, params_list_dict)
            params_dict['out_files'] = vec_files
            # NOTE: worker exceptions are silently dropped by apply_async (no
            # .get() is called); for debugging call one_seq_fe_process directly:
            # one_seq_fe_process(args, input_one_file, label_array, vec_files, sp_num_list, False, **params_dict)
            # Fix: params_dict must be passed through apply_async's `kwds`
            # argument -- the original put it inside the positional tuple, so
            # every worker raised a swallowed TypeError and produced nothing.
            pool.apply_async(one_seq_fe_process,
                             (args, input_one_file, label_array, vec_files, sp_num_list, False),
                             params_dict)
        pool.close()
        pool.join()
    else:
        params_dict = params_dict_list[0]
        vec_files = out_dl_seq_file(args.label, args.results_dir, ind=False)
        params_dict['out_files'] = vec_files
        one_seq_fe_process(args, input_one_file, label_array, vec_files, sp_num_list, False, **params_dict)
def one_seq_fe_process(args, input_one_file, labels, vec_files, sample_num_list, ind, **params_dict):
    """Extract features for one parameter combination, dispatching on args.mode.

    For each mode the extractor is only invoked when an expected output file
    is missing; after extraction an optional semantic-similarity score step
    runs (cross-validation sets only, i.e. when ind is False).

    :param args: parsed command-line namespace (mode, category, format, ...).
    :param input_one_file: merged sequence file produced by seq_file2one.
    :param labels: flat label array for all samples.
    :param vec_files: output feature-file paths, one per label.
    :param sample_num_list: number of samples per label.
    :param ind: True when processing the independent test set.
    :param params_dict: feature-extraction parameters for this combination.
    """
    print_fe_dict(params_dict)  # print details of the feature-extraction parameters
    if args.mode == 'OHE':
        from FeatureExtractionMode.OHE.OHE4vec import ohe2seq_vec, ohe2seq_mat
        for out_file in vec_files:
            if not os.path.exists(out_file):
                if args.dl == 0:
                    ohe2seq_vec(input_one_file, args.category, args.method, args.current_dir, args.pp_file,
                                args.rss_file, sample_num_list, args.fixed_len, args.format, vec_files, args.cpu)
                    if args.score != 'none' and ind is False:
                        score_process(args.score, vec_files, labels, args.cv, args.format, args.cpu)
                else:
                    # Deep-learning path: matrices instead of flat vectors.
                    ohe2seq_mat(input_one_file, args.category, args.method, args.current_dir, args.pp_file,
                                args.rss_file, sample_num_list, vec_files, args.cpu)
    elif args.mode == 'BOW':
        from FeatureExtractionMode.BOW.BOW4vec import bow
        for out_file in vec_files:
            if not os.path.exists(out_file):
                bow(input_one_file, args.category, args.words, sample_num_list, args.format,
                    vec_files, args.current_dir, False, **params_dict)
                if args.score != 'none' and ind is False:
                    score_process(args.score, vec_files, labels, args.cv, args.format, args.cpu)
    elif args.mode == 'TF-IDF':
        from FeatureExtractionMode.TF_IDF.TF_IDF4vec import tf_idf
        for out_file in vec_files:
            if not os.path.exists(out_file):
                tf_idf(input_one_file, args.category, args.words, args.fixed_len, sample_num_list,
                       args.format, vec_files, args.current_dir, False, **params_dict)
                if args.score != 'none' and ind is False:
                    score_process(args.score, vec_files, labels, args.cv, args.format, args.cpu)
    elif args.mode == 'TR':
        from FeatureExtractionMode.TR.TR4vec import text_rank
        for out_file in vec_files:
            if not os.path.exists(out_file):
                text_rank(input_one_file, args.category, args.words, args.fixed_len, sample_num_list,
                          args.format, vec_files, args.current_dir, False, **params_dict)
                if args.score != 'none' and ind is False:
                    score_process(args.score, vec_files, labels, args.cv, args.format, args.cpu)
    elif args.mode == 'WE':
        from FeatureExtractionMode.WE.WE4vec import word_emb
        for out_file in vec_files:
            if not os.path.exists(out_file):
                word_emb(args.method, input_one_file, args.category, args.words, args.fixed_len,
                         sample_num_list, args.format, vec_files, args.current_dir, **params_dict)
                if args.score != 'none' and ind is False:
                    score_process(args.score, vec_files, labels, args.cv, args.format, args.cpu)
    elif args.mode == 'TM':
        from FeatureExtractionMode.TM.TM4vec import topic_model
        for out_file in vec_files:
            if not os.path.exists(out_file):
                topic_model(args.in_tm, args.method, input_one_file, labels, args.category, args.words, args.fixed_len,
                            sample_num_list, args.format, vec_files, args.current_dir, **params_dict)
                if args.score != 'none' and ind is False:
                    score_process(args.score, vec_files, labels, args.cv, args.format, args.cpu)
    elif args.mode == 'SR':
        from FeatureExtractionMode.SR.SR4vec import syntax_rules
        from FeatureExtractionMode.SR.pse import AAIndex
        for out_file in vec_files:
            if not os.path.exists(out_file):
                syntax_rules(args.method, input_one_file, args.category, sample_num_list,
                             args.format, vec_files, args.current_dir, args, **params_dict)
                if args.score != 'none' and ind is False:
                    score_process(args.score, vec_files, labels, args.cv, args.format, args.cpu)
    else:
        # Fallback: automatic features ('AF' mode).
        from FeatureExtractionMode.AF.AF4vec import auto_feature
        for out_file in vec_files:
            if not os.path.exists(out_file):
                # method, in_fa, input_file, labels, sample_num_list, out_format, out_file_list, alphabet, cur_dir,
                # chosen_file, cpu, fixed_len, ** params_dict
                auto_feature(args.method, input_one_file, labels, sample_num_list, vec_files, args, **params_dict)
                if args.score != 'none' and ind is False:
                    score_process(args.score, vec_files, labels, args.cv, args.format, args.cpu)
def main(args):
    """Entry point: run the sequence feature-extraction pipeline and report timing."""
    print("\nStep into analysis...\n")
    started = time.time()
    seq_fe_process(args)
    print("Done.")
    print(("Used time: %.2fs" % (time.time() - started)))
if __name__ == '__main__':
    import argparse

    parse = argparse.ArgumentParser(prog='BioSeq-NLP', description="Step into analysis, please select parameters ")

    # parameters for whole framework
    parse.add_argument('-dl', type=int, default=0, choices=[0, 1],
                       help="Select whether generate features for deep learning algorithm.")
    parse.add_argument('-category', type=str, choices=['DNA', 'RNA', 'Protein'], required=True,
                       help="The category of input sequences.")
    parse.add_argument('-mode', type=str, choices=Feature_Extract_Mode, required=True,
                       help="The feature extraction mode for input sequence which analogies with NLP, "
                            "for example: bag of words (BOW).")
    # parameters for mode
    parse.add_argument('-words', type=str, choices=All_Words,
                       help="If you select mode in ['BOW', 'TF-IDF', 'TR', 'WE', 'TM'], you should select word for "
                            "corresponding mode, for example Mismatch. Pay attention to that "
                            "different category has different words, please reference to manual.")
    parse.add_argument('-method', type=str,
                       help="If you select mode in ['OHE', 'WE', 'TM', 'SR', 'AF'], you should select method for "
                            "corresponding mode, for example select 'LDA' for 'TM' mode, select 'word2vec' for 'WE'"
                            " mode and so on. For different category, the methods belong to 'OHE' and 'SR' mode is "
                            "different, please reference to manual")
    parse.add_argument('-auto_opt', type=int, default=0, choices=[0, 1, 2],
                       help="Choose whether automatically traverse the argument list. "
                            "2 is automatically traversing the argument list set ahead, 1 is automatically traversing "
                            "the argument list in a smaller range, while 0 is not (default=0).")
    # parameters for one-hot encoding
    parse.add_argument('-cpu', type=int, default=1,
                       help="The maximum number of CPU cores used for multiprocessing in generating frequency profile"
                            " and the number of CPU cores used for multiprocessing during parameter selection process "
                            "(default=1).")
    parse.add_argument('-pp_file', type=str,
                       help="The physicochemical properties file user input.\n"
                            "if input nothing, the default physicochemical properties is:\n"
                            "DNA dinucleotide: Rise, Roll, Shift, Slide, Tilt, Twist.\n"
                            "DNA trinucleotide: Dnase I, Bendability (DNAse).\n"
                            "RNA: Rise, Roll, Shift, Slide, Tilt, Twist.\n"
                            "Protein: Hydrophobicity, Hydrophilicity, Mass.")
    parse.add_argument('-rss_file', type=str,
                       help="The second structure file for all input sequences.(The order of a specific sequence "
                            "should be corresponding to the order in 'all_seq_file.txt' file")
    # parameters for bag of words
    parse.add_argument('-word_size', type=int, nargs='*', default=[3],
                       help="The word size of sequences for specific words "
                            "(the range of word_size is between 1 and 6).")
    # Fix: help said "The max value inexact matching" (missing "of").
    parse.add_argument('-mis_num', type=int, nargs='*', default=[1],
                       help="For Mismatch words. The max value of inexact matching, mis_num should smaller than "
                            "word_size (the range of mis_num is between 1 and 6).")
    parse.add_argument('-delta', type=float, nargs='*', default=[0.5],
                       help="For Subsequence words. The value of penalized factor "
                            "(the range of delta is between 0 and 1).")
    # Fix: help mistakenly said "the range of delta" (copy-paste from -delta).
    parse.add_argument('-top_n', type=int, nargs='*', default=[1],
                       help="The maximum distance between structure statuses (the range of top_n is between 1 and 4)."
                            "It works with Top-n-gram words.")
    parse.add_argument('-max_dis', type=int, nargs='*', default=[1],
                       help="The max distance value for DR words and DT words (default range is from 1 to 4).")
    # parameters for TextRank
    parse.add_argument('-alpha', type=float, default=0.85,
                       help="Damping parameter for PageRank used in 'TR' mode, default=0.85.")
    # parameters for word embedding
    parse.add_argument('-win_size', type=int,
                       help="The maximum distance between the current and predicted word within a sentence for "
                            "'word2vec' in 'WE' mode, etc.")
    parse.add_argument('-vec_dim', type=int,
                       help="The output dimension of feature vectors for 'Glove' model and dimensionality of a word "
                            "vectors for 'word2vec' and 'fastText' method.")
    parse.add_argument('-sg', type=int, default=0,
                       help="Training algorithm for 'word2vec' and 'fastText' method. 1 for skip-gram, otherwise CBOW.")
    # parameters for topic model
    parse.add_argument('-in_tm', type=str, choices=['BOW', 'TF-IDF', 'TextRank'],
                       help="While topic model implement subject extraction from a text, the text need to be "
                            "preprocessed by one of mode in choices.")
    parse.add_argument('-com_prop', type=float, default=0.8,
                       help="If choose topic model mode, please set component proportion for output feature vectors.")
    # parameters for syntax rules
    parse.add_argument('-oli', type=int, choices=[0, 1], default=0,
                       help="Choose one kind of Oligonucleotide (default=0): 0 represents dinucleotid; "
                            "1 represents trinucleotide. For MAC, GAC, NMBAC methods of 'SR' mode.")
    parse.add_argument('-lag', type=int, nargs='*', default=[1],
                       help="The value of lag (default=1). For DACC, TACC, ACC, ACC-PSSM, AC-PSSM or CC-PSSM methods"
                            " and so on.")
    parse.add_argument('-lamada', type=int, nargs='*', default=[1],
                       help="The value of lamada (default=1). For MAC, PDT, PDT-Profile, GAC or NMBAC methods "
                            "and so on.")
    # Fix: the actual default is 0.8, but the help text claimed default=0.1.
    parse.add_argument('-w', type=float, nargs='*', default=[0.8],
                       help="The value of weight (default=0.8). For ZCPseKNC method.")
    parse.add_argument('-k', type=int, nargs='*', default=[3],
                       help="The value of Kmer, it works only with ZCPseKNC method.")
    parse.add_argument('-n', type=int, nargs='*', default=[1],
                       help="The maximum distance between structure statuses (default=1). "
                            "It works with PDT-Profile method.")
    parse.add_argument('-ui_file', help="The user-defined physicochemical property file.")
    parse.add_argument('-all_index', dest='a', action='store_true', help="Choose all physicochemical indices.")
    parse.add_argument('-no_all_index', dest='a', action='store_false',
                       help="Do not choose all physicochemical indices, default.")
    parse.set_defaults(a=False)
    # parameters for automatic features/deep learning algorithm
    parse.add_argument('-in_af', type=str, choices=Method_One_Hot_Enc,
                       help="Choose the input for 'AF' mode from 'OHE' mode.")
    parse.add_argument('-lr', type=float, default=0.99,
                       help="The value of learning rate, it works only with 'AF' mode.")
    parse.add_argument('-epochs', type=int,
                       help="The epoch number of train process for 'AF' mode.")
    parse.add_argument('-batch_size', type=int, default=5,
                       help="The size of mini-batch, it works only with 'AF' mode.")
    parse.add_argument('-dropout', type=float, default=0.6,
                       help="The value of dropout prob, it works only with 'AF' mode.")
    parse.add_argument('-fea_dim', type=int, default=256,
                       help="The output dimension of feature vectors, it works only with 'AF' mode.")
    parse.add_argument('-hidden_dim', type=int, default=256,
                       help="Only for automatic features mode."
                            "The size of the intermediate (a.k.a., feed forward) layer, it works only with 'AF' mode.")
    parse.add_argument('-n_layer', type=int, default=2,
                       help="The number of units for LSTM and GRU, it works only with 'AF' mode.")
    parse.add_argument('-motif_database', type=str, choices=['ELM', 'Mega'],
                       help="The database where input motif file comes from.")
    parse.add_argument('-motif_file', type=str,
                       help="The short linear motifs from ELM database or structural motifs from the MegaMotifBase.")
    # parameters for scoring
    parse.add_argument('-score', type=str, choices=Method_Semantic_Similarity, default='none',
                       help="Choose whether calculate semantic similarity score and what method for calculation.")
    parse.add_argument('-cv', choices=['5', '10', 'j'], default='5',
                       help="The cross validation mode.\n"
                            "5 or 10: 5-fold or 10-fold cross validation,\n"
                            "j: (character 'j') jackknife cross validation.")
    # parameters for input
    parse.add_argument('-seq_file', nargs='*', required=True, help="The input files in FASTA format.")
    parse.add_argument('-label', type=int, nargs='*', required=True,
                       help="The corresponding label of input sequence files")
    parse.add_argument('-fixed_len', type=int,
                       help="The length of sequence will be fixed via cutting or padding. If you don't set "
                            "value for 'fixed_len', it will be the maximum length of all input sequences. ")
    # parameters for output
    parse.add_argument('-format', default='csv', choices=['tab', 'svm', 'csv', 'tsv'],
                       help="The output format (default = csv).\n"
                            "tab -- Simple format, delimited by TAB.\n"
                            "svm -- The libSVM training data format.\n"
                            "csv, tsv -- The format that can be loaded into a spreadsheet program.")
    parse.add_argument('-bp', type=int, choices=[0, 1], default=0,
                       help="Select use batch mode or not, the parameter will change the directory for generating file "
                            "based on the method you choose.")
    argv = parse.parse_args()
    main(argv)
<file_sep>import os
import time
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.cluster import AffinityPropagation
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
from sklearn.cluster import DBSCAN
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA, KernelPCA
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.feature_selection import RFE
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2, f_classif, mutual_info_classif
from sklearn.mixture import GaussianMixture
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from FeatureExtractionMode.utils.utils_plot import plot_2d, plot_3d, plot_clustering_2d, plot_ap, plot_fs, plot_hc
from FeatureExtractionMode.utils.utils_write import fa_vectors2files
from MachineLearningAlgorithm.utils.utils_read import files2vectors_info, seq_label_read
def fa_process(args, feature_vectors, labels, after_ps=False, ind=False):
    """Run the configured feature-analysis pipeline on a feature matrix.

    Steps (each gated by an ``args`` switch): normalization -> clustering
    (only when ``after_ps``) -> feature selection -> dimension reduction.

    :param args: argparse namespace (sn/cl/cm/nc/fs/nf/dr/np/rdb plus
        ``results_dir`` used for every persisted artefact/plot).
    :param feature_vectors: 2-D matrix, samples x features.
    :param labels: per-sample class labels.
    :param after_ps: True -> past parameter selection, so results are
        saved/plotted and clustering is performed.
    :param ind: True -> "_ind" (independent test) naming for outputs.
    :return: matrix chosen by ``args.rdb``: 'fs' -> selected features,
        'dr' -> reduced components, otherwise the (normalized) input.
    """
    # normalization
    if args.sn != 'none':
        feature_vectors = normalization(feature_vectors, args.sn)
    # clustering
    if after_ps is True:
        if args.cl != 'none':
            # NOTE(review): this assert demands args.nc even for 'AP',
            # which determines its own cluster count -- confirm intended.
            assert args.nc is not None and args.nc <= feature_vectors.shape[0] and \
                   args.nc <= feature_vectors.shape[1]
            if args.nc is not None or args.cl == 'AP':
                cluster = clustering(feature_vectors, args.cm, args.nc, args.cl, args.results_dir, ind)
                save_cluster_result(cluster, args.results_dir, ind)
                plot_clustering_2d(feature_vectors, cluster, args.results_dir, args.cm, ind)
    # feature select
    if args.fs != 'none':
        assert args.nf is not None and args.nf <= feature_vectors.shape[1]
        fs_vectors, scores = feature_select(feature_vectors, labels, args.nf, args.fs)
        if after_ps is True:
            save_fs_result(scores, args.fs, args.results_dir, ind)
            # plot_fs(scores, args.nf, out_path, ind)
            plot_fs(scores, args.results_dir, ind)  # changed to plot only the top-20 most important features
    else:
        fs_vectors = feature_vectors
    # dimension reduction
    if args.dr != 'none':
        assert args.np is not None and args.np <= feature_vectors.shape[1]
        dr_vectors = dimension_reduction(feature_vectors, args.np, args.dr)
        if after_ps is True:
            save_dr_result(dr_vectors, args.results_dir, ind)
            plot_2d(dr_vectors, labels, args.results_dir, ind)
            plot_3d(dr_vectors, labels, args.results_dir, ind)
    else:
        dr_vectors = feature_vectors
    if args.rdb == 'fs':
        assert args.fs != 'none', "Can't reduce dimension by feature select since feature select method is none"
        return fs_vectors
    elif args.rdb == 'dr':
        assert args.dr != 'none', "Can't reduce dimension by dimension reduce since dimension reduce method is none"
        return dr_vectors
    else:
        # Only report the feature-analysis results; do not reduce the vectors.
        return feature_vectors
def feature_select(vectors, labels, n_features, scoring_func):
    """Select ``n_features`` columns of ``vectors`` with the chosen method.

    :param vectors: 2-D array, samples x features.
    :param labels: class labels, length == number of samples.
    :param n_features: number of features to keep.
    :param scoring_func: one of 'chi2', 'F-value', 'MIC', 'RFE', 'Tree'.
    :return: (selected feature matrix, per-feature scores).  For
        chi2/F-value the scores are p-values, for RFE the rankings, for
        MIC/Tree the (importance) scores -- downstream ranking sorts them
        descending, so the mixed semantics are worth confirming.
    """
    # Placeholders returned if scoring_func matches no branch.
    # fix: per-feature score vector must have shape[1] entries, not shape[0].
    res = np.zeros((vectors.shape[0], n_features))
    scores = np.zeros(vectors.shape[1])
    if scoring_func == 'chi2':
        selector = SelectKBest(chi2, k=n_features).fit(vectors, labels)
        res = selector.transform(vectors)
        scores = selector.pvalues_
    elif scoring_func == 'F-value':
        selector = SelectKBest(f_classif, k=n_features).fit(vectors, labels)
        res = selector.transform(vectors)
        scores = selector.pvalues_
    elif scoring_func == 'MIC':
        selector = SelectKBest(mutual_info_classif, k=n_features).fit(vectors, labels)
        res = selector.transform(vectors)
        # fix: mutual_info_classif yields no p-values (pvalues_ is None,
        # which crashed the downstream np.argsort) -- use scores_ instead.
        scores = selector.scores_
    elif scoring_func == 'RFE':
        svc = SVC(kernel="linear", C=1)
        rfe = RFE(estimator=svc, n_features_to_select=n_features, step=1).fit(vectors, labels)
        res = rfe.transform(vectors)
        scores = rfe.ranking_
    elif scoring_func == 'Tree':
        clf = ExtraTreesClassifier(n_estimators=50).fit(vectors, labels)
        scores = clf.feature_importances_
        model = SelectFromModel(clf, prefit=True, threshold=-np.inf, max_features=n_features)
        res = model.transform(vectors)
    return res, scores
def normalization(vectors, normal_method):
    """Scale or normalize a feature matrix.

    :param vectors: 2-D feature matrix.
    :param normal_method: 'min-max-scale', 'standard-scale',
        'L1-normalize'; anything else falls back to L2 normalization.
    :return: transformed matrix.
    """
    if normal_method == 'min-max-scale':
        return preprocessing.MinMaxScaler().fit_transform(vectors)
    if normal_method == 'standard-scale':
        return preprocessing.StandardScaler().fit_transform(vectors)
    if normal_method == 'L1-normalize':
        return preprocessing.normalize(vectors, norm='l1')
    # Default: L2 row normalization.
    return preprocessing.normalize(vectors, norm='l2')
def clustering(vectors, mode, n_clusters, cluster_method, out_path, ind=False):
    """Cluster samples (or features, when ``mode == 'feature'``).

    :param vectors: 2-D matrix; transposed first in feature mode.
    :param mode: 'feature' or 'sample' -- selects naming and orientation.
    :param n_clusters: target cluster count (ignored by AP/DBSCAN).
    :param cluster_method: 'AP', 'DBSCAN', 'GMM', 'AGNES' or 'Kmeans'.
    :param out_path: directory for AP / dendrogram plots.
    :param ind: independent-test flag forwarded to the plot helpers.
    :return: list of [name, cluster_label] pairs ('F#'/'S#' names).
    """
    if mode == 'feature':
        vectors = vectors.T
        index = ['F%d' % (pos + 1) for pos in range(len(vectors))]
    else:
        index = ['S%d' % (pos + 1) for pos in range(len(vectors))]
    labels = np.zeros(len(vectors))
    if cluster_method == 'AP':
        ap = AffinityPropagation().fit(vectors)
        labels = ap.labels_
        plot_ap(vectors, ap.cluster_centers_indices_, labels, out_path, ind)
    elif cluster_method == 'DBSCAN':
        scaled = StandardScaler().fit_transform(vectors)
        labels = DBSCAN().fit(scaled).labels_
    elif cluster_method == 'GMM':
        mixture = GaussianMixture(n_components=n_clusters).fit(vectors)
        labels = mixture.predict(vectors)
    elif cluster_method == 'AGNES':
        # Dendrogram plot first, then connectivity-constrained Ward linkage.
        plot_hc(vectors, index, out_path, ind)
        connectivity = kneighbors_graph(vectors, n_neighbors=10, include_self=False)
        labels = AgglomerativeClustering(n_clusters=n_clusters, connectivity=connectivity,
                                         linkage='ward').fit(vectors).labels_
    elif cluster_method == 'Kmeans':
        labels = KMeans(n_clusters=n_clusters).fit_predict(vectors)
    return [[name, label] for name, label in zip(index, labels)]
def dimension_reduction(data, n_components, dr_method):
    """Project ``data`` down to ``n_components`` dimensions.

    :param data: 2-D matrix, samples x features.
    :param n_components: target dimensionality.
    :param dr_method: 'PCA', 'KernelPCA' or 'TSVD'; any other value
        yields an all-zero placeholder matrix (original behavior).
    :return: (samples x n_components) matrix.
    """
    if dr_method == 'PCA':
        return PCA(n_components=n_components, whiten=True).fit_transform(data)
    if dr_method == 'KernelPCA':
        return KernelPCA(n_components=n_components, kernel="rbf").fit_transform(data)
    if dr_method == 'TSVD':
        return TruncatedSVD(n_components).fit_transform(data)
    return np.zeros((data.shape[0], n_components))
def save_cluster_result(cluster, out_path, ind=False):
    """Write clustering assignments to a text file and echo its location.

    :param cluster: list of [name, cluster_label] pairs, or None.
    :param out_path: directory prefix (expected to end with '/').
    :param ind: True -> use the independent-test output filename.
    :return: False when ``cluster`` is None, otherwise None.
    """
    suffix = 'cluster_results_ind.txt' if ind is True else 'cluster_results.txt'
    filename = out_path + suffix
    if cluster is None:
        return False
    arr = np.array(cluster)
    frame = pd.DataFrame({'name': arr[:, 0], 'cluster': arr[:, 1]})
    distinct = set(frame.cluster.tolist())
    with open(filename, 'w') as handle:
        handle.write('# The sample/feature can be clustered into %d clusters:\n' % len(distinct))
        handle.write('Feature\tcluster\n')
        for name, label in cluster:
            handle.write(name + '\t' + str(label) + '\n')
    full_path = os.path.abspath(filename)
    if os.path.isfile(full_path):
        print('The output clustering file can be found:')
        print(full_path)
        print('\n')
def save_fs_result(scores, method, out_path, ind=False):
if ind is True:
filename = out_path + 'feature_selection_results_ind.txt'
else:
filename = out_path + 'feature_selection_results.txt'
if scores is not None:
index = []
for i in range(len(scores)):
index.append('F%d' % (i + 1))
ranking = np.argsort(-scores)
with open(filename, 'w') as f:
f.write('# Feature selection method: %s\n' % method)
for i in range(len(ranking)):
f.write(index[ranking[i]] + '\t' + str(scores[ranking[i]]) + '\n')
full_path = os.path.abspath(filename)
if os.path.isfile(full_path):
print('The output feature selection file can be found:')
print(full_path)
print('\n')
def save_dr_result(reduced_data, out_path, ind=False):
    """Write the dimension-reduced sample matrix to a TSV-style file.

    :param reduced_data: 2-D array-like (samples x components), or None.
    :param out_path: directory prefix (expected to end with '/').
    :param ind: True -> use the independent-test output filename.
    :return: False when ``reduced_data`` is None, otherwise None.
    """
    if ind is True:
        filename = out_path + 'dimension_reduction_results_ind.txt'
    else:
        filename = out_path + 'dimension_reduction_results.txt'
    if reduced_data is None:
        return False
    # fix: removed a dead 'F%d' index list that was built but never used.
    with open(filename, 'w') as f:
        # Header: Sample, then one PC column per component.
        f.write('Sample')
        for i in range(1, len(reduced_data[0]) + 1):
            f.write('\tPC' + str(i))
        f.write('\n')
        # One row per sample; note names start at S0 here (S1 elsewhere).
        for i in range(len(reduced_data)):
            f.write('S%d' % i)
            for j in range(len(reduced_data[0])):
                f.write('\t' + str(reduced_data[i][j]))
            f.write('\n')
    full_path = os.path.abspath(filename)
    if os.path.isfile(full_path):
        print('The output dimension reduction file can be found:')
        print(full_path)
        print('\n')
def main(args):
    """Entry point: load vector files, run feature analysis, write results.

    :param args: parsed CLI namespace; ``results_dir`` is derived here from
        the directory of the first input vector file.
    """
    print("\nStep into analysis...\n")
    start_time = time.time()
    # Read vectors, per-file sample counts and absolute input paths.
    args.results_dir = os.path.dirname(os.path.abspath(args.vec_file[0])) + '/'
    vectors, sample_num_list, in_files = files2vectors_info(args.vec_file, args.format)
    labels = seq_label_read(sample_num_list, args.label)
    fa_vectors = fa_process(args, vectors, labels, after_ps=True)
    fa_vectors2files(fa_vectors, sample_num_list, args.format, in_files)
    print("Done.")
    print(("Used time: %.2fs" % (time.time() - start_time)))
if __name__ == '__main__':
    import argparse

    # Stand-alone CLI for the feature-analysis step.
    parse = argparse.ArgumentParser(prog='BioSeq-NLP', description="Step into analysis, please select parameters ")
    # ----------------------- parameters for feature analysis---------------------- #
    # standardization or normalization
    parse.add_argument('-sn', choices=['min-max-scale', 'standard-scale', 'L1-normalize', 'L2-normalize', 'none'],
                       default='none', help=" Choose method of standardization or normalization for feature vectors.")
    # clustering
    parse.add_argument('-cl', choices=['AP', 'DBSCAN', 'GMM', 'AGNES', 'Kmeans', 'none'], default='none',
                       help="Choose method for clustering.")
    parse.add_argument('-cm', default='sample', choices=['feature', 'sample'], help="The mode for clustering.")
    parse.add_argument('-nc', type=int, help="The number of clusters.")
    # feature select
    parse.add_argument('-fs', choices=['chi2', 'F-value', 'MIC', 'RFE', 'Tree', 'none'], default='none',
                       help="Select feature select method.")
    parse.add_argument('-nf', type=int, help="The number of features after feature selection.")
    # dimension reduction
    parse.add_argument('-dr', choices=['PCA', 'KernelPCA', 'TSVD', 'none'], default='none',
                       help="Choose method for dimension reduction.")
    parse.add_argument('-np', type=int, help="The dimension of main component after dimension reduction.")
    # rdb
    parse.add_argument('-rdb', choices=['no', 'fs', 'dr'], default='no',
                       help="Reduce dimension by:\n"
                            " 'no'---none;\n"
                            " 'fs'---apply feature selection to parameter selection procedure;\n"
                            " 'dr'---apply dimension reduction to parameter selection procedure.\n")
    # input / output
    parse.add_argument('-vec_file', nargs='*', required=True, help="The input vector file or files")
    parse.add_argument('-label', type=int, nargs='*', required=True,
                       help="The corresponding label of input vector file or files")
    parse.add_argument('-format', default='csv', choices=['tab', 'svm', 'csv', 'tsv'],
                       help="The output format (default = csv).\n"
                            "tab -- Simple format, delimited by TAB.\n"
                            "svm -- The libSVM training data format.\n"
                            "csv, tsv -- The format that can be loaded into a spreadsheet program.")
    argv = parse.parse_args()
    main(argv)
<file_sep>import os
import numpy as np
from ..SR.profile import simplify_pssm, read_pssm, get_blosum62
from ..utils.utils_const import aaList_DNA, aaList, DBM_List
from ..utils.utils_pssm import produce_all_frequency, sep_file, generate_pssm
from ..utils.utils_psfm import km2index, sep_file_psfm, run_group_search, profile_worker
class EvolutionaryInformation2Vectors(object):
    """Encode sequences as per-residue evolutionary-information matrices.

    Supported encodings: DBM (blast_matrix), PAM250, BLOSUM62, and the
    PSSM/PSFM profile methods that shell out to external search tools.
    Every public method appends one numpy matrix per input sequence to
    ``self.vec_mat_list`` and returns that shared, cumulative list, so
    reusing one instance across files accumulates results.
    """

    # TODO: define the variables the model needs inside the initializer
    def __init__(self, alphabet, cur_dir=None):
        """
        Initialize the object.
        :param alphabet: DNA, RNA or Protein
        :param cur_dir: project root; external binaries are expected in
            ``<cur_dir>/software/``.
        """
        if alphabet == 'DNA':
            self.size = 4
            self.alphabet_list = aaList_DNA
        elif alphabet == 'RNA':
            # Evolutionary-information encodings are unsupported for RNA.
            print('Evolutionary information class method is not adapt for RNA!')
            exit()
        else:
            self.size = 20
            self.alphabet_list = aaList
        self.cur_dir = cur_dir
        full_path = os.path.realpath(__file__)
        # Substitution-matrix data files bundled next to this module.
        self.ei_dir = os.path.dirname(full_path) + '/data/'
        # External profile-search binaries.
        self.sw_dir = cur_dir + '/software/'
        self.vec_mat_list = []

    def blast_matrix(self, input_file):
        """Encode every FASTA sequence via the DBM per-residue lookup.

        :param input_file: FASTA file; '>' header lines are skipped.
        :return: self.vec_mat_list with one (seq_len x dim) array appended
            per sequence.
        """
        dbm_dict = DBM_List
        with open(input_file) as f:
            for line in f:
                line = line.strip().upper()
                if line[0] == '>':
                    continue
                else:
                    vec_mat = []
                    temp_len = len(line)
                    for i in range(temp_len):
                        vector = dbm_dict[line[i]]
                        vec_mat.append(vector)
                    self.vec_mat_list.append(np.array(vec_mat))
        return self.vec_mat_list

    def pam250(self, file_path):
        """Encode every sequence with per-residue PAM250 substitution rows.

        :param file_path: FASTA file of protein sequences.
        :return: self.vec_mat_list with one (seq_len x 20) array per sequence.
        """
        pam250 = {}
        pam250_path = self.ei_dir + 'PAM250.txt'
        pam250_reader = open(pam250_path)  # NOTE(review): handle never closed
        count = 0
        # read the matrix of pam250
        for line in pam250_reader:
            count += 1
            if count <= 1:
                # skip the single header line
                continue
            line = line.strip('\r').split()
            # print(line)
            if line[0] != '*':
                # first token is the residue letter; keep its 20 scores
                pam250[line[0]] = [float(x) for x in line[1:21]]
        # print(pam250)
        with open(file_path) as f:
            for line in f:
                line = line.strip()
                if line[0] == '>':
                    continue
                else:
                    vec_mat = []
                    temp_len = len(line)
                    for i in range(temp_len):
                        vec_mat.append(pam250[line[i]])
                    self.vec_mat_list.append(np.array(vec_mat))
        return self.vec_mat_list

    def blosum62(self, file_path):
        """Encode every sequence with per-residue BLOSUM62 substitution rows.

        :param file_path: FASTA file of protein sequences.
        :return: self.vec_mat_list with one (seq_len x 20) array per sequence.
        """
        blosum62 = {}
        blosum62_path = self.ei_dir + 'blosum62'
        blosum_reader = open(blosum62_path)  # NOTE(review): handle never closed
        count = 0
        # read the matrix of blosum62
        for line in blosum_reader:
            count += 1
            if count <= 7:
                # skip the 7 header/comment lines of the matrix file
                continue
            line = line.strip('\r').split()
            if line[0] != '*':
                blosum62[line[0]] = [float(x) for x in line[1:21]]
        with open(file_path) as f:
            for line in f:
                line = line.strip()
                if line[0] == '>':
                    continue
                else:
                    vec_mat = []
                    temp_len = len(line)
                    for i in range(temp_len):
                        vec_mat.append(blosum62[line[i]])
                    self.vec_mat_list.append(np.array(vec_mat))
        return self.vec_mat_list

    def pssm(self, input_file, process_num):
        """Build PSSM profile matrices for every sequence in ``input_file``.

        Splits the FASTA file into per-sequence files, runs the profile
        search (via produce_all_frequency) with ``process_num`` workers,
        then parses each '<n>.pssm' result.  Sequences whose PSSM cannot
        be parsed fall back to BLOSUM62-derived rows.

        :return: self.vec_mat_list with one profile matrix per sequence.
        """
        pssm_path, seq_name = sep_file(input_file)
        # pssm_path: # D:\Leon\bionlp\BioSeq-NLP/data/results/Protein/sequence/OHE/SVM/PSSM/all_seq
        pssm_dir = produce_all_frequency(pssm_path, self.sw_dir, process_num)
        # debug mode on/off
        # pssm_dir = self.cur_dir + "/results/all_seq_cv/pssm"
        # print('pssm_dir: ', pssm_dir)
        # pssm_dir: D:\Leon\bionlp\BioSeq-NLP\data\results\Protein\sequence\OHE\SVM\PSSM\all_seq/pssm
        dir_name = os.path.split(pssm_dir)[0]
        xml_dir = dir_name + '/xml'
        # print('xml_dir: ', xml_dir)
        # xml_dir: D:\Leon\bionlp\BioSeq-NLP\data\results\Protein\sequence\OHE\SVM\PSSM\all_seq/xml
        final_result = ''.join([dir_name, '/final_result'])
        # print('final_result: ', final_result)
        # final_result: D:\Leon\bionlp\BioSeq-NLP\data\results\Protein\sequence\OHE\SVM\PSSM\all_seq/final_result
        if not os.path.isdir(final_result):
            os.mkdir(final_result)
        dir_list = os.listdir(xml_dir)
        # print('dir_list: ', dir_list)
        # dir_list: ['1.xml', '10.xml', '11.xml', '12.xml', '13.xml', '14.xml', '15.xml', '16.xml', '17.xml',
        # '18.xml', '19.xml', '2.xml', '20.xml', '3.xml', '4.xml', '5.xml', '6.xml', '7.xml', '8.xml', '9.xml']
        # Collect the numeric ids of the generated xml files and sort them
        # so profiles are processed in input-sequence order.
        index_list = []
        for elem in dir_list:
            xml_full_path = ''.join([xml_dir, '/', elem])
            # print("xml_full_path: ", xml_full_path)
            # xml_full_path: D:\Leon\bionlp\BioSeq-NLP\data\results\Protein\sequence\OHE\SVM\PSSM\all_seq/xml/1.xml
            name, suffix = os.path.splitext(elem)
            # print("suffix: ", suffix)
            # suffix: .xml
            if os.path.isfile(xml_full_path) and suffix == '.xml':
                index_list.append(int(name))
        index_list.sort()
        # print('index_list:', index_list)
        # index_list: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
        pssm_pro_files = []
        seq_names = []
        for index in index_list:
            pssm_file = pssm_dir + '/' + str(index) + '.pssm'
            pssm_file_list = list(os.path.splitext(pssm_file))
            pssm_process_file = pssm_file_list[0] + '_pro' + pssm_file_list[1]
            # NOTE(review): this rebinding shadows the seq_name returned by
            # sep_file above.
            seq_name = pssm_file_list[0].split('/')[-1]
            seq_names.append(seq_name)
            pssm_pro_files.append(pssm_process_file)
            simplify_pssm(pssm_file, pssm_process_file)
            # pssm_process_file keeps, after dropping the first three and the
            # last eight lines, only the first 20 numbers after each residue letter
            pssm = read_pssm(pssm_process_file)
            if pssm is False:
                # Profile parsing failed: rebuild a pseudo-PSSM from BLOSUM62
                # rows of the raw sequence, prefixing each row with the residue.
                p1 = os.path.split(pssm_process_file)
                seq_path = os.path.split(p1[0])[0] + '/' + seq_name + '.txt'
                with open(seq_path) as f:
                    lines = f.readlines()
                protein_seq = lines[1].strip().upper()
                pssm = get_blosum62(protein_seq)
                pssm = np.array(pssm)
                protein_seq = [np.array([x]) for x in list(protein_seq)]
                protein_seq = np.array(protein_seq)
                pssm = np.hstack((protein_seq, pssm))
            temp_vec = generate_pssm(pssm)
            self.vec_mat_list.append(temp_vec)
        return self.vec_mat_list

    def psfm(self, file_path, process_num):
        """Build PSFM (frequency profile) matrices for every sequence.

        Creates the pssm/xml/msa/psfm working directories next to the
        input, runs the MSA searches in parallel, then converts each
        per-sequence profile into a matrix.

        :return: self.vec_mat_list with one profile matrix per sequence.
        """
        k = 1
        km_index = km2index(self.alphabet_list, k)
        headers = sorted(iter(km_index.items()), key=lambda d: d[1])
        # print("km_index: ", km_index)
        # print("headers: ", headers)
        # km_index: {'A': 0, 'C': 1, ..., 'W': 18, 'Y': 19}
        # headers: [('A', 0), ('C', 1), ..., ('W', 18), ('Y', 19)]
        profile_home = os.path.split(file_path)[0] + '/' + str(os.path.split(file_path)[1].split('.')[0])
        # print('profile_home', profile_home)
        # profile_home D:\Leon\bionlp\BioSeq-NLP/data/results/Protein/sequence/OHE/SVM/PSFM/all_seq
        if not os.path.exists(profile_home):
            try:
                os.makedirs(profile_home)
            except OSError:
                pass
        seq_dir, seq_name = sep_file_psfm(profile_home, file_path)
        # print('seq_dir', seq_dir)
        # print('seq_name', seq_name)
        # seq_dir D:\Leon\bionlp\BioSeq-NLP\data\results\Protein\sequence\OHE\SVM\PSFM\all_seq\all_seq
        # seq_name ['1AKHA\t|1~1', '1AOII\t|1~2', '1B6WA\t|1~3', ...]
        profile_home = seq_dir
        seq_dir = os.listdir(seq_dir)
        seq_dir.sort()
        # Working sub-directories for the intermediate artefacts.
        pssm_dir = profile_home + '/pssm'
        if not os.path.isdir(pssm_dir):
            try:
                os.makedirs(pssm_dir)
            except OSError:
                pass
        xml_dir = profile_home + '/xml'
        if not os.path.isdir(xml_dir):
            try:
                os.makedirs(xml_dir)
            except OSError:
                pass
        msa_dir = profile_home + '/msa'
        if not os.path.isdir(msa_dir):
            try:
                os.makedirs(msa_dir)
            except OSError:
                pass
        psfm_dir = profile_home + '/psfm'
        if not os.path.isdir(psfm_dir):
            try:
                os.makedirs(psfm_dir)
            except OSError:
                pass
        index_list = []
        for elem in seq_dir:
            name, suffix = os.path.splitext(elem)
            index_list.append(int(name))
        index_list.sort()
        # print('index_list:', index_list)
        # index_list: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
        # exit()
        run_group_search(index_list, profile_home, self.sw_dir, process_num)  # compute the MSAs in parallel
        for i in range(0, len(index_list)):
            seq = str(index_list[i]) + '.txt'
            out_file = profile_worker(seq, self.alphabet_list, k, profile_home, headers)
            psfm_mat = read_pssm(out_file)
            temp_vec = generate_pssm(psfm_mat)
            self.vec_mat_list.append(temp_vec)
        return self.vec_mat_list
<file_sep>import numpy as np
from ..utils.utils_words import make_km_list
from ..utils.utils_const import aaList, aaList_DNA, aaList_RNA, aaList_sixbits, aaList_five, aaList_AESNN3, aaList_ncp
class ResidueComposition2Vectors(object):
    """Per-residue composition encodings for biological sequences.

    Each public method parses a FASTA file (headers skipped) and appends
    one numpy matrix per sequence to ``self.vec_mat_list``, returning that
    shared, cumulative list.
    """

    # TODO: define the variables the model needs inside the initializer
    def __init__(self, alphabet):
        """
        Initialize the object.
        :param alphabet: DNA, RNA or Protein
        """
        if alphabet == 'DNA':
            self.size = 4
            self.alphabet_list = aaList_DNA
        elif alphabet == 'RNA':
            self.size = 4
            self.alphabet_list = aaList_RNA
        else:
            self.size = 20
            self.alphabet_list = aaList
        # Per-sequence residue-index lists built by one_hot (cumulative).
        self.aaList_Index = []
        self.vec_mat_list = []

    def one_hot(self, input_file):
        """One-hot encode each residue against the alphabet.

        :return: self.vec_mat_list with one (seq_len x size) array per sequence.
        """
        with open(input_file) as r:
            for line in r:
                if line[0] == '>':
                    continue
                else:
                    index_list = []
                    line = line.strip().upper()
                    for k in range(len(line)):
                        index_list.append(str(self.alphabet_list.index(line[k])))
                    self.aaList_Index.append(index_list)
        for i in range(len(self.aaList_Index)):
            temp_length = len(self.aaList_Index[i])  # length <= fixed_len
            vec_mat = np.zeros((temp_length, self.size))
            for j in range(temp_length):
                vector = [0] * self.size
                vector[int(self.aaList_Index[i][j])] = 1
                vector = list(map(float, vector))
                vec_mat[j] = vector
            self.vec_mat_list.append(vec_mat)
        return self.vec_mat_list

    def position_specific(self, k, input_file):
        """One-hot encode every overlapping k-mer by its rank in the k-mer list.

        :param k: k-mer size; each sequence yields len(seq)-k+1 rows of
            dimension |alphabet|^k.
        """
        kms = make_km_list(k, self.alphabet_list)
        with open(input_file) as f:
            for line in f:
                if line[0] == '>':
                    continue
                else:
                    line = line.upper().strip()
                    vec_mat = []
                    length = len(line)
                    for s in range(length-k+1):
                        seq = line[s:(s + k)]
                        index = kms.index(seq)
                        fe = [0] * len(kms)
                        fe[index] = 1
                        vec_mat.append(list(map(float, fe)))
                    self.vec_mat_list.append(np.array(vec_mat))
        return self.vec_mat_list

    def one_hot_six_bits(self, input_file):
        """6-bit one-hot: residues grouped into the six aaList_sixbits classes."""
        index_list = []
        six_bits_alphabet_list = aaList_sixbits
        with open(input_file) as r:
            for line in r:
                if line[0] == '>':
                    continue
                else:
                    index = []
                    line = line.strip()
                    length = len(line)
                    for k in range(length):
                        # index of the residue group the letter belongs to
                        for i in range(len(six_bits_alphabet_list)):
                            if line[k] in six_bits_alphabet_list[i]:
                                index.append(str(i))
                    index_list.append(index)
        for i in range(len(index_list)):
            vec_mat = []
            temp_len = len(index_list[i])
            for j in range(temp_len):
                vector = [0] * 6
                vector[int(index_list[i][j])] = 1
                vector = list(map(float, vector))
                vec_mat.append(vector)
            self.vec_mat_list.append(np.array(vec_mat))
        return self.vec_mat_list

    def one_hot_five(self, input_file):
        """5-dim encoding: residue index into aaList, mapped through aaList_five.

        Note: always uses the protein alphabet (aaList), regardless of the
        alphabet passed to __init__.
        """
        index_list = []
        alphabet_list = aaList
        five_alphabet_list = aaList_five
        with open(input_file) as r:
            for line in r:
                if line[0] == '>':
                    continue
                else:
                    index = []
                    line = line.strip()
                    length = len(line)
                    for k in range(length):
                        index.append(str(alphabet_list.index(line[k])))
                    index_list.append(index)
        for i in range(len(index_list)):
            vec_mat = []
            temp_len = len(index_list[i])
            for j in range(temp_len):
                vector = five_alphabet_list[int(index_list[i][j])]
                vector = list(map(float, vector))
                vec_mat.append(vector)
            self.vec_mat_list.append(np.array(vec_mat))
        return self.vec_mat_list

    def aesnn3(self, input_file):
        """AESNN3 3-dim learned encoding per residue (protein only)."""
        # Just for Protein
        encoding_schemes = aaList_AESNN3
        with open(input_file) as f:
            for line in f:
                line = line.strip()
                if line[0] != '>':
                    vec_mat = []
                    temp_len = len(line)
                    for i in range(temp_len):
                        vector = encoding_schemes[line[i]]
                        vector = list(map(float, vector))
                        vec_mat.append(vector)
                    self.vec_mat_list.append(np.array(vec_mat))
        return self.vec_mat_list

    def dbe(self, input_file):
        """Dinucleotide binary encoding: each overlapping 2-mer becomes the
        4-bit binary form of its rank in the 2-mer list."""
        kms = make_km_list(2, self.alphabet_list)
        with open(input_file) as f:
            for line in f:
                if line[0] == '>':
                    continue
                else:
                    line = line.upper().strip()
                    vec_mat = []
                    temp_len = len(line)
                    for i in range(temp_len-1):
                        seq = line[i:(i + 2)]
                        index = kms.index(seq)
                        fe = list(map(float, bin(index)[2:].zfill(4)))  # for index=15, fe=[1, 1, 1, 1]
                        vec_mat.append(list(map(float, fe)))
                    self.vec_mat_list.append(np.array(vec_mat))
        return self.vec_mat_list

    def ncp(self, input_file):
        """Nucleotide chemical property encoding per residue (RNA only)."""
        # Just for RNA
        encoding_schemes = aaList_ncp
        with open(input_file) as f:
            for line in f:
                line = line.strip()
                if line[0] != '>':
                    vec_mat = []
                    temp_len = len(line)
                    for i in range(temp_len):
                        vector = encoding_schemes[line[i]]
                        vector = list(map(float, vector))
                        vec_mat.append(vector)
                    self.vec_mat_list.append(np.array(vec_mat))
        return self.vec_mat_list
<file_sep>from ..utils.utils_words import subsequence_words
from ..utils.utils_algorithm import tf_idf
def subsequence_tf_idf(input_file, alphabet, fixed_len, word_size, fixed=True):
    """TF-IDF features over subsequence words of each input sequence.

    Builds a word corpus via ``subsequence_words`` (presumably one word
    list per sequence -- semantics live in the helper; confirm there) and
    weights it with ``tf_idf``.

    :param input_file: sequence file (FASTA).
    :param alphabet: DNA, RNA or Protein alphabet.
    :param fixed_len: target length used when building words.
    :param word_size: length of each subsequence word.
    :param fixed: forwarded to subsequence_words (cut/pad behavior).
    :return: whatever tf_idf produces for the corpus (weighted matrix).
    """
    corpus = subsequence_words(input_file, alphabet, fixed_len, word_size, fixed)
    return tf_idf(corpus)
<file_sep>import os
import pickle
import sys
from math import pow
from numpy import array
from ..utils.utils_const import DNA, RNA
from ..utils.utils_fasta import get_seqs
from ..utils.utils_words import make_km_list
from .index_list import DNA, RNA, PROTEIN, didna_list, tridna_list, dirna_list, pro_list
class AAIndex:
    """A named amino-acid index: a header plus its {residue: value} map."""

    def __init__(self, head, index_dict):
        # head: index identifier; index_dict: per-residue values.
        self.head = head
        self.index_dict = index_dict

    def __str__(self):
        # Header on the first line, the raw dict repr on the second.
        return "{}\n{}".format(self.head, self.index_dict)
def frequency_p(tol_str, tar_str):
    """Count overlapping occurrences of tar_str in tol_str, split by codon phase.

    :param tol_str: mother string.
    :param tar_str: substring.
    :return: (total, phase1, phase2, phase3) where the phase bucket of a
        match starting at index p is (p + 2) % 3 == 1 / 2 / 0 respectively
        (matching the original scan's position bookkeeping).
    """
    if not tar_str:
        # The original while-loop never completes a match for an empty target.
        return (0, 0, 0, 0)
    total = phase1 = phase2 = phase3 = 0
    width = len(tar_str)
    for start in range(len(tol_str) - width + 1):
        if tol_str[start:start + width] != tar_str:
            continue
        total += 1
        bucket = (start + 2) % 3
        if bucket == 1:
            phase1 += 1
        elif bucket == 2:
            phase2 += 1
        else:
            phase3 += 1
    return (total, phase1, phase2, phase3)
def z_curve(sequence, k, alphabet):
    """Compute the phase-specific Z-curve descriptor of a sequence.

    The k-mer list is processed in consecutive groups of four (one per
    base); for each group the x/y/z components are built from the
    codon-phase-specific counts returned by frequency_p.

    :param sequence: nucleotide sequence string.
    :param k: k-mer size.
    :param alphabet: alphabet used to enumerate the k-mer list.
    :return: flat list [fx..., fy..., fz...] with three values per group.
    """
    km = make_km_list(k, alphabet)
    fx_list = []
    fy_list = []
    fz_list = []
    for group_start in range(0, len(km), 4):
        # Phase-specific counts for the four k-mers of this group.
        fre1_list, fre2_list, fre3_list = [], [], []
        for kmer in km[group_start:group_start + 4]:
            # perf fix: one frequency_p call per k-mer (was three identical
            # calls, each a full O(len(sequence)) scan).
            _, fre1, fre2, fre3 = frequency_p(sequence, str(kmer))
            fre1_list.append(fre1)
            fre2_list.append(fre2)
            fre3_list.append(fre3)
        # One x/y/z contribution per codon phase, in phase order.
        for fre in (fre1_list, fre2_list, fre3_list):
            fx_list.append((fre[0] + fre[2]) - (fre[1] + fre[3]))
            fy_list.append((fre[0] + fre[1]) - (fre[2] + fre[3]))
            fz_list.append((fre[0] + fre[3]) - (fre[1] + fre[2]))
    return fx_list + fy_list + fz_list
def zcpseknc(input_data, k, w, lamada, alphabet):
    """This is a complete process in ZCPseKNC: read sequences, then vectorize.

    :param input_data: path of a sequence file readable by get_seqs.
    :param k: k-mer size.
    :param w: weight factor for the correlation (theta) terms.
    :param lamada: number of correlation tiers.
    :param alphabet: alphabet constant (DNA expected downstream).
    :return: numpy array of ZCPseKNC vectors, one row per sequence.
    """
    with open(input_data, 'r') as f:
        seq_list = get_seqs(f, alphabet)
    return make_zcpseknc_vector(seq_list, k, w, lamada, alphabet)
def get_phyche_list(k, phyche_list, extra_index_file, alphabet, all_prop=False):
    """Resolve and validate the physicochemical property list.

    :param k: int, the value of k-tuple (2 or 3 for DNA, 2 for RNA).
    :param phyche_list: list, the input physicochemical properties list.
    :param extra_index_file: optional user-supplied index file path.
    :param alphabet: DNA, RNA or PROTEIN constant.
    :param all_prop: bool, choose all physicochemical properties or not.
    :return: the validated (possibly expanded to all) property list.
    :raise ValueError: on inconsistent arguments or unsupported k/alphabet.
    :raise NameError: when a requested property is unknown.
    """
    if phyche_list is None or len(phyche_list) == 0:
        if extra_index_file is None and all_prop is False:
            error_info = 'Error, The phyche_list, extra_index_file and all_prop can\'t be all False.'
            raise ValueError(error_info)
    # Pick the catalogue of known properties for this alphabet / k.
    # fix: removed two no-op try/except blocks that only re-raised.
    if alphabet == DNA:
        if k == 2:
            all_prop_list = didna_list
        elif k == 3:
            all_prop_list = tridna_list
        else:
            raise ValueError('Error, the k value must be 2 or 3.')
    elif alphabet == RNA:
        if k == 2:
            all_prop_list = dirna_list
        else:
            raise ValueError('Error, the k or alphabet error.')
    elif alphabet == PROTEIN:
        all_prop_list = pro_list
    else:
        raise ValueError("Error, the alphabet must be dna, rna or protein.")
    # Set and check physicochemical properties.
    if all_prop is True:
        phyche_list = all_prop_list
    else:
        for e in phyche_list:
            if e not in all_prop_list:
                # fix: message typo "is not exit" -> "does not exist"
                error_info = 'Sorry, the physicochemical properties ' + e + ' does not exist.'
                raise NameError(error_info)
    return phyche_list
def get_extra_index(filename):
    """Get the extend indices from index file, only work for DNA and RNA.

    Layout: a '>' header line, one skipped line, then a TAB-separated
    value line two lines below each header.

    :param filename: path of the user index file.
    :return: list of float lists, one per '>' record.
    """
    extra_index_vals = []
    with open(filename) as handle:
        lines = handle.readlines()
    for pos, line in enumerate(lines):
        if line.startswith('>'):
            raw_vals = lines[pos + 2].rstrip().strip().split('\t')
            extra_index_vals.append([float(v) for v in raw_vals])
    return extra_index_vals
def get_aaindex(index_list):
    """Get the aaindex from data/aaindex.data.

    :param index_list: the index we want to get.
    :return: a list of AAIndex obj.
    """
    new_aaindex = []
    full_path = os.path.realpath(__file__)
    # The pickled AAIndex records ship next to this module.
    file_path = "%s/data/aaindex.data" % os.path.dirname(full_path)
    with open(file_path, 'rb') as f:
        aaindex = pickle.load(f)
    # Keep only the requested indices, preserving the file's order.
    for index_vals in aaindex:
        if index_vals.head in index_list:
            new_aaindex.append(index_vals)
    return new_aaindex
def extend_aaindex(filename):
    """Extend the user-defined AAIndex from user's file.

    :param filename: protein index file in the '>name / skipped line /
        TAB values' layout parsed by get_ext_ind_pro.
    :return: a list of AAIndex obj with normalized index values.
    """
    # Local import to avoid a circular dependency at module load time
    # (presumably -- confirm against extract_aaindex).
    from .extract_aaindex import norm_index_vals
    aaindex = get_ext_ind_pro(filename)
    # Replace each (head, dict) tuple with a normalized AAIndex object.
    for ind, (head, index_dict) in enumerate(aaindex):
        aaindex[ind] = AAIndex(head, norm_index_vals(index_dict))
    return aaindex
def get_ext_ind_pro(filename):
    """Get the extend indices from index file, only work for protein.

    Layout: '>NAME' header, one skipped line, then 20 TAB-separated
    values (one per residue, in the fixed order below) two lines down.

    :param filename: path of the protein index file.
    :return: list of (name, {residue: value}) tuples.
    """
    inds = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']
    aaindex = []
    with open(filename, 'r') as handle:
        lines = handle.readlines()
    for pos, line in enumerate(lines):
        if line.startswith('>'):
            record_name = line[1:].rstrip()
            raw_vals = lines[pos + 2].rstrip().split('\t')
            value_map = dict(zip(inds, (float(v) for v in raw_vals)))
            aaindex.append((record_name, value_map))
    return aaindex
def get_phyche_value(k, phyche_list, alphabet, extra_phyche_index=None):
    """Generate DNA or RNA phyche_value.

    :param k: int, the value of k-tuple.
    :param phyche_list: physicochemical properties list.
    :param alphabet: DNA or RNA constant, forwarded to get_phyche_index.
    :param extra_phyche_index: dict, the key is the olinucleotide (string),
                               the value is its physicochemical property value (list).
                               It means the user-defined physicochemical indices.
    :return: {k-mer: [property values]} dict, built-in values first,
        user-defined values appended per k-mer.
    """
    if extra_phyche_index is None:
        extra_phyche_index = {}
    phyche_value = extend_phyche_index(get_phyche_index(k, phyche_list, alphabet), extra_phyche_index)
    return phyche_value
def extend_phyche_index(original_index, extend_index):
    """Extend DNA or RNA {phyche: [value, ...]} in place.

    Appends extend_index[kmer] onto original_index[kmer] for every key of
    original_index; a None/empty extend_index is a no-op.

    :return: the (mutated) original_index.
    """
    if not extend_index:
        return original_index
    for kmer in list(original_index.keys()):
        original_index[kmer].extend(extend_index[kmer])
    return original_index
def get_phyche_factor_dic(k, alphabet):
    """Get all DNA or RNA {nucleotide: [(phyche, value), ...]} dict.

    Loads the pickled property table shipped next to this module; aborts
    the process for unsupported k values.
    """
    full_path = os.path.realpath(__file__)
    if 2 == k and alphabet == DNA:
        file_path = "%s/data/didna.data" % os.path.dirname(full_path)
    elif 2 == k and alphabet == RNA:
        file_path = "%s/data/dirna.data" % os.path.dirname(full_path)
    elif 3 == k:
        file_path = "%s/data/mmc4.data" % os.path.dirname(full_path)
    else:
        sys.stderr.write("The k can just be 2 or 3.")
        sys.exit(0)
    try:
        with open(file_path, 'rb') as f:
            phyche_factor_dic = pickle.load(f)
    except:
        # NOTE(review): Python-2-era fallback; pickle.load on a text-mode
        # handle will itself fail on Python 3 -- confirm whether this
        # branch is still reachable/needed.
        with open(file_path, 'r') as f:
            phyche_factor_dic = pickle.load(f)
    return phyche_factor_dic
def get_phyche_index(k, phyche_list, alphabet):
    """get phyche_value according phyche_list.

    :return: {k-mer: [values of the requested properties]}; all-empty
        lists when phyche_list is empty.
    """
    kmers = make_km_list(k, alphabet)
    if 0 == len(phyche_list):
        return {nucleotide: [] for nucleotide in kmers}
    factor_dic = get_phyche_factor_dic(k, alphabet)
    phyche_value = {}
    for nucleotide in kmers:
        collected = phyche_value.setdefault(nucleotide, [])
        for entry in factor_dic[nucleotide]:
            # entry: (property name, value) -- keep only requested properties.
            if entry[0] in phyche_list:
                collected.append(entry[1])
    return phyche_value
def get_theta(k, lamada, sequence, alphabet):
    """Get the theta list which use frequency to replace physicochemical
    properties (the kernel of the ZCPseKNC method).

    :param k: k-mer size.
    :param lamada: number of correlation tiers.
    :param sequence: DNA sequence string.
    :param alphabet: must be DNA; anything else aborts the program.
    :return: list of lamada theta values.
    """
    theta = []
    L = len(sequence)
    kmer = make_km_list(k, alphabet)
    # perf fix: frequency_p is an O(L) scan; cache each k-mer's count once
    # instead of recomputing it for every (i, j) pair of the double loop.
    count_cache = {str(key): frequency_p(sequence, str(key))[0] for key in kmer}
    fre_sum = float(sum(count_cache.values()))

    def cached_count(nt):
        # Fall back to a fresh computation for substrings outside the
        # enumerated k-mer list (e.g. ambiguous bases), as the original did.
        if nt not in count_cache:
            count_cache[nt] = frequency_p(sequence, nt)[0]
        return count_cache[nt]

    for i in range(1, lamada + 1):
        temp_sum = 0.0
        for j in range(0, L - k - i + 1):
            nucleotide1 = sequence[j: j + k]
            nucleotide2 = sequence[j + i: j + i + k]
            if alphabet == DNA:
                fre_nucleotide1 = cached_count(str(nucleotide1)) / fre_sum
                fre_nucleotide2 = cached_count(str(nucleotide2)) / fre_sum
                temp_sum += pow(float(fre_nucleotide1) - float(fre_nucleotide2), 2)
            else:
                sys.stderr.write("The ZCPseKNC method just for DNA.")
                sys.exit(0)
        theta.append(temp_sum / (L - k - i + 1))
    return theta
def make_zcpseknc_vector(sequence_list, k=2, w=0.05, lamada=1, alphabet=DNA):
    # use theta_type=1 variable can distinguish method
    """Generate the ZCPseKNC vector.

    :param sequence_list: list of DNA sequence strings.
    :param k: k-mer size.
    :param w: weight factor balancing frequency vs. correlation terms.
    :param lamada: correlation tiers; requires lamada + k <= len(sequence).
    :param alphabet: alphabet constant (DNA expected by get_theta).
    :return: numpy array, one ZCPseKNC row vector per sequence.
    """
    kmer = make_km_list(k, alphabet)
    vector = []
    for sequence in sequence_list:
        if len(sequence) < k or lamada + k > len(sequence):
            error_info = "Sorry, the sequence length must be larger than " + str(lamada + k)
            sys.stderr.write(error_info)
            sys.exit(0)
        # Get the nucleotide frequency in the DNA sequence.
        fre_list = [frequency_p(sequence, str(key))[0] for key in kmer]
        fre_sum = float(sum(fre_list))
        fre_list = z_curve(sequence, k, alphabet)
        # Get the normalized occurrence frequency of nucleotide in the DNA sequence.
        # NOTE(review): fre_sum is first the total k-mer count (used to scale
        # the z-curve components) and is then recomputed as the sum of the
        # scaled components -- confirm this double normalization is intended.
        fre_list = [e / fre_sum for e in fre_list]
        fre_sum = float(sum(fre_list))
        # Get the theta_list.
        theta_list = get_theta(k, lamada, sequence, alphabet)
        theta_sum = sum(theta_list)
        # Generate the vector according the Equation .
        denominator = fre_sum + w * theta_sum
        temp_vec = [round(f / denominator, 8) for f in fre_list]
        for theta in theta_list:
            temp_vec.append(round(w * theta / denominator, 8))
        vector.append(temp_vec)
    return array(vector)
<file_sep>import sys
import numpy as np
import torch
from torch import optim
from torch.utils.data import DataLoader, TensorDataset
from ..utils.utils_math import CRF
from ..utils.utils_plot import plot_roc_curve, plot_pr_curve, plot_roc_ind, plot_pr_ind
from ..utils.utils_results import performance, final_results_output, print_metric_dict
START_TAG = "<START>"
STOP_TAG = "<STOP>"
def get_partition(feature, target, length, train_index, val_index):
    """Slice features, labels and sequence lengths into train/validation subsets.

    All three inputs are converted to numpy arrays and indexed with the given
    fold index arrays; returns the six resulting partitions.
    """
    feature = np.array(feature)
    target = np.array(target)
    length = np.array(length)
    x_train, x_val = feature[train_index], feature[val_index]
    y_train, y_val = target[train_index], target[val_index]
    train_length, test_length = length[train_index], length[val_index]
    return x_train, x_val, y_train, y_val, train_length, test_length
def crf_cv_process(vectors, labels, seq_length_list, folds, out_dir, params_dict):
    """Cross-validate the CRF model, then plot ROC/PR curves and write metrics.

    :param vectors: feature array; last axis is the per-position feature width.
    :param labels: per-position tag array aligned with vectors.
    :param seq_length_list: true (unpadded) length of every sequence.
    :param folds: iterable of (train_index, val_index) pairs.
    :param out_dir: directory for plots and result files.
    :param params_dict: requires 'lr', 'epochs' and 'batch_size'.
    """
    results = []
    cv_labels = []
    cv_prob = []
    # predicted_labels = np.zeros(len(seq_length_list))
    # predicted_prob = np.zeros(len(seq_length_list))
    count = 0
    width = vectors.shape[-1]
    for train_index, val_index in folds:
        train_x, test_x, train_y, test_y, train_length, test_length = get_partition(vectors, labels, seq_length_list,
                                                                                    train_index, val_index)
        lr = params_dict['lr']
        num_epochs = params_dict['epochs']
        batch_size = params_dict['batch_size']
        # Keep the predictions from the epoch with the lowest test loss.
        min_loss = float('inf')  # positive infinity
        opt_test_prob, opt_test_y_hat = [], []
        for num_epoch in range(num_epochs):
            test_loss, test_prob, test_y_hat = crf_main(train_x, test_x, train_y, test_y, width, batch_size, num_epoch,
                                                        lr)
            if test_loss < min_loss:
                min_loss = test_loss
                opt_test_prob = test_prob
                opt_test_y_hat = test_y_hat
        # Trim padded positions before computing metrics.
        test_label_list, test_prob_list, predict_label_list = preprocess4evaluate(test_y, opt_test_prob,
                                                                                  opt_test_y_hat, test_length)
        result = performance(test_label_list, predict_label_list, test_prob_list, bi_or_multi=False, res=True)
        results.append(result)
        cv_labels.append(test_label_list)
        cv_prob.append(test_prob_list)
        count += 1
        print(" Round[%d]: Accuracy = %.3f | minimum loss = %.4f" % (count, result[0], min_loss))
        print('\n')
    plot_roc_curve(cv_labels, cv_prob, out_dir)  # plot the ROC curve
    plot_pr_curve(cv_labels, cv_prob, out_dir)  # plot the PR curve
    final_results = np.array(results).mean(axis=0)
    print_metric_dict(final_results, ind=False)
    final_results_output(final_results, out_dir, ind=False, multi=False)  # write the metrics to file
def crf_ind_process(vectors, labels, ind_vectors, ind_labels, ind_seq_length_list, out_dir, params_dict):
    """Train a CRF on (vectors, labels) and evaluate on the independent set.

    :param ind_vectors: independent test feature array.
    :param ind_labels: independent test tag array.
    :param ind_seq_length_list: true (unpadded) length of each independent sequence.
    :param out_dir: directory for plots and result files.
    :param params_dict: requires 'lr', 'epochs' and 'batch_size'.
    """
    lr = params_dict['lr']
    num_epochs = params_dict['epochs']
    batch_size = params_dict['batch_size']
    # Keep the predictions from the epoch with the lowest test loss.
    min_loss = float('inf')  # positive infinity
    width = vectors.shape[-1]
    opt_test_prob, opt_test_y_hat = [], []
    for num_epoch in range(num_epochs):
        test_loss, test_prob, test_y_hat = crf_main(vectors, ind_vectors, labels, ind_labels, width, batch_size,
                                                    num_epoch, lr)
        if test_loss < min_loss:
            min_loss = test_loss
            opt_test_prob = test_prob
            opt_test_y_hat = test_y_hat
    # Trim padded positions before computing metrics.
    test_label_list, test_prob_list, predict_label_list = preprocess4evaluate(ind_labels, opt_test_prob,
                                                                              opt_test_y_hat, ind_seq_length_list)
    final_result = performance(test_label_list, predict_label_list, test_prob_list, bi_or_multi=False, res=True)
    print_metric_dict(final_result, ind=True)
    plot_roc_ind(test_label_list, test_prob_list, out_dir)  # plot the ROC curve
    plot_pr_ind(test_label_list, test_prob_list, out_dir)  # plot the PR curve
    final_results_output(final_result, out_dir, ind=True, multi=False)  # write the metrics to file
    # prob_output_res(final_target_list, final_predict_list, final_prob_list, out_dir)
def preprocess4evaluate(test_y, test_prob, test_y_hat, test_length):
    """Keep only the genuine (unpadded) positions of each test sequence.

    For every sequence i, the first test_length[i] labels, probabilities and
    predictions are concatenated into three flat lists, so that metrics are
    computed on real positions rather than on padding.
    """
    test_label_list = []
    test_prob_list = []
    predict_label_list = []
    for idx, seq_len in enumerate(test_length):
        test_label_list.extend(test_y[idx][:seq_len])
        test_prob_list.extend(test_prob[idx][:seq_len])
        predict_label_list.extend(test_y_hat[idx][:seq_len])
    return test_label_list, test_prob_list, predict_label_list
def make_data(train_x, test_x, train_y, test_y, batch_size):
    """Wrap numpy train/test arrays into PyTorch DataLoaders.

    Training data is shuffled with the given batch size; test data is served
    one sample at a time, in order. Worker subprocesses are disabled on
    Windows (multiprocessing DataLoader workers are problematic there).
    """
    num_workers = 0 if sys.platform.startswith('win') else 4
    train_dataset = TensorDataset(torch.from_numpy(train_x), torch.from_numpy(train_y))
    test_dataset = TensorDataset(torch.from_numpy(test_x), torch.from_numpy(test_y))
    train_data_iter = DataLoader(train_dataset, batch_size, shuffle=True, num_workers=num_workers)
    test_data_iter = DataLoader(test_dataset, 1, shuffle=False, num_workers=num_workers)
    return train_data_iter, test_data_iter
def train_crf(model, data_iter, epoch, optimizer=None):
    """Run one training pass of the CRF model over data_iter and print the mean loss.

    :param model: CRF model exposing neg_log_likelihood_parallel().
    :param data_iter: DataLoader yielding (features, tags) batches.
    :param epoch: current epoch index (used only for the log line).
    :param optimizer: torch optimizer updating the model parameters.
    """
    train_loss_sum = 0.0
    n = 1  # NOTE(review): starts at 1, so the printed loss is divided by sample count + 1
    for x, y in data_iter:
        # Step 1. Remember that PyTorch accumulates gradients;
        # We need to clear them out before each instance
        model.zero_grad()
        # Step 2. Run the forward pass (negative log-likelihood of the batch).
        loss = model.neg_log_likelihood_parallel(x.float(), y.long())
        # Step 3. Backpropagate and update the parameters via optimizer.step()
        loss.backward()
        optimizer.step()
        train_loss_sum += loss.item()
        n += y.shape[0]
    print('epoch[%d]: train loss: %.4f ' % (epoch + 1, train_loss_sum / n))
# for epoch in range(num_epochs):
# train_loss_sum = 0.0
# n = 1
# for x, y in data_iter:
# # 步骤1. 记住,pytorch积累了梯度
# # We need to clear them out before each instance
# model.zero_grad()
#
# # 步骤3. 向前运行
# loss = model.neg_log_likelihood_parallel(x.float(), y.long())
#
# # 步骤4.通过optimizer.step()
# loss.backward()
# optimizer.step()
#
# train_loss_sum += loss.item()
# n += y.shape[0]
#
# print('epoch[%d]: train loss: %.4f ' % (epoch + 1, train_loss_sum / n))
def test_crf(model, data_iter, test_x):
    """Evaluate the CRF: compute test loss, Viterbi predictions and probabilities.

    First pass: decode each test sample and accumulate the loss. Second pass:
    re-wrap test_x together with the predicted tags so the model can compute
    per-position probabilities for those predictions.

    :param model: trained CRF model.
    :param data_iter: DataLoader yielding (features, tags) test batches of size 1.
    :param test_x: raw test feature array (numpy), re-wrapped below.
    :return: (mean test loss, probability array, predicted tag array)
    """
    tag_hat_list = []
    test_loss_sum = 0.0
    n = 1  # NOTE(review): starts at 1, so the loss is divided by sample count + 1
    with torch.no_grad():
        for x, y in data_iter:
            score, tag_seq = model(x.float())
            tag_hat_list.append(tag_seq)
            loss = model.neg_log_likelihood_parallel(x.float(), y.long())
            test_loss_sum += loss.item()
            n += y.shape[0]
    test_loss = test_loss_sum / n
    # Worker subprocesses are disabled on Windows.
    if sys.platform.startswith('win'):
        num_workers = 0
    else:
        num_workers = 4
    test_y_hat = np.array(tag_hat_list)
    test_dataset = TensorDataset(torch.from_numpy(test_x), torch.from_numpy(test_y_hat))
    test_data_iter = DataLoader(test_dataset, 1, shuffle=False, num_workers=num_workers)
    test_prob = []
    with torch.no_grad():
        for x, y_ in test_data_iter:
            prob_list = model.calculate_pro_new(x.float(), y_.long())
            test_prob.append(prob_list)
    # prob_list = []
    # with torch.no_grad():
    #     for x, y in test_data_iter:
    #         prob_list += model.calculate_pro(x.float(), y.long())
    return test_loss, np.array(test_prob), test_y_hat
def crf_main(train_x, test_x, train_y, test_y, width, batch_size, num_epoch, lr):
    """Build a CRF model, train it for one pass, and evaluate it on the test set.

    :param width: feature dimension per sequence position.
    :param num_epoch: epoch index forwarded to train_crf for logging.
    :param lr: SGD learning rate (weight decay fixed at 1e-4).
    :return: (test loss, probability array, predicted tag array)
    """
    train_data_iter, test_data_iter = make_data(train_x, test_x, train_y, test_y, batch_size)
    # for X, y in train_data_iter:
    #     print(X.size())
    #     print(y)
    #     break
    # Binary tagging scheme plus the CRF's sentinel start/stop tags.
    tag_to_ix = {"B": 0, "O": 1, START_TAG: 2, STOP_TAG: 3}
    model = CRF(width, tag_to_ix)
    optimizer = optim.SGD(model.parameters(), lr=lr, weight_decay=1e-4)
    train_crf(model, train_data_iter, num_epoch, optimizer)
    test_loss, test_prob, test_y_hat = test_crf(model, test_data_iter, test_x)
    # print(test_prob)
    return test_loss, test_prob, test_y_hat
# if __name__ == '__main__':
# START_TAG = "<START>"
# STOP_TAG = "<STOP>"
# TAG2INDEX = {"B": 0, "O": 1, START_TAG: 2, STOP_TAG: 3}
#
# NUM_SEQ, SEQ_LEN, WIDTH, BATCH_SIZE, NUM_EPOCHS = 100, 10, 10, 5, 13
# print('... CRF processing ...\n')
#
# crf_main(TAG2INDEX, NUM_SEQ, SEQ_LEN, WIDTH, BATCH_SIZE, NUM_EPOCHS)
#
# print('\nFinish!')
<file_sep># import numpy as np
# import threading
# import multiprocessing
# from itertools import combinations
# from ..utils.utils_bow import get_km_dict
# from ..utils.utils_fasta import get_seqs
#
#
# def subsequence_bow(input_file, alphabet, k, delta):
# alphabet = list(alphabet)
# # cpu_num = int(multiprocessing.cpu_count() - 1)
# threads = []
# # sem = threading.Semaphore(cpu_num)
#
# with open(input_file, 'r') as f:
# seq_list = get_seqs(f, alphabet)
#
# km_dict = get_km_dict(k, alphabet)
# results = np.zeros((len(seq_list), len(km_dict)))
#
# for i in range(len(seq_list)):
# print('sequence[%d]' % i)
# sequence = np.array(list(seq_list[i]))
# threads.append(threading.Thread(target=get_one_subsequence,
# args=(sequence, i, km_dict, k, delta, results)))
# for t in threads:
# t.start()
# for t in threads:
# t.join()
#
# return results
#
#
# def get_one_subsequence(sequence, index, km_dict, k, delta, results):
# # sem.acquire()
#
# vector = np.zeros(len(km_dict))
# n = len(sequence)
#
# for sub_seq_index in combinations(list(range(n)), k):
# # [(0, 1, 2) ,(0, 1, 3) ,(0, 1, 4), ...,(4, 6, 7) ,(5, 6, 7)]
# sub_seq_index = list(sub_seq_index)
# subsequence = sequence[sub_seq_index]
# position = km_dict.get(''.join(subsequence))
# sub_seq_length = sub_seq_index[-1] - sub_seq_index[0] + 1
# sub_seq_score = 1 if sub_seq_length == k else delta ** sub_seq_length
# vector[position] += sub_seq_score
#
# results[index] = vector
#
# # time.sleep(2)
# # sem.release()
import multiprocessing
import threading
import numpy as np
from itertools import combinations
from ..utils.utils_bow import get_km_dict
from ..utils.utils_fasta import get_seqs
def subsequence_bow(filename, alphabet, k, delta):
    """Compute the (gapped) subsequence bag-of-words profile for every sequence.

    :param filename: FASTA input file.
    :param alphabet: alphabet string/list used to read and index the sequences.
    :param k: subsequence length.
    :param delta: decay factor penalizing gapped subsequences.
    :return: numpy array, shape (num_sequences, num_kmers).
    """
    alphabet = list(alphabet)
    with open(filename) as f:
        seq_list = get_seqs(f, alphabet)
    # BUG FIX: int(cpu_count() / 3) is 0 on machines with fewer than 3 CPUs,
    # which made construct_partitions() divide by zero and created a
    # Semaphore(0) that no worker thread could ever acquire.
    cpu_num = max(1, multiprocessing.cpu_count() // 3)
    batches = construct_partitions(seq_list, cpu_num)
    threads = []
    sem = threading.Semaphore(cpu_num)
    km_dict = get_km_dict(k, alphabet)
    results = np.zeros((len(seq_list), len(km_dict)))
    for batch in batches:
        # temp = pool.apply_async(get_subsequence_profile, (batch, alphabet, k, delta))
        # results.append(temp)
        threads.append(threading.Thread(target=get_subsequence_profile,
                                        args=(seq_list, batch, km_dict, k, delta, results, sem)))
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    return results
def construct_partitions(seq_list, cpu_num):
    """Split the index range 0..len(seq_list)-1 into cpu_num contiguous batches.

    The first cpu_num - 1 batches each hold len(seq_list) // cpu_num indices;
    the final batch takes every remaining index.
    """
    cpu_num = int(cpu_num)
    total = len(seq_list)
    size = total // cpu_num
    batches = [list(range(b * size, (b + 1) * size)) for b in range(cpu_num - 1)]
    batches.append(list(range((cpu_num - 1) * size, total)))
    return batches
def get_subsequence_profile(seq_list, batch, km_dict, k, delta, results, sem):
    """Worker: fill the `results` rows for the sequence indices in `batch`.

    Each k-long (possibly gapped) subsequence of a sequence contributes 1 when
    contiguous, or delta ** span otherwise, to the k-mer bin it spells out.

    :param seq_list: all sequences (strings).
    :param batch: list of indices into seq_list handled by this worker.
    :param km_dict: mapping k-mer string -> column index in results.
    :param results: shared numpy output array, one row per sequence.
    :param sem: semaphore bounding the number of concurrently running workers.
    """
    sem.acquire()
    try:
        for seq_ind in batch:
            print('sequence index: %d\n' % seq_ind)
            sequence = seq_list[seq_ind]
            vector = np.zeros((1, len(km_dict)))
            sequence = np.array(list(sequence))
            n = len(sequence)
            # Enumerate every increasing k-tuple of positions, e.g. for n=8, k=3:
            # (0, 1, 2), (0, 1, 3), ..., (5, 6, 7)
            for sub_seq_index in combinations(list(range(n)), k):
                sub_seq_index = list(sub_seq_index)
                subsequence = sequence[sub_seq_index]
                position = km_dict.get(''.join(subsequence))
                sub_seq_length = sub_seq_index[-1] - sub_seq_index[0] + 1
                # Contiguous subsequences score 1; gapped ones decay with their span.
                sub_seq_score = 1 if sub_seq_length == k else delta ** sub_seq_length
                vector[0, position] += sub_seq_score
            results[seq_ind] = vector
    finally:
        # BUG FIX: release in a finally block so an exception in the loop can
        # never leave the semaphore permanently held (deadlocking other workers).
        sem.release()
<file_sep>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from numpy import *
def frequency_p(tol_str, tar_str):
    """Count overlapping occurrences of tar_str in tol_str, split by codon frame.

    :param tol_str: mother string.
    :param tar_str: substring.
    :return: tuple (total, frame1, frame2, frame3) where an occurrence starting
        at 0-based position s is assigned frame 1, 2 or 3 according to
        (s + 2) % 3 being 1, 2 or anything else, respectively.
    """
    if not tar_str:
        # An empty target never matches (mirrors the original scan).
        return (0, 0, 0, 0)
    total, frame1, frame2, frame3 = 0, 0, 0, 0
    start = tol_str.find(tar_str)
    while start != -1:
        total += 1
        phase = (start + 2) % 3
        if phase == 1:
            frame1 += 1
        elif phase == 2:
            frame2 += 1
        else:
            frame3 += 1
        # Advance by one position so overlapping matches are counted.
        start = tol_str.find(tar_str, start + 1)
    return (total, frame1, frame2, frame3)
def Z_curve(sequence, k, alphabet):
    """Compute Z-curve style features from codon-frame k-mer counts.

    The k-mers are consumed in groups of four (consecutive entries of
    make_kmer_list, i.e. k-mers sharing a (k-1)-prefix and differing in the
    last base). For each group and each codon frame, the four counts are
    combined into x, y and z components; the flattened result is all fx
    values, then all fy values, then all fz values.

    NOTE(review): frequency_p is called three times per k-mer with identical
    arguments; one call returning the full tuple would suffice.
    """
    kmer = make_kmer_list(k, alphabet)
    len_kmer = len(kmer)
    i = 0
    f_ZC = []
    fx_list = []
    fy_list = []
    fz_list = []
    while i < len_kmer:
        j = 1
        fre1_list = []
        fre2_list = []
        fre3_list = []
        # Collect the three frame-specific counts for the next four k-mers;
        # frequency_p returns (total, frame1, frame2, frame3).
        while j <= 4:
            fre1 = frequency_p(sequence, str(kmer[i]))[1]
            fre2 = frequency_p(sequence, str(kmer[i]))[2]
            fre3 = frequency_p(sequence, str(kmer[i]))[3]
            fre1_list.append(fre1)
            fre2_list.append(fre2)
            fre3_list.append(fre3)
            j += 1
            i += 1
        # x component per frame: (bases 0 and 2) minus (bases 1 and 3).
        fx1 = (fre1_list[0] + fre1_list[2]) - (fre1_list[1] + fre1_list[3])
        fx2 = (fre2_list[0] + fre2_list[2]) - (fre2_list[1] + fre2_list[3])
        fx3 = (fre3_list[0] + fre3_list[2]) - (fre3_list[1] + fre3_list[3])
        fx_list.append(fx1)
        fx_list.append(fx2)
        fx_list.append(fx3)
        # y component per frame: (bases 0 and 1) minus (bases 2 and 3).
        fy1 = (fre1_list[0] + fre1_list[1]) - (fre1_list[2] + fre1_list[3])
        fy2 = (fre2_list[0] + fre2_list[1]) - (fre2_list[2] + fre2_list[3])
        fy3 = (fre3_list[0] + fre3_list[1]) - (fre3_list[2] + fre3_list[3])
        fy_list.append(fy1)
        fy_list.append(fy2)
        fy_list.append(fy3)
        # z component per frame: (bases 0 and 3) minus (bases 1 and 2).
        fz1 = (fre1_list[0] + fre1_list[3]) - (fre1_list[1] + fre1_list[2])
        fz2 = (fre2_list[0] + fre2_list[3]) - (fre2_list[1] + fre2_list[2])
        fz3 = (fre3_list[0] + fre3_list[3]) - (fre3_list[1] + fre3_list[2])
        fz_list.append(fz1)
        fz_list.append(fz2)
        fz_list.append(fz3)
    for i in range(0, len(fx_list)):
        f_ZC.append(fx_list[i])
    for i in range(0, len(fy_list)):
        f_ZC.append(fy_list[i])
    for i in range(0, len(fz_list)):
        f_ZC.append(fz_list[i])
    return f_ZC
def convert_phyche_index_to_dict(phyche_index, alphabet):
    """Convert phyche index from list to dict.

    :param phyche_index: one list of values per physicochemical property; each
        list must contain 4^k values (one per k-mer).
    :param alphabet: nucleotide alphabet used to enumerate the k-mers.
    :return: dict mapping each k-mer to its list of per-property values.
    """
    # BUG FIX: this module only imports `from numpy import *`, so `sys` was an
    # undefined name and the validation below raised NameError instead of
    # exiting with the intended message.
    import sys
    # for e in phyche_index:
    # print e
    len_index_value = len(phyche_index[0])
    k = 0
    for i in range(1, 10):
        if len_index_value < 4 ** i:
            sys.exit("Sorry, the number of each index value is must be 4^k.")
        if len_index_value == 4 ** i:
            k = i
            break
    kmer_list = make_kmer_list(k, alphabet)
    # print kmer_list
    len_kmer = len(kmer_list)
    phyche_index_dict = {}
    for kmer in kmer_list:
        phyche_index_dict[kmer] = []
    # print phyche_index_dict
    # Transpose so each column holds one k-mer's values across all properties.
    phyche_index = list(zip(*phyche_index))
    for i in range(len_kmer):
        phyche_index_dict[kmer_list[i]] = list(phyche_index[i])
    return phyche_index_dict
def make_kmer_list(k, alphabet):
    """Return every k-long string over `alphabet`, in itertools.product order.

    :param k: k-mer length (positive int).
    :param alphabet: iterable of single characters.
    :raise TypeError, ValueError: re-raised after printing a hint when the
        arguments are invalid.
    """
    # BUG FIX: this module only imports `from numpy import *`; `itertools` was
    # never imported, so every call raised NameError.
    import itertools
    try:
        return ["".join(e) for e in itertools.product(alphabet, repeat=k)]
    except TypeError:
        print("TypeError: k must be an inter and larger than 0, alphabet must be a string.")
        raise TypeError
    except ValueError:
        print("TypeError: k must be an inter and larger than 0")
        raise ValueError
def standard_deviation(value_list):
    """Return the sample standard deviation (n - 1 denominator)."""
    from math import sqrt
    from math import pow
    n = len(value_list)
    mean_value = sum(value_list) * 1.0 / n
    squared_total = 0.0
    for v in value_list:
        squared_total += pow(v - mean_value, 2)
    return sqrt(squared_total * 1.0 / (n - 1))
def normalize_index(phyche_index, alphabet, is_convert_dict=False):
    """Normalize each physicochemical index to zero mean and unit variance.

    Each value list is standardized (values rounded to 2 decimals). When
    is_convert_dict is True the result is returned as a {kmer: values} dict;
    otherwise the normalized list-of-lists is printed and returned.
    """
    normalized = []
    for values in phyche_index:
        mean_value = sum(values) * 1.0 / len(values)
        sd_value = standard_deviation(values)
        normalized.append([round((v - mean_value) / sd_value, 2) for v in values])
    if is_convert_dict is True:
        return convert_phyche_index_to_dict(normalized, alphabet)
    print(normalized)
    return normalized
<file_sep>from ..utils.utils_topic import lsa, PLsa, lda
from ..TF_IDF.TF_IDF4vec import tf_idf
def tf_idf_lsa(input_file, alphabet, words, **param_dict):
    """TF-IDF features followed by LSA dimensionality reduction."""
    return lsa(tf_idf(input_file, alphabet, words, **param_dict),
               com_prop=param_dict['com_prop'])
def tf_idf_plsa(input_file, alphabet, words, **param_dict):
    """TF-IDF features followed by PLSA topic extraction (EM algorithm)."""
    bow = tf_idf(input_file, alphabet, words, **param_dict)
    plsa_model = PLsa(bow, com_prop=param_dict['com_prop'])
    _, plsa_vectors = plsa_model.em_algorithm()
    return plsa_vectors
def tf_idf_lda(input_file, alphabet, words, **param_dict):
    """TF-IDF features followed by (unsupervised) LDA topic extraction."""
    return lda(tf_idf(input_file, alphabet, words, **param_dict),
               labels=None, com_prop=param_dict['com_prop'])
def tf_idf_label_lda(input_file, labels, alphabet, words, **param_dict):
    """TF-IDF features followed by labeled LDA topic extraction."""
    return lda(tf_idf(input_file, alphabet, words, **param_dict),
               labels=labels, com_prop=param_dict['com_prop'])
def tf_idf_tm(tm_method, input_file, labels, category, words, fixed_len, sample_num_list, out_format, out_file_list,
              cur_dir, **param_dict):
    """Apply a topic model on top of TF-IDF features.

    :param tm_method: one of 'LSA', 'PLSA', 'LDA', 'Labeled-LDA'.
    :param labels: per-sample labels; only used by 'Labeled-LDA'.
    :param param_dict: must include 'com_prop' (component proportion).
    :return: topic-space vectors, or False when tm_method is unrecognized.
    """
    vectors = tf_idf(input_file, category, words, fixed_len, sample_num_list, out_format, out_file_list, cur_dir,
                     True, **param_dict)
    if tm_method == 'LSA':
        tm_vectors = lsa(vectors, com_prop=param_dict['com_prop'])
    elif tm_method == 'PLSA':
        _, tm_vectors = PLsa(vectors, com_prop=param_dict['com_prop']).em_algorithm()
    elif tm_method == 'LDA':
        tm_vectors = lda(vectors, labels=None, com_prop=param_dict['com_prop'])
    elif tm_method == 'Labeled-LDA':
        tm_vectors = lda(vectors, labels=labels, com_prop=param_dict['com_prop'])
    else:
        print('Topic model method error!')
        return False
    return tm_vectors
<file_sep>import os
import itertools
import numpy as np
from itertools import product
from numpy import random
from sklearn.model_selection import StratifiedKFold
from gensim.models import Word2Vec
random_seed = 40
def combine_seq_file(seq_files, target_dir):
    """Return the path of the combined input file inside target_dir.

    The extension of the first input file is reused; note that this only
    builds the path string — no files are read or written here.
    """
    extension = os.path.splitext(seq_files[0])[-1]
    return '%s/combined_input_file%s' % (target_dir, extension)
def count_num(category, seq_file, label_file, output_file):
    """Parse the per-class FASTA files and write one combined, labeled file.

    :param category: 'DNA', 'RNA' or 'Protein' (selects the alphabet; the
        alphabet itself is currently unused beyond validation intent).
    :param seq_file: list of FASTA file paths, one per class.
    :param label_file: list of labels, parallel to seq_file.
    :param output_file: path of the combined FASTA written with labeled headers.
    :return: (num_list, len_list) — sequences per file, and each sequence length.
    """
    if category == 'DNA':
        alphabet = "ATCG"
    elif category == 'RNA':
        alphabet = "AUCG"
    elif category == "Protein":
        alphabet = "ACDEFGHIKLMNPQRSTVWY"
    num_list = []
    len_list = []
    detail_list = []
    for i in range(len(seq_file)):
        with open(seq_file[i], 'r') as f:
            flag = 0
            temp = ""
            temp_detail_list = []
            for line in f.readlines():
                line = line.strip('\n')
                if not line:
                    # BUG FIX: blank lines previously raised IndexError on line[0].
                    continue
                if line[0] == '>' and flag == 0:
                    temp = ""
                    continue
                elif line[0] == '>' and flag == 1:
                    flag = 0
                else:
                    temp += line
                    flag = 1
                    continue
                # Reached only on a '>' header that closes a pending sequence.
                temp_detail_list.append(temp)
                len_list.append(len(temp))
                temp = ""
            if flag == 1:
                # BUG FIX: the last sequence in each file was silently dropped
                # because it was only flushed when another '>' header followed.
                temp_detail_list.append(temp)
                len_list.append(len(temp))
            detail_list.append(temp_detail_list)
            num_list.append(len(temp_detail_list))
    with open(output_file, 'w') as f:
        for i in range(len(label_file)):
            for j in range(len(detail_list[i])):
                f.write('>Sequence[' + str(j+1) + '] | Label[' + str(i) + ']\n')
                f.write(detail_list[i][j])
                f.write('\n')
    return num_list, len_list
def generate_label_list(num_list, label):
    """Expand per-class sequence counts into a flat numpy label array.

    label[i] is repeated num_list[i] times (cast to int), in order.
    """
    labels = []
    for i, lab in enumerate(label):
        labels.extend([int(lab)] * num_list[i])
    return np.array(labels)
def possible_parameter_generation(args, parameter_dict):
    """Fill parameter_dict with the hyper-parameter grid for args.method.

    Dispatches to the per-classifier grid builder (SVM, RF, KNN, LinearSVM);
    unknown methods leave the dict untouched. Returns parameter_dict.
    """
    method = args.method
    if method == 'SVM':
        possible_parameter_generation_svm(args.cost, args.gamma, parameter_dict)
    elif method == 'RF':
        possible_parameter_generation_rf(args.tree, parameter_dict)
    elif method == 'KNN':
        possible_parameter_generation_knn(args.ngb, parameter_dict)
    elif method == 'LinearSVM':
        possible_parameter_generation_lsvm(args.cost, parameter_dict)
    return parameter_dict
def possible_parameter_generation_lsvm(cost, parameter_dict):
    """Store the LinearSVM cost grid in parameter_dict['cost'].

    cost may be None (default grid) or a 1/2/3-element [start], [start, stop]
    or [start, stop, step] list.
    """
    if cost is None:
        span = range(-10, 11, 1)
    elif len(cost) == 1:
        span = range(cost[0], cost[0] + 1, 1)
    elif len(cost) == 2:
        span = range(cost[0], cost[1], 1)
    elif len(cost) == 3:
        span = range(cost[0], cost[1], cost[2])
    parameter_dict['cost'] = span
def possible_parameter_generation_knn(ngb, parameter_dict):
    """Store the KNN neighbour-count grid in parameter_dict['ngb'] and return it.

    ngb may be None (default grid 1..19) or a 1/2/3-element [start],
    [start, stop] or [start, stop, step] list.
    """
    if ngb is None:
        span = range(1, 20, 1)
    elif len(ngb) == 1:
        span = range(ngb[0], ngb[0] + 1, 1)
    elif len(ngb) == 2:
        span = range(ngb[0], ngb[1], 1)
    elif len(ngb) == 3:
        span = range(ngb[0], ngb[1], ngb[2])
    parameter_dict['ngb'] = span
    return parameter_dict
def possible_parameter_generation_svm(cost, gamma, parameter_dict):
    """Store SVM cost and gamma grids in parameter_dict and return it.

    Each of cost/gamma may be None (classic libsvm-style default grid) or a
    1/2/3-element [start], [start, stop] or [start, stop, step] list.
    Grids are materialized as lists.
    """
    if cost is None:
        cost_span = range(-5, 11, 3)
    elif len(cost) == 1:
        cost_span = range(cost[0], cost[0] + 1, 1)
    elif len(cost) == 2:
        cost_span = range(cost[0], cost[1], 1)
    elif len(cost) == 3:
        cost_span = range(cost[0], cost[1], cost[2])
    if gamma is None:
        gamma_span = range(-10, 6, 3)
    elif len(gamma) == 1:
        gamma_span = range(gamma[0], gamma[0] + 1, 1)
    elif len(gamma) == 2:
        gamma_span = range(gamma[0], gamma[1], 1)
    elif len(gamma) == 3:
        gamma_span = range(gamma[0], gamma[1], gamma[2])
    parameter_dict['cost'] = list(cost_span)
    parameter_dict['gamma'] = list(gamma_span)
    return parameter_dict
def possible_parameter_generation_rf(tree, parameter_dict):
    """Store the random-forest tree-count grid in parameter_dict['tree'] and return it.

    tree may be None (default grid 10..190 step 10) or a 1/2/3-element
    [start], [start, stop] or [start, stop, step] list. The grid is
    materialized as a list.
    """
    if tree is None:
        span = range(10, 200, 10)
    elif len(tree) == 1:
        span = range(tree[0], tree[0] + 1, 1)
    elif len(tree) == 2:
        span = range(tree[0], tree[1], 1)
    elif len(tree) == 3:
        span = range(tree[0], tree[1], tree[2])
    parameter_dict['tree'] = list(span)
    return parameter_dict
def dict_to_list(dict):
    """Expand a {name: iterable-of-values} grid into a list of parameter dicts.

    Every element of the result maps each key to one value, covering the full
    Cartesian product of the value iterables (itertools.product order).
    Note: the parameter shadows the builtin `dict`; kept for compatibility.
    """
    key_list = list(dict.keys())
    combos = []
    for value_pair in product(*list(dict.values())):
        combos.append({key_list[i]: v for i, v in enumerate(value_pair)})
    return combos
def divide_data_set(args, label_list):
    """Attach stratified K-fold (train, test) index pairs to args.folds.

    args.test gives the number of folds; the split is shuffled but seeded
    with the module-level random_seed so it is reproducible. The feature
    argument to split() is a dummy array — only labels drive stratification.
    """
    dummy_x = random.normal(loc=0.0, scale=1, size=len(label_list))
    splitter = StratifiedKFold(n_splits=int(args.test), shuffle=True,
                               random_state=random.RandomState(random_seed))
    args.folds = list(splitter.split(dummy_x, label_list))
    return args
def encode_line_onehot(args, line):
    """One-hot encode a single sequence line.

    :param args: namespace whose .type is 'DNA', 'RNA' or 'Protein'.
    :param line: sequence string (a trailing newline is stripped).
    :return: list of one-hot vectors, one per residue, width = alphabet size.
    :raise ValueError: for an unknown args.type.
    """
    if args.type == 'DNA':
        alphabet = "ATCG"
    elif args.type == 'RNA':
        alphabet = "AUCG"
    elif args.type == "Protein":
        alphabet = "ACDEFGHIKLMNPQRSTVWY"
    else:
        # BUG FIX: previously fell through with `alphabet` unbound, raising an
        # opaque NameError on the first residue instead of a clear error.
        raise ValueError('Unknown sequence type: %s' % args.type)
    result = []
    line = line.strip("\n")
    # print(line)
    for c in line:
        # print('1'+c+'1')
        num = alphabet.index(c)
        vector = [0] * len(alphabet)
        vector[num] = 1
        result.append(vector)
    return result
def generate_kmer_list(k, alphabet):
    """Return every k-mer over alphabet; note k is a one-element list (argparse nargs)."""
    kmers = itertools.product(alphabet, repeat=k[0])
    return list(map("".join, kmers))
def kmer_frequency_count(kmer, line):
    """Count overlapping occurrences of `kmer` in `line`.

    Matches may overlap: e.g. 'AA' occurs 3 times in 'AAAA'.
    """
    if not kmer:
        # The original scan never matched an empty k-mer.
        return 0
    count = 0
    start = line.find(kmer)
    while start != -1:
        count += 1
        # Advance by one so overlapping matches are counted.
        start = line.find(kmer, start + 1)
    return count
def feature_extraction(args):
    """Encode the input sequences into feature vectors (One-hot, BOW or Word2Vec).

    Combines the per-class FASTA files, derives labels, builds the classifier
    hyper-parameter grid and the CV folds, then encodes every sequence
    according to args.code ('One-hot', 'BOW' or 'WE').

    :param args: namespace with .type, .seq_file, .label, .result_dir, .code,
        .word_size, .test; .fixed_len and .folds are set here as side effects.
    :return: (feature array, label array, list of hyper-parameter dicts)
    """
    if args.type == 'DNA':
        alphabet = "ATCG"
    elif args.type == 'RNA':
        alphabet = "AUCG"
    elif args.type == "Protein":
        alphabet = "ACDEFGHIKLMNPQRSTVWY"
    # Preparation stage
    # print("=================Feature extraction step=================")
    input_file_combined = combine_seq_file(args.seq_file, args.result_dir)
    num_list, len_list = count_num(args.type, args.seq_file, args.label, input_file_combined)
    label_list = generate_label_list(num_list, args.label)
    args.fixed_len = max(len_list)
    # Hyper-parameter grid generation
    possible_parameter_dict = {}
    possible_parameter_dict = possible_parameter_generation(args, possible_parameter_dict)
    possible_parameter_list = dict_to_list(possible_parameter_dict)
    # Train/test set partitioning
    args = divide_data_set(args, label_list)
    print('Input file direction: '+input_file_combined)
    print('Num of sequence: '+str(len(len_list)))
    print('Num of positive sequence: '+str(num_list[0]))
    print('Num of negative sequence: '+str(num_list[1]))
    output_file = 'input_file_encoded.txt'
    output_list = []
    output_array = []
    if args.code == 'One-hot':
        # One-hot per residue, zero-padded to fixed_len, then flattened.
        with open(input_file_combined, 'r') as f:
            for line in f.readlines():
                if line[0] == '>':
                    continue
                temp_line = encode_line_onehot(args, line)
                output_list.append(temp_line)
        width = len(output_list[0][0])
        for i in range(len(output_list)):
            temp_array = np.zeros((args.fixed_len, width))
            temp_len = len(output_list[i])
            if temp_len <= args.fixed_len:
                temp_array[:temp_len, :] = output_list[i]
            output_array.append(temp_array.flatten().tolist())
        output_array = np.array(output_array)
    elif args.code == 'BOW':
        # Normalized k-mer count vector per sequence.
        kmer_list = generate_kmer_list(args.word_size, alphabet)
        with open(input_file_combined, 'r') as f:
            for line in f.readlines():
                if line[0] == '>':
                    continue
                sum = 0  # NOTE(review): shadows the builtin sum within this scope
                kmer_count_dict = {}
                for kmer in kmer_list:
                    count_temp = kmer_frequency_count(kmer, line)
                    if kmer not in kmer_count_dict:
                        kmer_count_dict[kmer] = 0
                    kmer_count_dict[kmer] += count_temp
                    sum += count_temp
                kmer_count_list = [kmer_count_dict[kmer] for kmer in kmer_list]
                kmer_count = [round(float(kmer) / sum, 8) for kmer in kmer_count_list]
                output_list.append(kmer_count)
        output_array = np.array(output_list)
    elif args.code == 'WE':
        # Word2Vec embeddings: sequences are padded with 'X' / truncated to
        # fixed_len, split into overlapping words, and for each CV fold a
        # model is trained on the training split and applied to the test split.
        sentences_list = []
        with open(input_file_combined, 'r') as f:
            for line in f.readlines():
                if line[0] == '>':
                    continue
                sent = []
                if len(line) <= args.fixed_len:
                    for j in range(args.fixed_len - len(line)):
                        line += 'X'
                else:
                    line = line[:args.fixed_len]
                for i in range(len(line) - args.word_size[0] + 1):
                    word = line[i:i + args.word_size[0]]
                    sent.append(word)
                sentences_list.append(sent)
        row = (args.fixed_len - args.word_size[0] + 1) * 10  # 10 = embedding size
        output = -np.ones((len(sentences_list), row))
        for i, (train, test) in enumerate(args.folds):
            print('Round [%s]' % (i+1))
            train_sentences = []
            test_sentences = []
            for x in train:
                train_sentences.append(sentences_list[x])
            for y in test:
                test_sentences.append(sentences_list[y])
            model = Word2Vec(train_sentences, size=10, window=5, sg=0)
            vectors = []
            for sentence in test_sentences:
                vector = []
                for j in range(len(sentence)):
                    try:
                        temp = np.array(model[sentence[j]])
                    except KeyError:
                        # Out-of-vocabulary word: fall back to a zero vector.
                        temp = np.zeros(10)
                    if len(vector) == 0:
                        vector = temp
                    else:
                        vector = np.hstack((vector, temp))
                vectors.append(vector)
            for k in range(len(test)):
                output[test[k]] = np.array(vectors[k])
            # output[test] = np.array(vectors)
        output_array = output
    with open(args.result_dir+output_file, 'w') as f:
        for line in output_list:
            f.write(str(line))
            f.write('\n')
    return output_array, label_list, possible_parameter_list
<file_sep>from ..utils.utils_words import dt_words
from ..utils.utils_algorithm import text_rank
def dt_text_rank(input_file, fixed_len, max_dis, process_num, alpha, cur_dir, fixed=True):
    """Build the distance-transformed word corpus, then rank words with TextRank."""
    word_corpus = dt_words(input_file, fixed_len, max_dis, process_num, cur_dir, fixed)
    ranked = text_rank(word_corpus, alpha)
    return ranked
<file_sep>from collections import OrderedDict
import numpy as np
import os
class PhyChemicalProperty2vectors(object):
    """Convert sequences into physicochemical-property feature matrices.

    Supports dinucleotide properties (dpc), DNA trinucleotide properties (tpc)
    and per-residue protein properties (pp). Property tables are loaded from
    the data/ directory shipped next to this module.
    """
    # TODO: define the variables the model needs in the initialization function
    def __init__(self, method, alphabet, chosen_file=None):
        """
        Initialize the object.
        :param method: 'DPC', 'TPC' or 'PP' — selects the default property names.
        :param alphabet: DNA, RNA or Protein
        :param chosen_file: optional file listing property names (one per line);
            when None, method-specific defaults are used.
        """
        full_path = os.path.realpath(__file__)
        self.pp_dir = os.path.dirname(full_path) + '/data/'
        if alphabet == 'DNA':
            self.indicators = self.pp_dir + 'DDi_index.txt'
            self.indicators_name = self.pp_dir + 'DDi_name.txt'
            # sum = 90 # total number of the DNA indicators
        elif alphabet == 'RNA':
            self.indicators = self.pp_dir + 'RDi_index.txt'
            self.indicators_name = self.pp_dir + 'RDi_name.txt'
            # sum = 11 # total number of the RNA indicators
        else:
            self.indicators = self.pp_dir + 'aaindex.txt'
            self.indicators_name = self.pp_dir + 'Phy_HeadList.txt'
            # sum = ? # total number of the Protein indicators
        if chosen_file is None:
            print('\nThe pp_file is None, select default physicochemical properties.')
            if method == 'DPC':
                self.chosen_name = ['Twist', 'Tilt', 'Roll', 'Shift', 'Slide', 'Rise']
            elif method == 'TPC':
                self.chosen_name = ['Dnase I', 'Nucleosome positioning']
            elif method == 'PP':
                self.chosen_name = ['Hydrophobicity', 'Hydrophilicity', 'Mass']
        else:
            print('\nThe pp_file is: %s' % chosen_file)
            with open(chosen_file) as r:
                self.chosen_name = [i.replace('\r', '') for i in r.read().split('\n')]
        self.alphabet = alphabet
        self.aaList_Index = []
        self.vec_mat_list = []  # accumulates one matrix per processed sequence

    def dpc(self, file_path):
        """Dinucleotide property matrix (len-1 rows) for every sequence in file_path."""
        indicators_value = OrderedDict()
        # The index file alternates: name line, then its whitespace-separated values.
        with open(self.indicators) as f:
            line = f.readlines()
        for i in range(32):
            if i % 2 == 0:
                indicators_value[line[i].strip()] = ''
            else:
                indicators_value[line[i - 1].strip()] = line[i].strip().split()
        print('The physicochemical properties file is %s\n' % self.indicators_name)
        with open(self.indicators_name) as r:
            indicators_list = [i.replace('\r', '') for i in r.read().split('\n')]
        # chosen_index = [indicators_list.index(i) for i in self.chosen_name]
        chosen_index = [indicators_list.index(i) for i in self.chosen_name if i != '']
        with open(file_path) as f:
            for line in f:
                if line[0] != '>':
                    vec_mat = []
                    line = line.strip().upper()
                    # One row per overlapping dinucleotide.
                    for n in range(len(line) - 1):
                        i = line[n] + line[n + 1]
                        vector = []
                        for j in chosen_index:
                            vector.append(indicators_value[i][j])
                        vector = list(map(float, vector))
                        vec_mat.append(vector)
                    self.vec_mat_list.append(np.array(vec_mat))
        return self.vec_mat_list

    def tpc(self, file_path):
        """Trinucleotide property matrix for every sequence (DNA only).

        NOTE(review): rows are still built from dinucleotides (line[n]+line[n+1])
        while looking them up in the trinucleotide table — confirm intent.
        """
        assert self.alphabet == 'DNA', 'TPC method is only for DNA sequence!'
        indicators_value = OrderedDict()
        with open(self.pp_dir + 'DTi_index.txt') as f:
            line = f.readlines()
        for i in range(128):
            if i % 2 == 0:
                indicators_value[line[i].strip()] = ''
            else:
                indicators_value[line[i - 1].strip()] = line[i].strip().split()
        print('The physicochemical properties file is %s\n' % (self.pp_dir + 'DTi_name.txt'))
        with open(self.pp_dir + 'DTi_name.txt') as r:
            indicators_list = [i.replace('\r', '') for i in r.read().split('\n')]
        # chosen_index = [indicators_list.index(i) for i in self.chosen_name]
        chosen_index = [indicators_list.index(i) for i in self.chosen_name if i != '']
        with open(file_path) as f:
            for line in f:
                if line[0] != '>':
                    vec_mat = []
                    line = line.strip().upper()
                    for n in range(len(line) - 1):
                        i = line[n] + line[n + 1]
                        vector = []
                        for j in chosen_index:
                            vector.append(indicators_value[i][j])
                        vector = list(map(float, vector))
                        vec_mat.append(vector)
                    self.vec_mat_list.append(np.array(vec_mat))
        return self.vec_mat_list

    def pp(self, file_path):
        """Per-residue property matrix (one row per residue) for every sequence."""
        indicators_value = OrderedDict()
        with open(self.indicators) as f:
            line = f.readlines()
        for i in range(40):
            if i % 2 == 0:
                indicators_value[line[i].strip()] = ''
            else:
                indicators_value[line[i - 1].strip()] = line[i].strip().split()
        print('The physicochemical properties file is %s\n' % self.indicators_name)
        with open(self.indicators_name) as r:
            indicators_list = [i.replace('\r', '') for i in r.read().split('\n')]
        # chosen_index = [indicators_list.index(i) for i in self.chosen_name]
        chosen_index = [indicators_list.index(i) for i in self.chosen_name if i != '']
        with open(file_path) as lines:
            for line in lines:
                if line[0] != '>':
                    vec_mat = []
                    line = line.strip().upper()
                    for i in range(len(line)):
                        vector = []
                        for j in chosen_index:
                            vector.append(indicators_value[line[i]][j])
                        vector = list(map(float, vector))
                        vec_mat.append(vector)
                    self.vec_mat_list.append(np.array(vec_mat))
        return self.vec_mat_list
<file_sep>import math
import subprocess
import threading
import os
import sys
import time
import pickle
import numpy as np
from itertools import product
from .acc import pdt
from ..utils.utils_pssm import sep_file, produce_all_frequency
from ..utils.utils_words import convert_tng_to_fasta
def pdt_profile(inputfile, n, lamada, sw_dir, process_num):
    """Generate PDT-Profile features.

    Pipeline: split the FASTA input into per-sequence files, build frequency
    profiles for each, convert them to top-n-gram FASTA, then run PDT on it.

    :param inputfile: input sequence file in FASTA format.
    :param n: the n most frequent amino acids in the amino acid frequency profiles.
    :param lamada: the distance between two amino acids.
    :param sw_dir: the main dir of software.
    :param process_num: the number of processes used for multiprocessing.
    """
    # tng_list, seq_name = top_n_gram(inputfile, n, process_num)
    dirname, seq_name = sep_file(inputfile)
    pssm_dir = produce_all_frequency(dirname, sw_dir, process_num)
    tng_fasta = convert_tng_to_fasta(pssm_dir, seq_name, inputfile, n, sw_dir)
    # convert_tng_to_fasta(pssm_dir, seq_name, input_file, n, sw_dir)
    return pdt(tng_fasta, lamada, sw_dir)
# -------------------------------------------------------------------------------------
# PDT-Profile end
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# ACC-PSSM, AC-PSSM, CC-PSSM start
# -------------------------------------------------------------------------------------
def blosum_pssm(seq_file, new_blosum_dict, dirname):
    """Generate pssm file using blosum62 matrix.

    Writes <dirname>/blosum_pssm/<seq_name>.pssm: three blank header lines,
    then one row per residue of the form "<pos> <residue> <values...>".

    :param seq_file: the sequence file containing one sequence.
    :param new_blosum_dict: the blosum62 dict after processing (residue -> values).
    :param dirname: the directory name for storing the generated files.
    :return: absolute path of the written .pssm file.
    """
    rows = []
    with open(seq_file, 'r') as handle:
        for raw in handle:
            stripped = raw.strip()
            if stripped.startswith('>'):
                continue
            for position, residue in enumerate(stripped):
                values = ' '.join(str(v) for v in new_blosum_dict[residue])
                rows.append(' '.join([str(position + 1), residue, values]))
    blosum_dir = ''.join([dirname, '/blosum_pssm'])
    if not os.path.isdir(blosum_dir):
        os.mkdir(blosum_dir)
    base_name = os.path.split(os.path.splitext(seq_file)[0])[1]
    blosum_file = ''.join([blosum_dir, '/', base_name, '.pssm'])
    with open(blosum_file, 'w') as out:
        # Three empty header lines mimic the real PSSM format.
        out.write('\n' * 3)
        for row in rows:
            out.write(row + '\n')
    return os.path.abspath(blosum_file)
def read_blosum():
    """Load the bundled BLOSUM62 dict and prune it for 20-residue use.

    Drops the ambiguous keys ('*', 'B', 'Z', 'X') and the 'alphas' entry,
    then removes the last four score columns from every remaining row.

    :return: dict mapping amino acid -> list of BLOSUM62 scores.
    """
    data_path = os.path.dirname(os.path.realpath(__file__)) + '/data/blosum62.pkl'
    with open(data_path, 'rb') as handle:
        blosum = pickle.load(handle)
    for unwanted in ('*', 'B', 'Z', 'X', 'alphas'):
        blosum.pop(unwanted)
    for row in blosum.values():
        # Trim the trailing four columns of each score row.
        for _ in range(4):
            row.pop()
    return blosum
def acc_pssm_cmd(pssm_file, lag, acc_out_file, sw_dir, sem):
    """ACC-PSSM command.

    Runs the bundled external ``acc`` binary on one .pssm file; a shared
    semaphore bounds how many of these subprocesses run at once.

    :param pssm_file: the .pssm file.
    :param lag: the distance between two amino acids.
    :param acc_out_file: the output file of the acc program.
    :param sw_dir: the main dir of software.
    :param sem: semaphore limiting the number of threads running
        concurrently (implemented internally on top of a Condition).
    """
    sem.acquire()
    # Pick the platform-specific binary shipped with the software bundle.
    if sys.platform.startswith('win'):
        acc_cmd = sw_dir + 'acc_pssm/acc.exe'
    else:
        acc_cmd = sw_dir + 'acc_pssm/acc'
    os.chmod(acc_cmd, 0o777)  # make sure the bundled binary is executable
    cmd = ' '.join([acc_cmd, ' ', str(lag), ' ', pssm_file, ' ', acc_out_file])
    subprocess.call(cmd, shell=True)
    # NOTE(review): presumably gives the acc output file time to flush
    # before the semaphore slot is released — confirm it is still needed.
    time.sleep(2)
    sem.release()
def sep_acc_vector(acc_out_file):
    """Split the acc program output into ACC, AC and CC vectors.

    The acc output holds one float per line; within every block of 400
    consecutive values, the first 20 are auto-covariance (AC) entries and
    the remaining 380 are cross-covariance (CC) entries.

    :param acc_out_file: the output file of the acc program.
    :return: tuple (acc_vector, ac_vector, cc_vector) as plain lists.
    """
    acc_values = []
    with open(acc_out_file, 'r') as handle:
        for raw in handle:
            acc_values.append(round(float(raw.strip()), 3))
    ac_values = []
    cc_values = []
    for start in range(0, len(acc_values), 400):
        ac_values.extend(acc_values[start:start + 20])
        cc_values.extend(acc_values[start + 20:start + 400])
    return acc_values, ac_values, cc_values
def make_acc_pssm_vector(inputfile, lag, vec_type, sw_dir, process_num):
    """Generate ACC, AC, CC feature vectors.

    :param inputfile: input sequence file in FASTA format.
    :param lag: the distance between two amino acids.
    :param vec_type: the type of the vectors generated, ACC-PSSM, AC-PSSM
        or CC-PSSM ('acc', 'ac' or 'cc').
    :param sw_dir: the main dir of software.
    :param process_num: the number of processes used for multiprocessing.
    :return: numpy array of feature vectors, or False for an unknown vec_type.
    """
    dirname, seq_name = sep_file(inputfile)
    pssm_dir = produce_all_frequency(dirname, sw_dir, process_num)
    # debug mode on/off
    # pssm_dir = "D:\\Leon\\bionlp\\BioSeq-NLP\\data\\cv_results\\Protein\\sequence\\SR\\SVM\\ACC-PSSM/all_seq_cv/pssm"
    dir_list = os.listdir(pssm_dir)
    index_list = []
    # Collect the indices of sequences for which psiblast produced a .pssm.
    for elem in dir_list:
        pssm_full_path = ''.join([pssm_dir, '/', elem])
        name, suffix = os.path.splitext(elem)
        if os.path.isfile(pssm_full_path) and suffix == '.pssm':
            index_list.append(int(name))
    index_list.sort()
    new_blosum_dict = {}
    # Sequences lacking a real PSSM fall back to a BLOSUM62-based pseudo PSSM.
    if len(index_list) != len(seq_name):
        new_blosum_dict = read_blosum()
    acc_out_fold = dirname + '/acc_out'
    acc_vectors = []
    ac_vectors = []
    cc_vectors = []
    if not os.path.isdir(acc_out_fold):
        os.mkdir(acc_out_fold)
    out_file_list = []
    threads = []
    # Bound the number of concurrently running acc subprocesses.
    sem = threading.Semaphore(process_num)
    for i in range(1, len(seq_name) + 1):
        if i in index_list:
            pssm_full_path = ''.join([pssm_dir, '/', str(i), '.pssm'])
        else:
            seq_file = ''.join([dirname, '/', str(i), '.txt'])
            pssm_full_path = blosum_pssm(seq_file, new_blosum_dict, dirname)
        acc_out_file = ''.join([acc_out_fold, '/', str(i), '.out'])
        out_file_list.append(acc_out_file)
        # acc_pssm_cmd(pssm_full_path, lag, acc_out_file, sw_dir)
        threads.append(threading.Thread(target=acc_pssm_cmd,
                                        args=(pssm_full_path, lag, acc_out_file, sw_dir, sem)))
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    # All subprocesses have finished; parse each output into the 3 vector kinds.
    for out_file in out_file_list:
        acc_vec_list, ac_vec_list, cc_vec_list = sep_acc_vector(out_file)
        acc_vectors.append(acc_vec_list)
        ac_vectors.append(ac_vec_list)
        cc_vectors.append(cc_vec_list)
    if vec_type == 'acc':
        return np.array(acc_vectors)
    elif vec_type == 'ac':
        return np.array(ac_vectors)
    elif vec_type == 'cc':
        return np.array(cc_vectors)
    else:
        return False
# -------------------------------------------------------------------------------------
# ACC-PSSM, AC-PSSM, CC-PSSM end
# -------------------------------------------------------------------------------------
def initialization():
    """Read the plain-text blosum62 matrix bundled under data/.

    Skips the seven header lines and the '*' row, keeping the first 20
    score columns for every amino-acid row.

    Fix: the file handle was previously opened and never closed; it is now
    managed with a ``with`` block.

    :return: dict mapping amino acid -> list of 20 float scores.
    """
    blosum62 = {}
    full_path = os.path.realpath(__file__)
    blosum62_path = os.path.dirname(full_path) + '/data/blosum62'
    # read the matrix of blosum62
    with open(blosum62_path) as blosum_reader:
        count = 0
        for line in blosum_reader:
            count += 1
            if count <= 7:
                continue
            line = line.strip('\r').split()
            if line[0] != '*':
                blosum62[line[0]] = [float(x) for x in line[1:21]]
    return blosum62
# PSSM RT starts
def format_each_line(each_line):
    """Reduce one raw PSSM row to 'residue<TAB>scores...' ending in newline.

    The residue letter comes from columns 5-8 of the raw line; the scores
    are the first 20 whitespace-separated fields from column 9 onward.
    """
    residue = each_line[5:8].strip()
    scores = each_line[9:].strip().split()[:20]
    return residue + '\t' + '\t'.join(scores) + '\n'
def simplify_pssm(pssm_file, new_file):
    """Write a tab-separated copy of a PSSM file keeping only the 20 scores.

    Skips the two leading header lines and stops at the first blank line.
    Silently does nothing when ``pssm_file`` does not exist.
    """
    if not os.path.exists(pssm_file):
        return
    with open(pssm_file) as input_pssm, open(new_file, 'w') as outfile:
        line_no = 0
        for raw in input_pssm:
            line_no += 1
            if line_no <= 2:
                continue
            if not raw.strip():
                break
            formatted = format_each_line(raw)
            if line_no == 3:
                # The column-header line keeps a leading space marker.
                formatted = ' ' + formatted
            outfile.write(formatted)
def pssm_ksb(input_file, sw_dir, process_num, is_pssm_dt=False):
    """Prepare simplified PSSM files and, optionally, PSSM-DT (KSB) vectors.

    :param input_file: input sequence file in FASTA format.
    :param sw_dir: the main dir of software.
    :param process_num: number of processes used for PSSM generation.
    :param is_pssm_dt: when True, also compute k-separated-bigram vectors
        (lag fixed at 4) and return them instead of the file paths.
    :return: np.array of vectors when is_pssm_dt, otherwise the tuple
        (pssm_pro_files, seq_names).
    """
    dirname, seq_name = sep_file(input_file)
    pssm_dir = produce_all_frequency(dirname, sw_dir, process_num)
    dir_name = os.path.split(pssm_dir)[0]
    xml_dir = dir_name + '/xml'
    final_result = ''.join([dir_name, '/final_result'])
    if not os.path.isdir(final_result):
        os.mkdir(final_result)
    # Index sequences by the numbered .xml outputs of the psiblast step.
    dir_list = os.listdir(xml_dir)
    index_list = []
    for elem in dir_list:
        xml_full_path = ''.join([xml_dir, '/', elem])
        name, suffix = os.path.splitext(elem)
        if os.path.isfile(xml_full_path) and suffix == '.xml':
            index_list.append(int(name))
    index_list.sort()
    pssm_pro_files = []
    seq_names = []
    vectors = []
    for index in index_list:
        pssm_file = pssm_dir + '/' + str(index) + '.pssm'
        pssm_file_list = list(os.path.splitext(pssm_file))
        pssm_process_file = pssm_file_list[0] + '_pro' + pssm_file_list[1]
        seq_name = pssm_file_list[0].split('/')[-1]
        seq_names.append(seq_name)
        pssm_pro_files.append(pssm_process_file)
        simplify_pssm(pssm_file, pssm_process_file)
        if is_pssm_dt:
            pssm = read_pssm(pssm_process_file)
            if pssm is False:
                # No real PSSM: rebuild one from BLOSUM62 rows of the raw sequence.
                p1 = os.path.split(pssm_process_file)
                seq_path = os.path.split(p1[0])[0] + '/' + seq_name + '.txt'
                with open(seq_path) as f:
                    lines = f.readlines()
                protein_seq = lines[1].strip().upper()
                pssm = get_blosum62(protein_seq)
                pssm = np.array(pssm)
                # Prepend residue letters as column 0 to match read_pssm output.
                protein_seq = [np.array([x]) for x in list(protein_seq)]
                protein_seq = np.array(protein_seq)
                pssm = np.hstack((protein_seq, pssm))
            vector = generate_ksb_pssm(pssm, 4)  # lag fixed at 4
            vectors.append(list(vector))
    if is_pssm_dt:
        return np.array(vectors)
    return pssm_pro_files, seq_names
def get_blosum62(protein):
    """Build a BLOSUM62-based pseudo profile for a protein sequence.

    :param protein: iterable of amino-acid letters (case-insensitive).
    :return: list with one 20-score BLOSUM62 row per residue.
    """
    blosum62 = initialization()
    return [blosum62[residue.upper()] for residue in protein]
def pssm_rt_method(input_file, process_num, sw_dir, fixed_len):
    """Generate PSSM-RT (residue/pair/multi relationship) feature vectors.

    :param input_file: input sequence file in FASTA format.
    :param process_num: number of processes used for PSSM generation.
    :param sw_dir: the main dir of software.
    :param fixed_len: profile length that vectors are padded/truncated to.
    :return: numpy array holding one RT vector per sequence.
    """
    pssm_files, seq_names = pssm_ksb(input_file, sw_dir, process_num)
    # pssm_ksb(input_file, sw_dir, process_num, is_pssm_dt=False):
    vectors = []
    for pssm_file, seq_name in zip(pssm_files, seq_names):
        pssm_score = []
        if os.path.exists(pssm_file):
            with open(pssm_file) as f:
                lines = f.readlines()
            for line in lines[1:]:
                # Drop the residue column; keep the integer scores.
                line = [int(x) for x in line.strip().split('\t')[1:]]
                pssm_score.append(line)
        else:
            # No simplified PSSM was produced: fall back to BLOSUM62 rows
            # built from the raw numbered sequence file.
            p1 = os.path.split(pssm_file)
            seq_path = os.path.split(p1[0])[0] + '/' + seq_name + '.txt'
            with open(seq_path) as f:
                lines = f.readlines()
            pssm_score = get_blosum62(lines[1].strip())
        vectors.append(get_rt_vector(pssm_score, fixed_len))
    return np.array(vectors)
def get_rt_vector(pssm_score, fixed_len):
    """Assemble the PSSM-RT feature vector for one profile.

    Normalizes the profile, pads (with rows of 20 zeros) or truncates it to
    ``fixed_len`` rows, then concatenates the residue-conservation, pairwise
    and multi relationship features.
    """
    pssm_score = normalized(pssm_score)
    fixed_rows = []
    for row_idx in range(fixed_len):
        if row_idx < len(pssm_score):
            fixed_rows.append(pssm_score[row_idx])
        else:
            fixed_rows.append([0.0] * 20)
    residue = cal_residue_conservation(fixed_rows)
    pair = cal_pair_relationships(fixed_rows)
    multi = cal_multi_relationships(fixed_rows)
    return residue + list(pair) + multi
def normalized(pssm_score):
    """Row-wise logistic rescaling of PSSM scores into (0, 1).

    Each score x becomes 1 / (1 + e**x), applied element-wise per row.
    """
    return [[1 / (1 + math.e ** value) for value in row] for row in pssm_score]
def cal_residue_conservation(pssm_score):
    """Flatten the per-residue score rows into one long feature list."""
    flattened = []
    for row in pssm_score:
        flattened.extend(row)
    return flattened
def cal_pair_relationships(pssm_score):
    """Sum of pairwise products between every row and the target row.

    For each non-target row, forms the Cartesian product of its scores with
    the target row's scores and sums the resulting product vectors over all
    non-target rows.

    Bug fix: the original seeded the accumulator with ``pair_scores[0]`` and
    then added every element of ``pair_scores`` again, so the first pair
    vector was counted twice (and mutated in place by ``+=``). Each pair
    vector is now counted exactly once.

    :param pssm_score: list of score rows (typically 20 values each).
    :return: summed pair-relationship vector as a plain list.
    """
    target_position = len(pssm_score) // 2
    if len(pssm_score) % 2 == 1:
        target_position += 1
    pair_scores = []
    for row_idx in range(len(pssm_score)):
        if row_idx != target_position:
            pairs = product(pssm_score[row_idx], pssm_score[target_position])
            pair_scores.append(np.array([a * b for a, b in pairs]))
    return np.sum(pair_scores, axis=0).tolist()
def cal_multi_relationships(pssm_score):
    """Column sums (first 5 columns) on each side of the target row.

    Sums the rows strictly before ``target_position - 1`` and the rows from
    ``target_position`` onward, for columns 0-4, producing a 10-dim vector
    (five left sums followed by five right sums).
    """
    target_position = len(pssm_score) // 2
    if len(pssm_score) % 2 == 1:
        target_position += 1
    score_matrix = np.array([np.array(row) for row in pssm_score])
    left_sums = []
    right_sums = []
    for col in range(5):
        left_sums.append(sum(score_matrix[:target_position - 1, col].tolist()))
        right_sums.append(sum(score_matrix[target_position:, col].tolist()))
    return left_sums + right_sums
# PSSM RT ends
# PSSM DT starts
def read_pssm(pssm_file):
    """Load a simplified PSSM file into a numpy array of string fields.

    Skips the first (header) line; every other line is split on whitespace.
    Returns False when the file does not exist.
    """
    if not os.path.exists(pssm_file):
        return False
    with open(pssm_file, 'r') as handle:
        lines = handle.readlines()
    rows = [line.strip().split() for line in lines[1:]]
    # One n*20 matrix per file (plus the leading residue column).
    return np.array(rows)
def create_matrix(row_size, column_size):
    """Return a zero-filled (row_size x column_size) numpy matrix."""
    return np.zeros((row_size, column_size))
def aver(matrix_sum, seq_len):
    """Average an accumulated matrix by seq_len and flatten it.

    :param matrix_sum: 2-D accumulated correlation matrix.
    :param seq_len: divisor (number of contributing positions).
    :return: single-element list holding the flattened averaged array.
    """
    averaged = np.divide(np.array(matrix_sum), seq_len)
    rows, cols = np.shape(averaged)
    return [np.reshape(averaged, (rows * cols,))]
def generate_ksb_pssm(pssm, lag):
    """Concatenate averaged k-separated-bigram features for k = 1..lag.

    :param pssm: profile matrix whose column 0 holds residue letters.
    :param lag: maximum separation between correlated positions.
    :return: flat list of 400 features per k value.
    """
    seq_len = float(np.shape(pssm)[0])
    vector = []
    for step in range(1, lag + 1):
        correlation = pre_handle_columns(pssm, step)
        vector += list(aver(correlation, float(seq_len - step))[0])
    return vector
def pre_handle_columns(pssm, step):
    """Correlation matrix between residues ``step`` positions apart.

    Drops the residue-letter column (keeps columns 1..20), then computes
    M[i][j] = sum_k scores[k][i] * scores[k + step][j] over the sequence.

    The original triple Python loop (20 * 20 * n multiplications) is
    replaced by a single matrix product computing the same sums.

    :param pssm: array whose column 0 is the residue letter and columns
        1..20 hold numeric scores.
    :param step: gap between the two correlated positions.
    :return: 20x20 numpy array of correlation sums.
    """
    scores = pssm[:, 1:21].astype(float)
    seq_len = np.shape(scores)[0]
    if seq_len <= step:
        # No position pairs exist at this separation.
        return np.zeros((20, 20))
    # Equivalent to: for i, j, k: M[i, j] += scores[k, i] * scores[k+step, j]
    return scores[:seq_len - step].T @ scores[step:]
def pssm_dt_method(input_file, process_num, sw_dir):
    """Generate PSSM-DT feature vectors by running pssm_ksb in DT mode."""
    return pssm_ksb(input_file, sw_dir, process_num, is_pssm_dt=True)
<file_sep>from sklearn import svm
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
def machine_learning(args, output_array, folds, label_list, possible_parameter_list):
    """Grid-search classifier hyper-parameters by cross-validated accuracy.

    For the method selected in ``args.method``, fits one classifier per
    candidate parameter set on every (train, test) fold, accumulates the
    overall accuracy across folds, and returns the best-scoring parameters.

    NOTE(review): ``folds`` is iterated once per candidate parameter set —
    it must be a re-iterable sequence (e.g. a list of index-array pairs),
    not a one-shot generator. Confirm against the caller.

    :param args: namespace with the selected .method name
        ('SVM', 'LinearSVM', 'RF', 'KNN', 'AdaBoost', 'NB', 'LDA', 'QDA').
    :param output_array: feature matrix, indexable by fold index arrays.
    :param folds: iterable of (train_indices, test_indices) pairs.
    :param label_list: label array aligned with output_array rows.
    :param possible_parameter_list: list of candidate parameter dicts
        (keys depend on the method: 'cost'/'gamma', 'tree', 'ngb').
    :return: best parameter dict, or [] for the parameter-free methods.
    """
    if args.method == 'SVM':
        max_acc = 0
        max_index = 0
        for i in range(len(possible_parameter_list)):
            parameter_pair = possible_parameter_list[i]
            output_str = ' cost = 2 ** ' + str(parameter_pair['cost']) + ' | ' + 'gamma = 2 ** ' + str(parameter_pair['gamma']) + ' '
            print(output_str.center(40, '='))
            cnt = 0
            true_cnt = 0
            for train, test in folds:
                # output_array = np.array(output_array)
                x_train = output_array[train]
                x_test = output_array[test]
                y_train = label_list[train]
                y_test = label_list[test]
                classification = svm.SVC(C=2 ** parameter_pair['cost'], gamma=2 ** parameter_pair['gamma'], probability=True)
                classification.fit(x_train, y_train)
                y_test_predict_prob = classification.predict_proba(x_test)
                y_test_predict = classification.predict(x_test)
                # print("test:")
                # print(y_test)
                # print("predict:")
                # print(y_test_predict)
                # Count correct predictions across all folds for this candidate.
                for j in range(len(y_test)):
                    cnt += 1
                    if y_test_predict[j] == y_test[j]:
                        true_cnt += 1
            acc = (1.0*true_cnt)/(1.0*cnt)
            print('Acc = ' + str(acc))
            if acc > max_acc:
                max_acc = acc
                max_index = i
        print("best parameter:")
        print('cost:' + str(possible_parameter_list[max_index]['cost']) + ' gamma: ' +
              str(possible_parameter_list[max_index]['gamma']))
        best_parameter_pair = possible_parameter_list[max_index]
        return best_parameter_pair
    elif args.method == 'LinearSVM':
        # Same grid-search loop as 'SVM', but only over 'cost' (linear kernel).
        max_acc = 0
        max_index = 0
        for i in range(len(possible_parameter_list)):
            parameter_pair = possible_parameter_list[i]
            output_str = ' cost = 2 ** ' + str(parameter_pair['cost'])
            print(output_str.center(40, '='))
            cnt = 0
            true_cnt = 0
            for train, test in folds:
                # output_array = np.array(output_array)
                x_train = output_array[train]
                x_test = output_array[test]
                y_train = label_list[train]
                y_test = label_list[test]
                classification = svm.SVC(C=2 ** parameter_pair['cost'], kernel="linear", probability=True)
                classification.fit(x_train, y_train)
                y_test_predict_prob = classification.predict_proba(x_test)
                y_test_predict = classification.predict(x_test)
                # print("test:")
                # print(y_test)
                # print("predict:")
                # print(y_test_predict)
                for j in range(len(y_test)):
                    cnt += 1
                    if y_test_predict[j] == y_test[j]:
                        true_cnt += 1
            acc = (1.0 * true_cnt) / (1.0 * cnt)
            print('Acc = ' + str(acc))
            if acc > max_acc:
                max_acc = acc
                max_index = i
        print("best parameter:")
        print('cost:' + str(possible_parameter_list[max_index]['cost']))
        best_parameter_pair = possible_parameter_list[max_index]
        return best_parameter_pair
    elif args.method == 'RF':
        # Grid-search over the number of trees ('tree').
        max_acc = 0
        max_index = 0
        for i in range(len(possible_parameter_list)):
            parameter_pair = possible_parameter_list[i]
            output_str = ' tree = ' + str(parameter_pair['tree'])
            print(output_str.center(40, '='))
            cnt = 0
            true_cnt = 0
            for train, test in folds:
                # output_array = np.array(output_array)
                x_train = output_array[train]
                x_test = output_array[test]
                y_train = label_list[train]
                y_test = label_list[test]
                classification = RandomForestClassifier(random_state=42, n_estimators=parameter_pair['tree'])
                classification.fit(x_train, y_train)
                y_test_predict_prob = classification.predict_proba(x_test)
                y_test_predict = classification.predict(x_test)
                # print("test:")
                # print(y_test)
                # print("predict:")
                # print(y_test_predict)
                for j in range(len(y_test)):
                    cnt += 1
                    if y_test_predict[j] == y_test[j]:
                        true_cnt += 1
            acc = (1.0 * true_cnt) / (1.0 * cnt)
            print('Acc = ' + str(acc))
            if acc > max_acc:
                max_acc = acc
                max_index = i
        print("best parameter:")
        print('tree:' + str(possible_parameter_list[max_index]['tree']))
        best_parameter_pair = possible_parameter_list[max_index]
        return best_parameter_pair
    elif args.method == 'KNN':
        # Grid-search over the number of neighbors ('ngb').
        max_acc = 0
        max_index = 0
        for i in range(len(possible_parameter_list)):
            parameter_pair = possible_parameter_list[i]
            output_str = ' neighbors = ' + str(parameter_pair['ngb'])
            print(output_str.center(40, '='))
            cnt = 0
            true_cnt = 0
            for train, test in folds:
                # output_array = np.array(output_array)
                x_train = output_array[train]
                x_test = output_array[test]
                y_train = label_list[train]
                y_test = label_list[test]
                classification = KNeighborsClassifier(n_neighbors=parameter_pair['ngb'])
                classification.fit(x_train, y_train)
                y_test_predict_prob = classification.predict_proba(x_test)
                y_test_predict = classification.predict(x_test)
                # print("test:")
                # print(y_test)
                # print("predict:")
                # print(y_test_predict)
                for j in range(len(y_test)):
                    cnt += 1
                    if y_test_predict[j] == y_test[j]:
                        true_cnt += 1
            acc = (1.0 * true_cnt) / (1.0 * cnt)
            print('Acc = ' + str(acc))
            if acc > max_acc:
                max_acc = acc
                max_index = i
        print("best parameter:")
        print('neighbors:' + str(possible_parameter_list[max_index]['ngb']))
        best_parameter_pair = possible_parameter_list[max_index]
        return best_parameter_pair
    elif args.method == 'AdaBoost' or args.method == 'NB' or args.method == 'LDA' or args.method == 'QDA':
        # These methods are used with default parameters; nothing to search.
        best_parameter_pair = []
        return best_parameter_pair
<file_sep>from ..utils.utils_words import tng_words
from ..utils.utils_algorithm import tf_idf
def tng_tf_idf(input_file, fixed_len, word_size, n, process_num, cur_dir, fixed=True):
    """TF-IDF features over the top-n-gram words extracted from input_file."""
    return tf_idf(tng_words(input_file, fixed_len, word_size, n, process_num, cur_dir, fixed))
<file_sep>from sklearn import svm
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as lda
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis as qda
from sklearn.metrics import roc_auc_score, roc_curve, precision_recall_curve, auc
from scipy import interp
import matplotlib.pyplot as plt
import numpy as np
import os
import math
def plot_pr_curve(cv_labels, cv_prob, file_path):
    """Plot the mean cross-validation Precision-Recall curve.

    Interpolates each fold's PR curve onto a common recall grid, draws the
    mean curve with a +/- 1 std band, and saves 'cv_prc.png'.

    Fix: uses ``np.interp`` directly — the ``scipy.interp`` alias this code
    previously relied on is deprecated and removed in current SciPy.

    :param cv_labels: list of per-fold true label arrays.
    :param cv_prob: list of per-fold positive-class probabilities.
    :param file_path: directory/prefix where the figure is written.
    :return: area under the mean PR curve.
    """
    precisions = []
    auc_list = []
    recall_array = []
    precision_array = []
    mean_recall = np.linspace(0, 1, 100)
    for i in range(len(cv_labels)):
        precision, recall, _ = precision_recall_curve(cv_labels[i], cv_prob[i])
        recall_array.append(recall)
        precision_array.append(precision)
        # precision_recall_curve returns recall in decreasing order; reverse
        # for np.interp (which needs increasing x), then reverse back.
        precisions.append(np.interp(mean_recall, recall[::-1], precision[::-1])[::-1])
        try:
            roc_auc = auc(recall, precision)
        except ZeroDivisionError:
            roc_auc = 0.0
        auc_list.append(roc_auc)
    plt.figure(0)
    mean_precision = np.mean(precisions, axis=0)
    mean_recall = mean_recall[::-1]
    mean_auc = auc(mean_recall, mean_precision)
    std_auc = np.std(auc_list)
    plt.plot(mean_recall, mean_precision, color='navy',
             label=r'Mean PRC (AUPRC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
             lw=2, alpha=.7)
    std_precision = np.std(precisions, axis=0)
    precision_upper = np.minimum(mean_precision + std_precision, 1)
    precision_lower = np.maximum(mean_precision - std_precision, 0)
    plt.fill_between(mean_recall, precision_lower, precision_upper, color='grey', alpha=.3,
                     label=r'$\pm$ 1 std. dev.')
    plt.xlim([0, 1.0])
    plt.ylim([0, 1.0])
    plt.title('Precision-Recall Curve', fontsize=18)
    plt.xlabel('Recall', fontsize=16)
    plt.ylabel('Precision', fontsize=16)
    plt.legend(loc="lower left")
    ax_width = 1
    ax = plt.gca()  # grab the axes to style the frame
    ax.spines['bottom'].set_linewidth(ax_width)
    ax.spines['left'].set_linewidth(ax_width)
    ax.spines['top'].set_linewidth(ax_width)
    ax.spines['right'].set_linewidth(ax_width)
    figure_name = file_path + 'cv_prc.png'
    plt.savefig(figure_name, dpi=600, transparent=True, bbox_inches='tight')
    plt.close(0)
    full_path = os.path.abspath(figure_name)
    if os.path.isfile(full_path):
        print('The Precision-Recall Curve of cross-validation can be found:')
        print(full_path)
        print('\n')
    return mean_auc
def plot_roc_curve(cv_labels, cv_prob, file_path):
    """Plot the mean cross-validation ROC curve.

    Interpolates each fold's ROC curve onto a common FPR grid, draws the
    mean curve with a +/- 1 std band and the random-guess diagonal, and
    saves 'cv_roc.png'.

    Fix: uses ``np.interp`` directly — the ``scipy.interp`` alias this code
    previously relied on is deprecated and removed in current SciPy.

    :param cv_labels: list of per-fold true label arrays.
    :param cv_prob: list of per-fold positive-class probabilities.
    :param file_path: directory/prefix where the figure is written.
    :return: area under the mean ROC curve.
    """
    # Receiver Operating Characteristic
    tpr_list = []
    auc_list = []
    fpr_array = []
    tpr_array = []
    thresholds_array = []
    mean_fpr = np.linspace(0, 1, 100)
    for i in range(len(cv_labels)):
        fpr, tpr, thresholds = roc_curve(cv_labels[i], cv_prob[i])
        fpr_array.append(fpr)
        tpr_array.append(tpr)
        thresholds_array.append(thresholds)
        tpr_list.append(np.interp(mean_fpr, fpr, tpr))
        tpr_list[-1][0] = 0.0  # force the curve to start at the origin
        try:
            roc_auc = auc(fpr, tpr)
        except ZeroDivisionError:
            roc_auc = 0.0
        auc_list.append(roc_auc)
    plt.figure(0)
    plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
             label='Random', alpha=.7)
    mean_tpr = np.mean(tpr_list, axis=0)
    mean_tpr[-1] = 1.0
    mean_auc = auc(mean_fpr, mean_tpr)
    std_auc = np.std(auc_list)
    plt.plot(mean_fpr, mean_tpr, color='navy',
             label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
             lw=2, alpha=.7)
    std_tpr = np.std(tpr_list, axis=0)
    tpr_upper = np.minimum(mean_tpr + std_tpr, 1)
    tpr_lower = np.maximum(mean_tpr - std_tpr, 0)
    plt.fill_between(mean_fpr, tpr_lower, tpr_upper, color='grey', alpha=.3,
                     label=r'$\pm$ 1 std. dev.')
    plt.xlim([0, 1.0])
    plt.ylim([0, 1.0])
    plt.title('Receiver Operating Characteristic', fontsize=18)
    plt.xlabel('False Positive Rate', fontsize=16)
    plt.ylabel('True Positive Rate', fontsize=16)
    plt.legend(loc="lower right")
    ax_width = 1
    ax = plt.gca()  # grab the axes to style the frame
    ax.spines['bottom'].set_linewidth(ax_width)
    ax.spines['left'].set_linewidth(ax_width)
    ax.spines['top'].set_linewidth(ax_width)
    ax.spines['right'].set_linewidth(ax_width)
    figure_name = file_path + 'cv_roc.png'
    plt.savefig(figure_name, dpi=600, transparent=True, bbox_inches='tight')
    plt.close(0)
    full_path = os.path.abspath(figure_name)
    if os.path.isfile(full_path):
        print('The Receiver Operating Characteristic of cross-validation can be found:')
        print(full_path)
        print('\n')
    return mean_auc
def evaluation(label, label_predict):
    """Compute ACC, AUC, sensitivity, specificity and F1 for labels in {1, -1}.

    :param label: true labels (+1 positive, -1 negative).
    :param label_predict: predicted labels (+1 / -1).
    :return: tuple (acc, auc, sensitivity, specificity, f1); each metric
        falls back to 0.0 when its denominator is degenerate.
    """
    tp = 0.0
    fp = 0.0
    tn = 0.0
    fn = 0.0
    for i in range(len(label)):
        if label[i] == 1 and label_predict[i] == 1:
            tp += 1.0
        elif label[i] == 1 and label_predict[i] == -1:
            fn += 1.0
        elif label[i] == -1 and label_predict[i] == 1:
            fp += 1.0
        elif label[i] == -1 and label_predict[i] == -1:
            tn += 1.0
    try:
        acc = (tp + tn) / (tp + tn + fp + fn)
    except ZeroDivisionError:
        acc = 0.0
    # Fix: roc_auc_score raises ValueError (not ZeroDivisionError) when only
    # one class is present in `label`; catch both so a degenerate fold
    # yields auc = 0.0 instead of crashing.
    try:
        auc = roc_auc_score(label, label_predict)
    except (ZeroDivisionError, ValueError):
        auc = 0.0
    try:
        sensitivity = tp / (tp + fn)
    except ZeroDivisionError:
        sensitivity = 0.0
    try:
        specificity = tn / (tn + fp)
    except ZeroDivisionError:
        specificity = 0.0
    try:
        p = tp / (tp + fp)
    except ZeroDivisionError:
        p = 0.0
    try:
        r = tp / (tp + fn)
    except ZeroDivisionError:
        r = 0.0
    try:
        f1 = (2 * p * r) / (p + r)
    except ZeroDivisionError:
        f1 = 0.0
    return acc, auc, sensitivity, specificity, f1
def result_print(result):
    """Print the five evaluation metrics, one 'Name:value' line each."""
    metric_names = ('Accuracy', 'AUC', 'Sensitivity', 'Specificity', 'F1-score')
    print("Result: ")
    for name, value in zip(metric_names, result):
        print(str(name) + ':' + str(value))
def performance_evaluation(args, output_array, folds, label_list, best_parameter_pair):
    """Cross-validated evaluation of the selected classifier.

    Re-fits the classifier named by ``args.method`` with
    ``best_parameter_pair`` on every fold, prints the fold-averaged metrics
    (see ``evaluation``) and plots the ROC and Precision-Recall curves into
    ``args.result_dir``.

    :param args: namespace with .method and .result_dir.
    :param output_array: feature matrix, indexable by fold index arrays.
    :param folds: iterable of (train_indices, test_indices) pairs.
    :param label_list: label array aligned with output_array rows.
    :param best_parameter_pair: parameter dict chosen by machine_learning
        ([] for the parameter-free methods).
    """
    if args.method == 'SVM':
        temp_str = 'The best parameter for SVM is: cost = ' + str(best_parameter_pair['cost']) + ', gamma = ' + str(best_parameter_pair['gamma'])
        # print(temp_str.center(40, '+'))
        results = []
        true_labels = []
        predict_labels = []
        predict_probability = []
        for train, test in folds:
            x_train = output_array[train]
            x_test = output_array[test]
            y_train = label_list[train]
            y_test = label_list[test]
            classification = svm.SVC(C=2 ** best_parameter_pair['cost'], gamma=2 ** best_parameter_pair['gamma'], probability=True)
            classification.fit(x_train, y_train)
            y_test_predict = classification.predict(x_test)
            # Probability of the positive class (column 1), for curve plotting.
            y_test_prob_predict = classification.predict_proba(x_test)[:, 1]
            result = evaluation(y_test, y_test_predict)
            results.append(result)
            true_labels.append(y_test)
            predict_labels.append(y_test_predict)
            predict_probability.append(y_test_prob_predict)
        plot_roc_curve(true_labels, predict_probability, args.result_dir)
        plot_pr_curve(true_labels, predict_probability, args.result_dir)
        final_result = np.array(results).mean(axis=0)
        result_print(final_result)
    elif args.method == 'LinearSVM':
        temp_str = 'The best parameter for Linear SVM is: cost = ' + str(best_parameter_pair['cost'])
        # print(temp_str.center(40, '+'))
        results = []
        true_labels = []
        predict_labels = []
        predict_probability = []
        for train, test in folds:
            x_train = output_array[train]
            x_test = output_array[test]
            y_train = label_list[train]
            y_test = label_list[test]
            classification = svm.SVC(C=2 ** best_parameter_pair['cost'], kernel="linear", probability=True)
            classification.fit(x_train, y_train)
            y_test_predict = classification.predict(x_test)
            y_test_prob_predict = classification.predict_proba(x_test)[:, 1]
            result = evaluation(y_test, y_test_predict)
            results.append(result)
            true_labels.append(y_test)
            predict_labels.append(y_test_predict)
            predict_probability.append(y_test_prob_predict)
        plot_roc_curve(true_labels, predict_probability, args.result_dir)
        plot_pr_curve(true_labels, predict_probability, args.result_dir)
        final_result = np.array(results).mean(axis=0)
        result_print(final_result)
    elif args.method == 'RF':
        temp_str = 'The best parameter for RF is: tree = ' + str(best_parameter_pair['tree'])
        # print(temp_str.center(40, '+'))
        results = []
        true_labels = []
        predict_labels = []
        predict_probability = []
        for train, test in folds:
            x_train = output_array[train]
            x_test = output_array[test]
            y_train = label_list[train]
            y_test = label_list[test]
            classification = RandomForestClassifier(random_state=42, n_estimators=best_parameter_pair['tree'])
            classification.fit(x_train, y_train)
            y_test_predict = classification.predict(x_test)
            y_test_prob_predict = classification.predict_proba(x_test)[:, 1]
            result = evaluation(y_test, y_test_predict)
            results.append(result)
            true_labels.append(y_test)
            predict_labels.append(y_test_predict)
            predict_probability.append(y_test_prob_predict)
        plot_roc_curve(true_labels, predict_probability, args.result_dir)
        plot_pr_curve(true_labels, predict_probability, args.result_dir)
        final_result = np.array(results).mean(axis=0)
        result_print(final_result)
    elif args.method == 'KNN':
        temp_str = 'The best parameter for KNN is: neighbors = ' + str(best_parameter_pair['ngb'])
        # print(temp_str.center(40, '+'))
        results = []
        true_labels = []
        predict_labels = []
        predict_probability = []
        for train, test in folds:
            x_train = output_array[train]
            x_test = output_array[test]
            y_train = label_list[train]
            y_test = label_list[test]
            classification = KNeighborsClassifier(n_neighbors=best_parameter_pair['ngb'])
            classification.fit(x_train, y_train)
            y_test_predict = classification.predict(x_test)
            y_test_prob_predict = classification.predict_proba(x_test)[:, 1]
            result = evaluation(y_test, y_test_predict)
            results.append(result)
            true_labels.append(y_test)
            predict_labels.append(y_test_predict)
            predict_probability.append(y_test_prob_predict)
        plot_roc_curve(true_labels, predict_probability, args.result_dir)
        plot_pr_curve(true_labels, predict_probability, args.result_dir)
        final_result = np.array(results).mean(axis=0)
        result_print(final_result)
    elif args.method == 'AdaBoost' or args.method == 'NB' or args.method == 'LDA' or args.method == 'QDA':
        # Parameter-free methods: fit with library defaults.
        results = []
        true_labels = []
        predict_labels = []
        predict_probability = []
        for train, test in folds:
            x_train = output_array[train]
            x_test = output_array[test]
            y_train = label_list[train]
            y_test = label_list[test]
            if args.method == 'AdaBoost':
                classification = AdaBoostClassifier()
            elif args.method == 'NB':
                classification = GaussianNB()
            elif args.method == 'LDA':
                classification = lda()
            elif args.method == 'QDA':
                classification = qda()
            classification.fit(x_train, y_train)
            y_test_predict = classification.predict(x_test)
            y_test_prob_predict = classification.predict_proba(x_test)[:, 1]
            result = evaluation(y_test, y_test_predict)
            results.append(result)
            true_labels.append(y_test)
            predict_labels.append(y_test_predict)
            predict_probability.append(y_test_prob_predict)
        plot_roc_curve(true_labels, predict_probability, args.result_dir)
        plot_pr_curve(true_labels, predict_probability, args.result_dir)
        final_result = np.array(results).mean(axis=0)
        result_print(final_result)
        # NOTE(review): this full-dataset prediction reuses the classifier
        # fitted on the LAST fold only — confirm this is intended.
        all_predict = classification.predict(output_array)
        with open(args.result_dir + 'prediction result', 'w') as f:
            space = ' '
            f.write('No.' + space + 'True Label' + space + 'Predict Label\n')
            for i in range(len(all_predict)):
                f.write(str(i) + space + str(label_list[i]) + space + str(all_predict[i]))
                f.write('\n')
<file_sep>from ..utils.utils_words import subsequence_words
from ..utils.utils_algorithm import text_rank
def subsequence_text_rank(input_file, alphabet, fixed_len, word_size, alpha, fixed=True):
    """TextRank features over subsequence words extracted from input_file."""
    return text_rank(subsequence_words(input_file, alphabet, fixed_len, word_size, fixed), alpha)
<file_sep>import collections
import itertools
import os
import sys
import time
import subprocess
import threading
from xml.etree import ElementTree
import numpy as np
from ..utils.utils_pssm import check_and_save
def sep_file_psfm(parent_file, sub_file):
    """Split a FASTA file into one numbered file per sequence for PSFM.

    Creates a subdirectory of ``parent_file`` named after its own basename
    and writes every sequence yielded by check_and_save to '<no>.txt'
    inside it.

    :param parent_file: parent directory path; a same-named subdirectory
        is created beneath it.
    :param sub_file: FASTA file validated and iterated by check_and_save.
    :return: tuple (absolute directory path, list of sequence names).
    """
    dir_name = (parent_file + '/' + os.path.basename(parent_file))
    if not os.path.exists(dir_name):
        try:
            os.makedirs(dir_name)
        except OSError:
            # Another worker may have created it concurrently; ignore.
            pass
    seq_name = []
    for seq in check_and_save(sub_file):
        seq_name.append(seq.name)
        seq_file = dir_name + '/' + str(seq.no) + '.txt'
        with open(seq_file, 'w') as f:
            f.write('>')
            f.write(str(seq.name))
            f.write('\n')
            f.write(str(seq.seq))
    return os.path.abspath(dir_name), seq_name
def km2index(alphabet, km):
    """Map every length-km string over alphabet to its enumeration index.

    Indices follow itertools.product order (lexicographic in the order the
    alphabet lists its symbols).
    """
    kmer_strings = [''.join(chars) for chars in itertools.product(alphabet, repeat=km)]
    return {kmer: idx for idx, kmer in enumerate(kmer_strings)}
def run_group_search(index_list, profile_home, sw_dir, process_num):
    """Run a psiblast search for every indexed sequence with bounded threads.

    :param index_list: sequence indices; each maps to '<index>.txt'.
    :param profile_home: directory holding the per-sequence files.
    :param sw_dir: the main dir of software.
    :param process_num: maximum number of concurrently running searches.
    """
    sem = threading.Semaphore(process_num)
    workers = []
    for index in index_list:
        seq = str(index) + '.txt'
        workers.append(threading.Thread(target=run_simple_search,
                                        args=(seq, profile_home, sw_dir, sem)))
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
def run_simple_search(fd, profile_home, sw_dir, sem):
    """Run one psiblast search and derive an MSA from its XML output.

    Executes psiblast for a single sequence file (writing .xml and .pssm
    outputs), parses the final iteration's hits below the e-value threshold
    into gap-aligned rows, and writes them to a .msa file. When no hit
    qualifies, the query sequence itself becomes the sole MSA row.
    Concurrency is throttled by ``sem``.

    :param fd: sequence file name such as '3.txt' (index + extension).
    :param profile_home: working directory holding the per-sequence files
        and the xml/, pssm/ and msa/ subfolders.
    :param sw_dir: the main dir of software (psiblast binary + nrdb90 db).
    :param sem: semaphore bounding concurrent psiblast processes.
    """
    sem.acquire()
    protein_name = fd.split('.')[0]
    complet_n = 0
    complet_n += 1
    outfmt_type = 5  # 5 = BLAST XML output
    num_iter = 10
    evalue_threshold = 0.001
    fasta_file = profile_home + '/' + protein_name + '.txt'
    xml_file = profile_home + '/xml/' + protein_name + '.xml'
    pssm_file = profile_home + '/pssm/' + protein_name + '.pssm'
    msa_file = profile_home + '/msa/' + protein_name + '.msa'
    BLAST_DB = sw_dir + 'psiblast/nrdb90/nrdb90'
    if sys.platform.startswith('win'):
        psiblast_cmd = sw_dir + 'psiblast/psiblast.exe'
    else:
        psiblast_cmd = sw_dir + 'psiblast/psiblast'
    os.chmod(psiblast_cmd, 0o777)  # make sure the bundled binary is executable
    cmd = ' '.join([psiblast_cmd,
                    '-query ' + fasta_file,
                    '-db ' + BLAST_DB,
                    '-out ' + xml_file,
                    '-evalue ' + str(evalue_threshold),
                    '-num_iterations ' + str(num_iter),
                    '-outfmt ' + str(outfmt_type),
                    '-out_ascii_pssm ' + pssm_file,  # Write the pssm file
                    '-num_threads ' + '40']
                   )
    subprocess.call(cmd, shell=True)
    msa = []
    # parser the xml format
    tree = ElementTree.ElementTree(file=xml_file)
    # get query info
    # query_def = tree.find('BlastOutput_query-def').text
    # print query_def
    query_len = tree.find('BlastOutput_query-len').text
    # print query_len
    iteration = tree.findall('BlastOutput_iterations/Iteration')[-1]  # get the last iteration
    iteration_hits = iteration.find('Iteration_hits')
    for Hit in list(iteration_hits):
        hsp_evalue = Hit.find('Hit_hsps/Hsp/Hsp_evalue').text
        # only parser the hits that e-value < threshold
        if float(hsp_evalue) > evalue_threshold:
            continue
        # print Hsp_evalue
        # Hit_num = Hit.find('Hit_num').text
        # Hit_id = Hit.find('Hit_id').text
        # Hit_def = Hit.find('Hit_def').text
        Hsp_query_from = Hit.find('Hit_hsps/Hsp/Hsp_query-from').text
        Hsp_query_to = Hit.find('Hit_hsps/Hsp/Hsp_query-to').text
        # Hsp_hit_from = Hit.find('Hit_hsps/Hsp/Hsp_hit-from').text
        # Hsp_hit_to = Hit.find('Hit_hsps/Hsp/Hsp_hit-to').text
        Hsp_qseq = Hit.find('Hit_hsps/Hsp/Hsp_qseq').text
        Hsp_hseq = Hit.find('Hit_hsps/Hsp/Hsp_hseq').text
        # alignment sequence by add prefix, suffix
        prefix = "-" * (int(Hsp_query_from) - 1)
        suffix = "-" * (int(query_len) - int(Hsp_query_to))
        # delete the space in protein_name and the corresponding position of hits
        pos = -1
        for aa in Hsp_qseq:
            pos = pos + 1
            if aa == '-':
                Hsp_hseq = Hsp_hseq[:pos] + '*' + Hsp_hseq[pos + 1:]
        Hsp_hseq = Hsp_hseq.replace('*', '')
        # Replace non-standard residue codes with alignment gaps.
        if 'X' in Hsp_hseq:
            Hsp_hseq = Hsp_hseq.replace('X', '-')
        if 'B' in Hsp_hseq:
            Hsp_hseq = Hsp_hseq.replace('B', '-')
        if 'Z' in Hsp_hseq:
            Hsp_hseq = Hsp_hseq.replace('Z', '-')
        if 'U' in Hsp_hseq:
            Hsp_hseq = Hsp_hseq.replace('U', '-')
        if 'J' in Hsp_hseq:
            Hsp_hseq = Hsp_hseq.replace('J', '-')
        if 'O' in Hsp_hseq:
            Hsp_hseq = Hsp_hseq.replace('O', '-')
        # combine prefix, modified hits, suffix
        hit_sequence = prefix + Hsp_hseq + suffix
        # print hit_sequence
        # append in MSA
        msa.append(hit_sequence)
    if not msa:
        # append the protein-self
        ff = open(fasta_file, 'r')
        ff.readline()  # skip the id
        fasta_seq = ff.readline().strip().upper()
        ff.close()
        if 'X' in fasta_seq:
            fasta_seq = fasta_seq.replace('X', '-')
        if 'B' in fasta_seq:
            fasta_seq = fasta_seq.replace('B', '-')
        if 'Z' in fasta_seq:
            fasta_seq = fasta_seq.replace('Z', '-')
        if 'U' in fasta_seq:
            fasta_seq = fasta_seq.replace('U', '-')
        if 'J' in fasta_seq:
            fasta_seq = fasta_seq.replace('J', '-')
        if 'O' in fasta_seq:
            fasta_seq = fasta_seq.replace('O', '-')
        msa.append(fasta_seq)
    # write file
    output = open(msa_file, 'w')
    output.write('\n'.join(msa))
    output.close()
    # NOTE(review): presumably lets output files settle before releasing
    # the semaphore slot — confirm it is still needed.
    time.sleep(2)
    sem.release()
def read_msa(msa_file):
    """Load an MSA file and return its lines as a list of stripped strings."""
    with open(msa_file) as handle:
        return [row.strip() for row in handle]
def new_print(query, pfm, protein_name, profile_home, headers):
    """Write a position-specific frequency matrix (PSFM) to a tab-separated file.

    :param query: residues of the query sequence, one row label per position.
    :param pfm: frequency matrix of shape (n_features, seq_len); transposed
        before writing so rows correspond to sequence positions.
    :param protein_name: basename for the output file.
    :param profile_home: directory containing the 'psfm' output sub-directory.
    :param headers: iterable whose items' first element is the column header.
    :return: path of the written .psfm file.
    """
    output_file = profile_home + '/psfm/' + protein_name + '.psfm'
    pfm = pfm.transpose()
    with open(output_file, 'w') as f:
        # Header row: one tab-separated label per feature column.
        f.write(' ' + '\t')
        for item in headers:
            f.write(str(item[0]) + '\t')
        f.write('\n')
        # One row per sequence position: residue label then its frequencies.
        for i in range(0, pfm.shape[0]):
            f.write(str(query[i]) + '\t')
            for j in range(0, pfm.shape[1]):
                f.write('%.6f' % float(pfm[i, j]))
                f.write('\t')
            f.write('\n')
        # Fix: removed the redundant f.close(); the with-block closes the file.
    return output_file
def create_matrix(row_size, column_size):
    """Return a zero-initialised float matrix with the given dimensions."""
    return np.zeros((row_size, column_size))
def single_frequency_matrix(msa, km_index, kmer):
    """Count the frequency with extension methods.

    Builds a position-specific k-mer frequency matrix over an MSA and then
    column-normalises it.

    :param msa: list of equal-length aligned sequences ('-' marks gaps).
    :param km_index: dict mapping a k-mer string to its row index.
    :param kmer: sliding-window width k.
    :return: normalised matrix of shape (len(km_index), L - k + 1).
    """
    # MATRIX SHAPE
    # Matrix shape is {#20+400+8000+..., #length of sequence}
    row_size = len(km_index)
    column_size = len(msa[0])
    PFM = create_matrix(row_size, column_size - kmer + 1)
    # FREQUENCY MATRIX
    # NOTE(review): for kmer > 1 this loop runs past the last full window
    # (col can exceed column_size - kmer); a short tail slice present in
    # km_index would index beyond PFM's last column — TODO confirm intended.
    for col in range(column_size):
        # collect the gap-free k-mers seen at this column across all rows
        position_specific_composition = []
        for row in msa:
            km_slide = row[col:col + kmer]
            if '-' not in km_slide:
                position_specific_composition.append(km_slide)
        # count frequency
        position_specific_frequency = collections.Counter(position_specific_composition)
        for composition in position_specific_frequency:
            # print composition
            if composition.strip() != '':
                pssm_row = km_index[composition]
                PFM[pssm_row, col] = position_specific_frequency[composition]
    # normalise each column by its total count (0 where the column is empty)
    normal_pfm = create_matrix(row_size, column_size - kmer + 1)
    n = np.sum(PFM, axis=0)
    for i in range(0, PFM.shape[0]):
        for j in range(0, PFM.shape[1]):
            if n[j] == 0.0:
                normal_pfm[i, j] = 0.0
            else:
                normal_pfm[i, j] = PFM[i, j] / n[j]
    return normal_pfm
def profile_worker(fd, alphabet, k, profile_home, headers):
    """Build and write the PSFM profile for one protein.

    :param fd: file name whose stem (before the first '.') is the protein name.
    :param alphabet: residue alphabet used to index k-mers.
    :param k: k-mer size.
    :param profile_home: directory holding '<name>.txt' and 'msa/<name>.msa'.
    :param headers: column headers forwarded to new_print.
    :return: path of the written .psfm file.
    """
    protein_name = fd.split('.')[0]
    query_seq_file = profile_home + '/' + protein_name + '.txt'
    msa_file = profile_home + '/msa/' + protein_name + '.msa'
    msa_ret = read_msa(msa_file)
    temp_len = len(msa_ret[0])
    query = []
    # Fix: use a context manager so the query file handle is always closed
    # (the original never closed it).
    with open(query_seq_file, 'r') as f:
        next(f)  # skip the FASTA id line
        for line in f:
            # keeps the last line read, including any trailing newline char
            query = list(line)
    # generate single frequency ------------------------------------
    km_index = km2index(alphabet, k)
    row_size = len(km_index)
    column_size = temp_len
    pfm = create_matrix(row_size, column_size)
    if msa_ret:
        pfm = single_frequency_matrix(msa_ret, km_index, k)
    assert pfm.shape[0] == len(km_index)
    return new_print(query, pfm, protein_name, profile_home, headers)
    # generate single frequency ------------------------------------
<file_sep>import numpy as np
from ..utils.utils_words import make_km_list
from ..utils.utils_fasta import get_seqs
from ..utils.utils_const import PROTEIN
def dr_bow(input_file, max_dis):
    """
    The Distance Residue method.
    :param input_file: the input sequence file.
    :param max_dis: the value of the maximum distance.
    """
    assert int(max_dis) > 0
    aa_pairs = make_km_list(2, PROTEIN)
    aa_list = list(PROTEIN)
    with open(input_file, 'r') as f:
        seq_list = get_seqs(f, alphabet=PROTEIN)
    vector_list = []
    for seq in seq_list:
        seq_len = len(seq)
        # distance 0: plain single-residue composition
        vector = [seq.count(aa) for aa in aa_list]
        # distances 1..max_dis: counts of residue pairs (i, i+d)
        for d in range(1, max_dis + 1):
            pairs = [seq[idx] + seq[idx + d] for idx in range(seq_len) if idx + d < seq_len]
            vector.extend(pairs.count(pp) for pp in aa_pairs)
        vector_list.append(vector)
    return np.array(vector_list)
<file_sep>from ..utils.utils_algorithm import fast_text
def fast_text4vec(corpus, sample_size_list, fixed_len, **param_dict):
    """Thin wrapper forwarding corpus vectorisation to fast_text."""
    options = {
        'word_size': param_dict['word_size'],
        'win_size': param_dict['win_size'],
        'vec_dim': param_dict['vec_dim'],
        'skip_gram': param_dict['sg'],
    }
    return fast_text(corpus, sample_size_list, fixed_len, **options)
<file_sep>import math
import os
from sklearn.metrics import roc_auc_score
def performance(origin_labels, predict_labels, deci_value, bi_or_multi=False, res=False):
    """evaluations used to evaluate the performance of the model.
    :param deci_value: decision values used for ROC and AUC.
    :param bi_or_multi: binary or multiple classification
    :param origin_labels: true values of the data set.
    :param predict_labels: predicted values of the data set.
    :param res: residue or not (residue tasks use 0/1 labels; sequence
        tasks use -1/+1 labels).
    :return: (acc, mcc, auc, balance_acc, sn, sp, p, r, f1) for binary
        classification; accuracy alone for multi-class.
    """
    if len(origin_labels) != len(predict_labels):
        raise ValueError("The number of the original labels must equal to that of the predicted labels.")
    if bi_or_multi is False:
        # confusion-matrix counts
        tp = 0.0
        tn = 0.0
        fp = 0.0
        fn = 0.0
        for i in range(len(origin_labels)):
            if res is True:
                # residue-level tasks encode the negative class as 0
                if origin_labels[i] == 1 and predict_labels[i] == 1:
                    tp += 1.0
                elif origin_labels[i] == 1 and predict_labels[i] == 0:
                    fn += 1.0
                elif origin_labels[i] == 0 and predict_labels[i] == 1:
                    fp += 1.0
                elif origin_labels[i] == 0 and predict_labels[i] == 0:
                    tn += 1.0
            else:
                # sequence-level tasks encode the negative class as -1
                if origin_labels[i] == 1 and predict_labels[i] == 1:
                    tp += 1.0
                elif origin_labels[i] == 1 and predict_labels[i] == -1:
                    fn += 1.0
                elif origin_labels[i] == -1 and predict_labels[i] == 1:
                    fp += 1.0
                elif origin_labels[i] == -1 and predict_labels[i] == -1:
                    tn += 1.0
        # each metric degrades to 0.0 when its denominator is zero
        try:
            sn = tp / (tp + fn)
            r = sn  # recall equals sensitivity
        except ZeroDivisionError:
            sn, r = 0.0, 0.0
        try:
            sp = tn / (fp + tn)
        except ZeroDivisionError:
            sp = 0.0
        try:
            acc = (tp + tn) / (tp + tn + fp + fn)
        except ZeroDivisionError:
            acc = 0.0
        try:
            mcc = (tp * tn - fp * fn) / math.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
        except ZeroDivisionError:
            mcc = 0.0
        try:
            auc = roc_auc_score(origin_labels, deci_value)
        except ValueError:  # e.g. only one class present; modify in 2020/9/13
            auc = 0.0
        try:
            p = tp / (tp + fp)
        except ZeroDivisionError:
            p = 0.0
        try:
            f1 = 2 * p * r / (p + r)
        except ZeroDivisionError:
            f1 = 0.0
        balance_acc = (sn + sp) / 2
        return acc, mcc, auc, balance_acc, sn, sp, p, r, f1
    else:
        # multi-class: plain accuracy over exact label matches
        correct_labels = 0.0
        for elem in zip(origin_labels, predict_labels):
            if elem[0] == elem[1]:
                correct_labels += 1.0
        acc = correct_labels / len(origin_labels)
        return acc
# def table_metric(results, opt=False, ind=False):
# metric1 = {'Acc': results[0], 'MCC': results[1], 'AUC': results[2], 'BAcc': results[3],
# 'Sn': results[4], 'Sp': results[5], 'Pr': results[6], 'Rc': results[7], 'F1': results[8]}
# metric2 = {'Accuracy': results[0], 'MCC': results[1], 'AUC': results[2], 'Balanced Accuracy': results[3],
# 'Sensitivity': results[4], 'Specificity': results[5], 'Precision': results[6], 'Recall': results[7],
# 'F1-score': results[8]}
# tb = pt.PrettyTable()
# if opt is False:
# print('Metric details'.center(18, '*'))
# tb.field_names = ["metric", "value"]
# for key, value in list(metric1.items()):
# tb.add_row([key, round(value, 3)])
# else:
# if ind is False:
# print('+-----------------------------------------+')
# print('| The final results of cross validation |')
# print('+-----------------------------------------+')
# tb.field_names = ["cross validation metric", "final results"]
# else:
# print('+-----------------------------------------+')
# print('| The final results of independent test |')
# print('+-----------------------------------------+')
# tb.field_names = ["independent test metric", "final results"]
# for key, value in list(metric2.items()):
# tb.add_row([key, round(value, 4)])
# print(tb)
# print('\n')
def print_metric_dict(results, ind):
    """Pretty-print the nine evaluation metrics as an ASCII table.

    :param results: metric tuple (acc, mcc, auc, bacc, sn, sp, p, r, f1).
    :param ind: False for cross-validation results, True for independent test.
    """
    metric_dict = {'Accuracy': results[0], 'MCC': results[1], 'AUC': results[2], 'Balanced Accuracy': results[3],
                   'Sensitivity': results[4], 'Specificity': results[5], 'Precision': results[6], 'Recall': results[7],
                   'F1-score': results[8]}
    print('\n')
    # fixed column widths for the key and value cells
    key_max_len = 16
    val_max_len = 10
    tag = '--'
    if ind is False:
        header = 'Final results of cross validation'
    else:
        header = 'Results of independent test'
    # banner spanning both columns
    header_str1 = '+' + tag.center(key_max_len + val_max_len + 9, '-') + '+'
    header_str2 = '|' + header.center(key_max_len + val_max_len + 9, ' ') + '|'
    print(header_str1)
    print(header_str2)
    up_dn_str = '+' + tag.center(key_max_len + 4, '-') + '+' + tag.center(val_max_len + 4, '-') + '+'
    print(up_dn_str)
    # one table row (plus separator) per metric
    for key, val in metric_dict.items():
        var_str = '%.4f' % val
        temp_str = '|' + str(key).center(key_max_len + 4, ' ') + '|' + var_str.center(val_max_len + 4, ' ') + '|'
        print(temp_str)
        print(up_dn_str)
    print('\n')
def final_results_output(results, out_path, ind=False, multi=False):
    """Write the final evaluation metrics to a text file and print its path.

    :param results: metric tuple (acc, mcc, auc, bacc, sn, sp, p, r, f1),
        or a single accuracy value when multi is True.
    :param out_path: output directory prefix (must end with a separator).
    :param ind: True for independent-test results, False for cross validation.
    :param multi: True for multi-class results (accuracy only).
    """
    if multi is True:
        acc = float(results)
        acc_re = 'Acc = %.4f' % acc
        eval_re = [acc_re]
    else:
        acc_re = 'Acc = %.4f' % results[0]
        mcc_re = 'MCC = %.4f' % results[1]
        auc_re = 'AUC = %.4f' % results[2]
        bcc_re = 'BAcc = %.4f' % results[3]
        sn_re = 'Sn = %.4f' % results[4]
        sp_re = 'Sp = %.4f' % results[5]
        p_re = 'Precision = %.4f' % results[6]
        r_re = 'Recall = %.4f' % results[7]
        f1_re = 'F1 = %.4f\n' % results[8]
        eval_re = [acc_re, mcc_re, auc_re, bcc_re, sn_re, sp_re, p_re, r_re, f1_re]
    if ind is True:
        filename = out_path + 'ind_final_results.txt'
        # Fix: the header previously claimed "cross validation" even when
        # writing independent-test results.
        header = 'The final results of independent test are as follows:\n'
    else:
        filename = out_path + 'final_results.txt'
        header = 'The final results of cross validation are as follows:\n'
    with open(filename, 'w') as f:
        f.write(header)
        for i in eval_re:
            f.write(i)
            f.write("\n")
    full_path = os.path.abspath(filename)
    if os.path.isfile(full_path):
        print('The output file for final results can be found:')
        print(full_path)
        print('\n')
def prob_output(true_labels, predicted_labels, prob_list, out_path, ind=False):
    """Write per-sample true labels, predictions and probabilities to a
    tab-separated file, then print the file's absolute path."""
    prob_file = out_path + ("ind_prob_out.txt" if ind is True else "prob_out.txt")
    with open(prob_file, 'w') as f:
        f.write('Sample index' + '\t' + 'True labels' + '\t' + 'predicted labels' + '\t' + 'probability values' + '\n')
        index = 0
        for k, m, n in zip(true_labels, predicted_labels, prob_list):
            index += 1
            f.write('\t'.join([str(index), str(k), str(m), str(n)]) + '\n')
    full_path = os.path.abspath(prob_file)
    if os.path.isfile(full_path):
        print('The output file for probability values can be found:')
        print(full_path)
        print('\n')
def prob_output_res(true_labels, predicted_labels, prob_list, out_path, ind=False):
    """Write residue-level labels and probabilities, one sequence at a time.

    Each residue contributes one line (true label, predicted label,
    probability, tab separated); each sequence block ends with a ' ' line.

    :param true_labels: list of per-sequence label lists.
    :param predicted_labels: list of per-sequence prediction lists.
    :param prob_list: list of per-sequence probability lists.
    :param out_path: output directory prefix (must end with a separator).
    :param ind: True for independent-test output (different file name).
    """
    prob_file = out_path + "probability_values.txt"
    if ind is True:
        prob_file = out_path + "Ind_probability_values.txt"
    with open(prob_file, 'w') as f:
        for i in range(len(true_labels)):
            for k, m, n in zip(true_labels[i], predicted_labels[i], prob_list[i]):
                f.write(str(k))
                f.write('\t')
                f.write(str(m))
                f.write('\t')
                f.write(str(n))
                f.write('\n')
            f.write(' ' + '\n')
        # Fix: removed the redundant f.close(); the with-block closes the file.
    full_path = os.path.abspath(prob_file)
    if os.path.isfile(full_path):
        print('The output file for probability values can be found:')
        print(full_path)
<file_sep>import re
import numpy as np
import torch
import torch.nn.functional as func
def mega_motif2mat(motif_string):
    """Convert a MEGA motif string such as '[AC][DE]' into a weight matrix.

    Each bracketed fragment becomes one row of 20 values: the residue at
    position i inside the fragment gets weight 1 / 1.5**i, and the row is
    normalised to sum to 1.  Bracket matching is minimal (non-greedy).
    """
    alphabet = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F',
                'P', 'S', 'T', 'W', 'Y', 'V']  # Just for protein now
    bracket_match = re.compile(r'[\[](.*?)[\]]', re.S)
    rows = []
    for frag in re.findall(bracket_match, motif_string):
        weights = np.zeros(20, dtype=np.float32)
        for rank in range(len(frag)):
            weights[alphabet.index(frag[rank])] = 1 / float(1.5 ** rank)
        rows.append(weights / np.sum(weights))
    return np.array(rows)
class MotifFile2Matrix(object):
    """Parse motif description files into lists of frequency matrices."""

    def __init__(self, motif_file):
        # path of the motif file to parse
        self.input = motif_file

    def elm_motif_to_matrix(self):
        """Parse an ELM/MEME-style motif file into frequency matrices.

        A motif block starts at a 'MOTIF' line (matrix rows begin 3 lines
        later) and ends at a 'URL' line; an 'END' line stops the scan.
        Columns are re-ordered from alphabetical residue order (alp2) to
        the A R N D ... order (alp1).
        :return: list of numpy float32 matrices (motif_len x 20).
        """
        motifs = []
        # A R N D C Q E G H I L K M F P S T W Y V
        alp1 = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F',
                'P', 'S', 'T', 'W', 'Y', 'V']
        alp2 = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q',
                'R', 'S', 'T', 'W', 'Y', 'V']
        with open(self.input, 'r') as f:
            lines = f.readlines()
        # simple state machine over the file's lines
        i = 0
        while i >= 0:
            if len(lines[i].split()) < 1:
                i += 1
            elif lines[i].split()[0] == 'END':
                break
            elif lines[i].split()[0] == 'MOTIF':
                tmp = []
                i += 3  # skip the MOTIF header down to the matrix rows
            elif lines[i].split()[0] == 'URL':
                motifs.append(tmp)
                i += 2
            else:
                tmp.append(lines[i].split())
                i += 1
        frequency_matrices = []
        for motif in motifs:
            tmp = np.asarray(motif, dtype=np.float32)
            print('motif shape:', tmp.shape)
            # remap columns from alphabetical order into alp1 order
            for i in range(tmp.shape[0]):
                for j in range(20):
                    ti = alp1.index(alp2[j])
                    tmp[i][ti] = float(motif[i][j])
            frequency_matrices.append(tmp)
        return frequency_matrices

    def mega_motif_to_matrix(self):
        """Parse a MEGA motif file; each 'MT:' line yields one matrix.

        :return: list of numpy matrices produced by mega_motif2mat.
        """
        frequency_matrices = []
        with open(self.input, 'r') as f:
            lines = f.readlines()
        i = 0
        while i >= 0:
            if len(lines[i].split(' ')) < 2:
                i += 6  # skip short filler lines in blocks of six
            elif lines[i].split(' ')[0] == 'END':
                break
            elif lines[i].split(' ')[0] == 'MT':
                motif_reg = lines[i].split(':')[1]
                motif = mega_motif2mat(motif_reg)
                # drop motifs shorter than 3 residues (currently disabled)
                # if len(motif) > 3:
                frequency_matrices.append(motif)
                i += 1
            else:
                i += 1
        return frequency_matrices
def motif_init(x, kernels):
    """Convolve the input with fixed motif kernels and pool the activations.

    For each kernel: conv2d -> ReLU -> max-pool (2, 1); the mean and max of
    the flattened activations are kept, giving two features per kernel.

    :param x: input tensor; assumed shape [batch, seq_len, 20] — TODO confirm.
    :param kernels: list of numpy motif matrices (motif_len x 20).
    :return: tensor of shape [batch, 2 * len(kernels)].
    """
    motif_out = []
    for kernel in kernels:
        # x: torch.Size([5, 100, 20])
        # print('size of kernel:', kernel.shape) # [5, 20]
        out = x.unsqueeze(1)  # [batch_size, 1, 100, 20] — add channel dim
        # inputs = torch.randn(64, 3, 244, 244)
        # weight = torch.randn(64, 3, 3, 3)
        # bias = torch.randn(64)
        # outputs = func.conv2d(inputs, weight, bias)
        # print('size of mat:', out.size()) # torch.Size([5, 1, 100, 20])
        # print(outputs.size()) # torch.Size([64, 64, 242, 242])
        # reshape the kernel to [1, 1, motif_len, 20]; note the dtype conversion
        weight = torch.from_numpy(kernel).unsqueeze(0).unsqueeze(0).double()
        # print('size of weight:', weight.size())
        out = func.conv2d(out,
                          weight=weight)
        out = func.relu(out)
        out = func.max_pool2d(out, (2, 1))  # torch.Size([50, 1, 7, 1])
        out = out.view(out.size()[0], -1)
        out_mean = torch.mean(out, dim=1, keepdim=True)
        out_max = torch.max(out, dim=1, keepdim=True)[0]  # 0 for value; 1 for index
        out_mm = torch.cat([out_mean, out_max], 1)
        motif_out.append(out_mm)
    return torch.cat(motif_out, 1)
<file_sep>import numpy as np
from .ei import EvolutionaryInformation2Vectors
from .pp import PhyChemicalProperty2vectors
from .rc import ResidueComposition2Vectors
from .sc import rss_method, ss_method, sasa_method, cs_method
from ..utils.utils_write import vectors2files, dl_vec2file, write_res_base_vec, res_vectors2file
def one_hot_enc(input_file, alphabet, enc_method, cur_dir, chosen_file, rss_file, cpu):
    """Dispatch to the residue-level encoder selected by enc_method.

    :param input_file: FASTA input file.
    :param alphabet: residue alphabet for the encoders.
    :param enc_method: name of the encoding method (see branches below).
    :param cur_dir: working directory used by evolutionary/structural encoders.
    :param chosen_file: property file for physicochemical encoders.
    :param rss_file: secondary-structure file used by the 'RSS' method.
    :param cpu: number of processes for the parallel encoders.
    :return: list of per-sequence vectors/matrices, or False on a bad method.
    """
    # return_mat: one-hot style methods return vectors; Automatic-feature and
    # deep-learning models consume the matrix form.
    # TODO: ResidueComposition
    if enc_method == 'One-hot':
        vec_mat_list = ResidueComposition2Vectors(alphabet).one_hot(input_file)
    elif enc_method == 'One-hot-6bit':
        vec_mat_list = ResidueComposition2Vectors(alphabet).one_hot_six_bits(input_file)
    elif enc_method == 'Binary-5bit':
        vec_mat_list = ResidueComposition2Vectors(alphabet).one_hot_five(input_file)
    elif enc_method == 'DBE':
        vec_mat_list = ResidueComposition2Vectors(alphabet).dbe(input_file)
    elif enc_method == 'Position-specific-2':
        vec_mat_list = ResidueComposition2Vectors(alphabet).position_specific(2, input_file)
    elif enc_method == 'Position-specific-3':
        vec_mat_list = ResidueComposition2Vectors(alphabet).position_specific(3, input_file)
    elif enc_method == 'Position-specific-4':
        vec_mat_list = ResidueComposition2Vectors(alphabet).position_specific(4, input_file)
    elif enc_method == 'AESNN3':
        vec_mat_list = ResidueComposition2Vectors(alphabet).aesnn3(input_file)
    elif enc_method == 'NCP':
        vec_mat_list = ResidueComposition2Vectors(alphabet).ncp(input_file)
    # TODO: PhyChemicalProperty
    elif enc_method == 'DPC':
        vec_mat_list = PhyChemicalProperty2vectors(enc_method, alphabet, chosen_file).dpc(input_file)
    elif enc_method == 'TPC':
        vec_mat_list = PhyChemicalProperty2vectors(enc_method, alphabet, chosen_file).tpc(input_file)
    elif enc_method == 'PP':
        vec_mat_list = PhyChemicalProperty2vectors(enc_method, alphabet, chosen_file).pp(input_file)
    # TODO: EvolutionaryInformation
    elif enc_method == 'BLAST-matrix':
        vec_mat_list = EvolutionaryInformation2Vectors(alphabet, cur_dir).blast_matrix(input_file)
    elif enc_method == 'PAM250':
        vec_mat_list = EvolutionaryInformation2Vectors(alphabet, cur_dir).pam250(input_file)
    elif enc_method == 'BLOSUM62':
        vec_mat_list = EvolutionaryInformation2Vectors(alphabet, cur_dir).blosum62(input_file)
    elif enc_method == 'PSSM':
        vec_mat_list = EvolutionaryInformation2Vectors(alphabet, cur_dir).pssm(input_file, cpu)
    elif enc_method == 'PSFM':
        vec_mat_list = EvolutionaryInformation2Vectors(alphabet, cur_dir).psfm(input_file, cpu)
    # TODO: SecondStructure and ConservationScore
    # mat_return needs revising later
    elif enc_method == 'RSS':
        vec_mat_list = rss_method(rss_file)
    elif enc_method == 'SS':
        vec_mat_list = ss_method(input_file, cur_dir, cpu)
    elif enc_method == 'SASA':
        vec_mat_list = sasa_method(input_file, cur_dir, cpu)
    elif enc_method == 'CS':
        vec_mat_list = cs_method(input_file, cur_dir, cpu)
    else:
        print('Method for One-hot encoding mode error!')
        return False
    return vec_mat_list
def ohe2seq_vec(input_file, alphabet, enc_method, cur_dir, chosen_file, rss_file, sp_num_list, fixed_len, out_format,
                out_files, cpu):
    """Encode sequences, flatten them to fixed-length vectors and write them."""
    mats = one_hot_enc(input_file, alphabet, enc_method, cur_dir, chosen_file, rss_file, cpu)
    flat = mat_list2mat_array(mats, fixed_len)
    vectors2files(flat, sp_num_list, out_format, out_files)
def ohe2seq_mat(input_file, alphabet, enc_method, cur_dir, chosen_file, rss_file, sp_num_list, out_files, cpu):
    """Encode sequences and write per-sequence matrices for deep models."""
    mats = one_hot_enc(input_file, alphabet, enc_method, cur_dir, chosen_file, rss_file, cpu)
    dl_vec2file(mats, sp_num_list, out_files)
def ohe2res_base(input_file, alphabet, enc_method, cur_dir, chosen_file, rss_file, out_file, cpu):
    """Encode sequences and write residue-level base vectors to one file."""
    mats = one_hot_enc(input_file, alphabet, enc_method, cur_dir, chosen_file, rss_file, cpu)
    write_res_base_vec(mats, out_file)
def mat_list2mat_array(mat_list, fixed_len):
    """Pad/truncate each matrix to fixed_len rows and flatten to one vector.

    Matrices shorter than fixed_len are zero-padded at the bottom; longer
    ones are truncated.  1-D inputs are treated as single-column matrices.
    """
    try:
        width = mat_list[0].shape[1]
    except IndexError:
        width = 1  # first entry is 1-D
    flattened = []
    for mat in mat_list:
        rows = mat.shape[0]
        if rows > fixed_len:
            padded = mat[:fixed_len, :]
        else:
            padded = np.zeros((fixed_len, width))
            try:
                padded[:rows, :] = mat
            except ValueError:
                # 1-D input: place it in the first column
                padded[:rows, 0] = mat
        flattened.append(padded.flatten().tolist())
    return np.array(flattened)
def sliding_win2files(res_mats, res_labels_list, win_size, out_format, out_files):
    """Cut residue-level feature matrices into sliding windows and write the
    window vectors to positive/negative files according to residue labels.

    :param res_mats: per-sequence matrices of shape (seq_len, width).
    :param res_labels_list: per-sequence residue labels (0 = negative).
    :param win_size: window length; win_size // 2 residues of context per side.
    :param out_format: output format for res_vectors2file.
    :param out_files: [positive_path, negative_path].
    """
    width = res_mats[0].shape[1]
    win_ctrl = win_size // 2
    pos_vec = []
    neg_vec = []
    for i in range(len(res_mats)):
        res_mat = res_mats[i]
        seq_len = len(res_mat)
        assert seq_len > win_size, "The size of window should be no more than the length of sequence[%d]." % i
        # print('seq_len: %d' % seq_len)
        for j in range(seq_len):
            temp_mat = np.zeros((win_size, width))
            if j <= win_ctrl:
                # left edge: copy the first j+win_ctrl+1 rows, zero-pad the rest
                # print(j+win_ctrl+1)
                temp_mat[:j+win_ctrl+1, :] = res_mat[: j+win_ctrl+1, :]
            elif j >= seq_len - win_ctrl:
                # right edge: copy the remaining rows, zero-pad the tail
                temp_mat[:seq_len-j+win_ctrl, :] = res_mat[j-win_ctrl: seq_len, :]
            else:
                # interior: full window centred on residue j
                temp_mat[:, :] = res_mat[j-win_ctrl: j+win_ctrl+1]
            temp_vec = temp_mat.flatten().tolist()
            if res_labels_list[i][j] == 0:
                neg_vec.append(temp_vec)
            else:
                pos_vec.append(temp_vec)
    print('The output files can be found here:')
    res_vectors2file(np.array(pos_vec), out_format, out_files[0])
    res_vectors2file(np.array(neg_vec), out_format, out_files[1])
    print('\n')
def mat_list2frag_array(mat_list, res_labels_list, fixed_len, out_format, out_files):
    """Flatten fragment matrices to fixed-length vectors and write them to
    positive/negative files according to each fragment's first label.

    :param mat_list: per-fragment matrices of shape (frag_len, width).
    :param res_labels_list: per-fragment label lists; entry [i][0] decides
        whether fragment i is positive (1) or negative.
    :param fixed_len: target row count (zero-pad shorter, truncate longer).
    :param out_format: output format for res_vectors2file.
    :param out_files: [positive_path, negative_path].
    """
    frag_array = []
    width = mat_list[0].shape[1]
    for i in range(len(mat_list)):
        temp_arr = np.zeros((fixed_len, width))
        temp_len = mat_list[i].shape[0]
        if temp_len <= fixed_len:
            temp_arr[:temp_len, :] = mat_list[i]
        else:
            temp_arr = mat_list[i][:fixed_len, :]
        frag_array.append(temp_arr.flatten().tolist())
    pos_num = 0
    neg_num = 0
    pos_vec = []
    neg_vec = []
    for i in range(len(res_labels_list)):
        if int(res_labels_list[i][0]) == 1:
            pos_num += 1
            pos_vec.append(frag_array[i])
        else:
            neg_num += 1
            neg_vec.append(frag_array[i])
    # Fix: corrected the garbled assertion message ("there contains ont only
    # two labels").  NOTE(review): as written this assertion can never fail,
    # since every fragment is counted as either positive or negative.
    assert pos_num + neg_num == len(res_labels_list), \
        'Please check the label file: it should contain exactly two labels!'
    print('The output files can be found here:')
    res_vectors2file(np.array(pos_vec), out_format, out_files[0])
    res_vectors2file(np.array(neg_vec), out_format, out_files[1])
    print('\n')
<file_sep>from .kmer_TF_IDF import km_tf_idf
from .revkmer_TF_IDF import rev_km_tf_idf
from .mismatch_TF_IDF import mismatch_tf_idf
from .subsequence_TF_IDF import subsequence_tf_idf
from .tng_TF_IDF import tng_tf_idf
from .dr_TF_IDF import dr_tf_idf
from .dt_TF_IDF import dt_tf_idf
from ..utils.utils_write import vectors2files
from ..utils.utils_words import DNA_X, RNA_X, PROTEIN_X
def tf_idf(input_file, category, words, fixed_len, sample_num_list, out_format, out_file_list, cur_dir, tm=False,
           **param_dict):
    """Compute TF-IDF features using the chosen word-segmentation method.

    :param input_file: sequence input file.
    :param category: 'DNA', 'RNA', or anything else for protein.
    :param words: segmentation method: 'Kmer', 'RevKmer', 'Mismatch',
        'Subsequence', 'Top-N-Gram', 'DR' or 'DT'.
    :param fixed_len: fixed sequence length used by the vectorisers.
    :param sample_num_list: per-class sample counts for the output writer.
    :param out_format: output file format.
    :param out_file_list: output file paths.
    :param cur_dir: working directory forwarded to Top-N-Gram/DT.
    :param tm: when True, return vectors (for topic models) instead of
        writing them to files.
    :param param_dict: method-specific parameters (word_size, top_n, cpu,
        max_dis, ...).
    :return: feature array when tm is True; False on a bad method name.
    """
    if category == 'DNA':
        alphabet = DNA_X
    elif category == 'RNA':
        alphabet = RNA_X
    else:
        alphabet = PROTEIN_X
    if words == 'Kmer':
        tf_vectors = km_tf_idf(input_file, alphabet, fixed_len, word_size=param_dict['word_size'], fixed=True)
    elif words == 'RevKmer':
        tf_vectors = rev_km_tf_idf(input_file, alphabet, fixed_len, word_size=param_dict['word_size'], fixed=True)
    elif words == 'Mismatch':
        tf_vectors = mismatch_tf_idf(input_file, alphabet, fixed_len, word_size=param_dict['word_size'], fixed=True)
    elif words == 'Subsequence':
        tf_vectors = subsequence_tf_idf(input_file, alphabet, fixed_len, word_size=param_dict['word_size'], fixed=True)
    elif words == 'Top-N-Gram':
        tf_vectors = tng_tf_idf(input_file, fixed_len, word_size=param_dict['word_size'], n=param_dict['top_n'],
                                process_num=param_dict['cpu'], cur_dir=cur_dir, fixed=True)
    elif words == 'DR':
        tf_vectors = dr_tf_idf(input_file, alphabet, fixed_len, max_dis=param_dict['max_dis'], fixed=True)
    elif words == 'DT':
        # input_file, fixed_len, max_dis, process_num, cur_dir, fixed=True
        tf_vectors = dt_tf_idf(input_file, fixed_len, max_dis=param_dict['max_dis'],
                               process_num=param_dict['cpu'], cur_dir=cur_dir, fixed=True)
    else:
        print('word segmentation method error!')
        return False
    if tm is False:
        vectors2files(tf_vectors, sample_num_list, out_format, out_file_list)
    else:
        return tf_vectors
<file_sep>from .kmer_TextRank import km_text_rank
from .revkmer_TextRank import rev_km_text_rank
from .mismatch_TextRank import mismatch_text_rank
from .subsequence_TextRank import subsequence_text_rank
from .tng_TextRank import tng_text_rank
from .dr_TextRank import dr_text_rank
from .dt_TextRank import dt_text_rank
from ..utils.utils_write import vectors2files
from ..utils.utils_words import DNA_X, RNA_X, PROTEIN_X
def text_rank(input_file, category, words, fixed_len, sample_num_list, out_format, out_file_list, cur_dir, tm=False,
              **param_dict):
    """Compute TextRank features using the chosen word-segmentation method.

    Mirrors tf_idf: selects the alphabet by category, dispatches on `words`,
    and either writes the vectors to files or (tm=True) returns them for
    downstream topic modelling.

    :param param_dict: method-specific parameters (word_size, alpha — the
        TextRank damping factor, top_n, cpu, max_dis, ...).
    :return: feature array when tm is True; False on a bad method name.
    """
    if category == 'DNA':
        alphabet = DNA_X
    elif category == 'RNA':
        alphabet = RNA_X
    else:
        alphabet = PROTEIN_X
    if words == 'Kmer':
        tr_vectors = km_text_rank(input_file, alphabet, fixed_len, word_size=param_dict['word_size'],
                                  alpha=param_dict['alpha'], fixed=True)
    elif words == 'RevKmer':
        tr_vectors = rev_km_text_rank(input_file, alphabet, fixed_len, word_size=param_dict['word_size'],
                                      alpha=param_dict['alpha'], fixed=True)
    elif words == 'Mismatch':
        tr_vectors = mismatch_text_rank(input_file, alphabet, fixed_len, word_size=param_dict['word_size'],
                                        alpha=param_dict['alpha'], fixed=True)
    elif words == 'Subsequence':
        tr_vectors = subsequence_text_rank(input_file, alphabet, fixed_len, word_size=param_dict['word_size'],
                                           alpha=param_dict['alpha'], fixed=True)
    elif words == 'Top-N-Gram':
        tr_vectors = tng_text_rank(input_file, fixed_len, word_size=param_dict['word_size'], n=param_dict['top_n'],
                                   process_num=param_dict['cpu'], alpha=param_dict['alpha'], cur_dir=cur_dir,
                                   fixed=True)
    elif words == 'DR':
        tr_vectors = dr_text_rank(input_file, alphabet, fixed_len, max_dis=param_dict['max_dis'],
                                  alpha=param_dict['alpha'], fixed=True)
    elif words == 'DT':
        tr_vectors = dt_text_rank(input_file, fixed_len, max_dis=param_dict['max_dis'], process_num=param_dict['cpu'],
                                  alpha=param_dict['alpha'], cur_dir=cur_dir,
                                  fixed=True)
    else:
        print('word segmentation method error!')
        return False
    if tm is False:
        vectors2files(tr_vectors, sample_num_list, out_format, out_file_list)
    else:
        return tr_vectors
<file_sep>import numpy as np
from ..utils.utils_bow import get_km_dict
from ..utils.utils_fasta import get_seqs
def mismatch_bow(input_file, alphabet, k, m):
    """Bag-of-words features via the (k, m)-mismatch kernel.

    With m == 0 the plain k-spectrum counts are used; otherwise every k-mer
    also credits its single-position substitutions.
    """
    alphabet = list(alphabet)
    p = len(alphabet)
    km_dict = get_km_dict(k, alphabet)
    with open(input_file, 'r') as f:
        seq_list = get_seqs(f, alphabet)
    if m == 0 and m < k:
        features = [get_spectrum(seq, km_dict, p, k) for seq in seq_list]
    else:
        features = [get_mismatch(seq, alphabet, km_dict, p, k) for seq in seq_list]
    return np.array(features)
def get_spectrum(sequence, km_dict, p, k):
    """Count every k-mer occurrence in `sequence` (spectrum kernel).

    :return: list of p**k counts, positions taken from km_dict.
    """
    counts = np.zeros((1, p ** k))
    for start in range(len(sequence) - k + 1):
        window = sequence[start:start + k]
        counts[0, km_dict.get(window)] += 1
    return list(counts[0])
def get_mismatch(sequence, alphabet, km_dict, p, k):
    """Count k-mers allowing one mismatch: each window credits itself plus
    every single-position substitution drawn from the alphabet.

    :return: list of p**k counts, positions taken from km_dict.
    """
    counts = np.zeros((1, p ** k))
    for start in range(len(sequence) - k + 1):
        window = sequence[start:start + k]
        counts[0, km_dict.get(window)] += 1
        for pos in range(k):
            # symmetric difference: every alphabet letter except window[pos]
            for letter in list(set(alphabet) ^ set(window[pos])):
                mutated = list(window)
                mutated[pos] = letter
                counts[0, km_dict.get(''.join(mutated))] += 1
    return list(counts[0])
<file_sep>import numpy as np
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from collections import Counter
from ..utils.utils_algorithm import data_partition
from .CNN_BiLSTM import CNNBiLSTM
from .DCNN_BiLSTM import DCNNBiLSTM
from .Motif_CNN import MotifCNN
from .Motif_DCNN import MotifDCNN
from .Auto_Encoder import AutoEncoder
from ..utils.utils_write import vectors2files
from ..OHE.OHE4vec import one_hot_enc
# Use the GPU when available — a GPU environment is strongly recommended,
# since it is much faster.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Encoding methods accepted as input representations for automatic-feature models.
Method_IN_AF = ['One-hot', 'Binary-5bit', 'One-hot-6bit', 'Position-specific-2', 'Position-specific-3',
                'Position-specific-4', 'AESNN3', 'DBE', 'NCP', 'DPC', 'TPC', 'PP', 'PSSM', 'PSFM',
                'PAM250', 'BLOSUM62', 'BLAST-matrix', 'SS', 'SASA', 'RSS', 'CS']
def train(model, device, train_loader, optimizer, criterion, epoch, auto=False):
    """Run one training epoch over train_loader.

    :param auto: True when `model` is an autoencoder — the input itself is
        the reconstruction target; otherwise integer class labels are used.
    """
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        if auto is True:
            data = data.view(len(data), -1)  # flatten for the autoencoder
            _, output = model(data.float())
            loss = criterion(output, data.float())
        else:
            output = model(data.float())
            loss = criterion(output, target.long())
        loss.backward()
        optimizer.step()
        if (batch_idx + 1) % 30 == 0:  # progress log every 30 batches
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset), 100. * batch_idx / len(train_loader),
                loss.item()))
# The evaluation step is likewise wrapped in a single function.
def test(model, device, test_loader, criterion, auto=False):
    """Evaluate the model on test_loader; print average loss (and accuracy
    for classifiers).

    :param auto: True when `model` is an autoencoder (reconstruction loss,
        no accuracy is reported).
    """
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            if auto is True:
                data = data.view(len(data), -1)
                _, output = model(data.float())
                test_loss += criterion(output, data.float()).item()  # accumulate the batch loss
            else:
                output = model(data.float())
                test_loss += criterion(output, target.long()).item()  # accumulate the batch loss
                prediction = output.max(1, keepdim=True)[1]  # index of the max-probability class
                correct += prediction.eq(target.long().view_as(prediction)).sum().item()
    if auto is True:
        test_loss /= len(test_loader.dataset)
        print('\nTest set: Average loss: {:.4f}\n'.format(test_loss))
    else:
        test_loss /= len(test_loader.dataset)
        print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
            test_loss, correct, len(test_loader.dataset),
            100. * correct / len(test_loader.dataset)))
# Deep-learning methods supported for automatic feature extraction.
METHODS_Auto_features = ['MotifCNN', 'MotifDCNN', 'CNN-BiLSTM', 'DCNN-BiLSTM', 'Autoencoder']
def extract_feature(method, train_data, test_data, n_class, in_dim, args, **params_dict):
    """Train one deep model and return the features it extracts for test_data.

    :param method: one of METHODS_Auto_features.
    :param train_data: list of [matrix, label] training pairs.
    :param test_data: list of [matrix, label] pairs to extract features for.
    :param n_class: number of target classes.
    :param in_dim: input feature dimension.
    :param args: namespace with model hyper-parameters (hidden_dim, n_layer, ...).
    :param params_dict: batch_size, fea_dim, prob, lr, epoch.
    :return: numpy array of extracted features, one row per test sample.
    """
    auto = False
    train_loader = DataLoader(train_data, batch_size=params_dict['batch_size'], shuffle=True)
    test_loader = DataLoader(test_data, batch_size=params_dict['batch_size'], shuffle=False)
    if method in METHODS_Auto_features:
        if method == 'MotifCNN':
            rnn = MotifCNN(params_dict['fea_dim'], n_class, params_dict['prob'], args).to(DEVICE)
        elif method == 'MotifDCNN':
            rnn = MotifDCNN(args.hidden_dim, args.n_layer, params_dict['fea_dim'], n_class,
                            params_dict['prob'], args).to(DEVICE)
        elif method == 'CNN-BiLSTM':
            rnn = CNNBiLSTM(in_dim, args.hidden_dim, args.n_layer, params_dict['fea_dim'], n_class,
                            params_dict['prob']).to(DEVICE)
        elif method == 'DCNN-BiLSTM':
            rnn = DCNNBiLSTM(in_dim, args.hidden_dim, args.n_layer, params_dict['fea_dim'], n_class,
                             params_dict['prob']).to(DEVICE)
        else:
            rnn = AutoEncoder(in_dim, args.hidden_dim, n_class).to(DEVICE)
            auto = True
    # autoencoders train on reconstruction loss; classifiers on cross-entropy
    if auto is True:
        criterion = nn.MSELoss()
    else:
        criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(rnn.parameters(), lr=params_dict['lr'])
    epochs = params_dict['epoch']
    for epoch in range(1, epochs + 1):
        train(rnn, DEVICE, train_loader, optimizer, criterion, epoch, auto)
        test(rnn, DEVICE, test_loader, criterion, auto)
    # save the model (disabled)
    # save_path = save_path + '/' + str(n_round) + 'round_model.pth'
    # torch.save(rnn.state_dict(), save_path)
    feature_one_round = []
    with torch.no_grad():
        for data, target in test_loader:
            data = data.to(DEVICE)
            if auto is True:
                data = data.view(len(data), -1)
                vec_tensor = rnn.extract_feature(data.float())
            else:
                vec_tensor = rnn.extract_feature(data.float())
                # vec_tensor = out[:, -1, :]  # [batch_size, hidden_dim]
            vec_numpy = vec_tensor.numpy()
            feature_one_round += list(vec_numpy)
    return np.array(feature_one_round)
def mat_list2mat(mat_list, fixed_len):
    """Stack matrices into one array of shape (N, fixed_len, width),
    zero-padding short matrices at the bottom and truncating long ones."""
    width = mat_list[0].shape[1]
    stacked = []
    for mat in mat_list:
        rows = mat.shape[0]
        if rows > fixed_len:
            block = mat[:fixed_len, :]
        else:
            block = np.zeros((fixed_len, width))
            block[:rows, :] = mat
        stacked.append(block)
    return np.array(stacked)
def auto_feature(method, input_file, labels, sample_num_list, out_file_list, args, **params_dict):
    """Extract deep-learning features via cross-validated training and write
    them to files.

    Sequences are first encoded with the method named by args.in_af, padded
    to args.fixed_len, then each CV fold trains a model on the training
    partition and extracts features for the held-out partition.

    :param method: one of METHODS_Auto_features.
    :param input_file: FASTA input file.
    :param labels: per-sample class labels.
    :param sample_num_list: per-class sample counts (drives data_partition).
    :param out_file_list: output file paths for vectors2files.
    :param args: namespace carrying category, in_af, current_dir, pp_file,
        rss_file, cpu, fixed_len, hidden_dim, format, ...
    :param params_dict: fea_dim, batch_size, prob, lr, epoch.
    """
    # args.format, args.category, args.current_dir, args.pp_file, args.cpu, args.fixed_len
    assert args.in_af in Method_IN_AF, 'Please set correct value for -in_fa parameter!'
    args.res = True
    vec_mat_list = one_hot_enc(input_file, args.category, args.in_af, args.current_dir, args.pp_file, args.rss_file,
                               args.cpu)
    from_mat = mat_list2mat(vec_mat_list, args.fixed_len)
    # feature width: fea_dim for supervised models, hidden_dim for the autoencoder
    auto_vectors = np.zeros([len(from_mat), params_dict['fea_dim']]) if method != 'Autoencoder' else \
        np.zeros([len(from_mat), args.hidden_dim])
    # print(from_mat.shape)
    # exit()
    folds = data_partition(sample_num_list)
    n_class = len(Counter(labels).keys())
    in_dim = len(from_mat[0][0]) if method != 'Autoencoder' else (len(from_mat[0]) * len(from_mat[0][0]))
    # print(len(from_mat[0]))
    # print(len(from_mat[0][0]))
    # print(in_dim)
    for i, (train_index, test_index) in enumerate(folds):
        print('Round [%s]' % (i + 1))
        train_xy = []
        test_xy = []
        for x in train_index:
            train_xy.append([from_mat[x], labels[x]])
        for y in test_index:
            test_xy.append([from_mat[y], labels[y]])
        feature_one_round = extract_feature(method, train_xy, test_xy, n_class, in_dim, args, **params_dict)
        auto_vectors[test_index, :] = feature_one_round
    vectors2files(auto_vectors, sample_num_list, args.format, out_file_list)
<file_sep>from .bow_topic_model import bow_tm
from .TF_IDF_topic_model import tf_idf_tm
from .TextRank_topic_model import text_rank_tm
from ..utils.utils_write import vectors2files
def topic_model(from_vec, tm_method, input_file, labels, category, words, fixed_len, sample_num_list, out_format,
                out_file_list, cur_dir, **param_dict):
    """Build topic-model features from BOW / TF-IDF / TextRank base vectors
    and write them to files.

    :param from_vec: base representation: 'BOW', 'TF-IDF' or 'TextRank'.
    :param tm_method: topic-model algorithm forwarded to the backend.
    :return: False when from_vec is unrecognised.
    """
    # NOTE(review): out_file_list is also passed into the *_tm backends,
    # which may write files themselves before vectors2files writes again —
    # verify there is no double write.
    if from_vec == 'BOW':
        tm_vectors = bow_tm(tm_method, input_file, labels, category, words, sample_num_list, out_format, out_file_list,
                            cur_dir, **param_dict)
    elif from_vec == 'TF-IDF':
        tm_vectors = tf_idf_tm(tm_method, input_file, labels, category, words, fixed_len, sample_num_list, out_format,
                               out_file_list, cur_dir, **param_dict)
    elif from_vec == 'TextRank':
        tm_vectors = text_rank_tm(tm_method, input_file, labels, category, words, fixed_len, sample_num_list,
                                  out_format, out_file_list, cur_dir, **param_dict)
    else:
        print('The input data type of topic model is wrong, please check!')
        return False
    vectors2files(tm_vectors, sample_num_list, out_format, out_file_list)
<file_sep>import os
import matplotlib.pyplot as plt
import numpy as np
from scipy import interp
from sklearn.metrics import roc_curve, auc, precision_recall_curve
def plot_roc_curve(cv_labels, cv_prob, file_path):
    """Plot the cross-validation ROC curve (mean curve with a +/- 1 std band).

    :param cv_labels: list of per-fold true-label arrays.
    :param cv_prob: list of per-fold positive-class probability arrays.
    :param file_path: prefix the figure file name 'cv_roc.png' is appended to.
    :return: mean AUC over all folds.
    """
    # Receiver Operating Characteristic
    tpr_list = []
    auc_list = []
    fpr_array = []
    tpr_array = []
    thresholds_array = []
    mean_fpr = np.linspace(0, 1, 100)
    for i in range(len(cv_labels)):
        fpr, tpr, thresholds = roc_curve(cv_labels[i], cv_prob[i])
        fpr_array.append(fpr)
        tpr_array.append(tpr)
        thresholds_array.append(thresholds)
        # np.interp replaces the deprecated `from scipy import interp` alias,
        # which was removed from SciPy's top-level namespace (SciPy >= 1.10).
        tpr_list.append(np.interp(mean_fpr, fpr, tpr))
        tpr_list[-1][0] = 0.0
        try:
            roc_auc = auc(fpr, tpr)
        except ZeroDivisionError:
            roc_auc = 0.0
        auc_list.append(roc_auc)
    plt.figure(0)
    plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
             label='Random', alpha=.7)
    mean_tpr = np.mean(tpr_list, axis=0)
    mean_tpr[-1] = 1.0  # force the curve to end at (1, 1)
    mean_auc = auc(mean_fpr, mean_tpr)
    std_auc = np.std(auc_list)
    plt.plot(mean_fpr, mean_tpr, color='navy',
             label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
             lw=2, alpha=.7)
    std_tpr = np.std(tpr_list, axis=0)
    tpr_upper = np.minimum(mean_tpr + std_tpr, 1)
    tpr_lower = np.maximum(mean_tpr - std_tpr, 0)
    plt.fill_between(mean_fpr, tpr_lower, tpr_upper, color='grey', alpha=.3,
                     label=r'$\pm$ 1 std. dev.')
    plt.xlim([0, 1.0])
    plt.ylim([0, 1.0])
    plt.title('Receiver Operating Characteristic', fontsize=18)
    plt.xlabel('False Positive Rate', fontsize=16)
    plt.ylabel('True Positive Rate', fontsize=16)
    plt.legend(loc="lower right")
    ax_width = 1
    ax = plt.gca()  # get the axes frame
    ax.spines['bottom'].set_linewidth(ax_width)
    ax.spines['left'].set_linewidth(ax_width)
    ax.spines['top'].set_linewidth(ax_width)
    ax.spines['right'].set_linewidth(ax_width)
    figure_name = file_path + 'cv_roc.png'
    plt.savefig(figure_name, dpi=600, transparent=True, bbox_inches='tight')
    plt.close(0)
    full_path = os.path.abspath(figure_name)
    if os.path.isfile(full_path):
        print('The Receiver Operating Characteristic of cross-validation can be found:')
        print(full_path)
        print('\n')
    return mean_auc
def plot_roc_ind(ind_labels, ind_prob, file_path):
    """Plot the ROC curve for the independent test set and return its AUC."""
    fpr_vals, tpr_vals, _thresholds = roc_curve(ind_labels, ind_prob)
    try:
        ind_auc = auc(fpr_vals, tpr_vals)
    except ZeroDivisionError:
        ind_auc = 0.0
    plt.figure(0)
    plt.plot(fpr_vals, tpr_vals, lw=2, alpha=0.7, color='red',
             label='ROC curve (area = %0.2f)' % ind_auc)
    # Diagonal reference line for a random classifier.
    plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.title('Receiver Operating Characteristic', fontsize=18)
    plt.xlabel('False Positive Rate', fontsize=16)
    plt.ylabel('True Positive Rate', fontsize=16)
    plt.legend(loc="lower right")
    # Apply a uniform line width to every axis spine.
    axes = plt.gca()
    for side in ('bottom', 'left', 'top', 'right'):
        axes.spines[side].set_linewidth(1)
    figure_name = file_path + 'ind_roc.png'
    plt.savefig(figure_name, dpi=600, transparent=True, bbox_inches='tight')
    plt.close(0)
    saved_path = os.path.abspath(figure_name)
    if os.path.isfile(saved_path):
        print('The Receiver Operating Characteristic of independent test can be found:')
        print(saved_path)
        print('\n')
    return ind_auc
def plot_pr_curve(cv_labels, cv_prob, file_path):
    """Plot the cross-validation Precision-Recall curve (mean +/- 1 std band).

    :param cv_labels: list of per-fold true-label arrays.
    :param cv_prob: list of per-fold positive-class probability arrays.
    :param file_path: prefix the figure file name 'cv_prc.png' is appended to.
    :return: mean AUPRC over all folds.
    """
    precisions = []
    auc_list = []
    recall_array = []
    precision_array = []
    mean_recall = np.linspace(0, 1, 100)
    for i in range(len(cv_labels)):
        precision, recall, _ = precision_recall_curve(cv_labels[i], cv_prob[i])
        recall_array.append(recall)
        precision_array.append(precision)
        # precision_recall_curve returns values with decreasing recall, so the
        # arrays are reversed for interpolation and reversed back afterwards.
        # np.interp replaces the deprecated `from scipy import interp` alias,
        # which was removed from SciPy's top-level namespace (SciPy >= 1.10).
        precisions.append(np.interp(mean_recall, recall[::-1], precision[::-1])[::-1])
        try:
            roc_auc = auc(recall, precision)
        except ZeroDivisionError:
            roc_auc = 0.0
        auc_list.append(roc_auc)
    plt.figure(0)
    mean_precision = np.mean(precisions, axis=0)
    mean_recall = mean_recall[::-1]
    mean_auc = auc(mean_recall, mean_precision)
    std_auc = np.std(auc_list)
    plt.plot(mean_recall, mean_precision, color='navy',
             label=r'Mean PRC (AUPRC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
             lw=2, alpha=.7)
    std_precision = np.std(precisions, axis=0)
    precision_upper = np.minimum(mean_precision + std_precision, 1)
    precision_lower = np.maximum(mean_precision - std_precision, 0)
    plt.fill_between(mean_recall, precision_lower, precision_upper, color='grey', alpha=.3,
                     label=r'$\pm$ 1 std. dev.')
    plt.xlim([0, 1.0])
    plt.ylim([0, 1.0])
    plt.title('Precision-Recall Curve', fontsize=18)
    plt.xlabel('Recall', fontsize=16)
    plt.ylabel('Precision', fontsize=16)
    plt.legend(loc="lower left")
    ax_width = 1
    ax = plt.gca()  # get the axes frame
    ax.spines['bottom'].set_linewidth(ax_width)
    ax.spines['left'].set_linewidth(ax_width)
    ax.spines['top'].set_linewidth(ax_width)
    ax.spines['right'].set_linewidth(ax_width)
    figure_name = file_path + 'cv_prc.png'
    plt.savefig(figure_name, dpi=600, transparent=True, bbox_inches='tight')
    plt.close(0)
    full_path = os.path.abspath(figure_name)
    if os.path.isfile(full_path):
        print('The Precision-Recall Curve of cross-validation can be found:')
        print(full_path)
        print('\n')
    return mean_auc
def plot_pr_ind(ind_labels, ind_prob, file_path):
    """Plot the Precision-Recall curve for the independent test set and
    return its area under the curve."""
    precision_vals, recall_vals, _ = precision_recall_curve(ind_labels, ind_prob)
    try:
        ind_auc = auc(recall_vals, precision_vals)
    except ZeroDivisionError:
        ind_auc = 0.0
    plt.figure(0)
    plt.plot(recall_vals, precision_vals, lw=2, alpha=0.7, color='red',
             label='PRC curve (area = %0.2f)' % ind_auc)
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.title('Precision-Recall Curve', fontsize=18)
    plt.xlabel('Recall', fontsize=16)
    plt.ylabel('Precision', fontsize=16)
    plt.legend(loc="lower left")
    # Apply a uniform line width to every axis spine.
    axes = plt.gca()
    for side in ('bottom', 'left', 'top', 'right'):
        axes.spines[side].set_linewidth(1)
    figure_name = file_path + 'ind_prc.png'
    plt.savefig(figure_name, dpi=600, transparent=True, bbox_inches='tight')
    plt.close(0)
    saved_path = os.path.abspath(figure_name)
    if os.path.isfile(saved_path):
        print('The Precision-Recall Curve of independent test can be found:')
        print(saved_path)
        print('\n')
    return ind_auc
<file_sep>from ..utils.utils_algorithm import word2vec
def word4vec(corpus, sample_size_list, fixed_len, **param_dict):
    """Thin wrapper that forwards word2vec hyper-parameters from param_dict.

    Expects 'word_size', 'win_size', 'vec_dim' and 'sg' keys in param_dict.
    """
    w2v_kwargs = {'word_size': param_dict['word_size'],
                  'win_size': param_dict['win_size'],
                  'vec_dim': param_dict['vec_dim'],
                  'skip_gram': param_dict['sg']}
    return word2vec(corpus, sample_size_list, fixed_len, **w2v_kwargs)
<file_sep>import os
from numpy import random
import shutil
import sys
from collections import Counter
from itertools import count, takewhile, product
from MachineLearningAlgorithm.utils.utils_math import construct_partition2two
# Alphabets of DNA, RNA, PROTEIN
DNA = "ACGT"
RNA = "ACGU"
PROTEIN = "ACDEFGHIKLMNPQRSTVWY"
# Each residue-level feature must specify one of these method/words names.
Method_Res = ['One-hot', 'Binary-5bit', 'One-hot-6bit', 'Position-specific-2', 'Position-specific-3',
              'Position-specific-4', 'AESNN3', 'DBE', 'NCP', 'DPC', 'TPC', 'PP', 'PSSM', 'PSFM',
              'PAM250', 'BLOSUM62', 'BLAST-matrix', 'SS', 'SASA', 'RSS', 'CS']
# Each sequence-level (one-hot) feature must specify one of these method/words names.
Method_One_Hot_Enc = ['One-hot', 'Binary-5bit', 'One-hot-6bit', 'Position-specific-2', 'Position-specific-3',
                      'Position-specific-4', 'AESNN3', 'DBE', 'NCP', 'DPC', 'TPC', 'PP', 'PSSM', 'PSFM',
                      'PAM250', 'BLOSUM62', 'BLAST-matrix', 'SS', 'SASA', 'RSS', 'CS']
# Word-segmentation schemes, overall and per sequence category.
All_Words = ['Kmer', 'RevKmer', 'Mismatch', 'Subsequence', 'Top-N-Gram', 'DR', 'DT']
DNA_Words = ['Kmer', 'RevKmer', 'Mismatch', 'Subsequence']
RNA_Words = ['Kmer', 'Mismatch', 'Subsequence']
Protein_Words = ['Kmer', 'Mismatch', 'Top-N-Gram', 'DR', 'DT']
Method_Topic_Model = ['LSA', 'PLSA', 'LDA', 'Labeled-LDA']
Method_Word_Embedding = ['word2vec', 'fastText', 'Glove']
Method_Syntax_Rules = ['DAC', 'DCC', 'DACC', 'TAC', 'TCC', 'TACC', 'MAC', 'GAC', 'NMBAC', 'AC', 'CC', 'ACC', 'PDT',
                       'PDT-Profile', 'AC-PSSM', 'CC-PSSM', 'ACC-PSSM', 'PSSM-DT', 'PSSM-RT', 'ZCPseKNC', 'ND',
                       'Motif-PSSM']
Method_Auto_features = ['MotifCNN', 'MotifDCNN', 'CNN-BiLSTM', 'DCNN-BiLSTM', 'Autoencoder']
Method_Semantic_Similarity = ['ED', 'MD', 'CD', 'HD', 'JSC', 'CS', 'PCC', 'KLD', 'none']
# Human-readable names for the semantic-similarity scores above.
Score_dict = {'ED': 'Euclidean Distance', 'MD': 'Manhattan Distance', 'CD': 'Chebyshev Distance',
              'HD': 'hamming Distance', 'JSC': 'Jaccard Similarity Coefficient', 'CS': 'Cosine Similarity',
              'PCC': 'Pearson Correlation Coefficient', 'KLD': 'Kullback-Leible Divergence'}
# Categories of feature extraction.
Feature_Extract_Mode = ['OHE', 'BOW', 'TF-IDF', 'TR', 'WE', 'TM', 'SR', 'AF']  # 'semantic similarity' ?
Mode = {'OHE': 'one-hot encoding', 'BOW': 'bag of words', 'TF-IDF': 'term frequency–inverse document frequency',
        'TR': 'TextRank', 'WE': 'word embedding', 'TM': 'topic model', 'SR': 'syntax rules', 'AF': 'automatic features'}
Machine_Learning_Algorithm = ['SVM', 'RF', 'CRF', 'CNN', 'LSTM', 'GRU', 'Transformer', 'Weighted-Transformer',
                              'Reformer', 'FastText', 'Bert']
Ml = {'SVM': 'Support Vector Machine(SVM)', 'RF': 'Random Forest(RF)', 'CRF': 'Conditional Random Field(CRF)',
      'CNN': 'Convolutional Neural Networks(CNN)', 'LSTM': 'Long Short-Term Memory(LSTM)',
      'GRU': 'Gate Recurrent Unit(GRU)', 'Transformer': 'Transformer',
      'Weighted-Transformer': 'Weighted-Transformer', 'Reformer': 'Reformer', 'FastText': 'FastText'}
# Algorithm families used for capability checks elsewhere in this module.
DeepLearning = ['CNN', 'LSTM', 'GRU', 'Transformer', 'Weighted-Transformer', 'Reformer', 'FastText']
Classification = ['SVM', 'RF', 'CNN', 'LSTM', 'GRU', 'Transformer', 'Weighted-Transformer', 'Reformer']
SequenceLabelling = ['CRF', 'CNN', 'LSTM', 'GRU', 'Transformer', 'Weighted-Transformer', 'Reformer']
# Paths
Final_Path = '/results/'
Batch_Path_Seq = '/results/batch/Seq/'
Batch_Path_Res = '/results/batch/Res/'
FE_PATH_Res = '/results/FE/Res/'
FE_BATCH_PATH_Res = '/results/batch/FE/Res/'
FE_PATH_Seq = '/results/FE/Seq/'
FE_BATCH_PATH_Seq = '/results/batch/FE/Seq/'
# Metric
Metric_Index = {'Acc': 0, 'MCC': 1, 'AUC': 2, 'BAcc': 3, 'F1': 8}
Metric_dict = {'Acc': 'Accuracy', 'MCC': 'Matthews Correlation Coefficient', 'AUC': 'Area Under Curve',
               'BAcc': 'Balanced-Accuracy', 'F1': 'F-Measure'}
def seq_sys_check(args, res=False):
    """Print a summary of the selected analysis settings and reject
    sequence-labelling algorithms for non-one-hot feature modes."""
    print('************************** PLEASE CHECK **************************')
    if args.mode != 'OHE':
        incompatible_msg = ('The ' + Ml[args.ml] +
                            ' machine learning algorithm can only construct predictor for one-hot feature!')
        assert args.ml not in SequenceLabelling, incompatible_msg
    print('Analysis category: %s sequence' % args.category)
    if res is False:
        print('Feature extraction mode: ', Mode[args.mode])
    else:
        print('Feature extraction method: ', args.method)
    print('Machine learning algorithm: ', Ml[args.ml])
    print('*******************************************************************')
    print('\n')
def check_contain_chinese(check_str):
    """Assert that the given path/file name contains no Chinese characters.

    :param check_str: string to be checked.
    :raises AssertionError: if a character falls in the CJK range U+4E00-U+9FFF.
    """
    # Round-trip through GBK as the original did; characters outside the GBK
    # charset raise UnicodeEncodeError here.
    decoded = str(check_str.encode('gbk'), "gbk")
    for char in decoded:
        assert char < '\u4e00' or char > '\u9fff', 'Error: the path can not contain Chinese characters.'
def results_dir_check(results_dir):
    """Ensure a fresh results directory exists, wiping any previous contents.

    OSErrors (e.g. permission problems) are deliberately swallowed, matching
    the original best-effort behaviour.
    """
    if os.path.exists(results_dir):
        # Delete the stale directory first, then recreate it empty.
        try:
            shutil.rmtree(results_dir)
            print('results_dir:', results_dir)
            os.makedirs(results_dir)
        except OSError:
            pass
    else:
        try:
            os.makedirs(results_dir)
            print('results_dir:', results_dir)
        except OSError:
            pass
def make_params_dicts(params_list_dict):
    """Expand {param: [candidate values]} into every parameter combination.

    Example: {'k': [1, 2], 'w': [0.5]} -> [{'k': 1, 'w': 0.5}, {'k': 2, 'w': 0.5}]

    :param params_list_dict: mapping from parameter name to a list of candidates.
    :return: list of dicts, one per element of the Cartesian product (an empty
             input yields [{}], matching itertools.product()).
    """
    keys = list(params_list_dict.keys())
    return [dict(zip(keys, combo)) for combo in product(*params_list_dict.values())]
def residue_check(ml, cv, fragment, statistics_label):
    """Validate residue-level settings for the chosen algorithm.

    :param ml: machine-learning algorithm name (e.g. 'CRF', 'SVM').
    :param cv: cross-validation choice; 'i' appears to mean independent/jackknife
               style validation — TODO confirm against the CLI parser.
    :param fragment: fragment-method flag.
    :param statistics_label: collection of distinct labels in the dataset.
    """
    assert ml != 'CRF' or cv != 'i', "Error: the CRF can only use the k-fold cross-validation"
    # NOTE(review): the two checks below compare against lowercase 'crf' while
    # algorithm names elsewhere are uppercase ('CRF'), which makes them dead
    # code for uppercase inputs — confirm the intended casing and the meaning
    # of fragment == 1 against the caller.
    assert ml != 'crf' or fragment == 1, "Sorry, If you use fragment method only svm and rf can be used!"
    # Judge the crf, only use for binary situation.
    # BUGFIX: the condition previously asserted len != 2, which contradicted
    # the error message ("only use for binary situation").
    assert ml != 'crf' or len(statistics_label) == 2, "Error: the CRF only use for binary situation!"
def ohe_method_error(args):
    """Reject one-dimensional OHE encodings for deep-learning algorithms.

    The 'SASA', 'RSS' and 'CS' methods are rejected when a deep-learning model
    is selected (per the original assertion's message about one-dimensional
    vectors).
    """
    # BUGFIX: the message previously read "isn't support Depp Learning".
    assert args.ml not in DeepLearning or args.method not in ['SASA', 'RSS', 'CS'], \
        "One-dimension vectors don't support Deep Learning"
def seq_feature_check(args):
    """Validate that the method/words selection is legal for the chosen mode.

    Raises AssertionError with a mode-specific message on an illegal choice.
    """
    if args.mode == 'OHE':
        assert args.method in Method_One_Hot_Enc, "Please check method for 'OHE' mode!"
    elif args.mode == 'BOW':
        # BOW words are restricted per sequence category.
        if args.category == 'DNA':
            assert args.words in DNA_Words, "Please check words for 'BOW' mode of DNA sequence!"
        elif args.category == 'RNA':
            assert args.words in RNA_Words, "Please check words for 'BOW' mode of RNA sequence!"
        else:
            assert args.words in Protein_Words, "Please check words for 'BOW' mode of Protein sequence!"
    elif args.mode == 'TM':
        assert args.method in Method_Topic_Model, "Please check method for 'TM' mode!"
    elif args.mode == 'WE':
        assert args.method in Method_Word_Embedding, "Please check method for 'WE' mode!"
    elif args.mode == 'SR':
        assert args.method in Method_Syntax_Rules, "Please check method for 'SR' mode!"
    elif args.mode == 'AF':
        assert args.method in Method_Auto_features, "Please check method for 'AF' mode!"
def res_feature_check(args):
    """Validate the method choice for residue-level one-hot encoding."""
    assert args.mode != 'OHE' or args.method in Method_Res, "Please check method for 'OHE' mode!"
def f_range(start, stop, step):
    """Like range() but supporting float steps: yields start, start+step, ...
    for as long as the value stays strictly below stop."""
    value = start
    while value < stop:
        yield value
        value += step
def one_hot_check(args, **params_dict):
    """Attach shared runtime options for one-hot encoding methods.

    The cpu count is always forwarded (wrapped in a list so it composes with
    the grid-expansion machinery used by the other checkers).
    """
    params_dict['cpu'] = [args.cpu]
    return params_dict
def bow_check(args, **params_dict):
    """Populate BOW hyper-parameter grids from CLI arguments.

    Each word-segmentation scheme contributes its own grid: 'word_size' for
    all BOW words, 'mis_num' for Mismatch, 'delta' for Subsequence, 'top_n'
    for Top-N-Gram and 'max_dis' for DR/DT.  args.auto_opt selects a preset
    grid (1 = rough, 2 = fine); otherwise a user-supplied
    [start(, stop(, step))] spec is expanded.

    :return: updated params_dict, or False when a required spec is missing or
             malformed (an error is written to stderr).
    """

    def expand_int(values, name):
        # Expand a 1-3 element integer spec into an inclusive range list;
        # returns None after reporting an error for a missing/oversized spec.
        if values is None:
            sys.stderr.write('Parameter "%s" missed!' % name)
            return None
        if len(values) == 1:
            return list(range(values[0], values[0] + 1, 1))
        if len(values) == 2:
            return list(range(values[0], values[1] + 1, 1))
        if len(values) == 3:
            return list(range(values[0], values[1] + 1, values[2]))
        sys.stderr.write('The number of input value of parameter "%s" should be no more than 3!' % name)
        return None

    def expand_float(values, name):
        # Float analogue of expand_int, stepping by 0.1 unless a step is given.
        if values is None:
            sys.stderr.write('Parameter "%s" missed!' % name)
            return None
        if len(values) == 1:
            return list(f_range(values[0], values[0] + 0.1, 0.1))
        if len(values) == 2:
            return list(f_range(values[0], values[1] + 0.1, 0.1))
        if len(values) == 3:
            return list(f_range(values[0], values[1] + 0.1, values[2]))
        sys.stderr.write('The number of input value of parameter "%s" should be no more than 3!' % name)
        return None

    if args.words in ['Kmer', 'RevKmer', 'Mismatch', 'Subsequence']:
        if args.auto_opt == 1:
            params_dict['word_size'] = list(range(1, 5, 1))
        elif args.auto_opt == 2:
            # TODO: the fine grid may need to distinguish protein from nucleotide alphabets.
            params_dict['word_size'] = list(range(1, 7, 1))
        else:
            word_size = expand_int(args.word_size, 'word_size')
            if word_size is None:
                return False
            params_dict['word_size'] = word_size
    if args.words in ['Mismatch']:
        if args.auto_opt == 1:
            params_dict['mis_num'] = list(range(1, 5, 1))
        elif args.auto_opt == 2:
            params_dict['mis_num'] = list(range(1, 7, 1))
        else:
            mis_num = expand_int(args.mis_num, 'mis_num')
            if mis_num is None:
                return False
            params_dict['mis_num'] = mis_num
    if args.words in ['Subsequence']:
        if args.auto_opt == 1:
            params_dict['delta'] = list(f_range(0, 0.8, 0.2))
        elif args.auto_opt == 2:
            params_dict['delta'] = list(f_range(0, 1, 0.1))
        else:
            delta = expand_float(args.delta, 'delta')
            if delta is None:
                return False
            params_dict['delta'] = delta
    if args.words in ['Top-N-Gram']:
        if args.auto_opt == 1:
            params_dict['top_n'] = list(range(1, 3, 1))
        elif args.auto_opt == 2:
            params_dict['top_n'] = list(range(1, 4, 1))
        else:
            top_n = expand_int(args.top_n, 'top_n')
            if top_n is None:
                return False
            params_dict['top_n'] = top_n
    if args.words in ['DR', 'DT']:
        if args.auto_opt == 1:
            params_dict['max_dis'] = list(range(1, 5, 1))
        elif args.auto_opt == 2:
            params_dict['max_dis'] = list(range(1, 7, 1))
        else:
            max_dis = expand_int(args.max_dis, 'max_dis')
            if max_dis is None:
                # BUGFIX: previously a missing 'max_dis' wrote an error but
                # fell through without returning False (unlike every sibling branch).
                return False
            params_dict['max_dis'] = max_dis
    if args.words in ['Top-N-Gram', 'DT']:
        params_dict['cpu'] = [args.cpu]
    # special for Mismatch BOW: mis_num is pinned to 1 and a word size of 1 is
    # dropped — presumably a 1-mer cannot tolerate a mismatch (TODO confirm).
    if 'mis_num' in list(params_dict.keys()):
        params_dict['mis_num'] = [1]
        if params_dict['word_size'][0] == 1:
            params_dict['word_size'] = params_dict['word_size'][1:]
    return params_dict
def words_check(args, **params_dict):
    """Populate word-segmentation hyper-parameter grids (TF-IDF/TextRank/WE).

    Fills 'word_size' for every word scheme, 'top_n' for Top-N-Gram, 'max_dis'
    for DR/DT, and the cpu count where external tools are involved.
    args.auto_opt selects a preset grid (1 = rough, 2 = fine); otherwise a
    user-supplied [start(, stop(, step))] spec is expanded.

    :return: updated params_dict, or False when a required spec is missing or
             malformed (an error is written to stderr).
    """

    def expand_int(values, name):
        # Expand a 1-3 element integer spec into an inclusive range list;
        # returns None after reporting an error for a missing/oversized spec.
        if values is None:
            sys.stderr.write('Parameter "%s" missed!' % name)
            return None
        if len(values) == 1:
            return list(range(values[0], values[0] + 1, 1))
        if len(values) == 2:
            return list(range(values[0], values[1] + 1, 1))
        if len(values) == 3:
            return list(range(values[0], values[1] + 1, values[2]))
        sys.stderr.write('The number of input value of parameter "%s" should be no more than 3!' % name)
        return None

    if args.words in ['Kmer', 'RevKmer', 'Mismatch', 'Subsequence', 'Top-N-Gram', 'DR', 'DT']:
        if args.auto_opt == 1:
            params_dict['word_size'] = list(range(1, 5, 1))
        elif args.auto_opt == 2:
            params_dict['word_size'] = list(range(1, 6, 1))
        else:
            word_size = expand_int(args.word_size, 'word_size')
            if word_size is None:
                return False
            params_dict['word_size'] = word_size
    if args.words in ['Top-N-Gram']:
        if args.auto_opt == 1:
            params_dict['top_n'] = list(range(1, 3, 1))
        elif args.auto_opt == 2:
            params_dict['top_n'] = list(range(1, 4, 1))
        else:
            top_n = expand_int(args.top_n, 'top_n')
            if top_n is None:
                return False
            params_dict['top_n'] = top_n
    if args.words in ['DR', 'DT']:
        if args.auto_opt == 1:
            params_dict['max_dis'] = list(range(1, 5, 1))
        elif args.auto_opt == 2:
            params_dict['max_dis'] = list(range(1, 7, 1))
        else:
            max_dis = expand_int(args.max_dis, 'max_dis')
            if max_dis is None:
                # BUGFIX: previously a missing 'max_dis' wrote an error but
                # fell through without returning False (unlike every sibling branch).
                return False
            params_dict['max_dis'] = max_dis
    if args.words in ['Top-N-Gram', 'DT']:
        params_dict['cpu'] = [args.cpu]
    return params_dict
def tr_check(args, **params_dict):
    """Parameter grid for TextRank features: words grid plus the damping alpha."""
    checked = words_check(args, **params_dict)
    checked['alpha'] = [args.alpha]
    return checked
def we_check(args, **params_dict):
    """Parameter grid for word-embedding features (word2vec/fastText/Glove).

    Glove uses larger defaults (window 10, dim 100); the others also expose
    the skip-gram switch and default to window 5, dim 10.
    """
    params_dict = words_check(args, **params_dict)
    if args.method == 'Glove':
        default_win, default_dim = 10, 100
    else:
        params_dict['sg'] = [args.sg]
        default_win, default_dim = 5, 10
    params_dict['win_size'] = [args.win_size if args.win_size is not None else default_win]
    params_dict['vec_dim'] = [args.vec_dim if args.vec_dim is not None else default_dim]
    return params_dict
def tm_check(args, **params_dict):
    """Parameter grid for topic-model features, delegating to the checker that
    matches the input representation (BOW vs word-based)."""
    base_checker = bow_check if args.in_tm == 'BOW' else words_check
    params_dict = base_checker(args, **params_dict)
    if args.in_tm == 'TextRank':
        params_dict['win_size'] = [args.win_size if args.win_size is not None else 3]
    params_dict['com_prop'] = [args.com_prop]
    return params_dict
# Auto-/cross-covariance style syntax-rule methods that share the 'lag' parameter.
METHODS_ACC_S = ['DAC', 'DCC', 'DACC', 'TAC', 'TCC', 'TACC', 'AC', 'CC', 'ACC']
def sr_check(args, **params_dict):
    """Populate syntax-rule (SR) hyper-parameter grids from CLI arguments.

    Depending on args.method this fills 'lamada', 'lag', 'w', 'k', 'n',
    'batch_size' and/or 'cpu'.  args.auto_opt selects preset grids
    (1 = rough, 2 = fine); otherwise a user-supplied [start(, stop(, step))]
    spec is expanded.

    :return: updated params_dict, or False when a required spec is missing or
             malformed (an error is written to stderr).
    """

    def expand_int(values, name):
        # Expand a 1-3 element integer spec into an inclusive range list;
        # returns None after reporting an error for a missing/oversized spec.
        if values is None:
            sys.stderr.write('Parameter "%s" missed!' % name)
            return None
        if len(values) == 1:
            return list(range(values[0], values[0] + 1, 1))
        if len(values) == 2:
            return list(range(values[0], values[1] + 1, 1))
        if len(values) == 3:
            return list(range(values[0], values[1] + 1, values[2]))
        sys.stderr.write('The number of input value of parameter "%s" should be no more than 3!' % name)
        return None

    def expand_float(values, name):
        # Float analogue of expand_int, stepping by 0.1 unless a step is given.
        if values is None:
            sys.stderr.write('Parameter "%s" missed!' % name)
            return None
        if len(values) == 1:
            return list(f_range(values[0], values[0] + 0.1, 0.1))
        if len(values) == 2:
            return list(f_range(values[0], values[1] + 0.1, 0.1))
        if len(values) == 3:
            return list(f_range(values[0], values[1] + 0.1, values[2]))
        sys.stderr.write('The number of input value of parameter "%s" should be no more than 3!' % name)
        return None

    if args.method in ['MAC', 'GAC', 'NMBAC', 'PDT', 'PDT-Profile', 'ZCPseKNC']:
        if args.auto_opt == 1:
            params_dict['lamada'] = list(range(1, 8, 1))
        elif args.auto_opt == 2:
            params_dict['lamada'] = list(range(1, 10, 1))
        else:
            lamada = expand_int(args.lamada, 'lamada')
            if lamada is None:
                return False
            params_dict['lamada'] = lamada
    if args.method in METHODS_ACC_S or args.method in ['ACC-PSSM', 'AC-PSSM', 'CC-PSSM']:
        if args.auto_opt == 1:
            params_dict['lag'] = list(range(1, 8, 1))
        elif args.auto_opt == 2:
            params_dict['lag'] = list(range(1, 10, 1))
        else:
            lag = expand_int(args.lag, 'lag')
            if lag is None:
                return False
            params_dict['lag'] = lag
    if args.method in ['ZCPseKNC']:
        # ZCPseKNC needs both the weight factor w and the k-mer size k.
        if args.auto_opt == 1:
            params_dict['w'] = list(f_range(0, 0.8, 0.1))
        elif args.auto_opt == 2:
            params_dict['w'] = list(f_range(0, 1, 0.1))
        else:
            w = expand_float(args.w, 'w')
            if w is None:
                return False
            params_dict['w'] = w
        if args.auto_opt == 1:
            params_dict['k'] = list(range(1, 5, 1))
        elif args.auto_opt == 2:
            params_dict['k'] = list(range(1, 6, 1))
        else:
            k = expand_int(args.k, 'k')
            if k is None:
                return False
            params_dict['k'] = k
    if args.method in ['PDT-Profile']:
        if args.auto_opt == 1:
            params_dict['n'] = list(range(1, 3, 1))
        elif args.auto_opt == 2:
            params_dict['n'] = list(range(1, 4, 1))
        else:
            n = expand_int(args.n, 'n')
            if n is None:
                return False
            params_dict['n'] = n
    if args.method == 'Motif-PSSM':
        params_dict['batch_size'] = [args.batch_size]
    # Methods that invoke external profile tools also need the cpu count.
    if args.method in ['PDT-Profile', 'AC-PSSM', 'CC-PSSM', 'ACC-PSSM', 'PSSM-DT', 'PSSM-RT', 'Motif-PSSM']:
        params_dict['cpu'] = [args.cpu]
    return params_dict
def af_check(args, **params_dict):
    """Parameter grid for automatic-feature (deep) extraction.

    Every hyper-parameter is a single-valued list so it composes with the
    grid-expansion machinery; note the dropout arg is stored under 'prob'
    and epochs under 'epoch'.
    """
    single_valued = {'prob': args.dropout, 'lr': args.lr, 'epoch': args.epochs,
                     'batch_size': args.batch_size, 'fea_dim': args.fea_dim}
    for name, value in single_valued.items():
        params_dict[name] = [value]
    return params_dict
def mode_params_check(args, all_params_list_dict, res=False):
    """Run the mode-specific parameter checker.

    For sequence features (res=False) two grids are produced: a fresh one and
    the caller-supplied aggregate grid, returned as a tuple.  For residue
    features (res truthy) only the one-hot checker applies and a single grid
    is returned.
    """
    if res is not False:
        # Residue-level features only support one-hot encoding parameters.
        return one_hot_check(args, **{})
    checkers = {'OHE': one_hot_check, 'BOW': bow_check, 'TF-IDF': words_check,
                'TR': tr_check, 'WE': we_check, 'TM': tm_check, 'SR': sr_check}
    checker = checkers.get(args.mode, af_check)  # anything else falls back to AF
    params_list_dict = checker(args, **{})  # Example: {k: [1, 2, 3], w: [0.7, 0.8]}
    all_params_list_dict = checker(args, **all_params_list_dict)
    return params_list_dict, all_params_list_dict
def svm_params_check(cost, gamma, grid=0, param_list_dict=None):  # 1: meticulous; 0: 'rough'.
    """Expand the SVM cost/gamma (log2-exponent) grids into param_list_dict.

    cost and gamma are optional [start(, stop(, step))] specs; when supplied
    they are consumed (emptied) just as the original pop-based code did.  When
    absent a preset grid is used whose density depends on ``grid``.

    :return: param_list_dict with 'cost' and 'gamma' lists, or False on a
             malformed spec.
    """
    if param_list_dict is None:
        # BUGFIX: calling with the default previously crashed with a TypeError
        # when subscripting None.
        param_list_dict = {}
    if cost is not None:
        if len(cost) == 1:
            c_range = range(cost[0], cost[0] + 1, 1)
        elif len(cost) == 2:
            c_range = range(cost[0], cost[1] + 1, 1)
        elif len(cost) == 3:
            c_range = range(cost[0], cost[1] + 1, cost[2])
        else:
            error_info = 'The number of input value of parameter "cost" should be no more than 3!'
            sys.stderr.write(error_info)
            return False
        cost.clear()  # the original popped each element; callers may rely on the list being emptied
    else:
        c_range = range(-5, 11, 3) if grid == 0 else range(-5, 11, 1)
    if gamma is not None:
        if len(gamma) == 1:
            g_range = range(gamma[0], gamma[0] + 1, 1)
        elif len(gamma) == 2:
            g_range = range(gamma[0], gamma[1] + 1, 1)
        elif len(gamma) == 3:
            g_range = range(gamma[0], gamma[1] + 1, gamma[2])
        else:
            error_info = 'The number of input value of parameter "gamma" should be no more than 3!'
            sys.stderr.write(error_info)
            return False
        gamma.clear()
    else:
        g_range = range(-10, 6, 3) if grid == 0 else range(-10, 6, 1)
    param_list_dict['cost'] = list(c_range)
    param_list_dict['gamma'] = list(g_range)
    return param_list_dict
def rf_params_check(tree, grid='r', param_list_dict=None):  # 'm': meticulous; 'r': 'rough'.
    """Expand the random-forest tree-count grid into param_list_dict.

    tree is an optional [start(, stop(, step))] spec; when supplied it is
    consumed (emptied) just as the original pop-based code did.  When absent a
    preset grid (density per ``grid``) is used.

    :return: param_list_dict with a 'tree' list, or False on a malformed spec.
    """
    if param_list_dict is None:
        # BUGFIX: calling with the default previously crashed with a TypeError
        # when subscripting None.
        param_list_dict = {}
    if tree is not None:
        if len(tree) == 1:
            t_range = range(tree[0], tree[0] + 10, 10)
        elif len(tree) == 2:
            t_range = range(tree[0], tree[1] + 10, 10)
        elif len(tree) == 3:
            t_range = range(tree[0], tree[1] + 10, tree[2])
        else:
            # BUGFIX: the message previously referred to "cost" (copy-paste
            # from the SVM checker).
            error_info = 'The number of input value of parameter "tree" should be no more than 3!'
            sys.stderr.write(error_info)
            return False
        tree.clear()  # the original popped each element
    else:
        # NOTE(review): ml_params_check passes args.grid, which is numeric for
        # the SVM checker — confirm the 'r'/'m' convention matches the CLI.
        t_range = range(100, 600, 200) if grid == 'r' else range(100, 600, 100)
    param_list_dict['tree'] = list(t_range)
    return param_list_dict
def ml_params_check(args, all_params_list_dict):
    """Attach classifier hyper-parameter grids for the selected ML algorithm
    (SVM gets cost/gamma, anything else gets the RF tree grid)."""
    if args.ml == 'SVM':
        return svm_params_check(args.cost, args.gamma, args.grid, all_params_list_dict)
    return rf_params_check(args.tree, args.grid, all_params_list_dict)
# Parameter check for the deep-learning models.
def dl_params_check(args, params_list_dict):
    """Collect single-valued training hyper-parameters for the chosen
    deep-learning algorithm.

    The four optimiser settings are always included; model-specific settings
    are looked up per algorithm, with the Reformer set as the fallback.
    """
    for name in ('lr', 'epochs', 'batch_size', 'dropout'):
        params_list_dict[name] = [getattr(args, name)]
    model_specific = {
        'LSTM': ('hidden_dim', 'n_layer'),
        'GRU': ('hidden_dim', 'n_layer'),
        'CNN': ('out_channels', 'kernel_size'),
        'Transformer': ('d_model', 'd_ff', 'n_heads', 'n_layer'),
        'Weighted-Transformer': ('d_model', 'd_ff', 'n_heads', 'n_layer'),
        'FastText': ('hidden_dim',),
    }
    # Any unlisted algorithm (i.e. Reformer) takes the full attention set.
    reformer_attrs = ('d_model', 'd_ff', 'n_heads', 'n_layer', 'n_chunk', 'rounds', 'bucket_length')
    for name in model_specific.get(args.ml, reformer_attrs):
        params_list_dict[name] = [getattr(args, name)]
    return params_list_dict
def crf_params_check(args, params_list_dict):
    """Collect single-valued training hyper-parameters for the CRF model."""
    for name in ('lr', 'epochs', 'batch_size'):
        params_list_dict[name] = [getattr(args, name)]
    return params_list_dict
# def table_params(params_dict, opt=False):
# tb = pt.PrettyTable()
#
# if opt is False:
# print('Parameter details'.center(21, '*'))
# tb.field_names = ["parameter", "value"]
# else:
# print('\n')
# print('\n')
# print('+---------------------------+')
# print('| Optimal parameter details |')
# print('+---------------------------+')
# tb.field_names = ["parameter", "optimal value"]
# for item in list(params_dict.items()):
# if item[0] not in ['out_files', 'ind_out_files']:
# tb.add_row(item)
# print(tb)
# print('\n')
def prepare4train_seq(args, label_array, dl):
    """Configure sequence-level training before parameter selection.

    Chooses the cross-validation scheme, freezes the fold partition, and
    (for classical ML only) records the model-selection metric and the
    sampling strategy.  Mutates ``args`` in place, prints a summary table
    via ``print_base_dict`` and returns ``args``.
    """
    info_dict = {}
    if args.cv == 'j':
        # Jackknife: one fold per sample (leave-one-out).
        args.folds_num = sum(args.sample_num_list)
        info_dict['Validation method'] = 'Jackknife cross validation'
    elif args.cv == '10':
        args.folds_num = 10
        info_dict['Validation method'] = '10-fold cross validation'
    else:
        args.folds_num = 5
        info_dict['Validation method'] = '5-fold cross validation'
    # Freeze the per-fold indices so every parameter combination is
    # evaluated on identical splits.
    args.folds = construct_partition2two(label_array, args.folds_num, True)
    if dl is False:
        args.metric_index = Metric_Index[args.metric]
        info_dict['Metric for selection'] = Metric_dict[args.metric]
        if args.sp != 'none':
            sampling_names = {'over': 'oversampling ', 'under': 'undersampling '}
            info_dict['Technique for sampling'] = sampling_names.get(
                args.sp, 'combine oversampling and undersampling ')
    # More than two distinct labels -> multi-class problem.
    args.multi = len(Counter(label_array)) > 2
    if args.multi:
        info_dict['Type of Problem'] = 'Multi-class classification'
    else:
        info_dict['Type of Problem'] = 'Binary classification'
    print_base_dict(info_dict)
    return args
def print_base_dict(info_dict):
print('\r')
key_max_len = 0
val_max_len = 0
for key, val in info_dict.items():
key_max_len = max(key_max_len, len(key))
val_max_len = max(val_max_len, len(val))
tag = '--'
header = 'Basic settings for Parameter Selection'
header_str1 = '+' + tag.center(key_max_len + val_max_len + 9, '-') + '+'
header_str2 = '|' + header.center(key_max_len + val_max_len + 9, ' ') + '|'
print(header_str1)
print(header_str2)
up_dn_str = '+' + tag.center(key_max_len + 4, '-') + '+' + tag.center(val_max_len + 4, '-') + '+'
print(up_dn_str)
for key, val in info_dict.items():
temp_str = '|' + str(key).center(key_max_len + 4, ' ') + '|' + str(val).center(val_max_len + 4, ' ') + '|'
print(temp_str)
print(up_dn_str)
print('\r')
def print_fe_dict(params_dict):
print_dict = {}
for key in list(params_dict.keys()):
if key in ['word_size', 'mis_num', 'delta', 'top_n', 'alpha', 'win_size', 'vec_dim', 'lamada', 'lag',
'w', 'k', 'n']:
print_dict[key] = params_dict[key]
if print_dict != {}:
print('\r')
key_max_len = 20
val_max_len = 20
tag = '--'
header = 'Feature Extraction Parameters'
header_str1 = '+' + tag.center(key_max_len + val_max_len + 9, '-') + '+'
header_str2 = '|' + header.center(key_max_len + val_max_len + 9, ' ') + '|'
print(header_str1)
print(header_str2)
up_dn_str = '+' + tag.center(key_max_len + 4, '-') + '+' + tag.center(val_max_len + 4, '-') + '+'
print(up_dn_str)
for key, val in print_dict.items():
temp_str = '|' + str(key).center(key_max_len + 4, ' ') + '|' + str(val).center(val_max_len + 4, ' ') + '|'
print(temp_str)
print(up_dn_str)
print('\r')
def prepare4train_res(args, label_array, dl):
    """Configure residue-level training: pick the CV scheme, freeze the fold
    partition, and (classical ML only) set the model-selection metric.

    :param args: argparse.Namespace, mutated in place and returned.
    :param label_array: per-residue labels used to build the fold partition.
    :param dl: True when a deep-learning model will be trained.
    :return: the same ``args`` with folds_num/folds/multi (and metric_index
             for classical ML) filled in.
    """
    info_dict = {}
    if args.cv == 'j':
        # Jackknife: one fold per sample (leave-one-out).
        args.folds_num = sum(args.sample_num_list)
        info_dict['Validation method'] = 'Jackknife cross validation'
    elif args.cv == '10':
        args.folds_num = 10
        info_dict['Validation method'] = '10-fold cross validation'
    else:
        args.folds_num = 5
        info_dict['Validation method'] = '5-fold cross validation'
    if dl is False:
        args.folds = construct_partition2two(label_array, args.folds_num, True)  # freeze the per-fold indices
        args.metric_index = Metric_Index[args.metric]
        info_dict['Metric for selection'] = Metric_dict[args.metric]
    else:
        # NOTE(review): `random` here looks like numpy.random (normal() with
        # loc/scale/size keywords) — confirm against this module's imports.
        # Labels are replaced by pseudo values before the unstratified split.
        label_array = random.normal(loc=0.0, scale=1, size=(len(label_array)))
        args.folds = construct_partition2two(label_array, args.folds_num, False)  # freeze the per-fold indices
    # Multi-class detection is disabled: residue labelling is treated as binary.
    # if len(Counter(label_array).keys()) > 2:
    #     args.multi = True
    #     info_dict['Type of Problem'] = 'Multi-class classification'
    # else:
    args.multi = False
    info_dict['Type of Problem'] = 'Binary classification'
    print_base_dict(info_dict)
    return args
<file_sep>import os
import random
from itertools import cycle
import numpy as np
import pandas as pd
import scipy.cluster.hierarchy as sch
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def plot_2d(data, labels, path, ind=False):
    """Scatter-plot 2-D reduced features coloured by label and save a PNG.

    :param data: array of shape (n_samples, >=2); only columns 0 and 1 are drawn.
    :param labels: per-sample labels; empty sequence -> single red cloud.
    :param path: output directory prefix (assumed to end with a separator).
    :param ind: True -> use the '_ind' (independent test) file name.
    """
    if ind is True:
        figure_name = path + 'dimension_reduction_2d_ind.png'
    else:
        figure_name = path + 'dimension_reduction_2d.png'
    # Remove any stale figure so a failed save is not mistaken for output.
    if os.path.isfile(figure_name):
        try:
            os.remove(figure_name)
        except OSError:
            print('File delete error!')
            pass
    color_sets = cycle(['crimson', 'navy', 'teal', 'darkorange', 'slategrey'])
    color_set = []
    label_set = list(set(labels))
    # Zipping against the cycle picks exactly one colour per distinct label.
    for i, j in zip(label_set, color_sets):
        color_set.append(j)
    my_dict = {}
    for i in range(len(label_set)):
        my_dict[label_set[i]] = color_set[i]
    plt.figure(0)
    if len(labels) == 0:
        plt.scatter(data[:, 0], data[:, 1], 20, c='r')
    else:
        df = pd.DataFrame({'X': data[:, 0], 'Y': data[:, 1], 'L': labels})
        labels_set = set(labels)
        # One scatter call per label so each gets its own legend entry.
        for k in labels_set:
            new_data = df.loc[df.loc[:, "L"] == k, :]
            plt.scatter(np.array(new_data.X), np.array(new_data.Y), s=50, c=my_dict[k], alpha=0.7, label="Label_%s" % k)
        plt.legend(loc='best')
    plt.title('2D-figure of dimension reduction', fontsize=18)
    plt.xlabel('First principal component', fontsize=14)
    plt.ylabel('Second principal component', fontsize=14)
    ax_width = 1
    ax = plt.gca()  # grab the current axes to style its frame
    ax.spines['bottom'].set_linewidth(ax_width)
    ax.spines['left'].set_linewidth(ax_width)
    ax.spines['top'].set_linewidth(ax_width)
    ax.spines['right'].set_linewidth(ax_width)
    plt.savefig(figure_name, dpi=600, transparent=True, bbox_inches='tight')
    plt.close(0)
    full_path = os.path.abspath(figure_name)
    if os.path.isfile(full_path):
        print('The output 2D-figure for dimension reduction can be found:')
        print(full_path)
        print('\n')
def plot_3d(data, labels, path, ind=False):
    """Scatter-plot the first three reduced components in 3-D and save a PNG.

    :param data: array-like of shape (n_samples, >=3); columns 0-2 are drawn.
    :param labels: per-sample labels used to pick marker/colour per point.
    :param path: output directory prefix (assumed to end with a separator).
    :param ind: True -> use the '_ind' (independent test) file name.
    """
    if ind is True:
        figure_name = path + 'dimension_reduction_3d_ind.png'
    else:
        figure_name = path + 'dimension_reduction_3d.png'
    # Remove any stale figure so a failed save is not mistaken for output.
    if os.path.isfile(figure_name):
        try:
            os.remove(figure_name)
        except OSError:
            print('File delete error!')
            pass
    mark_sets = cycle(['o', 'o'])
    color_sets = cycle(['crimson', 'navy', 'teal', 'darkorange', 'slategrey'])
    label_set = list(set(labels))
    # Map each distinct label to an index into the marker/colour lists.
    my_dict = {}
    m = 0
    for i in label_set:
        my_dict[i] = m
        m = m + 1
    mark_set = []
    color_set = []
    for i, j, k in zip(label_set, mark_sets, color_sets):
        mark_set.append(j)
        color_set.append(k)
    # mc[i] = (marker, colour) for sample i, stored as strings.
    mc = np.zeros((len(labels), 2)).astype(str)
    for i in range(len(labels)):
        mc[i][0], mc[i][1] = mark_set[my_dict[labels[i]]], color_set[my_dict[labels[i]]]
    fig = plt.figure(0)
    axes3d = Axes3D(fig)
    # One scatter call per point (colour looked up per sample).
    for i in range(len(data)):
        axes3d.scatter(data[i][0], data[i][1], data[i][2], s=40, c=mc[i][1], alpha=0.7)
    plt.title('3D-figure of dimension reduction', fontsize=18)
    plt.xlabel('First PC', fontsize=12)
    plt.ylabel('Second PC', fontsize=12)
    axes3d.set_zlabel('Third PC', fontsize=12)
    plt.savefig(figure_name, dpi=600, transparent=True, bbox_inches='tight')
    plt.close(0)
    full_path = os.path.abspath(figure_name)
    if os.path.isfile(full_path):
        print('The output 3D-figure for dimension reduction can be found:')
        print(full_path)
        print('\n')
def plot_clustering_2d(data, my_cluster, path, mode, ind=False):
    """Project clustered data to 2-D (t-SNE, PCA fallback) and save a PNG.

    :param data: feature matrix; transposed first unless ``mode == 'sample'``
                 (i.e. feature-mode clustering plots the features as points).
    :param my_cluster: table whose column 1 holds cluster ids; None -> no-op
                       apart from stale-file cleanup.
    :param path: output directory prefix (assumed to end with a separator).
    :param mode: 'sample' to cluster samples, anything else clusters features.
    :param ind: True -> use the '_ind' (independent test) file name.
    """
    if ind is True:
        figure_name = path + 'clustering_2d_ind.png'
    else:
        figure_name = path + 'clustering_2d.png'
    # Remove any stale figure so a failed save is not mistaken for output.
    if os.path.isfile(figure_name):
        try:
            os.remove(figure_name)
        except OSError:
            print('File delete error!')
            pass
    if my_cluster is not None:
        if mode == 'sample':
            data = data
        else:
            data = data.T
        # Column 0 is assumed to be the item id; column 1+ the cluster label.
        labels = np.array(my_cluster)[0:, 1:].reshape(-1, )
        clusters = list(map(float, labels))
        clustering = np.array(clusters)
        try:
            # print(data)
            y_2d = TSNE(n_components=2, init='pca', random_state=42).fit_transform(data)
            # y_2d = TSNE(n_components=2).fit_transform(data)
        except RuntimeWarning:
            y_2d = PCA(n_components=2).fit_transform(data)
        color_sets = cycle(['crimson', 'navy', 'teal', 'darkorange', 'slategrey', 'purple'])
        color_set = []
        label_set = list(set(labels))
        for i, j in zip(label_set, color_sets):
            color_set.append(j)
        my_dict = {}
        for i in range(len(label_set)):
            my_dict[label_set[i]] = color_set[i]
        plt.figure(0)
        labels_set = set(labels)
        # With more than 6 clusters the discrete palette runs out: fall back
        # to a continuous colour mapping and skip the legend.
        if len(labels_set) > 6:
            plt.scatter(y_2d[:, 0], y_2d[:, 1], s=50, c=clustering, alpha=0.7)  # adjusted!
        else:
            df = pd.DataFrame({'X': y_2d[:, 0], 'Y': y_2d[:, 1], 'L': labels})
            for k in labels_set:
                new_data = df.loc[df.loc[:, "L"] == k, :]
                plt.scatter(np.array(new_data.X), np.array(new_data.Y), s=50, linewidths=0,
                            c=my_dict[k], alpha=0.7, label="cluster_%s" % k)
            plt.legend(loc='best')
        plt.title('2D-figure of clustering', fontsize=18)
        ax_width = 1
        ax = plt.gca()  # grab the current axes to style its frame
        ax.spines['bottom'].set_linewidth(ax_width)
        ax.spines['left'].set_linewidth(ax_width)
        ax.spines['top'].set_linewidth(ax_width)
        ax.spines['right'].set_linewidth(ax_width)
        plt.savefig(figure_name, dpi=600, transparent=True, bbox_inches='tight')
        plt.close(0)
    full_path = os.path.abspath(figure_name)
    if os.path.isfile(full_path):
        print('The output 2D-figure for clustering can be found:')
        print(full_path)
        print('\n')
def plot_fs(scores, path, ind=False):
    """Bar-plot the top-10 feature-importance scores and save a PNG.

    :param scores: 1-D array of per-feature importances; None -> no-op apart
                   from stale-file cleanup.
    :param path: output directory prefix (assumed to end with a separator).
    :param ind: True -> use the '_ind' (independent test) file name.
    """
    if ind is True:
        figure_name = path + 'Feature_importance_ind.png'
    else:
        figure_name = path + 'Feature_importance.png'
    # Remove any stale figure so a failed save is not mistaken for output.
    if os.path.isfile(figure_name):
        try:
            os.remove(figure_name)
        except OSError:
            print('File delete error!')
            pass
    if scores is not None:
        # Feature names F1..Fn (1-based, matching the original column order).
        index = []
        for i in range(len(scores)):
            index.append('F%d' % (i + 1))
        # argsort of the negated scores = descending order; keep the top 10.
        ranking = np.argsort(-scores)[:10]
        x_label = []
        height = []
        for i in range(len(ranking)):
            x_label.append(index[ranking[i]])
            height.append(scores[ranking[i]])
        plt.figure(0)
        plt.bar(range(len(height)), height, color='navy', alpha=0.7, tick_label=x_label)
        plt.xticks(size=10)
        plt.title('Feature importance ranking', fontsize=18)
        plt.xlabel('Feature index', fontsize=16)
        plt.ylabel('Feature importance', fontsize=16)
        ax_width = 1
        ax = plt.gca()  # grab the current axes to style its frame
        ax.spines['bottom'].set_linewidth(ax_width)
        ax.spines['left'].set_linewidth(ax_width)
        ax.spines['top'].set_linewidth(ax_width)
        ax.spines['right'].set_linewidth(ax_width)
        plt.savefig(figure_name, dpi=600, transparent=True, bbox_inches='tight')
        plt.close(0)
    full_path = os.path.abspath(figure_name)
    if os.path.isfile(full_path):
        print('The figure for feature importance can be found:')
        print(full_path)
        print('\n')
def plot_ap(vectors, cluster_centers_indices, labels_, path, ind=False):
    """Visualise an Affinity Propagation clustering in 2-D and save a PNG.

    Points are projected with t-SNE (PCA fallback); each cluster is drawn in
    its own colour with spokes from the exemplar to its members.

    :param vectors: feature matrix that was clustered.
    :param cluster_centers_indices: row indices of the exemplars.
    :param labels_: per-sample cluster assignment (array-like of ints).
    :param path: output directory prefix (assumed to end with a separator).
    :param ind: True -> use the '_ind' (independent test) file name.
    """
    n_clusters_ = len(cluster_centers_indices)
    colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
    if ind is True:
        figure_name = path + 'AP_cluster_ind.png'
    else:
        figure_name = path + 'AP_cluster.png'
    # Remove any stale figure so a failed save is not mistaken for output.
    if os.path.isfile(figure_name):
        try:
            os.remove(figure_name)
        except OSError:
            print('File delete error!')
            pass
    try:
        data = TSNE(n_components=2, init='pca', random_state=42).fit_transform(vectors)
    except RuntimeWarning:
        data = PCA(n_components=2).fit_transform(vectors)
    for k, col in zip(range(n_clusters_), colors):
        # `labels_ == k` compares k with every element of the label array,
        # e.g. labels = [1, 0], k = 0  ->  [False, True]  (member mask).
        class_members = labels_ == k
        cluster_center = data[cluster_centers_indices[k]]  # exemplar coordinates
        plt.plot(data[class_members, 0], data[class_members, 1], col + '.')
        plt.plot(cluster_center[0], cluster_center[1], markerfacecolor=col,
                 markeredgecolor='k', markersize=14)
        # Draw a spoke from the exemplar to each member of the cluster.
        for x in data[class_members]:
            plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)
    # plt.rcParams['font.sans-serif'] = ['SimHei']  # (disabled) CJK-capable font
    # plt.title('Numbers of predicted centers:%d' % n_clusters_)
    plt.title('Affinity Propagation cluster diagram', fontsize=16)
    ax_width = 1
    ax = plt.gca()  # grab the current axes to style its frame
    ax.spines['bottom'].set_linewidth(ax_width)
    ax.spines['left'].set_linewidth(ax_width)
    ax.spines['top'].set_linewidth(ax_width)
    ax.spines['right'].set_linewidth(ax_width)
    plt.savefig(figure_name, dpi=600, transparent=True, bbox_inches='tight')
    plt.close(0)
    full_path = os.path.abspath(figure_name)
    if os.path.isfile(full_path):
        print('Here is figure for %s method\n' % figure_name)
        print(full_path)
def plot_hc(vectors, index, path, ind=False):
    """Draw a Ward-linkage hierarchical-clustering dendrogram and save a PNG.

    :param vectors: feature matrix (one row per sample).
    :param index: per-sample leaf labels, aligned with ``vectors`` rows.
    :param path: output directory prefix (assumed to end with a separator).
    :param ind: True -> use the '_ind' (independent test) file name.
    """
    # 2020/7/26: randomly subsample so the dendrogram stays readable
    # (at most 22 leaves are drawn).
    index_sp = []
    if len(vectors) > 22:
        range_list = list(range(len(vectors)))
        random.shuffle(range_list)
        sp_list = range_list[: 22]
        sp_list.sort()
        #
        vectors = vectors[sp_list]
        for k in sp_list:
            index_sp.append(index[k])
    else:
        index_sp = index
    if ind is True:
        image = path + 'H_cluster_ind.png'
    else:
        image = path + 'H_cluster.png'
    # Remove any stale figure so a failed save is not mistaken for output.
    if os.path.isfile(image):
        try:
            os.remove(image)
        except OSError:
            print('File delete error!')
            pass
    plt.figure(0)
    # Pairwise Euclidean distances -> Ward linkage -> dendrogram.
    dis_mat = sch.distance.pdist(vectors, 'euclidean')
    z = sch.linkage(dis_mat, method='ward')
    sch.dendrogram(z, labels=np.array(index_sp), leaf_rotation=270, leaf_font_size=8)
    plt.title('Hierarchical cluster diagram', fontsize=18)
    ax_width = 1
    ax = plt.gca()  # grab the current axes to style its frame
    ax.spines['bottom'].set_linewidth(ax_width)
    ax.spines['left'].set_linewidth(ax_width)
    ax.spines['top'].set_linewidth(ax_width)
    ax.spines['right'].set_linewidth(ax_width)
    plt.savefig(image, dpi=600, transparent=True, bbox_inches='tight')
    plt.close(0)
    full_path = os.path.abspath(image)
    if os.path.isfile(full_path):
        print('Here is figure for %s method\n' % image)
        print(full_path)
<file_sep>import pickle
import math
from collections import Counter
from math import log
from random import shuffle
import networkx as nx
import numpy as np
from gensim.models import Word2Vec, FastText
from scipy import sparse
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.model_selection import StratifiedKFold
# TODO: TF-IDF
def tf_idf(sentence_list):
    """Encode tokenised sentences as dense TF-IDF vectors.

    :param sentence_list: list of token lists; each is joined with spaces
                          into one document.
    :return: dense ndarray where row i holds the TF-IDF weights of
             document i over the fitted vocabulary.
    """
    corpus = [' '.join(tokens) for tokens in sentence_list]
    # Raw term counts per document.
    counts = CountVectorizer().fit_transform(corpus)
    # Re-weight the counts into TF-IDF scores.
    weights = TfidfTransformer().fit_transform(counts)
    return weights.toarray()
# TODO: TextRank
def text_rank(sentence_list, alpha=0.85):
    """TF-IDF vectors scaled by TextRank (PageRank) document scores.

    Builds a document-similarity graph from TF-IDF inner products, runs
    PageRank with damping ``alpha``, and multiplies each document's TF-IDF
    vector by its PageRank score.

    :param sentence_list: list of token lists.
    :param alpha: PageRank damping factor.
    :return: ndarray of weighted TF-IDF vectors, one row per document.
    """
    corpus = [' '.join(tokens) for tokens in sentence_list]
    counts = CountVectorizer().fit_transform(corpus)
    tfidf = TfidfTransformer().fit_transform(counts)
    # Similarity graph: edge weights are TF-IDF dot products between docs.
    graph = nx.from_scipy_sparse_matrix(tfidf * tfidf.T)
    scores = nx.pagerank(graph, alpha=alpha)
    dense = tfidf.toarray()
    score_values = list(scores.values())
    weighted = [dense[i] * score_values[i] for i in range(len(score_values))]
    return np.array(weighted)
# TODO: TextRank
def text_rank1(sentence_list, alpha=0.85, window_size=3):
    """Word-level TextRank: score every token of every document by power
    iteration over a co-occurrence graph and return, per document, the
    vector of its tokens' scores.

    :param sentence_list: list of token lists (one per document).
    :param alpha: damping factor d of the TextRank update.
    :param window_size: sliding-window width used to collect co-occurrences.
    :return: ndarray, row i = per-token TextRank scores of document i.
    """
    # window_size controls which token pairs count as co-occurring.
    max_iteration = 30  # max iteration for power iteration method
    d = alpha  # damping factor
    threshold = 0.001  # convergence threshold THRESHOLD
    vectors = []
    for ind in range(len(sentence_list)):
        document = sentence_list[ind]
        vocabulary = list(set(document))
        vector = np.zeros(len(document), dtype=np.float32)
        vocab_len = len(vocabulary)
        # weighted_edge[i][j]: accumulated inverse-distance weight between
        # vocabulary words i and j.
        weighted_edge = np.zeros((vocab_len, vocab_len), dtype=np.float32)
        score = np.zeros(vocab_len, dtype=np.float32)
        covered_co_occurrence = []
        for i in range(0, vocab_len):
            score[i] = 1
            for j in range(0, vocab_len):
                if j == i:
                    weighted_edge[i][j] = 0
                else:
                    # NOTE(review): this range stops at len-window_size, so the
                    # final window position is never visited — looks like an
                    # off-by-one (len - window_size + 1 would cover all
                    # windows); confirm whether that is intentional.
                    for window_start in range(0, (len(document) - window_size)):
                        window_end = window_start + window_size
                        window = document[window_start:window_end]
                        if (vocabulary[i] in window) and (vocabulary[j] in window):
                            index_of_i = window_start + window.index(vocabulary[i])
                            index_of_j = window_start + window.index(vocabulary[j])
                            # index_of_x is the absolute position of the xth term in the window
                            # (counting from 0)
                            # in the processed_text
                            if [index_of_i, index_of_j] not in covered_co_occurrence:
                                weighted_edge[i][j] += 1 / math.fabs(index_of_i - index_of_j)
                                covered_co_occurrence.append([index_of_i, index_of_j])
        # inout[i] = total outgoing edge weight of word i (normaliser below).
        inout = np.zeros(vocab_len, dtype=np.float32)
        for i in range(0, vocab_len):
            for j in range(0, vocab_len):
                inout[i] += weighted_edge[i][j]
        print('Power iteration method running...')
        print('Sequence index: %d' % ind)
        for it in range(0, max_iteration):
            prev_score = np.copy(score)
            for i in range(0, vocab_len):
                summation = 0
                for j in range(0, vocab_len):
                    if weighted_edge[i][j] != 0:
                        summation += (weighted_edge[i][j] / inout[j]) * score[j]
                score[i] = (1 - d) + d * summation
            if np.sum(np.fabs(prev_score - score)) <= threshold:  # convergence condition
                print("Converging at iteration " + str(it) + "....\n")
                break
        # for i in range(0, vocab_len):
        #     print("Score of " + vocabulary[i] + ": " + str(score[i]))
        # Map each document token back to its vocabulary score.
        for i in range(len(document)):
            index = vocabulary.index(document[i])
            rank_value = score[index]
            vector[i] = rank_value
        vectors.append(vector)
    return np.array(vectors)
# TODO: Word2vec
def data_partition(sample_size_list):
    """Build fixed 5-fold stratified CV index splits from per-class counts.

    :param sample_size_list: number of samples in each class, in order.
    :return: list of (train_index, test_index) ndarray pairs, one per fold.
    """
    seed_ = 42
    folds_num = 5
    # Expand the class sizes into one float label per sample.
    label_all = []
    for class_idx, class_size in enumerate(sample_size_list):
        label_all.extend([float(class_idx)] * class_size)
    total = len(label_all)
    label_all = np.array(label_all)
    # StratifiedKFold only looks at the labels; the features are placeholders.
    pse_data = np.random.normal(loc=0.0, scale=1.0, size=[total, total])
    splitter = StratifiedKFold(folds_num, shuffle=True, random_state=np.random.RandomState(seed_))
    return [(train_idx, test_idx) for train_idx, test_idx in splitter.split(pse_data, label_all)]
def word2vec(sentence_list, sample_size_list, fixed_len, word_size, win_size, vec_dim=10, skip_gram=0):
    """Embed every sentence with Word2Vec under 5-fold CV: each fold's model
    is trained on the train split only, then used to encode the test split.

    :param sentence_list: list of token lists (fixed-length sentences).
    :param sample_size_list: per-class sample counts (drives the CV split).
    :param fixed_len: fixed sequence length; with ``word_size`` determines
                      the number of tokens per sentence.
    :param word_size: k-mer/word width used when the tokens were built.
    :param win_size: Word2Vec context window.
    :param vec_dim: embedding dimension per token.
    :param skip_gram: 1 -> skip-gram, 0 -> CBOW.
    :return: ndarray (n_sentences, (fixed_len - word_size + 1) * vec_dim) of
             concatenated per-token vectors; rows start at -1 and are filled
             fold by fold.
    """
    n_row = (fixed_len - word_size + 1) * vec_dim  # the default win_size value is 100
    corpus_out = -np.ones((len(sentence_list), n_row))
    folds = data_partition(sample_size_list)
    print('word2vec processing ...')
    for i, (train_index, test_index) in enumerate(folds):
        print('Round [%s]' % (i + 1))
        train_sentences = []
        test_sentences = []
        for x in train_index:
            train_sentences.append(sentence_list[x])
        for y in test_index:
            test_sentences.append(sentence_list[y])
        # The core stone of Gene2vec | window: the maximum distance between
        # the current and the predicted word within a sentence.
        model = Word2Vec(train_sentences, size=vec_dim, window=win_size, sg=skip_gram)  # sg=1 -> skip-gram model
        vectors = []
        for sentence in test_sentences:
            # print(sentence)
            vector = []
            for j in range(len(sentence)):
                try:
                    # NOTE(review): `model[word]` is the old gensim (<4.0)
                    # key lookup — confirm the pinned gensim version.
                    vec_temp = np.array(model[sentence[j]])
                except KeyError:
                    # Out-of-vocabulary token (unseen in train fold) -> zeros.
                    vec_temp = np.zeros(vec_dim)
                # print(len(vec_temp))
                if len(vector) == 0:
                    vector = vec_temp
                else:
                    vector = np.hstack((vector, vec_temp))
            vectors.append(vector)
        corpus_out[test_index] = np.array(vectors)
    return corpus_out
# TODO: Glove
def build_vocab(train_corpus):
    """
    Build a vocabulary with word frequencies for an entire corpus.
    Returns: {word : (ID, frequency)} — IDs follow first-seen order.
    """
    counts = Counter()
    for line in train_corpus:
        counts.update(line)
    return {word: (word_id, freq) for word_id, (word, freq) in enumerate(counts.items())}
def build_co_occur(vocab, corpus, window_size=10, min_count=None):
    """
    Build a word co-occurrence list for the given corpus.
    Yields tuples (i_main, i_context, co_occurrence value) where
    i_main is the centre word's ID, i_context the context word's ID and
    the value is the X_{ij} count of Pennington et al. (2014), weighted
    by 1/distance.  Pairs whose word frequency is below `min_count`
    (when given) are skipped.
    """
    vocab_size = len(vocab)
    id2word = {word_id: word for word, (word_id, _) in vocab.items()}
    matrix = sparse.lil_matrix((vocab_size, vocab_size), dtype=np.float64)
    for line in corpus:
        token_ids = [vocab[word][0] for word in line]
        for center_pos, center_id in enumerate(token_ids):
            # Only the window to the LEFT of the centre word is scanned;
            # the symmetric updates below stand in for right contexts.
            left_ids = token_ids[max(0, center_pos - window_size): center_pos]
            n_left = len(left_ids)
            for offset, left_id in enumerate(left_ids):
                # Weight each pair by the inverse of its token distance.
                distance = n_left - offset
                increment = 1.0 / float(distance)
                matrix[center_id, left_id] += increment
                matrix[left_id, center_id] += increment
    # Walk the LiL internals to visit only the nonzero cells.
    for i, (row, data) in enumerate(zip(matrix.rows, matrix.data)):
        if min_count is not None and vocab[id2word[i]][1] < min_count:
            continue
        for data_idx, j in enumerate(row):
            if min_count is not None and vocab[id2word[j]][1] < min_count:
                continue
            yield i, j, data[data_idx]
def run_iter(data, learning_rate=0.05, x_max=100, alpha=0.75):
    """
    Run a single iteration of GloVe training.
    `data` is a pre-fetched data / weights list where each element is of
    the form
    (v_main, v_context,
     b_main, b_context,
     gradsq_W_main, gradsq_W_context,
     gradsq_b_main, gradsq_b_context,
     cooccurrence)
    Returns the cost associated with the given weight assignments and
    updates the weights by online AdaGrad in place.
    """
    global_cost = 0
    # We want to iterate over data randomly so as not to unintentionally
    # bias the word vector contents
    shuffle(data)
    for (v_main, v_context, b_main, b_context, gradsq_W_main, gradsq_W_context,
         gradsq_b_main, gradsq_b_context, cooccurrence) in data:
        # f(X_ij): down-weight rare pairs, cap frequent ones at 1.
        weight = (cooccurrence / x_max) ** alpha if cooccurrence < x_max else 1
        # Compute inner component of cost function
        # $$ J' = w_i^Tw_j + b_i + b_j - log(X_{ij}) $$
        cost_inner = (v_main.dot(v_context)
                      + b_main[0] + b_context[0]
                      - log(cooccurrence))
        cost = weight * (cost_inner ** 2)
        # Add weighted cost to the global cost tracker
        global_cost += 0.5 * cost  # note the 1/2 factor here
        # Compute gradients for word vector terms.
        #
        # NB: `main_word` is only a view into `W` (not a copy), so our
        # modifications here will affect the global weight matrix;
        # likewise for context_word, biases, etc.
        grad_main = weight * cost_inner * v_context  # dJ/dv_main; the factor 2 cancels against the 1/2 above
        grad_context = weight * cost_inner * v_main
        # Compute gradients for bias terms
        grad_bias_main = weight * cost_inner
        grad_bias_context = weight * cost_inner
        # Now perform adaptive updates
        v_main -= (learning_rate * grad_main / np.sqrt(gradsq_W_main))  # gradient-descent step;
        # dividing by np.sqrt(gradsq_W_main) is AdaGrad's per-parameter
        # learning-rate scaling (the paper uses AdaGrad).
        v_context -= (learning_rate * grad_context / np.sqrt(gradsq_W_context))
        b_main -= (learning_rate * grad_bias_main / np.sqrt(gradsq_b_main))
        b_context -= (learning_rate * grad_bias_context / np.sqrt(
            gradsq_b_context))
        # Update squared gradient sums
        gradsq_W_main += np.square(grad_main)  # vector
        gradsq_W_context += np.square(grad_context)
        gradsq_b_main += grad_bias_main ** 2  # scalar
        gradsq_b_context += grad_bias_context ** 2
    return global_cost
def train_glove(vocab, co_occurrences, iter_callback=None, vector_size=100,
                iterations=25, **kwargs):
    """
    co_occurrences: (word_i_id, word_j_id, x_ij)
    If `iter_callback` is not `None`, the provided function will be
    called after each iteration with the learned `W` matrix so far.
    Keyword arguments are passed on to the iteration step function
    `run_iter`.
    Returns the computed word vector matrix .
    """
    vocab_size = len(vocab)
    # Word vector matrix. This matrix is (2V) * d, where V is the size
    # of the corpus vocabulary and d is the dimensionality of the word
    # vectors. All elements are initialized randomly in the range (-0.5,
    # 0.5].
    vector_matrix = (np.random.rand(vocab_size * 2, vector_size) - 0.5) / float(vector_size + 1)
    biases = (np.random.rand(vocab_size * 2) - 0.5) / float(vector_size + 1)
    # Training is done via adaptive gradient descent (AdaGrad). To make
    # this work we need to store the sum of squares of all previous
    # gradients.
    #
    # this matrix is same size with vector_matrix
    #
    # Initialize all squared gradient sums to 1 so that our initial
    # adaptive learning rate is simply the global learning rate.
    gradient_squared = np.ones((vocab_size * 2, vector_size),
                               dtype=np.float64)
    # Sum of squared gradients for the bias terms.
    gradient_squared_biases = np.ones(vocab_size * 2, dtype=np.float64)
    # NB: These are all views into the actual data matrices, so updates
    # to them will pass on to the real data structures
    # (main vectors live in rows [0, V); context vectors in [V, 2V);
    # biases are sliced as length-1 views so run_iter can mutate them).
    data = [(vector_matrix[i_main], vector_matrix[i_context + vocab_size],
             biases[i_main: i_main + 1],
             biases[i_context + vocab_size: i_context + vocab_size + 1],
             gradient_squared[i_main], gradient_squared[i_context + vocab_size],
             gradient_squared_biases[i_main: i_main + 1],
             gradient_squared_biases[i_context + vocab_size: i_context + vocab_size + 1],
             co_occurrence)
            for i_main, i_context, co_occurrence in co_occurrences]
    for i in range(iterations):
        cost = run_iter(data, **kwargs)
        print('global cost of glove model: %.4f' % cost)
        if iter_callback is not None:
            iter_callback(vector_matrix)
    return vector_matrix
def save_model(vector_matrix, out_path):
    """Serialise the word-vector matrix to ``out_path`` (pickle protocol 2)."""
    with open(out_path, 'wb') as handle:
        pickle.dump(vector_matrix, handle, protocol=2)
def load_model(model_path):
    """Load a pickled word-vector matrix previously written by ``save_model``.

    Bug fix: the file must be opened in binary READ mode ('rb').  The
    original code opened it with 'wb', which truncates the file and makes
    ``pickle.load`` fail — the function could never succeed.

    :param model_path: path to the pickle file.
    :return: the unpickled object (the word-vector matrix).
    """
    with open(model_path, 'rb') as vector_f:
        vector_matrix = pickle.load(vector_f)
    return vector_matrix
def merge_main_context(vector_matrix, merge_fun=lambda m, c: np.mean([m, c], axis=0),
                       normalize=True):
    """
    Merge the main-word and context-word vectors for a weight matrix
    using the provided merge function (which accepts a main-word and
    context-word vector and returns a merged version).
    By default, `merge_fun` returns the mean of the two vectors; when
    `normalize` is true each merged row is scaled to unit L2 norm.
    The top half of `vector_matrix` is overwritten in place and returned.
    """
    vocab_size = int(len(vector_matrix) / 2)
    for idx in range(vocab_size):
        # Row idx is the main vector; row idx + vocab_size its context twin.
        combined = merge_fun(vector_matrix[idx], vector_matrix[idx + vocab_size])
        if normalize:
            combined /= np.linalg.norm(combined)
        vector_matrix[idx, :] = combined
    return vector_matrix[:vocab_size]
def glove(sentence_list, sample_size_list, fixed_len, word_size, win_size, vec_dim=10):
    """Embed every sentence with GloVe under 5-fold CV: each fold trains the
    embedding on the train split only and encodes the test split with it.

    :param sentence_list: list of token lists (fixed-length sentences).
    :param sample_size_list: per-class sample counts (drives the CV split).
    :param fixed_len: fixed sequence length; with ``word_size`` determines
                      the number of tokens per sentence.
    :param word_size: k-mer/word width used when the tokens were built.
    :param win_size: co-occurrence window width.
    :param vec_dim: embedding dimension per token.
    :return: ndarray (n_sentences, (fixed_len - word_size + 1) * vec_dim) of
             concatenated per-token vectors; rows start at -1 and are filled
             fold by fold.
    """
    n_row = (fixed_len - word_size + 1) * vec_dim
    corpus_out = -np.ones((len(sentence_list), n_row))
    folds = data_partition(sample_size_list)
    print('Glove processing ...')
    for i, (train_index, test_index) in enumerate(folds):
        print('Round [%s]' % (i + 1))
        train_sentences = []
        test_sentences = []
        for x in train_index:
            train_sentences.append(sentence_list[x])
        for y in test_index:
            test_sentences.append(sentence_list[y])
        # The core stone of Glove
        vocab = build_vocab(train_sentences)  # vocabulary
        # print(vocab): {'CTT': (0, 8), 'TTC': (1, 6), 'TCG': (2, 2), 'CGC': (3, 2), ...}
        # exit()
        co_occur = build_co_occur(vocab, train_sentences, window_size=win_size)  # co-occurrence matrix
        vector_matrix = train_glove(vocab, co_occur, vector_size=vec_dim, iterations=50)  # word-vector matrix (main + context)
        # Merge and normalize word vectors
        vector_matrix = merge_main_context(vector_matrix)  # average main+context rows and L2-normalise
        vectors = []
        for sentence in test_sentences:
            vector = []
            for j in range(len(sentence)):
                try:
                    vec_temp = np.array(vector_matrix[vocab[sentence[j]][0]])
                    # vocab={'word': (id, frequency), ...}
                except KeyError:
                    # Out-of-vocabulary token (unseen in train fold) -> zeros.
                    vec_temp = np.zeros(vec_dim)
                if len(vector) == 0:
                    vector = vec_temp
                else:
                    vector = np.hstack((vector, vec_temp))
            vectors.append(vector)
        corpus_out[test_index] = np.array(vectors)
        print('....................')
    return corpus_out
# TODO: fastText
def fast_text(sentence_list, sample_size_list, fixed_len, word_size, win_size, vec_dim=10, skip_gram=0):
    """Embed every sentence with fastText under 5-fold CV: each fold trains
    the model on the train split only, then encodes the test split.

    Bug fix: the model was previously fitted on the FULL ``sentence_list``,
    leaking the held-out fold into the embedding; the sibling ``word2vec``
    and ``glove`` functions train on ``train_sentences`` only, and this now
    does the same.

    :param sentence_list: list of token lists (fixed-length sentences).
    :param sample_size_list: per-class sample counts (drives the CV split).
    :param fixed_len: fixed sequence length; with ``word_size`` determines
                      the number of tokens per sentence.
    :param word_size: k-mer/word width used when the tokens were built.
    :param win_size: fastText context window.
    :param vec_dim: embedding dimension per token.
    :param skip_gram: 1 -> skip-gram, 0 -> CBOW.
    :return: ndarray (n_sentences, (fixed_len - word_size + 1) * vec_dim) of
             concatenated per-token vectors; rows start at -1 and are filled
             fold by fold.
    """
    n_row = (fixed_len - word_size + 1) * vec_dim  # the default win_size value is 100
    corpus_out = -np.ones((len(sentence_list), n_row))
    folds = data_partition(sample_size_list)
    print('fastText processing ...')
    for i, (train_index, test_index) in enumerate(folds):
        print('Round [%s]' % (i + 1))
        train_sentences = []
        test_sentences = []
        for x in train_index:
            train_sentences.append(sentence_list[x])
        for y in test_index:
            test_sentences.append(sentence_list[y])
        # The core stone of FastText — fit on the training fold only.
        model = FastText(train_sentences, size=vec_dim, window=win_size, sg=skip_gram)
        vectors = []
        for sentence in test_sentences:
            vector = []
            for j in range(len(sentence)):
                try:
                    vec_temp = np.array(model[sentence[j]])
                except KeyError:
                    # Token with no usable representation -> zero vector.
                    vec_temp = np.zeros(vec_dim)
                if len(vector) == 0:
                    vector = vec_temp
                else:
                    vector = np.hstack((vector, vec_temp))
            vectors.append(vector)
        corpus_out[test_index] = np.array(vectors)
        print('.......................')
    return corpus_out
<file_sep>import multiprocessing
import os
import time
from CheckAll import results_dir_check, check_contain_chinese, seq_sys_check, ml_params_check, make_params_dicts, \
res_feature_check, Machine_Learning_Algorithm, DeepLearning, Final_Path, dl_params_check, Batch_Path_Seq, \
Method_Res, prepare4train_res, prepare4train_seq, crf_params_check
from FeatureAnalysis import fa_process
from FeatureExtractionMode.OHE.OHE4vec import ohe2res_base, sliding_win2files, mat_list2frag_array
from FeatureExtractionMode.utils.utils_write import read_res_seq_file, read_res_label_file, fixed_len_control, \
res_file_check, out_res_file, out_dl_frag_file, res_base2frag_vec, gen_label_array
from MachineLearningAlgorithm.Classification.dl_machine import dl_cv_process as seq_dcp
from MachineLearningAlgorithm.Classification.dl_machine import dl_ind_process as seq_dip
from MachineLearningAlgorithm.SequenceLabelling.dl_machine import dl_cv_process as res_dcp
from MachineLearningAlgorithm.SequenceLabelling.dl_machine import dl_ind_process as res_dip
from MachineLearningAlgorithm.Classification.ml_machine import ml_cv_results, ml_ind_results
from MachineLearningAlgorithm.SequenceLabelling.ml_machine import crf_cv_process, crf_ind_process
from MachineLearningAlgorithm.utils.utils_read import files2vectors_res, read_base_mat4res, read_base_vec_list4res, \
res_label_read, read_dl_vec4seq, res_dl_label_read
from MachineLearningRes import one_cl_process, params_select
def create_results_dir(args, cur_dir):
    """Resolve the results directory (batch layout when ``args.bp == 1``,
    otherwise the final-results layout), ensure it exists via
    ``results_dir_check`` and return its path.
    """
    if args.bp == 1:
        results_dir = "%s%s%s/%s/" % (cur_dir, Batch_Path_Seq, args.category, args.method)
    else:
        results_dir = cur_dir + Final_Path
    results_dir_check(results_dir)
    return results_dir
def res_cl_fe_process(args, fragment):
    """Residue-level pipeline for classical ML (SVM/RF): extract per-residue
    features, grid-search parameters over fixed CV folds, run feature
    analysis, train the final classifier and (optionally) the independent
    test.

    :param args: fully-populated argparse.Namespace (mutated in place).
    :param fragment: 0 -> sliding-window features per residue;
                     1 -> flattened fixed-length fragment features.
    """
    # ** Residue-level feature extraction and label-array generation: begin ** #
    # Name the files that will hold the SVM/RF input features.
    out_files = out_res_file(args.label, args.results_dir, args.format, args.fragment, ind=False)
    # Read the base feature file; its vectors are re-written below.
    vectors_list = read_base_vec_list4res(args.fea_file)
    # Branch on `fragment` to produce the matching feature vectors.
    if fragment == 0:
        # NOTE(review): "lease" in this message looks like a typo for "please".
        assert args.window is not None, "If -fragment is 0, lease set window size!"
        # fragment == 0: a sliding window generates features per residue.
        sliding_win2files(vectors_list, args.res_labels_list, args.window, args.format, out_files)
    else:
        # fragment == 1: flatten each residue fragment's base features.
        mat_list2frag_array(vectors_list, args.res_labels_list, args.fixed_len, args.format, out_files)
    # Read the feature-vector files back in.
    vectors, sp_num_list = files2vectors_res(out_files, args.format)
    # Build the label array from the per-label sample counts.
    label_array = res_label_read(sp_num_list, args.label)
    # ** Residue-level feature extraction and label-array generation: done ** #
    # Before the parameter sweep: 1. fix the folds; 2. set the metric;
    # 3. mark the task type.
    args = prepare4train_res(args, label_array, dl=False)
    args.res = True
    # ** Select parameters by sweeping the SVM/RF parameter dictionaries ** #
    # SVM/RF parameter dictionaries.
    params_dict_list = args.params_dict_list
    # Multiprocessing pool: one async job per parameter combination.
    pool = multiprocessing.Pool(args.cpu)
    params_dict_list_pro = []
    print('\n')
    print('Parameter Selection Processing...')
    print('\n')
    for i in range(len(params_dict_list)):
        params_dict = params_dict_list[i]
        params_dict_list_pro.append(pool.apply_async(one_cl_process, (args, vectors, label_array, args.folds,
                                                                      params_dict)))
    pool.close()
    pool.join()
    # ** Sweep finished ** #
    # Pick the best parameters according to the selection metric.
    params_selected = params_select(params_dict_list_pro, args.results_dir)
    # Feature analysis.
    print(' Shape of Feature vectors: [%d, %d] '.center(66, '*') % (vectors.shape[0], vectors.shape[1]))
    print('\n')
    if args.score == 'none':
        vectors = fa_process(args, vectors, label_array, after_ps=True)
        print(' Shape of Feature vectors after FA process: [%d, %d] '.center(66, '*') % (vectors.shape[0],
                                                                                         vectors.shape[1]))
    # Train the final classifier with the selected parameters.
    ml_cv_results(args.ml, vectors, label_array, args.folds, args.sp, args.multi, args.res, args.results_dir,
                  params_selected)
    # -------- Independent test -------- #
    # i.e. evaluate the independent data set on the best model.
    if args.ind_seq_file is not None:
        res_ind_cl_fe_process(args, params_selected)
    # -------- Independent test -------- #
def res_ind_cl_fe_process(args, params_selected):
    """Run the independent test for SVM/RF residue-level classification.

    Extracts features for the independent dataset, applies the same
    feature-analysis pipeline as the benchmark set, then evaluates the model
    selected during cross validation on the held-out data.

    :param args: parsed command-line namespace (mutated by ind_preprocess).
    :param params_selected: best parameter dict chosen during parameter selection.
    """
    print('########################## Independent Test Begin ##########################\n')
    # Configure args for the independent test set (labels, feature file, ...).
    args = ind_preprocess(args)
    # Name the files that will hold SVM/RF input features for the independent
    # set.  BUGFIX: ind=True so these files do not clobber the benchmark
    # feature files produced earlier with ind=False.
    ind_out_files = out_res_file(args.label, args.results_dir, args.format, args.fragment, ind=True)
    # Read the base feature file of the independent set, ready for writing.
    ind_vectors_list = read_base_vec_list4res(args.ind_fea_file)
    # Build per-residue feature vectors according to the fragment mode.
    if args.fragment == 0:
        assert args.window is not None, "If -fragment is 0, please set window size!"
        # fragment == 0: sliding-window features for every residue.
        sliding_win2files(ind_vectors_list, args.ind_res_labels_list, args.window, args.format, ind_out_files)
    else:
        # fragment == 1: flatten the base features of every residue fragment.
        mat_list2frag_array(ind_vectors_list, args.ind_res_labels_list, args.fixed_len, args.format, ind_out_files)
    # Load the feature vector files of the independent set.
    ind_vectors, ind_sp_num_list = files2vectors_res(ind_out_files, args.format)
    # Build the label array from the per-label sample counts.
    ind_label_array = res_label_read(ind_sp_num_list, args.label)
    # Feature analysis on the independent set.
    print(' Shape of Ind Feature vectors: [%d, %d] '.center(66, '*') % (ind_vectors.shape[0], ind_vectors.shape[1]))
    print('\n')
    if args.score == 'none':
        ind_vectors = fa_process(args, ind_vectors, ind_label_array, True, True)
        print(' Shape of Ind Feature vectors after FA process: [%d, %d] '.center(66, '*') % (ind_vectors.shape[0],
                                                                                             ind_vectors.shape[1]))
    # Evaluate the selected classifier on the independent set.
    ml_ind_results(args.ml, ind_vectors, ind_label_array, args.multi, args.res, args.results_dir, params_selected)
    print('########################## Independent Test Finish ##########################\n')
def res_crf_fe_process(args):
    """Train/evaluate a CRF on residue-level features (CV or independent test)."""
    # CRF parameter dict (the params list is expected to hold a single entry).
    params_dict = args.params_dict_list[0]
    # Read base feature vectors; returns the matrix and the per-sequence
    # lengths capped at fixed_len.
    vec_mat, fixed_seq_len_list = read_base_mat4res(args.fea_file, args.fixed_len)
    # All residue labels are converted to fixed length here -- keep this in
    # mind when evaluating.
    res_label_mat = res_dl_label_read(args.res_labels_list, args.fixed_len)
    # Unlike SVM/RF, CRF/deep learning splits CV and independent test here.
    if args.ind_seq_file is None:
        # Preparation before training: 1. fixed split; 2. set metric; 3. set task type.
        args = prepare4train_res(args, args.res_labels_list, dl=True)
        crf_cv_process(vec_mat, res_label_mat, fixed_seq_len_list, args.folds, args.results_dir, params_dict)
    else:
        ind_res_crf_fe_process(args, vec_mat, res_label_mat, params_dict)
def ind_res_crf_fe_process(args, vec_mat, res_label_mat, params_dict):
    """Train a CRF on the benchmark set and test it on the independent set."""
    print('########################## Independent Test Begin ##########################\n')
    # Configure args for the independent test set (labels, feature file, ...).
    args = ind_preprocess(args)
    ind_vec_mat, ind_fixed_seq_len_list = read_base_mat4res(args.ind_fea_file, args.fixed_len)
    # Independent-set residue labels are padded/cut to fixed length here --
    # keep this in mind when evaluating.
    ind_res_label_mat = res_dl_label_read(args.ind_res_labels_list, args.fixed_len)
    crf_ind_process(vec_mat, res_label_mat, ind_vec_mat, ind_res_label_mat, ind_fixed_seq_len_list,
                    args.results_dir, params_dict)
    print('########################## Independent Test Finish ##########################\n')
def res_dl_fe_process(args):
    """Train/evaluate a deep model on residue-level features (CV or independent test)."""
    # Deep-learning parameter dict (the params list is expected to hold a single entry).
    params_dict = args.params_dict_list[0]
    # Read base feature vectors; returns the matrix and the per-sequence
    # lengths capped at fixed_len.
    vec_mat, fixed_seq_len_list = read_base_mat4res(args.fea_file, args.fixed_len)
    # All residue labels are converted to fixed length here -- keep this in
    # mind when evaluating.
    res_label_mat = res_dl_label_read(args.res_labels_list, args.fixed_len)
    # Unlike SVM/RF, deep learning splits CV and independent test here.
    if args.ind_seq_file is None:
        # Preparation before training: 1. fixed split; 2. set metric; 3. set task type.
        args = prepare4train_res(args, args.res_labels_list, dl=True)
        res_dcp(args.ml, vec_mat, res_label_mat, fixed_seq_len_list, args.fixed_len, args.folds,
                args.results_dir, params_dict)
    else:
        ind_res_dl_fe_process(args, vec_mat, res_label_mat, fixed_seq_len_list, params_dict)
def ind_res_dl_fe_process(args, vec_mat, res_label_mat, fixed_seq_len_list, params_dict):
    """Train the deep model on the benchmark set and test it on the independent set."""
    print('########################## Independent Test Begin ##########################\n')
    # Configure args for the independent test set (labels, feature file, ...).
    args = ind_preprocess(args)
    ind_vec_mat, ind_fixed_seq_len_list = read_base_mat4res(args.ind_fea_file, args.fixed_len)
    # Independent-set residue labels are padded/cut to fixed length here --
    # keep this in mind when evaluating.
    ind_res_label_mat = res_dl_label_read(args.ind_res_labels_list, args.fixed_len)
    res_dip(args.ml, vec_mat, res_label_mat, fixed_seq_len_list, ind_vec_mat, ind_res_label_mat,
            ind_fixed_seq_len_list, args.fixed_len, args.results_dir, params_dict)
    print('########################## Independent Test Finish ##########################\n')
def frag_dl_fe_process(args):
    """Deep-learning pipeline for fragment mode (fragment == 1)."""
    # ** Executed when fragment is 1 and a deep-learning method is selected ** #
    # Name the feature-vector output files.
    out_files = out_dl_frag_file(args.label, args.results_dir, ind=False)
    # Generate the deep feature-vector files.
    res_base2frag_vec(args.fea_file, args.res_labels_list, args.fixed_len, out_files)
    # Load deep feature vectors; fixed_seq_len_list holds per-sequence lengths
    # capped at fixed_len.
    vectors, sp_num_list, fixed_seq_len_list = read_dl_vec4seq(args.fixed_len, out_files, return_sp=True)
    # Build the label array from per-label sample counts.
    label_array = gen_label_array(sp_num_list, args.label)
    # Deep-learning parameter dict (the params list is expected to hold a single entry).
    params_dict = args.params_dict_list[0]
    # CV and independent test are handled separately for deep learning.
    if args.ind_seq_file is None:
        # Preparation before training: 1. fixed split; 2. set metric; 3. set task type.
        args = prepare4train_seq(args, label_array, dl=True)
        # Build the deep-learning classifier (cross validation).
        seq_dcp(args.ml, vectors, label_array, fixed_seq_len_list, args.fixed_len, args.folds, args.results_dir,
                params_dict)
    else:
        # Independent validation starts here.
        ind_frag_dl_fe_process(args, vectors, label_array, fixed_seq_len_list, params_dict)
def ind_frag_dl_fe_process(args, vectors, label_array, fixed_seq_len_list, params_dict):
    """Independent test for fragment-mode deep learning.

    Extracts fragment features for the independent dataset, then trains on the
    benchmark vectors and evaluates on the independent ones.

    :param args: parsed command-line namespace (ind_* attributes must be set).
    :param vectors: benchmark feature vectors.
    :param label_array: benchmark label array.
    :param fixed_seq_len_list: benchmark per-sequence lengths (capped at fixed_len).
    :param params_dict: deep-learning hyper-parameter dict.

    NOTE(review): unlike the other independent-test paths, ind_preprocess(args)
    is not called here; confirm args.ind_fea_file / args.ind_res_labels_list
    are populated upstream before this function runs.
    """
    print('########################## Independent Test Begin ##########################\n')
    # Name the independent-set feature-vector files.  BUGFIX: ind=True so they
    # do not overwrite the benchmark feature files generated with ind=False.
    ind_out_files = out_dl_frag_file(args.label, args.results_dir, ind=True)
    # Generate the deep feature-vector files for the independent set.
    res_base2frag_vec(args.ind_fea_file, args.ind_res_labels_list, args.fixed_len, ind_out_files)
    # Load deep feature vectors; ind_fixed_seq_len_list holds per-sequence
    # lengths capped at fixed_len.
    ind_vectors, ind_sp_num_list, ind_fixed_seq_len_list = read_dl_vec4seq(args.fixed_len, ind_out_files,
                                                                           return_sp=True)
    # Build the label array from per-label sample counts.
    ind_label_array = gen_label_array(ind_sp_num_list, args.label)
    # Train on the benchmark data and evaluate on the independent set.
    seq_dip(args.ml, vectors, label_array, fixed_seq_len_list, ind_vectors, ind_label_array, ind_fixed_seq_len_list,
            args.fixed_len, args.results_dir, params_dict)
    print('########################## Independent Test Finish ##########################\n')
def ind_preprocess(args):
    """Generate residue-level features for the independent test set.

    Mutates and returns ``args`` with ``ind_res_labels_list`` and
    ``ind_fea_file`` populated.
    """
    # Per-sequence lengths of the independent test set.
    ind_seq_len_list = read_res_seq_file(args.ind_seq_file, args.category)
    # Label lists and label-length lists; res_labels_list --> list[list1, list2, ...]
    args.ind_res_labels_list, ind_label_len_list = read_res_label_file(args.ind_label_file)
    # Run the same consistency checks on the independent set as on the benchmark set.
    res_file_check(ind_seq_len_list, ind_label_len_list, args.fragment)
    # Base output file for all residue features of the independent set.
    args.ind_fea_file = args.results_dir + 'ind_res_features.txt'
    # Extract residue-level features of the independent set into the vector file.
    ohe2res_base(args.ind_seq_file, args.category, args.method, args.current_dir, args.pp_file, args.rss_file,
                 args.ind_fea_file, args.cpu)
    return args
def main(args):
    """Entry point: residue-level feature extraction, training and evaluation."""
    print("\nStep into analysis...\n")
    start_time = time.time()
    current_path = os.path.dirname(os.path.realpath(__file__))
    args.current_dir = os.path.dirname(os.getcwd())
    # Refuse to run from a path containing Chinese characters.
    check_contain_chinese(current_path)
    # Validate that the mode/ml combination is legal.
    args.mode = 'OHE'
    args.score = 'none'
    seq_sys_check(args, True)
    # Create the results directory.
    args.results_dir = create_results_dir(args, args.current_dir)
    # Per-sequence lengths of the input file.
    seq_len_list = read_res_seq_file(args.seq_file, args.category)
    # Label lists and label-length lists; res_labels_list --> list[list1, list2, ...]
    args.res_labels_list, label_len_list = read_res_label_file(args.label_file)
    # fragment=0: check labels are complete and the shortest sequence is > 5;
    # fragment=1: check every fragment carries a unique label.
    res_file_check(seq_len_list, label_len_list, args.fragment)
    # Residue-level problems are set up as binary classification.
    args.label = [1, 0]
    # Fix the sequence length (only needs to be done once, on the benchmark dataset).
    args.fixed_len = fixed_len_control(seq_len_list, args.fixed_len)
    # Validate the chosen residue-level method.
    res_feature_check(args)
    # Validate the learner's parameters and build the parameter dict list.
    all_params_list_dict = {}  # holds both machine-learning and feature-extraction parameters
    if args.ml in DeepLearning:
        all_params_list_dict = dl_params_check(args, all_params_list_dict)
        # dict of lists ---> list of dicts
        args.params_dict_list = make_params_dicts(all_params_list_dict)
    elif args.ml in ['SVM', 'RF']:
        all_params_list_dict = ml_params_check(args, all_params_list_dict)
        # dict of lists ---> list of dicts
        args.params_dict_list = make_params_dicts(all_params_list_dict)
    else:
        # CRF needs no tunable parameters.
        all_params_list_dict = crf_params_check(args, all_params_list_dict)
        args.params_dict_list = make_params_dicts(all_params_list_dict)
    # Base output file for all residue features on the benchmark dataset.
    args.fea_file = args.results_dir + 'res_features.txt'
    # Extract residue-level features into the vector file.
    ohe2res_base(args.seq_file, args.category, args.method, args.current_dir, args.pp_file, args.rss_file,
                 args.fea_file, args.cpu)
    # Dispatch to the pipeline matching the chosen learner / fragment mode.
    if args.ml in DeepLearning:
        if args.fragment == 0:
            res_dl_fe_process(args)
        else:
            frag_dl_fe_process(args)
    elif args.ml in ['SVM', 'RF']:
        res_cl_fe_process(args, args.fragment)
    else:
        res_crf_fe_process(args)
    print("Done.")
    print(("Used time: %.2fs" % (time.time() - start_time)))
if __name__ == '__main__':
    import argparse

    parse = argparse.ArgumentParser(prog='BioSeq-NLP', description="Step into analysis, please select parameters ")
    # parameters for whole framework
    parse.add_argument('-category', type=str, choices=['DNA', 'RNA', 'Protein'], required=True,
                       help="The category of input sequences.")
    parse.add_argument('-method', type=str, required=True, choices=Method_Res,
                       help="Please select feature extraction method for residue level analysis")
    # ----------------------- parameters for FeatureExtraction ---------------------- #
    # parameters for residue
    parse.add_argument('-window', type=int,
                       help="The window size when construct sliding window technique for allocating every "
                            "label a short sequence")
    parse.add_argument('-fragment', type=int, default=0, choices=[0, 1],
                       help="Please choose whether use the fragment method, 1 is yes while 0 is no.")
    # parameters for one-hot encoding
    parse.add_argument('-cpu', type=int, default=1,
                       help="The maximum number of CPU cores used for multiprocessing in generating frequency profile"
                            " or The number of CPU cores used for multiprocessing during parameter selection process "
                            "(default=1).")
    parse.add_argument('-pp_file', type=str,
                       help="The physicochemical properties file user input.\n"
                            "if input nothing, the default physicochemical properties is:\n"
                            "DNA dinucleotide: Rise, Roll, Shift, Slide, Tilt, Twist.\n"
                            "DNA trinucleotide: Dnase I, Bendability (DNAse).\n"
                            "RNA: Rise, Roll, Shift, Slide, Tilt, Twist.\n"
                            "Protein: Hydrophobicity, Hydrophilicity, Mass.")
    parse.add_argument('-rss_file', type=str,
                       help="The second structure file for all input sequences.(The order of a specific sequence "
                            "should be corresponding to the order in 'all_seq_file.txt' file")
    # ----------------------- parameters for feature analysis---------------------- #
    # standardization or normalization
    parse.add_argument('-sn', choices=['min-max-scale', 'standard-scale', 'L1-normalize', 'L2-normalize', 'none'],
                       default='none', help=" Choose method of standardization or normalization for feature vectors.")
    # clustering
    parse.add_argument('-cl', choices=['AP', 'DBSCAN', 'GMM', 'AGNES', 'Kmeans', 'none'], default='none',
                       help="Choose method for clustering.")
    parse.add_argument('-cm', default='sample', choices=['feature', 'sample'], help="The mode for clustering")
    parse.add_argument('-nc', type=int, help="The number of clusters.")
    # feature select
    parse.add_argument('-fs', choices=['chi2', 'F-value', 'MIC', 'RFE', 'Tree', 'none'], default='none',
                       help="Select feature select method. Please refer to sklearn feature selection module for more.")
    parse.add_argument('-nf', type=int, help="The number of features after feature selection.")
    # dimension reduction
    parse.add_argument('-dr', choices=['PCA', 'KernelPCA', 'TSVD', 'none'], default='none',
                       help="Choose method for dimension reduction.")
    parse.add_argument('-np', type=int, help="The dimension of main component after dimension reduction.")
    # rdb
    parse.add_argument('-rdb', choices=['no', 'fs', 'dr'], default='no',
                       help="Reduce dimension by:\n"
                            " 'no'---none;\n"
                            " 'fs'---apply feature selection to parameter selection procedure;\n"
                            " 'dr'---apply dimension reduction to parameter selection procedure.\n")
    # ----------------------- parameters for MachineLearning---------------------- #
    parse.add_argument('-ml', type=str, choices=Machine_Learning_Algorithm, required=True,
                       help="The machine learning algorithm, for example: Support Vector Machine(SVM).")
    parse.add_argument('-grid', type=int, nargs='*', choices=[0, 1], default=0,
                       help="grid = 0 for rough grid search, grid = 1 for meticulous grid search.")
    # parameters for svm
    parse.add_argument('-cost', type=int, nargs='*', help="Regularization parameter of 'SVM'.")
    parse.add_argument('-gamma', type=int, nargs='*', help="Kernel coefficient for 'rbf' of 'SVM'.")
    # parameters for rf
    parse.add_argument('-tree', type=int, nargs='*', help="The number of trees in the forest for 'RF'.")
    # ----------------------- parameters for DeepLearning---------------------- #
    parse.add_argument('-lr', type=float, default=0.01, help="The value of learning rate for deep learning.")
    parse.add_argument('-epochs', type=int, help="The epoch number for train deep model.")
    parse.add_argument('-batch_size', type=int, default=50, help="The size of mini-batch for deep learning.")
    parse.add_argument('-dropout', type=float, default=0.6, help="The value of dropout prob for deep learning.")
    # parameters for LSTM, GRU
    parse.add_argument('-hidden_dim', type=int, default=256,
                       help="The size of the intermediate (a.k.a., feed forward) layer.")
    parse.add_argument('-n_layer', type=int, default=2, help="The number of units for 'LSTM' and 'GRU'.")
    # parameters for CNN
    parse.add_argument('-out_channels', type=int, default=256, help="The number of output channels for 'CNN'.")
    # BUGFIX: -kernel_size is the convolution kernel size, not the stride.
    parse.add_argument('-kernel_size', type=int, default=5, help="The size of kernel for 'CNN'.")
    # parameters for Transformer and Weighted-Transformer
    parse.add_argument('-d_model', type=int, default=256,
                       help="The dimension of multi-head attention layer for Transformer or Weighted-Transformer.")
    parse.add_argument('-d_ff', type=int, default=1024,
                       help="The dimension of fully connected layer of Transformer or Weighted-Transformer.")
    parse.add_argument('-heads', type=int, default=4,
                       help="The number of heads for Transformer or Weighted-Transformer.")
    # parameters for Reformer
    parse.add_argument('-n_chunk', type=int, default=8,
                       help="The number of chunks for processing lsh attention.")
    parse.add_argument('-rounds', type=int, default=1024,
                       help="The number of rounds for multiple rounds of hashing to reduce probability that similar "
                            "items fall in different buckets.")
    parse.add_argument('-bucket_length', type=int, default=64,
                       help="Average size of qk per bucket, 64 was recommended in paper")
    # parameters for ML parameter selection and cross validation
    parse.add_argument('-metric', type=str, choices=['Acc', 'MCC', 'AUC', 'BAcc', 'F1'], default='Acc',
                       help="The metric for parameter selection")
    parse.add_argument('-cv', choices=['5', '10', 'j'], default='5',
                       help="The cross validation mode.\n"
                            "5 or 10: 5-fold or 10-fold cross validation.\n"
                            "j: (character 'j') jackknife cross validation.")
    parse.add_argument('-sp', type=str, choices=['none', 'over', 'under', 'combine'], default='none',
                       help="Select technique for oversampling.")
    # ----------------------- parameters for input and output ---------------------- #
    parse.add_argument('-seq_file', required=True, help="The input file in FASTA format.")
    parse.add_argument('-label_file', required=True, help="The corresponding label file.")
    parse.add_argument('-ind_seq_file', help="The independent test dataset in FASTA format.")
    parse.add_argument('-ind_label_file', help="The corresponding label file of independent test dataset.")
    parse.add_argument('-fixed_len', type=int,
                       help="The length of sequence will be fixed via cutting or padding. If you don't set "
                            "value for 'fixed_len', it will be the maximum length of all input sequences. ")
    # parameters for output
    parse.add_argument('-format', default='csv', choices=['tab', 'svm', 'csv', 'tsv'],
                       help="The output format (default = csv).\n"
                            "tab -- Simple format, delimited by TAB.\n"
                            "svm -- The libSVM training data format.\n"
                            "csv, tsv -- The format that can be loaded into a spreadsheet program.")
    parse.add_argument('-bp', type=int, choices=[0, 1], default=0,
                       help="Select use batch mode or not, the parameter will change the directory for generating file "
                            "based on the method you choose.")
    argv = parse.parse_args()
    main(argv)
<file_sep>from ..utils.utils_words import dt_words
from ..utils.utils_algorithm import tf_idf
def dt_tf_idf(input_file, fixed_len, max_dis, process_num, cur_dir, fixed=True):
    """Build a distance-pair word corpus from the input file and weight it with TF-IDF."""
    return tf_idf(dt_words(input_file, fixed_len, max_dis, process_num, cur_dir, fixed))
<file_sep>from ..utils.utils_algorithm import glove
def glove4vec(corpus, sample_size_list, fixed_len, **param_dict):
    """Embed a word corpus with GloVe, taking sizes from the parameter dict."""
    embedded_corpus = glove(
        corpus,
        sample_size_list,
        fixed_len,
        word_size=param_dict['word_size'],
        win_size=param_dict['win_size'],
        vec_dim=param_dict['vec_dim'],
    )
    return embedded_corpus
<file_sep>import os
import sys
import time
import subprocess
import threading
import numpy as np
from xml.etree import ElementTree
from ..SR.profile import get_blosum62
from ..utils.utils_pssm import produce_all_frequency, sep_file
def generate_bracket_seq(receive_file_path, bracket_file_path):
    """Run RNAfold on ``receive_file_path`` and write its dot-bracket output.

    Replaces the shell-interpolated ``RNAfold < in > out`` command with an
    argument list plus explicit stdin/stdout redirection, so file paths that
    contain spaces or shell metacharacters can no longer break (or inject
    into) the command line.
    """
    with open(receive_file_path) as seq_in, open(bracket_file_path, 'w') as bracket_out:
        subprocess.run(['RNAfold', '--noPS'], stdin=seq_in, stdout=bracket_out, check=False)
def rss_method(input_file):
    """Encode RNA secondary structure (paired/unpaired) per residue.

    Runs RNAfold (via generate_bracket_seq) to get the dot-bracket string of
    every sequence, then maps '.' (unpaired) to 0.0 and any bracket (paired)
    to 1.0.  The RSS feature dimensionality equals the sequence length.

    :param input_file: FASTA-style file of RNA sequences.
    :return: list of 1-D numpy arrays, one per sequence.
    """
    base_path, suffix = os.path.splitext(input_file)
    bracket_file = base_path + '_bracket' + suffix
    print("bracket_file: ", bracket_file)
    generate_bracket_seq(input_file, bracket_file)
    vectors = []
    with open(bracket_file) as r:
        lines = r.readlines()
    # RNAfold output comes in blocks of 3 lines: header, sequence, structure.
    for i in range(1, len(lines), 3):
        seq_len = len(lines[i]) - 1  # drop the trailing newline
        # The structure line carries a trailing " (energy)" suffix; keep only
        # the first seq_len characters (the dot-bracket string itself).
        structure = lines[i + 1][:seq_len]
        # '.' = unpaired -> 0.0, any bracket = paired -> 1.0.
        vectors.append(np.array([0.0 if ch == '.' else 1.0 for ch in structure]))
    return vectors
# SS starts
def ss_method(input_file, cur_dir, process_num):
    """Secondary-structure (SS) one-hot features via PSIPRED.

    Splits the input file into one file per sequence, runs PSIPRED on each
    (at most ``process_num`` concurrently) and one-hot encodes the predicted
    per-residue state (C/E/H).  Returns a list of per-sequence arrays kept in
    the input-file order.
    """
    sw_dir = cur_dir + '/software/'
    # sep_file splits the input into per-sequence files under dir_name and
    # returns the sequence names -- presumably in input order (confirm).
    dir_name, seq_name = sep_file(input_file)
    threads = []
    file_path_list = []
    # Bound the number of concurrent PSIPRED runs.
    sem = threading.Semaphore(process_num)
    for parent, dir_names, file_names in os.walk(dir_name):
        for filename in file_names:
            file_path = os.path.join(parent, filename)
            print('file_path: ', file_path)
            file_path_list.append(file_path)
            threads.append(threading.Thread(target=gen_ss_vector,
                                            args=(file_path, sw_dir, sem)))
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    # One-hot encoding of the three secondary-structure states.
    ss_mat_dict = {'C': [0, 0, 1], 'E': [0, 1, 0], 'H': [1, 0, 0]}
    for temp_file in file_path_list:
        file_name = str(temp_file).split('/')[-1].split('.')[0]
        # NOTE(review): gen_ss_vector chdirs the whole process into sw_dir, so
        # getcwd() here is expected to be where PSIPRED wrote its outputs.
        ss_file = os.getcwd() + '/' + file_name + '.ss'
        print(ss_file)
        with open(ss_file) as f:
            # Column 7 of each .ss line holds the predicted state letter.
            ss = [line[7] for line in f.readlines()]
        vector = []
        for i in range(len(ss)):
            vector.append(ss_mat_dict[ss[i]])
        # Clean up the PSIPRED output files (.ss, .ss2, .horiz).
        os.remove(ss_file)
        os.remove(ss_file[:-2] + 'ss2')
        os.remove(ss_file[:-2] + 'horiz')
        with open(temp_file) as f:
            lines = f.readlines()
        # Replace the name entry with its vector so results keep input order.
        seq_name[seq_name.index(lines[0].strip()[1:])] = np.array(vector)
    ss_vector = seq_name
    return ss_vector
def gen_ss_vector(temp_file, sw_dir, sem):
    """Worker: run PSIPRED on one single-sequence file (Linux/Unix only).

    The semaphore bounds the number of concurrent PSIPRED runs.  BUGFIX: the
    release now happens in a ``finally`` block -- previously the early
    Windows ``return`` (and any exception from the subprocess call) leaked a
    permit, which could deadlock the remaining worker threads.
    """
    sem.acquire()
    try:
        # The PSIPRED runner script only supports Linux/Unix.
        if sys.platform.startswith('win'):
            error_info = 'The SS method for One-hot encoding mode only support Linux/Unix system!'
            sys.stderr.write(error_info)
            return False
        else:
            os.chdir(sw_dir)
            cmd = sw_dir + 'psipred/runpsipred_single ' + temp_file
            subprocess.call(cmd, shell=True)
            time.sleep(2)
    finally:
        sem.release()
def sasa_method(input_file, cur_dir, process_num):
    """Solvent-accessible surface area (SASA) features via SPIDER2.

    Builds PSSM profiles for every sequence, runs SPIDER2's pred_pssm.py on
    each profile (at most ``process_num`` concurrently) and extracts the
    fourth column of every resulting .spd3 file -- presumably the predicted
    ASA value (confirm against SPIDER2 output format).

    :return: numpy array of per-sequence 1-D arrays.
    """
    sw_dir = cur_dir + '/software/'
    pssm_path, seq_name = sep_file(input_file)
    pssm_dir = produce_all_frequency(pssm_path, sw_dir, process_num)
    # Debug mode on/off
    # pssm_dir = cur_dir + "/data/results/Protein/sequence/OHE/SVM/SASA/all_seq/pssm"
    dir_name = os.path.split(pssm_dir)[0]
    xml_dir = dir_name + '/xml'
    final_result = ''.join([dir_name, '/final_result'])
    if not os.path.isdir(final_result):
        os.mkdir(final_result)
    dir_list = os.listdir(xml_dir)
    threads = []
    # Bound the number of concurrent SPIDER2 runs.
    sem = threading.Semaphore(process_num)
    index_list = []
    # Collect the numeric indices of the BLAST .xml outputs and sort them so
    # results keep the input order.
    for elem in dir_list:
        xml_full_path = ''.join([xml_dir, '/', elem])
        name, suffix = os.path.splitext(elem)
        if os.path.isfile(xml_full_path) and suffix == '.xml':
            index_list.append(int(name))
    index_list.sort()
    # index_list: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
    spd3_file_list = []
    for index in index_list:
        pssm_file = pssm_dir + '/' + str(index) + '.pssm'
        pssm_file_list = os.path.split(pssm_file)
        # The .spd3 output is expected next to the parent of the pssm directory.
        spd3_file = os.path.split(pssm_file_list[0])[0] + '/' + pssm_file_list[1].split('.')[0] + '.spd3'
        spd3_file_list.append(spd3_file)
        threads.append(threading.Thread(target=gen_sa_vec,
                                        args=(pssm_file, dir_name, sw_dir, sem)))
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    sasa_vectors = []
    for spd3_file in spd3_file_list:
        with open(spd3_file) as f:
            lines = f.readlines()
            # Column index 3 of each data line (header skipped) is extracted.
            vec_temp = [float(line.strip().split()[3]) for line in lines[1:]]
            vector = []
            for i in range(len(vec_temp)):
                vector.append(vec_temp[i])
            sasa_vectors.append(np.array(vector))
    return np.array(sasa_vectors)
def gen_sa_vec(pssm_file, dir_name, sw_dir, sem):
    """Worker: run SPIDER2's pred_pssm.py on one PSSM profile.

    If the PSSM file is missing (e.g. PSI-BLAST produced no profile), a
    fallback pseudo-profile is synthesised from BLOSUM62 scores of the raw
    sequence before SPIDER2 is invoked.

    NOTE(review): ``sem.release()`` is not inside a ``finally`` block, so an
    exception before it (e.g. from the subprocess call) would leak a
    semaphore permit -- confirm and consider a try/finally.
    """
    sem.acquire()
    if not os.path.exists(pssm_file):
        # Fallback: build a pseudo-PSSM from the BLOSUM62 matrix.
        seq_name = os.path.basename(pssm_file).split('.')[0]
        seq_path = dir_name + '/' + seq_name + '.txt'
        with open(seq_path) as f, open(pssm_file, 'w') as w:
            lines = f.readlines()
            protein_seq = lines[1].strip().upper()
            pssm = get_blosum62(protein_seq)
            pssm = np.array(pssm)
            # Prefix each row with a placeholder column and the residue letter.
            protein_seq = [np.array([1, x]) for x in list(protein_seq)]
            protein_seq = np.array(protein_seq)
            pssm_lists = np.hstack((protein_seq, pssm)).tolist()
            for pssm_list in pssm_lists:
                w.writelines('\t'.join(pssm_list) + '\n')
    # chdir into dir_name around the SPIDER2 call -- presumably so its output
    # files land there (confirm), then restore the previous CWD.
    pwd = os.getcwd()
    os.chdir(dir_name)
    cmd = 'python ' + sw_dir + 'SPIDER2_local/misc/pred_pssm.py ' + pssm_file
    subprocess.call(cmd, shell=True)
    os.chdir(pwd)
    time.sleep(2)
    sem.release()
def cs_method(input_file, cur_dir, process_num):
    """Conservation-score (CS) features via PSI-BLAST + rate4site.

    Builds PSSM/XML outputs for every sequence, converts each BLAST XML into
    an MSA, runs rate4site on it (at most ``process_num`` concurrently) and
    parses the per-residue conservation scores from the *_cs.txt files.

    :return: numpy array of per-sequence 1-D score arrays.
    """
    sw_dir = cur_dir + '/software/'
    dirname, seq_name = sep_file(input_file)
    pssm_dir = produce_all_frequency(dirname, sw_dir, process_num)
    dir_name = os.path.split(pssm_dir)[0]
    xml_dir = dir_name + '/xml'
    final_result = ''.join([dir_name, '/final_result'])
    if not os.path.isdir(final_result):
        os.mkdir(final_result)
    dir_list = os.listdir(xml_dir)
    index_list = []
    # Collect numeric indices of the BLAST .xml files, sorted to keep input order.
    for elem in dir_list:
        xml_full_path = ''.join([xml_dir, '/', elem])
        name, suffix = os.path.splitext(elem)
        if os.path.isfile(xml_full_path) and suffix == '.xml':
            index_list.append(int(name))
    index_list.sort()
    threads = []
    # Bound the number of concurrent rate4site runs.
    sem = threading.Semaphore(process_num)
    ec_vector = []
    cs_file_list = []
    for index in index_list:
        fasta_file = dir_name + '/' + str(index) + '.fasta'
        cs_file = os.path.splitext(fasta_file)[0] + '_cs.txt'
        cs_file_list.append(cs_file)
        threads.append(threading.Thread(target=gen_ec_vector,
                                        args=(xml_dir, index, dir_name, sw_dir, sem)))
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    for cs_file in cs_file_list:
        with open(cs_file) as f:
            lines = f.readlines()
            # rate4site prints '-nan' for undefined scores; map those to 0.0.
            # Data lines start with a digit on Windows output and with a
            # space on Unix output, hence the platform split below.
            if sys.platform.startswith('win'):
                vec_temp = [0.0000 if line.strip().split()[2] == '-nan' else float(line.strip().split()[2]) for line in
                            lines if line[0].isdigit()]
            else:
                vec_temp = [0.0000 if line.strip().split()[2] == '-nan' else float(line.strip().split()[2]) for line in
                            lines if line[0] == ' ']
            ec_vector.append(np.array(vec_temp))
    return np.array(ec_vector)
def gen_ec_vector(xml_dir, index, dir_name, sw_dir, sem):
    """Worker: build an MSA from one BLAST XML report and run rate4site on it.

    Writes ``<index>.fasta`` (the MSA rows wrapped in FASTA headers) and
    ``<index>_cs.txt`` (rate4site conservation scores).
    """
    sem.acquire()
    xml_full_path = xml_dir + '/' + str(index) + '.xml'
    txt_full_path = dir_name + '/' + str(index) + '.txt'
    msa_file = get_msa(xml_full_path, txt_full_path)
    fasta_file = dir_name + '/' + str(index) + '.fasta'
    with open(msa_file) as f, open(fasta_file, 'w') as w:
        lines = f.readlines()
        c = ''
        # Only the first 290 alignment rows are kept -- presumably to bound
        # rate4site's runtime (confirm).
        for i, line in enumerate(lines):
            if i < 290:
                c += '>' + str(i) + '\n' + line
        w.writelines(c)
    cs_file = os.path.splitext(fasta_file)[0] + '_cs.txt'
    # rate4site binary differs per platform.
    if sys.platform.startswith('win'):
        cmd = sw_dir + 'psiblast/rate4site_slow.exe -s ' + fasta_file + ' -o ' + cs_file
        subprocess.call(cmd, shell=True)
    else:
        cmd = sw_dir + 'psiblast/rate4site -s ' + fasta_file + ' -o ' + cs_file
        subprocess.call(cmd, shell=True)
    time.sleep(2)
    sem.release()
def get_msa(xml_full_path, file_path):
    """Parse a PSI-BLAST XML report into a multiple-sequence-alignment file.

    Hits from the last iteration with e-value <= 1e-4 are padded with '-' to
    the query length, the columns where the query has a gap are removed, and
    ambiguous residue codes (X/B/Z/U/J/O) are replaced by '-'.  The query
    sequence is forced to be the first row.  The alignment is written next to
    ``file_path`` with an ``.msa`` extension and that path is returned.
    """
    evalue_threshold = 0.0001
    ambiguous_codes = 'XBZUJO'
    msa_rows = []
    tree = ElementTree.ElementTree(file=xml_full_path)
    query_len = int(tree.find('BlastOutput_query-len').text)
    # Only the last PSI-BLAST iteration is considered.
    last_iteration = tree.findall('BlastOutput_iterations/Iteration')[-1]
    for hit in list(last_iteration.find('Iteration_hits')):
        # Skip hits above the e-value threshold.
        if float(hit.find('Hit_hsps/Hsp/Hsp_evalue').text) > evalue_threshold:
            continue
        query_from = int(hit.find('Hit_hsps/Hsp/Hsp_query-from').text)
        query_to = int(hit.find('Hit_hsps/Hsp/Hsp_query-to').text)
        hsp_qseq = hit.find('Hit_hsps/Hsp/Hsp_qseq').text
        hsp_hseq = hit.find('Hit_hsps/Hsp/Hsp_hseq').text
        # Drop the hit residues aligned to gap columns of the query: mark
        # them with '*' first, then strip every '*'.
        marked = list(hsp_hseq)
        for pos, aa in enumerate(hsp_qseq):
            if aa == '-':
                marked[pos] = '*'
        hsp_hseq = ''.join(marked).replace('*', '')
        # Ambiguous amino-acid codes become gaps.
        for bad in ambiguous_codes:
            hsp_hseq = hsp_hseq.replace(bad, '-')
        # Pad with gaps so every row spans the full query length.
        row = '-' * (query_from - 1) + hsp_hseq + '-' * (query_len - query_to)
        msa_rows.append(row)
    # Read the query sequence itself (second line of the per-sequence file).
    with open(file_path) as handle:
        handle.readline()  # skip the id line
        fasta_seq = handle.readline().strip().upper()
    if not msa_rows:
        # No usable hit: fall back to the (cleaned) query sequence itself.
        for bad in ambiguous_codes:
            fasta_seq = fasta_seq.replace(bad, '-')
        msa_rows.append(fasta_seq)
    # rate4site needs at least two rows; duplicate a lone row.
    if len(msa_rows) == 1:
        msa_rows.append(msa_rows[0])
    # Make sure the query sequence is the first alignment row.
    if msa_rows[0] != fasta_seq:
        if fasta_seq in msa_rows:
            seq_pos = msa_rows.index(fasta_seq)
            msa_rows[0], msa_rows[seq_pos] = msa_rows[seq_pos], msa_rows[0]
        else:
            msa_rows.insert(0, fasta_seq)
    # Write the alignment next to the input file, one row per line.
    msa_file = os.path.splitext(file_path)[0] + '.msa'
    with open(msa_file, 'w') as output:
        output.write('\n'.join(msa_rows))
    return msa_file
<file_sep>import os
import time
import shutil
from FeatureExtraction import feature_extraction
from MachineLearning import machine_learning
from PerformanceEvaluation import performance_evaluation
def main(args):
    """Entry point: feature extraction, parameter selection and evaluation."""
    print("\nAnalysis Start\n")
    start_time = time.time()
    # NOTE(review): current_path is computed but unused in this function.
    current_path = os.path.dirname(os.path.realpath(__file__))
    args.current_dir = os.path.dirname(os.getcwd())
    args.result_dir = args.current_dir + '/results/'
    # (Re)create an empty results directory; OSError is deliberately ignored
    # so a partially removable directory does not abort the run.
    if not os.path.exists(args.result_dir):
        try:
            os.makedirs(args.result_dir)
            print('result_dir: ', args.result_dir)
        except OSError:
            pass
    else:
        try:
            shutil.rmtree(args.result_dir)
            print('result_dir: ', args.result_dir)
            os.makedirs(args.result_dir)
        except OSError:
            pass
    print("=================Prepare step=================")
    print('Sequence type: '+args.type)
    print('Machine learning method: '+args.method)
    print('Result direction: '+args.result_dir)
    print("=================Feature extraction step=================")
    output_array, label_list, possible_parameter_list = feature_extraction(args)
    print("=================Parameter selection step=================")
    best_parameter_pair = machine_learning(args, output_array, args.folds, label_list, possible_parameter_list)
    print("=================Performance evaluation step=================")
    performance_evaluation(args, output_array, args.folds, label_list, best_parameter_pair)
    print("\nAnalysis finished.")
    print("Time = %.2fs" % (time.time()-start_time))
if __name__ == '__main__':
    import argparse

    parse = argparse.ArgumentParser(prog='BioSeq-Analysis')
    # Feature extraction
    parse.add_argument('-type', type=str, choices=['DNA', 'RNA', 'Protein'], required=True, help="Type of input sequence.")
    # Encoding scheme
    parse.add_argument('-code', type=str, choices=['One-hot', 'BOW', 'WE'], required=True)
    parse.add_argument('-word', type=str, choices=['Kmer'])
    parse.add_argument('-word_size', type=int, default=[3])
    # Classifier construction
    parse.add_argument('-method', type=str, choices=['SVM', 'LinearSVM', 'RF', 'KNN', 'AdaBoost', 'NB', 'LDA', 'QDA'], required=True, help="Machine learning method you choose.")
    # Classifier parameters -- SVM
    parse.add_argument('-cost', type=int, nargs='*')
    parse.add_argument('-gamma', type=int, nargs='*')
    # Classifier parameters -- RF
    parse.add_argument('-tree', type=int, nargs='*')
    # Classifier parameters -- KNN
    parse.add_argument('-ngb', type=int, nargs='*')
    # Cross-validation fold count.  BUGFIX: choices was the single string
    # '3, 5, 10', which made every explicit -test value (even the default
    # '5') an invalid choice; the values must be separate strings.
    parse.add_argument('-test', choices=['3', '5', '10'], default='5')
    # Input files and labels
    parse.add_argument('-seq_file', nargs='*', required=True, help="Input file.")
    parse.add_argument('-fixed_len', type=int)
    parse.add_argument('-label', type=int, nargs='*', required=True, help="The corresponding label of input file.")
    argv = parse.parse_args()
    main(argv)
<file_sep>import torch
from torch import nn
class AutoEncoder(nn.Module):
    """Single-hidden-layer autoencoder whose hidden layer doubles as a feature extractor.

    Layout: input -> fc (Linear + Sigmoid, hidden_dim) -> encoder (Linear, n_class)
            -> decoder (Linear + Sigmoid + Linear) back to input_size.
    """

    def __init__(self, input_size, hidden_dim, n_class):
        super(AutoEncoder, self).__init__()
        self.input_size = input_size
        self.hidden_dim = hidden_dim
        # Shared feature layer used both for reconstruction and feature extraction.
        self.fc = nn.Sequential(
            nn.Linear(input_size, hidden_dim),
            nn.Sigmoid(),
        )
        self.encoder = nn.Linear(hidden_dim, n_class)
        self.decoder = nn.Sequential(
            nn.Linear(n_class, hidden_dim),
            nn.Sigmoid(),
            nn.Linear(hidden_dim, input_size),
        )

    def extract_feature(self, x):
        """Return the hidden-layer representation of ``x``."""
        return self.fc(x)

    def forward(self, x):
        """Return ``(encode, decode)``: the bottleneck code and the reconstruction."""
        hidden = self.extract_feature(x)
        code = self.encoder(hidden)
        reconstruction = self.decoder(code)
        return code, reconstruction
<file_sep>from ..utils.utils_words import rev_km_words
from ..utils.utils_algorithm import text_rank
def rev_km_text_rank(input_file, alphabet, fixed_len, word_size, alpha, fixed=True):
    """Split sequences into reverse-complement k-mer words and rank them with TextRank."""
    word_corpus = rev_km_words(input_file, alphabet, fixed_len, word_size, fixed)
    ranked_corpus = text_rank(word_corpus, alpha)
    return ranked_corpus
<file_sep>import numpy as np
import torch
from torch.utils.data import DataLoader
from ..utils.utils_motif import MotifFile2Matrix, motif_init
from ..OHE.ei import EvolutionaryInformation2Vectors
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # let torch pick the GPU when available; a GPU environment is strongly recommended for speed
def mat_list2mat(mat_list, fixed_len):
    """Stack variable-length matrices into one array of shape (n, fixed_len, width).

    Matrices shorter than ``fixed_len`` rows are zero-padded at the bottom;
    longer ones are truncated.  The column count is taken from the first matrix.

    :param mat_list: list of 2-D numpy arrays sharing the same column count.
    :param fixed_len: target number of rows per matrix.
    :return: 3-D numpy array of the padded/truncated matrices.
    """
    n_cols = mat_list[0].shape[1]
    padded = []
    for mat in mat_list:
        n_rows = mat.shape[0]
        if n_rows > fixed_len:
            # Too long: keep only the first fixed_len rows.
            padded.append(mat[:fixed_len, :])
        else:
            # Too short (or exact): copy into a zero buffer of the target shape.
            buffer = np.zeros((fixed_len, n_cols))
            buffer[:n_rows, :] = mat
            padded.append(buffer)
    return np.array(padded)
def motif_pssm(input_file, alphabet, process_num, batch_size, motif_file, motif_database, fixed_len, cur_dir):
    """Motif-based features computed on PSSM profiles.

    Builds a PSSM matrix per sequence, pads/cuts each to ``fixed_len`` rows,
    then scans them batch-wise (on DEVICE) against the motif matrices loaded
    from ``motif_file`` in either 'Mega' or ELM format.

    :return: numpy array of per-sequence motif feature vectors.
    """
    # all_data = EvolutionaryInformation2Vectors(alphabet, fixed_len, cur_dir, True,
    #                                            mat=True).pssm(input_file, process_num)
    vec_mat_list = EvolutionaryInformation2Vectors(alphabet, cur_dir).pssm(input_file, process_num)
    # Pad/truncate every PSSM to fixed_len rows so they batch cleanly.
    all_data = mat_list2mat(vec_mat_list, fixed_len)
    # shuffle=False keeps the output rows in input-sequence order.
    data_loader = DataLoader(all_data, batch_size=batch_size, shuffle=False)
    if motif_database == 'Mega':
        motifs = MotifFile2Matrix(motif_file).mega_motif_to_matrix()
    else:
        motifs = MotifFile2Matrix(motif_file).elm_motif_to_matrix()
    # print(all_data.shape)  # (20, 100, 20)
    motif_features = []
    # Inference only -- no gradients needed.
    with torch.no_grad():
        for mat in data_loader:
            # print(mat.size())  # torch.Size([5, 100, 20]) [batch_size, seq_len,
            tensor = mat.to(DEVICE)
            batch_motif_feature = motif_init(tensor, motifs)
            motif_features += batch_motif_feature.tolist()
    return np.array(motif_features)
<file_sep>import multiprocessing
import os
import time
from CheckAll import Batch_Path_Seq, DeepLearning, Classification, Method_Semantic_Similarity, prepare4train_seq
from CheckAll import Method_One_Hot_Enc, Feature_Extract_Mode, check_contain_chinese, seq_sys_check, dl_params_check, \
seq_feature_check, mode_params_check, results_dir_check, ml_params_check, make_params_dicts, Final_Path, All_Words
from FeatureAnalysis import fa_process
from FeatureExtractionMode.utils.utils_write import seq_file2one, gen_label_array, out_seq_file, out_ind_file, \
opt_file_copy, out_dl_seq_file, create_all_seq_file, fixed_len_control
from FeatureExtractionSeq import one_seq_fe_process
from MachineLearningAlgorithm.Classification.dl_machine import dl_cv_process, dl_ind_process
from MachineLearningAlgorithm.utils.utils_read import files2vectors_seq, read_dl_vec4seq
from MachineLearningSeq import one_ml_process, params_select, ml_results, ind_ml_results
from FeatureExtractionMode.SR.pse import AAIndex
def create_results_dir(args, cur_dir):
if args.bp == 1:
results_dir = cur_dir + Batch_Path_Seq + str(args.category) + "/" + str(args.mode) + "/"
if args.method is not None:
results_dir += str(args.method) + "/"
if args.in_tm is not None:
results_dir += str(args.in_tm) + "/"
if args.in_af is not None:
results_dir += str(args.in_af) + "/"
if args.words is not None:
results_dir += str(args.words) + "/"
if args.score != 'none':
results_dir += str(args.score) + "/"
else:
results_dir = cur_dir + Final_Path
results_dir_check(results_dir)
return results_dir
def ml_fe_process(args):
# 合并序列文件
input_one_file = create_all_seq_file(args.seq_file, args.results_dir)
# 统计样本数目和序列长度
sp_num_list, seq_len_list = seq_file2one(args.category, args.seq_file, args.label, input_one_file)
# 生成标签数组
label_array = gen_label_array(sp_num_list, args.label)
# 控制序列的固定长度(只需要操作一次)
args.fixed_len = fixed_len_control(seq_len_list, args.fixed_len)
# 多进程计算
pool = multiprocessing.Pool(args.cpu)
# 对每个mode的method进行检查
seq_feature_check(args)
# 对SVM或RF的参数进行检查并生成参数字典集合
all_params_list_dict = {}
all_params_list_dict = ml_params_check(args, all_params_list_dict)
# 对每个mode的words和method的参数进行检查
# params_list_dict 为只包括特征提取的参数的字典, all_params_list_dict为包含所有参数的字典
params_list_dict, all_params_list_dict = mode_params_check(args, all_params_list_dict)
# 列表字典 ---> 字典列表
params_dict_list = make_params_dicts(all_params_list_dict)
# print(params_dict_list)
# exit()
# 在参数便利前进行一系列准备工作: 1. 固定划分;2.设定指标;3.指定任务类型
args = prepare4train_seq(args, label_array, dl=False)
# 指定分析层面
args.res = False
params_dict_list_pro = []
print('\n')
print('Parameter Selection Processing...')
print('\n')
for i in range(len(params_dict_list)):
params_dict = params_dict_list[i]
# 生成特征向量文件名
vec_files = out_seq_file(args.label, args.format, args.results_dir, params_dict, params_list_dict)
params_dict['out_files'] = vec_files
# 注意多进程计算的debug
# one_ml_fe_process(args, input_one_file, label_array, vec_files, sp_num_list, args.folds, **params_dict)
params_dict_list_pro.append(pool.apply_async(one_ml_fe_process, (args, input_one_file, label_array, vec_files,
sp_num_list, args.folds, params_dict)))
pool.close()
pool.join()
# exit()
# 根据指标进行参数选择
params_selected = params_select(params_dict_list_pro, args.results_dir)
# 将最优的特征向量文件从"all_fea_files/"文件夹下复制到主文件下
opt_files = opt_file_copy(params_selected['out_files'], args.results_dir)
# 获取最优特征向量
opt_vectors = files2vectors_seq(opt_files, args.format)
print(' Shape of Optimal Feature vectors: [%d, %d] '.center(66, '*') % (opt_vectors.shape[0], opt_vectors.shape[1]))
# 特征分析
if args.score == 'none':
opt_vectors = fa_process(args, opt_vectors, label_array, after_ps=True, ind=False)
print(' Shape of Optimal Feature vectors after FA process: [%d, %d] '.center(66, '*') % (opt_vectors.shape[0],
opt_vectors.shape[1]))
# 构建分类器
ml_results(args, opt_vectors, label_array, args.folds, params_selected['out_files'], params_selected)
# -------- 独立测试-------- #
# 即,将独立测试数据集在最优的model上进行测试
if args.ind_seq_file is not None:
ind_ml_fe_process(args, opt_vectors, label_array, params_selected)
def one_ml_fe_process(args, input_one_file, labels, vec_files, sp_num_list, folds, params_dict):
# 特征提取
# args.res = False
one_seq_fe_process(args, input_one_file, labels, vec_files, sp_num_list, False, **params_dict)
# 获取特征向量
vectors = files2vectors_seq(vec_files, args.format)
print(' Shape of Feature vectors: [%d, %d] '.center(66, '*') % (vectors.shape[0], vectors.shape[1]))
if args.score == 'none':
vectors = fa_process(args, vectors, labels, after_ps=False, ind=False)
print(' Shape of Feature vectors after FA process: [%d, %d] '.center(66, '*') % (vectors.shape[0],
vectors.shape[1]))
params_dict = one_ml_process(args, vectors, labels, folds, vec_files, params_dict)
return params_dict
def ind_ml_fe_process(args, vectors, labels, params_selected):
print('########################## Independent Test Begin ##########################\n')
# 合并独立测试集序列文件
ind_input_one_file = create_all_seq_file(args.ind_seq_file, args.results_dir)
# 统计独立测试集样本数目和序列长度
ind_sp_num_list, ind_seq_len_list = seq_file2one(args.category, args.ind_seq_file, args.label, ind_input_one_file)
# 生成独立测试集标签数组
ind_label_array = gen_label_array(ind_sp_num_list, args.label)
# 生成独立测试集特征向量文件名
ind_out_files = out_ind_file(args.label, args.format, args.results_dir)
# 特征提取
one_seq_fe_process(args, ind_input_one_file, ind_label_array, ind_out_files, ind_sp_num_list, True,
**params_selected)
# 获取独立测试集特征向量
ind_vectors = files2vectors_seq(ind_out_files, args.format)
print(' Shape of Ind Feature vectors: [%d, %d] '.center(66, '*') % (ind_vectors.shape[0], ind_vectors.shape[1]))
if args.score == 'none':
ind_vectors = fa_process(args, ind_vectors, ind_label_array, after_ps=True, ind=True)
print(' Shape of Ind Feature vectors after FA process: [%d, %d] '.center(66, '*') % (ind_vectors.shape[0],
ind_vectors.shape[1]))
# 为独立测试集构建分类器
args.ind_vec_file = ind_out_files
ind_ml_results(args, vectors, labels, ind_vectors, ind_label_array, params_selected)
print('########################## Independent Test Finish ##########################\n')
def dl_fe_process(args):
# 合并序列文件
input_one_file = create_all_seq_file(args.seq_file, args.results_dir)
# 统计样本数目和序列长度
sp_num_list, seq_len_list = seq_file2one(args.category, args.seq_file, args.label, input_one_file)
# 生成标签数组
label_array = gen_label_array(sp_num_list, args.label)
# 控制序列的固定长度(仅仅需要在基准数据集上操作一次)
args.fixed_len = fixed_len_control(seq_len_list, args.fixed_len)
all_params_list_dict = {}
all_params_list_dict = dl_params_check(args, all_params_list_dict)
# 对每个mode的words和method的参数进行检查
# params_list_dict 为只包括特征提取的参数的字典, all_params_list_dict为包含所有参数的字典
params_list_dict, all_params_list_dict = mode_params_check(args, all_params_list_dict)
# 列表字典 ---> 字典列表 --> 参数字典
params_dict = make_params_dicts(all_params_list_dict)[0]
# 特征向量文件命名
out_files = out_dl_seq_file(args.label, args.results_dir, ind=False)
# 深度特征向量提取
one_seq_fe_process(args, input_one_file, label_array, out_files, sp_num_list, False, **params_dict)
# 获取深度特征向量
# fixed_seq_len_list: 最大序列长度为fixed_len的序列长度的列表
vectors, fixed_seq_len_list = read_dl_vec4seq(args.fixed_len, out_files, return_sp=False)
# 深度学习的独立测试和交叉验证分开
if args.ind_seq_file is None:
# 在参数便利前进行一系列准备工作: 1. 固定划分;2.设定指标;3.指定任务类型
args = prepare4train_seq(args, label_array, dl=True)
# 构建深度学习分类器
dl_cv_process(args.ml, vectors, label_array, fixed_seq_len_list, args.fixed_len, args.folds, args.results_dir,
params_dict)
else:
# 独立验证开始
ind_dl_fe_process(args, vectors, label_array, fixed_seq_len_list, params_dict)
def ind_dl_fe_process(args, vectors, labels, fixed_seq_len_list, params_dict):
print('########################## Independent Test Begin ##########################\n')
# 合并独立测试集序列文件
ind_input_one_file = create_all_seq_file(args.ind_seq_file, args.results_dir)
# 统计独立测试集样本数目和序列长度
ind_sp_num_list, ind_seq_len_list = seq_file2one(args.category, args.ind_seq_file, args.label, ind_input_one_file)
# 生成独立测试集标签数组
ind_label_array = gen_label_array(ind_sp_num_list, args.label)
# 生成独立测试集特征向量文件名
ind_out_files = out_dl_seq_file(args.label, args.results_dir, ind=True)
# 特征提取
one_seq_fe_process(args, ind_input_one_file, ind_label_array, ind_out_files, ind_sp_num_list, True, **params_dict)
# 获取独立测试集特征向量
ind_vectors, ind_fixed_seq_len_list = read_dl_vec4seq(args.fixed_len, ind_out_files, return_sp=False)
# 为独立测试构建深度学习分类器
dl_ind_process(args.ml, vectors, labels, fixed_seq_len_list, ind_vectors, ind_label_array, ind_fixed_seq_len_list,
args.fixed_len, args.results_dir, params_dict)
print('########################## Independent Test Finish ##########################\n')
def main(args):
print("\nStep into analysis...\n")
start_time = time.time()
current_path = os.path.dirname(os.path.realpath(__file__))
args.current_dir = os.path.dirname(os.getcwd())
# 判断中文目录
check_contain_chinese(current_path)
# 判断mode和ml的组合是否合理
seq_sys_check(args)
# 生成结果文件夹
args.results_dir = create_results_dir(args, args.current_dir)
if args.ml in DeepLearning:
args.dl = 1
dl_fe_process(args)
else:
args.dl = 0
ml_fe_process(args)
print("Done.")
print(("Used time: %.2fs" % (time.time() - start_time)))
if __name__ == '__main__':
import argparse
parse = argparse.ArgumentParser(prog='BioSeq-NLP', description="Step into analysis, please select parameters ")
# ----------------------- parameters for FeatureExtraction ---------------------- #
parse.add_argument('-category', type=str, choices=['DNA', 'RNA', 'Protein'], required=True,
help="The category of input sequences.")
parse.add_argument('-mode', type=str, choices=Feature_Extract_Mode, required=True,
help="The feature extraction mode for input sequence which analogies with NLP, "
"for example: bag of words (BOW).")
parse.add_argument('-score', type=str, choices=Method_Semantic_Similarity, default='none',
help="Choose whether calculate semantic similarity score for feature vectors.")
parse.add_argument('-words', type=str, choices=All_Words,
help="If you select mode in ['BOW', 'TF-IDF', 'TR', 'WE', 'TM'], you should select word for "
"corresponding mode, for example Mismatch. Pay attention to that "
"different category has different words, please reference to manual.")
parse.add_argument('-method', type=str,
help="If you select mode in ['OHE', 'WE', 'TM', 'SR', 'AF'], you should select method for "
"corresponding mode, for example select 'LDA' for 'TM' mode, select 'word2vec' for 'WE'"
" mode and so on. For different category, the methods belong to 'OHE' and 'SR' mode is "
"different, please reference to manual")
parse.add_argument('-auto_opt', type=int, default=0, choices=[0, 1, 2],
help="Choose whether automatically traverse the argument list. "
"2 is automatically traversing the argument list set ahead, 1 is automatically traversing "
"the argument list in a smaller range, while 0 is not (default=0).")
# parameters for one-hot encoding
parse.add_argument('-cpu', type=int, default=1,
help="The maximum number of CPU cores used for multiprocessing in generating frequency profile"
" and the number of CPU cores used for multiprocessing during parameter selection process "
"(default=1).")
parse.add_argument('-pp_file', type=str,
help="The physicochemical properties file user input.\n"
"if input nothing, the default physicochemical properties is:\n"
"DNA dinucleotide: Rise, Roll, Shift, Slide, Tilt, Twist.\n"
"DNA trinucleotide: Dnase I, Bendability (DNAse).\n"
"RNA: Rise, Roll, Shift, Slide, Tilt, Twist.\n"
"Protein: Hydrophobicity, Hydrophilicity, Mass.")
parse.add_argument('-rss_file', type=str,
help="The second structure file for all input sequences.(The order of a specific sequence "
"should be corresponding to the order in 'all_seq_file.txt' file")
# parameters for bag of words
parse.add_argument('-word_size', type=int, nargs='*', default=[3],
help="The word size of sequences for specific words "
"(the range of word_size is between 1 and 6).")
parse.add_argument('-mis_num', type=int, nargs='*', default=[1],
help="For Mismatch words. The max value inexact matching, mis_num should smaller than word_size "
"(the range of mis_num is between 1 and 6).")
parse.add_argument('-delta', type=float, nargs='*', default=[0.5],
help="For Subsequence words. The value of penalized factor "
"(the range of delta is between 0 and 1).")
parse.add_argument('-top_n', type=int, nargs='*', default=[1],
help="The maximum distance between structure statuses (the range of delta is between 1 and 4)."
"It works with Top-n-gram words.")
parse.add_argument('-max_dis', type=int, nargs='*', default=[1],
help="The max distance value for DR words and DT words (default range is from 1 to 4).")
# parameters for TextRank
parse.add_argument('-alpha', type=float, default=0.85,
help="Damping parameter for PageRank which used in 'TR' mode, default=0.85.")
# parameters for word embedding
parse.add_argument('-win_size', type=int,
help="The maximum distance between the current and predicted word within a sentence for "
"'word2vec' in 'WE' mode, etc.")
parse.add_argument('-vec_dim', type=int,
help="The output dimension of feature vectors for 'Glove' model and dimensionality of a word "
"vectors for 'word2vec' and 'fastText' method.")
parse.add_argument('-sg', type=int, default=0,
help="Training algorithm for 'word2vec' and 'fastText' method. 1 for skip-gram, otherwise CBOW.")
# parameters for topic model
parse.add_argument('-in_tm', type=str, choices=['BOW', 'TF-IDF', 'TextRank'],
help="While topic model implement subject extraction from a text, the text need to be "
"preprocessed by one of mode in choices.")
parse.add_argument('-com_prop', type=float, default=0.8,
help="If choose topic model mode, please set component proportion for output feature vectors.")
# parameters for syntax rules
parse.add_argument('-oli', type=int, choices=[0, 1], default=0,
help="Choose one kind of Oligonucleotide (default=0): 0 represents dinucleotid; "
"1 represents trinucleotide. For MAC, GAC, NMBAC methods of 'SR' mode.")
parse.add_argument('-lag', type=int, nargs='*', default=[1],
help="The value of lag (default=1). For DACC, TACC, ACC, ACC-PSSM, AC-PSSM or CC-PSSM methods"
" and so on.")
parse.add_argument('-lamada', type=int, nargs='*', default=[1],
help="The value of lamada (default=1). For MAC, PDT, PDT-Profile, GAC or NMBAC methods "
"and so on.")
parse.add_argument('-w', type=float, nargs='*', default=[0.8],
help="The value of weight (default=0.1). For ZCPseKNC method.")
parse.add_argument('-k', type=int, nargs='*', default=[3],
help="The value of Kmer, it works only with ZCPseKNC method.")
parse.add_argument('-n', type=int, nargs='*', default=[1],
help="The maximum distance between structure statuses (default=1). "
"It works with PDT-Profile method.")
parse.add_argument('-ui_file', help="The user-defined physicochemical property file.")
parse.add_argument('-all_index', dest='a', action='store_true', help="Choose all physicochemical indices.")
parse.add_argument('-no_all_index', dest='a', action='store_false',
help="Do not choose all physicochemical indices, default.")
parse.set_defaults(a=False)
# parameters for automatic features/deep learning algorithm
parse.add_argument('-in_af', type=str, choices=Method_One_Hot_Enc,
help="Choose the input for 'AF' mode from 'OHE' mode.")
parse.add_argument('-fea_dim', type=int, default=256,
help="The output dimension of feature vectors, it works with 'AF' mode.")
parse.add_argument('-motif_database', type=str, choices=['ELM', 'Mega'],
help="The database where input motif file comes from.")
parse.add_argument('-motif_file', type=str,
help="The short linear motifs from ELM database or structural motifs from the MegaMotifBase.")
# ----------------------- parameters for feature analysis ---------------------- #
# standardization or normalization
parse.add_argument('-sn', choices=['min-max-scale', 'standard-scale', 'L1-normalize', 'L2-normalize', 'none'],
default='none', help=" Choose method of standardization or normalization for feature vectors.")
# clustering
parse.add_argument('-cl', choices=['AP', 'DBSCAN', 'GMM', 'AGNES', 'Kmeans', 'none'], default='none',
help="Choose method for clustering.")
parse.add_argument('-cm', default='sample', choices=['feature', 'sample'], help="The mode for clustering.")
parse.add_argument('-nc', type=int, help="The number of clusters.")
# feature select
parse.add_argument('-fs', choices=['chi2', 'F-value', 'MIC', 'RFE', 'Tree', 'none'], default='none',
help="Select feature select method.")
parse.add_argument('-nf', type=int, help="The number of features after feature selection.")
# dimension reduction
parse.add_argument('-dr', choices=['PCA', 'KernelPCA', 'TSVD', 'none'], default='none',
help="Choose method for dimension reduction.")
parse.add_argument('-np', type=int, help="The dimension of main component after dimension reduction.")
# rdb
parse.add_argument('-rdb', choices=['no', 'fs', 'dr'], default='no',
help="Reduce dimension by:\n"
" 'no'---none;\n"
" 'fs'---apply feature selection to parameter selection procedure;\n"
" 'dr'---apply dimension reduction to parameter selection procedure.\n")
# ----------------------- parameters for MachineLearning---------------------- #
parse.add_argument('-ml', type=str, choices=Classification, required=True,
help="The machine learning algorithm, for example: Support Vector Machine(SVM).")
parse.add_argument('-grid', type=int, nargs='*', choices=[0, 1], default=0,
help="grid = 0 for rough grid search, grid = 1 for meticulous grid search.")
# parameters for svm
parse.add_argument('-cost', type=int, nargs='*', help="Regularization parameter of 'SVM'.")
parse.add_argument('-gamma', type=int, nargs='*', help="Kernel coefficient for 'rbf' of 'SVM'.")
# parameters for rf
parse.add_argument('-tree', type=int, nargs='*', help="The number of trees in the forest for 'RF'.")
# ----------------------- parameters for DeepLearning---------------------- #
parse.add_argument('-lr', type=float, default=0.01,
help="The value of learning rate, it works with 'AF' mode and deep learning algorithm.")
# 原始为0.99 by wzb at 3.29
parse.add_argument('-epochs', type=int,
help="The epoch number of train process for 'AF' mode and deep learning algorithm.")
parse.add_argument('-batch_size', type=int, default=5,
help="The size of mini-batch, it works with 'AF' mode and deep learning algorithm.")
parse.add_argument('-dropout', type=float, default=0.5,
help="The value of dropout prob, it works with 'AF' mode and deep learning algorithm.")
# parameters for LSTM, GRU
parse.add_argument('-hidden_dim', type=int, default=256,
help="The size of the intermediate (a.k.a., feed forward) layer, it works with 'AF' mode, "
"GRU and LSTM.") # 256
parse.add_argument('-n_layer', type=int, default=2,
help="The number of units for LSTM and GRU, it works with 'AF' mode, GRU and LSTM.")
# parameters for CNN
parse.add_argument('-out_channels', type=int, default=256, help="The number of output channels for 'CNN'.")
parse.add_argument('-kernel_size', type=int, default=5, help="The size of stride for CNN.")
# parameters for Transformer and Weighted-Transformer
parse.add_argument('-d_model', type=int, default=256,
help="The dimension of multi-head attention layer for Transformer or Weighted-Transformer.")
parse.add_argument('-d_ff', type=int, default=1024,
help="The dimension of fully connected layer of Transformer or Weighted-Transformer.")
parse.add_argument('-n_heads', type=int, default=4,
help="The number of heads for Transformer or Weighted-Transformer.")
# parameters for Reformer
parse.add_argument('-n_chunk', type=int, default=8,
help="The number of chunks for processing lsh attention.")
parse.add_argument('-rounds', type=int, default=1024,
help="The number of rounds for multiple rounds of hashing to reduce probability that similar "
"items fall in different buckets.")
parse.add_argument('-bucket_length', type=int, default=64,
help="Average size of qk per bucket, 64 was recommended in paper")
# parameters for ML parameter selection and cross validation
parse.add_argument('-metric', type=str, choices=['Acc', 'MCC', 'AUC', 'BAcc', 'F1'], default='Acc',
help="The metric for parameter selection")
parse.add_argument('-cv', choices=['5', '10', 'j'], default='5',
help="The cross validation mode.\n"
"5 or 10: 5-fold or 10-fold cross validation.\n"
"j: (character 'j') jackknife cross validation.")
parse.add_argument('-sp', type=str, choices=['none', 'over', 'under', 'combine'], default='none',
help="Select technique for oversampling.")
# ----------------------- parameters for input and output ---------------------- #
# parameters for input
parse.add_argument('-seq_file', nargs='*', required=True, help="The input files in FASTA format.")
parse.add_argument('-label', type=int, nargs='*', required=True,
help="The corresponding label of input sequence files. For deep learning method, the label can "
"only set as positive integer")
parse.add_argument('-ind_seq_file', nargs='*', help="The input independent test files in FASTA format.")
parse.add_argument('-fixed_len', type=int,
help="The length of sequence will be fixed via cutting or padding. If you don't set "
"value for 'fixed_len', it will be the maximum length of all input sequences. ")
# parameters for output
parse.add_argument('-format', default='csv', choices=['tab', 'svm', 'csv', 'tsv'],
help="The output format (default = csv).\n"
"tab -- Simple format, delimited by TAB.\n"
"svm -- The libSVM training data format.\n"
"csv, tsv -- The format that can be loaded into a spreadsheet program.")
parse.add_argument('-bp', type=int, choices=[0, 1], default=0,
help="Select use batch mode or not, the parameter will change the directory for generating file "
"based on the method you choose.")
argv = parse.parse_args()
main(argv)
<file_sep>from ..utils.utils_topic import lsa, PLsa, lda
from ..BOW.BOW4vec import bow
def bow_tm(tm_method, input_file, labels, category, words, sample_num_list, out_format, out_file_list, cur_dir,
**param_dict):
vectors = bow(input_file, category, words, sample_num_list, out_format, out_file_list, cur_dir, True, **param_dict)
if tm_method == 'LSA':
tm_vectors = lsa(vectors, com_prop=param_dict['com_prop'])
elif tm_method == 'PLSA':
_, tm_vectors = PLsa(vectors, com_prop=param_dict['com_prop']).em_algorithm()
elif tm_method == 'LDA':
tm_vectors = lda(vectors, labels=None, com_prop=param_dict['com_prop'])
elif tm_method == 'Labeled-LDA':
tm_vectors = lda(vectors, labels=labels, com_prop=param_dict['com_prop'])
else:
print('Topic model method error!')
return False
return tm_vectors
<file_sep>import torch
from torch import nn
import torch.nn.functional as func
from ..utils.utils_motif import MotifFile2Matrix, motif_init
def cnn_init(x, kernels):
out = x.unsqueeze(1) # [batch_size, 1, 20, 20]
w_init = torch.empty(len(kernels), 1, 7, 20) # 20 for Protein, 4 for DNA/RNA
b_init = torch.tensor([0.1] * len(kernels))
out = func.conv2d(out,
weight=torch.nn.init.uniform_(w_init),
bias=b_init)
out = func.relu(out)
out = func.max_pool2d(out, (2, 1)) # torch.Size([50, 301, 7, 1]) 301 为模体个数,同时也是输出通道数。
out_mean = torch.mean(out, dim=2, keepdim=True).squeeze() # torch.Size([50, 301)
out_max = torch.max(out, dim=2, keepdim=True)[0].squeeze()
cnn_out = torch.cat([out_max, out_mean], 1) # torch.Size([50, 602])
return cnn_out
# 定义网络结构
class MotifCNN(torch.nn.Module):
def __init__(self, fea_dim, n_class, prob, args):
super(MotifCNN, self).__init__()
motif_file = args.motif_file
if args.motif_database == 'Mega':
motifs = MotifFile2Matrix(motif_file).mega_motif_to_matrix()
else:
motifs = MotifFile2Matrix(motif_file).elm_motif_to_matrix()
self.kernel = motifs
self.fc = nn.Sequential(
nn.Linear(len(motifs) * 4, fea_dim),
nn.ReLU()
)
self.classifier = nn.Sequential(nn.Dropout(prob),
nn.Linear(fea_dim, n_class))
def extract_feature(self, x):
cnn_layer = cnn_init(x, self.kernel)
motif_layer = motif_init(x, self.kernel)
out = torch.cat([motif_layer, cnn_layer], 1) # size: [50 x 1204] 1204是模体个数的四倍
out = self.fc(out)
return out
def forward(self, x):
out = self.extract_feature(x)
out = self.classifier(out)
return out
<file_sep>from .Glove4vec import glove4vec
from .fastText4vec import fast_text4vec
from .word4vec import word4vec
from ..utils.utils_write import vectors2files
from ..utils.utils_words import DNA_X, RNA_X, PROTEIN_X
from ..utils.utils_words import dr_words, dt_words, km_words, mismatch_words, rev_km_words, subsequence_words, tng_words
def word_emb(emb_method, input_file, category, words, fixed_len, sample_num_list, out_format, out_file_list, cur_dir,
**param_dict):
if category == 'DNA':
alphabet = DNA_X
elif category == 'RNA':
alphabet = RNA_X
else:
alphabet = PROTEIN_X
if words == 'Kmer':
corpus = km_words(input_file, alphabet, fixed_len, word_size=param_dict['word_size'], fixed=True)
elif words == 'RevKmer':
corpus = rev_km_words(input_file, alphabet, fixed_len, word_size=param_dict['word_size'], fixed=True)
elif words == 'Mismatch':
corpus = mismatch_words(input_file, alphabet, fixed_len, word_size=param_dict['word_size'], fixed=True)
elif words == 'Subsequence':
corpus = subsequence_words(input_file, alphabet, fixed_len, word_size=param_dict['word_size'], fixed=True)
elif words == 'Top-N-Gram':
corpus = tng_words(input_file, fixed_len, word_size=param_dict['word_size'], n=param_dict['top_n'],
process_num=param_dict['cpu'], cur_dir=cur_dir, fixed=True)
elif words == 'DR':
corpus = dr_words(input_file, alphabet, fixed_len, max_dis=param_dict['max_dis'], fixed=True)
elif words == 'DT':
corpus = dt_words(input_file, fixed_len, max_dis=param_dict['max_dis'], process_num=param_dict['cpu'],
cur_dir=cur_dir, fixed=True)
else:
print('word segmentation method error!')
return False
if emb_method == 'fastText':
emb_vectors = fast_text4vec(corpus, sample_num_list, fixed_len, **param_dict)
elif emb_method == 'Glove':
emb_vectors = glove4vec(corpus, sample_num_list, fixed_len, **param_dict)
elif emb_method == 'word2vec':
# word4vec(input_file, alphabet, sample_size_list, words, fixed_len, cur_dir, **param_dict):
emb_vectors = word4vec(corpus, sample_num_list, fixed_len, **param_dict)
else:
print('Word embedding method error!')
return False
vectors2files(emb_vectors, sample_num_list, out_format, out_file_list)
| 15a682f12ae186ded5d1331706c7894386535c86 | [
"Python"
] | 80 | Python | mmdzb/BioSeq-BLM | f634016f8b7569ba24dbd46943a91f53da991abd | a4a73f0b4036a03ddc02d6a47e31d4b72d8c8954 |
refs/heads/master | <file_sep>const state = {
cart: [],
};
const getters = {
getCart(state) {
return state.cart;
},
};
const actions = {};
const mutations = {
setCart(state, cart) {
state.cart = cart;
},
};
export default { state, getters, actions, mutations };
<file_sep>import Vue from "vue";
import App from "./App.vue";
import router from "./router";
import store from "./store";
Vue.config.productionTip = false;
const MONTHS = [
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sept",
"Oct",
"Nov",
"Dec",
];
Vue.filter("dos", (val) => {
const today = new Date();
const dos = new Date(today.getTime() + val * 24 * 60 * 60 * 1000);
const dd = dos.getDate() < 10 ? "0" + dos.getDate() : dos.getDate();
const mm = MONTHS[dos.getMonth()];
const yyyy = dos.getFullYear();
return `${dd} ${mm} ${yyyy}`;
});
Vue.filter("cartTotal", (val) => {
let sum = 0;
for (let item of val) {
const subTotal = item.quantity * item.product.price;
sum += subTotal;
}
return sum;
});
Vue.filter("money", (val) => {
return val.toString().replace(/\B(?=(\d{3})+(?!\d))/g, ",");
});
new Vue({
router,
store,
render: (h) => h(App),
}).$mount("#app");
<file_sep># UID COURSEWORK PROTOTYPE
Simple interface for group project
<file_sep>const products = [
{
name: "Apples",
cost: "UGX 8,500/=",
price: 8500,
unit: "KG",
image: "https://source.unsplash.com/Su1GqBGEk3c/",
dos: 15,
comments: [
{
name: "<NAME>",
comment:
"Try to keep the apples in a cool place. I recommend a glass door fridge.",
},
],
},
{
name: "Yellow Bananas",
cost: "UGX 1,500/=",
price: 1500,
unit: "BUNCH",
image: "https://source.unsplash.com/fczCr7MdE7U/",
dos: 8,
comments: [
{
name: "<NAME>",
comment:
"Try to keep the apples in a cool place. I recommend a glass door fridge.",
},
],
},
{
name: "Oranges",
cost: "UGX 10,000/=",
price: 10000,
unit: "KG",
image: "https://source.unsplash.com/M3iwnCxuCwE/",
dos: 20,
comments: [
{
name: "<NAME>",
comment:
"Try to keep the apples in a cool place. I recommend a glass door fridge.",
},
],
},
{
name: "Mangoes",
cost: "UGX 6,000/=",
price: 6000,
unit: "KG",
image: "https://source.unsplash.com/biK3YJHhBfM/",
dos: 12,
comments: [
{
name: "<NAME>",
comment:
"Try to keep the apples in a cool place. I recommend a glass door fridge.",
},
],
},
{
name: "Avocado",
cost: "UGX 13,500/=",
price: 13500,
unit: "KG",
image: "https://source.unsplash.com/EUEWT74ImEU/",
dos: 32,
comments: [
{
name: "<NAME>",
comment:
"Try to keep the apples in a cool place. I recommend a glass door fridge.",
},
],
},
{
name: "Pineapples",
cost: "UGX 4,500/=",
price: 4500,
unit: "KG",
image: "https://source.unsplash.com/kLSEH5vZESA/",
dos: 17,
comments: [
{
name: "<NAME>",
comment:
"Try to keep the apples in a cool place. I recommend a glass door fridge.",
},
],
},
{
name: "Watermelon",
cost: "UGX 2,000/=",
price: 3000,
unit: "Piece",
image: "https://source.unsplash.com/izi5AnlbRIA/",
dos: 18,
comments: [
{
name: "<NAME>",
comment:
"Try to keep the apples in a cool place. I recommend a glass door fridge.",
},
],
},
];
export default { products };
| 80a0231c09807a6fca8679b101aa0e60abb84b7f | [
"JavaScript",
"Markdown"
] | 4 | JavaScript | KengoWada/uid_prototype | 78c98c9ec2c4616b732d74ca3bd16df7d459c05f | 0b6db60de0012bc8786aaf700dd46f66cecb2c73 |
refs/heads/master | <repo_name>choijiseok2/c2cgeoportal<file_sep>/doc/developer/build_release.rst
.. _developer_build_release:
Create a new release
====================
Vocabulary
----------
On this page I use the word ``version`` for a major version of MapFish
Geoportal (2.0), and the word ``release`` for each step in this version
(2.0.0rc1, 2.0.0, 2.0.1, ...).
``MapFish Geoportal`` is the pack that includes ngeo and c2cgeoportal,
from start of 2014 both projects will synchronize their major versions.
Then ``<release>`` can be ``2.0.0rc1`` for the first release candidate
of the version ``2.0``, ``2.0.0`` for the final release, ``2.0.1`` for
the first bug fix release, and ``<version>`` can be ``2.0``, ``2.1``, ...
.. _developer_build_release_pre_release_task:
Pre release task
----------------
Before doing a release you should merge all the previous branch on this one:
* Merge the release changes (on ``ngeo`` and on ``c2cgeoportal``)
to the upper branches i.e.: ``2.2`` => ``2.3``, ``2.3`` => ``2.4`` (master).
.. note::
On ``c2cgeoportal`` merge see if an alembic merge should be done:
.. prompt:: bash
./docker-run alembic \
--config=geoportal/tests/functional/alembic.ini \
--name=main heads
./docker-run alembic \
--config=geoportal/tests/functional/alembic.ini \
--name=static heads
If yes create the merge with:
.. prompt:: bash
./docker-run alembic \
--config=geoportal/tests/functional/alembic.ini --name=[main|static] \
merge --message="Merge <src> and <dst> branches" \
<rev 1> <rev 2>
Remove the import and replace the core of the method by ``pass`` in the generated file.
And finally add the new file.
ngeo
----
`For ngeo see here <https://github.com/camptocamp/ngeo/blob/master/docs/developer-guide.md#create-a-package-on-npm>`_.
c2cgeoportal
------------
New version
~~~~~~~~~~~
Checkout the code:
.. prompt:: bash
git fetch
git checkout master
git reset --hard origin/master
Get the localisation from Transifex:
.. prompt:: bash
docker build --tag=camptocamp/geomapfish-build-dev docker/build
./docker-run make transifex-get
For each version we create a new branch (at the latest at the final release):
.. prompt:: bash
git checkout -b <version>
git push origin <version>
Change the version in the following files:
* ``.travis.yml`` (``MAIN_BRANCH``, ``MAJOR_VERSION``)
* ``Jenkinsfile`` (``MAIN_BRANCH``, ``MAJOR_VERSION``)
* ``Makefile`` (``MAIN_BRANCH``, ``MAJOR_VERSION``)
* ``docker-run`` (``version``)
Commit your changes:
.. prompt:: bash
git add .travis.yml Jenkinsfile Makefile docker-run
git commit -m "Create the version <version> branch"
Go back to the master branch:
.. prompt:: bash
git checkout master
git merge <version>
Change back the version in the following files:
* ``.travis.yml`` (``MAIN_BRANCH``, ``MAJOR_VERSION``)
* ``Jenkinsfile`` (``MAIN_BRANCH``, ``MAJOR_VERSION``)
* ``Makefile`` (``MAIN_BRANCH``, ``MAJOR_VERSION``)
* ``docker-run`` (``version``)
Commit your changes:
.. prompt:: bash
git add .travis.yml Jenkinsfile Makefile docker-run
git commit -m "Start version <version + 1>"
Push your changes:
.. prompt:: bash
git push origin <version> master
Create a new Transifex resource:
.. prompt:: bash
rm .tx/config
./docker-run rm /build/c2ctemplate-cache.yaml
./docker-run make transifex-init
Update the references in the `index.html` file of the `gh-pages` branch.
Then continue by creating the release.
Do the new release
~~~~~~~~~~~~~~~~~~
Checkout the code:
.. prompt:: bash
git fetch
git checkout <version>
git reset --hard origin/<version>
Tag the new release:
.. prompt:: bash
git tag <release>
git push origin <release>
Run a new job for the <version> branch on Jenkins.
.. note::
It's possible to do a version only on the latest commit on a branch,
If you relay need to do that, you should create a new branch.
Notes about Travis
~~~~~~~~~~~~~~~~~~
When you push a tag with the pattern ``^[0-9]+\.[0-9]+\..+$``
a new release will automatically be created on Travis CI.
Post release tasks
------------------
When a new release or a new version is done you should do the following tasks:
* Merge the version into the upper one to the master i.e.: ``2.4`` => ``2.5``, ``2.5`` => ``master``.
See :ref:`developer_build_release_pre_release_task` for more information.
* Upgrade the demo in your home folder, see :ref:`integrator_upgrade_application`.
* Some specific things for the demo:
`UPGRADE.rst <https://github.com/camptocamp/demo_geomapfish/blob/2.4/UPGRADE.rst>_`.
For non dev release
-------------------
* Rename the milestone on `c2cgeoportal <https://github.com/camptocamp/c2cgeoportal/milestones>`_
and on `ngeo <https://github.com/camptocamp/ngeo/milestones>`_ from ``x.y`` to ``x.y.z``.
* Create again the milestone on `c2cgeoportal <https://github.com/camptocamp/c2cgeoportal/milestones>`_
and on `ngeo <https://github.com/camptocamp/ngeo/milestones>`_ for ``x.y``.
* Move all the open issues to the new milestone and close the current milestone
in `ngeo <https://github.com/camptocamp/ngeo/milestones>`_
and in `c2cgeoportal <https://github.com/camptocamp/c2cgeoportal/milestones>`_.
* Send a release email to the ``<EMAIL>``
and ``<EMAIL>`` mailing lists.
<file_sep>/geoportal/c2cgeoportal_geoportal/scaffolds/create/geoportal/+package+_geoportal/static-ngeo/api/index.js
import Map from 'api/Map.js';
import * as constants from 'api/constants.js';
// Point the API at the deployment's themes endpoint before exporting.
constants.themesUrl = 'https://www.example.com';
// Public surface of the geoportal JavaScript API.
const lib = {
  Map
};
export default lib;
<file_sep>/travis/requirements.txt
docker-compose==1.23.2
netifaces==0.10.9
PyYAML==3.13
| d8d00a2c396e6f198fb25421b9c622499628e59e | [
"JavaScript",
"Text",
"reStructuredText"
] | 3 | reStructuredText | choijiseok2/c2cgeoportal | ad7c7dfb64a1d1d614dd3d5a21670aa38d28c531 | ef9bdf930c780287696a8bee76e09856399bb707 |
refs/heads/master | <file_sep># atrico.kotlib.core
Core utilities for the suite of kotlin libraries
<file_sep>package atrico.kotlib
/**
* Builder type for immutable object
*/
interface Builder<T> {
/**
* Build the object
*/
fun build(): T
} | 64b4eaca4cb86f6ad5cf27de518dfad70252531d | [
"Markdown",
"Kotlin"
] | 2 | Markdown | AtricoSoftware/atrico.kotlib.core | 21a3c4dff6905d46b93116d909a59a46586ec60b | 0ae9979efd809595619fd1c8d04184dd97fe72ed |
refs/heads/master | <file_sep>import uuid
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin, BaseUserManager
from django.core import validators
from django.core.mail import send_mail
from django.db import models, OperationalError
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
class UserManager(BaseUserManager):
    # Manager for the custom User model below; mirrors Django's default
    # manager but uses an e-mail-style username.
    use_in_migrations = True
    def _create_user(self, username, password, is_staff, is_superuser, **extra_fields):
        """
        Creates and saves a User with the given username, email and password.
        """
        now = timezone.now()
        if not username:
            raise ValueError('The given username must be set')
        user = self.model(username=username,
                          is_staff=is_staff, is_active=True,
                          is_superuser=is_superuser,
                          date_joined=now, **extra_fields)
        # Hash the password before persisting; never store it in plain text.
        user.set_password(password)
        user.save(using=self._db)
        return user
    def create_user(self, username, password=None, **extra_fields):
        # Regular account: neither staff nor superuser.
        return self._create_user(username, password, False, False, **extra_fields)
    def create_superuser(self, username, password, **extra_fields):
        # Admin account: staff and superuser flags both set.
        return self._create_user(username, password, True, True, **extra_fields)
def _token():
    """Generate a 15-char uppercase hex token not yet used by any User.

    Returns None when the users table cannot be queried (e.g. while the
    initial migrations are still being applied).
    """
    try:
        candidate = uuid.uuid4().hex[:15].upper()
        while User.objects.filter(token=candidate).exists():
            candidate = uuid.uuid4().hex[:15].upper()
        return candidate
    except OperationalError:
        return None
class User(AbstractBaseUser, PermissionsMixin):
    """
    An abstract base class implementing a fully featured User model with
    admin-compliant permissions.
    Username, password and email are required. Other fields are optional.
    """
    # The username doubles as the account's e-mail address.
    username = models.EmailField(_('email'), max_length=50, unique=True,
                                 validators=[
                                     validators.EmailValidator(_('Enter a valid email'), 'invalid'),
                                 ],
                                 error_messages={
                                     'unique': _("A user with that email already exists"),
                                 })
    first_name = models.CharField(_('first name'), max_length=30, blank=True)
    last_name = models.CharField(_('last name'), max_length=30, blank=True)
    is_staff = models.BooleanField(_('staff status'), default=False,
                                   help_text=_('Designates whether the user can log into this admin '
                                               'site.'))
    is_active = models.BooleanField(_('active'), default=True,
                                    help_text=_('Designates whether this user should be treated as '
                                                'active. Un select this instead of deleting accounts.'))
    date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
    # API auth token; _token supplies a unique default per user at creation.
    token = models.CharField(_('auth token'), max_length=100, unique=True, default=_token)
    # Optional uploaded OAuth credentials file.
    cred = models.FileField(_('credentials'), upload_to='cred/', null=True, blank=True)
    objects = UserManager()
    USERNAME_FIELD = 'username'
    def __str__(self):
        return self.get_full_name()
    def get_full_name(self):
        # "First Last", trimmed when either part is missing.
        full_name = '%s %s' % (self.first_name, self.last_name)
        return full_name.strip()
    def get_short_name(self):
        return self.first_name
    def email_user(self, subject, message, from_email=None, **kwargs):
        # Send an e-mail to this user's address (stored in ``username``).
        send_mail(subject, message, from_email, [self.username], **kwargs)
<file_sep>from __future__ import unicode_literals
import oauth2client.file
import youtube_dl
from django.contrib.auth import get_user_model
from django.http import JsonResponse
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.generic.base import View
from gmusicapi import Musicmanager
from oauth2client.client import OAuth2WebServerFlow
from yt2gpm.settings import oauth
def _check_token(request):
    """Return True when the request carries a token belonging to a known user."""
    fallback = request.POST.get('token', None)
    token = request.GET.get('token', fallback)
    user_model = get_user_model()
    return user_model.objects.filter(token=token).exists()
class BaseView(View):
    # Common base for all API views: exempts them from CSRF checks so the
    # Chrome extension can POST without holding a CSRF cookie.
    @method_decorator(csrf_exempt)
    def dispatch(self, request, *args, **kwargs):
        return super().dispatch(request, *args, **kwargs)
class TokenView(BaseView):
    """Validate an existing token (GET) or exchange credentials for one (POST)."""

    def get(self, request, *args, **kwargs):
        # Report whether the supplied token belongs to a registered user.
        return JsonResponse({'response': _check_token(request)})

    def post(self, request, *args, **kwargs):
        # Look up the account by username, then verify the submitted password.
        candidates = get_user_model().objects.filter(
            username=request.POST.get('username', None))
        if candidates.exists():
            account = candidates[0]
            if account.check_password(request.POST.get('password', None)):
                return JsonResponse({'response': True, 'token': account.token})
        return JsonResponse({'response': False})
class OAuthView(BaseView):
    """Endpoints for the Google OAuth2 flow used by the music uploader."""

    def get(self, request, *args, **kwargs):
        # Hand the client the Google authorization URL to visit.
        if _check_token(request):
            flow = OAuth2WebServerFlow(**oauth._asdict())
            return JsonResponse({'response': True, 'url': flow.step1_get_authorize_url()})
        return JsonResponse({'response': False})

    def post(self, request, *args, **kwargs):
        # Exchange the authorization code for credentials and persist them.
        code = request.POST.get('code', None)
        if _check_token(request):
            flow = OAuth2WebServerFlow(**oauth._asdict())
            # step1 must run first so the flow object has its redirect URI
            # populated before the step2 exchange.
            flow.step1_get_authorize_url()
            credentials = flow.step2_exchange(code)
            storage = oauth2client.file.Storage('oauth.cred')
            storage.put(credentials)
            # BUG FIX: this method previously returned the raw credentials
            # object, which is not an HttpResponse and makes Django raise on
            # dispatch; an unused user-lookup line was also removed.
            return JsonResponse({'response': True})
        return JsonResponse({'response': False})
class UploadView(BaseView):
    """Download a YouTube video as MP3 and upload it to Google Play Music."""

    filename = None  # set by the progress hook once the download has finished

    def _my_hook(self, d):
        # youtube-dl progress hook: remember the converted .mp3 path.
        if d['status'] == 'finished':
            # NOTE(review): split('.') truncates titles containing dots;
            # os.path.splitext would be safer -- confirm before changing.
            self.filename = d['filename'].split('.')[0] + '.mp3'

    def post(self, request, *args, **kwargs):
        ydl_opts = {
            'format': 'bestaudio/best',
            'postprocessors': [{
                'key': 'FFmpegExtractAudio',
                'preferredcodec': 'mp3',
                'preferredquality': '320',
            }],
            'progress_hooks': [self._my_hook],
        }
        # BUG FIX: the URL was hard-coded to a sample video; take it from the
        # request instead (keeping the old sample as a fallback preserves the
        # previous behavior for callers that send no 'url').
        url = request.POST.get(
            'url', 'https://www.youtube.com/watch?v=JylZrg49aXU')
        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
            ydl.download([url])
        if self.filename:
            manager = Musicmanager()
            credential = 'oauth.cred'
            manager.perform_oauth(storage_filepath=credential)
            manager.login(oauth_credentials=credential)
            manager.upload(self.filename)
            print('uploaded')
        # BUG FIX: Django views must return an HttpResponse; this method
        # previously returned None.
        return JsonResponse({'response': bool(self.filename)})
<file_sep>
jQuery.Ajax = function (options) {
    // Thin wrapper around $.ajax that merges caller options over sensible
    // defaults and attaches Django's CSRF token to unsafe same-origin calls.
    var settings = jQuery.extend({
        method: 'POST', // GET, POST, PUT, DELETE
        url: '.',
        dataType: 'json',
        data: {},
        contentType: "application/x-www-form-urlencoded; charset=UTF-8",
        processData: true,
        token: '',
        success: function () {},
        error: function () {}
    }, options);

    // HTTP methods that never require CSRF protection.
    var CSRF_SAFE = /^(GET|HEAD|OPTIONS|TRACE)$/;

    $.ajax({
        method: settings.method,
        url: settings.url,
        dataType: settings.dataType,
        data: settings.data,
        contentType: settings.contentType,
        processData: settings.processData,
        success: function (data) {
            settings.success(data);
        },
        error: function (data) {
            settings.error(data);
        },
        beforeSend: function (xhr, cfg) {
            if (!CSRF_SAFE.test(cfg.type) && !this.crossDomain) {
                xhr.setRequestHeader("X-CSRFToken", Cookies.get('csrftoken'));
            }
        }
    });
};
<file_sep>let loginBlock = document.getElementById('login-block');
// Cached references to the popup's DOM elements.
let codeBlock = document.getElementById('code-block');
let loginButton = document.getElementById('btn_login');
let username = document.getElementById('username');
let password = document.getElementById('password');
let code = document.getElementById('code');
// Base URL of the local yt2gpm API server.
let baseUrl = 'http://127.0.0.1:8001';
function send_code(){
    // POST the OAuth authorization code typed by the user to the backend.
    // NOTE(review): no 'token' field is sent, yet the server-side /oauth/
    // POST handler appears to require one -- confirm this path ever succeeds.
    $.Ajax({
        method: 'post',
        url: baseUrl + '/oauth/',
        data: {'code': code.value},
        success: function (data) {
            loginBlock.style.display = 'none';
            codeBlock.style.display = 'none';
        },
        error: function (data) {
        }
    });
}
loginButton.addEventListener("click", function (e) {
    e.preventDefault();
    // Exchange the typed credentials for an API token, persist it in
    // chrome.storage, then start the Google OAuth flow for that token.
    $.Ajax({
        url: baseUrl + '/token/',
        // BUG FIX: the password value was a broken placeholder token that is
        // not valid JavaScript; send the password input's contents.
        data: {'username': username.value, 'password': password.value},
        success: function (data) {
            if (data['response']) {
                chrome.storage.sync.set({token: data['token']}, function () {
                    $.Ajax({
                        method: 'get',
                        url: baseUrl + '/oauth/',
                        data: {'token': data['token']},
                        success: function (data) {
                            // Open Google's consent page and reveal the
                            // code-entry UI.
                            window.open(data['url'], '_blank');
                            loginBlock.style.display = 'none';
                            codeBlock.style.display = 'block';
                            send_code();
                        },
                        error: function (data) {
                        }
                    });
                });
            }
        },
        error: function (data) {
        }
    });
});
// Show the login form only when no API token has been stored yet.
chrome.storage.sync.get('token', function (data) {
    if(data['token']){
        loginBlock.style.display = 'none';
    }else{
        loginBlock.style.display = 'block';
    }
});
// Show the code-entry form only while no OAuth code has been stored.
chrome.storage.sync.get('code', function (data) {
    if (data['code']){
        codeBlock.style.display = 'none';
    }else {
        codeBlock.style.display = 'block';
    }
});<file_sep>django==2.1.2
youtube_dl
gmusicapi
| c28c46390894385c43738947cba4405c25a01470 | [
"JavaScript",
"Python",
"Text"
] | 5 | Python | melon-ruet/yt2gpm | 9fea4739c66f686c7f6a5a0fad2b75e92cfb4a9f | 6c20f4423502c470aea9580fbaf9f741f81c6bfe |
refs/heads/master | <repo_name>aaronclimbs/commandmazon<file_sep>/customer.js
const inquirer = require("inquirer");
const mysql = require("mysql");
const cTable = require("console.table");
// Shared connection to the storefront database.
const db = mysql.createConnection({
  host: "localhost",
  port: 3306,
  user: "root",
  password: "<PASSWORD>",
  database: "commandmazonDB"
});
db.connect(err => {
  if (err) throw new Error(`Error: ${err.message}`);
  // console.log(`Connected to database. Status: ${db.state}`);
});
// Print a formatted listing of every item, then return to the menu.
function showItems() {
  db.query(
    "SELECT item_id, product_name, product_price, stock_quantity FROM items",
    (err, data) => {
      // FIX: the callback was needlessly declared async and awaited a plain
      // Array#map result; map is synchronous, so async/await was a no-op.
      if (err) throw new Error(`Error: ${err.message}`);
      const itemStr = data
        .map(item => {
          return (
            "ID: " +
            item.item_id.toString().padEnd(3) +
            " || " +
            "Product: " +
            item.product_name.padEnd(20) +
            " || " +
            (item.stock_quantity > 0
              ? "Price: " + "$" + item.product_price.toFixed(2)
              : "SOLD OUT")
          );
        })
        .join("\n");
      // Banner width is scaled from the average rendered line length.
      console.log(
        "<>".repeat(itemStr.length / data.length / 4) +
          "ITEMS AVAILABLE" +
          "<>".repeat(itemStr.length / data.length / 4)
      );
      console.log(itemStr);
      setTimeout(showChoices, 1000);
    }
  );
}
// Ask for an item ID and quantity, then decrement stock and record the sale.
function purchase() {
  inquirer
    .prompt([
      {
        name: "id",
        message: "Which ID would you like to buy?"
      },
      {
        name: "quantity",
        message: "How many would you like to order?",
        type: "number"
      }
    ])
    .then(answers => {
      const conditions = {
        item_id: answers.id
      };
      db.query("SELECT * FROM items WHERE ?", conditions, (err, item) => {
        // FIX: surface query errors and guard against unknown item IDs --
        // item[0] used to be dereferenced unconditionally and crashed when
        // the user typed an ID that does not exist.
        if (err) throw new Error(`Error: ${err.message}`);
        if (item.length === 0) {
          console.log("UNKNOWN ITEM ID! Please try again.");
          setTimeout(showChoices, 1000);
          return;
        }
        if (item[0].stock_quantity < answers.quantity) {
          console.log("INSUFFICIENT QUANTITY! Please try again later.");
          setTimeout(showChoices, 1000);
        } else {
          db.query(
            "UPDATE items SET ? WHERE ?",
            [
              {
                stock_quantity: item[0].stock_quantity - answers.quantity,
                product_sales:
                  item[0].product_sales +
                  item[0].product_price * answers.quantity
              },
              conditions
            ],
            (err, change) => {
              // FIX: the update error was previously ignored silently.
              if (err) throw new Error(`Error: ${err.message}`);
              console.log(
                `You bought ${answers.quantity} of Product: ${
                  item[0].product_name
                } for $${(
                  parseInt(answers.quantity) * item[0].product_price
                ).toFixed(2)}`
              );
              setTimeout(showChoices, 1000);
            }
          );
        }
      });
    });
}
// Top-level storefront menu; each action loops back here when it finishes.
function showChoices() {
  inquirer
    .prompt([
      {
        name: "view",
        message: "What would you like to do?",
        type: "list",
        choices: ["View items", "Purchase an item", "Leave store"]
      }
    ])
    .then(answers => {
      switch (answers.view) {
        case "View items":
          showItems();
          console.log("\n");
          break;
        case "Purchase an item":
          purchase();
          break;
        default:
          console.log("Closing connection to database.");
          db.end();
      }
    });
}
showChoices();
<file_sep>/README.md
# commandmazon
An Amazon-like storefront using MySQL and inquirer to provide a storefront interface through nodeJS
## Installation
```sh
git clone https://github.com/aaronclimbs/commandmazon
cd commandmazon
npm install
```
## Usage example
This application uses a MySQL backend and a command-line frontend (via Node) to let the user interact as a manager, supervisor, or user.
## Meta
<NAME> – [@abritishyank](https://twitter.com/abritishyank) – <EMAIL>
Distributed under the MIT license.
[github.com/brityank/commandmazon](https://github.com/brityank/commandmazon)
<file_sep>/seed/seedDepts.js
module.exports = function() {
  // Rebuild the `departments` table from scratch and seed it.
  // NOTE: the mysql driver queues queries on a single connection, so the
  // DROP -> CREATE -> INSERT statements below run strictly in order.
  const mysql = require("mysql");
  const seedDB = mysql.createConnection({
    host: "localhost",
    port: 3306,
    user: "root",
    password: "<PASSWORD>",
    database: "commandmazonDB"
  });
  seedDB.connect(err => {
    if (err) throw new Error(`Error: ${err.message}`);
    console.log(`Connected to database. Status: ${seedDB.state}`);
  });
  seedDB.query("DROP TABLE IF EXISTS departments", (err, data) => {
    if (err) throw new Error(`Error: ${err.message}`);
    console.log("TABLE DROPPED.");
  });
  seedDB.query(
    "CREATE TABLE departments(department_id INT NOT NULL AUTO_INCREMENT, department_name VARCHAR(50), over_head_costs INT, PRIMARY KEY (department_id))",
    (err, data) => {
      if (err) throw new Error(`Error: ${err.message}`);
      console.log("TABLE CREATED.");
    }
  );
  // Rows are [department_name, over_head_costs].
  const seedData = [
    ["electronics", 11000],
    ["household", 4800],
    ["misc", 6000],
    ["groceries", 15000],
    ["furniture", 4000]
  ];
  seedDB.query(
    "INSERT INTO departments(department_name, over_head_costs) VALUES ?",
    [seedData],
    (err, data) => {
      if (err) throw new Error(`Error: ${err.message}`);
      console.log(`DATA INSERTED: ${data.affectedRows} rows`);
      seedDB.end();
    }
  );
};
<file_sep>/seed/seedProducts.js
module.exports = function() {
  // Rebuild the `items` table from scratch and seed it.
  // NOTE: the mysql driver queues queries on a single connection, so the
  // DROP -> CREATE -> INSERT statements below run strictly in order.
  const mysql = require("mysql");
  const seedDB = mysql.createConnection({
    host: "localhost",
    port: 3306,
    user: "root",
    password: "<PASSWORD>",
    database: "commandmazonDB"
  });
  seedDB.connect(err => {
    if (err) throw new Error(`Error: ${err.message}`);
    console.log(`Connected to database. Status: ${seedDB.state}`);
  });
  seedDB.query("DROP TABLE IF EXISTS items", (err, data) => {
    if (err) throw new Error(`Error: ${err.message}`);
    console.log("TABLE DROPPED.");
  });
  seedDB.query(
    "CREATE TABLE items(item_id INT NOT NULL AUTO_INCREMENT, product_name VARCHAR(50), department_name VARCHAR(50), product_price DECIMAL(10,2) NOT NULL, stock_quantity INT DEFAULT(10), product_sales INT DEFAULT(0), PRIMARY KEY (item_id))",
    (err, data) => {
      if (err) throw new Error(`Error: ${err.message}`);
      console.log("TABLE CREATED.");
    }
  );
  // Rows are [product_name, department_name, product_price, product_sales];
  // stock_quantity falls back to the column default of 10.
  const seedData = [
    ["iPhone XS", "electronics", 800.00, 12000],
    ["Macbook Pro 13-inch", "electronics", 1500.00, 1000],
    ["Samsung Tablet", "electronics", 500.00, 1500],
    ["Sofa", "furniture", 450.00, 1500],
    ["Dresser", "furniture", 150.00, 2500],
    ["Bed", "furniture", 700.00, 3500],
    ["Hue Lightbulb", "household", 60.00, 2600],
    ["Nest Thermostat", "household", 220.00, 1800],
    ["Nest Camera", "electronics", 135.00, 6500],
    ["Coffee Grounds", "groceries", 35.00, 1500]
  ];
  seedDB.query(
    "INSERT INTO items(product_name, department_name, product_price, product_sales) VALUES ?",
    [seedData],
    (err, data) => {
      if (err) throw new Error(`Error: ${err.message}`);
      console.log(`DATA INSERTED: ${data.affectedRows} rows`);
      seedDB.end();
    }
  );
};
<file_sep>/manager.js
const inquirer = require("inquirer");
const mysql = require("mysql");
const cTable = require("console.table");
// Shared connection used by every manager action below.
const db = mysql.createConnection({
  host: "localhost",
  port: 3306,
  user: "root",
  password: "<PASSWORD>",
  database: "commandmazonDB"
});
db.connect(err => {
  if (err) throw new Error(`Error: ${err.message}`);
  // console.log(`Connected to database. Status: ${db.state}`);
});
// Enter the interactive menu loop.
choices();
// Main manager menu; each action re-enters this menu when it finishes.
function choices() {
  inquirer
    .prompt([
      {
        name: "manageChoice",
        message: "What would you like to do?",
        type: "list",
        choices: [
          "View products for sale",
          "View low itinerary",
          "Add to inventory",
          "Add new product",
          "Exit"
        ]
      }
    ])
    .then(answer => {
      const actions = {
        "View products for sale": viewProducts,
        "View low itinerary": viewLowItinerary,
        "Add to inventory": addInventory,
        "Add new product": addProduct
      };
      const action = actions[answer.manageChoice];
      if (action) {
        action();
      } else {
        console.log("SHUTTING DOWN DATABASE");
        db.end();
      }
    });
}
// Dump the full items table, then return to the menu.
function viewProducts() {
  db.query("SELECT * FROM items", (err, rows) => {
    if (err) throw new Error(`Error: ${err.message}`);
    console.table(rows);
    setTimeout(choices, 1000);
  });
}
// Collect the new product's attributes, then insert a single row.
function addProduct() {
  const questions = [
    {
      name: "product",
      message: "What is the product name?"
    },
    {
      name: "department",
      message: "What department will the item be sold in?",
      default: "misc"
    },
    {
      name: "price",
      type: "number",
      message: "What is the product's price?"
    },
    {
      name: "inventory",
      type: "number",
      default: 10,
      message: "How many should be added to inventory?"
    }
  ];
  inquirer.prompt(questions).then(answers => {
    const rows = [
      [answers.product, answers.department, answers.price, answers.inventory]
    ];
    db.query(
      "INSERT INTO items(product_name, department_name, product_price, stock_quantity) VALUES ?",
      [rows],
      (err, result) => {
        if (err) throw new Error(`Error: ${err.message}`);
        console.table(result);
        setTimeout(choices, 1000);
      }
    );
  });
}
// List items whose stock has dropped below 5 units.
// NOTE(review): "itinerary" almost certainly means "inventory"; the name is
// kept because the menu handler calls the function under this name.
function viewLowItinerary() {
  db.query("SELECT * FROM items WHERE stock_quantity < 5", (err, rows) => {
    if (err) throw new Error(`Error: ${err.message}`);
    console.table(rows);
    setTimeout(choices, 1000);
  });
}
// Pick an existing product, then overwrite its stock level.
function addInventory() {
  db.query(
    "SELECT item_id, product_name, stock_quantity FROM items",
    (err, data) => {
      // FIX: the callback was needlessly async and awaited the synchronous
      // Array#map result; the async/await pair was a no-op.
      if (err) throw new Error(`Error: ${err.message}`);
      const itemList = data.map(item => {
        return { name: item.product_name, quantity: item.stock_quantity };
      });
      console.table(itemList);
      inquirer
        .prompt([
          {
            name: "product",
            message: "Which item would you like to update the quantity for?",
            type: "list",
            choices: itemList
          },
          {
            name: "quantity",
            message: "How many exist in inventory?",
            type: "number"
          }
        ])
        .then(answers => {
          db.query(
            "UPDATE items SET stock_quantity = ? WHERE product_name = ?",
            [answers.quantity, answers.product],
            (err, data) => {
              if (err) throw new Error(`Error: ${err.message}`);
              console.table(data);
              setTimeout(choices, 1000);
            }
          );
        });
    }
  );
}
<file_sep>/seed.js
// One-shot seeding script: rebuilds and populates both tables.
const seedProds = require('./seed/seedProducts')
const seedDepts = require('./seed/seedDepts')
seedProds();
seedDepts(); | 51008170bc2cdc2b66956ed9d1ee09f1b965058c | [
"JavaScript",
"Markdown"
] | 6 | JavaScript | aaronclimbs/commandmazon | a4be0a7578ec6ba5a4923a437ded4a3010c39035 | 0855d867d1f92d1f2253ab0c29ad0d2c2c596256 |
refs/heads/master | <repo_name>ameykpatil/football-engine<file_sep>/README.md
# Football-Engine
Application to serve the details of footballers and teams.
## Problem Statement
Given API endpoint:
`https://vintagemonster.onefootball.com/api/teams/en/%team_id%.json`
(`%team_id%` must be replaced with an unsigned integer).
Using the API endpoint, find the following teams by name:
`Germany, England, France, Spain, Manchester Utd, Arsenal, Chelsea, Barcelona, Real Madrid, FC Bayern Munich`
Extract all the players from the given teams and render to stdout the information about players alphabetically ordered by name. Each player entry should contain the following information:
`full name; age; list of teams`
Output Example:
```
<NAME>; 25; France, Manchester Utd
<NAME>; 30; Arsenal
```
## Instructions
Clone this repository.
Go to the directory where repository is cloned.
(`$GOPATH/src/github.com/ameykpatil/football-engine`)
## Run Tests
`/bin/sh ./check.sh`
`check.sh` is a file created which runs multiple checks such as `fmt`, `vet`, `lint` & `test`
## Run Service
`go install`
Check if the service is running by hitting `http://localhost:4000/ping`
You should get `message` as `pong`
## Print & Get players of the given Teams
Hit following url in browser
`http://localhost:4000/players/fetch`
## Notes
This problem could also be solved without creating an HTTP server, but from the perspective of extending it to retrieve other information, an HTTP/web server looked like a better approach.
Application make use of concurrency to fetch `Team` information from external API in parallel.
A constant `maxConcurrency` controls the concurrency factor or number of Go routines. We can change it to increase or decrease the concurrecny.
Go routines are synchronized with main routine with the help of `WaitGroup`.
To avoid concurrent access to map storing `Player` details, `Mutex` has been used.
Tests are written for utilities.
<file_sep>/engine/engine.go
package engine
import (
"fmt"
"github.com/ameykpatil/football-engine/utils/helper"
"sort"
"strings"
"sync"
)
const (
	// maxConcurrency controls how many team fetches run in parallel.
	maxConcurrency = 10
)
// Engine is a struct which encapsulate all the properties related to crawling
type Engine struct {
	teamsCount int // number of valid teams processed so far
	playersMap map[string]Player // all collected players, keyed by player ID
	playersMapMutex *sync.Mutex // guards playersMap against concurrent access
}
// NewEngine is a constructor for creating engine instance
func NewEngine() *Engine {
	return &Engine{
		teamsCount: 0,
		playersMap: map[string]Player{},
		playersMapMutex: &sync.Mutex{},
	}
}
// Start drives the crawl: it keeps fetching batches of team IDs until every
// team in validTeams has been processed, then renders each collected player
// as "name; age; team, team, ..." sorted alphabetically, printing the lines
// to stdout and returning them.
func (engine *Engine) Start() []string {
	resp := make([]string, 0)
	validTeamCount := len(validTeams)
	teamCounter := 1
	for engine.teamsCount < validTeamCount {
		engine.fetch(teamCounter)
		teamCounter = teamCounter + maxConcurrency
		fmt.Println("completed fetching players ", teamCounter)
	}
	for _, player := range engine.playersMap {
		// IMPROVED: build the team list with strings.Join instead of manual
		// concatenation followed by strings.TrimSuffix.
		teamStr := strings.Join(player.Teams, ", ")
		str := player.Name + "; " + player.Age + "; " + teamStr
		resp = append(resp, str)
	}
	sort.Strings(resp)
	for _, playerStr := range resp {
		fmt.Println(playerStr)
	}
	return resp
}
// fetch spawns maxConcurrency goroutines, each fetching one team ID starting
// at teamCounter, and merges the returned players into playersMap. It blocks
// until every worker has finished.
func (engine *Engine) fetch(teamCounter int) {
	// waitGroup to synchronize the spawned routines
	var wg sync.WaitGroup
	wg.Add(maxConcurrency)
	for i := 0; i < maxConcurrency; i++ {
		// start a go routine to fetch the players & add into playersMap
		go func(teamNumber int) {
			defer wg.Done()
			players, teamName, err := fetchTeamPlayers(teamNumber)
			if err != nil {
				fmt.Println("error occurred for fetching players with teamID ", teamNumber, err)
			} else {
				for _, player := range players {
					// mutex to avoid concurrent access to playersMap
					engine.playersMapMutex.Lock()
					if engine.playersMap[player.ID].ID == "" {
						player.Teams = []string{teamName}
						engine.playersMap[player.ID] = player
					} else {
						existingPlayer := engine.playersMap[player.ID]
						existingPlayer.Teams = helper.AppendIfMissingString(existingPlayer.Teams, teamName)
						engine.playersMap[existingPlayer.ID] = existingPlayer
					}
					engine.playersMapMutex.Unlock()
				}
				// RACE FIX: teamsCount was incremented from multiple
				// goroutines without synchronization; guard it with the
				// same mutex that protects the map.
				engine.playersMapMutex.Lock()
				engine.teamsCount++
				engine.playersMapMutex.Unlock()
			}
		}(teamCounter + i)
	}
	// block until every spawned worker has called Done
	wg.Wait()
}
<file_sep>/utils/helper/helperUtils.go
package helper
// ContainsString reports whether searchElem occurs in array.
func ContainsString(array []string, searchElem string) bool {
	for i := range array {
		if array[i] == searchElem {
			return true
		}
	}
	return false
}
// AppendIfMissingString appends each of elemsToAdd to array unless an equal
// string is already present, returning the (possibly grown) slice.
func AppendIfMissingString(array []string, elemsToAdd ...string) []string {
candidates:
	for _, candidate := range elemsToAdd {
		for _, existing := range array {
			if existing == candidate {
				continue candidates
			}
		}
		array = append(array, candidate)
	}
	return array
}
<file_sep>/utils/server/serverUtils_test.go
package server
import (
"bytes"
"encoding/json"
"io"
"io/ioutil"
"math"
"net/http"
"testing"
)
// testResponseWriter is a minimal http.ResponseWriter stub that records the
// status code, headers and body written by the code under test.
type testResponseWriter struct {
	headers http.Header
	Body io.Reader // payload captured by Write, exposed for assertions
	Code int // status code passed to WriteHeader
}
func newResponseWriter() *testResponseWriter {
	return &testResponseWriter{
		headers: make(http.Header),
	}
}
func (w *testResponseWriter) Header() http.Header {
	return w.headers
}
func (w *testResponseWriter) Write(body []byte) (int, error) {
	// Capture the payload instead of sending it anywhere.
	w.Body = bytes.NewReader(body)
	return len(body), nil
}
func (w *testResponseWriter) WriteHeader(statusCode int) {
	w.Code = statusCode
}
// Sample "name; age; teams" payload used by TestSendResponse.
var testPlayersResponse = []string{
	"<NAME>; 29; Manchester Utd",
	"<NAME>; 31; Chelsea",
	"<NAME>; 22; Real Madrid, Spain",
}
// TestSendResponse covers the happy path (valid JSON payload) and the
// marshalling-failure path (Inf is not representable in JSON).
func TestSendResponse(t *testing.T) {
	t.Run("Receive json response successfully", func(t *testing.T) {
		w := newResponseWriter()
		SendResponse(w, testPlayersResponse, http.StatusOK)
		if w.Code != 200 {
			t.Errorf("Expected returned code to be 200 but got %d", w.Code)
		}
		var playerResponse []string
		err := json.NewDecoder(w.Body).Decode(&playerResponse)
		if err != nil {
			t.Errorf("Expected to return err nil but got %v", err)
		}
		if playerResponse[0] != "<NAME>; 29; Manchester Utd" {
			t.Errorf("Expected string to be Alex... but got %v", playerResponse[0])
		}
		if w.Header().Get("Content-Type") != "application/json" {
			t.Errorf("Expected header to be aaplication/json but got %v", w.Header().Get("Content-Type"))
		}
	})
	t.Run("Receive internal error in case of unsuccessful marshaling", func(t *testing.T) {
		w := newResponseWriter()
		// math.Inf(1) cannot be encoded as JSON, forcing the marshal error.
		invalidData := math.Inf(1)
		SendResponse(w, invalidData, http.StatusOK)
		if w.Code != http.StatusInternalServerError {
			t.Errorf("Expected returned code to be 500 but got %d", w.Code)
		}
		bodyBytes, _ := ioutil.ReadAll(w.Body)
		resp := string(bodyBytes)
		if resp != `{"error": "Internal server error"}` {
			t.Errorf("Expected to return error message but got %v", resp)
		}
		if w.Header().Get("Content-Type") != "application/json" {
			t.Errorf("Expected header to be aplication/json but got %v", w.Header().Get("Content-Type"))
		}
	})
}
<file_sep>/utils/server/serverUtils.go
package server
import (
"encoding/json"
"io"
"net/http"
)
// SendResponse sends response to writer in a json format
func SendResponse(w http.ResponseWriter, v interface{}, code int) {
w.Header().Set("Content-Type", "application/json")
b, err := json.Marshal(v)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
io.WriteString(w, `{"error": "Internal server error"}`)
} else {
w.WriteHeader(code)
io.WriteString(w, string(b))
}
}
<file_sep>/utils/helper/helperUtils_test.go
package helper
import "testing"
// TestContainsString table-tests membership lookups, including a miss.
func TestContainsString(t *testing.T) {
	playerNames := []string{"<NAME>", "<NAME>", "<NAME>", "<NAME>"}
	// input -> expected membership result
	pairs := map[string]bool{
		"<NAME>": false,
		"<NAME>": true,
	}
	for input, expected := range pairs {
		actual := ContainsString(playerNames, input)
		if actual != expected {
			t.Error(
				"For", input,
				"expected", expected,
				"got", actual,
			)
		}
	}
}
// TestAppendIfMissingString checks both the "appended" and the "already
// present" cases, comparing result length and element order.
func TestAppendIfMissingString(t *testing.T) {
	playerNames := []string{"Fred", "<NAME>", "<NAME>", "Marcelo", "Chumi"}
	// input -> expected resulting slice
	pairs := map[string][]string{
		"<NAME>": {"Fred", "<NAME>", "<NAME>", "Marcelo", "Chumi", "<NAME>"},
		"<NAME>": {"Fred", "<NAME>", "<NAME>", "Marcelo", "Chumi"},
	}
	for input, expected := range pairs {
		actual := AppendIfMissingString(playerNames, input)
		if len(actual) != len(expected) {
			t.Error(
				"For", input,
				"expected", expected,
				"got", actual,
			)
		}
		for i, playerName := range expected {
			if playerName != actual[i] {
				t.Error(
					"For", input,
					"expected", expected,
					"got", actual,
				)
			}
		}
	}
}
<file_sep>/main.go
package main
import (
"fmt"
"github.com/ameykpatil/football-engine/engine"
"github.com/ameykpatil/football-engine/utils/server"
"net/http"
)
func main() {
	// Liveness probe: GET /ping -> {"message": "pong"}.
	http.HandleFunc("/ping", func(w http.ResponseWriter, r *http.Request) {
		server.SendResponse(w, map[string]string{"message": "pong"}, http.StatusOK)
	})
	http.HandleFunc("/players/fetch", PlayerHandler)
	fmt.Println("Starting a server at localhost:4000")
	// ListenAndServe blocks; it only returns on a startup/serve failure.
	err := http.ListenAndServe(":4000", nil)
	if err != nil {
		panic(err)
	}
}
// PlayerHandler is a handler function for players fetch API.
// It runs a full crawl on every request and returns the sorted
// "name; age; teams" strings as JSON.
func PlayerHandler(w http.ResponseWriter, r *http.Request) {
	engineInstance := engine.NewEngine()
	resp := engineInstance.Start()
	server.SendResponse(w, resp, http.StatusOK)
}
<file_sep>/engine/service.go
package engine
import (
"encoding/json"
"errors"
"github.com/ameykpatil/football-engine/utils/helper"
"io/ioutil"
"net/http"
"strconv"
)
// Base URL of the external teams API; the team ID and ".json" are appended.
var apiPrefix = "https://vintagemonster.onefootball.com/api/teams/en/"
// Only players belonging to these teams are collected.
var validTeams = []string{"Germany", "England", "France", "Spain", "Manchester Utd", "Arsenal", "Chelsea", "Barcelona", "Real Madrid", "FC Bayern Munich"}
// TeamAPIResponse response of the team API
type TeamAPIResponse struct {
	Status string `json:"status"`
	Code float64 `json:"code"`
	Data struct {
		Team *Team `json:"team"`
	} `json:"data"`
}
// Team encapsulates properties of football team
type Team struct {
	ID int `json:"id"`
	Name string `json:"name"`
	Players []Player `json:"players"`
}
// Player encapsulates properties of a player
// Teams array would be empty initially & would be filled gradually later
type Player struct {
	ID string `json:"id"`
	Name string `json:"name"`
	Age string `json:"age"`
	Teams []string `json:"teams"`
}
// fetchTeamPlayers retrieves the team with the given ID from the external
// API. If the team is one of validTeams it returns its players and name;
// otherwise it returns an error.
func fetchTeamPlayers(teamID int) ([]Player, string, error) {
	// create api url & make a GET call
	apiURL := apiPrefix + strconv.Itoa(teamID) + ".json"
	resp, err := http.Get(apiURL)
	if err != nil {
		return nil, "", err
	}
	defer resp.Body.Close()
	// read bytes from the response body
	respBytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, "", err
	}
	// FIX: the old `resp == nil` check was dead code -- http.Get guarantees
	// a non-nil response when err is nil, and resp.Body was already used.
	if resp.StatusCode != 200 {
		return nil, "", errors.New(string(respBytes))
	}
	// unmarshal bytes into TeamAPIResponse struct
	var teamAPIResponse TeamAPIResponse
	err = json.Unmarshal(respBytes, &teamAPIResponse)
	if err != nil {
		return nil, "", err
	}
	// FIX: guard against a payload without a "team" object, which previously
	// caused a nil-pointer dereference when reading Team.Name below.
	if teamAPIResponse.Data.Team == nil {
		return nil, "", errors.New("missing team data")
	}
	// reject teams we are not interested in
	if !helper.ContainsString(validTeams, teamAPIResponse.Data.Team.Name) {
		return nil, "", errors.New("invalid team")
	}
	return teamAPIResponse.Data.Team.Players, teamAPIResponse.Data.Team.Name, nil
}
| ebc729f75cb88311eda7cfd797546b092cd863b8 | [
"Markdown",
"Go"
] | 8 | Markdown | ameykpatil/football-engine | 6d3b18fded70ef0189e2fe4f743e6cb52f9ebb01 | 36250998a9a21bd6e9f036172438656ffd441c82 |
refs/heads/master | <repo_name>sejoon00/instagram_profile_page<file_sep>/code.js
// Cached DOM references for the profile-page widgets.
const plusFreiend = document.querySelector('.plus_friend');
const recommend = document.querySelector('.recommend');
const blueBtn = document.querySelector('.blue_btn');
const followBtn = document.querySelector('.follow');
// NOTE(review): selector looks malformed ('.fas.fa-caret-down'?) and the
// variable is never used below -- confirm before fixing or removing.
const plusTriangle = document.querySelector('.fas fa-caret-down');
const changeFollow = document.querySelector('.change_follow');
const followPopup = document.querySelector('.follow_popup');
const cancelBtn = document.querySelector('.cancel_btn');
const followCancelBtn = document.querySelector('.follow_cancel_btn');
const followCancel = document.querySelector('.follow_cancel');
const message = document.querySelector('.message');
const post = document.querySelectorAll('.post');
const searchBox = document.querySelector('.search_box');
const search = document.querySelector('.search');
const searchImg = document.querySelector('.search_img');
const searchWord = document.querySelector('.search_word');
const postNavBox1 = document.querySelector(".post_nav_box1");
const postNavBox2= document.querySelector(".post_nav_box2");
const searchBackground = document.querySelector(".search_background");
plusFreiend.addEventListener('click',()=>{
recommend.classList.toggle('active');
})
followBtn.addEventListener('click',()=>{
followBtn.classList.toggle('remove');
message.classList.toggle('active');
changeFollow.classList.toggle('active');
plusFreiend.style.cssText=" border: solid 1px #dbdbdb; color: black; background-color: white;";
})
changeFollow.addEventListener('click', ()=>{
followPopup.classList.toggle('active');
})
cancelBtn.addEventListener('click',()=>{
followPopup.classList.remove('active');
})
followCancelBtn.addEventListener('click', ()=>{
followPopup.classList.remove('active');
message.classList.remove('active');
followBtn.classList.remove('remove');
changeFollow.classList.remove('active');
plusFreiend.style.cssText=" border: none; color:white; background-color: #0095f6;"
})
followCancel.addEventListener('click',()=>{
followPopup.classList.remove('active');
})
searchBox.addEventListener('click',()=>{
if(search.value==""){
searchWord.style.cssText = "right: 70px"
} else{
searchWord.style.color = "white";
}
search.style.cssText = "background-position: 6px 8px; text-align: left;";
searchImg.style.cssText = "left:0;";
searchBox.style.cssText = "z-index: 4;";
searchBackground.style.cssText = "display: block";
})
searchBackground.addEventListener('click',()=>{
searchBackground.style.cssText = "display: none";
searchImg.style.cssText = "left:70px;";
searchWord.style.cssText = "right: 0";
search.value ="";
})
search.addEventListener('keydown',()=>{
searchWord.style.color = "white";
})
postNavBox1.addEventListener('click', ()=>{
postNavBox2.classList.remove('highlight');
postNavBox1.classList.remove('highlight');
})
postNavBox2.addEventListener('click', ()=>{
postNavBox1.classList.add('highlight');
postNavBox2.classList.add('highlight');
})
| 9e04b9a593204aafb51b2d1a8333e5a3fff8d56c | [
"JavaScript"
] | 1 | JavaScript | sejoon00/instagram_profile_page | 6dd3d46b8eaac096700258d947629aa155fca3b8 | b124407dffbf309a9dba47101714465cfdfaa512 |
refs/heads/master | <file_sep>//DATA BASE
var placeholders_categories = [
"Nahkampf",
"Fernkampf",
"Sinnlos",
"Sinnvoll",
"Gut",
"Schlecht"
];
var init_texts = [
"Selbst ein Aufruf des Königs konnte meinen Vater nicht davon abhalten meiner Mutter am Tag meiner Geburt die Hand zu halten. Zumindest dachte er das. Als jedoch der Hauptmann der Wache über die Schwelle des Tempels trat, muss sein Rückgrat wohl in sich zusammen geschrumpft sein, denn letzten Endes ist er ihm gefolgt.",
"<br><br><br>Während deiner Kindheit hast du die meiste Zeit damit verbracht im Wald zu spielen. Eines Tages hast du dich mit einem Freund verlaufen! Als ihr überlegt wie ihr zurück nach Hause kommt ist etwas bedeutendes geschehen.",
"Default",
"Default",
""
];
var options_1 = [
"Doch er ist niemals dort angekommen, weil er bei der ersten Chance geflohen ist.",
"Du siehst in der Entfernung etwas aufblitzen und erkennst, dass es eine #get_Axt_Nahkampf# ist. Holzfäller müssen sie hier verloren haben! Nach kurzem umsehen findet dein Freund den Pfad der Holzfäller und du weißt wieder in welche Richtung ihr müsst.",
"Default",
"Default",
""
];
var options_2 = [
"Auf dem Weg zum König gab er jedem Bettler eine Münze, wie es üblich ist am Geburtstag.",
"Auch wenn der Wald dicht bewachsen ist, kannst du einen großen Berg erkennen und erinnerst dich waage, dass an dessen Fuß eine Mienenstadt liegt.",
"Default",
"Default",
""
];
var options_3 = [
"Er hastete so schnell wie möglich zum König.",
"Ihr werdet von einem Gewitter überrascht und sucht schnell Zuflucht.",
"Default",
"Default",
""
];
var options_4 = [
"Seine Sorge um Mutter war doch zu groß und ließ den König warten bis er sie in Sicherheit wusste.",
"Keiner von euch ist sich sicher in welche Richtung ihr am besten gehen solltet. Ihr seid kaum einen halben Tag unterwegs und deshalb voller Energie, also statt groß zu grübeln fängst du an mit deinem Freund zu toben.",
"Default",
"Default",
""
];
var output_o_1 = [
"Auch wenn ich die Intrigen des Adels und die Härte des Gesetzes selbst erlebt habe. Nichts entschuldigt diese Feigheit.",
"Ihr kommt kurz danach bei einer Holzfällerhütte an. Einer der Holzfäller erkennt dich und bietet an dich am Abend, wenn er mit seiner Arbeit fertig ist, wieder nach Hause zu bringen. Bis dahin helft ihr auch bei der Arbeit. So hast du dein Talent für den Umgang mit der #show_Nahkampf# entdeckt.",
"Default",
"Default",
""
];
var output_o_2 = [
"Nicht viele halten sich noch an die alten Traditionen und obwohl die meisten obsolet sind, hat mein Vater an diesem Tag viele Freunde gewonnen. Allerdings nur in gewissen Kreisen erwähnbare.",
"Je näher ihr dem Berg kommt desto schwieriger wird es noch vorwärz zu kommen. Steigung und Dickicht machen es letzten Endes unmöglich noch weiter zu kommen, doch dann siehst du einen Trampelpfad. Anchdem ihr endlich aus dem Dickicht schlagt siehst du zwar nicht die Stadt, doch steht ihr vor einer ausgebauten Straße. Ein Händler nimmt euch mit und an der nächsten Kreuzung findest du wieder den Weg nach Hause.",
"Default",
"Default",
""
];
var output_o_3 = [
"Ein böses Omen hat alle Berater des Königs in Aufruhr versetzt und um sie zu beschwichtigen ließ er alle Geburten segnen.",
"An einer großen Eiche findet ihr ein Hühnengrab unter dem ihr euch vor dem Sturm versteckt. Blitz und Donner schlagen so schenell und laut, dass dir die Brust bebt. Als du dich mit deinem Mantel dichter an die Eiche schmiegst, kommt es wie es kommen musste. Ein Blitz schlägt in die riesge Eiche! Doch das ist nicht das Ende deiner Geschichte ... der Blitz wird von dem Stein des Hühnengrabes absorbiert. Silluetten und Runen läuchten grünlich auf und aus dem Deckstein fällt die ein Amulett in den Schoß. Du drehst dich zu deinem Freund und siehst, dass er ohnmächtig ist und du selbst fühlst auch dein Bewusstsein schwinden. Am nächsten Morgen findet euch ein Druide und bringt dich nach Hause.",
"Default",
"Default",
""
];
var output_o_4 = [
"Zu Vaters Glück war der König ein besonnener Mann und gewährte ihm Entschuldigung. Trotzdem hat es sein Ansehen geschwächt, was es bei den Feinden des Königs folglich stärkte.",
"Dabei macht ihr so viel Lärm, dass euch ein der lokalen Jäger findet.",
"Default",
"Default",
""
];
| 6e62393330432097b71618a2f535f878d2107f14 | [
"JavaScript"
] | 1 | JavaScript | Nephilie/StoryCharacterGenerator | 2553534af1b67bf60228145f46a7298ba63548b8 | 17780915460112953eab1709ddc9d47875abdfeb |
refs/heads/master | <file_sep>#/bin/bash
set -e
export BIDS_dir=/mnt/Storage/bene/EXTERN/UHHWORK/data/SFB_BIDS ###### CHECK BIDS_dir
export BASE_dir=$BIDS_dir/derivatives/FS_base
export LOG_dir=$BIDS_dir/log
export QA_dir=$LOG_dir/qa
i=0
#for sub in $(cat $BIDS_dir/participants.tsv); do
for sub in $(cat $BIDS_dir/all_participants_single.tsv); do
#for sub in $(cat $BIDS_dir/control_participants); do
echo $sub
export SUB_dir=$BIDS_dir/sub-$sub
export ANAT_dir=$SUB_dir/anat
export DWI_dir=$SUB_dir/dwi
export SUB_LOG_dir=$SUB_dir/log
#[ ! -e $SUB_LOG_dir ] && mkdir $SUB_LOG_dir
for time in 1 2 3 4; do
subject=${sub}_${time}
for sequence in data dwi anat t1; do
for LOG in $(cat $LOG_dir/qa/no_${sequence}); do
case $LOG in *"${subject}"*) echo "Known missing ${sequence} data for Subject Nr. ${subject}"; continue 3 ;; esac
done
for LOG in $(cat $LOG_dir/qa/bad_${sequence}); do
case $LOG in *"${subject}"*) echo "Known bad ${sequence} data for Subject Nr. ${subject}"; continue 3 ;; esac
done
done
export SUBJECT_dir=$BIDS_dir/sub-$subject
[ ! -e $SUBJECT_dir ] && continue
export TRANSFORM_dir=$SUBJECT_dir/dwi/transform_base
export MASK_dir=$BASE_dir/$sub/tracking_masks
export OUTMASK_dir=$TRANSFORM_dir/tracking_masks
[ ! -e $OUTMASK_dir ] && mkdir $OUTMASK_dir
transform=$TRANSFORM_dir/rigid_T1_to_DWI.txt
#template=$TRANSFORM_dir/sub-${subject}_dwi_meanbzero_111.nii.gz ### ODER b0_distcor?
template=$SUBJECT_dir/dwi/sub-${subject}_dwi_meanbzero.nii.gz
for startmask in $(cat $BIDS_dir/code/tracking_masks_cort); do
inmask=$MASK_dir/$startmask"_500.nii.gz"
#echo $inmask
outmask=$OUTMASK_dir/$startmask"_final.nii.gz"
thr=0.5
if [ -e $outmask ]; then continue; fi
if [ ! -e $inmask ]; then echo "$subject $startmask" >> $LOG_dir/qa/no_startmask; continue; fi
echo "================> Calculating cortical mask $startmask with threshold $thr <========================"
flirt -in $inmask -ref $template -applyxfm -init $transform -out $outmask && fslmaths $outmask -thr $thr -bin $outmask &
#mrtransform -force -linear $transform $inmask $outmask -template $template
#fslmaths $outmask -thr $thr -bin $outmask
done
wait
for startmask in $(cat $BIDS_dir/code/tracking_masks_sub); do
inmask=$MASK_dir/$startmask"_temp.nii.gz"
#echo $inmask
outmask=$OUTMASK_dir/$startmask"_final.nii.gz"
thr=0.5
if [ -e $outmask ]; then continue; fi
if [ ! -e $inmask ]; then echo "$subject $startmask" >> $LOG_dir/qa/no_startmask; continue; fi
echo "================> Calculating subcortical mask $startmask with threshold $thr <========================"
flirt -in $inmask -ref $template -applyxfm -init $transform -out $outmask && fslmaths $outmask -thr $thr -bin $outmask &
#mrtransform -force -linear $transform $inmask $outmask -template $template
#fslmaths $outmask -thr $thr -bin $outmask
done
wait
done
done
<file_sep>#! /bin/bash
#set -e # EXIT ON ERROR
BIDS_dir=/mnt/Storage/bene/EXTERN/UHHWORK/data/SFB_BIDS
SUBJECTS_DIR=/mnt/Storage/bene/EXTERN/UHHWORK/data/SFB_BIDS/derivatives/FREESURFER_T1_DC
LOG_dir=$BIDS_dir/log
i=0
participants_list=$BIDS_dir/control_participants_single.tsv
number=$(cat $participants_list | wc -w)
min_name=-0125 # Binarize Curvature file with minimum concave (i.e. max convex) of min
for sub in $(cat $participants_list); do
Subject=${sub}_base
let i=i+1
echo "======================================================================> $Subject $i / $number <======================================================================="
SUBJECT_dir=$BIDS_dir/derivatives/FS_base/$sub
FIRST_dir=$SUBJECT_dir/sub-${sub}_fs_orig.anat/first_results
########### Check QA-reports ############
for sequence in data anat t1 dwi; do #<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<#
for LOG in $(cat $LOG_dir/qa/no_${sequence}); do
case $LOG in *"${sub}"*) echo "Known missing ${sequence} data for Subject Nr. ${sub}"; continue 3 ;; esac
done
for LOG in $(cat $LOG_dir/qa/bad_${sequence}); do
case $LOG in *"${sub}"*) echo "Known bad ${sequence} data for Subject Nr. ${sub}"; continue 3 ;; esac
done
done
#########################################
if [ ! -e $BIDS_dir/derivatives/FS_base/$sub/sub-${sub}_fs_orig.nii.gz ]; then echo $Subject >> $BIDS_dir/no_base_2; continue; fi
##### Waiting Block #####
time=0
while [ ! -e $SUBJECTS_DIR/$Subject/mri/BN_atlas_lh_sulc_${min_name}.nii.gz -a ! -e $SUBJECTS_DIR/$Subject/mri/BN_atlas_lh.nii.gz ]; do
sleep 30
time=$((time+30))
minutes=$((time/60))
echo "Waiting since " $minutes "minutes now..."
if [ $minutes -gt 120 ]; then echo $Subject >> $BIDS_dir/waiting_to_long; continue 2; fi
done
#########################
echo "Ready for Generating Tracking Masks"
mkdir -p $SUBJECT_dir/tracking_masks
if [ -e $SUBJECT_dir/tracking_masks/RH_pre_SMA_temp.nii.gz -a -e $SUBJECT_dir/tracking_masks/RH_M1_temp.nii.gz ]; then echo "Ready for 500"; else
# LH_PMV
fslmaths $SUBJECTS_DIR/$Subject/mri/BN_atlas_lh.nii.gz -thr 63 -uthr 64 $SUBJECT_dir/tracking_masks/LH_PMV_temp.nii.gz
# RH_PMV
fslmaths $SUBJECTS_DIR/$Subject/mri/BN_atlas_rh.nii.gz -thr 64 -uthr 65 $SUBJECT_dir/tracking_masks/RH_PMV_temp.nii.gz
# LH_SMA
fslmaths $SUBJECTS_DIR/$Subject/mri/BN_atlas_lh.nii.gz -thr 9 -uthr 10 -bin $SUBJECT_dir/tracking_masks/LH_SMA_temp.nii.gz
# RH_SMA
fslmaths $SUBJECTS_DIR/$Subject/mri/BN_atlas_rh.nii.gz -thr 10 -uthr 11 -bin $SUBJECT_dir/tracking_masks/RH_SMA_temp.nii.gz
# LH_dPMC
fslmaths $SUBJECTS_DIR/$Subject/mri/BN_atlas_lh.nii.gz -thr 55 -uthr 56 $SUBJECT_dir/tracking_masks/LH_dPMC_temp.nii.gz
# RH_dPMC
fslmaths $SUBJECTS_DIR/$Subject/mri/BN_atlas_rh.nii.gz -thr 56 -uthr 57 $SUBJECT_dir/tracking_masks/RH_dPMC_temp.nii.gz
# LH_S1
fslmaths $SUBJECTS_DIR/$Subject/mri/BN_atlas_lh.nii.gz -thr 155 -uthr 156 -bin $SUBJECT_dir/tracking_masks/LH_S1_temp.nii.gz
# RH_S1
fslmaths $SUBJECTS_DIR/$Subject/mri/BN_atlas_rh.nii.gz -thr 156 -uthr 157 -bin $SUBJECT_dir/tracking_masks/RH_S1_temp.nii.gz
# LH_pre_SMA
fslmaths $SUBJECTS_DIR/$Subject/mri/BN_atlas_lh.nii.gz -thr 1 -uthr 2 $SUBJECT_dir/tracking_masks/LH_pre_SMA_temp.nii.gz
# RH_pre_SMA
fslmaths $SUBJECTS_DIR/$Subject/mri/BN_atlas_rh.nii.gz -thr 2 -uthr 3 $SUBJECT_dir/tracking_masks/RH_pre_SMA_temp.nii.gz
# LH_M1
fslmaths $SUBJECTS_DIR/$Subject/mri/BN_atlas_lh.nii.gz -thr 53 -uthr 54 $SUBJECT_dir/tracking_masks/LH_M1_temp_1.nii.gz
fslmaths $SUBJECTS_DIR/$Subject/mri/BN_atlas_lh.nii.gz -thr 57 -uthr 58 $SUBJECT_dir/tracking_masks/LH_M1_temp_2.nii.gz
fslmaths $SUBJECT_dir/tracking_masks/LH_M1_temp_1.nii.gz -add $SUBJECT_dir/tracking_masks/LH_M1_temp_2.nii.gz -bin $SUBJECT_dir/tracking_masks/LH_M1_temp.nii.gz
# RH_M1
fslmaths $SUBJECTS_DIR/$Subject/mri/BN_atlas_rh.nii.gz -thr 54 -uthr 55 $SUBJECT_dir/tracking_masks/RH_M1_temp_1.nii.gz
fslmaths $SUBJECTS_DIR/$Subject/mri/BN_atlas_rh.nii.gz -thr 58 -uthr 59 $SUBJECT_dir/tracking_masks/RH_M1_temp_2.nii.gz
fslmaths $SUBJECT_dir/tracking_masks/RH_M1_temp_1.nii.gz -add $SUBJECT_dir/tracking_masks/RH_M1_temp_2.nii.gz -bin $SUBJECT_dir/tracking_masks/RH_M1_temp.nii.gz
fi
if [ -e $SUBJECT_dir/tracking_masks/RH_cIPS_temp.nii.gz -a -e $SUBJECT_dir/tracking_masks/LH_cIPS_temp.nii.gz ]; then echo "Ready for 500 II"; else
# LH_aIPS
fslmaths $SUBJECTS_DIR/$Subject/mri/BN_atlas_lh_sulc_${min_name}.nii.gz -bin $SUBJECT_dir/tracking_masks/LH_aIPS_temp.nii.gz
# RH_aIPS
fslmaths $SUBJECTS_DIR/$Subject/mri/BN_atlas_rh_sulc_${min_name}.nii.gz -bin $SUBJECT_dir/tracking_masks/RH_aIPS_temp.nii.gz
# LH_cIPS
fslmaths $SUBJECTS_DIR/$Subject/mri/BN_atlas_lh_sulc_${min_name}.nii.gz -bin $SUBJECT_dir/tracking_masks/LH_cIPS_temp.nii.gz
# RH_cIPS
fslmaths $SUBJECTS_DIR/$Subject/mri/BN_atlas_rh_sulc_${min_name}.nii.gz -bin $SUBJECT_dir/tracking_masks/RH_cIPS_temp.nii.gz
fi
for coord in LH_aIPS LH_cIPS LH_M1 LH_PMV LH_SMA RH_aIPS RH_cIPS RH_M1 RH_PMV RH_SMA LH_dPMC LH_S1 LH_pre_SMA RH_dPMC RH_S1 RH_pre_SMA; do
if [ -e $SUBJECT_dir/tracking_masks/${coord}_500.nii.gz ]; then
volume_temp=$(fslstats $SUBJECT_dir/tracking_masks/${coord}_500.nii.gz -V)
volume=${volume_temp:0:3}
echo "${coord} is ready for Tracking, size is $volume voxels"
if [ "$volume" -ne "500" ]; then echo "$Subject $coord $volume" >> $BIDS_dir/not_500; fi
else
if [ ! -e $SUBJECT_dir/tracking_masks/${coord}_temp.nii.gz ]; then echo $coord >> $BIDS_dir/missing_temp; fi
inate=$(cat $SUBJECT_dir/coord/$coord)
matlab -nodesktop -nosplash -r "NearestVoxelsMask('$SUBJECT_dir/tracking_masks/${coord}_temp.nii.gz',[$inate]',0.1,500,'$SUBJECT_dir/tracking_masks/${coord}_500.nii.gz');exit;"
volume_temp=$(fslstats $SUBJECT_dir/tracking_masks/${coord}_500.nii.gz -V)
volume=${volume_temp:0:3}
echo "${coord} is ready for Tracking, size is $volume voxels"
if [ "$volume" -ne "500" ]; then echo "$Subject $coord $volume" >> $BIDS_dir/not_500; fi
fi
done
if [ ! -e $FIRST_dir/T1_first_all_fast_firstseg.nii.gz ]; then echo "$Subject" >> $BIDS_dir/no_first; else
LH_Caud=11
LH_Thal=10
LH_Pall=13
LH_Puta=12
RH_Caud=50
RH_Thal=49
RH_Pall=52
RH_Puta=51
for mask in LH_Caud LH_Thal LH_Pall LH_Puta RH_Caud RH_Thal RH_Pall RH_Puta; do
if [ -e $SUBJECT_dir/tracking_masks/${mask}_temp.nii.gz ]; then continue; fi
fslmaths $FIRST_dir/T1_first_all_fast_firstseg.nii.gz -thr ${!mask} -uthr ${!mask} -bin $SUBJECT_dir/tracking_masks/${mask}_temp.nii.gz
echo "${mask} is ready for Tracking"
done
for hemi in L R; do
if [ -e $SUBJECT_dir/tracking_masks/${hemi}H_Lenti_temp.nii.gz ]; then continue; fi
fslmaths $SUBJECT_dir/tracking_masks/${hemi}H_Puta_temp.nii.gz -add $SUBJECT_dir/tracking_masks/${hemi}H_Pall_temp.nii.gz -bin $SUBJECT_dir/tracking_masks/${hemi}H_Lenti_temp.nii.gz
done
if [ ! -e $SUBJECT_dir/tracking_masks/Brainstem_temp.nii.gz ]; then fslmaths $FIRST_dir/T1_first_all_fast_firstseg.nii.gz -thr 16 -uthr 16 -bin $SUBJECT_dir/tracking_masks/Brainstem_temp.nii.gz; fi
fi
done
<file_sep>#! /bin/bash
#set -e # EXIT ON ERROR
BIDS_dir=/mnt/Storage/bene/EXTERN/UHHWORK/data/SFB_BIDS
SUBJECTS_DIR=/mnt/Storage/bene/EXTERN/UHHWORK/data/SFB_BIDS/derivatives/FREESURFER_T1_DC
i=0
participants_list=$BIDS_dir/control_participants_single.tsv
number=$(cat $participants_list | wc -w)
for sub in $(cat $participants_list); do
#for sub in pa_026; do
Subject=${sub}_base
let i=i+1
echo "======================================================================> $Subject $i / $number <======================================================================="
SUB_dir=$BIDS_dir/derivatives/FS_base/$sub
mkdir -p $SUB_dir
mri_convert $SUBJECTS_DIR/$Subject/mri/orig.mgz -o $SUB_dir/sub-${sub}_fs_orig.nii.gz --out_orientation RAS
fsl_anat -i $SUB_dir/sub-${sub}_fs_orig.nii.gz --nocrop
done
<file_sep>#! /bin/bash
#set -e # EXIT ON ERROR
project=/mnt/Storage/bene/EXTERN/UHHWORK/data/SFB_BIDS
SUBJECTS_DIR=/mnt/Storage/bene/EXTERN/UHHWORK/data/SFB_BIDS/derivatives/FREESURFER_T1_DC
export BIDS_dir=/mnt/Storage/bene/EXTERN/UHHWORK/data/SFB_BIDS ###### CHECK BIDS_dir
i=0
participants_list=$BIDS_dir/control_participants_single.tsv
#number=$(cat /home/users/frey/sfb_motor_network/subject_list_split | wc -w)
number=$(cat $participants_list | wc -w)
#for sub in $(cat /home/users/frey/sfb_motor_network/subject_list_split); do
#for sub in $(cat $project/participants_single.tsv); do
for sub in $(cat $participants_list); do
Subject=${sub}_base
let i=i+1
echo "======================================================================> $Subject $i / $number <======================================================================="
### mapping BN_atlas cortex to subjects Freesurfer space
if [ -e $SUBJECTS_DIR/$Subject/mri/BN_atlas_lh.nii.gz -a -e $SUBJECTS_DIR/$Subject/mri/BN_atlas_rh.nii.gz ]; then echo "$Subject: Check"; else
#if [ ! -e $SUBJECTS_DIR/$Subject/fs_orig.nii.gz ]; then echo $Subject >> $project/no_base; continue; fi
mris_ca_label -l $SUBJECTS_DIR/$Subject/label/lh.cortex.label $Subject lh $SUBJECTS_DIR/$Subject/surf/lh.sphere.reg $SUBJECTS_DIR/lh.BN_Atlas.gcs $SUBJECTS_DIR/$Subject/label/lh.BN_Atlas.annot
mris_ca_label -l $SUBJECTS_DIR/$Subject/label/rh.cortex.label $Subject rh $SUBJECTS_DIR/$Subject/surf/rh.sphere.reg $SUBJECTS_DIR/rh.BN_Atlas.gcs $SUBJECTS_DIR/$Subject/label/rh.BN_Atlas.annot
mri_label2vol --annot $SUBJECTS_DIR/$Subject/label/lh.BN_Atlas.annot --identity --proj frac 0 0.5 0.1 --subject $Subject --hemi lh --surf white --o $SUBJECTS_DIR/$Subject/mri/BN_atlas_lh.mgz --temp $SUBJECTS_DIR/$Subject/mri/brainmask.mgz
mri_label2vol --annot $SUBJECTS_DIR/$Subject/label/rh.BN_Atlas.annot --identity --proj frac 0 0.5 0.1 --subject $Subject --hemi rh --surf white --o $SUBJECTS_DIR/$Subject/mri/BN_atlas_rh.mgz --temp $SUBJECTS_DIR/$Subject/mri/brainmask.mgz
mri_convert $SUBJECTS_DIR/$Subject/mri/BN_atlas_rh.mgz $SUBJECTS_DIR/$Subject/mri/BN_atlas_rh.nii.gz --out_orientation RAS
mri_convert $SUBJECTS_DIR/$Subject/mri/BN_atlas_lh.mgz $SUBJECTS_DIR/$Subject/mri/BN_atlas_lh.nii.gz --out_orientation RAS
fi
### generating map with only sulcal structures
min=-0.125
min_name=-0125
if [ -e $SUBJECTS_DIR/$Subject/mri/BN_atlas_lh_sulc_${min_name}.nii.gz -a -e $SUBJECTS_DIR/$Subject/mri/BN_atlas_rh_sulc_${min_name}.nii.gz ]; then echo "$Subject: Check"; else
if [ -e $SUBJECTS_DIR/$Subject/surf/lh.curv -a -e $SUBJECTS_DIR/$Subject/surf/rh.curv ]; then
# Binarize Curvature file with minimum concave (i.e. max convex) of min
mri_binarize --i $SUBJECTS_DIR/$Subject/surf/lh.curv --min ${min} --o $SUBJECTS_DIR/$Subject/surf/LH_sulc_${min_name}.mgz
mri_binarize --i $SUBJECTS_DIR/$Subject/surf/rh.curv --min ${min} --o $SUBJECTS_DIR/$Subject/surf/RH_sulc_${min_name}.mgz
# Transform this surface to a volume within certrain boundaries (projfrac)
mri_surf2vol --surfval $SUBJECTS_DIR/$Subject/surf/LH_sulc_${min_name}.mgz --hemi lh --fill-projfrac -1 1.5 0.1 --identity $Subject --template $SUBJECTS_DIR/$Subject/mri/brainmask.mgz --o $SUBJECTS_DIR/$Subject/surf/LH_sulc_${min_name}_vol.mgz
mri_surf2vol --surfval $SUBJECTS_DIR/$Subject/surf/RH_sulc_${min_name}.mgz --hemi rh --fill-projfrac -1 1.5 0.1 --identity $Subject --template $SUBJECTS_DIR/$Subject/mri/brainmask.mgz --o $SUBJECTS_DIR/$Subject/surf/RH_sulc_${min_name}_vol.mgz
# Mask the BN-Atlas Volume with the volume file of the sulci
mri_mask $SUBJECTS_DIR/$Subject/mri/BN_atlas_lh.mgz $SUBJECTS_DIR/$Subject/surf/LH_sulc_${min_name}_vol.mgz $SUBJECTS_DIR/$Subject/mri/BN_atlas_lh_sulc_${min_name}.mgz
mri_mask $SUBJECTS_DIR/$Subject/mri/BN_atlas_rh.mgz $SUBJECTS_DIR/$Subject/surf/RH_sulc_${min_name}_vol.mgz $SUBJECTS_DIR/$Subject/mri/BN_atlas_rh_sulc_${min_name}.mgz
# Convert to FSL-format to check
#mriconvert $SUBJECTS_DIR/$Subject/surf/LH_sulc_${min_name}_vol.mgz $SUBJECTS_DIR/$Subject/surf/LH_sulc_${min_name}_vol.nii.gz
#mriconvert $SUBJECTS_DIR/$Subject/surf/RH_sulc_${min_name}_vol.mgz $SUBJECTS_DIR/$Subject/surf/RH_sulc_${min_name}_vol.nii.gz
# Convert to FSL-format
mri_convert $SUBJECTS_DIR/$Subject/mri/BN_atlas_rh_sulc_${min_name}.mgz $SUBJECTS_DIR/$Subject/mri/BN_atlas_rh_sulc_${min_name}.nii.gz --out_orientation RAS
mri_convert $SUBJECTS_DIR/$Subject/mri/BN_atlas_lh_sulc_${min_name}.mgz $SUBJECTS_DIR/$Subject/mri/BN_atlas_lh_sulc_${min_name}.nii.gz --out_orientation RAS
else echo $Subject >> $project/no_curve
fi
fi
done
<file_sep>#/bin/bash
set -e
export BIDS_dir=/mnt/Storage/bene/EXTERN/UHHWORK/data/SFB_BIDS ###### CHECK BIDS_dir
export BASE_dir=$BIDS_dir/derivatives/FS_base
export LOG_dir=$BIDS_dir/log
export QA_dir=$LOG_dir/qa
i=0
export_file=$BIDS_dir/log/motor_network_BH
[ -e $export_file ] && rm $export_file
for sub in $(cat $BIDS_dir/all_participants_single.tsv); do
#for sub in ca_001_1; do
echo $sub
group=$(echo $sub | head -c 2)
for time in 1 2 3 4; do
subject=${sub}_${time}
for sequence in data dwi anat t1; do
for LOG in $(cat $LOG_dir/qa/no_${sequence}); do
case $LOG in *"${subject}"*) echo "Known missing ${sequence} data for Subject Nr. ${subject}"; continue 4 ;; esac
done
for LOG in $(cat $LOG_dir/qa/bad_${sequence}); do
case $LOG in *"${subject}"*) echo "Known bad ${sequence} data for Subject Nr. ${subject}"; continue 4 ;; esac
done
done
export SUBJECT_dir=$BIDS_dir/sub-$subject
[ ! -e $SUBJECT_dir ] && continue
export TRANSFORM_dir=$SUBJECT_dir/dwi/transform_base
echo $time
graph_array=( $(cat $TRANSFORM_dir/temp/Cc_B_Ef_B_LoEf_B_Cc_Ef_LoEf_Q_As_De_Ew_max_Ew_median_Ew_mean_St_mean_St_median_St_sum_Cc_rand_std_Ef_rand_std_LoEf_rand_std.csv) )
Cc_B=${graph_array[0]}
Ef_B=${graph_array[1]}
LoEf_B=${graph_array[2]}
Cc=${graph_array[3]}
Ef=${graph_array[4]}
LoEf=${graph_array[5]}
Q=${graph_array[6]}
As=${graph_array[7]}
De=${graph_array[8]}
Ew_max=${graph_array[9]}
Ew_median=${graph_array[10]}
Ew_mean=${graph_array[11]}
St_mean=${graph_array[12]}
St_median=${graph_array[13]}
St_sum=${graph_array[14]}
Cc_rand_std=${graph_array[15]}
Ef_rand_std=${graph_array[16]}
LoEf_rand_std=${graph_array[17]}
graph_array_LH=( $(cat $TRANSFORM_dir/temp/Cc_B_Ef_B_LoEf_B_Cc_Ef_LoEf_Q_As_De_Ew_max_Ew_median_Ew_mean_St_mean_St_median_St_sum_Cc_rand_std_Ef_rand_std_LoEf_rand_std_LH.csv) )
Cc_B_LH=${graph_array_LH[0]}
Ef_B_LH=${graph_array_LH[1]}
LoEf_B_LH=${graph_array_LH[2]}
Cc_LH=${graph_array_LH[3]}
Ef_LH=${graph_array_LH[4]}
LoEf_LH=${graph_array_LH[5]}
Q_LH=${graph_array_LH[6]}
As_LH=${graph_array_LH[7]}
De_LH=${graph_array_LH[8]}
Ew_max_LH=${graph_array_LH[9]}
Ew_median_LH=${graph_array_LH[10]}
Ew_mean_LH=${graph_array_LH[11]}
St_mean_LH=${graph_array_LH[12]}
St_median_LH=${graph_array_LH[13]}
St_sum_LH=${graph_array_LH[14]}
Cc_rand_std_LH=${graph_array_LH[15]}
Ef_rand_std_LH=${graph_array_LH[16]}
LoEf_rand_std_LH=${graph_array_LH[17]}
graph_array_RH=( $(cat $TRANSFORM_dir/temp/Cc_B_Ef_B_LoEf_B_Cc_Ef_LoEf_Q_As_De_Ew_max_Ew_median_Ew_mean_St_mean_St_median_St_sum_Cc_rand_std_Ef_rand_std_LoEf_rand_std_RH.csv) )
Cc_B_RH=${graph_array_RH[0]}
Ef_B_RH=${graph_array_RH[1]}
LoEf_B_RH=${graph_array_RH[2]}
Cc_RH=${graph_array_RH[3]}
Ef_RH=${graph_array_RH[4]}
LoEf_RH=${graph_array_RH[5]}
Q_RH=${graph_array_RH[6]}
As_RH=${graph_array_RH[7]}
De_RH=${graph_array_RH[8]}
Ew_max_RH=${graph_array_RH[9]}
Ew_median_RH=${graph_array_RH[10]}
Ew_mean_RH=${graph_array_RH[11]}
St_mean_RH=${graph_array_RH[12]}
St_median_RH=${graph_array_RH[13]}
St_sum_RH=${graph_array_RH[14]}
Cc_rand_std_RH=${graph_array_RH[15]}
Ef_rand_std_RH=${graph_array_RH[16]}
LoEf_rand_std_RH=${graph_array_RH[17]}
#clinical_data_file=
#clinical_data_array=( $(grep -a $sub_num $clinical_data_file) )
#age=${clinical_data_array[1]}
#sex=${clinical_data_array[2]}
#### EXPORT
[ ! -e $export_file ] && echo -n \
"sub timepoint age sex group Cc_B Ef_B LoEf_B Cc Ef LoEf Q As De Ew_max Ew_median Ew_mean St_mean St_median St_sum Cc_rand_std Ef_rand_std LoEf_rand_std \
Cc_B_RH Ef_B_RH LoEf_B_RH Cc_RH Ef_RH LoEf_RH Q_RH As_RH De_RH Ew_max_RH Ew_median_RH Ew_mean_RH St_mean_RH St_median_RH St_sum_RH Cc_rand_std_RH Ef_rand_std_RH LoEf_rand_std_RH \
Cc_B_LH Ef_B_LH LoEf_B_LH Cc_LH Ef_LH LoEf_LH Q_LH As_LH De_LH Ew_max_LH Ew_median_LH Ew_mean_LH St_mean_LH St_median_LH St_sum_LH Cc_rand_std_LH Ef_rand_std_LH LoEf_rand_std_LH \
dummy" \
> $export_file \
&& echo "" >> $export_file
#echo -n "$subject " >> $export_file
echo -n "$subject " >> $export_file
echo -n "$sub " >> $export_file
echo -n "$time " >> $export_file
echo -n "$age " >> $export_file
echo -n "$sex " >> $export_file
echo -n "$group " >> $export_file
echo -n "$Cc_B " >> $export_file
echo -n "$Ef_B " >> $export_file
echo -n "$LoEf_B " >> $export_file
echo -n "$Cc " >> $export_file
echo -n "$Ef " >> $export_file
echo -n "$LoEf " >> $export_file
echo -n "$Q " >> $export_file
echo -n "$As " >> $export_file
echo -n "$De " >> $export_file
echo -n "$Ew_max " >> $export_file
echo -n "$Ew_median " >> $export_file
echo -n "$Ew_mean " >> $export_file
echo -n "$St_mean " >> $export_file
echo -n "$St_median " >> $export_file
echo -n "$St_sum " >> $export_file
echo -n "$Cc_rand_std " >> $export_file
echo -n "$Ef_rand_std " >> $export_file
echo -n "$LoEf_rand_std " >> $export_file
echo -n "$Cc_B_RH " >> $export_file
echo -n "$Ef_B_RH " >> $export_file
echo -n "$LoEf_B_RH " >> $export_file
echo -n "$Cc_RH " >> $export_file
echo -n "$Ef_RH " >> $export_file
echo -n "$LoEf_RH " >> $export_file
echo -n "$Q_RH " >> $export_file
echo -n "$As_RH " >> $export_file
echo -n "$De_RH " >> $export_file
echo -n "$Ew_max_RH " >> $export_file
echo -n "$Ew_median_RH " >> $export_file
echo -n "$Ew_mean_RH " >> $export_file
echo -n "$St_mean_RH " >> $export_file
echo -n "$St_median_RH " >> $export_file
echo -n "$St_sum_RH " >> $export_file
echo -n "$Cc_rand_std_RH " >> $export_file
echo -n "$Ef_rand_std_RH " >> $export_file
echo -n "$LoEf_rand_std_RH " >> $export_file
echo -n "$Cc_B_LH " >> $export_file
echo -n "$Ef_B_LH " >> $export_file
echo -n "$LoEf_B_LH " >> $export_file
echo -n "$Cc_LH " >> $export_file
echo -n "$Ef_LH " >> $export_file
echo -n "$LoEf_LH " >> $export_file
echo -n "$Q_LH " >> $export_file
echo -n "$As_LH " >> $export_file
echo -n "$De_LH " >> $export_file
echo -n "$Ew_max_LH " >> $export_file
echo -n "$Ew_median_LH " >> $export_file
echo -n "$Ew_mean_LH " >> $export_file
echo -n "$St_mean_LH " >> $export_file
echo -n "$St_median_LH " >> $export_file
echo -n "$St_sum_LH " >> $export_file
echo -n "$Cc_rand_std_LH " >> $export_file
echo -n "$Ef_rand_std_LH " >> $export_file
echo -n "$LoEf_rand_std_LH " >> $export_file
echo "" >> $export_file
done
done
<file_sep>#/bin/bash
set -e
export BIDS_dir=/mnt/Storage/bene/EXTERN/UHHWORK/data/SFB_BIDS ###### CHECK BIDS_dir
[ ! -f $BIDS_dir ] && export BIDS_dir
export BASE_dir=$BIDS_dir/derivatives/FS_base
export LOG_dir=$BIDS_dir/log
export QA_dir=$LOG_dir/qa
i=0
#for sub in $(cat $BIDS_dir/participants.tsv); do
for sub in $(cat $BIDS_dir/all_participants_single.tsv); do
#for sub in pa_019; do
#for sub in $(cat $BIDS_dir/control_participants); do
echo $sub
if [ "$sub" == "pa_019" ]; then
##### Waiting Block #####
time=0
while [ ! -e /mnt/Storage/bene/EXTERN/UHHWORK/data/SFB_BIDS/sub-pa_019_1/anat/sub-pa_019_1_FLAIR_BrainExtractionBrain.nii.gz ]; do
sleep 30
time=$((time+30))
minutes=$((time/60))
echo "Waiting since " $minutes "minutes now..."
done
#########################
fi
export SUB_dir=$BIDS_dir/sub-$sub
export ANAT_dir=$SUB_dir/anat
export DWI_dir=$SUB_dir/dwi
export SUB_LOG_dir=$SUB_dir/log
for sequence in data dwi anat t1 flair; do
for LOG in $(cat $LOG_dir/qa/no_${sequence}); do
case $LOG in *"${sub}_1"*) echo "Known missing ${sequence} data for Subject Nr. ${sub}"; continue 4 ;; esac
done
for LOG in $(cat $LOG_dir/qa/bad_${sequence}); do
case $LOG in *"${subject}_1"*) echo "Known bad ${sequence} data for Subject Nr. ${sub}"; continue 4 ;; esac
done
done
for time in 1; do
subject=${sub}_${time}
for sequence in data dwi anat t1 flair; do
for LOG in $(cat $LOG_dir/qa/no_${sequence}); do
case $LOG in *"${subject}"*) echo "Known missing ${sequence} data for Subject Nr. ${subject}"; continue 4 ;; esac
done
for LOG in $(cat $LOG_dir/qa/bad_${sequence}); do
case $LOG in *"${subject}"*) echo "Known bad ${sequence} data for Subject Nr. ${subject}"; continue 4 ;; esac
done
done
export SUBJECT_dir=$BIDS_dir/sub-$subject
[ ! -e $SUBJECT_dir ] && continue
export TRANSFORM_dir=$SUBJECT_dir/dwi/transform_base
#[ -e $TRANSFORM_dir/sub-${subject}_lesion_rigid_DWI_bin.nii.gz ] && continue
mkdir -p $SUBJECT_dir/anat/transform
# NOTE(review): this excerpt starts inside a per-subject loop that begins
# before this section; $sub, $subject, $SUBJECT_dir, $TRANSFORM_dir,
# $BIDS_dir and $LOG_dir are presumably set by the surrounding script.
flair_file=$SUBJECT_dir/anat/sub-${subject}_FLAIR.nii.gz
flair_brain=$SUBJECT_dir/anat/sub-${subject}_FLAIR_BrainExtractionBrain.nii.gz
flair_brain_mask=$SUBJECT_dir/anat/sub-${subject}_FLAIR_BrainExtractionMask.nii.gz
lesion_file=$SUBJECT_dir/anat/sub-${subject}_FLAIR_lesion_mask.nii.gz
t1_file=$SUBJECT_dir/anat/sub-${subject}_DC_T1w_in_base.nii.gz
t1_brain_mask=$SUBJECT_dir/anat/sub-${subject}_DC_T1w_in_base_BrainExtractionMask.nii.gz
t1_brain=$SUBJECT_dir/anat/sub-${subject}_DC_T1w_in_base_BrainExtractionBrain.nii.gz
transform_t1_2_dwi=$TRANSFORM_dir/rigid_T1_to_DWI.txt
transform_flair_2_t1=$SUBJECT_dir/anat/transform/rigid_FLAIR_to_T1.txt
# NOTE(review): transform_flair_2dwi is assigned but never used below —
# presumably kept for the commented-out MRtrix pipeline; confirm before removing.
transform_flair_2dwi=$SUBJECT_dir/anat/transform/rigid_FLAIR_to_DWI.txt
# Rigid FLAIR->T1 registration with FSL flirt (the mrregister/mrtransform
# lines below are the retired MRtrix alternative, kept for reference).
#mrregister -force ${flair_brain} ${t1_brain} -type rigid -rigid ${transform_flair_2_t1}
flirt -in ${flair_brain} -ref ${t1_brain} -omat $SUBJECT_dir/anat/transform/flair_2_t1_fsl.mat
# Apply the FLAIR->T1 matrix to the whole-head FLAIR and to the lesion mask.
flirt -in ${flair_file} -out $SUBJECT_dir/anat/transform/sub-${subject}_FLAIR_in_T1.nii.gz -applyxfm -init $SUBJECT_dir/anat/transform/flair_2_t1_fsl.mat -ref ${t1_file}
flirt -in ${lesion_file} -out $SUBJECT_dir/anat/transform/sub-${subject}_lesion_in_T1.nii.gz -applyxfm -init $SUBJECT_dir/anat/transform/flair_2_t1_fsl.mat -ref ${t1_file}
#mrtransform -force ${flair_file} $SUBJECT_dir/anat/transform/sub-${subject}_FLAIR_in_T1.nii.gz -linear ${transform_flair_2_t1} -template ${t1_file}
#mrtransform -force $SUBJECT_dir/anat/transform/sub-${subject}_FLAIR_in_T1.nii.gz $TRANSFORM_dir/sub-${subject}_FLAIR_rigid_DWI.nii.gz -linear $transform_t1_2_dwi -template $TRANSFORM_dir/sub-${subject}_dwi_meanbzero_111.nii.gz
# Chain the precomputed T1->DWI rigid transform to bring FLAIR and lesion
# into DWI (mean-b0) space.
flirt -in $SUBJECT_dir/anat/transform/sub-${subject}_FLAIR_in_T1.nii.gz -ref $TRANSFORM_dir/sub-${subject}_dwi_meanbzero_111.nii.gz -applyxfm -init $transform_t1_2_dwi -out $TRANSFORM_dir/sub-${subject}_FLAIR_rigid_DWI.nii.gz
#mrtransform -force ${lesion_file} $SUBJECT_dir/anat/transform/sub-${subject}_lesion_in_T1.nii.gz -linear ${transform_flair_2_t1} -template ${t1_file}
#mrtransform -force $SUBJECT_dir/anat/transform/sub-${subject}_lesion_in_T1.nii.gz $TRANSFORM_dir/sub-${subject}_lesion_rigid_DWI.nii.gz -linear $transform_t1_2_dwi -template $TRANSFORM_dir/sub-${subject}_dwi_meanbzero_111.nii.gz
flirt -in $SUBJECT_dir/anat/transform/sub-${subject}_lesion_in_T1.nii.gz -ref $TRANSFORM_dir/sub-${subject}_dwi_meanbzero_111.nii.gz -applyxfm -init $transform_t1_2_dwi -out $TRANSFORM_dir/sub-${subject}_lesion_rigid_DWI.nii.gz
# Re-binarise the lesion mask after interpolation (threshold at 0.5).
fslmaths $TRANSFORM_dir/sub-${subject}_lesion_rigid_DWI.nii.gz -thr 0.5 -bin $TRANSFORM_dir/sub-${subject}_lesion_rigid_DWI_bin.nii.gz
#[ -e $TRANSFORM_dir/sub-${sub}_FLAIR_rigid_DWI.nii.gz ] && rm $TRANSFORM_dir/sub-${sub}_FLAIR_rigid_DWI.nii.gz
#[ -e $TRANSFORM_dir/sub-${sub}_lesion_rigid_DWI.nii.gz ] && rm $TRANSFORM_dir/sub-${sub}_lesion_rigid_DWI.nii.gz
#[ -e $TRANSFORM_dir/sub-${sub}_lesion_rigid_DWI_bin.nii.gz ] && rm $TRANSFORM_dir/sub-${sub}_lesion_rigid_DWI_bin.nii.gz
done
# Follow-up timepoints 2-4: reuse the baseline (timepoint 1) lesion-in-T1
# image with each timepoint's own T1->DWI transform. Jobs are backgrounded
# (&) per timepoint and collected by 'wait'.
for time in 2 3 4; do
subject=${sub}_${time}
# QA gate: 'continue 3' from inside the case statement resumes the loop over
# timepoints (LOG loop = 1, sequence loop = 2, time loop = 3).
for sequence in flair; do
for LOG in $(cat $LOG_dir/qa/no_${sequence}); do
case $LOG in *"${sub}"*) echo "Known missing ${sequence} data for Subject Nr. ${sub}"; continue 3 ;; esac
done
for LOG in $(cat $LOG_dir/qa/bad_${sequence}); do
case $LOG in *"${sub}"*) echo "Known bad ${sequence} data for Subject Nr. ${sub}"; continue 3 ;; esac
done
done
for sequence in data dwi anat t1; do
for LOG in $(cat $LOG_dir/qa/no_${sequence}); do
case $LOG in *"${subject}"*) echo "Known missing ${sequence} data for Subject Nr. ${subject}"; continue 3 ;; esac
done
for LOG in $(cat $LOG_dir/qa/bad_${sequence}); do
case $LOG in *"${subject}"*) echo "Known bad ${sequence} data for Subject Nr. ${subject}"; continue 3 ;; esac
done
done
export SUBJECT_dir=$BIDS_dir/sub-$subject
[ ! -e $SUBJECT_dir ] && continue
export TRANSFORM_dir=$SUBJECT_dir/dwi/transform_base
# Idempotency: skip timepoints that already have a binarised lesion mask.
[ -e $TRANSFORM_dir/sub-${subject}_lesion_rigid_DWI_bin.nii.gz ] && continue
#mrtransform -force $BIDS_dir/sub-${sub}_1/anat/transform/sub-${sub}_1_lesion_in_T1.nii.gz $TRANSFORM_dir/sub-${subject}_lesion_rigid_DWI.nii.gz -linear $TRANSFORM_dir/rigid_T1_to_DWI.txt -template $TRANSFORM_dir/sub-${subject}_dwi_meanbzero_111.nii.gz
flirt -in $BIDS_dir/sub-${sub}_1/anat/transform/sub-${sub}_1_lesion_in_T1.nii.gz -ref $TRANSFORM_dir/sub-${subject}_dwi_meanbzero_111.nii.gz -applyxfm -init $TRANSFORM_dir/rigid_T1_to_DWI.txt -out $TRANSFORM_dir/sub-${subject}_lesion_rigid_DWI.nii.gz && fslmaths $TRANSFORM_dir/sub-${subject}_lesion_rigid_DWI.nii.gz -thr 0.5 -bin $TRANSFORM_dir/sub-${subject}_lesion_rigid_DWI_bin.nii.gz &
done
wait
done
<file_sep>#! /bin/bash
# Convert a fixed set of MNI-space ROI coordinates (motor network) into each
# subject's native FreeSurfer space using FSL std2imgcoord and the fsl_anat
# nonlinear warp. One output file per coordinate under $SUB_dir/coord.
#set -e # EXIT ON ERROR
BIDS_dir=/mnt/Storage/bene/EXTERN/UHHWORK/data/SFB_BIDS
SUBJECTS_DIR=/mnt/Storage/bene/EXTERN/UHHWORK/data/SFB_BIDS/derivatives/FREESURFER_T1_DC
i=0
participants_list=$BIDS_dir/control_participants_single.tsv
number=$(cat $participants_list | wc -w)
# NOTE(review): min_name is not used anywhere in this script; confirm before removing.
min_name=-0125 # Binarize Curvature file with minimum concave (i.e. max convex) of min
for sub in $(cat $participants_list); do
#for sub in pa_026; do
Subject=${sub}
let i=i+1
echo "======================================================================> $Subject $i / $number <======================================================================="
SUB_dir=$BIDS_dir/derivatives/FS_base/$sub
fsl_anat_dir=$SUB_dir/sub-${sub}_fs_orig.anat
mkdir -p $SUB_dir/coord
if [ ! -e $SUB_dir/sub-${sub}_fs_orig.nii.gz ]; then echo $Subject >> $BIDS_dir/no_base; continue; fi
##### Waiting Block #####
# Poll every 30 s until fsl_anat has produced the nonlinear warp; give up on
# this subject after 120 minutes and log it ('continue 2' resumes the subject loop).
time=0
while [ ! -e $fsl_anat_dir/T1_to_MNI_nonlin.nii.gz -a ! -e $fsl_anat_dir/T1_to_MNI_nonlin_field.nii.gz ]; do
sleep 30
time=$((time+30))
minutes=$((time/60))
echo "Waiting since " $minutes "minutes now..."
# BUGFIX: was 'echo $ Subject', which wrote the literal text '$ Subject' to
# the log file instead of the subject ID (cf. the no_base logging above).
if [ $minutes -gt 120 ]; then echo $Subject >> $BIDS_dir/waiting_to_long; continue 2; fi
done
#########################
#### Coordinates from Schulz et al., Stroke 2016
LH_aIPS="-36 -44 54"
LH_cIPS="-20 -62 52"
LH_M1="-36 -20 52"
LH_PMV="-52 6 30"
LH_SMA="-2 -4 56"
RH_aIPS="36 -44 54"
RH_cIPS="20 -62 52"
RH_M1="36 -20 52"
RH_PMV="52 6 30"
RH_SMA="2 -4 56"
## Coordinates from Rehme 2010
LH_dPMC="-42 -10 58"
LH_S1="-36 -30 60"
LH_pre_SMA="-2 6 54"
RH_dPMC="42 -6 56"
RH_S1="40 -28 52"
RH_pre_SMA="2 2 56"
for coord in LH_aIPS LH_cIPS LH_M1 LH_PMV LH_SMA RH_aIPS RH_cIPS RH_M1 RH_PMV RH_SMA LH_dPMC LH_S1 LH_pre_SMA RH_dPMC RH_S1 RH_pre_SMA; do ## all
#for coord in LH_aIPS LH_cIPS LH_M1 LH_PMV LH_SMA RH_aIPS RH_cIPS RH_M1 RH_PMV RH_SMA; do ## Robert
#for coord in LH_dPMC LH_S1 LH_pre_SMA RH_dPMC RH_S1 RH_pre_SMA; do ## Rehme
echo -n $coord
echo -n " is "
# ${!coord} is bash indirect expansion: the MNI triplet stored in the
# variable whose name is held in $coord.
echo -n ${!coord}
echo `echo ${!coord} | std2imgcoord -img $SUB_dir/sub-${sub}_fs_orig.nii.gz -std $fsl_anat_dir/T1_to_MNI_nonlin.nii.gz -warp $fsl_anat_dir/T1_to_MNI_nonlin_field.nii.gz -` > $SUB_dir/coord/$coord
echo -n "; and is transformed to "
echo $(cat $SUB_dir/coord/$coord)
done
done
<file_sep>#!/bin/bash
# (BUGFIX: shebang was '#/bin/bash' — without the '!' it is just a comment and
# the script ran under whatever shell happened to invoke it.)
# Build a per-subject structural connectome: number the tracking masks into a
# single 'nodes' label image, then run MRtrix tck2connectome on it.
set -e
export BIDS_dir=/mnt/Storage/bene/EXTERN/UHHWORK/data/SFB_BIDS ###### CHECK BIDS_dir
export BASE_dir=$BIDS_dir/derivatives/FS_base
export LOG_dir=$BIDS_dir/log
export QA_dir=$LOG_dir/qa
i=0
for sub in $(cat $BIDS_dir/all_participants.tsv); do
#for sub in ca_001_1; do
echo $sub
# QA gate: skip subjects with known missing/bad data. 'continue 4' exceeds
# the actual loop depth (3); bash then resumes the outermost loop, i.e. the
# next subject.
for sequence in data dwi anat t1; do
for LOG in $(cat $LOG_dir/qa/no_${sequence}); do
case $LOG in *"${sub}"*) echo "Known missing ${sequence} data for Subject Nr. ${sub}"; continue 4 ;; esac
done
for LOG in $(cat $LOG_dir/qa/bad_${sequence}); do
# BUGFIX: the pattern matched on ${subject}, which is never set in this
# script. The resulting empty pattern (*""*) matched every log line, so any
# non-empty bad_* file skipped EVERY subject. Match on ${sub}, as the
# no_* check above does.
case $LOG in *"${sub}"*) echo "Known bad ${sequence} data for Subject Nr. ${sub}"; continue 4 ;; esac
done
done
export SUB_dir=$BIDS_dir/sub-$sub
export ANAT_dir=$SUB_dir/anat
export DWI_dir=$SUB_dir/dwi
export SUB_LOG_dir=$SUB_dir/log
export TRANSFORM_dir=$SUB_dir/dwi/transform_base
[ ! -e $TRANSFORM_dir/tracking_masks ] && echo ALARM && continue
i=0
cd $TRANSFORM_dir/tracking_masks
# NOTE(review): assignments.tsv is appended to, never truncated — re-running
# the script duplicates its rows. Confirm whether it should be recreated here.
for mask in *final.nii.gz;do
[ "$mask" == "LH_Lenti_final.nii.gz" ] && continue
[ "$mask" == "RH_Lenti_final.nii.gz" ] && continue
let i=i+1
echo "$i $mask" >> $TRANSFORM_dir/assignments.tsv
# Seed an all-zero label image with the geometry of the first mask.
[ ! -e nodes.nii.gz ] && fslmaths $mask -mul 0 nodes.nii.gz
# Drop voxels already claimed by earlier masks, then add this mask as label $i.
fslmaths nodes.nii.gz -bin -mul $mask intersec.nii.gz
fslmaths $mask -sub intersec.nii.gz mask_temp.nii.gz
fslmaths mask_temp.nii.gz -mul $i temp.nii.gz
fslmaths nodes.nii.gz -add temp.nii.gz nodes.nii.gz
done
[ -e intersec.nii.gz ] && rm intersec.nii.gz
[ -e mask_temp.nii.gz ] && rm mask_temp.nii.gz
[ -e temp.nii.gz ] && rm temp.nii.gz
tck2connectome $TRANSFORM_dir/tractogram.tck nodes.nii.gz $TRANSFORM_dir/connectome.csv -force -tck_weights_in $TRANSFORM_dir/weights.csv -zero_diagonal -symmetric
done
| e08b352293798af21d8a0ec7cc553c1de8f21bf6 | [
"Shell"
] | 8 | Shell | bmfrey/sfb_motor_analysis | afd8209ee41e757c6965c463f6d3715127dc7592 | 30cacb80aec88e29eb3f8f9c641cefc5afd8d192 |
refs/heads/master | <repo_name>MMRBD/Blog<file_sep>/README.md
# Personal Blog
This is a learning and practice project for Django.
<file_sep>/phone_book/models.py
from django.db import models
class PhoneBook(models.Model):
    """A single address-book entry; one row per contact, all fields free text."""
    name = models.CharField(max_length=200)
    email = models.CharField(max_length=200)
    mobile = models.CharField(max_length=15)
    phone = models.CharField(max_length=10)
    city = models.CharField(max_length=200)
    address = models.CharField(max_length=200)
    country = models.CharField(max_length=200)
    def __str__(self):
        # Human-readable label used by the Django admin and shell.
        return self.name
<file_sep>/phone_book/urls.py
from django.urls import path
from . import views
# URL table for the phone_book app. The route names ('p_home', 'edit', ...)
# are referenced by redirect() calls in views.py and by templates.
urlpatterns = [
    path('', views.home, name='p_home'),
    path('add_contact', views.add_contact, name='add_contact'),
    path('edit/<contact_id>', views.edit, name='edit'),
    path('delete/<contact_id>', views.delete, name='delete'),
]
<file_sep>/phone_book/views.py
from django.shortcuts import render, redirect
from .models import PhoneBook
from .forms import PhoneBookForms
from django.contrib import messages
def home(request):
    """List every stored contact on the phone-book home page."""
    # Pass the queryset (.all()) rather than the bound method reference (.all):
    # templates call callables anyway, but the queryset is the documented idiom.
    all_contact = PhoneBook.objects.all()
    return render(request, 'phone_book/home.html', {'all_contact': all_contact})
def edit(request, contact_id):
    """Display and process the edit form for the contact with pk=contact_id.

    GET renders the edit form; a valid POST saves and redirects home.
    """
    contact = PhoneBook.objects.get(pk=contact_id)
    if request.method == "POST":
        form = PhoneBookForms(request.POST or None, instance=contact)
        if form.is_valid():
            form.save()
            messages.success(request, "Contact Edit has been Added!!")
            return redirect('p_home')
        # BUGFIX: on an invalid form this used to flash messages.success and
        # render the *add_contact* template with an empty context; flash an
        # error and re-render the edit page with the contact instead.
        messages.error(request, "Contact edit fail!!")
        return render(request, 'phone_book/edit.html', {'get_contact': contact})
    return render(request, 'phone_book/edit.html', {'get_contact': contact})
def add_contact(request):
    """Render the add-contact form and create a contact on valid POST."""
    if request.method == "POST":
        form = PhoneBookForms(request.POST or None)
        if form.is_valid():
            form.save()
            messages.success(request, "Contact has been Added!!")
            return redirect('p_home')
        # BUGFIX: the failure path used messages.success; use the error level
        # so templates style the flash correctly.
        messages.error(request, "Contact save fail!!")
    # GET, or invalid POST: show the (empty) form again.
    return render(request, 'phone_book/add_contact.html', {})
def delete(request, contact_id):
    """Delete the contact with pk=contact_id (POST only).

    Non-POST requests are bounced back to the home page unchanged.
    """
    if request.method != "POST":
        messages.success(request, "Nothing to see here..!!")
        return redirect('p_home')
    doomed = PhoneBook.objects.get(pk=contact_id)
    doomed.delete()
    messages.success(request, "Contact has been DELETED!!")
    return redirect('p_home')
<file_sep>/Blog/urls.py
from django.contrib import admin
from django.urls import path
from django.conf.urls import url, include
from .import views
# Project-level URL table: admin site, two static pages, and one include per app.
# NOTE(review): django.conf.urls.url() is deprecated (removed in Django 4);
# these patterns would need re_path()/path() on upgrade.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^about/$', views.about),
    url(r'^articles/', include('articles.urls')),
    url(r'^accounts/', include('accounts.urls')),
    url(r'^authenticate/', include('authenticate.urls')),
    url(r'^exercise/', include('exercise.urls')),
    url(r'^phone_book/', include('phone_book.urls')),
    url(r'^$', views.home_page)
]
<file_sep>/authenticate/templates/authenticate/login.html
{% extends 'authenticate/base.html' %}
{% block content %}
{# Username/password login form posting back to the current URL. #}
<h1 class="form-text text-muted"> Login </h1>
<form method="POST" action="">
{% csrf_token %}
<div class="form-group">
<input class="form-control" type="text" placeholder="Enter Username" name="username">
</div>
<div class="form-group">
{# BUGFIX: the field name was the scrub artifact "<PASSWORD>", so the browser
   would POST a parameter literally named "<PASSWORD>". Restored to "password"
   to match the placeholder — NOTE(review): confirm against the authenticate view. #}
<input class="form-control" type="password" placeholder="Enter Password" name="password">
</div>
<button class="btn btn-secondary" type="submit">Login</button>
</form>
{% endblock %}
<file_sep>/Blog/views.py
from django.http import HttpResponse
from django.shortcuts import render
# Two static pages. The commented-out HttpResponse calls are the pre-template
# versions, kept for reference.
def home_page(request):
    """Render the site landing page."""
    return render(request, 'blog/home_page.html')
    # return HttpResponse('Home Page')
def about(request):
    """Render the static about page."""
    return render(request, 'blog/about.html')
    # return HttpResponse('About Page')
| 284999de3e9833f82db1f77ab71e1271b912509a | [
"Markdown",
"Python",
"HTML"
] | 7 | Markdown | MMRBD/Blog | 50342e0c017079471d00a17fe3df940476a0e8a0 | fea0737c8061edf8212bc90c6a421db01126bb0e |
refs/heads/master | <repo_name>ajourquin/module-custom-product-sorting<file_sep>/Plugin/Elasticsearch/Model/ResourceModel/Fulltext/Collection/SearchCriteriaResolver.php
<?php
/**
* @author <NAME> <<EMAIL>>
* @link http://www.ajourquin.com
*/
declare(strict_types=1);
namespace Ajourquin\CustomProductSorting\Plugin\Elasticsearch\Model\ResourceModel\Fulltext\Collection;
use Magento\Elasticsearch\Model\ResourceModel\Fulltext\Collection\SearchCriteriaResolver as MagentoSearchCriteriaResolver;
use Magento\Framework\Api\Search\SearchCriteria;
class SearchCriteriaResolver
{
    /**
     * After-plugin on the fulltext collection's SearchCriteriaResolver.
     *
     * "custom_value" is a pseudo sort option handled by this module's Layer
     * plugin, not a real index field, so it is stripped from the resolved
     * criteria before the request reaches Elasticsearch.
     *
     * @param MagentoSearchCriteriaResolver $subject
     * @param SearchCriteria $result
     * @return SearchCriteria
     */
    public function afterResolve(MagentoSearchCriteriaResolver $subject, SearchCriteria $result): SearchCriteria
    {
        $orders = $result->getSortOrders();
        unset($orders['custom_value']);

        $result->setSortOrders($orders);

        return $result;
    }
}
<file_sep>/Plugin/Catalog/Model/Config/Source/ListSort.php
<?php
/**
* @author <NAME> <<EMAIL>>
* @link http://www.ajourquin.com
*/
declare(strict_types=1);
namespace Ajourquin\CustomProductSorting\Plugin\Catalog\Model\Config\Source;
use Magento\Catalog\Model\Config\Source\ListSort as MagentoListSort;
class ListSort
{
    /**
     * After-plugin on the catalog list-sort source model: appends the
     * module's "Custom Value" entry to the selectable sort options.
     *
     * @param MagentoListSort $subject
     * @param array $result
     * @return array
     */
    public function afterToOptionArray(MagentoListSort $subject, array $result): array
    {
        $result[] = [
            'label' => \__('Custom Value'),
            'value' => 'custom_value',
        ];

        return $result;
    }
}
<file_sep>/Plugin/Catalog/Model/Config.php
<?php
/**
* @author <NAME> <<EMAIL>>
* @link http://www.ajourquin.com
*/
declare(strict_types=1);
namespace Ajourquin\CustomProductSorting\Plugin\Catalog\Model;
use Magento\Catalog\Model\Config as MagentoConfig;
class Config
{
    /**
     * After-plugin on the catalog config: registers "custom_value" in the
     * list of attributes that product collections can be sorted by.
     *
     * The label is capitalised as "Custom Value" for consistency with the
     * ListSort and Sortby source-model plugins of this module (this class
     * alone used "Custom value").
     *
     * @param MagentoConfig $subject
     * @param array $result options keyed by attribute code
     * @return array
     */
    public function afterGetAttributeUsedForSortByArray(MagentoConfig $subject, array $result): array
    {
        $result['custom_value'] = \__('Custom Value');
        return $result;
    }
}
<file_sep>/registration.php
<?php
/**
 * @author <NAME> <<EMAIL>>
 * @link http://www.ajourquin.com
 */
declare(strict_types=1);
// Standard Magento component registration: announces this directory to the
// framework under the module name declared in etc/module.xml.
\Magento\Framework\Component\ComponentRegistrar::register(
    \Magento\Framework\Component\ComponentRegistrar::MODULE,
    'Ajourquin_CustomProductSorting',
    __DIR__
);
<file_sep>/Plugin/Catalog/Model/Layer.php
<?php
/**
* @author <NAME> <<EMAIL>>
* @link http://www.ajourquin.com
*/
declare(strict_types=1);
namespace Ajourquin\CustomProductSorting\Plugin\Catalog\Model;
use Magento\Catalog\Block\Product\ProductList\Toolbar as MagentoToolbar;
use Magento\Catalog\Model\Layer as MagentoLayer;
use Magento\Catalog\Model\ResourceModel\Product\Collection as ProductCollection;
use Magento\Framework\Api\SortOrder;
class Layer
{
    /** @var MagentoToolbar */
    private $toolbar;

    /**
     * @param MagentoToolbar $toolbar Toolbar block, used to read the sort
     *        order currently selected by the shopper.
     */
    public function __construct(
        MagentoToolbar $toolbar
    ) {
        $this->toolbar = $toolbar;
    }

    /**
     * Before-plugin on the catalog layer: when the shopper picked the
     * module's "custom_value" pseudo sort option, apply the real ordering
     * (price descending, then name ascending) to the product collection.
     *
     * @param MagentoLayer $subject
     * @param ProductCollection $collection
     * @return array
     */
    public function beforePrepareProductCollection(MagentoLayer $subject, ProductCollection $collection): array
    {
        $currentOrder = $this->toolbar->getCurrentOrder();
        switch ($currentOrder) {
            case 'custom_value':
                // Consistency fix: use SortOrder::SORT_DESC instead of the bare
                // 'desc' literal so both directions use the framework constants
                // (SQL ORDER BY direction is case-insensitive, so the generated
                // query is unchanged).
                $collection->setOrder('price', SortOrder::SORT_DESC);
                $collection->setOrder('name', SortOrder::SORT_ASC);
                break;
        }
        return [$collection];
    }
}
<file_sep>/README.md
# Ajourquin_CustomProductSorting
This extension demonstrates how to create a custom product sort option and how to set it as the default for all categories or for a specific one
## Authors
* **<NAME>** - *Initial work*
## Releases notes
* **1.1.0**
* Add Magento 2.4 and ES compatibility
* **1.0.0**
* Initial release
<file_sep>/Plugin/Catalog/Model/Category/Attribute/Source/Sortby.php
<?php
/**
* @author <NAME> <<EMAIL>>
* @link http://www.ajourquin.com
*/
declare(strict_types=1);
namespace Ajourquin\CustomProductSorting\Plugin\Catalog\Model\Category\Attribute\Source;
use Magento\Catalog\Model\Category\Attribute\Source\Sortby as MagentoSortby;
class Sortby
{
    /**
     * After-plugin on the category "Available Sort Options" attribute source:
     * appends the module's "Custom Value" entry to the option list.
     *
     * @param MagentoSortby $subject
     * @param array $result
     * @return array
     */
    public function afterGetAllOptions(MagentoSortby $subject, array $result): array
    {
        $result[] = [
            'label' => \__('Custom Value'),
            'value' => 'custom_value',
        ];

        return $result;
    }
}
| 28824d8a346ce44525dd4c40f1a558b028464bbb | [
"Markdown",
"PHP"
] | 7 | PHP | ajourquin/module-custom-product-sorting | 28f63a0fa80ffcd2c01b2fc6eab8dbb4dda29f23 | ba2e94d6d393ebfb6e786a8fd645b6dac2a9c18f |
refs/heads/master | <repo_name>timmy78/yii-email<file_sep>/email/views/email/email.php
<?php /* HTML e-mail layout rendered by DefaultController::send() via
renderPartial; $model is an EmailForm. Shows the subject as the <title>,
the body (newlines -> <br/>) in a fixed-width table, then a copyright
footer. Table-based markup is intentional for e-mail client support. */ ?>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title><?php echo CHtml::encode($model->subject) ?></title>
<style media="all" type="text/css">
</style>
</head>
<body>
<table cellspacing="0" cellpadding="0" border="0" width="650px">
<tr>
<td align="center">
</td>
</tr>
<tr>
<td bgcolor="#FFFFFF" align="center">
<table width="650px" cellspacing="0" cellpadding="3" class="container">
<tr>
<td>
<hr/>
<?php echo nl2br($model->body); ?>
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td bgcolor="#FFFFFF" align="center">
<table width="650px" cellspacing="0" cellpadding="3" class="container">
<tr>
<td>
<hr>
<p class="text-center">
Copyright © <?php echo date('Y'); ?> StreamMind.
<?php
// Footer: localized "all rights reserved" plus the application version.
echo Yii::t('email','Tous droits réservés.');
echo Yii::app()->params['version'].'.';
?>
</p>
</td>
</tr>
</table>
</td>
</tr>
</table>
</body>
</html><file_sep>/email/views/default/_form.php
<?php
/**
 * Partial view rendering the e-mail composition form.
 *
 * One input row is generated per safe attribute of EmailForm, with the
 * widget chosen from the attribute's name; then one sub-form is rendered per
 * ReplyTo / Address model, plus submit buttons that post back with the
 * addReplyTo / addAddress flags handled by DefaultController::actionIndex().
 *
 * NOTE(review): several branches (activated, file, acqsta, admin, status,
 * scenarios, datevaliditystart) reference attributes/classes (User, ...)
 * that EmailForm does not declare — this view looks copied from a generic
 * form template; those branches are presumably dead here. Confirm.
 *
 * @property EmailForm $model
 * @property CActiveForm $form
 */
$class = get_class($model);
if(empty($attributes)) {
    $attributes = $model->getSafeAttributeNames();
}
?>
<div class="form row" id="<?php echo $class; ?>">
<div class="col-sm-8 col-centered">
<?php
$form = $this->beginWidget('CActiveForm', array(
    'id' => 'form-'.$class,
    'enableClientValidation'=>true,
    'enableAjaxValidation'=>false,
    'htmlOptions' => array(
        'class' => 'form-horizontal',
        'role' => 'form',
        //'enctype' => 'multipart/form-data'
    ),
    'clientOptions' => array(
        'errorCssClass' => 'has-error',
        'successCssClass' => 'has-success'
    ),
    'errorMessageCssClass' => 'help-block'
));
$columnsLabel = 3;
$columnsInput = 9;
$i=0;
foreach($attributes as $attribute)
{
    $htmlOptions = array('class' => 'form-control');
    $title = str_replace(' :','',$model->getAttributeLabel($attribute));
    $dropDownList = false;
    /*
     * Widget selection: attributes with a drop-down source set $dropDownList,
     * everything else falls through to the type checks below.
     */
    if(in_array($attribute, array('sndbic', 'rcvbic'))) {
        $htmlOptions['style'] = 'text-transform:uppercase;';
    }
    else if($attribute == 'priority') {
        $dropDownList = EmailForm::getPriorities();
    }
    else if($attribute == 'contentType') {
        $dropDownList = EmailForm::getContents();
    }
    else if($attribute == 'mailerType') {
        $dropDownList = EmailForm::getMailers();
    }
    else if(in_array($attribute, array('status'))) {
        $dropDownList = $model::getStatutes();
    }
    // Show the validity-period toggle just before its first field.
    if($attribute == 'datevaliditystart')
    {
        if(!empty($model->datevaliditystart) || !empty($model->datevalidityend))
            $checked = true;
        else if(Yii::app()->request->getPost('periode') != null)
            $checked = true;
        else
            $checked = false;
        echo '<div class="champ form-group">'
            .CHtml::label(Yii::t('email', 'Activer la période de validité').' :', false, array( 'class' => 'col-sm-'.$columnsLabel.' control-label' ))
            .'<div class="col-sm-'.$columnsInput.'">'
            .CHtml::checkBox('periode', $checked, array('class'=>'checkbox'))
            .'</div>'
            .'</div>';
    }
    echo '<div class="champ form-group" id="'.$attribute.'">';
    echo $form->labelEx($model, $attribute, array( 'class' => 'col-sm-'.$columnsLabel.' control-label' ));
    echo '<div class="col-sm-'.$columnsInput.'">';
    if($attribute == 'activated') {
        echo $form->radioButtonList(
            $model,
            $attribute,
            array( User::ACTIVATED => Yii::t('email', 'Activé'), User::DESACTIVATED => Yii::t('email', 'Désactivé') ),
            array(
                'separator' => '<span style="margin-left:20px;"></span>'
            )
        );
    }
    else if($attribute == 'file') {
        echo $form->fileField($model, $attribute);
    }
    else if(in_array($attribute, array('acqsta'))) {
        $htmlOptions['class'] = 'checkbox';
        echo $form->checkBox($model, $attribute, $htmlOptions);
    }
    else if($attribute == 'admin') {
        $data = array(User::IS_ADMIN => Yii::t('email', 'Oui'), 0 => Yii::t('email', 'Non'));
        echo $form->radioButtonList($model, $attribute, $data, array('separator' => ' '));
    }
    else if($dropDownList !== false) {
        echo $form->dropDownList($model, $attribute, $dropDownList, $htmlOptions);
        if($attribute == 'scenarios')
            echo '<div id="scenariosButtons"></div>';
    }
    else if(in_array($attribute, array('email', 'from'))) {
        echo $form->emailField($model, $attribute, $htmlOptions);
    }
    else if(in_array($attribute, array('password', 'passwordConf'))) {
        echo $form->passwordField($model, $attribute, $htmlOptions);
    }
    else if(in_array($attribute, array('body'))) {
        echo $form->textArea($model, $attribute, $htmlOptions);
    }
    else {
        echo $form->textField($model, $attribute, $htmlOptions);
    }
    echo $form->error($model, $attribute);
    echo '</div>';
    echo '</div>';
    $i++;
}
/*
 * REPLIES: one panel of name/email inputs per ReplyTo sub-model, indexed
 * ('[i]attr') so EmailForm::setAttributes() can rebuild them from POST.
 */
if(!empty($model->repliesTo))
{
    foreach($model->repliesTo as $i => $replyTo)
    {
        $attributes = $replyTo->getSafeAttributeNames();
        echo '<hr/>'
            .'<div class="receiver panel panel-default"><div class="panel-heading">'
            .'<h4 class="panel-title">'.Yii::t('email', 'Reply to').' ('.($i+1).')</h4>'
            .'</div><div class="panel-body">';
        foreach($attributes as $attribute)
        {
            $function = $attribute == 'email' ? 'emailField' : 'textField';
            echo '<div class="champ form-group '.$attribute.'">';
            echo $form->labelEx($replyTo, $attribute, array( 'class' => 'col-sm-3 control-label' ));
            echo '<div class="col-sm-9">';
            echo $form->$function($replyTo, '['.$i.']'.$attribute, $htmlOptions);
            echo $form->error($replyTo, '['.$i.']'.$attribute, array('class' => 'help-block'));
            echo '</div>';
            echo '</div>';
        }
        echo '</div></div>';
    }
}
/*
 * ADDRESSES: same pattern for the recipient Address sub-models.
 */
if(!empty($model->addresses))
{
    foreach($model->addresses as $i => $address)
    {
        $attributes = $address->getSafeAttributeNames();
        echo '<hr/>'
            .'<div class="receiver panel panel-default"><div class="panel-heading">'
            .'<h4 class="panel-title">'.Yii::t('email', "Address").' ('.($i+1).')</h4>'
            .'</div><div class="panel-body">';
        foreach($attributes as $attribute)
        {
            $function = $attribute == 'email' ? 'emailField' : 'textField';
            echo '<div class="champ form-group '.$attribute.'">';
            echo $form->labelEx($address, $attribute, array( 'class' => 'col-sm-3 control-label' ));
            echo '<div class="col-sm-9">';
            echo $form->$function($address, '['.$i.']'.$attribute, $htmlOptions);
            echo $form->error($address, '['.$i.']'.$attribute, array('class' => 'help-block'));
            echo '</div>';
            echo '</div>';
        }
        echo '</div></div>';
    }
}
?>
<div class="text-right" style="margin-bottom:50px;">
<?php
// Both buttons submit the form with a flag the controller reads to append a
// new empty ReplyTo / Address sub-form.
echo CHtml::htmlButton(
    '<span class="fa fa-plus"></span> '.Yii::t('email', "Add replyTo"),
    array(
        'type' => 'submit',
        'class' => 'btn btn-success btn-sm',
        'name' => 'addReplyTo',
        'value' => 1
    )
)."\n";
echo CHtml::htmlButton(
    '<span class="fa fa-plus"></span> '.Yii::t('email', "Add address"),
    array(
        'type' => 'submit',
        'class' => 'btn btn-success btn-sm',
        'name' => 'addAddress',
        'value' => 1
    )
);
?>
</div>
<div class="form-group text-right">
<div class="col-xs-12">
<?php
echo CHtml::htmlButton('<span class="fa fa-undo"></span> '.Yii::t('email', 'Reset'), array(
    'type' => 'reset',
    'class' => 'btn btn-primary'
))."\n";
$text = '<span class="fa fa-check"></span> '.Yii::t('email', 'Valider');
//Validate
echo CHtml::htmlButton(
    $text,
    array(
        'type' => 'submit',
        'class' => 'btn btn-primary validate'
    )
);
?>
</div>
</div>
<?php
$this->endWidget();
?>
</div>
<script type="text/javascript">
$(document).ready(function(){
});
</script>
</div><file_sep>/email/controllers/DefaultController.php
<?php
/**
 * Controller of the email module: renders the composition form and sends the
 * message through the EMailer (PHPMailer wrapper) extension.
 *
 * @see https://github.com/advancedrei/BootstrapForEmail
 * @see http://htmlemailboilerplate.com/
 * @see http://templates.mailchimp.com/resources/inline-css/
 *
 * @property EmailForm $model
 * @property EMailer $mailer
 */
class DefaultController extends Controller
{
    protected $model;
    protected $mailer;
    // Yii path alias of the directory holding the e-mail body view.
    private $_pathView;
    public function init()
    {
        parent::init();
        $this->_pathView = 'application.modules.email.views.email';
        $this->breadcrumbs=array(
            Yii::t('email', "Module email"),
        );
        $this->setPageTitle(Yii::t('email', "Envoie"));
        $this->model = new EmailForm;
        $this->mailer = Yii::createComponent('application.modules.email.extensions.mailer.EMailer');
        $this->mailer->setPathViews($this->_pathView);
    }
    /**
     * Show the form; on POST either append an empty ReplyTo/Address sub-form
     * (addReplyTo / addAddress submit buttons) or validate and send.
     */
    public function actionIndex()
    {
        $values = Yii::app()->request->getPost('EmailForm');
        if($values != null)
        {
            $this->model->setAttributes($values);
            //RepliesTo
            if(Yii::app()->request->getPost('addReplyTo')) {
                $this->model->repliesTo[] = new ReplyTo();
            }
            //Address
            if(Yii::app()->request->getPost('addAddress')) {
                $this->model->addresses[] = new Address();
            }
            if($this->model->validate() && $this->send())
            {
                Yii::app()->user->setFlash('success', Yii::t('email', "E-mail correctement envoyé"));
                $this->refresh();
            }
        }
        $this->render('index');
    }
    /*
     * Prepare the message from the form model and send it.
     */
    private function send()
    {
        $success = false;
        // Copy every form attribute onto the mailer, ucfirst-ing the name
        // (priority -> Priority, host -> Host, ...).
        // NOTE(review): with the property_exists guard commented out, pseudo
        // attributes (repliesTo, addresses, mailerType) are also assigned as
        // mailer properties; PHPMailer presumably ignores them — confirm.
        foreach($this->model->getAttributes() as $attribute => $value)
        {
            $attributeName = ucFirst($attribute);
            //if(property_exists(get_class($this->mailer), $attributeName)) {
            $this->mailer->$attributeName = $value;
            //}
        }
        // Select the transport: mailerType holds a PHPMailer method name
        // (IsSMTP / IsMail / IsSendmail / IsQmail) invoked dynamically.
        $this->mailer->{$this->model->mailerType}();
        // Register reply-to headers and recipients.
        if(!empty($this->model->repliesTo))
        {
            foreach($this->model->repliesTo as $replyTo) {
                $this->mailer->AddReplyTo($replyTo->email, $replyTo->name);
            }
        }
        if(!empty($this->model->addresses))
        {
            foreach($this->model->addresses as $address) {
                $this->mailer->AddAddress($address->email, $address->name);
            }
        }
        // Render the HTML body view (third argument = return instead of echo).
        $this->mailer->Body = $this->renderPartial($this->_pathView.'.email', array('model' => $this->model), true);
        //echo ($this->mailer->getView('email'));die;
        //print_r($this->mailer);die;
        try
        {
            $success = $this->mailer->Send();
        }
        catch(Exception $e)
        {
            // Surface transport failures as an HTTP 500.
            throw new CHttpException(500, $e->getMessage());
        }
        return $success;
    }
}<file_sep>/email/models/EmailForm.php
<?php
/*
 * To change this license header, choose License Headers in Project Properties.
 * To change this template file, choose Tools | Templates
 * and open the template in the editor.
 */
/**
 * Form model backing the e-mail composition screen: transport settings,
 * sender, subject/body, plus nested ReplyTo and Address sub-models.
 *
 * @author Timothée
 *
 * @property ReplyTo[] $repliesTo
 * @property Address[] $addresses
 */
class EmailForm extends CFormModel
{
    // PHPMailer priority header values (X-Priority).
    const PRIORITY_LOW = 5;
    const PRIORITY_NORMAL = 3;
    const PRIORITY_HIGH = 1;
    const CONTENT_TYPE_HTML = 'text/html';
    const CONTENT_TYPE_PLAIN = 'text/plain';
    // Values are PHPMailer transport-selection method names, invoked
    // dynamically by DefaultController::send().
    const MAILER_TYPE_SMTP = 'IsSMTP';
    const MAILER_TYPE_MAIL = 'IsMail';
    const MAILER_TYPE_SEND_MAIL = 'IsSendmail';
    const MAILER_TYPE_Q_MAIL = 'IsQmail';
    public $priority = self::PRIORITY_NORMAL;
    public $charSet = 'UTF-8';//iso-8859-1
    public $contentType = self::CONTENT_TYPE_HTML;
    public $mailerType = self::MAILER_TYPE_SMTP;
    // NOTE(review): hard-coded default SMTP host/port and scrubbed default
    // addresses ('<EMAIL>') — should presumably come from configuration.
    public $host = 'smtp.free.fr';
    public $port = 25;
    public $from = '<EMAIL>';
    public $fromName;
    public $sender;
    public $repliesTo = array();
    public $addresses = array();
    public $subject;
    public $body;
    public function init()
    {
        parent::init();
        $this->fromName = Yii::app()->name;
        // Seed one reply-to and one recipient on the first (non-POST) display;
        // on POST, setAttributes() rebuilds them from the submitted data.
        if(!Yii::app()->request->getPost('EmailForm'))
        {
            $replyTo = new ReplyTo();
            $replyTo->email = $this->from;
            $replyTo->name = $this->fromName;
            $this->repliesTo[] = $replyTo;
            $address = new Address();
            $address->email = '<EMAIL>';
            $this->addresses[] = $address;
        }
    }
    public function rules()
    {
        return array(
            array('priority, charSet, contentType, mailerType, host, port, from, fromName, sender, subject, body', 'safe'),
            array('from, fromName, subject', 'required'),
            array('from, sender', 'email')
        );
    }
    public function attributeLabels()
    {
        // Empty: attribute names are used as-is by CModel::generateAttributeLabel().
        return array(
        );
    }
    /**
     * Overridden to also rebuild the ReplyTo and Address sub-models from the
     * raw POST data ('ReplyTo' / 'Address' indexed arrays, see _form.php).
     * @param array $values
     * @param boolean $safeOnly
     */
    public function setAttributes($values, $safeOnly = true)
    {
        parent::setAttributes($values, $safeOnly);
        // Rebuild the ReplyTo list from POST.
        $replies = Yii::app()->request->getPost('ReplyTo');
        if($replies != null)
        {
            foreach($replies as $replyAttributes)
            {
                $reply = new ReplyTo();
                $reply->setAttributes($replyAttributes);
                $this->repliesTo[] = $reply;
            }
        }
        // Rebuild the Address list from POST.
        $addresses = Yii::app()->request->getPost('Address');
        if($addresses != null)
        {
            foreach($addresses as $addressAttributes)
            {
                $address = new Address();
                $address->setAttributes($addressAttributes);
                $this->addresses[] = $address;
            }
        }
    }
    /**
     * Overridden so the form only validates when every ReplyTo and Address
     * sub-model validates too (their errors are collected on each sub-model).
     * @param array $attributes
     * @param boolean $clearErrors
     * @return boolean
     */
    public function validate($attributes = null, $clearErrors = true)
    {
        $validate = parent::validate($attributes, $clearErrors);
        // Validate every ReplyTo.
        $repliesValid = true;
        if(!empty($this->repliesTo))
        {
            foreach($this->repliesTo as $replyTo) {
                if(!$replyTo->validate()) {
                    $repliesValid = false;
                }
            }
        }
        // Validate every Address.
        $addressesValid = true;
        if(!empty($this->addresses))
        {
            foreach($this->addresses as $address) {
                if(!$address->validate()) {
                    $addressesValid = false;
                }
            }
        }
        if($validate && $repliesValid && $addressesValid) {
            return true;
        }
        return false;
    }
    public static function getPriorities()
    {
        return array(
            self::PRIORITY_LOW => Yii::t('email', "Bas"),
            self::PRIORITY_NORMAL => Yii::t('email', 'Normal'),
            self::PRIORITY_HIGH => Yii::t('email', 'Haut')
        );
    }
    public function getPriority()
    {
        // Label of the current priority, falling back to "normal".
        $priorities = self::getPriorities();
        return array_key_exists($this->priority, $priorities) ? $priorities[$this->priority] : $priorities[self::PRIORITY_NORMAL];
    }
    public static function getContents()
    {
        return array(
            self::CONTENT_TYPE_PLAIN => self::CONTENT_TYPE_PLAIN,
            self::CONTENT_TYPE_HTML => self::CONTENT_TYPE_HTML
        );
    }
    public static function getMailers()
    {
        return array(
            self::MAILER_TYPE_MAIL => 'Mail',
            self::MAILER_TYPE_SMTP => 'SMTP',
            self::MAILER_TYPE_SEND_MAIL => 'sendMail',
            self::MAILER_TYPE_Q_MAIL => 'QMail'
        );
    }
    public function getMailer()
    {
        // Label of the current transport, falling back to "Mail".
        $mailers = self::getMailers();
        return array_key_exists($this->mailerType, $mailers) ? $mailers[$this->mailerType] : $mailers[self::MAILER_TYPE_MAIL];
    }
}
/**
 * Nested sub-model: one reply-to header (e-mail required, name optional).
 */
class ReplyTo extends CFormModel
{
    public $email = '';
    public $name = '';
    public function rules()
    {
        return array(
            array('email, name', 'safe'),
            array('email', 'required'),
            array('email', 'email'),
            array('name', 'length', 'min' => 0, 'max' => 100)
        );
    }
    public function attributeLabels()
    {
        return array(
            'email' => Yii::t('email', "Email").' :',
            'name' => Yii::t('email', "Name").' :',
        );
    }
}
class Address extends ReplyTo
{
}<file_sep>/README.md
# Yii Email Module
Système d'envoi d'email
## Installation
- Créer un répertoire "modules" dans le dossier ```protected/```
- Y copier le module email
- Ajouter le module dans la configuration :
```php
'modules' => array(
'email' => array(
)
)
```
## Resources
- **[Mailer](http://www.yiiframework.com/extension/mailer/)**
- **[Inliner](http://templates.mailchimp.com/resources/inline-css/)**
- **[Template HTML](https://github.com/advancedrei/BootstrapForEmail)**
- **[BoiletPlate](http://htmlemailboilerplate.com/)**
Markdown : https://help.github.com/articles/github-flavored-markdown/<file_sep>/email/EmailModule.php
<?php
/**
 * Yii web module wrapping the e-mail sending feature: registers the module's
 * class paths, publishes its assets, and installs a PHP error handler that
 * escalates selected errors to exceptions.
 */
class EmailModule extends CWebModule
{
    // Published URL of the module's assets directory.
    private $_assetsUrl;
    public function init()
    {
        // this method is called when the module is being created
        // you may place code here to customize the module or the application
        // import the module-level models and components
        $this->setImport(array(
            'email.models.*',
            'email.components.*',
        ));
        $this->_assetsUrl = Yii::app()->getAssetManager()->publish(Yii::getPathOfAlias('application.modules.email.assets'));
    }
    public function beforeControllerAction($controller, $action)
    {
        set_error_handler(function($errno, $errstr, $errfile, $errline) {
            $catchError = new CatchError();
            // BUGFIX: was count($errstr) — $errstr is a string, so count()
            // emits a warning on PHP 7.2+ and throws a TypeError on PHP 8.
            // Test for a non-empty message with a string comparison instead.
            if($errstr !== '' && $catchError->controlError($errstr)){
                throw new Exception($errstr, 0);
            }
        });
        if(parent::beforeControllerAction($controller, $action))
        {
            // Register the module's stylesheets before the action runs.
            Yii::app()->getClientScript()->registerCssFile($this->getAssetsUrl().'/css/bootstrap.min.css');
            Yii::app()->getClientScript()->registerCssFile($this->getAssetsUrl().'/css/font-awesome.css');
            //Yii::app()->getClientScript()->registerScriptFile(CClientScript::POS_HEAD, $this->getAssetsUrl().'/js/bootstrap.min.js');
            return true;
        }
        else
            return false;
    }
    public function getAssetsUrl()
    {
        return $this->_assetsUrl;
    }
}
<file_sep>/email/views/default/index.php
<?php
// Index view of the email module: just delegates to the _form partial with
// the controller's EmailForm instance.
$this->renderPartial('_form', array('model'=>$this->model));
?>
"Markdown",
"PHP"
] | 7 | PHP | timmy78/yii-email | 0c31011a1cea05a6a4964ac0025dffc263f2e533 | c82489136072bfd9e197ebd5c8117c7c3a4fb7bc |
refs/heads/master | <repo_name>AdrianE92/NASA_APOD<file_sep>/src/components/NasaApod.js
import '../App.css';
import React, {useState, useEffect } from 'react';
import Media from "./Media.js";
import Thumbnail from "./Thumbnail.js";
import key from "../nasa_key.js";
// NASA "Astronomy Picture of the Day" browser: loads today's media on mount,
// lets the user fetch additional random-date entries, and shows every fetched
// entry as a clickable thumbnail that switches the main display.
export default function NasaApod(){
    const KEY = key;
    // All APOD entries fetched so far, and the index of the one on display.
    const [media, setMedia] = useState([]);
    const [index, setIndex] = useState(0);
    function loadMedia(){
        // Fire-and-forget: the returned promise is not awaited by the caller.
        return fetchMedia(getRandomDate());
    }
    function getCurrentMedia(){
        // Empty object until the first fetch resolves.
        if(media.length === 0){
            return {};
        } else {
            return media[index];
        }
    }
    function getCurrentDay(){
        return formatDate(new Date());
    }
    function getRandomDate(){
        // Looking at NASA's apod archive, the first image was published 01.01.2015
        // NOTE(review): APOD itself dates back to 1995; 2015-01-01 is the
        // sampling floor chosen here — confirm the intent.
        let start = new Date(2015, 0, 1);
        let end = new Date();
        let date = new Date(start.getTime() + Math.random() * (end.getTime() - start.getTime()));
        return formatDate(date);
    }
    function addZeroToSingleDigit(date){
        // Left-pad a day/month number to two digits ("7" -> "07").
        date = ("0" + date);
        return date.substring(date.length-2, date.length);
    }
    function formatDate(date){
        // YYYY-MM-DD, the format the APOD API's `date` parameter expects.
        let day = date.getDate();
        let month = date.getMonth()+1;
        let year = date.getFullYear();
        day = addZeroToSingleDigit(day);
        month = addZeroToSingleDigit(month);
        return year + "-" + month + "-" + day;
    }
    function displayMediaList(){
        // Maps over all images stored in media and displays them as thumbnails.
        // When thumbnail is clicked, reload the displayed media.
        // NOTE(review): Thumbnail is invoked as a plain function, not as JSX;
        // it therefore does not get its own component instance/hooks.
        return media.map((singleMedia) =>
            <div onClick={() => setIndex(media.indexOf(singleMedia))} key={singleMedia.url} className="thumbnail_con">
                {
                    Thumbnail(singleMedia)
                }
            </div>
        );
    }
    async function fetchMedia(date) {
        // Fetch media from NASA and store it in the media array, and update index.
        // You can get your own API-key for free at api.nasa.gov
        // NOTE(review): no error handling — a failed request still pushes the
        // (error) JSON into the list; confirm whether that is acceptable.
        let response = await fetch('https://api.nasa.gov/planetary/apod?api_key=' + KEY +'&date=' + date + '&thumbs=true')
        let json = await response.json();
        let mediaList = [...media];
        mediaList.push(json);
        setMedia(mediaList);
        setIndex(mediaList.length-1);
    }
    useEffect(
        () => {
            // Mount-only fetch of today's picture; fetchMedia is intentionally
            // left out of the dependency list (would re-run on every render).
            fetchMedia(getCurrentDay());
        }, []);
    return (
        <div className="App">
            <header className="App-header">
                <div>
                    {Media(getCurrentMedia())}
                    <button className="button" onClick={() => loadMedia()}> Load new image </button>
                </div>
                <div>
                    {displayMediaList()}
                </div>
            </header>
        </div>
    );
}
<file_sep>/src/components/Thumbnail.js
import React from "react";
export default function Thumbnail(props){
function getThumbnailUrl(){
if(props.media_type === "image"){
return props.url;
} else if(props.media_type === "video"){
return props.thumbnail_url;
} else {
return " ";
}
}
if(props){
return(
<img className="thumbnail" alt="Picture of the Day" src={getThumbnailUrl()} />
)
} else {
return null;
}
}<file_sep>/README.md
# NASA's Astronomy Picture of the Day
## How to run
This project does not include the `nasa_key.js` file, so you must supply your own API key.
You can generate a key for free at [api.nasa.gov](https://api.nasa.gov).
You can also run the project with `KEY = "DEMO_KEY"`, though the demo key has stricter rate limits.
In the project directory, you can run:
### `npm start`
Runs the app in the development mode.\
Open [http://localhost:3000](http://localhost:3000) to view it in the browser.
The page will reload if you make edits.\
You will also see any lint errors in the console.
### `npm run build`
Builds the app for production to the `build` folder.\
It correctly bundles React in production mode and optimizes the build for the best performance.
| fada004b64e54e7d3b2b3d0d3b44d791635c87dd | [
"JavaScript",
"Markdown"
] | 3 | JavaScript | AdrianE92/NASA_APOD | 6aa379f4d8df26e85711371d460608c817e0d574 | 0c0fc54eec974c129cd0e8e8e16b3ad37e2347de |
refs/heads/main | <file_sep>#!/bin/bash
# Clear album art cached by a previous session; -f keeps the script quiet
# (and succeeds) when the directory is already empty.
rm -f covers/*
# Start pianobar in the background as the unprivileged "pi" user.
sudo -u pi pianobar &
# Run the pygame GUI in the foreground.
sudo python piandora.py
<file_sep>#!/bin/bash
#
#Piandora installation
clear
echo "To continue, make sure you have a Pandora account"
echo "You will need your name and password "
echo "====================="
echo ""
echo "Press Ctrl-C to exit"
read -p "Press [Enter] key to continue..."
echo "Installing pianobar. This may take a while..."
sudo apt-get install pianobar
echo
echo "Generating config file for pianobar."
echo
mkdir -p /home/pi/.config/pianobar
echo
echo "Generate fifo file"
mkfifo /home/pi/.config/pianobar/ctl
echo
touch /home/pi/.config/pianobar/config
cat <<EOF > /home/pi/.config/pianobar/config
# This is an example configuration file for pianobar
# Change User credentials here..
user =
password =
# Keybindings
act_help = ?
act_songlove = +
act_songban = -
act_songnext = n
act_songpause = p
act_quit = q
act_voldown = (
act_volup = )
act_songinfo = i
event_command = /home/pi/Piandora/event.py
volume = -10
# Format strings
format_nowplaying_song = SONG: %t | %a | %l
format_nowplaying_station = STATION: %n | %i
format_msg_time = TIME: %s
# No special prefix on songs, stations or info
format_msg_nowplaying = %s
format_msg_info = %s
EOF
# create shortcut on desktop
echo "Creating Desktop shortcut:"
echo
touch Piandora.desktop
cat <<EOF > Piandora.desktop
#!/usr/bin/bash
[Desktop Entry]
Name=Piandora
Type=Application
Exec=lxterminal -t "Piandora" --working-directory=/home/pi/Piandora/ -e ./run.sh
Icon=/home/pi/Piandora/icon.png
Comment=test
Terminal=true
EOF
chmod +x Piandora.desktop
mv Piandora.desktop /home/pi/Desktop
echo
echo "Generate data.txt file"
touch /home/pi/Piandora/data.txt
cat <<EOF > /home/pi/Piandora/data.txt
Artist | Album | play | 299
EOF
echo
echo "Make files executible:"
echo
sudo chmod +x *.py
sudo chown pi:pi *.*
sudo chown pi:pi /home/pi/.config/pianobar/*
sudo chown pi:pi /home/pi/.config/pianobar
echo
echo "Starting to set up pianobar"
# Fetch Pandora's TLS certificate fingerprint and append it to pianobar's
# config. BUG FIX: the config path must be absolute ("/home/pi/..."); the
# previous relative "home/pi/..." path wrote to the wrong location (or failed)
# depending on the directory the installer was run from.
fingerprint=`openssl s_client -connect tuner.pandora.com:443 < /dev/null 2> /dev/null | openssl x509 -noout -fingerprint | tr -d ':' | cut -d'=' -f2` && echo tls_fingerprint = $fingerprint >> /home/pi/.config/pianobar/config
echo
echo "Complete.."
<file_sep># Piandora
Raspberry pi GUI for pianobar, a client for Pandora using a touchscreen.
```
git clone https://github.com/granpino/Piandora.git
cd Piandora
chmod +x *.sh
sudo ./install.sh
```
Now open the config file and enter your Pandora credentials.
```
sudo nano ~/.config/pianobar/config
```
Full instructions at https://www.hackster.io/Granpino/raspberry-pi-piandora-39c124
<file_sep>
#!/usr/bin/python
# Piandora resolution set at 640x430
# This is to be used with a 3.5" HDMI touchscreen or larger
# Tested with the Raspberry pi 2 and raspbian stretch
# The program must be run within the Lxterminal.
# copyright (C) Granpino
# Rev1.01 by Granpino
import sys, pygame
from pygame.locals import *
import time
import datetime
import subprocess
import os
#import requests
pygame.init()
line1 = "Raspberry pi"
line2 = "Piandora"
play_time = 0
duration = 180
xy = 3
cyan = 50, 255, 255
blue = 26, 0, 255
black = 0, 0, 0
white = 255, 235, 235
red = 255, 0, 0
green = 0, 255, 0
silver = 192, 192, 192
gray = 40, 40, 40
#other
time.sleep(2) # wait for pianobar to load
clock = pygame.time.Clock()
subprocess.call("echo 's0' > /home/pi/.config/pianobar/ctl", shell=True)
play = True
STA0 = True
STA1 = False
STA2 = False
STA3 = False
STA4 = False
STA5 = False
STA6 = False
STA7 = False
volume = 70
cover_img = ('cover.jpg')
connection = None
#set size of the screen
size = width, height = 640, 430
### change screen mode for debugging
#screen = pygame.display.set_mode(size) #,pygame.FULLSCREEN)
screen = pygame.display.set_mode(size, pygame.FULLSCREEN)
#define function that checks for mouse location
def on_click():
    """Dispatch a touchscreen press to the matching button() action.

    Reads the module-global ``click_pos`` (x, y) tuple set by the pygame
    event loop in main() and compares it against the hard-coded hit boxes
    of the on-screen controls (640x430 layout).
    """
    # exit has been pressed
    if 550 < click_pos[0] < 620 and 18 < click_pos[1] < 64:
        print "You pressed exit"
        button(0)
    # play was pressed
    if 139 <= click_pos[0] <= 261 and 18 <= click_pos[1] <=53:
        print "You pressed play"
        button(1)
    # station 0 was pressed
    if 18 <= click_pos[0] <= 138 and 333 <= click_pos[1] <370:
        print "You pressed STA0"
        button(2)
    # station 1 was pressed
    if 138 <= click_pos[0] <= 259 and 333 <= click_pos[1] <=370:
        print "You pressed STA1"
        button(3)
    # station 2 was pressed
    if 259 <= click_pos[0] <= 380 and 333 <= click_pos[1] <=370:
        print "You pressed STA2"
        button(4)
    # next was pressed
    if 272 <= click_pos[0] <= 382 and 18 <= click_pos[1] <=53:
        print "You pressed button next"
        button(5)
    # volume down 6 was pressed
    if 500 <= click_pos[0] <= 620 and 380 <= click_pos[1] <= 417:
        print "You pressed volume down"
        button(6)
    # volume up 7 was pressed
    if 500 <= click_pos[0] <= 620 and 333 <= click_pos[1] <=370:
        print "You pressed volume up"
        button(7)
    # station 3 was pressed
    if 379 <= click_pos[0] <= 500 and 333 <= click_pos[1] <=370:
        print "pressed STA3"
        button(8)
    # station 4 was pressed
    if 20 <= click_pos[0] <= 139 and 380 <= click_pos[1] <=417:
        print "You pressed STA4"
        button(9)
    # station 5 was pressed
    if 138 <= click_pos[0] <= 259 and 380 <= click_pos[1] <=417:
        print "You pressed STA5"
        button(10)
    # station 6 was pressed
    if 259 <= click_pos[0] <= 380 and 380 <= click_pos[1] <=417:
        print "You pressed STA6"
        button(11)
    # station 7 was pressed
    if 379 <= click_pos[0] <= 500 and 380 <= click_pos[1] <=417:
        print "You pressed STA7"
        button(12)
    # station like was pressed
    if 20 <= click_pos[0] <= 139 and 18 <= click_pos[1] <=53:
        print "You pressed like"
        button(13)
    # station no-like was pressed
    if 379 <= click_pos[0] <= 500 and 18 <= click_pos[1] <=53:
        print "You pressed nolike"
        button(14)
#define action on pressing buttons
def button(number):
    """Execute the action bound to on-screen button ``number``.

    Commands are sent to pianobar by echoing its single-key controls into
    the FIFO at /home/pi/.config/pianobar/ctl. Station/volume state is kept
    in module globals so refresh_menu_screen() can highlight the UI.
    Button 0 blocks in a confirmation loop (Exit / Shutdown).
    """
    global album_img
    global play
    global x
    global STA0
    global STA1
    global STA2
    global STA3
    global STA4
    global STA5
    global STA6
    global STA7
    global volume
    print "You pressed button ",number
    if number == 0: #time to exit
        # Draw a modal "Exit / Shutdown" dialog and poll for a choice.
        font = pygame.font.SysFont('sans', 20, bold=0)
        btn_label1 = font.render("Exit", 1, (white))
        btn_label2 = font.render("Shutdown", 1, (white))
        while 1:
            pygame.draw.rect(screen, white, (69, 197, 541, 113),0)
            pygame.draw.rect(screen, gray, (359, 255, 110, 40),0)
            pygame.draw.rect(screen, gray, (492, 255, 110, 40),0)
            screen.blit(btn_label1,(399, 262))
            screen.blit(btn_label2,(505, 262))
            for event in pygame.event.get():
                if event.type == pygame.MOUSEBUTTONDOWN:
                    click_pos = pygame.mouse.get_pos()
                    # "Exit": quit pianobar, then this process.
                    if 359 < click_pos[0] < 465 and 255 < click_pos[1] < 295:
                        subprocess.call("echo -n 'q' > /home/pi/.config/pianobar/ctl", shell=True)
                        sys.exit()
                    # "Shutdown": quit pianobar, then power the Pi off.
                    if 492 < click_pos[0] < 598 and 255 < click_pos[1] < 295:
                        pygame.display.flip()
                        subprocess.call("echo -n 'q' > /home/pi/.config/pianobar/ctl", shell=True)
                        os.system("sudo shutdown -h now")
                if event.type == KEYDOWN:
                    if event.key == K_ESCAPE: # ESC to exit
                        subprocess.call("echo -n 'q' > /home/pi/.config/pianobar/ctl", shell=True)
                        sys.exit()
            pygame.display.flip()
            time.sleep(1)
        pygame.display.flip()
    if number == 1: # play / pause
        # pianobar's 'p' key toggles; we mirror the state in `play`.
        if play == True:
            subprocess.call("echo -n 'p' > /home/pi/.config/pianobar/ctl", shell=True)
            play = False
        else:
            subprocess.call("echo -n 'p' > /home/pi/.config/pianobar/ctl", shell=True)
            play = True
        refresh_menu_screen()
    if number == 2: # Station 0.
        subprocess.call("echo 's0' > /home/pi/.config/pianobar/ctl", shell=True)
        STA0, STA1, STA2, STA3, STA4, STA5, STA6, STA7 = True, False, False, False, False, False, False, False
        refresh_menu_screen()
    if number == 3: # Station 1
        subprocess.call("echo 's1' > /home/pi/.config/pianobar/ctl", shell=True)
        STA0, STA1, STA2, STA3, STA4, STA5, STA6, STA7 = False, True, False, False, False, False, False, False
        refresh_menu_screen()
    if number == 4: #station 2
        subprocess.call("echo 's2' > /home/pi/.config/pianobar/ctl", shell=True)
        STA0, STA1, STA2, STA3, STA4, STA5, STA6, STA7 = False, False, True, False, False, False, False, False
        refresh_menu_screen()
    if number == 5:
        # Skip to the next song; pianobar resumes playing automatically.
        subprocess.call("echo -n 'n' > /home/pi/.config/pianobar/ctl", shell=True)
        play = True
        refresh_menu_screen()
    if number == 6: # volume down
        # Each '(' lowers pianobar's volume; four at a time = one UI step (10%).
        if volume < 11:
            volume = 10
        else:
            subprocess.call("echo -n '((((' > /home/pi/.config/pianobar/ctl", shell=True)
            volume = volume - 10
        refresh_menu_screen()
    if number == 7: # volume up
        if volume > 99:
            volume = 100
        else:
            subprocess.call("echo -n '))))' > /home/pi/.config/pianobar/ctl", shell=True)
            volume = volume + 10
        # amixer -c0 set PCM 1% ----will not work with bluetooth
        # amixer -q -M sset PCM 50%
        refresh_menu_screen()
    if number == 8: # station 3
        subprocess.call("echo 's3' > /home/pi/.config/pianobar/ctl", shell=True)
        STA0, STA1, STA2, STA3, STA4, STA5, STA6, STA7 = False, False, False, True, False, False, False, False
        refresh_menu_screen()
    if number == 9: #station 4
        subprocess.call("echo 's4' > /home/pi/.config/pianobar/ctl", shell=True)
        STA0, STA1, STA2, STA3, STA4, STA5, STA6, STA7 = False, False, False, False, True, False, False, False
        refresh_menu_screen()
    if number == 10: # station 5
        subprocess.call("echo 's5' > /home/pi/.config/pianobar/ctl", shell=True)
        STA0, STA1, STA2, STA3, STA4, STA5, STA6, STA7 = False, False, False, False, False, True, False, False
        refresh_menu_screen()
    if number ==11: #station 6
        subprocess.call("echo 's6' > /home/pi/.config/pianobar/ctl", shell=True)
        STA0, STA1, STA2, STA3, STA4, STA5, STA6, STA7 = False, False, False, False, False, False, True, False
        refresh_menu_screen()
    if number ==12: #station 7
        subprocess.call("echo 's7' > /home/pi/.config/pianobar/ctl", shell=True)
        STA0, STA1, STA2, STA3, STA4, STA5, STA6, STA7 = False, False, False, False, False, False, False, True
        refresh_menu_screen()
    if number ==13: #Like
        subprocess.call("echo -n '+' > /home/pi/.config/pianobar/ctl", shell=True)
        refresh_menu_screen()
    if number ==14: # noLike
        subprocess.call("echo -n '-' > /home/pi/.config/pianobar/ctl", shell=True)
        refresh_menu_screen()
def get_tags():
    """Refresh the now-playing globals from data.txt.

    data.txt is written by event.py and holds a single line of four
    " | "-separated fields: "title | artist/album | state | duration".
    Updates the module globals read by refresh_menu_screen(); a state of
    "rst" resets the progress-bar counter, and while playing the counter
    advances by 4 (this function is polled roughly every 3-4 seconds).
    """
    global line1
    global line2
    global play_time
    global duration
    global play
    # Context manager guarantees the handle is closed; the old code opened
    # the file in "r+" on every poll and never closed it (handle leak).
    with open("data.txt", "r") as data_file:
        fields = data_file.readline().split(" | ")
    line1 = fields[0]
    line2 = fields[1]
    line2 = line2[:54]        # clamp the artist/album line so it fits on screen
    line3 = fields[2]         # playback state: "rst" right after a song change
    duration = fields[3]      # song duration in seconds (kept as a string)
    if line3 == "rst":
        play_time = 0         # new song -> restart the progress bar
    if play:
        play_time = play_time + 4
Lfont = pygame.font.Font(None,70)
Mfont = pygame.font.Font(None,32)
M2font = pygame.font.Font(None,40)
Sfont = pygame.font.Font(None,28)
skin1=pygame.image.load("buttons.png")
skin2=pygame.image.load("640x430.png")
def refresh_menu_screen():
    """Redraw the whole 640x430 UI: skin, clock, album art, track info,
    volume, progress bar and the eight station buttons.

    Reads only module globals (fonts, skins, colours, playback state);
    called every tick from main() and after every button() action.
    """
    global connect_img
    global CurrPlaylist
    global line1
    global volume
    global play_time
    global duration
    global xy
    current_time = datetime.datetime.now().strftime('%I:%M')
    seconds = datetime.datetime.now().strftime('%S')
    time_label = Lfont.render(current_time, 1, (white))
    sec_label = Mfont.render(seconds, 1, (white))
    Header=M2font.render("Piandora", 1, (white))
    screen.blit(skin2,(0,0))
    screen.blit(skin1,(0,0))
    screen.blit(Header,(250, 72))
    pygame.draw.rect(screen, gray, (463, 110, 155, 49),0)
    screen.blit(time_label,(465, 110))
    screen.blit(sec_label, (590, 115))
    # cover.jpg may be mid-download when event.py swaps it; back off briefly
    # if pygame cannot load it this tick.
    try:
        album_art=pygame.image.load(cover_img) # album art
        album_art=pygame.transform.scale(album_art, (200,150 ))
        screen.blit(album_art,(21,74))
    except pygame.error:
        time.sleep(1)
    #=========================================================
    ##### display artist and song ####:
    song_name=Sfont.render(line1, 1, (white))
    artist=Sfont.render(line2, 1, (white))
    duration_label=Mfont.render(str(duration), 1, (white))
    screen.blit(song_name,(70,241))
    screen.blit(artist,(70,281))
    screen.blit(duration_label, (250, 145))
    ### add volume number
    volume_tag=Mfont.render(str(volume) + "%", 1, (white))
    screen.blit(volume_tag,(250,107))
    ### the math #####
    # size of bar in pixels= 615 - 240
    # ratio of bar to seconds = (615-240) / duration
    # song time = seconds * ratio
    # offset = 240
    ratio = float(375.0 / int(duration))
    ratio = ("%.2f" % round(ratio, 2))
    play_time = float(play_time)
    ratio = float(ratio)
    y = (play_time * ratio)
    y = round(y)
    xy = (240 + y)
    if xy >= 615: # pause bargraph
        xy = 615
    # Grey background bar, green remaining-time bar drawn over it.
    pygame.draw.line(screen, gray, (240,220), (615, 220), 10)
    pygame.draw.line(screen, green, ((xy), 220), (615, 220), 10)
    # Highlight the active station number in green, the rest in white.
    if STA0 == True:
        screen.blit(M2font.render('1',1, green), (98, 340))
    else:
        screen.blit(M2font.render('1',1, white), (98, 340))
    if STA1 == True:
        screen.blit(M2font.render('2',1, green), (220, 340))
    else:
        screen.blit(M2font.render('2',1, white), (220, 340))
    if STA2 == True:
        screen.blit(M2font.render('3',1, green), (340, 340))
    else:
        screen.blit(M2font.render('3',1, white), (340, 340))
    if STA3 == True:
        screen.blit(M2font.render('4',1, green), (460, 340))
    else:
        screen.blit(M2font.render('4',1, white), (460, 340))
    if STA4 == True:
        screen.blit(M2font.render('5',1, green), (98, 385))
    else:
        screen.blit(M2font.render('5',1, white), (98, 385))
    if STA5 == True:
        screen.blit(M2font.render('6',1, green), (220, 385))
    else:
        screen.blit(M2font.render('6',1, white), (220, 385))
    if STA6 == True:
        screen.blit(M2font.render('7',1, green), (340, 385))
    else:
        screen.blit(M2font.render('7',1, white), (340, 385))
    if STA7 == True:
        screen.blit(M2font.render('8',1, green), (460, 385))
    else:
        screen.blit(M2font.render('8',1, white), (460, 385))
    pygame.display.flip()
def main():
    """Event loop: poll track info every ~3s, dispatch touch presses to
    on_click(), and redraw the screen at ~3 FPS. ESC quits (handy over VNC).
    """
    global click_pos
    global line1
    timer = pygame.time.get_ticks()
    while True:
        seconds = (pygame.time.get_ticks() - timer)/1000
        if seconds > 3:
            timer = pygame.time.get_ticks()
            # print("getting media info every 3s")
            get_tags()
        for event in pygame.event.get():
            if event.type == pygame.MOUSEBUTTONDOWN:
                click_pos = pygame.mouse.get_pos()
                print "screen pressed" #for debugging purposes
                print click_pos #for checking coordinates
                on_click()
            #Press ESC key on the computer to end while in VNC
            if event.type == KEYDOWN:
                if event.key == K_ESCAPE: # ESC key will kill it
                    sys.exit()
        clock.tick(3) # refresh screen
        refresh_menu_screen()
refresh_menu_screen()
main() # Main loop
<file_sep>#! /usr/bin/python
# Copyright (C) 2012 <NAME> <<EMAIL>>
#
# Based on code originally by <NAME>, LeetCode.net
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#====This file has been modified to work with piandora.....
import contextlib
import hashlib
import os
import sys
import subprocess
import time
from urllib2 import urlopen, URLError
title = "empty title"
song_duration = " | -"
artist_album = "empty artist"
def _write_now_playing(title, artist_album, playtime, song_duration):
    # Rewrite data.txt with the four " | "-separated fields that
    # piandora.py polls for its display. The context manager fixes the
    # old code's unclosed file handles.
    with open("data.txt", "w") as data_file:
        data_file.writelines([title, artist_album, playtime, song_duration])


def handle_event(type, **kwargs):
    """Handle one pianobar event.

    Only "songstart" events whose pRet is "1" (success) are acted upon:
    the album art is cached to disk via fetch_cover() and data.txt is
    rewritten twice — first with a "rst" marker so the GUI resets its
    progress bar, then (after a short pause) with the normal "play" state.
    """
    global playtime
    # Error from pianobar, disregard
    if kwargs.get("pRet") != "1":
        return
    # Handle specific events
    if type == "songstart":
        title = kwargs.get("title")
        cover_url = kwargs.get("coverArt")
        artist_album = " | by %s on %s" % (kwargs.get("artist"), kwargs.get("album"))
        song_duration = " | %s" % (kwargs.get("songDuration"))
        print("===song -" + title)
        print("===atist - " + artist_album)
        print("===duration - " + song_duration)
        # Cache the cover next to this script, keyed by the URL's SHA-1.
        config_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
        filename = os.path.join(config_dir, "covers", hashlib.sha1(cover_url).hexdigest())
        fetch_cover(cover_url, filename)
        # "rst" tells the GUI a new song started (progress bar reset) ...
        playtime = ' | rst'
        _write_now_playing(title, artist_album, playtime, song_duration)
        time.sleep(4)
        # ... then switch to the steady "play" state.
        playtime = ' | play'
        _write_now_playing(title, artist_album, playtime, song_duration)
def fetch_cover(url, filename):
    """
    Fetches album art from the URL specified by pianobar, and saves to disk.

    Returns a file:// URI for the cached cover, or "" when the download
    fails with a URLError.
    """
    # If the "covers" directory does not exist under the pianobar config
    # directory, create it
    if not os.path.isdir(os.path.dirname(filename)):
        os.mkdir(os.path.dirname(filename))
    if not os.path.exists(filename):
        try:
            with contextlib.closing(urlopen(url)) as inf, open(filename, "wb") as outf:
                outf.write(inf.read())
        except URLError:
            return ""
        print("===Saved cover to home folder ==")
        # Move the freshly downloaded art to the fixed path the GUI loads.
        subprocess.call("mv covers/* cover.jpg", shell=True)
    return "file://%s" % filename
def main(argv=None):
    """Entry point: read the event type from argv[1] and key=value pairs
    from stdin (as pianobar supplies them), then dispatch to handle_event().

    Returns 0 on success, 1 when no event type was supplied.
    """
    if argv is None:
        argv = sys.argv
    # Read event type from command arguments
    if len(sys.argv) < 2:
        # BUG FIX: the old code printed this message and then crashed with
        # an IndexError on sys.argv[1]; bail out with a non-zero status.
        print("error reading event type from command arguments")
        return 1
    type = sys.argv[1]
    # Read parameters from input: pianobar feeds "key=value" lines on stdin.
    params = {}
    for s in sys.stdin.readlines():
        param, value = s.split("=", 1)
        params[param.strip()] = value.strip()
    # Call the event handler
    handle_event(type, **params)
    return 0

if __name__ == "__main__":
    #sys.exit(main())
    main()
| e8ef60c1afdd243f18f9b8149ae2200bfa82d34e | [
"Markdown",
"Python",
"Shell"
] | 5 | Shell | granpino/Piandora | 0ff334ab75d7080375a660d05733bcabc67342d7 | 6bce583a177076e481a844d42a2c100fdbd3aefe |
refs/heads/master | <file_sep>[](https://travis-ci.org/rnitame/daily)
# daily
Get daily events from GitHub
## Usage
```
# Show all events
$ daily
# Show organization events only
$ daily -org <org_name>
```
## Set your GitHub personal access token
```
$ git config --global "github.token" xxxxx
```
<file_sep>NAME := daily
VERSION := v0.1.0
SRCS := $(shell find . -type f -name '*.go')

# TODO: set 'make bin/NAME'

# These targets never produce files of the same name; declaring them .PHONY
# means a stray file called "test" etc. can never mask them.
.PHONY: test cross-build create-zip

# Run the full test suite with coverage for every non-vendored package.
test:
	go test -cover -v `glide novendor`

# Cross-compile static binaries for the common OS/arch matrix into dist/.
cross-build:
	for os in darwin linux windows; do \
		for arch in amd64 386; do \
			GOOS=$$os GOARCH=$$arch CGO_ENABLED=0 go build -a -tags netgo -installsuffix netgo $(LDFLAGS) -o dist/$$os-$$arch/$(NAME); \
		done; \
	done

# Zip (and remove) each dist/<os>-<arch> directory produced by cross-build.
create-zip:
	for os in darwin linux windows; do \
		for arch in amd64 386; do \
			zip -r -m -q dist/$(NAME)-$$os-$$arch.zip dist/$$os-$$arch; \
		done; \
	done
<file_sep>package main
import (
"flag"
)
// org limits the output to events from a single GitHub organization;
// the empty default means "show everything".
var (
	org = flag.String("org", "", "organization name for showing events")
)

// Run executes the command: it parses the CLI flags, builds an
// authenticated GitHub client, and fetches/prints today's events,
// optionally filtered by the -org flag.
func Run() {
	flag.Parse()
	client := NewGitHubClient()
	GetEvents(client, org)
}
<file_sep>package main
import (
"fmt"
"strings"
"time"
"log"
"github.com/google/go-github/github"
"github.com/pkg/errors"
gitconfig "github.com/tcnksm/go-gitconfig"
"github.com/tidwall/gjson"
"golang.org/x/oauth2"
)
// NewGitHubClient builds a go-github client authenticated with the
// personal access token stored in the global git config ("github.token").
// The process exits via log.Fatalln when the token cannot be read.
func NewGitHubClient() *github.Client {
	token, err := gitconfig.Global("github.token")
	if err != nil {
		log.Fatalln(errors.Wrap(err, "get github token failed"))
	}
	ts := oauth2.StaticTokenSource(
		&oauth2.Token{AccessToken: token},
	)
	tc := oauth2.NewClient(oauth2.NoContext, ts)
	return github.NewClient(tc)
}
// GetEvents fetches the first page (up to 50) of events the authenticated
// user has performed on GitHub and hands them to SieveOutEvents for
// filtering/printing. Any API failure terminates the process via log.Fatalln.
func GetEvents(client *github.Client, org *string) {
	listOpts := github.ListOptions{Page: 1, PerPage: 50}

	me, _, err := client.Users.Get(oauth2.NoContext, "")
	if err != nil {
		log.Fatalln(errors.Wrap(err, "get users failed"))
	}

	events, _, err := client.Activity.ListEventsPerformedByUser(oauth2.NoContext, me.GetLogin(), false, &listOpts)
	if err != nil {
		log.Fatalln(errors.Wrap(err, "get events failed"))
	}
	SieveOutEvents(events, org)
}
// SieveOutEvents narrows the event list according to the flags and prints
// each matching event. An event matches when its creation time (in JST)
// falls on today's date and, if -org was given, its repository name
// contains that organization. For pull-request and issue events the title
// is printed; for everything else the payload's "action" field is shown.
// NOTE(review): errors from ParsePayload/MarshalJSON are silently ignored
// here — presumably acceptable for a CLI, but worth confirming.
func SieveOutEvents(events []*github.Event, org *string) {
	jst, _ := time.LoadLocation("Asia/Tokyo")
	today := time.Now()
	const layout = "2006-01-02"
	for _, value := range events {
		// Show the event when its CreatedAt string contains today's date.
		if strings.Contains(value.CreatedAt.In(jst).String(), string(today.Format(layout))) {
			// When an organization is given, print only its events.
			if *org != "" && !strings.Contains(*value.Repo.Name, *org) {
				continue
			}
			payload, _ := value.ParsePayload()
			// Only certain event types carry a useful title.
			switch *value.Type {
			case "PullRequestEvent":
				pr, ok := payload.(*github.PullRequestEvent)
				if !ok {
					log.Fatalln("Failed type assertion")
				}
				fmt.Println(*value.Repo.Name, *value.Type, *pr.PullRequest.Title)
			case "IssuesEvent":
				issue, ok := payload.(*github.IssuesEvent)
				if !ok {
					log.Fatalln("Failed type assertion")
				}
				fmt.Println(*value.Repo.Name, *value.Type, *issue.Issue.Title)
			default:
				json, _ := value.RawPayload.MarshalJSON()
				action := gjson.Get(string(json), "action")
				fmt.Println(*value.Repo.Name, *value.Type, action)
			}
		}
	}
}
<file_sep>package main
import (
"reflect"
"testing"
"github.com/google/go-github/github"
)
// TestNewGitHubClient is a generated table-driven scaffold; the table is
// still empty, so it currently exercises nothing.
func TestNewGitHubClient(t *testing.T) {
	tests := []struct {
		name string
		want *github.Client
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := NewGitHubClient(); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("NewGitHubClient() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestGetEvents is a generated table-driven scaffold; the table is still
// empty, so it currently exercises nothing.
func TestGetEvents(t *testing.T) {
	type args struct {
		client *github.Client
		org    *string
	}
	tests := []struct {
		name string
		args args
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			GetEvents(tt.args.client, tt.args.org)
		})
	}
}
// TestSieveOutEvents is a generated table-driven scaffold; the table is
// still empty, so it currently exercises nothing.
func TestSieveOutEvents(t *testing.T) {
	type args struct {
		events []*github.Event
		org    *string
	}
	tests := []struct {
		name string
		args args
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			SieveOutEvents(tt.args.events, tt.args.org)
		})
	}
}
| d0a52474c651b95a726b304ab5a7b99792557c55 | [
"Markdown",
"Makefile",
"Go"
] | 5 | Markdown | rnitame/daily | 63784c14ccff443394652732979bb2685c58311c | 25576f4a08442b699fdb0946b301313e977e1766 |
refs/heads/master | <file_sep>// import UserModel from "../models/user";
export default {
  /**
   * Login endpoint handler (POST /api/login).
   * NOTE(review): currently a stub — it echoes the request body back with a
   * success message instead of validating credentials; presumably UserModel
   * (commented-out import above) will be wired in later.
   * @param {*} ctx Koa request context
   */
  async login(ctx) {
    ctx.body = {
      code: 200,
      msg: "登录成功",
      data: ctx.request.body
    };
  }
};
<file_sep>import Koa from "koa";
import path from "path";
// import jwt from "koa-jwt";
import koaBody from "koa-body";
import koaStatic from "koa-static";
import routes from "./routes";
const app = new Koa();

// Serve files from ./static at the web root.
app.use(koaStatic(path.join(__dirname, "/static")));
// Parse request bodies, including multipart uploads.
app.use(
  koaBody({
    multipart: true
  })
);
// Simple request logger: method, URL and elapsed milliseconds.
app.use(async (ctx, next) => {
  let start = new Date();
  await next();
  let ms = new Date() - start;
  console.log("%s %s - %sms", ctx.method, ctx.url, ms);
});
// JWT auth is currently disabled; re-enable to protect everything except
// login/register.
// app.use(jwt({
// secret: "jwt__token"
// }).unless({
// path: [/^\/api\/login/, /^\/api\/register/]
// }));
app.use(routes.routes(), routes.allowedMethods());
// Last-resort error logging for errors emitted by the app.
app.on("error", (err, ctx) => {
  console.error("server error", err, ctx);
});
app.listen(3000);
console.log("koa started at port 3000");
<file_sep>import mysql from "mysql2";
// Single shared MySQL connection for the whole process.
// NOTE(review): credentials are hard-coded placeholders — these should come
// from environment variables before deployment.
const connection = mysql.createConnection({
  host: "localhost",
  user: "root",
  password: "<PASSWORD>",
  database: "mcms",
  charset: "utf8"
});

// Promisified query helper. By design the promise ALWAYS resolves — even on
// failure — with an { err, res, fields } object; callers are expected to
// inspect `err` themselves (the models destructure `res` directly).
export default (query, params) => {
  return new Promise(resolve => {
    connection.query(query, params, (err, res, fields) => {
      resolve({
        err,
        res,
        fields
      });
    });
  });
};
<file_sep>import query from "../config/db";
export default {
  /**
   * Create a new user row.
   * BUG FIX: an INSERT resolves to a single result object (OkPacket), not a
   * row array, so the previous `return res[0]` always returned undefined.
   * Returning the whole result exposes insertId / affectedRows to callers.
   * @param {string} username
   * @param {string} password NOTE(review): stored as given — confirm hashing
   *                          happens before this call.
   * @returns {object} mysql2 insert result (OkPacket)
   */
  async create(username, password) {
    const {
      res
    } = await query("insert into user(username, password) values (?,?)", [username, password]);
    return res;
  },
  /**
   * Look a user up by name.
   * @param {string} username
   * @returns {object|undefined} first matching row, or undefined when none
   */
  async findUserByName(username) {
    const {
      res
    } = await query("select * from user where username = ?", [username]);
    return res[0];
  },
  /**
   * Look a user up by primary key.
   * @param {number} userid
   * @returns {object|undefined} first matching row, or undefined when none
   */
  async findUserById(userid) {
    const {
      res
    } = await query("select * from user where userid = ?", [userid]);
    return res[0];
  }
};
<file_sep>#!/usr/bin/env sh
echo 'start koa2 application'
set -x
# Launch the Koa server in the background and remember its PID so it can be
# stopped later with: kill $(cat .pidfile)
npm run start &
sleep 1
echo $! > .pidfile
set +x
# BUG FIX: a bare `exit` here made the final hint message below unreachable;
# let the script fall through so it is actually printed.
echo 'Visit http://localhost:3000 to see your koa2 application in action.'<file_sep>// import commonModels from "../models/common";
export default {
  /**
   * Image upload handler.
   * Rejects requests without a "file" part; otherwise echoes the request
   * body back. The commented-out section below is the intended
   * save-to-/upload implementation, not yet enabled.
   * @param {*} ctx Koa request context (koa-body multipart parsing assumed)
   */
  async upload(ctx) {
    if (!ctx.request.files.file) {
      return (ctx.body = {
        code: 300,
        msg: "请选择图片"
      });
    }
    // let file = ctx.request.files.file;
    // let userId = ctx.request.body.userId;
    // let file = ctx.request.files.file;
    // // let extname = file.name.slice(file.name.lastIndexOf("."));
    // let extname = path.extname(file.name);
    // let filePath = "/upload/IMG_" + userId + "_" + +new Date() + extname;
    // const readStream = fs.createReadStream(file.path);
    // const writeStream = fs.createWriteStream(staticPath + filePath);
    // readStream.pipe(writeStream);
    // ctx.body = {
    // status: 200,
    // message: "",
    // data: "http://localhost:3000" + filePath
    // };
    console.log(ctx.request.body);
    console.log(ctx.request.files);
    ctx.body = {
      code: 200,
      data: {
        params: ctx.request.body
      }
    };
  }
};
<file_sep># picturePreview
简单的图片上传预览
<file_sep>import Router from "koa-router";
import user from "../controllers/user.js";
// import common from "../controllers/common.js";
// All API endpoints are mounted under the /api prefix.
const router = new Router({ prefix: "/api" });
// POST /api/login -> user controller's login handler.
router.post("/login", user.login);
// router.post("/upload", common.upload);
export default router;
| a688aa052b6027ae6500ce790b5cbf05ddfba344 | [
"JavaScript",
"Markdown",
"Shell"
] | 8 | JavaScript | qiaoyao/picturePreview | 34062445dd8522916ef5a439106d7dd0bc29dd18 | a2d347ea4c2463d1deeb5b13f36cc4d1907eebde |
refs/heads/master | <repo_name>Roomka/codewars<file_sep>/src/main/test/com/com/roman/procopenco/codewars/GreedIsGoodTest.java
package com.roman.procopenco.codewars;
import static org.junit.Assert.assertEquals;
import junit.framework.TestCase;
import org.junit.Test;
/**
 * Unit tests for the "Greed is Good" dice-scoring kata. The same three
 * five-dice rolls (with the same expected scores) are run against both the
 * baseline greedy() implementation and the greedyOptimal() variant.
 */
public class GreedIsGoodTest {

    // Verifies greedy() against the kata's example rolls.
    @Test
    public void testExample() {
        assertEquals("Score for [5,1,3,4,1] must be 250:", 250, GreedIsGood.greedy(new int[]{5,1,3,4,1}));
        assertEquals("Score for [1,1,1,3,1] must be 1100:", 1100, GreedIsGood.greedy(new int[]{1,1,1,3,1}));
        assertEquals("Score for [2,4,4,5,4] must be 450:", 450, GreedIsGood.greedy(new int[]{2,4,4,5,4}));
    }

    // Same fixtures run against the optimised implementation.
    @Test
    public void testExampleGreedyOptimal() {
        assertEquals("Score for [5,1,3,4,1] must be 250:", 250, GreedIsGood.greedyOptimal(new int[]{5,1,3,4,1}));
        assertEquals("Score for [1,1,1,3,1] must be 1100:", 1100, GreedIsGood.greedyOptimal(new int[]{1,1,1,3,1}));
        assertEquals("Score for [2,4,4,5,4] must be 450:", 450, GreedIsGood.greedyOptimal(new int[]{2,4,4,5,4}));
    }
}<file_sep>/src/main/java/com/roman/procopenco/codewars/TCPFiniteStateMachine.java
package com.roman.procopenco.codewars;
import java.util.HashMap;
import java.util.Map;
/**
* Automatons, or Finite State Machines (FSM), are extremely useful to programmers when it comes to
* software design. You will be given a simplistic version of an FSM to code for a basic TCP
* session.
* <p>
* The outcome of this exercise will be to return the correct state of the TCP FSM based on the
* array of events given.
* <p>
* The input array of events will consist of one or more of the following strings:
* <p>
* APP_PASSIVE_OPEN, APP_ACTIVE_OPEN, APP_SEND, APP_CLOSE, APP_TIMEOUT, RCV_SYN, RCV_ACK,
* RCV_SYN_ACK, RCV_FIN, RCV_FIN_ACK The states are as follows and should be returned in all capital
* letters as shown:
* <p>
* CLOSED, LISTEN, SYN_SENT, SYN_RCVD, ESTABLISHED, CLOSE_WAIT, LAST_ACK, FIN_WAIT_1, FIN_WAIT_2,
* CLOSING, TIME_WAIT The input will be an array of events. Your job is to traverse the FSM as
* determined by the events, and return the proper state as a string, all caps, as shown above.
* <p>
* If an event is not applicable to the current state, your code will return "ERROR".
* <p>
* Action of each event upon each state: (the format is INITIAL_STATE: EVENT -> NEW_STATE)
* <p>
* CLOSED: APP_PASSIVE_OPEN -> LISTEN CLOSED: APP_ACTIVE_OPEN -> SYN_SENT LISTEN: RCV_SYN
* -> SYN_RCVD LISTEN: APP_SEND -> SYN_SENT LISTEN: APP_CLOSE -> CLOSED SYN_RCVD:
* APP_CLOSE -> FIN_WAIT_1 SYN_RCVD: RCV_ACK -> ESTABLISHED SYN_SENT: RCV_SYN ->
* SYN_RCVD SYN_SENT: RCV_SYN_ACK -> ESTABLISHED SYN_SENT: APP_CLOSE -> CLOSED ESTABLISHED:
* APP_CLOSE -> FIN_WAIT_1 ESTABLISHED: RCV_FIN -> CLOSE_WAIT FIN_WAIT_1: RCV_FIN ->
* CLOSING FIN_WAIT_1: RCV_FIN_ACK -> TIME_WAIT FIN_WAIT_1: RCV_ACK -> FIN_WAIT_2 CLOSING:
* RCV_ACK -> TIME_WAIT FIN_WAIT_2: RCV_FIN -> TIME_WAIT TIME_WAIT: APP_TIMEOUT ->
* CLOSED CLOSE_WAIT: APP_CLOSE -> LAST_ACK LAST_ACK: RCV_ACK -> CLOSED "EFSM TCP"
* <p>
* Examples ["APP_PASSIVE_OPEN", "APP_SEND", "RCV_SYN_ACK"] => "ESTABLISHED"
* <p>
* ["APP_ACTIVE_OPEN"] => "SYN_SENT"
* <p>
* ["APP_ACTIVE_OPEN", "RCV_SYN_ACK", "APP_CLOSE", "RCV_FIN_ACK", "RCV_ACK"] => "ERROR" This kata
* is similar to Design a Simple Automaton (Finite State Machine), and you may wish to try that kata
* before tackling this one.
* <p>
* See wikipedia page Transmission Control Protocol for further details.
* <p>
* See http://www.medianet.kent.edu/techreports/TR2005-07-22-tcp-EFSM.pdf page 4, for the FSM
* diagram used for this kata.
* <p>
* https://www.codewars.com/kata/54acc128329e634e9a000362/train/java
*/
public class TCPFiniteStateMachine {
private static Map<String, Map<String, String>> statesEventsMapping = new HashMap<>();
static {
Map<String, String> closedStateEvents = new HashMap<>();
closedStateEvents.put("APP_PASSIVE_OPEN", "LISTEN");
closedStateEvents.put("APP_ACTIVE_OPEN", "SYN_SENT");
statesEventsMapping.put("CLOSED", closedStateEvents);
Map<String, String> listenStateEvents = new HashMap<>();
listenStateEvents.put("RCV_SYN", "SYN_RCVD");
listenStateEvents.put("APP_SEND", "SYN_SENT");
listenStateEvents.put("APP_CLOSE", "CLOSED");
statesEventsMapping.put("LISTEN", listenStateEvents);
Map<String, String> synRcvdStateEvents = new HashMap<>();
synRcvdStateEvents.put("APP_CLOSE", "FIN_WAIT_1");
synRcvdStateEvents.put("RCV_ACK", "ESTABLISHED");
statesEventsMapping.put("SYN_RCVD", synRcvdStateEvents);
Map<String, String> synSentStateEvents = new HashMap<>();
synSentStateEvents.put("RCV_SYN", "SYN_RCVD");
synSentStateEvents.put("RCV_SYN_ACK", "ESTABLISHED");
synSentStateEvents.put("APP_CLOSE", "CLOSED");
statesEventsMapping.put("SYN_SENT", synSentStateEvents);
Map<String, String> establishedStateEvents = new HashMap<>();
establishedStateEvents.put("APP_CLOSE", "FIN_WAIT_1");
establishedStateEvents.put("RCV_FIN", "CLOSE_WAIT");
statesEventsMapping.put("ESTABLISHED", establishedStateEvents);
Map<String, String> finWait1StateEvents = new HashMap<>();
finWait1StateEvents.put("RCV_FIN", "CLOSING");
finWait1StateEvents.put("RCV_FIN_ACK", "TIME_WAIT");
finWait1StateEvents.put("RCV_ACK", "FIN_WAIT_2");
statesEventsMapping.put("FIN_WAIT_1", finWait1StateEvents);
Map<String, String> closingStateEvents = new HashMap<>();
closingStateEvents.put("RCV_ACK", "TIME_WAIT");
statesEventsMapping.put("CLOSING", closingStateEvents);
Map<String, String> finWait2StateEvents = new HashMap<>();
finWait2StateEvents.put("RCV_FIN", "TIME_WAIT");
statesEventsMapping.put("FIN_WAIT_2", finWait2StateEvents);
Map<String, String> timeWaitStateEvents = new HashMap<>();
timeWaitStateEvents.put("APP_TIMEOUT", "CLOSED");
statesEventsMapping.put("TIME_WAIT", timeWaitStateEvents);
Map<String, String> closeWaitStateEvents = new HashMap<>();
closeWaitStateEvents.put("APP_CLOSE", "LAST_ACK");
statesEventsMapping.put("CLOSE_WAIT", closeWaitStateEvents);
Map<String, String> lastAckStateEvents = new HashMap<>();
lastAckStateEvents.put("RCV_ACK", "CLOSED");
statesEventsMapping.put("LAST_ACK", lastAckStateEvents);
}
public static String traverseStates(String[] events) {
String state = "CLOSED"; // initial state, always
for (String event : events) {
Map<String, String> currentStateEvents = statesEventsMapping.get(state);
System.out.print(currentStateEvents);
if (!currentStateEvents.containsKey(event)) {
return "ERROR";
} else {
state = statesEventsMapping.get(state).get(event);
}
}
return state;
}
public static String traverseStatesShort(String[] events) {
String state = "CLOSED";
for (String event : events) {
state = statesEventsMapping.get(state).get(event);
if (state == null) return "ERROR";
}
return state;
}
}
<file_sep>/src/main/test/com/com/roman/procopenco/codewars/DoubleLinearTest.java
package com.roman.procopenco.codewars;
import static org.junit.Assert.assertEquals;
import org.junit.Test;
public class DoubleLinearTest {
private static void testing(int actual, int expected) {
assertEquals(expected, actual);
}
@Test
public void test() {
System.out.println("Fixed Tests dblLinear");
testing(DoubleLinear.dblLinear3(10), 22);
testing(DoubleLinear.dblLinear3(20), 57);
testing(DoubleLinear.dblLinear3(30), 91);
testing(DoubleLinear.dblLinear3(50), 175);
testing(DoubleLinear.dblLinear3(100), 447);
}
}<file_sep>/src/main/java/com/roman/procopenco/codewars/PickPeaks.java
package com.roman.procopenco.codewars;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* In this kata, you will write a function that returns the positions and the values of the "peaks" (or local maxima) of a numeric array.
*
* For example, the array arr = [0, 1, 2, 5, 1, 0] has a peak at position 3 with a value of 5 (since arr[3] equals 5).
*
* The output will be returned as a ``Map<String,List>with two key-value pairs:"pos"and"peaks". If there is no peak in the given array, simply return {"pos" => [], "peaks" => []}`.
*
* Example: pickPeaks([3, 2, 3, 6, 4, 1, 2, 3, 2, 1, 2, 3]) should return {pos: [3, 7], peaks: [6, 3]} (or equivalent in other languages)
*
* All input arrays will be valid integer arrays (although it could still be empty), so you won't need to validate the input.
*
* The first and last elements of the array will not be considered as peaks (in the context of a mathematical function, we don't know what is after and before and therefore, we don't know if it is a peak or not).
*
* Also, beware of plateaus !!! [1, 2, 2, 2, 1] has a peak while [1, 2, 2, 2, 3] and [1, 2, 2, 2, 2] do not. In case of a plateau-peak, please only return the position and value of the beginning of the plateau. For example: pickPeaks([1, 2, 2, 2, 1]) returns {pos: [1], peaks: [2]} (or equivalent in other languages)
*
* Have fun!
*
* https://www.codewars.com/kata/5279f6fe5ab7f447890006a7/train/java
*/
public class PickPeaks {
public static Map<String, List<Integer>> getPeaks(int[] arr) {
Map<String,List<Integer>> ans = new HashMap<String,List<Integer>>() {{
put("pos", new ArrayList<Integer>() );
put("peaks", new ArrayList<Integer>() );
}};
int posMax = 0;
boolean matchAsc = false;
for (int i = 1 ; i < arr.length ; i++) {
if (arr[i-1] < arr[i]) {
matchAsc = true;
posMax = i;
}
if (matchAsc && arr[i-1] > arr[i]) {
matchAsc = false;
ans.get("pos").add(posMax);
ans.get("peaks").add(arr[posMax]);
}
}
return ans;
}
}
<file_sep>/src/main/test/com/com/roman/procopenco/codewars/BinaryArrayToNumberTest.java
package com.roman.procopenco.codewars;
import org.junit.Test;
import org.openjdk.jmh.annotations.*;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.TimeValue;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import java.util.concurrent.TimeUnit;
import static org.junit.Assert.*;
public class BinaryArrayToNumberTest extends BenchmarkUtility {
@Test
@Benchmark
public void ConvertBinaryArrayToIntLoop() throws Exception {
assertEquals(1, BinaryArrayToNumber.ConvertBinaryArrayToIntLoop(new ArrayList<>(Arrays.asList(0,0,0,1))));
assertEquals(15, BinaryArrayToNumber.ConvertBinaryArrayToIntLoop(new ArrayList<>(Arrays.asList(1,1,1,1))));
assertEquals(6, BinaryArrayToNumber.ConvertBinaryArrayToIntLoop(new ArrayList<>(Arrays.asList(0,1,1,0))));
assertEquals(9, BinaryArrayToNumber.ConvertBinaryArrayToIntLoop(new ArrayList<>(Arrays.asList(1,0,0,1))));
assertEquals(0, BinaryArrayToNumber.ConvertBinaryArrayToIntLoop(new ArrayList<>(Arrays.asList(0,0,0,0))));
}
@Test
@Benchmark
public void ConvertBinaryArrayToIntStream() throws Exception {
assertEquals(1, BinaryArrayToNumber.ConvertBinaryArrayToIntStream(new ArrayList<>(Arrays.asList(0,0,0,1))));
assertEquals(15, BinaryArrayToNumber.ConvertBinaryArrayToIntStream(new ArrayList<>(Arrays.asList(1,1,1,1))));
assertEquals(6, BinaryArrayToNumber.ConvertBinaryArrayToIntStream(new ArrayList<>(Arrays.asList(0,1,1,0))));
assertEquals(9, BinaryArrayToNumber.ConvertBinaryArrayToIntStream(new ArrayList<>(Arrays.asList(1,0,0,1))));
assertEquals(0, BinaryArrayToNumber.ConvertBinaryArrayToIntStream(new ArrayList<>(Arrays.asList(0,0,0,0))));
}
@Test
@Benchmark
public void convertBinaryArrayToInt() throws Exception {
assertEquals(1, BinaryArrayToNumber.ConvertBinaryArrayToIntBitwiseOperator(new ArrayList<>(Arrays.asList(0,0,0,1))));
assertEquals(15, BinaryArrayToNumber.ConvertBinaryArrayToIntBitwiseOperator(new ArrayList<>(Arrays.asList(1,1,1,1))));
assertEquals(6, BinaryArrayToNumber.ConvertBinaryArrayToIntBitwiseOperator(new ArrayList<>(Arrays.asList(0,1,1,0))));
assertEquals(9, BinaryArrayToNumber.ConvertBinaryArrayToIntBitwiseOperator(new ArrayList<>(Arrays.asList(1,0,0,1))));
assertEquals(0, BinaryArrayToNumber.ConvertBinaryArrayToIntBitwiseOperator(new ArrayList<>(Arrays.asList(0,0,0,0))));
}
}<file_sep>/README.md
# Codewars
Codewars Java Challenges - a repository where I save my java solutions to codewars challenges.
[](https://www.codewars.com/users/Roomka)
[Challenges documentation](https://roomka.github.io/codewars/)
<file_sep>/src/main/test/com/com/roman/procopenco/codewars/TCPFiniteStateMachineTest.java
package com.roman.procopenco.codewars;
import static org.junit.Assert.assertEquals;
import junit.framework.TestCase;
import org.junit.Test;
public class TCPFiniteStateMachineTest {
@Test
public void SampleTests() {
assertEquals("CLOSE_WAIT", TCPFiniteStateMachine.traverseStates(new String[] {"APP_ACTIVE_OPEN","RCV_SYN_ACK","RCV_FIN"}));
assertEquals("ESTABLISHED", TCPFiniteStateMachine.traverseStates(new String[] {"APP_PASSIVE_OPEN", "RCV_SYN","RCV_ACK"}));
assertEquals("LAST_ACK", TCPFiniteStateMachine.traverseStates(new String[] {"APP_ACTIVE_OPEN","RCV_SYN_ACK","RCV_FIN","APP_CLOSE"}));
assertEquals("SYN_SENT", TCPFiniteStateMachine.traverseStates(new String[] {"APP_ACTIVE_OPEN"}));
assertEquals("ERROR", TCPFiniteStateMachine.traverseStates(new String[] {"APP_PASSIVE_OPEN","RCV_SYN","RCV_ACK","APP_CLOSE","APP_SEND"}));
}
@Test
public void SampleTestsShortSolution() {
assertEquals("CLOSE_WAIT", TCPFiniteStateMachine.traverseStatesShort(new String[] {"APP_ACTIVE_OPEN","RCV_SYN_ACK","RCV_FIN"}));
assertEquals("ESTABLISHED", TCPFiniteStateMachine.traverseStatesShort(new String[] {"APP_PASSIVE_OPEN", "RCV_SYN","RCV_ACK"}));
assertEquals("LAST_ACK", TCPFiniteStateMachine.traverseStatesShort(new String[] {"APP_ACTIVE_OPEN","RCV_SYN_ACK","RCV_FIN","APP_CLOSE"}));
assertEquals("SYN_SENT", TCPFiniteStateMachine.traverseStatesShort(new String[] {"APP_ACTIVE_OPEN"}));
assertEquals("ERROR", TCPFiniteStateMachine.traverseStatesShort(new String[] {"APP_PASSIVE_OPEN","RCV_SYN","RCV_ACK","APP_CLOSE","APP_SEND"}));
}
}<file_sep>/src/main/test/com/com/roman/procopenco/codewars/SudokuSolutionValidatorTest.java
package com.roman.procopenco.codewars;
import static org.junit.Assert.assertEquals;
import java.util.Arrays;
import junit.framework.TestCase;
import org.junit.Test;
public class SudokuSolutionValidatorTest {
@Test
public void exampleTest() {
int[][] sudoku = {
{5, 3, 4, 6, 7, 8, 9, 1, 2},
{6, 7, 2, 1, 9, 5, 3, 4, 8},
{1, 9, 8, 3, 4, 2, 5, 6, 7},
{8, 5, 9, 7, 6, 1, 4, 2, 3},
{4, 2, 6, 8, 5, 3, 7, 9, 1},
{7, 1, 3, 9, 2, 4, 8, 5, 6},
{9, 6, 1, 5, 3, 7, 2, 8, 4},
{2, 8, 7, 4, 1, 9, 6, 3, 5},
{3, 4, 5, 2, 8, 6, 1, 7, 9}
};
assertEquals(true, SudokuSolutionValidator.check(sudoku));
sudoku[0][0]++;
sudoku[1][1]++;
sudoku[0][1]--;
sudoku[1][0]--;
assertEquals(false, SudokuSolutionValidator.check(sudoku));
sudoku[0][0]--;
sudoku[1][1]--;
sudoku[0][1]++;
sudoku[1][0]++;
sudoku[4][4] = 0;
for (int[] num : sudoku) {
for (int ele : num) {
System.out.print(" " + ele);
}
System.out.println(" ");
}
assertEquals(false, SudokuSolutionValidator.check(sudoku));
}
}<file_sep>/src/main/java/com/roman/procopenco/codewars/SimpleStringExpansion.java
package com.roman.procopenco.codewars;
import java.util.ArrayDeque;
import java.util.Queue;
import java.util.Stack;
import java.util.stream.Stream;
/**
* Consider the following expansion:
*
* solve("3(ab)") = "ababab" -- because "ab" repeats 3 times
* solve("2(a3(b))") = "abbbabbb" -- because "a3(b)" == "abbb", which repeats twice.
* Given a string, return the expansion of that string.
*
* Input will consist of only lowercase letters and numbers (1 to 9) in valid parenthesis. There will be no letters or numbers after the last closing parenthesis.
*
* More examples in test cases.
*
* Good luck!
*
* Please also try Simple time difference
*/
public class SimpleStringExpansion {
public static String solves(String s){
String new_s = "";
for(char ch : new StringBuilder(s).reverse().toString().toCharArray()) {
if(Character.isDigit(ch)) new_s = new_s.repeat(Integer.parseInt(ch + ""));
if(Character.isLetter(ch)) new_s = ch + new_s;
}
return new_s;
}
}
<file_sep>/pom.xml
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.roman.procopenco.codewars</groupId>
<artifactId>javacodewars</artifactId>
<packaging>jar</packaging>
<version>0.1-SNAPSHOT</version>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
</properties>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
<source>11</source>
<target>11</target>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>2.7</version>
<configuration>
<docletArtifact>
<groupId>com.google.doclava</groupId>
<artifactId>doclava</artifactId>
<version>1.0.5</version>
</docletArtifact>
<doclet>com.google.doclava.Doclava</doclet> <!-- | bootclasspath required by Sun's JVM -->
<bootclasspath>${sun.boot.class.path}</bootclasspath>
<additionalparam>-quiet -federate JDK
http://download.oracle.com/javase/6/docs/api/index.html?
-federationxml JDK http://doclava.googlecode.com/svn/static/api/openjdk-6.xml -hdf project.name
"${project.name}" -d ${project.basedir}/docs
</additionalparam>
<useStandardDocletOptions>false
</useStandardDocletOptions> <!-- | Apple's JVM sometimes requires more memory -->
<additionalJOption>-J-Xmx1024m</additionalJOption>
<show>private</show>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-site-plugin</artifactId>
<version>3.7.1</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-project-info-reports-plugin</artifactId>
<version>3.0.0</version>
</plugin>
</plugins>
</build>
<name>javacodewars</name>
<url>http://maven.apache.org</url>
<dependencies>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.12</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.hamcrest</groupId>
<artifactId>hamcrest-library</artifactId>
<version>1.3</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.openjdk.jmh</groupId>
<artifactId>jmh-core</artifactId>
<version>1.21</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.openjdk.jmh</groupId>
<artifactId>jmh-generator-annprocess</artifactId>
<version>1.21</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.openjdk.jmh</groupId>
<artifactId>jmh-core</artifactId>
<version>RELEASE</version>
<scope>compile</scope>
</dependency>
</dependencies>
</project>
<file_sep>/src/main/java/com/roman/procopenco/codewars/BraceChecker.java
package com.roman.procopenco.codewars;
import java.util.*;
/**
* https://www.codewars.com/kata/valid-braces/java <br>
* Write a function that takes a string of braces, and determines if the order of the braces is valid. It should return true if the string is valid, and false if it's invalid. <br>
* <p>
* This Kata is similar to the Valid Parentheses Kata, but introduces new characters: brackets [], and curly braces {}. Thanks to @arnedag for the idea!
* <p>
* All input strings will be nonempty, and will only consist of parentheses, brackets and curly braces: ()[]{}.
* <p>
* What is considered Valid?
* A string of braces is considered valid if all braces are matched with the correct brace.
* <p>
* Examples
* "(){}[]" => True
* "([{}])" => True
* "(}" => False
* "[(])" => False
* "[({})](]" => False
*/
public class BraceChecker {
/**
* Imperative solution, that uses a stack to add all opening braces and pop al closing ones.
* The algorithm checks.
*
* @param braces string of braces.
* @return order of braces is valid or not.
*/
public static boolean isValid(String braces) {
Deque<Character> parentheses = new ArrayDeque<>();
Map<Character, Character> bracesMap = new HashMap<>();
bracesMap.put('(', ')');
bracesMap.put('[', ']');
bracesMap.put('{', '}');
for (Character c : braces.toCharArray()) {
if (c.equals('(') || c.equals('[') || c.equals('{')) {
parentheses.push(c);
} else if (!isClosingValid(bracesMap, parentheses, c)) {
return false;
} else {
parentheses.pop();
}
}
return parentheses.isEmpty();
}
/**
* Checks if the closing brace is valid, based on the opening one added to stack.
*
* @param bracesMap map with braces, key as opening brace and value as closing brace.
* @param deque stack with braces
* @param closingCharacter last closing character.
* @return if closing bracket is valid or not.
*/
protected static boolean isClosingValid(Map<Character, Character> bracesMap, Deque deque, Character closingCharacter) {
if (deque.isEmpty()) {
return false;
}
Character openingCharacter = (Character) deque.peek();
return bracesMap.get(openingCharacter) != null && bracesMap.get(openingCharacter).equals(closingCharacter);
}
/**
* Imperative solution, that uses a stack to add all opening braces and pop al closing ones.
* The algorithm checks.
*
* @param braces string of braces.
* @return order of braces is valid or not.
*/
public static boolean isValid2(String braces) {
Stack<Character> s = new Stack<>();
for (char c : braces.toCharArray())
if (s.size() > 0 && isClosing(s.peek(), c)) s.pop();
else s.push(c);
return s.size() == 0;
}
/**
* Checks if closing bracket is valid , that is if it's the same type as opening one.
*
* @param openingBracket
* @param closingBracket
* @return if closing bracket is valid or not.
*/
protected static boolean isClosing(char openingBracket, char closingBracket) {
return (openingBracket == '{' && closingBracket == '}') || (openingBracket == '(' && closingBracket == ')') || (openingBracket == '[' && closingBracket == ']');
}
}
<file_sep>/src/main/java/com/roman/procopenco/codewars/DuplicateEncoder.java
package com.roman.procopenco.codewars;
import java.util.HashMap;
import java.util.Map;
import java.util.stream.Collectors;
/**
* https://www.codewars.com/kata/54b42f9314d9229fd6000d9c/train/java
* The goal of this exercise is to convert a string to a new string where each character in the new string is '(' if that character appears only once in the original string, or ')' if that character appears more than once in the original string. Ignore capitalization when determining if a character is a duplicate.
* <p>
* Examples:
* <p>
* "din" => "((("
* <p>
* "recede" => "()()()"
* <p>
* "Success" => ")())())"
* <p>
* "(( @" => "))((
*/
public class DuplicateEncoder {
/**
* Converts a string to a new string composed of round brackets, substitutes letters appeared once in the string by (, and ) letters appeared multiple times in the string.
* The algorithm uses a map to save the number of a characters occurrences, if it appears once it substitues with '(' otherwise with ')'.
* Imperative solution 1.
*
* @param word original word to be substituted.
* @return substituted word.
*/
public static String encodeHashMap(String word) {
char[] characters = word.toLowerCase().toCharArray();
Map charMap = new HashMap<Character, Integer>();
calculateCharOccurences(characters, charMap);
substituteCharacters(characters, charMap);
return new String(characters);
}
/**
* Calculates the number of times a character appears in a string and set the number as value in a hashMap.
*
* @param characters
* @param charMap
*/
protected static void calculateCharOccurences(char[] characters, Map charMap) {
for (char s : characters) {
if (charMap.containsKey(s)) {
int occurences = (int) charMap.get(s);
charMap.put(s, occurences + 1);
} else {
charMap.put(s, 1);
}
}
}
/**
* Substitute characters with round brackets based on the number of occurrences in the string.
*
* @param originalCharacters
* @param occurrencesCharMap
*/
protected static void substituteCharacters(char[] originalCharacters, Map occurrencesCharMap) {
for (int i = 0; i < originalCharacters.length; i++) {
int occurences = (int) occurrencesCharMap.get(originalCharacters[i]);
if (occurences > 1) {
originalCharacters[i] = ')';
} else {
originalCharacters[i] = '(';
}
}
}
/**
* Converts a string to a new string composed of round brackets, substitutes letters appeared once in the string by (, and ) letters appeared multiple times in the string.
* The algorithms checks the first and last position of a letter in the string, if it the same it means that the letter appeared once otherwise multiple times.
* Stream solution 1.
*
* @param word original word to be substituted.
* @return substituted word.
*/
static String encodeStream(String word) {
return word.toLowerCase()
.chars()
.mapToObj(i -> String.valueOf((char) i))
.map(i -> word.toLowerCase().indexOf(i) == word.toLowerCase().lastIndexOf(i) ? "(" : ")")
.collect(Collectors.joining());
}
/**
* Converts a string to a new string composed of round brackets, substitutes letters appeared once in the string by (, and ) letters appeared multiple times in the string.
* The algorithms stores the number of occurrences of a word, at first occurrences it replaces with '(' , at second occurrence it substitutes first occurrence and the second one.
* All successive occurrences are directly substituted with ')'.
* Imperative solution 2.
*
* @param word original word to be substituted.
* @return substituted word.
*/
static String encodeHashpMapOptimized(String word) {
word = word.toLowerCase();
Map<Character, Integer> letters = new HashMap<Character, Integer>();
StringBuilder result = new StringBuilder();
for (int i = 0; i < word.length(); i++) {
char c = word.charAt(i);
Integer index = letters.get(c);
if (index == null) {
// First occurrence
result.append("(");
letters.put(c, i);
} else if (index >= 0) {
// 2nd occurrence, replace first instance, and set entry to -1
result.replace(index, index + 1, ")");
result.append(")");
letters.put(c, -1);
} else {
result.append(")");
}
}
return result.toString();
}
}<file_sep>/src/main/test/com/com/roman/procopenco/codewars/PangramCheckerTest.java
package com.roman.procopenco.codewars;
import org.junit.Test;
import static org.junit.Assert.*;
public class PangramCheckerTest {
@Test
public void test1Regular() {
String pangram1 = "The quick brown fox jumps over the lazy dog.";
PangramChecker pc = new PangramChecker();
assertEquals(true, pc.check(pangram1));
}
@Test
public void test2Regular() {
String pangram2 = "You shall not pass!";
PangramChecker pc = new PangramChecker();
assertEquals(false, pc.check(pangram2));
}
@Test
public void test1Stream1() {
String pangram1 = "The quick brown fox jumps over the lazy dog.";
PangramChecker pc = new PangramChecker();
assertEquals(true, pc.checkStream1(pangram1));
}
@Test
public void test2Stream1() {
String pangram2 = "You shall not pass!";
PangramChecker pc = new PangramChecker();
assertEquals(false, pc.checkStream1(pangram2));
}
@Test
public void test1Stream2() {
String pangram1 = "The quick brown fox jumps over the lazy dog.";
PangramChecker pc = new PangramChecker();
assertEquals(true, pc.checkStream2(pangram1));
}
@Test
public void test2Stream2() {
String pangram2 = "You shall not pass!";
PangramChecker pc = new PangramChecker();
assertEquals(false, pc.checkStream2(pangram2));
}
}<file_sep>/src/main/java/com/roman/procopenco/codewars/NthSeries.java
package com.roman.procopenco.codewars;
import java.text.DecimalFormat;
import java.text.DecimalFormatSymbols;
import java.util.Locale;
import java.util.stream.IntStream;
/**
* https://www.codewars.com/kata/555eded1ad94b00403000071/train/java <br/>
* <p>
* Task: <br/>
* Your task is to write a function which returns the sum of following series upto nth term(parameter). <br/>
* <p>
* Series: 1 + 1/4 + 1/7 + 1/10 + 1/13 + 1/16 +... <br/>
* Rules: <br/>
* You need to round the answer to 2 decimal places and return it as String. <br/>
* <p>
* If the given value is 0 then it should return 0.00 <br/>
* <p>
* You will only be given Natural Numbers as arguments. <br/>
* <p>
* Examples: <br/>
* SeriesSum(1) => 1 = "1.00" <br/>
* SeriesSum(2) => 1 + 1/4 = "1.25" <br/>
* SeriesSum(5) => 1 + 1/4 + 1/7 + 1/10 + 1/13 = "1.57" <br/>
*/
public class NthSeries {
/**
* Returns the sum of series of number upt to nth therm passed in input.
* Iterative implementation 1.
*
* @param n
* @return
*/
public static String seriesSumPersonal(int n) {
double divideNumber = 1.00;
DecimalFormatSymbols otherSymbols = new DecimalFormatSymbols(Locale.getDefault());
otherSymbols.setDecimalSeparator('.');
otherSymbols.setGroupingSeparator('.');
DecimalFormat df = new DecimalFormat("0.00", otherSymbols);
df.setMaximumFractionDigits(2);
double result = 0.00;
for (int i = 1; i <= n; i++) {
result += (1 / divideNumber);
divideNumber += 3;
}
return df.format(result);
}
/**
* Returns the sum of series of number upt to nth therm passed in input.
* Iterative implementation 1.
*
* @param n
* @return
*/
public static String seriesSumImperative(int n) {
DecimalFormatSymbols otherSymbols = new DecimalFormatSymbols(Locale.getDefault());
otherSymbols.setDecimalSeparator('.');
otherSymbols.setGroupingSeparator('.');
DecimalFormat df = new DecimalFormat("0.00", otherSymbols);
double sum = 0.0;
for (int i = 0; i < n; i++)
sum += 1.0 / (1 + 3 * i);
return df.format(sum);
}
/**
* Returns the sum of series of number upt to nth therm passed in input.
* Implementation using java streams.
*
* @param n
* @return
*/
public static String seriesSumStream(int n) {
DecimalFormatSymbols otherSymbols = new DecimalFormatSymbols(Locale.getDefault());
otherSymbols.setDecimalSeparator('.');
otherSymbols.setGroupingSeparator('.');
DecimalFormat df = new DecimalFormat("0.00", otherSymbols);
return df.format(IntStream.range(0, n).mapToDouble(num -> 1.0 / (1 + num * 3)).sum());
}
}
<file_sep>/src/main/java/com/roman/procopenco/codewars/TenMinWalk.java
package com.roman.procopenco.codewars;
import java.util.HashMap;
import java.util.Map;
/**
* https://www.codewars.com/kata/take-a-ten-minute-walk/java <br/>
*
* You live in the city of Cartesia where all roads are laid out in a perfect grid. <br/>
* You arrived ten minutes too early to an appointment, so you decided to take the opportunity to go for a short walk. <br/>
* The city provides its citizens with a Walk Generating App on their phones -- everytime you press the button it sends you an array of one-letter strings representing directions to walk (eg. ['n', 's', 'w', 'e']).<br/>
* You always walk only a single block in a direction and you know it takes you one minute to traverse one city block, so create a function that will return true if the walk <br/>
* the app gives you will take you exactly ten minutes (you don't want to be early or late!) and will, of course, return you to your starting point. Return false otherwise.<br/>
* <br/>
* Note: you will always receive a valid array containing a random assortment of direction letters ('n', 's', 'e', or 'w' only). <br/>
* It will never give you an empty array (that's not a walk, that's standing still!).<br/>
*
*/
public class TenMinWalk {
/**
* Check if list of walks to be done is valid and can be done in 10 minutes or not.
* Iterative solution.
* The algorithm uses a HashMap to store the number of times each side of walk is made and checks if the number of times happens on East-West is the same
* and North-South is the same.
* @param walk
* @return
*/
public static boolean isValid(char[] walk) {
if (walk.length != 10) return false;
Map <Character, Integer> directionsTaken = new HashMap<>();
directionsTaken.put('n',0);
directionsTaken.put('s',0);
directionsTaken.put('w',0);
directionsTaken.put('e',0);
for (char s : walk){
directionsTaken.put(s, directionsTaken.get(s) + 1);
}
return directionsTaken.get('s') == directionsTaken.get('n') && directionsTaken.get('w') == directionsTaken.get('e');
}
/**
* Check if list of walks to be done is valid and can be done in 10 minutes or not.
* Solutions using streams.
* The algorithm checks if the number of times happens on East-West is the same
* and North-South is the same.
* @param walk
* @return
*/
public static boolean isValidStream(char[] walk) {
String s = new String(walk);
return s.chars().filter(p->p=='n').count()==s.chars().filter(p->p=='s').count()&&
s.chars().filter(p->p=='e').count()==s.chars().filter(p->p=='w').count()&&s.chars().count()==10;
}
}
<file_sep>/src/main/java/com/roman/procopenco/codewars/GreedIsGood.java
package com.roman.procopenco.codewars;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;
/**
* Greed is a dice game played with five six-sided dice. Your mission, should you choose to accept it, is to score a throw according to these rules. You will always be given an array with five six-sided dice values.
*
* Three 1's => 1000 points
* Three 6's => 600 points
* Three 5's => 500 points
* Three 4's => 400 points
* Three 3's => 300 points
* Three 2's => 200 points
* One 1 => 100 points
* One 5 => 50 point
* A single die can only be counted once in each roll. For example, a given "5" can only count as part of a triplet (contributing to the 500 points) or as a single 50 points, but not both in the same roll.
*
* Example scoring
*
* Throw Score
* --------- ------------------
* 5 1 3 4 1 250: 50 (for the 5) + 2 * 100 (for the 1s)
* 1 1 1 3 1 1100: 1000 (for three 1s) + 100 (for the other 1)
* 2 4 4 5 4 450: 400 (for three 4s) + 50 (for the 5)
* In some languages, it is possible to mutate the input to the function. This is something that you should never do. If you mutate the input, you will not be able to pass all the tests.
*
* https://www.codewars.com/kata/5270d0d18625160ada0000e4/train/java
*/
public class GreedIsGood {
static Map<Integer, Integer> trippleDigitsMap = new HashMap<>();
static Map<Integer, Integer> singleDigitsMap = new HashMap<>();
static {
trippleDigitsMap.put(1, 1000);
trippleDigitsMap.put(6, 600);
trippleDigitsMap.put(5, 500);
trippleDigitsMap.put(4, 400);
trippleDigitsMap.put(3, 300);
trippleDigitsMap.put(2, 200);
singleDigitsMap.put(1, 100);
singleDigitsMap.put(5, 50);
}
public static int greedy(int[] dice){
Map<Integer, Integer> singleDigits = new HashMap<>();
AtomicReference<Integer> result = new AtomicReference<>(0);
for (int i = 0; i < dice.length; i++) {
int currentNumber = dice[i];
if (singleDigits.containsKey(currentNumber)) {
singleDigits.put(currentNumber, singleDigits.get(currentNumber) + 1);
} else {
singleDigits.put(currentNumber, 1);
}
}
singleDigits.forEach((k, v) -> {
int numberOfOccurences = v;
if (v >= 3) {
result.updateAndGet(v1 -> v1 + trippleDigitsMap.get(k));
numberOfOccurences = v - 3;
}
if (singleDigitsMap.containsKey(k)) {
int finalNumberOfOccurences = numberOfOccurences;
result.updateAndGet(v1 -> v1 + singleDigitsMap.get(k) * finalNumberOfOccurences);
}
});
return result.get();
}
public static int greedyOptimal(int[] dice) {
int n[] = new int[7];
for (int d : dice) n[d]++;
return n[1]/3*1000 + n[1]%3*100 + n[2]/3*200 + n[3]/3*300 + n[4]/3*400 + n[5]/3*500 + n[5]%3*50 + n[6]/3*600;
}
}
<file_sep>/src/main/java/com/roman/procopenco/codewars/Diamond.java
package com.roman.procopenco.codewars;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import static java.lang.String.join;
import static java.util.Collections.nCopies;
/**
* https://www.codewars.com/kata/give-me-a-diamond/train/java <br/>
* This kata is to practice simple string output. Jamie is a programmer, and James' girlfriend. She likes diamonds, and wants a diamond string from James. Since James doesn't know how to make this happen, he needs your help.<br/>
* <p>
* ###Task: <br/>
* <p>
* You need to return a string that displays a diamond shape on the screen using asterisk ("*") characters. Please see provided test cases for exact output format.<br/>
* <p>
* The shape that will be returned from print method resembles a diamond, where the number provided as input represents the number of *’s printed on the middle line. The line above and below will be centered and will have 2 less *’s than the middle line. This reduction by 2 *’s for each line continues until a line with a single * is printed at the top and bottom of the figure.<br/>
* <p>
* Return null if input is even number or negative (as it is not possible to print diamond with even number or negative number).<br/>
*/
public class Diamond {
    /**
     * Builds a diamond of asterisks whose widest (middle) row contains
     * {@code n} asterisks; every row is left-padded with spaces so the figure
     * is centred, and every row ends with a newline.
     *
     * @param n number of asterisks on the middle row
     * @return the diamond string, or {@code null} when {@code n} is negative
     *         or even (no diamond is possible)
     */
    public static String print(int n) {
        if (n < 0 || Math.floorMod(n, 2) == 0) {
            return null;
        }
        StringBuilder out = new StringBuilder();
        int mid = n / 2;
        for (int row = 0; row < n; row++) {
            // Distance from the middle row determines padding and width.
            int dist = Math.abs(mid - row);
            appendRepeated(out, ' ', dist);
            appendRepeated(out, '*', n - 2 * dist);
            out.append('\n');
        }
        return out.toString();
    }

    /**
     * Stream-based variant of {@link #print(int)} producing identical output.
     *
     * @param n number of asterisks on the middle row
     * @return the diamond string, or {@code null} for negative or even input
     */
    public static String printStream(int n) {
        if (n < 0 || n % 2 == 0) {
            return null;
        }
        return IntStream.range(0, n)
                .mapToObj(row -> {
                    int dist = Math.abs(n / 2 - row);
                    return join("", nCopies(dist, " ")) + join("", nCopies(n - 2 * dist, "*"));
                })
                .collect(Collectors.joining("\n")) + "\n";
    }

    // Appends ch to sb exactly count times.
    private static void appendRepeated(StringBuilder sb, char ch, int count) {
        for (int i = 0; i < count; i++) {
            sb.append(ch);
        }
    }
}
| 0e41531fbe0a067588357f82eeba3180ae4f40eb | [
"Markdown",
"Java",
"Maven POM"
] | 17 | Java | Roomka/codewars | 3bcfca2171673b6d885df2f3aaaac328d9316744 | 28cd0b1470cc49073518c5402987db356c9e95cf |
refs/heads/master | <repo_name>thirtythirty/ThirtyPlayList_iOS<file_sep>/ThirtyPlayList/GenreData.swift
//
// GenreData.swift
// ThirtyPlayList
//
// Created by 坂田 和也 on 2016/09/20.
// Copyright © 2016年 SKT. All rights reserved.
//
import RealmSwift
import MediaPlayer
// 端末内の曲の情報をアーティスト単位で格納するテーブル
class GenreData: Object {
static let realm = try! Realm()
dynamic var id = 0
dynamic var name = "" // アーティスト名
dynamic var use = true // 使用するかどうかの情報
// idをプライマリキーに設定
override static func primaryKey() -> String? {
return "id"
}
// Builds an unmanaged GenreData row (caller persists it via save()).
// A nil genre name falls back to the "unknown" placeholder
// (fixes the previous "unkown" misspelling).
static func create(name: String?) -> GenreData {
    let genre = GenreData()
    genre.id = lastId()
    genre.name = name ?? "unknown"
    genre.use = true
    return genre
}
// オートインクリメント機能
// Next auto-increment primary key: one past the newest row, or 1 for an
// empty table (Realm has no built-in auto-increment).
static func lastId() -> Int {
    guard let newest = realm.objects(GenreData).last else {
        return 1
    }
    return newest.id + 1
}
// すでに曲が保存されているかチェック
// ある:true、保存されていない:false
// True when a genre with this name is already stored. A nil name can
// never have been stored, so it reports false immediately.
static func existCheckByInfo(name: String?) -> Bool {
    guard let genreName = name else {
        return false
    }
    let matches = realm.objects(GenreData).filter("name = %@", genreName)
    return matches.count > 0
}
func save(){
try! GenreData.realm.write{
GenreData.realm.add(self)
}
}
// Imports every genre found in the on-device media library into the
// GenreData table; genres already stored (matched by name) are skipped,
// so the method is safe to call on every launch.
static func DataSet(){
// Query the device library grouped by genre.
let query = MPMediaQuery.genresQuery()
// Exclude iCloud-only items so only locally available media is imported.
query.addFilterPredicate(MPMediaPropertyPredicate(value: false, forProperty: MPMediaItemPropertyIsCloudItem))
if let collections = query.collections {
for collection in collections {
// NOTE(review): representativeItem is force-unwrapped; presumably a
// genre collection always has at least one item - confirm.
if let genreName = collection.representativeItem!.genre {
// Skip genres imported on a previous launch.
if(GenreData.existCheckByInfo(genreName) == true){
continue
}
let s = GenreData.create(genreName)
s.save()
}
}
}
}
// すべての曲の情報を取得する
// 引数のsonginfoに格納する
// Rebuilds the caller's header list in place. Each entry is
// [genre name, user "use" flag, accordion-expanded flag (starts collapsed)].
static func getAllGenreInfo(inout genreInfo: [[AnyObject]]) {
    genreInfo.removeAll()
    for genre in realm.objects(GenreData) {
        genreInfo.append([genre.name, genre.use, false])
    }
}
// Returns a SongInfo snapshot for every SongData row whose genre column
// matches genreName; empty array when the genre has no local songs.
static func getSongInfoByGenreName(genreName: String) -> [SongInfo] {
var songInfo: [SongInfo] = []
let songs = realm.objects(SongData).filter("genre = %@", genreName)
if(songs.count == 0){
return songInfo
} else {
for song in songs {
// -1 when the song is not registered as a key song.
let key_song_number = KeySongData.keySongCheck(song.id)
songInfo.append(SongInfo(id: song.id, title: song.name, use: song.use,
artist: song.artist, album: song.album, duration: song.duration, key_song_number: key_song_number))
}
}
return songInfo
}
// useを更新する
// Persists the user's enable/disable flag on the genre row matching
// genreName; silently does nothing when no such genre exists.
static func updateUseableByName(genreName: String, use: Bool) {
    guard let genre = realm.objects(GenreData).filter("name = %@", genreName).first else {
        return
    }
    try! realm.write {
        genre.use = use
        realm.add(genre, update: true)
    }
}
}<file_sep>/ThirtyPlayList/Podfile
# Uncomment this line to define a global platform for your project
# platform :ios, '9.0'
target 'ThirtyPlayList' do
# Comment this line if you're not using Swift and don't want to use dynamic frameworks
use_frameworks!
# Pods for ThirtyPlayLIst_Bata
# RealmSwift: local persistence for songs, genres, playlists and key songs.
pod 'RealmSwift'
# The pinned-branch variants below are leftover experiments; kept for reference.
# pod 'Realm', git: '<EMAIL>:realm/realm-cocoa.git', branch: 'master'
# pod 'RealmSwift', git: '<EMAIL>:realm/realm-cocoa.git', branch: 'master'
# pod 'Realm', :git => 'https://github.com/realm/realm-cocoa.git', :branch => 'master', :submodules => true
# pod 'Realm', :git => 'https://github.com/realm/realm-cocoa.git', :branch => 'master'
# pod 'RealmSwift', :git => 'https://github.com/realm/realm-cocoa.git', :branch => 'master'
end
<file_sep>/ThirtyPlayList/ThirtyPlayList/SongInfo.swift
//
// SongInfo.swift
// ThirtyPlayList
//
// Created by 坂田 和也 on 2016/08/27.
// Copyright © 2016年 SKT. All rights reserved.
//
import Foundation
// Lightweight value snapshot of a SongData row, used by the view
// controllers so table cells never hold live Realm objects.
struct SongInfo {
// Primary key of the backing SongData row.
var id: Int
// Track title.
var title: String
// Whether the user allows this track in generated playlists.
var use: Bool
var artist: String
var album: String
// Playback length in seconds.
var duration: Double
// 1...6 when the track is a key song, -1 otherwise
// (value comes from KeySongData.keySongCheck).
var key_song_number: Int
}<file_sep>/ThirtyPlayList/ThirtyPlayList/TabBarController.swift
//
// TabBarController.swift
// ThirtyPlayList
//
// Created by 坂田 和也 on 2016/08/31.
// Copyright © 2016年 SKT. All rights reserved.
//
import UIKit
class TabBarController: UITabBarController {
override func viewDidLoad() {
super.viewDidLoad()
// 非選択時のタブの下の文字の色変更
let colorNormal = UIColor(red: 255/255, green: 255/255, blue: 255/255, alpha: 1.0)
let selectedAttributes_n = [NSForegroundColorAttributeName: colorNormal]
UITabBarItem.appearance().setTitleTextAttributes(selectedAttributes_n, forState: UIControlState.Normal)
// 選択時
let colorSelected = UIColor(red: 36/255, green: 47/255, blue: 232/255, alpha: 1.0)
let selectedAttributes_s = [NSForegroundColorAttributeName: colorSelected]
UITabBarItem.appearance().setTitleTextAttributes(selectedAttributes_s, forState: UIControlState.Selected)
// タブ背景設定
let colorBg = UIColor(red: 72/255, green: 187/255, blue: 255/255, alpha: 1.0)
UITabBar.appearance().barTintColor = colorBg
// アイコンの色
let colorKey = UIColor(red: 36/255, green: 47/255, blue: 232/255, alpha: 1.0)
UITabBar.appearance().tintColor = colorKey
// Do any additional setup after loading the view.
// 非選択時、アイコンが白になるようにする
let img_names = ["keysong_icon_tab_30","play_tab_30","yaruki_tab_30"]
for (i, item) in self.tabBar.items!.enumerate() {
item.image = UIImage(named: img_names[i])?.imageWithRenderingMode(UIImageRenderingMode.AlwaysOriginal)
}
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
/*
// MARK: - Navigation
// In a storyboard-based application, you will often want to do a little preparation before navigation
override func prepareForSegue(segue: UIStoryboardSegue, sender: AnyObject?) {
// Get the new view controller using segue.destinationViewController.
// Pass the selected object to the new view controller.
}
*/
}
<file_sep>/ThirtyPlayList/ThirtyPlayList/SecondViewController.swift
//
// SecondViewController.swift
// ThirtyPlayList
//
// Created by 坂田 和也 on 2016/08/26.
// Copyright © 2016年 SKT. All rights reserved.
//
import UIKit
class SecondViewController: UIViewController, UIPickerViewDataSource, UIPickerViewDelegate {
// ピッカーで使う情報
let useableMinute: [String] = ["15分", "20分", "25分", "30分", "35分", "40分", "45分", "50分", "55分", "60分", "65分", "70分", "75分", "80分", "85分", "90分"]
let useableSplit: [String] = ["1分割","2分割","3分割","4分割","5分割","6分割"]
// ピッカーで選択している値が入る
// 初期値は30,2
var selectMinute: Int = 30
var selectSplit: Int = 2
// 作るプレイリストの合計再生時間を決めるピッカー
@IBOutlet weak var durationSelectPicker: UIPickerView!
// 決定ボタン(storyboardで設定済み)
@IBAction func GoToPlayBottom(sender: AnyObject) {
}
@IBOutlet weak var GoButtom: UIButton!
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view, typically from a nib.
GoButtom.layer.borderWidth = 10
GoButtom.layer.borderColor = UIColor(red: 72/255, green: 187/255, blue: 255/255, alpha: 1.0).CGColor
GoButtom.layer.cornerRadius = 85
GoButtom.clipsToBounds = true
// ピッカーの初期値を30分,2分割にする
durationSelectPicker.selectRow(3, inComponent: 0, animated: true)
durationSelectPicker.selectRow(1, inComponent: 1, animated: true)
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
// ピッカーのデータを返す
// Row label for each picker wheel: component 0 shows the total-minutes
// options, component 1 the split-count options.
func pickerView(pickerView: UIPickerView, titleForRow row: Int, forComponent component: Int) -> String? {
    switch component {
    case 0: return useableMinute[row]
    case 1: return useableSplit[row]
    default: return ""
    }
}
// ピッカーは一つ
func numberOfComponentsInPickerView(pickerView: UIPickerView) -> Int {
return 2
}
// ピッカーの扱うデータの総数を返す
// Row count per picker wheel (component 0 = minutes, 1 = splits).
func pickerView(pickerView: UIPickerView, numberOfRowsInComponent component: Int) -> Int {
    switch component {
    case 0: return useableMinute.count
    case 1: return useableSplit.count
    default: return 0
    }
}
// ピッカーが止まったら、selectMinuteを更新
func pickerView(pickerView: UIPickerView, didSelectRow row: Int, inComponent component: Int) {
if(component == 0){
selectMinute = 15 + 5*row
durationSelectPicker.selectRow((selectMinute / 15)-1, inComponent: 1, animated: true)
selectSplit = (selectMinute / 15)
} else if(component == 1){
selectSplit = row+1
}
}
// ピッカーが選択した時間を次のPlayViewControllerに渡す
override func prepareForSegue(segue: UIStoryboardSegue, sender: AnyObject?){
let playViewController = segue.destinationViewController as! PlayViewController
playViewController.selectedMinute = selectMinute
playViewController.selectedSplit = selectSplit
playViewController.hidesBottomBarWhenPushed = true
}
}
<file_sep>/ThirtyPlayList/ThirtyPlayList/KeySongData.swift
//
// KeySongData.swift
// ThirtyPlayLIst_Bata
//
// Created by 坂田 和也 on 2016/08/25.
// Copyright © 2016年 SKT. All rights reserved.
//
import RealmSwift
import MediaPlayer
// キーソングを格納するテーブル
class KeySongData: Object {
static let realm = try! Realm()
dynamic var id = 0
dynamic var songdata_id = 0
dynamic var key_song_number = 0
override static func primaryKey() -> String? {
return "id"
}
static func create(songdata_id: Int, key_song_number: Int) -> KeySongData {
let key_song = KeySongData()
key_song.id = lastId()
key_song.songdata_id = songdata_id
key_song.key_song_number = key_song_number
return key_song
}
static func lastId() -> Int {
if let tail_song = realm.objects(KeySongData).last {
return tail_song.id+1
} else {
return 1
}
}
// すでに存在するならそれを更新する
// Inserts this key song, or overwrites the entry that already occupies
// the same key-song slot (one row per key_song_number).
// Fixed to write through KeySongData.realm - the previous version went
// through SongData.realm, a separate Realm instance, by copy-paste.
func save(){
    let existing = KeySongData.realm.objects(KeySongData).filter("key_song_number = %@", self.key_song_number)
    if let current = existing.first {
        // Reuse the stored row's primary key so add(update:) replaces it.
        try! KeySongData.realm.write{
            self.id = current.id
            KeySongData.realm.add(self, update: true)
        }
    } else {
        try! KeySongData.realm.write{
            KeySongData.realm.add(self)
        }
    }
}
// キーソングかどうかチェック
// Returns the key-song slot (1-6) assigned to the given SongData id,
// or -1 when the song is not registered as a key song.
static func keySongCheck(songdata_id: Int) -> Int {
    guard let keysong = realm.objects(KeySongData).filter("songdata_id = %@", songdata_id).first else {
        return -1
    }
    return keysong.key_song_number
}
// キーソングが設定されているかチェック
// True only when all six key-song slots exist AND each one still points
// at a song row that is present in SongData.
static func KeySongisSet() ->Bool{
    for slot in 1...6 {
        guard let keysong = realm.objects(KeySongData).filter("key_song_number = %@", slot).first else {
            return false
        }
        if realm.objects(SongData).filter("id = %@", keysong.songdata_id).count == 0 {
            return false
        }
    }
    return true
}
// すべてのキーソングを取得
static func getAllKeySongInfo() ->[SongInfo] {
var songinfo: [SongInfo] = []
let keysongs = realm.objects(KeySongData).sorted("key_song_number")
for keysong in keysongs {
let song = realm.objects(SongData).filter("id = %@",keysong.songdata_id)
songinfo.append(SongInfo(id: song[0].id, title: song[0].name, use: song[0].use,
artist: song[0].artist, album: song[0].album, duration: song[0].duration, key_song_number: keysong.key_song_number))
}
return songinfo
}
// 再生回数順にキーソングを取得する
// Registers the most-played local songs as key songs (slot 1 = highest
// play count, at most six slots). Returns false when no eligible songs
// were found.
// Fixes a crash in the previous version: its selection loop indexed the
// sorted array down past 0 whenever fewer than six songs existed, and
// tied play counts could pick the same song twice.
static func autoKeySongSet() ->Bool {
    // Collect (SongData id, play count) for every local media item that
    // is already registered in SongData.
    // NOTE(review): albumsQuery() is used here but .items still yields
    // individual tracks - confirm songsQuery() was not intended.
    var candidates: [(id: Int, playCount: Int)] = []
    let query = MPMediaQuery.albumsQuery()
    query.addFilterPredicate(MPMediaPropertyPredicate(value: false, forProperty: MPMediaItemPropertyIsCloudItem))
    if let songs = query.items {
        for song in songs {
            let id = SongData.getIdSongByInfo(song.title, artist: song.artist, album: song.albumTitle)
            if id != -1 {
                candidates.append((id: id, playCount: song.playCount))
            }
        }
    }
    if candidates.isEmpty {
        return false
    }
    // Highest play count first; take at most six so small libraries no
    // longer index out of bounds.
    let ranked = candidates.sort { $0.playCount > $1.playCount }
    for (slot, candidate) in ranked.prefix(6).enumerate() {
        KeySongData.create(candidate.id, key_song_number: slot + 1).save()
    }
    return true
}
}
<file_sep>/ThirtyPlayList/ThirtyPlayList/CustomPlaySongTableViewCell.swift
//
// CustomPlaySongTableViewCell.swift
// ThirtyPlayList
//
// Created by 坂田 和也 on 2016/08/29.
// Copyright © 2016年 SKT. All rights reserved.
//
import UIKit
class CustomPlaySongTableViewCell: UITableViewCell {
override func awakeFromNib() {
super.awakeFromNib()
// Initialization code
}
@IBOutlet weak var SongImg: UIImageView!
@IBOutlet weak var SongTitle: UILabel!
@IBOutlet weak var SongDuration: UILabel!
@IBOutlet weak var KeySongImg: UIImageView!
override func setSelected(selected: Bool, animated: Bool) {
super.setSelected(selected, animated: animated)
// Configure the view for the selected state
}
// Shows the numbered key-song badge for this row, or clears it.
// -1 means "not a key song"; assets are named keysong_icon_1 ... _6.
func setKeySongImginPlaylist(key_song_number: Int){
    KeySongImg.image = key_song_number == -1
        ? nil
        : UIImage(named: "keysong_icon_" + key_song_number.description)
}
}
<file_sep>/ThirtyPlayList/ThirtyPlayList/FirstViewController.swift
//
// FirstViewController.swift
// ThirtyPlayList
//
// Created by 坂田 和也 on 2016/08/26.
// Copyright © 2016年 SKT. All rights reserved.
//
import UIKit
class FirstViewController: UIViewController , UITableViewDelegate, UITableViewDataSource, UISearchBarDelegate, SongCellDelegate, SongHeaderCellDelegate{
@IBOutlet weak var SongTableView: UITableView!
// ヘッダーの情報が入ったAnyObject配列
private var headerInfo: [[AnyObject]] = []
// 表示する曲の情報が入った構造体SongInfoの配列
private var songInfo: [[SongInfo]] = []
@IBOutlet weak var SongSearchBar: UISearchBar!
@IBOutlet weak var ModeSelectBar: UIToolbar!
private var modeNumber: Int = 1
@IBAction func ChangeALLTable(sender: AnyObject) {
modeNumber = 0
changeColorInModeBar()
// SongDataテーブルにあるすべての曲の情報をsongInfoに格納
songInfo.removeAll()
songInfo.append(SongData.getAllSongInfo())
// アコーディオンテーブルのヘッダー作成
headerInfo.removeAll()
headerInfo.append(["すべての曲", true, true])
SongTableView.reloadData()
}
@IBAction func ChangeArtistTable(sender: AnyObject) {
modeNumber = 1
changeColorInModeBar()
songInfo.removeAll()
ArtistData.getAllArtistInfo(&headerInfo)
for h in headerInfo {
let name = h[0] as! String
songInfo.append(ArtistData.getSongInfoByArtistName(name))
}
SongTableView.reloadData()
}
@IBAction func ChangePlayListTable(sender: AnyObject){
modeNumber = 2
changeColorInModeBar()
songInfo.removeAll()
PlayListData.getAllPlayListInfo(&headerInfo)
for h in headerInfo {
let name = h[0] as! String
songInfo.append(PlayListData.getSongInfoByPlayListName(name))
}
SongTableView.reloadData()
}
@IBAction func ChangeGenreTable(sender: AnyObject) {
modeNumber = 3
changeColorInModeBar()
songInfo.removeAll()
GenreData.getAllGenreInfo(&headerInfo)
for h in headerInfo {
let name = h[0] as! String
songInfo.append(GenreData.getSongInfoByGenreName(name))
}
SongTableView.reloadData()
}
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view, typically from a nib.
// テーブルのヘッダーの縦幅指定
SongTableView.sectionHeaderHeight = 50
// 端末にある曲を取得し、データベースに登録する
SongData.DataSet()
ArtistData.DataSet()
GenreData.DataSet()
PlayListData.DataSet()
// キーソング(15分最後で鍵をかけられた(固定された)曲)が登録されていないなら初期化
if(KeySongData.KeySongisSet() == false){
// 端末から情報を取り、再生回数が多い順にKeySongDataテーブルに登録
KeySongData.autoKeySongSet()
}
// SongDataテーブルにあるすべての曲の情報をsongInfoに格納
songInfo.append(SongData.getAllSongInfo())
// アコーディオンテーブルのヘッダー作成
headerInfo.removeAll()
headerInfo.append(["すべての曲", true, true])
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
// 表示モード選択バーをの文字の色を変える
func changeColorInModeBar(){
for i in 0..<ModeSelectBar.items!.count {
ModeSelectBar.items![i].tintColor = UIColor(red: 187/255, green: 234/255, blue: 252/255, alpha: 1.0)
}
ModeSelectBar.items![1+modeNumber*2].tintColor = UIColor.whiteColor()
}
// Cellが選択された際に呼び出される
// キーソング設定をするアクションシートを出す
// Cell tap: presents an action sheet asking which key-song slot (1-6)
// the tapped track should occupy; slot 1 has the highest priority when
// playlists are generated.
// Fixes a crash: the previous version indexed keysongInfo[0..5] through
// six copy-pasted action blocks and crashed when fewer than six key
// songs were registered; the actions are now generated in a loop behind
// a count guard.
func tableView(tableView: UITableView, didSelectRowAtIndexPath indexPath: NSIndexPath) {
    let keysongInfo = KeySongData.getAllKeySongInfo()
    // Guard against a partially initialised key-song table.
    if keysongInfo.count < 6 {
        return
    }
    let selectedTitle = songInfo[indexPath.section][indexPath.row].title
    let alertController = UIAlertController(
        title: "キーソング設定",
        message: "\"\(selectedTitle)\"を何番のキーソングに設定しますか?(キーソング1が一番優先度が高い)",
        preferredStyle: .ActionSheet)
    // One action per slot, labelled with the slot's current track title.
    for slot in 1...6 {
        let action = UIAlertAction(
            title: "キーソング\(slot)(\(keysongInfo[slot - 1].title))",
            style: .Default,
            handler: { action in
                self.updateKeySong(slot, section: indexPath.section, index: indexPath.row, tableView: tableView)
            })
        alertController.addAction(action)
    }
    let cancel = UIAlertAction(
        title: "キャンセル",
        style: .Cancel,
        handler: { action in
            // Nothing to do on cancel.
        }
    )
    alertController.addAction(cancel)
    self.presentViewController(alertController, animated: true,completion: nil)
}
// アクションシートから呼び出される、キーソング設定関数
// 新しくデータを入れるわけではなく、更新する
func updateKeySong(key_song_number: Int,section: Int, index: Int, tableView: UITableView){
let keysong = KeySongData.create(songInfo[section][index].id,key_song_number: key_song_number)
// すでにデータがある場合は、そのkey_song_numberのデータを更新する
keysong.save()
// headerInfo, songInfoを更新
for i in 0..<headerInfo.count {
for j in 0..<songInfo[i].count {
if(songInfo[i][j].key_song_number == key_song_number){
if(i == section && j == index){
break
}
songInfo[i][j].key_song_number = -1
break
}
}
}
songInfo[section][index].key_song_number = key_song_number
// テーブルを再描写
SongTableView.reloadData()
}
func tableView(tableView: UITableView, viewForHeaderInSection section: Int) -> UIView? {
let cell = CustomHeaderFooterView(reuseIdentifier: "Header")
cell.addGestureRecognizer(UITapGestureRecognizer(target: self, action: #selector(FirstViewController.tapHeader(_:))))
var canUseableSongCount = 0
for song in songInfo[section] {
if(song.use == true){
canUseableSongCount+=1
}
}
let headertext = (headerInfo[section][0] as? String)! + "(\(canUseableSongCount)/\(songInfo[section].count)曲)"
cell.headerTitle.text = headertext
cell.headerSwitch.setOn((headerInfo[section][1] as? Bool)!, animated: false)
cell.section = section
cell.frame.size.height = 100
cell.setExpanded((headerInfo[section][2] as? Bool)!)
cell.delegate = self
return cell
}
func numberOfSectionsInTableView(tableView: UITableView) -> Int {
return headerInfo.count
}
// テーブルに表示する配列の総数
func tableView(tableView: UITableView, numberOfRowsInSection section: Int) -> Int{
if((headerInfo[section][2] as? Bool) == false){
return 0
}
return songInfo[section].count
}
func tapHeader(gestureRecognizer: UITapGestureRecognizer) {
guard let cell = gestureRecognizer.view as? CustomHeaderFooterView else {
return
}
let extended = headerInfo[cell.section][2] as? Bool
headerInfo[cell.section][2] = !(extended!)
SongTableView.reloadSections(NSIndexSet(index: cell.section), withRowAnimation: .None)
}
// Cellに値を設定
func tableView(tableView: UITableView, cellForRowAtIndexPath indexPath: NSIndexPath) -> UITableViewCell {
let cell = tableView.dequeueReusableCellWithIdentifier("SongCell") as! CustomSongTableViewCell
// それぞれの値を設定
cell.SongTitle.text = songInfo[indexPath.section][indexPath.row].title
cell.setKeySongImg(songInfo[indexPath.section][indexPath.row].key_song_number)
cell.songId = songInfo[indexPath.section][indexPath.row].id
cell.SongUseSwitch_Outlet.firstItem.setOn!(songInfo[indexPath.section][indexPath.row].use, animated: false)
cell.index = indexPath.row
cell.section = indexPath.section
// updateUseable関数をCustomSongTableViewCellから移譲
cell.delegate = self
return cell
}
// CustomSongTableViewCellからのデリゲート
// songInfoのユーザからの使用、不使用の情報をsongInfoに更新する
func updateUseable(section: Int, index: Int,use: Bool){
songInfo[section][index].use = use
SongTableView.reloadData()
}
// CustomHeaderFooterViewからのデリゲート
// songInfoのユーザからの使用、不使用の情報をsongInfoに更新する
// Header switch toggled: enables/disables every song in the section and
// persists the flag on the grouping entity matching the current display
// mode. The previous version always wrote to ArtistData, so toggling a
// playlist or genre header never persisted that group's flag.
func updateUseableInSameSection(section: Int, use: Bool){
    headerInfo[section][1] = use
    let headerName = (headerInfo[section][0] as? String)!
    switch modeNumber {
    case 1:
        ArtistData.updateUseableByName(headerName, use: use)
    case 2:
        PlayListData.updateUseableByName(headerName, use: use)
    case 3:
        GenreData.updateUseableByName(headerName, use: use)
    default:
        // Mode 0 ("all songs") has no grouping entity to persist.
        break
    }
    for i in 0..<songInfo[section].count {
        songInfo[section][i].use = use
        SongData.updateUseableById(songInfo[section][i].id,use: use)
    }
    SongTableView.reloadData()
}
//サーチバー更新時
// Live title search: replaces the table contents with the matching songs
// under a single header that reports the hit count.
func searchBar(searchBar: UISearchBar, textDidChange searchText: String) {
    let hits = SongData.getSongInfoByTitleSearch(searchText)
    songInfo = [hits]
    headerInfo = [["検索結果:" + hits.count.description + "件", true, true]]
    self.SongTableView.reloadData()
}
//キャンセルクリック時
func searchBarCancelButtonClicked(searchBar: UISearchBar) {
SongSearchBar.text = ""
ChangeALLTable([])
self.view.endEditing(true)
}
//サーチボタンクリック時
func searchBarSearchButtonClicked(searchBar: UISearchBar) {
self.view.endEditing(true)
}
}
<file_sep>/ThirtyPlayList/ThirtyPlayList/PlayListData.swift
//
// PlayListData.swift
// ThirtyPlayList
//
// Created by 坂田 和也 on 2016/09/20.
// Copyright © 2016年 SKT. All rights reserved.
//
import RealmSwift
import MediaPlayer
// 端末内の曲の情報をアーティスト単位で格納するテーブル
class PlayListData: Object {
static let realm = try! Realm()
dynamic var id = 0
dynamic var name = "" // アーティスト名
dynamic var use = true // 使用するかどうかの情報
let songs = List<SongData>() // SongDataと多対多の関係になるので、Realmのリストを活用する
// idをプライマリキーに設定
override static func primaryKey() -> String? {
return "id"
}
// Builds an unmanaged PlayListData row (caller persists it via save()).
// A nil playlist name falls back to the "unknown" placeholder
// (fixes the previous "unkown" misspelling).
static func create(name: String?) -> PlayListData {
    let playlist = PlayListData()
    playlist.id = lastId()
    playlist.name = name ?? "unknown"
    playlist.use = true
    return playlist
}
// オートインクリメント機能
static func lastId() -> Int {
if let tail = realm.objects(PlayListData).last {
return tail.id+1
} else {
return 1
}
}
// すでに曲が保存されているかチェック
// ある:true、保存されていない:false
static func existCheckByInfo(name: String?) -> Bool {
var name_: String?
if let tmp = name{
name_ = tmp
// nilではない
} else {
// 名前のない曲は存在しないはず
return false
}
let playlist = realm.objects(PlayListData).filter("name = %@",name_!)
if(playlist.count == 0){
return false
} else {
return true
}
}
func save(){
try! PlayListData.realm.write{
PlayListData.realm.add(self)
}
}
// Imports every on-device playlist (and links its member tracks already
// present in SongData) into the PlayListData table; playlists already
// stored (matched by name) are skipped.
// Fixes the previous `if let x: MPMediaPlaylist = collection as!
// MPMediaPlaylist` - a force-cast inside an optional binding, which
// defeated the check entirely; a conditional cast is used instead.
static func DataSet(){
    let query = MPMediaQuery.playlistsQuery()
    // Exclude iCloud-only items so only locally playable media is used.
    query.addFilterPredicate(MPMediaPropertyPredicate(value: false, forProperty: MPMediaItemPropertyIsCloudItem))
    if let collections = query.collections {
        for collection in collections {
            guard let playlist = collection as? MPMediaPlaylist else {
                continue
            }
            let playlistName = playlist.valueForProperty(MPMediaPlaylistPropertyName) as? String
            // Skip playlists imported on a previous launch.
            if(PlayListData.existCheckByInfo(playlistName) == true){
                continue
            }
            let s = PlayListData.create(playlistName)
            for item in collection.items {
                // Link each track that is already registered in SongData.
                let songdata = SongData.getSongDataByInfo(item.title, artist: item.artist, album: item.albumTitle)
                if(songdata.count != 0){
                    s.songs.append(songdata[0])
                }
            }
            s.save()
        }
    }
}
// すべての曲の情報を取得する
// 引数のsonginfoに格納する
static func getAllPlayListInfo(inout playlistInfo: [[AnyObject]]) {
playlistInfo.removeAll()
let playlists = realm.objects(PlayListData)
for playlist in playlists {
playlistInfo.append([playlist.name, playlist.use, false])
}
}
static func getSongInfoByPlayListName(playlistName: String) -> [SongInfo] {
var songInfo: [SongInfo] = []
let playlist = realm.objects(PlayListData).filter("name = %@", playlistName)
if(playlist.count == 0){
return songInfo
} else {
for song in playlist[0].songs {
let key_song_number = KeySongData.keySongCheck(song.id)
songInfo.append(SongInfo(id: song.id, title: song.name, use: song.use,
artist: song.artist, album: song.album, duration: song.duration, key_song_number: key_song_number))
}
}
return songInfo
}
// useを更新する
static func updateUseableByName(playlistName: String, use: Bool) {
let playlist = realm.objects(PlayListData).filter("name = %@", playlistName)
if(playlist.count == 0){
return
} else {
let s = playlist[0]
try! PlayListData.realm.write{
s.use = use
PlayListData.realm.add(s, update: true)
}
}
}
// Removes every PlayListData row in a single write transaction.
// The previous version opened one transaction per row, which is
// needlessly slow for no behavioural benefit.
static func AllDelete(){
    try! realm.write {
        realm.delete(realm.objects(PlayListData))
    }
}
}<file_sep>/ThirtyPlayList/ThirtyPlayList/PlayViewController.swift
//
// PlayViewController.swift
// ThirtyPlayList
//
// Created by 坂田 和也 on 2016/08/29.
// Copyright © 2016年 SKT. All rights reserved.
//
import UIKit
import MediaPlayer
class PlayViewController: UIViewController, MPMediaPickerControllerDelegate, UITableViewDelegate, UITableViewDataSource {
// SecondViewControllerから渡された、作成するプレイリストの合計時間
var selectedMinute: Int = 0
var selectedSplit: Int = 0
var player = MPMusicPlayerController() // 音楽を再生するプレイヤー
var playlistInfo: [SongInfo] = [] // 作成したn分プレイリストの曲の情報を格納
var playlistMediaItem: [MPMediaItem] = []
var isPlay: Bool = true // 再生から始める
var playedSongCount = 0
var playedKeySongCount = 0
@IBOutlet weak var Message: UILabel! // "もうすぐn分経過"を表示する
@IBOutlet weak var KeySongImg: UIImageView!
@IBOutlet weak var playlistProgressbar: UIProgressView! //プログレスバー
@IBOutlet weak var PlayOrStopButtonView: UIButton!
@IBAction func PlayOrStopButton(sender: AnyObject) { // 再生ボタン
let button = (sender as! UIButton)
if isPlay {
player.pause()
button.setImage(UIImage(named: "play_icon"), forState: UIControlState.Normal)
UIApplication.sharedApplication().idleTimerDisabled = false // 自動ロック抑制を解除
if timer.valid == true {
timer.invalidate()
}
isPlay = false
} else {
player.play()
button.setImage(UIImage(named: "stop_icon"), forState: UIControlState.Normal)
UIApplication.sharedApplication().idleTimerDisabled = true
if timer.valid == false {
timer = NSTimer.scheduledTimerWithTimeInterval(1.0, target: self, selector: #selector(PlayViewController.countUpdate(_:)), userInfo: nil, repeats: true)// 自動ロック抑制
}
isPlay = true
}
}
@IBOutlet weak var PlayListTableView: UITableView!
@IBOutlet weak var SongImg: UIImageView!
@IBOutlet weak var SongTitle: UILabel!
var timer:NSTimer!
@IBOutlet weak var playlistCountDown: UILabel!
private var countDown: Int = 0
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view.
player = MPMusicPlayerController.systemMusicPlayer()
let notificationCenter = NSNotificationCenter.defaultCenter()
notificationCenter.addObserver(self,selector: #selector(PlayViewController.nowPlayingItemChanged(_:)),name: MPMusicPlayerControllerNowPlayingItemDidChangeNotification,object: player)
player.beginGeneratingPlaybackNotifications()
let detector = Detector() // プレイリスト計算機の生成
let playlist = detector.makePlayList(selectedMinute, split: selectedSplit)// n分プレイリストを作成
if(playlist.songsId.count == 0){
// 検索に失敗
let alertController = UIAlertController(
title: "\(selectedMinute)分プレイリストの作成に失敗",
message: "ヒント:曲を増やしたり、キーソングを変えたり、分割数を減らすと成功するかも!",
preferredStyle: .Alert)
let okAction = UIAlertAction(
title: "OK",
style: .Default,
handler: { action in
self.navigationController?.popViewControllerAnimated(true)
})
alertController.addAction(okAction)
self.presentViewController(alertController, animated: true,completion: nil)
return
}
// 作成されたプレイリストの各曲の情報取得
SongData.getSongInfoBysongsId(&playlistInfo, songsId: playlist.songsId)
for i in 0..<playlist.songsId.count {
// 各曲のMPMediaItemを取得
let query = MPMediaQuery.songsQuery()
query.addFilterPredicate(MPMediaPropertyPredicate(value: playlistInfo[i].title, forProperty: MPMediaItemPropertyTitle))
playlistMediaItem.append(query.items![0])
}
let playlistMediaItemCollection = MPMediaItemCollection.init(items: playlistMediaItem)
// 曲をプレイヤーに設定
player.setQueueWithItemCollection(playlistMediaItemCollection)
// タイマーを設定
timer = NSTimer.scheduledTimerWithTimeInterval(1.0, target: self, selector: #selector(PlayViewController.countUpdate(_:)), userInfo: nil, repeats: true)
NSRunLoop.currentRunLoop().addTimer(timer, forMode: NSRunLoopCommonModes)
countDown = selectedMinute * 60
// 自動ロック抑制
UIApplication.sharedApplication().idleTimerDisabled = true
// 再生開始
player.play()
treeUpdate(0)
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
deinit{
let notificationCenter = NSNotificationCenter.defaultCenter()
notificationCenter.removeObserver(self, name: MPMusicPlayerControllerNowPlayingItemDidChangeNotification,object: player)
player.endGeneratingPlaybackNotifications()
UIApplication.sharedApplication().idleTimerDisabled = false
player.pause()
}
override func viewWillDisappear(animated: Bool) {
super.viewWillDisappear(animated)
// タイマーを破棄する
if(timer != nil && timer.valid == true){
timer.invalidate()
}
}
func nowPlayingItemChanged(notification:NSNotification){
if let mediaItem = player.nowPlayingItem {
updateSongInformationUI(mediaItem)
}
}
//曲情報を更新
func updateSongInformationUI(mediaItem: MPMediaItem){
SongTitle.text = mediaItem.title ?? "不明な曲"
if(playedSongCount < playlistInfo.count && playlistInfo[playedSongCount].title == mediaItem.title){
if(playlistInfo[playedSongCount].key_song_number != -1){
KeySongImg.image = UIImage(named: "keysong_icon_" + playlistInfo[playedSongCount].key_song_number.description)
let splitedMinute = Double(selectedMinute)/Double(selectedSplit)
playedKeySongCount+=1
let time_soon = splitedMinute * Double(playedKeySongCount)
if(Int((time_soon * 10) % 10) == 0){
Message.text = "もうすぐ\n\(Int(time_soon))分"
}else {
Message.text = "もうすぐ\n\(String(format: "%.1f",time_soon))分"
}
} else {
KeySongImg.image = nil
Message.text = ""
}
playedSongCount+=1
}
if let artwork = mediaItem.artwork{
let image = artwork.imageWithSize(SongImg.bounds.size)
SongImg.image = image
} else {
SongImg.image = nil
SongImg.backgroundColor = UIColor.whiteColor()
}
}
// タイマー
// Fired once per second while playing: advances the countdown, refreshes
// the h:mm:ss label and progress bar, credits one minute to the
// motivation tree on each whole elapsed minute, and stops the timer when
// the playlist duration has fully elapsed.
func countUpdate(sender: NSTimer){
countDown-=1
if((selectedMinute*60 - countDown) % (1*60) == 0){
// Update the motivation-tree stats once per elapsed minute.
treeUpdate(1)
}
let h = countDown / (60*60)
let m = countDown / 60 % 60
let s = countDown % 60
playlistCountDown.text = String(format: "%01d:%02d:%02d",h,m,s)
playlistProgressbar.setProgress(1.0-Float(countDown)/Float(selectedMinute*60), animated: true)
if(countDown == 0){
// Discard the timer; playback notifications keep running separately.
timer.invalidate()
return
}
}
//テーブルに表示する配列の総数を返す
func tableView(tableView: UITableView, numberOfRowsInSection section: Int) -> Int {
return playlistMediaItem.count
}
// UITableViewDataSource: configures one playlist row — artwork (or gray
// placeholder), title, mm:ss duration and key-song badge.
func tableView(tableView: UITableView, cellForRowAtIndexPath indexPath: NSIndexPath) -> UITableViewCell {
let cell = tableView.dequeueReusableCellWithIdentifier("playSongCell") as! CustomPlaySongTableViewCell
// Artwork from the media library, sized to the cell's image view.
if let artwork = playlistMediaItem[indexPath.row].artwork {
let image = artwork.imageWithSize(cell.SongImg.bounds.size)
cell.SongImg.image = image
} else {
cell.SongImg.image = nil
cell.SongImg.backgroundColor = UIColor.lightGrayColor()
}
cell.SongTitle.text = playlistInfo[indexPath.row].title
// Duration is stored in seconds; render as m:ss.
let minute = Int(round(playlistInfo[indexPath.row].duration))/60
let second = Int(round(playlistInfo[indexPath.row].duration))%60
cell.SongDuration.text = String(format: "%d:%02d",minute,second)
cell.setKeySongImginPlaylist(playlistInfo[indexPath.row].key_song_number)
return cell
}
// Credits `addMinute` minutes of listening time to today's leaf of the
// motivation tree, creating the tree and/or today's leaf on first use.
func treeUpdate(addMinute: Int){
if(TreeData.TreeCount() == 0){// No tree yet: create an (unnamed) one.
let tree = TreeData.create("")
tree.save()
}
let calendar = NSCalendar(identifier: NSCalendarIdentifierGregorian)
let lastLeaf = TreeData.getLastLeaf()
var treeInfo = TreeData.getLastTreeInfo()
var leaf = TreeLeafData()
// Start a fresh leaf when the newest leaf belongs to a previous day.
if(calendar!.isDate(lastLeaf.UpdateDate, inSameDayAsDate: NSDate()) == false){
leaf = TreeLeafData.create(treeInfo.id, totalTime_min: 0, leafNumber: lastLeaf.leafNumber+1, UpdateDate: NSDate())
} else {
leaf = lastLeaf
}
// NOTE(review): TreeLeafData.realm and TreeData.realm are distinct `try! Realm()`
// instances; this relies on both resolving to the same default Realm — confirm.
try! TreeLeafData.realm.write{
leaf.totalTime_min += addMinute
TreeData.realm.add(leaf, update: true)
}
}
/*
// MARK: - Navigation
// In a storyboard-based application, you will often want to do a little preparation before navigation
override func prepareForSegue(segue: UIStoryboardSegue, sender: AnyObject?) {
// Get the new view controller using segue.destinationViewController.
// Pass the selected object to the new view controller.
}
*/
}
<file_sep>/ThirtyPlayList/ThirtyPlayList/CustomHeaderFooterView.swift
//
// CustomHeaderFooterView.swift
// ThirtyPlayList
//
// Created by 坂田 和也 on 2016/09/19.
// Copyright © 2016年 SKT. All rights reserved.
//
import UIKit
// Delegate used by a section header to toggle the `use` flag of every song in
// its section at once.
protocol SongHeaderCellDelegate {
func updateUseableInSameSection(section: Int,use: Bool)
}
// Collapsible table-section header: expand/collapse arrow on the left, a section
// title, and an on/off switch that enables/disables the whole section.
class CustomHeaderFooterView: UITableViewHeaderFooterView {
var section: Int = 0
var arrow = UIImageView()
var headerTitle = UILabel()
var headerSwitch = UISwitch()
var delegate: SongHeaderCellDelegate?
// Builds the subviews and their Auto Layout constraints in code.
// NOTE(review): constraints with multiplier 0.0 reduce to constant-only
// (fixed-size) constraints — presumably intentional; confirm.
override init(reuseIdentifier: String?) {
super.init(reuseIdentifier: reuseIdentifier)
// Arrow: 10pt from the leading edge, vertically centered, fixed 30x30.
self.arrow.translatesAutoresizingMaskIntoConstraints = false
self.contentView.addSubview(self.arrow)
self.contentView.addConstraints([
NSLayoutConstraint(item: arrow, attribute: .Leading, relatedBy: .Equal, toItem: self.contentView, attribute: .Leading, multiplier: 1.0, constant: 10),
NSLayoutConstraint(item: arrow, attribute: .CenterY, relatedBy: .Equal, toItem: self.contentView, attribute: .CenterY, multiplier: 1.0, constant: 0),
NSLayoutConstraint(item: arrow,
attribute: NSLayoutAttribute.Width,
relatedBy: NSLayoutRelation.Equal,
toItem: self.contentView,
attribute: NSLayoutAttribute.Width,
multiplier: 0.0,
constant: 30),
NSLayoutConstraint(item: arrow,
attribute: NSLayoutAttribute.Height,
relatedBy: NSLayoutRelation.Equal,
toItem: self.contentView,
attribute: NSLayoutAttribute.Width,
multiplier: 0.0,
constant: 30)])
// Switch: 12pt from the right edge, vertically centered; toggles the section.
self.headerSwitch.translatesAutoresizingMaskIntoConstraints = false
self.contentView.addSubview(self.headerSwitch)
self.contentView.addConstraints([
NSLayoutConstraint(item: headerSwitch, attribute: .Right, relatedBy: .Equal, toItem: self.contentView, attribute: .Right, multiplier: 1.0, constant: -12),
NSLayoutConstraint(item: headerSwitch, attribute: .CenterY, relatedBy: .Equal, toItem: self.contentView, attribute: .CenterY, multiplier: 1.0, constant: 0)])
headerSwitch.addTarget(self, action: #selector(CustomHeaderFooterView.onClickHeaderSwitch(_:)), forControlEvents: UIControlEvents.ValueChanged)
headerSwitch.onTintColor = UIColor(red: 72/255, green: 187/255, blue: 255/255, alpha: 1.0)
// Title: 50pt from the leading edge (clears the arrow), fixed 30pt x 200pt.
self.headerTitle.translatesAutoresizingMaskIntoConstraints = false
self.contentView.addSubview(self.headerTitle)
self.contentView.addConstraints([
NSLayoutConstraint(item: headerTitle, attribute: .Leading, relatedBy: .Equal, toItem: self.contentView, attribute: .Leading, multiplier: 1.0, constant: 50),
NSLayoutConstraint(item: headerTitle, attribute: .CenterY, relatedBy: .Equal, toItem: self.contentView, attribute: .CenterY, multiplier: 1.0, constant: 0),
NSLayoutConstraint(item: headerTitle,
attribute: NSLayoutAttribute.Height,
relatedBy: NSLayoutRelation.Equal,
toItem: self.contentView,
attribute: NSLayoutAttribute.Height,
multiplier: 0.0,
constant: 30)
, NSLayoutConstraint(item: headerTitle, attribute: .Width, relatedBy: .Equal, toItem: self.contentView, attribute: .Width, multiplier: 0.0, constant: 200)
])
headerTitle.textColor = UIColor.grayColor()
}
// Storyboard/NIB instantiation is not supported for this header.
required init?(coder aDecoder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
// Switch handler: forwards the new on/off state for this section to the delegate.
internal func onClickHeaderSwitch(sender: UISwitch){
updateUseableInSameSection(section,use: sender.on)
if sender.on {
print("on")
}
else {
print("off")
}
}
// Shows the up/down arrow matching the section's expanded state.
func setExpanded(expanded: Bool) {
if(expanded == true){
arrow.image = UIImage(named: "arrow_up")
} else {
arrow.image = UIImage(named: "arrow_down")
}
}
// Delegate forwarder (kept as a separate method so tests/subclasses can hook it).
func updateUseableInSameSection(section: Int,use: Bool){
delegate?.updateUseableInSameSection(section, use: use)
}
/*
// Only override drawRect: if you perform custom drawing.
// An empty implementation adversely affects performance during animation.
override func drawRect(rect: CGRect) {
// Drawing code
}
*/
}
<file_sep>/ThirtyPlayList/ThirtyPlayList/TreeLeafData.swift
//
// TreeLeafData.swift
// ThirtyPlayList
//
// Created by 坂田 和也 on 2016/09/23.
// Copyright © 2016年 SKT. All rights reserved.
//
import RealmSwift
// Realm model: one "leaf" of the motivation tree — the minutes listened on a
// single calendar day, linked to its tree via `treeId`.
class TreeLeafData: Object {
static let realm = try! Realm()
dynamic var id = 0 // primary key (hand-rolled auto-increment via lastId())
dynamic var treeId = 0 // owning TreeData.id
dynamic var totalTime_min = 0 // minutes accumulated on this day
dynamic var leafNumber = 0 // position of the leaf on the tree
dynamic var UpdateDate = NSDate() // the day this leaf belongs to
override static func primaryKey() -> String? {
return "id"
}
// Builds (but does not persist) a new leaf with the next free id.
static func create(treeId: Int, totalTime_min: Int, leafNumber: Int, UpdateDate: NSDate) -> TreeLeafData {
let leaf = TreeLeafData()
leaf.id = lastId()
leaf.treeId = treeId
leaf.totalTime_min = totalTime_min
leaf.leafNumber = leafNumber
leaf.UpdateDate = UpdateDate
return leaf
}
// NOTE(review): despite the name this returns the *next* free primary-key
// value (last id + 1, or 1 for an empty table).
static func lastId() -> Int {
if let tail_leaf = realm.objects(TreeLeafData).last {
return tail_leaf.id+1
} else {
return 1
}
}
// Persists this leaf in its own write transaction.
func save(){
try! TreeLeafData.realm.write{
TreeLeafData.realm.add(self)
}
}
}<file_sep>/ThirtyPlayList/ThirtyPlayList/SongList.swift
//
// Song.swift
// ThirtyPlayLIst_Bata
//
// Created by 坂田 和也 on 2016/08/23.
// Copyright © 2016年 SKT. All rights reserved.
//
import Foundation
// A candidate playlist fragment: the SongData ids it contains and their combined
// playback duration in seconds. (The memberwise initializer order — songsId,
// duration — is part of the public interface; do not reorder the fields.)
struct SongList {
var songsId: [Int]
var duration: Double
}<file_sep>/ThirtyPlayList/ThirtyPlayList/SongData.swift
//
// Song.swift
// ThirtyPlayLIst_Bata
//
// Created by 坂田 和也 on 2016/08/20.
// Copyright © 2016年 SKT. All rights reserved.
//
import RealmSwift
import MediaPlayer
// Realm model holding the metadata of every song found on the device.
// (Rewrite: the four copies of if-let nil-unwrapping boilerplate are replaced
// with the nil-coalescing operator; all query strings and the public static
// interface are unchanged.)
class SongData: Object {
    static let realm = try! Realm()
    dynamic var id = 0                 // primary key (hand-rolled auto-increment)
    dynamic var name = ""              // track title
    dynamic var artist = ""            // artist name ("" when unknown)
    dynamic var album = ""             // album name ("" when unknown)
    dynamic var genre = ""             // genre name ("" when unknown)
    dynamic var duration: Double = 0.0 // playback length in seconds
    dynamic var use = true             // whether the user allows this song in playlists

    // `id` is the primary key.
    override static func primaryKey() -> String? {
        return "id"
    }

    // Builds (but does not persist) a SongData, substituting "" for a missing
    // artist/album/genre. A nil title should not occur in practice; the
    // fallback literal "unkown" (sic) is kept for compatibility with rows that
    // were stored by the previous implementation.
    static func create(name: String?, artist: String?, album: String?, genre: String?, duration: Double) -> SongData {
        let song = SongData()
        song.id = lastId()
        song.name = name ?? "unkown"
        song.artist = artist ?? ""
        song.album = album ?? ""
        song.genre = genre ?? ""
        song.duration = duration
        song.use = true
        return song
    }

    // Hand-rolled auto-increment: next free primary-key value
    // (last id + 1, or 1 for an empty table).
    static func lastId() -> Int {
        if let tail_song = realm.objects(SongData).last {
            return tail_song.id + 1
        } else {
            return 1
        }
    }

    // True when a song with this title/artist/album is already stored.
    // A nil title never matches (titled songs are assumed to always have a name).
    static func existCheckByInfo(name: String?, artist: String?, album: String?) -> Bool {
        guard let name_ = name else { return false }
        let song = realm.objects(SongData).filter("name = %@ AND artist = %@ AND album = %@", name_, artist ?? "", album ?? "")
        return song.count != 0
    }

    // Looks up a song by title/artist/album and returns its id, or -1 when it
    // is missing, disabled (use == false), or the title is nil.
    static func getIdSongByInfo(name: String?, artist: String?, album: String?) -> Int {
        guard let name_ = name else { return -1 }
        let song = realm.objects(SongData).filter("name = %@ AND artist = %@ AND album = %@", name_, artist ?? "", album ?? "")
        if(song.count == 0 || song[0].use == false){
            return -1
        }
        return song[0].id
    }

    // Looks up songs by title/artist/album and returns the raw Realm Results.
    // A nil title is queried as "" (matches the original behavior).
    static func getSongDataByInfo(name: String?, artist: String?, album: String?) -> Results<SongData> {
        return realm.objects(SongData).filter("name = %@ AND artist = %@ AND album = %@", name ?? "", artist ?? "", album ?? "")
    }

    // Persists this song in its own write transaction.
    func save(){
        try! SongData.realm.write{
            SongData.realm.add(self)
        }
    }

    static func deleteSong(song: SongData){
        try! realm.write {
            realm.delete(song)
        }
    }

    // Synchronizes the SongData table with the music on the device:
    //  - inserts newly found (non-cloud, non-audiobook) tracks,
    //  - deletes rows whose track no longer exists on the device,
    //  - wipes all stored playlists when anything changed (they must be rebuilt).
    static func DataSet(){
        var update_flag = false // set when the device library differs from the table
        let query = MPMediaQuery.songsQuery()
        // Exclude iCloud items; only locally available tracks are usable.
        query.addFilterPredicate(MPMediaPropertyPredicate(value: false, forProperty: MPMediaItemPropertyIsCloudItem))
        if let songs = query.items {
            for song in songs {
                // Skip tracks that are already stored.
                if(SongData.existCheckByInfo(song.title, artist: song.artist, album: song.albumTitle) == true){
                    continue
                }
                // Use the genre to filter out obvious non-music. Genre is not
                // always set, so some non-music may still slip through.
                if(song.genre == "Spoken & Audio"){
                    continue
                }
                let s = SongData.create(
                    song.title,
                    artist: song.artist,
                    album: song.albumTitle,
                    genre: song.genre,
                    duration: song.playbackDuration)
                s.save()
                update_flag = true // a new song was added
            }
        }
        // Remove rows whose song has disappeared from the device.
        let songsData = realm.objects(SongData)
        for songData in songsData {
            var notFound = true
            if let songs = query.items {
                for song in songs {
                    let name_: String = song.title! // titled songs always have a name
                    let artist_ = song.artist ?? ""
                    let album_ = song.albumTitle ?? ""
                    if(songData.name == name_ && songData.artist == artist_ && songData.album == album_){
                        notFound = false
                        break
                    }
                }
            }
            if(notFound == true){
                deleteSong(songData)
                update_flag = true // a song was removed
            }
        }
        // Any change invalidates the stored playlists; they will be rebuilt.
        if(update_flag){
            PlayListData.AllDelete()
        }
    }

    // Number of songs whose `use` flag is true.
    static func UseableSongCount() -> Int {
        var count = 0
        for song in realm.objects(SongData) where song.use {
            count += 1
        }
        return count
    }

    // Snapshot of every stored song as SongInfo (key-song number included).
    static func getAllSongInfo() -> [SongInfo] {
        var songinfo: [SongInfo] = []
        for song in realm.objects(SongData) {
            let key_song_number = KeySongData.keySongCheck(song.id)
            songinfo.append(SongInfo(id: song.id, title: song.name, use: song.use,
                artist: song.artist, album: song.album, duration: song.duration, key_song_number: key_song_number))
        }
        return songinfo
    }

    // Title substring search; returns the matching songs as SongInfo.
    static func getSongInfoByTitleSearch(searchText: String) -> [SongInfo] {
        var songinfo: [SongInfo] = []
        for song in realm.objects(SongData).filter("name CONTAINS %@", searchText) {
            let key_song_number = KeySongData.keySongCheck(song.id)
            songinfo.append(SongInfo(id: song.id, title: song.name, use: song.use,
                artist: song.artist, album: song.album, duration: song.duration, key_song_number: key_song_number))
        }
        return songinfo
    }

    // Fills `songinfo` with the songs for the given ids, in order.
    // NOTE: stops (leaving the list truncated) at the first unknown id —
    // behavior preserved from the original implementation.
    static func getSongInfoBysongsId(inout songinfo: [SongInfo], songsId: [Int]){
        songinfo.removeAll()
        for songId in songsId {
            let song = realm.objects(SongData).filter("id = %@", songId)
            if(song.count == 0){
                return
            }
            let key_song_number = KeySongData.keySongCheck(songId)
            songinfo.append(SongInfo(id: songId, title: song[0].name, use: song[0].use,
                artist: song[0].artist, album: song[0].album, duration: song[0].duration, key_song_number: key_song_number))
        }
    }

    // Persists a new `use` flag for the song with the given id (no-op when absent).
    static func updateUseableById(songId: Int, use: Bool) {
        let song = realm.objects(SongData).filter("id = %@", songId)
        if(song.count == 0){
            return
        }
        let s = song[0]
        try! SongData.realm.write{
            s.use = use
            SongData.realm.add(s, update: true)
        }
    }
}
<file_sep>/ThirtyPlayList/ThirtyPlayList/CustomSongTableViewCell.swift
//
// CustomSongTableViewCell.swift
// ThirtyPlayList
//
// Created by 坂田 和也 on 2016/08/27.
// Copyright © 2016年 SKT. All rights reserved.
//
import UIKit
protocol SongCellDelegate {
func updateUseable(section: Int, index: Int,use: Bool)
}
// Table cell for one song in the song-selection list: title, key-song badge,
// and a switch that enables/disables the song for playlist generation.
class CustomSongTableViewCell: UITableViewCell {
public var songId: Int = 0 // SongData.id backing this row
var section: Int = 0 // table position, forwarded to the delegate
var index: Int = 0
var delegate: SongCellDelegate?
// Switch toggled: persist the new `use` flag and notify the delegate.
@IBAction func SongUseSwitch(sender: UISwitch) {
SongData.updateUseableById(songId,use: sender.on)
updateUseable(section, index: index, use: sender.on)
}
// NOTE(review): outlet named after the switch but typed NSLayoutConstraint —
// looks like a mis-wired storyboard connection; confirm in Interface Builder.
@IBOutlet weak var SongUseSwitch_Outlet: NSLayoutConstraint!
@IBOutlet weak var SongTitle: UILabel!
@IBOutlet weak var keysong_img: UIImageView!
override func awakeFromNib() {
super.awakeFromNib()
// Initialization code
}
override func setSelected(selected: Bool, animated: Bool) {
super.setSelected(selected, animated: animated)
// Configure the view for the selected state
}
// Shows the numbered key-song icon, or the "unset" placeholder for -1.
func setKeySongImg(key_song_number: Int){
if(key_song_number == -1){
keysong_img.image = UIImage(named: "keysong_icon_unset")
} else {
keysong_img.image = UIImage(named: "keysong_icon_" + key_song_number.description)
}
}
// Delegate forwarder.
func updateUseable(section: Int, index: Int,use: Bool){
delegate?.updateUseable(section, index: index, use: use)
}
}
<file_sep>/ThirtyPlayList/ThirtyPlayList/TreeData.swift
//
// TreeData.swift
// ThirtyPlayList
//
// Created by 坂田 和也 on 2016/08/31.
// Copyright © 2016年 SKT. All rights reserved.
//
import RealmSwift
// Realm model: a motivation tree. Each tree owns TreeLeafData rows (one per
// day of listening); only the most recently created tree is the "active" one.
// (Fixes: removed an unused `calendar` local in getLastTreeInfo, renamed the
// misspelled local `reaf`; interface unchanged.)
class TreeData: Object {
    static let realm = try! Realm()
    dynamic var id = 0        // primary key (hand-rolled auto-increment)
    dynamic var treeName = "" // user-chosen display name ("" by default)

    override static func primaryKey() -> String? {
        return "id"
    }

    // Builds (but does not persist) a tree with the next free id.
    static func create(treeName: String) -> TreeData {
        let tree = TreeData()
        tree.id = lastId()
        tree.treeName = treeName
        return tree
    }

    // Next free primary-key value (last id + 1, or 1 for an empty table).
    static func lastId() -> Int {
        if let tail_tree = realm.objects(TreeData).last {
            return tail_tree.id + 1
        } else {
            return 1
        }
    }

    // Persists this tree in its own write transaction.
    func save(){
        try! TreeData.realm.write{
            TreeData.realm.add(self)
        }
    }

    // Number of trees ever created.
    static func TreeCount() -> Int {
        return realm.objects(TreeData).count
    }

    // Aggregates the newest tree's leaves into a TreeInfo snapshot
    // (total minutes, total days, capped leaf count, per-leaf minutes).
    // Precondition: at least one tree exists.
    static func getLastTreeInfo() -> TreeInfo {
        let tail_tree = realm.objects(TreeData).last!
        let leaves = realm.objects(TreeLeafData).filter("treeId = %@", tail_tree.id).sorted("id", ascending: true)
        if(leaves.count == 0){
            return TreeInfo(id: tail_tree.id, treeName: tail_tree.treeName, totalTime_min: 0, totalDay: 0, haveLeafCount: 0,
                leafTime: [])
        }
        var totalTime = 0
        var totalDay = 0
        var leafTime: [Int] = []
        for leaf in leaves {
            totalTime += leaf.totalTime_min
            totalDay += 1
            leafTime.append(leaf.totalTime_min)
        }
        // The drawn tree has at most 31 leaves.
        var haveLeafCount = totalDay
        if(haveLeafCount > 31){
            haveLeafCount = 31
        }
        return TreeInfo(id: tail_tree.id, treeName: tail_tree.treeName, totalTime_min: totalTime, totalDay: totalDay, haveLeafCount: haveLeafCount,
            leafTime: leafTime)
    }

    // Returns the newest leaf of the newest tree. When the tree has no leaves
    // yet, returns an unpersisted placeholder dated *yesterday*, so callers
    // will treat "today" as a new day and create a real leaf.
    // Precondition: at least one tree exists.
    static func getLastLeaf() -> TreeLeafData {
        let tail_tree = realm.objects(TreeData).last!
        let leaves = realm.objects(TreeLeafData).filter("treeId = %@", tail_tree.id).sorted("id", ascending: true)
        if(leaves.count == 0){
            let calendar = NSCalendar(identifier: NSCalendarIdentifierGregorian)!
            let placeholderLeaf = TreeLeafData.create(tail_tree.id, totalTime_min: 0, leafNumber: 0, UpdateDate: calendar.dateByAddingUnit(.Day, value: -1, toDate: NSDate(),
                options: NSCalendarOptions())!)
            return placeholderLeaf
        }
        return leaves.last!
    }
}
<file_sep>/ThirtyPlayList/ThirtyPlayList/TreeInfo.swift
//
// TreeInfo.swift
// ThirtyPlayList
//
// Created by 坂田 和也 on 2016/08/31.
// Copyright © 2016年 SKT. All rights reserved.
//
import Foundation
// Immutable snapshot of a tree's aggregate state, built by
// TreeData.getLastTreeInfo(). (The memberwise initializer order is part of the
// public interface; do not reorder the fields.)
struct TreeInfo {
var id: Int // TreeData.id
var treeName: String
var totalTime_min: Int // sum of minutes over all leaves
var totalDay: Int // number of leaves (days with listening time)
var haveLeafCount: Int // totalDay capped at 31 (max drawable leaves)
var leafTime: [Int] // minutes per leaf, oldest first
}<file_sep>/ThirtyPlayList/ThirtyPlayList/DetectPlayList.swift
//
// DetectPlayList.swift
// ThirtyPlayLIst_Bata
//
// Created by 坂田 和也 on 2016/08/23.
// Copyright © 2016年 SKT. All rights reserved.
//
import Foundation
import RealmSwift
// Calculator that assembles a playlist of an arbitrary target length from the
// songs stored on the device, with one "key song" closing each equal-length
// segment of the playlist.
// (Fixes: unused loop variables replaced with `_`, needless `var`s made `let`,
// comments translated to English; the algorithm itself is unchanged.)
class Detector {
    // All usable non-key songs plus every 2-song combination of them,
    // sorted ascending by duration (required by binarySearch).
    private var Songs: [SongList] = []
    // Key songs — the tracks that must end each split segment.
    private var KeySongs: [SongList] = []

    init() {
        let realm = try! Realm()                  // database
        let key_songs = realm.objects(KeySongData) // KeySongData table
        let songs = realm.objects(SongData)        // SongData table
        // Resolve each key song's SongData row and store it as a 1-song list.
        for key_song in key_songs {
            let s = realm.objects(SongData).filter("id = %@", key_song.songdata_id)
            if(s.count != 0){
                KeySongs.append(SongList(songsId: [s[0].id], duration: s[0].duration))
            }
        }
        // Store every enabled song that is not a key song.
        for song in songs {
            var uninclude_keysong = true
            for keysong in KeySongs {
                if(song.id == keysong.songsId[0]){
                    uninclude_keysong = false
                    break
                }
            }
            if(uninclude_keysong == true){
                if(song.use == true){
                    Songs.append(SongList(songsId: [song.id], duration: song.duration))
                }
            }
        }
        // Additionally treat every unordered pair of single songs as one
        // combined "song", so longer gaps can be filled with a single pick.
        let single_songlist_count = Songs.count // number of single songs
        var i = 0
        while i < single_songlist_count {
            var j = i + 1
            while j < single_songlist_count {
                Songs.append(
                    SongList(songsId: Songs[i].songsId + Songs[j].songsId,
                        duration: Songs[i].duration + Songs[j].duration)
                )
                j += 1
            }
            i += 1
        }
        // Nothing to sort or search in.
        if(Songs.count == 0){
            return
        }
        // Sort ascending by duration.
        quickSort(&Songs, first: 0, last: Songs.count - 1)
    }

    // Builds a playlist of `minute` minutes split into `split` equal segments,
    // each segment ending with a distinct key song. Returns an empty SongList
    // when the inputs are out of range or no valid combination is found.
    // minute: 15...90, split: 1...6.
    func makePlayList(minute: Int, split: Int) -> SongList {
        var playlist: SongList = SongList(songsId: [], duration: 0)
        // Too short is unlikely to be solvable; only 6 key songs are ever set,
        // so more than 90 minutes is rejected as well.
        if(minute < 15 || minute > 90){
            return SongList(songsId: [], duration: 0)
        }
        // Not enough material to search in.
        if(Songs.count <= 10 || KeySongs.count < 6){
            return SongList(songsId: [], duration: 0)
        }
        if(split < 1 || split > 6){
            return SongList(songsId: [], duration: 0)
        }
        let splitedDuration = Double(minute) / Double(split) // minutes per segment
        var use_keysong = split - 1 // key song used for the current segment (counts down)
        for _ in 0..<split {
            var target_success_flag = false
            // Random picks are involved, so retry up to 3 times per segment.
            for _ in 0..<3 {
                var target_duration = splitedDuration * 60
                var target_songlist = SongList(songsId: [], duration: 0)
                // Reserve room for the segment's key song, then pick random
                // songs until at most 17 minutes remain for the exact solver.
                target_duration -= KeySongs[use_keysong].duration
                while(target_duration > 17.0 * 60.0){
                    let rand = Int(arc4random_uniform(UInt32(Songs.count)))
                    var target_songsId = target_songlist.songsId + Songs[rand].songsId
                    if(uniqueCheck(&target_songsId)){
                        target_songlist.songsId += Songs[rand].songsId
                        target_songlist.duration += Songs[rand].duration
                        target_duration -= Songs[rand].duration
                    }
                }
                let results = detectPlayList(target_duration)
                if(results.count == 0){
                    continue
                } else {
                    // Try 3 random candidates first...
                    for _ in 0..<3 {
                        let rand = Int(arc4random_uniform(UInt32(results.count)))
                        var target_songsId = target_songlist.songsId + results[rand].songsId
                        if(uniqueCheck(&target_songsId)){
                            playlist.duration += target_songlist.duration + results[rand].duration
                            playlist.songsId += target_songlist.songsId + results[rand].songsId
                            target_success_flag = true
                            break
                        }
                    }
                    if(target_success_flag == true){
                        break
                    }
                    // ...then fall back to a linear scan over all candidates.
                    for result in results {
                        var target_songsId = target_songlist.songsId + result.songsId
                        if(uniqueCheck(&target_songsId)){
                            playlist.duration += target_songlist.duration + result.duration
                            playlist.songsId += target_songlist.songsId + result.songsId
                            target_success_flag = true
                            break
                        }
                    }
                    if(target_success_flag == true){
                        break
                    }
                }
            }
            if(target_success_flag == false){
                // No combination found for this segment — give up entirely.
                return SongList(songsId: [], duration: 0)
            }
            // Close the segment with its key song and move to the next one.
            playlist.duration += KeySongs[use_keysong].duration
            playlist.songsId += KeySongs[use_keysong].songsId
            use_keysong -= 1
        }
        return playlist
    }

    // Finds combinations from `Songs` whose total length is within a tolerance
    // below `duration` seconds, widening the tolerance from 0.1s to 1.0s only
    // if nothing is found. Uses binary search over the sorted `Songs` array.
    func detectPlayList(duration: Double) -> [SongList] {
        var detectSongs: [SongList] = []
        let range = [0.1, 1.0]
        for r in range {
            for i in 0..<Songs.count {
                // Fix Songs[i] as one part and binary-search the complement.
                var results = binarySearch(duration - Songs[i].duration, range: r)
                if(results.count > 0){ // at least one match
                    for j in 0..<results.count {
                        // Combine Songs[i] with the match.
                        results[j].duration += Songs[i].duration
                        results[j].songsId += Songs[i].songsId
                        // Reject combinations that repeat a track.
                        if(uniqueCheck(&results[j].songsId)){
                            detectSongs.append(results[j])
                        }
                    }
                }
            }
            if(detectSongs.count > 0){
                break
            }
        }
        return detectSongs
    }

    // In-place quicksort of `songs` ascending by duration (pivot = first element).
    private func quickSort(inout songs: [SongList], first: Int, last: Int){
        var right = last
        var left = first
        let pivot = first
        while(left < right){
            while(songs[left].duration <= songs[pivot].duration && left < last){ left += 1 }
            while(songs[right].duration > songs[pivot].duration && right > first){ right -= 1 }
            if(left < right){
                (songs[right], songs[left]) = (songs[left], songs[right])
            }
        }
        // Move the pivot into its final position, then recurse on both halves.
        (songs[pivot], songs[right]) = (songs[right], songs[pivot])
        if((right - 1) - first > 0){
            quickSort(&songs, first: first, last: right - 1)
        }
        if(last - left > 0){
            quickSort(&songs, first: left, last: last)
        }
    }

    // Binary search over the duration-sorted `Songs` for all entries whose
    // duration lies in [duration - range, duration]; returns every match.
    private func binarySearch(duration: Double, range: Double) -> [SongList] {
        var left = -1
        var right = Songs.count
        var songs: [SongList] = []
        while (right - left > 1){
            let mid = (left + right) / 2
            if (Songs[mid].duration <= duration && Songs[mid].duration >= duration - range){
                // Hit: expand upwards to the last entry still <= duration,
                // then collect downwards while inside the tolerance window.
                var i = mid
                while(i + 1 < Songs.count && Songs[i + 1].duration <= duration){ i += 1 }
                while(i >= 0 && Songs[i].duration >= duration - range){
                    songs.append(Songs[i])
                    i -= 1
                }
                return songs
            } else if(Songs[mid].duration < duration - range) {
                left = mid
            } else {
                right = mid
            }
        }
        return songs
    }

    // True when the given id array contains no duplicates.
    func uniqueCheck(inout a: [Int]) -> Bool {
        var i = 0
        var unique = true
        while(i < a.count){
            var j = i + 1
            while(j < a.count){
                if(a[i] == a[j]){
                    unique = false
                    break
                }
                j += 1
            }
            if(unique == false){ break }
            i += 1
        }
        return unique
    }
}<file_sep>/ThirtyPlayList/ThirtyPlayList/ThirdViewController.swift
//
// ThirdViewController.swift
// ThirtyPlayList
//
// Created by 坂田 和也 on 2016/08/30.
// Copyright © 2016年 SKT. All rights reserved.
//
import UIKit
// "Motivation tree" tab: renders a tree whose trunk stage and leaf colors
// reflect the user's accumulated listening time, and lets the user rename or
// reset the tree.
class ThirdViewController: UIViewController, UITextFieldDelegate {
@IBOutlet weak var TreeGroundImg: UIImageView!
@IBOutlet weak var treeNameTextField: UITextField!
@IBOutlet weak var toDayTotalTime: UILabel!
@IBOutlet weak var totalTimeLabel: UILabel!
@IBOutlet weak var totalDayLabel: UILabel!
@IBOutlet weak var RunningDayLabel: UILabel!
@IBOutlet weak var MessageLabel: UILabel!
// Reset button: asks for confirmation, then starts a brand-new tree.
@IBAction func TapResetButton(sender: AnyObject) {
let alertController = UIAlertController(
title: "やる木のリセット",
message: "やる木をリセットして、新しくやる木を育てますか?",
preferredStyle: .Alert)
let resetAction = UIAlertAction(
title: "OK",
style: .Default,
handler: { action in
self.resetTree()
})
let cancelAction = UIAlertAction(
title: "キャンセル",
style: .Cancel,
handler: { action in
// print("no")
})
alertController.addAction(resetAction)
alertController.addAction(cancelAction)
self.presentViewController(alertController, animated: true,completion: nil)
}
// Frame shared by the trunk image and every leaf layer.
var TreeCGRect = CGRect()
var TreeTrunkView = UIImageView()
var TreeLeaves: [UIImageView] = []
// Leaf capacity per trunk stage 1...4 (stage 5 has no leaves and ends growth).
let MaxLeavesNum = [2,7,16,30,0]
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view.
}
// Layout is final here, so the drawing frame can be taken from TreeGroundImg.
override func viewDidAppear(animated: Bool) {
let TreeX = TreeGroundImg.frame.origin.x
let TreeY = TreeGroundImg.frame.origin.y
let TreeWidth = TreeGroundImg.frame.width
let TreeHeight = TreeGroundImg.frame.height
TreeCGRect = CGRectMake(TreeX, TreeY, TreeWidth, TreeHeight)
displayTreeAndInfo()
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
// Rebuilds the whole tree view (labels, trunk image, tinted leaf layers)
// from the latest TreeData snapshot.
func displayTreeAndInfo(){
// Drop the previously drawn leaves before redrawing.
for i in 0..<TreeLeaves.count {
TreeLeaves[i].removeFromSuperview()
}
TreeLeaves.removeAll()
// First run: create an (unnamed) tree.
if(TreeData.TreeCount() == 0){
let tree = TreeData.create("")
tree.save()
}
let treeInfo = TreeData.getLastTreeInfo()
let lastLeaf = TreeData.getLastLeaf()
treeNameTextField.text = treeInfo.treeName
toDayTotalTime.text = "\(lastLeaf.totalTime_min)分"
totalTimeLabel.text = "\(treeInfo.totalTime_min)分"
totalDayLabel.text = "\(treeInfo.totalDay)日"
// var treeimg_num = treeInfo.totalDay
// if( treeimg_num > 31){
// treeimg_num = 31
// }
// treeImg.image = UIImage(named: "yaruki_"+"\(treeimg_num)")
// Pick the trunk stage: the first stage whose capacity fits the leaf count.
var yaruki_type = 1
for i in 0..<MaxLeavesNum.count {
if(treeInfo.haveLeafCount <= MaxLeavesNum[i]){
yaruki_type = i+1
break
}
}
if(treeInfo.haveLeafCount > 30){
yaruki_type = 5
}
// Draw the trunk.
TreeTrunkView.image = UIImage(named: "yaruki"+yaruki_type.description)
TreeTrunkView.frame = TreeCGRect
self.view.addSubview(TreeTrunkView)
// Stage 5 has no leaves — the tree is fully grown.
if(yaruki_type > 4){
MessageLabel.text = "1ヶ月継続、習慣化おめでとう!(これ以上やる木は成長しません)"
return
} else {
MessageLabel.text = ""
}
// Draw the leaves; each leaf is tinted by that day's listening minutes
// (white = not earned yet; warmer colors = more minutes).
for i in 1...MaxLeavesNum[yaruki_type-1] {
let leafImg = UIImageView()
leafImg.image = UIImage(named: "yaruki"+yaruki_type.description + "_" + i.description)?.imageWithRenderingMode(.AlwaysTemplate)
leafImg.frame = TreeCGRect
if(treeInfo.leafTime.count < i){
leafImg.tintColor = UIColor.whiteColor()
} else if(treeInfo.leafTime[i-1] < 15){
leafImg.tintColor = UIColor(red: 162/255, green: 255/255, blue: 159/255, alpha: 1.0)
} else if(treeInfo.leafTime[i-1] < 30){
leafImg.tintColor = UIColor(red: 174/255, green: 255/255, blue: 69/255, alpha: 1.0)
} else if(treeInfo.leafTime[i-1] < 45){
leafImg.tintColor = UIColor(red: 12/255, green: 205/255, blue: 2/255, alpha: 1.0)
} else if(treeInfo.leafTime[i-1] < 60){
leafImg.tintColor = UIColor(red: 21/255, green: 148/255, blue: 0/255, alpha: 1.0)
} else if(treeInfo.leafTime[i-1] < 75){
leafImg.tintColor = UIColor(red: 248/255, green: 234/255, blue: 7/255, alpha: 1.0)
} else if(treeInfo.leafTime[i-1] < 90){
leafImg.tintColor = UIColor(red: 240/255, green: 156/255, blue: 10/255, alpha: 1.0)
} else {
leafImg.tintColor = UIColor(red: 241/255, green: 82/255, blue: 43/255, alpha: 1.0)
}
TreeLeaves.append(leafImg)
self.view.addSubview(leafImg)
}
}
// UITextFieldDelegate: Return pressed — dismiss the keyboard and persist the
// new tree name (upsert by the current tree's id).
func textFieldShouldReturn(textField: UITextField) -> Bool {
textField.resignFirstResponder()
var treeInfo = TreeData.getLastTreeInfo()
treeInfo.treeName = treeNameTextField.text!
let tree = TreeData()
try! TreeData.realm.write{
tree.id = treeInfo.id
tree.treeName = treeInfo.treeName
TreeData.realm.add(tree, update: true)
}
return true
}
// Starts a brand-new (empty) tree and redraws the view.
func resetTree(){
let tree = TreeData.create("")
tree.save()
displayTreeAndInfo()
}
/*
// MARK: - Navigation
// In a storyboard-based application, you will often want to do a little preparation before navigation
override func prepareForSegue(segue: UIStoryboardSegue, sender: AnyObject?) {
// Get the new view controller using segue.destinationViewController.
// Pass the selected object to the new view controller.
}
*/
}
| 56415c08bb7c702605926c6851480ec424cf3317 | [
"Swift",
"Ruby"
] | 19 | Swift | thirtythirty/ThirtyPlayList_iOS | bd83ea9971af7615662d3ddb2f558a43c82b943b | a4bfc5f5b81807f52cb0003216b087ce2f734e03 |
refs/heads/master | <repo_name>superzyx/shellscripts<file_sep>/beforeit.sh
#!/usr/bin/env bash
#
# beforeit.sh — quick pre-setup for a fresh CentOS/RHEL host:
# stop and disable the firewall, switch SELinux off for the current boot,
# and install vim. Must be run as root.
# FIX: added the missing shebang; the script previously depended on whatever
# shell happened to invoke it.
systemctl stop firewalld && systemctl disable firewalld
setenforce 0
yum -y install vim
<file_sep>/README.md
# shellscripts
A small collection of Bash scripts for initial CentOS/RHEL server setup: firewall and SELinux teardown, an NTP-sync cron job, shell history settings, and basic package installation.
<file_sep>/initialSever.sh
#!/usr/bin/env bash
#
#author: superzyx
#date: 2019/07/30
#usage: initial server (CentOS/RHEL; run as root)
# Disable the firewall and SELinux (permanently via config, immediately via setenforce).
systemctl stop firewalld && systemctl disable firewalld
sed -ri 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
setenforce 0
# Install a cron job that re-syncs the clock every 7 days at midnight.
# FIX: the schedule was '* * */7 * *', which runs every minute of every
# matching day (1st, 8th, 15th, ...), not once per week.
mkdir -p /tasks
echo '0 0 */7 * * bash /tasks/ntpDate.sh' >> /var/spool/cron/$(whoami)
# Quoted heredoc delimiter: the helper script is written out verbatim.
cat << 'EOF' > /tasks/ntpDate.sh
#!/usr/bin/env bash
#
#author: superzyx
#date: 2019/07/30
#usage: update time
# FIX: the original test was '[! -f /usr/bin/ntpdate]' — '[!' is not a valid
# command, so the condition always failed and ntpdate was never installed
# before being called.
if [ ! -f /usr/bin/ntpdate ]; then
yum -y install ntpdate
fi
ntpdate -b ntp1.aliyun.com
EOF
# Larger, timestamped shell history for every login shell.
echo "export HISTSIZE=10000" >> /etc/profile
echo "export HISTTIMEFORMAT=\"%Y-%m-%d %H-%M-%S\"" >> /etc/profile
# Make the account databases immutable/append-only.
chattr +ai /etc/passwd /etc/shadow /etc/group
yum -y install vim bash-completion net-tools
# Exit 0 when the final install succeeded, 12 otherwise (kept for compatibility).
if [ $? -eq 0 ];then
exit 0
else
exit 12
fi
| f8b516eea8cabc216044084ec0f416ac9d5cd07c | [
"Markdown",
"Shell"
] | 3 | Shell | superzyx/shellscripts | d027759dc3e2df99ca1abc71bdce88563d76d9fe | 113c248a97d7867ed88a3f14b5c2d126503761b9 |
refs/heads/master | <repo_name>OliverKohlDSc/JavaCourse<file_sep>/Uebung7_Clock/src/gmbh/conteco/ClockPane.java
package gmbh.conteco;
import java.util.Calendar;
import java.util.GregorianCalendar;
import javafx.scene.layout.Pane;
import javafx.scene.paint.Color;
import javafx.scene.shape.Circle;
import javafx.scene.shape.Line;
import javafx.scene.text.Text;
/**
 * A JavaFX pane that draws an analog clock: the face, the 12/3/6/9 labels, and
 * second/minute/hour hands for the time held in {@code hour}/{@code minute}/
 * {@code second}.
 */
public class ClockPane extends Pane {
    private int hour;
    private int minute;
    private int second;
    // Fixed drawing area; paintClock() derives the radius and center from these.
    private double w = 250, h = 250;

    /** Creates a clock showing the current system time. */
    public ClockPane() {
        setCurrentTime();
    }

    /** Creates a clock showing the given time. */
    public ClockPane(int hour, int minute, int second) {
        this.hour = hour;
        this.minute = minute;
        this.second = second;
        paintClock();
    }

    public int getHour() {
        return hour;
    }

    public int getMinute() {
        return minute;
    }

    public int getSecond() {
        return second;
    }

    /** Sets the hour and redraws the clock. */
    public void setHour(int hour) {
        this.hour = hour;
        paintClock();
    }

    /** Sets the minute and redraws the clock. */
    public void setMinute(int minute) {
        this.minute = minute;
        paintClock();
    }

    /** Sets the second and redraws the clock. */
    public void setSecond(int second) {
        this.second = second;
        paintClock();
    }

    /** Reads the current wall-clock time and redraws the clock. */
    public void setCurrentTime() {
        Calendar calendar = new GregorianCalendar();
        this.hour = calendar.get(Calendar.HOUR_OF_DAY);
        this.minute = calendar.get(Calendar.MINUTE);
        this.second = calendar.get(Calendar.SECOND);
        paintClock();
    }

    /** Rebuilds all clock shapes from the current time fields. */
    protected void paintClock() {
        // FIX: was Math.min(w, h * 0.8 * 0.5), i.e. the minimum of the width and
        // the *scaled* height — only correct while w == h. Scale the minimum.
        double clockRadius = Math.min(w, h) * 0.8 * 0.5;
        double centerX = w / 2;
        double centerY = h / 2;
        Circle circle = new Circle(centerX, centerY, clockRadius);
        circle.setFill(Color.WHITE);
        circle.setStroke(Color.BLACK);
        Text text12 = new Text(centerX - 5, centerY - clockRadius + 12, "12");
        Text text9 = new Text(centerX - clockRadius + 3, centerY + 5, "9");
        Text text3 = new Text(centerX + clockRadius - 10, centerY + 3, "3");
        // FIX: the bottom label read "12"; it marks the 6 o'clock position.
        Text text6 = new Text(centerX - 3, centerY + clockRadius - 3, "6");
        // Second hand: one full circle = 60 seconds.
        double secondLength = clockRadius * 0.8;
        double secondX = centerX + secondLength * Math.sin(second * (2 * Math.PI / 60));
        double secondY = centerY - secondLength * Math.cos(second * (2 * Math.PI / 60));
        Line secondLine = new Line(centerX, centerY, secondX, secondY);
        secondLine.setStroke(Color.RED);
        // Minute hand: one full circle = 60 minutes.
        double minuteLength = clockRadius * 0.65;
        double minuteX = centerX + minuteLength * Math.sin(minute * (2 * Math.PI / 60));
        double minuteY = centerY - minuteLength * Math.cos(minute * (2 * Math.PI / 60));
        Line minuteLine = new Line(centerX, centerY, minuteX, minuteY);
        minuteLine.setStroke(Color.BLUE);
        // Hour hand: one full circle = 12 hours, advanced fractionally by minutes.
        // FIX: the angle was hour * (2*PI/60) * (2*PI/12) — two angular factors
        // multiplied together, pointing the hand in an essentially arbitrary
        // direction.
        double hourLength = clockRadius * 0.5;
        double hourAngle = (hour % 12 + minute / 60.0) * (2 * Math.PI / 12);
        double hourX = centerX + hourLength * Math.sin(hourAngle);
        double hourY = centerY - hourLength * Math.cos(hourAngle);
        Line hourLine = new Line(centerX, centerY, hourX, hourY);
        hourLine.setStroke(Color.GREEN);
        getChildren().clear();
        getChildren().addAll(circle, text12, text9, text3, text6, secondLine, minuteLine, hourLine);
    }
}<file_sep>/Uebung3a_Stack/src/de/volkswagen/java/gui/Main.java
package de.volkswagen.java.gui;
import de.volkswagen.java.data.Stack;
import javafx.application.Application;
import javafx.event.ActionEvent;
import javafx.event.EventHandler;
import javafx.scene.Scene;
import javafx.scene.control.Button;
import javafx.scene.control.TextField;
import javafx.scene.layout.BorderPane;
import javafx.scene.layout.HBox;
import javafx.scene.layout.Priority;
import javafx.scene.layout.VBox;
import javafx.stage.Stage;
/**
 * Small push/pop UI for a generic Stack: a text field plus Push and Pop
 * buttons on top and a stack visualisation on the right.
 */
public class Main extends Application {

    private Stack<Double> stack = new Stack<>();

    @Override
    public void start(Stage primaryStage) throws Exception {
        HBox dataInput = new HBox(2.0);
        TextField input = new TextField();
        dataInput.getChildren().add(input);
        HBox.setHgrow(input, Priority.SOMETIMES);
        Button pushButton = new Button("Push");
        dataInput.getChildren().add(pushButton);
        HBox.setHgrow(pushButton, Priority.NEVER);
        Button popButton = new Button("Pop");
        dataInput.getChildren().add(popButton);
        HBox.setHgrow(popButton, Priority.NEVER);
        VBox vbox = new VBox(dataInput);
        StackDisplay stackDisplay = new StackDisplay(this.stack);
        // EventHandler is a functional interface, so lambdas replace the
        // original anonymous inner classes.
        pushButton.setOnAction(e -> {
            // NOTE(review): parseDouble throws NumberFormatException on
            // invalid input — confirm whether validation is wanted here.
            Main.this.stack.push(Double.parseDouble(input.getText()));
            stackDisplay.update();
            input.setText("");
        });
        popButton.setOnAction(e -> {
            input.setText(Double.toString(Main.this.stack.pop()));
            stackDisplay.update();
        });
        BorderPane root = new BorderPane(vbox, null, stackDisplay, null, null);
        Scene scene = new Scene(root, 600, 300);
        primaryStage.setScene(scene);
        primaryStage.show();
    }

    public static void main(String[] args) {
        Application.launch();
    }
}
<file_sep>/LC2_CharacterProcessor_Step4/src/gmbh/conteco/Main.java
package gmbh.conteco;
import java.io.Reader;
import java.io.StringReader;
public class Main {
// Ignore whitespace
public static void main(String[] args) {
Reader reader = new StringReader("H ello");
Box<Integer> n = new Box<>(0);
CharacterProcessor.process(reader, ch -> {
if (!Character.isWhitespace(ch))
n.value++;
});
System.out.println(n);
}
}<file_sep>/Java9Deprecated/src/DeprecatedDemo.java
/** Demonstrates the Java 9 {@code @Deprecated} attributes. */
public class DeprecatedDemo {
    /**
     * Multiplies two numbers.
     *
     * @deprecated This method should no longer be used for multiplication.
     * @param wert1 the first operand
     * @param wert2 the second operand
     * @return the product of both operands
     */
    @Deprecated(since = "1.5", forRemoval = true)
    public int calculate(int wert1, int wert2) {
        return wert1 * wert2;
    }
}
<file_sep>/LC2_CharacterProcessor_Step5/src/gmbh/conteco/CharacterProcessor.java
package gmbh.conteco;
import java.io.Reader;
public class CharacterProcessor {
public static void process(Reader reader,
Tester<Character> tester, Handler<Character> handler) {
try(Reader r = reader) {
int ch = r.read();
while (ch != -1) {
if (tester.test((char)ch))
handler.handle((char) ch);
ch = r.read();
}
}
catch (Exception e) { throw new RuntimeException(e); }
}
public static void process(Reader reader, Handler<Character> handler) {
process(reader, (ch) -> true, handler);
}
}<file_sep>/IterableAndBiMap/src/com/company/ValueStore.java
package com.company;
/** Immutable value holder for one int and two strings. */
public class ValueStore {

    public final int i;
    public final String s;
    public final String data;

    public ValueStore(int i, String s, String data) {
        this.i = i;
        this.s = s;
        this.data = data;
    }

    /** Renders all three fields, e.g. "ValueStore [i = 1, s = a, data = b]". */
    @Override
    public String toString() {
        StringBuilder text = new StringBuilder("ValueStore [i = ");
        text.append(i).append(", s = ").append(s).append(", data = ").append(data).append("]");
        return text.toString();
    }
}
<file_sep>/SubscriberObserverPattern_Zoo/src/com/company/Main.java
package com.company;
/**
 * Observer-pattern demo: a listener registered with the zoo is notified
 * whenever an animal is added.
 */
public class Main {
public static void main(String[] args) {
Zoo zoo = new Zoo();
// Alternative: register the listener as a lambda instead of a named class.
//zoo.registerAnimalAddedListener(animal -> System.out.println("Animal has been added: " + animal));
zoo.registerAnimalAddedListener(new PrintNameAnimalAddedListener());
zoo.addAnimal(new Animal("Dog"));
zoo.addAnimal(new Animal("Cat"));
}
}
<file_sep>/Zeiterfassung/src/gmbh/conteco/TimeSheet.java
package gmbh.conteco;
/*
import java.util.GregorianCalendar;
import java.util.Date;
import java.util.Calendar;
import java.text.SimpleDateFormat;
*/
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
/**
 * Renders a simple ASCII time sheet for one employee.
 */
public class TimeSheet {

    private Employee employee;
    private List<TimeSheetEntry> entries;

    /** Column titles used when rendering the time sheet. */
    class Headers {
        public static final String employeeName = "Employee";
    }

    /**
     * Prints the time sheet header, e.g.:
     * +----------+
     * | Employee |
     * +----------+
     */
    public void display() {
        List<String> headers = Arrays.asList(Headers.employeeName);
        displayHeader(headers);
    }

    private void displayHeader(Collection<String> listOfTitles) {
        listOfTitles.forEach(title -> {
            int width = title.length() + 4; // 2 spaces + 2 delimiters
            // Bug fix: the original printed border and title on ONE line
            // (no newlines), so the box never rendered as documented.
            drawLine(width);
            System.out.println("| " + title + " |");
            drawLine(width);
        });
    }

    /** Prints a horizontal border line "+---+" terminated by a newline. */
    private void drawLine(int length) {
        if (length < 3)
            return;
        System.out.println("+" + "-".repeat(length - 2) + "+");
    }
}
<file_sep>/DemonstratingEvents/src/gmbh/conteco/LightningEvent.java
package gmbh.conteco;
import javafx.event.Event;
import javafx.event.EventTarget;
import javafx.event.EventType;
/**
 * Custom JavaFX event representing a lightning strike on a grid cell.
 * Carries the (i, j) coordinates of the struck LightningRectangle target.
 */
public class LightningEvent extends Event {
// Grid coordinates of the struck rectangle, copied from the event target.
private int i, j;
public int getI() {
return this.i;
}
public int getJ() {
return this.j;
}
// The only valid EventType for our lightning event.
public static final EventType<LightningEvent> PLASMA_STRIKE = new EventType<>(Event.ANY, "PLASMA_STRIKE");
public LightningEvent() {
super(PLASMA_STRIKE);
}
// NOTE(review): this constructor ignores its eventType argument and always
// delegates to the PLASMA_STRIKE constructor — confirm that is intended.
public LightningEvent(EventType<? extends Event> eventType) {
this();
}
// Full constructor: the target must be a LightningRectangle; its grid
// coordinates are captured so handlers can read them from the event.
public LightningEvent(Object o, EventTarget eventTarget, EventType<? extends Event> eventType) {
super(o, eventTarget, PLASMA_STRIKE);
this.i = ((LightningRectangle)eventTarget).getI();
this.j = ((LightningRectangle)eventTarget).getJ();
}
// Covariant override so callers get the specific event type back.
@Override
public EventType<? extends LightningEvent> getEventType() {
return (EventType<? extends LightningEvent>)super.getEventType();
}
@Override
public LightningEvent copyFor(Object o, EventTarget eventTarget) {
return (LightningEvent)super.copyFor(o, eventTarget);
}
}
package gmbh.conteco;
import javafx.event.EventHandler;
import javafx.scene.Node;
import javafx.scene.control.ButtonBase;
import javafx.scene.image.Image;
import javafx.scene.image.ImageView;
import javafx.scene.input.MouseEvent;
// TODO: This interface can be optimized further - how?
public interface ImageableButton {
// TODO: What is missing here - what's a good coding style?
interface Addable<E> {
boolean add(E e);
}
public default <E extends ButtonBase> void createImageView(Addable<Node> addable,
E button, final Image selected, final Image unselected) {
final ImageView imageView = new ImageView(selected);
// TODO: Describe what this line does
addable.add(imageView);
// TODO: Use a lambda expression instead
imageView.setOnMousePressed(new EventHandler<MouseEvent>() {
public void handle(MouseEvent evt) {
imageView.setImage(unselected);
}
});
// TODO: Use a lambda expression instead
imageView.setOnMouseReleased(new EventHandler<MouseEvent>() {
public void handle(MouseEvent evt) {
imageView.setImage(selected);
}
});
button.setGraphic(imageView);
}
public void setImage(final Image selected, final Image unselected);
public default void setImage(final Image image) {
setImage(image, image);
}
}<file_sep>/MultipleWindows/src/sample/Controller.java
package sample;
// Model
// View
// Controller
// MVC controller placeholder — no behaviour implemented yet.
public class Controller {
}
<file_sep>/LambdaExpressions/src/gmbh/conteco/Main.java
package gmbh.conteco;
import javafx.application.Application;
import javafx.scene.Scene;
import javafx.scene.control.Button;
import javafx.scene.control.TextField;
import javafx.scene.layout.FlowPane;
import javafx.stage.Stage;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
public class Main extends Application {
// Private Field / Property
private static final Map<String, BinaryOperator> operators = new LinkedHashMap<>();
private static TextField textFieldX;
private static TextField textFieldY;
private static TextField textFieldResult;
// Block
{
operators.put("Plus", (x, y) -> x + y);
operators.put("Minus", (x, y) -> x - y);
operators.put("Times", (x, y) -> x * y);
operators.put("Div", (x, y) -> x / y);
}
@Override
public void start(Stage primaryStage) throws Exception{
FlowPane pane = new FlowPane();
Scene scene = new Scene(pane);
textFieldX = new TextField();
textFieldY = new TextField();
textFieldResult = new TextField();
pane.getChildren().addAll(textFieldX, textFieldY);
addButtons(pane);
pane.getChildren().add(textFieldResult);
primaryStage.setScene(scene);
primaryStage.setTitle("Maps mit Lambdas");
primaryStage.show();
}
private static void addButtons(FlowPane pane) {
for (Map.Entry<String, BinaryOperator> entry : operators.entrySet()) {
Button button = new Button(entry.getKey());
button.setOnAction(actionEvent -> onCalc(entry.getValue()));
pane.getChildren().add(button);
}
}
private static void onCalc(BinaryOperator op) {
int x = Integer.parseInt(textFieldX.getText());
int y = Integer.parseInt(textFieldY.getText());
int result = op.apply(x, y);
textFieldResult.setText(String.valueOf(result));
}
public static void main(String[] args) {
launch(args);
}
}<file_sep>/Uebung3f_UPN-Rechner_3/src/de/volkswagen/java/gui/InputButton.java
package de.volkswagen.java.gui;
import java.util.function.Function;
import javafx.scene.control.Button;
import javafx.scene.control.TextField;
/**
 * Button that, when pressed, replaces a text field's content with the
 * result of applying the given edit function to its current text.
 */
public class InputButton extends Button {

    public InputButton(String label, TextField textField, Function<String, String> editFunction) {
        super(label);
        setOnAction(event -> textField.setText(editFunction.apply(textField.getText())));
    }
}
<file_sep>/untitled/src/sample/Square.java
package sample;
/**
 * Functional interface for a unary int operation.
 * NOTE(review): the name suggests squaring, but the actual computation is
 * defined by the implementation supplied at the call site.
 */
@FunctionalInterface
public interface Square {
int calculate(int operand);
}
package gmbh.conteco;
import java.time.LocalDate;
/**
 * One entry of a driver's log book, with CSV import/export.
 * Export and import are symmetric: exportToCSV produces exactly the
 * nine comma-separated fields importNewFromCSV reads.
 */
public class Trip {

    private String licensePlate;
    private String driver;
    private LocalDate date;
    private long kmAtStart;
    private long kmAtEnd;
    private long kmDriven;
    private boolean isBusinessTrip;
    private String purpose;
    private String route;

    public Trip(String licensePlate, String driver, LocalDate date, long kmAtStart, long kmAtEnd, long kmDriven, boolean isBusinessTrip, String purpose, String route) {
        this.licensePlate = licensePlate;
        this.driver = driver;
        this.date = date;
        this.kmAtStart = kmAtStart;
        this.kmAtEnd = kmAtEnd;
        this.kmDriven = kmDriven;
        this.isBusinessTrip = isBusinessTrip;
        this.purpose = purpose;
        this.route = route;
    }

    /**
     * Parses one CSV line of nine comma-separated fields (values trimmed):
     * plate, driver, ISO date, kmAtStart, kmAtEnd, kmDriven, business flag,
     * purpose, route.
     *
     * @throws IllegalArgumentException on malformed numeric/date fields
     */
    public static Trip importNewFromCSV(String line) throws IllegalArgumentException {
        String[] input = line.split(",");
        String licensePlate = input[0].trim();
        String driver = input[1].trim();
        LocalDate date = LocalDate.parse(input[2].trim());
        long kmAtStart = Long.parseLong(input[3].trim());
        long kmAtEnd = Long.parseLong(input[4].trim());
        long kmDriven = Long.parseLong(input[5].trim());
        boolean isBusinessTrip = Boolean.valueOf(input[6].trim());
        String purpose = input[7].trim();
        String route = input[8].trim();
        return new Trip(licensePlate, driver, date, kmAtStart, kmAtEnd, kmDriven, isBusinessTrip, purpose, route);
    }

    /**
     * Serialises this trip back to the nine-field CSV format.
     * Bug fix: the original appended licensePlate a second time between the
     * business flag and the purpose, producing ten fields and breaking the
     * import/export round trip.
     */
    public String exportToCSV() {
        return String.join(",",
                licensePlate,
                driver,
                date.toString(),
                Long.toString(kmAtStart),
                Long.toString(kmAtEnd),
                Long.toString(kmDriven),
                Boolean.toString(isBusinessTrip),
                purpose,
                route);
    }

    public String getLicensePlate() {
        return licensePlate;
    }

    public String getDriver() {
        return driver;
    }

    public LocalDate getDate() {
        return date;
    }

    public String getDateAsString() {
        return date.toString();
    }

    public long getKmAtStart() {
        return kmAtStart;
    }

    public long getKmAtEnd() {
        return kmAtEnd;
    }

    public long getKmDriven() {
        return kmDriven;
    }

    public boolean isBusinessTrip() {
        return isBusinessTrip;
    }

    /** Human-readable business flag: "Yes" / "No". */
    public String getIsBusinessTrip() {
        return isBusinessTrip ? "Yes" : "No";
    }

    public String getPurpose() {
        return purpose;
    }

    public String getRoute() {
        return route;
    }
}
<file_sep>/DemonstratingEvents/src/gmbh/conteco/StrikeEventHandler.java
package gmbh.conteco;
import javafx.event.EventHandler;
/**
 * Reacts to PLASMA_STRIKE events by animating the struck rectangle and
 * logging its grid coordinates.
 */
public class StrikeEventHandler implements EventHandler<LightningEvent> {
@Override
public void handle(LightningEvent event) {
// The event target is the rectangle the strike was fired at.
LightningRectangle rect = (LightningRectangle) event.getTarget();
rect.strike();
System.out.println("Received strike: " + rect.getI() + ", " + rect.getJ());
}
}
<file_sep>/Zeiterfassung_v1/src/gmbh/conteco/ImageMenuButton.java
package gmbh.conteco;
import javafx.scene.control.MenuButton;
import javafx.scene.image.Image;
/** MenuButton variant that supports pressed/released image swapping. */
public class ImageMenuButton extends MenuButton implements ImageableButton{
// Both MenuButton constructor forms are re-exposed so callers can create
// the button with or without a text label.
public ImageMenuButton() { super(); }
public ImageMenuButton(String text) { super(text); }
// Installs the image swap via the ImageableButton mixin.
// NOTE(review): the ImageView is added to this control's own (protected)
// children list — confirm that differs intentionally from ImageButton.
public void setImage(final Image selected, final Image unselected) {
createImageView(this.getChildren()::add, this, selected, unselected);
}
}
}<file_sep>/JavaLocaleImprovements/src/LocaleDemo_de.java
import java.util.ListResourceBundle;
/** German resource bundle providing a price value and a currency code. */
public class LocaleDemo_de extends ListResourceBundle {

    // ListResourceBundle convention: the contents table is a constant, so
    // it should be static final (one shared copy, never reassigned) rather
    // than a per-instance field.
    private static final Object[][] contents = {
            { "price", 1.00 },
            { "currency", "EUR" }
    };

    @Override
    protected Object[][] getContents() {
        return contents;
    }
}
<file_sep>/LC2_CharacterProcessor_Step3/src/gmbh/conteco/Main.java
package gmbh.conteco;
import static java.lang.System.*;
import java.io.Reader;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;
/**
 * Demonstrates why lambdas can only capture (effectively) final locals,
 * and how a mutable holder object (Box) works around that.
 */
public class Main{
public static void main(String[] args) {
Reader reader = new StringReader("Hello ");
List<Character> chars = new ArrayList<>();
// Collecting into a (final) list reference is fine.
CharacterProcessor.process(reader, ch -> chars.add(ch));
for (Character ch : chars)
out.println(ch);
// This will produce a compile error:
// "Variable used in a lambda expression should be final or effectively final"
// int n = 0;
// CharacterProcessor.process(reader, ch -> n++); // NOK!!!
// out.println(n);
// Workaround: keep the reference final and mutate the object it points
// to — that is what the Box<T> holder class is for.
reader = new StringReader("Hello");
Box<Integer> n = new Box<>(0);
CharacterProcessor.process(reader, ch -> n.value++);
out.println(n);
}
}
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/** Small Java 8 Stream API demo: filtering a list of persons. */
public class Main {
    public static void main(String[] args) {
        List<Person> personen = new ArrayList<>();
        personen.add(new Person("Alfred","Maier"));
        personen.add(new Person("Jane","Doe"));
        personen.add(new Person("Joe","Miller"));
        personen.add(new Person("Albert","Schweizer"));
        personen.add(new Person("Lisa","Müller"));
        personen.add(new Person("Sarah","Smith"));
        // Filter all persons whose first name is "Jane".
        // Bug fix: the original discarded the collected list; keep it so
        // the pipeline result is actually usable.
        List<Person> janes = personen.stream()
                .filter(element -> element.getVorname().equals("Jane"))
                .collect(Collectors.toList());
    }
}
<file_sep>/InterfacesWH/src/com/company/Person.java
package com.company;
/**
 * Person with first name, last name and a job provider used for the
 * string representation.
 */
public class Person {

    private String vorname;
    private String nachname;
    private PersonenJob personenJob;

    public Person(String vorname, String nachname, PersonenJob personenJob) {
        this.vorname = vorname;
        this.nachname = nachname;
        this.personenJob = personenJob;
    }

    public String getVorname() {
        return vorname;
    }

    public String getNachname() {
        return nachname;
    }

    /** Renders "&lt;first&gt; &lt;last&gt; ist &lt;job&gt;". */
    @Override
    public String toString() {
        StringBuilder text = new StringBuilder(vorname);
        text.append(" ").append(nachname).append(" ist ").append(personenJob.getJob());
        return text.toString();
    }
}
<file_sep>/InterfacesWH/src/com/company/Main.java
package com.company;
import java.io.BufferedWriter;
import java.io.File;
import java.util.function.Function;
/**
 * Teaching demo: evolves the same callback from anonymous inner class to
 * lambda to method reference, plus a generic try/catch wrapper.
 */
public class Main {
static Logger logger = null;
// Selects where log output should go.
enum LogType {
CONSOLE,
FILE
}
public static void main(String[] args) {
LogType logType = LogType.CONSOLE;
if (logType == LogType.CONSOLE)
/*
logger = new Logger() {
@Override
public void log(Number number) {
System.out.println(number);
}
};
*/
//logger = number -> System.out.println(number);
logger = System.out::println;
else {
//BufferedWriter
}
logger.log(3.1415f);
// Step 1 - anonymous inner class
System.out.println(new Person("Albert", "Einstein", new PersonenJob() {
@Override
public String getJob() {
return "Physiker";
}
}) );
// Step 2 - lambda expression with a block body
System.out.println(new Person("Peter", "Maier",
() -> {
return "Verkäufer";
}
));
// Step 3 - lambda expression, expression body
System.out.println(new Person("Albert", "Einstein", () ->
"Physiker"
) );
// Step 4 - single-line lambda expression
System.out.println(new Person("Albert", "Einstein", () -> "Physiker"));
System.out.println(new Person("Albert", "Einstein", () -> getJobName()));
// Step 5 - method reference
System.out.println(new Person("Albert", "Einstein", Main::getJobName));
// The same Function passed first as a lambda ...
tryCatch((o) -> {
System.out.println("Doing something ...");
return "TEST";
});
// ... then as an anonymous inner class.
tryCatch(new Function<Integer, String>() {
@Override
public String apply(Integer s) {
System.out.println("Doing something ...");
return "TEST";
}
});
}
// Applies the function (with a null argument) and converts any exception
// into an error log plus a null result.
public static <T,R> R tryCatch(Function<T, R> function) {
try {
return function.apply(null);
}
catch (Exception ex) {
System.err.println(ex.getMessage());
return null;
}
}
public static String getJobName() {
return "Reporter";
}
}
package com.company;
public interface MyInterface {
/* public abstract */ double addNumbers(double number1, double number2);
public default double addNumber2(double numer1, double number2) {
return 0;
}
public default double substract(double number1, double number2) {
System.out.println("Subtract -> MyInterface");
return 0;
}
}
<file_sep>/Java11StringMethods/src/com/company/Main.java
package com.company;
import java.io.IOException;
import java.lang.reflect.Method;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.stream.Collectors;
/**
 * Tour of Java 10/11/12+ features: isBlank, lines, strip*, repeat, var,
 * Files.writeString, switch expressions, and nestmate reflection.
 */
public class Main {

    public void myPublicMethod() {
    }

    private void myPrivateMethod() {
    }

    // Nested classes may call the outer class's private members directly.
    class Nested {
        public void myNestedPublicMethod() {
            myPrivateMethod();
        }
    }

    public static void main(String[] args) {
        // String.isBlank (Java 11)
        String s = "Winter";
        System.out.println(s.isBlank());
        String s2 = "";
        System.out.println(s2.isBlank());

        // String.lines (Java 11)
        String s3 = "OK\nOK\nLine3";
        System.out.println(s3);
        System.out.println(s3.lines().collect(Collectors.toList()));

        // strip/stripLeading/stripTrailing (Java 11).
        // NOTE: trim() returns a NEW string; the original called it and
        // discarded the result, so the call is removed here.
        String s4 = " mein Name ";
        System.out.print(">");
        System.out.print(s4.strip());
        System.out.println("<");
        System.out.print(">");
        System.out.print(s4.stripLeading());
        System.out.println("<");
        System.out.print(">");
        System.out.print(s4.stripTrailing());
        System.out.println("<");

        // String.repeat (Java 11)
        String str = "=".repeat(3);
        System.out.println(str);

        // Local variable type inference (Java 10)
        var list = new ArrayList<String>();

        // Files.writeString (Java 11)
        try {
            Path path = Files.writeString(
                    Files.createTempFile("meineDatei", ".txt"),
                    "This is my content");
        } catch (IOException e) {
            e.printStackTrace();
        }

        // Classic switch statement ...
        String result = "";
        String day = "M";
        switch (day) {
            case "M":
            case "W":
            case "F":
                result = "MWF";
                break;
        }

        // ... versus a switch EXPRESSION. Bug fix: a value is produced from
        // a block body with 'yield', not 'break "..."' (that was only valid
        // in the Java 12 preview), and the expression ends with ';'.
        String result2 = switch (day) {
            case "M", "W", "F" -> "MWF";
            case "T", "TH", "S" -> "TTS";
            default -> {
                if (day.isEmpty())
                    yield "Please insert a valid day.";
                else
                    yield "Look like a Sunday.";
            }
        };

        // JEP 323 : Local-Variable Syntax for Lambda Parameters
        //(s1, s2) -> s1 + s2;

        // Reflective call of a private method. Bug fix: the method is named
        // getDeclaredMethod (original: getDeclareMethod), and the checked
        // reflection exceptions must be handled.
        try {
            Main main = new Main();
            Method m = main.getClass().getDeclaredMethod("myPrivateMethod");
            m.setAccessible(true);
            m.invoke(main);
        } catch (ReflectiveOperationException e) {
            e.printStackTrace();
        }
    }
}
public class Main {
public static void main(String[] args) {
// Runs the serialization-filter demo with pattern "length" and value 3.
// NOTE(review): FilterTest is defined elsewhere — the meaning of the two
// arguments is assumed from their names; confirm against FilterTest.
FilterTest.testFilter("length", 3);
}
}
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
// apt
// Annotation Processing Tool
/**
 * Marks a method with a request-for-enhancement ticket.
 * Retained at runtime so it can be read reflectively; methods only.
 * NOTE(review): "synpsis" looks like a typo for "synopsis" and the date
 * default "[unimplmented]" for "[unimplemented]" — renaming the element or
 * changing the default would affect existing usages, so both are only
 * flagged here, not changed.
 */
@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
public @interface RequestForEnhancement {
int id();
String synpsis();
String engineer() default "[unassigned]";
String date() default "[unimplmented]";
}
<file_sep>/RpnCalculator/src/Calculator.java
/** Functional interface for a binary operation on two Double operands. */
@FunctionalInterface
public interface Calculator {
public Double calculate(Double operand1, Double operand2);
}
package com.company;
/**
 * Generic base class holding a content value of type T; subclasses
 * implement handle() to process it.
 */
public abstract class MyHandler<T> {

    private T content;

    public MyHandler(T content) {
        this.content = content;
        System.out.println("constructor for MyHandler with content '" + content.toString() + "' called.");
    }

    /**
     * Legacy accessor kept for backward compatibility.
     * @deprecated use {@link #getContent()} (Java naming convention).
     */
    @Deprecated
    public T getcontent() {
        return content;
    }

    /** Conventionally named accessor for the stored content. */
    public T getContent() {
        return content;
    }

    public void setContent(T content) {
        this.content = content;
    }

    abstract void handle();
}
<file_sep>/MatchNoneDemo/src/com/company/Employee.java
package com.company;
import java.util.ArrayList;
import java.util.List;
/** Simple employee record used by the stream match demos. */
public class Employee {

    public int id;
    public String name;
    public int sal;

    /**
     * Creates an employee.
     * Bug fix: the original constructor contained the self-assignment
     * "this.id = this.id;", leaving id at its default value 0.
     */
    public Employee(int id, String name, int sal) {
        this.id = id;
        this.name = name;
        this.sal = sal;
    }

    /** Returns a fixed demo list of four employees. */
    public static List<Employee> getEmpList() {
        List<Employee> list = new ArrayList<>();
        list.add(new Employee(1, "A", 2000));
        list.add(new Employee(2, "B", 3000));
        list.add(new Employee(3, "C", 4000));
        list.add(new Employee(4, "D", 5000));
        return list;
    }
}
<file_sep>/JavaCRUDwithH2_5/src/Main.java
import java.sql.Connection;
import java.sql.Date;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.time.LocalDate;
import java.time.ZoneId;
import java.util.ArrayList;
import java.util.List;
import java.sql.PreparedStatement;
public class Main {
private static final String DB_USER = "sa";
private static final String DB_PASS = "<PASSWORD>";
private static final String CONNECTIONSTRING = "jdbc:h2:~/h2demo;AUTO_SERVER=TRUE";
private static final String DRIVER = "org.h2.Driver";
public static void main(String[] args) {
Connection connection = null;
List<Customer> customers = new ArrayList<>();
customers.add(new Customer(100, "Herbert", 33, "Braunschweig", 1200, LocalDate.now(ZoneId.of("Europe/Berlin"))));
customers.add(new Customer(101, "Alfred", 21, "Braunschweig", 1300, LocalDate.now(ZoneId.of("Europe/Berlin"))));
customers.add(new Customer(102, "Leonie", 98, "Braunschweig", 2000, LocalDate.now(ZoneId.of("Europe/Berlin"))));
customers.add(new Customer(103, "Sabine", 77, "Braunschweig", 1750, LocalDate.now(ZoneId.of("Europe/Berlin"))));
try
{
Class.forName(DRIVER);
System.out.println("SQL driver for H2 loaded.");
connection = DriverManager.getConnection(CONNECTIONSTRING, DB_USER, DB_PASS);
System.out.println("Opened connection to DB server.");
String sql = "INSERT INTO Customer (id,name,age,salary,address,start_date) VALUES (?,?,?,?,?,?)";
for (Customer customer : customers) {
PreparedStatement prepStatement = connection.prepareStatement(sql);
prepStatement.setInt(1, customer.getId());
prepStatement.setString(2, customer.getName());
prepStatement.setInt(3, customer.getAge());
prepStatement.setInt(4, customer.getSalary());
prepStatement.setString(5, customer.getAddress());
prepStatement.setObject(6, customer.getStartDate());
prepStatement.executeUpdate();
if (prepStatement != null)
try {
prepStatement.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
} catch (ClassNotFoundException ex) {
ex.printStackTrace();
} catch (SQLException e) {
e.printStackTrace();
} finally {
if (connection != null)
try {
connection.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
}
}<file_sep>/Uebung0_CustomTextField/src/gmbh/conteco/Main.java
package gmbh.conteco;
import javafx.application.Application;
import javafx.scene.Scene;
import javafx.scene.control.Button;
import javafx.scene.control.Label;
import javafx.scene.layout.HBox;
import javafx.stage.Stage;
public class Main extends Application {
@Override
public void start(Stage primaryStage) throws Exception {
MyOwnTextField textField1 = new MyOwnTextField();
MyOwnTextField textField2 = new MyOwnTextField();
MyOwnTextField textField3 = new MyOwnTextField();
Button button = new Button("Click me");
HBox hBox = new HBox();
hBox.getChildren().addAll(textField1, new Label("+"), textField2, new Label("="), textField3, button);
button.setOnAction( event -> {
int firstNum = Integer.parseInt(textField1.getText());
int secondNum = Integer.parseInt(textField2.getText());
textField3.setText(Integer.toString(firstNum + secondNum));
});
Scene scene = new Scene(hBox, 400, 50);
primaryStage.setTitle("´´ Sample App ``");
primaryStage.setScene(scene);
primaryStage.show();
}
public static void main(String[] args) {
launch(args);
}
}<file_sep>/DemonstratingEvents/src/gmbh/conteco/Main.java
package gmbh.conteco;
import javafx.animation.KeyFrame;
import javafx.animation.RotateTransition;
import javafx.animation.Timeline;
import javafx.application.Application;
import javafx.event.ActionEvent;
import javafx.event.EventHandler;
import javafx.event.EventType;
import javafx.scene.Group;
import javafx.scene.PerspectiveCamera;
import javafx.scene.Scene;
import javafx.scene.control.Button;
import javafx.scene.input.KeyEvent;
import javafx.scene.input.MouseEvent;
import javafx.scene.layout.TilePane;
import javafx.scene.paint.Color;
import javafx.scene.paint.PhongMaterial;
import javafx.scene.text.Font;
import javafx.scene.text.FontWeight;
import javafx.scene.transform.Rotate;
import javafx.stage.Stage;
import javafx.scene.shape.*;
import javafx.scene.text.Text;
import javafx.util.Duration;
import java.util.Random;
import static gmbh.conteco.LightningEvent.PLASMA_STRIKE;
/**
 * Demonstrates custom JavaFX events: a grid of LightningRectangles is
 * periodically struck by PLASMA_STRIKE events that bubble up to the pane.
 */
public class Main extends Application {
// Grid is FIELD_SIZE x FIELD_SIZE rectangles.
private static final int FIELD_SIZE = 10;
// Fixed seed so the strike sequence is reproducible between runs.
private static final Random random = new Random(42);
@Override
public void start(Stage primaryStage) throws Exception{
TilePane field = generateField();
Scene scene = new Scene(field);
primaryStage.setScene(scene);
primaryStage.setResizable(false);
primaryStage.show();
// Strikes fired on a rectangle bubble up; this handler sees them at the pane.
field.addEventHandler(PLASMA_STRIKE, event -> System.out.println("Field handled strike: " + event.getI() + ", " + event.getJ()));
periodicallyStrikeRandomNumbers(field);
/*
Button button = new Button("Click me!");
button.setOnAction(new EventHandler<ActionEvent>() {
@Override
public void handle(ActionEvent actionEvent) {
}
});
button.addEventHandler(ActionEvent.ACTION, event -> System.out.println("Click"));
button.addEventHandler(MouseEvent.MOUSE_ENTERED, event -> System.out.println("Mouse Entered"));
button.addEventHandler(PLASMA_STRIKE, event -> System.out.println("Plasma"));
Scene scene = new Scene(button);
primaryStage.setScene(scene);
primaryStage.setResizable(false);
primaryStage.show();
*/
}
// Fires a strike at a random cell every 2 seconds, forever.
public void periodicallyStrikeRandomNumbers(TilePane field) {
Timeline timeline = new Timeline(
new KeyFrame(Duration.seconds(0), event -> strikeRandomNode(field)),
new KeyFrame(Duration.seconds(2))
);
timeline.setCycleCount(Timeline.INDEFINITE);
timeline.play();
}
// Picks a random rectangle and fires a PLASMA_STRIKE event at it.
private void strikeRandomNode(TilePane field) {
LightningRectangle currentNode = (LightningRectangle)field.getChildren().get(random.nextInt(FIELD_SIZE * FIELD_SIZE));
LightningEvent lightningEvent = new LightningEvent(this, currentNode, PLASMA_STRIKE);
currentNode.fireEvent(lightningEvent);
}
// Builds the fixed-size grid, each cell carrying its own StrikeEventHandler.
public static TilePane generateField() {
TilePane field = new TilePane();
field.setPrefColumns(FIELD_SIZE);
field.setMinWidth(TilePane.USE_PREF_SIZE);
field.setMaxWidth(TilePane.USE_PREF_SIZE);
for (int i = 0; i < FIELD_SIZE; i++) {
for (int j = 0; j < FIELD_SIZE; j++) {
field.getChildren().add(new LightningRectangle(i, j, new StrikeEventHandler()));
}
}
return field;
}
// Standalone intro demo: event handlers vs. event filters on simple shapes.
public static void eventIntroduction(Stage primaryStage) {
Circle circle = new Circle();
circle.setRadius(25.0f);
circle.setCenterX(300.0f);
circle.setCenterY(150.0f);
circle.setFill(Color.BROWN);
circle.setStrokeWidth(20);
Box box = new Box();
box.setHeight(250.0);
box.setWidth(250.0);
box.setDepth(100);
box.setTranslateX(150);
box.setTranslateY(150);
box.setTranslateZ(150);
PhongMaterial material = new PhongMaterial();
material.setDiffuseColor(Color.DARKSLATEBLUE);
RotateTransition rotation = new RotateTransition();
rotation.setAxis(Rotate.Y_AXIS);
rotation.setNode(box);
rotation.setByAngle(320);
rotation.setCycleCount(30);
rotation.setAutoReverse(true);
PerspectiveCamera camera = new PerspectiveCamera();
camera.setTranslateX(0);
camera.setTranslateY(0);
camera.setTranslateZ(0);
box.setMaterial(material);
EventHandler<MouseEvent> handler = new EventHandler<MouseEvent>() {
@Override
public void handle(MouseEvent mouseEvent) {
circle.setFill(Color.LIMEGREEN);
}
};
// Clicking the box starts the rotation animation.
box.addEventHandler(MouseEvent.MOUSE_CLICKED, new EventHandler<MouseEvent>() {
@Override
public void handle(MouseEvent mouseEvent) {
rotation.play();
//rotation.stop();
}
});
// The filter is added and immediately removed again, so the circle's
// click handler is effectively disabled in this demo.
circle.addEventFilter(MouseEvent.MOUSE_CLICKED, handler);
circle.removeEventFilter(MouseEvent.MOUSE_CLICKED, handler);
Text text = new Text("Click on the circle to change its color");
text.setFill(Color.GREEN);
text.setX(150);
text.setY(200);
text.setFont(Font.font(null, FontWeight.BOLD, 15));
Group group = new Group(box, circle, text);
Scene scene = new Scene(group, 600, 400);
scene.setCamera(camera);
primaryStage.setTitle("Hello World");
primaryStage.setScene(scene);
primaryStage.show();
}
public static void main(String[] args) {
launch(args);
}
}
<file_sep>/MiniCalcJava8/src/Main.java
public class Main {
public static void main(String[] args) {
// Looks up the enum constant for "%" and applies it to the two operands.
// NOTE(review): Calc is defined elsewhere — the lookup/apply semantics
// are assumed from the method names; confirm against Calc.
String operator = "%";
double wert1 = 2.5;
double wert2 = 3;
System.out.println(Calc.getEnumValue(operator).getResult(wert1, wert2));
}
}
<file_sep>/Tamagotchi_EventDemo/src/sample/TamagotchiButton.java
package sample;
import javafx.scene.control.Button;
/** Button that fires a HungerEvent whenever its hunger flag is set. */
public class TamagotchiButton extends Button {
private boolean isHungry = false;
public boolean isHungry() {
return this.isHungry;
}
// Sets the flag and fires a HUNGRY event on this node.
// NOTE(review): the event fires regardless of the new value — confirm
// whether setHungry(false) should also fire.
public void setHungry(boolean hungry) {
this.isHungry = hungry;
HungerEvent actionEvent = new HungerEvent(HungerEvent.HUNGRY);
this.fireEvent(actionEvent);
}
}
<file_sep>/Z1_addAll/src/conteco/gmbh/Main.java
package conteco.gmbh;
import javafx.application.Application;
import javafx.scene.Scene;
import javafx.scene.control.Button;
import javafx.scene.layout.StackPane;
import javafx.stage.Stage;
import javafx.scene.text.Font;
// Name, weight, posture, size
/** Minimal JavaFX app: one button centered in a StackPane. */
public class Main extends Application {

    @Override
    public void start(Stage primaryStage) throws Exception {
        Button button = new Button("Click me! 👍🏻");
        StackPane root = new StackPane();
        root.getChildren().add(button);
        primaryStage.setScene(new Scene(root, 300, 300));
        primaryStage.setTitle("Working with Fonts ...");
        primaryStage.show();
    }

    public static void main(String[] args) {
        launch(args);
    }
}
<file_sep>/untitled3/src/Logable.java
/** Functional interface producing a Log entry from a raw log string. */
@FunctionalInterface
public interface Logable {
Log getLogEntry(String logEntry);
}
package gmbh.conteco;
import java.io.*;
import java.nio.ByteBuffer;
import java.nio.channels.Pipe;
import java.nio.channels.ReadableByteChannel;
import java.nio.channels.WritableByteChannel;
import java.nio.file.*;
/**
 * Time-tracking demo entry point: fills (NIO demo) and displays a time sheet.
 */
public class Main {

    private TimeSheet timeSheet = new TimeSheet();

    // Not included: EmployeeGroup, GroupRole, EmployeeRole

    public static void main(String[] args) {
        // 1. Teil -> Befüllen
        // 2. Teil -> Anzeige
        // 3. Teil -> User Input
        Main main = new Main();
        main.displayTimesheet();
    }

    /**
     * Demonstrates NIO channel copying. The streams are opened in
     * try-with-resources so they (and their channels) are closed even when
     * copyData throws — the original leaked both on an exception.
     * NOTE(review): source/target paths are still empty placeholders.
     */
    public void fillTimesheet() throws IOException {
        try (FileInputStream fis = new FileInputStream("");
             FileOutputStream fos = new FileOutputStream("")) {
            ReadableByteChannel source = fis.getChannel();
            WritableByteChannel destination = fos.getChannel();
            copyData(source, destination);
        }
        //Pipe p = Pipe.open();
        //Pipe.SourceChannel sc = p.source();
        //Pipe.SinkChannel sc2 = p.sink();
    }

    /** Copies all bytes from src to dest using a 20 KiB direct buffer. */
    private static void copyData(ReadableByteChannel src, WritableByteChannel dest) throws IOException {
        ByteBuffer buffer = ByteBuffer.allocateDirect(20 * 1024);
        while (src.read(buffer) != -1) {
            // Switch the buffer from filling to draining mode.
            buffer.flip();
            // Make sure the buffer is fully drained before refilling.
            while (buffer.hasRemaining()) {
                dest.write(buffer);
            }
            // Buffer is empty again, ready for refilling.
            buffer.clear();
        }
    }

    public void displayTimesheet() {
        this.timeSheet.display();
    }
}
<file_sep>/DiagramsEx/src/gmbh/conteco/Main.java
package gmbh.conteco;
import javafx.application.Application;
import javafx.geometry.Side;
import javafx.scene.Node;
import javafx.scene.Parent;
import javafx.scene.Scene;
import javafx.scene.chart.BarChart;
import javafx.scene.chart.CategoryAxis;
import javafx.scene.chart.LineChart;
import javafx.scene.chart.NumberAxis;
import javafx.scene.chart.PieChart;
import javafx.scene.chart.XYChart;
import javafx.scene.layout.StackPane;
import javafx.scene.layout.VBox;
import javafx.stage.Stage;
/**
 * JavaFX chart demo: opens three stages — a bar chart, a pie chart, and a
 * bar chart with a line chart overlaid in the same StackPane.
 */
public class Main extends Application {
    public static void main(String[] args) {
        launch(args);
    }

    @Override
    public void start(Stage primaryStage) throws Exception {
        // Stage 1: bar chart.
        VBox vbox = new VBox(createBarChart());
        Scene scene = new Scene(vbox, 400, 200);
        primaryStage.setScene(scene);
        primaryStage.setTitle("BarChart demo");
        primaryStage.show();
        // Stage 2: pie chart in its own window.
        StackPane piePane = new StackPane(createPieChart());
        Scene pieScene = new Scene(piePane, 400, 400);
        Stage pieStage = new Stage();
        pieStage.setScene(pieScene);
        pieStage.setTitle("PieChart demo");
        pieStage.show();
        // Stage 3: combined bar + line chart.
        Scene combyScene = new Scene(createCombinationOfCharts(), 400, 300);
        Stage combyStage = new Stage();
        combyStage.setScene(combyScene);
        combyStage.setTitle("Combining two charts");
        combyStage.show();
    }

    /**
     * Builds a StackPane with a bar chart and a line chart stacked on the same
     * axes, so the line reads as an overlay on the bars.
     */
    private Parent createCombinationOfCharts() {
        StackPane root = new StackPane();
        // Shared axes for both charts.
        CategoryAxis xAxis = new CategoryAxis();
        xAxis.setLabel("Country");
        NumberAxis yAxis = new NumberAxis();
        yAxis.setLabel("Population");
        // Bar chart
        BarChart<String, Number> barChart = new BarChart<>(xAxis, yAxis);
        barChart.setLegendVisible(false);
        // Animation is disabled so the overlay lines up deterministically.
        barChart.setAnimated(false);
        // Data series #1
        XYChart.Series<String, Number> dataSeries1 = new XYChart.Series<>();
        dataSeries1.getData().add(new XYChart.Data<>("Austria", 25601.34));
        dataSeries1.getData().add(new XYChart.Data<>("Brazil", 20148.82));
        dataSeries1.getData().add(new XYChart.Data<>("France", 10000.00));
        dataSeries1.getData().add(new XYChart.Data<>("Germany", 35407.15));
        dataSeries1.getData().add(new XYChart.Data<>("USA", 12000));
        barChart.getData().add(dataSeries1);
        // Line Chart -> Create the overlay - our second chart.
        // Its axes/grid are hidden so only the line itself shows on top of the bars.
        LineChart<String, Number> lineChart = new LineChart<>(xAxis, yAxis);
        lineChart.setLegendVisible(false);
        lineChart.setAnimated(false);
        lineChart.getXAxis().setVisible(false);
        lineChart.getYAxis().setVisible(false);
        lineChart.setAlternativeColumnFillVisible(false);
        lineChart.setAlternativeRowFillVisible(false);
        lineChart.setHorizontalGridLinesVisible(false);
        lineChart.setVerticalGridLinesVisible(false);
        // This should be solved by using resources instead
        //lineChart.getStylesheets().add("CombineCharts.css");
        // CSS makes the overlay's plot background transparent (file ships next to this class).
        lineChart.getStylesheets().addAll(getClass().getResource("CombineCharts.css").toExternalForm());
        // Data series #2 — same categories, shifted up by a constant offset.
        int delta = 1000;
        XYChart.Series<String, Number> dataSeries2 = new XYChart.Series<>();
        dataSeries2.getData().add(new XYChart.Data<>("Austria", 25601.34 + delta));
        dataSeries2.getData().add(new XYChart.Data<>("Brazil", 20148.82 + delta));
        dataSeries2.getData().add(new XYChart.Data<>("France", 10000.00 + delta));
        dataSeries2.getData().add(new XYChart.Data<>("Germany", 35407.15 + delta));
        dataSeries2.getData().add(new XYChart.Data<>("USA", 12000 + delta));
        lineChart.getData().add(dataSeries2);
        // Order matters: line chart added last so it paints on top of the bars.
        root.getChildren().addAll(barChart, lineChart);
        return root;
    }

    /** Builds a four-slice pie chart of GDP figures with the legend on the left. */
    private Node createPieChart() {
        PieChart pieChart = new PieChart();
        PieChart.Data slice1 = new PieChart.Data("Germany", 3.948);
        PieChart.Data slice2 = new PieChart.Data("France", 2.778);
        PieChart.Data slice3 = new PieChart.Data("Italy", 2.084);
        PieChart.Data slice4 = new PieChart.Data("Austria", 0.455);
        pieChart.getData().add(slice1);
        pieChart.getData().add(slice2);
        pieChart.getData().add(slice3);
        pieChart.getData().add(slice4);
        pieChart.setTitle("2018: GDP by Country");
        pieChart.setLegendSide(Side.LEFT);
        return pieChart;
    }

    /**
     * Builds a grouped bar chart: three series (2020/2015/2010) of language
     * popularity percentages across three categories.
     * (Note: "Pyhton" is a typo kept consistently across all series.)
     */
    private Node createBarChart() {
        CategoryAxis xAxis = new CategoryAxis();
        xAxis.setLabel("Programming Language");
        NumberAxis yAxis = new NumberAxis();
        yAxis.setLabel("Percent");
        BarChart<String, Number> barChart = new BarChart<>(xAxis, yAxis);
        XYChart.Series<String, Number> dataSeries1 = new XYChart.Series<>();
        XYChart.Series<String, Number> dataSeries2 = new XYChart.Series<>();
        XYChart.Series<String, Number> dataSeries3 = new XYChart.Series<>();
        dataSeries1.setName("2020");
        dataSeries1.getData().add(new XYChart.Data<String, Number>("Java", 17.78));
        dataSeries1.getData().add(new XYChart.Data<String, Number>("C", 16.33));
        dataSeries1.getData().add(new XYChart.Data<String, Number>("Pyhton", 10.11));
        dataSeries2.setName("2015");
        dataSeries2.getData().add(new XYChart.Data<String, Number>("Java", 15.58));
        dataSeries2.getData().add(new XYChart.Data<String, Number>("C", 16.64));
        dataSeries2.getData().add(new XYChart.Data<String, Number>("Pyhton", 2.61));
        dataSeries3.setName("2010");
        dataSeries3.getData().add(new XYChart.Data<String, Number>("Java", 17.51));
        dataSeries3.getData().add(new XYChart.Data<String, Number>("C", 17.28));
        dataSeries3.getData().add(new XYChart.Data<String, Number>("Pyhton", 4.23));
        barChart.getData().add(dataSeries1);
        barChart.getData().add(dataSeries2);
        barChart.getData().add(dataSeries3);
        barChart.setTitle("History of programming language");
        return barChart;
    }
}
package gmbh.conteco;
import static java.lang.System.*;
import java.io.Reader;
import java.io.StringReader;
public class Main {
    // Counts the non-whitespace characters of a fixed input string using
    // CharacterProcessor with a filter predicate and a per-character action.
    public static void main(String[] args) {
        Reader input = new StringReader("Hello ");
        // Box is a mutable holder so the lambda below can increment a counter.
        Box<Integer> counter = new Box<>(0);
        CharacterProcessor.process(
                input,
                ch -> !Character.isWhitespace(ch),
                ch -> counter.value++);
        out.println(counter);
    }
}
<file_sep>/JavaPlatformLoggingApi/src/gmbh/conteco/Main.java
package gmbh.conteco;
import java.lang.System.Logger.Level;
/**
 * Demo of the Java Platform Logging API (System.Logger, JEP 264).
 * The backend can be bridged to logback/SLF4J via a System.LoggerFinder.
 */
public class Main {
    private static System.Logger LOG = System.getLogger("JavaPlatformLoggingApi");

    public static void main(String[] args) {
        LOG.log(Level.INFO, "It just works!");
        LOG.log(Level.ERROR, "Ups ...");
        // Example logback configuration for use with a bridge (ch.qos.logback):
        // <configuration>
        //   <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        //     <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
        //       <pattern>
        //         %d{dd.MM.yyyy HH:mm:ss} [%thread] %-5level %logger{36} -- %msg%n
        //       </pattern>
        //     </encoder>
        //   </appender>
        //   <root>
        //     <appender-ref ref="STDOUT" />
        //   </root>
        // </configuration>
        // Activate with: -Dlogback.configurationFile=mods/logback.xml   (SLF4J)
    }
}
<file_sep>/untitled/src/sample/B.java
package sample;
// Second branch of the default-method (diamond) demo: B re-declares the
// default display() that interface A also provides.
public interface B extends A {
    // Overrides A's default; a class implementing B gets this version unless
    // it overrides display() itself.
    default void display() {
        System.out.println("Displaying B");
    }
}
<file_sep>/Exceptions/src/gmbh/conteco/Main.java
package gmbh.conteco;
import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.sql.SQLException;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
 * Exception-hierarchy teaching demo plus a short Stream API example.
 * Most historical experiments are kept as commented-out code on purpose.
 */
public class Main {
    public static void main(String[] args) throws ClassNotFoundException {
        // Object
        // |
        // Throwable
        // Exception Error
        // Unchecked Exception
        // Runtime exceptions Virtual machine error
        // NullPointer exceptions
        // Checked Exception Assert error
        // IO Exception
        // Compile time exception
        //String str = null;
        //System.out.println(str.length());
        /*
        int a = 1;
        int b = 0;
        int result = 0;
        try {
            result = computeDivision(a,b);
            System.out.println(result);
        }
        catch (ArithmeticException ex)
        {
            System.err.println(ex.getMessage());
        }
        */
        //String input = "TEST";
        //int i = Integer.parseInt(input);
        /*
        // 0,1,2 indices -> length 3
        // ArrayIndexOutOfBoundsException
        int[] numbers = {1,2,3};
        System.out.println(numbers[4]);
        // java.lang.InterruptedException
        Class.forName("org.h2.Driver");
        // InsufficientFundsException
        java.sql.Connection connection;
        try {
            new FileInputStream("");
        } catch (FileNotFoundException | SecurityException e) {
            e.printStackTrace();
        } finally {
            if (connection != null) {
                try {
                    connection.close();
                } catch (SQLException throwables) {
                    throwables.printStackTrace();
                }
            }
        }
        */
        // Streams Java 1.0 - 1.7: System.out / System.in / System.err,
        // FileInputStream / FileOutputStream.
        // Java 8 Streams = Java Stream API:
        java.util.List<String> names = java.util.List.of("Albert", "Ahmed", "Jenny", "Lisa");
        // Demo: sort, skip the first element, collect (result intentionally unused).
        java.util.List<String> names_sorted = names.stream().sorted().skip(1).sorted().collect(Collectors.toList());
        // Infinite stream of even numbers; limit(10) makes it finite.
        Stream<Integer> infiniteStream = Stream.iterate(0, i -> i + 2);
        List<Integer> result = infiniteStream.limit(10).collect(Collectors.toList());
        System.out.println(result);
    }

    // Performs integer division; throws ArithmeticException when b == 0.
    static int divideByZero(int a, int b) {
        int i = a / b;
        return i;
    }

    /**
     * Divides a by b, returning 0 when the division fails.
     * Fixed: the original caught NumberFormatException, which integer division
     * never throws — division by zero raises ArithmeticException, so the
     * handler was unreachable and main would crash instead.
     */
    static int computeDivision(int a, int b) {
        int result = 0;
        try {
            result = divideByZero(a, b);
        } catch (ArithmeticException ex) {
            System.err.println(ex.getMessage());
        }
        return result;
    }
}
<file_sep>/JavaGenericsPart1/src/com/company/Calculate.java
package com.company;
import java.math.BigDecimal;
import java.util.List;
// Generics exercise comparing an upper-bounded wildcard (add1) with a bounded
// type parameter (add2) for summing numbers.
public class Calculate {
    /**
     * Sums the list as primitive doubles. The wildcard accepts any
     * List&lt;Integer&gt;, List&lt;Double&gt;, List&lt;BigDecimal&gt;, ...
     */
    public static double add1(List<? extends Number> list) {
        double result = 0;
        for (Number number : list) {
            result += number.doubleValue();
        }
        return result;
    }

    /**
     * Sums the list with BigDecimal arithmetic.
     * WARNING(review): the (T)(Number) casts are unchecked — the returned object
     * is actually a BigDecimal, so any caller with T != BigDecimal (e.g.
     * List&lt;Integer&gt;) gets a ClassCastException at the assignment site.
     * A safe version would return BigDecimal, but that changes the signature.
     */
    public static <T extends Number> T add2(List<T> list) {
        T result = (T) (Number) new BigDecimal("0");
        for (T number : list) {
            result = (T) (Number) new BigDecimal(number.toString())
                    .add(new BigDecimal(result.toString()));
        }
        return result;
    }
}
<file_sep>/RpnCalculator/src/Main.java
public class Main {
// Generic Stack -> Integer, Float, Double, Long
// Implementation eines Stacks
// Push & Pop
// RPN (UPN) ... reverse polish notation, umgekehrte polnische Notation
// (7+3)*2
// +, -, *, /
// Operand1 (7) Operator (*) Operand2 (3)
// 1. Operanden: ! Fakultät
// x ^ 3
// x ^ 2
// 0. Operanden: PI
// 7 3 PLUS 2 MAL
// RechnenOperationen in einer Klasse
// Lambda Expressions, Functional Interface
// +------------------+
// + 7 8 9
// + 4 5 6
// + 1 2 3
// String -> repeat, split
// Default Method -> Formattierung -> Ausgabe
// For -> Streams -> .forEach
// Functional Interfaces -> überlegen -> vorgefertigte Interfaces gibt, die ich verwenden kann.
public double process(double operand1, double operand2, Calculator calculator) {
return calculator.calculate(operand1, operand2);
}
public static void main(String[] args) {
Main calculator = new Main();
// The traditional way using anonymous class
System.out.println("Addition: " + calculator.process(3, 4, new Calculator() {
@Override
public Double calculate(Double operand1, Double operand2) {
return operand1 + operand2;
}
}));
// the lambda way; how simplified the code became
Calculator calcSubtraction = (Double operand1, Double operand2) -> {
return operand1 - operand2;
};
System.out.println("Subtraction: " + calculator.process(3, 4, calcSubtraction));
// the lambda way; further simplification using 'Type Inference'
System.out.println("Multiplication: " +
calculator.process(3,4, (num1, num2) -> {
return num1 * num2;
}));
// the lamba way; the simplified approach
System.out.println("Divide: " + calculator.process(3,4, (num1, num2) -> num1 / num2));
}
}<file_sep>/Java10/src/com/company/Main.java
package com.company;
import java.util.List;
/** Java 10 feature demo: local-variable type inference (JEP 286). */
public class Main {
    public static void main(String[] args) {
        // 'var' infers List<Integer> from the initializer.
        var numbers = List.of(1, 2, 3, 4, 5);
        // 'var' also works for the loop variable.
        for (var n : numbers) {
            System.out.println(n);
        }
        // Other Java 10 flags worth trying:
        // -XX:+UnlockkExperimentalVMOptions -XX:UseJVMCICompiler
        // -Xshare:on -XX:+UseAppCDS -XX:DumpLoadedClassList=hello_world:lst -cp hello_world.jar HelloWorld
        // -Xshare:off -XX:+UseAppCDS
    }
}
<file_sep>/Z1_addAll/src/module-info.java
// Module descriptor for the Z1.addAll JavaFX exercise.
module Z1.addAll {
    requires javafx.controls; // UI controls (buttons, lists, ...)
    requires javafx.fxml;     // FXML-based scene loading
    exports conteco.gmbh;     // application package must be exported for JavaFX to launch it
}
import java.util.Arrays;
import java.util.Optional;
import java.util.function.*;
/**
 * Arithmetic operations for the mini calculator, each pairing an operator
 * symbol with its implementation as a BiFunction.
 */
public enum Calc {
    ADDITION("+", (wert1, wert2) -> wert1 + wert2),
    SUBTRACTION("-", (wert1, wert2) -> wert1 - wert2),
    DIVISION("/", (wert1, wert2) -> wert1 / wert2),
    MULTIPLICATION("*", (wert1, wert2) -> wert1 * wert2),
    //EXPONENT ("^", (wert1, wert2) -> Math.pow(wert1, wert2));
    EXPONENT("^", Math::pow),
    MODULO("%", (wert1, wert2) -> wert1 % wert2);

    // final: enum constant state is fixed at construction.
    private final String operator;
    private final BiFunction<Double, Double, Double> function;

    private Calc(String operator, BiFunction<Double, Double, Double> function) {
        this.function = function;
        this.operator = operator;
    }

    /** Applies this operation to the two operands. */
    public Double getResult(Double wert1, Double wert2) {
        return function.apply(wert1, wert2);
    }

    /** Returns the operator symbol, e.g. "+" for ADDITION. */
    public String getOperator() {
        return this.operator;
    }

    /**
     * Looks up the enum constant for an operator symbol.
     *
     * @return the matching Calc, or null when the symbol is unknown
     *         (same contract as before; now expressed with Optional.orElse)
     */
    public static Calc getEnumValue(String operator) {
        // Arrays.stream avoids the intermediate List the original created.
        return Arrays.stream(Calc.values())
                .filter(element -> element.operator.equals(operator))
                .findFirst()
                .orElse(null);
    }
}
package sample;
// Base interface of the default-method (diamond) demo; interface B extends A
// and provides a competing default display().
public interface A {
    // Default implementation used when no subtype overrides display().
    default void display() {
        System.out.println("Displaying A");
    }
}
<file_sep>/SubscriberObserverPattern_Zoo/src/com/company/AnimalAddedListener.java
package com.company;
/**
 * Observer callback fired by Zoo whenever an Animal is added.
 * Marked @FunctionalInterface so it can be implemented with a lambda and the
 * single-abstract-method contract is compiler-checked (backward compatible —
 * it already had exactly one abstract method).
 */
@FunctionalInterface
public interface AnimalAddedListener {
    /** Invoked with the animal that was just added. */
    public void onAnimalAdded(Animal animal);
}
<file_sep>/IterableAndBiMap/src/com/company/Main.java
package com.company;
import java.util.Map;
// Demo driver for the project's BiMap: a map keyed by a pair (Integer, String)
// whose values are produced by the factory lambda given to the constructor.
public class Main {
    public static void main(String[] args) {
        // Factory builds a ValueStore from both keys.
        final BiMap<Integer, String, ValueStore> map = new BiMap<>((i, s) -> new ValueStore(i, s, i + " " + s.toUpperCase()));
        ValueStore value1 = map.get(1, "one");
        System.out.println(value1);
        // Same key pair again — the identity comparison below prints whether the
        // BiMap caches and returns the identical instance (presumably true;
        // confirm against BiMap's implementation).
        ValueStore value2 = map.get(1, "one");
        System.out.println(value1 == value2);
        // Populate further entries under first keys 1 and 5.
        map.get(1, "two");
        map.get(5, "red");
        map.get(5, "blue");
        map.get(5, "green");
        // Single-key overload returns the inner map: second key -> value.
        Map<String, ValueStore> map1 = map.get(1);
        for (Map.Entry<String, ValueStore> e : map1.entrySet()) {
            System.out.println(e.getKey() + " ==> " + e.getValue());
        }
        Map<String, ValueStore> map2 = map.get(5);
        for (Map.Entry<String, ValueStore> e : map2.entrySet()) {
            System.out.println(e.getKey() + " ==> " + e.getValue());
        }
    }
}
<file_sep>/Uebung0_CustomTextField/src/gmbh/conteco/MyOwnTextField.java
package gmbh.conteco;
import javafx.scene.control.TextField;
/**
 * A TextField that accepts digits only.
 * Fixed: the original pattern "[0-9]+" rejected the EMPTY string, and JavaFX
 * models deletions/clears as a replacement with "" — so users could type
 * digits but never delete them. "[0-9]*" admits the empty replacement.
 */
public class MyOwnTextField extends TextField {
    public MyOwnTextField() {
        this.setPrefWidth(70);
    }

    @Override
    public void replaceText(int start, int end, String text) {
        // Überprüfe ob die Eingabe eine Zahl (oder eine Löschung) ist
        if (text.matches("[0-9]*")) {
            // wenn ja, dann darf das eingegeben werden
            super.replaceText(start, end, text);
        }
    }

    @Override
    public void replaceSelection(String text) {
        // Überprüfe ob die Eingabe eine Zahl (oder eine Löschung) ist
        if (text.matches("[0-9]*")) {
            // wenn ja, dann darf das eingegeben werden
            super.replaceSelection(text);
        }
    }
}
<file_sep>/SubscriberObserverPattern_Zoo/src/com/company/Zoo.java
package com.company;
import java.util.ArrayList;
import java.util.List;
/**
 * Subject of the observer-pattern demo: holds animals and notifies
 * registered AnimalAddedListener instances whenever one is added.
 */
public class Zoo {
    private List<Animal> animals = new ArrayList<>();
    private List<AnimalAddedListener> listeners = new ArrayList<>();

    /** Adds the animal and fires the added-notification to all listeners. */
    public void addAnimal(Animal animal) {
        animals.add(animal);
        notifyAnimalAddedListeners(animal);
    }

    /** Stops the given listener from receiving further notifications. */
    public void deregisterAnimalAddedListener(AnimalAddedListener listener) {
        listeners.remove(listener);
    }

    /** Subscribes the listener to future add-notifications. */
    public void registerAnimalAddedListener(AnimalAddedListener listener) {
        listeners.add(listener);
    }

    /** Notifies every currently registered listener about the new animal. */
    protected void notifyAnimalAddedListeners(Animal animal) {
        for (AnimalAddedListener registered : listeners) {
            registered.onAnimalAdded(animal);
        }
    }
}
<file_sep>/Java8Annotations/src/Games.java
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
// Container annotation holding multiple @Game annotations.
// NOTE(review): this is the usual shape of a @Repeatable container — confirm
// that Game (declared elsewhere) carries @Repeatable(Games.class).
@Retention(RetentionPolicy.RUNTIME)
public @interface Games {
    Game[] value();
}
package com.company;
import javax.script.ScriptEngineManager;
import javax.script.ScriptEngine;
import javax.script.ScriptException;
public class Main {
public static void main(String[] args) {
ScriptEngineManager scriptEngineManager = new ScriptEngineManager();
ScriptEngine nashorn = scriptEngineManager.getEngineByName("nashorn");
String meinName = "Oliver";
Integer result = null;
try {
nashorn.eval("print('" + meinName + "')");
result = (Integer)nashorn.eval("10 + 2");
} catch (ScriptException ex) {
ex.printStackTrace();
}
System.out.println(result.toString());
}
}
<file_sep>/Tamagotchi_EventDemo/src/sample/Main.java
package sample;
import javafx.application.Application;
import javafx.scene.Scene;
import javafx.stage.Stage;
import java.util.ArrayList;
import java.util.List;
// Demo of custom JavaFX events: a TamagotchiButton firing HungerEvent types.
public class Main extends Application {
    @Override
    public void start(Stage primaryStage) throws Exception {
        TamagotchiButton button = new TamagotchiButton();
        // Handler for the custom HUNGRY event type.
        button.addEventHandler(HungerEvent.HUNGRY, event -> {
            System.out.println("I'm hungry.");
        });
        // Handler for the custom PEE event type.
        button.addEventHandler(HungerEvent.PEE, event -> {
            System.out.println("😊");
        });
        // Setting the state presumably fires HungerEvent.HUNGRY —
        // confirm in TamagotchiButton's implementation.
        button.setHungry(true);
        primaryStage.setTitle("Hello World");
        primaryStage.setScene(new Scene(button, 300, 275));
        primaryStage.show();
        // Leftover scratch code from a generics/wildcards exercise; unrelated
        // to the event demo (the Wildcards call is commented out).
        List<Float> meineZahlen = new ArrayList<>();
        meineZahlen.add(3f);
        meineZahlen.add(5f);
        //Wildcards.add(meineZahlen);
    }

    public static void main(String[] args) {
        launch(args);
    }
}
<file_sep>/JavaSerializationInputFiler/src/FilterTest.java
import java.io.ObjectInputFilter;
import java.io.ObjectInputFilter.FilterInfo;
import java.lang.invoke.SerializedLambda;
import java.lang.reflect.Proxy;
import java.util.ArrayList;
import java.util.List;
// Exercises the serialization-filtering API (ObjectInputFilter, JEP 290).
public class FilterTest {
    /**
     * Builds a pattern-based filter such as "maxarray=10;maxarray=9" (the limit
     * repeated with value and value-1) and runs it against synthetic FilterInfo
     * values.
     * NOTE(review): the Status returned by checkInput is discarded — nothing is
     * asserted; this only demonstrates the call.
     */
    public static void testFilter(String name, int value) {
        Class<?> arrayClass = new int[0].getClass(); // i.e. int[].class
        String pattern = String.format("%s=%d;%s=%d", name, value, name, value - 1);
        ObjectInputFilter filter = ObjectInputFilter.Config.createFilter(pattern);
        filter.checkInput(new FilterValues(arrayClass, value, value, value, value));
    }

    // Immutable value-object implementation of FilterInfo, used to feed
    // hand-picked metrics (class, array length, depth, refs, bytes) to a filter.
    static class FilterValues implements ObjectInputFilter.FilterInfo {
        private final Class<?> classVar;    // class being deserialized
        private final long arrayLength;     // requested array length (-1 if n/a)
        private final long depth;           // current graph depth
        private final long references;      // references seen so far
        private final long streamBytes;     // bytes consumed so far

        public FilterValues(Class<?> classVar, long arrayLength, long depth,
                            long references, long streamBytes) {
            this.classVar = classVar;
            this.arrayLength = arrayLength;
            this.depth = depth;
            this.references = references;
            this.streamBytes = streamBytes;
        }

        @Override
        public Class<?> serialClass() {
            return this.classVar;
        }

        @Override
        public long arrayLength() {
            return this.arrayLength;
        }

        @Override
        public long depth() {
            return this.depth;
        }

        @Override
        public long references() {
            return this.references;
        }

        @Override
        public long streamBytes() {
            return streamBytes;
        }
    }

    /**
     * Example custom filter distinguishing lambdas, JDK proxies, and ordinary
     * classes.
     * NOTE(review): the 'classes' list is populated but never read, and the
     * method unconditionally returns ALLOWED — the classification is dead code
     * as written. Also note classes.add(filter.getClass()) adds the FilterInfo
     * implementation's class, not the serialized class — likely unintended.
     */
    public ObjectInputFilter.Status checkInput(FilterInfo filter) {
        List<Class<?>> classes = new ArrayList<>();
        if (filter.serialClass() != null) {
            // Lambda classes carry the synthetic "$$Lambda$" marker in their name.
            if (filter.serialClass().getName().contains("$$Lambda$")) {
                classes.add(SerializedLambda.class);
            } else if (Proxy.isProxyClass(filter.serialClass())) {
                classes.add(Proxy.class);
            } else {
                classes.add(filter.getClass());
            }
        }
        return ObjectInputFilter.Status.ALLOWED;
    }
}
package gmbh.conteco;
import javafx.application.Application;
import javafx.collections.FXCollections;
import javafx.collections.ObservableList;
import javafx.geometry.Insets;
import javafx.scene.Parent;
import javafx.scene.Scene;
import javafx.scene.control.*;
import javafx.scene.control.Button;
import javafx.scene.control.Label;
import javafx.scene.control.Menu;
import javafx.scene.control.MenuBar;
import javafx.scene.control.MenuItem;
import javafx.scene.control.TextArea;
import javafx.scene.control.TextField;
import javafx.scene.control.cell.PropertyValueFactory;
import javafx.scene.layout.BorderPane;
import javafx.scene.layout.GridPane;
import javafx.scene.layout.HBox;
import javafx.stage.Stage;
import java.io.*;
import java.nio.file.Files;
import java.time.LocalDate;
import java.util.ArrayList;
import java.util.Optional;
/**
 * Driver's logbook (Kilometergeld) JavaFX application.
 *
 * Left side: a form for entering a trip; center: a TableView of all trips;
 * top: menus for managing license plates and drivers.
 * Persistence: trips as CSV lines in res/Trips.txt; driver and license-plate
 * lists via Java object serialization in res/*.ser.
 */
public class Main extends Application {
    // File locations (relative to the working directory).
    private String resourceDirectory = "res";
    private File tripsFile = new File(resourceDirectory + "/Trips.txt");
    File driversFile = new File(resourceDirectory + "/Drivers.ser");
    File licensePlatesFile = new File(resourceDirectory + "/LicensePlates.ser");
    // Form controls.
    private CheckBox businessTripCheckBox = new CheckBox("Business trip?");
    private ComboBox<String> licensePlateComboBox = new ComboBox<>();
    private ComboBox<String> driverComboBox = new ComboBox<>();
    private DatePicker datePicker = new DatePicker();
    private TextField kmAtStartTxf = new TextField();
    private TextField kmAtEndTxf = new TextField();
    private TextArea purposeTxf = new TextArea();
    private TextArea routeTxf = new TextArea();
    // Observable backing lists: the table and combo boxes update automatically.
    private ObservableList<Trip> tripList = FXCollections.observableArrayList();
    private ObservableList<String> licensePlateList = FXCollections.observableArrayList();
    private ObservableList<String> driverList = FXCollections.observableArrayList();
    TableView<Trip> tripTableView = new TableView<>();

    @Override
    public void start(Stage primaryStage) throws Exception {
        primaryStage.setScene(new Scene(createRoot(), 1100, 500));
        primaryStage.setTitle("Driver's logbook");
        primaryStage.show();
    }

    public static void main(String[] args) {
        launch(args);
    }

    /**
     * Builds the whole scene graph (menu bar, entry form, trip table) and then
     * loads persisted data into the observable lists.
     */
    private Parent createRoot() {
        BorderPane root = new BorderPane();
        /* Labels Config */
        Label licensePlateLabel = new Label("License Plate");
        Label driverLabel = new Label("Driver");
        Label dateLabel = new Label("Date");
        Label kmAtStartLabel = new Label("KM at trip start");
        Label kmAtEndLabel = new Label("KM at trip end");
        Label purposeLabel = new Label("Purpose");
        Label routeLabel = new Label("Route");
        businessTripCheckBox.setSelected(true);
        /* Grid Config — column 0: labels, column 1: inputs */
        GridPane grid = new GridPane();
        grid.add(licensePlateLabel, 0, 0);
        grid.add(licensePlateComboBox, 1, 0);
        grid.add(driverLabel, 0, 1);
        grid.add(driverComboBox, 1, 1);
        grid.add(dateLabel, 0, 2);
        grid.add(datePicker, 1, 2);
        grid.add(kmAtStartLabel, 0, 3);
        grid.add(kmAtStartTxf, 1, 3);
        grid.add(kmAtEndLabel, 0, 4);
        grid.add(kmAtEndTxf, 1, 4);
        grid.add(businessTripCheckBox, 1, 5);
        grid.add(purposeLabel, 0, 6);
        grid.add(purposeTxf, 1, 6);
        grid.add(routeLabel, 0, 7);
        grid.add(routeTxf, 1, 7);
        /* Save and Cancel Buttons */
        Button saveButton = new Button("Save Trip");
        saveButton.setOnAction(handler -> {
            saveTrip();
        });
        Button cancelButton = new Button("Cancel");
        cancelButton.setOnAction(handler -> {
            cancelCreation();
        });
        HBox saveAndCancel = new HBox(saveButton, cancelButton);
        grid.add(saveAndCancel, 1, 8);
        /* Layout Config */
        kmAtStartTxf.setMaxWidth(200);
        kmAtEndTxf.setMaxWidth(200);
        purposeTxf.setMaxWidth(200);
        routeTxf.setMaxWidth(200);
        saveButton.setMinWidth(80);
        cancelButton.setMinWidth(80);
        saveAndCancel.setSpacing(10);
        grid.setPadding(new Insets(20));
        grid.setHgap(40);
        grid.setVgap(10);
        root.setTop(createMenu());
        root.setLeft(grid);
        /* Data load — bind observable lists to controls, then fill them */
        licensePlateComboBox.setItems(licensePlateList);
        driverComboBox.setItems(driverList);
        configureTable();
        root.setCenter(tripTableView);
        loadTrips();
        loadDrivers();
        loadLicensePlates();
        return root;
    }

    /* Create / Cancel Methods */

    /**
     * Builds a Trip from the current form values.
     * NOTE(review): Long.parseLong throws NumberFormatException on empty/non-numeric
     * km fields — there is no validation before saving.
     */
    private Trip createTrip() {
        String licensePlate = licensePlateComboBox.getSelectionModel().getSelectedItem();
        String driver = driverComboBox.getSelectionModel().getSelectedItem();
        LocalDate date = datePicker.getValue();
        long kmAtStart = Long.parseLong(kmAtStartTxf.getText());
        long kmAtEnd = Long.parseLong(kmAtEndTxf.getText());
        long kmDriven = kmAtEnd - kmAtStart;
        boolean isBusinessTrip = businessTripCheckBox.isSelected();
        String purpose = purposeTxf.getText();
        String route = routeTxf.getText();
        return new Trip(licensePlate, driver, date, kmAtStart, kmAtEnd, kmDriven, isBusinessTrip, purpose, route);
    }

    // Resets the entry form to its initial state.
    private void cancelCreation() {
        businessTripCheckBox.setSelected(true);
        licensePlateComboBox.getSelectionModel().clearSelection();
        driverComboBox.getSelectionModel().clearSelection();
        datePicker.setValue(LocalDate.now());
        kmAtStartTxf.setText("");
        kmAtEndTxf.setText("");
        purposeTxf.setText("");
        routeTxf.setText("");
    }

    /* IO Methods */

    /**
     * Appends the form's trip to the list and rewrites the whole CSV file.
     * (Rewriting everything keeps file and list in sync at the cost of I/O.)
     */
    private void saveTrip() {
        try (BufferedWriter writer = Files.newBufferedWriter(tripsFile.toPath())) {
            Trip newTrip = createTrip();
            tripList.add(newTrip);
            for (Trip thisTrip : tripList) {
                writer.write(thisTrip.exportToCSV());
                writer.newLine();
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    // Loads all trips from the CSV file, one Trip per line; no-op if absent.
    private void loadTrips() {
        if (!CheckFolderExists() || !tripsFile.exists())
            return;
        try (BufferedReader reader = Files.newBufferedReader(this.tripsFile.toPath())) {
            String line = null;
            while ((line = reader.readLine()) != null) {
                tripList.add(Trip.importNewFromCSV(line));
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    // Serializes the driver list. Copied to a plain ArrayList first because
    // the ObservableList wrapper is not serializable.
    private void saveDrivers() {
        try (ObjectOutputStream driversStream = new ObjectOutputStream(new FileOutputStream(driversFile))) {
            ArrayList<String> importList = new ArrayList<>(driverList);
            driversStream.writeObject(importList);
        } catch (NotSerializableException e) {
            System.out.println("Not everything was serialized.");
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    // Deserializes the driver list saved by saveDrivers(); no-op if absent.
    private void loadDrivers() {
        if (!CheckFolderExists() || !driversFile.exists())
            return;
        try (ObjectInputStream driversStream = new ObjectInputStream(new FileInputStream(driversFile))) {
            ArrayList<String> importList = (ArrayList<String>) driversStream.readObject();
            driverList.addAll(importList);
        } catch (NotSerializableException e) {
            System.out.println("Not everything was deserialized.");
        } catch (IOException e) {
            e.printStackTrace();
        } catch (ClassNotFoundException e) {
            e.printStackTrace();
        }
    }

    // Serializes the license-plate list (same pattern as saveDrivers).
    private void saveLicensePlates() {
        try (ObjectOutputStream licensePlatesStream = new ObjectOutputStream(new FileOutputStream(licensePlatesFile))) {
            ArrayList<String> importList = new ArrayList<>(licensePlateList);
            licensePlatesStream.writeObject(importList);
        } catch (NotSerializableException e) {
            System.out.println("Not everything was serialized.");
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    // Deserializes the license-plate list; no-op if absent.
    private void loadLicensePlates() {
        if (!CheckFolderExists() || !licensePlatesFile.exists())
            return;
        try (ObjectInputStream licensePlatesStream = new ObjectInputStream(new FileInputStream(licensePlatesFile))) {
            ArrayList<String> importList = (ArrayList<String>) licensePlatesStream.readObject();
            licensePlateList.addAll(importList);
        } catch (NotSerializableException e) {
            System.out.println("Not everything was deserialized.");
        } catch (IOException e) {
            e.printStackTrace();
        } catch (ClassNotFoundException e) {
            e.printStackTrace();
        }
    }

    // Ensures the res/ folder exists, creating it when needed.
    // (Name violates lowerCamelCase convention — kept for byte-compatibility.)
    private boolean CheckFolderExists() {
        File resFolder = new File(this.resourceDirectory);
        if (!resFolder.exists()) {
            return resFolder.mkdir();
        }
        return true;
    }

    /* Table Config Methods */

    // Creates one column per Trip property (via PropertyValueFactory, which
    // looks up the matching getter by reflection) and binds the trip list.
    private void configureTable() {
        TableColumn<Trip, String> licensePlateColumn = createColumn("License Plate", "licensePlate");
        TableColumn<Trip, String> driverColumn = createColumn("Driver", "driver");
        TableColumn<Trip, String> dateColumn = createColumn("Date", "dateAsString");
        TableColumn<Trip, String> kmAtStartColumn = createColumn("KM at start", "kmAtStart");
        TableColumn<Trip, String> kmAtEndColumn = createColumn("KM at end", "kmAtEnd");
        TableColumn<Trip, String> kmDrivenColumn = createColumn("Total km driven", "kmDriven");
        TableColumn<Trip, String> isBusinessColumn = createColumn("Business trip?", "isBusinessTrip");
        TableColumn<Trip, String> purposeColumn = createColumn("Purpose", "purpose");
        TableColumn<Trip, String> routeColumn = createColumn("Route", "route");
        tripTableView.getColumns().addAll(licensePlateColumn, driverColumn, dateColumn, kmAtStartColumn, kmAtEndColumn, kmDrivenColumn, isBusinessColumn, purposeColumn, routeColumn);
        tripTableView.setItems(tripList);
    }

    // Helper: builds a column whose cells read the named bean property.
    private <T> TableColumn<T, String> createColumn(String columnName, String propertyName) {
        TableColumn<T, String> newColumn = new TableColumn<>(columnName);
        newColumn.setCellValueFactory(new PropertyValueFactory<>(propertyName));
        return newColumn;
    }

    /* Menu Config Methods */

    /**
     * Builds the menu bar: create/delete/save entries for license plates and
     * drivers, each using simple input/choice dialogs.
     */
    private MenuBar createMenu() {
        MenuBar menuBar = new MenuBar();
        /* License Plate Management */
        Menu licensePlateMenu = new Menu("Manage License Plates");
        MenuItem createLicensePlateMenu = new MenuItem("Create New");
        createLicensePlateMenu.setOnAction(handler -> {
            TextInputDialog dialog = new TextInputDialog();
            dialog.setTitle("Dialog");
            dialog.setHeaderText("New License Plate");
            dialog.setContentText("Please enter a valid license plate number:");
            Optional<String> result = dialog.showAndWait();
            result.ifPresent(name -> licensePlateList.addAll(name));
        });
        MenuItem deleteLicensePlateMenu = new MenuItem("Delete");
        deleteLicensePlateMenu.setOnAction(handler -> {
            ChoiceDialog<String> dialog = new ChoiceDialog<>(null, licensePlateList);
            dialog.setTitle("Dialog");
            dialog.setHeaderText("Delete License Plate");
            dialog.setContentText("Choose a license plate to delete:");
            Optional<String> result = dialog.showAndWait();
            result.ifPresent(selection -> licensePlateList.remove(selection));
        });
        MenuItem saveCurrentPlatesMenu = new MenuItem("Save current");
        saveCurrentPlatesMenu.setOnAction(handler -> {
            saveLicensePlates();
        });
        licensePlateMenu.getItems().addAll(createLicensePlateMenu, deleteLicensePlateMenu, saveCurrentPlatesMenu);
        /* Driver Management (structure mirrors the license-plate menu) */
        Menu driverMenu = new Menu("Manage Drivers");
        MenuItem createDriver = new MenuItem("Create New");
        createDriver.setOnAction(handler -> {
            TextInputDialog dialog = new TextInputDialog();
            dialog.setTitle("Dialog");
            dialog.setHeaderText("New Driver");
            dialog.setContentText("Please enter a valid driver name:");
            Optional<String> result = dialog.showAndWait();
            result.ifPresent(name -> driverList.addAll(name));
        });
        MenuItem deleteDriver = new MenuItem("Delete");
        deleteDriver.setOnAction(handler -> {
            ChoiceDialog<String> dialog = new ChoiceDialog<>(null, driverList);
            dialog.setTitle("Dialog");
            dialog.setHeaderText("Delete Driver");
            dialog.setContentText("Choose a driver to delete:");
            Optional<String> result = dialog.showAndWait();
            result.ifPresent(selection -> driverList.remove(selection));
        });
        MenuItem saveCurrentDriversMenu = new MenuItem("Save current");
        saveCurrentDriversMenu.setOnAction(handler -> {
            saveDrivers();
        });
        driverMenu.getItems().addAll(createDriver, deleteDriver, saveCurrentDriversMenu);
        menuBar.getMenus().addAll(licensePlateMenu, driverMenu);
        return menuBar;
    }
}
package com.company;
// Functional interface: supplies a person's job description.
@FunctionalInterface
public interface PersonenJob {
    // Returns the job title/description string.
    String getJob();
}
package gmbh.conteco;
/**
 * Minimal generic mutable holder for a single value — used as a mutable cell
 * that lambdas can write to (effectively-final workaround).
 *
 * @param <T> type of the wrapped value
 */
public class Box<T> {
    // Intentionally public: callers mutate it directly (e.g. counter.value++).
    public T value;

    public Box(T start) {
        this.value = start;
    }

    /** Returns the wrapped value's string form; "null" when empty. */
    @Override
    public String toString() {
        // String.valueOf is null-safe; the original NPE'd on a null value.
        return String.valueOf(this.value);
    }
}
// Nashorn script: pulls java.math.BigDecimal into JavaScript for exact decimal math.
var BigDecimal = Java.type('java.math.BigDecimal');
// Computes income * interest / 100, rounded to 2 decimal places with
// banker's rounding (ROUND_HALF_EVEN).
function calculate (income, interest) {
    var result = new BigDecimal(income).multiply(new BigDecimal(interest)).divide ( new BigDecimal ("100"), 2, BigDecimal.ROUND_HALF_EVEN);
    return result.toPlainString();
}
// Should print 177509536812209646641661.41
// NOTE(review): the arguments arrive as JS numbers (IEEE-754 doubles), so a
// 25-digit literal loses precision BEFORE BigDecimal sees it — pass string
// arguments to get truly exact results.
print(calculate(1241325432253214134526324,14.3))
import java.io.BufferedReader;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.RandomAccessFile;
import java.net.URI;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
/**
 * NIO vs. classic IO demo:
 * 1) reads ./customers.txt through a FileChannel + ByteBuffer and echoes it;
 * 2) copies ./customers.txt to ./output.txt byte-by-byte with java.io streams.
 */
public class Main {
    public static void main(String[] args) throws IOException {
        // --- NIO: channel + buffer read ---
        // try-with-resources replaces the manual close; the original leaked
        // the RandomAccessFile if a read threw before file.close().
        try (RandomAccessFile file = new RandomAccessFile("./customers.txt", "rw")) {
            FileChannel inputChannel = file.getChannel(); // closed with the file
            ByteBuffer buffer = ByteBuffer.allocate(32);
            int bytesRead = inputChannel.read(buffer);
            while (bytesRead != -1) {
                System.out.println("Read " + bytesRead);
                buffer.flip(); // switch buffer from filling to draining
                while (buffer.hasRemaining()) {
                    System.out.print((char) buffer.get());
                }
                buffer.clear(); // ready for the next read
                bytesRead = inputChannel.read(buffer);
            }
        }
        // --- classic IO: byte-by-byte stream copy ---
        // try-with-resources replaces the verbose finally/null-check close.
        try (FileInputStream input = new FileInputStream("./customers.txt");
             FileOutputStream output = new FileOutputStream("./output.txt")) {
            int i;
            while ((i = input.read()) != -1) {
                output.write(i);
            }
        }
    }
}
<file_sep>/JavaCRUDwithH2_6v2/src/CreateCustomerSql.java
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;
public final class CreateCustomerSql {
private CreateCustomerSql() {
}
public static boolean execute() throws ClassNotFoundException, SQLException {
Connection connection = ConnectionString.getInstance().getConnection();
Statement statement = connection.createStatement();
String sql = "CREATE TABLE IF NOT EXISTS CUSTOMER" +
"(" +
"ID INT PRIMARY KEY," +
//"ID INT," +
"NAME VARCHAR(20)," +
"AGE NUMBER," +
"ADDRESS VARCHAR(20)," +
"SALARY NUMBER, " +
"START_DATE DATE" +
")";
return statement.execute(sql);
}
}
<file_sep>/JavaHTTP2Push/src/Sample3.java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
public class Sample3 {
private static List<CompletableFuture<Void>> asyncPushRequests = new ArrayList<CompletableFuture<Void>>();
public static void call() {
HttpClient client = HttpClient.newHttpClient();
//HttpResponse
HttpRequest request = HttpRequest.newBuilder()
.uri(URI.create("https://http2.golang.org/serverpush")
.build();
client.sendAsync(request,
HttpResponse.BodyHandlers.ofString(), pushPromiseHandler())
.thenApply(HttpResponse::body)
.thenAccept((b) -> System.out.println("\nMain resource:\n" + b))
.join();
asyncPushRequests.forEach(CompletableFuture::join);
System.out.println("\nFetched a total of " +
asyncPushRequests.size() + " push requests");
}
private static HttpResponse.PushPromiseHandler<String> getPromiseHandler () {
return (HttpRequest pushPromiseRequest,
Function<HttpResponse.BoHandler<String> ,
CompletableFuture<HttpResponse<String>>> acceptor) -> {
CompletableFuture<Void> pushcf =
acceptor.apply(HttpResponse.BodyHandlers.ofString())
.thenApply(HttpResponse::body);
.thenAccept((b) -> System.out.println(
"\nPushed resource body:\n " + b));
asyncPushRequests.add(pushcf);
System.out.println("Just got pushed, size = " + AsyncBoxView.size());
System.out.println("Just got pushed, URL = " + AsyncBoxView.uri());
System.out.println("Just got pushed, header = " + AsyncBoxView.header());
}
}
| 12b32a7185fead4ade89ab162894c3c78cb5c2c7 | [
"JavaScript",
"Java"
] | 63 | Java | OliverKohlDSc/JavaCourse | a3b8fef206212f331989c42e87ac8aad9028a220 | 24cfaab6dcc86b0c738fc6b25c44bbfdd4594a8c |
refs/heads/master | <file_sep># Nuntius
An online platform aims at solving current issue in education system, enhancing collaboration among teacher-parent-student.
## Getting Started
The product of this project is a webapp. Go to [NUNTIUS](https://nuntius-6e5fd.firebaseapp.com). You can register using your email or your google account. Features for ease communication among teachers, parents and students are being built.
### Prerequisites
Any browser and network.
### Installing
Not needed.
## Deployment
User information is stored in and retrieved from firebase (https://nuntius-6e5fd.firebaseapp.com).
## Built With
* HTML5, CSS3, Javascript, Jquery, JSON
* [Firebase](https://firebase.google.com/) - The firebase web database, account authentication, storage and hosting used
## Contributing
Please read [NUNTIUS.INSIGHT](https://github.com/humble92/Nuntius/pulse) for details on our code of conduct, and the process for submitting pull requests to us.
## Versioning
We use [github](https://github.com/humble92/Nuntius/commits/master) for versioning. For the versions available, see the [NUNTIUS](https://github.com/humble92/Nuntius).
## Authors
* **<NAME>**
* **<NAME>**
* **<NAME>**
See also the list of [contributors](https://github.com/humble92/Nuntius/graphs/contributors) who participated in this project.
## Acknowledgments
* BCIT Project instructors: <NAME> and <NAME>
<file_sep><!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="Generator" content="EditPlus®">
<meta name="Author" content="Group 7">
<meta name="Keywords" content="BCIT, CST, Project 1">
<meta name="Description" content="Practice project: not only a practice, also our best for our society.">
<title>Nuntius</title>
</head>
<body>
<b>Problem: Teacher-student-parents communications and interactions are inefficient and ineffective!</b>
<br>
<a href="persona1.html">Persona 1: (Teacher)</a>
<br>
<a href="persona2.html">Persona 2: (Student)</a>
<br>
<a href="persona3.html">Persona 3: (Parent)</a>
</body>
</html>
<file_sep><!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Nuntius-MainPage</title>
<link href="../css/messageSystem.css" rel="stylesheet">
<link href="../css/header.css" rel="stylesheet">
<link href="../css/responsive.css" rel="stylesheet">
<script src="https://www.gstatic.com/firebasejs/5.5.9/firebase.js"></script>
<script src="../js/setup_firebase.js"></script>
<script src="../js/jquery-3.3.1.min.js"></script>
<script src="../js/common.js"></script>
<script src="../js/pagecommon.js"></script>
</head>
<body>
<div id="header">
<div id="menu-icon">
<div class="menu-bar"> </div>
<div class="menu-bar"> </div>
<div class="menu-bar"> </div>
</div>
<div id="userpic">
</div>
<div id="logo">
<a href="main.html"><img src="../img/LOGO(new).png" alt="logo" width="120px" height="auto"></a>
</div>
</div>
<div id="container">
<div id="menu">
<ul>
<li> <a href="messageSystem.html">Message System</a></li>
<li> <a href="performanceRecord.html" >Performance Record</a></li>
<li><a href="notimplemented.html">Student Grades</a></li>
<li><a href="notimplemented.html">Bookstore</a></li>
<li><a href="notimplemented.html">Library</a></li>
<li><a href="notimplemented.html">Group Study</a></li>
</ul>
</div>
<div id="dropdown">
<ul>
<li id="profile"><a href="profile.html">Profile</a></li>
<li id="accountseting"><a href="notimplemented.html">Account Setting</a></li>
<li id="logout"><a href="login.html">Log Out</a></li>
</ul>
</div>
<div id="content">
<button class="button"><a href="email.html">compose</a></button>
<br>
<br>
<div class="galleryItem">
<div class="msghead">
<div class="headtext"><h3>Math</h3></div>
<div class="headbtn">
<button id="myBtn1">Read</button>
<!-- The Modal -->
<div id="myModal1" class="modal">
<!-- Modal content -->
<div class="modal-content">
<span class="close">×</span>
<p>Some text in the Modal..</p>
</div>
</div>
</div>
</div>
<p>We will be working on a Finance Project this week.<br>Step 1. Math 9 Finance Project<br>Step 2. http://quicklatex.com/Latex<br>Step 3. Budgeting Spreadsheet Math 9 Finance Project PT II</p>
</div>
<div class="galleryItem">
<div class="msghead">
<div class="headtext"><h3>English</h3></div>
<div class="headbtn">
<button id="myBtn2">Read</button>
<!-- The Modal -->
<div id="myModal2" class="modal">
<!-- Modal content -->
<div class="modal-content">
<span class="close">×</span>
<p>Some text in the Modal..</p>
</div>
</div>
</div>
</div>
<p>First thing: I will be grouping you into podcast teams for tomorrow. I will have checked off your summary and given it back tomorrow. Think of a theme you’d like to explore with your groups.</p>
</div>
<div class="galleryItem">
<div class="msghead">
<div class="headtext"><h3>Physics</h3></div>
<div class="headbtn">
<button id="myBtn3">Read</button>
<!-- The Modal -->
<div id="myModal3" class="modal">
<!-- Modal content -->
<div class="modal-content">
<span class="close">×</span>
<p>Some text in the Modal..</p>
</div>
</div>
</div>
</div>
<p>Questions on pg 28<br>Finish 1.3 notes<br>Practice pg 28 #3<br>1.3 Review pg 33 #1-5<br>Chapter 1 Review (p 34 #1-21)</p>
</div>
<div class="galleryItem">
<div class="msghead">
<div class="headtext"><h3>Chemistry</h3></div>
<div class="headbtn">
<button id="myBtn4">Read</button>
<!-- The Modal -->
<div id="myModal4" class="modal">
<!-- Modal content -->
<div class="modal-content">
<span class="close">×</span>
<p>Some text in the Modal..</p>
</div>
</div>
</div>
</div>
<p>You can find the links from the menu structure or from the links below<br>Please Click Here if you wish to take the Oct. 17th Retest. Here is our course outline, learning outcomes and the periodic table (and a virtual one) we will be using.</p>
</div>
<div class="galleryItem">
<div class="msghead">
<div class="headtext"><h3>Geology</h3></div>
<div class="headbtn">
<button id="myBtn5">Read</button>
<!-- The Modal -->
<div id="myModal5" class="modal">
<!-- Modal content -->
<div class="modal-content">
<span class="close">×</span>
<p>Some text in the Modal..</p>
</div>
</div>
</div>
</div>
<p>Rock Cycle & Bowen’s Quiz /10<br>Finish/go over Igneous lab,<br>Summary Qs pg 29 DUE NEXT CLASS<br>Finish Ch 3 notes Pg 26-27<br>Assign White Workbook pg 19 #48 – 57 and 59</p>
</div>
<div class="galleryItem">
<div class="msghead">
<div class="headtext"><h3>Economics</h3></div>
<div class="headbtn">
<button id="myBtn6">Read</button>
<!-- The Modal -->
<div id="myModal6" class="modal">
<!-- Modal content -->
<div class="modal-content">
<span class="close">×</span>
<p>Some text in the Modal..</p>
</div>
</div>
</div>
</div>
<p>Lectures, assignments or readings are given daily. Students will be involved in: discussing the readings, debating, partner/group work, watching and/or critiquing films in the curriculum, and understanding charts and graphs depicting information covered.</p>
</div>
<div class="galleryItem">
<div class="msghead">
<div class="headtext"><h3>Biology</h3></div>
<div class="headbtn">
<button id="myBtn7">Read</button>
<!-- The Modal -->
<div id="myModal7" class="modal">
<!-- Modal content -->
<div class="modal-content">
<span class="close">×</span>
<p>Some text in the Modal..</p>
</div>
</div>
</div>
</div>
<p>Please download the practice questions and complete it before thursday.</p>
</div>
<div class="galleryItem">
<div class="msghead">
<div class="headtext"><h3>PE</h3></div>
<div class="headbtn">
<button id="myBtn8">Read</button>
<!-- The Modal -->
<div id="myModal8" class="modal">
<!-- Modal content -->
<div class="modal-content">
<span class="close">×</span>
<p>Some text in the Modal..</p>
</div>
</div>
</div>
</div>
<p>Tomorrow PE class will be on Gym A.</p>
</div>
<div class="galleryItem">
<div class="msghead">
<div class="headtext"><h3>Social studies</h3></div>
<div class="headbtn">
<button id="myBtn9">Read</button>
<!-- The Modal -->
<div id="myModal9" class="modal">
<!-- Modal content -->
<div class="modal-content">
<span class="close">×</span>
<p>Some text in the Modal..</p>
</div>
</div>
</div>
</div>
<p>Global and regional conflicts that have been a powerful force in shaping our contemporary world and identities.
<br>The development of political institutions are influenced by economic, social, ideological, and geographic factors.</p>
</div>
</div>
<footer>
<div id="footer"><p>2018 CST Project1 Group7</p></div>
</footer>
</div>
<script src="../js/messageSystem.js"></script>
</body>
</html>
<file_sep>// A $( document ).ready() block.
$( document ).ready(function() {
initApp = function() {
firebase.auth().onAuthStateChanged(function(user) {
if (user) {
// User is signed in.
var displayName = user.displayName;
var email = user.email;
var emailVerified = user.emailVerified;
var photoURL = user.photoURL;
var uid = user.uid;
var phoneNumber = user.phoneNumber;
var providerData = user.providerData;
user.getIdToken().then(function(accessToken) {
//Save user info in Session Storage
var userInfo = {'uid': uid, 'name':displayName, 'email':email};
sessionStorage.setItem('userInfo', JSON.stringify(user));
});
} else {
sessionStorage.removeItem('userInfo');
window.location.href='/';
// User is signed out.
//document.getElementById('sign-in-status').textContent = 'Signed out';
//document.getElementById('sign-in').textContent = 'Sign in';
//document.getElementById('account-details').textContent = null;
}
}, function(error) {
console.log(error);
});
};
window.addEventListener('load', function() {
initApp();
});
$("#logout").on("click", function(e) { // Logout button listener
var promise = firebase.auth().signOut(); // Firebase Authenticated User Signout
promise.then(function(){
sessionStorage.setItem('user');
window.location.href='/';
});
});
});
| 1576b3451177d9df9c90c0f8d6c1172a44f257ea | [
"Markdown",
"JavaScript",
"HTML"
] | 4 | Markdown | humble92/Nuntius | 4281aab67694573c3d847d8bd05b0dc0c305b179 | e0a353ea08d01eebf84e60ae5f1f49f18abda2f1 |
refs/heads/master | <file_sep>export const tableSchema ={
columns:[//-------------定义每一列如何显示
{
title: '用户名', dataItemKey: 'username', width: 100,
render:'showUserNameColumnFnName',//用户可能需要显示高亮一点
},
{ title: '专业', dataItemKey: 'dept', width: 100 },
{ title: '主页', dataItemKey: 'homepage',width: 300,
render:'showMyHomePageFnName'//主页可能需要显示成超链接
},
{ title: '操作', dataItemKey: '', width:100 },//dateKey为空,标识为操作列
],
rowKeyNames:['username','dept'], //--定义每一行记录根据哪几个字段能确定数据库中的一条记录
width:600,/**表格的宽度 */
}
export const data = [
{ username: '张三',dept:"软件工程",homepage:'wwww.baidu.com' ,key:'001'},
//key 为查询记录不重复的一个字符串,可以不传,如果不传会根据tableSchema中定义的主键字段自动拼接字符串
{ username: '李四',dept:"英语",homepage:'www.jd.com' ,key:'002' }
];
| 326d16e60c843b1d1329501a11560d1b8cf9b851 | [
"TypeScript"
] | 1 | TypeScript | yichengjie/page-cfg | dcfa340b1fdf1435e8f0cf5b8576fcb68ba5cff4 | d97884fcffa5cb7037709efc671834b682012365 |
refs/heads/master | <file_sep>// drivable interface is created
interface drivable
{
start():void
drive():void
getPosition(position:string):void
}
// car class implement the interface drivable and include all the property
class car implements drivable
{
constructor(public name:string){}
start():void
{
console.log(this.name + " = is started")
}
drive():void
{
console.log("Its drive fastly")
}
getPosition(position:string):void
{
console.log(this.name +" position is always "+ position)
}
}
//instance of class object is created
let Adi= new car("Adi")
//through the object method is called
Adi.start()
Adi.drive()
Adi.getPosition("No:1")
//instance of class secound object is created
let Ford= new car("Ford Icon")
//through the object method is called
Ford.start()
Ford.drive()
Ford.getPosition("No:2")
<file_sep># angularassignment3.2
example of interface
| 825f67e2dd5b5f2973f610c5f43ee16e26b72911 | [
"Markdown",
"TypeScript"
] | 2 | TypeScript | abiramibharanidharan/angularassignment3.2 | f89a64a17ad02387154c40e6a81f57a2aa81f79f | 6e8a366c0cc71e8d5ea5a93898b80c79444db12d |
refs/heads/master | <repo_name>Godfrey18/Message-me<file_sep>/app/models/user.rb
class User < ApplicationRecord
has_many :messages
validates :username,presence:true, length: {minimum:7,maximum:15}
has_secure_password
end<file_sep>/app/controllers/chatrooms_controller.rb
class ChatroomsController < ApplicationController
def index
@message =Message.new
@messages = Message.custom_display
end
end | 729c1557fdcfda77ee7a49fa504667e6eb95de06 | [
"Ruby"
] | 2 | Ruby | Godfrey18/Message-me | 22541f02d9172776dc7cfd16fbfbf2e1333de143 | 4146dedf2b43a30e6e995cc99192f2d04dc53272 |
refs/heads/master | <repo_name>jrgalia/ExData_Plotting1<file_sep>/plot3.R
#reads a file in table format
data <- read.table("./household_power_consumption.txt", header=TRUE, sep=";", na.strings = "?", dec=".")
#subsetting Feb 1-2, 2007
subset <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
#plotting to png: 480x480
datetime <- strptime(paste(subset$Date, subset$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
globalActivePower <- as.numeric(subset$Global_active_power)
sub1 <- as.numeric(subset$Sub_metering_1)
sub2 <- as.numeric(subset$Sub_metering_2)
sub3 <- as.numeric(subset$Sub_metering_3)
png("plot3.png", width=480, height=480)
plot(datetime, sub1, type="l", ylab="Energy sub metering", xlab="")
lines(datetime, sub2, type="l", col="red")
lines(datetime, sub3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
dev.off()<file_sep>/plot2.R
#reads a file in table format
data <- read.table("./household_power_consumption.txt", header=TRUE, sep=";", na.strings = "?", dec=".")
#subsetting Feb 1-2, 2007
subset <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
#plotting to png: 480x480
datetime <- strptime(paste(subset$Date, subset$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
globalActivePower <- as.numeric(subset$Global_active_power)
png("plot2.png", width=480, height=480)
plot(datetime, globalActivePower, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off() | 0f855a08d5492261ca29bfc76941105e9090e1dc | [
"R"
] | 2 | R | jrgalia/ExData_Plotting1 | e7509069fa7601b414ce1a2891822118ba568b75 | d1e86a954bcf4df92fbc8cc7fe2c4578d1ca63ac |
refs/heads/main | <repo_name>es2013/photo-port<file_sep>/src/components/Gallery/index.js
import React from 'react';
import { capitalizeFirstLetter } from '../../utils/helpers';
import PhotoList from '../PhotoList';
// import photo from "../../assets/small/commercial/0.jpg";
function Gallery(props) {
const { currentCategory } = props;
return (
<section>
<h1 data-testid="h1tag">{capitalizeFirstLetter(currentCategory.name)}</h1>
<p>{currentCategory.description}</p>
<PhotoList category={currentCategory.name} />
</section>
);
}
// function Gallery() {
// //update Gallery that gets name and desc from object
// const currentCategory = {
// name: "commercial",
// description:
// "Photos of grocery stores, food trucks, and other commercial projects",
// };
// return (
// //use helper function to capitaliza first letter iin name when rendered afrer importing helpler file
// <section>
// <h1>{capitalizeFirstLetter(currentCategory.name)}</h1>
// <p>{capitalizeFirstLetter(currentCategory.name)}</p>
// <PhotoList />
// </section>
// );
// }
export default Gallery; | fd340456b3ced0b6474a307c1e5223e02c9b055f | [
"JavaScript"
] | 1 | JavaScript | es2013/photo-port | dc183d09fe0eebe62c745f06ba62b69d09be6505 | b2bff778b4180e39bb96e3595e229fb6654537f8 |
refs/heads/dev | <file_sep><?php
namespace Modules\Payment\Contracts;
use App\Contracts\ValidateTransactionExists;
use Modules\Payment\Exceptions\ObjectVerificationFailedException;
use Psr\Http\Message\ResponseInterface;
interface Refund
{
/**
* Check if the transaction in question is a valid transaction or not before voiding it
*
* @return ValidateTransactionExists false on failure json object on success
*
* @throws \Exception
*/
public function validateTransaction() : ValidateTransactionExists;
/**
* @param bool $threeDSecure
* @return mixed
* @throws \Exception
*/
public function refundOrder($threeDSecure = false);
/**
* Generate Merchant Session Key
*
* @return ResponseInterface
* @throws \Exception
*/
public function getToken() : ResponseInterface;
/**
* Gracefully handles request errors
*
* @return array|false false on failure json object on success
*/
public function validateResponse();
}
<file_sep><?php
namespace Tests\Unit;
use Dividebuy\Payment\Contracts\Card;
use GuzzleHttp\Client;
use Illuminate\Foundation\Testing\RefreshDatabase;
use Illuminate\Http\JsonResponse;
use Modules\Payment\Gateway\Sagepay\Payment;
use Modules\Payment\Gateway\Sagepay\VoidPayment;
use Psr\Http\Message\ResponseInterface;
use Tests\TestCase;
/**
 * Verifies that the payment API can issue a SagePay merchant session key.
 */
class SessionTokenTest extends TestCase
{
    /** @var array Request body posted to the session-token endpoint */
    protected $payload;

    /** @var array HTTP headers (gateway credentials) sent with the request */
    protected $requestHeaders;

    protected function setUp(): void
    {
        parent::setUp();
        // Test card details plus the SagePay sandbox configuration the
        // endpoint expects.
        $this->payload = json_decode(
            '{
            "cardDetails":{
            "cardholderName": "<NAME>",
            "cardNumber": "4929000000006",
            "expiryDate": "0320",
            "securityCode": "123"
            },
            "key": "<KEY>",
            "vendorName": "rematchtest",
            "baseUrl": "https://pi-test.sagepay.com/api/v1/"
            }',
            true);
        $this->requestHeaders = [
            "Authorization" => "Basic <KEY>",
            "Content-Type" => "application/json",
            "vendorName" => "rematchtest"
        ];
    }

    /**
     * A valid request should yield a merchant session key string.
     *
     * @test
     *
     * @return void
     * @throws \Exception
     */
    public function testShouldBeAbleToGenerateSessionToken()
    {
        $response = $this->post('/api/payment/session-token', $this->payload, $this->requestHeaders);
        $result = $response->decodeResponseJson();
        $this->assertIsString($result['merchantSessionKey']);
        $this->assertArrayHasKey('merchantSessionKey', $result);
    }
}
<file_sep><?php
namespace Modules\Payment\Entities;
use Illuminate\Database\Eloquent\Model;
use Modules\Payment\Entities\Concerns\UsesUuid;
/**
 * Eloquent model for a persisted payment transaction record.
 *
 * Primary keys are UUIDs, provided by the UsesUuid concern.
 */
class Transaction extends Model
{
    use UsesUuid;

    /**
     * Attributes that may be mass assigned.
     *
     * @var array
     */
    protected $fillable = [
        'user_id',
        'transaction_id',
        'response_message',
    ];
}
<file_sep><?php
namespace Modules\Payment\Http\Requests;
use GuzzleHttp\Client;
use GuzzleHttp\Exception\ClientException;
use GuzzleHttp\Exception\ServerException;
use GuzzleHttp\RequestOptions;
use Psr\Http\Message\ResponseInterface;
/**
 * Thin wrapper around Guzzle for talking to the SagePay (Opayo) PI API.
 *
 * Builds the base vendor payload, issues the request, and converts gateway
 * failures into JSON error responses.
 */
class ClientRequestHandler
{
    /**
     * Build the standard vendor payload and POST it to the given endpoint.
     *
     * @param string $apiEndPoint   Fully-qualified gateway URL
     * @param string $method        HTTP verb (only POST is currently issued)
     * @param array  $data          Request-specific fields merged over the base payload
     * @param array  $headers       Headers forwarded to the gateway (e.g. Authorization)
     * @param string $vendorTxtCode Vendor transaction code; empty for token requests
     * @return \Illuminate\Http\JsonResponse|ResponseInterface
     * @throws \Exception
     */
    public function makeRequest(string $apiEndPoint, string $method = 'post', array $data = [], $headers = [], $vendorTxtCode = '')
    {
        // Base payload required on every SagePay call; request-specific
        // fields override or extend it.
        $postData = [
            'vendorName' => env('SAGEPAY_VENDORNAME'),
            'vendorTxCode' => $vendorTxtCode,
        ];
        $postData = array_merge($postData, $data);
        $client = new Client(
            [
                RequestOptions::HEADERS => $headers
            ]
        );
        return $this->processRequest($apiEndPoint, $client, $postData);
    }

    /**
     * Extract a human-readable description from a request exception.
     *
     * Only Guzzle's RequestException family exposes hasResponse()/getResponse(),
     * so guard with method_exists() — previously a plain \Exception reaching
     * this method triggered a fatal undefined-method error.
     *
     * @param \Exception $exception
     * @return string|null
     */
    private function setDescriptionHandler(\Exception $exception)
    {
        $description = $exception->getMessage();
        if (method_exists($exception, 'hasResponse') && $exception->hasResponse()) {
            $response = json_decode($exception->getResponse()->getBody()->getContents(), true);
            // Guard against non-JSON bodies: json_decode() yields null and
            // array_key_exists() would then fail.
            if (is_array($response)) {
                $description = array_key_exists('description', $response) ? $response['description'] : null;
                if (array_key_exists('errors', $response)) {
                    $description = $response['errors'][0]['description'];
                    if (array_key_exists('property', $response['errors'][0])) {
                        $description .= ' {' . $response['errors'][0]['property'] . '}';
                    }
                }
            }
        }
        return $description;
    }

    /**
     * Clamp an exception code to a valid HTTP error status.
     *
     * Non-HTTP exception codes (e.g. 0 from a connection failure) would make
     * response()->json() raise an invalid-status error, masking the cause.
     *
     * @param mixed $code
     * @return int
     */
    private function httpStatusFrom($code)
    {
        return (is_int($code) && $code >= 400 && $code < 600) ? $code : 500;
    }

    /**
     * Execute the POST and translate any failure into a JSON error response.
     *
     * @param string $apiEndPoint
     * @param Client $client
     * @param array $postData
     * @return \Illuminate\Http\JsonResponse|ResponseInterface
     */
    public function processRequest(string $apiEndPoint, Client $client, array $postData)
    {
        try {
            return $client->post($apiEndPoint, ['json' => $postData]);
        } catch (ClientException $clientException) {
            $description = $this->setDescriptionHandler($clientException);
            return response()->json(['error' => $description], $this->httpStatusFrom($clientException->getCode()));
        } catch (ServerException $serverException) {
            $description = $this->setDescriptionHandler($serverException);
            return response()->json(['error' => $description], $this->httpStatusFrom($serverException->getCode()));
        } catch (\Exception $exception) {
            $description = $this->setDescriptionHandler($exception);
            return response()->json(['error' => $description], $this->httpStatusFrom($exception->getCode()));
        }
    }
}
<file_sep><?php
namespace Tests\Unit;
use Dividebuy\Payment\Contracts\Card;
use GuzzleHttp\Client;
use Illuminate\Foundation\Testing\RefreshDatabase;
use Illuminate\Foundation\Testing\TestResponse;
use Illuminate\Http\JsonResponse;
//use Illuminate\Support\Facades\Request;
use Illuminate\Http\Request;
use Modules\Payment\Gateway\Sagepay\Payment;
use Modules\Payment\Gateway\Sagepay\VoidPayment;
use Psr\Http\Message\ResponseInterface;
use Tests\TestCase;
/**
 * Exercises the /api/payment/refund endpoint against the SagePay sandbox.
 */
class RefundPaymentTest extends TestCase
{
    /** @var array Refund instruction posted to the endpoint */
    protected $payload;

    /** @var array Gateway credentials and content-type headers */
    protected $requestHeaders;

    protected function setUp(): void
    {
        parent::setUp();
        $this->payload = json_decode(
            '{
            "transactionType": "Refund",
            "referenceTransactionId": "5D639A87-1CA2-6DED-AEF6-66A640A68AD6",
            "vendorTxCode": "REFUND-2019-",
            "amount": 4,
            "description": "Demo transaction"
            }',
            true);
        $this->requestHeaders = [
            "Authorization" => "Basic dzlSN2ZSOWYxenhnYXNTNWVjNDZia05vaTFsekFDNGlrV1pxa2gxZnFFa1Z6RkxsS0M6enVBRnVpckM1UEc0bEoyMlQzcmxCdDRXY1NmcTRpOWdyblJOcWpHWktYVGhDOFMwVmkwakt3V21tMHc2RGhZd2Q=",
            "Content-Type" => "application/json",
            "vendorName" => "rematchtest"
        ];
    }

    /**
     * A valid refund instruction should be authorised by the gateway.
     *
     * NOTE(review): renamed from testShouldBeAbleToMakeRepeatPayment — the
     * old name described a repeat payment but the test posts to /refund.
     *
     * @test
     *
     * @return void
     * @throws \Exception
     */
    public function testShouldBeAbleToRefundPayment()
    {
        $response = $this->withHeaders($this->requestHeaders)->postJson('/api/payment/refund', $this->payload);
        $result = $response->decodeResponseJson();
        $this->assertArrayHasKey('statusDetail', $result);
        $this->assertEquals('The Authorisation was Successful.', $result['statusDetail']);
    }

    /**
     * Omitting a mandatory field (amount) should yield a validation error.
     *
     * @return void
     */
    public function testPaymentValidationMissingFieldError() {
        unset($this->payload["amount"]);
        $response = $this->withHeaders($this->requestHeaders)->postJson('/api/payment/refund', $this->payload);
        $this->assertInstanceOf(TestResponse::class, $response);
        $this->assertEquals($response->json('message'), 'The gateway response did not contain all the mandatory fields [amount]');
    }
}
<file_sep><?php
namespace Modules\Payment\Gateway\Sagepay;
use GuzzleHttp\Client;
use GuzzleHttp\RequestOptions;
use Illuminate\Http\Request;
use Modules\Payment\Contracts\PayableOrder;
use Modules\Payment\Http\Requests\ClientRequestHandler;
use Psr\Http\Message\ResponseInterface;
/**
 * Base SagePay (Opayo) PI gateway integration.
 *
 * Captures the incoming request payload/headers and provides the shared
 * round trips: merchant session key, card identifier, and transaction
 * submission. Subclasses supply preparePayload() for the concrete
 * transaction types.
 */
class PaymentGateway
{
    /** @var object|null Decoded merchant-session-key response, cached by getToken() */
    protected $merchantSessionKeyObject;

    /** @var object|null Decoded card-identifier response, cached by createCardIdentifier() */
    protected $cardIdentifierObject;

    /** @var array Flattened copy of the incoming request headers */
    protected $requestHeaders;

    /** @var ClientRequestHandler Shared HTTP request handler */
    protected $clientRequest;

    protected $threeDSecure;

    protected $transactionType;

    /**
     * Incoming request body as an associative array.
     *
     * NOTE(review): previously annotated as PayableOrder, but the constructor
     * assigns $request->request->all(), i.e. an array.
     *
     * @var array
     */
    protected $payload;

    /** @var Request */
    protected $request;

    /**
     * Capture the request body and headers and set up the request handler.
     *
     * @param Request $request
     */
    public function __construct(Request $request)
    {
        $this->payload = $request->request->all();
        $this->request = $request;
        // Laravel exposes each header as an array of values; forward only the
        // first value of each so Guzzle receives plain strings.
        $headers = [];
        foreach ($request->header() as $k => $v) {
            $headers[$k] = $v[0];
        }
        $this->requestHeaders = $headers;
        $this->clientRequest = new ClientRequestHandler();
    }

    /**
     * Generate a Merchant Session Key.
     *
     * Caches the decoded response on $this->merchantSessionKeyObject.
     *
     * @return ResponseInterface
     * @throws \Exception when the gateway call fails
     */
    public function getToken() : ResponseInterface
    {
        $requestHandler = new ClientRequestHandler();
        $apiEndpoint = env('SAGEPAY_BASEURL') . 'merchant-session-keys/';
        $data = [
            'vendor_name' => $this->request->headers->get('authorization')
        ];
        $response = $requestHandler->makeRequest($apiEndpoint, 'post', $data, $this->request->headers->all());
        if ($response instanceof ResponseInterface) {
            $this->merchantSessionKeyObject = json_decode($response->getBody()->__toString());
            return $response;
        }
        // The handler reported a failure via a JsonResponse; raise it as an
        // exception rather than falling through, which previously returned
        // null and triggered a TypeError against the declared return type.
        throw new \RuntimeException(
            'Unable to obtain merchant session key: '
            . (method_exists($response, 'getContent') ? $response->getContent() : 'unknown gateway error')
        );
    }

    /**
     * Tokenise the card details supplied in the payload.
     *
     * Caches the decoded response on $this->cardIdentifierObject.
     *
     * @return ResponseInterface
     * @throws \Exception when the gateway call fails
     */
    public function createCardIdentifier() : ResponseInterface
    {
        $cardDetails = [
            "cardDetails" => [
                "cardholderName" => $this->payload['cardDetails']['cardholderName'],
                "cardNumber" => $this->payload['cardDetails']['cardNumber'],
                "expiryDate" => $this->payload['cardDetails']['expiryDate'],
                "securityCode" => $this->payload['cardDetails']['securityCode']
            ]
        ];
        $apiEndPoint = $this->payload['baseUrl'] . 'card-identifiers';
        $postData = [
            'vendorName' => $this->payload['vendorName'],
            'vendorTxCode' => null,
        ];
        $postData = array_merge($postData, $cardDetails);
        // A fresh session key is requested here; getToken() also caches the
        // decoded object on $this->merchantSessionKeyObject as a side effect.
        $merchantSessionKey = json_decode($this->getToken()->getBody()->__toString())->merchantSessionKey;
        $client = new Client(
            [
                RequestOptions::HEADERS => [
                    'Authorization' => 'Bearer ' . $merchantSessionKey
                ]
            ]
        );
        $response = $this->clientRequest->processRequest($apiEndPoint, $client, $postData);
        if ($response instanceof ResponseInterface) {
            $this->cardIdentifierObject = json_decode($response->getBody()->__toString());
            return $response;
        }
        // Same pattern as getToken(): surface handler errors explicitly
        // instead of an implicit-null TypeError.
        throw new \RuntimeException(
            'Unable to create card identifier: '
            . (method_exists($response, 'getContent') ? $response->getContent() : 'unknown gateway error')
        );
    }

    /**
     * Run the token and card-identifier round trips, caching their decoded
     * results on the instance. Failures surface as exceptions.
     *
     * @return void
     * @throws \Exception
     */
    public function validateResponse()
    {
        $this->getToken();
        $this->createCardIdentifier();
    }

    /**
     * Submit the prepared transaction to the gateway.
     *
     * @param bool $threeDSecure currently unused here; 3-D Secure behaviour is
     *                           driven by the payload's apply3DSecure field
     * @return mixed decoded gateway response object, or a JsonResponse on error
     * @throws \Exception
     */
    public function processOrder($threeDSecure = false)
    {
        $postData = $this->preparePayload();
        $apiEndpoint = env('SAGEPAY_BASEURL') . 'transactions';
        $response = $this->clientRequest->makeRequest($apiEndpoint, 'post', $postData, $this->requestHeaders);
        if ($response instanceof ResponseInterface) {
            return json_decode($response->getBody()->__toString());
        }
        return $response;
    }

    /**
     * Submit an instruction (e.g. void) against an existing transaction.
     *
     * @return mixed decoded gateway response object, or a JsonResponse on error
     * @throws \Exception
     */
    public function processVoidOrder()
    {
        $postData = $this->preparePayload();
        $apiEndpoint = env('SAGEPAY_BASEURL') . 'transactions/' . $this->payload['transactionId'] . '/instructions';
        $response = $this->clientRequest->makeRequest($apiEndpoint, 'post', $postData, $this->requestHeaders);
        if ($response instanceof ResponseInterface) {
            return json_decode($response->getBody()->__toString());
        }
        return $response;
    }
}
<file_sep><?php
/**
* Created by PhpStorm.
* User: khululekanimkhonza
* Date: 04/12/2019
* Time: 21:51
*/
namespace App\Contracts;
interface ValidateTransactionExists
{
    /**
     * Identifier of the transaction under validation.
     *
     * NOTE(review): named "set" but declared to return a string — it appears
     * to resolve/return the id rather than mutate state; confirm intent.
     *
     * @return string the transaction identifier
     */
    public function setTransactionId() : string;

    /**
     * Look the transaction up and return its data.
     *
     * @return array transaction data for the matched record
     * @throws \Exception when the transaction cannot be found
     */
    public function checkTransactionExists() : array;

    /**
     * Total amount of the original transaction.
     *
     * Used by refund and void flows to ensure the amount being processed is
     * not greater than the total invoice amount.
     *
     * @return float
     */
    public function getTransactionAmount() : float;
}
<file_sep><?php
namespace Tests\Unit;
use Dividebuy\Payment\Contracts\Card;
use Illuminate\Foundation\Testing\RefreshDatabase;
use Tests\TestCase;
/**
 * Exercises session-token generation for the void payment flow.
 */
class VoidTest extends TestCase
{
    /** @var array Full void-payment request body */
    protected $payload;

    /** @var array Gateway credentials and content-type headers */
    protected $requestHeaders;

    protected function setUp(): void
    {
        parent::setUp();
        $this->payload = json_decode(
            '{
            "transactionType": "Payment",
            "transactionId": "A66F7DC8-705F-0512-C149-62AB40304FD8",
            "cardDetails":{
            "cardholderName": "<NAME>",
            "cardNumber": "4929000000006",
            "expiryDate": "0320",
            "securityCode": "123"
            },
            "vendorTxCode": "EC1V-12654-",
            "amount": 156700,
            "currency": "GBP",
            "description": "Demo transaction",
            "apply3DSecure": "UseMSPSetting",
            "customerFirstName": "Bruce",
            "customerLastName": "Lee",
            "customerEmail": "<EMAIL>",
            "billingAddress": {
            "address1": "407 St. John Street",
            "city": "London",
            "postalCode": "EC1V 4AB",
            "country": "GB"
            },
            "entryMethod": "Ecommerce",
            "key": "<KEY>",
            "vendorName": "rematchtest",
            "baseUrl": "https://pi-test.sagepay.com/api/v1/",
            "paymentType": "void"
            }',
            true);
        $this->requestHeaders = [
            "Authorization" => "Basic <KEY>",
            "Content-Type" => "application/json",
            "vendorName" => "rematchtest"
        ];
    }

    /**
     * Headers passed as the third argument to post() should still produce a
     * merchant session key.
     *
     * @test
     *
     * @return void
     * @throws \Exception
     */
    public function testShouldBeAbleToGenerateSessionToken()
    {
        $response = $this->post('/api/payment/session-token', $this->payload, $this->requestHeaders);
        $result = $response->decodeResponseJson();
        $this->assertIsString($result['merchantSessionKey']);
        $this->assertArrayHasKey('merchantSessionKey', $result);
    }

    /**
     * Same success path driven through the withHeaders() helper.
     *
     * NOTE(review): renamed from testShouldNotBeAbleToGenerateSessionToken —
     * the old name claimed a failure case but the assertions verify success,
     * identical to the test above apart from how headers are attached.
     *
     * @test
     *
     * @return void
     * @throws \Exception
     */
    public function testShouldGenerateSessionTokenUsingWithHeaders()
    {
        $response = $this->withHeaders($this->requestHeaders)->post('/api/payment/session-token', $this->payload);
        $result = $response->decodeResponseJson();
        $this->assertIsString($result['merchantSessionKey']);
        $this->assertArrayHasKey('merchantSessionKey', $result);
    }
}
<file_sep><?php
use Illuminate\Http\Request;
use Illuminate\Support\Facades\Route;
use \GuzzleHttp\Psr7\Response;
/*
|--------------------------------------------------------------------------
| API Routes
|--------------------------------------------------------------------------
|
| Here is where you can register API routes for your application. These
| routes are loaded by the RouteServiceProvider within a group which
| is assigned the "api" middleware group. Enjoy building your API!
|
*/
//
//Route::middleware('auth:api')->get('/payment', function (Request $request) {
//    return $request->user();
//});
//Route::prefix('v1')->group(function() {
// Payment endpoints, all exposed under /api/payment/*.
Route::prefix('payment')->group(function() {
Route::post('/pay', 'PaymentController@payment');
Route::post('/repeat', 'PaymentController@repeat');
Route::post('/refund', 'PaymentController@refund');
Route::post('/defer', 'PaymentController@defer');
Route::post('/void', 'PaymentController@void');
Route::post('/auth-token', 'PaymentController@index');
Route::post('/card-identifier', 'PaymentController@cardAuthorization');
Route::post('/session-token', 'PaymentController@sessionToken');
// Developer playground: builds a card token straight from the gateway class.
Route::post('/card-identifier_', function () {
$card = new Modules\Payment\Gateway\Sagepay\Payment();
$token = $card->getToken();
dump($token->getBody()->__toString());
});
// 3-D Secure return URL; currently only dumps the server vars for inspection.
Route::get('/fulfill-secure-payment', function (Request $request) {
dump('we are coming from a secure payment', $request->server->all());
});
// Developer playground: manually exercises the Sage Pay merchant-session-keys
// endpoint. dd() intentionally terminates the request after dumping the body.
Route::get('test', function () {
$client = new GuzzleHttp\Client(
[
\GuzzleHttp\RequestOptions::HEADERS => [
'Content-Type' => 'application/json',
'Authorization' => 'Basic <KEY>
],
\GuzzleHttp\RequestOptions::JSON => ['vendorName' => 'rematchtest']
]);
$apiEndPoint = 'https://pi-test.sagepay.com/api/v1/merchant-session-keys';
$response = $client->post($apiEndPoint, []);
dump($response->getBody()->__toString());
dd();
// FIX: removed the unreachable statements that followed dd(); they
// referenced $this outside of an object context and could never execute.
});
});
//});
// Developer route: persists a throw-away Transaction row and lists them all.
Route::get('test', function () {
$trans = new \Modules\Payment\Entities\Transaction();
$trans->user_id = '04d7f995-ef33-4870-a214-4e21c51ff76e';
$trans->transaction_id = 'TEST_API_' . time();
$trans->response_message = randomArray();
$trans->save();
return response()->json(\Modules\Payment\Entities\Transaction::all());
});
// Developer route: inspects how the Authorization header round-trips.
Route::get('test-header', function (Request $request) {
$response = new \GuzzleHttp\Psr7\Response(200, ['Authorization' => 'Basic SOME-SYPSPSER-LINGSKSJDKFJKSD-HGWKJEHKJS']);
$header = $request->header('Authorization');
dump($request, $response);
});
// FIX: this helper was commented out while still being called by the /test
// route above, which made that route fatal with "Call to undefined function
// randomArray()". Restored behind a function_exists() guard so reloading the
// routes file cannot redeclare it.
if (! function_exists('randomArray')) {
/**
 * Builds a random demo sentence for seeding Transaction::response_message.
 *
 * @return string
 */
function randomArray() {
$quote = array(
"I wish I had",
"Why Can't I have",
"Can I have",
"Did you have",
"Will you get",
"When will I get"
);
$items = array(
"Money",
"Time",
"Sex",
"Coffee",
"A Better Job",
"A Life",
"Better Programming Skills",
"Internet that was mine",
"More Beer",
"More Donuts",
"Candy",
"My Daughter",
"Cable",
"A Dining Room Table",
"Better Couches",
"A PS4",
"A New Laptop",
"A New Phone",
"Water",
"Rum",
"Movies",
"A Desktop Computer",
"A Fish Tank",
"My Socks",
"My Jacket",
"More Coffee",
"More Koolaid",
"More Power",
"A Truck",
"Toolbox",
"More fish for Fish Tank",
"A Screwdriver",
"A Projector",
"More Pants"
);
return $quote[array_rand($quote, 1)] . ' ' . $items[array_rand($items, 1)] . ' ' . $items[array_rand($items, 1)] . ' ' . $items[array_rand($items, 1)];
}
}
//// App v1 API
//Route::group([
//    'middleware' => ['app', 'api.version:1'],
//    'namespace' => 'App\Http\Controllers\App',
//    'prefix' => 'api/v1',
//], function ($router) {
//    require base_path('routes/app_api.v1.php');
//});
//
//// App v2 API
//Route::group([
//    'middleware' => ['app', 'api.version:2'],
//    'namespace' => 'App\Http\Controllers\App',
//    'prefix' => 'api/v2',
//], function ($router) {
//    require base_path('routes/app_api.v2.php');
//});
<file_sep><?php
namespace Modules\Payment\Contracts;
use Carbon\Exceptions\InvalidDateException;
/**
 * Contract describing the card details a payment gateway needs to tokenise
 * or charge a card.
 */
interface Card
{
/**
 * Name printed on the card.
 *
 * @return string
 */
public function getCardHolderName() : string;
/**
 * Primary account number (PAN) of the card.
 *
 * @return string
 */
public function getCardNumber() : string;
/**
 * Card expiry date.
 *
 * @return string
 *
 * @throws InvalidDateException
 */
public function getCardExpiryDate() : string;
/**
 * This is a numerical value between 3 and 4 characters long, depending on
 * the implementing contract (CVV/CVC).
 *
 * @return int
 */
public function getCardSecurityCode() : int;
}
<file_sep><?php
namespace Modules\Payment\Contracts;
use App\Contracts\ValidateTransactionExists;
/**
 * Contract for cancelling (voiding) a previously authorised transaction.
 */
interface VoidTransaction
{
/**
 * Instruction to cancel the order.
 *
 * @return mixed gateway response
 */
public function voidOrder();
/**
 * Check if the transaction in question is a valid transaction or not
 * before voiding it.
 *
 * @return ValidateTransactionExists
 */
public function validateTransaction() : ValidateTransactionExists;
/**
 * Gracefully handles request errors.
 *
 * @return array|false false on failure, json object on success
 */
public function validateResponse();
}
<file_sep><?php
namespace Modules\Payment\Gateway\Sagepay;
use Illuminate\Http\Request;
use Illuminate\Validation\Rule;
use Illuminate\Support\Facades\Validator;
use Modules\Payment\Exceptions\ObjectVerificationFailedException;
use Modules\Payment\Http\Requests\ClientRequestHandler;
use \Modules\Payment\Contracts\Payment as PaymentContractInterface;
use phpDocumentor\Reflection\Types\Parent_;
/**
 * Sage Pay "Payment" transaction: validates the incoming request payload and
 * delegates the actual HTTP work to the parent PaymentGateway.
 */
class Payment extends PaymentGateway implements PaymentContractInterface
{
// Merchant session key object (set by the parent gateway flow).
protected $merchantSessionKeyObject;
// Card identifier object (set by the parent gateway flow).
protected $cardIdentifierObject;
// Sage Pay apply3DSecure flag: 'Force' or 'Disable'.
protected $threeDSecure;
// Sage Pay transactionType value sent with the request.
protected $transactionType;
public function __construct(Request $request)
{
parent::__construct($request);
$this->validateResponse();
}
/**
 * Runs a "Payment" transaction through the gateway.
 *
 * @param bool $threeDSecure force 3-D Secure when true, disable it otherwise
 * @return mixed
 * @throws \Exception
 */
public function paymentOrder($threeDSecure = false) {
if ($threeDSecure === true) {
$this->threeDSecure = 'Force';
} else {
$this->threeDSecure = 'Disable';
}
$this->transactionType = "Payment";
return $this->processOrder();
}
/**
 * Builds the payload expected by the Sage Pay transactions endpoint.
 *
 * @return array
 * @throws \Exception
 */
protected function preparePayload() {
$avsCheck = 'UseMSPSetting';
$this->validateInput();
return [
'transactionType' => $this->transactionType,
'paymentMethod' => [
'card' => [
'merchantSessionKey' => $this->merchantSessionKeyObject->merchantSessionKey,
'cardIdentifier' => $this->cardIdentifierObject->cardIdentifier,
'save' => false
]
],
'vendorTxCode' => $this->payload['vendorTxCode'] . time(), // @TODO REMOVE TIME
'amount' => $this->payload['amount'],
'currency' => $this->payload['currency'],
'description' => $this->payload['description'],
'apply3DSecure' => $this->threeDSecure,
'applyAvsCvcCheck' => $avsCheck,
'customerFirstName' => $this->payload['customerFirstName'],
'customerLastName' => $this->payload['customerLastName'],
'customerEmail' => $this->payload['customerEmail'],
'billingAddress' => [
'address1' => $this->payload['billingAddress']['address1'],
'city' => $this->payload['billingAddress']['city'],
'postalCode' => $this->payload['billingAddress']['postalCode'],
'country' => $this->payload['billingAddress']['country'],
],
'entryMethod' => $this->payload['entryMethod']
];
}
/**
 * Validates the incoming payload against the mandatory Sage Pay fields.
 *
 * @return void
 * @throws ObjectVerificationFailedException
 */
protected function validateInput()
{
// NOTE(review): preparePayload() also reads amount, currency, description
// and customerEmail, which are not validated here; an existing test pins
// the resulting "Undefined index: amount" behaviour, so they are left
// unvalidated for now — confirm before tightening.
$rules = [
'transactionType' => ['max:40'],
'cardDetails.cardholderName' => ['required', 'max:40'],
'cardDetails.cardNumber' => ['required', 'max:16'],
'cardDetails.expiryDate' => ['required', 'max:4'],
'cardDetails.securityCode' => ['required', 'max:3'],
'vendorTxCode' => ['required', 'max:40'],
'apply3DSecure' => [
'required',
Rule::in(["UseMSPSetting", "Force", "Disable", "ForceIgnoringRules"]),
],
'customerFirstName' => ['required'],
// FIX: was a duplicate 'customerFirstName' key, so customerLastName was
// never validated even though preparePayload() requires it.
'customerLastName' => ['required'],
'billingAddress.address1' => ['required'],
'billingAddress.city' => ['required'],
'billingAddress.postalCode' => ['required'],
'billingAddress.country' => ['required']
];
$validator = Validator::make($this->payload, $rules);
if ($validator->fails()) {
throw new ObjectVerificationFailedException('The gateway response did not contain all the mandatory fields ['. implode(', ', array_keys($validator->errors()->getMessages())) .']');
}
}
}
<file_sep><?php
namespace Modules\Payment\Contracts;
use Psr\Http\Message\ResponseInterface;
/**
 * Contract for deferred (authorise-now, capture-later) payments.
 */
interface Deferred
{
/**
 * Runs the prepared order through the gateway.
 *
 * @param bool $threeDSecure
 * @return mixed
 * @throws \Exception
 */
public function processOrder($threeDSecure = false);
/**
 * Creates a card identifier (token) for the supplied card details.
 *
 * @return ResponseInterface
 */
public function createCardIdentifier() : ResponseInterface;
/**
 * Generate Merchant Session Key.
 *
 * @return ResponseInterface
 * @throws \Exception
 */
public function getToken() : ResponseInterface;
/**
 * Gracefully handles request errors.
 *
 * @return array|false false on failure, json object on success
 */
public function validateResponse();
/**
 * Runs a "Deferred" transaction (authorise only).
 *
 * @param bool $threeDSecure
 * @return mixed
 * @throws \Exception
 */
public function deferredOrder($threeDSecure = false);
}
<file_sep><?php
namespace Modules\Payment\Exceptions;
use Exception;
/**
 * Thrown when a payload fails mandatory-field validation before it is sent
 * to the payment gateway.
 */
class ObjectVerificationFailedException extends Exception
{
}
<file_sep><?php
namespace Tests\Unit;
use Dividebuy\Payment\Contracts\Card;
use GuzzleHttp\Client;
use Illuminate\Foundation\Testing\RefreshDatabase;
use Illuminate\Foundation\Testing\TestResponse;
use Illuminate\Http\JsonResponse;
//use Illuminate\Support\Facades\Request;
use Illuminate\Http\Request;
use Modules\Payment\Gateway\Sagepay\Payment;
use Modules\Payment\Gateway\Sagepay\VoidPayment;
use Psr\Http\Message\ResponseInterface;
use Tests\TestCase;
/**
 * End-to-end tests for /api/payment/repeat against the Sage Pay sandbox.
 */
class RepeatPaymentTest extends TestCase
{
// Repeat-payment request payload; decoded to an array in setUp().
protected $payload;
// NOTE(review): declared but never used.
protected $err_payload;
// HTTP headers sent with every request.
protected $requestHeaders;
protected function setUp(): void
{
parent::setUp(); // TODO: Change the autogenerated stub
$this->payload = json_decode(
'{
"transactionType":"Repeat",
"referenceTransactionId": "5D639A87-1CA2-6DED-AEF6-66A640A68AD6",
"vendorTxCode":"REPEAT-EC1V-",
"amount":2000,
"currency":"GBP",
"description":"Great product repeated",
"shippingDetails":{
"recipientFirstName":"Sam",
"recipientLastName":"Jones",
"shippingAddress1":"407 St John Street",
"shippingCity":"London",
"shippingPostalCode":"EC1V 4AB",
"shippingCountry":"GB"
}
}',
true);
$this->requestHeaders = [
"Authorization" => "Basic dz<KEY>
"Content-Type" => "application/json",
"vendorName" => "rematchtest"
];
}
/**
 * Happy path: a repeat payment against a known reference transaction
 * should be authorised.
 *
 * @test
 *
 * @return void
 * @throws \Exception
 */
public function testShouldBeAbleToMakeRepeatPayment()
{
$response = $this->withHeaders($this->requestHeaders)->postJson('/api/payment/repeat', $this->payload);
$result = $response->decodeResponseJson();
$this->assertArrayHasKey('statusDetail', $result);
$this->assertEquals('The Authorisation was Successful.', $result['statusDetail']);
}
// Dropping a mandatory field should surface the gateway validation message.
public function testPaymentValidationMissingFieldError() {
unset($this->payload["amount"]);
$response = $this->withHeaders($this->requestHeaders)->postJson('/api/payment/repeat', $this->payload);
$response->decodeResponseJson();
// NOTE(review): leftover debug output — consider removing dump().
dump($response->json('message'));
$this->assertInstanceOf(TestResponse::class, $response);
$this->assertEquals($response->json('message'), 'The gateway response did not contain all the mandatory fields [amount]');
}
}
<file_sep><?php
namespace Tests\Unit;
use Dividebuy\Payment\Contracts\Card;
use GuzzleHttp\Client;
use Illuminate\Foundation\Testing\RefreshDatabase;
use Illuminate\Foundation\Testing\TestResponse;
use Illuminate\Http\JsonResponse;
//use Illuminate\Support\Facades\Request;
use Illuminate\Http\Request;
use Modules\Payment\Gateway\Sagepay\Payment;
use Modules\Payment\Gateway\Sagepay\VoidPayment;
use Psr\Http\Message\ResponseInterface;
use Tests\TestCase;
/**
 * End-to-end tests: make a payment via /api/payment/pay, then void the
 * resulting transaction via /api/payment/void.
 */
class VoidPaymentTest extends TestCase
{
// Payment request payload; decoded to an array in setUp().
protected $payload;
// NOTE(review): declared but never assigned — a local is used instead.
protected $transactionId;
// HTTP headers sent with every request.
protected $requestHeaders;
protected function setUp(): void
{
parent::setUp(); // TODO: Change the autogenerated stub
$this->payload = json_decode(
'{
"transactionType": "Payment",
"transactionId": "A66F7DC8-705F-0512-C149-62AB40304FD8",
"cardDetails":{
"cardholderName": "<NAME>",
"cardNumber": "4929000000006",
"expiryDate": "0320",
"securityCode": "123"
},
"vendorTxCode": "EC1V-12654-",
"amount": 6325,
"currency": "GBP",
"description": "Demo transaction",
"apply3DSecure": "UseMSPSetting",
"customerFirstName": "Void",
"customerLastName": "Lee",
"customerEmail": "<EMAIL>",
"billingAddress": {
"address1": "407 St. John Street",
"city": "London",
"postalCode": "EC1V 4AB",
"country": "GB"
},
"entryMethod": "Ecommerce",
"key": "<KEY>
"vendorName": "rematchtest",
"baseUrl": "https://pi-test.sagepay.com/api/v1/",
"paymentType": "void"
}',
true);
$this->requestHeaders = [
"Authorization" => "<KEY>
"Content-Type" => "application/json",
"vendorName" => "rematchtest"
];
}
/**
 * Happy path: authorise a payment, then void the returned transactionId.
 *
 * @test
 *
 * @return void
 * @throws \Exception
 */
public function testShouldBeAbleToMakePaymentAndVoidGeneratedTransaction()
{
$response = $this->withHeaders($this->requestHeaders)->postJson('/api/payment/pay', $this->payload);
$result = $response->decodeResponseJson();
$transactionId = $result['transactionId'];
$amount = $result['amount']['totalAmount'];
$this->assertArrayHasKey('statusDetail', $result);
$this->assertEquals('The Authorisation was Successful.', $result['statusDetail']);
$response = $this->withHeaders($this->requestHeaders)->postJson('/api/payment/void', json_decode(
'{
"instructionType": "void",
"transactionId": "'.$transactionId.'"
}',
true));
$result = $response->decodeResponseJson();
// NOTE(review): leftover debug output — consider removing dump().
dump('we are here now!', $result);
$this->assertArrayHasKey('instructionType', $result);
}
// Dropping a validated field should surface the gateway validation message.
public function testPaymentValidationMissingFieldError() {
unset($this->payload["customerFirstName"]);
$response = $this->withHeaders($this->requestHeaders)->postJson('/api/payment/pay', $this->payload);
$response->decodeResponseJson();
// NOTE(review): leftover debug output — consider removing dump().
dump($response->json('message'));
$this->assertInstanceOf(TestResponse::class, $response);
$this->assertEquals($response->json('message'), 'The gateway response did not contain all the mandatory fields [customerFirstName]');
}
// NOTE(review): this pins a raw PHP notice ("Undefined index: amount") as
// the API contract, because 'amount' is not in the validation rules. That
// is brittle — ideally the API should return a validation error instead.
public function testShouldErrWhenMakingPaymentWithoutAmount() {
unset($this->payload["amount"]);
$response = $this->withHeaders($this->requestHeaders)->postJson('/api/payment/pay', $this->payload);
$result = $response->decodeResponseJson();
$this->assertArrayHasKey('message', $result);
$this->assertEquals('Undefined index: amount', $result['message']);
}
}
<file_sep><?php
namespace App\Providers;
use Dividebuy\Payment\Contracts\Payment;
use Dividebuy\Payment\Contracts\Payment as PaymentGatewayInterface;
use Illuminate\Support\ServiceProvider;
use Modules\Payment\Gateway\Sagepay\PaymentGateway;
/**
 * Application service provider. Currently registers nothing; the commented
 * singleton binding suggests a planned Payment -> PaymentGateway binding.
 */
class AppServiceProvider extends ServiceProvider
{
/**
 * Register any application services.
 *
 * @return void
 */
public function register()
{
// $this->app->singleton(Payment::class, PaymentGateway::class);
}
/**
 * Bootstrap any application services.
 *
 * @return void
 */
public function boot()
{
//
}
}
<file_sep><?php
namespace Modules\Payment\Contracts;
use App\Contracts\ValidateTransactionExists;
/**
 * Contract for repeating a previously authorised transaction.
 */
interface Repeat
{
/**
 * Reference Transaction ID that needs to be repeated.
 *
 * @return string
 */
public function getReferenceTransactionId() : string;
/**
 * Checks the reference transaction exists before repeating it.
 *
 * @return ValidateTransactionExists
 */
public function validateTransaction() : ValidateTransactionExists;
/**
 * Result of the repeat attempt.
 *
 * @return mixed
 */
public function getPaymentResult();
/**
 * This will return a billable address object attached to the card.
 *
 * @param PayableOrder $order
 */
public function setOrder(PayableOrder $order);
/**
 * Runs the repeat transaction.
 *
 * @param bool $threeDSecure
 * @return array
 */
public function repeatOrder($threeDSecure = false);
/**
 * Builds the payload expected by the gateway.
 */
public function preparePayload();
/**
 * Validates the incoming payload before it is sent.
 */
public function validateInput();
}
<file_sep><?php
namespace Modules\Payment\Http\Controllers;
use Illuminate\Http\JsonResponse;
use Illuminate\Http\Request;
use Illuminate\Routing\Controller;
use Modules\Payment\Gateway\Sagepay\Deferred;
use Modules\Payment\Gateway\Sagepay\PaymentGateway;
use Modules\Payment\Gateway\Sagepay\Payment;
use \GuzzleHttp\Psr7\Response;
use Modules\Payment\Gateway\Sagepay\Refund;
use Modules\Payment\Gateway\Sagepay\Repeat;
use Modules\Payment\Gateway\Sagepay\VoidPayment;
use Psr\Http\Message\ResponseInterface;
/**
 * HTTP entry points for the Sage Pay payment flows
 * (pay / repeat / refund / void / defer / tokens).
 */
class PaymentController extends Controller
{
protected $token;
protected $merchantSessionKey;
protected $cardIdentifier;
/**
 * Developer playground: exercises one of the gateway flows with dump()
 * output. NOTE(review): returns nothing and leaves debug dumps in place —
 * should not be exposed in production.
 *
 * @return void
 * @throws \Exception
 */
public function index(Request $request)
{
// $msk = new PaymentGateway();
$msk = new \Modules\Payment\Gateway\Sagepay\Payment($request);
// $msk = new \Modules\Payment\Gateway\Sagepay\Repeat($request);
// $msk = new \Modules\Payment\Gateway\Sagepay\Refund($request);
// $msk = new \Modules\Payment\Gateway\Sagepay\Deferred($request);
// $msk = new \Modules\Payment\Gateway\Sagepay\VoidPayment($request);
// dump($msk->paymentOrder(true));
dump($msk->paymentOrder(false));
// dump($msk->repeatOrder(false));
// dump($msk->refundOrder());
// dump($msk->deferredOrder(false));
// dump($msk->voidOrder());
}
/**
 * Processes a standard payment (3-D Secure disabled).
 *
 * @param Request $request
 * @return JsonResponse|object decoded gateway body when the gateway
 *         returned a PSR-7 response; otherwise the raw result wrapped
 *         in a JsonResponse
 * @throws \Exception
 */
public function payment(Request $request)
{
$payment = new Payment($request);
$payment = $payment->paymentOrder(false);
if ($payment instanceof ResponseInterface) {
return json_decode($payment->getBody()->__toString());
}
return new JsonResponse($payment, 200);
}
/**
 * Repeats a previously authorised transaction.
 *
 * @param Request $request
 * @return JsonResponse|object
 * @throws \Exception
 */
public function repeat(Request $request)
{
$payment = new Repeat($request);
$payment = $payment->repeatOrder(false);
if ($payment instanceof ResponseInterface) {
return json_decode($payment->getBody()->__toString());
}
return new JsonResponse($payment, 200);
}
/**
 * Refunds a previously settled transaction.
 *
 * @param Request $request
 * @return JsonResponse|object
 * @throws \Exception
 */
public function refund(Request $request)
{
$payment = new Refund($request);
$payment = $payment->refundOrder(false);
if ($payment instanceof ResponseInterface) {
return json_decode($payment->getBody()->__toString());
}
return new JsonResponse($payment, 200);
}
/**
 * Voids (cancels) a transaction.
 * NOTE(review): voidOrder() is declared without parameters on the
 * VoidTransaction contract; the extra false argument is silently ignored.
 *
 * @param Request $request
 * @return JsonResponse|object
 * @throws \Exception
 */
public function void(Request $request)
{
$payment = new VoidPayment($request);
$payment = $payment->voidOrder(false);
if ($payment instanceof ResponseInterface) {
return json_decode($payment->getBody()->__toString());
}
return new JsonResponse($payment, 200);
}
/**
 * Authorises a deferred (capture-later) payment.
 *
 * @param Request $request
 * @return JsonResponse|object
 * @throws \Exception
 */
public function defer(Request $request)
{
$payment = new Deferred($request);
$payment = $payment->deferredOrder(false);
if ($payment instanceof ResponseInterface) {
return json_decode($payment->getBody()->__toString());
}
return new JsonResponse($payment, 200);
}
/**
 * Generates a merchant session key.
 * NOTE(review): the failure branch responds with HTTP 201 (Created),
 * which is misleading for an error — confirm intended status code.
 *
 * @param Request $request
 * @return JsonResponse
 * @throws \Exception
 */
public function sessionToken(Request $request)
{
$card = new PaymentGateway($request);
$token = $card->getToken();
if ( $token instanceof ResponseInterface) {
return new JsonResponse(json_decode($token->getBody()->__toString()), 200);
}
return new JsonResponse(
'Error creating a Card Token',
201
);
}
/**
 * Creates a card identifier (token) for the supplied card details.
 * NOTE(review): same misleading 201 on the failure branch as above.
 *
 * @return JsonResponse
 * @throws \Exception
 */
public function cardAuthorization(Request $request)
{
$card = new PaymentGateway($request);
$token = $card->createCardIdentifier();
if ( $token instanceof ResponseInterface) {
return new JsonResponse(json_decode($token->getBody()->__toString()), 200);
}
return new JsonResponse(
'Error creating a Card Token',
201
);
}
}
<file_sep><?php
namespace Modules\Payment\Gateway\Sagepay;
use Illuminate\Http\Request;
use Illuminate\Support\Facades\Validator;
use Modules\Payment\Exceptions\ObjectVerificationFailedException;
use Modules\Payment\Http\Requests\ClientRequestHandler;
/**
 * Sage Pay "Repeat" transaction: re-charges a previously authorised
 * transaction identified by referenceTransactionId.
 */
class Repeat extends PaymentGateway
{
// Merchant session key object (set by the parent gateway flow).
protected $merchantSessionKeyObject;
// Card identifier object (set by the parent gateway flow).
protected $cardIdentifierObject;
// Sage Pay apply3DSecure flag.
protected $threeDSecure;
// Sage Pay transactionType value sent with the request.
protected $transactionType;
public function __construct(Request $request)
{
parent::__construct($request);
}
/**
 * Runs a "Repeat" transaction.
 *
 * NOTE: 3-D Secure is always disabled for repeats — the $threeDSecure
 * argument is accepted for interface compatibility but intentionally
 * ignored.
 *
 * @param bool $threeDSecure ignored
 * @return mixed
 */
public function repeatOrder($threeDSecure = false) {
$this->threeDSecure = 'Disable';
$this->transactionType = "Repeat";
return $this->processOrder();
}
/**
 * Prepare the payload expected by the Payment API.
 *
 * @return array
 * @throws ObjectVerificationFailedException
 */
protected function preparePayload() {
$this->validateInput();
return [
'transactionType' => $this->transactionType,
'referenceTransactionId' => $this->payload['referenceTransactionId'],
'vendorTxCode' => $this->payload['vendorTxCode'] . time(),
'amount' => $this->payload['amount'],
'currency' => $this->payload['currency'],
'description' => $this->payload['description'],
'shippingDetails' => [
'recipientFirstName' => $this->payload['shippingDetails']['recipientFirstName'],
'recipientLastName' => $this->payload['shippingDetails']['recipientLastName'],
'shippingAddress1' => $this->payload['shippingDetails']['shippingAddress1'],
'shippingCity' => $this->payload['shippingDetails']['shippingCity'],
'shippingPostalCode' => $this->payload['shippingDetails']['shippingPostalCode'],
'shippingCountry' => $this->payload['shippingDetails']['shippingCountry'],
],
];
}
/**
 * Validates the incoming payload against the mandatory repeat fields.
 *
 * @return void
 * @throws ObjectVerificationFailedException
 */
protected function validateInput()
{
$rules = [
'transactionType' => ['required'],
'referenceTransactionId' => ['required'],
'vendorTxCode' => ['required'],
'amount' => ['required'],
// FIX: preparePayload() reads 'currency' but it was not validated,
// so a missing currency surfaced as "Undefined index: currency"
// instead of a proper validation error.
'currency' => ['required'],
'description' => ['required'],
'shippingDetails.recipientFirstName' => ['required'],
'shippingDetails.recipientLastName' => ['required'],
'shippingDetails.shippingAddress1' => ['required'],
'shippingDetails.shippingCity' => ['required'],
'shippingDetails.shippingPostalCode' => ['required'],
'shippingDetails.shippingCountry' => ['required']
];
$validator = Validator::make($this->payload, $rules);
if ($validator->fails()) {
throw new ObjectVerificationFailedException('The gateway response did not contain all the mandatory fields ['. implode(', ', array_keys($validator->errors()->getMessages())) .']');
}
}
}
<file_sep><?php
namespace Tests\Unit;
use Dividebuy\Payment\Contracts\Card;
use GuzzleHttp\Client;
use Illuminate\Foundation\Testing\RefreshDatabase;
use Illuminate\Http\JsonResponse;
use Modules\Payment\Gateway\Sagepay\Payment;
use Modules\Payment\Gateway\Sagepay\VoidPayment;
use Psr\Http\Message\ResponseInterface;
use Tests\TestCase;
/**
 * Tests for the card-identifier (card token) endpoint.
 *
 * NOTE(review): both tests have structural problems — see the per-method
 * notes below.
 */
class GenerateCITest extends TestCase
{
// Card details payload; decoded to an array in setUp().
protected $payload;
// NOTE(review): declared but never used.
protected $err_payload;
// HTTP headers sent with every request.
protected $requestHeaders;
// NOTE(review): declared but never assigned or used.
protected $requestMissingHeaders;
protected function setUp(): void
{
parent::setUp(); // TODO: Change the autogenerated stub
$this->payload = json_decode(
'{
"cardDetails":{
"cardholderName": "<NAME>",
"cardNumber": "4929000000006",
"expiryDate": "0320",
"securityCode": "123"
},
"key": "<KEY>
"vendorName": "rematchtest",
"baseUrl": "https://pi-test.sagepay.com/api/v1/"
}',
true);
$this->requestHeaders = [
"Authorization" => "Basic <KEY>
"Content-Type" => "application/json",
"vendorName" => "rematchtest"
];
}
/**
 * NOTE(review): expectExceptionMessage() is called inside the catch block,
 * i.e. AFTER the exception has already been thrown and caught. PHPUnit
 * exception expectations must be declared BEFORE the code under test; as
 * written, this test asserts nothing on success and fails spuriously if
 * the catch branch runs.
 *
 * @test
 *
 * @return void
 * @throws \Exception
 */
public function testShouldBeAbleToGenerateCI()
{
try {
$response = $this->post('/api/payment/card-identifier', $this->payload, $this->requestHeaders);
} catch (\Exception $e) {
$this->expectExceptionMessage('The HTTP status code "0" is not valid.');
}
// $this->assertTrue(true);
// dd($response);
//
// $result = $response->decodeResponseJson();
//
// dump($result);
// $this->assertIsString($result['merchantSessionKey']);
// $this->assertArrayHasKey('merchantSessionKey', $result);
}
/**
 * NOTE(review): named as a negative card-identifier test, but it posts to
 * the session-token endpoint with valid headers and asserts success —
 * confirm the intended scenario.
 *
 * @test
 *
 * @return void
 * @throws \Exception
 */
public function testShouldNotBeAbleToGenerateCI()
{
$response = $this->withHeaders($this->requestHeaders)->post('/api/payment/session-token', $this->payload);
$result = $response->decodeResponseJson();
$this->assertIsString($result['merchantSessionKey']);
$this->assertArrayHasKey('merchantSessionKey', $result);
}
}
<file_sep><?php
namespace Modules\Payment\Contracts;
use Modules\Payment\Contracts\Address;
use Modules\Payment\Contracts\Card;
/**
 * Contract describing an order that can be charged through a payment
 * gateway: amount, description, customer, address and card details.
 */
interface PayableOrder
{
/**
 * This can be either a transactionId / referenceTransactionId.
 *
 * @return string | null
 */
public function getPaymentOrderId();
/**
 * Should be in pence for most payments providers.
 *
 * @return float
 */
public function getPaymentAmount();
/**
 * Free-text description of what is being paid for.
 *
 * @return string
 */
public function getPaymentDescription();
/**
 * Email address of the paying customer.
 *
 * @return string
 */
public function getCustomerEmail();
/**
 * Billing Address/Shipping Address need to be defined for any transactions.
 *
 * @return Address
 */
public function getAddress() : Address;
/**
 * Card Object attached to the request.
 *
 * @return Card
 */
public function getCard() : Card;
/**
 * Client PaymentGateway Key.
 *
 * <KEY>
 *
 * @return string
 */
public function getKey() : string;
}
<file_sep><?php
/*
|-------------------------------------------
| Payment module configuration (Sage Pay)
|-------------------------------------------
|
| All values can be overridden through the environment; the literals here
| are development defaults only.
*/
return [
'name' => 'Payment',
/*
|--------------------------------------------------------------------------
| Sagepay Configuration
|--------------------------------------------------------------------------
| Vendor Name: The vendor name for the account
| Integration Key: The value for the Integration key (username)
| Integration Password: The value for the Integration password
|
*/
'vendor_name' => env('SAGEPAY_VENDOR', 'vendorname'),
/*
|--------------------------------------------------------------------------
| Mode
|--------------------------------------------------------------------------
| Environment: This specifies the environment for which the credentials apply (test or live).
| Forced to 'live' when APP_ENV is PRODUCTION; otherwise SAGEPAY_MODE (default 'test').
|
*/
'mode' => (strtoupper(env('APP_ENV')) !== 'PRODUCTION') ? env('SAGEPAY_MODE', 'test') : 'live',
'base_url' => env('SAGEPAY_URL', 'https://pi-test.sagepay.com/api/v1/'),
'currency' => env('SAGEPAY_CURRENCY', 'GBP'),
'country' => env('SAGEPAY_COUNTRY', 'GB'),
'call' => env('SAGEPAY_CALL', true),
'secure3Durl' => env('SAGEPAY_SUCCESS', 'http://sagepay.local/pay'),
/*
|--------------------------------------------------------------------------
| Authentication Configuration
|--------------------------------------------------------------------------
| In order to access our protected resources you must authenticate with our API by providing us with your:
|
| Integration Key: The value for the Integration key (username)
| Integration Password: The value for the Integration password
|
*/
'username' => env('SAGEPAY_USERNAME', 'gandalf'),
'password' => env('SAGEPAY_PASSWORD', '<PASSWORD>!@^!'),
'key' => env('SAGEPAY_KEY', '<KEY>),
];
<file_sep><?php
namespace Modules\Payment\Gateway\Sagepay;
use App\Contracts\ValidateTransactionExists;
use Illuminate\Http\Request;
use Illuminate\Validation\Rule;
use Illuminate\Support\Facades\Validator;
use Modules\Payment\Exceptions\ObjectVerificationFailedException;
use Modules\Payment\Http\Requests\ClientRequestHandler;
use \Modules\Payment\Contracts\VoidTransaction as PaymentContractInterface;
/**
 * Sage Pay void instruction: cancels a previously authorised transaction.
 */
class VoidPayment extends PaymentGateway implements PaymentContractInterface
{
// Merchant session key object (set by the parent gateway flow).
protected $merchantSessionKeyObject;
// Card identifier object (set by the parent gateway flow).
protected $cardIdentifierObject;
// Instruction type sent to the gateway ("void").
protected $transactionType;
public function __construct(Request $request)
{
parent::__construct($request);
}
/**
 * Sends the void instruction for the transaction carried in the request.
 *
 * @return mixed
 * @throws \Exception
 */
public function voidOrder() {
$this->transactionType = "void";
return $this->processVoidOrder();
}
/**
 * Builds the instruction payload. The transaction id itself is presumably
 * carried in the request URL by the parent gateway — TODO confirm.
 *
 * @return array
 * @throws ObjectVerificationFailedException
 */
protected function preparePayload() : array {
$this->validateInput();
return [
'instructionType' => $this->transactionType
];
}
/**
 * Validates the incoming payload before the instruction is sent.
 *
 * @return void
 * @throws ObjectVerificationFailedException
 */
protected function validateInput()
{
$rules = [
'instructionType' => ['required'],
];
$validator = Validator::make($this->payload, $rules);
if ($validator->fails()) {
throw new ObjectVerificationFailedException('The gateway response did not contain all the mandatory fields ['. implode(', ', array_keys($validator->errors()->getMessages())) .']');
}
}
/**
 * Check if the transaction in question is a valid transaction or not before voiding it
 *
 * @return ValidateTransactionExists false on failure json object on success
 *
 * @throws \Exception
 */
public function validateTransaction(): ValidateTransactionExists
{
// TODO: Implement validateTransaction() method.
// FIX: the empty stub implicitly returned null, which violated the
// declared object return type and surfaced as an opaque TypeError at the
// call site. Fail fast with an explicit exception until implemented.
throw new \BadMethodCallException('VoidPayment::validateTransaction() is not implemented yet.');
}
}
| c452728255cced2fd9f39bb4ec7e4324c01e685c | [
"PHP"
] | 24 | PHP | intelij/sdfsdfsdflskjdflksjdflksjdflksjdfl | 860c2bf3d90e60288c3a020a591ca9e902cfe3aa | 42e7eec59119802d5af3fa48c3a3eb0075920f9c |
refs/heads/master | <repo_name>SURAJ2600/DreamTrip<file_sep>/app/src/main/java/learncode/example/com/dreamtrip/NetworkDirectory/APISERVICE.kt
package learncode.example.com.dreamtrip
import com.google.gson.JsonElement
import com.google.gson.JsonObject
import okhttp3.MultipartBody
import okhttp3.RequestBody
import retrofit2.Call
import retrofit2.http.Body
import retrofit2.http.Multipart
import retrofit2.http.POST
import retrofit2.http.Part
/**
 * Retrofit API surface for the DreamTrip backend.
 *
 * Created by suraj on 23/5/18.
 */
interface APISERVICE {
/*API END POINT*/
/** Checks the supplied credentials and returns the login result. */
@POST("checklogin")
fun UserLogin(@Body body: JsonObject): Call<JsonElement>
/** Registers a new user account. */
@POST("registerUser")
fun UserRegisteration(@Body body: JsonObject): Call<JsonElement>
/**
 * Updates a user's profile fields together with a profile image upload
 * (multipart: text parts plus one file part).
 */
@Multipart
@POST("updateUser")
fun UpdateUserDetails(@Part("id") id: RequestBody,
@Part("fname") firstname: RequestBody,
@Part("lname") lastname: RequestBody,
@Part("gender") gender: RequestBody,
@Part("dob") dob: RequestBody,
@Part("email") email: RequestBody,
@Part("mobile") mobile: RequestBody,
@Part image: MultipartBody.Part): Call<JsonElement>
/** Lists attraction destinations matching the request body. */
@POST("listAttractions")
fun GetListAttractionDestination(@Body body: JsonObject): Call<JsonElement>
/** Fetches a single user's details by id. */
@POST("getUser")
fun GetUserById(@Body body: JsonObject): Call<JsonElement>
}
}<file_sep>/app/src/main/java/learncode/example/com/dreamtrip/Activities/SplashScreen_Activity.kt
package learncode.example.com.dreamtrip.Activities
import android.content.Intent
import android.os.Bundle
import android.os.Handler
import android.support.v7.app.AppCompatActivity
import learncode.example.com.dreamtrip.R
import learncode.example.com.dreamtrip.Utility.SessionManager
import learncode.example.com.dreamtrip.Utility.Util
/**
 * Splash screen: shows the launch layout for three seconds, then routes to
 * LoginActivity when no session email is stored, or HomeScreen_Activity
 * when one is.
 */
class SplashScreen_Activity : AppCompatActivity() {
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
setContentView(R.layout.activity_main)
getHadler()
}
// Delayed navigation based on session state.
// NOTE(review): method name is a typo for "getHandler" — safe to rename
// since it is private to this activity.
private fun getHadler() {
// 3-second delay before deciding the destination screen.
Handler().postDelayed({
if (SessionManager.getSession(Util.session_email, this@SplashScreen_Activity).length == 0) {
// No stored session email: send the user to the login screen.
val mainIntent = Intent(this, LoginActivity::class.java)
startActivity(mainIntent)
finish()
} else {
// Session present: go straight to the home screen.
val mainIntent = Intent(this, HomeScreen_Activity::class.java)
startActivity(mainIntent)
finish()
}
}, 3000)
}
}<file_sep>/app/src/main/java/learncode/example/com/dreamtrip/DataModelClass/DestinationItem.kt
package learncode.example.com.dreamtrip.DataModelClass
import android.os.Parcel
import android.os.Parcelable
/**
 * Parcelable data model for one attraction/destination row returned by the
 * "listAttractions" endpoint. All fields are kept as the raw strings received
 * from the backend (latitude/longitude/timing included).
 *
 * Created by vadivel on 2/7/18.
 */
class DestinationItem(var title:String,var description:String,var image:String,var latitude:String,
var longitude:String,var contact:String,var timing:String,var sitelink:String) : Parcelable {
// Parcel constructor: fields MUST be read in exactly the same order they are
// written in writeToParcel() below.
constructor(parcel: Parcel) : this(
parcel.readString(),
parcel.readString(),
parcel.readString(),
parcel.readString(),
parcel.readString(),
parcel.readString(),
parcel.readString(),
parcel.readString()) {
}
// Serializes every field; keep this order in sync with the Parcel constructor above.
override fun writeToParcel(parcel: Parcel, flags: Int) {
parcel.writeString(title)
parcel.writeString(description)
parcel.writeString(image)
parcel.writeString(latitude)
parcel.writeString(longitude)
parcel.writeString(contact)
parcel.writeString(timing)
parcel.writeString(sitelink)
}
// No file descriptors are parceled, so the contents flag is always 0.
override fun describeContents(): Int {
return 0
}
companion object CREATOR : Parcelable.Creator<DestinationItem> {
override fun createFromParcel(parcel: Parcel): DestinationItem {
return DestinationItem(parcel)
}
override fun newArray(size: Int): Array<DestinationItem?> {
return arrayOfNulls(size)
}
}
}
<file_sep>/app/src/main/java/learncode/example/com/dreamtrip/Activities/HomeScreen_Activity.kt
package learncode.example.com.dreamtrip.Activities
import android.Manifest
import android.app.Dialog
import android.content.Context
import android.content.Intent
import android.content.pm.PackageManager
import android.os.Bundle
import android.support.v4.app.ActivityCompat
import android.support.v4.content.ContextCompat
import android.support.v7.app.AppCompatActivity
import android.support.v7.widget.LinearLayoutManager
import android.support.v7.widget.RecyclerView
import android.view.View
import android.widget.ImageView
import android.widget.TextView
import com.example.admin2base.myapplication.utils.AppUtils
import com.google.gson.Gson
import com.google.gson.JsonElement
import com.google.gson.JsonObject
import com.squareup.picasso.Picasso
import learncode.example.com.dreamtrip.Adapter.DestinationAdapter
import learncode.example.com.dreamtrip.ApiCient
import learncode.example.com.dreamtrip.DataModelClass.DestinationItem
import learncode.example.com.dreamtrip.R
import learncode.example.com.dreamtrip.Utility.SessionManager
import learncode.example.com.dreamtrip.Utility.Util
import org.json.JSONObject
import retrofit2.Call
import retrofit2.Callback
import retrofit2.Response
/**
 * Home screen: shows the logged-in user's profile thumbnail and a RecyclerView
 * of attraction destinations fetched from the "listAttractions" endpoint.
 * Asks for ACCESS_FINE_LOCATION up front so the adapter can show distances.
 */
class HomeScreen_Activity : AppCompatActivity() {
    /*Variable declaration*/
    val TAG = HomeScreen_Activity::class.java.simpleName
    val RetrofitClient = ApiCient
    // Backing list for the RecyclerView; refreshed by GetListAttractionSpot().
    var mDestinationlist = ArrayList<DestinationItem>()
    var adapter: DestinationAdapter? = null
    var mProfile: ImageView? = null
    var mBack_btn: ImageView? = null
    var mTitle: TextView? = null
    var edit_profile: ImageView? = null
    var mRecylerView: RecyclerView? = null
    private var mContext: Context? = null
    var mProgressDialog: Dialog? = null

    // Request code matched in onRequestPermissionsResult (spelling fixed; private).
    private val LOCATION_PERMISSION_CODE = 23

    override fun onCreate(savedInstanceState: Bundle?) {
        super.onCreate(savedInstanceState)
        setContentView(R.layout.activity_home_screen_)
        mContext = this@HomeScreen_Activity
        Init()

        mTitle?.text = getString(R.string.home)
        edit_profile?.visibility = View.GONE

        // Show the cached profile image (if any) in the header.
        try {
            Picasso.get().load(SessionManager.getSession(Util.session_user_image, mContext))
                    .placeholder(R.mipmap.ic_profile).into(mProfile)
        } catch (e: Exception) {
            e.printStackTrace()
        }

        // Load the attraction list, requesting the location permission first
        // when it has not been granted yet.
        if (isLocationPermissionGranted()) {
            GetListAttractionSpot()
        } else {
            requestLocationPermission()
        }

        // Header navigation.
        mProfile?.setOnClickListener {
            startActivity(Intent(this, ProfileActivity::class.java))
        }
        mBack_btn?.setOnClickListener { finish() }

        mRecylerView?.layoutManager = LinearLayoutManager(mContext)
        adapter = DestinationAdapter(mContext as HomeScreen_Activity, mDestinationlist)
        mRecylerView?.adapter = adapter
    }

    // Binds all view references.
    private fun Init() {
        mProfile = findViewById<ImageView>(R.id.profile)
        mBack_btn = findViewById<ImageView>(R.id.back_btn)
        mTitle = findViewById<TextView>(R.id.mTitle)
        edit_profile = findViewById<ImageView>(R.id.edit_profile)
        mRecylerView = findViewById<RecyclerView>(R.id.recyler_destination)
    }

    /**
     * Fetches the attraction list and refreshes the adapter.
     *
     * FIX: the progress dialog is now dismissed on every terminal path
     * (error payload, null response, network failure, local exception).
     * Previously it was only dismissed on the success path and stayed on
     * screen forever when the request failed.
     */
    private fun GetListAttractionSpot() {
        try {
            if (Util.isConnected(mContext as HomeScreen_Activity)) {
                mProgressDialog = Util.ShowProgressView(mContext as HomeScreen_Activity)
                mProgressDialog?.show()
                val Request = JsonObject()
                Request.addProperty("email", SessionManager.getSession(Util.session_email, mContext))
                val network_request = RetrofitClient.create().GetListAttractionDestination(Request)
                network_request.enqueue(object : Callback<JsonElement> {
                    override fun onResponse(call: Call<JsonElement>?, response: Response<JsonElement>?) {
                        mProgressDialog?.dismiss()
                        if (response == null) return
                        val jsonobject = JSONObject(response.body().toString())
                        AppUtils.instance.debugLog("-----", "RES==" + jsonobject)
                        if (jsonobject.getString("iserror").equals("No") && jsonobject.has("data")) {
                            val dataarray = jsonobject.getJSONArray("data")
                            val gson = Gson()
                            if (dataarray.length() != 0) {
                                mDestinationlist.clear()
                                for (i in 0 until dataarray.length()) {
                                    val destination_model = gson.fromJson(
                                            dataarray.getJSONObject(i).toString(),
                                            DestinationItem::class.java)
                                    mDestinationlist.add(destination_model)
                                }
                                mRecylerView?.adapter?.notifyDataSetChanged()
                            }
                        }
                    }

                    override fun onFailure(call: Call<JsonElement>?, t: Throwable?) {
                        // FIX: dismiss the spinner on network failure too.
                        mProgressDialog?.dismiss()
                        t?.printStackTrace()
                    }
                })
            }
        } catch (e: Exception) {
            mProgressDialog?.dismiss()
            e.printStackTrace()
        }
    }

    // True when ACCESS_FINE_LOCATION is already granted.
    // FIX: renamed from the misleading "isReadStorageAllowed" (it never checked
    // storage) and dropped the redundant `if (...) true else false`.
    private fun isLocationPermissionGranted(): Boolean {
        return ContextCompat.checkSelfPermission(this, Manifest.permission.ACCESS_FINE_LOCATION) ==
                PackageManager.PERMISSION_GRANTED
    }

    // Prompts the user for ACCESS_FINE_LOCATION.
    private fun requestLocationPermission() {
        if (ActivityCompat.shouldShowRequestPermissionRationale(this, Manifest.permission.ACCESS_FINE_LOCATION)) {
            // Hook for an explanatory rationale dialog before re-asking.
        }
        ActivityCompat.requestPermissions(this,
                arrayOf(Manifest.permission.ACCESS_FINE_LOCATION), LOCATION_PERMISSION_CODE)
    }

    override fun onRequestPermissionsResult(requestCode: Int, permissions: Array<String>, grantResults: IntArray) {
        // FIX: forward to super so fragments/AppCompat receive the callback.
        super.onRequestPermissionsResult(requestCode, permissions, grantResults)
        if (requestCode == LOCATION_PERMISSION_CODE) {
            // Original behavior preserved: the list is loaded whether or not the
            // permission was granted - with it the adapter can show distances,
            // without it the list still renders.
            GetListAttractionSpot()
        }
    }
}
<file_sep>/app/src/main/java/learncode/example/com/dreamtrip/Activities/RegisterScreen_Activity.kt
package learncode.example.com.dreamtrip.Activities
import android.annotation.SuppressLint
import android.app.DatePickerDialog
import android.app.Dialog
import android.content.Context
import android.content.Intent
import android.os.Bundle
import android.support.v7.app.AppCompatActivity
import android.view.View
import android.widget.*
import com.google.gson.JsonElement
import com.google.gson.JsonObject
import kotlinx.android.synthetic.main.activity_register_screen_.*
import learncode.example.com.dreamtrip.ApiCient
import learncode.example.com.dreamtrip.R
import learncode.example.com.dreamtrip.Utility.Util
import org.json.JSONException
import org.json.JSONObject
import retrofit2.Call
import retrofit2.Response
import java.util.*
@SuppressLint("Registered")
/**
 * Registration screen: validates the form locally, then posts to the
 * "registerUser" endpoint and, on success, returns to LoginActivity with the
 * credentials pre-filled via Intent extras.
 */
class RegisterScreen_Activity : AppCompatActivity() {
    /*Variable declaration*/
    var mFirstname: EditText? = null
    var mLastname: EditText? = null
    var mPassword: EditText? = null
    var mConfirmPassword: EditText? = null
    var mEmailaddress: EditText? = null
    var mMobilenumber: EditText? = null
    var mDob: TextView? = null
    var mGender: String = ""
    val TAG = RegisterScreen_Activity::class.java.simpleName
    private var mContext: Context? = null
    val mGenderArray = arrayOf("Select Gender", "Male", "Female")
    var mProgressDialog: Dialog? = null
    val RetrofitClient = ApiCient

    override fun onCreate(savedInstanceState: Bundle?) {
        super.onCreate(savedInstanceState)
        setContentView(R.layout.activity_register_screen_)
        mContext = this@RegisterScreen_Activity
        /*Initialize views*/
        Init()

        // Gender spinner setup.
        val adapter = ArrayAdapter<String>(this, R.layout.customtextview_spinner, mGenderArray)
        adapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item)
        user_genderspinner.adapter = adapter
        user_genderspinner.onItemSelectedListener = object : AdapterView.OnItemSelectedListener {
            override fun onNothingSelected(p0: AdapterView<*>?) {
                // FIX: was TODO("not implemented"), which throws NotImplementedError
                // and would crash the app if the framework ever invoked it.
            }

            override fun onItemSelected(p0: AdapterView<*>?, p1: View?, p2: Int, p3: Long) {
                mGender = mGenderArray[p2]
            }
        }

        // Validate the form and submit it.
        btn_register.setOnClickListener {
            val firstname = mFirstname?.getText().toString().trim()
            val lastname = mLastname?.getText().toString().trim()
            val email = mEmailaddress?.getText().toString().trim()
            val password = mPassword?.getText().toString().trim()
            val confirm = mConfirmPassword?.getText().toString().trim()
            val mobile = mMobilenumber?.getText().toString().trim()
            val dob = mDob?.getText().toString().trim()
            if (firstname.isEmpty()) {
                Toast.makeText(mContext, getString(R.string.enter_first_name), Toast.LENGTH_SHORT).show()
            } else if (lastname.isEmpty()) {
                Toast.makeText(mContext, getString(R.string.enter_lastname), Toast.LENGTH_SHORT).show()
            } else if (email.isEmpty()) {
                Toast.makeText(mContext, getString(R.string.enteremail), Toast.LENGTH_SHORT).show()
            } else if (!Util.isValidEmail(email)) {
                Toast.makeText(mContext, getString(R.string.enter_email), Toast.LENGTH_SHORT).show()
            } else if (password.length < 6) {
                // NOTE(review): the enter_pwd / pwd_lenth string resources look
                // swapped between this branch and the next - verify against strings.xml.
                Toast.makeText(mContext, getString(R.string.enter_pwd), Toast.LENGTH_SHORT).show()
            } else if (confirm.isEmpty()) {
                Toast.makeText(mContext, getString(R.string.pwd_lenth), Toast.LENGTH_SHORT).show()
            } else if (confirm != password) {
                // FIX: the original compared the EditText *references*
                // (mConfirmPassword == mPassword), which is never true, so a
                // password mismatch was never detected. Compare the text instead.
                Toast.makeText(mContext, getString(R.string.pwd_mismatch), Toast.LENGTH_SHORT).show()
            } else if (mobile.isEmpty()) {
                Toast.makeText(mContext, getString(R.string.enter_mob_num), Toast.LENGTH_SHORT).show()
            } else if (mobile.length < 8) {
                // FIX: removed a leftover debug toast that displayed mGender here.
                Toast.makeText(mContext, getString(R.string.min_number), Toast.LENGTH_SHORT).show()
            } else if (mGender == mGenderArray[0]) {
                Toast.makeText(mContext, getString(R.string.select_gender), Toast.LENGTH_SHORT).show()
            } else if (dob.isEmpty()) {
                Toast.makeText(mContext, getString(R.string.dob), Toast.LENGTH_SHORT).show()
            } else {
                registerUser(firstname, lastname, email, password, confirm, mobile, dob)
            }
        }

        mDob?.setOnClickListener { SetDateOfBirth() }
        signin_textview.setOnClickListener { finish() }
    }

    /**
     * Posts the registration request; on success returns to LoginActivity with
     * the username/password as extras. Extracted from the click listener for
     * readability - behavior unchanged apart from the named fixes.
     */
    private fun registerUser(firstname: String, lastname: String, email: String,
                             password: String, confirm: String, mobile: String, dob: String) {
        try {
            if (Util.isConnected(mContext as RegisterScreen_Activity)) {
                mProgressDialog = Util.ShowProgressView(mContext as RegisterScreen_Activity)
                mProgressDialog?.show()
                try {
                    val Request = JsonObject()
                    Request.addProperty("fname", firstname)
                    Request.addProperty("lname", lastname)
                    Request.addProperty("email", email)
                    Request.addProperty("mobile", mobile)
                    // FIX: this property previously contained a corrupted
                    // "<PASSWORD>" placeholder token; it now sends the password input.
                    Request.addProperty("password", password)
                    Request.addProperty("cpassword", confirm)
                    Request.addProperty("dob", dob)
                    Request.addProperty("gender", mGender)
                    val networkrequest = ApiCient.create().UserRegisteration(Request)
                    networkrequest.enqueue(object : retrofit2.Callback<JsonElement> {
                        override fun onResponse(call: Call<JsonElement>?, response: Response<JsonElement>?) {
                            if (response != null) {
                                mProgressDialog?.dismiss()
                                val responce_object = JSONObject(response.body().toString())
                                if (responce_object.getString("iserror").equals("Yes")) {
                                    Toast.makeText(mContext, "" + responce_object.getString("data"), Toast.LENGTH_SHORT)
                                            .show()
                                } else {
                                    val mIntent = Intent(mContext, LoginActivity::class.java)
                                    mIntent.putExtra("username", email)
                                    mIntent.putExtra("password", password)
                                    startActivity(mIntent)
                                    Toast.makeText(mContext, "" + responce_object.getString("message"), Toast.LENGTH_SHORT)
                                            .show()
                                }
                            }
                        }

                        override fun onFailure(call: Call<JsonElement>?, t: Throwable?) {
                            mProgressDialog?.dismiss()
                            Toast.makeText(mContext, getString(R.string.failed), Toast.LENGTH_SHORT)
                                    .show()
                        }
                    })
                } catch (e: JSONException) {
                    e.printStackTrace()
                }
            } else {
                Toast.makeText(mContext, getString(R.string.no_internet), Toast.LENGTH_SHORT)
                        .show()
            }
        } catch (e: Exception) {
            e.printStackTrace()
        }
    }

    /** Opens a DatePickerDialog and writes the chosen date into mDob as d-M-yyyy. */
    private fun SetDateOfBirth() {
        val c = Calendar.getInstance()
        val year = c.get(Calendar.YEAR)
        val month = c.get(Calendar.MONTH)
        val day = c.get(Calendar.DAY_OF_MONTH)
        val dpd = DatePickerDialog(mContext, DatePickerDialog.OnDateSetListener { view, year, monthOfYear, dayOfMonth ->
            // FIX: DatePickerDialog reports a 0-based month; +1 so January shows as "1".
            mDob?.setText("" + dayOfMonth + "-" + (monthOfYear + 1) + "-" + year)
        }, year, month, day)
        dpd.show()
    }

    // Binds all form view references.
    private fun Init() {
        mFirstname = findViewById<EditText>(R.id.user_firstname)
        mLastname = findViewById<EditText>(R.id.user_lastname)
        mEmailaddress = findViewById<EditText>(R.id.user_email)
        mPassword = findViewById<EditText>(R.id.user_password)
        mConfirmPassword = findViewById<EditText>(R.id.user_confirmpassword)
        mMobilenumber = findViewById<EditText>(R.id.user_mobile)
        mDob = findViewById<TextView>(R.id.user_dob)
    }
}
<file_sep>/app/src/main/java/learncode/example/com/dreamtrip/Adapter/DestinationAdapter.kt
package learncode.example.com.dreamtrip.Adapter
import android.Manifest
import android.app.Activity
import android.content.Intent
import android.content.pm.PackageManager
import android.location.Location
import android.net.Uri
import android.support.v4.app.ActivityCompat
import android.support.v4.content.ContextCompat
import android.support.v7.widget.RecyclerView
import android.view.LayoutInflater
import android.view.View
import android.view.ViewGroup
import android.widget.ImageView
import android.widget.LinearLayout
import android.widget.TextView
import android.widget.Toast
import com.example.admin2base.myapplication.utils.AppUtils
import com.squareup.picasso.Picasso
import learncode.example.com.dreamtrip.Activities.DestinationdetailScreen_Activity
import learncode.example.com.dreamtrip.DataModelClass.DestinationItem
import learncode.example.com.dreamtrip.R
import learncode.example.com.dreamtrip.Utility.GPSTracker
import java.util.*
/**
 * RecyclerView adapter that renders the attraction list, shows the distance
 * from the current GPS fix to each destination, and offers click-to-call on
 * the contact number plus click-through to the detail screen.
 *
 * Created by vadivel on 2/7/18.
 */
class DestinationAdapter(internal var activity: Activity, internal var Destinationlist: ArrayList<DestinationItem>) : RecyclerView.Adapter<DestinationAdapter.ViewHolder>() {
    // Row position whose call button triggered a permission request.
    var mPos: Int = 0
    // Last distance string computed in onBindViewHolder; forwarded to the detail screen.
    var mDistance: String = ""
    private val REQUEST_PHONE_CALL = 23

    override fun getItemCount(): Int {
        return Destinationlist.size
    }

    override fun onCreateViewHolder(parent: ViewGroup, viewType: Int): ViewHolder {
        val itemView = LayoutInflater.from(parent.context)
                .inflate(R.layout.destination_listitem, parent, false)
        return ViewHolder(itemView)
    }

    override fun onBindViewHolder(holder: ViewHolder, position: Int) {
        val destination_model = Destinationlist.get(holder.adapterPosition)
        holder.txt_place.setText("" + destination_model.title)
        holder.txt_description.setText("" + destination_model.description)
        holder.txt_phone.setText("Contact :" + destination_model.contact)
        // Placeholder until the real distance is computed below.
        holder.txt_location_disctane.setText("Distance :" + destination_model.latitude)

        val gpsTracker = GPSTracker(activity)
        if (gpsTracker.getIsGPSTrackingEnabled()) {
            try {
                val destination_lat: Double = destination_model.latitude.toDouble()
                val destination_longi: Double = destination_model.longitude.toDouble()
                AppUtils.instance.debugLog("LOCATION", "DDD" + gpsTracker.latitude + ":" + gpsTracker.longitude)
                AppUtils.instance.debugLog("LOCATION", "DDD" + destination_lat + ":" + destination_longi)
                val destination_location = Location("")
                destination_location.setLatitude(destination_lat)
                destination_location.setLongitude(destination_longi)
                val source_location = Location("")
                source_location.setLatitude(gpsTracker.latitude)
                source_location.setLongitude(gpsTracker.longitude)
                // Location.distanceTo() returns metres; /1000 converts to km.
                val distanceInKm = source_location.distanceTo(destination_location) / 1000
                AppUtils.instance.debugLog("mVal", "distanceInMeters" + distanceInKm)
                holder.txt_location_disctane.setText("" + distanceInKm + " KM")
                mDistance = "" + distanceInKm + "KM"
            } catch (e: Exception) {
                // Bad/missing coordinates from the backend: keep the placeholder text.
                e.printStackTrace()
                AppUtils.instance.debugLog("LOCATION", "DDD" + e)
            }
        } else {
            gpsTracker.showSettingsAlert()
        }

        try {
            Picasso.get().load(destination_model.image)
                    .error(R.mipmap.ic_splashimage)
                    .placeholder(R.drawable.progressanimation).into(holder.img_destination)
        } catch (e: Exception) {
            e.printStackTrace()
        }

        holder.txt_phone.setOnClickListener {
            mPos = holder.adapterPosition
            if (ContextCompat.checkSelfPermission(activity, Manifest.permission.CALL_PHONE) != PackageManager.PERMISSION_GRANTED) {
                Toast.makeText(activity, "Please enable Cell phone permission", Toast.LENGTH_LONG).show()
            } else {
                mCall(holder.adapterPosition)
            }
        }
    }

    /** Dials the destination's contact number, requesting CALL_PHONE first if needed. */
    fun mCall(adapterPosition: Int) {
        if (ContextCompat.checkSelfPermission(activity, Manifest.permission.CALL_PHONE) != PackageManager.PERMISSION_GRANTED) {
            // FIX: this previously requested ACCESS_FINE_LOCATION, so the
            // CALL_PHONE permission could never actually be obtained here.
            ActivityCompat.requestPermissions(activity, arrayOf(Manifest.permission.CALL_PHONE), REQUEST_PHONE_CALL)
        } else {
            val callIntent = Intent(Intent.ACTION_CALL)
            callIntent.data = Uri.parse("tel:" + Destinationlist.get(adapterPosition).contact)
            activity.startActivity(callIntent)
        }
    }

    inner class ViewHolder(itemView: View) : RecyclerView.ViewHolder(itemView) {
        var txt_place: TextView
        var txt_description: TextView
        var txt_location_disctane: TextView
        var txt_phone: TextView
        var img_destination: ImageView
        var destinationitem_mainlayout: LinearLayout

        init {
            txt_place = itemView.findViewById<TextView>(R.id.txt_place) as TextView
            txt_description = itemView.findViewById<TextView>(R.id.txt_dexcription) as TextView
            txt_location_disctane = itemView.findViewById<TextView>(R.id.txt_distance) as TextView
            txt_phone = itemView.findViewById<TextView>(R.id.txt_phonenumber) as TextView
            img_destination = itemView.findViewById<ImageView>(R.id.img_destination) as ImageView
            destinationitem_mainlayout = itemView.findViewById<LinearLayout>(R.id.destination_mainlayout) as LinearLayout

            // Image, description and title all open the detail screen.
            img_destination.setOnClickListener { mCallDetail(adapterPosition) }
            txt_description.setOnClickListener { mCallDetail(adapterPosition) }
            txt_place.setOnClickListener { mCallDetail(adapterPosition) }

            // Force each row to one quarter of the screen height.
            val metrics = activity.getResources().getDisplayMetrics()
            val params = destinationitem_mainlayout.getLayoutParams()
            params.height = metrics.heightPixels / 4
            destinationitem_mainlayout.setLayoutParams(params)
        }
    }

    /** Opens the detail screen for a row, passing the model and last computed distance. */
    fun mCallDetail(adapterPosition: Int) {
        val intent = Intent(activity, DestinationdetailScreen_Activity::class.java)
        intent.putExtra("distance", "" + mDistance)
        intent.putExtra("destination_object", Destinationlist.get(adapterPosition))
        activity.startActivity(intent)
    }

    // NOTE(review): this is never invoked automatically - permission results are
    // delivered to the host Activity, not the adapter. The Activity must forward
    // them here for the pending call (mPos) to be placed after the grant.
    // (The bogus @Override annotation was removed: this overrides nothing.)
    fun onRequestPermissionsResult(requestCode: Int,
                                   permissions: Array<String>, grantResults: IntArray) {
        when (requestCode) {
            REQUEST_PHONE_CALL -> {
                if (grantResults.size > 0 && grantResults[0] == PackageManager.PERMISSION_GRANTED) {
                    mCall(mPos)
                }
                return
            }
        }
    }
}
<file_sep>/app/src/main/java/learncode/example/com/dreamtrip/Utility/AppUtils.kt
package com.example.admin2base.myapplication.utils
import android.util.Log
/**
 * Lightweight logging helper used across the app. All output is gated on
 * [debug_status] so logging can be switched off for release builds.
 *
 * Created by admin2base on 19/5/18.
 */
class AppUtils private constructor(){

    // When false, debugLog/errorLog become no-ops.
    var debug_status = true

    companion object {
        private var utils: AppUtils = AppUtils()
        val instance: AppUtils
            get() = utils
    }

    /** Logcat DEBUG output, suppressed unless [debug_status] is true. */
    fun debugLog(tag: String, message: String) {
        if (!debug_status) return
        Log.d(tag, message)
    }

    /** Logcat ERROR output, suppressed unless [debug_status] is true. */
    fun errorLog(tag: String, message: String) {
        if (!debug_status) return
        Log.e(tag, message)
    }
}<file_sep>/app/src/main/java/learncode/example/com/dreamtrip/NetworkDirectory/ApiCient.kt
package learncode.example.com.dreamtrip
import com.google.gson.Gson
import com.google.gson.GsonBuilder
import okhttp3.OkHttpClient
import retrofit2.Retrofit
import retrofit2.converter.gson.GsonConverterFactory
import java.util.concurrent.TimeUnit
/**
 * Retrofit/OkHttp factory for the Dream Trip REST backend.
 *
 * Created by Suraj on 23/5/18.
 */
class ApiCient private constructor() {
    companion object {

        /** Base URL of the backend service. */
        fun getAPIurl(): String = "http://mccollinsmedia.com/myproject/service/"

        /** OkHttp client with generous 120-second connect/write/read timeouts. */
        fun GetClient(): OkHttpClient = OkHttpClient.Builder()
                .connectTimeout(120, TimeUnit.SECONDS)
                .writeTimeout(120, TimeUnit.SECONDS)
                .readTimeout(120, TimeUnit.SECONDS)
                .build()

        /** Builds a ready-to-use implementation of [APISERVICE]. */
        fun create(): APISERVICE = Retrofit.Builder()
                .client(GetClient())
                .baseUrl(getAPIurl())
                .addConverterFactory(GsonConverterFactory.create(getGson()))
                .build()
                .create(APISERVICE::class.java)

        /** Lenient Gson instance so slightly malformed JSON still parses. */
        fun getGson(): Gson = GsonBuilder()
                .setLenient()
                .create()
    }
}
<file_sep>/app/src/main/java/learncode/example/com/dreamtrip/Activities/ProfileActivity.kt
package learncode.example.com.dreamtrip.Activities
import android.Manifest
import android.Manifest.permission.CAMERA
import android.Manifest.permission.WRITE_EXTERNAL_STORAGE
import android.app.Activity
import android.app.AlertDialog
import android.app.DatePickerDialog
import android.app.Dialog
import android.content.ContentValues
import android.content.Context
import android.content.DialogInterface
import android.content.Intent
import android.content.pm.PackageManager
import android.graphics.Bitmap
import android.graphics.BitmapFactory
import android.net.Uri
import android.os.Build
import android.os.Bundle
import android.provider.MediaStore
import android.support.annotation.RequiresApi
import android.support.v4.app.ActivityCompat
import android.support.v4.content.ContextCompat
import android.support.v7.app.AppCompatActivity
import android.util.Log
import android.view.View
import android.widget.*
import com.example.admin2base.myapplication.utils.AppUtils
import com.google.gson.Gson
import com.google.gson.JsonElement
import com.google.gson.JsonObject
import com.squareup.picasso.Picasso
import id.zelory.compressor.Compressor
import kotlinx.android.synthetic.main.activity_register_screen_.*
import learncode.example.com.dreamtrip.ApiCient
import learncode.example.com.dreamtrip.R
import learncode.example.com.dreamtrip.Utility.SessionManager
import learncode.example.com.dreamtrip.Utility.Util
import okhttp3.MediaType
import okhttp3.MultipartBody
import okhttp3.RequestBody
import org.json.JSONObject
import retrofit2.Call
import retrofit2.Callback
import retrofit2.Response
import java.io.File
import java.io.IOException
import java.util.*
class ProfileActivity : AppCompatActivity() {
/*Variable declaration*/
var mFirstname: EditText? = null
var mLastname: EditText? = null
var mEmailaddress: EditText? = null
var mMobilenumber: EditText? = null
var mDob: TextView? = null
var textview_logout: TextView? = null
var mGender: String = "Male"
var mBack_btn: ImageView? = null
var mTitle: TextView? = null
var btn_Submit: TextView? = null
var mProfile: ImageView? = null
var img_profileimage: ImageView? = null
var edit_profile: ImageView? = null
private var mFile: File? = null
private val REQUEST_READ_PERMISSION = 114
private val REQUEST_CAMERA_PERMISSION = 115
private val SELECT_PHOTO = 1001
private val SELECT_CAMERA = 1002
private var imageUri: Uri? = null
private var thumbnail: Bitmap? = null
private var imageurl = ""
var mProgressDialog: Dialog? = null
private var image_param_string = ""
val TAG = RegisterScreen_Activity::class.java.simpleName
private var mContext: Context? = null
val mGenderArray = arrayOf("Select Gender", "Male", "Female")
val RetrofitClient = ApiCient
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
setContentView(R.layout.activity_profile_screen_)
mContext = this@ProfileActivity
Init()
GetUserById()
edit_profile?.visibility = View.VISIBLE
mProfile?.visibility = View.GONE
btn_Submit?.visibility = View.GONE
mTitle?.text = getString(R.string.profile)
EnabledViews(false)
mBack_btn?.setOnClickListener(View.OnClickListener {
finish();
})
edit_profile?.setOnClickListener { v ->
EnabledViews(true)
btn_Submit?.visibility = View.VISIBLE
mFirstname?.requestFocus()
}
img_profileimage?.setOnClickListener(View.OnClickListener { v ->
mPickImage()
})
//Adapter for spinner
val adapter = ArrayAdapter<String>(this, R.layout.customtextview_spinner, mGenderArray)
adapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item)
user_genderspinner.adapter = adapter
//item selected listener for spinner
user_genderspinner.onItemSelectedListener = object : AdapterView.OnItemSelectedListener {
override fun onNothingSelected(p0: AdapterView<*>?) {
TODO("not implemented") //To change body of created functions use File | Settings | File Templates.
}
override fun onItemSelected(p0: AdapterView<*>?, p1: View?, p2: Int, p3: Long) {
mGender = mGenderArray[p2]
// Toast.makeText(mContext,""+mGender,Toast.LENGTH_SHORT).show()
}
}
/**
*
* Edit profile
*
*
* */
btn_Submit?.setOnClickListener(View.OnClickListener {
if (mFile == null) {
Toast.makeText(mContext, getString(R.string.choose_profile), Toast.LENGTH_SHORT).show()
} else if (mFirstname?.getText().toString().trim().length == 0) {
Toast.makeText(mContext, getString(R.string.enter_first_name), Toast.LENGTH_SHORT).show()
} else if (mLastname?.getText().toString().trim().length == 0) {
Toast.makeText(mContext, getString(R.string.enter_lastname), Toast.LENGTH_SHORT).show()
} else if (mEmailaddress?.getText().toString().trim().length == 0) {
Toast.makeText(mContext, getString(R.string.enteremail), Toast.LENGTH_SHORT).show()
} else if (!Util.isValidEmail(mEmailaddress?.getText().toString().trim())) {
Toast.makeText(mContext, getString(R.string.enter_email), Toast.LENGTH_SHORT).show()
} else if (mMobilenumber?.getText().toString().trim().length == 0) {
Toast.makeText(mContext, getString(R.string.enter_mob_num), Toast.LENGTH_SHORT).show()
} else if (mMobilenumber?.getText().toString().trim().length < 8) {
Toast.makeText(mContext, "" + mGender, Toast.LENGTH_SHORT).show()
Toast.makeText(mContext, getString(R.string.lenth_of_mob), Toast.LENGTH_SHORT).show()
} else if (mGender.equals(mGenderArray[0])) {
Toast.makeText(mContext, getString(R.string.select_gender), Toast.LENGTH_SHORT).show()
} else if (mDob?.getText().toString().trim().length == 0) {
Toast.makeText(mContext, getString(R.string.enter_dob), Toast.LENGTH_SHORT).show()
} else {
try {
if (Util.isConnected(mContext as ProfileActivity)) {
mProgressDialog = Util.ShowProgressView(mContext as ProfileActivity)
mProgressDialog?.show()
try {
var id =
RequestBody.create(MediaType.parse("multipart/form-data"), SessionManager.getSession(Util.session_user_id, mContext));
// adding another part within the multipart request
var firstname =
RequestBody.create(MediaType.parse("multipart/form-data"), mFirstname?.getText().toString().trim());
var lastname =
RequestBody.create(MediaType.parse("multipart/form-data"), mLastname?.getText().toString().trim());
var email =
RequestBody.create(MediaType.parse("multipart/form-data"), mEmailaddress?.getText().toString().trim());
var dob =
RequestBody.create(MediaType.parse("multipart/form-data"), mDob?.getText().toString().trim());
var mobile =
RequestBody.create(MediaType.parse("multipart/form-data"), mMobilenumber?.getText().toString().trim());
var gender =
RequestBody.create(MediaType.parse("multipart/form-data"), mGender);
var requestFile =
RequestBody.create(MediaType.parse("multipart/form-data"), mFile);
// MultipartBody.Part is used to send also the actual file name
var body =
MultipartBody.Part.createFormData("file", mFile?.getName(), requestFile);
var networkrequest = ApiCient.create().UpdateUserDetails(id, firstname, lastname, gender, dob, email, mobile, body)
networkrequest.enqueue(object : retrofit2.Callback<JsonElement> {
override fun onResponse(call: Call<JsonElement>?, response: Response<JsonElement>?) {
if (response != null) {
mProgressDialog?.dismiss()
mFile = null
var responce_object = JSONObject(response.body().toString())
AppUtils.instance.debugLog("RESPONCE FROM PROFILE UPDATE", "------------>" + responce_object)
if (responce_object.getString("iserror").equals("No")) {
Toast.makeText(mContext, responce_object.getString("message"), Toast.LENGTH_SHORT)
.show()
} else {
Toast.makeText(mContext, getString(R.string.update_error), Toast.LENGTH_SHORT)
.show()
}
SessionManager.saveSession(Util.session_email, "" + mEmailaddress?.getText().toString().trim(), mContext)
val mIntent = Intent(mContext, HomeScreen_Activity::class.java)
startActivity(mIntent)
finish()
}
}
override fun onFailure(call: Call<JsonElement>?, t: Throwable?) {
Toast.makeText(mContext, getString(R.string.update_error), Toast.LENGTH_SHORT)
.show()
}
})
} catch (e: Exception) {
e.printStackTrace()
}
} else {
Toast.makeText(mContext, getString(R.string.connect_internet), Toast.LENGTH_SHORT)
.show()
}
} catch (e: Exception) {
e.printStackTrace()
}
}
})
mDob?.setOnClickListener(View.OnClickListener { view ->
SetDateOfBirth();
})
/**
*
* Logout the User
*
*
* */
textview_logout!!.setOnClickListener({
val builder: AlertDialog.Builder
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
builder = AlertDialog.Builder(this, android.R.style.Theme_Material_Dialog_Alert)
} else {
builder = AlertDialog.Builder(this)
}
builder.setTitle("")
.setMessage("Are you sure you want to logout?")
.setPositiveButton(android.R.string.yes, DialogInterface.OnClickListener { dialog, which ->
// continue with delete
SessionManager.ClearSession(this);
val mIntent = Intent(mContext, LoginActivity::class.java).setFlags(Intent.FLAG_ACTIVITY_CLEAR_TOP)
startActivity(mIntent)
finish()
})
.setNegativeButton(android.R.string.no, DialogInterface.OnClickListener { dialog, which ->
// do nothingdialog
dialog.dismiss()
})
.setIcon(android.R.drawable.ic_dialog_alert)
.show()
})
}
private fun EnabledViews(b: Boolean) {
mFirstname?.setEnabled(b)
mLastname?.setEnabled(b)
mEmailaddress?.setEnabled(b)
mMobilenumber?.setEnabled(b)
mDob?.setEnabled(b)
mTitle?.setEnabled(b)
img_profileimage?.setEnabled(b)
}
/**
 * Fetches the logged-in user's profile from the backend and fills the form
 * (name, email, DOB, mobile, gender, avatar).
 *
 * Fixes over the previous version:
 *  - the progress dialog is dismissed on every outcome (missing "data",
 *    network failure), not only on success
 *  - the duplicated session_email save was removed
 *  - the repeated dataarray.getJSONObject(0) lookups are hoisted
 */
private fun GetUserById() {
    if (Util.isConnected(mContext as ProfileActivity)) {
        mProgressDialog = Util.ShowProgressView(mContext as ProfileActivity)
        mProgressDialog?.show()
        val Request = JsonObject()
        Request.addProperty("id", SessionManager.getSession(Util.session_user_id, mContext))
        var network_request = RetrofitClient.create().GetUserById(Request)
        network_request.enqueue(object : Callback<JsonElement> {
            override fun onResponse(call: Call<JsonElement>?, response: Response<JsonElement>?) {
                // Always hide the spinner, even when the payload is unexpected.
                mProgressDialog?.dismiss()
                var mResponse = JSONObject(response!!.body().toString())
                if (mResponse.has("data")) {
                    var dataarray = mResponse.getJSONArray("data")
                    AppUtils.instance.debugLog(TAG, "DDDD" + mResponse)
                    if (dataarray.length() != 0) {
                        val user = dataarray.getJSONObject(0)
                        mFirstname!!.setText("" + user.getString("fname"))
                        mLastname!!.setText("" + user.getString("lname"))
                        mEmailaddress!!.setText("" + user.getString("email"))
                        mDob!!.setText("" + user.getString("dob"))
                        mMobilenumber!!.setText("" + user.getString("mobile"))
                        // Spinner index 1 == Male, 2 == Female (matches the layout order).
                        if (user.getString("gender").equals("Male")) {
                            user_genderspinner.setSelection(1);
                            mGender = "Male"
                        } else {
                            mGender = "Female"
                            user_genderspinner.setSelection(2);
                        }
                        try {
                            Picasso.get().load(user.getString("image"))
                                .placeholder(R.mipmap.ic_profile).into(img_profileimage);
                        } catch (e: Exception) {
                            e.printStackTrace()
                        }
                        // Persist the fetched values once (the old code saved the email twice).
                        SessionManager.saveSession(Util.session_email, user.getString("email"), mContext)
                        SessionManager.saveSession(Util.session_user_image, user.getString("image"), mContext)
                    }
                }
            }
            override fun onFailure(call: Call<JsonElement>?, t: Throwable?) {
                // Previously the spinner was left up forever on a network error.
                mProgressDialog?.dismiss()
                t?.printStackTrace()
            }
        })
    } else {
        Toast.makeText(mContext, "Please enable Internet", Toast.LENGTH_SHORT)
            .show()
    }
}
/**
 * Shows a date picker initialised to today and writes the chosen date into
 * the DOB field as "d-M-yyyy".
 *
 * Fix: DatePickerDialog reports the month 0-based (January == 0), so it must
 * be incremented before display — the previous version showed the month off
 * by one.
 */
private fun SetDateOfBirth() {
    val today = Calendar.getInstance()
    val dpd = DatePickerDialog(mContext, DatePickerDialog.OnDateSetListener { view, year, monthOfYear, dayOfMonth ->
        // Display the selected date in the textbox (month converted to 1-based).
        mDob?.setText("" + dayOfMonth + "-" + (monthOfYear + 1) + "-" + year)
    }, today.get(Calendar.YEAR), today.get(Calendar.MONTH), today.get(Calendar.DAY_OF_MONTH))
    dpd.show()
}
/** Binds every view reference used by this screen from the inflated layout. */
private fun Init() {
    mFirstname = findViewById<EditText>(R.id.user_firstname)
    mLastname = findViewById<EditText>(R.id.user_lastname)
    mEmailaddress = findViewById<EditText>(R.id.user_email)
    mMobilenumber = findViewById<EditText>(R.id.user_mobile)
    mDob = findViewById<TextView>(R.id.user_dob)
    textview_logout = findViewById<TextView>(R.id.textview_logout)
    mBack_btn = findViewById<ImageView>(R.id.back_btn)
    mTitle = findViewById<TextView>(R.id.mTitle)
    btn_Submit = findViewById<TextView>(R.id.btn_Submit)
    mProfile = findViewById<ImageView>(R.id.profile)
    edit_profile = findViewById<ImageView>(R.id.edit_profile)
    img_profileimage = findViewById<ImageView>(R.id.img_profileimage)
}
/** Asks whether the new profile picture comes from the gallery or the camera. */
fun mPickImage() {
    android.app.AlertDialog.Builder(this)
        .setTitle("Pick Image")
        .setMessage("Choose image from")
        .setPositiveButton("GALLERY") { dialog, which -> askForPermissionGallery() }
        .setNegativeButton("CAMERA") { dialog, which -> askForCameraPermission() }
        .show()
}
/** Launches the system gallery picker; the result arrives in onActivityResult. */
internal fun openGallery() {
    val pickIntent = Intent(Intent.ACTION_PICK, MediaStore.Images.Media.EXTERNAL_CONTENT_URI)
    startActivityForResult(pickIntent, SELECT_PHOTO)
}
/**
 * Launches the camera app to capture a new profile photo.
 *
 * A placeholder row is inserted into MediaStore first and its Uri stored in
 * [imageUri] so the capture is written straight to that location; the result
 * arrives in onActivityResult under SELECT_CAMERA.
 */
internal fun openCamera() {
/* Intent intent = new Intent(android.provider.MediaStore.ACTION_IMAGE_CAPTURE);
startActivityForResult(intent, SELECT_CAMERA);*/
var values = ContentValues()
values.put(MediaStore.Images.Media.TITLE, "New Picture")
values.put(MediaStore.Images.Media.DESCRIPTION, "From your Camera")
// Reserve a MediaStore entry and remember its Uri as the capture target.
imageUri = contentResolver.insert(
MediaStore.Images.Media.EXTERNAL_CONTENT_URI, values)
val intent = Intent(MediaStore.ACTION_IMAGE_CAPTURE)
intent.putExtra(MediaStore.EXTRA_OUTPUT, imageUri)
startActivityForResult(intent, SELECT_CAMERA)
}
/** @return true when both WRITE_EXTERNAL_STORAGE and CAMERA are already granted. */
fun hasPermissions(): Boolean {
    val storageGranted =
        ContextCompat.checkSelfPermission(this@ProfileActivity, WRITE_EXTERNAL_STORAGE) == PackageManager.PERMISSION_GRANTED
    val cameraGranted =
        ContextCompat.checkSelfPermission(this@ProfileActivity, CAMERA) == PackageManager.PERMISSION_GRANTED
    return storageGranted && cameraGranted
}
/**
 * Requests the storage + camera runtime permissions; the result is delivered
 * to onRequestPermissionsResult under REQUEST_CAMERA_PERMISSION.
 */
private fun requestPermission() {
ActivityCompat.requestPermissions(this, arrayOf(WRITE_EXTERNAL_STORAGE, CAMERA), REQUEST_CAMERA_PERMISSION)
}
/**
 * Opens the camera directly, or requests the runtime permissions first when
 * running on Android M+ without them.
 */
private fun askForCameraPermission() {
    val needsRuntimeRequest =
        Build.VERSION.SDK_INT >= Build.VERSION_CODES.M && !hasPermissions()
    if (needsRuntimeRequest) requestPermission() else openCamera()
}
/**
 * Opens the gallery directly, or requests WRITE_EXTERNAL_STORAGE first when
 * running on Android M+ without it. (Short-circuit keeps the M-only
 * checkSelfPermission call off older devices.)
 */
private fun askForPermissionGallery() {
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M &&
        checkSelfPermission(Manifest.permission.WRITE_EXTERNAL_STORAGE) != PackageManager.PERMISSION_GRANTED
    ) {
        ActivityCompat.requestPermissions(
            this,
            arrayOf(Manifest.permission.WRITE_EXTERNAL_STORAGE),
            REQUEST_READ_PERMISSION
        )
    } else {
        openGallery()
    }
}
/**
 * Resolves a content Uri to a filesystem path.
 *
 * Falls back to Uri.path when the resolver returns no cursor (e.g. a plain
 * file path from Dropbox-style providers), otherwise reads the DATA column.
 *
 * NOTE(review): relies on the MediaStore DATA column, which is deprecated
 * under Android 10+ scoped storage — confirm the target SDK before reuse.
 */
private fun getRealPathFromURI(contentURI: Uri): String {
val result: String
val cursor = contentResolver.query(contentURI, null, null, null, null)
if (cursor == null) { // Source is Dropbox or other similar local file path
result = contentURI.path
} else {
cursor.moveToFirst()
val idx = cursor.getColumnIndex(MediaStore.Images.ImageColumns.DATA)
result = cursor.getString(idx)
cursor.close()
}
return result
}
/**
 * Handles the results of the camera/storage and gallery permission requests.
 *
 * REQUEST_CAMERA_PERMISSION: opens the camera when both grants succeeded;
 * otherwise shows a rationale dialog whose OK button re-requests both
 * permissions.
 * REQUEST_READ_PERMISSION: opens the gallery on grant, silently ignores
 * denial.
 *
 * NOTE(review): the if/else around Build.VERSION contains two identical
 * rationale branches — candidates for merging. Also, grantResults[1] is read
 * whenever grantResults is non-empty; if the system ever delivers a single
 * result this would throw — confirm against the request in
 * requestPermission().
 */
@RequiresApi(api = Build.VERSION_CODES.M)
override fun onRequestPermissionsResult(requestCode: Int, permissions: Array<String>, grantResults: IntArray) {
super.onRequestPermissionsResult(requestCode, permissions, grantResults)
when (requestCode) {
REQUEST_CAMERA_PERMISSION ->
if (grantResults.size > 0) {
// Despite its name, index 0 is the WRITE_EXTERNAL_STORAGE grant
// (see the array passed in requestPermission()).
val locationAccepted = grantResults[0] == PackageManager.PERMISSION_GRANTED
val cameraAccepted = grantResults[1] == PackageManager.PERMISSION_GRANTED
if (locationAccepted && cameraAccepted)
openCamera()
else {
/* Snackbar.make(mView, "Permission Denied, You cannot access the write and camera permission.", Snackbar.LENGTH_LONG).show()
*/
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
if (shouldShowRequestPermissionRationale(WRITE_EXTERNAL_STORAGE)) {
showMessageOKCancel("You need to allow access to both the permissions",
DialogInterface.OnClickListener { dialog, which ->
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
requestPermissions(arrayOf(WRITE_EXTERNAL_STORAGE, CAMERA),
REQUEST_CAMERA_PERMISSION)
}
})
return
}
} else {
// Identical to the branch above; kept for behavior parity.
if (shouldShowRequestPermissionRationale(WRITE_EXTERNAL_STORAGE)) {
showMessageOKCancel("You need to allow access to both the permissions",
DialogInterface.OnClickListener { dialog, which ->
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
requestPermissions(arrayOf(WRITE_EXTERNAL_STORAGE, CAMERA),
REQUEST_CAMERA_PERMISSION)
}
})
return
}
}
}
}
REQUEST_READ_PERMISSION -> if (grantResults[0] == PackageManager.PERMISSION_GRANTED) {
openGallery()
} else {
}
}
}
/** Shows a rationale dialog; "OK" runs [okListener], "Cancel" just dismisses. */
private fun showMessageOKCancel(message: String, okListener: DialogInterface.OnClickListener) {
    val dialog = AlertDialog.Builder(this@ProfileActivity)
        .setMessage(message)
        .setPositiveButton("OK", okListener)
        .setNegativeButton("Cancel", null)
        .create()
    dialog.show()
}
/**
 * Receives the picked/captured profile image.
 *
 * Gallery result (SELECT_PHOTO + OK + data Uri): resolves the content Uri to
 * a file path, previews it, and compresses it for upload; if compression
 * fails the original file is used.
 *
 * Anything else is treated as a camera capture: the bitmap is read from
 * [imageUri] (set by openCamera), resolved to a path and compressed the same
 * way.
 *
 * NOTE(review): the else-branch also runs for cancelled gallery picks; the
 * surrounding try/catch is what keeps that from crashing — confirm this is
 * intended.
 */
override fun onActivityResult(requestCode: Int, resultCode: Int, data: Intent?) {
super.onActivityResult(requestCode, resultCode, data)
if (requestCode == SELECT_PHOTO && resultCode == Activity.RESULT_OK && data != null && data.data != null) {
val selectedImage = data.data
val filePathColumn = arrayOf(MediaStore.Images.Media.DATA)
val cursor = contentResolver.query(selectedImage!!, filePathColumn, null, null, null) ?: return
cursor.moveToFirst()
val columnIndex = cursor.getColumnIndex(filePathColumn[0])
val filePath = cursor.getString(columnIndex)
cursor.close()
image_param_string = "" + filePath
// Preview the chosen image immediately.
img_profileimage?.setImageBitmap(BitmapFactory.decodeFile(filePath));
val mFile1 = File(filePath)
Log.e("**********", "!!FILE SIZE==>" + mFile1.length() / 1024)
try {
// Shrink the file before upload; fall back to the original on failure.
mFile = Compressor(this).compressToFile(mFile1)
Log.e("**********", "!!FILE SIZE==>" + mFile?.length()!! / 1024)
} catch (e: IOException) {
e.printStackTrace()
mFile = File(filePath)
}
} else {
assert(data != null)
try {
thumbnail = MediaStore.Images.Media.getBitmap(
contentResolver, imageUri)
imageurl = getRealPathFromURI(imageUri!!)
val mFile1 = File(imageurl)
Log.e("**********", "!!FILE 1 SIZE==>" + mFile1.length() / 1024)
try {
mFile = Compressor(this).compressToFile(mFile1)
Log.e("**********", "!!FILE SIZE==>" + mFile?.length()!! / 1024)
} catch (e: IOException) {
e.printStackTrace()
mFile = File(imageurl)
}
image_param_string = "" + imageurl
} catch (e: Exception) {
e.printStackTrace()
}
}
}
}
<file_sep>/app/src/main/java/learncode/example/com/dreamtrip/Activities/DestinationdetailScreen_Activity.kt
package learncode.example.com.dreamtrip.Activities
import android.content.ActivityNotFoundException
import android.content.Context
import android.content.Intent
import android.net.Uri
import android.os.Bundle
import android.support.v7.app.AppCompatActivity
import android.view.View
import android.widget.ImageView
import android.widget.TextView
import android.widget.Toast
import com.squareup.picasso.Picasso
import learncode.example.com.dreamtrip.DataModelClass.DestinationItem
import learncode.example.com.dreamtrip.R
import learncode.example.com.dreamtrip.Utility.SessionManager
import learncode.example.com.dreamtrip.Utility.Util
/**
 * Detail screen for a single destination.
 *
 * Expects a Parcelable [DestinationItem] under intent extra
 * "destination_object" plus a "distance" string extra; renders image,
 * contact, distance, timing, title and description, and links the image,
 * title and overview to the destination's website.
 */
class DestinationdetailScreen_Activity : AppCompatActivity() {
var mDestination_img:ImageView?=null
var mDestination_number:TextView?=null
var mDestination_distance:TextView?=null
var mDestination_timing:TextView?=null
var mDestination_title:TextView?=null
var mDestination_noverview:TextView?=null
// Model parsed from the launching intent.
var mDestination_model:DestinationItem?=null
var mProfile:ImageView?=null
var mBack_btn :ImageView?=null
var mTitle:TextView?=null
private var mContext: Context? = null
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
setContentView(R.layout.activity_destinationdetail_screen_)
mContext = this@DestinationdetailScreen_Activity
Inint();
mTitle?.text = getString(R.string.detail_page)
// Toolbar avatar: the cached session image, falling back to a placeholder.
try {
Picasso.get().load(SessionManager.getSession(Util.session_user_image,mContext))
.placeholder(R.mipmap.ic_profile).into(mProfile);
}
catch (e:Exception)
{
e.printStackTrace()
}
var intent=intent;
if(intent.hasExtra("destination_object"))
{
mDestination_model=intent.getParcelableExtra("destination_object")
try {
Picasso.get().load(mDestination_model?.image).into(mDestination_img);
}
catch (e:Exception)
{
e.printStackTrace()
}
mTitle?.text = ""+mDestination_model?.title
mDestination_number?.setText("Contact :"+mDestination_model?.contact)
mDestination_distance?.setText("Distance :"+intent.getStringExtra("distance"))
mDestination_timing?.setText(""+mDestination_model?.timing)
mDestination_title?.setText(""+mDestination_model?.title)
mDestination_noverview?.setText(""+mDestination_model?.description)
// Image and title both forward to the overview's click handler below.
mDestination_img?.setOnClickListener(View.OnClickListener {
mDestination_noverview?.performClick()
})
mDestination_title?.setOnClickListener(View.OnClickListener {
mDestination_noverview?.performClick()
})
// Open the destination's website in an external browser.
mDestination_noverview?.setOnClickListener(View.OnClickListener {
try {
val myIntent = Intent(Intent.ACTION_VIEW, Uri.parse(mDestination_model?.sitelink))
startActivity(myIntent)
} catch (e: ActivityNotFoundException) {
Toast.makeText(this, "No application can handle this request." + " Please install a webbrowser", Toast.LENGTH_LONG).show()
e.printStackTrace()
}
})
}
mProfile?.setOnClickListener(View.OnClickListener {
val intent = Intent(this, ProfileActivity::class.java)
startActivity(intent)
})
mBack_btn?.setOnClickListener(View.OnClickListener {
finish();
})
}
/** Binds the view references from the layout (sic: name is a typo of "Init"). */
private fun Inint() {
mDestination_img=findViewById<ImageView>(R.id.img_place) as ImageView
mDestination_number=findViewById<TextView>(R.id.textview_tnumber) as TextView
mDestination_distance=findViewById<TextView>(R.id.textview_distance) as TextView
mDestination_timing=findViewById<TextView>(R.id.textview_timing) as TextView
mDestination_title=findViewById<TextView>(R.id.textview_original_title) as TextView
mDestination_noverview=findViewById<TextView>(R.id.textview_overview) as TextView
mProfile = findViewById<ImageView>(R.id.profile)
mBack_btn = findViewById<ImageView>(R.id.back_btn)
mTitle = findViewById<TextView>(R.id.mTitle)
}
override fun onBackPressed() {
super.onBackPressed()
finish()
}
}
<file_sep>/app/src/main/java/learncode/example/com/dreamtrip/DataModelClass/User.kt
package learncode.example.com.dreamtrip.DataModelClass
/**
* Created by vadivel on 2/7/18.
*/
/**
 * DTO mirroring one element of the login/profile API "data" array (see the
 * sample payload below). Property names must match the JSON keys because
 * instances are produced via Gson.fromJson.
 */
data class User(var user_id:String,var fname:String,var lname:String,var email:String,var mobile:String,
var password:String,var dob:String,var gender:String,var userimage:String)
/* "user_id": "14",
"fname": "Suraj",
"lname": "S",
"email": "<EMAIL>",
"mobile": "633548466",
"password": "<PASSWORD>",
"dob": "2018-06-13",
"gender": "Male",
"userimage": ""*/
<file_sep>/app/src/main/java/learncode/example/com/dreamtrip/Utility/Util.kt
package learncode.example.com.dreamtrip.Utility
import android.annotation.SuppressLint
import android.app.Dialog
import android.content.Context
import android.graphics.Color
import android.graphics.drawable.ColorDrawable
import android.net.ConnectivityManager
import android.text.TextUtils
import android.util.Patterns
import android.view.LayoutInflater
import learncode.example.com.dreamtrip.R
/**
* Created by vadivel on 2/7/18.
*/
/**
 * Stateless helpers shared across the app: session-preference keys, a
 * progress-dialog factory, email validation and a connectivity check.
 * Non-instantiable (private constructor); everything lives in the companion.
 */
class Util private constructor() {
companion object {
// SharedPreferences keys used with SessionManager.
var session_email: String = "email"
var session_user: String = "user"
var session_user_id: String = "user_id"
var session_user_image: String = "user_image"
/**
 * Builds (but does not show) a transparent, cancelable progress dialog
 * inflated from R.layout.progressview_layout. Callers show()/dismiss() it.
 */
fun ShowProgressView(mCtx: Context): Dialog {
val factory = LayoutInflater.from(mCtx)
val DialogView = factory.inflate(R.layout.progressview_layout, null)
val main_dialog = Dialog(mCtx)
main_dialog.window!!.setBackgroundDrawable(ColorDrawable(Color.TRANSPARENT))
main_dialog.setCanceledOnTouchOutside(true)
main_dialog.setCancelable(true)
main_dialog.setContentView(DialogView)
return main_dialog
}
/** @return true for a non-empty string matching Android's EMAIL_ADDRESS pattern. */
fun isValidEmail(target: CharSequence): Boolean {
return !TextUtils.isEmpty(target) && Patterns.EMAIL_ADDRESS.matcher(target).matches()
}
/**
 * @return true when a network is connected or connecting.
 * NOTE(review): activeNetworkInfo is deprecated on newer SDKs — confirm
 * target SDK before relying on it.
 */
@SuppressLint("MissingPermission")
fun isConnected(context: Context): Boolean {
val cm = context
.getSystemService(Context.CONNECTIVITY_SERVICE) as ConnectivityManager
val activeNetwork = cm.activeNetworkInfo
return activeNetwork != null && activeNetwork.isConnectedOrConnecting
}
}
}
<file_sep>/app/src/main/java/learncode/example/com/dreamtrip/Activities/LoginActivity.kt
package learncode.example.com.dreamtrip.Activities
import android.app.Dialog
import android.content.Context
import android.content.Intent
import android.os.Bundle
import android.support.v7.app.AppCompatActivity
import android.view.View
import android.widget.Button
import android.widget.EditText
import android.widget.Toast
import com.google.gson.Gson
import com.google.gson.JsonElement
import com.google.gson.JsonObject
import kotlinx.android.synthetic.main.activity_login_screen_.*
import learncode.example.com.dreamtrip.ApiCient
import learncode.example.com.dreamtrip.DataModelClass.User
import learncode.example.com.dreamtrip.R
import learncode.example.com.dreamtrip.Utility.SessionManager
import learncode.example.com.dreamtrip.Utility.Util
import org.json.JSONObject
import retrofit2.Call
import retrofit2.Response
/**
 * Login screen: validates email/password, calls the UserLogin endpoint and,
 * on success, caches the user in SessionManager and opens the home screen.
 *
 * Fixes over the previous version:
 *  - the progress dialog is dismissed on every outcome (null response,
 *    network failure), not only on a non-null response
 *  - the user-facing "intenet" typo is corrected
 */
class LoginActivity : AppCompatActivity() {
    /*Variable declaration*/
    var mUsername: EditText? = null
    var mPassword: EditText? = null
    var mLogin_button: Button? = null
    val TAG = LoginActivity::class.java.simpleName
    val RetrofitClient = ApiCient
    private var mContext: Context? = null
    var mProgressDialog: Dialog? = null
    override fun onCreate(savedInstanceState: Bundle?) {
        super.onCreate(savedInstanceState)
        setContentView(R.layout.activity_login_screen_)
        mContext = this@LoginActivity
        /* Initialize the views from the layout */
        Init();
        GetIntent();
        /**
         * Validate the input data and submit the login request.
         */
        mLogin_button?.setOnClickListener(View.OnClickListener {
            if (mUsername?.getText().toString().trim().length == 0) {
                Toast.makeText(mContext, getString(R.string.enter_email), Toast.LENGTH_SHORT).show()
            } else if (!Util.isValidEmail(mUsername?.getText().toString().trim())) {
                Toast.makeText(mContext, "Enter a valid email", Toast.LENGTH_SHORT).show()
            } else if (mPassword?.getText().toString().trim().length == 0) {
                Toast.makeText(mContext, getString(R.string.enter_pwd), Toast.LENGTH_SHORT).show()
            } else {
                try {
                    if (Util.isConnected(mContext as LoginActivity)) {
                        mProgressDialog = Util.ShowProgressView(mContext as LoginActivity)
                        mProgressDialog?.show()
                        val Request = JsonObject()
                        Request.addProperty("email", mUsername?.getText().toString().trim())
                        Request.addProperty("password", mPassword?.getText().toString().trim())
                        var networkrequest = ApiCient.create().UserLogin(Request)
                        networkrequest.enqueue(object : retrofit2.Callback<JsonElement> {
                            override fun onResponse(call: Call<JsonElement>?, response: Response<JsonElement>?) {
                                // Always drop the spinner before inspecting the payload.
                                mProgressDialog?.dismiss()
                                if (response != null) {
                                    var mResponse = JSONObject(response.body().toString())
                                    if (mResponse.getString("iserror").equals("Yes")) {
                                        Toast.makeText(mContext, "" + mResponse.getString("message"), Toast.LENGTH_SHORT)
                                            .show()
                                    } else {
                                        if (mResponse.has("data")) {
                                            var dataarray = mResponse.getJSONArray("data")
                                            var gson = Gson()
                                            if (dataarray.length() != 0) {
                                                for (i in 0..dataarray.length() - 1) {
                                                    var user_object = dataarray.getJSONObject(i).toString()
                                                    var user = gson.fromJson(user_object, User::class.java)
                                                    // Cache the essentials of the logged-in user.
                                                    SessionManager.saveSession(Util.session_user_id, "" + user.user_id, mContext)
                                                    SessionManager.saveSession(Util.session_user, "" + user, mContext)
                                                    SessionManager.saveSession(Util.session_email, "" + user?.email, mContext)
                                                    SessionManager.saveSession(Util.session_user_image, "" + user?.userimage, mContext)
                                                    val mIntent = Intent(mContext, HomeScreen_Activity::class.java)
                                                    startActivity(mIntent)
                                                    finish()
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                            override fun onFailure(call: Call<JsonElement>?, t: Throwable?) {
                                // Previously empty: the spinner stayed up forever on error.
                                mProgressDialog?.dismiss()
                                t?.printStackTrace()
                            }
                        })
                    } else {
                        Toast.makeText(mContext, "Please connect to internet", Toast.LENGTH_SHORT)
                            .show()
                    }
                } catch (e: Exception) {
                    e.printStackTrace()
                }
            }
        })
        register_user.setOnClickListener(View.OnClickListener {
            val mIntent = Intent(mContext, RegisterScreen_Activity::class.java)
            startActivity(mIntent)
        })
    }
    /** Pre-fills the form when credentials were passed from the register screen. */
    private fun GetIntent() {
        var intent = intent
        if (intent.hasExtra("username")) {
            mUsername?.setText("" + intent.getStringExtra("username"))
            mPassword?.setText("" + intent.getStringExtra("password"))
        }
    }
    /***
     * Init views
     * */
    private fun Init() {
        mUsername = findViewById<EditText>(R.id.user_name) as EditText
        mPassword = findViewById<EditText>(R.id.user_password) as EditText
        mLogin_button = findViewById<Button>(R.id.btn_login)
    }
}
| 5657dd31819498d213a460e8b63e9c60f4bb1b69 | [
"Kotlin"
] | 13 | Kotlin | SURAJ2600/DreamTrip | d118f94d27f299f5f1b4615757d36ab4749ea703 | 939d38d5c6ad4fc4b14ab42e55f0f9ae76eb5c60 |
refs/heads/master | <file_sep>package siafile
import (
"encoding/binary"
"os"
"github.com/HyperspaceApp/Hyperspace/crypto"
"github.com/HyperspaceApp/Hyperspace/modules"
)
type (
	// FileData is a helper struct that contains all the relevant information
	// of a file. It simplifies passing the necessary data between modules and
	// keeps the interface clean.
	FileData struct {
		Name        string                // sia path of the file
		FileSize    uint64                // size of the plaintext file in bytes
		MasterKey   crypto.TwofishKey     // key used to encrypt the pieces
		ErasureCode modules.ErasureCoder  // coder shared by all chunks
		RepairPath  string                // local path used for repairs
		PieceSize   uint64
		Mode        os.FileMode
		Deleted     bool
		UID         string
		Chunks      []FileChunk
	}

	// FileChunk is a helper struct that contains data about a chunk.
	// Pieces is indexed by piece index first, then by the set of hosts
	// storing that piece.
	FileChunk struct {
		Pieces [][]Piece
	}
)
// NewFromFileData creates a new SiaFile from a FileData object that was
// previously created from a legacy file.
//
// Every chunk is given erasure-code type tag {0,0,0,1} and its parameters
// encoded as two little-endian uint32s: [MinPieces | NumPieces-MinPieces].
// Host public keys are deduplicated into the file's pubKeyTable while the
// pieces are copied over.
func NewFromFileData(fd FileData) *SiaFile {
	file := &SiaFile{
		staticMetadata: Metadata{
			staticFileSize:  int64(fd.FileSize),
			staticMasterKey: fd.MasterKey,
			mode:            fd.Mode,
			staticPieceSize: fd.PieceSize,
			siaPath:         fd.Name,
		},
		deleted:   fd.Deleted,
		staticUID: fd.UID,
	}
	// All chunks share the single erasure coder carried by the FileData.
	file.staticChunks = make([]Chunk, len(fd.Chunks))
	for i := range file.staticChunks {
		file.staticChunks[i].staticErasureCode = fd.ErasureCode
		file.staticChunks[i].staticErasureCodeType = [4]byte{0, 0, 0, 1}
		binary.LittleEndian.PutUint32(file.staticChunks[i].staticErasureCodeParams[0:4], uint32(file.staticChunks[i].staticErasureCode.MinPieces()))
		binary.LittleEndian.PutUint32(file.staticChunks[i].staticErasureCodeParams[4:8], uint32(file.staticChunks[i].staticErasureCode.NumPieces()-file.staticChunks[i].staticErasureCode.MinPieces()))
		file.staticChunks[i].pieces = make([][]Piece, file.staticChunks[i].staticErasureCode.NumPieces())
	}

	// Populate the pubKeyTable of the file and add the pieces.
	// pubKeyMap maps a host key to its index in pubKeyTable so each host is
	// recorded only once.
	pubKeyMap := make(map[string]int)
	for chunkIndex, chunk := range fd.Chunks {
		for pieceIndex, pieceSet := range chunk.Pieces {
			for _, piece := range pieceSet {
				// Check if we already added that public key.
				if _, exists := pubKeyMap[string(piece.HostPubKey.Key)]; !exists {
					pubKeyMap[string(piece.HostPubKey.Key)] = len(file.pubKeyTable)
					file.pubKeyTable = append(file.pubKeyTable, piece.HostPubKey)
				}
				// Add the piece to the SiaFile.
				file.staticChunks[chunkIndex].pieces[pieceIndex] = append(file.staticChunks[chunkIndex].pieces[pieceIndex], Piece{
					HostPubKey: piece.HostPubKey,
					MerkleRoot: piece.MerkleRoot,
				})
			}
		}
	}
	return file
}
// ExportFileData creates a FileData object from a SiaFile that can be used to
// convert the file into a legacy file.
//
// The chunk/piece structure is deep-copied under the file's read lock so the
// caller cannot race with later mutations of the SiaFile. The ErasureCode of
// chunk 0 is used for the whole FileData.
func (sf *SiaFile) ExportFileData() FileData {
	sf.mu.RLock()
	defer sf.mu.RUnlock()
	fd := FileData{
		Name:        sf.staticMetadata.siaPath,
		FileSize:    uint64(sf.staticMetadata.staticFileSize),
		MasterKey:   sf.staticMetadata.staticMasterKey,
		ErasureCode: sf.staticChunks[0].staticErasureCode,
		RepairPath:  sf.staticMetadata.localPath,
		PieceSize:   sf.staticMetadata.staticPieceSize,
		Mode:        sf.staticMetadata.mode,
		Deleted:     sf.deleted,
		UID:         sf.staticUID,
	}

	// Return a deep-copy to avoid race conditions.
	fd.Chunks = make([]FileChunk, len(sf.staticChunks))
	for chunkIndex := range fd.Chunks {
		fd.Chunks[chunkIndex].Pieces = make([][]Piece, len(sf.staticChunks[chunkIndex].pieces))
		for pieceIndex := range fd.Chunks[chunkIndex].Pieces {
			fd.Chunks[chunkIndex].Pieces[pieceIndex] = make([]Piece, len(sf.staticChunks[chunkIndex].pieces[pieceIndex]))
			copy(fd.Chunks[chunkIndex].Pieces[pieceIndex], sf.staticChunks[chunkIndex].pieces[pieceIndex])
		}
	}
	return fd
}
<file_sep>Version History
---------------
Sept 19, 2018:
v0.2.0
- Full nodes now generate and maintain Golomb-coded set filters
- Wallets now generate addresses in accordance with an address gap limit
- Scanning uses GCS filters and address gap limits and is much faster
- Eliminated rescans
- Added a /wallet/build/transaction API endpoint
- Changed the /wallet/address [GET] API endpoint to a /wallet/address [POST] endpoint
- Added a new /wallet/address [GET] API endpoint to retrieve an address that has not been seen on the blockchain
July 2018:
v0.1.0: Closed beta release.
v0.1.1: Adjust genesis block mining difficulty.
<file_sep>package siafile
import (
"math"
"os"
"time"
"github.com/HyperspaceApp/Hyperspace/crypto"
"github.com/HyperspaceApp/Hyperspace/modules"
"github.com/HyperspaceApp/Hyperspace/types"
)
type (
	// Metadata is the metadata of a SiaFile and is JSON encoded.
	// Fields prefixed "static" are set at construction and never mutated
	// afterwards (see the accessors in this file that read them lock-free).
	Metadata struct {
		staticVersion   [16]byte // version of the sia file format used
		staticFileSize  int64    // total size of the file
		staticPieceSize uint64   // size of a single piece of the file
		localPath       string   // path to the local copy of the file used for repairing
		siaPath         string   // the path of the file on the Sia network

		// fields for encryption
		staticMasterKey  crypto.TwofishKey // masterkey used to encrypt pieces
		staticSharingKey crypto.TwofishKey // key used to encrypt shared pieces

		// The following fields are the usual unix timestamps of files.
		modTime    time.Time // time of last content modification
		changeTime time.Time // time of last metadata modification
		accessTime time.Time // time of last access
		createTime time.Time // time of file creation

		// File ownership/permission fields.
		mode os.FileMode // unix filemode of the sia file - uint32
		uid  int         // id of the user who owns the file
		gid  int         // id of the group that owns the file

		// staticChunkMetadataSize is the amount of space allocated within the
		// siafile for the metadata of a single chunk. It allows us to do
		// random access operations on the file in constant time.
		staticChunkMetadataSize uint64

		// The following fields are the offsets for data that is written to disk
		// after the pubKeyTable. We reserve a generous amount of space for the
		// table and extra fields, but we need to remember those offsets in case we
		// need to resize later on.
		//
		// chunkOffset is the offset of the first chunk, forced to be a factor of
		// 4096, default 4kib
		//
		// pubKeyTableOffset is the offset of the publicKeyTable within the
		// file.
		//
		chunkOffset       int64
		pubKeyTableOffset int64
	}
)
// ChunkSize returns the size of a single chunk of the file, i.e.
// staticPieceSize * MinPieces for the given chunk (see staticChunkSize).
// No lock is taken; only static fields are read.
func (sf *SiaFile) ChunkSize(chunkIndex uint64) uint64 {
return sf.staticChunkSize(chunkIndex)
}
// Delete removes the file from disk and marks it as deleted. Once the file is
// deleted, certain methods should return an error.
//
// NOTE(review): only the in-memory flag is set here; no on-disk removal is
// visible in this method — confirm the backing file is removed elsewhere.
func (sf *SiaFile) Delete() {
sf.mu.Lock()
defer sf.mu.Unlock()
sf.deleted = true
}
// Deleted indicates if this file has been deleted by the user.
// Safe for concurrent use; guarded by the file's read lock.
func (sf *SiaFile) Deleted() bool {
sf.mu.RLock()
defer sf.mu.RUnlock()
return sf.deleted
}
// Expiration returns the lowest height at which any of the file's contracts
// will expire. It returns 0 when the file has no host keys at all, and the
// maximum BlockHeight when none of the hosts appear in the contracts map.
func (sf *SiaFile) Expiration(contracts map[string]modules.RenterContract) types.BlockHeight {
	sf.mu.RLock()
	defer sf.mu.RUnlock()
	if len(sf.pubKeyTable) == 0 {
		return 0
	}
	earliest := ^types.BlockHeight(0)
	for _, hostKey := range sf.pubKeyTable {
		contract, ok := contracts[string(hostKey.Key)]
		if ok && contract.EndHeight < earliest {
			earliest = contract.EndHeight
		}
	}
	return earliest
}
// HostPublicKeys returns all the public keys of hosts the file has ever been
// uploaded to. That means some of those hosts might no longer be in use.
//
// A (shallow) copy of the table is returned so that callers cannot observe
// later mutations of the file's internal slice after the read lock is
// released — previously the internal slice itself escaped.
func (sf *SiaFile) HostPublicKeys() []types.SiaPublicKey {
	sf.mu.RLock()
	defer sf.mu.RUnlock()
	keys := make([]types.SiaPublicKey, len(sf.pubKeyTable))
	copy(keys, sf.pubKeyTable)
	return keys
}
// LocalPath returns the path of the local data of the file, i.e. the on-disk
// copy used for repairs. Guarded by the file's read lock.
func (sf *SiaFile) LocalPath() string {
sf.mu.RLock()
defer sf.mu.RUnlock()
return sf.staticMetadata.localPath
}
// MasterKey returns the masterkey used to encrypt the file.
// No lock is taken; the key is a static field set at construction.
func (sf *SiaFile) MasterKey() crypto.TwofishKey {
return sf.staticMetadata.staticMasterKey
}
// Mode returns the unix FileMode of the SiaFile.
// Guarded by the file's read lock (see SetMode for the writer).
func (sf *SiaFile) Mode() os.FileMode {
sf.mu.RLock()
defer sf.mu.RUnlock()
return sf.staticMetadata.mode
}
// PieceSize returns the size of a single piece of the file.
// No lock is taken; the value is a static field set at construction.
func (sf *SiaFile) PieceSize() uint64 {
return sf.staticMetadata.staticPieceSize
}
// Rename changes the name (sia path) of the file to a new one. Currently this
// only updates the in-memory metadata and always returns nil.
// TODO: This will actually rename the file on disk once we persist the new
// file format.
func (sf *SiaFile) Rename(newName string) error {
sf.mu.Lock()
defer sf.mu.Unlock()
sf.staticMetadata.siaPath = newName
return nil
}
// SetMode sets the unix filemode of the sia file.
// Guarded by the file's write lock.
func (sf *SiaFile) SetMode(mode os.FileMode) {
sf.mu.Lock()
defer sf.mu.Unlock()
sf.staticMetadata.mode = mode
}
// SetLocalPath changes the local path of the file which is used to repair
// the file from disk. Guarded by the file's write lock.
func (sf *SiaFile) SetLocalPath(path string) {
sf.mu.Lock()
defer sf.mu.Unlock()
sf.staticMetadata.localPath = path
}
// SiaPath returns the file's sia path (the name used on the network).
// Guarded by the file's read lock (see Rename for the writer).
func (sf *SiaFile) SiaPath() string {
sf.mu.RLock()
defer sf.mu.RUnlock()
return sf.staticMetadata.siaPath
}
// Size returns the file's plaintext size in bytes.
// No lock is taken; the value is a static field set at construction.
func (sf *SiaFile) Size() uint64 {
return uint64(sf.staticMetadata.staticFileSize)
}
// UploadedBytes indicates how many bytes of the file have been uploaded via
// current file contracts. Note that this includes padding and redundancy, so
// the result can be much larger than the file's original filesize.
func (sf *SiaFile) UploadedBytes() uint64 {
	sf.mu.RLock()
	defer sf.mu.RUnlock()
	var total uint64
	for _, chunk := range sf.staticChunks {
		for _, pieceSet := range chunk.pieces {
			// Count a full sector per stored piece rather than pieceSize:
			// the bytes actually uploaded include overhead from the TwoFish
			// encryption.
			total += uint64(len(pieceSet)) * modules.SectorSize
		}
	}
	return total
}
// UploadProgress indicates what percentage of the file (plus redundancy) has
// been uploaded. Note that a file may be Available long before UploadProgress
// reaches 100%, and UploadProgress may report a value greater than 100% before
// it is capped.
func (sf *SiaFile) UploadProgress() float64 {
// Zero-byte files report 100% immediately.
// TODO change this once tiny files are supported.
if sf.Size() == 0 {
return 100
}
uploaded := sf.UploadedBytes()
// desired is one full sector per erasure-coded piece of every chunk.
var desired uint64
for i := uint64(0); i < sf.NumChunks(); i++ {
desired += modules.SectorSize * uint64(sf.ErasureCode(i).NumPieces())
}
// Cap at 100 since uploaded can exceed desired.
return math.Min(100*(float64(uploaded)/float64(desired)), 100)
}
// staticChunkSize returns the size of a single chunk of the file:
// piece size times the chunk's minimum number of erasure-code pieces.
func (sf *SiaFile) staticChunkSize(chunkIndex uint64) uint64 {
return sf.staticMetadata.staticPieceSize * uint64(sf.staticChunks[chunkIndex].staticErasureCode.MinPieces())
}
<file_sep>package wallet
import (
"github.com/HyperspaceApp/Hyperspace/build"
"github.com/HyperspaceApp/Hyperspace/modules"
"github.com/HyperspaceApp/Hyperspace/persist"
"github.com/HyperspaceApp/Hyperspace/types"
siasync "github.com/HyperspaceApp/Hyperspace/sync"
)
const scanMultiplier = 4 // how many more keys to generate after each scan iteration
// This is legacy code from the bad old days of terrible seed scanning and improper
// management of pubkey generation. It will be useful for grabbing addresses made by
// wallets not behaving in accordance with the addressGapLimit specified by BIP 44.
// numInitialKeys is the number of keys generated by the seedScanner before
// scanning the blockchain for the first time. The count depends on the build
// target; an unrecognized build.Release panics at init time.
var numInitialKeys = func() uint64 {
switch build.Release {
case "dev":
return 10e3
case "standard":
return 10e6
case "testing":
return 10e3
default:
panic("unrecognized build.Release")
}
}()
// A slowSeedScanner scans the blockchain for addresses that belong to a given
// seed. This is for legacy scanning (pre gap-limit wallets); see the file
// header comment.
type slowSeedScanner struct {
dustThreshold types.Currency // minimum value of outputs to be included
keys map[types.UnlockHash]uint64 // map address to seed index
keysArray [][]byte // same addresses in generation order, as raw bytes for GCS filter matching
maximumExternalIndex uint64 // highest seed index seen on-chain so far
seed modules.Seed
addressGapLimit uint64
siacoinOutputs map[types.SiacoinOutputID]scannedOutput // outputs discovered for this seed
cs modules.ConsensusSet
gapScanner *seedScanner // modern scanner that takes over after the airdrop blocks
lastConsensusChange modules.ConsensusChangeID
cancel chan struct{} // closed to stop the subscription
log *persist.Logger
}
// getMaximumExternalIndex returns the highest seed index observed on-chain.
func (s slowSeedScanner) getMaximumExternalIndex() uint64 {
return s.maximumExternalIndex
}
// getMaximumInternalIndex delegates to the wrapped gap-limit scanner.
func (s slowSeedScanner) getMaximumInternalIndex() uint64 {
return s.gapScanner.maximumInternalIndex
}
// setDustThreshold sets the minimum output value on both this scanner and the
// wrapped gap-limit scanner so they stay in agreement.
func (s *slowSeedScanner) setDustThreshold(d types.Currency) {
s.dustThreshold = d
s.gapScanner.dustThreshold = d
}
// getSiacoinOutputs returns the outputs discovered so far (the internal map,
// not a copy).
func (s slowSeedScanner) getSiacoinOutputs() map[types.SiacoinOutputID]scannedOutput {
return s.siacoinOutputs
}
// numKeys returns the number of keys generated so far.
func (s slowSeedScanner) numKeys() uint64 {
return uint64(len(s.keys))
}
// generateKeys generates n additional keys from the slowSeedScanner's seed,
// appending their unlock hashes to both the index map and the raw-bytes
// array used for GCS filter matching.
//
// The unlock hash is now computed once per key; the previous version
// recomputed UnlockHash() a second time for the array copy.
func (s *slowSeedScanner) generateKeys(n uint64) {
	initialProgress := s.numKeys()
	for i, k := range generateKeys(s.seed, initialProgress, n) {
		uh := k.UnlockConditions.UnlockHash()
		s.keys[uh] = initialProgress + uint64(i)
		s.keysArray = append(s.keysArray, uh[:])
	}
}
// isAirdrop reports whether a block at the given height belongs to the
// initial airdrop. The magic bound is now a named constant for readability.
func isAirdrop(h types.BlockHeight) bool {
	// Heights 0 through lastAirdropHeight inclusive are airdrop blocks.
	const lastAirdropHeight = 7
	return h <= lastAirdropHeight
}
// adjustMinimumIndex raises maximumExternalIndex to the highest seed index
// that appears among the given output diffs.
//
// NOTE(review): despite its name, this adjusts the *maximum* external index —
// consider renaming together with its caller.
func (s *slowSeedScanner) adjustMinimumIndex(siacoinOutputDiffs []modules.SiacoinOutputDiff) {
for _, diff := range siacoinOutputDiffs {
index, exists := s.keys[diff.SiacoinOutput.UnlockHash]
if exists {
s.log.Debugln("Seed scanner adjustMinimumIndex at index", index)
if index > s.maximumExternalIndex {
s.maximumExternalIndex = index
}
}
}
}
// ProcessHeaderConsensusChange matches consensus change headers with generated seeds.
// It needs to look for two types of new outputs:
//
// 1) Delayed outputs that have matured during this block. These outputs come
// attached to the HeaderConsensusChange via the output diff.
//
// 2) Fresh outputs that were created and activated during this block. If the
// current block contains these outputs, the header filter will match the wallet's
// keys.
//
// In a full node, we read the block directly from the consensus db and grab the
// outputs from the block output diff.
func (s *slowSeedScanner) ProcessHeaderConsensusChange(hcc modules.HeaderConsensusChange,
	getSiacoinOutputDiff func(types.BlockID, modules.DiffDirection) ([]modules.SiacoinOutputDiff, error)) {
	var siacoinOutputDiffs []modules.SiacoinOutputDiff
	// grab matured outputs
	siacoinOutputDiffs = append(siacoinOutputDiffs, hcc.MaturedSiacoinOutputDiffs...)
	// grab applied active outputs from full blocks
	for _, pbh := range hcc.AppliedBlockHeaders {
		// The slow phase only covers the airdrop blocks. Once past them,
		// close s.cancel to stop the header subscription; scan() treats the
		// resulting ErrStopped as the normal end of this phase.
		if !isAirdrop(pbh.Height) {
			close(s.cancel)
			return
		}
		blockID := pbh.BlockHeader.ID()
		// Only fetch the block's diffs when the GCS filter says one of our
		// addresses may appear in it.
		if pbh.GCSFilter.MatchUnlockHash(blockID[:], s.keysArray) {
			// log.Printf("apply block: %d", pbh.Height)
			// read the block, process the output
			blockSiacoinOutputDiffs, err := getSiacoinOutputDiff(blockID, modules.DiffApply)
			if err != nil {
				// NOTE(review): a failed diff lookup is treated as fatal.
				panic(err)
			}
			s.adjustMinimumIndex(blockSiacoinOutputDiffs)
			siacoinOutputDiffs = append(siacoinOutputDiffs, blockSiacoinOutputDiffs...)
		}
	}
	// grab reverted active outputs from full blocks
	for _, pbh := range hcc.RevertedBlockHeaders {
		blockID := pbh.BlockHeader.ID()
		if pbh.GCSFilter.MatchUnlockHash(blockID[:], s.keysArray) {
			// log.Printf("revert block: %d", pbh.Height)
			blockSiacoinOutputDiffs, err := getSiacoinOutputDiff(blockID, modules.DiffRevert)
			if err != nil {
				panic(err)
			}
			s.adjustMinimumIndex(blockSiacoinOutputDiffs)
			siacoinOutputDiffs = append(siacoinOutputDiffs, blockSiacoinOutputDiffs...)
		}
	}
	// apply the aggregated output diffs
	for _, diff := range siacoinOutputDiffs {
		if diff.Direction == modules.DiffApply {
			// Track outputs owned by one of our keys, ignoring dust.
			if index, exists := s.keys[diff.SiacoinOutput.UnlockHash]; exists && diff.SiacoinOutput.Value.Cmp(s.dustThreshold) > 0 {
				// log.Printf("slow DiffApply %d: %s\n", index, diff.SiacoinOutput.Value.String())
				s.siacoinOutputs[diff.ID] = scannedOutput{
					id:        types.OutputID(diff.ID),
					value:     diff.SiacoinOutput.Value,
					seedIndex: index,
				}
			}
		} else if diff.Direction == modules.DiffRevert {
			// NOTE: DiffRevert means the output was either spent or was in a
			// block that was reverted.
			if _, exists := s.keys[diff.SiacoinOutput.UnlockHash]; exists {
				// log.Printf("slow DiffRevert %d: %s\n", index, diff.SiacoinOutput.Value.String())
				delete(s.siacoinOutputs, diff.ID)
			}
		}
	}
	// Remember how far we have processed so the gap scanner can resume here.
	s.lastConsensusChange = hcc.ID
}
// scan subscribes s to cs and scans the blockchain for addresses that belong
// to s's seed. If scan returns errMaxKeys, additional keys may need to be
// generated to find all the addresses.
func (s *slowSeedScanner) scan(cancel <-chan struct{}) error {
	// generate a bunch of keys and scan the blockchain looking for them. If
	// none of the 'upper' half of the generated keys are found, we are done;
	// otherwise, generate more keys and try again (bounded by a sane
	// default).
	//
	// NOTE: since scanning is very slow, we aim to only scan once, which
	// means generating many keys.
	s.gapScanner = newFastSeedScanner(s.seed, s.addressGapLimit, s.cs, s.log)
	s.generateKeys(numInitialKeys)
	// This channel is closed by ProcessHeaderConsensusChange once the
	// airdrop blocks are done, which makes the subscription below return
	// siasync.ErrStopped — the expected "phase complete" signal.
	s.cancel = make(chan struct{})
	err := s.cs.HeaderConsensusSetSubscribe(s, modules.ConsensusChangeBeginning, s.cancel)
	// NOTE(review): if the subscription returns nil (it ended without
	// s.cancel being closed), this returns nil early and the gap scan below
	// never runs, and HeaderUnsubscribe is skipped — confirm this is
	// intentional.
	if err != siasync.ErrStopped {
		return err
	}
	s.cs.HeaderUnsubscribe(s)
	// log.Printf("end fist part slow scan s.maximumExternalIndex %d\n", s.maximumExternalIndex)
	// Seed the gap scanner with everything learned during the slow phase and
	// let it continue from the last processed consensus change.
	s.gapScanner.minimumIndex = s.maximumExternalIndex
	s.gapScanner.maximumInternalIndex = s.maximumExternalIndex
	s.gapScanner.maximumExternalIndex = s.maximumExternalIndex
	s.gapScanner.siacoinOutputs = s.siacoinOutputs
	s.gapScanner.generateKeys(uint64(s.addressGapLimit))
	if err := s.gapScanner.cs.HeaderConsensusSetSubscribe(s.gapScanner, s.lastConsensusChange, cancel); err != nil {
		return err
	}
	s.gapScanner.cs.HeaderUnsubscribe(s.gapScanner)
	s.maximumExternalIndex = s.gapScanner.maximumExternalIndex
	// log.Printf("slow scan s.maximumExternalIndex %d\n", s.maximumExternalIndex)
	// for id, sco := range s.gapScanner.siacoinOutputs {
	// log.Printf("siacoinOutputs: %d %s", sco.seedIndex, sco.value.String())
	// s.siacoinOutputs[id] = sco
	// }
	return nil
}
// newSlowSeedScanner returns a new slowSeedScanner.
// Only the seed, address gap limit, consensus set, logger, and empty lookup
// maps are initialized here; the gap scanner and cancel channel are created
// later in scan().
func newSlowSeedScanner(seed modules.Seed, addressGapLimit uint64,
	cs modules.ConsensusSet, log *persist.Logger) *slowSeedScanner {
	return &slowSeedScanner{
		seed:                 seed,
		addressGapLimit:      addressGapLimit,
		maximumExternalIndex: 0,
		keys:                 make(map[types.UnlockHash]uint64, numInitialKeys),
		siacoinOutputs:       make(map[types.SiacoinOutputID]scannedOutput),
		cs:                   cs,
		log:                  log,
	}
}
<file_sep>package renter
import (
"os"
"path/filepath"
"regexp"
"sync"
"github.com/HyperspaceApp/Hyperspace/build"
"github.com/HyperspaceApp/Hyperspace/crypto"
"github.com/HyperspaceApp/Hyperspace/modules"
"github.com/HyperspaceApp/Hyperspace/modules/renter/siafile"
"github.com/HyperspaceApp/Hyperspace/persist"
"github.com/HyperspaceApp/Hyperspace/types"
"github.com/HyperspaceApp/errors"
)
// Sentinel errors returned by the renter's file operations.
var (
	// ErrEmptyFilename is an error when filename is empty
	ErrEmptyFilename = errors.New("filename must be a nonempty string")
	// ErrPathOverload is an error when a file already exists at that location
	ErrPathOverload = errors.New("a file already exists at that location")
	// ErrUnknownPath is an error when a file cannot be found with the given path
	ErrUnknownPath = errors.New("no file known with that path")
)
// A file is a single file that has been uploaded to the network. Files are
// split into equal-length chunks, which are then erasure-coded into pieces.
// Each piece is separately encrypted, using a key derived from the file's
// master key. The pieces are uploaded to hosts in groups, such that one file
// contract covers many pieces.
type file struct {
	name        string // file nickname / path within the renter
	size        uint64 // Static - can be accessed without lock.
	contracts   map[types.FileContractID]fileContract
	masterKey   crypto.TwofishKey    // Static - can be accessed without lock.
	erasureCode modules.ErasureCoder // Static - can be accessed without lock.
	pieceSize   uint64               // Static - can be accessed without lock.
	mode        uint32               // actually an os.FileMode
	deleted     bool                 // indicates if the file has been deleted.
	staticUID   string               // A UID assigned to the file when it gets created.
	mu          sync.RWMutex         // guards the non-static fields above
}
// A fileContract is a contract covering an arbitrary number of file pieces.
// Chunk/Piece metadata is used to split the raw contract data appropriately.
type fileContract struct {
	ID     types.FileContractID // contract identifier
	IP     modules.NetAddress   // network address of the hosting peer
	Pieces []pieceData          // pieces stored under this contract

	// WindowStart is populated from the contract's end height when
	// converting from a SiaFile (see siaFileToFile).
	WindowStart types.BlockHeight
}
// pieceData contains the metadata necessary to request a piece from a
// fetcher.
//
// TODO: Add an 'Unavailable' flag that can be set if the host loses the piece.
// Some TODOs exist in 'repair.go' related to this field.
type pieceData struct {
	Chunk      uint64      // which chunk the piece belongs to
	Piece      uint64      // the index of the piece in the chunk
	MerkleRoot crypto.Hash // the Merkle root of the piece
}
// deriveKey derives the key used to encrypt and decrypt a specific file piece.
// The per-piece key is the hash of (masterKey, chunkIndex, pieceIndex),
// reinterpreted as a Twofish key, so every piece gets a distinct key.
func deriveKey(masterKey crypto.TwofishKey, chunkIndex, pieceIndex uint64) crypto.TwofishKey {
	return crypto.TwofishKey(crypto.HashAll(masterKey, chunkIndex, pieceIndex))
}
// DeleteFile removes a file entry from the renter and deletes its data from
// the hosts it is stored on.
//
// TODO: The data is not cleared from any contracts where the host is not
// immediately online.
func (r *Renter) DeleteFile(nickname string) error {
	lockID := r.mu.Lock()
	f, exists := r.files[nickname]
	if !exists {
		r.mu.Unlock(lockID)
		return ErrUnknownPath
	}
	delete(r.files, nickname)
	delete(r.persist.Tracking, nickname)
	err := persist.RemoveFile(filepath.Join(r.persistDir, f.SiaPath()+ShareExtension))
	if err != nil {
		r.log.Println("WARN: couldn't remove file :", err)
	}
	// FIX: the error from saveSync was silently discarded; log it so a
	// failed persistence write is at least visible.
	if err := r.saveSync(); err != nil {
		r.log.Println("WARN: couldn't save renter persistence:", err)
	}
	r.mu.Unlock(lockID)
	// mark the file as deleted
	f.Delete()
	// TODO: delete the sectors of the file as well.
	return nil
}
// FileList returns all of the files that the renter has or a filtered list
// if a compiled Regexp is supplied. Filtering is applied to the hyperspace path.
// Only the first supplied regexp is consulted.
func (r *Renter) FileList(filter ...*regexp.Regexp) []modules.FileInfo {
	noFilter := len(filter) == 0
	// Get all the files holding the readlock.
	lockID := r.mu.RLock()
	renterFiles := make([]*siafile.SiaFile, 0, len(r.files))
	for _, file := range r.files {
		renterFiles = append(renterFiles, file)
	}
	r.mu.RUnlock(lockID)
	// Filter files by regexp. We can't do that under the same lock since we
	// need to call a public method on the file.
	files := make([]*siafile.SiaFile, 0, len(renterFiles))
	for _, file := range renterFiles {
		if noFilter || filter[0].MatchString(file.SiaPath()) {
			files = append(files, file)
		}
	}
	// Save host keys in map. We can't do that under the same lock since we
	// need to call a public method on the file.
	pks := make(map[string]types.SiaPublicKey)
	for _, f := range files {
		for _, pk := range f.HostPublicKeys() {
			pks[string(pk.Key)] = pk
		}
	}
	// Build 2 maps that map every pubkey to its offline and goodForRenew
	// status.
	goodForRenew := make(map[string]bool)
	offline := make(map[string]bool)
	contracts := make(map[string]modules.RenterContract)
	for _, pk := range pks {
		contract, ok := r.hostContractor.ContractByPublicKey(pk)
		if !ok {
			continue
		}
		// NOTE(review): ok is always true here (the !ok case continues
		// above), so this conjunction reduces to contract.Utility.GoodForRenew.
		goodForRenew[string(pk.Key)] = ok && contract.Utility.GoodForRenew
		offline[string(pk.Key)] = r.hostContractor.IsOffline(pk)
		contracts[string(pk.Key)] = contract
	}
	// Build the list of FileInfos.
	fileList := []modules.FileInfo{}
	for _, f := range files {
		var localPath string
		siaPath := f.SiaPath()
		// The tracking map is only ever read while holding the read lock.
		lockID := r.mu.RLock()
		tf, exists := r.persist.Tracking[siaPath]
		r.mu.RUnlock(lockID)
		if exists {
			localPath = tf.RepairPath
		}
		fileList = append(fileList, modules.FileInfo{
			SiaPath:        f.SiaPath(),
			LocalPath:      localPath,
			Filesize:       f.Size(),
			Renewing:       true,
			Available:      f.Available(offline),
			Redundancy:     f.Redundancy(offline, goodForRenew),
			UploadedBytes:  f.UploadedBytes(),
			UploadProgress: f.UploadProgress(),
			Expiration:     f.Expiration(contracts),
		})
	}
	return fileList
}
// File returns file from siaPath queried by user.
// Update based on FileList
func (r *Renter) File(siaPath string) (modules.FileInfo, error) {
	var fileInfo modules.FileInfo
	// Get the file and its contracts
	lockID := r.mu.RLock()
	file, exists := r.files[siaPath]
	r.mu.RUnlock(lockID)
	if !exists {
		return fileInfo, ErrUnknownPath
	}
	pks := file.HostPublicKeys()
	// Build 2 maps that map every contract id to its offline and goodForRenew
	// status.
	goodForRenew := make(map[string]bool)
	offline := make(map[string]bool)
	contracts := make(map[string]modules.RenterContract)
	for _, pk := range pks {
		contract, ok := r.hostContractor.ContractByPublicKey(pk)
		if !ok {
			continue
		}
		// ok is always true here, so only the utility flag matters.
		goodForRenew[string(pk.Key)] = contract.Utility.GoodForRenew
		offline[string(pk.Key)] = r.hostContractor.IsOffline(pk)
		contracts[string(pk.Key)] = contract
	}
	// Build the FileInfo
	renewing := true
	var localPath string
	// FIX: read r.persist.Tracking while holding the read lock, matching
	// FileList. The original read the map unlocked, racing writers such as
	// DeleteFile and RenameFile.
	lockID = r.mu.RLock()
	tf, exists := r.persist.Tracking[file.SiaPath()]
	r.mu.RUnlock(lockID)
	if exists {
		localPath = tf.RepairPath
	}
	fileInfo = modules.FileInfo{
		SiaPath:        file.SiaPath(),
		LocalPath:      localPath,
		Filesize:       file.Size(),
		Renewing:       renewing,
		Available:      file.Available(offline),
		Redundancy:     file.Redundancy(offline, goodForRenew),
		UploadedBytes:  file.UploadedBytes(),
		UploadProgress: file.UploadProgress(),
		Expiration:     file.Expiration(contracts),
	}
	return fileInfo, nil
}
// RenameFile takes an existing file and changes the nickname. The original
// file must exist, and there must not be any file that already has the
// replacement nickname.
func (r *Renter) RenameFile(currentName, newName string) error {
	lockID := r.mu.Lock()
	defer r.mu.Unlock(lockID)
	// Reject invalid destination paths before touching any state.
	err := validateSiapath(newName)
	if err != nil {
		return err
	}
	// Check that currentName exists and newName doesn't.
	file, exists := r.files[currentName]
	if !exists {
		return ErrUnknownPath
	}
	_, exists = r.files[newName]
	if exists {
		return ErrPathOverload
	}
	// Modify the file and save it to disk.
	file.Rename(newName) // TODO: violation of locking convention
	err = r.saveFile(file)
	if err != nil {
		return err
	}
	// Update the entries in the renter, carrying tracking info across.
	delete(r.files, currentName)
	r.files[newName] = file
	if t, ok := r.persist.Tracking[currentName]; ok {
		delete(r.persist.Tracking, currentName)
		r.persist.Tracking[newName] = t
	}
	err = r.saveSync()
	if err != nil {
		return err
	}
	// Delete the old .sia file.
	oldPath := filepath.Join(r.persistDir, currentName+ShareExtension)
	return os.RemoveAll(oldPath)
}
// fileToSiaFile converts a legacy file to a SiaFile. Fields that can't be
// populated using the legacy file remain blank.
func (r *Renter) fileToSiaFile(f *file, repairPath string) *siafile.SiaFile {
	fileData := siafile.FileData{
		Name:        f.name,
		FileSize:    f.size,
		MasterKey:   f.masterKey,
		ErasureCode: f.erasureCode,
		RepairPath:  repairPath,
		PieceSize:   f.pieceSize,
		Mode:        os.FileMode(f.mode),
		Deleted:     f.deleted,
		UID:         f.staticUID,
	}
	// Pre-size the chunk/piece matrix: one piece-set per erasure-coded piece
	// of every chunk.
	chunks := make([]siafile.FileChunk, f.numChunks())
	for i := 0; i < len(chunks); i++ {
		chunks[i].Pieces = make([][]siafile.Piece, f.erasureCode.NumPieces())
	}
	// Re-home every piece of every contract under its (chunk, piece) slot,
	// resolving the legacy contract ID to the host's public key.
	for _, contract := range f.contracts {
		pk := r.hostContractor.ResolveIDToPubKey(contract.ID)
		for _, piece := range contract.Pieces {
			chunks[piece.Chunk].Pieces[piece.Piece] = append(chunks[piece.Chunk].Pieces[piece.Piece], siafile.Piece{
				HostPubKey: pk,
				MerkleRoot: piece.MerkleRoot,
			})
		}
	}
	fileData.Chunks = chunks
	return siafile.NewFromFileData(fileData)
}
// siaFileToFile converts a SiaFile to a legacy file. Fields that don't exist
// in the legacy file will get lost and therefore not persisted.
func (r *Renter) siaFileToFile(sf *siafile.SiaFile) *file {
	fileData := sf.ExportFileData()
	f := &file{
		contracts:   make(map[types.FileContractID]fileContract),
		name:        fileData.Name,
		size:        fileData.FileSize,
		masterKey:   fileData.MasterKey,
		erasureCode: fileData.ErasureCode,
		pieceSize:   fileData.PieceSize,
		mode:        uint32(fileData.Mode),
		deleted:     fileData.Deleted,
		staticUID:   fileData.UID,
	}
	// Rebuild the per-contract piece lists: map each stored piece back to the
	// contract of the host holding it.
	for chunkIndex, chunk := range fileData.Chunks {
		for pieceIndex, pieceSet := range chunk.Pieces {
			for _, piece := range pieceSet {
				c, ok := r.hostContractor.ContractByPublicKey(piece.HostPubKey)
				if !ok {
					build.Critical("missing contract when converting SiaFile to file")
					continue
				}
				h, ok := r.hostDB.Host(piece.HostPubKey)
				if !ok {
					build.Critical("missing host when converting SiaFile to file")
					continue
				}
				// Lazily create the fileContract entry for this contract.
				if _, exists := f.contracts[c.ID]; !exists {
					f.contracts[c.ID] = fileContract{
						ID:          c.ID,
						IP:          h.NetAddress,
						WindowStart: c.EndHeight,
					}
				}
				// Append the piece; map values are copies, so write back.
				fc := f.contracts[c.ID]
				fc.Pieces = append(fc.Pieces, pieceData{
					Chunk:      uint64(chunkIndex),
					Piece:      uint64(pieceIndex),
					MerkleRoot: piece.MerkleRoot,
				})
				f.contracts[c.ID] = fc
			}
		}
	}
	return f
}
// numChunks returns the number of chunks that f was split into.
func (f *file) numChunks() uint64 {
	// empty files still need at least one chunk
	if f.size == 0 {
		return 1
	}
	// FIX: compute the chunk size once instead of calling staticChunkSize
	// twice.
	chunkSize := f.staticChunkSize()
	n := f.size / chunkSize
	// last chunk will be padded, unless chunkSize divides file evenly.
	if f.size%chunkSize != 0 {
		n++
	}
	return n
}
// staticChunkSize returns the size of one chunk: the piece size times the
// erasure code's minimum number of pieces (the data pieces per chunk).
func (f *file) staticChunkSize() uint64 {
	return f.pieceSize * uint64(f.erasureCode.MinPieces())
}
| 8d6f10b605797efe5f8d07376dd150b18058cb10 | [
"Markdown",
"Go"
] | 5 | Go | e-corp-sam-sepiol/Hyperspace | 0ca31bff8dcbc1c0c3f45600e7aa7dbe7927b50c | 7738fcdd4aa879d8eb833a1ac87c53a0c30ce610 |
refs/heads/master | <file_sep>#include <stdio.h>
void pyramid(void);
int size(void);
void build_pyramid(int Order_of_Pyramid);
/* Entry point: reads the desired pyramid height from the user, then prints
   two mirrored half-pyramids of that height. */
int main(void)
{
    int order = size();
    build_pyramid(order);
    return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////FUNCTIONS - implementation//////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////
/*Function to implement the symbol: prints one '#' block of the pyramid.*/
void pyramid(void)
{
    printf("#");
}
/*Function to obtain pyramid order.
  Prompts on stdout and reads an integer height from stdin.
  FIX: the original never checked scanf's return value, so a non-numeric
  input left 'a' uninitialized (undefined behavior). Returns 0 when the
  input cannot be parsed.*/
int size(void)
{
    int a = 0;
    printf("Height: ");
    if (scanf("%d", &a) != 1)
    {
        a = 0;
    }
    return a;
}
/*Function to build the pyramid.
  Prints Order_of_Pyramid rows; each row consists of a right-aligned left
  half-pyramid, a two-column gap, and a mirrored right half-pyramid.*/
void build_pyramid(int Order_of_Pyramid)
{
    int New = (Order_of_Pyramid * 2) + 2; //New size for the columns
    int counter = 0; //Decrement of each line of the pyramid
    int Counter = Order_of_Pyramid + 2; //Start of the count for the second pyramid
    for (int i = 0; i < Order_of_Pyramid; i++) // For referring to each line
    {
        counter++;
        Counter++;
        for (int j = 0; j < New; j++)// For referring to each column -> New variable N to represent the extended size of matrix (offset => 2 because of the gaps between the pyramids)
        {
            if (j < Order_of_Pyramid - counter)
            {
                printf(" ");
            }
            else if (j < Order_of_Pyramid)
            {
                pyramid();
            }
            /* NOTE(review): this inner while deliberately advances j past the
               right-hand half of the row, printing the 2-column gap, the
               mirrored pyramid, and trailing padding. */
            while(j == Order_of_Pyramid || (j > Order_of_Pyramid && j < New))// This loop is for the first matrix (n represents the order of the matrix)
            {
                if (j >= Counter) //part for the gaps
                {
                    printf(" ");
                }
                else if (j == Order_of_Pyramid || j == Order_of_Pyramid + 1)// part for the offset of 2
                {
                    printf(" ");
                }
                else
                {
                    pyramid();
                }
                j++;
            }
        }
        printf("\n");
    }
}
| 11799e41d24be40ab41164ac6f24485130e6224d | [
"C"
] | 1 | C | cristianoraussemoraes/Double_Pyramids | bf9f4c90cdcbb97bda1e0f570ceb9c718485cc5a | 844f987d183d3aca3ae0946e9b404d351302a91c |
refs/heads/master | <file_sep># Video Manager
This is a client app I built for my home video server in 2005. It was built so a three-year-old could easily navigate and watch videos. All home media server software at the time was built for 30-year-old geeks and had horrible UIs.

<file_sep>using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Text;
using System.Windows.Forms;
namespace VideoManager
{
/// <summary>
/// Playback window hosting the VLC ActiveX control. Receives an initial file
/// path plus a playlist of paths, queues them with deinterlace/fullscreen
/// options, and exposes simple transport controls (play/pause/stop/seek).
/// </summary>
public partial class frmVLC : Form
{
    // Path of the playlist entry currently playing (kept current by timPlaylist_Tick).
    public string strFilePath;
    // All file paths queued into the embedded VLC playlist.
    List<string> arrPlaylist;

    /// <summary>Write-only hand-off of the initial file path from the opener.</summary>
    public string ValueFromParent
    {
        set
        {
            strFilePath = value;
        }
    }

    public frmVLC(string initialValue, List<string> playlist)
    {
        InitializeComponent();
        ValueFromParent = initialValue;
        arrPlaylist = playlist;
    }

    private void frmVLC_Load(object sender, EventArgs e)
    {
        //this.Text = "Playing - " + FileManipulation.GetFileNameFromPath(strFilePath);
        // Per-target VLC options: deinterlacing plus fullscreen playback.
        string[] options = { ":vout-filter=deinterlace", ":deinterlace-mode=x", ":fullscreen", ":show-intf", ":control=default" };
        for (int i = 0; i < arrPlaylist.Count; i++)
        {
            axVLCPlugin1.addTarget(arrPlaylist[i], options, AXVLC.VLCPlaylistMode.VLCPlayListInsert, i);
        }
        axVLCPlugin1.play();
    }

    private void btnFullscreen_Click(object sender, EventArgs e)
    {
        axVLCPlugin1.fullscreen();
        SendKeys.Send("f");
        Object X = axVLCPlugin1.getVariable("key-fullscreen");
        axVLCPlugin1.setVariable("key-pressed", X);
    }

    // Seek back 30 seconds, clamping at the start of the stream.
    private void btnRewind_Click(object sender, EventArgs e)
    {
        int vlctime = axVLCPlugin1.Time;
        if (vlctime > 30)
        {
            axVLCPlugin1.shuttle(-30);
            //axVLCPlugin1.Time -= Convert.ToInt32("30");
        }
        else
        {
            // FIX: was Convert.ToInt32("00") — parsing a constant string; use the literal.
            axVLCPlugin1.Time = 0;
        }
    }

    // Seek forward 30 seconds.
    private void btnFastFoward_Click(object sender, EventArgs e)
    {
        axVLCPlugin1.shuttle(30);
        //axVLCPlugin1.Time += Convert.ToInt32("30");
    }

    private void btnPlay_Click(object sender, EventArgs e)
    {
        axVLCPlugin1.play();
    }

    // Seek forward one minute.
    private void btnFastFoward1Min_Click(object sender, EventArgs e)
    {
        axVLCPlugin1.shuttle(60);
        // axVLCPlugin1.Time += Convert.ToInt32("60");
    }

    // Seek back one minute, clamping at the start of the stream.
    private void btnRewind1Min_Click(object sender, EventArgs e)
    {
        int vlctime = axVLCPlugin1.Time;
        if (vlctime > 60)
        {
            axVLCPlugin1.shuttle(-60);
            //axVLCPlugin1.Time -= Convert.ToInt32("60");
        }
        else
        {
            // FIX: was Convert.ToInt32("00") — parsing a constant string; use the literal.
            axVLCPlugin1.Time = 0;
        }
    }

    private void btnPause_Click(object sender, EventArgs e)
    {
        axVLCPlugin1.pause();
    }

    private void btnStop_Click(object sender, EventArgs e)
    {
        axVLCPlugin1.stop();
    }

    // Keeps the window title and elapsed/total time labels in sync with
    // whichever playlist entry VLC is currently playing.
    private void timPlaylist_Tick(object sender, EventArgs e)
    {
        int intPlaylist = axVLCPlugin1.PlaylistIndex;
        strFilePath = arrPlaylist[intPlaylist];
        this.Text = "Playing - " + FileManipulation.GetFileNameFromPath(strFilePath);
        this.lblTimeElasped.Text = axVLCPlugin1.Time.ToString();
        this.lblTimeTotal.Text = axVLCPlugin1.Length.ToString();
    }
}
}<file_sep>using System;
using System.Collections.Generic;
using System.Text;
namespace VideoManager
{
/// <summary>
/// Helper for renaming a video file while keeping its directory and
/// extension intact.
/// </summary>
class csFileNameChange
{
    // Full path of the file being renamed.
    private string strFilePath;

    public csFileNameChange() { }

    public csFileNameChange(string strFilePath)
    {
        this.StrFilePath = strFilePath;
    }

    /// <summary>Full path of the file being renamed.</summary>
    public string StrFilePath
    {
        get
        {
            return strFilePath;
        }
        set
        {
            strFilePath = value;
        }
    }

    /// <summary>
    /// Returns the file's extension including the leading dot (e.g. ".vob").
    /// FIX: the original used IndexOf("."), returning everything after the
    /// FIRST dot — wrong for names like "movie.part1.avi" and inconsistent
    /// with FileManipulation.GetFileExtension, which uses LastIndexOf.
    /// </summary>
    public string GetFileExtension()
    {
        int extStart = strFilePath.LastIndexOf(".");
        return strFilePath.Substring(extStart);
    }

    /// <summary>
    /// Builds a new full path in the same directory, using strLabel as the
    /// file name and preserving the original extension.
    /// </summary>
    public string FileNameChange(string strLabel)
    {
        // Extension including the dot, e.g. ".vob".
        string ext = this.GetFileExtension();
        // Directory part, up to and including the last backslash.
        int lastBackSlash = strFilePath.LastIndexOf(@"\");
        string dir = strFilePath.Substring(0, lastBackSlash + 1);
        return dir + strLabel + ext;
    }
}
}
<file_sep>using System;
using System.IO;
using System.Collections.Generic;
using System.Collections.Specialized;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Text;
using System.Windows.Forms;
namespace VideoManager
{
/// <summary>
/// Thin wrapper around a list of DirectoryObj entries with a bounds-checked
/// indexer, populated from the DirectoryDB persistence layer.
/// </summary>
public class DirectoryList
{
    private List<DirectoryObj> directories;

    public DirectoryList()
    {
        directories = new List<DirectoryObj>();
    }

    /// <summary>Number of directories currently held.</summary>
    public int Count
    {
        get
        {
            return directories.Count;
        }
    }

    /// <summary>
    /// Bounds-checked indexer. FIX: the getter's two separate range checks
    /// (each throwing the same exception) were merged; the same check was
    /// added to the setter for symmetry (List&lt;T&gt; would have thrown
    /// ArgumentOutOfRangeException for an invalid index anyway).
    /// </summary>
    public DirectoryObj this[int i]
    {
        get
        {
            if (i < 0 || i >= directories.Count)
            {
                throw new ArgumentOutOfRangeException("i");
            }
            return directories[i];
        }
        set
        {
            if (i < 0 || i >= directories.Count)
            {
                throw new ArgumentOutOfRangeException("i");
            }
            directories[i] = value;
        }
    }

    /// <summary>Replaces the contents with the directories stored in DirectoryDB.</summary>
    public void Fill()
    {
        directories = DirectoryDB.GetDirectories();
    }
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Text;
using System.Windows.Forms;
using Settings = VideoManager.Properties.Settings;
namespace VideoManager
{
/// <summary>
/// Settings dialog for managing the comma-separated list of allowed video
/// file extensions stored in user preferences (Settings.Default.FileExt).
/// </summary>
public partial class frmFileExt : Form
{
    public frmFileExt()
    {
        InitializeComponent();
    }

    private void frmFileExt_Load(object sender, EventArgs e)
    {
        this.mtdLoadlbxFileExt();
    }

    // Reloads the listbox from the comma-separated preference string.
    public void mtdLoadlbxFileExt()
    {
        this.lbxFileExt.Items.Clear();
        string strFileExt = Settings.Default.FileExt;
        string[] sa = strFileExt.Split(',');
        foreach (string s in sa)
        {
            this.lbxFileExt.Items.Add(s.ToString());
        }
    }

    // Appends the typed extension to the preference list and refreshes the UI.
    // NOTE(review): always prepends a comma, so an empty preference string
    // gains a leading empty entry — behavior preserved as-is.
    private void btnFileExtSave_Click(object sender, EventArgs e)
    {
        Settings.Default.FileExt = Settings.Default.FileExt + "," + this.tbxFileExt.Text.ToString();
        Settings.Default.Save();
        //this.Close();
        this.mtdLoadlbxFileExt();
        this.tbxFileExt.Text = "";
    }

    // Removes the selected extension and rewrites the preference string.
    private void btnRemove_Click(object sender, EventArgs e)
    {
        this.lbxFileExt.Items.Remove(lbxFileExt.SelectedItem);
        // FIX: rebuild the comma-separated list with string.Join instead of
        // manual first-element bookkeeping and repeated concatenation.
        List<string> remaining = new List<string>();
        foreach (string s in this.lbxFileExt.Items)
        {
            remaining.Add(s);
        }
        Settings.Default.FileExt = string.Join(",", remaining.ToArray());
        Settings.Default.Save();
        this.mtdLoadlbxFileExt();
    }
}
}<file_sep>using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Text;
using System.Windows.Forms;
using System.Threading;
using System.IO;
namespace VideoManager
{
/// <summary>
/// Modal progress window that copies a single file on a background thread,
/// marshalling per-buffer progress updates back to the UI thread.
/// </summary>
public partial class frmProgressBar : Form
{
    // Total size in bytes of the file being copied.
    private long intSize;
    // Full source path, bare file name, and destination directory.
    public string copyFilePathName;
    public string copyFileName;
    public string copyFileDestinationPath;

    // Invoked (on the UI thread) after each buffer is written.
    public delegate void ChangeProgressBar();
    public ChangeProgressBar myDelegate;
    // Invoked (on the UI thread) when the copy completes.
    public delegate void CloseProgressBar();
    public CloseProgressBar myDelegateClose;
    // Raised so the parent form can refresh its video list after the copy.
    public delegate void FillVideosRoot(string strDirPath);
    public event FillVideosRoot LoadVideos;

    public frmProgressBar()
    {
        InitializeComponent();
    }

    public frmProgressBar(long intFileSize, string filePathName, string fileName, string fileDestinationPath)
    {
        InitializeComponent();
        intSize = intFileSize;
        copyFilePathName = filePathName;
        copyFileName = fileName;
        copyFileDestinationPath = fileDestinationPath;
    }

    private void frmProgressBar_Load(object sender, EventArgs e)
    {
        myDelegate = new ChangeProgressBar(HandleCopyChange);
        myDelegateClose = new CloseProgressBar(Close_ProgressBar);
        // Scale the bar to 1% of the byte count; each 45000-byte buffer then
        // advances it by the 450-unit step configured below.
        double maxSize = (Convert.ToDouble(intSize) * .01);
        this.pgbFileCopy.Maximum = (int)maxSize;
        // FIX: removed a leftover debug MessageBox that displayed the
        // progress-bar maximum before every copy.
        this.pgbFileCopy.Minimum = 1;
        this.pgbFileCopy.Step = 450;
        this.lblCopyName.Text = "Copying " + copyFileName + @" to " + copyFileDestinationPath;
        this.Text = "Copying File " + copyFileName;
        Thread thdCopyFile = new Thread(new ThreadStart(CopyFile));
        thdCopyFile.Start();
    }

    public void HandleCopyChange()
    {
        this.pgbFileCopy.PerformStep();
    }

    // Worker-thread body: streams the source file to the destination in
    // 45000-byte buffers, notifying the UI after each write.
    private void CopyFile()
    {
        // string strExt = FileManipulation.GetFileExtension(copyFilePathName, 0);
        string originalPath = copyFilePathName;
        string destinationPath = copyFileDestinationPath + copyFileName;
        using (FileStream fRStream = File.OpenRead(originalPath))
        {
            byte[] bytes = new byte[45000];
            long numBytesToRead = fRStream.Length;
            using (FileStream fWStream = File.Create(destinationPath))
            {
                while (numBytesToRead > 0L)
                {
                    // Read may return anything from 0 to numBytesToRead.
                    int n = fRStream.Read(bytes, 0, bytes.Length);
                    // The end of the file is reached.
                    if (n == 0)
                    {
                        break;
                    }
                    if (n == bytes.Length)
                    {
                        // Write data the destination file.
                        fWStream.Write(bytes, 0, bytes.Length);
                    }
                    else
                    {
                        fWStream.Write(bytes, 0, n);
                    }
                    numBytesToRead -= (long)n;
                    // Notify observer
                    this.Invoke(this.myDelegate);
                }
                this.Invoke(this.myDelegateClose);
            }
        }
    }

    private void btnClose_Click(object sender, EventArgs e)
    {
        this.Close();
    }

    // Asks the parent to reload the root video list, then closes the window.
    public void Close_ProgressBar()
    {
        LoadVideos("root");
        this.Close();
    }
}
}<file_sep>using System;
using System.Collections.Generic;
using System.Text;
using Settings = VideoManager.Properties.Settings;
namespace VideoManager
{
/// <summary>
/// Static helpers for picking apart file paths (name, extension, directory)
/// and checking names against the user's preferences.
/// FIX: the original kept its scratch values in shared private static
/// fields (intLocationExt, strFileExt, fileAllowed, ...), which made every
/// method non-reentrant; they are now plain locals.
/// </summary>
public class FileManipulation
{
    /// <summary>
    /// Returns the path with the file-name portion replaced by strLabel,
    /// keeping the original directory and extension.
    /// </summary>
    public static string FileNameChange(string strFilePath, string strLabel)
    {
        // Extension including the leading dot, e.g. ".vob".
        string ext = GetFileExtension(strFilePath, 0);
        // Directory part, up to and including the last backslash.
        int lastBackSlash = strFilePath.LastIndexOf(@"\");
        string dir = strFilePath.Substring(0, lastBackSlash + 1);
        return dir + strLabel + ext;
    }

    /// <summary>
    /// Returns the extension of strFilePath: pass 0 for intNoPeriod to keep
    /// the leading dot (".avi") or 1 to drop it ("avi").
    /// </summary>
    public static string GetFileExtension(string strFilePath, int intNoPeriod)
    {
        int extStart = strFilePath.LastIndexOf(".");
        return strFilePath.Substring(extStart + intNoPeriod);
    }

    /// <summary>
    /// Returns the file name, stripping the extension unless the user's
    /// "show file type" preference (Settings.Default.FileType) is enabled.
    /// </summary>
    public static string GetFileName(string strFileName)
    {
        bool showExtension = Settings.Default.FileType;
        if (!showExtension)
        {
            strFileName = strFileName.Substring(0, strFileName.LastIndexOf("."));
        }
        return strFileName;
    }

    /// <summary>Extracts the (preference-formatted) file name from a full path.</summary>
    public static string GetFileNameFromPath(string strFilePath)
    {
        int lastBackSlash = strFilePath.LastIndexOf(@"\");
        return GetFileName(strFilePath.Substring(lastBackSlash + 1));
    }

    /// <summary>
    /// True when the file's extension (lower-cased, no dot) appears in the
    /// comma-separated allowed-extension preference list.
    /// </summary>
    public static bool CheckFileAllowed(string strFileName)
    {
        string ext = GetFileExtension(strFileName, 1);
        foreach (string allowed in Settings.Default.FileExt.Split(','))
        {
            if (ext.ToLower() == allowed)
            {
                return true;
            }
        }
        return false;
    }

    /// <summary>
    /// False when the name matches one of the blacklisted system folders.
    /// NOTE(review): this reuses GetFileExtension on a directory name; for
    /// dot-free names Substring(-1 + 1) yields the whole name, which is how
    /// plain directory names end up being compared — preserved as-is.
    /// </summary>
    public static bool CheckDirectoryNotAllowed(string strFileName)
    {
        const string strDirectoryNotAllowed = "recycler,temprec,backup,system volume information";
        string ext = GetFileExtension(strFileName, 1);
        foreach (string blocked in strDirectoryNotAllowed.Split(','))
        {
            if (ext.ToLower() == blocked)
            {
                return false;
            }
        }
        return true;
    }
}
}
<file_sep>using System;
using System.IO;
using System.Collections.Generic;
using System.Collections.Specialized;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Text;
using System.Windows.Forms;
namespace VideoManager
{
/// <summary>
/// Simple model object holding the filesystem path of one watched directory.
/// </summary>
public class DirectoryObj
{
    // Backing store for DirectoryPath.
    private string path;

    /// <summary>Creates an empty DirectoryObj with no path set.</summary>
    public DirectoryObj()
    {
    }

    /// <summary>Creates a DirectoryObj for the given path.</summary>
    public DirectoryObj(string directoryPath)
    {
        this.DirectoryPath = directoryPath;
    }

    /// <summary>Gets or sets the directory's filesystem path.</summary>
    public string DirectoryPath
    {
        get { return path; }
        set { path = value; }
    }
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.Text;
namespace VideoManager
{
/// <summary>
/// Lightweight model describing one entry in the video list: its display
/// name, full path, and the icon index used by the ListView.
/// </summary>
public class VideoObj
{
    // Backing fields for the three public properties.
    private string name;
    private string path;
    private int iconIndex;

    /// <summary>Creates an empty VideoObj.</summary>
    public VideoObj() { }

    /// <summary>Creates a fully populated VideoObj.</summary>
    public VideoObj(string fileName, string filePath, int imageIndex)
    {
        this.FileName = fileName;
        this.FilePath = filePath;
        this.ImageIndex = imageIndex;
    }

    /// <summary>Display name of the video.</summary>
    public string FileName
    {
        get { return name; }
        set { name = value; }
    }

    /// <summary>Full filesystem path of the video.</summary>
    public string FilePath
    {
        get { return path; }
        set { path = value; }
    }

    /// <summary>Index of the icon shown for this entry in the list view.</summary>
    public int ImageIndex
    {
        get { return iconIndex; }
        set { iconIndex = value; }
    }
}
}
<file_sep>using System;
using System.IO;
using System.Collections.Generic;
using System.Collections.Specialized;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Text;
using System.Windows.Forms;
using System.Threading;
using Settings = VideoManager.Properties.Settings;
namespace VideoManager
{
public partial class frmVideoManager : Form
{
private StringCollection folderCol;
List<DirectoryObj> directories;
// Standard WinForms constructor; all startup work happens in the Load event.
public frmVideoManager()
{
    InitializeComponent();
}
// Form startup: resets the "previous directory" preference to root, loads
// the configured directories, ensures the default video folder exists, and
// restores the saved column width.
private void frmVideoManager_Load(object sender, EventArgs e)
{
    folderCol = new StringCollection();
    folderCol.Add(@"root");
    //Sets initial movies listview previous directory preference to root, every time you load the program it always loads the "root" directories in listbox
    Settings.Default.DirectoryPrevious = "root";
    Settings.Default.Save();
    //Loads Directories From Object and Fill Directories Listbox
    directories = DirectoryDB.GetDirectories();
    this.FillLbxDirectoriesFromText();
    // Creates default directory on hard drive to store videos.
    // FIX: test the bool directly instead of converting it to a string and
    // comparing against "False".
    if (!Directory.Exists(@"c:\vm_videos\"))
    {
        Directory.CreateDirectory(@"c:\vm_videos\");
    }
    //Sets width of colums based on previous changes stored in preferences
    this.lvwVideos.Columns[0].Width = this.lvwVideos.Columns[0].Width + Settings.Default.ColumnWidth;
}
//!!!Move to own db object, this writes directories in listbox to directories file
// Persists every directory currently shown in the directories listbox to
// C:\videomanager.txt, one path per line.
private void WritelbxDirectoriesToText()
{
    // FIX: wrap the writer in using so it is flushed and closed even if a
    // write throws (the original leaked the stream on exception).
    using (StreamWriter sw = new StreamWriter(@"C:\videomanager.txt"))
    {
        foreach (object o in lbxDirectories.Items)
        {
            sw.WriteLine(o);
        }
    }
}
//!!Move to own db object, Populates directory listbox from directoryobj list
// Rebuilds the directories listbox from the in-memory DirectoryObj list,
// then refreshes the video list for the last-viewed directory.
private void FillLbxDirectoriesFromText()
{
    this.lbxDirectories.Items.Clear();
    foreach (DirectoryObj d in directories)
    {
        this.lbxDirectories.Items.Add(d.DirectoryPath);
    }
    this.FillVideos(Settings.Default.DirectoryPrevious);
}
//!!Move to own db object, Fills listview with all videos in directories
// Fills the video listview. When strDirPath is "root", aggregates the
// subdirectories and allowed files of every configured directory; otherwise
// shows the contents of the single directory strDirPath, prefixed with a
// "Back" entry pointing at the previously viewed directory.
public void FillVideos(string strDirPath)
{
    //clears videos from video listview
    this.lvwVideos.Items.Clear();
    ListViewItem lvi;
    // When browsing inside a directory, prepend a "Back" entry whose Tag is
    // the previously viewed directory, then remember the current one.
    if (strDirPath != "root")
    {
        lvi = new ListViewItem();
        lvi.Text = "Back";
        lvi.ImageIndex = 2;
        lvi.Tag = Settings.Default.DirectoryPrevious;
        this.lvwVideos.Items.Add(lvi);
        Settings.Default.DirectoryPrevious = strDirPath;
        Settings.Default.Save();
    }
    if (strDirPath != "root")
    {
        // Single-directory view: subdirectories first, then files.
        List<VideoObj> videos;
        videos = VideoDB.GetDirectories(strDirPath);
        foreach (VideoObj v in videos)
        {
            lvi = new ListViewItem();
            lvi.Text = v.FileName;
            lvi.Tag = v.FilePath;
            lvi.ImageIndex = v.ImageIndex;
            this.lvwVideos.Items.Add(lvi);
        }
        videos = VideoDB.GetVideos(strDirPath);
        foreach (VideoObj v in videos)
        {
            lvi = new ListViewItem();
            lvi.Text = v.FileName;
            lvi.Tag = v.FilePath;
            lvi.ImageIndex = v.ImageIndex;
            this.lvwVideos.Items.Add(lvi);
        }
        if (lbxDirectories.Items.Count > 0)
        {
            this.lvwVideos.Items[0].Selected = true;
        }
    }
    else
    {
        // Root view: aggregate subdirectories from every configured
        // directory, then aggregate their files.
        foreach (DirectoryObj d in directories)
        {
            //checks to see if directory exists
            if (Directory.Exists(d.DirectoryPath))
            {
                List<VideoObj> videos;
                videos = VideoDB.GetDirectories(d.DirectoryPath);
                foreach (VideoObj v in videos)
                {
                    lvi = new ListViewItem();
                    lvi.Text = v.FileName;
                    lvi.Tag = v.FilePath;
                    lvi.ImageIndex = v.ImageIndex;
                    this.lvwVideos.Items.Add(lvi);
                }
            }
        }
        foreach (DirectoryObj d in directories)
        {
            if (Directory.Exists(d.DirectoryPath))
            {
                List<VideoObj> videos;
                videos = VideoDB.GetVideos(d.DirectoryPath);
                foreach (VideoObj v in videos)
                {
                    lvi = new ListViewItem();
                    lvi.Text = v.FileName;
                    lvi.Tag = v.FilePath;
                    lvi.ImageIndex = v.ImageIndex;
                    this.lvwVideos.Items.Add(lvi);
                }
            }
        }
        if (lbxDirectories.Items.Count > 0)
        {
            this.lvwVideos.Items[0].Selected = true;
        }
    }
}
/// <summary>
/// Adds a directory to the tracked list unless it is already present,
/// then persists the list and refreshes both list controls.
/// </summary>
/// <param name="strDirPath">Absolute path of the directory to add.</param>
private void WriteDirectoryText(string strDirPath)
{
    // Fix: use a bool instead of an int flag, and stop scanning once found.
    bool alreadyAdded = false;
    foreach (object o in lbxDirectories.Items)
    {
        if (o.ToString() == strDirPath)
        {
            // The default c:\vm_videos\ folder is re-added silently on startup,
            // so suppress the warning for it.
            if (strDirPath != @"c:\vm_videos\")
            {
                MessageBox.Show("Directory Already Added " + strDirPath);
            }
            alreadyAdded = true;
            break;
        }
    }
    if (!alreadyAdded)
    {
        DirectoryObj d = new DirectoryObj();
        d.DirectoryPath = strDirPath;
        directories.Add(d);
        this.FillLbxDirectoriesFromText();
        this.WritelbxDirectoriesToText();
        this.FillVideos(Settings.Default.DirectoryPrevious);
    }
}
//End of Methods - Video Form Events Start Here
/// <summary>Lets the user pick a folder and adds it to the tracked directories.</summary>
private void btnFindDirectories_Click(object sender, EventArgs e)
{
    // Only act when the user confirmed the folder-browser dialog.
    DialogResult choice = folderBrowserDialog1.ShowDialog();
    if (choice == DialogResult.OK)
    {
        WriteDirectoryText(folderBrowserDialog1.SelectedPath);
    }
}
/// <summary>Removes the selected directory from the listbox, the in-memory
/// list and the persisted file, then refreshes the video view.</summary>
private void btnDirectoryDelete_Click(object sender, EventArgs e)
{
    int i = lbxDirectories.SelectedIndex;
    // Fix: clicking Delete with nothing selected used to call RemoveAt(-1)
    // and throw ArgumentOutOfRangeException.
    if (i < 0)
    {
        return;
    }
    this.lbxDirectories.Items.Remove(lbxDirectories.SelectedItem);
    directories.RemoveAt(i);
    this.WritelbxDirectoriesToText();
    this.FillVideos(Settings.Default.DirectoryPrevious);
}
// Jumps the video view back to the aggregated top-level ("root") listing.
private void btnLoadVideos_Click(object sender, EventArgs e)
{
this.FillVideos("root");
}
// Double-click / Enter on a list item: plays a file, descends into a folder,
// or navigates back, depending on the item's ImageIndex
// (1 = file, 0 = folder, 2 = "Back" — see FillVideos).
// NOTE(review): this handler is nearly identical to btnOpenVideo_Click;
// consider extracting the shared logic into one method.
private void lvwVideos_ItemActivate(object sender, EventArgs e)
{
//code for making the video files active...so you can play them
ListView lw = (ListView)sender;
string strFilenamePath = lw.SelectedItems[0].Tag.ToString();
//get playlist
// Build a playlist of up to 30 items starting at the activated one;
// the catch/break below stops at the end of the list.
int intVideoIndex = lw.SelectedItems[0].Index;
List<string> playlist = new List<string>();
for (int i = intVideoIndex; i < (intVideoIndex + 30); i++)
{
try
{
playlist.Add(this.lvwVideos.Items[i].Tag.ToString());
}
catch
{
break;
}
}
string strFileExt;
if (lw.SelectedItems[0].ImageIndex == 1)
{
try
{
strFileExt = FileManipulation.GetFileExtension(strFilenamePath, 1);
if (strFileExt == "pdf")
{
System.Diagnostics.Process.Start(strFilenamePath);
}
// NOTE(review): a pdf is opened externally above but then ALSO handed to
// frmVLC (no return/else) — confirm whether that double-open is intended.
frmVLC vlc = new frmVLC(strFilenamePath, playlist);
vlc.Show();
}
catch
{
return;
}
}
else if (lw.SelectedItems[0].ImageIndex == 0)
{
// Folder: descend into it and push it onto the navigation stack.
this.FillVideos(strFilenamePath);
folderCol.Add(strFilenamePath);
}
else if (lw.SelectedItems[0].ImageIndex == 2)
{
// "Back": reset to root, then re-fill from the navigation stack.
Settings.Default.DirectoryPrevious = "root";
Settings.Default.Save();
if (folderCol.Count > 1)
{
this.FillVideos(folderCol[folderCol.Count - 2].ToString());
folderCol.RemoveAt(folderCol.Count - 1);
}
else
{
this.FillVideos(strFilenamePath);
}
// NOTE(review): this extra FillVideos overrides the fill done just above
// (in the Count > 1 branch it re-fills with the Back item's own tag) —
// looks like a copy-paste leftover; confirm before removing.
this.FillVideos(strFilenamePath);
}
}
// Narrows the name column by 25px and persists the accumulated delta
// (re-applied on startup by frmVideoManager_Load).
// NOTE(review): there is no lower bound, so repeated clicks can drive the
// stored delta (and the column width) negative — confirm intended.
private void btnColumnDec_Click(object sender, EventArgs e)
{
this.lvwVideos.Columns[0].Width = this.lvwVideos.Columns[0].Width - 25;
Settings.Default.ColumnWidth = Settings.Default.ColumnWidth - 25;
Settings.Default.Save();
}
// Widens the name column by 25px and persists the accumulated delta
// (re-applied on startup by frmVideoManager_Load).
private void btnColumnInc_Click(object sender, EventArgs e)
{
this.lvwVideos.Columns[0].Width = this.lvwVideos.Columns[0].Width + 25;
Settings.Default.ColumnWidth = Settings.Default.ColumnWidth + 25;
Settings.Default.Save();
}
/// <summary>Toggles the "show file extension" preference and refreshes the list.</summary>
private void btnShowFileType_Click(object sender, EventArgs e)
{
    // Fix: collapse the copy-pasted if/else into a single boolean toggle
    // with one Save() call.
    Settings.Default.FileType = !Settings.Default.FileType;
    Settings.Default.Save();
    this.FillVideos(Settings.Default.DirectoryPrevious);
}
/// <summary>
/// Renames the underlying file after an in-place label edit, refusing to
/// overwrite an existing file (in which case the list is refreshed so the
/// rejected label reverts to the real name).
/// </summary>
private void lvwVideos_AfterLabelEdit(object sender, LabelEditEventArgs e)
{
    // New display name typed by the user (null when the edit was cancelled).
    string strLabel = e.Label;
    // Full path of the file being renamed.
    string strFilePathOld = this.lvwVideos.SelectedItems[0].Tag.ToString();
    if (strLabel != null)
    {
        string strFilePathChange = FileManipulation.FileNameChange(strFilePathOld, strLabel);
        if (strFilePathOld != strFilePathChange)
        {
            // Fix: test the bool directly instead of comparing its
            // Convert.ToString(...) form against "False".
            if (!File.Exists(strFilePathChange))
            {
                File.Move(strFilePathOld, strFilePathChange);
                this.lvwVideos.SelectedItems[0].Tag = strFilePathChange;
            }
            else
            {
                if (MessageBox.Show("File Name Not Changed It Already Exists - " + strLabel, "File Name Change Error", MessageBoxButtons.OK) == DialogResult.OK)
                {
                    this.FillVideos(Settings.Default.DirectoryPrevious);
                }
            }
        }
    }
}
/// <summary>Deletes the selected file after confirmation, then refreshes the list.</summary>
private void btnDeleteFile_Click(object sender, EventArgs e)
{
    // Fix: clicking Delete with nothing selected used to throw on
    // SelectedItems[0].
    if (this.lvwVideos.SelectedItems.Count == 0)
    {
        return;
    }
    string strFilePathName = this.lvwVideos.SelectedItems[0].Tag.ToString();
    if (File.Exists(strFilePathName))
    {
        if (MessageBox.Show("Do you want to delete this file: " + strFilePathName, "File Deletion Confirmation", MessageBoxButtons.YesNo) == DialogResult.Yes)
        {
            File.Delete(strFilePathName);
            this.FillVideos(Settings.Default.DirectoryPrevious);
        }
    }
}
// Opens the dialog for managing the allowed file-extension list
// (consumed by FileManipulation.CheckFileAllowed when listing videos).
private void btnFileExt_Click(object sender, EventArgs e)
{
frmFileExt frmFileExtDialog = new frmFileExt();
frmFileExtDialog.Show();
}
// Button equivalent of double-clicking a list item: plays a file, descends
// into a folder, or navigates back (ImageIndex 1 = file, 0 = folder, 2 = Back).
// NOTE(review): this is a near-verbatim copy of lvwVideos_ItemActivate wrapped
// in a broad try/catch that silently swallows any error (including "nothing
// selected") — consider extracting the shared logic and narrowing the catch.
private void btnOpenVideo_Click(object sender, EventArgs e)
{
//code for making the video files active...so you can play them
try
{
string strFilenamePath = lvwVideos.SelectedItems[0].Tag.ToString();
string strFileExt;
//get playlist
// Build a playlist of up to 30 items starting at the selected one.
int intVideoIndex = lvwVideos.SelectedItems[0].Index;
List<string> playlist = new List<string>();
for (int i = intVideoIndex; i < (intVideoIndex + 30); i++)
{
try
{
playlist.Add(this.lvwVideos.Items[i].Tag.ToString());
}
catch
{
break;
}
}
if (lvwVideos.SelectedItems[0].ImageIndex == 1)
{
try
{
strFileExt = FileManipulation.GetFileExtension(strFilenamePath, 1);
if (strFileExt == "pdf")
System.Diagnostics.Process.Start(strFilenamePath);
// NOTE(review): as in lvwVideos_ItemActivate, a pdf is opened externally
// and then also handed to frmVLC — confirm the double-open is intended.
frmVLC vlc = new frmVLC(strFilenamePath, playlist);
vlc.Show();
}
catch
{
return;
}
}
else if (lvwVideos.SelectedItems[0].ImageIndex == 0)
{
// Folder: descend into it and push it onto the navigation stack.
this.FillVideos(strFilenamePath);
folderCol.Add(strFilenamePath);
}
else if (lvwVideos.SelectedItems[0].ImageIndex == 2)
{
// "Back": reset to root, then re-fill from the navigation stack.
Settings.Default.DirectoryPrevious = "root";
Settings.Default.Save();
if (folderCol.Count > 1)
{
this.FillVideos(folderCol[folderCol.Count - 2].ToString());
folderCol.RemoveAt(folderCol.Count - 1);
}
else
{
this.FillVideos(strFilenamePath);
}
// NOTE(review): extra FillVideos call overrides the fill done just above —
// same suspected copy-paste leftover as in lvwVideos_ItemActivate.
this.FillVideos(strFilenamePath);
}
}
catch
{
return;
}
}
/// <summary>
/// Copies the selected file into the local c:\vm_videos\ library with a
/// progress dialog; refuses to overwrite an existing copy.
/// </summary>
private void btnCopyFile_Click(object sender, EventArgs e)
{
    // Fix: guard against no selection before dereferencing SelectedItems[0].
    if (this.lvwVideos.SelectedItems.Count == 0)
    {
        return;
    }
    string copyFilePath = this.lvwVideos.SelectedItems[0].Tag.ToString();
    FileInfo fi = new FileInfo(copyFilePath);
    long copyFileSize = fi.Length;
    string copyFileName = fi.Name;
    string copyFileDestinationPath = @"c:\vm_videos\";
    if (File.Exists(copyFileDestinationPath + copyFileName))
    {
        MessageBox.Show("This File Already Exists", "File Copy Error");
    }
    else
    {
        // Make sure the destination library is tracked, then copy with progress;
        // the dialog calls back into FillVideos when the copy completes.
        this.WriteDirectoryText(copyFileDestinationPath);
        frmProgressBar pg = new frmProgressBar(copyFileSize, copyFilePath, copyFileName, copyFileDestinationPath);
        pg.Show();
        pg.LoadVideos += new frmProgressBar.FillVideosRoot(FillVideos);
    }
}
/// <summary>
/// Copies the selected file to the network video-server share with a
/// progress dialog; refuses to overwrite an existing copy.
/// </summary>
private void btnCopyServer_Click(object sender, EventArgs e)
{
    // Fix: guard against no selection before dereferencing SelectedItems[0].
    if (this.lvwVideos.SelectedItems.Count == 0)
    {
        return;
    }
    string copyFilePath = this.lvwVideos.SelectedItems[0].Tag.ToString();
    FileInfo fi = new FileInfo(copyFilePath);
    long copyFileSize = fi.Length;
    string copyFileName = fi.Name;
    string copyFileDestinationPath = @"\\Videoserver\vs3 kids (f)\kids\";
    // Fix: reuse the destination variable instead of repeating the UNC literal,
    // so the two cannot drift apart.
    if (File.Exists(copyFileDestinationPath + copyFileName))
    {
        MessageBox.Show("This File Already Exists", "File Copy Error");
    }
    else
    {
        frmProgressBar pg = new frmProgressBar(copyFileSize, copyFilePath, copyFileName, copyFileDestinationPath);
        pg.Show();
        pg.LoadVideos += new frmProgressBar.FillVideosRoot(FillVideos);
    }
}
}
}<file_sep>using System;
using System.IO;
using System.Collections.Generic;
using System.Collections.Specialized;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Text;
using System.Windows.Forms;
namespace VideoManager
{
/// <summary>
/// Flat-file persistence for the list of tracked directories
/// (one absolute path per line in c:\videomanager.txt).
/// </summary>
class DirectoryDB
{
    private const string Path = @"c:\videomanager.txt";

    /// <summary>
    /// Reads every line of the backing file into a DirectoryObj list,
    /// creating an empty file on first run.
    /// </summary>
    public static List<DirectoryObj> GetDirectories()
    {
        // First run: create an empty store so the read below succeeds.
        if (!File.Exists(Path))
        {
            using (FileStream created = File.Create(Path))
            {
            }
        }
        List<DirectoryObj> directories = new List<DirectoryObj>();
        // Fix: streams are now disposed even when a read throws
        // (the old code used bare Close() calls with no try/finally).
        using (FileStream fs = new FileStream(Path, FileMode.Open))
        using (StreamReader sr = new StreamReader(fs))
        {
            string strLine = sr.ReadLine();
            while (strLine != null)
            {
                DirectoryObj directoryobj = new DirectoryObj();
                directoryobj.DirectoryPath = strLine;
                directories.Add(directoryobj);
                strLine = sr.ReadLine();
            }
        }
        return directories;
    }
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.Text;
using System.IO;
using System.Collections.Specialized;
namespace VideoManager
{
/// <summary>
/// Read-only queries that turn a directory on disk into VideoObj entries
/// for the video ListView (playable files and browsable sub-folders).
/// </summary>
class VideoDB
{
    /// <summary>Lists the allowed (playable) files of a directory as VideoObjs.</summary>
    public static List<VideoObj> GetVideos(string strDirPath)
    {
        List<VideoObj> videos = new List<VideoObj>();
        DirectoryInfo dir = new DirectoryInfo(strDirPath);
        foreach (FileInfo file in dir.GetFiles())
        {
            // Skip files whose extension is not on the allowed list.
            if (!FileManipulation.CheckFileAllowed(file.Name))
            {
                continue;
            }
            VideoObj entry = new VideoObj();
            entry.FileName = FileManipulation.GetFileName(file.Name);
            entry.FilePath = file.FullName;
            entry.ImageIndex = 1; // 1 = playable-file icon
            videos.Add(entry);
        }
        return videos;
    }

    /// <summary>Lists the browsable sub-folders of a directory as VideoObjs.</summary>
    public static List<VideoObj> GetDirectories(string strDirPath)
    {
        List<VideoObj> directories = new List<VideoObj>();
        DirectoryInfo dir = new DirectoryInfo(strDirPath);
        foreach (DirectoryInfo sub in dir.GetDirectories())
        {
            // Skip folders the helper filters out.
            if (!FileManipulation.CheckDirectoryNotAllowed(sub.Name))
            {
                continue;
            }
            VideoObj entry = new VideoObj();
            entry.FileName = "Folder - " + sub.Name;
            entry.FilePath = sub.FullName;
            entry.ImageIndex = 0; // 0 = folder icon
            directories.Add(entry);
        }
        return directories;
    }
}
}
| 6356c1ae798add92154faa1749829a53a7373e91 | [
"Markdown",
"C#"
] | 12 | Markdown | jeffcore/video-manager | 107b58ce40abb6cf46bf83233d6fa1fd7555c368 | 995b06026368145368f9ff67da37c1b647f5b451 |
refs/heads/master | <file_sep>package com.maihaoche.lint.core;
import com.android.tools.lint.client.api.IssueRegistry;
import com.android.tools.lint.detector.api.Issue;
import com.maihaoche.lint.core.detectors.CaseLiteralIllegalDetector;
import com.maihaoche.lint.core.detectors.LogUtilDetector;
import com.maihaoche.lint.core.detectors.NamingConventionsDetector;
import com.maihaoche.lint.core.detectors.NestRecyclerViewDetector;
import com.maihaoche.lint.core.detectors.RequestCodeForV4FragmentDetector;
import com.maihaoche.lint.core.detectors.SubscriptionDetector;
import com.maihaoche.lint.core.detectors.VectorDrawableIllegalDetector;
import java.util.Arrays;
import java.util.List;
/**
 * Aggregates all custom lint issues of this project.
 *
 * Lint discovers this class through the "Lint-Registry" manifest attribute
 * written into the jar by the lintCoreLib build script.
 *
 * Created with Android Studio.
 * User: dashu
 * Date: 2017/5/5
 */
public class MHCIssueRegistry extends IssueRegistry {
@Override
public List<Issue> getIssues() {
// Every ISSUE listed here becomes an active check once the jar is on the lint path.
return Arrays.asList(
LogUtilDetector.ISSUE,
NestRecyclerViewDetector.ISSUE,
RequestCodeForV4FragmentDetector.ISSUE,
VectorDrawableIllegalDetector.ISSUE,
SubscriptionDetector.ISSUE,
NamingConventionsDetector.ISSUE,
CaseLiteralIllegalDetector.ISSUE);
}
}
<file_sep>include ':app', ':lintCoreLib', ':lint', ':lintPlugin'
<file_sep>apply plugin: 'java'
// The custom lint rules are compiled against this lint-api/lint-checks version.
def lint_version = "25.3.2"
dependencies {
compile fileTree(dir: 'libs', include: ['*.jar'])
compile "com.android.tools.lint:lint-api:$lint_version"
compile "com.android.tools.lint:lint-checks:$lint_version"
}
// Register MHCIssueRegistry in the jar manifest so lint can discover the custom issues.
jar {
manifest {
attributes("Lint-Registry": "com.maihaoche.lint.core.MHCIssueRegistry")
}
}
// Expose the built jar under a named configuration so the aar module can consume it.
defaultTasks 'assemble'
configurations {
lintJarOutput
}
dependencies {
lintJarOutput files(jar)
}
<file_sep>package com.maihaoche.lintdemo;
import android.os.Bundle;
import android.support.annotation.Nullable;
import android.support.v7.app.AppCompatActivity;
import android.util.Log;
import android.widget.TextView;
import rx.Observable;
import rx.Subscription;
import rx.subscriptions.CompositeSubscription;
/**
 * Demo activity for this project's custom lint rules.
 *
 * NOTE(review): much of the "bad" code below (direct android.util.Log use,
 * mixed-style constants and fields, non-constant switch cases, a bare
 * Observable.subscribe()) appears to be intentional fixture material for the
 * custom detectors registered in MHCIssueRegistry — confirm before "fixing" it.
 *
 * Created with Android Studio.
 * User: dashu
 * Date: 2017/5/5
 */
public class MainActivity extends AppCompatActivity {
// Constants and fields in assorted naming styles (presumably NamingConventionsDetector material).
private static final int TEST = 2;
private static final String TEST2 = "3";
public static final int SOME_CONSTANT = 42;
public int publicField;
private static String sSingleton;
int mPackagePrivate;
private int mPrivate;
protected int mProtected;
private TextView mTextView;
private int code;
private CompositeSubscription mCompositeSubscription;
@Override
protected void onCreate(@Nullable Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
// Direct Log call — presumably what LogUtilDetector should flag.
Log.i("xyz", "xyz");
TextView textView;
// switch with literal and constant cases — presumably CaseLiteralIllegalDetector material.
switch (9) {
case 1:
break;
case TEST:
break;
case TestFragment.REQUEST_TEST:
break;
}
// Subscription is neither stored nor unsubscribed — presumably SubscriptionDetector material.
Observable.just(1).subscribe();
}
// Queries the subscription state; the boolean result is discarded.
private void unSubscription(Subscription subscription){
subscription.isUnsubscribed();
}
}
<file_sep>package com.maihaoche.lint.core.detectors;
import com.android.tools.lint.detector.api.Category;
import com.android.tools.lint.detector.api.Implementation;
import com.android.tools.lint.detector.api.Issue;
import com.android.tools.lint.detector.api.LayoutDetector;
import com.android.tools.lint.detector.api.Scope;
import com.android.tools.lint.detector.api.Severity;
import com.android.tools.lint.detector.api.XmlContext;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import java.util.Collection;
import java.util.Collections;
/**
 * Lint rule: avoid a wrap_content (self-sizing) RecyclerView nested inside a
 * plain ScrollView. When such a layout is found, the enclosing ScrollView is
 * reported and a NestedScrollView is suggested instead.
 *
 * Created with Android Studio. User: dashu, Date: 2017/5/13.
 */
public class NestRecyclerViewDetector extends LayoutDetector {

    public static final Issue ISSUE = Issue.create(
            "NestRecyclerView",
            "避免在ScrollView中使用自适应高度的RecyclerView",
            "使用NestScrollView可以避免自使用RecyclerView初始化未加载item无法显示的bug",
            Category.CORRECTNESS, 8, Severity.FATAL,
            new Implementation(NestRecyclerViewDetector.class, Scope.RESOURCE_FILE_SCOPE));

    @Override
    public Collection<String> getApplicableElements() {
        // Only RecyclerView tags are visited.
        return Collections.singletonList("android.support.v7.widget.RecyclerView");
    }

    @Override
    public void visitElement(XmlContext context, Element element) {
        // Fix: the local used to be (misleadingly) named "width" although it
        // holds android:layout_height — only self-sizing RecyclerViews matter.
        String height = element.getAttributeNS("http://schemas.android.com/apk/res/android", "layout_height");
        if ("wrap_content".equals(height)) {
            Element parent = this.findOuterScrollingWidget(element.getParentNode());
            if (parent != null) {
                String msg = "使用NestScrollView代替ScrollView";
                context.report(ISSUE, parent, context.getLocation(parent), msg);
            }
        }
    }

    /**
     * Walks up the DOM ancestry looking for an enclosing ScrollView tag.
     *
     * @return the ScrollView element, or null when none encloses {@code node}
     */
    private Element findOuterScrollingWidget(Node node) {
        for (String applicableElements = "ScrollView"; node != null; node = node.getParentNode()) {
            if (node instanceof Element) {
                Element element = (Element) node;
                String tagName = element.getTagName();
                if (applicableElements.equals(tagName)) {
                    return element;
                }
            }
        }
        return null;
    }
}
<file_sep># Android Lint
Android Lint 是由 Android SDK 提供的一种静态代码检测工具,用于检测 Android 的代码质量
Android Lint 的源码集成在 Android SDK Tools 16 及更高的版本中,我们可以在项目目录下通过 `./gradlew lint` 命令调用,也可以通过 Android Studio 的 `【Analyze】->【Inspect Code】`路径调用 Lint 检查
## 配置 Lint 规则
在 Android Studio 创建的 Android 项目中运行 `./gradlew lint` 可以获得 Lint 检测结果,生成详细的 xml 或者 html 报告文件,同时通过对 lint.xml、lintOptions 的配置可以实现符合自己项目的 Lint 检测规则
**lintOptions** 定义在 gradle 文件中,下面列举 lintOptions 可定义的选项
```gradle
android {
lintOptions {
// true--关闭lint报告的分析进度
quiet true
// true--错误发生后停止gradle构建
abortOnError false
// true--只报告error
ignoreWarnings true
// true--忽略有错误的文件的全/绝对路径(默认是true)
//absolutePaths true
// true--检查所有问题点,包含其他默认关闭项
checkAllWarnings true
// true--所有warning当做error
warningsAsErrors true
// 关闭指定问题检查
disable 'TypographyFractions', 'TypographyQuotes'
// 打开指定问题检查
enable 'RtlHardcoded', 'RtlCompat', 'RtlEnabled'
// 仅检查指定问题
check 'NewApi', 'InlinedApi'
// true--error输出文件不包含源码行号
noLines true
// true--显示错误的所有发生位置,不截取
showAll true
// 回退lint设置(默认规则)
lintConfig file("default-lint.xml")
// true--生成txt格式报告(默认false)
textReport true
// 重定向输出;可以是文件或'stdout'
textOutput 'stdout'
// true--生成XML格式报告
xmlReport false
// 指定xml报告文档(默认lint-results.xml)
xmlOutput file("lint-report.xml")
// true--生成HTML报告(带问题解释,源码位置,等)
htmlReport true
// html报告可选路径(构建器默认是lint-results.html )
htmlOutput file("lint-report.html")
// true--所有正式版构建执行规则生成崩溃的lint检查,如果有崩溃问题将停止构建
checkReleaseBuilds true
// 在发布版本编译时检查(即使不包含lint目标),指定问题的规则生成崩溃
fatal 'NewApi', 'InlineApi'
// 指定问题的规则生成错误
error 'Wakelock', 'TextViewEdits'
// 指定问题的规则生成警告
warning 'ResourceAsColor'
// 忽略指定问题的规则(同关闭检查)
ignore 'TypographyQuotes'
}
}
```
**lint.xml** 这个配置文件是用来指定你想禁用哪些lint检查功能,以及自定义问题严重度 (problem severity levels),我们可以通过 lintOptions 中的 lintConfig file("lint.xml") 来指定配置文件的所在目录
lint.xml文件的组成结构是,最外面是一对闭合的 `<lint>` 标签,里面包含一个或多个 `<issue>` 子元素。每一个 `<issue>` 由唯一的id属性来标识,整体结构如下:
```
<?xml version="1.0" encoding="UTF-8"?>
<lint>
<!-- list of issues to configure -->
<!-- Disable the given check in this project -->
<issue id="IconMissingDensityFolder" severity="ignore" />
<!-- Ignore the ObsoleteLayoutParam issue in the specified files -->
<issue id="ObsoleteLayoutParam">
<ignore path="res/layout/activation.xml" />
<ignore path="res/layout-xlarge/activation.xml" />
</issue>
<!-- Ignore the UselessLeaf issue in the specified file -->
<issue id="UselessLeaf">
<ignore path="res/layout/main.xml" />
</issue>
<!-- Change the severity of hardcoded strings to "error" -->
<issue id="HardcodedText" severity="error" />
</lint>
```
id 的获取我们可以通过命令行 `lint --list` 获取。如果无法直接执行 lint 命令,我们可以在 `/.bash_profile` 中添加 `PATH="~/Library/Android/sdk/tools/bin:${PATH}"` 即可
## Android Studio 的 Lint 支持
Android Studio 2.0 以后,谷歌将 Lint 检查整合到了 IDE 之中,提供了方便的图形界面操作,检测结果也会在底部 Inspection Results 中展现,除了 Lint 规则,AS 也加入了一些其他的检测规则
## 自定义 Lint 规则

在某些特殊情况下,系统定义的 Lint 并不能满足我们的需求,这时需要我们自己定义规则,然后利用 Android 的 Lint 工具帮助我们自动发现某些问题
谷歌官方的方案是依赖 lint-api 创建自己的 lint 规则,然后将自定义的 lint 规则打包成 jar (保存在 build/libs 中),将 jar 包复制到 `~/.android/lint` 目录下,最后在 Android 工程源码目录下执行 `./gradlew lint` 即可。但是这种方案有一个缺点:它针对的是本机的所有项目,也就是会影响同一台机器其他项目的 Lint 检查
所以我们可以采用 LinkedIn 提出的 aar 方案,将 jar 包放到一个 aar 中,然后 Android 项目依赖这个 aar 完成自定义 lint 检查。利用这种方案我们就可以针对项目进行自定义 Lint 规则,且 lint.jar 只对当前项目有效,根据 [Android Lint工作原理剖析](http://carrotsight.com/2016/06/21/Android%20Lint%E5%B7%A5%E4%BD%9C%E5%8E%9F%E7%90%86%E5%89%96%E6%9E%90.html) 里分析可以得知,当系统执行 lint 时,会检查项目的构建目录下的 lint 目录下是否引用了一个名为lint.jar文件,有的话会添加到自定义的 lint 规则中,最终将会被执行
以下项目结构参考自美团的自定义 Lint 开源项目。首先需要创建一个存放自定义 Lint 代码的纯 Java module,最终目的是输出 jar 包
对于自定义 Lint 我们需要依赖两个库,目前的 lint_version 版本是 25.3.0
```
compile 'com.android.tools.lint:lint-api:' + lint_version
compile 'com.android.tools.lint:lint-checks:' + lint_version
```
lint-checks 中包含了所有的官方定义的 lint 规则,我们可以参考其中的 Detector 实现自定义的 Detector 来满足项目的特殊需要,但是关于这方面的资料和文档是十分稀少的,所以这些官方提供的实例非常值得我们深入研究
我们以一个检测项目中 Log 日志打印的自定义 Detector 为例来说明自定义 Detector 的结构构成
```
public class LogUtilDetector extends Detector implements Detector.JavaPsiScanner {
public static final Issue ISSUE = Issue.create(
"LogUtilUse",
"避免使用Log/System.out.println",
"使用LogUtil,LogUtil对系统的Log类进行了展示格式、逻辑等封装",
Category.SECURITY, 5, Severity.WARNING,
new Implementation(LogUtilDetector.class, Scope.JAVA_FILE_SCOPE));
public List<String> getApplicableMethodNames() {
return Arrays.asList("d", "e", "i", "v", "w");
}
public void visitMethod(JavaContext context, JavaElementVisitor visitor, PsiMethodCallExpression node, PsiMethod method) {
JavaEvaluator evaluator = context.getEvaluator();
if (evaluator.isMemberInClass(method, "android.util.Log")) {
String message = "请使用LogUtil";
context.report(ISSUE, node, context.getLocation(node), message);
}
}
}
```
### Detector
lint 规则实现类需要继承 Detector 并实现 Scanner 接口
* **Detector.JavaScanner**——扫描 Java 源码抽象语法树,**在25.2.0版本中该接口已被弃用,换成了 JavaPsiScanner**,对语法分析由 [Lombok AST API](https://github.com/tnorbye/lombok.ast) 转变 [IntelliJ IDEA's "PSI" API](https://github.com/joewalnes/idea-community/tree/master/java/openapi/src/com/intellij/psi),功能更强大而且可以扩展到 kotlin 语言上(kotlin 是由 intellij 推出的与 Java 无缝兼容的全新语言)
* **Detector.ClassScanner**——扫描 class 文件
* **Detector.BinaryResourceScanner**——扫描二进制资源文件
* **Detector.ResourceFolderScanner**——扫描资源文件
* **Detector.XmlScanner**——扫描xml文件
* **Detector.GradleScanner**——扫描gradle文件
* **Detector.OtherFileScanner**——扫描其他类型文件
在 Detector 已经默认实现所有接口的所有方法了,只需要 override 需要的方法即可
以最复杂的 Detector.JavaPsiScanner 为例,其接口组成为
```Java
public interface JavaPsiScanner {
List<Class<? extends PsiElement>> getApplicablePsiTypes();
JavaElementVisitor createPsiVisitor(JavaContext var1);
List<String> getApplicableMethodNames();
void visitMethod(JavaContext var1, JavaElementVisitor var2, PsiMethodCallExpression var3, PsiMethod var4);
List<String> getApplicableConstructorTypes();
void visitConstructor(JavaContext var1, JavaElementVisitor var2, PsiNewExpression var3, PsiMethod var4);
List<String> getApplicableReferenceNames();
void visitReference(JavaContext var1, JavaElementVisitor var2, PsiJavaCodeReferenceElement var3, PsiElement var4);
boolean appliesToResourceRefs();
void visitResourceReference(JavaContext var1, JavaElementVisitor var2, PsiElement var3, ResourceType var4, String var5, boolean var6);
List<String> applicableSuperClasses();
void checkClass(JavaContext var1, PsiClass var2);
}
```
在自定义的 LogUtilDetector 中,继承了 JavaPsiScanner 用来检测源代码中的目标代码
```Java
public class LogUtilDetector extends Detector implements Detector.JavaPsiScanner
```
我们实现了 `List<String> getApplicableMethodNames() ` 方法用于返回我们需要查找的方法名称,因为打印日志会调用系统类 Log 对应的方法,我们通过简析方法名来达到替换的目的。系统找到对应的方法则会回调 `void visitMethod(JavaContext var1, JavaElementVisitor var2, PsiMethodCallExpression var3, PsiMethod var4);` 方法希望得到进一步的处理,例子中我们对方法的宿主类进行了检验,只有是 android.util.Log 下的方法才是我们最重要找的目标
### Issue
找到了目标代码,我们需要上报给系统以供展示,Issue 的作用就是描述 Detector 发现并报告的问题,它由静态方法 create 创建
```Java
public static Issue create(String id, String briefDescription, String explanation, Category category, int priority, Severity severity, Implementation implementation)
```
* **id** 唯一值,应该能简短描述当前问题。利用Java注解或者XML属性进行屏蔽时,使用的就是这个id
* **summary** 简短的总结,通常5-6个字符,描述问题而不是修复措施
* **explanation** 完整的问题解释和修复建议
* **category** 问题类别。在 Category 类中定义
* **priority** 优先级。1-10的数字,10为最重要/最严重
* **severity** 严重级别:Fatal,Error,Warning,Informational,Ignore,是 Severity 枚举类
* **Implementation** 为 Issue 和 Detector 提供映射关系,Detector 就是当前 Detector。声明扫描检测的范围 Scope,Scope 用来描述 Detector 需要分析时需要考虑的文件集,包括:Resource 文件或目录、Java 文件、Class 文件
我们为 LogUtilDetector 定义了上报的 Issue 如下格式
```Java
public static final Issue ISSUE = Issue.create(
"LogUtilUse",
"避免使用Log/System.out.println",
"使用LogUtil,LogUtil对系统的Log类进行了展示格式、逻辑等封装",
Category.SECURITY, 5, Severity.WARNING,
new Implementation(LogUtilDetector.class, Scope.JAVA_FILE_SCOPE));
```
最终通过 `context.report(ISSUE, node, context.getLocation(node), message);` 方法上报,其中 message 就是针对具体代码场景的描述
### IssueRegistry
自定义 lint 规则必须提供一个继承自 IssueRegistry 的类,实现抽象方法 `public abstract List<Issue> getIssues();` 将所有自定义 Detector 的 Issue 方法放入,最终执行 lint 命令时,通过注册的 IssueRegistry 可以获取所有的自定义探测器 detector(Issue 中存在与 Detector 的映射关系)
最终我们将所有自定义的 Detector 汇总,对外提供
```
public class MHCIssueRegistry extends IssueRegistry {
@Override
public List<Issue> getIssues() {
return Arrays.asList(
LogUtilDetector.ISSUE,
NestRecyclerViewDetector.ISSUE,
RequestCodeForV4FragmentDetector.ISSUE,
VectorDrawableIllegalDetector.ISSUE,
SubscriptionDetector.ISSUE,
NamingConventionsDetector.ISSUE,
CaseLiteralIllegalDetector.ISSUE);
}
}
```
### 工程 gradle 配置
提供包含自定义 lint 规则 jar 包的 :lintCoreLib 模块的 gradle 配置
```gradle
//注册MHCIssueRegistry,生成jar包
jar {
manifest {
attributes("Lint-Registry": "com.maihaoche.lint.core.MHCIssueRegistry")
}
}
//为aar包提供jar包依赖配置
defaultTasks 'assemble'
configurations {
lintJarOutput
}
dependencies {
lintJarOutput files(jar)
}
```
上层提供给项目 aar 包的 :lint 模块的 gradle 配置
```gradle
//配置lint的jar包
configurations {
lintJarImport
}
dependencies {
lintJarImport project(path: ":lintCoreLib", configuration: "lintJarOutput")
}
//将jar复制到lint目录下的lint.jar,因为在 lint 命令执行时会检查该文件,存在的话会添加到 lint 检查的队列中
task copyLintJar(type: Copy) {
from (configurations.lintJarImport) {
rename {
String fileName ->
'lint.jar'
}
}
into 'build/intermediates/lint/'
}
//当 Project 创建完所有任务的有向图后,通过 afterEvaluate 函数设置一个回调 Closure。将 copyLintJar 插入到 compileLint 之前执行
//在这个 Closure 里,我 disable 了所有 Debug 的 Task
project.afterEvaluate {
def compileLintTask = project.tasks.find { it.name == 'compileLint' }
compileLintTask.dependsOn(copyLintJar)
}
```
最后只要在主工程中依赖 :lint 模块即可
我把导出的 aar 包导入到了生产环境中,执行后就可以见到自定义的 Lint 的检测结果

## debug lint代码
调试 lint 的代码需要特殊配置
* 在 gradle.properties 文件中添加如下配置
```
org.gradle.jvmargs='-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005'
```
* 创建一个 Remote 类型的 debug 配置文件
* 终端中以 daemon 模式启动 gradle lint 任务
* 选择新建的配置文件快速点击 debug 按钮
按照这种方法我们就可以 debug 我们自定义的 lint 规则或者是系统定义的 lint 规则了
## 参考文献
[How to find gradle lintOptions document for android?](http://stackoverflow.com/questions/31128770/how-to-find-gradle-lintoptions-document-for-android)
[Android Studio Project Site -- Suppressing Lint Warnings](http://tools.android.com/tips/lint/suppressing-lint-warnings)
[Android自定义Lint实践](http://tech.meituan.com/android_custom_lint.html)
[Android自定义Lint实践2——改进原生Detector](http://tech.meituan.com/android_custom_lint2.html)
[Lint Source Code](https://android.googlesource.com/platform/tools/base/+/studio-master-dev/lint/)
<file_sep>apply plugin: 'groovy'
// Build & publish configuration for the custom lint Gradle plugin
// (published to Bintray/JCenter as com.maihaoche.lint:plugin).
repositories {
jcenter()
}
dependencies {
//gradle sdk
compile gradleApi()
//groovy sdk
compile localGroovy()
// Android Gradle plugin API used by the plugin implementation.
compile 'com.android.tools.build:gradle:2.3.2'
}
apply plugin: 'maven-publish'
apply plugin: 'com.jfrog.bintray'
def siteUrl = 'https://github.com/sanousun/LintDemo' // project homepage
def gitUrl = 'https://github.com/sanousun/LintDemo.git' // URL of the git repository
// Bundle sources and javadoc jars alongside the plugin artifact.
task sourceJar(type: Jar) {
from sourceSets.main.allSource
classifier 'sources'
}
task docJar(type: Jar, dependsOn: javadoc) {
from tasks.javadoc.destinationDir
classifier 'doc'
}
group = 'com.maihaoche.lint'
version = '1.0.3'
publishing {
publications {
MyPublication(MavenPublication) {
from components.java
artifactId 'plugin'
groupId group
version version
artifact sourceJar
artifact docJar
}
}
}
// Bintray credentials are read from local.properties (kept out of version control).
Properties properties = new Properties()
properties.load(project.rootProject.file('local.properties').newDataInputStream())
bintray {
user = properties.getProperty("bintray.user")
key = properties.getProperty("bintray.apikey")
publications = ['MyPublication']
pkg {
repo = "mhc_lint"
name = "lint_plugin" // artifact name as published to JCenter
websiteUrl = siteUrl
vcsUrl = gitUrl
licenses = ["Apache-2.0"]
publish = true
}
}
"Markdown",
"Java",
"Gradle"
] | 7 | Java | zyxcoder/LintDemo | 9bf6fba74328f6eed9cf2c4b218ded36a4dd69d7 | 40847fcae2bed68151d5e478bc3b95f538be1b77 |
refs/heads/master | <repo_name>ibexmonj/shop<file_sep>/unicorn.rb
# Unicorn server configuration for the "shop" app.
#
# Fixes: the file had tutorial line-number prefixes ("01 ", "02 ", ...) pasted
# into every line (invalid Ruby), curly "smart" quotes around the shared_path
# string, and a trailing comma after `user 'ibex',` that fused the call with
# the following assignment.
#
# See http://unicorn.bogomips.org/Unicorn/Configurator.html for complete
# documentation.
worker_processes 4

# Help ensure your application will always spawn in the symlinked
# "current" directory that Capistrano sets up.
working_directory "/home/ibex/shop/current"

# listen on both a Unix domain socket and a TCP port,
# we use a shorter backlog for quicker failover when busy
listen "/tmp/shop.socket", :backlog => 64

# nuke workers after 30 seconds instead of 60 seconds (the default)
timeout 30

# run worker processes as this unprivileged user
user 'ibex'

# feel free to point this anywhere accessible on the filesystem
shared_path = "/home/ibex/shop/shared"
pid "#{shared_path}/pids/unicorn.pid"
stderr_path "#{shared_path}/log/unicorn.stderr.log"
stdout_path "#{shared_path}/log/unicorn.stdout.log"
| b55fcae1aeaf3cab1ab4f88a04f893c532f87dd9 | [
"Ruby"
] | 1 | Ruby | ibexmonj/shop | 4ee2d3638598b372789b5a6abfb02db52745d4cc | 19165094e59710a54e93d3fdee6dcb018dbd98a0 |
refs/heads/master | <repo_name>ryulullalaa/my_projects<file_sep>/moodplace/app.py
# Builds an Instagram hashtag-exploration URL from user input.
# NOTE(review): the scraping itself is unfinished — urlopen, BeautifulSoup,
# webdriver and time are imported but never used; the script stops after
# constructing `url`.
from urllib.request import urlopen
from urllib.parse import quote_plus
from bs4 import BeautifulSoup
from selenium import webdriver
import time
# Base endpoint for Instagram's hashtag exploration pages.
baseUrl = 'https://www.instagram.com/explore/tags/'
# Ask the user which hashtag to look up (prompt text is Korean:
# "enter the tag to search for").
plusUrl = input('검색할 태그를 입력하세요: ')
# Percent-encode the tag so non-ASCII hashtags survive in the URL.
url = baseUrl + quote_plus(plusUrl)
| 2deb67a8b9930c817591d6ed04d4c27314f30f8e | [
"Python"
] | 1 | Python | ryulullalaa/my_projects | af9cfc640fe81944dc10b76790810ed05c55ec7b | c9e70d21d81403ed99ca0a084488a690a59903b8 |
refs/heads/master | <repo_name>eliranmal/proxify<file_sep>/README.md
# proxify
> callbacks to `async`s, all over the place
[![NPM][1]][2]
## overview
`async`ify an object — every node-style (callback-accepting) function on it becomes a promise-returning function.
## usage
```js
const obj = {
foo: (bar, done) => done(null, 'wat'),
};
const asyncObj = proxify(obj);
const result = await asyncObj.foo(1);
```
[1]: https://img.shields.io/npm/v/@eliranmal/proxify.svg?style=flat-square
[2]: https://www.npmjs.com/package/@eliranmal/proxify
<file_sep>/tests/unit/api_test.js
'use strict';

const expect = require('chai').expect;
const stub = require('../stubs/object-stub');
const proxify = require('../../lib/proxify');

let sut;

// Exercises the public surface of proxify() against the shared object stub.
describe('api', function () {

    // A fresh proxy per test keeps the cases independent.
    beforeEach(function () {
        sut = proxify(stub);
    });

    describe('a known object method', () => {
        it('should be available on the instance', async () => {
            // Accessing a wrapped method must still yield something callable.
            expect(sut.foo).to.be.a('function');
        });
    });

    describe('successful invocation with `await`', () => {
        it('should yield a result', async () => {
            const outcome = await sut.bar('string', 1);
            expect(outcome).to.be.an('object');
        });
    });
});
<file_sep>/lib/proxify.js
/**
 * Wraps a node-style (error-first callback) function so that calling it
 * returns a promise instead of requiring a trailing callback.
 *
 * @param fn the function to promisify
 * @param ctx a context to bind on invocation
 * @returns {function(...[*]): Promise<any>} promise-returning variant of fn
 */
const promisify = (fn, ctx = Object.create(null)) => (...args) =>
    new Promise((resolve, reject) => {
        // Error-first callback appended as the final argument of the call.
        const done = (err, result) => err ? reject(err) : resolve(result);
        try {
            fn.call(ctx, ...args, done);
        } catch (ex) {
            // A synchronous throw inside fn surfaces as a rejection.
            reject(ex);
        }
    });

/**
 * creates an opaque wrapper around the passed object,
 * while promisifying all function invocations.
 *
 * @param target the object to wrap.
 * @returns {object} a wrapped object, with all its functions promisified.
 */
const proxify = (target) => new Proxy(target, {
    get(target, key) {
        const member = Reflect.get(target, key);
        if (typeof member !== 'function') {
            // Non-callable members pass straight through untouched.
            return member;
        }
        // Hand back an async wrapper; promisify binds `target` as `this`.
        return async (...args) => promisify(target[key], target)(...args);
    }
});

module.exports = proxify;
| f39b33dcae0ec5c947adea11599c191def781cbc | [
"Markdown",
"JavaScript"
] | 3 | Markdown | eliranmal/proxify | c1ee29f01d2f1666b9815cebdd40dd2ad0c158bc | 84465342c587ed33891c36dde8fca5d48574e5c0 |
refs/heads/main | <file_sep>package com.neo.behavor.iterator;
/**
 * Abstract iterator role of the Iterator pattern: defines the interface for
 * accessing and traversing the aggregate's Student elements — typically
 * hasNext() and next(), as here.
 */
public interface StudentIterator {
/** @return true while there are still elements left to visit */
boolean hasNext();
/** @return the next Student in the traversal */
Student next();
}
<file_sep>package com.neo.behavor.mediator;
/**
 * Abstract mediator role of the Mediator pattern: colleague objects route
 * their messages through a Mediator implementation instead of referencing
 * each other directly.
 */
public abstract class Mediator {
/**
 * Relays a message on behalf of the given sender.
 *
 * @param message the text to deliver
 * @param person  the colleague that sent the message
 */
public abstract void communicate(String message, Person person);
}
<file_sep>package com.neo.behavor.state.after;
/**
 * @Author : neo
 * @Date 2021/3/28 10:29
 * Concrete state role (state pattern): behavior of the lift while its
 * doors are closing/closed. Each transition updates the shared Context
 * and then delegates the action back through it.
 */
public class ClosingState extends LiftState {
    // From closed, the doors may be re-opened.
    public void open() {
        // Switch the context to the "opening" state ...
        super.context.setLiftState(Context.OPENNING_STATE);
        // ... then perform the action through the context.
        super.context.open();
    }

    // Action performed while in this state.
    public void close() {
        System.out.println("电梯门关闭...");
    }

    // A closed lift may start running.
    public void run() {
        // Transition to the running state, then delegate.
        super.context.setLiftState(Context.RUNNING_STATE);
        super.context.run();
    }

    // A closed lift may also stop (no floor selected).
    public void stop() {
        // Transition to the stopping state, then delegate.
        super.context.setLiftState(Context.STOPPING_STATE);
        super.context.stop();
    }
}
<file_sep>package com.neo.controller;
import com.neo.service.AsyncService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.ResponseBody;
/**
 * @Author : neo
 * @Date 2021/7/24 10:04
 * Demo controller that triggers an asynchronous service call and returns
 * immediately.
 */
@Controller
public class AsyncController {
    @Autowired
    private AsyncService asyncService;

    @GetMapping("/")
    @ResponseBody
    public String hello() {
        // Fire-and-forget: presumably AsyncService.hello() is @Async so this
        // request returns "ok" without waiting -- TODO confirm on the service.
        asyncService.hello();
        return "ok";
    }
}
<file_sep>package com.neo.dataStructure;
/**
 * Computes Fibonacci numbers modulo 1_000_000_007.
 *
 * Rewritten from the original naive double recursion (exponential time,
 * deep recursion for large n) to an O(n) iterative rolling pair. The
 * modular recurrence is unchanged, so results match the original.
 */
public class Fibonacci {
    // Common competitive-programming modulus; keeps intermediate sums in int range.
    private static final int MOD = 1000000007;

    /**
     * @param n index of the Fibonacci number, n >= 0 (fib(0) == 0, fib(1) == 1)
     * @return the n-th Fibonacci number modulo 1e9+7
     */
    public static int fib(int n) {
        if (n == 0) {
            return 0;
        }
        // prev = fib(i-1), curr = fib(i); both already reduced mod MOD,
        // so prev + curr < 2*MOD fits in an int.
        int prev = 0, curr = 1;
        for (int i = 2; i <= n; i++) {
            int next = (prev + curr) % MOD;
            prev = curr;
            curr = next;
        }
        return curr;
    }

    public static void main(String[] args) {
        System.out.println(fib(10));
    }
}
<file_sep>package com.neo.structure.proxy.CGlib_Proxy;
/**
 * @Author : neo
 * @Date 2021/3/23 17:28
 * Demo client for the CGLIB dynamic proxy: obtains a subclass proxy of
 * TrainStation from the factory and sells a ticket through it.
 */
public class Client {
    public static void main(String[] args) {
        // The factory builds a CGLIB-enhanced TrainStation subclass.
        ProxyFactory proxyFactory = new ProxyFactory();
        TrainStation proxyObject = proxyFactory.getProxyObject();
        // Goes through the proxy's interceptor, then the real sell().
        proxyObject.sell();
    }
}
<file_sep>package com.neo.test;
/**
 * @Author : neo
 * @Date 2021/3/26 22:13
 * Simple immutable holder for a company name: set once via the
 * constructor, read via getName(), no setter exposed.
 */
public class Company {
    private String name;

    public Company(String name) {
        this.name = name;
    }

    public String getName() {
        return name;
    }
}<file_sep>package com.neo.behavor.command;
import java.util.ArrayList;
/**
 * @Author : neo
 * @Date 2021/3/27 15:45
 * Waiter class: the invoker role of the command pattern. Holds any number
 * of command objects and asks each of them to execute when the order is
 * announced.
 */
public class Waiter {
    // The queued command objects; the waiter may hold many.
    private ArrayList<Command> commands;

    public Waiter() {
        this.commands = new ArrayList<Command>();
    }

    public void setCommands(Command cmd) {
        // Append the command to the pending list (name kept for callers;
        // it accumulates rather than replaces).
        commands.add(cmd);
    }

    // Announce the order and execute every queued command in FIFO order.
    public void orderUp() {
        System.out.println("服务员:叮咚,大厨,订单来了...");
        for (Command command : commands) {
            if (command != null) {
                command.execute();
            }
        }
    }
}
<file_sep>package com.neo.pojo;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
/**
 * @Author : neo
 * @Date 2021/7/20 16:40
 * Department table POJO (Lombok generates constructors/getters/setters).
 */
//Department entity
@Data
@AllArgsConstructor
@NoArgsConstructor
public class Department {
    private Integer id;
    // NOTE(review): "depargmentName" is a typo of "departmentName", but the
    // Lombok-generated accessor names are part of the public interface, so it
    // is left unchanged here; fix together with all callers.
    private String depargmentName;
}
<file_sep>package com.neo.service;
import com.neo.pojo.User;
/**
 * @Author : neo
 * @Date 2021/7/22 16:39
 * User lookup service.
 */
public interface UserService {
    // Returns the user with the given name; presumably null when absent --
    // confirm against the mapper implementation.
    public User queryUserByName(String name);
}
<file_sep>"# JavaLife"
<file_sep>package com.neo.behavor.strategy;
/**
 * @Author : neo
 * @Date 2021/3/27 10:38
 * Concrete strategy: one holiday's specific promotion
 * ("buy one get one free").
 */
public class StrategyA implements Strategy {
    public void show() {
        System.out.println("买一送一");
    }
}
<file_sep>package com.neo.controller;
import com.neo.pojo.User;
import io.swagger.annotations.ApiOperation;
import io.swagger.annotations.ApiParam;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.Mapping;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RestController;
/**
 * @Author : neo
 * @Date 2021/7/23 10:44
 * Swagger demo controller: a few annotated endpoints so the API and the
 * User model show up in the generated documentation.
 */
@RestController
public class HelloController {
    @GetMapping("/hello")
    public String hello(){
        return"hello";
    }

    // Any entity appearing in a handler's return type is picked up by
    // Swagger's model scanner.
    @PostMapping("/hello")
    public User user(){
        return new User();
    }

    @ApiOperation("Hello2控制类")
    @GetMapping("/hello2")
    public String hello2(@ApiParam("用户名") String name){
        return "hello"+name;
    }

    // NOTE(review): "/postt" looks like a typo for "/post"; renaming would
    // change the public URL, so it is only flagged here.
    @ApiOperation("post测试类")
    @PostMapping("/postt")
    public String postt(@ApiParam("用户名") User user){
        // Relies on User#toString() (Lombok @Data) for the response body.
        return "hello"+user;
    }
}
<file_sep>package com.neo.pojo;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
/**
 * @Author : neo
 * @Date 2021/7/22 16:20
 * User table POJO (Lombok generates constructors/getters/setters).
 */
@Data
@AllArgsConstructor
@NoArgsConstructor
public class User {
    private Integer id;
    private String name;
    private String pwd;
    // Permission string, presumably a comma-separated permission list used
    // by the security layer -- TODO confirm.
    private String perms;
}
<file_sep>package com.neo.controller;
import org.apache.logging.log4j.message.ReusableMessage;
import org.springframework.stereotype.Controller;
import org.springframework.ui.Model;
import org.springframework.util.StringUtils;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import javax.servlet.http.HttpSession;
/**
 * @Author : neo
 * @Date 2021/7/21 10:03
 * Demo login controller: checks the submitted credentials and either
 * stores the user in the session and shows the dashboard, or returns to
 * the index page with an error message.
 */
@Controller
public class LoginController {
    @RequestMapping("/user/login")
    public String login(@RequestParam("username") String username,
                        @RequestParam("password") String password,
                        Model model, HttpSession session) {
        // NOTE(review): the expected password is a hard-coded (redacted)
        // literal compared in plain text -- demo only; a real app must hash
        // and look credentials up from a store.
        if (!StringUtils.isEmpty(username) && "<PASSWORD>".equals(password)) {
            session.setAttribute("loginUser", username);
            return "dashboard";
        } else {
            // Tell the user the login failed.
            model.addAttribute("msg", "用户名或者密码错误");
            return "index";
        }
    }
}
<file_sep>package com.neo.creator.Singleton.demo03;
/**
 * Lazy-initialization singleton: the class does not create its instance
 * at load time; the instance is created on the first getInstance() call.
 *
 * NOTE: intentionally NOT thread-safe (this demo shows the naive lazy
 * form). Two threads racing through the null check can each construct an
 * instance. Use double-checked locking with a volatile field, or the
 * static holder-class idiom, for a thread-safe variant.
 *
 * Change vs. original: removed a stray empty statement (`;`) after the
 * constructor; behavior is unchanged.
 */
public class Singleton {
    private static Singleton instance;

    // Private constructor prevents outside instantiation.
    private Singleton() {
    }

    // Returns the single instance, creating it on first use.
    public static Singleton getInstance() {
        if (instance == null) {
            instance = new Singleton();
        }
        return instance;
    }
}
<file_sep>package com.neo.demo02;
/**
 * @Description Author neo
 * Date 2021/3/4 20:25
 * Demo client for the static proxy: wires the real service into the proxy
 * and calls through the proxy.
 */
//client
public class Client {
    public static void main(String[] args) {
        // Real subject.
        UserServiceImpl userService = new UserServiceImpl();
        // Proxy wraps the real subject and adds its own behavior around add().
        UserServiceProxy proxy = new UserServiceProxy();
        proxy.setUserService(userService);
        proxy.add();
    }
}
<file_sep>package com.neo.service.impl;
import com.neo.dao.UserDao;
import com.neo.entity.User;
import com.neo.service.UserService;
import org.springframework.stereotype.Service;
import javax.annotation.Resource;
import java.util.List;
/**
 * (User) table service implementation: thin delegation layer over UserDao.
 *
 * @author makejava
 * @since 2021-07-27 10:56:13
 */
@Service
public class UserServiceImpl implements UserService {
    @Resource
    private UserDao userDao;

    /**
     * 通过ID查询单条数据 (query one row by primary key)
     *
     * @param id 主键 (primary key)
     * @return 实例对象 (the entity, presumably null when not found -- confirm in UserDao)
     */
    @Override
    public User queryById(Integer id) {
        return this.userDao.queryById(id);
    }

    /**
     * 查询多条数据 (paged query)
     *
     * @param page 页码数 (1-based page number)
     * @param limit 查询条数 (page size)
     * @return 对象列表 (rows of the requested page)
     */
    @Override
    public List<User> queryAllByLimit(int page, int limit) {
        // Translate the 1-based page number into a row offset for the DAO.
        int offset = (page - 1) * limit;
        return this.userDao.queryAllByLimit(offset, limit);
    }

    /**
     * 新增数据 (insert)
     *
     * @param user 实例对象
     * @return 实例对象 (the same instance that was passed in)
     */
    @Override
    public User insert(User user) {
        this.userDao.insert(user);
        return user;
    }

    /**
     * 修改数据 (update)
     *
     * @param user 实例对象
     * @return 实例对象 (re-read from the database after the update)
     */
    @Override
    public User update(User user) {
        this.userDao.update(user);
        return this.queryById(user.getId());
    }

    /**
     * 通过主键删除数据 (delete by primary key)
     *
     * @param id 主键
     * @return 是否成功 (true when at least one row was deleted)
     */
    @Override
    public boolean deleteById(Integer id) {
        return this.userDao.deleteById(id) > 0;
    }

    // Returns every user row (no paging).
    @Override
    public List<User> selectAll() {
        List<User> users = userDao.selectAll();
        return users;
    }

    // Total number of rows, used for paging UIs.
    @Override
    public Integer getRecordNum() {
        return userDao.getRecordNum();
    }
}<file_sep>package com.neo.behavor.iterator.after;
/**
 * @Author : neo
 * @Date 2021/3/28 16:21
 * Abstract aggregate role (iterator pattern): stores/adds/removes
 * elements and creates an iterator over them.
 */
public interface Aggregate<T> {
    // Add an element.
    void add(T item);

    // Remove an element.
    void remove(T item);

    // Create an iterator over the elements.
    // NOTE(review): returns the raw type; `Iterator<T>` would be the
    // type-safe signature, but changing it affects every implementer.
    Iterator getIterator();
}
<file_sep>package com.neo.structure.bridge;
/**
 * @Author : neo
 * @Date 2021/3/25 12:16
 * Windows player (refined abstraction of the bridge pattern): playback on
 * Windows, decoding delegated to the bridged VideoFile implementor.
 */
public class Windows extends OperatingSystem {
    public Windows(VideoFile videoFile) {
        super(videoFile);
    }

    public void play(String fileName) {
        System.out.print("在windows上播放");
        // Decoding is bridged to the injected file-format implementor.
        videoFile.decode(fileName);
    }
}
<file_sep>package com.neo.dataStructure;
import java.util.Arrays;
/**
 * Sliding-window maximum demo.
 *
 * Rewritten from the original O(n*k) double loop (which recomputed each
 * window's max from scratch) to the O(n) monotonic-deque algorithm; the
 * logic is extracted into a reusable method and main() prints the same
 * result as before.
 */
public class SlideWindow {
    /**
     * Returns the maximum of every length-k window over nums.
     *
     * @param nums input values
     * @param k    window size
     * @return one max per window; empty array when nums is empty/null or k
     *         is out of range (the original would have thrown instead)
     */
    public static int[] maxSlidingWindow(int[] nums, int k) {
        if (nums == null || nums.length == 0 || k <= 0 || k > nums.length) {
            return new int[0];
        }
        int[] res = new int[nums.length - k + 1];
        // Array-backed deque of indices whose values are strictly decreasing;
        // the window's max is always at the head.
        int[] dq = new int[nums.length];
        int head = 0, tail = 0; // live slice is dq[head .. tail-1]
        for (int i = 0; i < nums.length; i++) {
            // Evict the head index once it slides out of the window.
            if (head < tail && dq[head] <= i - k) {
                head++;
            }
            // Values <= nums[i] can never be a window max again.
            while (head < tail && nums[dq[tail - 1]] <= nums[i]) {
                tail--;
            }
            dq[tail++] = i;
            if (i >= k - 1) {
                res[i - k + 1] = nums[dq[head]];
            }
        }
        return res;
    }

    public static void main(String[] args) {
        int[] nums = {1, 3, -1, -3, 5, 3, 6, 7};
        int k = 3;
        System.out.println(Arrays.toString(maxSlidingWindow(nums, k)));
    }
}
<file_sep>package com.neo.structure.facade;
/**
 * @Author : neo
 * @Date 2021/3/25 17:12
 * Demo client for the facade pattern: a single say() call drives all the
 * appliance subsystems at once.
 */
public class Client {
    public static void main(String[] args) {
        SmartAppliancesFacade facade = new SmartAppliancesFacade();
        // One voice command turns everything on ...
        facade.say("打开家电");
        System.out.println("======================");
        // ... and another turns everything off.
        facade.say("关闭家电");
    }
}
<file_sep>package com.neo.structure.bridge;
/**
 * @Author : neo
 * @Date 2021/3/25 12:20
 * Demo client for the bridge pattern: any OS abstraction can be combined
 * with any video-format implementor.
 */
public class Client {
    public static void main(String[] args) {
        // OS (abstraction) and file format (implementor) vary independently.
        Windows windows = new Windows(new AVIFile());
        Mac mac = new Mac(new RMVBFile());
        windows.play("战狼3");
        mac.play("唐伯虎点秋香");
    }
}
<file_sep>package com.neo.structure.facade;
/**
 * @Author : neo
 * @Date 2021/3/25 17:03
 * Air-conditioner (subsystem role of the facade pattern): implements part
 * of the system's features; clients reach it through the facade.
 */
public class AirCondition {
    // Turn the air conditioner on.
    public void on() {
        System.out.println("打开空调...");
    }

    // Turn the air conditioner off.
    public void off() {
        System.out.println("关闭空调...");
    }
}
<file_sep>package com.neo.config;
import com.neo.pojo.User;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Import;
import org.springframework.stereotype.Component;
/**
 * @Description Author neo
 * Date 2021/1/25 15:12
 * Java-config replacement for a beans.xml file.
 */
// This class itself is managed by the Spring container (registered as a
// component), because @Configuration is meta-annotated with @Component.
// @Configuration marks it as a configuration class, equivalent to the old
// beans.xml.
@Configuration
@ComponentScan("com.neo.pojo")
@Import(MyConfig2.class)
public class MyConfig {
    // Registering a bean here is equivalent to a <bean> tag:
    // the method name plays the role of the bean id, and the return type
    // plays the role of the class attribute.
    @Bean
    public User getUser() {
        return new User();
    }
}
<file_sep>package com.neo.structure.decorator;
/**
 * @Author : neo
 * @Date 2021/3/24 19:48
 * Fried noodles (concrete component of the decorator pattern): a base
 * dish with a fixed price, decorators add toppings on top.
 */
public class FiredNoodles extends FastFood {
    public FiredNoodles() {
        // Base price 12, description "fried noodles".
        super(12, "炒面");
    }

    // Cost of the bare dish: just its own price.
    public float cost() {
        return getPrice();
    }
}
<file_sep>import org.junit.Test;
import sun.management.Agent;
/**
 * @Description Author neo
 * Date 2021/3/20 15:48
 * Empty test placeholder. NOTE(review): the file-level imports
 * (org.junit.Test, sun.management.Agent) are unused; sun.* is an internal
 * JDK API and should not be imported.
 */
public class TestDemo {
}
<file_sep>package com.neo.structure.bridge;
/**
 * @Author : neo
 * @Date 2021/3/25 12:20
 * macOS player (refined abstraction of the bridge pattern): playback on
 * Mac, decoding delegated to the bridged VideoFile implementor.
 */
public class Mac extends OperatingSystem {
    public Mac(VideoFile videoFile) {
        super(videoFile);
    }

    public void play(String fileName) {
        System.out.print("在mac上播放");
        // Decoding is bridged to the injected file-format implementor.
        videoFile.decode(fileName);
    }
}
<file_sep>package com.neo.creator.Factory.AbstractFactory;
/**
 * @Description Author neo
 * Date 2021/3/19 17:50
 * Abstract factory: one factory produces a whole product family
 * (a coffee and a dessert that belong together).
 */
public interface DessertFactory {
    // Produce the family's coffee.
    public Coffee createCoffee();

    // Produce the family's dessert.
    public Dessert createDessert();
}
<file_sep>package com.neo.structure.flyweight;
import com.sun.xml.internal.ws.wsdl.parser.MemberSubmissionAddressingWSDLParserExtension;
import sun.security.jca.GetInstance;
import java.util.HashMap;
/**
 * Flyweight factory for tetromino boxes: caches one shared AbstractBox per
 * shape key ("I", "L", "O") and always returns the same instance for a key.
 * Implemented as an eagerly-initialized singleton.
 *
 * Change vs. original: the static cache used to be assigned from inside the
 * private instance constructor (it only worked because exactly one instance
 * is ever constructed). The cache is now populated in static initialization,
 * and both fields are final. Public interface (getInstance()/getBox()) is
 * unchanged.
 */
public class BoxFactory {
    // Shape key -> shared flyweight; populated once below, never mutated after.
    private static final HashMap<String, AbstractBox> map = new HashMap<String, AbstractBox>();

    // Eagerly created single factory instance.
    private static final BoxFactory instance = new BoxFactory();

    static {
        map.put("I", new IBox());
        map.put("L", new LBox());
        map.put("O", new OBox());
    }

    // Private: clients must go through getInstance().
    private BoxFactory() {
    }

    // Returns the one shared factory instance.
    public static BoxFactory getInstance() {
        return instance;
    }

    // Looks up the shared flyweight for the given shape key (null if unknown).
    public AbstractBox getBox(String key) {
        return map.get(key);
    }
}
<file_sep>package com.neo.study.test;
import java.io.*;
/**
 * @Description Author neo
 * Date 2020/11/24 9:26
 * Demo of Java byte streams vs. character streams. Only the final section
 * (character input stream) is active; the earlier byte-output, byte-input
 * and character-output variants are retained verbatim as commented-out
 * reference code (their inline comments are in Chinese).
 */
//byte streams and character streams
public class Stream {
    public static void main(String[] args) throws Exception {
        /*
        //字节输出流
        //1.向文件中写入字符串
        //第1步:使用File类找到一个文件
        File file = new File("d:" + File.separator + "test.txt");//文件不存在时候会自动创建
        //第2步:通过子类实例化父类对象
        OutputStream out = null;
        // out = new FileOutputStream(file);
        out = new FileOutputStream(file, true);//此处表示在文件末尾追加内容
        //第3步:进行写操作
        String str = "\r\nHello World222 梁兵涛";// “\r\n”表示换行输出
        byte[] bytes = str.getBytes();//只能输出byte数组,所以将字符串变为byte数组
        out.write(bytes);
        //第4步:关闭输出流
        out.close();
        */
        /* //字节流输入
        //第1步,使用file类找到一个文件
        File file1 = new File("d:" + File.separator + "test.txt");
        //第2步:通过子类实例化父类对象
        InputStream input = null;
        input = new FileInputStream(file1);
        //第三步:进行读操作
        // byte[] bytes1 = new byte[1024];
        byte[] bytes1 = new byte[(int) file1.length()];//根据文件大小初始化数组大小
        input.read(bytes1);
        // int len = input.read(bytes1);
        //第4不:关闭输入流
        input.close();
        // System.out.println("读入数据的长度:"+len);
        // System.out.println("内容为:"+new String(bytes1,0,len));
        System.out.println("内容为:" + new String(bytes1));
        */
        /*
        //字符流
        //字符输出流
        //与字节流相比,可以直接输出字符串,不用在转换成byte数组后输出
        //1.向文件中写入字符串
        //第1步:使用File类找到一个文件
        File file = new File("d:" + File.separator + "test.txt");//文件不存在时候会自动创建
        //第2步:通过子类实例化父类对象
        Writer out = null;
        // out = new FileWriter(file);
        out=new FileWriter(file,true);//追加文件内容
        //第3步:进行写操作
        String str = "\r\nHello World \r\n梁兵涛";// “\r\n”表示换行输出
        out.write(str); //将内容输出
        //第4步:关闭输出流
        out.close();
        */
        //Character input stream: read the file's content.
        //Step 1: locate the file with the File class (hard-coded d:\test.txt).
        File file1 = new File("d:" + File.separator + "test.txt");
        //Step 2: instantiate the parent type via a concrete subclass.
        Reader reader = null;
        reader = new FileReader(file1);
        //Step 3: perform the read.
        // char c[] = new char[1024]; //fixed-size buffer alternative
        char[] c = new char[(int) file1.length()]; //size the buffer from the file length
        int len = reader.read(c); //fill the buffer; len = chars actually read
        //Step 4: close the input stream.
        reader.close();
        System.out.println("读入数据的长度:" + len);
        System.out.println("内容为:" + new String(c, 0, len));
    }
}
<file_sep>package com.neo.behavor.iterator.after;
/**
 * @Author : neo
 * @Date 2021/3/28 16:29
 * Demo client for the generic iterator pattern: one aggregate of students
 * and one of workers, each traversed through its own iterator.
 */
public class Client {
    public static void main(String[] args) {
        // Create the aggregate objects (raw-typed locals; the casts below
        // compensate for the raw Iterator returned by getIterator()).
        AggregateImpl aggregate1 = new AggregateImpl<Student>();
        AggregateImpl aggregate2 = new AggregateImpl<Worker>();
        // Populate them.
        aggregate1.add(new Student("张三", "001"));
        aggregate1.add(new Student("李四", "002"));
        aggregate2.add(new Worker("王五", 5000.23f));
        aggregate2.add(new Worker("赵六", 6000.56f));
        // 1. Obtain the iterators.
        Iterator iterator1 = aggregate1.getIterator();
        Iterator iterator2 = aggregate2.getIterator();
        // 2. Traverse each aggregate.
        while (iterator1.hasNext()) {
            Student student = (Student) iterator1.next();
            System.out.println(student.toString());
        }
        while (iterator2.hasNext()) {
            Worker worker = (Worker) iterator2.next();
            System.out.println(worker.toString());
        }
    }
}
<file_sep>package com.neo.structure.flyweight;
/**
 * @Author : neo
 * @Date 2021/3/26 15:20
 * L-shaped box (concrete flyweight): stateless, shared via BoxFactory.
 */
public class LBox extends AbstractBox {
    public String getShape() {
        return "L";
    }
}
<file_sep>package com.neo.behavor.state.before;
/**
 * @Author : neo
 * @Date 2021/3/28 9:38
 * Lift interface (the pre-state-pattern version): one int constant per
 * state, plus the four actions.
 */
public interface ILift {
    // The four lift states.
    int OPENING_STATE = 1;
    int CLOSING_STATE = 2;
    int RUNNING_STATE = 3;
    int STOPPING_STATE = 4;

    // Set the current lift state (one of the constants above).
    void setState(int state);

    // Lift actions.
    void open();

    void close();

    void run();

    void stop();
}
<file_sep>package com.neo.creator.prototype.demo03;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
/**
 * @Description Author neo
 * Date 2021/3/20 16:00
 * Deep-copy-via-serialization demo for the prototype pattern: the
 * Citation (and its Student) is written to d:\a.txt and read back, which
 * yields a fully independent copy.
 */
public class PrototypeTest {
    public static void main(String[] args) throws Exception {
        // Build the prototype object graph.
        Citation citation = new Citation();
        Student stu = new Student();
        stu.setName("张三");
        citation.setStudent(stu);
        // Serialize the citation to a file (requires both classes to be
        // Serializable).
        ObjectOutputStream oos = new ObjectOutputStream(new FileOutputStream("d:\\a.txt"));
        oos.writeObject(citation);
        oos.close();
        // Deserialize -- this produces a deep copy.
        ObjectInputStream ois = new ObjectInputStream(new FileInputStream("d:\\a.txt"));
        Citation citation1 = (Citation) ois.readObject();
        // The copy's student is a different object, so renaming it does not
        // affect the original citation.
        Student stu1 = citation1.getStudent();
        stu1.setName("李四");
        // Prove the two student objects are distinct.
        System.out.println("stu和stu1是同一个对象?" + (stu == stu1));
        citation.show();
        citation1.show();
    }
}
<file_sep>package com.neo.demo04;
import com.neo.demo02.UserService;
import com.neo.demo02.UserServiceImpl;
/**
 * @Description Author neo
 * Date 2021/3/4 22:11
 * Demo client for the JDK dynamic proxy: wraps UserServiceImpl in a
 * runtime-generated proxy and calls through it.
 */
public class Client {
    public static void main(String[] args) {
        // Real subject.
        UserServiceImpl userService = new UserServiceImpl();
        // The proxy class does not exist at compile time; it is generated.
        ProxyInvocationHandler pih = new ProxyInvocationHandler();
        // Tell the handler which object to wrap.
        pih.setTarget(userService);
        // Generate the proxy (typed by the shared interface).
        UserService proxy = (UserService) pih.getProxy();
        proxy.delete();
    }
}
<file_sep>package com.neo.behavor.mediator;
/**
 * @Author : neo
 * @Date 2021/3/28 15:34
 * Demo client for the mediator pattern: tenant and house owner only know
 * the agency (mediator); the agency knows both.
 */
public class Client {
    public static void main(String[] args) {
        // One owner, one tenant, one agency.
        MediatorStructure mediatorStructure = new MediatorStructure();
        // Colleagues only hold a reference to the mediator.
        Tenant tenant = new Tenant("张三", mediatorStructure);
        HouseOwner houseOwner = new HouseOwner("李四", mediatorStructure);
        // The mediator must know both colleagues to relay messages.
        mediatorStructure.setHouseOwner(houseOwner);
        mediatorStructure.setTenant(tenant);
        tenant.communicate("我需要租三室的房子");
        houseOwner.communicate("我这里有三室的房子,你要租吗?");
    }
}
<file_sep>package com.neo.behavor.visitor;
/**
 * @Author : neo
 * @Date 2021/3/29 9:25
 * Concrete visitor role (visitor pattern): how "someone else" feeds each
 * kind of animal; dispatch per element type is via overloading.
 */
public class Someone implements Person {
    public void feed(Cat cat) {
        System.out.println("其他人喂食猫");
    }

    public void feed(Dog dog) {
        System.out.println("其他人喂食狗");
    }
}
<file_sep>package com.neo.structure.bridge;
/**
 * @Author : neo
 * @Date 2021/3/25 10:08
 * Operating-system side of the bridge pattern (abstraction role): holds
 * the implementor (VideoFile) and defers decoding to it.
 */
public abstract class OperatingSystem {
    // The bridged implementor: which video format to decode.
    protected VideoFile videoFile;

    public OperatingSystem(VideoFile videoFile) {
        this.videoFile = videoFile;
    }

    // Play the named file on this OS (concrete subclasses add OS-specific output).
    public abstract void play(String fileName);
}
<file_sep>package com.neo.excelutil.utils;
import com.itextpdf.text.*;
import com.itextpdf.text.pdf.BaseFont;
import com.itextpdf.text.pdf.PdfPTable;
import com.itextpdf.text.pdf.PdfWriter;
import com.neo.excelutil.entity.User;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.List;
public class PdfUtil {
    // public static void exportPDF(List<News> newsList, String title, OutputStream out1) throws IOException, org.dom4j.DocumentException, DocumentException {
    /**
     * Renders a list of users into a PDF table using iText.
     *
     * NOTE(review): the `out1` parameter is unused -- output always goes to
     * the hard-coded path E:\NEW\<title>.pdf; the data-row loop below is
     * entirely commented out (it still references the old News fields), so
     * a non-empty list currently produces an empty table body.
     */
    public static void exportPDF(List<User> newsList, String title, OutputStream out1) throws IOException, org.dom4j.DocumentException, DocumentException {
        // Step 1: create the document.
        Document document = new Document();
        // Step 2: choose the export destination (hard-coded file path).
        FileOutputStream out = new FileOutputStream("E:\\NEW\\"+title+".pdf");
        // To stream to a browser instead, use the response's output stream:
        //OutputStream out = response.getOutputStream();
        // Step 3: set up a CJK-capable font (needs itext 2.1.7 + itext-asian 5.2.0).
        BaseFont bfChinese = BaseFont.createFont("STSong-Light", "UniGB-UCS2-H", false);
        Font fontZH = new Font(bfChinese, 12.0F, 0);
        // Step 4: bind the document to the output stream.
        PdfWriter writer = PdfWriter.getInstance(document, out);//use the response stream (out1) for in-browser preview, `out` for direct export
        // Step 5: open the document for writing.
        document.open();
        // Step 6: write the content (title paragraph, then the table).
        Font titieFont = new Font(bfChinese, 15, Font.NORMAL);
        Paragraph paragraph = new Paragraph();
        paragraph.setAlignment(Element.ALIGN_CENTER);
        paragraph.setFont(titieFont);
        Chunk chunk = new Chunk("新闻明细表");
        paragraph.add(chunk);
        // Document metadata (browser tab title).
        document.addTitle("Set Attribute Example");
        document.add(paragraph);
        document.add(new Paragraph("\n"));
        // Create the table; the 5 is the column count, and addCell below must
        // always fill complete rows.
        PdfPTable table = new PdfPTable(5);
        // Table spans 100% of the page width.
        table.setWidthPercentage(100.0F);
        // Repeat the first row as a header on every page.
        table.setHeaderRows(1);
        table.getDefaultCell().setHorizontalAlignment(1);
        table.addCell(new Paragraph("id", fontZH));
        table.addCell(new Paragraph("标题", fontZH));
        table.addCell(new Paragraph("内容", fontZH));
        table.addCell(new Paragraph("作者", fontZH));
        table.addCell(new Paragraph("发布时间", fontZH));
        // table.addCell(new Paragraph("地址", fontZH));
        if(newsList.size()==0){
            table.addCell(new Paragraph("无", fontZH));
            table.addCell(new Paragraph("无", fontZH));
            table.addCell(new Paragraph("无", fontZH));
            table.addCell(new Paragraph("无", fontZH));
            table.addCell(new Paragraph("无", fontZH));
        }else{
            for(int i=0;i<newsList.size();i++){
                // table.addCell(new Paragraph(String.valueOf(newsList.get(i).getId()), fontZH));
                // table.addCell(new Paragraph(newsList.get(i).getTitle(), fontZH));
                // table.addCell(new Paragraph(newsList.get(i).getContent(), fontZH));
                // table.addCell(new Paragraph(String.valueOf(newsList.get(i).getAuthor()), fontZH));
                // table.addCell(new Paragraph(newsList.get(i).getReleaseTime(), fontZH));
                // table.addCell(new Paragraph(newsList.get(i).getAddress(), fontZH));
            }
        }
        document.add(table);
        document.add(new Paragraph("\n"));
        Chunk chunkEnd = new Chunk("java导出PDF");
        Paragraph paragraphEND = new Paragraph();
        paragraphEND.setAlignment(Element.ALIGN_CENTER);
        paragraphEND.setFont(titieFont);
        paragraphEND.add(chunkEnd);
        document.add(paragraphEND);
        // Step 7: close the document (iText closes the underlying stream by default).
        document.close();
        System.out.println("导出pdf成功————"+title);
    }
}
<file_sep>#1、注解说明
- @Autowired:自动装配,通过类型,名字;
@Autowired通过byType的方式实现,而且必须要求这个对象存在,如果@Autowired不能唯一自动装配上属性,则需要通过@Qualifier(value=“xxx”)【常用】
- @Nullable:字段标记了这个注解,说明这个字段可以为null
- @Resource:@Resource默认通过byName的方式实现,如果找不到名字,则通过byType实现!如果两个都找不到,就报错!【常用】
- @Component:组件,放在类上,说明这个类被Spring管理了,就是bean!<file_sep>package com.neo.behavor.template;
/**
 * @Author : neo
 * @Date 2021/3/26 17:16
 * Concrete class of the template-method pattern (choy sum dish):
 * overrides only the variable steps; the cooking sequence lives in the
 * abstract parent.
 */
public class Concrete_CaiXin extends AbstractClass {
    public void pourVegetable() {
        System.out.println("下锅的蔬菜是菜心");
    }

    public void pourSauce() {
        System.out.println("下锅的酱料是蒜蓉");
    }
}
/**
* @Author : neo
* @Date 2021/3/27 15:52
* @Description : 测试类
*/
public class Client {
public static void main(String[] args) {
//创建两个order
Order order1 = new Order();
order1.setDiningTable(1);
order1.setFoodDic("西红柿鸡蛋面", 1);
order1.setFoodDic("可口可乐", 2);
Order order2 = new Order();
order2.setDiningTable(2);
order2.setFoodDic("青椒肉丝", 1);
order2.setFoodDic("雪碧", 1);
//创建接收者
SeniorChef chef = new SeniorChef();
//将订单和接收者封装成命令对象
OrderCommand cmd1 = new OrderCommand(chef, order1);
OrderCommand cmd2 = new OrderCommand(chef, order2);
//创建调用者waiter
Waiter waiter = new Waiter();
waiter.setCommands(cmd1);
waiter.setCommands(cmd2);
//将订单带到柜台,并向厨师喊 订单来了
waiter.orderUp();
}
}
<file_sep>package com.neo.structure.decorator02;
/**
 * @Author : neo
 * @Date 2021/3/25 9:38
 * Demo client for the shape-decorator example: the same draw() call,
 * plain vs. wrapped in a red-border decorator.
 */
public class Client {
    public static void main(String[] args) {
        Circle circle = new Circle();
        // Decorate existing and fresh shapes alike.
        RedShapeDecorator redCircle = new RedShapeDecorator(circle);
        RedShapeDecorator redRectangle = new RedShapeDecorator(new Rectangle());
        System.out.println("Circle with normal border");
        circle.draw();
        System.out.println("=============================");
        System.out.println("Circle with red border");
        redCircle.draw();
        System.out.println("===============================");
        System.out.println("Rectangle of red border");
        redRectangle.draw();
    }
}
<file_sep>package com.neo.structure.proxy.JDK_Proxy;
/**
 * @Author : neo
 * @Date 2021/3/23 17:01
 * Demo client for the JDK dynamic proxy: sells a ticket through the
 * interface-typed proxy object.
 */
public class Client {
    public static void main(String[] args) {
        // The factory returns a java.lang.reflect.Proxy implementing SellTickets.
        ProxyFactory proxyFactory = new ProxyFactory();
        SellTickets proxyObject = proxyFactory.getProxyObject();
        proxyObject.sell();
    }
}
<file_sep>package com.neo.behavor.memento.white_box;
/**
 * @Author : neo
 * @Date 2021/3/29 10:29
 * Caretaker role of the memento pattern: stores (but never inspects) the
 * role-state memento.
 */
public class RoleStateCaretaker {
    // The held memento.
    private RoleStateMemento roleStateMemento;

    public RoleStateMemento getRoleStateMemento() {
        return roleStateMemento;
    }

    public void setRoleStateMemento(RoleStateMemento roleStateMemento) {
        this.roleStateMemento = roleStateMemento;
    }
}
<file_sep>package com.neo.structure.adapter.objectAdapter;
/**
 * @Author : neo
 * @Date 2021/3/24 13:57
 * Adaptee interface (adapter pattern): a TF card's native read/write API.
 */
public interface TFCard {
    // Read data from the TF card.
    public String readTF();

    // Write data to the TF card.
    // NOTE(review): "WriteTF" violates the lowerCamelCase convention
    // ("writeTF"); renaming would break every implementer, so it is only
    // flagged here.
    void WriteTF(String msg);
}
<file_sep>package com.neo.behavor.interpreter;
/**
 * @Author : neo
 * @Date 2021/3/29 12:57
 * Abstract expression role (interpreter pattern): every expression node
 * evaluates itself against the shared Context.
 */
public abstract class AbstractExpression {
    // Evaluate this expression using variable bindings from the context.
    public abstract int interpret(Context context);
}
<file_sep>package com.neo.structure.proxy.JDK_Proxy;
/**
 * @Description Author neo
 * @Date 2021/3/23 16:38
 * Real subject of the proxy pattern: the actual ticket seller behind the
 * dynamic proxy.
 */
public class TrainStation implements SellTickets {
    public void sell() {
        System.out.println("火车站售票");
    }
}
<file_sep>package com.neo.service;
import com.neo.entity.User;
import java.util.List;
/**
 * Service interface for the (User) table.
 *
 * @author makejava
 * @since 2021-07-27 10:56:13
 */
public interface UserService {
    /**
     * 通过ID查询单条数据 (query one row by primary key)
     *
     * @param id 主键 (primary key)
     * @return 实例对象 (the entity)
     */
    User queryById(Integer id);

    /**
     * 查询多条数据 (paged query)
     *
     * @param page 页码数 (1-based page number)
     * @param limit 查询条数 (page size)
     * @return 对象列表 (rows of the requested page)
     */
    List<User> queryAllByLimit(int page, int limit);

    /**
     * 新增数据 (insert)
     *
     * @param user 实例对象
     * @return 实例对象
     */
    User insert(User user);

    /**
     * 修改数据 (update)
     *
     * @param user 实例对象
     * @return 实例对象
     */
    User update(User user);

    /**
     * 通过主键删除数据 (delete by primary key)
     *
     * @param id 主键
     * @return 是否成功 (true on success)
     */
    boolean deleteById(Integer id);

    // Query every user (no paging).
    List<User> selectAll();

    // Total row count, for paging UIs.
    Integer getRecordNum();
}<file_sep>package com.neo.behavor.state.after;
/**
 * @Author : neo
 * @Date 2021/3/28 10:29
 * Concrete state role (state pattern): behavior of the lift while its
 * doors are open. Invalid actions are silently ignored; valid ones
 * transition the context and delegate back through it.
 */
public class OpenningState extends LiftState {
    // Action performed while in this state.
    public void open() {
        System.out.println("电梯门打开了...");
    }

    public void close() {
        // Transition to the closing state ...
        super.context.setLiftState(Context.CLOSING_STATE);
        // ... then perform the action through the context.
        super.context.close();
    }

    public void run() {
        // An open lift must not run.
        //do nothing
    }

    public void stop() {
        // An open lift is already stopped. do nothing
    }
}
<file_sep>package com.neo.structure.decorator;
/**
 * @Author : neo
 * @Date 2021/3/24 19:46
 * Fried rice (concrete component of the decorator pattern): a base dish
 * with a fixed price, decorators add toppings on top.
 */
public class FiredRice extends FastFood {
    public FiredRice() {
        // Base price 10, description "fried rice".
        super(10, "炒饭");
    }

    // Cost of the bare dish: just its own price.
    public float cost() {
        return getPrice();
    }
}
<file_sep>package com.neo.creator.Factory.StaticFactory;
/**
 * @Description Author neo
 * Date 2021/3/19 17:24
 * Concrete product of the static-factory example: a latte.
 */
public class LatteCoffee extends Coffee {
    public String getName() {
        return "拿铁咖啡";
    }
}
<file_sep>package com.neo.behavor.responsibility;
/**
 * @Author : neo
 * @Date 2021/3/27 17:35
 * Department manager (concrete handler of the chain of responsibility):
 * approves leave requests from 3 up to 7 days.
 */
public class Manager extends Handler {
    public Manager() {
        // This handler's range: [3, 7) days.
        super(Handler.NUM_THREE, Handler.NUM_SEVEN);
    }

    public void handleLeave(LeaveRequest leave) {
        System.out.println(leave.getName() + "请假" + leave.getNum() + "天," + leave.getContent() + "。");
        System.out.println("部门经理审批:同意!");
    }
}
<file_sep>artifactId=study
groupId=com.neo
version=0.0.1-SNAPSHOT
<file_sep>package com.neo.behavor.iterator;
/**
 * @Author : neo
 * @Date 2021/3/28 16:21
 * Abstract aggregate role (iterator pattern, non-generic student-only
 * variant): stores/adds/removes students and creates an iterator.
 */
public interface StudentAggregate {
    // Add a student.
    void addStudent(Student student);

    // Remove a student.
    void removeStudent(Student student);

    // Create an iterator over the students.
    StudentIterator getStudentIterator();
}
<file_sep>package com.neo.study.Runnable.demo01;
/**
 * @Description Author neo
 * Date 2021/3/20 0:03
 * Runnable demo: three MyThread tasks (name + presumably a millisecond
 * duration -- confirm in MyThread) each started on its own Thread.
 */
public class RunnableDemo {
    public static void main(String[] args) {
        MyThread myThread1 = new MyThread("线程A", 1000);
        MyThread myThread2 = new MyThread("线程B", 2000);
        MyThread myThread3 = new MyThread("线程C", 5000);
        // Start the three threads; they run concurrently.
        new Thread(myThread1).start();
        new Thread(myThread2).start();
        new Thread(myThread3).start();
    }
}
<file_sep>package com.neo.study;
/**
* @Author : neo
* @Date 2021/7/26 11:20
* @Description : TODO
*/
// Parent class for the polymorphism/downcast demo below.
class Base {
    public void method() {
        System.out.println("Base");
    }
}
// Subclass: overrides method() and adds a subclass-only methodB().
class Son extends Base {
    public void method() {
        System.out.println("Son");
    }

    public void methodB() {
        System.out.println("SonB");
    }
}
public class test01 {
    public static void main(String[] args) {
        // Upcast: static type Base, dynamic type Son.
        Base base = new Son();
        base.method(); // dynamic dispatch -> prints "Son"
        // Downcast to reach the subclass-only member.
        Son son = (Son) base;
        son.method();
        son.methodB();
        // base.methodB(); // would not compile: methodB is not on Base
    }
}
<file_sep>package com.neo.structure.adapter.objectAdapter;
/**
 * @Author : neo
 * @Date 2021/3/24 13:59
 * Concrete adaptee: stub TF-card implementation that echoes reads/writes
 * to stdout.
 */
public class TFCardImpl implements TFCard {
    public String readTF() {
        String msg = "tf card read msg : hello world tf card";
        return msg;
    }

    public void WriteTF(String msg) {
        System.out.println("tf card write msg :" + msg);
    }
}
<file_sep>package com.neo.structure.proxy.StaticProxy;
/**
 * @Description Author neo
 * @Date 2021/3/23 16:36
 * Abstract subject of the proxy pattern: the ticket-selling contract
 * shared by the real station and its proxies.
 */
public interface SellTickets {
    void sell();
}
<file_sep>package com.neo.behavor.iterator.after;
/**
 * @Author : neo
 * @Date 2021/3/28 16:16
 * Abstract iterator role (generic variant): the usual hasNext()/next()
 * traversal contract.
 */
public interface Iterator<T> {
    // True while there are still elements left to visit.
    boolean hasNext();

    // Returns the next element and advances the cursor.
    T next();
}
<file_sep>package com.neo.creator.Factory.FactoryMethod;
/**
 * @Description Author neo
 * Date 2021/3/19 17:29
 * Coffee shop for the factory-method demo: delegates coffee creation to
 * an injected factory, then finishes the drink.
 * NOTE(review): class name is a typo of "CoffeeStore"; renaming would
 * break callers, so it is only flagged here.
 */
public class CoffeeStroe {
    // Injected factory deciding which coffee to brew.
    private CoffeeFactory factory;

    public void setFactory(CoffeeFactory factory) {
        this.factory = factory;
    }

    // Creates a coffee via the factory, adds milk and sugar, returns it.
    // NOTE(review): throws NullPointerException if setFactory was never called.
    public Coffee orderCoffee() {
        Coffee coffee = factory.createCoffee();
        coffee.addMilk();
        coffee.addSugar();
        return coffee;
    }
}
<file_sep>package com.neo.service.impl;
import com.neo.mapper.UserMapper;
import com.neo.pojo.User;
import com.neo.service.UserService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
/**
 * @Author : neo
 * @Date 2021/7/22 16:40
 * User lookup service: thin delegation to the MyBatis UserMapper.
 */
@Service
public class UserServiceImpl implements UserService {
    @Autowired
    private UserMapper userMapper;

    @Override
    public User queryUserByName(String name) {
        // Presumably returns null when no such user exists -- confirm in the
        // mapper XML.
        return userMapper.queryUserByName(name);
    }
}
<file_sep>package com.neo.vo;
import com.neo.vo.utils.IErrorCode;
import com.neo.vo.utils.ResultCode;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.util.List;
/**
 * @Author : neo
 * @Date 2021/7/27 11:05
 * Generic list-response envelope in the layui table shape:
 * code + msg + total record count + the page of data. Built via the
 * static factory methods below rather than public constructors.
 */
@Data
// @NoArgsConstructor
public class DataVo<T> {
    // Status code (see ResultCode).
    private Integer code;
    // Human-readable status message.
    private String msg;
    // Total record count (for paging), not the size of `data`.
    private Integer count;
    // The page of rows.
    private List<T> data;

    protected DataVo(Integer code, String msg, Integer count, List<T> data) {
        this.code = code;
        this.msg = msg;
        this.count = count;
        this.data = data;
    }

    protected DataVo() {
    }

    /**
     * Success result.
     *
     * @param data 获取的数据 (the retrieved rows)
     */
    public static <T> DataVo<T> success(Integer count, List<T> data) {
        return new DataVo<T>(ResultCode.SUCCESS.getCode(), ResultCode.SUCCESS.getMsg(), count, data);
    }

    /**
     * Success result with a custom message.
     *
     * @param data 获取的数据 (the retrieved rows)
     * @param message 提示信息 (message shown to the caller)
     */
    public static <T> DataVo<T> success(String message, Integer count, List<T> data) {
        return new DataVo<T>(ResultCode.SUCCESS.getCode(), message, count, data);
    }

    /**
     * Failure result from a specific error code (extension point).
     *
     * @param errorCode 错误码 (the error code)
     */
    public static <T> DataVo<T> failed(IErrorCode errorCode) {
        return new DataVo<T>(errorCode.getCode(), errorCode.getMsg(), null,null);
    }

    /**
     * Failure result with a custom message.
     *
     * @param message 提示信息 (message shown to the caller)
     */
    public static <T> DataVo<T> failed(String message) {
        return new DataVo<T>(ResultCode.FAILED.getCode(), message, null,null);
    }

    /**
     * Default failure result.
     */
    public static <T> DataVo<T> failed() {
        return failed(ResultCode.FAILED);
    }

    /**
     * Parameter-validation failure (default message).
     */
    public static <T> DataVo<T> validateFailed() {
        return failed(ResultCode.VALIDATE_FAILED);
    }

    /**
     * Parameter-validation failure with a custom message.
     *
     * @param message 提示信息 (message shown to the caller)
     */
    public static <T> DataVo<T> validateFailed(String message) {
        return new DataVo<T>(ResultCode.VALIDATE_FAILED.getCode(), message, null, null);
    }

    /**
     * Not-logged-in result.
     */
    public static <T> DataVo<T> unauthorized() {
        return new DataVo<T>(ResultCode.UNAUTHORIZED.getCode(), ResultCode.UNAUTHORIZED.getMsg(),null, null);
    }

    /**
     * Not-authorized (forbidden) result.
     */
    public static <T> DataVo<T> forbidden() {
        return new DataVo<T>(ResultCode.FORBIDDEN.getCode(), ResultCode.FORBIDDEN.getMsg(), null,null);
    }
}
<file_sep>package com.neo.behavor.command;
/**
 * @Author : neo
 * @Date 2021/3/27 15:36
 * Abstract command role (command pattern): a single execute() entry point
 * that invokers call without knowing the receiver.
 */
public interface Command {
    // Run the encapsulated request.
    void execute();
}
<file_sep>package com.neo.controller;
import com.neo.entity.User;
import com.neo.service.UserService;
import com.neo.vo.DataVo;
import org.springframework.stereotype.Controller;
import org.springframework.ui.Model;
import org.springframework.web.bind.annotation.*;
import javax.annotation.Resource;
import java.util.List;
/**
 * Controller for the User table: serves the table view plus the JSON data
 * feed consumed by the front-end grid.
 *
 * @author makejava
 * @since 2021-07-27 10:56:14
 */
@Controller
public class UserController {

    /** Service object. */
    @Resource
    private UserService userService;

    /** Renders the main table page. */
    @GetMapping("/")
    public String index() {
        return "table";
    }

    /**
     * Fetches a single row by primary key.
     *
     * @param id primary key
     * @return the matching record, serialized to the response body
     */
    @GetMapping("/selectOne")
    @ResponseBody // fix: without @ResponseBody a @Controller return value is resolved as a view name
    public User selectOne(Integer id) {
        return this.userService.queryById(id);
    }

    /**
     * Pages through all users. The parameter names (page/limit) follow the
     * request protocol of the front-end table component.
     */
    // @GetMapping("/selectAll/{page}/{size}")
    @GetMapping("/selectAll")
    @ResponseBody
    public DataVo<User> selectAll(@RequestParam(value = "page") Integer page,
                                  @RequestParam(value = "limit") Integer size) {
        List<User> users = userService.queryAllByLimit(page, size);
        Integer recordNum = userService.getRecordNum();
        if (!users.isEmpty()) {
            return DataVo.success(recordNum, users);
        } else {
            return DataVo.failed();
        }
    }

    /**
     * Deletes a row by primary key.
     *
     * @param id primary key
     */
    @PostMapping("/delete/{id}")
    @ResponseBody // fix: a void @Controller handler otherwise falls back to default view resolution
    public void delete(@PathVariable("id") Integer id) {
        userService.deleteById(id);
        // return DataVo.success(1, null);
    }
}<file_sep>package com.neo.behavor.state.after;
/**
 * @Author : neo
 * @Date 2021/3/28 10:32
 * @Description : Concrete state class: the lift while it is moving.
 *                Transitions leave this state only via stop().
 */
public class RunningState extends LiftState {
    // Doors cannot open while the lift is moving.
    public void open() {
        //do nothing
    }
    // While running the doors are already closed, so close() is a no-op.
    public void close() {
        //do nothing
    }
    // The action belonging to this state.
    public void run() {
        System.out.println("电梯正在运行...");
    }
    // A running lift may stop.
    public void stop() {
        // Switch the shared context over to the stopping state...
        super.context.setLiftState(Context.STOPPING_STATE);
        // ...then delegate, so the new state performs the actual stop.
        super.context.stop();
    }
}
<file_sep>package com.neo.dataStructure;
import java.security.PublicKey;
import java.util.Arrays;
import java.util.Deque;
import java.util.LinkedList;
import java.util.Queue;
/**
 * @Description Queue with O(1) amortized max_value(): a FIFO queue stores
 * the elements while a monotonically decreasing deque keeps the candidates
 * for the current maximum. Author neo
 * Date 2021/3/14 15:14
 */
public class MaxQueue {
    Queue<Integer> queue;
    Deque<Integer> deque;

    public MaxQueue() {
        this.queue = new LinkedList<>();
        this.deque = new LinkedList<>();
    }

    /** Current maximum, or -1 when the queue is empty. */
    public int max_value() {
        if (deque.isEmpty()) {
            return -1;
        }
        return deque.peekFirst();
    }

    /** Appends a value, evicting smaller tail candidates from the deque. */
    public void push_back(int value) {
        queue.offer(value);
        // Anything smaller than the new value can never be the maximum again.
        while (!deque.isEmpty() && deque.peekLast() < value) {
            deque.pollLast();
        }
        deque.offerLast(value);
    }

    /** Removes and returns the oldest value, or -1 when empty. */
    public int pop_front() {
        if (deque.isEmpty()) {
            return -1;
        }
        Integer head = queue.poll();
        // If the departing element was the current maximum, retire it too.
        if (head.equals(deque.peekFirst())) {
            deque.pollFirst();
        }
        return head;
    }

    public static void main(String[] args) {
        MaxQueue mq = new MaxQueue();
        mq.push_back(1);
        mq.push_back(-2);
        mq.push_back(3);
        System.out.println(mq.max_value());
        System.out.println(mq.pop_front());
    }
}
<file_sep>package com.neo.excelutil.controller;
import com.neo.excelutil.entity.User;
import com.neo.excelutil.service.UserService;
import com.neo.excelutil.utils.ExcelUtil;
import com.neo.excelutil.utils.ExcelUtils;
import org.apache.poi.openxml4j.exceptions.InvalidFormatException;
import org.apache.tomcat.util.http.fileupload.FileUtils;
import org.springframework.stereotype.Controller;
import org.springframework.ui.Model;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.multipart.MultipartFile;
import javax.annotation.Resource;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.lang.reflect.InvocationTargetException;
import java.util.ArrayList;
import java.util.List;
/**
 * Controller for the User table plus Excel import/export endpoints.
 *
 * @author makejava
 * @since 2021-07-30 18:39:48
 */
@Controller
public class UserController {

    /** Service object. */
    @Resource
    private UserService userService;

    /** Renders the landing page. */
    @GetMapping("/")
    public String index() {
        return "index";
    }

    /**
     * Fetches a single row by primary key.
     *
     * @param id primary key
     * @return the matching record, serialized to the response body
     */
    @GetMapping("selectOne/{id}")
    @ResponseBody // fix: without @ResponseBody a @Controller return value is resolved as a view name
    public User selectOne(@PathVariable("id") Integer id) {
        return this.userService.queryById(id);
    }

    /* Import usage:
        inputStream = uploadedFile.getInputStream();
        List<SlrEmpSalary> empSalaryList =
            (List<SlrEmpSalary>) ExcelUtils.parseExcelToList(inputStream, SlrEmpSalary.class);
       Export usage:
        List<SlrEmpSalary> dataList = new ArrayList<SlrEmpSalary>();
        FileUtils.exportExcel(outputStream, dataList, SlrEmpSalary.class, Const.ALL_SELECT_LIST_MAP, exportTitle);
    */
    /**
     * Imports users from an uploaded Excel workbook and renders them.
     *
     * @param uploadedFile the posted workbook
     * @param model        view model receiving the parsed user list
     */
    @PostMapping("/impFile")
    public String upload(@RequestParam(value = "file") MultipartFile uploadedFile, Model model) throws IOException, InvocationTargetException, NoSuchMethodException, InvalidFormatException, InstantiationException, IllegalAccessException {
        if (uploadedFile.isEmpty()) {
            return "redirect:/";
        }
        // File input stream of the uploaded workbook.
        InputStream inputStream = uploadedFile.getInputStream();
        // Parse the sheet rows into User objects.
        List<User> userList =
                (List<User>) ExcelUtils.parseExcelToList(inputStream, User.class);
        model.addAttribute("userList", userList);
        return "record";
    }

    /**
     * Exports up to 100 users as an Excel workbook written to the response.
     * NOTE(review): no Content-Type/Content-Disposition headers are set here;
     * presumably ExcelUtils.exportExcel handles them — confirm.
     */
    @GetMapping("/exportFile")
    public void export(HttpServletResponse response) throws IOException {
        List<User> userList = userService.queryAllByLimit(0, 100);
        OutputStream outputStream = response.getOutputStream();
        // ExcelUtil.exportExcel(userList,"title","sheetname", User.class,"新闻数据表.xls", response);
        ExcelUtils.exportExcel(outputStream, userList, User.class, null, "输出.xls");
    }
}
<file_sep>package com.neo.study.lesson01;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
/**
 * @Description Demo of java.net.InetSocketAddress: builds socket addresses
 * from a literal IP and from a host name, then prints their parts.
 * Author neo
 * Date 2020/11/24 11:05
 */
public class TestInetSocketAddress {
    public static void main(String[] args) {
        InetSocketAddress byIp = new InetSocketAddress("127.0.0.1", 8080);
        InetSocketAddress byHost = new InetSocketAddress("localhost", 8080);
        System.out.println(byIp);
        System.out.println(byHost);
        System.out.println(byIp.getAddress());
        System.out.println(byIp.getHostName()); // host part
        System.out.println(byIp.getPort()); // port part
    }
}
<file_sep>package com.neo.behavor.iterator;
import java.util.ArrayList;
import java.util.List;
/**
 * @Author : neo
 * @Date 2021/3/28 16:23
 * @Description : Concrete aggregate of the Iterator pattern: stores the
 *                students and hands out a concrete iterator over them.
 */
public class StudentAggregateImpl implements StudentAggregate {

    // Backing store for the aggregated students.
    private List<Student> students = new ArrayList<Student>();

    public void addStudent(Student student) {
        this.students.add(student);
    }

    public void removeStudent(Student student) {
        this.students.remove(student);
    }

    public StudentIterator getStudentIterator() {
        return new StudentIteratorImpl(this.students);
    }
}
<file_sep>package com.neo.behavor.state.after;
/**
 * @Author : neo
 * @Date 2021/3/28 11:01
 * @Description : Driver for the State-pattern lift demo.
 */
public class Client {
    public static void main(String[] args) {
        Context context = new Context();
        // Seed the context with an initial state before issuing commands.
        context.setLiftState(Context.STOPPING_STATE);
        context.open();
        context.close();
        context.run();
        context.stop();
    }
}
<file_sep>package com.neo.structure.decorator02;
/**
 * @Author : neo
 * @Date 2021/3/25 9:32
 * @Description : Concrete Shape that draws a rectangle (decorator demo).
 */
public class Rectangle implements Shape {
    public void draw() {
        System.out.println("画了一个矩形");
    }
}
<file_sep>package com.neo.creator.Factory.AbstractFactory;
/**
 * @Description Concrete factory of the Abstract Factory demo: produces the
 * "American" product family — American coffee paired with matcha mousse.
 * Author neo
 * Date 2021/3/19 17:51
 */
public class AmericanDessertFactory implements DessertFactory {
    public Coffee createCoffee() {
        return new AmericanCoffee();
    }
    public Dessert createDessert() {
        return new MatchaMousse();
    }
}
<file_sep>package com.neo.behavor.mediator;
/**
 * @Author : neo
 * @Date 2021/3/28 15:23
 * @Description : Concrete colleague of the Mediator demo: the house owner
 *                only ever talks through the mediator.
 */
public class HouseOwner extends Person {
    public HouseOwner(String name, Mediator mediator) {
        super(name, mediator);
    }
    // Sends a message via the mediator (the original comment said
    // "terminator" — a typo for "mediator").
    public void communicate(String message) {
        mediator.communicate(message, this);
    }
    // Receives a message relayed by the mediator.
    public void getMessage(String message) {
        System.out.println("房主" + name + "获取到的信息: " + message);
    }
}
<file_sep>package com.neo.structure.proxy.JDK_Proxy;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
/**
 * @Author : neo
 * @Date 2021/3/23 16:51
 * @Description : Proxy factory that builds a JDK dynamic proxy around a
 *                real TrainStation.
 *
 * Proxy.newProxyInstance(loader, interfaces, handler) returns an object
 * implementing the same interfaces as the real subject and routing every
 * interface call through the handler.
 */
public class ProxyFactory {

    private TrainStation station = new TrainStation();

    public SellTickets getProxyObject() {
        // InvocationHandler is a functional interface, so the call handler
        // can be written as a lambda: (proxy, method, args) -> result.
        return (SellTickets) Proxy.newProxyInstance(
                station.getClass().getClassLoader(),
                station.getClass().getInterfaces(),
                (proxy, method, args) -> {
                    System.out.println("代售点收取服务费");
                    // Dispatch to the real subject and hand back its result.
                    return method.invoke(station, args);
                });
    }
}
<file_sep>package com.neo.behavor.state.before;
/**
 * @Author : neo
 * @Date 2021/3/28 9:50
 * @Description : "Before" version of the State pattern demo: one class that
 * keeps the lift state as an int and branches on it in every operation.
 * State table: doors open only when closed or stopped, the lift runs only
 * with closed doors, and it stops only while closed or running.
 */
public class Lift implements ILift {
    // Current state, one of the ILift *_STATE constants.
    private int state;

    public void setState(int state) {
        this.state = state;
    }

    /** Opens the doors where the state table allows it. */
    public void open() {
        switch (this.state) {
            case OPENING_STATE: // already open, nothing to do
                //do nothing
                break;
            case CLOSING_STATE: // closed doors may open
                System.out.println("电梯门打开了"); // fix: message had a stray leading "j"
                this.setState(OPENING_STATE);
                break;
            case RUNNING_STATE: // never while moving
                //do nothing
                break;
            case STOPPING_STATE: // stopped lifts may open
                System.out.println("电梯门打开了");
                this.setState(OPENING_STATE);
                break;
        }
    }

    /** Closes the doors; only the open state allows it. */
    public void close() {
        switch (this.state) {
            case OPENING_STATE: // only open doors can close
                System.out.println("电梯关门了。。。");
                this.setState(CLOSING_STATE);
                break;
            case CLOSING_STATE:
                //do nothing — already closed
                break;
            case RUNNING_STATE:
                //do nothing — doors are closed while running
                break;
            case STOPPING_STATE:
                //do nothing — doors are closed while stopped
                break;
        }
    }

    /** Starts the lift; requires closed doors. */
    public void run() {
        switch (this.state) {
            case OPENING_STATE: // cannot run with open doors
                //do nothing
                break;
            case CLOSING_STATE: // doors closed: may run
                System.out.println("电梯开始运行了。。。");
                this.setState(RUNNING_STATE);
                break;
            case RUNNING_STATE:
                //do nothing — already running
                break;
            case STOPPING_STATE: // stopped lifts may start again
                System.out.println("电梯开始运行了。。。");
                this.setState(RUNNING_STATE);
                break;
        }
    }

    /** Stops the lift from the closed or running state. */
    public void stop() {
        switch (this.state) {
            case OPENING_STATE: // an open lift is already standing still
                //do nothing
                break;
            case CLOSING_STATE:
                System.out.println("电梯停止了。。。");
                this.setState(STOPPING_STATE);
                break;
            case RUNNING_STATE:
                System.out.println("电梯停止了。。。");
                this.setState(STOPPING_STATE);
                break;
            case STOPPING_STATE:
                //do nothing
                break;
        }
    }
}
<file_sep>package com.neo.study.Runnable.demo03;
/**
 * @Description Shared message slot for a producer/consumer demo.
 * flag == false: the slot holds data — consumers may read, producers wait.
 * flag == true : the slot was consumed — producers may write, consumers wait.
 * Author neo
 * Date 2021/3/20 10:17
 */
public class Info {
    private String name = "李兴华"; // message name, with a default value
    private String content = "JAVA讲师"; // message content, with a default value
    boolean flag = false; // hand-over flag alternating producer and consumer

    public synchronized void set(String name, String content) {
        // fix: wait() must sit in a loop, not an if — the condition has to be
        // re-checked after every wake-up (spurious wake-ups, several waiters).
        while (!flag) {
            try {
                super.wait();
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
        this.setName(name);
        try {
            Thread.sleep(300); // simulate slow production
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
        this.setContent(content);
        flag = false; // data ready to be taken
        super.notify(); // wake a waiting consumer
    }

    public synchronized void get() {
        // fix: loop instead of if, for the same reason as in set().
        while (flag) {
            try {
                super.wait();
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
        try {
            Thread.sleep(300); // simulate slow consumption
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
        System.out.println(this.getName() + "-->" + this.getContent());
        flag = true; // slot free: producer may fill it again
        super.notify(); // wake a waiting producer
    }

    //// setters and getters
    public String getName() {
        return name;
    }
    public void setName(String name) {
        this.name = name;
    }
    public String getContent() {
        return content;
    }
    public void setContent(String content) {
        this.content = content;
    }
}
<file_sep>package com.neo.structure.flyweight;
/**
 * @Author : neo
 * @Date 2021/3/26 15:20
 * @Description : Flyweight concrete class: the O-shaped block. The shape
 *                is the intrinsic (shared) state.
 */
public class OBox extends AbstractBox {
    public String getShape() {
        return "O";
    }
}
<file_sep>package com.neo.behavor.state.after;
/**
 * @Author : neo
 * @Date 2021/3/28 10:32
 * @Description : Concrete state class: the lift while stopped. Every other
 *                state is reachable from here.
 */
public class StoppingState extends LiftState {
    // Stopped: the doors may open.
    public void open() {
        // Switch states...
        super.context.setLiftState(Context.OPENNING_STATE);
        // ...then delegate so the new state performs the open.
        super.context.open();
    }
    // Stopped: the doors may close.
    public void close() {
        // Switch states...
        super.context.setLiftState(Context.CLOSING_STATE);
        // ...then delegate so the new state performs the close.
        super.context.close();
    }
    // Stopped: the lift may start running.
    public void run() {
        // Switch states...
        super.context.setLiftState(Context.RUNNING_STATE);
        // ...then delegate so the new state performs the run.
        super.context.run();
    }
    // The action belonging to this state.
    public void stop() {
        System.out.println("电梯停止了...");
    }
}
<file_sep>package com.neo.creator.Factory.FactoryMethod;
/**
 * @Description Driver for the Factory Method demo: injects a concrete
 * coffee factory into the store and orders a coffee.
 * (Note: "CoffeeStroe" is a typo of "CoffeeStore" in the class defined
 * elsewhere — kept as-is here.)
 * Author neo
 * Date 2021/3/19 17:43
 */
public class Client {
    public static void main(String[] args) {
        CoffeeStroe stroe = new CoffeeStroe();
        // AmericanCoffeeFactory factory = new AmericanCoffeeFactory();
        LatteCoffeeFactory factory = new LatteCoffeeFactory();
        stroe.setFactory(factory);
        Coffee coffee = stroe.orderCoffee();
        System.out.println(coffee.getName());
    }
}
<file_sep>package com.neo.behavor.state.after;
import com.sun.org.apache.bcel.internal.generic.NEW;
/**
 * @Author : neo
 * @Date 2021/3/28 10:23
 * @Description : Environment role of the State pattern: holds the current
 * LiftState and delegates every operation to it. The shared static
 * instances double as the set of all reachable states.
 */
public class Context {
    // Every state the lift can be in (shared, stateless instances).
    public static final OpenningState OPENNING_STATE = new OpenningState();
    public static final ClosingState CLOSING_STATE = new ClosingState();
    public static final RunningState RUNNING_STATE = new RunningState();
    public static final StoppingState STOPPING_STATE = new StoppingState();

    // The lift's current state; all behaviour is delegated to it.
    private LiftState current;

    public void setLiftState(LiftState liftState) {
        this.current = liftState;
        // Hand this context back to the state so it can trigger transitions.
        this.current.setContext(this);
    }

    public void open() {
        current.open();
    }

    public void close() {
        current.close();
    }

    public void run() {
        current.run();
    }

    public void stop() {
        current.stop();
    }
}
<file_sep>package com.neo.structure.flyweight;
import javax.swing.plaf.metal.MetalCheckBoxIcon;
/**
 * @Author : neo
 * @Date 2021/3/26 15:37
 * @Description : Driver for the Flyweight demo. ("Clent" is a typo of
 * "Client" — kept, since renaming would change the public class name.)
 * Requests boxes from the factory and shows that equal shapes share
 * a single instance; colour is the extrinsic state passed at display time.
 */
public class Clent {
    public static void main(String[] args) {
        // Fetch the I-shaped flyweight and render it with a colour.
        AbstractBox ibox = BoxFactory.getInstance().getBox("I");
        ibox.display("灰色");
        // Fetch the L-shaped flyweight.
        AbstractBox lbox = BoxFactory.getInstance().getBox("L");
        lbox.display("绿色");
        // Fetch the O-shaped flyweight.
        AbstractBox obox = BoxFactory.getInstance().getBox("O");
        obox.display("灰色");
        // Fetch the O shape a second time: expect the same instance back.
        AbstractBox oobox = BoxFactory.getInstance().getBox("O");
        oobox.display("红色");
        System.out.println("两次获取到的O图形对象是否是同一对象:" + (obox == oobox));
    }
}
<file_sep>package com.neo.behavor.memento.black_box;
/**
 * @Author : neo
 * @Date 2021/3/29 10:30
 * @Description : Driver for the black-box Memento demo: save the game
 *                role's state, damage it in a fight, then restore it.
 */
public class Client {
    public static void main(String[] args) {
        System.out.println("-------------------大战Boss前--------------------------");
        // Create the game role and show its initial stats.
        GameRole gameRole = new GameRole();
        gameRole.initState();
        gameRole.stateDisplay();
        // Save progress via the caretaker, which holds the memento.
        RoleStateCaretaker roleStateCaretaker = new RoleStateCaretaker();
        roleStateCaretaker.setMemento(gameRole.saveState());
        System.out.println("-------------------大战Boss后--------------------------");
        // Fighting the boss drains the role's stats.
        gameRole.fight();
        gameRole.stateDisplay();
        System.out.println("-------------------恢复之前状态-----------------------------------");
        // Restore the previously saved state from the caretaker.
        gameRole.recoverState(roleStateCaretaker.getMemento());
        gameRole.stateDisplay();
    }
}
<file_sep>package com.neo.study.lesson01;
import java.net.InetAddress;
import java.net.UnknownHostException;
/**
 * @Description Demo of java.net.InetAddress lookups: local-host info plus
 * loopback and remote-host resolution (the remote lookup needs network
 * access; failures are caught and printed).
 * Author neo
 * Date 2020/11/23 22:53
 */
public class TestInetAddress {
    public static void main(String[] args) throws UnknownHostException {
        // Local machine info.
        InetAddress localHost = InetAddress.getLocalHost();
        System.out.println("计算机名:" + localHost.getHostName());
        // fix: getAddress() returns byte[], which concatenates as "[B@...";
        // getHostAddress() yields the textual IP the label clearly intends.
        System.out.println("本机地址:" + localHost.getHostAddress());
        try {
            // Look up the loopback address directly.
            InetAddress inetAddresses1 = InetAddress.getByName("127.0.0.1");
            System.out.println(inetAddresses1);
            // Resolve the local machine via "localhost".
            InetAddress inetAddresses3 = InetAddress.getByName("localhost");
            System.out.println(inetAddresses3);
            // Resolve a public website (requires DNS/network).
            InetAddress inetAddresses2 = InetAddress.getByName("www.baidu.com");
            System.out.println(inetAddresses2);
            // Common accessors.
            System.out.println(inetAddresses2.getAddress());
            System.out.println(inetAddresses2.getHostName()); // domain or machine name
            System.out.println(inetAddresses2.getHostAddress()); // IP
            System.out.println(inetAddresses2.getCanonicalHostName()); // canonical host name
        } catch (UnknownHostException e) {
            e.printStackTrace();
        }
    }
}
<file_sep>package com.neo.behavor.memento.black_box;
import com.neo.behavor.mediator.Mediator;
import com.neo.behavor.memento.white_box.RoleStateMemento;
/**
 * @Author : neo
 * @Date 2021/3/29 10:29
 * @Description : Caretaker of the Memento pattern: stores the memento on
 *                the role's behalf without inspecting its contents
 *                ("black-box" variant via the narrow Memento interface).
 */
public class RoleStateCaretaker {
    // The memento being kept for later restoration.
    private Memento memento;
    public Memento getMemento() {
        return memento;
    }
    public void setMemento(Memento memento) {
        this.memento = memento;
    }
}
<file_sep>package com.neo.behavor.mediator;
/**
 * @Author : neo
 * @Date 2021/3/28 15:27
 * @Description : Concrete colleague of the Mediator demo: the tenant only
 *                ever talks through the mediator.
 */
public class Tenant extends Person {
    public Tenant(String name, Mediator mediator) {
        super(name, mediator);
    }
    // Sends a message via the mediator.
    public void communicate(String message) {
        mediator.communicate(message, this);
    }
    // Receives a message relayed by the mediator.
    public void getMessage(String message) {
        System.out.println("租房者" + name + "获取到的信息: " + message);
    }
}
<file_sep>package com.neo.creator.Factory.AbstractFactory;
/**
 * @Description Abstract dessert product of the Abstract Factory demo.
 * Author neo
 * Date 2021/3/19 17:47
 */
public abstract class Dessert {
    public abstract void show();
}
| 9002c0f18bb01b7a895651b6011ce67c5856ac20 | [
"Markdown",
"Java",
"INI"
] | 88 | Java | liangbingtao/JavaLife | 3f507c31b3c198e9498e777a7d6c76a2b86b5f78 | dbfc1efba1173761c28389fd36d1a6d543206bbc |
refs/heads/master | <file_sep>using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using UnityEngine.Events;
using UnityEngine.SceneManagement;
/// <summary>
/// Click handler for the "south wall" menu: raycasts against this object's
/// collider on mouse-down and fires OnClick. The public methods below load
/// the scenes (by build index) wired to the individual UI buttons.
/// </summary>
public class ClickButtonSouth : MonoBehaviour {

    public GameObject definedButton;
    public UnityEvent OnClick = new UnityEvent();

    // Use this for initialization
    void Start () {
        definedButton = this.gameObject;
    }

    // Update is called once per frame
    void Update () {
        // fix: build the ray only on the frame the button is pressed; the
        // original computed it every frame and discarded it without a click.
        if (Input.GetMouseButtonDown(0))
        {
            var ray = Camera.main.ScreenPointToRay(Input.mousePosition);
            RaycastHit Hit;
            if (Physics.Raycast(ray, out Hit) && Hit.collider.gameObject == gameObject)
            {
                Debug.Log("Button Clicked");
                OnClick.Invoke();
            }
        }
    }

    // South wall scene loaders (arguments are scene build indices).
    public void Play() {
        SceneManager.LoadScene(30);
    }
    public void Fix() {
        SceneManager.LoadScene(29);
    }
    public void SouthScene1() {
        SceneManager.LoadScene(31);
    }
    public void SouthScene2() {
        SceneManager.LoadScene(32);
    }
    public void SouthScene3Jpr1() {
        SceneManager.LoadScene(33);
    }
    public void SouthScene3Jpr2() {
        SceneManager.LoadScene(34);
    }
    public void SouthScene3Jpr3() {
        SceneManager.LoadScene(35);
    }
    public void SouthScene3Go() {
        SceneManager.LoadScene(36);
    }
    public void SouthScene5Fresco() {
        SceneManager.LoadScene(37);
    }
    public void SouthScene5Secco() {
        SceneManager.LoadScene(38);
    }
    public void SouthScene4Go()
    {
        SceneManager.LoadScene(39);
    }
    public void SouthScene8()
    {
        SceneManager.LoadScene(40);
    }
    // Back to the wall overview scene.
    public void Wall()
    {
        SceneManager.LoadScene(1);
    }
}<file_sep>using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using UnityEngine.Events;
using UnityEngine.SceneManagement;
/// <summary>
/// Click handler for the "north wall" menu: raycasts against this object's
/// collider on mouse-down and fires OnClick. The public methods below load
/// the scenes (by build index) wired to the individual UI buttons.
/// </summary>
public class ClickButton : MonoBehaviour {

    public GameObject definedButton;
    public UnityEvent OnClick = new UnityEvent();

    // Use this for initialization
    void Start () {
        definedButton = this.gameObject;
    }

    // Update is called once per frame
    void Update () {
        // fix: build the ray only on the frame the button is pressed; the
        // original computed it every frame and discarded it without a click.
        if (Input.GetMouseButtonDown(0))
        {
            var ray = Camera.main.ScreenPointToRay(Input.mousePosition);
            RaycastHit Hit;
            if (Physics.Raycast(ray, out Hit) && Hit.collider.gameObject == gameObject)
            {
                Debug.Log("Button Clicked");
                OnClick.Invoke();
            }
        }
    }

    // North wall scene loaders (arguments are scene build indices).
    public void Play() {
        SceneManager.LoadScene(3);
    }
    public void Fix() {
        SceneManager.LoadScene(26);
    }
    public void NorthScene1() {
        SceneManager.LoadScene(4);
    }
    public void NorthScene2() {
        SceneManager.LoadScene(5);
    }
    public void NorthScene3Jpr1() {
        SceneManager.LoadScene(6);
    }
    public void NorthScene3Jpr2() {
        SceneManager.LoadScene(8);
    }
    public void NorthScene3Jpr3() {
        SceneManager.LoadScene(9);
    }
    public void NorthScene3Go() {
        SceneManager.LoadScene(7);
    }
    public void NorthScene5Fresco() {
        SceneManager.LoadScene(10);
    }
    public void NorthScene5Secco() {
        SceneManager.LoadScene(11);
    }
    public void NorthScene4Go()
    {
        SceneManager.LoadScene(12);
    }
    public void NorthScene8()
    {
        SceneManager.LoadScene(13);
    }
    // Back to the wall overview scene.
    public void Wall()
    {
        SceneManager.LoadScene(1);
    }
}<file_sep>using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using UnityEngine.SceneManagement;
/* Main-menu controller: loads the AR scene (Play), an info scene (Info),
 * and closes the application (Quit). Scene numbers are build indices.
 */
public class Menu : MonoBehaviour {
    // Load the main AR scene (build index 1).
    public void Play() {
        SceneManager.LoadScene(1);
    }// play game
    // Close the application.
    public void Quit() {
        Debug.Log("Exiting game");
        Application.Quit();
    }
    // Load the info scene (build index 25).
    public void Info() {
        SceneManager.LoadScene(25);
    }
}
<file_sep>using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using UnityEngine.SceneManagement;
/// Scene-navigation buttons for the wall view: video, home, menu and exit.
public class ButtonSceneWall : MonoBehaviour
{
    // Load the video scene (build index 2).
    public void PlayVideo() {
        SceneManager.LoadScene(2);
    }
    // Back to the wall/home scene (build index 1).
    public void Home() {
        SceneManager.LoadScene(1);
    }
    // Back to the main menu scene (build index 0).
    public void Menu() {
        SceneManager.LoadScene(0);
    }
    // NOTE(review): lower-case name breaks the PascalCase convention of the
    // other handlers; kept, since renaming could break Inspector wiring.
    public void exit()
    {
        Debug.Log("Exiting game");
        Application.Quit();
    }
}
<file_sep>using UnityEngine;
using System.Collections;
/// <summary>
/// Rotates the camera from the device gyroscope. Start() re-parents the
/// camera and primes the gyro; Update() applies the gyro attitude with a
/// fixed correction quaternion.
/// </summary>
public class GyroCamera : MonoBehaviour {

    private Gyroscope gyro;
    private bool gyroSupported;
    private Quaternion rotFix;

    [SerializeField]
    private Transform worldObj;

    private float startY; // NOTE(review): never assigned in this file

    [SerializeField]
    //private Transform zoomObj;

    // Use this for initialization
    void Start() {
        gyroSupported = SystemInfo.supportsGyroscope;

        GameObject camParent = new GameObject("camParent");
        camParent.transform.position = transform.position;
        transform.parent = camParent.transform;

        if(gyroSupported)
        {
            gyro = Input.gyro;
            gyro.enabled = true;
            camParent.transform.rotation = Quaternion.Euler(90f, 180f, 0f);
            rotFix = new Quaternion(0, 0, 1, 0);
        }
    }

    // Update is called once per frame
    void Update () {
        // fix: 'gyro' is only assigned when a gyroscope exists; without this
        // guard the original threw a NullReferenceException every frame on
        // devices that have no gyroscope.
        if (!gyroSupported || gyro == null) return;
        transform.localRotation = gyro.attitude * rotFix;
    }
}<file_sep>using System.Collections;
using System.Collections.Generic;
using UnityEngine;
/// Hides this GameObject on Start and re-activates it after 'time' seconds.
public class ShowAfter : MonoBehaviour
{
    public float time; // delay in seconds before the object is shown
    void Start()
    {
        gameObject.SetActive(false);
        // NOTE(review): the delayed call is scheduled right after the object
        // is deactivated — confirm Invoke still fires while the GameObject
        // is inactive (coroutines, by contrast, would stop here).
        Invoke("HideShowGameobject", time);
    }
    void HideShowGameobject()
    {
        gameObject.SetActive(true);
    }
}<file_sep>using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using UnityEngine.Events;
using UnityEngine.SceneManagement;
/// <summary>
/// Click handler for the "east wall" menu: raycasts against this object's
/// collider on mouse-down and fires OnClick. The public methods below load
/// the scenes (by build index) wired to the individual UI buttons.
/// </summary>
public class ClickButtonEast : MonoBehaviour {

    public GameObject definedButton;
    public UnityEvent OnClick = new UnityEvent();

    // Use this for initialization
    void Start () {
        definedButton = this.gameObject;
    }

    // Update is called once per frame
    void Update () {
        // fix: build the ray only on the frame the button is pressed; the
        // original computed it every frame and discarded it without a click.
        if (Input.GetMouseButtonDown(0))
        {
            var ray = Camera.main.ScreenPointToRay(Input.mousePosition);
            RaycastHit Hit;
            if (Physics.Raycast(ray, out Hit) && Hit.collider.gameObject == gameObject)
            {
                Debug.Log("Button Clicked");
                OnClick.Invoke();
            }
        }
    }

    // East wall scene loaders (the original comment wrongly said "North").
    public void Play() {
        SceneManager.LoadScene(14);
    }
    public void Fix() {
        SceneManager.LoadScene(27);
    }
    public void EastScene1() {
        SceneManager.LoadScene(15);
    }
    public void EastScene2() {
        SceneManager.LoadScene(16);
    }
    public void EastScene3Jpr1() {
        SceneManager.LoadScene(17);
    }
    public void EastScene3Jpr2() {
        SceneManager.LoadScene(18);
    }
    public void EastScene3Jpr3() {
        SceneManager.LoadScene(19);
    }
    public void EastScene3Go() {
        SceneManager.LoadScene(20);
    }
    public void EastScene5Fresco() {
        SceneManager.LoadScene(21);
    }
    public void EastScene5Secco() {
        SceneManager.LoadScene(22);
    }
    public void EastScene5Go()
    {
        SceneManager.LoadScene(23);
    }
    public void EastScene8()
    {
        SceneManager.LoadScene(24);
    }
    // Back to the wall overview scene.
    public void Wall()
    {
        SceneManager.LoadScene(1);
    }
}<file_sep>using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using UnityEngine.Events;
using UnityEngine.SceneManagement;
/// <summary>
/// Click handler for the "west wall" menu: raycasts against this object's
/// collider on mouse-down and fires OnClick. The public methods below load
/// the scenes (by build index) wired to the individual UI buttons.
/// </summary>
public class ClickButtonWest : MonoBehaviour {

    public GameObject definedButton;
    public UnityEvent OnClick = new UnityEvent();

    // Use this for initialization
    void Start () {
        definedButton = this.gameObject;
    }

    // Update is called once per frame
    void Update () {
        // fix: build the ray only on the frame the button is pressed; the
        // original computed it every frame and discarded it without a click.
        if (Input.GetMouseButtonDown(0))
        {
            var ray = Camera.main.ScreenPointToRay(Input.mousePosition);
            RaycastHit Hit;
            if (Physics.Raycast(ray, out Hit) && Hit.collider.gameObject == gameObject)
            {
                Debug.Log("Button Clicked");
                OnClick.Invoke();
            }
        }
    }

    // West wall scene loaders (arguments are scene build indices).
    public void Play() {
        SceneManager.LoadScene(41);
    }
    public void Fix() {
        SceneManager.LoadScene(28);
    }
    public void WestScene1() {
        SceneManager.LoadScene(42);
    }
    public void WestScene2() {
        SceneManager.LoadScene(43);
    }
    public void WestScene3Jpr1() {
        SceneManager.LoadScene(44);
    }
    public void WestScene3Jpr2() {
        SceneManager.LoadScene(45);
    }
    public void WestScene3Jpr3() {
        SceneManager.LoadScene(46);
    }
    public void WestScene3Go() {
        SceneManager.LoadScene(47);
    }
    public void WestScene5Fresco() {
        SceneManager.LoadScene(48);
    }
    public void WestScene5Secco() {
        SceneManager.LoadScene(49);
    }
    public void WestScene4Go()
    {
        SceneManager.LoadScene(50);
    }
    public void WestScene8()
    {
        SceneManager.LoadScene(51);
    }
    // Back to the wall overview scene.
    public void Wall()
    {
        SceneManager.LoadScene(1);
    }
}
"C#"
] | 8 | C# | chicca1210/ProgettoTesiMagistrale | 1f08dc05e2a91728f7000077c170e85522dfc152 | 707e92b7b7a79b05a23da5a8e8b7d08f81612cb9 |
refs/heads/master | <repo_name>vsmorais/midterm<file_sep>/js/main.js
(() => {
    // Immediately-invoked function expression: runs as soon as the script loads.
    console.log ('fired'); // smoke-test log confirming the bundle executed
})();
"JavaScript"
] | 1 | JavaScript | vsmorais/midterm | ade7882b370f32f6782d03a89ed7fab068532b8b | abe22a44aa50526f59460e3d8203414ddf2539de |
refs/heads/master | <file_sep>/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package data;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
 * Data-access object for recipes: seeds demo rows, loads one recipe with
 * its ingredients, and lists all recipe names.
 *
 * @author jonab
 */
public class RecipesDAO {

    private DBConnector con;
    private Recipe recipe;

    public RecipesDAO() {
        try {
            this.con = new DBConnector();
        } catch (Exception ex) {
            Logger.getLogger(RecipesDAO.class.getName()).log(Level.SEVERE, null, ex);
            System.out.println("Couldn't connect to DB");
        }
        this.recipe = new Recipe();
    }

    /** Seeds the Recipe table with two demo rows. */
    public void createRecipe() {
        String query = "insert into Recipe\n"
                + "values ('Farmors flotte kager', 'Kom alle ingredienser i en skål og så smid det i ovnen', 'Tommel op', null),\n"
                + "('Bedstemor med slag i', 'Fuld drøn på røremaskinen og så 400 grader i ovnen 20 min', 'Tommel op', null);";
        // try-with-resources closes the statement even on failure.
        try (Statement stmt = con.getConnection().createStatement()) {
            stmt.executeUpdate(query);
        } catch (Exception e) {
            System.out.println(e);
        }
    }

    /**
     * Debug helper: runs a lookup by recipe name and returns the ResultSet's
     * toString() (not the row data — kept for backward compatibility).
     * fix: the name is now bound as a parameter instead of being concatenated
     * into the SQL string (SQL-injection risk).
     */
    public String displaySingleRecipe(String recipeName) {
        String query = "SELECT * FROM `Recipe` WHERE recipeName = ?;";
        try (PreparedStatement stmt = con.getConnection().prepareStatement(query)) {
            stmt.setString(1, recipeName);
            try (ResultSet rs = stmt.executeQuery()) {
                if (rs.next()) {
                    System.out.println(rs.toString());
                }
                System.out.println(rs);
                return rs.toString();
            }
        } catch (Exception e) {
            return e.toString();
        }
    }

    /**
     * Loads one recipe (base data plus ingredients) by name.
     *
     * @param recipeName recipe to look up
     * @return the populated Recipe, or null when loading fails
     */
    public Recipe displaySingleRecipe1(String recipeName) {
        try {
            // fix: start from a fresh Recipe so repeated calls don't keep
            // accumulating ingredients on the shared field.
            this.recipe = new Recipe();
            db_GetRecipeData(recipeName);
            db_GetIngredientsData(recipeName);
            return recipe;
        } catch (Exception e) {
            // fall through and report failure as null
        }
        return null;
    }

    /** Loads all ingredient rows for the recipe into this.recipe. */
    private void db_GetIngredientsData(String recipeName) throws SQLException {
        String query = "SELECT * FROM IngredientDetails where recipeName = ?;";
        try (PreparedStatement stmt = con.getConnection().prepareStatement(query)) {
            stmt.setString(1, recipeName);
            try (ResultSet rs = stmt.executeQuery()) {
                while (rs.next()) {
                    recipe.addIngredient(new Ingredient(rs.getString("ingredientName"), rs.getString("qty")));
                }
            }
        }
    }

    /** Loads the recipe's base columns into this.recipe. */
    private void db_GetRecipeData(String recipeName) throws SQLException {
        String query = "SELECT * FROM cupcakeRecipes.Recipe\n"
                + "where recipeName = ?;";
        try (PreparedStatement stmt = con.getConnection().prepareStatement(query)) {
            stmt.setString(1, recipeName);
            try (ResultSet rs = stmt.executeQuery()) {
                if (rs.next()) {
                    recipe.setRecipeName(rs.getString("recipeName"));
                    recipe.setInstructions(rs.getString("instructions"));
                    recipe.setRating(rs.getString("rating"));
                    recipe.setImgURL(rs.getString("image"));
                }
            }
        }
    }

    /**
     * Lists every recipe name, one entry per table row.
     * fix: the original added row N to the list N times (an inner loop ran
     * up to rs.getRow() for every row).
     *
     * @return all names, or null when the query fails
     */
    public ArrayList<String> displayAllRecipeNames() {
        ArrayList<String> recipeNames = new ArrayList<>();
        String query = "SELECT recipeName FROM cupcakeRecipes.Recipe;";
        try (Statement stmt = con.getConnection().createStatement();
             ResultSet rs = stmt.executeQuery(query)) {
            while (rs.next()) {
                recipeNames.add(rs.getString("recipeName"));
            }
        } catch (Exception e) {
            System.out.println(e);
            return null;
        }
        return recipeNames;
    }
}
| d504aac064c4ad31f6b368c8d701079de8a7d05b | [
"Java"
] | 1 | Java | JonatanHjelm95/cupcakeRecipes | 3703a64858d44cf95ac39675f4e36827117e9018 | 6fa0afe281dd4153ab554e6ae1aa6c6b13613437 |
refs/heads/main | <file_sep>export = gTTsClass;
declare class gTTsClass {
constructor(text: string, lang?: string, debug?: boolean);
GOOGLE_TTS_URL: string;
MAX_CHARS: number;
LANGUAGES: {
af: string;
sq: string;
ar: string;
hy: string;
ca: string;
zh: string;
"zh-cn": string;
"zh-tw": string;
"zh-yue": string;
hr: string;
cs: string;
da: string;
nl: string;
en: string;
"en-au": string;
"en-uk": string;
"en-us": string;
eo: string;
fi: string;
fr: string;
de: string;
el: string;
ht: string;
hi: string;
hu: string;
is: string;
id: string;
it: string;
ja: string;
ko: string;
la: string;
lv: string;
mk: string;
no: string;
pl: string;
pt: string;
"pt-br": string;
ro: string;
ru: string;
sr: string;
sk: string;
es: string;
"es-es": string;
"es-us": string;
sw: string;
sv: string;
ta: string;
th: string;
tr: string;
vi: string;
cy: string;
};
debug: boolean;
lang: string;
text: string;
text_parts: string[];
token: typeof gToken;
getHeader(): {
Referer: string;
"User-Agent": string;
};
getPayload(
part: any,
idx: any
): {
ie: string;
q: string;
tl: string;
total: number;
idx: number;
client: string;
textlen: number;
tk: string;
};
stream(): NodeJS.ReadableStream;
save(save_file: string, callback: any): Promise<void>;
_tokenize(text: string, max_size: number): string[];
_minimize(thestring: string, delim: string, max_size: any): any;
}
import gToken = require("./gToken");
<file_sep>import gTTs = require("../lib/gTTS");
import axios from "axios";
import { readFileSync } from "fs";
import { join } from "path";
// Fetches the plain-text intro extract of a Wikipedia article (Vietnamese
// wiki, "doraemon") and saves it as synthesized speech via gTTs.
const main = async () => {
    // MediaWiki API query: plain-text intro extracts, following redirects.
    var param = {
        format: "json",
        action: "query",
        prop: "extracts",
        exlimit: "max",
        explaintext: "",
        exintro: "",
        titles: "doraemon",
        redirects: "",
    };
    var data = (
        await axios.get("http://vi.wikipedia.org/w/api.php", {
            params: param,
        })
    ).data.query.pages;
    // Seed list starts with "a" — presumably to prime the audio; TODO confirm.
    var text: string[] | string = ["a"];
    for (const key of Object.keys(data)) {
        text.push(data[key].extract);
    }
    // var txtFileData = await readFileSync(join(__dirname, "../text.txt"), {
    //     encoding: "utf-8"
    // });
    text = text.join("");
    console.log(text.length);
    var gtts = new gTTs(text, "vi");
    // NOTE(review): output file is named .mp4 although gTTs produces audio.
    await gtts.save("./output.mp4", (err) => {
        console.log(err);
    });
};
main();
<file_sep>export default gToken;
declare function gToken(text: string): string;
declare class gToken {
constructor(text: string);
SALT_1: string;
SALT_2: string;
token_key: number;
}
<file_sep>import express = require("express");
const app = express();
import Gtts = require("../lib/gTTS");
// GET /hear?lang=<code>&text=<phrase> — streams the spoken text as audio.
app.get("/hear", function (req, res) {
    // NOTE(review): String(undefined) yields "undefined" when a query
    // parameter is missing — consider validating text/lang first.
    const text = String(req.query.text);
    const lang = String(req.query.lang);
    const gtts = new Gtts(text, lang);
    gtts.stream().pipe(res);
});
// Start the demo server on port 3000.
app.listen(3000, function () {
    console.log(
        "Open url to hear Hallelujah http://localhost:3000/hear?lang=en&text=Hallelujah"
    );
});
<file_sep>export = gTTS;
import gTTS = require("./lib/gTTS");
| aae3cec27fa391ff88413930a92c91c911f56e30 | [
"TypeScript"
] | 5 | TypeScript | code-ga/gTTs.ts | 3b31c71e0471abd4ef58bf29b609cf44c228e814 | f01976c4e0299200143b256af35e2b5dd357a490 |
refs/heads/master | <repo_name>marcoagpegoraro/estrutura-de-dados-em-c-univesp<file_sep>/aula2/aula23.c
#include <stdio.h>
#include <stdlib.h>
#define alturaMaxima 255
typedef int CHAVE;
typedef struct {
int peso;
int altura;
} PesoAltura;
/* Demo: heap-allocates one PesoAltura record, prints it, and reports
 * whether the height exceeds the alturaMaxima limit. */
int main(){
    PesoAltura* pessoa1 = (PesoAltura*) malloc(sizeof(PesoAltura));
    /* BUG FIX: malloc() was dereferenced without a NULL check. */
    if (pessoa1 == NULL) return 1;
    pessoa1->peso = 80;
    pessoa1->altura = 185;
    printf("Peso: %d Altura: %d\n", pessoa1->peso, pessoa1->altura);
    if(pessoa1->altura > alturaMaxima) printf("altura acima da maxima\n");
    else printf("altura abaixo da maxima\n");
    free(pessoa1); /* BUG FIX: the allocation was leaked in the original */
    return 0;
}<file_sep>/aula2/aula2.c
#include <stdio.h>
#define alturaMaxima 255
typedef int CHAVE;
typedef struct {
int peso;
int altura;
} PesoAltura;
/* Stack-allocated counterpart of aula23.c: fills a PesoAltura record and
 * reports whether the height exceeds the alturaMaxima limit (255). */
int main(){
    PesoAltura pessoa1;
    pessoa1.peso = 80;    /* weight */
    pessoa1.altura = 185; /* height, compared against alturaMaxima below */
    printf("Peso: %d Altura: %d\n", pessoa1.peso, pessoa1.altura);
    if(pessoa1.altura > alturaMaxima) printf("altura acima da maxima\n");
    else printf("altura abaixo da maxima\n");
    return 0;
}<file_sep>/aula3/aula3.c
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#define MAX 50
typedef int TIPOCHAVE;
typedef struct
{
TIPOCHAVE chave;
} REGISTRO;
typedef struct
{
REGISTRO R[MAX];
int numElem;
} LISTA;
/* Marks the list as empty (stored records are left as-is). */
void inicializaLista(LISTA *l)
{
    l->numElem = 0;
}
/* Returns the current number of elements in the list. */
int tamanho(LISTA *l)
{
    return l->numElem;
}
/* Prints every stored key, one per line, between quote markers. */
void exibirLista(LISTA *l)
{
    int i;
    printf("Lista: \"\n");
    for (i = 0; i < l->numElem; i++)
    {
        printf("%i\n", l->R[i].chave);
    }
    printf("\"\n");
}
/* Linear search for key `ch`; returns its index, or -1 when absent. */
int buscaSequencial(LISTA *l, TIPOCHAVE ch)
{
    /* BUG FIX: `i` was read uninitialized (undefined behavior); the scan
     * must start at the first slot. */
    int i = 0;
    while (i < l->numElem)
    {
        if (ch == l->R[i].chave)
            return i;
        else
            i++;
    }
    return -1;
}
/* Inserts `registro` at `posicao`, shifting later elements one slot right.
 * Fails (returns false) when the list is full or the position is invalid. */
bool inserirElemLista(LISTA *l, REGISTRO registro, int posicao)
{
    int idx;
    /* Guard: reject a full list and out-of-range positions up front. */
    if (l->numElem == MAX || posicao < 0 || posicao > l->numElem)
        return false;
    /* Open a gap at `posicao` by shifting the tail one slot to the right. */
    for (idx = l->numElem; idx > posicao; idx--)
        l->R[idx] = l->R[idx - 1];
    l->R[posicao] = registro;
    l->numElem++;
    return true;
}
/* Removes the record whose key equals `index`; returns false when absent. */
bool removerElemLista(LISTA *l, TIPOCHAVE index)
{
    int pos, j;
    pos = buscaSequencial(l, index);
    if (pos == -1)
        return false;
    /* Close the gap by shifting the tail one slot to the left. */
    for (j = pos; j < l->numElem - 1; j++)
        l->R[j] = l->R[j + 1];
    /* BUG FIX: the element count was never decremented, so the removed
     * record's slot was still reported (and printed) as part of the list. */
    l->numElem--;
    return true;
}
/* Demo: builds a list of three records and prints it after each insertion. */
int main()
{
    LISTA *l = (LISTA *)malloc(sizeof(LISTA));
    REGISTRO *r1 = (REGISTRO *)malloc(sizeof(REGISTRO));
    REGISTRO *r2 = (REGISTRO *)malloc(sizeof(REGISTRO));
    REGISTRO *r3 = (REGISTRO *)malloc(sizeof(REGISTRO));
    /* BUG FIX: allocations were never checked for failure. */
    if (l == NULL || r1 == NULL || r2 == NULL || r3 == NULL)
        return 1;
    /* BUG FIX: the original called inserirElemLista() and wrote R[0]/R[1]
     * before the list was initialized, reading an indeterminate numElem
     * (undefined behavior).  Initialize first, then insert through the API. */
    inicializaLista(l);
    r1->chave = 100;
    r2->chave = 200;
    r3->chave = 300;
    inserirElemLista(l, *r1, 0);
    inserirElemLista(l, *r2, 1);
    exibirLista(l);
    inserirElemLista(l, *r3, 1);
    exibirLista(l);
    /* BUG FIX: all four heap allocations leaked in the original. */
    free(r1);
    free(r2);
    free(r3);
    free(l);
    return 0;
} | b2adeb46e09887d411bacf7400fda6f63783c0e9 | [
"C"
] | 3 | C | marcoagpegoraro/estrutura-de-dados-em-c-univesp | 8c1ff5d0c3a16e5653e965d19d6143d4cfcbb61c | a37c6b98ecfef66acfc90d368bda284fbcda9ca1 |
refs/heads/master | <file_sep>INSERT INTO burgers (Burger_Name) VALUES ('Bacon Cheeseburger');
INSERT INTO burgers (Burger_Name) VALUES ('Mushroom Swiss');
-- Seed rows for the burgers table: one burger name per row; the remaining
-- columns take their defaults (table schema is defined elsewhere).
INSERT INTO burgers (Burger_Name) VALUES ('Mushroom Swiss');
INSERT INTO burgers (Burger_Name) VALUES ('Double Cheeseburger');
INSERT INTO burgers (Burger_Name) VALUES ('BBQ Mac-N-Cheese');
const connection = require('./connection.js');
////--OBJECT FOR SQL STATEMENT FUNCTIONS--////
const orm = {
selectAll() {
},
insertOne() {
},
updateOne() {
},
};
////--EXPORT FOR ORM OBJECT--////
module.exports = orm;<file_sep>////--EXPRESS IMPORT--////
const express = require('express');
const router = express.Router();
////--IMPORT BURGER MODEL TO USE DATABASE FUNCTIONS--////
const burger = require('../models/burger.js');
////--ROUTES--////
router.get('/', (req, res) => {
});
router.post('/api/burgers', (req, res) => {
});
router.put('/api/burgers/:id', (req, res) => {
});
////--EXPORT ROUTES FOR SERVER--////
module.exports = router;<file_sep>////--IMPORT ORM OBJECT FOR FUNCTIONS TO INTERACT WITH DATABASE--////
const orm = require('../config/orm.js');
////--FUNCTION INTERACTION--////
const burger = {
selectAll() {
},
insertOne() {
},
updateOne() {
},
};
////--EXPORT DATABASE FUNCTIONS FOR CONTROLLER--////
module.exports = burger; | e75d8ff47fa58c8576dca5a1dee3a8d38c8b8e2e | [
"JavaScript",
"SQL"
] | 4 | SQL | jkeopangna/BurgerLogger | 762d70675af8425f8a7e3a2fea048941badc04fc | 6060b380d1cc08d0f8022e7cc15c91bd1113ea75 |
refs/heads/master | <file_sep>def writeLine(outString):
print(outString)
def readLine():
return input()
<file_sep>from IO import *
from Verif import *
stop = False
while not stop:
date = ""
# read line
currLine = readLine()
if currLine == "stop":
stop = True
else:
date = dDate(currLine)
if date == -1:
print("Re-enter your date plz mm-dd-yyyy")
continue
else:
print(date)
def dDate(rawDate):
    """Validate a date string ("mm-dd-yyyy" or "mm/dd/yyyy") and normalise it.

    Accepts "/" or "-" separators, 1- or 2-digit month/day, and 2- or
    4-digit years (2-digit years are expanded to 20yy).  Returns the date
    packed as the 8-character string "MMDDYYYY", or -1 when the input is
    not a valid date in year 2000 or later.
    """
    # Split on whichever separator is present.
    if "/" in rawDate:
        dSeg = rawDate.split("/")
    elif "-" in rawDate:
        dSeg = rawDate.split("-")
    else:
        return -1

    # Expect exactly three parts: month, day, year.
    if len(dSeg) != 3:
        return -1

    month, day, year = dSeg
    # Zero-pad single-digit month/day and expand 2-digit years to 20yy.
    if len(month) == 1:
        month = "0" + month
    if len(day) == 1:
        day = "0" + day
    if len(year) == 2:
        year = "20" + year
    if len(month) != 2 or len(day) != 2 or len(year) != 4:
        return -1

    # Every part must be numeric.
    try:
        m = int(month)
        d = int(day)
        y = int(year)
    except ValueError:
        return -1

    fDate = month + day + year

    if y < 2000:
        return -1
    if not 1 <= m <= 12:
        return -1

    # Days per month.  BUG FIX: the original swapped the 30/31-day limits,
    # so e.g. April 31 was accepted while January 31 was rejected.
    if m in (4, 6, 9, 11):
        max_day = 30
    elif m == 2:
        # Full Gregorian leap-year rule.  BUG FIX: the original used
        # y % 4 only, which would wrongly accept 2100-02-29.
        leap = y % 4 == 0 and (y % 100 != 0 or y % 400 == 0)
        max_day = 29 if leap else 28
    else:
        max_day = 31

    if not 1 <= d <= max_day:
        return -1
    return fDate
| c520767f6b2b9fb17a1dfae55808cd03424dd163 | [
"Python"
] | 3 | Python | sacredcoding/HomeworkBot | a93cc944ff7be5bc2d30235385f2331d7d551a15 | 2f7069f217ffebb51eccd428ce5e26a6c2917726 |
refs/heads/master | <repo_name>ozPop/SQL-bear-organizer-lab-wdf-000<file_sep>/lib/insert.sql
INSERT INTO bears (name, age, gender, color, temperament, alive) VALUES ('Mr. Chocolate', 2, 'M', 'Brown', 'Angry', 1);
INSERT INTO bears (name, age, gender, color, temperament, alive) VALUES ('Rowdy', 5, 'M', 'Black', 'Cool', 0);
INSERT INTO bears (name, age, gender, color, temperament, alive) VALUES ('Tabitha', 3, 'F', 'Yellow', 'Weird', 0);
INSERT INTO bears (name, age, gender, color, temperament, alive) VALUES ('<NAME>', 1, 'M', 'Grey', 'Calm', 1);
INSERT INTO bears (name, age, gender, color, temperament, alive) VALUES ('Melissa', 7, 'F', 'Brown', 'Passive', 0);
INSERT INTO bears (name, age, gender, color, temperament, alive) VALUES ('Grinch', 6, 'M', 'Black', 'Angry', 1);
INSERT INTO bears (name, age, gender, color, temperament, alive) VALUES ('Wendy', 1, 'F', 'Blue', 'Cool', 0);
INSERT INTO bears (name, age, gender, color, temperament, alive) VALUES (NULL, 8, 'F', 'Grey', 'Angry', 1); | a12e62ea419f7bd1d1d46e45c7d2c167bf27de39 | [
"SQL"
] | 1 | SQL | ozPop/SQL-bear-organizer-lab-wdf-000 | adf51a1ca53b15485766f3adf0e490ee76122f8d | 4407ded7ea4668a60fb94a0fc6da6657f7cdf21c |
refs/heads/master | <repo_name>BusraKeskin/cathexis<file_sep>/main.cpp
#include<iostream>
#include<fstream>
#include<math.h>
#define N 10
using namespace std;
// Owner/author information attached to a document.
struct Kisi{
    string ad;             // first name
    string soyad;          // last name
    string kullanici_adi;  // username
    string e_posta;        // e-mail address
    int dogum_yili;        // birth year
};
// Submission timestamp split into calendar and clock fields.
struct Tarih{
    int yil;          // year
    int kacinci_gun;  // day
    int kacinci_ay;   // month
    int saat;         // hour
    int dakika;       // minute
    int saniye;       // second
};
// One print job: document path/size plus its owner and submission time.
struct Belge{
    string b_yolu;     // file path; an empty path marks a free queue slot
    string boyut;      // size, kept as text as read from the input file
    int sayfa_sayisi;  // page count; drives the printing duration in main()
    Kisi kisi;
    Tarih tarih;
};
Belge s1[N];      // staging records read from the input file
Belge kuyruk[N];  // the fixed-size print queue (N slots)
Belge temp;       // job currently held by the printer
// Reads one job record from `dosya` into staging slot s1[i] and mirrors it
// into queue slot kuyruk[i].  The extraction order must match the layout of
// the whitespace-separated input file.
void kuyruga_yazma(int i, ifstream &dosya){
    dosya >> s1[i].b_yolu >> s1[i].boyut >> s1[i].sayfa_sayisi;
    dosya >> s1[i].kisi.ad >> s1[i].kisi.soyad >> s1[i].kisi.kullanici_adi;
    dosya >> s1[i].kisi.e_posta >> s1[i].kisi.dogum_yili;
    dosya >> s1[i].tarih.yil >> s1[i].tarih.kacinci_gun >> s1[i].tarih.kacinci_ay;
    dosya >> s1[i].tarih.saat >> s1[i].tarih.dakika >> s1[i].tarih.saniye;
    kuyruk[i] = s1[i];  // one struct copy replaces the field-by-field copies
}
// Dequeue helper: shifts kuyruk[1..i] one slot towards the front, dropping
// the record in kuyruk[0].  Whole-struct assignment copies every field that
// the original copied one by one.
// NOTE(review): when i == N this reads kuyruk[N], one past the array end —
// same as the original; confirm callers keep i < N.
void slide(int i){
    for (int j = 0; j < i; j++){
        kuyruk[j] = kuyruk[j + 1];
    }
}
// Returns 1 when at least one queue slot is free, 0 when the queue is full.
// A slot counts as free when its b_yolu string is empty.
bool kuyrukta_yer_var_mi(){
    for (int a = 0; a < N; a++){
        if (kuyruk[a].b_yolu.length() == 0)
            return 1;  // found an empty slot
    }
    return 0;  // scanned all N slots, every one occupied
}
// Simulation loop: every `sn` seconds the PC pushes the next record from
// "a.txt" into the queue; the printer takes the front job and needs
// (page count * cn) seconds to finish it.  Stops when every file read from
// the input has been printed.
int main ()
{
    bool yazici_musait=1;         // printer-idle flag
    int dosya_sayisi=0;           // files enqueued so far
    int yazilmis_dosya_sayisi=0;  // files fully printed
    int sure = 0;                 // simulated time, in seconds
    int i=0; // index of the next free queue slot
    int j=0; // NOTE(review): appears unused in this function
    int baslangic; // second at which the printer picked up the current job
    // NOTE(review): `baslangic` is read below (in the `sure == baslangic + ...`
    // test) before it is ever assigned, and `temp` relies on its
    // default-constructed empty b_yolu to guard that branch — confirm.
    string dosyain = "a.txt";
    ifstream dosya;
    dosya.open(dosyain.c_str());
    if (dosya.is_open()) {
        unsigned int sn, cn;
        cout<<"PC kac saniyede kuyruga belge gonderir (sn) : ?"<<endl; cin>>sn;
        cout<<"Yazicinin bir sayfayi yazma hizi nedir? (cn): ?"<<endl; cin>>cn;
        while(1==1){
            // When it is the PC's turn to enqueue (every sn seconds)
            if(sure % sn == 0 && !dosya.eof()){
                // write only when the queue has a free slot
                if(kuyrukta_yer_var_mi()){
                    kuyruga_yazma(i,dosya);
                    cout<< sure << "sn - Pc kuyruga yazdi - \t" << kuyruk[i].b_yolu <<endl;
                    i++; // advance to the next queue slot
                    dosya_sayisi++;// count of enqueued files
                }
                else{
                    cout<<"PC kuyruga yazamadi.\n";
                }
            }
            // the printer picks up a job
            if(yazici_musait && kuyruk[0].b_yolu.length()!=0){// is there a queued file?
                temp=kuyruk[0]; // move the front job into temp
                cout<< sure <<"sn - Yazici kuyruktan okudu - \t" << temp.b_yolu << endl;
                // remove it from the queue
                slide(i);
                i--; // one slot was freed
                yazici_musait=0;
                baslangic=sure;
            }
            // the current job finishes after (pages * cn) seconds
            if(sure == baslangic + temp.sayfa_sayisi*cn && temp.b_yolu.length()!=0){
                cout<< sure << "sn - Yazici belge yazdi -\t" << temp.b_yolu <<endl;
                yazici_musait=1;
                yazilmis_dosya_sayisi++;
                if(yazici_musait && kuyruk[0].b_yolu.length()!=0){ // is there a queued file?
                    temp=kuyruk[0];// move the front job into temp
                    cout<< sure <<"sn - Yazici kuyruktan okudu - \t" << temp.b_yolu << endl;
                    // remove it from the queue
                    slide(i);
                    i--;
                    yazici_musait=0;
                    baslangic=sure;
                }
            }
            // done once everything enqueued has also been printed
            if(dosya_sayisi == yazilmis_dosya_sayisi && dosya.eof()){
                cout<<"Butun dosyalari yazdik..."<<endl;
                break;
            }
            sure = sure + 1;
        }
    }
    dosya.close();
    return 0;
}
<file_sep>/README.md
# Printer Simulation
Dönen kuyruk yapısı mantığı ile verileri sırası ile yazan program.
| 30143b9d5dc7e03085318c27714b001f991ebb15 | [
"Markdown",
"C++"
] | 2 | C++ | BusraKeskin/cathexis | 5251d4103dd74236bf0ad9931ca190e439e948ad | 8f1c2bc2af79db416c1c3e92a81c06f8c1288565 |
refs/heads/main | <file_sep>import math
import statistics
from collections import defaultdict
from sklearn.metrics import mean_absolute_error
from tabulate import tabulate
from sklearn.model_selection import KFold
def read_dataset(path):
    """Load MovieLens-style ratings from a tab-separated file.

    Each line starts with "<user_id>\t<movie_id>\t<rating>"; any trailing
    columns (e.g. the timestamp) are ignored.  Returns a list of
    (user_id, movie_id, rating) tuples of ints.
    """
    records = []
    with open(path, "r") as handle:
        for raw in handle:
            user, movie, score = raw.strip().split("\t")[:3]
            records.append((int(user), int(movie), int(score)))
    return records
def user_prediction(user, movie, ratings, neighbors):
    """Mean-centred weighted kNN prediction of ratings[user][movie].

    `neighbors` is an iterable of (neighbor_id, similarity) pairs; every
    neighbor must have rated `movie`.  Falls back to the user's own mean
    rating when the accumulated similarity mass is zero (or no neighbors).
    """
    base = statistics.mean(ratings[user].values())
    numer = 0.0
    denom = 0.0
    for nid, sim in neighbors:
        neighbor_mean = statistics.mean(ratings[nid].values())
        numer += sim * (ratings[nid][movie] - neighbor_mean)
        denom += sim
    try:
        return base + numer / denom
    except ZeroDivisionError:
        return base
def present(model, knn, results):
    """Print a table of per-fold MAE values followed by their average."""
    rows = [[model, knn, fold + 1, mae] for fold, mae in enumerate(results)]
    rows.append([model, knn, "Average", statistics.mean(results)])
    print(tabulate(rows, headers=["Model", "KNN", "Fold", "MAE"]))
def pearson_correlation(u1, u2):
    """Pearson correlation between two users' rating dicts (movie -> rating).

    Only movies rated by both users contribute, while each user's mean is
    taken over *all* of their ratings.  Returns 0 when the correlation is
    undefined (no overlap, or zero variance on the shared movies).
    """
    avg1 = statistics.mean(u1.values())
    avg2 = statistics.mean(u2.values())
    shared = set(u1) & set(u2)
    num = sum((u1[c] - avg1) * (u2[c] - avg2) for c in shared)
    var1 = math.sqrt(sum((u1[c] - avg1) ** 2 for c in shared))
    var2 = math.sqrt(sum((u2[c] - avg2) ** 2 for c in shared))
    den = var1 * var2
    try:
        return num / den
    except ZeroDivisionError:
        return 0
def user_based(train, test, knn):
    """User-based collaborative filtering over one train/test split.

    Builds a user->movie->rating table from `train`, then for every test
    triple predicts the rating from the `knn` most Pearson-similar users
    that rated the movie.  Returns the mean absolute error over `test`.
    """
    ratings, similarities = defaultdict(lambda: dict()), defaultdict(lambda: dict())
    truth, predictions = [], []
    # Rating matrix: ratings[user][movie] = rating.
    for user_id, movie_id, rating in train:
        ratings[user_id][movie_id] = rating
    for user_id, movie_id, rating in test:
        truth.append(rating)
        # Compute (and cache) this user's similarity to every other user.
        others = [k for k in ratings.keys() if k != user_id]
        for o in others:
            if o not in similarities[user_id]:
                similarities[user_id][o] = pearson_correlation(u1=ratings[user_id], u2=ratings[o])
        # Keep only neighbours that rated this movie, then take the top knn.
        relative = [i for i in similarities[user_id].items() if movie_id in ratings[i[0]]]
        nearest = sorted(relative, key=lambda temp: temp[1], reverse=True)[:knn]
        p = user_prediction(user=user_id, movie=movie_id, ratings=ratings, neighbors=nearest)
        predictions.append(p)
    return mean_absolute_error(truth, predictions)
if __name__ == '__main__':
    # Interactive driver: k-fold cross-validation of the user-based CF model.
    path1 = input("Please enter the path to the data file:")
    data = read_dataset(path1)
    n1 = int(input("Please enter the number of kfold: (5 or 10)"))
    kf = KFold(n_splits=n1)
    knn1 = int(input("Please enter the number of k nearest neighbors(choises :10,20,30,40,50,60,70,80)"))
    maes = []
    # Evaluate each fold and collect the per-fold MAE values.
    for train_index, text_index in kf.split(data):
        train_data = [data[i] for i in train_index]
        test_data = [data[i] for i in text_index]
        mae = user_based(train_data, test_data, knn1)
        maes.append(mae)
    # Report per-fold plus average MAE as a table.
    present(results=maes, knn=knn1, model="user")
<file_sep># CollaborativeFiltering
<br>Movie rating prediction on the MovieLens 100K Dataset.
<br>I used user-based CF with Pearson correlation technique when predicting ratings.
<br>You can download the dataset here:
<br>https://grouplens.org/datasets/movielens/100k/
<br>I worked with the "u.data" file, which contains:
<br><ol>100,000 ratings
<br>943 users
<br>1,682 movies
| 7eeb855f809360bcb0b2b388b30ad5577cfadeb4 | [
"Markdown",
"Python"
] | 2 | Python | rumeysaturkan/CollaborativeFiltering | 796fc5d770ee671bb63ab2ca80c48503e46d2b3a | 9bdd0082a3cc9313e7f8868f8aff1dd86906d3d6 |
refs/heads/master | <repo_name>sinzen/Prototype<file_sep>/src/Document.java
/**
* Created by yassirhessane on 5/08/16.
*/
public abstract class Document {
protected String contenu;
public Document duplique()
{
Document resultat = null;
try {
resultat.clone();
}catch (CloneNotSupportedException e) {
e.printStackTrace();
}
return resultat;
}
}
| decd3dd5560d2f223bd67bad4c4bf01c30378cee | [
"Java"
] | 1 | Java | sinzen/Prototype | 489733b171b53a9ee1a76acfda42eb7aa015fb2f | 9f1cfb464d3a50ab5cbfb79ba47bc708e2a80836 |
refs/heads/main | <repo_name>tiagoporto/reference-repository<file_sep>/README.md
# Reference Repository 
<p align="right">
LIKED ? <a href="https://github.com/tiagoporto/reference-repository/stargazers">⭐</a> : <a href="https://github.com/tiagoporto/reference-repository/issues">😞</a>
</p>
[](https://github.com/tiagoporto/reference-repository/releases)
[](https://github.com/tiagoporto/reference-repository/releases)
[![Node][badge-node-version]](https://www.npmjs.com/package/reference-repository)
[![Downloads][badge-downloads]](https://www.npmjs.com/package/reference-repository)
[](https://packagephobia.now.sh/result?p=reference-repository)
![Web Component][badge-web-component]
![type definitions][badge-definitions]
[![js-standard-style][badge-code-style]](http://standardjs.com)
[![License][badge-license]](https://raw.githubusercontent.com/tiagoporto/reference-repository/main/LICENSE)
[](https://travis-ci.com/tiagoporto/reference-repository)
[![Coverage Status][badge-coverage]](https://coveralls.io/github/tiagoporto/reference-repository)
[](https://dashboard.stryker-mutator.io/reports/github.com/tiagoporto/reference-repository/main)
[![Dependencies Status][badge-dependencies]](https://david-dm.org/tiagoporto/reference-repository)
[![devDependencies Status][badge-dev-dependencies]](https://david-dm.org/tiagoporto/reference-repository?type=dev)

[](https://dashboard.stryker-mutator.io/reports/github.com/tiagoporto/reference-repository/main)
<!--
Badges image
-->
[badge-inch]: http://inch-ci.org/github/tiagoporto/reference-repository?branch=main&style=flat-square
[badge-dependencies]: https://img.shields.io/david/tiagoporto/reference-repository?style=flat-square
[badge-dev-dependencies]: https://img.shields.io/david/dev/tiagoporto/reference-repository?style=flat-square
[badge-coverage]: https://img.shields.io/coveralls/tiagoporto/reference-repository?style=flat-square
[badge-license]: https://img.shields.io/github/license/tiagoporto/reference-repository?style=flat-square
[badge-code-style]: https://img.shields.io/badge/code%20style-standard-yellow?style=flat-square
[badge-downloads]: https://img.shields.io/npm/dt/reference-repository?style=flat-square
[badge-definitions]: https://img.shields.io/npm/types/reference-repository?style=flat-square
[badge-node-version]: https://img.shields.io/node/v/reference-repository?style=flat-square
[badge-web-component]: https://img.shields.io/badge/%20-Web%20Components-gray?style=popout-square&logoColor=white&logoWidth=45&logo=data:image/svg+xml;base64,<KEY>
> Project description.
The tool can be accessed on [tiagoporto.github.io/reference-repository](http://tiagoporto.github.io/reference-repository).
_Read this in other languages: English, [Português(Brasil)](README.md)_
_Work in Progress_
## Installation
```bash
npm i reference-repository
```
## Usage
## Contributing
[How to contribute](https://github.com/tiagoporto/reference-repository/blob/main/CONTRIBUTING.md).
## Donating
This project is developed in my free time, donations are welcome.
[](https://tiagoporto.github.io/donation-page)
<p align="center"><img src="https://forthebadge.com/images/badges/built-with-love.svg"/></p>
## License
Reference Repository is released under the terms of the [MIT](LICENSE).
<file_sep>/.husky/pre-commit
#!/bin/sh
. "$(dirname "$0")/_/husky.sh"
echo 'Running checks in staged files...'
npm run pre-commit
<file_sep>/.husky/pre-push
#!/bin/sh
. "$(dirname "$0")/_/husky.sh"
echo 'Running checks...'
npm run checks
| 7a2c1303bb67a5c08291c344b2535c141f66a6cf | [
"Markdown",
"Shell"
] | 3 | Markdown | tiagoporto/reference-repository | efe159e4dc592e2c2b96c24559cd277b14d06972 | f8414aa2988d51d7602dafa4891406121630161a |
refs/heads/master | <file_sep>window.addEventListener('load', () => {
var img = document.querySelector('img');
img.addEventListener('click', () => {
alert('hola');
});
});<file_sep>
//-----------Importar modulos e instancias-----//
// Importar modulo
const express=require('express');
//Intanciar bodyparser despues de instalarlo en git bash
var bodyParser = require('body-parser');
// importar file system para poder crear archivos txt, ya viene instalado pero toca instanciarlo
var fs = require('fs');
//-----Configuracion de la app-----------//
//Instanciar app
const app=express();
// configuración body parser para poder usar variables post en el body
app.use(bodyParser.urlencoded({ extended: true }));
//definir el puerto_ usualmente el 3000
const port = 3000;
//Aqui es donde se define la carpeta como publica
app.use(express.static('public'));
/* acciones del usuario al servidor
app.get(); //traer informacion del servidor
app.post(); //enviar nueva infromacion lo envia el usuario al servidor
app.put(); // modificar algo que ya esta
app.delete(); //eliminar alguna informacion que ya esta
*/
// la ruta recibe dos cosas '/aque va a responder', la funcion a la cual va a responder
//definir ruta tipo get y su funcion
app.get('/', (request, response) =>{
console.log('alguien entro a la ruta inicial');
response.sendFile(__dirname +'/public/home.html');
});
// inicar servidor en el puerto definido anteriormente
app.listen(port, () => {
console.log(`Servidor iniciado en el puerto ${port}`);
}); | e86b03c9d989de158683ec9dc76d7a7aca1fe767 | [
"JavaScript"
] | 2 | JavaScript | mariar13/Semana11_web | 08210806b982dd9339a5c51741c86113e33f7fad | d7f6964906545a239bb1b4470e040b7e9b679967 |
refs/heads/master | <file_sep>import java.util.Scanner;
public class CalorieCalc {
// Method converts steps to feet walked
public static int stepsToFeet(int baseSteps) {
final int FEET_PER_STEP = 3; // Unit conversion
int feetTot = 0; // Corresponding feet to steps
feetTot = baseSteps * FEET_PER_STEP;
return feetTot;
}
// Method converts steps to calories burned
public static double stepsToCalories(int baseSteps) {
final double STEPS_PER_MINUTE = 70.0; // Unit Conversion
final double CALORIES_PER_MINUTE_WALKING = 3.5; // Unit Conversion
double minutesTot = 0.0; // Corresponding min to steps
double caloriesTot = 0.0; // Corresponding calories to min
minutesTot = baseSteps / STEPS_PER_MINUTE;
caloriesTot = minutesTot * CALORIES_PER_MINUTE_WALKING;
return caloriesTot;
}
public static void main(String[] args) {
Scanner scnr = new Scanner(System.in);
int stepsInput = 0; // User defined steps
int feetTot = 0; // Corresponding feet to steps
double caloriesTot = 0; // Corresponding calories to steps
// Prompt user for input
System.out.print("Enter number of steps walked: ");
stepsInput = scnr.nextInt();
// Call methods to convert steps to feet/calories
feetTot = stepsToFeet(stepsInput);
System.out.println("Feet: " + feetTot);
caloriesTot = stepsToCalories(stepsInput);
System.out.println("Calories: " + caloriesTot);
return;
}
}<file_sep>import java.util.Scanner;
import java.lang.Math;
// ELEMENTARY LEVEL
public class CoordinateGeometry {
// public static double distanceCalculation(double x1, double x2, double y1, double y2){
// pointsDistance = Math.sqrt(Math.pow(x2 - x1 , 2.0) + Math.pow(y2 - y1 ,2.0));
// return pointsDistance;
// }
public static void main(String [] args) {
double x1 = 1.0;
double y1 = 2.0;
double x2 = 1.0;
double y2 = 5.0;
double pointsDistance = 0.0;
pointsDistance = Math.sqrt(Math.pow(x2 - x1 , 2.0) + Math.pow(y2 - y1 ,2.0));
System.out.print("Points distance: ");
System.out.println(pointsDistance);
return;
}
}<file_sep>import java.util.Scanner;
public class SavingsInterestCalc {
public static void main(String[] args) {
Scanner scnr = new Scanner(System.in);
final int INIT_SAVINGS = 10000; // Initial savings
final double INTEREST_RATE = 0.05; // Interest rate
int userYears = 0; // User input of number of years
int i = 0; // Loop variable
double currSavings = 0.0; // Savings with interest
System.out.println("Initial savings of $" + INIT_SAVINGS);
System.out.println("at " + INTEREST_RATE + " yearly interest.\n");
System.out.print("Enter years: ");
userYears = scnr.nextInt();
currSavings = INIT_SAVINGS;
i = 1;
while (i <= userYears) {
System.out.println(" Savings in year " + i
+ ": $" + currSavings);
currSavings = currSavings + (currSavings * INTEREST_RATE);
i = i + 1;
}
System.out.println();
return;
}
}<file_sep>import java.util.Scanner;
import java.lang.Math;
// Calculate tree height based on length of shadow. (dont use at night ;) or most other times really. )
public class TreeHeight {
public static void main(String [] args) {
double treeHeight = 0.0;
double shadowLength = 0.0;
double angleElevation = 0.0;
angleElevation = 0.11693706; // 0.11693706 radians = 6.7 degrees
shadowLength = 17.5;
//tan(angleElevation) = treeHeight / shadowLength
treeHeight = Math.tan(angleElevation) * shadowLength;
System.out.print("Tree height: ");
System.out.println(treeHeight);
return;
}
}<file_sep>import java.util.Scanner;
public class CalcPyramidVolume {
//Volume = base area x height x 1/3
//Base area = base length x base width
public static double pyramidVolume(double baseLength, double baseWidth, double pyramidHeight) {
double volume;
volume = ((baseLength * baseWidth) * pyramidHeight / 3);
return volume;
}
public static void main (String [] args) {
double baseLength;
double baseWidth;
double pyramidHeight;
Scanner scnr = new Scanner(System.in);
System.out.println(" Pyramid Base Length: ");
baseLength = scnr.nextDouble();
System.out.println(" Pyramid Base Width: ");
baseWidth = scnr.nextDouble();
System.out.println(" Pyramid Height: ");
pyramidHeight = scnr.nextDouble();
System.out.println(" Volume is: " + pyramidVolume(baseLength, baseWidth, pyramidHeight));
return;
}
}<file_sep>
import java.util.Scanner;
public class CelsiusToFahrenheit {
public static double c_to_f_conversion(double tempF, double tempC) {
tempF = tempC * (9 / 5) + 32;
return tempF;
}
public static void main (String [] args) {
Scanner scnr = new Scanner(System.in);
double tempF = 0.0;
double tempC = 0.0;
System.out.println("Enter temperature in Celsius: ");
tempC = scnr.nextDouble();
tempF = c_to_f_conversion(tempF, tempC);
System.out.print("Fahrenheit: ");
System.out.println(tempF);
return;
}
}
<file_sep>public class RowsColsCounter {
public static void main (String [] args) {
int numRows = 2;
int numCols = 3;
int i;
int k;
char letter = 'A';
for(i = 1; i <= numRows; ++i){
for(k = 0; k < numCols; ++k){
letter += k;
System.out.print("" + i + letter + " ");
letter = 'A';
}
}
System.out.println("");
return;
}
}<file_sep>import java.util.Scanner;
public class HeightConverter {
/* Converts a height in feet/inches to centimeters */
public static double feetInchesToCentimeters(int heightFeet, int heightInches) {
final double CM_PER_IN = 2.54;
final int INCH_PER_FT = 12;
int totalInches = 0;
double cmVal = 0.0;
totalInches = (heightFt * INCH_PER_FT) + heightInches;
cmVal = totalInches * CM_PER_IN;
return cmVal;
}
public static void main(String[] args) {
Scanner scnr = new Scanner(System.in);
int userFeet = 0;
int userInches = 0;
System.out.print("Enter feet: ");
userFeet = scnr.nextInt();
System.out.print("Enter inches: ");
userInches = scnr.nextInt();
System.out.print("Centimeters: ");
System.out.println(feetInchesToCentimeters(userFeet, userInches));
return;
}
} | 18f6cb9eb3b72ba4e35c04eca679a04fe9bae580 | [
"Java"
] | 8 | Java | aalimov/Simple_Java_Apps | 2bd0f394de4f71080499ca8cd737b0ab5a95ef66 | ff838fae756423e79c22eae8dd21f87b9b011430 |
refs/heads/main | <file_sep>require('dotenv').config();
const Serve = require('./models/server');
const serve = new Server();<file_sep>
const validarCampos = () => {
}
module.exports = {
validarCampos
}<file_sep>const jwt = require('jsonwebtoken');
const validarJWT = () => {
}
module.exports = {
validarJWT
}<file_sep>const jwt = require('jsonwebtoken');
const generarJWT = () => {
}
module.exports = {
generarJWT
}<file_sep>const { response } = require("express")
const esAdminRole = ( req, res = response, next ) => {
next();
}
module.exports = {
esAdminRole
}<file_sep>
const generarJWT = () => {
}
module.exports = {
generarJWT
}<file_sep>const { response } = require('express');
const jwt = require('jsonwebtoken');
const validarJWT = ( req, res = response , next ) => {
}
module.exports = {
validarJWT
}<file_sep>
const validarJWT = () => {
}
module.exports = {
validarJWT
}<file_sep>class Server {
constructor() {
this.app
}
}
module.exports = Server;<file_sep>const express = require('express');
// Thin wrapper around an Express application: builds the app, registers
// the routes, and exposes listen() to serve on process.env.PORT.
class Server {
    constructor() {
        this.app = express(); // <== create the express app as a property in the same server class
        this.port = process.env.PORT;
        // Middlewares (functions that add behaviour to the server) — none registered yet.
        // Application routes
        this.routes(); // <== will trigger the router method
    }
    // Registers the HTTP routes on the Express app.
    routes() {
        this.app.get('/', (req, res) => {
            res.send('Hello Mundo');
        });
    }
    // Starts the HTTP server on the configured port.
    listen() {
        this.app.listen(this.port, () => {
            console.log('Servidor corriendo en puerto', this.port);
        });
    }
}
module.exports = Server;<file_sep>const Role = require('../models/role');
// Custom validator: rejects roles that are not present in the Role
// collection by throwing (the caller's validation chain reports the error).
const esRolValido = async(rol = '') => { // <== Validacion personalizada
    const existeRol = await Role.findOne({ rol });
    if ( !existeRol ) {
        throw new Error(`El rol ${ rol } no está registrado en la BD`); // <== LANZAR un error personalizado
    }
}
// Checks whether `correo` is already registered.
// NOTE(review): `Usuario` is never imported in this file and `res` is not
// in scope here; unlike esRolValido this returns an HTTP response instead
// of throwing — it will fail at runtime. Confirm and align with esRolValido.
const emailExiste = async(correo = '') => {
    const existeEmail = await Usuario.findOne({ correo });
    if( existeEmail ) {
        return res.status(400).json({
            msg: 'Ese correo ya esta registrado'
        });
    }
}
module.exports = {
esRolValido,
emailExiste
}<file_sep>const jwt = require('jsonwebtoken');
const generarJWT = ( uid = '' ) => {
return new Promise( (resolve, reject) => {
})
}
module.exports = {
generarJWT
} | 9869a5d3b41b360db4d3a0975b260fef90bdaf29 | [
"JavaScript"
] | 12 | JavaScript | ElideZavala/Backend-restserver-basico | 539d9eaf7d9128e139b074bb6154e52f48439e7e | 03cb9d4197e2d6140b39cd36b30929db9934ba67 |
refs/heads/master | <file_sep>//
// NEOVisitorViewController.swift
// XLweibo
//
// Created by apple on 15/12/6.
// Copyright © 2015年 dfdsaf. All rights reserved.
//
import UIKit
class NEOVisitorViewController: UITableViewController {
    // Tracks whether a user is logged in; decides which root view to load.
    var userLogin = false
    // Logged in: use the normal table view; otherwise show the visitor view.
    override func loadView() {
        userLogin ? super.loadView() : setupVisitorView()
    }
    // Replaces the controller's view with the visitor placeholder view.
    private func setupVisitorView(){
        view = NEOVisitorView()
    }
    override func viewDidLoad() {
        super.viewDidLoad()
    }
}
<file_sep>//
// UIBarButtonItem+Extention.swift
// XLweibo
//
// Created by apple on 15/12/6.
// Copyright © 2015年 dfdsaf. All rights reserved.
//
import UIKit
// item字体大小
let ItemTitleFontSize: CGFloat = 14
// item字体颜色
let ItemTitleColor: UIColor = UIColor(white: 80 / 255, alpha: 1)
extension UIBarButtonItem {
//uiBarButton的便利构造函数
// Convenience initializer: wraps a UIButton in the item's customView,
// wiring an optional image (plus its "_highlighted" variant) and/or a
// styled title to the given target/action.
convenience init(imageNamed : String? = nil ,title : String? = nil ,target : AnyObject? ,action : Selector) {
    self .init()
    let button = UIButton ()
    button.addTarget(target, action: action, forControlEvents: .TouchUpInside)
    // Optional image: "<name>" for normal state, "<name>_highlighted" when pressed.
    if let ima = imageNamed {
        button.setImage(UIImage(named: ima), forState: .Normal)
        button.setImage(UIImage(named:"\(ima)_highlighted"), forState: .Highlighted)
    }
    // Optional title using the shared item font and colours.
    if let t = title {
        button.setTitle(t, forState: .Normal)
        button.setTitleColor(ItemTitleColor, forState: .Normal)
        button.setTitleColor(UIColor.orangeColor(), forState: .Highlighted)
        button.titleLabel?.font = UIFont.systemFontOfSize(ItemTitleFontSize)
    }
    button.sizeToFit()
    customView = button
}
}
<file_sep>//
// NEONavController.swift
// XLweibo
//
// Created by mac on 15/12/6.
// Copyright © 2015年 dfdsaf. All rights reserved.
//
import UIKit
/// Navigation controller that installs a custom back button on every push
/// and hides the tab bar for non-root controllers.
class NEONavController: UINavigationController ,UIGestureRecognizerDelegate{
    override func viewDidLoad() {
        super.viewDidLoad()
        // Act as the pop-gesture delegate so the edge swipe keeps working.
        // bug (original author's note): if you edge-swipe on the root
        // controller and then push a new controller, the push fails.
        self.interactivePopGestureRecognizer?.delegate = self
    }
    override func pushViewController(viewController: UIViewController, animated: Bool) {
        /* If the navigation stack already has a child, the incoming
           controller is the 2nd or later: give it a back button whose title
           is the first controller's title (or a default). */
        if childViewControllers.count != 0 {
            var title = "返回"
            if childViewControllers.count == 1 {
                title = childViewControllers.first?.title ?? title
            }
            // Pushed from the root: use the root's title as the back text.
            viewController.navigationItem.leftBarButtonItem = UIBarButtonItem(imageNamed: "navigationbar_back_withtext",title: title ,target: self ,action: "back")
            // Hide the bottom tab bar on pushed controllers.
            viewController.hidesBottomBarWhenPushed = true
        }
        super.pushViewController(viewController, animated: animated)
    }
    /// Pops the top controller; wired to the custom back button.
    @objc private func back () {
        popViewControllerAnimated(true)
    }
}
//func gestureRecognizerShouldBegin(gestureRecognizer: UIGestureRecognizer) -> Bool {
//
// print("哈哈")
// // 判断,如果是根控制器,就不要识别这个手势
// // if childViewControllers.count == 1 {
// // return false
// // }
// // return true
//
// return childViewControllers.count != 1
//}
// }
<file_sep>//
// NEOVisitorView.swift
// XLweibo
//
// Created by apple on 15/12/6.
// Copyright © 2015年 dfdsaf. All rights reserved.
//
import UIKit
/// Placeholder view shown to logged-out ("visitor") users: a house icon over
/// a circular background, an explanatory label, and register/login buttons,
/// all laid out with hand-built NSLayoutConstraints.
class NEOVisitorView: UIView {
    override init(frame: CGRect) {
        super.init(frame: frame)
        setupUI()
    }
    required init?(coder aDecoder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }
    // MARK: lazily created subviews ==============================
    private lazy var iconView :UIImageView = { // house icon
        let image = UIImage(named: "visitordiscover_feed_image_house")
        let imageView = UIImageView(image: image)
        return imageView
    }()
    // circular background image
    private lazy var circleView : UIImageView = UIImageView(image: UIImage(named: "visitordiscover_feed_image_smallicon"))
    private lazy var messageLabel: UILabel = { // explanatory text
        let lable = UILabel()
        lable.textColor = UIColor.darkGrayColor() // label text color
        lable.font = UIFont.systemFontOfSize( 14 )
        lable.text = "关注一些人,回这里看看有什么惊喜关注一些人,回这里看看有什么惊喜"
        lable.numberOfLines = 0 // allow wrapping
        return lable
    }()
    private lazy var rigisterButton : UIButton = { // register button
        let button = UIButton ()
        button.setTitle("注册", forState: UIControlState.Normal)
        button.setTitleColor(UIColor.orangeColor(), forState:.Normal)
        button.titleLabel?.font = UIFont.systemFontOfSize(14)
        button.setBackgroundImage(UIImage(named: "common_button_white_disable"), forState:.Normal)
        return button
    }()
    private lazy var loginButton : UIButton = { // login button
        let button = UIButton ()
        button.setTitle("登录", forState: UIControlState.Normal)
        button.setTitleColor(UIColor.orangeColor(), forState:.Normal)
        button.titleLabel?.font = UIFont.systemFontOfSize(14)
        button.setBackgroundImage(UIImage(named: "common_button_white_disable"), forState:.Normal)
        return button
    }()
    private lazy var maskIconView :UIImageView = { // gradient mask (currently unused)
        let image = UIImage(named: "visitordiscover_feed_mask_smallicon")
        let imageView = UIImageView(image: image)
        return imageView
    }()
    // ============================================
    // Builds the view hierarchy and all constraints.
    private func setupUI (){
        backgroundColor = UIColor(white: 237/255, alpha: 1)
        addSubview(iconView)
        // addSubview(maskIconView)
        addSubview(circleView)
        addSubview(messageLabel)
        addSubview(rigisterButton)
        addSubview(loginButton)
        // icon: centered in the view
        iconView.translatesAutoresizingMaskIntoConstraints = false
        addConstraint(NSLayoutConstraint(item: iconView, attribute: NSLayoutAttribute.CenterX, relatedBy: NSLayoutRelation.Equal, toItem: self, attribute: .CenterX, multiplier: 1, constant: 0))
        addConstraint(NSLayoutConstraint(item: iconView, attribute: NSLayoutAttribute.CenterY, relatedBy: NSLayoutRelation.Equal, toItem: self, attribute: .CenterY, multiplier: 1, constant: 0))
        // circle: centered on the icon
        circleView.translatesAutoresizingMaskIntoConstraints = false // disable autoresizing-mask constraints
        addConstraint(NSLayoutConstraint(item: circleView, attribute: NSLayoutAttribute.CenterX, relatedBy: NSLayoutRelation.Equal, toItem: iconView, attribute: .CenterX, multiplier: 1, constant: 0))
        addConstraint(NSLayoutConstraint(item: circleView, attribute: NSLayoutAttribute.CenterY, relatedBy: NSLayoutRelation.Equal, toItem: iconView, attribute: NSLayoutAttribute.CenterY, multiplier: 1, constant: 0))
        // message label: 16pt below the circle, centered, fixed 224pt width
        messageLabel.translatesAutoresizingMaskIntoConstraints = false
        addConstraint(NSLayoutConstraint(item: messageLabel , attribute: NSLayoutAttribute.Top, relatedBy: NSLayoutRelation.Equal, toItem: circleView, attribute: NSLayoutAttribute.Bottom, multiplier: 1, constant: 16))
        addConstraint(NSLayoutConstraint(item: messageLabel, attribute: NSLayoutAttribute.CenterX, relatedBy: NSLayoutRelation.Equal, toItem: circleView, attribute: NSLayoutAttribute.CenterX, multiplier: 1, constant: 0))
        addConstraint(NSLayoutConstraint(item: messageLabel, attribute:.Width , relatedBy: NSLayoutRelation.Equal, toItem: nil , attribute: .NotAnAttribute, multiplier: 1, constant: 224))
        // register button: left-aligned under the label, 100pt wide
        rigisterButton.translatesAutoresizingMaskIntoConstraints = false
        addConstraint(NSLayoutConstraint(item: rigisterButton, attribute: NSLayoutAttribute.Left, relatedBy: NSLayoutRelation.Equal, toItem: messageLabel, attribute: NSLayoutAttribute.Left, multiplier: 1, constant: 0))
        addConstraint(NSLayoutConstraint(item: rigisterButton, attribute: NSLayoutAttribute.Top, relatedBy: NSLayoutRelation.Equal, toItem: messageLabel, attribute: NSLayoutAttribute.Bottom, multiplier: 1, constant: 10))
        addConstraint(NSLayoutConstraint(item: rigisterButton, attribute: .Width, relatedBy: NSLayoutRelation.Equal, toItem: nil, attribute: .NotAnAttribute, multiplier: 1, constant: 100))
        // login button: right-aligned under the label, 100pt wide
        loginButton.translatesAutoresizingMaskIntoConstraints = false
        addConstraint(NSLayoutConstraint(item: loginButton, attribute: NSLayoutAttribute.Right, relatedBy: NSLayoutRelation.Equal, toItem: messageLabel, attribute: NSLayoutAttribute.Right, multiplier: 1, constant: 0))
        addConstraint(NSLayoutConstraint(item: loginButton, attribute: NSLayoutAttribute.Top, relatedBy: NSLayoutRelation.Equal, toItem: messageLabel, attribute: NSLayoutAttribute.Bottom, multiplier: 1, constant: 10))
        addConstraint(NSLayoutConstraint(item: loginButton, attribute: .Width, relatedBy: NSLayoutRelation.Equal, toItem: nil, attribute: .NotAnAttribute, multiplier: 1, constant: 100))
        // // mask overlay (disabled):
        // maskIconView.translatesAutoresizingMaskIntoConstraints = false
        // addConstraints(NSLayoutConstraint.constraintsWithVisualFormat("H:|-0-[maskIconView]-0-|", options: [], metrics: nil, views: ["maskIconView": maskIconView]))
        //
        // addConstraints(NSLayoutConstraint.constraintsWithVisualFormat("V:|-0-[maskIconView]-(offset)-[registerButton]", options: [], metrics: ["offset" : -35], views: ["maskIconView": maskIconView,"loginButton": loginButton]))
        //
    }
}
<file_sep>//
// NEOTempViewController.swift
// XLweibo
//
// Created by mac on 15/12/6.
// Copyright © 2015年 dfdsaf. All rights reserved.
//
import UIKit
/// Throwaway controller used to exercise push-navigation depth.
class NEOTempViewController: UIViewController {
    override func viewDidLoad() {
        super.viewDidLoad()
        // Title shows the current navigation depth (0 when not embedded).
        title = "当前是第 \(navigationController?.childViewControllers.count ?? 0)级控制器"
        view.backgroundColor = UIColor.whiteColor()
        navigationItem.rightBarButtonItem = UIBarButtonItem(title: "PUSH", style: UIBarButtonItemStyle.Done, target: self, action: "push")
    }
    /// Pushes another instance of this controller onto the stack.
    @objc private func push (){
        let vc = NEOTempViewController()
        navigationController?.pushViewController(vc, animated: true)
    }
}
<file_sep>//
// NEODiscoverController.swift
// XLweibo
//
// Created by apple on 15/12/5.
// Copyright © 2015年 dfdsaf. All rights reserved.
//
import UIKit
/// "Discover" tab: shows a xib-backed search bar as the navigation title view.
class NEODiscoverController: NEOVisitorViewController {
    override func viewDidLoad() {
        super.viewDidLoad()
        setupUI()
    }
    func setupUI() {
        // Build the search view and stretch it across the navigation bar.
        let searchView = NEODiscoverView.seachView()
        searchView.frame = CGRect(x: 0, y: 0, width: UIScreen.mainScreen().bounds.width, height: 35)
        // Install it as the title view.
        navigationItem.titleView = searchView
    }
}
<file_sep>//
// NEOTabBarController.swift
// XLweibo
//
// Created by apple on 15/12/5.
// Copyright © 2015年 dfdsaf. All rights reserved.
//
import UIKit
/// Root tab bar controller: installs the custom NEOTabBar and the four main
/// tab controllers, each wrapped in its own navigation controller.
class NEOMainViewController: UITabBarController {
    override func viewDidLoad() {
        super.viewDidLoad()
        let tabBar = NEOTabBar()
        tabBar.composeButtonClosure = { [weak self] in
            print(self?.view)
        }
        // UITabBarController.tabBar is read-only, so install the custom bar
        // via KVC instead of direct assignment.
        setValue(tabBar, forKeyPath: "tabBar")
        addChildViewController(NEOHomeController() ,imageName: "tabbar_home" , title: "首页")
        addChildViewController(NEOMessegeController() ,imageName: "tabbar_message_center" , title: "消息")
        addChildViewController( NEODiscoverController () ,imageName: "tabbar_discover" , title: "发现")
        addChildViewController(NEOPrefileController() ,imageName: "tabbar_profile" , title: "我")
    }
    func dissmiss(){
        dismissViewControllerAnimated(true, completion: nil)
    }
    /// Configures one tab (title, normal/selected images, text attributes),
    /// wraps the controller in a navigation controller and adds it.
    func addChildViewController(childController: UIViewController , imageName:String , title : String) {
        childController.title = title
        // Tab icon.
        childController.tabBarItem.image = UIImage(named: imageName)
        // Render the selected image as-is (no tint).
        childController.tabBarItem.selectedImage = UIImage(named: "\(imageName)_selected")?.imageWithRenderingMode(.AlwaysOriginal)
        // Selected title color.
        childController.tabBarItem.setTitleTextAttributes([NSForegroundColorAttributeName :UIColor.orangeColor()], forState: .Selected)
        // Tab title font size.
        childController.tabBarItem.setTitleTextAttributes([NSFontAttributeName :UIFont.systemFontOfSize(14)], forState: .Normal)
        //
        // // When a tab icon is a lone image, nudge it down:
        // childController.tabBarItem.imageInsets = UIEdgeInsets(top: 5, left: 0, bottom: -5, right: 0)
        // Wrap in a navigation controller and register as a child.
        addChildViewController(UINavigationController (rootViewController: childController))
    }
}
<file_sep>//
// NEOHomeController.swift
// XLweibo
//
// Created by apple on 15/12/5.
// Copyright © 2015年 dfdsaf. All rights reserved.
//
import UIKit
/// "Home" tab controller; falls back to the visitor view when logged out.
class NEOHomeController: NEOVisitorViewController {
    override func viewDidLoad() {
        super.viewDidLoad()
        setupUI()
    }
    private func setupUI(){
        navigationItem.leftBarButtonItem = UIBarButtonItem(imageNamed: "navigationbar_friendsearch",target: self, action: "friendsearch")
        // NOTE(review): the right item reuses the "friendsearch" action —
        // confirm whether the pop button should get its own handler.
        navigationItem.rightBarButtonItem = UIBarButtonItem(imageNamed: "navigationbar_pop",target: self, action: "friendsearch")
    }
    /// Pushes a temporary controller (navigation demo).
    @objc private func friendsearch() {
        let vc = NEOTempViewController()
        navigationController?.pushViewController(vc, animated: true)
    }
}
<file_sep>//
// NEODiscoverView.swift
// XLweibo
//
// Created by mac on 15/12/5.
// Copyright © 2015年 dfdsaf. All rights reserved.
//
import UIKit
/// Xib-backed search bar for the Discover tab. Animates a Cancel button
/// in/out by adjusting the text field's trailing constraint constant.
class NEODiscoverView: UIView,UITextFieldDelegate{
    @IBOutlet weak var cancleButton: UIButton!
    @IBOutlet weak var textfiled: UITextField!
    // Trailing constraint of the text field; its constant reserves room
    // for the cancel button when editing.
    @IBOutlet weak var textfiledRightCon: NSLayoutConstraint!
    /// Loads the view from NEODiscoverView.xib.
    class func seachView () -> NEODiscoverView {
        return NSBundle.mainBundle().loadNibNamed("NEODiscoverView", owner: nil, options: nil).last! as! NEODiscoverView
    }
    override func awakeFromNib() {
    }
    /// Collapses the trailing constraint, hiding the cancel button.
    @IBAction func cancleButtonClick(sender: AnyObject) {
        self.textfiledRightCon.constant = 0
        UIView.animateWithDuration(0.25) { () -> Void in
            // Animate the constraint change.
            self.textfiled.layoutIfNeeded()
        }
    }
    /// Reveals the cancel button when editing begins.
    func textFieldDidBeginEditing(textField: UITextField) {
        self.textfiledRightCon.constant = self.cancleButton.frame.width
        UIView.animateWithDuration(0.25) { ( ) -> Void in
            self.textfiled.layoutIfNeeded()
        }
    }
}<file_sep>//
// UIView+IBExtension.swift
// XLweibo
//
// Created by mac on 15/12/5.
// Copyright © 2015年 dfdsaf. All rights reserved.
//
import UIKit
/// IBInspectable wrappers exposing common CALayer properties to
/// Interface Builder.
extension UIView {
    @IBInspectable var cornerRadius : CGFloat {
        get{
            return layer.cornerRadius
        }
        set{
            layer.cornerRadius = newValue
            // Clip sublayers so the rounded corners actually show.
            layer.masksToBounds = cornerRadius > 0
        }
    }
    @IBInspectable var borderColor: UIColor? {
        get{
            guard let c = layer.borderColor else {
                return nil
            }
            return UIColor(CGColor: c)
        }
        set{
            layer.borderColor = newValue?.CGColor
        }
    }
    // Layer border width passthrough.
    @IBInspectable var borderWidth: CGFloat {
        get{
            return layer.borderWidth
        }
        set{
            layer.borderWidth = newValue
        }
    }
}
<file_sep>//
// NEOTabBar.swift
// XLweibo
//
// Created by apple on 15/12/5.
// Copyright © 2015年 dfdsaf. All rights reserved.
//
import UIKit
/// Custom tab bar with a centered "compose" button. The system item slots
/// are re-laid-out manually so the middle slot stays free for the button.
class NEOTabBar: UITabBar {
    // Callback invoked when the compose button is tapped; assigned by the
    // owning tab bar controller.
    var composeButtonClosure : (() -> ())?
    override init(frame: CGRect) {
        super.init(frame : frame)
        setupUI()
    }
    required init?(coder aDecoder: NSCoder) {
        super.init(coder: aDecoder)
        setupUI()
    }
    // Adds the custom compose button to the bar.
    func setupUI(){
        addSubview(composeButton)
    }
    // Positions the system UITabBarButtons into 1/5-width slots (skipping
    // the middle one) and centers the compose button.
    override func layoutSubviews() {
        super.layoutSubviews()
        composeButton.center = CGPoint(x: frame.width * 0.5, y: frame.height * 0.5)
        let childW = frame.width / 5 // width of each item slot
        // Walk the subviews and lay out each tab bar button.
        var index = 0
        for childView in subviews {
            if childView.isKindOfClass(NSClassFromString("UITabBarButton")!) {
                // Place this button in the slot at `index`.
                let x = CGFloat(index) * childW
                childView.frame = CGRect(x: x, y: 0, width: childW, height: frame.height)
                index++
                // Skip slot 2 so the compose button keeps the middle spot.
                if index == 2 {
                    index++
                }
            }
        }
    }
    // MARK: actions
    @objc private func composeButtonClick () {
        // Forward the tap to the injected closure, if any.
        composeButtonClosure? ()
    }
    // MARK: - lazily created controls
    // Compose button shown in the middle of the bar.
    lazy var composeButton: UIButton = {
        let button = UIButton()
        // Tap handler.
        button.addTarget(self, action: "composeButtonClick", forControlEvents: UIControlEvents.TouchUpInside)
        // Background images per state.
        button.setBackgroundImage(UIImage(named: "tabbar_compose_button"), forState: .Normal)
        button.setBackgroundImage(UIImage(named: "tabbar_compose_button_highlighted"), forState: .Highlighted)
        // Foreground images per state.
        button.setImage(UIImage(named: "tabbar_compose_icon_add"), forState: UIControlState.Normal)
        button.setImage(UIImage(named: "tabbar_compose_icon_add_highlighted"), forState: UIControlState.Highlighted)
        // Size the button to its content.
        button.sizeToFit()
        return button
    }()
}
| c24827c16aa85fc0ee7c08a0ab1ff7362310157e | [
"Swift"
] | 11 | Swift | NEOLYH/xlwb | c807b8bd5aa6d40f4ca8d4b206a25c87ce4badf6 | be9bf0e764d079c8cfc760991379ea9acd205a75 |
refs/heads/master | <file_sep>"# batch2-movie-store"
<file_sep>package com.cubit.it.servlet;
import java.io.IOException;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.util.ArrayList;
import java.util.List;
import javax.servlet.ServletException;
import javax.servlet.annotation.WebServlet;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import com.cubit.it.entity.UserEntity;
import com.cubit.it.utils.SQLConnUtil;
@WebServlet("/users")
public class UserDataServlet extends HttpServlet {
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
try {
//Fetching all the rows no where class
String sql="select uid,userid,password,name,email,mobile,salutation,image,createdate,role from users_tbl";
Connection connection=SQLConnUtil.getConnection();
//compiling the query
PreparedStatement pstmt=connection.prepareStatement(sql);
//fire the query
ResultSet rs=pstmt.executeQuery();
//ResultSet has multiple records
List<UserEntity> userList=new ArrayList<>();
while(rs.next()) {
//public UserEntity(String userid, String password, String email, String name, String mobile, String image,String salutation) {
UserEntity entity=new UserEntity(rs.getString(2),rs.getString(3),
rs.getString(5),rs.getString(4), rs.getLong(6)+"",rs.getString(8),rs.getString(7));
//Setting the uid
entity.setUid(rs.getInt(1));
entity.setCreateDate(rs.getTimestamp(9));
entity.setRole(rs.getString(10));
userList.add(entity);
}
//Adding List into request scope against key "mark"
//using
req.setAttribute("mark",userList);
req.getRequestDispatcher("users.jsp").forward(req, resp);
}catch (Exception e) {
e.printStackTrace();
}
}
}
<file_sep>package com.cubit.it.servlet;
import java.io.IOException;
import java.util.List;
import javax.servlet.ServletException;
import javax.servlet.annotation.WebServlet;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import com.cubit.it.dao.CubicDao;
import com.cubit.it.dao.CubicDaoImpl;
import com.cubit.it.entity.BlockTimeEntity;
@WebServlet("/blockTime")
public class BlockTimeServlet extends HttpServlet {
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
CubicDao cubicDao =new CubicDaoImpl();
//Adding List into request scope against key "blockTimeList"
List<BlockTimeEntity> blockTimeList=cubicDao.findBlockTimes();
req.setAttribute("blockTimeList",blockTimeList);
req.getRequestDispatcher("blockTime.jsp").forward(req, resp);
}
}
<file_sep>/*
Navicat MySQL Data Transfer
Source Server : LOCA
Source Server Version : 50726
Source Host : localhost:3306
Source Database : happy_hrs_db
Target Server Type : MYSQL
Target Server Version : 50726
File Encoding : 65001
Date: 2020-05-05 18:06:33
*/
SET FOREIGN_KEY_CHECKS=0;
-- ----------------------------
-- Table structure for `users_tbl`
-- ----------------------------
DROP TABLE IF EXISTS `users_tbl`;
CREATE TABLE `users_tbl` (
`uid` int(11) NOT NULL AUTO_INCREMENT,
`userid` varchar(50) DEFAULT NULL,
`password` varchar(30) DEFAULT NULL,
`name` varchar(100) DEFAULT NULL,
`email` varchar(100) DEFAULT NULL,
`mobile` bigint(20) DEFAULT NULL,
`salutation` varchar(4) DEFAULT NULL,
`image` varchar(500) DEFAULT NULL,
`createdate` timestamp NULL DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (`uid`)
) ENGINE=InnoDB AUTO_INCREMENT=4 DEFAULT CHARSET=latin1;
-- ----------------------------
-- Records of users_tbl
-- ----------------------------
INSERT INTO `users_tbl` VALUES ('1', 'yadna01', 'test', '<NAME>', '<EMAIL>', '923489438', 'Mr.', 'https://www.goodmorningimagesdownload.com/wp-content/uploads/2019/10/Nice-Whatsapp-Dp-Profile-Images-101-300x300.jpg', '2020-05-05 17:09:19');
INSERT INTO `users_tbl` VALUES ('2', '<EMAIL>', 'tetetete', 'JavaHunk Technologies', '<EMAIL>', '8700134973', 'Mr.', 'https://www.goodmorningimagesdownload.com/wp-content/uploads/2019/10/Nice-Whatsapp-Dp-Profile-Images-101-300x300.jpg', '2020-05-05 17:27:25');
INSERT INTO `users_tbl` VALUES ('3', '<EMAIL>', 'cool', 'Amita', '<EMAIL>', '3242424234', 'Miss', 'https://www.goodmorningimagesdownload.com/wp-content/uploads/2019/10/Nice-Whatsapp-Dp-Profile-Images-101-300x300.jpg', '2020-05-05 17:57:07');
| 80a7d389410709601471c0418f30d369b3bef89f | [
"Markdown",
"Java",
"SQL"
] | 4 | Markdown | JavaHunk2020/batch2-movie-store | b6cc9f4f1ceeeedf286a4d59164b8db001f02ee6 | 4d6ec9f4cddebdc3a42a7a6c8a3dc45b758b88e8 |
refs/heads/master | <repo_name>jakeatwork/axs-user_research<file_sep>/spec/routing/feedback_routing_spec.rb
require "spec_helper"
# Routing specs for FeedbackController: verifies the seven RESTful routes
# generated by `resources :feedback`.
describe FeedbackController do
  describe "routing" do
    it "routes to #index" do
      get("/feedback").should route_to("feedback#index")
    end
    it "routes to #new" do
      get("/feedback/new").should route_to("feedback#new")
    end
    it "routes to #show" do
      get("/feedback/1").should route_to("feedback#show", :id => "1")
    end
    it "routes to #edit" do
      get("/feedback/1/edit").should route_to("feedback#edit", :id => "1")
    end
    it "routes to #create" do
      post("/feedback").should route_to("feedback#create")
    end
    it "routes to #update" do
      put("/feedback/1").should route_to("feedback#update", :id => "1")
    end
    it "routes to #destroy" do
      delete("/feedback/1").should route_to("feedback#destroy", :id => "1")
    end
  end
end
<file_sep>/app/views/feedback/index.json.jbuilder
# Renders the feedback collection as a JSON array; each entry exposes the
# feedback attributes plus its resource URL.
json.array!(@feedback) do |feedback|
  json.extract! feedback, :person, :role, :email, :product_type, :business_unit, :feedback, :feedback_type, :feedback_date, :purpose, :version, :feedback_product
  json.url feedback_url(feedback, format: :json)
end
<file_sep>/spec/factories/feedback.rb
# Read about factories at https://github.com/thoughtbot/factory_girl
# Default Feedback factory with scaffold placeholder values per column.
FactoryGirl.define do
  factory :feedback do
    person "MyString"
    role "MyString"
    email "MyString"
    product_type "MyString"
    business_unit "MyString"
    feedback "MyText"
    feedback_type "MyString"
    feedback_date "2013-08-11"
    purpose "MyString"
    version "MyString"
    feedback_product "MyString"
  end
end
<file_sep>/spec/models/feedback_spec.rb
# == Schema Information
#
# Table name: feedback
#
# id :integer not null, primary key
# person :string(255)
# role :string(255)
# email :string(255)
# product_type :string(255)
# business_unit :string(255)
# feedback :text
# feedback_type :string(255)
# feedback_date :date
# purpose :string(255)
# version :string(255)
# feedback_product :string(255)
# created_at :datetime
# updated_at :datetime
#
require 'spec_helper'
# Model specs for Feedback — currently an empty scaffold placeholder.
describe Feedback do
  pending "add some examples to (or delete) #{__FILE__}"
end
<file_sep>/db/migrate/20130811183347_create_feedback.rb
# Creates the feedback table backing the Feedback model (see the schema
# annotation in app/models/feedback.rb).
class CreateFeedback < ActiveRecord::Migration
  def change
    create_table :feedback do |t|
      t.string :person
      t.string :role
      t.string :email
      t.string :product_type
      t.string :business_unit
      t.text :feedback
      t.string :feedback_type
      t.date :feedback_date
      t.string :purpose
      t.string :version
      t.string :feedback_product
      t.timestamps
    end
  end
end
<file_sep>/app/models/feedback.rb
# == Schema Information
#
# Table name: feedback
#
# id :integer not null, primary key
# person :string(255)
# role :string(255)
# email :string(255)
# product_type :string(255)
# business_unit :string(255)
# feedback :text
# feedback_type :string(255)
# feedback_date :date
# purpose :string(255)
# version :string(255)
# feedback_product :string(255)
# created_at :datetime
# updated_at :datetime
#
# Feedback record with full-text search (pg_search, English dictionary)
# over the person/role/feedback/type columns.
class Feedback < ActiveRecord::Base
  include PgSearch
  pg_search_scope :search, against: [:person, :role, :feedback, :product_type, :feedback_type, :business_unit],
    using: {tsearch: {dictionary: "english"}}

  # Returns records matching +query+, or every record when the query is
  # blank, so callers can chain pagination either way.
  def self.text_search(query)
    if query.present?
      search(query)
    else
      # `Model.scoped` is deprecated in Rails 4 (this app pins rails 4.0.0);
      # `all` is the supported equivalent and returns the same relation.
      all
    end
  end
end
<file_sep>/app/controllers/feedback_controller.rb
# Feedback CRUD via InheritedResources; #index adds full-text search and
# pagination. Every action requires an authenticated user.
class FeedbackController < InheritedResources::Base
  before_filter :require_login
  def index
    # Blank queries fall through to all records (see Feedback.text_search).
    @feedback = Feedback.text_search(params[:query]).page(params[:page]).per_page(10)
  end
  private
  # Gate: bounce anonymous users to the sign-in page (presumably the
  # Devise session route — confirm against the routes file).
  def require_login
    unless current_user
      redirect_to new_user_session_path
    end
  end
end
<file_sep>/Gemfile
source 'https://rubygems.org'
ruby '2.0.0'
gem 'rails', '4.0.0'
gem 'sass-rails', '~> 4.0.0'
gem 'uglifier', '>= 1.3.0'
gem 'coffee-rails', '~> 4.0.0'
gem 'jquery-rails'
gem 'turbolinks'
gem 'jbuilder', '~> 1.2'
gem 'bootstrap-sass'
gem 'activeadmin', github: 'gregbell/active_admin', branch: 'rails4'
gem 'ransack', github: 'ernie/ransack', branch: 'rails-4'
gem 'inherited_resources', github: 'josevalim/inherited_resources'
gem 'formtastic', github: 'justinfrench/formtastic'
gem 'annotate', ">=2.5.0"
gem 'devise'
gem 'will_paginate', '~> 3.0'
gem "jqcloud-rails"
gem 'figaro'
gem 'pg_search'
gem 'rails_12factor'
gem 'pg'
gem 'simple_form', '>= 3.0.0.rc'
gem 'thin'
group :development do
gem 'better_errors'
gem 'binding_of_caller', :platforms=>[:mri_19, :rbx]
gem 'guard-bundler'
gem 'guard-rails'
gem 'guard-rspec'
gem 'hub', :require=>nil
gem 'rb-fchange', :require=>false
gem 'rb-fsevent', :require=>false
gem 'rb-inotify', :require=>false
end
group :development, :test do
gem 'factory_girl_rails'
gem 'rspec-rails'
end
group :test do
gem 'capybara'
gem 'database_cleaner'
gem 'email_spec'
end
| 8ce1430e5d66bd31f8c5f3094ee109e0b02d126c | [
"Ruby"
] | 8 | Ruby | jakeatwork/axs-user_research | a1322f157ef931d2f87d8516f13542c8af6fd43e | 289312ea383ca9d2b37c1f4e57d36d679dc273fd |
refs/heads/master | <repo_name>Shadas/leetcode_notes<file_sep>/algorithms/_989_Add_to_Array_Form_of_Integer/answer_test.go
package _989_Add_to_Array_Form_of_Integer
import "testing"
// isSame reports whether s1 and s2 hold the same ints in the same order.
// Slices of different lengths are never equal; nil and empty compare equal.
func isSame(s1, s2 []int) bool {
	if len(s1) != len(s2) {
		return false
	}
	for idx, v := range s1 {
		if s2[idx] != v {
			return false
		}
	}
	return true
}
// TestAddToArrayForm checks that array-form number [0] plus 23 yields
// the array form [2, 3] (the leading zero must not survive).
func TestAddToArrayForm(t *testing.T) {
	if ret := addToArrayForm([]int{0}, 23); !isSame(ret, []int{2, 3}) {
		t.Errorf("wrong with %v", ret)
	}
}
<file_sep>/algorithms/_19_Remove_Nth_Node_From_End_of_List/answer.go
package _19_Remove_Nth_Node_From_End_of_List
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// ListNode is a singly linked list node.
type ListNode struct {
	Val  int
	Next *ListNode
}

// removeNthFromEnd deletes the n-th node from the end of the list and
// returns the (possibly new) head. Classic two-pointer gap technique with
// a dummy node so removing the head needs no special case.
func removeNthFromEnd(head *ListNode, n int) *ListNode {
	dummy := &ListNode{Next: head}
	lead, trail := dummy, dummy
	// Advance lead n steps so the gap between lead and trail is n.
	for step := 0; step < n; step++ {
		lead = lead.Next
	}
	// Move both until lead sits on the last node; trail is then just
	// before the node to delete.
	for lead.Next != nil {
		lead = lead.Next
		trail = trail.Next
	}
	trail.Next = trail.Next.Next
	return dummy.Next
}
<file_sep>/algorithms/_581_Shortest_Unsorted_Continuous_Subarray/QD.md
Given an integer array nums, you need to find one continuous subarray such that if you only sort this subarray in
non-decreasing order, then the whole array will be sorted in non-decreasing order.
Return the shortest such subarray and output its length.
Example 1:
```
Input: nums = [2,6,4,8,10,9,15]
Output: 5
Explanation: You need to sort [6, 4, 8, 10, 9] in ascending order to make the whole array sorted in ascending order.
```
Example 2:
```
Input: nums = [1,2,3,4]
Output: 0
```
Example 3:
```
Input: nums = [1]
Output: 0
```
Constraints:
- 1 <= nums.length <= 10^4
- -105 <= nums[i] <= 105<file_sep>/algorithms/_207_Course_Schedule/answer.go
package _207_Course_Schedule
// canFinish reports whether all numCourses courses can be completed, where
// each prerequisites entry [a, b] means course b must be taken before a.
// The schedule is feasible iff the prerequisite graph is acyclic, checked
// with Kahn's BFS topological sort.
//
// Uses an adjacency list instead of the original n×n matrix, cutting the
// cost from O(V^2) time/space to O(V+E). Duplicate prerequisite pairs are
// harmless: they raise the indegree and the edge count equally.
func canFinish(numCourses int, prerequisites [][]int) bool {
	adj := make([][]int, numCourses)    // adj[pre] lists courses unlocked by pre
	indegree := make([]int, numCourses) // indegree[c] = remaining prerequisites of c
	for _, p := range prerequisites {
		ready, pre := p[0], p[1]
		adj[pre] = append(adj[pre], ready)
		indegree[ready]++
	}
	// Seed the queue with every course that has no prerequisites.
	queue := make([]int, 0, numCourses)
	for course, d := range indegree {
		if d == 0 {
			queue = append(queue, course)
		}
	}
	// Repeatedly take a ready course and release its dependents.
	taken := 0
	for len(queue) > 0 {
		course := queue[0]
		queue = queue[1:]
		taken++
		for _, next := range adj[course] {
			indegree[next]--
			if indegree[next] == 0 {
				queue = append(queue, next)
			}
		}
	}
	// Every course was taken iff the graph has no cycle.
	return taken == numCourses
}
<file_sep>/algorithms/_918_Maximum_Sum_Circular_Subarray/QD.md
Given a circular integer array nums of length n, return the maximum possible sum of a non-empty subarray of nums.
A circular array means the end of the array connects to the beginning of the array. Formally, the next element of nums[i] is nums[(i + 1) % n] and the previous element of nums[i] is nums[(i - 1 + n) % n].
A subarray may only include each element of the fixed buffer nums at most once. Formally, for a subarray nums[i], nums[i + 1], ..., nums[j], there does not exist `i <= k1, k2 <= j with k1 % n == k2 % n`.
Example 1:
```
Input: nums = [1,-2,3,-2]
Output: 3
Explanation: Subarray [3] has maximum sum 3.
```
Example 2:
```
Input: nums = [5,-3,5]
Output: 10
Explanation: Subarray [5,5] has maximum sum 5 + 5 = 10.
```
Example 3:
```
Input: nums = [-3,-2,-3]
Output: -2
Explanation: Subarray [-2] has maximum sum -2.
```
Constraints:
- n == nums.length
- 1 <= n <= 3 * 10^4
- -3 * 10^4 <= nums[i] <= 3 * 10^4
<file_sep>/algorithms/_71_Simplify_Path/answer.go
package _71_Simplify_Path
import "strings"
// simplifyPath returns the canonical form of an absolute Unix-style path:
// redundant slashes are collapsed, "." segments are dropped, and ".."
// segments pop the previous directory (never above the root).
func simplifyPath(path string) string {
	stack := []string{}
	for _, segment := range strings.Split(path, "/") {
		switch segment {
		case "", ".":
			// "" comes from leading/trailing/double slashes; "." is the
			// current directory — both are no-ops.
		case "..":
			// Go up one directory if there is one to go up from.
			if n := len(stack); n > 0 {
				stack = stack[:n-1]
			}
		default:
			stack = append(stack, segment)
		}
	}
	return "/" + strings.Join(stack, "/")
}
<file_sep>/algorithms/_132_Palindrome_Partitioning_2/answer_test.go
package _132_Palindrome_Partitioning_2
import (
"fmt"
"testing"
)
// TestMinCut prints the minimum palindrome-partition cuts for "ca"
// (expected: 1, splitting into "c" | "a").
// NOTE(review): this only prints the result; consider asserting it.
func TestMinCut(t *testing.T) {
	var s string
	s = "ca"
	fmt.Println(minCut(s))
}
<file_sep>/algorithms/_449_Serialize_and_Deserialize_BST/answer.go
package _449_Serialize_and_Deserialize_BST
import (
"strconv"
"strings"
)
// TreeNode is a binary tree node (LeetCode's standard definition).
type TreeNode struct {
	Val int
	Left *TreeNode
	Right *TreeNode
}
// Codec (de)serializes binary search trees. It is stateless; the wire
// format is a comma-separated BFS listing with "nil" placeholders.
type Codec struct {
}

// Constructor returns a ready-to-use Codec.
func Constructor() Codec {
	return Codec{}
}
// Serializes a tree to a single string.
//
// Encoding: a breadth-first (level-order) walk. Each visited node emits
// its value in base 10 followed later by exactly two child tokens (left,
// right), with absent children written as the literal "nil"; all tokens
// are comma-joined. The BST property is not exploited — this is a generic
// binary-tree serializer that round-trips through deserialize below.
func (this *Codec) serialize(root *TreeNode) string {
	var (
		nodeValue []string    // encoded tokens in BFS order
		line      []*TreeNode // nodes of the level currently being expanded
	)
	// Emit the root (or "nil" for the empty tree) before walking levels.
	if root != nil {
		line = append(line, root)
		nodeValue = append(nodeValue, strconv.FormatInt(int64(root.Val), 10))
	} else {
		nodeValue = append(nodeValue, "nil")
	}
	for len(line) != 0 {
		var tmpLine []*TreeNode // next level: only non-nil children carry over
		for _, node := range line {
			// Two tokens per visited node so deserialize can consume pairwise.
			if node.Left == nil {
				nodeValue = append(nodeValue, "nil")
			} else {
				nodeValue = append(nodeValue, strconv.FormatInt(int64(node.Left.Val), 10))
				tmpLine = append(tmpLine, node.Left)
			}
			if node.Right == nil {
				nodeValue = append(nodeValue, "nil")
			} else {
				nodeValue = append(nodeValue, strconv.FormatInt(int64(node.Right.Val), 10))
				tmpLine = append(tmpLine, node.Right)
			}
		}
		line = tmpLine
	}
	return strings.Join(nodeValue, ",")
}
// Deserializes your encoded data to tree.
//
// Mirrors serialize: tokens are consumed in BFS order, two per live node
// (left child, then right child). The labeled break exits both loops as
// soon as the token stream is exhausted.
func (this *Codec) deserialize(data string) *TreeNode {
	nodeValues := strings.Split(data, ",")
	// A lone "nil" token encodes the empty tree.
	if len(nodeValues) == 1 && nodeValues[0] == "nil" {
		return nil
	}
	rootVal, _ := strconv.Atoi(nodeValues[0])
	root := &TreeNode{Val: rootVal}
	line := []*TreeNode{root} // current level awaiting its children
	curIdx := 1               // next unread token index in nodeValues
Finish:
	for len(line) != 0 {
		var tmpLine []*TreeNode // next level: only non-nil children carry over
		for _, node := range line {
			if curIdx >= len(nodeValues) {
				break Finish
			}
			leftx := nodeValues[curIdx]
			if leftx == "nil" {
				node.Left = nil
			} else {
				leftValue, _ := strconv.Atoi(leftx)
				node.Left = &TreeNode{Val: leftValue}
				tmpLine = append(tmpLine, node.Left)
			}
			curIdx++
			if curIdx >= len(nodeValues) {
				break Finish
			}
			rightx := nodeValues[curIdx]
			if rightx == "nil" {
				node.Right = nil
			} else {
				rightValue, _ := strconv.Atoi(rightx)
				node.Right = &TreeNode{Val: rightValue}
				tmpLine = append(tmpLine, node.Right)
			}
			curIdx++
		}
		line = tmpLine
	}
	return root
}
/**
* Your Codec object will be instantiated and called as such:
* ser := Constructor()
* deser := Constructor()
* tree := ser.serialize(root)
* ans := deser.deserialize(tree)
* return ans
*/
<file_sep>/algorithms/_139_Word_Break/answer_test.go
package _139_Word_Break
import (
"fmt"
"testing"
)
// TestWordBreak prints whether "leetcode" can be segmented using the
// dictionary ["leet", "code"] (expected: true).
// NOTE(review): this only prints the result; consider asserting it.
func TestWordBreak(t *testing.T) {
	var str string
	var wordDict []string
	str = "leetcode"
	wordDict = []string{"leet", "code"}
	fmt.Println(wordBreak(str, wordDict))
}
<file_sep>/algorithms/_7_Reverse_Integer/answer_test.go
package _7_Reverse_Integer
import (
"testing"
)
// TestReverse exercises digit reversal edge cases: negatives, trailing
// zeros, and 32-bit overflow (which must yield 0).
func TestReverse(t *testing.T) {
	// Negative input keeps its sign.
	if ret := reverse(-12); ret != -21 {
		t.Error("not -21 with -12.")
	}
	// Trailing zeros disappear after reversal.
	if ret := reverse(1200); ret != 21 {
		t.Error("not 21 with 1200.")
	}
	// Reversal would overflow int32, so 0 is expected.
	if ret := reverse(1534236469); ret != 0 {
		t.Error("not 0 with 1534236469.")
	}
	// Near-minimum int32 input that reverses without overflow.
	if ret := reverse(-2147483412); ret != -2143847412 {
		t.Error("not -2143847412 with -2147483412.")
	}
	// Reverses to just below int32 max.
	if ret := reverse(1463847412); ret != 2147483641 {
		t.Error("not 2147483641 with 1463847412.")
	}
}
<file_sep>/algorithms/_515_Find_Largest_Value_in_Each_Tree_Row/QD.md
Given the root of a binary tree, return an array of the largest value in each row of the tree (0-indexed).
Example 1:
```
Input: root = [1,3,2,5,3,null,9]
Output: [1,3,9]
```
Example 2:
```
Input: root = [1,2,3]
Output: [1,3]
```
Example 3:
```
Input: root = [1]
Output: [1]
```
Example 4:
```
Input: root = [1,null,2]
Output: [1,2]
```
Example 5:
```
Input: root = []
Output: []
```
Constraints:
- The number of nodes in the tree will be in the range [0, 10^4].
- -2^31 <= Node.val <= 2^31 - 1
----
层序遍历,每层计算一次即可<file_sep>/algorithms/_51_N_Queens/answer.go
package _51_N_Queens
// solveNQueens returns all distinct solutions to the n-queens puzzle,
// each rendered as a board of strings ('Q' = queen, '.' = empty).
func solveNQueens(n int) [][]string {
	return solveNQueensWithDFS(n)
}
// solveNQueensWithDFS solves n-queens by backtracking row by row, then
// renders the numeric placements as string boards.
func solveNQueensWithDFS(n int) [][]string {
	var (
		pos []int // placement: index = row, value = column; dfs fills row by row
		tmp [][]int // intermediate results (one []int per complete placement)
		result [][]string
	)
	dfs(n, pos, &tmp)
	result = transform(n, tmp)
	return result
}
// transform renders each raw solution (index = row, value = queen column)
// as a board: one string per row with 'Q' at the queen's column and '.'
// everywhere else. Returns nil when tmp is empty.
func transform(n int, tmp [][]int) [][]string {
	var result [][]string
	for _, ret := range tmp {
		res := make([]string, 0, n)
		for _, t := range ret {
			// Build the row in a byte slice instead of repeated string
			// concatenation (the original was O(n^2) per row).
			row := make([]byte, n)
			for i := range row {
				row[i] = '.'
			}
			row[t] = 'Q'
			res = append(res, string(row))
		}
		result = append(result, res)
	}
	return result
}
// abs returns the absolute value of a.
func abs(a int) int {
	if a >= 0 {
		return a
	}
	return -a
}
// dfs backtracks row by row: pos holds the chosen column per already
// placed row; once all n rows are filled a copy of pos is appended to ret.
func dfs(n int, pos []int, ret *[][]int) {
	if len(pos) == n {
		// Complete placement: store a copy (pos is mutated while backtracking).
		newPos := make([]int, len(pos))
		copy(newPos, pos)
		*ret = append(*ret, newPos)
		return
	}
column_loop:
	for i := 0; i < n; i++ { // try placing this row's queen in column i
		// Skip columns already taken by a previous row.
		for _, p := range pos {
			if i == p {
				continue column_loop
			}
		}
		// Check whether any placed queen shares a diagonal with (row, i).
		existDiagonal := false
		for line, col := range pos {
			if abs(len(pos)-line) == abs(col-i) {
				existDiagonal = true
				break
			}
		}
		// Diagonal conflict: try the next column.
		if existDiagonal {
			continue
		}
		// Safe to place here.
		pos = append(pos, i)
		// Recurse into the next row.
		dfs(n, pos, ret)
		// Undo the placement and explore the remaining columns.
		pos = pos[:len(pos)-1]
	}
	return
}
<file_sep>/algorithms/_54_Spiral_Matrix/answer.go
package _54_Spiral_Matrix
// Dire enumerates the four walking directions of the spiral traversal.
type Dire int
const (
	Right Dire = iota
	Down
	Left
	Up
)
// Pos is a matrix coordinate: X is the column index, Y is the row index.
type Pos struct {
	X, Y int
}
// spiralOrder returns the matrix elements in clockwise spiral order
// (LeetCode 54), using a visited matrix (record) and newPos to step/turn.
func spiralOrder(matrix [][]int) []int {
	var ret []int
	if len(matrix) == 0 { // no rows: nothing to emit
		return ret
	}
	if len(matrix[0]) == 0 { // no columns: nothing to emit
		return ret
	}
	count := 0 // number of cells emitted so far
	record := make([][]int, len(matrix))
	for i := 0; i < len(matrix); i++ {
		record[i] = make([]int, len(matrix[0]))
	}
	// Start at the top-left corner, walking right.
	pos := Pos{0, 0}
	dire := Right
	ret = make([]int, len(matrix)*len(matrix[0]))
	for count < len(matrix)*len(matrix[0]) { // stop once every cell is emitted
		x := matrix[pos.Y][pos.X]
		ret[count] = x
		pos, record, dire = newPos(pos, record, dire)
		count++
	}
	return ret
}
// newPos marks the current cell visited and advances one step in the
// current walking direction; when the next cell is out of bounds or
// already visited it undoes the step and turns clockwise
// (Right -> Down -> Left -> Up -> Right), stepping in the new direction.
func newPos(pos Pos, record [][]int, dire Dire) (newPos Pos, newRecord [][]int, newDire Dire) {
	// Mark the current cell as visited.
	record[pos.Y][pos.X] = 1
	newRecord = record
	newDire = dire
	// Try to move; on failure, restore and turn.
	switch dire {
	case Right:
		pos.X += 1
		if pos.X >= len(record[0]) || record[pos.Y][pos.X] == 1 {
			pos.X -= 1 // restore
			pos.Y += 1
			newDire = Down
		}
	case Down:
		pos.Y += 1
		if pos.Y >= len(record) || record[pos.Y][pos.X] == 1 {
			pos.X -= 1
			pos.Y -= 1
			newDire = Left
		}
	case Left:
		pos.X -= 1
		if pos.X < 0 || record[pos.Y][pos.X] == 1 {
			pos.X += 1
			pos.Y -= 1
			newDire = Up
		}
	case Up:
		pos.Y -= 1
		// BUG FIX: a vertical move must be bounds-checked on Y, not X.
		// The original tested pos.X < 0 here, so record[pos.Y] could be
		// indexed with a negative row and panic.
		if pos.Y < 0 || record[pos.Y][pos.X] == 1 {
			pos.X += 1
			pos.Y += 1
			newDire = Right
		}
	}
	newPos = pos
	return
}
<file_sep>/algorithms/_53_Maximum_Subarray/answer.go
package _53_Maximum_Subarray
import (
"math"
)
// maxSubArray returns the largest sum over all contiguous subarrays of
// nums (LeetCode 53), delegating to the 1-D DP implementation.
func maxSubArray(nums []int) int {
	return maxSubArrayDP1(nums)
}
// maxSubArrayDP1 solves Maximum Subarray with 1-D DP:
// dp[i] = nums[i] + max(dp[i-1], 0); the answer is the max over dp.
// Returns 0 for an empty slice instead of panicking (the original
// indexed nums[0] unconditionally).
func maxSubArrayDP1(nums []int) int {
	n := len(nums)
	if n == 0 {
		return 0
	}
	dp := make([]int, n)
	dp[0] = nums[0]
	max := dp[0]
	for i := 1; i < n; i++ {
		var x int
		if dp[i-1] > 0 { // only extend the previous run when it helps
			x = dp[i-1]
		} else {
			x = 0
		}
		dp[i] = nums[i] + x
		max = int(math.Max(float64(max), float64(dp[i])))
	}
	return max
}
<file_sep>/algorithms/_82_Remove_Duplicates_from_Sorted_List_2/answer.go
package _82_Remove_Duplicates_from_Sorted_List_2
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// ListNode is a singly-linked list node (declared locally so the package
// compiles standalone).
type ListNode struct {
	Val int
	Next *ListNode
}
// deleteDuplicates removes every value that appears more than once in a
// sorted list (LeetCode 82), keeping only values that occur exactly once.
// It builds a fresh result list rather than relinking the input nodes.
func deleteDuplicates(head *ListNode) *ListNode {
	if head == nil {
		return nil
	}
	var ret *ListNode     // head of the result list
	var retTail *ListNode // tail of the result list, for O(1) appends
	var lastValue int     // value of the current run of equal nodes
	var isRepeated bool   // whether lastValue occurred more than once
	if head.Next == nil {
		return head
	} else {
		lastValue = head.Val
	}
	for head.Next != nil {
		head = head.Next
		if head.Val == lastValue {
			isRepeated = true
		} else {
			// Run ended: emit lastValue only if it was unique.
			if !isRepeated {
				if ret == nil {
					ret = &ListNode{
						Val: lastValue,
					}
					retTail = ret
				} else {
					retTail.Next = &ListNode{
						Val: lastValue,
					}
					retTail = retTail.Next
				}
			} else {
				isRepeated = false
			}
			lastValue = head.Val
		}
	}
	// Flush the final run.
	if !isRepeated {
		if ret == nil {
			ret = &ListNode{
				Val: lastValue,
			}
			retTail = ret
		} else {
			retTail.Next = &ListNode{
				Val: lastValue,
			}
			retTail = retTail.Next
		}
	}
	return ret
}
<file_sep>/algorithms/_938_Range_Sum_of_BST/answer.go
package _938_Range_Sum_of_BST
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// rangeSumBST returns the sum of all BST node values within [L, R]
// (LeetCode 938).
func rangeSumBST(root *TreeNode, L int, R int) int {
	// return rangeSumBSTUnrecursion(root, L, R)
	return rangeSumBSTRecursion(root, L, R)
}
// rangeSumBSTUnrecursion sums the values in [L, R] iteratively with an
// explicit stack, pruning subtrees the BST ordering rules out.
func rangeSumBSTUnrecursion(root *TreeNode, L int, R int) int {
	var (
		l   []*TreeNode = []*TreeNode{}
		sum int
	)
	l = append(l, root)
	for len(l) > 0 {
		x := l[len(l)-1]
		l = l[:len(l)-1]
		if x == nil {
			continue
		}
		if x.Val < L {
			// Too small: only the right subtree can hold in-range values.
			if x.Right != nil {
				l = append(l, x.Right)
			}
		} else if x.Val > R {
			// Too large: only the left subtree can hold in-range values.
			if x.Left != nil {
				l = append(l, x.Left)
			}
		} else {
			sum += x.Val
			l = append(l, x.Left, x.Right)
		}
	}
	return sum
}
// rangeSumBSTRecursion sums values in [L, R] recursively, descending only
// into subtrees that can still contain in-range values.
// NOTE(review): root is dereferenced unconditionally, so a non-nil tree
// is assumed — confirm against the problem's constraints.
func rangeSumBSTRecursion(root *TreeNode, L int, R int) int {
	var sum int
	if root.Val >= L && root.Left != nil {
		sum += rangeSumBST(root.Left, L, R)
	}
	if root.Val <= R && root.Right != nil {
		sum += rangeSumBST(root.Right, L, R)
	}
	if root.Val >= L && root.Val <= R {
		sum += root.Val
	}
	return sum
}
<file_sep>/algorithms/_94_Binary_Tree_Inorder_Traversal/answer.go
package _94_Binary_Tree_Inorder_Traversal
// TreeNode is a binary tree node.
type TreeNode struct {
	Val int
	Left *TreeNode
	Right *TreeNode
}
// inorderTraversal returns the in-order value sequence of the tree
// (LeetCode 94), delegating to the stack-based iterative variant.
func inorderTraversal(root *TreeNode) []int {
	// return inorderTranversalRecursion(root)
	// return inorderTranversalUnrecursion(root)
	return inorderTranversalUnrecursionWithStack(root)
}
// inorderTranversalUnrecursionWithStack walks the tree in-order with an
// explicit stack: push the entire left spine, then pop, record the value,
// and switch to the right subtree.
func inorderTranversalUnrecursionWithStack(n *TreeNode) []int {
	stack := []*TreeNode{}
	visited := []int{}
	cur := n
	for cur != nil || len(stack) > 0 {
		for cur != nil {
			stack = append(stack, cur)
			cur = cur.Left
		}
		if len(stack) > 0 {
			top := len(stack) - 1
			cur = stack[top]
			stack = stack[:top]
			visited = append(visited, cur.Val)
			cur = cur.Right
		}
	}
	return visited
}
// inorderTranversalUnrecursion performs an iterative in-order walk: keep
// descending left while possible, otherwise pop, record, and go right.
func inorderTranversalUnrecursion(n *TreeNode) []int {
	pending := []*TreeNode{}
	visited := []int{}
	cur := n
	for cur != nil || len(pending) > 0 {
		if cur != nil {
			pending = append(pending, cur)
			cur = cur.Left
		} else {
			last := len(pending) - 1
			cur = pending[last]
			pending = pending[:last]
			visited = append(visited, cur.Val)
			cur = cur.Right
		}
	}
	return visited
}
// inorderTranversalRecursion returns left-subtree values, then the node's
// own value, then right-subtree values, recursively.
func inorderTranversalRecursion(n *TreeNode) []int {
	if n == nil {
		return []int{}
	}
	out := []int{}
	if n.Left != nil {
		out = append(out, inorderTranversalRecursion(n.Left)...)
	}
	out = append(out, n.Val)
	if n.Right != nil {
		out = append(out, inorderTranversalRecursion(n.Right)...)
	}
	return out
}
<file_sep>/algorithms/_897_Increasing_Order_Search_Tree/answer.go
package _897_Increasing_Order_Search_Tree
import "fmt"
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// increasingBST rebuilds the BST as a right-skewed chain in ascending
// order (LeetCode 897): an iterative in-order traversal collects the
// values, then a fresh vine of right-only nodes is built off a dummy head.
func increasingBST(root *TreeNode) *TreeNode {
	var (
		s    = []*TreeNode{}
		ol   = []int{}
		node = root
		head = &TreeNode{}
	)
	for node != nil || len(s) != 0 {
		for node != nil {
			s = append(s, node)
			node = node.Left
		}
		if len(s) != 0 {
			node = s[len(s)-1]
			s = s[0 : len(s)-1]
			ol = append(ol, node.Val)
			node = node.Right
		}
	}
	// NOTE(review): leftover debug output; it also forces the fmt import,
	// so it cannot be dropped without touching the import block.
	fmt.Println(ol)
	// Build the new right-only chain.
	tmp := head
	for _, i := range ol {
		tmp.Right = &TreeNode{
			Val: i,
		}
		tmp = tmp.Right
	}
	return head.Right
}
<file_sep>/algorithms/_33_Search_in_Rotated_Sorted_Array/answer.go
package _33_Search_in_Rotated_Sorted_Array
// search finds target in a rotated ascending array without duplicates
// (LeetCode 33), returning its index or -1, via a modified binary search
// that decides at each step which half is the sorted one.
func search(nums []int, target int) int {
	if len(nums) == 0 {
		return -1
	}
	if len(nums) == 1 && nums[0] == target {
		return 0
	}
	var (
		left  = 0
		right = len(nums) - 1
		mid   = 0
	)
	for left <= right {
		mid = (left + right) / 2
		// Direct hits at either boundary or the midpoint end the search.
		if nums[left] == target {
			return left
		}
		if nums[right] == target {
			return right
		}
		if nums[mid] == target {
			return mid
		}
		if nums[mid] > nums[left] { // left half is sorted; right half may not be
			if nums[mid] < target { // cannot be in the sorted left half; raise left, keep right for now
				left = mid + 1
			} else { // below mid: either in the left half or in the rotated right part; decide by the left bound
				if nums[left] > target { // even the sorted half's minimum exceeds target, so it must be in the right part
					right = right - 1
					left = mid + 1
				} else { // the sorted half's minimum is below target, so it lies in this half
					right = mid - 1
					left = left + 1
				}
			}
		} else { // left half not sorted, so the right half must be
			if nums[mid] > target { // everything in the sorted right half exceeds target; shrink right, bump left
				right = mid - 1
				left = left + 1
			} else { // either in the sorted right half or the rotated left part; decide by the right bound
				if nums[right] > target { // the right bound exceeds target, so it sits in the sorted right half
					right = right - 1
					left = mid + 1
				} else { // otherwise it is in the left part below mid
					left = left + 1
					right = mid - 1
				}
			}
		}
	}
	return -1
}
<file_sep>/algorithms/_67_Add_Binary/answer_test.go
package _67_Add_Binary
import (
"testing"
)
// TestAddBinary checks binary string addition, including a carry that
// lengthens the result ("11" + "1" = "100").
func TestAddBinary(t *testing.T) {
	if ret := addBinary("11", "1"); ret != "100" {
		t.Error("ret not 100, get", ret)
	}
	if ret := addBinary("100", "110010"); ret != "110110" {
		t.Error("ret not 110110, get", ret)
	}
}
<file_sep>/algorithms/_460_LFU_Cache/answer.go
package _460_LFU_Cache
import (
"fmt"
"strconv"
"strings"
)
// transArg parses a call-argument string such as "[[2],[1,1],[1]]" into
// one []int per bracketed group; it is used with transCall (e.g.
// transCall([]string{"LFUCache", "put", "get"}, transArg("[[2],[1,1],[1]]")))
// to replay LeetCode test scripts locally. Non-numeric fields parse as 0
// because the strconv.Atoi error is discarded.
func transArg(str string) (ret [][]int) {
	trimmed := strings.TrimSpace(strings.TrimRight(strings.TrimLeft(str, "["), "]"))
	for _, group := range strings.Split(trimmed, "],[") {
		nums := []int{}
		for _, field := range strings.Split(strings.TrimSpace(group), ",") {
			v, _ := strconv.Atoi(field)
			nums = append(nums, v)
		}
		ret = append(ret, nums)
	}
	return
}
// transCall drives an LFUCache through a scripted call sequence: fn names
// the operation per step ("LFUCache"/"put"/"get") and arg carries that
// step's arguments; the two slices must have equal length.
func transCall(fn []string, arg [][]int) {
	if len(fn) != len(arg) {
		fmt.Println("err input")
		return
	}
	var obj LFUCache
	for i := 0; i < len(fn); i++ {
		switch fn[i] {
		case "LFUCache":
			obj = Constructor(arg[i][0])
		case "put":
			obj.Put(arg[i][0], arg[i][1])
		case "get":
			obj.Get(arg[i][0])
		}
	}
}
// Pair is one cache entry: the stored Value plus its access frequency.
type Pair struct {
	Value int
	Freq int
}
// LFUCache implements least-frequently-used eviction (LeetCode 460).
// Mp maps key -> entry; Mf maps frequency -> keys at that frequency in
// FIFO order; MinFreq tracks the lowest populated frequency for eviction.
type LFUCache struct {
	MinFreq int
	Capacity int
	Mp map[int]Pair
	Mf map[int]*[]int
}
// Constructor builds an empty LFUCache with the given capacity.
func Constructor(capacity int) LFUCache {
	return LFUCache{
		MinFreq:  0,
		Capacity: capacity,
		Mp:       make(map[int]Pair),
		Mf:       make(map[int]*[]int),
	}
}
// Get returns the value stored under key (or -1 if absent) and bumps the
// key's frequency: it is removed from its old frequency bucket, appended
// to the freq+1 bucket, and MinFreq is advanced if the old bucket drained.
// The commented-out printing is debug-only; leaving it enabled makes the
// solution exceed the judge's time limit.
func (this *LFUCache) Get(key int) int {
	var (
		p   Pair
		ok  bool
		ret int
	)
	// defer func() {
	// fmt.Printf("GET %d->%d: %+v\n\n", key, ret, this)
	// }()
	// Key absent: report -1.
	if p, ok = this.Mp[key]; !ok {
		ret = -1
		return ret
	}
	// Key present: promote it to the next frequency bucket.
	var (
		oldFreq = p.Freq // previous frequency
		newFreq = oldFreq + 1
		ofl     = this.Mf[oldFreq] // key list of the previous frequency
		nfl     *[]int
		delIdx  int
	)
	ret = p.Value // value to return
	if len(*ofl) == 0 { // defensive: bucket unexpectedly empty
		return -1
	}
	// Remove the key from the old frequency's list.
	for i, k := range *ofl {
		if k == key {
			delIdx = i
			break
		}
	}
	if delIdx == len(*ofl)-1 {
		*ofl = append((*ofl)[:delIdx])
	} else {
		*ofl = append((*ofl)[:delIdx], (*ofl)[delIdx+1:]...)
	}
	this.Mf[oldFreq] = ofl
	// Record the bumped frequency in the entry map.
	p.Freq = newFreq
	this.Mp[key] = p
	// Append the key to the new frequency's list.
	if nfl, ok = this.Mf[newFreq]; !ok {
		nfl = &[]int{}
	}
	*nfl = append(*nfl, key)
	this.Mf[newFreq] = nfl
	// Maintain the minimum-frequency marker.
	if this.MinFreq == oldFreq { // the promoted key came from the lowest bucket
		if len(*ofl) == 0 {
			this.MinFreq = newFreq
		}
	}
	return ret
}
// Put inserts or updates key. An existing key has its value replaced and
// its frequency bumped (same bucket dance as Get); a new key first evicts
// the oldest key in the lowest-frequency bucket when the cache is full,
// then is inserted with frequency 1 and MinFreq reset to 1.
func (this *LFUCache) Put(key int, value int) {
	// defer fmt.Printf("PUT %d-%d: %+v\n\n", key, value, this)
	// Existing entry: update the value and promote its frequency bucket.
	if p, ok := this.Mp[key]; ok {
		var (
			delIdx int
			of     = p.Freq
			nf     = of + 1
			ofl    = this.Mf[of]
		)
		if ofl == nil || len(*ofl) == 0 { // defensive: bucket missing/empty
			return
		}
		p.Value = value
		p.Freq = nf
		this.Mp[key] = p
		// Drop the key from its old frequency list.
		for i, k := range *ofl {
			if k == key {
				delIdx = i
				break
			}
		}
		if delIdx == len(*ofl)-1 {
			*ofl = append((*ofl)[:delIdx])
		} else {
			*ofl = append((*ofl)[:delIdx], (*ofl)[delIdx+1:]...)
		}
		this.Mf[of] = ofl
		var nfl *[]int
		if nfl, ok = this.Mf[nf]; !ok {
			nfl = &[]int{}
		}
		*nfl = append(*nfl, key)
		this.Mf[nf] = nfl
		if this.MinFreq == of && len(*this.Mf[of]) == 0 {
			this.MinFreq = nf
		}
		return
	}
	// New entry: insertion depends on whether the cache is at capacity.
	if len(this.Mp) < this.Capacity { // room left: insert directly
		this.Mp[key] = Pair{
			Value: value,
			Freq:  1,
		}
		var (
			f  *[]int
			ok bool
		)
		if f, ok = this.Mf[1]; !ok {
			f = &[]int{}
		}
		*f = append(*f, key)
		this.Mf[1] = f
		this.MinFreq = 1
		return
	}
	// Cache full: evict before inserting.
	// The victim is the oldest key in the minimum-frequency bucket.
	fl := this.Mf[this.MinFreq]
	if fl == nil || len(*fl) == 0 { // defensive: bucket missing/empty
		return
	}
	delKey := (*fl)[0]
	// Remove its entry.
	delete(this.Mp, delKey)
	// Remove it from the frequency bucket as well.
	*fl = (*fl)[1:]
	this.Mf[this.MinFreq] = fl
	// Now insert the new entry with frequency 1.
	this.Mp[key] = Pair{
		Value: value,
		Freq:  1,
	}
	if f, ok := this.Mf[1]; ok {
		*f = append((*f), key)
		this.Mf[1] = f
	} else {
		this.Mf[1] = &[]int{key}
	}
	this.MinFreq = 1
	return
}
/**
* Your LFUCache object will be instantiated and called as such:
* obj := Constructor(capacity);
* param_1 := obj.Get(key);
* obj.Put(key,value);
*/
<file_sep>/algorithms/_20_Valid_Parentheses/answer.go
package _20_Valid_Parentheses
// beginList holds the opening brackets and endList the closers; isValid
// classifies characters by membership in these lists via StrInList.
var beginList = []string{"(", "[", "{"}
var endList = []string{")", "]", "}"}
// isValid reports whether every bracket in s is properly matched and
// nested (LeetCode 20): openers are pushed onto a stack and each closer
// must match the most recent unmatched opener. Non-bracket characters
// are ignored.
func isValid(s string) bool {
	var stack []string
	opener := map[string]string{")": "(", "]": "[", "}": "{"}
	for _, r := range s {
		ch := string(r)
		if StrInList(ch, beginList) {
			stack = append(stack, ch)
		}
		if StrInList(ch, endList) {
			if len(stack) < 1 {
				return false
			}
			if stack[len(stack)-1] != opener[ch] {
				return false
			}
			stack = stack[:len(stack)-1]
		}
	}
	return len(stack) == 0
}
// StrInList reports whether s equals any element of list.
func StrInList(s string, list []string) bool {
	for _, item := range list {
		if item == s {
			return true
		}
	}
	return false
}
<file_sep>/algorithms/_349_Intersection_of_Two_Arrays/answer.go
package _349_Intersection_of_Two_Arrays
func intersection(nums1 []int, nums2 []int) []int {
var m1, m2 = make(map[int]interface{}), make(map[int]interface{})
for _, n := range nums1 {
m1[n] = nil
}
for _, n := range nums2 {
m2[n] = nil
}
var ret []int
for n1, _ := range m1 {
if _, ok := m2[n1]; ok {
ret = append(ret, n1)
}
}
return ret
}
<file_sep>/algorithms/_561_Array_Partition/answer_test.go
package _561_Array_Partition
import "testing"
// testCase pairs an input slice with the expected arrayPairSum result.
type testCase struct {
	input []int
	output int
}
// TestArrayPairSum checks arrayPairSum against the two LeetCode 561
// examples.
func TestArrayPairSum(t *testing.T) {
	cases := []testCase{
		{
			input:  []int{1, 4, 3, 2},
			output: 4,
		},
		{
			input:  []int{6, 2, 6, 5, 1, 2},
			output: 9,
		},
	}
	for _, c := range cases {
		if x := arrayPairSum(c.input); x != c.output {
			t.Errorf("output should be \"%v\" instead of \"%v\" with input=\"%v\"", c.output, x, c.input)
		}
	}
}
<file_sep>/algorithms/_306_Additive_Number/answer.go
package _306_Additive_Number
// isAdditiveNumber reports whether num's digits can be split into a
// sequence of at least three numbers where each number is the sum of the
// two preceding it (LeetCode 306).
func isAdditiveNumber(num string) bool {
	tmp := []int{}
	return dfs(num, 0, &tmp)
}
// dfs tries every split of num[idx:] as the next number of the additive
// sequence accumulated in tmp, backtracking on failure. A candidate that
// starts with '0' can only be the single digit 0 (the cur == 0 break).
// NOTE(review): values accumulate in int, so inputs whose numbers
// overflow int are not handled — confirm against the input constraints.
func dfs(num string, idx int, tmp *[]int) bool {
	if idx == len(num) && len(*tmp) > 2 {
		return true
	}
	cur := 0
	for i := idx; i < len(num); i++ {
		cur = cur*10 + int(num[i]-'0')
		// Prune once the candidate already exceeds the required sum.
		if len(*tmp) >= 2 && cur-(*tmp)[len(*tmp)-1] > (*tmp)[len(*tmp)-2] {
			break
		}
		// The first two numbers are free; later ones must satisfy the
		// additive relation exactly.
		if len(*tmp) <= 1 || cur-(*tmp)[len(*tmp)-1] == (*tmp)[len(*tmp)-2] {
			*tmp = append(*tmp, cur)
			if dfs(num, i+1, tmp) {
				return true
			}
			*tmp = (*tmp)[0 : len(*tmp)-1]
		}
		if cur == 0 {
			break
		}
	}
	return false
}
<file_sep>/algorithms/_169_Majority_Element/answer.go
package _169_Majority_Element
// majorityElement returns the value occurring more than len(nums)/2 times
// (LeetCode 169), delegating to the hash-count implementation.
func majorityElement(nums []int) int {
	return majorityElementByMapCount(nums)
}
// majorityElementByMapCount tallies occurrences in a map and returns the
// first value whose count exceeds len(nums)/2; 0 if no value qualifies.
func majorityElementByMapCount(nums []int) int {
	total := len(nums)
	counts := make(map[int]int)
	for _, v := range nums {
		next := counts[v] + 1
		if next > total/2 {
			return v
		}
		counts[v] = next
	}
	return 0
}
<file_sep>/algorithms/_792_Number_of_Matching_Subsequences/answer.go
package _792_Number_of_Matching_Subsequences
// numMatchingSubseq counts how many words are subsequences of s
// (LeetCode 792): every character of s is indexed to its ascending byte
// positions, then each word is matched greedily against that index.
func numMatchingSubseq(s string, words []string) int {
	dict := make(map[string][]int)
	for idx, b := range s {
		dict[string(b)] = append(dict[string(b)], idx)
	}
	ret := 0
	for _, word := range words {
		if isSubSeq(word, dict) {
			ret += 1
		}
	}
	return ret
}
// isSubSeq reports whether word is a subsequence of the indexed string:
// dict maps each character to its ascending positions, and every matched
// character must occur strictly after the previous match.
// NOTE(review): nextHigher scans linearly; a binary search over the
// position list would be faster for long source strings.
func isSubSeq(word string, dict map[string][]int) bool {
	prev := -1
	for i := 0; i < len(word); i++ {
		s := string(word[i])
		idxs := dict[s]
		if len(idxs) == 0 { // character never occurs in the source string
			return false
		}
		nh := nextHigher(prev, idxs)
		if nh == -1 {
			return false
		} else {
			prev = nh
		}
	}
	return true
}
// nextHigher returns the first element of list strictly greater than n,
// or -1 when no such element exists (list is scanned in order).
func nextHigher(n int, list []int) int {
	for _, v := range list {
		if v > n {
			return v
		}
	}
	return -1
}
<file_sep>/algorithms/_1201_Ugly_Number_3/answer.go
package _1201_Ugly_Number_3
import "math"
// nthUglyNumber returns the nth positive integer divisible by a, b, or c
// (LeetCode 1201), binary-searching over the value range: cnt(x) counts
// how many such integers are <= x via inclusion-exclusion.
func nthUglyNumber(n int, a int, b int, c int) int {
	l, r := 1, math.MaxInt32
	for l < r {
		mid := l + (r-l)/2
		if cnt(mid, a, b, c) < n {
			l = mid + 1
		} else {
			r = mid
		}
	}
	return l
}
// gcd returns the greatest common divisor of x and y (Euclid's algorithm).
func gcd(x, y int) int {
	if y == 0 {
		return x
	}
	return gcd(y, x%y)
}
// floorInt returns floor(x / y) computed via float64 division.
// NOTE(review): exact only while x, y and the quotient stay within
// float64's 53-bit integer precision — confirm for the input ranges used.
func floorInt(x, y int) int {
	return int(math.Floor(float64(x) / float64(y)))
}
// lcm2 returns the least common multiple of x and y.
func lcm2(x, y int) int {
	return floorInt(x*y, gcd(x, y))
}
// lcm3 returns the least common multiple of x, y and z.
func lcm3(x, y, z int) int {
	res := lcm2(x, y)
	return lcm2(z, res)
}
// cnt counts the integers in [1, k] divisible by x, y or z using
// inclusion-exclusion over the pairwise and triple LCMs.
func cnt(k, x, y, z int) int {
	return floorInt(k, x) + floorInt(k, y) + floorInt(k, z) - floorInt(k, lcm2(x, y)) - floorInt(k, lcm2(x, z)) -
		floorInt(k, lcm2(y, z)) + floorInt(k, lcm3(x, y, z))
}
<file_sep>/algorithms/_58_Length_of_Last_Word/answer_test.go
package _58_Length_of_Last_Word
import (
"testing"
)
// TestLengthOfLastWord checks the single example "Hello world" -> 5.
func TestLengthOfLastWord(t *testing.T) {
	if ret := lengthOfLastWord("Hello world"); ret != 5 {
		t.Error("not 5")
	}
}
<file_sep>/algorithms/_160_Intersection_of_Two_Linked_Lists/answer.go
package _160_Intersection_of_Two_Linked_Lists
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// getIntersectionNode returns the node where lists headA and headB merge,
// or nil if they never intersect (LeetCode 160).
func getIntersectionNode(headA, headB *ListNode) *ListNode {
	return getIntersectionNodeWithLengthWalk(headA, headB)
}
// getIntersectionNodeWithLengthWalk advances two cursors in lockstep to
// discover the length difference, skips that many nodes on the longer
// list, then walks both lists together until the pointers coincide.
func getIntersectionNodeWithLengthWalk(headA, headB *ListNode) *ListNode {
	var (
		n1 = headA
		n2 = headB
	)
	for n1 != nil && n2 != nil {
		n1 = n1.Next
		n2 = n2.Next
	}
	for n1 != nil { // headA is the longer list: skip its surplus nodes
		headA = headA.Next
		n1 = n1.Next
	}
	for n2 != nil { // headB is the longer list: skip its surplus nodes
		headB = headB.Next
		n2 = n2.Next
	}
	for headA != nil && headB != nil {
		if headA == headB {
			return headA
		}
		headA = headA.Next
		headB = headB.Next
	}
	return nil
}
<file_sep>/algorithms/_239_Sliding_Window_Maximum/answer.go
package _239_Sliding_Window_Maximum
import (
"container/list"
)
// maxSlidingWindow returns the maximum of each length-k window of nums
// (LeetCode 239), delegating to the monotonic-deque implementation.
func maxSlidingWindow(nums []int, k int) []int {
	return maxSlidingWindowWithQueue(nums, k)
}
// maxSlidingWindowWithQueue computes the maximum of every length-k window
// with a monotonically decreasing deque of indices (indices rather than
// values so window membership can be checked): the front always holds the
// index of the current window's maximum.
func maxSlidingWindowWithQueue(nums []int, k int) []int {
	if k > len(nums) || len(nums) == 0 { // bad case
		return []int{}
	}
	deque := list.New()
	// push drops smaller tail entries, keeping the deque decreasing.
	push := func(i int) {
		for deque.Len() != 0 && nums[deque.Back().Value.(int)] < nums[i] {
			deque.Remove(deque.Back())
		}
		deque.PushBack(i)
	}
	// Seed the first window.
	for i := 0; i < k; i++ {
		push(i)
	}
	var result []int
	result = append(result, nums[deque.Front().Value.(int)])
	// Slide the window across the rest of nums.
	for i := k; i < len(nums); i++ {
		if i-deque.Front().Value.(int)+1 > k {
			deque.Remove(deque.Front()) // front fell out of the window
		}
		push(i)
		result = append(result, nums[deque.Front().Value.(int)])
	}
	return result
}
<file_sep>/algorithms/_543_Diameter_of_Binary_Tree/answer_test.go
package _543_Diameter_of_Binary_Tree
import "testing"
// TestDiameterOfBinaryTree runs diameterOfBinaryTree on a two-node tree
// and logs the result (no assertion).
func TestDiameterOfBinaryTree(t *testing.T) {
	r := &TreeNode{
		Left: &TreeNode{
			Value: 1,
		},
		Value: 2,
	}
	ret := diameterOfBinaryTree(r)
	t.Log(ret)
}
<file_sep>/algorithms/_2181_Merge_Nodes_in_Between_Zeros/answer_test.go
package _2181_Merge_Nodes_in_Between_Zeros
import (
"testing"
"github.com/shadas/leetcode_notes/utils/linkedlist"
)
// testCase pairs an input list with the expected mergeNodes output list.
type testCase struct {
	input  *linkedlist.IntListNode
	output *linkedlist.IntListNode
}
// TestMergeNodes checks mergeNodes against the two LeetCode 2181 examples.
func TestMergeNodes(t *testing.T) {
	cases := []testCase{
		{
			input:  linkedlist.GenerateIntLinkedList([]int{0, 3, 1, 0, 4, 5, 2, 0}),
			output: linkedlist.GenerateIntLinkedList([]int{4, 11}),
		},
		{
			input:  linkedlist.GenerateIntLinkedList([]int{0, 1, 0, 3, 0, 2, 2, 0}),
			output: linkedlist.GenerateIntLinkedList([]int{1, 3, 4}),
		},
	}
	for _, c := range cases {
		if x := mergeNodes(c.input); !linkedlist.IsTwoIntLinkedListEqual(x, c.output) {
			t.Errorf("output should be \"%v\" instead of \"%v\" with input=\"%v\"", c.output, x, c.input)
		}
	}
}
<file_sep>/algorithms/_993_Cousins_in_Binary_Tree/answer.go
package _993_Cousins_in_Binary_Tree
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// isCousins reports whether values x and y sit at the same depth under
// different parents (LeetCode 993).
func isCousins(root *TreeNode, x int, y int) bool {
	return isCousinsBFS(root, x, y)
}
// isCousinsBFS walks the tree level by level, recording each target
// value's parent (xp/yp) and depth (xd/yd) when spotted from its parent;
// once both are found it checks "different parents, same depth".
func isCousinsBFS(root *TreeNode, x int, y int) bool {
	var (
		xp, yp *TreeNode // parents of x and y, once discovered
		xd, yd int       // depths of x and y
		s      = []*TreeNode{root}
		nowDep = -1
	)
	if root == nil {
		return false
	}
	for len(s) > 0 {
		nowDep++
		currFloor := s
		cache := []*TreeNode{}
		for _, n := range currFloor {
			if (n.Left != nil && n.Left.Val == x) || (n.Right != nil && n.Right.Val == x) {
				xp = n
				xd = nowDep + 1
			}
			if (n.Left != nil && n.Left.Val == y) || (n.Right != nil && n.Right.Val == y) {
				yp = n
				yd = nowDep + 1
			}
			// Both located: decide immediately.
			if xp != nil && yp != nil {
				if xp != yp && xd == yd {
					return true
				} else {
					return false
				}
			}
			if n.Left != nil {
				cache = append(cache, n.Left)
			}
			if n.Right != nil {
				cache = append(cache, n.Right)
			}
		}
		s = cache
	}
	return false
}
<file_sep>/algorithms/_647_Palindromic_Substrings/answer.go
package _647_Palindromic_Substrings
// countSubstrings returns the number of palindromic substrings of s
// (LeetCode 647), delegating to the interval-DP implementation.
func countSubstrings(s string) int {
	return countSubstringsWithDP(s)
}
// countSubstringsWithDP counts palindromic substrings with interval DP:
// dp[i][j] is true when s[i..j] is a palindrome. Lengths 1 and 2 are
// seeded directly; a longer span s[i..j] is a palindrome when
// s[i] == s[j] and dp[i+1][j-1] already holds. The answer is the number
// of true cells.
func countSubstringsWithDP(s string) int {
	n := len(s)
	total := 0
	dp := make([][]bool, n)
	for i := range dp {
		dp[i] = make([]bool, n)
		dp[i][i] = true // every single character is a palindrome
		total++
	}
	for i := 0; i+1 < n; i++ { // seed length-2 spans
		if s[i] == s[i+1] {
			dp[i][i+1] = true
			total++
		}
	}
	for length := 3; length <= n; length++ {
		for start := 0; start+length <= n; start++ {
			end := start + length - 1
			if s[start] == s[end] && dp[start+1][end-1] {
				dp[start][end] = true
				total++
			}
		}
	}
	return total
}
<file_sep>/algorithms/_225_Implement_Stack_using_Queues/answer.go
package _225_Implement_Stack_using_Queues
// MyStack implements a LIFO stack on top of a single FIFO queue
// (LeetCode 225). Push is O(1); Pop rotates the queue in O(n).
type MyStack struct {
	queue []int
}
// Constructor returns an empty stack.
func Constructor() MyStack {
	return MyStack{}
}
// Push appends x to the back of the queue.
func (this *MyStack) Push(x int) {
	this.queue = append(this.queue, x)
}
// Pop dequeues everything except the last element into a fresh queue,
// then returns that last element (the most recently pushed).
func (this *MyStack) Pop() int {
	tmp := []int{}
	for len(this.queue) > 1 {
		x := this.queue[0]
		this.queue = this.queue[1:]
		tmp = append(tmp, x)
	}
	ret := this.queue[0]
	this.queue = tmp
	return ret
}
// Top returns the most recently pushed element without removing it.
func (this *MyStack) Top() int {
	return this.queue[len(this.queue)-1]
}
// Empty reports whether the stack holds no elements.
func (this *MyStack) Empty() bool {
	return len(this.queue) == 0
}
/**
* Your MyStack object will be instantiated and called as such:
* obj := Constructor();
* obj.Push(x);
* param_2 := obj.Pop();
* param_3 := obj.Top();
* param_4 := obj.Empty();
*/
<file_sep>/algorithms/_264_Ugly_Number_2/answer.go
package _264_Ugly_Number_2
// nthUglyNumber returns the nth ugly number (prime factors limited to
// 2, 3 and 5; LeetCode 264) by merging the 2x, 3x and 5x multiple
// streams with three pointers into the u table.
func nthUglyNumber(n int) int {
	var (
		u                = make([]int, n)
		idx2, idx3, idx5 int
		k                = 1
	)
	u[0] = 1
	for k < n {
		u[k] = min(u[idx2]*2, u[idx3]*3, u[idx5]*5)
		// Advance every stream that produced this value so ties
		// (e.g. 6 = 2*3 = 3*2) are not emitted twice.
		if u[idx2]*2 == u[k] {
			idx2++
		}
		if u[idx3]*3 == u[k] {
			idx3++
		}
		if u[idx5]*5 == u[k] {
			idx5++
		}
		k++
	}
	return u[n-1]
}
// min returns the smallest of a, b and c.
func min(a, b, c int) int {
	result := a
	if b < result {
		result = b
	}
	if c < result {
		result = c
	}
	return result
}
<file_sep>/algorithms/_62_Unique_Paths/answer.go
package _62_Unique_Paths
// uniquePaths counts lattice paths from the top-left to the bottom-right
// of an m x n grid moving only right or down (LeetCode 62), using a
// rolling one-dimensional DP row.
func uniquePaths(m int, n int) int {
	row := make([]int, n)
	row[0] = 1 // one way to stand at the start of each row
	for i := 0; i < m; i++ {
		for j := 1; j < n; j++ {
			row[j] += row[j-1]
		}
	}
	return row[n-1]
}
<file_sep>/algorithms/_49_Group_Anagrams/answer.go
package _49_Group_Anagrams
// Format is the anagram signature of a string: Fm counts occurrences per
// rune and Count tracks how many times the exact string appeared in the
// input.
type Format struct {
	Fm map[rune]int
	Count int
}
// groupAnagrams buckets strs so each bucket holds words that are anagrams
// of one another (LeetCode 49). Every distinct string gets a rune-count
// signature; buckets are matched by comparing signatures with isSame,
// which is quadratic in the number of distinct strings.
func groupAnagrams(strs []string) [][]string {
	var (
		formats  = make(map[string]Format)
		ret      [][]string
		emptyRet []string
	)
	for _, str := range strs {
		// Empty strings are collected separately and appended last.
		if str == "" {
			emptyRet = append(emptyRet, "")
			continue
		}
		tm, ok := formats[str]
		if !ok {
			tm = Format{
				Fm: make(map[rune]int),
			}
			for _, b := range str {
				count, _ := tm.Fm[b]
				count++
				tm.Fm[b] = count
			}
		}
		tm.Count++
		formats[str] = tm
	}
loop:
	for str, format := range formats {
		// Try joining an existing bucket with a matching signature.
		for idx, list := range ret {
			f0 := formats[list[0]]
			if isSame(f0.Fm, format.Fm) {
				for i := 0; i < format.Count; i++ {
					list = append(list, str)
				}
				ret[idx] = list
				continue loop
			}
		}
		// No matching bucket: open a new one with every occurrence.
		newline := []string{}
		for i := 0; i < format.Count; i++ {
			newline = append(newline, str)
		}
		ret = append(ret, newline)
	}
	if len(emptyRet) != 0 {
		ret = append(ret, emptyRet)
	}
	return ret
}
// isSame reports whether two rune-count maps hold exactly the same
// key/value pairs.
func isSame(a, b map[rune]int) bool {
	if len(a) != len(b) {
		return false
	}
	for key, want := range a {
		if got, ok := b[key]; !ok || got != want {
			return false
		}
	}
	return true
}
<file_sep>/own_practice/sort/fastsort/arraysort_test.go
package fastsort
import (
"testing"
. "leetcode_notes/utils/array"
)
// TestArrayFastSort1 checks the first quicksort variant on a small slice.
func TestArrayFastSort1(t *testing.T) {
	if ret := ArrayFastSort1([]int{2, 3, 4, 1, 0, 7}); !IsIntArrayEqual(ret, []int{0, 1, 2, 3, 4, 7}) {
		t.Errorf("Test1 error with return: %v", ret)
	}
}
// TestArrayFastSort2 checks the second quicksort variant on a small slice.
func TestArrayFastSort2(t *testing.T) {
	if ret := ArrayFastSort2([]int{2, 3, 4, 1, 0, 7}); !IsIntArrayEqual(ret, []int{0, 1, 2, 3, 4, 7}) {
		t.Errorf("Test2 error with return: %v", ret)
	}
}
<file_sep>/algorithms/_115_Distinct_Subsequences/answer.go
package _115_Distinct_Subsequences
// numDistinct counts the distinct subsequences of s that equal t
// (LeetCode 115) with DP: cache[i][j] is the number of ways to form t[:i]
// from s[:j]. Row 0 is all ones (the empty target forms exactly once).
// Note: an empty s yields 0, matching the original early guard.
func numDistinct(s string, t string) int {
	sl, tl := len(s), len(t)
	if sl == 0 {
		return 0
	}
	cache := make([][]int, tl+1)
	for i := range cache {
		cache[i] = make([]int, sl+1)
	}
	for j := 0; j <= sl; j++ {
		cache[0][j] = 1
	}
	for i := 1; i <= tl; i++ {
		for j := i; j <= sl; j++ {
			// Always inherit the count without s[j-1]; add the diagonal
			// when the characters match.
			cache[i][j] = cache[i][j-1]
			if s[j-1] == t[i-1] {
				cache[i][j] += cache[i-1][j-1]
			}
		}
	}
	return cache[tl][sl]
}
<file_sep>/go.mod
module github.com/shadas/leetcode_notes
go 1.17
<file_sep>/algorithms/_23_Merge_K_Sorted_Lists/answer_test.go
package _23_Merge_K_Sorted_Lists
import (
"fmt"
"testing"
)
// GenerateLinkedList builds a ListNode chain holding vals in order and
// returns its head (nil for an empty slice).
func GenerateLinkedList(vals []int) *ListNode {
	var l *ListNode
	var head *ListNode
	for _, val := range vals {
		if l == nil {
			l = &ListNode{
				Val: val,
			}
			head = l
		} else {
			l.Next = &ListNode{
				Val: val,
			}
			l = l.Next
		}
	}
	return head
}
// PrintListNode prints the list's values concatenated on one line.
func PrintListNode(l *ListNode) {
	var s string
	for l != nil {
		s += fmt.Sprintf("%d", l.Val)
		l = l.Next
	}
	fmt.Println(s)
}
// TestMergeKLists merges two hand-built groups of sorted lists and prints
// the merged results for manual inspection (no assertions).
func TestMergeKLists(t *testing.T) {
	l1 := GenerateLinkedList([]int{1, 4, 5})
	l2 := GenerateLinkedList([]int{1, 3, 4})
	l3 := GenerateLinkedList([]int{2, 6})
	lists1 := []*ListNode{l1, l2, l3}
	ret := mergeKLists(lists1)
	PrintListNode(ret)
	l4 := GenerateLinkedList([]int{-2, -1, -1, -1})
	l5 := GenerateLinkedList([]int{})
	lists2 := []*ListNode{l4, l5}
	ret = mergeKLists(lists2)
	PrintListNode(ret)
}
<file_sep>/algorithms/_2487_Remove_Nodes_From_Linked_List/answer_test.go
package _2487_Remove_Nodes_From_Linked_List
import (
"testing"
"github.com/shadas/leetcode_notes/utils/linkedlist"
)
// testCase pairs an input list with the expected removeNodes output list.
type testCase struct {
	input  *linkedlist.IntListNode
	output *linkedlist.IntListNode
}
// TestRemoveNodes checks removeNodes against the two LeetCode 2487
// examples.
func TestRemoveNodes(t *testing.T) {
	cases := []testCase{
		{
			input:  linkedlist.GenerateIntLinkedList([]int{5, 2, 13, 3, 8}),
			output: linkedlist.GenerateIntLinkedList([]int{13, 8}),
		},
		{
			input:  linkedlist.GenerateIntLinkedList([]int{1, 1, 1, 1}),
			output: linkedlist.GenerateIntLinkedList([]int{1, 1, 1, 1}),
		},
	}
	for _, c := range cases {
		if x := removeNodes(c.input); !linkedlist.IsTwoIntLinkedListEqual(x, c.output) {
			t.Errorf("output should be \"%v\" instead of \"%v\" with input=\"%v\"", c.output, x, c.input)
		}
	}
}
<file_sep>/algorithms/_1669_Merge_In_Between_Linked_List/answer.go
package _1669_Merge_In_Between_Linked_List
// ListNode is a singly-linked list node.
type ListNode struct {
	Val int
	Next *ListNode
}
// mergeInBetween splices list2 into list1 in place of list1's nodes from
// index a through b inclusive (LeetCode 1669): pre stops at node a-1,
// last at node b+1, then pre -> list2 ... list2-tail -> last.
func mergeInBetween(list1 *ListNode, a int, b int, list2 *ListNode) *ListNode {
	l1, l2 := list1, list2
	// Walk l1 to the node just before index a (b is rebased alongside).
	for ; a > 1; a-- {
		l1 = l1.Next
		b--
	}
	pre := l1
	// Skip the removed span; l1 ends at the node after index b.
	for ; b >= 0; b-- {
		l1 = l1.Next
	}
	last := l1
	pre.Next = list2
	// Find list2's tail and reattach the remainder of list1.
	for l2.Next != nil {
		l2 = l2.Next
	}
	l2.Next = last
	return list1
}
<file_sep>/algorithms/_654_Maximum_Binary_Tree/answer.go
package _654_Maximum_Binary_Tree
// TreeNode is a binary tree node.
type TreeNode struct {
	Val int
	Left *TreeNode
	Right *TreeNode
}
// constructMaximumBinaryTree builds the maximum binary tree of nums
// (LeetCode 654), delegating to the stack-based O(n) implementation.
func constructMaximumBinaryTree(nums []int) *TreeNode {
	//return constructMaximumBinaryTreeRecursively(nums)
	return constructMaximumBinaryTreeStack(nums)
}
// constructMaximumBinaryTreeRecursively builds the maximum binary tree:
// the largest value becomes the root, and the sub-slices to its left and
// right recursively form the left and right subtrees.
func constructMaximumBinaryTreeRecursively(nums []int) *TreeNode {
	if len(nums) == 0 {
		return nil
	}
	maxIdx := 0
	for i, v := range nums {
		if v > nums[maxIdx] {
			maxIdx = i
		}
	}
	return &TreeNode{
		Val:   nums[maxIdx],
		Left:  constructMaximumBinaryTreeRecursively(nums[0:maxIdx]),
		Right: constructMaximumBinaryTreeRecursively(nums[maxIdx+1:]),
	}
}
// constructMaximumBinaryTreeStack builds the maximum binary tree in one
// left-to-right pass with a decreasing stack of indices: each new value
// pops smaller stack entries (the last popped becomes its left child)
// and, when a larger entry remains on top, hangs as that entry's right
// child. When the stack empties, the new value is the largest seen so
// far and becomes the overall root candidate.
func constructMaximumBinaryTreeStack(nums []int) *TreeNode {
	var (
		treeNodePos = make([]*TreeNode, len(nums))
		stack       []int // stack of num index
		maxPosIdx   = 0
	)
	for idx, num := range nums {
		currNode := &TreeNode{
			Val: num,
		}
		treeNodePos[idx] = currNode
		for len(stack) != 0 {
			if nums[stack[len(stack)-1]] < num {
				// Smaller element: it belongs beneath the current node.
				currNode.Left = treeNodePos[stack[len(stack)-1]]
				// pop
				stack = stack[0 : len(stack)-1]
			} else {
				// Larger element on top: current node becomes its right child.
				topIdx := stack[len(stack)-1]
				treeNodePos[topIdx].Right = currNode
				stack = append(stack, idx)
				break
			}
		}
		if len(stack) == 0 {
			// New maximum so far: it is the current root of the whole tree.
			stack = append(stack, idx)
			maxPosIdx = idx
		}
	}
	return treeNodePos[maxPosIdx]
}
<file_sep>/algorithms/_13_Roman_to_Integer/answer_test.go
package _13_Roman_to_Integer
import "testing"
// TestRomanToInt checks the simple repeated-symbol case "III" -> 3.
func TestRomanToInt(t *testing.T) {
	var (
		roman string
	)
	roman = "III"
	if num := romanToInt(roman); num != 3 {
		t.Errorf("wrong num with %d", num)
	}
}
<file_sep>/algorithms/_38_Count_and_Say/answer.go
package _38_Count_and_Say
import (
"fmt"
)
// countAndSay returns the nth term of the count-and-say sequence
// (LeetCode 38), starting from "1" and applying the run-length
// description n-1 times.
func countAndSay(n int) string {
	result := "1"
	for round := 1; round < n; round++ {
		result = countStr(result)
	}
	return result
}
// countStr applies one count-and-say step: each maximal run of a repeated
// character is rewritten as "<run length><character>".
// (The original converted already-string values with string(...) twice;
// those redundant conversions are removed.)
func countStr(str string) string {
	var nowStr string // character of the current run ("" before the first)
	var nowCount int
	var ret string
	for _, c := range str {
		s := string(c)
		if s == nowStr {
			nowCount++
		} else {
			// Flush the finished run before starting a new one.
			if len(nowStr) != 0 {
				ret += fmt.Sprintf("%v%v", nowCount, nowStr)
				nowCount = 0
			}
			nowStr = s
			nowCount++
		}
	}
	ret += fmt.Sprintf("%v%v", nowCount, nowStr)
	return ret
}
<file_sep>/algorithms/_415_Add_Strings/answer.go
package _415_Add_Strings
import "fmt"
// addStrings returns the decimal sum of two non-negative integers given
// as strings (LeetCode 415).
func addStrings(num1 string, num2 string) string {
	return addStringsWithByte(num1, num2)
}
// addStringsWithByte adds two non-negative decimal strings digit by digit
// from the right, carrying into the next column; a final carry prepends
// an extra "1".
func addStringsWithByte(num1 string, num2 string) string {
	sum := ""
	carry := 0
	for i, j := len(num1)-1, len(num2)-1; i >= 0 || j >= 0; i, j = i-1, j-1 {
		digitSum := carry
		if i >= 0 {
			digitSum += int(num1[i] - '0')
		}
		if j >= 0 {
			digitSum += int(num2[j] - '0')
		}
		sum = fmt.Sprint(digitSum%10) + sum
		carry = digitSum / 10
	}
	if carry != 0 {
		sum = "1" + sum
	}
	return sum
}
<file_sep>/algorithms/_114_Flatten_Binary_Tree_to_Linked_List/answer.go
package _114_Flatten_Binary_Tree_to_Linked_List
// TreeNode is a binary tree node.
type TreeNode struct {
	Val int
	Left *TreeNode
	Right *TreeNode
}
// flatten rearranges the tree in place into a right-only chain in
// pre-order (LeetCode 114), delegating to the iterative variant.
func flatten(root *TreeNode) {
	//flattenRecursion(root)
	flattenIteration(root)
}
// flattenIteration flattens in place without recursion: for each node
// with a left child, splice the left subtree between the node and its
// right subtree, attaching the old right child after the left subtree's
// rightmost node.
func flattenIteration(root *TreeNode) {
	for node := root; node != nil; node = node.Right {
		if node.Left == nil {
			continue
		}
		rightmost := node.Left
		for rightmost.Right != nil {
			rightmost = rightmost.Right
		}
		rightmost.Right = node.Right
		node.Right = node.Left
		node.Left = nil
	}
}
// flattenRecursion flattens bottom-up: both subtrees are flattened first,
// then the flattened left chain is moved to the right slot and the old
// right chain is appended after its end.
func flattenRecursion(root *TreeNode) {
	if root == nil {
		return
	}
	if root.Left != nil {
		flattenRecursion(root.Left)
	}
	if root.Right != nil {
		flattenRecursion(root.Right)
	}
	tmp := root.Right
	root.Right = root.Left
	root.Left = nil
	// Walk to the end of the relocated left chain and reattach the old
	// right chain there.
	x := root
	for x.Right != nil {
		x = x.Right
	}
	x.Right = tmp
}
<file_sep>/utils/linkedlist/intlinkedlist_test.go
package linkedlist
import (
"testing"
)
// TestIntLinkedlistTools exercises the int linked-list helpers:
// generation from a slice, conversion back to a slice, and equality.
func TestIntLinkedlistTools(t *testing.T) {
	sl := []int{3, 1, 2}
	l := GenerateIntLinkedList(sl)
	if l.Val != 3 || l.Next == nil ||
		l.Next.Val != 1 || l.Next.Next == nil ||
		l.Next.Next.Val != 2 || l.Next.Next.Next != nil {
		t.Error("generate error.")
	}
	l2s := IntLinkedList2slice(l)
	if len(l2s) != len(sl) || l2s[0] != sl[0] ||
		l2s[1] != sl[1] || l2s[2] != sl[2] {
		t.Error("linkedlist to slice error.")
	}
	l1 := GenerateIntLinkedList(sl)
	if isEqual := IsTwoIntLinkedListEqual(l, l1); !isEqual {
		t.Error("is equal error.")
	}
}
<file_sep>/algorithms/_725_Split_Linked_List_in_Parts/answer_test.go
package _725_Split_Linked_List_in_Parts
import (
"fmt"
"testing"
)
// TestSplitListToParts splits a 3-node list into 5 parts and prints the
// parts for manual inspection (no assertions).
func TestSplitListToParts(t *testing.T) {
	l := &ListNode{
		Val: 1,
		Next: &ListNode{
			Val: 2,
			Next: &ListNode{
				Val:  3,
				Next: nil,
			},
		},
	}
	k := 5
	ret := splitListToParts(l, k)
	for _, tl := range ret {
		fmt.Println(tl)
	}
}
<file_sep>/algorithms/_451_Sort_Characters_By_Frequency/answer.go
package _451_Sort_Characters_By_Frequency
// frequencySort returns s's characters ordered by descending frequency
// (LeetCode 451). Ties between different characters come out in map
// iteration order, so any of the valid answers may be produced.
// NOTE(review): the max*distinct emission scan is simple but not optimal.
func frequencySort(s string) string {
	var (
		freq = make(map[rune]int)
		max  int
		ret  string
	)
	for _, r := range s {
		count, ok := freq[r]
		if !ok {
			count = 1
		} else {
			count++
		}
		if count >= max {
			max = count
		}
		freq[r] = count
	}
	// Emit from the highest count down; each rune with count i is
	// repeated i times.
	for i := max; i >= 0; i-- {
		for b, v := range freq {
			if v == i {
				for j := 0; j < i; j++ {
					ret += string(b)
				}
			}
		}
	}
	return ret
}
<file_sep>/algorithms/_200_Number_of_Islands/answer.go
package _200_Number_of_Islands
// numIslands counts connected groups of '1' cells (4-directional,
// LeetCode 200) by flood-filling each island to '0' as soon as its first
// cell is encountered. The grid is modified in place.
func numIslands(grid [][]byte) int {
	islands := 0
	for row := range grid {
		for col := range grid[row] {
			if grid[row][col] == '1' {
				islands++
				dfs(row, col, &grid)
			}
		}
	}
	return islands
}
// dfs flood-fills the island containing (i, j), overwriting every
// reachable '1' with '0'. Out-of-range or water cells end the walk.
func dfs(i, j int, grid *([][]byte)) {
	g := *grid
	if i < 0 || i > len(g)-1 || j < 0 || j > len(g[i])-1 || g[i][j] == '0' {
		return
	}
	g[i][j] = '0'
	// same visit order as before: up, down, left, right
	for _, d := range [4][2]int{{-1, 0}, {1, 0}, {0, -1}, {0, 1}} {
		dfs(i+d[0], j+d[1], grid)
	}
}
<file_sep>/algorithms/_523_Continuous_Subarray_Sum/answer.go
package _523_Continuous_Subarray_Sum
// checkSubarraySum reports whether nums has a contiguous subarray of
// length >= 2 whose sum is a multiple of k (prefix-sum variant).
func checkSubarraySum(nums []int, k int) bool {
	return checkSubarraySumWithSums(nums, k)
}
// checkSubarraySumWithSums uses prefix sums modulo k: two prefixes with
// the same remainder bound a subarray whose sum is a multiple of k.
// The map keeps the earliest index per remainder; a repeat at distance
// > 1 means a window of length >= 2 exists. k == 0 is rejected.
func checkSubarraySumWithSums(nums []int, k int) bool {
	if k == 0 {
		return false
	}
	seen := map[int]int{0: -1} // remainder -> earliest prefix index
	prefix := 0
	for i, v := range nums {
		prefix += v
		r := prefix % k
		first, ok := seen[r]
		if !ok {
			seen[r] = i
			continue
		}
		if i-first > 1 { // at least two elements between the prefixes
			return true
		}
	}
	return false
}
<file_sep>/algorithms/_121_Best_Time_to_Buy_and_Sell_Stock/answer_test.go
package _121_Best_Time_to_Buy_and_Sell_Stock
import (
"testing"
)
// TestMaxProfit checks the classic buy-at-1, sell-at-6 case (profit 5).
func TestMaxProfit(t *testing.T) {
	if ret := maxProfit([]int{7, 1, 5, 3, 6, 4}); ret != 5 {
		t.Error("not 5 with", ret)
	}
}
<file_sep>/algorithms/_200_Number_of_Islands/answer_test.go
package _200_Number_of_Islands
import "testing"
// TestNumIslands checks that one fully connected landmass counts as a
// single island.
func TestNumIslands(t *testing.T) {
	var (
		grid  [][]byte
		count int
	)
	grid = [][]byte{
		{'1', '1', '1', '1', '0'},
		{'1', '1', '0', '1', '0'},
		{'1', '1', '0', '0', '0'},
		{'0', '0', '0', '0', '0'},
	}
	if count = numIslands(grid); count != 1 {
		t.Errorf("wrong count with %d", count)
	}
}
<file_sep>/algorithms/_671_Second_Minimum_Node_In_a_Binary_Tree/answer.go
package _671_Second_Minimum_Node_In_a_Binary_Tree
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// findSecondMinimumValue returns the second-smallest distinct value in
// the tree, or -1 when no such value exists.
func findSecondMinimumValue(root *TreeNode) int {
	return findSecondMinimumValueWithRunAll(root)
}
// findSecondMinimumValueWithRunAll scans every node with an explicit
// stack. In this problem the root holds the global minimum; the answer
// is the smallest value strictly greater than it. `second == min`
// doubles as the "nothing found yet" sentinel, exactly as before.
func findSecondMinimumValueWithRunAll(root *TreeNode) int {
	if root == nil {
		return -1
	}
	min := root.Val
	second := min
	stack := []*TreeNode{root}
	for len(stack) > 0 {
		n := stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		if n.Val > min {
			if second == min || n.Val < second {
				second = n.Val
			}
		}
		if n.Left != nil {
			stack = append(stack, n.Left)
		}
		if n.Right != nil {
			stack = append(stack, n.Right)
		}
	}
	if second == min {
		return -1
	}
	return second
}
<file_sep>/algorithms/_189_Rotate_Array/answer.go
package _189_Rotate_Array
// rotate shifts nums to the right by k positions in place. k may exceed
// len(nums).
//
// Fix: the original computed k % len(nums) unconditionally, panicking
// with a division by zero on an empty slice; it also relied on append
// over the input's own backing array. A separate buffer is used and
// the trivial offsets return early.
func rotate(nums []int, k int) {
	if len(nums) == 0 {
		return
	}
	offset := k % len(nums)
	if offset == 0 {
		return
	}
	buf := make([]int, 0, len(nums))
	buf = append(buf, nums[len(nums)-offset:]...)
	buf = append(buf, nums[:len(nums)-offset]...)
	copy(nums, buf)
}
<file_sep>/algorithms/_3_Longest_Substring_Without_Repeating_Characters/answer.go
package _3_Longest_Substring_Without_Repeating_Characters
// lengthOfLongestSubstring returns the length of the longest substring
// of s with no repeated byte.
//
// Rewritten as an O(n) sliding window: last[b] remembers the most
// recent index of byte b; when b reappears inside the current window,
// the window start jumps past its previous occurrence. The original
// re-scanned a growing vector for every start index (O(n^3) worst
// case). Results are identical for all byte strings.
func lengthOfLongestSubstring(s string) int {
	var last [256]int
	for i := range last {
		last[i] = -1
	}
	max, start := 0, 0
	for i := 0; i < len(s); i++ {
		if p := last[s[i]]; p >= start {
			start = p + 1
		}
		last[s[i]] = i
		if w := i - start + 1; w > max {
			max = w
		}
	}
	return max
}
// IndexByte returns the index of the first occurrence of i in arr,
// or -1 when i is absent.
func IndexByte(arr []byte, i byte) int {
	for idx, b := range arr {
		if b == i {
			return idx
		}
	}
	return -1
}
// Using a map for dedup benches slower than the byte-vector variant.
//
// lengthOfLongestSubstring2: for every start index, extend a set of
// seen bytes until a duplicate appears; prune starts that cannot beat
// the current best.
func lengthOfLongestSubstring2(s string) int {
	if len(s) == 0 {
		return 0
	}
	if len(s) == 1 {
		return 1
	}
	b := []byte(s)
	best := 0
	for i := 0; i < len(b); i++ {
		if best >= len(b)-i {
			break // no window starting here can be longer
		}
		seen := map[byte]bool{b[i]: true}
		for j := i + 1; j < len(b); j++ {
			if seen[b[j]] {
				if len(seen) >= best {
					best = len(seen)
				}
				break
			}
			seen[b[j]] = true
			if len(seen) >= best {
				best = len(seen)
			}
		}
	}
	return best
}
<file_sep>/algorithms/_1002_Find_Common_Characters/answer.go
package _1002_Find_Common_Characters
// commonChars returns the characters (with multiplicity) that appear in
// every string of A. Per-string count maps are folded pairwise with
// mergeMinMap; output order follows map iteration and is unspecified.
func commonChars(A []string) []string {
	if len(A) == 0 {
		return []string{}
	}
	counts := make([]map[string]int, 0, len(A))
	for _, word := range A {
		c := make(map[string]int)
		for _, r := range word {
			c[string(r)]++
		}
		counts = append(counts, c)
	}
	common := counts[0]
	for _, c := range counts {
		common = mergeMinMap(common, c)
	}
	var ret []string
	for ch, n := range common {
		for i := 0; i < n; i++ {
			ret = append(ret, ch)
		}
	}
	return ret
}
// mergeMinMap intersects two count maps, keeping for each shared key
// the smaller of the two counts.
func mergeMinMap(m1, m2 map[string]int) (ret map[string]int) {
	ret = make(map[string]int)
	for k, c1 := range m1 {
		c2, shared := m2[k]
		if !shared {
			continue
		}
		if c2 < c1 {
			ret[k] = c2
		} else {
			ret[k] = c1
		}
	}
	return
}
<file_sep>/algorithms/_701_Insert_into_a_Binary_Search_Tree/answer.go
package _701_Insert_into_a_Binary_Search_Tree
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// TreeNode is a binary tree node (LeetCode definition).
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}

// insertIntoBST inserts val into the BST rooted at root and returns the
// (possibly new) root, walking iteratively to the correct nil slot.
//
// Fix: the original looped forever when val was already present in the
// tree — with node.Val == val neither the `<` nor the `>` branch fired,
// so the loop never advanced. Duplicates now descend to the right;
// distinct values behave exactly as before. (The trailing unreachable
// `return root` is also gone.)
func insertIntoBST(root *TreeNode, val int) *TreeNode {
	if root == nil {
		return &TreeNode{Val: val}
	}
	node := root
	for {
		if val < node.Val {
			if node.Left == nil {
				node.Left = &TreeNode{Val: val}
				return root
			}
			node = node.Left
		} else {
			if node.Right == nil {
				node.Right = &TreeNode{Val: val}
				return root
			}
			node = node.Right
		}
	}
}
<file_sep>/algorithms/_641_Design_Circular_Deque/answer.go
package _641_Design_Circular_Deque
// MyCircularDeque is a fixed-capacity double-ended queue over a ring
// buffer. One slot is kept unused so that head == tail unambiguously
// means "empty" and a size-k deque uses a k+1 slice.
type MyCircularDeque struct {
	data       []int
	head, tail int // head: index of front element; tail: one past the rear
}

// Constructor builds a deque holding at most k elements.
func Constructor(k int) MyCircularDeque {
	q := MyCircularDeque{data: make([]int, k+1)}
	return q
}

// Len returns the current number of elements.
//
// Fix: the original used `this.tail > this.head`, so an empty deque
// (head == tail) fell into the wrap-around branch and reported
// len(data) instead of 0. `>=` makes the empty case report 0.
func (this *MyCircularDeque) Len() int {
	if this.tail >= this.head {
		return this.tail - this.head
	}
	return len(this.data) - (this.head - this.tail)
}

// InsertFront adds value at the front; false when full.
func (this *MyCircularDeque) InsertFront(value int) bool {
	if this.IsFull() {
		return false
	}
	this.head = (this.head + len(this.data) - 1) % len(this.data)
	this.data[this.head] = value
	return true
}

// InsertLast adds value at the rear; false when full.
func (this *MyCircularDeque) InsertLast(value int) bool {
	if this.IsFull() {
		return false
	}
	this.data[this.tail] = value
	this.tail = (this.tail + 1) % len(this.data)
	return true
}

// DeleteFront removes the front element; false when empty.
func (this *MyCircularDeque) DeleteFront() bool {
	if this.IsEmpty() {
		return false
	}
	this.head = (this.head + 1) % len(this.data)
	return true
}

// DeleteLast removes the rear element; false when empty.
func (this *MyCircularDeque) DeleteLast() bool {
	if this.IsEmpty() {
		return false
	}
	this.tail = (this.tail - 1 + len(this.data)) % len(this.data)
	return true
}

// GetFront returns the front element, or -1 when empty.
func (this *MyCircularDeque) GetFront() int {
	if this.IsEmpty() {
		return -1
	}
	return this.data[this.head]
}

// GetRear returns the rear element, or -1 when empty.
func (this *MyCircularDeque) GetRear() int {
	if this.IsEmpty() {
		return -1
	}
	return this.data[(this.tail-1+len(this.data))%len(this.data)]
}

// IsEmpty reports whether the deque holds no elements.
func (this *MyCircularDeque) IsEmpty() bool {
	return this.head == this.tail
}

// IsFull reports whether the deque holds its maximum k elements.
func (this *MyCircularDeque) IsFull() bool {
	return this.Len() == len(this.data)-1
}
/**
* Your MyCircularDeque object will be instantiated and called as such:
* obj := Constructor(k);
* param_1 := obj.InsertFront(value);
* param_2 := obj.InsertLast(value);
* param_3 := obj.DeleteFront();
* param_4 := obj.DeleteLast();
* param_5 := obj.GetFront();
* param_6 := obj.GetRear();
* param_7 := obj.IsEmpty();
* param_8 := obj.IsFull();
*/
<file_sep>/algorithms/_445_Add_Two_Numbers_2/answer.go
package _445_Add_Two_Numbers_2
import "fmt"
// ListNode is a singly linked list node holding one decimal digit.
type ListNode struct {
	Val  int
	Next *ListNode
}

// String renders the list as the concatenation of its digits,
// e.g. 1 -> 2 -> 3 prints as "123".
func (l *ListNode) String() string {
	var out string
	for n := l; n != nil; n = n.Next {
		out += fmt.Sprintf("%d", n.Val)
	}
	return out
}
// addTwoNumbers adds two numbers whose digits are stored most
// significant first. Both lists are unloaded onto slices used as
// stacks, digits are summed from the least significant end, and
// dealSum prepends each result digit; a final carry becomes a new
// leading 1.
func addTwoNumbers(l1 *ListNode, l2 *ListNode) *ListNode {
	var (
		s1, s2 []int
		ret    = &ListNode{}
	)
	for l1 != nil {
		s1 = append(s1, l1.Val)
		l1 = l1.Next
	}
	for l2 != nil {
		s2 = append(s2, l2.Val)
		l2 = l2.Next
	}
	var addMore bool
	// add pairwise while both stacks still hold digits
	for len(s1) > 0 && len(s2) > 0 {
		t1 := s1[len(s1)-1]
		t2 := s2[len(s2)-1]
		s1 = s1[:len(s1)-1]
		s2 = s2[:len(s2)-1]
		sum := t1 + t2
		addMore = dealSum(sum, ret, addMore)
	}
	// the common-length part is done
	if len(s2) > 0 {
		s1 = s2 // continue uniformly on s1
	}
	// drain whatever the longer operand has left
	for len(s1) > 0 {
		t1 := s1[len(s1)-1]
		s1 = s1[:len(s1)-1]
		sum := t1
		addMore = dealSum(sum, ret, addMore)
	}
	// a leftover carry becomes a new most significant digit
	if addMore {
		tmp := &ListNode{
			Val:  1,
			Next: ret.Next,
		}
		ret.Next = tmp
	}
	return ret.Next
}
// dealSum folds an incoming carry into sum, prepends the units digit
// to ret.Next, and reports whether a carry remains for the next digit.
// (sum is at most 9+9+1, so sum/10 == 1 detects the carry.)
func dealSum(sum int, ret *ListNode, addMore bool) (newAddMore bool) {
	if addMore {
		sum++
	}
	newAddMore = sum/10 == 1
	// prepend: works for both the first digit (ret.Next == nil) and later ones
	ret.Next = &ListNode{
		Val:  sum % 10,
		Next: ret.Next,
	}
	return
}
<file_sep>/algorithms/_581_Shortest_Unsorted_Continuous_Subarray/answer.go
package _581_Shortest_Unsorted_Continuous_Subarray
import "math"
// findUnsortedSubarray returns the length of the shortest contiguous
// subarray that, once sorted, leaves the entire array sorted.
func findUnsortedSubarray(nums []int) int {
	return findUnsortedSubarrayDoubleDirection(nums)
}
// findUnsortedSubarrayDoubleDirection makes two sweeps: left-to-right
// records the last index that falls below the running maximum (end of
// the disorder); right-to-left records the first index rising above the
// running minimum (start). Both stay -1 for an already sorted array.
func findUnsortedSubarrayDoubleDirection(nums []int) int {
	runningMax, runningMin := math.MinInt, math.MaxInt
	start, end := -1, -1
	for i, v := range nums {
		if v >= runningMax {
			runningMax = v
		} else {
			end = i
		}
	}
	for i := len(nums) - 1; i >= 0; i-- {
		if nums[i] <= runningMin {
			runningMin = nums[i]
		} else {
			start = i
		}
	}
	if end == start {
		return 0
	}
	return end - start + 1
}
<file_sep>/algorithms/_23_Merge_K_Sorted_Lists/answer.go
package _23_Merge_K_Sorted_Lists
import (
"container/heap"
"fmt"
)
// ListNode is a singly linked list node.
type ListNode struct {
	Val  int
	Next *ListNode
}
// mergeKLists merges k sorted lists into one sorted list, currently via
// pairwise reduction (the priority-queue variant is kept for reference).
func mergeKLists(lists []*ListNode) *ListNode {
	//return mergeKListsWithPQ(lists)
	return mergeKListsReduce(lists)
}
// Priority queue plumbing for the heap-based variant.

// Item wraps a list node for the priority queue; Idx is maintained by
// Swap so heap bookkeeping stays consistent.
type Item struct {
	Node *ListNode
	Idx  int
}

// PriorityQueue is a min-heap of items ordered by node value,
// implementing container/heap.Interface.
type PriorityQueue []*Item

func (pq PriorityQueue) Len() int {
	return len(pq)
}

// Less orders by ascending node value (min-heap).
func (pq PriorityQueue) Less(i, j int) bool {
	return pq[i].Node.Val < pq[j].Node.Val
}

func (pq PriorityQueue) Swap(i, j int) {
	pq[i], pq[j] = pq[j], pq[i]
	pq[i].Idx, pq[j].Idx = i, j
}

// Push expects a *ListNode (not an *Item): it wraps the node and
// appends it. Called via heap.Push.
func (pq *PriorityQueue) Push(ln interface{}) {
	item := ln.(*ListNode)
	node := &Item{Node: item}
	n := len(*pq)
	node.Idx = n
	*pq = append(*pq, node)
}

// Pop removes the last slot (heap.Pop has already swapped the minimum
// there) and returns the wrapped *ListNode; the slot is nilled so the
// reference can be collected.
func (pq *PriorityQueue) Pop() interface{} {
	n := len(*pq)
	item := (*pq)[n-1]
	(*pq)[n-1] = nil
	item.Idx = -1
	*pq = (*pq)[0 : n-1]
	return item.Node
}

// Print dumps the queue's values on one line (debug helper).
func (pq PriorityQueue) Print() {
	for _, x := range pq {
		fmt.Printf("%d,", x.Node.Val)
	}
	fmt.Printf("\n")
}
// Priority-queue solution.
//
// mergeKListsWithPQ pushes every node of every list onto a min-heap,
// then pops in ascending order, copying values into a fresh result
// list. O(N log N) for N total nodes.
func mergeKListsWithPQ(lists []*ListNode) *ListNode {
	pq := PriorityQueue{}
	// push every node of every input list
	for _, list := range lists {
		for list != nil {
			heap.Push(&pq, list)
			//pq.Print()
			list = list.Next
		}
	}
	// pop in ascending order, building new nodes
	head := &ListNode{}
	tmp := head
	for pq.Len() > 0 {
		item := heap.Pop(&pq).(*ListNode)
		//pq.Print()
		tmp.Next = &ListNode{Val: item.Val}
		tmp = tmp.Next
	}
	return head.Next
}
// Merge the lists pairwise, reduction style.
//
// mergeKListsReduce folds list i with list i+k (k = ceil(n/2)) each
// round, halving the live count until a single list remains.
func mergeKListsReduce(lists []*ListNode) *ListNode {
	if len(lists) == 0 {
		return nil
	}
	remaining := len(lists)
	for remaining > 1 {
		half := (remaining + 1) / 2
		for i := 0; i < remaining/2; i++ {
			lists[i] = mergeTwoLists(lists[i], lists[i+half])
		}
		remaining = half
	}
	return lists[0]
}
// Atomic step: merge two sorted lists.
//
// mergeTwoLists splices the two sorted chains into one, reusing the
// existing nodes; ties take from l2, matching the original ordering.
func mergeTwoLists(l1, l2 *ListNode) *ListNode {
	if l1 == nil {
		return l2
	}
	if l2 == nil {
		return l1
	}
	sentinel := &ListNode{}
	tail := sentinel
	for l1 != nil && l2 != nil {
		if l1.Val < l2.Val {
			tail.Next, l1 = l1, l1.Next
		} else {
			tail.Next, l2 = l2, l2.Next
		}
		tail = tail.Next
	}
	if l1 != nil {
		tail.Next = l1
	} else {
		tail.Next = l2
	}
	return sentinel.Next
}
<file_sep>/algorithms/_437_Path_Sum_3/answer.go
package _437_Path_Sum_3
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// TreeNode is a binary tree node (LeetCode definition).
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}
// pathSum counts downward paths whose node values add up to sum.
// The prefix-sum map is seeded with {0: 1} so paths starting at the
// root itself are counted.
func pathSum(root *TreeNode, sum int) int {
	preSum := make(map[int]int)
	preSum[0] = 1
	return countPath(root, 0, sum, preSum)
}
// countPath counts paths summing to target in root's subtree using the
// prefix-sum technique: preSum maps a running root-to-node sum to how
// many ancestors produced it; each ancestor with prefix currSum-target
// starts one path that ends at the current node.
func countPath(root *TreeNode, currSum int, target int, preSum map[int]int) int {
	if root == nil {
		return 0
	}
	currSum += root.Val
	var res int
	// paths ending exactly at this node
	if v, ok := preSum[currSum-target]; ok {
		res = v
	} else {
		res = 0
	}
	var newv int
	if v, ok := preSum[currSum]; ok {
		newv = v + 1
	} else {
		newv = 0 + 1
	}
	preSum[currSum] = newv
	res += countPath(root.Left, currSum, target, preSum) + countPath(root.Right, currSum, target, preSum)
	// backtrack: this node is no longer an ancestor once we return
	preSum[currSum] = preSum[currSum] - 1
	return res
}
<file_sep>/algorithms/_744_Find_Smallest_Letter_Greater_Than_Target/answer.go
package _744_Find_Smallest_Letter_Greater_Than_Target
// nextGreatestLetter returns the smallest letter strictly greater than
// target, wrapping around to letters[0] when none exists.
func nextGreatestLetter(letters []byte, target byte) byte {
	return findIteration(letters, target)
}
// findIteration binary-searches the sorted letters for the first entry
// strictly greater than target; when every letter is <= target the
// alphabet wraps and letters[0] is returned.
func findIteration(letters []byte, target byte) byte {
	lo, hi := 0, len(letters)
	for lo < hi {
		mid := lo + (hi-lo)/2
		if letters[mid] <= target {
			lo = mid + 1
		} else {
			hi = mid
		}
	}
	if lo == len(letters) {
		return letters[0]
	}
	return letters[lo]
}
<file_sep>/algorithms/_133_Clone_Graph/answer.go
package _133_Clone_Graph
// Node is an undirected-graph node holding its adjacency list.
type Node struct {
	Val       int
	Neighbors []*Node
}
// cloneGraph deep-copies the graph reachable from node, memoizing
// clones by node value in dict (values are assumed unique).
func cloneGraph(node *Node) *Node {
	dict := make(map[int]*Node)
	return cloneGraphDFS(node, dict)
}
// cloneGraphDFS copies node and everything reachable from it. dict
// memoizes clones by Val and also breaks cycles: a clone is registered
// before its neighbor list is populated.
func cloneGraphDFS(node *Node, dict map[int]*Node) *Node {
	if node == nil {
		return nil
	}
	if clone, seen := dict[node.Val]; seen {
		return clone
	}
	clone := &Node{Val: node.Val}
	dict[node.Val] = clone
	for _, nb := range node.Neighbors {
		clone.Neighbors = append(clone.Neighbors, cloneGraphDFS(nb, dict))
	}
	return clone
}
<file_sep>/algorithms/_334_Increasing_Triplet_Subsequence/answer_test.go
package _334_Increasing_Triplet_Subsequence
import (
"testing"
)
// testCase pairs an input slice with the expected boolean result.
type testCase struct {
	input  []int
	output bool
}

// TestIncreasingTriplet drives increasingTriplet over increasing,
// decreasing, and mixed sequences.
func TestIncreasingTriplet(t *testing.T) {
	cases := []testCase{
		{
			input:  []int{1, 2, 3, 4, 5},
			output: true,
		},
		{
			input:  []int{5, 4, 3, 2, 1},
			output: false,
		},
		{
			input:  []int{2, 1, 5, 0, 4, 6},
			output: true,
		},
	}
	for _, c := range cases {
		if x := increasingTriplet(c.input); x != c.output {
			t.Errorf("output should be \"%v\" instead of \"%v\" with input=\"%v\"", c.output, x, c.input)
		}
	}
}
<file_sep>/algorithms/_450_Delete_Node_in_a_BST/answer.go
package _450_Delete_Node_in_a_BST
// TreeNode is a binary search tree node.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}
// deleteNode removes the node with value key from the BST and returns
// the new root. A sentinel parent makes deleting the root itself
// uniform with every other node.
func deleteNode(root *TreeNode, key int) *TreeNode {
	pre := &TreeNode{Left: root}
	deleteNodeR(root, pre, true, key)
	return pre.Left
}
// deleteNodeR searches for key below root; pre is root's parent and
// isLeft records which child pointer of pre refers to root, so the
// parent link can be rewired once the match is found.
func deleteNodeR(root, pre *TreeNode, isLeft bool, key int) {
	if root == nil {
		return
	}
	if root.Val > key {
		deleteNodeR(root.Left, root, true, key)
		return
	}
	if root.Val < key {
		deleteNodeR(root.Right, root, false, key)
		return
	}
	if root.Val == key { // delete
		var tmp *TreeNode
		if root.Left == nil {
			tmp = root.Right
		} else if root.Right == nil {
			tmp = root.Left
		} else {
			// two children: merge both subtrees into one BST
			tmp = buildBST(root.Left, root.Right)
		}
		if isLeft {
			pre.Left = tmp
		} else {
			pre.Right = tmp
		}
	}
}
// buildBST merges the two subtrees of a deleted node: left becomes the
// new root with right as its right child, and left's former right
// subtree (values between left.Val and the deleted key) is re-attached
// under the leftmost node of right, preserving BST ordering.
func buildBST(left, right *TreeNode) *TreeNode {
	n := left
	lr := n.Right
	n.Right = right
	for right.Left != nil {
		right = right.Left
	}
	right.Left = lr
	return n
}
<file_sep>/algorithms/_2130_Maximum_Twin_Sum_of_Linked_List/answer.go
package _2130_Maximum_Twin_Sum_of_Linked_List
import (
"math"
"github.com/shadas/leetcode_notes/utils/linkedlist"
)
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// pairSum returns the maximum twin sum (node i + node n-1-i) of an
// even-length linked list.
func pairSum(head *linkedlist.IntListNode) int {
	return pairSumSlice(head)
}
// pairSumSlice copies the list into a slice and scans symmetric pairs
// from both ends for the largest sum. Returns math.MinInt for a list
// with fewer than two nodes, as before.
func pairSumSlice(head *linkedlist.IntListNode) int {
	var vals []int
	for n := head; n != nil; n = n.Next {
		vals = append(vals, n.Val)
	}
	best := math.MinInt
	for i, j := 0, len(vals)-1; i < j; i, j = i+1, j-1 {
		if s := vals[i] + vals[j]; s > best {
			best = s
		}
	}
	return best
}
<file_sep>/algorithms/_137_Single_Number_2/answer.go
package _137_Single_Number_2
// Times is how often every element except the answer appears.
const Times = 3

// singleNumber returns the element appearing exactly once while all
// others appear Times (three) times.
func singleNumber(nums []int) int {
	//return singleNumberWithCalcu(nums)
	return singleNumberWithBit(nums)
}
// Bit-manipulation solution.
//
// singleNumberWithBit finds the element appearing once when every other
// element appears exactly three times. a and b together count each
// bit's occurrences mod 3 (a = bits seen 1 mod 3, b = bits seen
// 2 mod 3); after the scan a holds the answer.
//
// Fix: the original update `b = (^a ^ num) ^ b` is not the mod-3
// counter — e.g. for [1, 1, 1, 2] it returned 0 instead of 2. The
// standard pair of masked updates is used instead.
func singleNumberWithBit(nums []int) int {
	a, b := 0, 0
	for _, num := range nums {
		a = (a ^ num) &^ b
		b = (b ^ num) &^ a
	}
	return a
}
// Arithmetic-sum approach: 3 * sum(unique values) - sum(all values)
// equals twice the target. Deliberately left unimplemented.
func singleNumberWithSum(nums []int) int {
	// ...
	return 0
}
// Counting solution.
//
// singleNumberWithCalcu is the O(n^2) brute force: for each unmarked
// value it scans ahead for the remaining Times-1 copies, marking them
// so they are never treated as candidates; the value that fails to
// collect Times-1 copies is the single one. Returns 0 when every value
// appears Times times.
func singleNumberWithCalcu(nums []int) int {
	mark := []bool{}
	for i := 0; i < len(nums); i++ {
		mark = append(mark, false)
	}
	for i, n := range nums {
		if mark[i] == false {
			if i == len(nums)-1 {
				// last element and never marked as someone's copy
				return n
			} else {
				tmp := nums[i+1:]
				times := 0
				for j, m := range tmp {
					if m == n {
						mark[i+j+1] = true
						times++
						if times == Times-1 {
							break // all copies found, stop early
						}
					}
				}
				if times != Times-1 {
					return n
				}
			}
		} else {
			continue
		}
	}
	return 0
}
<file_sep>/algorithms/_647_Palindromic_Substrings/answer_test.go
package _647_Palindromic_Substrings
import "testing"
// TestCountSubstrings checks two known palindromic-substring counts.
func TestCountSubstrings(t *testing.T) {
	for _, c := range []struct {
		in   string
		want int
	}{{"abc", 3}, {"aaa", 6}} {
		if n := countSubstrings(c.in); n != c.want {
			t.Errorf("wrong count with %d", n)
		}
	}
}
<file_sep>/README.md
Each question is a folder that includes:
QD.md : a markdown file with the description of the question (QD means Question-Description).
answer.go : a Go source file with the solution code for the question.
answer_test.go : a Go source file used to test the answers in the answer.go file.
<file_sep>/algorithms/_44_Wildcard_Matching/answer_test.go
package _44_Wildcard_Matching
import "testing"
// TestIsMatch covers a non-match, the star wildcard, and the
// single-character wildcard.
func TestIsMatch(t *testing.T) {
	for _, c := range []struct {
		s, p string
		want bool
		msg  string
	}{
		{"aa", "a", false, "aa and a should be false"},
		{"aa", "*", true, "aa and * should be true"},
		{"cb", "?a", false, "cb and ?a should be false"},
	} {
		if isMatch(c.s, c.p) != c.want {
			t.Error(c.msg)
		}
	}
}
<file_sep>/algorithms/_744_Find_Smallest_Letter_Greater_Than_Target/QD.md
Given a characters array letters that is sorted in non-decreasing order and a character target, return the smallest character in the array that is larger than target.
Note that the letters wrap around.
For example, if target == 'z' and letters == ['a', 'b'], the answer is 'a'.
Example 1:
```
Input: letters = ["c","f","j"], target = "a"
Output: "c"
```
Example 2:
```
Input: letters = ["c","f","j"], target = "c"
Output: "f"
```
Example 3:
```
Input: letters = ["c","f","j"], target = "d"
Output: "f"
```
Example 4:
```
Input: letters = ["c","f","j"], target = "g"
Output: "j"
```
Example 5:
```
Input: letters = ["c","f","j"], target = "j"
Output: "c"
```
Constraints:
- 2 <= letters.length <= 10^4
- letters[i] is a lowercase English letter.
- letters is sorted in non-decreasing order.
- letters contains at least two different characters.
- target is a lowercase English letter.
<file_sep>/algorithms/_958_Check_Completeness_of_a_Binary_Tree/answer.go
package _958_Check_Completeness_of_a_Binary_Tree
// TreeNode is a binary tree node (LeetCode definition).
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}

// isCompleteTree reports whether the tree is complete: in a
// breadth-first walk, no non-nil child may appear after the first
// missing child has been seen.
func isCompleteTree(root *TreeNode) bool {
	if root == nil {
		return true
	}
	queue := []*TreeNode{root}
	sawGap := false
	for len(queue) > 0 {
		node := queue[0]
		queue = queue[1:]
		for _, child := range []*TreeNode{node.Left, node.Right} {
			if child == nil {
				sawGap = true
				continue
			}
			if sawGap {
				return false
			}
			queue = append(queue, child)
		}
	}
	return true
}
<file_sep>/algorithms/_9_Palindrome_Number/answer_test.go
package _9_Palindrome_Number
import (
"testing"
)
// TestIsPalindrome covers zero, a non-palindrome, and a palindrome.
func TestIsPalindrome(t *testing.T) {
	if ok := isPalindrome(0); !ok {
		t.Error("false with 0.")
	}
	if ok := isPalindrome(10); ok {
		t.Error("true with 10.")
	}
	if ok := isPalindrome(12321); !ok {
		t.Error("false with 12321.")
	}
}
<file_sep>/algorithms/_392_Is_Subsequence/answer.go
package _392_Is_Subsequence
// isSubsequence reports whether s is a subsequence of t.
func isSubsequence(s string, t string) bool {
	return isSubsequenceByte(s, t)
}
// isSubsequenceByte scans t once, greedily advancing a cursor into s on
// every matching byte; s is a subsequence iff the cursor reaches len(s).
func isSubsequenceByte(s string, t string) bool {
	if len(s) == 0 {
		return true
	}
	si := 0
	for ti := 0; ti < len(t); ti++ {
		if t[ti] != s[si] {
			continue
		}
		si++
		if si == len(s) {
			return true
		}
	}
	return false
}
<file_sep>/algorithms/_692_Top_K_Frequent_Words/answer.go
package _692_Top_K_Frequent_Words
import (
"fmt"
"sort"
)
// topKFrequent returns the k most frequent words, most frequent first,
// ties broken alphabetically.
func topKFrequent(words []string, k int) []string {
	return topKFrequentWithQS(words, k)
}
// Heap / priority-queue variant.
func topKFrequentWithPQ(words []string, k int) []string {
	// todo: the approach is straightforward; not implemented yet
	return []string{}
}
// Sort-based variant.
//
// topKFrequentWithQS tallies word frequencies, sorts every distinct
// word by (count desc, word asc) via the X sort.Interface, and returns
// the first k.
func topKFrequentWithQS(words []string, k int) []string {
	// tally
	freq := make(map[string]int)
	for _, w := range words {
		freq[w]++
	}
	// flatten into sortable items
	items := make(X, 0, len(freq))
	for w, c := range freq {
		items = append(items, &Item{word: w, count: c})
	}
	// full ordering (the problem wants the prefix itself ordered)
	sort.Sort(items)
	// keep the top k
	top := make([]string, k)
	for i := range top {
		top[i] = items[i].word
	}
	return top
}
// Item pairs a word with its occurrence count for sorting.
type Item struct {
	word  string
	count int
}
// X implements sort.Interface over items, delegating ordering to the
// package-level Less.
type X []*Item

func (x X) Swap(i, j int) {
	x[i], x[j] = x[j], x[i]
}

func (x X) Len() int {
	return len(x)
}

func (x X) Less(i, j int) bool {
	return Less(x[i], x[j])
}
// Less orders items by descending count, breaking ties by ascending
// word.
func Less(a, b *Item) bool {
	if a.count != b.count {
		return a.count > b.count
	}
	return a.word < b.word
}
// printL is a debugging helper dumping items as word_count pairs on one
// line.
func printL(l []*Item) {
	for _, it := range l {
		fmt.Printf("%s_%d,", it.word, it.count)
	}
	fmt.Println()
}
// Quickselect to position k — but this problem needs the whole prefix
// ordered, so the full sort above is used instead; kept for reference.
//
// SortUtilK partially quicksorts s (descending per Less) until the
// pivot lands at index k-1, so the first k slots hold the top-k items.
func SortUtilK(s []*Item, k int) {
	low, high := 0, len(s)
sortLoop:
	for low < high {
		pos := low
		i, j := low, high-1
		privot := s[pos]
		for i < j {
			// move from the right: items that should precede the pivot
			for i <= j && Less(s[j], privot) {
				j--
			}
			if i <= j {
				s[j], s[pos] = s[pos], s[j]
				pos = j
			}
			// move from the left: items that should follow the pivot
			for i <= j && Less(privot, s[i]) {
				i++
			}
			if i <= j {
				s[i], s[pos] = s[pos], s[i]
				pos = i
			}
		}
		if pos == k-1 {
			break sortLoop
		} else if pos < k-1 {
			low = pos + 1
		} else {
			high = pos
		}
	}
}
<file_sep>/algorithms/_151_Reverse_Words_in_a_String/answer.go
package _151_Reverse_Words_in_a_String
import "strings"
// reverseWords returns the words of s in reverse order, joined by
// single spaces; leading/trailing/duplicate spaces are dropped.
//
// Improvement: the original prepended each word to a fresh slice
// (O(n^2) copying). Words are now compacted in place and reversed with
// a two-pointer swap. Splitting still uses " " exactly as before, so
// behavior on other whitespace is unchanged.
func reverseWords(s string) string {
	parts := strings.Split(strings.TrimSpace(s), " ")
	// drop the empty strings produced by runs of spaces
	words := parts[:0]
	for _, w := range parts {
		if w != "" {
			words = append(words, w)
		}
	}
	for i, j := 0, len(words)-1; i < j; i, j = i+1, j-1 {
		words[i], words[j] = words[j], words[i]
	}
	return strings.Join(words, " ")
}
<file_sep>/algorithms/_337_House_Robber_3/answer.go
package _337_House_Robber_3
import "math"
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// rob returns the maximum loot from a binary tree of houses where two
// directly linked houses cannot both be robbed.
func rob(root *TreeNode) int {
	// return solution1(root)
	return solution2(root)
}
// Plain recursive solution.
func solution1(root *TreeNode) int {
	with, without := solution1Rec(root)
	return int(math.Max(float64(with), float64(without)))
}
// solution1Rec returns the best loot of root's subtree both when root
// is robbed (with) and when it is skipped (without).
func solution1Rec(root *TreeNode) (with, without int) {
	if root == nil {
		return 0, 0
	}
	lwith, lwithout := solution1Rec(root.Left)
	rwith, rwithout := solution1Rec(root.Right)
	// robbing root forces both children to be skipped
	with = root.Val + lwithout + rwithout
	// skipping root lets each child independently be robbed or not
	without = int(math.Max(float64(lwith), float64(lwithout)) + math.Max(float64(rwith), float64(rwithout)))
	return
}
// Recursive solution with a memo of per-subtree results.
func solution2(root *TreeNode) int {
	cache := make(map[*TreeNode][2]int)
	ret := solution2Rec(root, cache)
	return int(math.Max(float64(ret[0]), float64(ret[1])))
}
// solution2Rec is solution1Rec with memoization: ret[0] is the best
// loot when root is robbed, ret[1] when it is skipped; cache
// short-circuits already-computed subtrees.
func solution2Rec(root *TreeNode, cache map[*TreeNode][2]int) (ret [2]int) {
	if root == nil {
		return
	}
	var ok bool
	if ret, ok = cache[root]; ok {
		return
	}
	lret := solution2Rec(root.Left, cache)
	rret := solution2Rec(root.Right, cache)
	cache[root.Left] = lret
	cache[root.Right] = rret
	ret[0] = root.Val + lret[1] + rret[1]
	ret[1] = int(math.Max(float64(lret[0]), float64(lret[1])) + math.Max(float64(rret[0]), float64(rret[1])))
	cache[root] = ret
	return
}
<file_sep>/algorithms/_389_Find_the_Difference/answer.go
package _389_Find_the_Difference
// findTheDifference returns the one extra letter present in t but not
// in s (t is s shuffled plus one character).
func findTheDifference(s string, t string) byte {
	return findTheDifferenceWithXOR(s, t)
}
// findTheDifferenceWithXOR XORs every rune of s and t together; paired
// characters cancel, leaving only the extra character of t.
func findTheDifferenceWithXOR(s string, t string) byte {
	var acc rune
	for _, r := range s + t {
		acc ^= r
	}
	return byte(acc)
}
<file_sep>/algorithms/_383_Ransom_Note/answer.go
package _383_Ransom_Note
// canConstruct reports whether ransomNote can be assembled from the
// letters of magazine, each magazine letter used at most once.
func canConstruct(ransomNote string, magazine string) bool {
	avail := make(map[rune]int)
	for _, r := range magazine {
		avail[r]++
	}
	for _, r := range ransomNote {
		if avail[r] == 0 { // covers both "never seen" and "used up"
			return false
		}
		avail[r]--
	}
	return true
}
<file_sep>/algorithms/_213_House_Robber_2/answer.go
package _213_House_Robber_2
import "math"
// rob handles the circular street by solving two linear sub-problems —
// houses [0, n-1) and houses [1, n) — and taking the better result,
// since the first and last house can never both be robbed.
func rob(nums []int) int {
	switch len(nums) {
	case 0:
		return 0
	case 1:
		return nums[0]
	}
	skipLast := getMaxInt(nums[:len(nums)-1])
	skipFirst := getMaxInt(nums[1:])
	return int(math.Max(float64(skipLast), float64(skipFirst)))
}
// getMaxInt solves the linear house-robber problem with bottom-up DP:
// dp[i] = max(dp[i-1], dp[i-2] + nums[i]).
//
// Improvements: the length guards now run before any allocation (the
// original built a -1-filled slice via an append loop even for empty
// input), the dp slice is preallocated, and the int->float64->int
// round-trip through math.Max is replaced by integer comparisons.
func getMaxInt(nums []int) int {
	switch len(nums) {
	case 0:
		return 0
	case 1:
		return nums[0]
	}
	dp := make([]int, len(nums))
	dp[0] = nums[0]
	dp[1] = dp[0]
	if nums[1] > dp[1] {
		dp[1] = nums[1]
	}
	for i := 2; i < len(nums); i++ {
		best := dp[i-1] // skip house i
		if take := dp[i-2] + nums[i]; take > best {
			best = take // rob house i
		}
		dp[i] = best
	}
	return dp[len(nums)-1]
}
<file_sep>/algorithms/_1089_Duplicate_Zeros/answer.go
package _1089_Duplicate_Zeros
// duplicateZeros duplicates each zero in place, shifting the remaining
// elements right; anything pushed past the original length is dropped.
func duplicateZeros(arr []int) {
	expanded := make([]int, 0, len(arr))
	for _, v := range arr {
		expanded = append(expanded, v)
		if v == 0 {
			expanded = append(expanded, 0)
		}
	}
	copy(arr, expanded) // copy truncates to len(arr)
}
<file_sep>/algorithms/_590_N-ary_Tree_Postorder_Traversal/answer.go
package _590_N_ary_Tree_Postorder_Traversal
// Node is an n-ary tree node.
type Node struct {
	Val      int
	Children []*Node
}

// postorder returns the post-order traversal of the n-ary tree.
func postorder(root *Node) []int {
	return postorderR(root)
}

// Recursive traversal: children left to right, then the node itself.
func postorderR(root *Node) []int {
	if root == nil {
		return []int{}
	}
	out := []int{}
	for _, child := range root.Children {
		out = append(out, postorderR(child)...)
	}
	return append(out, root.Val)
}
<file_sep>/algorithms/_304_Range_Sum_Query_2D_Immutable/answer.go
package _304_Range_Sum_Query_2D_Immutable
// NumMatrix answers 2-D range-sum queries in O(1) via an inclusive
// prefix-sum table: ret[i][j] holds the sum of rectangle (0,0)..(i,j).
type NumMatrix struct {
	ret [][]int
}

// Constructor builds the prefix table. The input matrix is reused as
// storage, so the caller's matrix is mutated.
func Constructor(matrix [][]int) NumMatrix {
	nm := NumMatrix{ret: matrix}
	for i := range matrix {
		rowSum := 0 // running sum of the current row
		for j := range matrix[i] {
			rowSum += matrix[i][j]
			nm.ret[i][j] = rowSum
			if i != 0 { // add the prefix rectangle ending one row up
				nm.ret[i][j] += nm.ret[i-1][j]
			}
		}
	}
	return nm
}

// SumRegion returns the sum of the inclusive rectangle
// (row1,col1)..(row2,col2) by inclusion-exclusion on the prefix table.
func (this *NumMatrix) SumRegion(row1 int, col1 int, row2 int, col2 int) int {
	total := this.ret[row2][col2]
	if row1 > 0 {
		total -= this.ret[row1-1][col2]
	}
	if col1 > 0 {
		total -= this.ret[row2][col1-1]
	}
	if row1 > 0 && col1 > 0 {
		total += this.ret[row1-1][col1-1]
	}
	return total
}
/**
* Your NumMatrix object will be instantiated and called as such:
* obj := Constructor(matrix);
* param_1 := obj.SumRegion(row1,col1,row2,col2);
*/
<file_sep>/algorithms/_147_Insertion_Sort_List/answer.go
package _147_Insertion_Sort_List
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// insertionSortList sorts a singly linked list with insertion sort,
// relinking the existing nodes. O(n^2) time, O(1) extra space.
func insertionSortList(head *ListNode) *ListNode {
	if head == nil || head.Next == nil {
		return head
	}
	p := head.Next  // p walks the remaining unsorted chain
	head.Next = nil // detach the first node as the initial sorted list
	for p != nil {
		pNext := p.Next // stash the rest; p is now a free node ready to insert
		q := head       // q scans the sorted list from its head
		if q.Val >= p.Val { // p is not larger than the sorted head: prepend it
			p.Next = q
			head = p
		} else { // otherwise walk right to find the insertion point
			for q != nil && q.Next != nil && q.Next.Val < p.Val { // advance while the next sorted node is still smaller
				q = q.Next
			}
			// either the end was reached or q.Next >= p; insert p after q
			p.Next = q.Next
			q.Next = p
		}
		// insertion finished; continue with the next unsorted node
		p = pNext
	}
	return head
}
<file_sep>/algorithms/_57_Insert_Interval/answer_test.go
package _57_Insert_Interval
import "testing"
// TestInsert logs insert results for several interval fixtures; the
// expected merged output is noted beside each case (no assertions).
func TestInsert(t *testing.T) {
	cases := []struct {
		intervals   [][]int
		newInterval []int
	}{
		{[][]int{{1, 3}, {6, 9}}, []int{2, 5}},                              // [[1,5], [6,9]]
		{[][]int{{1, 5}}, []int{2, 7}},                                      // [[1,7]]
		{[][]int{{1, 5}}, []int{0, 0}},                                      // [[0,0], [1,5]]
		{[][]int{{1, 2}, {3, 5}, {6, 7}, {8, 10}, {12, 16}}, []int{4, 8}},   // [[1,2],[3,10],[12,16]]
	}
	for _, c := range cases {
		ret := insert(c.intervals, c.newInterval)
		t.Logf("ret is %v", ret)
	}
}
<file_sep>/algorithms/_933_Number_of_Recent_Calls/answer.go
package _933_Number_of_Recent_Calls
// RecentCounter counts pings falling inside a sliding 3000 ms window.
type RecentCounter struct {
	queue []int // timestamps still inside the window, ascending
}

// Constructor returns an empty counter.
func Constructor() RecentCounter {
	return RecentCounter{}
}

// Ping records a request at time t (strictly increasing across calls)
// and returns how many requests occurred within [t-3000, t].
func (this *RecentCounter) Ping(t int) int {
	// evict timestamps that have fallen out of the window
	for len(this.queue) > 0 && t-this.queue[0] > 3000 {
		this.queue = this.queue[1:]
	}
	this.queue = append(this.queue, t)
	return len(this.queue)
}
/**
* Your RecentCounter object will be instantiated and called as such:
* obj := Constructor();
* param_1 := obj.Ping(t);
*/
<file_sep>/algorithms/_241_Different_Ways_to_Add_Parentheses/answer.go
package _241_Different_Ways_to_Add_Parentheses
import "strconv"
// diffWaysToCompute returns every value obtainable by fully
// parenthesizing the arithmetic expression input.
func diffWaysToCompute(input string) []int {
	return genRecursive(input)
}
// genRecursive splits input at each operator, recursively evaluates
// both sides, and combines every left/right pair with calc. A string
// that parses as a plain integer is the base case.
func genRecursive(input string) []int {
	if n, err := strconv.Atoi(input); err == nil {
		return []int{n}
	}
	var results []int
	for i := 1; i < len(input); i++ {
		op := input[i]
		if op != '+' && op != '-' && op != '*' {
			continue
		}
		for _, l := range genRecursive(input[0:i]) {
			for _, r := range genRecursive(input[i+1:]) {
				results = append(results, calc(l, op, r))
			}
		}
	}
	return results
}
// calc applies the binary operator op ('+', '-' or '*') to l and r;
// any other operator yields 0.
func calc(l int, op byte, r int) int {
	switch op {
	case '+':
		return l + r
	case '-':
		return l - r
	case '*':
		return l * r
	default:
		return 0
	}
}
<file_sep>/algorithms/_155_Min_Stack/answer.go
package _155_Min_Stack
// MinStack is a LIFO stack that also reports its minimum element.
// A parallel stack of running minimums makes GetMin O(1); the original
// rescanned the entire slice on every GetMin call.
type MinStack struct {
	l    []int
	mins []int // mins[i] == min(l[0..i])
}

/** initialize your data structure here. */
func Constructor() MinStack {
	return MinStack{
		l:    []int{},
		mins: []int{},
	}
}

// Push adds x on top of the stack.
func (this *MinStack) Push(x int) {
	this.l = append(this.l, x)
	if n := len(this.mins); n > 0 && this.mins[n-1] < x {
		this.mins = append(this.mins, this.mins[n-1])
	} else {
		this.mins = append(this.mins, x)
	}
}

// Pop removes the top element; popping an empty stack is a no-op.
func (this *MinStack) Pop() {
	if len(this.l) > 0 {
		this.l = this.l[:len(this.l)-1]
		this.mins = this.mins[:len(this.mins)-1]
	}
}

// Top returns the top element, or 0 when the stack is empty.
func (this *MinStack) Top() int {
	if len(this.l) > 0 {
		return this.l[len(this.l)-1]
	}
	return 0
}

// GetMin returns the smallest element, or 0 when the stack is empty
// (matching the previous implementation's behavior).
func (this *MinStack) GetMin() int {
	if len(this.mins) == 0 {
		return 0
	}
	return this.mins[len(this.mins)-1]
}
/**
* Your MinStack object will be instantiated and called as such:
* obj := Constructor();
* obj.Push(x);
* obj.Pop();
* param_3 := obj.Top();
* param_4 := obj.GetMin();
*/
<file_sep>/algorithms/_229_Majority_Element_2/answer.go
package _229_Majority_Element_2
// majorityElement returns every value appearing more than len(nums)/3
// times (at most two can exist).
func majorityElement(nums []int) []int {
	return majorityElementByCount(nums)
}
// majorityElementByCount tallies every value and collects those whose
// count exceeds len(nums)/3. Result order follows map iteration and is
// unspecified.
func majorityElementByCount(nums []int) []int {
	threshold := len(nums) / 3
	tally := make(map[int]int)
	for _, v := range nums {
		tally[v]++
	}
	ret := []int{}
	for v, c := range tally {
		if c > threshold {
			ret = append(ret, v)
		}
	}
	return ret
}
<file_sep>/algorithms/_268_Missing_Number/answer.go
package _268_Missing_Number
// missingNumber returns the one value in [0, n] absent from nums.
func missingNumber(nums []int) int {
	// return missingNumberWithAdd(nums)
	return missingNumberWithXor(nums)
}
// missingNumberWithAdd subtracts the actual sum from the expected sum
// of 0..n (Gauss formula); the difference is the missing value.
func missingNumberWithAdd(nums []int) int {
	n := len(nums)
	expected := n * (n + 1) / 2
	actual := 0
	for _, v := range nums {
		actual += v
	}
	return expected - actual
}
// missingNumberWithXor folds the length n, every index 0..n-1, and
// every element into one XOR; all present values cancel their index,
// leaving the missing one.
func missingNumberWithXor(nums []int) int {
	acc := len(nums)
	for i, v := range nums {
		acc ^= i ^ v
	}
	return acc
}
<file_sep>/algorithms/_606_Construct_String_from_Binary_Tree/answer_test.go
package _606_Construct_String_from_Binary_Tree
import (
"testing"
)
// testCase pairs a tree with its expected string rendering.
type testCase struct {
	input  *TreeNode
	output string
}

// TestTree2str checks tree2str against a fixed fixture.
func TestTree2str(t *testing.T) {
	for _, c := range []testCase{
		{
			input:  &TreeNode{Val: 1, Left: &TreeNode{Val: 2, Left: &TreeNode{Val: 4}}, Right: &TreeNode{Val: 3}},
			output: "1(2(4))(3)",
		},
	} {
		if x := tree2str(c.input); x != c.output {
			t.Errorf("output should be \"%v\" instead of \"%v\" with input=\"%v\"", c.output, x, c.input)
		}
	}
}
<file_sep>/algorithms/_326_Power_of_Three/answer.go
package _326_Power_of_Three
// isPowerOfThree reports whether n is a positive power of three.
func isPowerOfThree(n int) bool {
	return isPowerOfThreeLoop(n)
}
// isPowerOfThreeLoop divides n by 3 while possible; n is a power of
// three iff every division is exact and the quotient reaches 1.
func isPowerOfThreeLoop(n int) bool {
	if n < 1 {
		return false
	}
	for ; n > 1; n /= 3 {
		if n%3 != 0 {
			return false
		}
	}
	return true
}
// isPowerOfThreeSmart is a placeholder for the loop-free variants;
// deliberately unimplemented and always false.
func isPowerOfThreeSmart(n int) bool {
	// too many smart ways
	return false
}
<file_sep>/algorithms/_22_Generate_Parentheses/answer.go
package _22_Generate_Parentheses
// generateParenthesis returns all well-formed strings of n bracket
// pairs.
func generateParenthesis(n int) []string {
	return generateParenthesisWithMap(n)
	//return generateParenthesisWithBackTrace(n)
}
// generateParenthesisWithMap builds the answer bottom-up: dp[i-1] is
// the set of every valid string of i pairs, produced by inserting "()"
// at each interior position of every (i-1)-pair string; the map
// deduplicates collisions. Output order follows map iteration.
func generateParenthesisWithMap(n int) []string {
	if n == 0 {
		return []string{}
	}
	if n == 1 {
		return []string{"()"}
	}
	dp := make([]map[string]struct{}, n)
	dp[0] = map[string]struct{}{
		"()": {},
	}
	for i := 2; i <= n; i++ {
		pre := dp[i-2]
		x := genNewStrs(pre)
		dp[i-1] = x
	}
	ret := make([]string, len(dp[n-1]))
	idx := 0
	for k := range dp[n-1] {
		ret[idx] = k
		idx++
	}
	return ret
}
// genNewStrs expands each balanced string by inserting "()" at every
// position before the final character; the result set deduplicates.
// (Appending at the very end is skipped — presumably such strings are
// reachable via an interior insertion of another source string.)
func genNewStrs(prem map[string]struct{}) (ret map[string]struct{}) {
	ret = make(map[string]struct{})
	for s := range prem {
		for i := 0; i < len(s); i++ {
			ret[s[:i]+"()"+s[i:]] = struct{}{}
		}
	}
	return ret
}
// generateParenthesisWithBackTrace builds the answers by backtracking
// over counts of open/close brackets used so far.
func generateParenthesisWithBackTrace(n int) []string {
	res := []string{}
	backTrace(n, 0, 0, &res, "")
	return res
}
// backTrace extends str one character at a time: an opening paren may
// be added while fewer than n are used (l), a closing paren only while
// it would not exceed the openings (r < l). Complete strings of length
// 2n are appended to res.
func backTrace(n, l, r int, res *[]string, str string) {
	if len(str) == n*2 {
		*res = append(*res, str)
		return
	}
	if l < n {
		backTrace(n, l+1, r, res, str+"(")
	}
	if r < l {
		backTrace(n, l, r+1, res, str+")")
	}
}
<file_sep>/algorithms/_989_Add_to_Array_Form_of_Integer/answer.go
package _989_Add_to_Array_Form_of_Integer
// addToArrayForm returns the decimal digits (most significant first)
// of the integer represented by A plus K.
func addToArrayForm(A []int, K int) []int {
	return addToArrayFormWithByte(A, K)
}
// addToArrayFormWithByte adds K to the number whose decimal digits are
// A (most significant first) and returns the resulting digit slice.
//
// Rewritten to fix two defects in the original:
//   - `append(A[0:idx+1], sum...)` could write into A's backing array,
//     mutating the caller's slice;
//   - digits were prepended one at a time, making the whole routine
//     O(n^2); this version appends and reverses once, O(n).
func addToArrayFormWithByte(A []int, K int) []int {
	// Build the result least-significant digit first, folding K into
	// the running carry.
	out := make([]int, 0, len(A)+1)
	carry := K
	for i := len(A) - 1; i >= 0; i-- {
		carry += A[i]
		out = append(out, carry%10)
		carry /= 10
	}
	// Flush any remaining carry digits.
	for carry > 0 {
		out = append(out, carry%10)
		carry /= 10
	}
	// Reverse into most-significant-first order.
	for i, j := 0, len(out)-1; i < j; i, j = i+1, j-1 {
		out[i], out[j] = out[j], out[i]
	}
	return out
}
<file_sep>/algorithms/_45_Jump_Game_2/answer.go
package _45_Jump_Game_2
// jump returns the minimum number of jumps needed to reach the last
// index of nums, where nums[i] is the maximum jump length from index i.
// Greedy: from position i, jump to the reachable index that extends the
// future reach the furthest. Assumes the last index is reachable, as
// leetcode 45 guarantees — TODO confirm for any other caller.
func jump(nums []int) int {
	if len(nums) <= 1 {
		return 0
	}
	var (
		distance = len(nums) - 1 // index of the target (last) element
		step     = 0
		max      = 0
		i, idx   = 0, 0
	)
	for i < len(nums) {
		// One final jump suffices if the target is directly reachable.
		if i+nums[i] >= distance {
			step++
			return step
		}
		// Otherwise pick the candidate j (i < j <= i+nums[i]) that
		// maximizes the onward reach, measured as nums[j]+j-i.
		max = 0
		idx = i + 1
		for j := i + 1; j-i <= nums[i]; j++ {
			if max < nums[j]+j-i {
				max = nums[j] + j - i
				idx = j
			}
		}
		i = idx
		step++
	}
	return step
}
<file_sep>/algorithms/_404_Sum_of_Left_Leaves/answer.go
package _404_Sum_of_Left_Leaves
// TreeNode is a binary tree node (leetcode's standard definition).
type TreeNode struct {
	Val int
	Left *TreeNode
	Right *TreeNode
}
// sumOfLeftLeaves returns the sum of all left leaves in the tree.
// Three interchangeable traversals are implemented below; the
// iterative DFS is the active one.
func sumOfLeftLeaves(root *TreeNode) int {
	// return sumOfLeftLeavesBFS(root)
	// return sumOfLeftLeavesDFSRecursion(root)
	return sumOfLeftLeavesDFSUnrecursion(root)
}
// sumOfLeftLeavesDFSUnrecursion sums left leaves using an explicit
// stack instead of recursion; a left child with no children of its own
// is a left leaf.
func sumOfLeftLeavesDFSUnrecursion(node *TreeNode) int {
	if node == nil {
		return 0
	}
	total := 0
	stack := []*TreeNode{node}
	for len(stack) != 0 {
		cur := stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		if cur.Right != nil {
			stack = append(stack, cur.Right)
		}
		if left := cur.Left; left != nil {
			stack = append(stack, left)
			if left.Left == nil && left.Right == nil {
				total += left.Val
			}
		}
	}
	return total
}
// sumOfLeftLeavesDFSRecursion sums left leaves recursively: a left
// child contributes its value when it has no children, and both
// subtrees are searched for deeper left leaves.
func sumOfLeftLeavesDFSRecursion(node *TreeNode) int {
	if node == nil {
		return 0
	}
	total := 0
	if left := node.Left; left != nil {
		total += sumOfLeftLeavesDFSRecursion(left)
		if left.Left == nil && left.Right == nil {
			total += left.Val
		}
	}
	if node.Right != nil {
		total += sumOfLeftLeavesDFSRecursion(node.Right)
	}
	return total
}
// sumOfLeftLeavesBFS sums left leaves with a queue-based level-order
// traversal, adding each left child that has no children of its own.
func sumOfLeftLeavesBFS(root *TreeNode) int {
	if root == nil {
		return 0
	}
	total := 0
	queue := []*TreeNode{root}
	for len(queue) != 0 {
		cur := queue[0]
		queue = queue[1:]
		if left := cur.Left; left != nil {
			queue = append(queue, left)
			if left.Left == nil && left.Right == nil {
				total += left.Val
			}
		}
		if cur.Right != nil {
			queue = append(queue, cur.Right)
		}
	}
	return total
}
<file_sep>/algorithms/_206_Reverse_Linked_List/answer.go
package _206_Reverse_Linked_List
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// ListNode is a singly-linked list node (leetcode's standard definition).
type ListNode struct {
	Val int
	Next *ListNode
}
// reverseList returns a reversed copy of the list: each visited node's
// value is pushed onto the front of a freshly-built list, so the input
// list is left untouched.
func reverseList(head *ListNode) *ListNode {
	var reversed *ListNode
	for cur := head; cur != nil; cur = cur.Next {
		reversed = &ListNode{Val: cur.Val, Next: reversed}
	}
	return reversed
}
<file_sep>/algorithms/_76_Minimum_Window_Substring/answer.go
package _76_Minimum_Window_Substring
// minWindow returns the shortest substring of s containing every
// character of t (with multiplicity), or "" if none exists.
// Uses the counter-based sliding window; the naive variant timed out.
func minWindow(s string, t string) string {
	//return minWindowWithSlidingWindow(s, t)
	return minWindowWithSlidingWindowFast(s, t)
}
// Sliding window with an O(1) coverage counter, avoiding a full
// need-map comparison on every step.
func minWindowWithSlidingWindowFast(s, t string) string {
	if len(s) < len(t) { // bad case
		return ""
	}
	var (
		tcMap = make(map[int32]int) // chars of t still needed; may go negative for surplus in the window
		count int // matched-character count; the window covers t exactly when count == len(t)
		left, right int
		ret string // answer
		minLen = len(s) + 1 // best length so far; starts past len(s) so "not found" needs no special-case
	)
	// Record the character requirements of t.
	for _, tt := range t {
		tcMap[tt]++
	}
	// Advance the right edge; for each right edge, shrink from the
	// left as long as the window still covers t.
	for right = 0; right < len(s); right++ {
		cur := int32(s[right])
		if _, ok := tcMap[cur]; ok {
			tcMap[cur]-- // consume one requirement slot
			if tcMap[cur] >= 0 { // the slot was still needed: a real match
				count++
			}
			for count == len(t) { // window fully covers t
				if right-left+1 < minLen { // shorter window: record it
					minLen = right - left + 1
					ret = s[left : left+minLen]
				}
				curLeft := int32(s[left])
				if _, ok := tcMap[curLeft]; ok { // dropping a required char affects coverage
					tcMap[curLeft]++
					if tcMap[curLeft] > 0 { // a requirement reopened: coverage lost
						count--
					}
				}
				left++ // shrink the window from the left
			}
		}
	}
	return ret
}
// Straightforward sliding window: the idea is correct, but each step
// re-validates the whole window via isValid, repeating work and timing
// out on large inputs. Kept for reference; see the Fast variant above.
func minWindowWithSlidingWindow(s, t string) string {
	if len(s) < len(t) { // bad case
		return ""
	}
	var (
		ret string = s + t // sentinel: longer than any possible answer
		p, q int // window is s[p:q) — half-open [p, q)
	)
	for q < len(s) || p < len(s) {
		if q-p < len(t) { // window shorter than t: must grow
			//fmt.Printf("not enough len with %s, p=%d, q=%d\n", s[p:q], p, q)
			if q == len(s) { // cannot grow any further: done
				break
			} else { // still room to grow
				q += 1
				continue
			}
		}
		tmp := s[p:q]
		//fmt.Printf("test str=%s, p=%d, q=%d\n", tmp, p, q)
		if isValid(tmp, t) { // window covers t: try shrinking away slack
			if len(tmp) < len(ret) { // keep the shorter answer
				ret = tmp
			}
			p += 1
			continue
		} else { // window misses characters: grow, like the short case
			if q == len(s) { // cannot grow any further: done
				break
			} else { // still room to grow
				q += 1
				continue
			}
		}
	}
	if len(ret) > len(s) {
		return ""
	}
	return ret
}
// isValid reports whether s contains every character of t with at
// least t's multiplicity.
func isValid(s, t string) bool {
	counts := make(map[int32]int)
	for _, r := range s {
		counts[r]++
	}
	for _, r := range t {
		counts[r]--
		if counts[r] < 0 {
			return false
		}
	}
	return true
}
<file_sep>/algorithms/_745_Prefix_and_Suffix_Search/answer_test.go
package _745_Prefix_and_Suffix_Search
import "testing"
// TestCase exercises WordFilter.F on prefix/suffix lookups.
// NOTE(review): the first active check uses t.Logf, so a mismatch is
// logged but never fails the test — presumably a leftover from
// debugging; confirm whether it should be t.Errorf like the others.
func TestCase(t *testing.T) {
	var wf WordFilter
	//wf = Constructor([]string{"apple"})
	//if ret := wf.F("a", "e"); ret != 0 {
	//	t.Logf("wrong ret with %d", ret)
	//}
	wf = Constructor([]string{"cabaabaaaa", "ccbcababac", "bacaabccba", "bcbbcbacaa", "abcaccbcaa", "accabaccaa", "cabcbbbcca", "ababccabcb", "caccbbcbab", "bccbacbcba"})
	if ret := wf.F("bccbacbcba", "a"); ret != 9 {
		t.Logf("wrong ret with %d", ret)
	}
	if ret := wf.F("ab", "abcaccbcaa"); ret != 4 {
		t.Errorf("wrong ret with %d", ret)
	}
	if ret := wf.F("a", "aa"); ret != 5 {
		t.Errorf("wrong ret with %d", ret)
	}
}
<file_sep>/algorithms/_2095_Delete_the_Middle_Node_of_a_Linked_List/answer_test.go
package _2095_Delete_the_Middle_Node_of_a_Linked_List
import (
"testing"
"github.com/shadas/leetcode_notes/utils/linkedlist"
)
// testCase pairs an input linked list with the list expected after
// deleteMiddle removes the middle node.
type testCase struct {
	input *linkedlist.IntListNode
	output *linkedlist.IntListNode
}
// TestDeleteMiddle checks deleteMiddle on the leetcode 2095 sample:
// removing the middle (4th of 7) node of [1,3,4,7,1,2,6].
func TestDeleteMiddle(t *testing.T) {
	cases := []testCase{
		{
			input: linkedlist.GenerateIntLinkedList([]int{1, 3, 4, 7, 1, 2, 6}),
			output: linkedlist.GenerateIntLinkedList([]int{1, 3, 4, 1, 2, 6}),
		},
	}
	for _, c := range cases {
		if x := deleteMiddle(c.input); !linkedlist.IsTwoIntLinkedListEqual(x, c.output) {
			t.Errorf("output should be \"%v\" instead of \"%v\" with input=\"%v\"", c.output, x, c.input)
		}
	}
}
<file_sep>/algorithms/_1019_Next_Greater_Node_In_Linked_List/answer.go
package _1019_Next_Greater_Node_In_Linked_List
// ListNode is a singly-linked list node (leetcode's standard definition).
type ListNode struct {
	Val int
	Next *ListNode
}
// nextLargerNodes returns, for every node of the list, the value of
// the first strictly greater node after it (0 when none exists).
func nextLargerNodes(head *ListNode) []int {
	// Copy the list values into a slice.
	var list []int
	for head != nil {
		list = append(list, head.Val)
		head = head.Next
	}
	// Solve on the slice with a monotonic stack (two variants below).
	//return nextLargerNodesListAsc(list)
	return nextLargerNodesListDesc(list)
}
// nextLargerNodesListAsc: forward scan with a monotonic stack of
// indices still awaiting a greater element. When a larger value
// arrives it resolves every smaller stacked index; indices left on the
// stack keep ret's zero value.
func nextLargerNodesListAsc(list []int) (ret []int) {
	ret = make([]int, len(list))
	stack := []int{}
	for i, v := range list {
		for len(stack) > 0 && list[stack[len(stack)-1]] < v {
			ret[stack[len(stack)-1]] = v
			stack = stack[:len(stack)-1]
		}
		stack = append(stack, i)
	}
	return
}
// nextLargerNodesListDesc: backward scan keeping a stack of candidate
// values. Values not greater than the current one can never be the
// answer for anything to the left, so they are popped; the surviving
// top (if any) is the next greater value.
func nextLargerNodesListDesc(list []int) (ret []int) {
	ret = make([]int, len(list))
	var stack []int
	for i := len(list) - 1; i >= 0; i-- {
		v := list[i]
		for len(stack) > 0 && v >= stack[len(stack)-1] {
			stack = stack[:len(stack)-1]
		}
		if len(stack) > 0 {
			ret[i] = stack[len(stack)-1]
		}
		stack = append(stack, v)
	}
	return
}
<file_sep>/algorithms/_331_Verify_Preorder_Serialization_of_a_Binary_Tree/answer_test.go
package _331_Verify_Preorder_Serialization_of_a_Binary_Tree
import "testing"
// TestIsValidSerialization pins one valid and one invalid preorder
// serialization from leetcode 331.
func TestIsValidSerialization(t *testing.T) {
	if got := isValidSerialization("9,3,4,#,#,1,#,#,2,#,6,#,#"); !got {
		t.Error("should be true")
	}
	if got := isValidSerialization("1,#,#,#,#"); got {
		t.Error("should be false")
	}
}
<file_sep>/algorithms/_560_Subarray_Sum_Equals_K/answer.go
package _560_Subarray_Sum_Equals_K
// subarraySum returns the number of contiguous subarrays summing to k.
func subarraySum(nums []int, k int) int {
	return subarraySumWithMap(nums, k)
}
// subarraySumWithMap counts subarrays with sum k using prefix sums:
// a subarray ending here sums to k iff some earlier prefix equals
// (current prefix - k). The empty prefix 0 is pre-counted once.
func subarraySumWithMap(nums []int, k int) int {
	prefixCounts := map[int]int{0: 1} // prefix-sum -> occurrences
	total, prefix := 0, 0
	for _, v := range nums {
		prefix += v
		total += prefixCounts[prefix-k]
		prefixCounts[prefix]++
	}
	return total
}
<file_sep>/algorithms/_3_Longest_Substring_Without_Repeating_Characters/answer_test.go
package _3_Longest_Substring_Without_Repeating_Characters
import (
"testing"
)
// TestLengthOfLongestSubstring checks lengthOfLongestSubstring on a
// table of inputs.
// Fix: the original's failure messages hard-coded "3" as the expected
// value even for cases whose expectation was 1, 2 or 6; the table now
// reports the actual expected value on failure.
func TestLengthOfLongestSubstring(t *testing.T) {
	cases := []struct {
		in   string
		want int
	}{
		{"abcabcab", 3},
		{"bbbbbbbb", 1},
		{"pwwkew", 3},
		{"au", 2},
		{"c", 1},
		{"abcdef", 6},
	}
	for _, c := range cases {
		if got := lengthOfLongestSubstring(c.in); got != c.want {
			t.Errorf("\"%v\" 's ret is %v not %v", c.in, got, c.want)
		}
	}
}
<file_sep>/algorithms/_52_N_Queens_2/answer_test.go
package _52_N_Queens_2
import "testing"
// TestTotalNQueens pins the known solution counts for n=1 (1) and n=4 (2).
func TestTotalNQueens(t *testing.T) {
	if ret := totalNQueens(1); ret != 1 {
		t.Errorf("wrong ret with %d", ret)
	}
	if ret := totalNQueens(4); ret != 2 {
		t.Errorf("wrong ret with %d", ret)
	}
}
<file_sep>/algorithms/_4_Median_of_Two_Sorted_Arrays/answer_test.go
package _4_Median_of_Two_Sorted_Arrays
import (
"testing"
)
// TestFindMedianSortedArrays pins medians of three small sorted-array
// pairs, covering both odd and even combined lengths.
func TestFindMedianSortedArrays(t *testing.T) {
	if ret := findMedianSortedArrays([]int{1}, []int{1}); ret != 1 {
		t.Error("err test1 want 1 with", ret)
	}
	if ret := findMedianSortedArrays([]int{1}, []int{2}); ret != 1.5 {
		t.Error("err test2 want 1.5 with", ret)
	}
	if ret := findMedianSortedArrays([]int{1, 2}, []int{3, 4}); ret != 2.5 {
		t.Error("err test3 want 2.5 with", ret)
	}
}
<file_sep>/algorithms/_86_Partition_List/answer.go
package _86_Partition_List
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// partition returns a new list holding copies of head's nodes with all
// values < x before all values >= x, preserving relative order within
// each group. Builds the two groups behind dummy heads, then links the
// "small" tail to the "big" head.
func partition(head *ListNode, x int) *ListNode {
	if head == nil || head.Next == nil {
		return head
	}
	smallHead, bigHead := &ListNode{}, &ListNode{}
	smallTail, bigTail := smallHead, bigHead
	for node := head; node != nil; node = node.Next {
		copied := &ListNode{Val: node.Val}
		if node.Val < x {
			smallTail.Next = copied
			smallTail = copied
		} else {
			bigTail.Next = copied
			bigTail = copied
		}
	}
	smallTail.Next = bigHead.Next
	return smallHead.Next
}
<file_sep>/algorithms/_783_Minimum_Distance_Between_BST_Nodes/answer.go
package _783_Minimum_Distance_Between_BST_Nodes
import "math"
// TreeNode is a binary tree node (leetcode's standard definition).
type TreeNode struct {
	Val int
	Left *TreeNode
	Right *TreeNode
}
// minDiffInBST returns the minimum absolute difference between the
// values of any two nodes in the BST.
func minDiffInBST(root *TreeNode) int {
	return minDiffInBSTInOrderNR(root)
}
// minDiffInBSTInOrderNR computes the minimum gap via an iterative
// in-order walk: in-order visits a BST in ascending value order, so
// only consecutive visited values need comparing.
func minDiffInBSTInOrderNR(root *TreeNode) int {
	var (
		stack []*TreeNode
		cur   = root
		prev  *TreeNode
		best  = math.MaxInt32
	)
	for cur != nil || len(stack) > 0 {
		for cur != nil {
			stack = append(stack, cur)
			cur = cur.Left
		}
		cur = stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		if prev != nil {
			best = minInt(best, absInt(cur.Val, prev.Val))
		}
		prev = cur
		cur = cur.Right
	}
	return best
}
// absInt returns |a - b|.
func absInt(a, b int) int {
	if a < b {
		return b - a
	}
	return a - b
}
// minInt returns the smaller of a and b.
func minInt(a, b int) int {
	if b < a {
		return b
	}
	return a
}
<file_sep>/algorithms/_205_Isomorphic_Strings/answer.go
package _205_Isomorphic_Strings
// isIsomorphic reports whether the characters of s can be mapped
// one-to-one onto the characters of t. For every group of positions
// holding the same character in one string, the other string must hold
// a single character across those positions; checked in both
// directions. Positions are byte offsets (matches the original; fine
// for ASCII input).
func isIsomorphic(s string, t string) bool {
	positionsOf := func(str string) map[rune][]int {
		m := make(map[rune][]int)
		for i, r := range str {
			m[r] = append(m[r], i)
		}
		return m
	}
	uniformAt := func(str string, idxs []int) bool {
		first := str[idxs[0]]
		for _, i := range idxs {
			if str[i] != first {
				return false
			}
		}
		return true
	}
	for _, idxs := range positionsOf(s) {
		if !uniformAt(t, idxs) {
			return false
		}
	}
	for _, idxs := range positionsOf(t) {
		if !uniformAt(s, idxs) {
			return false
		}
	}
	return true
}
<file_sep>/algorithms/_414_Third_Maximum_Number/answer.go
package _414_Third_Maximum_Number
// thirdMax returns the third-largest distinct value in nums, or the
// overall maximum when fewer than three distinct values exist.
// `heap` is a plain 3-slot buffer of the current top-3 distinct
// values, scanned linearly (not an actual heap).
// NOTE(review): returns -1 for an empty slice — leetcode 414
// guarantees non-empty input, so this branch is defensive only.
func thirdMax(nums []int) int {
	if len(nums) == 0 {
		return -1
	}
	var (
		max int = nums[0]
		heap = []int{nums[0]}
		min, minidx = nums[0], 0
	)
	for _, num := range nums {
		// Track the global maximum as we go.
		if num > max {
			max = num
		}
		// Skip values already held in the top-3 buffer.
		var repeat bool
		for _, h := range heap {
			if h == num {
				repeat = true
				break
			}
		}
		if !repeat {
			// Maintain the top-3 buffer.
			if len(heap) < 3 { // room left: insert directly
				heap = append(heap, num)
				if num < min {
					min = num
					minidx = len(heap) - 1
				}
			} else {
				if num > min { // evict the current smallest, then insert
					if minidx == 2 {
						heap = heap[0:minidx]
					} else {
						heap = append(heap[0:minidx], heap[minidx+1:]...)
					}
					heap = append(heap, num)
					// Re-scan for the new smallest of the three.
					min, minidx = heap[0], 0
					for idx, h := range heap {
						if h < min {
							min = h
							minidx = idx
						}
					}
				}
			}
		}
	}
	if len(heap) < 3 {
		return max
	}
	return min
}
<file_sep>/algorithms/_242_Valid_Anagram/answer.go
package _242_Valid_Anagram
// isAnagram reports whether t is an anagram of s, comparing per-rune
// frequency maps built for each string.
func isAnagram(s string, t string) bool {
	freqS, freqT := map[rune]int{}, map[rune]int{}
	for _, r := range s {
		freqS[r]++
	}
	for _, r := range t {
		freqT[r]++
	}
	if len(freqS) != len(freqT) {
		return false
	}
	for r, n := range freqS {
		if freqT[r] != n {
			return false
		}
	}
	return true
}
<file_sep>/algorithms/_2487_Remove_Nodes_From_Linked_List/answer.go
package _2487_Remove_Nodes_From_Linked_List
import "github.com/shadas/leetcode_notes/utils/linkedlist"
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// removeNodes removes every node that has a strictly greater value
// somewhere to its right, returning the head of the resulting list.
func removeNodes(head *linkedlist.IntListNode) *linkedlist.IntListNode {
	return removeNodesStack(head)
}
// removeNodesStack keeps a non-increasing stack of surviving values:
// each incoming value pops every smaller value before it (they have a
// greater node to their right), then a fresh list is rebuilt from the
// stack behind a dummy head.
func removeNodesStack(head *linkedlist.IntListNode) *linkedlist.IntListNode {
	var keep []int
	for node := head; node != nil; node = node.Next {
		for len(keep) > 0 && node.Val > keep[len(keep)-1] {
			keep = keep[:len(keep)-1]
		}
		keep = append(keep, node.Val)
	}
	dummy := &linkedlist.IntListNode{}
	tail := dummy
	for _, v := range keep {
		tail.Next = &linkedlist.IntListNode{Val: v}
		tail = tail.Next
	}
	return dummy.Next
}
<file_sep>/algorithms/_131_Palindrome_Partitioning/answer.go
package _131_Palindrome_Partitioning
// partition returns every way to split s into contiguous palindromic
// substrings, via backtracking.
func partition(s string) [][]string {
	var ret [][]string
	backTrack(s, 0, []string{}, &ret)
	return ret
}
// isPalindromic reports whether str reads the same forwards and
// backwards (byte-wise).
// Fix: the original loop ran i up to len(str)/2 inclusive, so for an
// empty string it evaluated str[0] and panicked (and redundantly
// compared the middle byte with itself). Two-pointer form handles ""
// and single bytes cleanly with identical results otherwise.
func isPalindromic(str string) bool {
	for i, j := 0, len(str)-1; i < j; i, j = i+1, j-1 {
		if str[i] != str[j] {
			return false
		}
	}
	return true
}
// backTrack extends the partial partition tmp with every palindromic
// prefix of str[idx:], recursing past it; when idx reaches the end a
// snapshot of tmp is appended to ret.
func backTrack(str string, idx int, tmp []string, ret *[][]string) {
	if idx == len(str) {
		snapshot := make([]string, len(tmp))
		copy(snapshot, tmp)
		*ret = append(*ret, snapshot)
		return
	}
	for end := idx + 1; end <= len(str); end++ {
		piece := str[idx:end]
		if !isPalindromic(piece) {
			continue
		}
		tmp = append(tmp, piece)
		backTrack(str, end, tmp, ret)
		tmp = tmp[:len(tmp)-1]
	}
}
<file_sep>/algorithms/_79_Word_Search/answer.go
package _79_Word_Search
// exist reports whether word can be traced in board by moving between
// horizontally/vertically adjacent cells, using each cell at most once.
func exist(board [][]byte, word string) bool {
	var (
		m, n int // rows, columns
		record [][]bool
	)
	m = len(board)
	if m == 0 {
		return false
	}
	n = len(board[0])
	if n == 0 {
		return false
	}
	if len(word) > m*n || len(word) == 0 {
		return false
	}
	// Build the visited-cell grid.
	record = initRecord(m, n)
	// Try every cell matching the first letter as a starting point.
	for i := 0; i < m; i++ {
		for j := 0; j < n; j++ {
			if board[i][j] == word[0] {
				b := search(board, record, i, j, word[1:])
				if b {
					return true
				}
				record[i][j] = false // un-mark the failed start cell
			}
		}
	}
	return false
}
// initRecord allocates an m×n visited-marker grid, all false.
func initRecord(m, n int) [][]bool {
	record := make([][]bool, m)
	for row := range record {
		record[row] = make([]bool, n)
	}
	return record
}
// availablePos lists the unvisited in-bounds neighbors of (i, j), in
// the fixed order left, up, right, down.
func availablePos(i, j int, record [][]bool) [][]int {
	rows, cols := len(record), len(record[0])
	candidates := [][]int{
		{i, j - 1}, // left
		{i - 1, j}, // up
		{i, j + 1}, // right
		{i + 1, j}, // down
	}
	ret := [][]int{}
	for _, c := range candidates {
		y, x := c[0], c[1]
		if y >= 0 && y < rows && x >= 0 && x < cols && !record[y][x] {
			ret = append(ret, c)
		}
	}
	return ret
}
// search continues the hunt from (i, j), which is already matched;
// word holds the still-unmatched suffix. Marks cells in record while
// exploring and un-marks dead-end neighbors before returning false.
func search(board [][]byte, record [][]bool, i, j int, word string) bool {
	record[i][j] = true
	if len(word) == 0 {
		return true
	}
	for _, ap := range availablePos(i, j, record) {
		y, x := ap[0], ap[1]
		if board[y][x] != word[0] {
			continue
		}
		if search(board, record, y, x, word[1:]) {
			return true
		}
		record[y][x] = false
	}
	return false
}
<file_sep>/algorithms/_95_Unique_Binary_Search_Trees_2/answer.go
package _95_Unique_Binary_Search_Trees_2
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// TreeNode is a binary tree node (leetcode's standard definition).
type TreeNode struct {
	Val int
	Left *TreeNode
	Right *TreeNode
}
// generateTrees returns every structurally unique BST storing the
// values 1..n; empty for n == 0.
func generateTrees(n int) []*TreeNode {
	if n == 0 {
		return []*TreeNode{}
	}
	return genRecursive(1, n)
}
// genRecursive returns all BSTs over the value range [i, j]. Each
// value in the range takes a turn as root, combined with every
// left-subtree over [i, k-1] and right-subtree over [k+1, j]; an empty
// range yields the single subtree nil.
func genRecursive(i, j int) []*TreeNode {
	if i > j {
		return []*TreeNode{nil}
	}
	if i == j {
		return []*TreeNode{{Val: i}}
	}
	trees := []*TreeNode{}
	for root := i; root <= j; root++ {
		lefts := genRecursive(i, root-1)
		rights := genRecursive(root+1, j)
		for _, r := range rights {
			for _, l := range lefts {
				trees = append(trees, &TreeNode{
					Left:  l,
					Right: r,
					Val:   root,
				})
			}
		}
	}
	return trees
}
<file_sep>/algorithms/_42_Trapping_Rain_Water/answer.go
package _42_Trapping_Rain_Water
// trap returns how much rain water the elevation map can hold; fewer
// than three bars can hold none.
func trap(height []int) int {
	if len(height) < 3 {
		return 0
	}
	return trapStack(height)
}
// trapStack computes trapped water with a monotonic stack of bar
// indices. When a bar taller than the stack top arrives, the popped
// bar is a basin bottom bounded by the new bar and the next stacked
// bar; the water above it is added layer by layer.
// (The tiny `min` helper the original called is inlined here.)
func trapStack(height []int) int {
	var (
		stack []int
		water int
	)
	for i, h := range height {
		for len(stack) != 0 && height[stack[len(stack)-1]] < h {
			bottom := stack[len(stack)-1]
			stack = stack[:len(stack)-1]
			if len(stack) == 0 {
				break
			}
			left := stack[len(stack)-1]
			bounded := height[left]
			if h < bounded {
				bounded = h
			}
			water += (bounded - height[bottom]) * (i - left - 1)
		}
		stack = append(stack, i)
	}
	return water
}
// min returns the smaller of a and b.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
<file_sep>/algorithms/_134_Gas_Station/answer_test.go
package _134_Gas_Station
import "testing"
// TestCanCompleteCircuit pins the leetcode 134 sample: starting at
// station 3 is the only index completing the circuit.
func TestCanCompleteCircuit(t *testing.T) {
	if ret := canCompleteCircuit([]int{1, 2, 3, 4, 5}, []int{3, 4, 5, 1, 2}); ret != 3 {
		t.Errorf("wrong ret with %d", ret)
	}
}
<file_sep>/algorithms/_91_Decode_Ways/answer_test.go
package _91_Decode_Ways
import "testing"
// TestNumDecodings pins leetcode 91 samples: "12" -> 2, "226" -> 3,
// and the leading-zero case "0" -> 0.
func TestNumDecodings(t *testing.T) {
	if ret := numDecodings("12"); ret != 2 {
		t.Errorf("wrong answer with %d", ret)
	}
	if ret := numDecodings("226"); ret != 3 {
		t.Errorf("wrong answer with %d", ret)
	}
	if ret := numDecodings("0"); ret != 0 {
		t.Errorf("wrong answer with %d", ret)
	}
}
<file_sep>/algorithms/_105_Construct_Binary_Tree_from_Preorder_and_Inorder_Traversal/answer.go
package _105_Construct_Binary_Tree_from_Preorder_and_Inorder_Traversal
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// TreeNode is a binary tree node (leetcode's standard definition).
type TreeNode struct {
	Val int
	Left *TreeNode
	Right *TreeNode
}
// buildTree reconstructs the binary tree from its preorder and inorder
// traversals (values assumed distinct, per leetcode 105).
func buildTree(preorder []int, inorder []int) *TreeNode {
	return buildTreeRecursion(preorder, 0, len(preorder)-1, inorder, 0, len(inorder)-1)
}
// buildTreeRecursion rebuilds the subtree covering
// preorder[preStart:preEnd+1] / inorder[inStart:inEnd+1]: the first
// preorder value is the root; its position in inorder splits the
// remaining values into left and right subtrees.
func buildTreeRecursion(preorder []int, preStart int, preEnd int, inorder []int, inStart int, inEnd int) *TreeNode {
	if preStart > preEnd || inStart > inEnd {
		return nil
	}
	rootVal := preorder[preStart]
	rootIdx := 0
	for i := inStart; i <= inEnd; i++ {
		if inorder[i] == rootVal {
			rootIdx = i
			break
		}
	}
	leftSize := rootIdx - inStart
	return &TreeNode{
		Val:   rootVal,
		Left:  buildTreeRecursion(preorder, preStart+1, preStart+leftSize, inorder, inStart, rootIdx-1),
		Right: buildTreeRecursion(preorder, preStart+leftSize+1, preEnd, inorder, rootIdx+1, inEnd),
	}
}
<file_sep>/algorithms/_209_Minimum_Size_Subarray_Sum/answer_test.go
package _209_Minimum_Size_Subarray_Sum
import "testing"
// TestMinSubArrayLen checks that the shortest subarray of [1..5]
// summing to >= 11 has length 3 ([3,4,5]).
func TestMinSubArrayLen(t *testing.T) {
	//if ret := minSubArrayLen(7, []int{2, 3, 1, 2, 4, 3}); ret != 2 {
	//	t.Errorf("wrong ret with %d", ret)
	//}
	if ret := minSubArrayLen(11, []int{1, 2, 3, 4, 5}); ret != 3 {
		t.Errorf("wrong ret with %d", ret)
	}
}
<file_sep>/algorithms/_204_Count_Primes/answer_test.go
package _204_Count_Primes
import "testing"
// TestCountPrimes pins counts of primes strictly below n:
// below 2 there are none; below 10 there are four (2, 3, 5, 7).
func TestCountPrimes(t *testing.T) {
	if ret := countPrimes(2); ret != 0 {
		t.Errorf("ret of %d is %d rather than %d", 2, ret, 0)
	}
	if ret := countPrimes(10); ret != 4 {
		t.Errorf("ret of %d is %d rather than %d", 10, ret, 4)
	}
}
<file_sep>/algorithms/_20_Valid_Parentheses/answer_test.go
package _20_Valid_Parentheses
import (
"testing"
)
// TestIsValid pins one balanced and one mismatched bracket string.
func TestIsValid(t *testing.T) {
	if ret := isValid("()[]{}"); !ret {
		t.Error("()[]{} is true, not false.")
	}
	if ret := isValid("()(]{}"); ret {
		t.Error("()(]{} is false, not true.")
	}
}
<file_sep>/algorithms/_997_Find_the_Town_Judge/answer.go
package _997_Find_the_Town_Judge
// findJudge returns the town judge among people 1..N, or -1 if none:
// the judge is trusted by everyone else (indegree N-1) and trusts
// nobody (outdegree 0). Each trust pair is [truster, trusted].
func findJudge(N int, trust [][]int) int {
	indegree := make([]int, N)
	outdegree := make([]int, N)
	for _, pair := range trust {
		outdegree[pair[0]-1]++
		indegree[pair[1]-1]++
	}
	for person := 1; person <= N; person++ {
		if indegree[person-1] == N-1 && outdegree[person-1] == 0 {
			return person
		}
	}
	return -1
}
<file_sep>/algorithms/_26_Remove_Duplicates_from_Sorted_Array/answer_test.go
package _26_Remove_Duplicates_from_Sorted_Array
import (
"testing"
)
// TestRemoveDuplicates checks that deduplicating [1,1,2] in place
// leaves 2 unique elements.
func TestRemoveDuplicates(t *testing.T) {
	if ret := removeDuplicates([]int{1, 1, 2}); ret != 2 {
		t.Error("ret not 2 with:", ret)
	}
}
<file_sep>/algorithms/_130_Surrounded_Regions/answer.go
package _130_Surrounded_Regions
// solve captures all 'O' regions not connected to the border,
// flipping them to 'X' in place.
func solve(board [][]byte) {
	rows := len(board)
	// Pass 1: protect border-connected regions by recoloring 'O' -> 'Y'.
	for i := 0; i < rows; i++ {
		for j := 0; j < len(board[i]); j++ {
			onBorder := i == 0 || i == rows-1 || j == 0 || j == len(board[i])-1
			if onBorder && board[i][j] == 'O' {
				O2Y(i, j, &board)
			}
		}
	}
	// Pass 2: every remaining 'O' is surrounded -> capture as 'X';
	// restore the protected 'Y' cells back to 'O'.
	for i := range board {
		for j := range board[i] {
			switch board[i][j] {
			case 'O':
				board[i][j] = 'X'
			case 'Y':
				board[i][j] = 'O'
			}
		}
	}
}
// O2Y flood-fills (recursive DFS) the 4-connected region of 'O' cells
// containing (i, j), recoloring each visited cell to 'Y'. Out-of-range
// or non-'O' cells terminate the recursion.
func O2Y(i, j int, board *[][]byte) {
	b := *board
	if i < 0 || i > len(b)-1 || j < 0 || j > len(b[i])-1 || b[i][j] != 'O' {
		return
	}
	b[i][j] = 'Y'
	for _, d := range [][2]int{{-1, 0}, {1, 0}, {0, -1}, {0, 1}} {
		O2Y(i+d[0], j+d[1], board)
	}
}
<file_sep>/algorithms/_344_Reverse_String/answer.go
package _344_Reverse_String
// reverseString reverses s in place with two converging pointers.
func reverseString(s []byte) {
	for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
		s[i], s[j] = s[j], s[i]
	}
}
<file_sep>/algorithms/_641_Design_Circular_Deque/QD.md
Design your implementation of the circular double-ended queue (deque).
Implement the MyCircularDeque class:
- MyCircularDeque(int k) Initializes the deque with a maximum size of k.
- boolean insertFront(int value) Adds an item at the front of Deque. Returns true if the operation is successful, or false otherwise.
- boolean insertLast(int value) Adds an item at the rear of Deque. Returns true if the operation is successful, or false otherwise.
- boolean deleteFront() Deletes an item from the front of Deque. Returns true if the operation is successful, or false otherwise.
- boolean deleteLast() Deletes an item from the rear of Deque. Returns true if the operation is successful, or false otherwise.
- int getFront() Returns the front item from the Deque. Returns -1 if the deque is empty.
- int getRear() Returns the last item from Deque. Returns -1 if the deque is empty.
- boolean isEmpty() Returns true if the deque is empty, or false otherwise.
- boolean isFull() Returns true if the deque is full, or false otherwise.
Example 1:
```
Input
["MyCircularDeque", "insertLast", "insertLast", "insertFront", "insertFront", "getRear", "isFull", "deleteLast", "insertFront", "getFront"]
[[3], [1], [2], [3], [4], [], [], [], [4], []]
Output
[null, true, true, true, false, 2, true, true, true, 4]
Explanation
MyCircularDeque myCircularDeque = new MyCircularDeque(3);
myCircularDeque.insertLast(1); // return True
myCircularDeque.insertLast(2); // return True
myCircularDeque.insertFront(3); // return True
myCircularDeque.insertFront(4); // return False, the queue is full.
myCircularDeque.getRear(); // return 2
myCircularDeque.isFull(); // return True
myCircularDeque.deleteLast(); // return True
myCircularDeque.insertFront(4); // return True
myCircularDeque.getFront(); // return 4
```
Constraints:
- 1 <= k <= 1000
- 0 <= value <= 1000
- At most 2000 calls will be made to insertFront, insertLast, deleteFront, deleteLast, getFront, getRear, isEmpty, isFull.<file_sep>/algorithms/_1721_Swapping_Nodes_in_a_Linked_List/answer_test.go
package _1721_Swapping_Nodes_in_a_Linked_List
import (
"fmt"
"testing"
)
// TestSwapNodes verifies swapNodes on 1->2->3->4->5 with k=2.
// Fix: the original only printed the result, asserting nothing; it now
// checks that the 2nd node from the front and the 2nd from the end are
// swapped, giving 1->4->3->2->5.
func TestSwapNodes(t *testing.T) {
	head := &ListNode{
		Val: 1,
		Next: &ListNode{
			Val: 2,
			Next: &ListNode{
				Val: 3,
				Next: &ListNode{
					Val: 4,
					Next: &ListNode{
						Val: 5,
						Next: nil,
					},
				},
			},
		},
	}
	k := 2
	ret := swapNodes(head, k)
	// fmt.Sprint uses ListNode's String method, which concatenates
	// the node values.
	if got := fmt.Sprint(ret); got != "14325" {
		t.Errorf("swapNodes = %s, want 14325", got)
	}
}
<file_sep>/algorithms/_98_Validate_Binary_Search_Tree/answer.go
package _98_Validate_Binary_Search_Tree
import "math"
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// isValidBST reports whether the tree is a valid binary search tree
// (strictly increasing in-order). Uses int64 sentinels as the initial
// open bounds.
func isValidBST(root *TreeNode) bool {
	return isValidBSTRecursion(root, math.MinInt64, math.MaxInt64)
}
// isValidBSTRecursion checks that every value in the subtree lies
// strictly inside the open interval (min, max), narrowing the bound on
// the child's side at each step.
func isValidBSTRecursion(node *TreeNode, min, max int) bool {
	switch {
	case node == nil:
		return true
	case node.Val <= min, node.Val >= max:
		return false
	}
	return isValidBSTRecursion(node.Left, min, node.Val) &&
		isValidBSTRecursion(node.Right, node.Val, max)
}
<file_sep>/algorithms/_287_Find_the_Duplicate_Number/answer.go
package _287_Find_the_Duplicate_Number
// findDuplicate returns the repeated value in nums, which holds n+1
// integers in [1, n]. Floyd's tortoise-and-hare cycle detection on the
// functional graph i -> nums[i]: phase 1 finds a meeting point inside
// the cycle, phase 2 walks two pointers at equal speed to the cycle
// entrance, which is the duplicate.
func findDuplicate(nums []int) int {
	slow, fast := nums[0], nums[nums[0]]
	for slow != fast {
		slow = nums[slow]
		fast = nums[nums[fast]]
	}
	finder := 0
	for finder != slow {
		finder = nums[finder]
		slow = nums[slow]
	}
	return finder
}
<file_sep>/algorithms/_59_Spiral_Matrix_2/answer.go
package _59_Spiral_Matrix_2
// generateMatrix returns an n×n matrix filled with 1..n² in clockwise
// spiral order.
func generateMatrix(n int) [][]int {
	return generateMatrixWithGenSeq(n)
}
// generateMatrixWithGenSeq fills the matrix by repeatedly asking
// newPos for the next cell of the spiral walk.
func generateMatrixWithGenSeq(n int) [][]int {
	var (
		ret, record [][]int
	)
	// Allocate the result and the visited-marker grid.
	ret = make([][]int, n)
	record = make([][]int, n)
	for i := 0; i < n; i++ {
		ret[i] = make([]int, n)
		record[i] = make([]int, n)
	}
	// Walk and assign.
	count := 0 // cells filled so far
	// Start at the top-left corner, heading right.
	pos := Pos{0, 0}
	dire := Right
	for count < n*n { // stop once every cell is filled
		ret[pos.Y][pos.X] = count + 1
		pos, record, dire = newPos(pos, record, dire)
		count++
	}
	return ret
}
// Dire enumerates the four walking directions of the spiral, in
// clockwise turn order: Right -> Down -> Left -> Up.
type Dire int

const (
	Right Dire = iota
	Down
	Left
	Up
)
// Pos is a grid coordinate: X is the column, Y is the row.
type Pos struct {
	X, Y int
}
// newPos marks pos visited and advances one cell in the current
// direction, turning clockwise when the next cell is off the grid or
// already visited. Returns the new position, marker grid and direction.
//
// Bug fix: the Up case previously guarded with pos.X < 0 instead of
// pos.Y < 0, so stepping above row 0 would index record[-1][...]
// before any bound could stop it. Unreachable in the spiral walk used
// by generateMatrixWithGenSeq (row 0 is always visited first), but a
// real panic for any other caller.
func newPos(pos Pos, record [][]int, dire Dire) (newPos Pos, newRecord [][]int, newDire Dire) {
	// Mark the current cell as visited.
	record[pos.Y][pos.X] = 1
	newRecord = record
	newDire = dire
	// Try to step; on collision, undo the step and rotate clockwise.
	switch dire {
	case Right:
		pos.X += 1
		if pos.X >= len(record[0]) || record[pos.Y][pos.X] == 1 {
			pos.X -= 1 // undo
			pos.Y += 1
			newDire = Down
		}
	case Down:
		pos.Y += 1
		if pos.Y >= len(record) || record[pos.Y][pos.X] == 1 {
			pos.X -= 1
			pos.Y -= 1
			newDire = Left
		}
	case Left:
		pos.X -= 1
		if pos.X < 0 || record[pos.Y][pos.X] == 1 {
			pos.X += 1
			pos.Y -= 1
			newDire = Up
		}
	case Up:
		pos.Y -= 1
		if pos.Y < 0 || record[pos.Y][pos.X] == 1 {
			pos.X += 1
			pos.Y += 1
			newDire = Right
		}
	}
	newPos = pos
	return
}
<file_sep>/algorithms/_523_Continuous_Subarray_Sum/answer_test.go
package _523_Continuous_Subarray_Sum
import "testing"
// TestCheckSubarraySum covers leetcode 523 edge cases: multiples found
// mid-array, the whole-array case, subarrays shorter than two
// elements, and zero runs that form a multiple of any k.
func TestCheckSubarraySum(t *testing.T) {
	if !checkSubarraySum([]int{23, 2, 6, 4, 7}, 6) {
		t.Error("should be true")
	}
	if !checkSubarraySum([]int{23, 2, 4, 6, 6}, 7) {
		t.Error("should be true")
	}
	if checkSubarraySum([]int{1, 0}, 2) {
		t.Error("should be false")
	}
	if checkSubarraySum([]int{1, 2, 12}, 6) {
		t.Error("should be false")
	}
	if !checkSubarraySum([]int{5, 0, 0, 0}, 3) {
		t.Error("should be true")
	}
	if !checkSubarraySum([]int{1, 1}, 1) {
		t.Error("should be true")
	}
	if !checkSubarraySum([]int{0, 0}, 1) {
		t.Error("should be true")
	}
}
<file_sep>/algorithms/_988_Smallest_String_Starting_From_Leaf/answer.go
package _988_Smallest_String_Starting_From_Leaf
import "fmt"
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// smallestFromLeaf returns the lexicographically smallest string that
// starts at a leaf and ends at the root, mapping values 0..25 to
// 'a'..'z'. Collects every leaf-to-root digit trace, then reduces to
// the minimum.
func smallestFromLeaf(root *TreeNode) string {
	var (
		traces = [][]int{}
		minTrace []int
	)
	if root == nil {
		return ""
	}
	findTraces(root, []int{}, &traces)
	// Reduce to the lexicographically smallest trace.
	minTrace = traces[0]
	for _, trace := range traces {
		minTrace = minTraces(minTrace, trace)
	}
	return transNumbersToStr(minTrace)
}
// minTraces returns the lexicographically smaller of t1 and t2; a
// strict prefix is considered smaller, and t1 wins exact ties.
func minTraces(t1, t2 []int) []int {
	limit := len(t1)
	if len(t2) < limit {
		limit = len(t2)
	}
	for i := 0; i < limit; i++ {
		if t1[i] < t2[i] {
			return t1
		}
		if t1[i] > t2[i] {
			return t2
		}
	}
	if len(t1) <= len(t2) {
		return t1
	}
	return t2
}
// transNumbersToStr maps each number to the letter 'a'+v (0 -> 'a')
// and concatenates the results.
func transNumbersToStr(nums []int) (out string) {
	var buf []byte
	for _, v := range nums {
		buf = append(buf, fmt.Sprintf("%c", v+97)...)
	}
	return string(buf)
}
// findTraces walks root-to-leaf, accumulating values in trace; at each
// leaf it appends the reversed (leaf-to-root) copy of the path to ret.
func findTraces(node *TreeNode, trace []int, ret *[][]int) {
	trace = append(trace, node.Val)
	if node.Left == nil && node.Right == nil {
		reversed := make([]int, 0, len(trace))
		for i := len(trace) - 1; i >= 0; i-- {
			reversed = append(reversed, trace[i])
		}
		*ret = append(*ret, reversed)
		return
	}
	if node.Left != nil {
		findTraces(node.Left, trace, ret)
	}
	if node.Right != nil {
		findTraces(node.Right, trace, ret)
	}
}
<file_sep>/algorithms/_179_Largest_Number/QD.md
Given a list of non-negative integers nums, arrange them such that they form the largest number.
Note: The result may be very large, so you need to return a string instead of an integer.
Example 1:
```
Input: nums = [10,2]
Output: "210"
```
Example 2:
```
Input: nums = [3,30,34,5,9]
Output: "9534330"
```
Example 3:
```
Input: nums = [1]
Output: "1"
```
Example 4:
```
Input: nums = [10]
Output: "10"
```
Constraints:
- 1 <= nums.length <= 100
- 0 <= nums[i] <= 10^9
----
leetcode 179
----
贪心解法,做字符串排序再拼接<file_sep>/algorithms/_670_Maximum_Swap/answer.go
package _670_Maximum_Swap
import (
"math"
)
// maximumSwap returns the largest number obtainable by swapping at
// most two digits of num. Scans digits least-significant first,
// tracking the largest digit seen so far; whenever a more-significant
// digit is smaller than that maximum, swapping that pair is the best
// candidate found so far.
//
// Fix: the original indexed l[pos1] on an empty digit slice and
// panicked for num == 0; 0 now returns 0.
func maximumSwap(num int) int {
	if num == 0 {
		return 0
	}
	var (
		currMax    int
		maxPos     int
		pos1, pos2 int
		l          = []int{} // digits, least-significant first
		ret        int
	)
	for i := 0; num != 0; i++ {
		n := num % 10
		l = append(l, n)
		if n > currMax {
			currMax = n
			maxPos = i
		} else if n < currMax {
			pos1 = i
			pos2 = maxPos
		}
		num = num / 10
	}
	// Apply the best swap (a no-op when num is already maximal).
	l[pos1], l[pos2] = l[pos2], l[pos1]
	// Reassemble the integer.
	for i, n := range l {
		ret += int(math.Pow(10, float64(i))) * n
	}
	return ret
}
<file_sep>/algorithms/_350_Intersection_of_Two_Arrays_2/answer.go
package _350_Intersection_of_Two_Arrays_2
// intersect returns the multiset intersection of nums1 and nums2: each
// common value appears as many times as its smaller count. Output
// order follows map iteration (unordered), matching the original.
func intersect(nums1 []int, nums2 []int) []int {
	count1, count2 := map[int]int{}, map[int]int{}
	for _, n := range nums1 {
		count1[n]++
	}
	for _, n := range nums2 {
		count2[n]++
	}
	var out []int
	for v, c1 := range count1 {
		times := count2[v]
		if c1 < times {
			times = c1
		}
		for i := 0; i < times; i++ {
			out = append(out, v)
		}
	}
	return out
}
<file_sep>/algorithms/_219_Contains_Duplicate_2/answer.go
package _219_Contains_Duplicate_2
// containsNearbyDuplicate reports whether nums has two equal values at
// indices i and j with |i - j| <= k.
//
// Improvement: the original kept every index per value and re-scanned
// the whole list on each repeat (O(n·dups)); since indices arrive in
// ascending order only the most recent one can be within k, so a
// value -> last-index map suffices, O(n) total. Results are identical.
func containsNearbyDuplicate(nums []int, k int) bool {
	lastSeen := make(map[int]int) // value -> most recent index
	for idx, n := range nums {
		if prev, ok := lastSeen[n]; ok && idx-prev <= k {
			return true
		}
		lastSeen[n] = idx
	}
	return false
}
<file_sep>/algorithms/_491_Increasing_Subsequences/answer_test.go
package _491_Increasing_Subsequences
import "testing"
// TestFindSubsequences smoke-tests findSubsequences on three inputs.
// NOTE(review): results are only logged, never asserted — the expected
// outputs sit in the trailing comments; consider turning these into
// real checks.
func TestFindSubsequences(t *testing.T) {
	var (
		subsequences [][]int
	)
	subsequences = findSubsequences([]int{4, 6, 7, 7})
	t.Log(subsequences) // [[4,6],[4,6,7],[4,6,7,7],[4,7],[4,7,7],[6,7],[6,7,7],[7,7]]
	subsequences = findSubsequences([]int{4, 4, 3, 2, 1})
	t.Log(subsequences) // [[4,4]]
	subsequences = findSubsequences([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15})
	t.Log(subsequences) //
}
<file_sep>/algorithms/_69_Sqrt/answer.go
package _69_Sqrt
// mySqrt returns the integer square root (floor of √x).
//
// Improvement: the original incremented a counter until ret² exceeded
// x — O(√x); binary search gives O(log x) with identical results,
// including the original's -1 for negative input.
func mySqrt(x int) int {
	lo, hi := 0, x
	for lo <= hi {
		mid := lo + (hi-lo)/2
		if mid*mid <= x {
			lo = mid + 1
		} else {
			hi = mid - 1
		}
	}
	// hi is the largest value whose square does not exceed x.
	return hi
}
<file_sep>/algorithms/_1721_Swapping_Nodes_in_a_Linked_List/answer.go
package _1721_Swapping_Nodes_in_a_Linked_List
import "fmt"
// ListNode is a singly linked list node.
type ListNode struct {
	Val  int
	Next *ListNode
}
// String renders the list as the concatenation of its node values with
// no separator, e.g. the list 1->2->3 becomes "123".
func (l *ListNode) String() string {
	var buf []byte
	for n := l; n != nil; n = n.Next {
		buf = append(buf, fmt.Sprintf("%d", n.Val)...)
	}
	return string(buf)
}
// swapNodes exchanges the values of the k-th node from the front and
// the k-th node from the back (1-indexed) and returns the head.
func swapNodes(head *ListNode, k int) *ListNode {
	// Advance to the k-th node from the front.
	front := head
	for i := 1; i < k; i++ {
		front = front.Next
	}
	// Run a lead pointer from `front` to the end; the trailing pointer
	// started at head then lands on the k-th node from the back.
	back := head
	for lead := front; lead.Next != nil; lead = lead.Next {
		back = back.Next
	}
	front.Val, back.Val = back.Val, front.Val
	return head
}
<file_sep>/algorithms/_382_Linked_List_Random_Node/answer.go
package _382_Linked_List_Random_Node
import "math/rand"
// ListNode is a singly linked list node.
type ListNode struct {
	Val  int
	Next *ListNode
}
// Solution keeps only the list head; GetRandom rescans the whole list
// on every call (reservoir sampling), so no extra state is stored.
type Solution struct {
	head *ListNode
}
/** @param head The linked list's head.
Note that the head is guaranteed to be not null, so it contains at least one node. */
// Constructor wraps the list head in a Solution.
func Constructor(head *ListNode) Solution {
	return Solution{head: head}
}
/** Returns a random node's value. */
// GetRandom performs reservoir sampling with reservoir size 1: while
// walking the list, the current node's value replaces the chosen value
// with probability 1/scope, giving every node equal probability 1/n
// without knowing n in advance.
func (this *Solution) GetRandom() int {
	var (
		scope       = 1 // 1-based position of the current node
		chosenValue = 0
	)
	curr := this.head
	for curr != nil {
		if rand.Float64() < 1.0/float64(scope) {
			chosenValue = curr.Val
		}
		scope += 1
		curr = curr.Next
	}
	return chosenValue
}
/**
* Your Solution object will be instantiated and called as such:
* obj := Constructor(head);
* param_1 := obj.GetRandom();
*/
<file_sep>/algorithms/_331_Verify_Preorder_Serialization_of_a_Binary_Tree/answer.go
package _331_Verify_Preorder_Serialization_of_a_Binary_Tree
import "strings"
// isValidSerialization checks whether preorder is a valid preorder
// serialization of a binary tree, where "#" marks a nil child.
//
// Stack reduction: whenever the top of the stack is value,"#","#", that
// node's children are fully resolved, so the triple collapses into a
// single "#" (an empty-subtree placeholder). A valid serialization
// reduces to exactly ["#"].
func isValidSerialization(preorder string) bool {
	var st []string
	for _, tok := range strings.Split(preorder, ",") {
		st = append(st, tok)
		for {
			n := len(st)
			if n < 3 || st[n-1] != "#" || st[n-2] != "#" || st[n-3] == "#" {
				break
			}
			st = append(st[:n-3], "#")
		}
	}
	return len(st) == 1 && st[0] == "#"
}
<file_sep>/own_practice/sort/fastsort/arraysort.go
package fastsort
// ArrayFastSort1 sorts values in place with a pivot-carrying quicksort
// and returns the same slice for call-chaining convenience.
func ArrayFastSort1(values []int) []int {
	arrayFastSort1(values)
	return values
}
// arrayFastSort1 quicksorts values in place. values[0] is the pivot;
// elements greater than the pivot are thrown to the shrinking tail
// region, others are swapped into the head slot (which carries the
// pivot forward), so the pivot settles at index head.
func arrayFastSort1(values []int) {
	if len(values) <= 1 {
		return
	}
	mid, i := values[0], 1
	head, tail := 0, len(values)-1
	for head < tail {
		if values[i] > mid {
			// Larger than pivot: move to the tail, re-examine the swapped-in value.
			values[i], values[tail] = values[tail], values[i]
			tail--
		} else {
			// Not larger: rotate into the head slot and advance.
			values[i], values[head] = values[head], values[i]
			head++
			i++
		}
	}
	values[head] = mid // pivot into its final position
	// Recurse on the two partitions around the pivot.
	arrayFastSort1(values[:head])
	arrayFastSort1(values[head+1:])
}
// ArrayFastSort2 sorts values in place using a hole-filling quicksort
// and returns the same slice.
func ArrayFastSort2(values []int) []int {
	// Guard: the recursive helper reads values[left] unconditionally,
	// which would panic on an empty slice.
	if len(values) == 0 {
		return values
	}
	arrayFastSort2(values, 0, len(values)-1)
	return values
}
// arrayFastSort2 quicksorts values[left..right] in place with a
// hole-filling partition: removing the pivot (values[left]) leaves a
// hole at p, which is alternately refilled from the right (with a
// value < pivot) and from the left (with a value > pivot) until i and
// j converge on the pivot's final slot.
func arrayFastSort2(values []int, left, right int) {
	tmp, p := values[left], left
	i, j := left, right
	for i < j {
		// Walk j left past values that already belong right of the pivot.
		if j >= p && values[j] >= tmp {
			j--
		}
		if j >= p {
			// Fill the hole from the right; the hole moves to j.
			values[p] = values[j]
			p = j
		}
		// Walk i right past values that already belong left of the pivot.
		if values[i] <= tmp && i <= p {
			i++
		}
		if i <= p {
			// Fill the hole from the left; the hole moves to i.
			values[p] = values[i]
			p = i
		}
	}
	values[p] = tmp // pivot into its final position
	// Recurse only on partitions containing at least two elements.
	if p-left > 1 {
		arrayFastSort2(values, left, p-1)
	}
	if right-p > 1 {
		arrayFastSort2(values, p+1, right)
	}
}
<file_sep>/algorithms/_387_First_Unique_Character_in_a_String/answer.go
package _387_First_Unique_Character_in_a_String
// firstUniqChar returns the (byte) index of the first rune of s that
// occurs exactly once, or -1 if there is none. Two passes: count every
// rune, then find the first with count 1. Map access yields the zero
// value for missing keys, so freq[r]++ replaces the original
// lookup/increment/store sequence.
func firstUniqChar(s string) int {
	freq := make(map[rune]int)
	for _, r := range s {
		freq[r]++
	}
	for idx, r := range s {
		if freq[r] == 1 {
			return idx
		}
	}
	return -1
}
<file_sep>/algorithms/_968_Binary_Tree_Cameras/answer_test.go
package _968_Binary_Tree_Cameras
import (
"fmt"
"testing"
)
// TestMinCameraCover builds a degenerate left-leaning chain (with one
// right leaf at the bottom) and prints the camera count; the result is
// only printed, not asserted.
func TestMinCameraCover(t *testing.T) {
	root := &TreeNode{
		Left: &TreeNode{
			Left: &TreeNode{
				Left: &TreeNode{
					Left:  nil,
					Right: &TreeNode{},
				},
				Right: nil,
			},
			Right: nil,
		},
		Right: nil,
	}
	count := minCameraCover(root)
	fmt.Println(count)
}
<file_sep>/algorithms/_226_Invert_Binary_Tree/answer.go
package _226_Invert_Binary_Tree
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// TreeNode is a binary tree node.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}
// invertTree mirrors the tree (swaps every node's children) in place
// and returns the root; dispatches to the iterative implementation.
func invertTree(root *TreeNode) *TreeNode {
	// return invertTreeRecursive(root)
	return invertTreeIterative(root)
}
// invertTreeRecursive swaps the left and right subtrees of every node,
// returning the mutated root (recursive variant).
func invertTreeRecursive(root *TreeNode) *TreeNode {
	if root == nil {
		return root
	}
	// Both recursive calls are evaluated before the parallel assignment,
	// so no temporary variable is needed.
	root.Left, root.Right = invertTreeRecursive(root.Right), invertTreeRecursive(root.Left)
	return root
}
// invertTreeIterative mirrors the tree with an explicit FIFO queue:
// every dequeued node has its children swapped, and the non-nil
// children are enqueued for the same treatment.
func invertTreeIterative(root *TreeNode) *TreeNode {
	if root == nil {
		return root
	}
	queue := []*TreeNode{root}
	for len(queue) > 0 {
		node := queue[0]
		queue = queue[1:]
		node.Left, node.Right = node.Right, node.Left
		if node.Left != nil {
			queue = append(queue, node.Left)
		}
		if node.Right != nil {
			queue = append(queue, node.Right)
		}
	}
	return root
}
<file_sep>/algorithms/_136_Single_Number/answer.go
package _136_Single_Number
// singleNumber returns the element that appears exactly once when every
// other element appears exactly twice; uses the O(1)-space XOR variant.
func singleNumber(nums []int) int {
	//return singleNumberWithMap(nums)
	return singleNumberWithXOR(nums)
}
// singleNumberWithXOR XORs the whole slice together: paired values
// cancel (a ^ a == 0), leaving only the element that appears once.
func singleNumberWithXOR(nums []int) int {
	acc := 0
	for _, v := range nums {
		acc ^= v
	}
	return acc
}
// singleNumberWithMap finds the unique element with set semantics: a
// value seen a second time is removed, so the one leftover key is the
// answer. Uses map[int]struct{} (zero-byte values) and the idiomatic
// keys-only range form instead of `for r, _ := range`.
func singleNumberWithMap(nums []int) int {
	seen := make(map[int]struct{})
	for _, n := range nums {
		if _, ok := seen[n]; ok {
			delete(seen, n)
		} else {
			seen[n] = struct{}{}
		}
	}
	for r := range seen {
		return r
	}
	return 0 // unreachable for valid input (exactly one unpaired value)
}
<file_sep>/algorithms/_230_Kth_Smallest_Element_in_a_BST/answer.go
package _230_Kth_Smallest_Element_in_a_BST
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// kthSmallest returns the k-th smallest value (1-indexed) in a BST via
// iterative in-order traversal with an explicit stack, stopping as soon
// as k nodes have been visited. Returns -1 if the tree has fewer than
// k nodes.
func kthSmallest(root *TreeNode, k int) int {
	var stack []*TreeNode
	cur := root
	visited := 0
	for cur != nil || len(stack) > 0 {
		// Push the entire left spine of the current subtree.
		for cur != nil {
			stack = append(stack, cur)
			cur = cur.Left
		}
		cur = stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		visited++
		if visited == k {
			return cur.Val
		}
		cur = cur.Right
	}
	return -1
}
<file_sep>/algorithms/_58_Length_of_Last_Word/answer.go
package _58_Length_of_Last_Word
import (
"strings"
)
// lengthOfLastWord returns the length of the last whitespace-separated
// word in s, or 0 if s contains no words.
func lengthOfLastWord(s string) int {
	words := strings.Fields(s)
	if len(words) == 0 {
		return 0
	}
	return len(words[len(words)-1])
}
<file_sep>/algorithms/_442_Find_All_Duplicates_in_an_Array/answer.go
package _442_Find_All_Duplicates_in_an_Array
// findDuplicates returns every value appearing twice in nums, assuming
// (per the LeetCode 442 contract — TODO confirm for other callers) all
// values lie in [1, len(nums)]. Cyclic sort: swap each value v toward
// its home index v-1; afterwards any index i whose value differs from
// i+1 holds a duplicate. O(n) time, O(1) extra space (input is permuted).
func findDuplicates(nums []int) []int {
	l := len(nums)
	ret := []int{}
	for i := 0; i < l; i++ {
		// Swap until the value at i is home, or its home slot already
		// holds an equal value (a duplicate). i-- re-examines the
		// value that was swapped in.
		if nums[i] <= l && nums[i] != nums[nums[i]-1] {
			nums[i], nums[nums[i]-1] = nums[nums[i]-1], nums[i]
			i--
		}
	}
	for i, n := range nums {
		if i+1 != n {
			ret = append(ret, n)
		}
	}
	return ret
}
<file_sep>/algorithms/_110_Balanced_Binary_Tree/answer.go
package _110_Balanced_Binary_Tree
import "math"
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// isBalanced reports whether the tree is height-balanced: at every node
// the heights of the two subtrees differ by at most 1. depth() uses -1
// as a sentinel for "unbalanced somewhere below", so the answer is a
// single comparison — no if/else needed.
func isBalanced(root *TreeNode) bool {
	return depth(root) != -1
}
// depth returns the height of node's subtree, or -1 if any descendant
// violates the balance condition; the sentinel short-circuits the
// remaining measurements on the way back up.
func depth(node *TreeNode) int {
	if node == nil {
		return 0
	}
	leftDepth := depth(node.Left)
	if leftDepth == -1 {
		return -1
	}
	rightDepth := depth(node.Right)
	if rightDepth == -1 {
		return -1
	}
	if math.Abs(float64(leftDepth-rightDepth)) > 1 {
		return -1
	}
	return 1 + int(math.Max(float64(leftDepth), float64(rightDepth)))
}
<file_sep>/algorithms/_92_Reverse_Linked_List_2/answer.go
package _92_Reverse_Linked_List_2
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// ListNode is a singly linked list node.
type ListNode struct {
	Val  int
	Next *ListNode
}
// reverseBetween reverses the sublist between 1-based positions m and n
// (inclusive) using repeated head insertion: each iteration unlinks the
// node after `start` and re-inserts it directly after `pre`.
func reverseBetween(head *ListNode, m int, n int) *ListNode {
	if head == nil {
		return nil
	}
	// Dummy node in front of head so m == 1 needs no special case.
	ret := &ListNode{}
	ret.Next = head
	pre := ret
	// Walk pre to the node just before position m.
	for i := 0; i < m-1; i++ {
		pre = pre.Next
	}
	start := pre.Next // first node of the section; ends up last
	then := start.Next // the node currently being moved to the front
	for i := 0; i < n-m; i++ {
		start.Next = then.Next
		then.Next = pre.Next
		pre.Next = then
		then = start.Next
	}
	return ret.Next
}
<file_sep>/algorithms/_543_Diameter_of_Binary_Tree/answer.go
package _543_Diameter_of_Binary_Tree
// TreeNode is a binary tree node.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}
var max int
// diameterOfBinaryTree returns the number of edges on the longest path
// between any two nodes. It resets the package-level accumulator `max`
// before the recursive walk (not safe for concurrent use).
func diameterOfBinaryTree(root *TreeNode) int {
	max = 0
	diameterOfBinaryTreeR(root)
	return max
}
// diameterOfBinaryTreeR returns the height (in edges + 1) of root's
// subtree and, as a side effect, records in the package var `max` the
// longest left+right path passing through any node visited so far.
func diameterOfBinaryTreeR(root *TreeNode) int {
	if root == nil {
		return 0
	}
	left := diameterOfBinaryTreeR(root.Left)
	right := diameterOfBinaryTreeR(root.Right)
	if sum := left + right; sum > max {
		max = sum
	}
	if left > right {
		return left + 1
	}
	return right + 1
}
<file_sep>/own_practice/sort/fastsort/linkedlistsort.go
package fastsort
import (
. "leetcode_notes/utils/linkedlist"
)
// LinkedListFastSort1 sorts the linked list in place (by swapping node
// values, not relinking nodes) and returns its head.
func LinkedListFastSort1(l *IntListNode) *IntListNode {
	head := l
	linkedListFastSort1(head, nil)
	return head
}
// linkedListFastSort1 quicksorts the half-open node range [head, end)
// by swapping values. head.Val is the pivot; `small` trails the last
// node known to hold a value < pivot (Lomuto-style partition on a list).
func linkedListFastSort1(head, end *IntListNode) {
	if head == nil || head == end {
		return
	}
	p := head.Next // pointer for run
	small := head
	for p != end {
		if p.Val < head.Val {
			// Grow the "< pivot" prefix by one and move p's value into it.
			small = small.Next
			small.Val, p.Val = p.Val, small.Val
		}
		p = p.Next
	}
	// Put the pivot value at the boundary, then sort both sides.
	head.Val, small.Val = small.Val, head.Val
	linkedListFastSort1(head, small)
	linkedListFastSort1(small.Next, end)
}
<file_sep>/algorithms/_862_Shortest_Subarray_with_Sum_at_Least_K/answer.go
package _862_Shortest_Subarray_with_Sum_at_Least_K
// mem is a monotonic-queue entry: the prefix sum up to (and including)
// index idx.
type mem struct {
	idx int
	sum int
}

// shortestSubarray returns the length of the shortest contiguous
// subarray of nums whose sum is at least k, or -1 if none exists.
// A deque of prefix sums is kept strictly increasing; handles negative
// numbers, unlike a plain sliding window.
func shortestSubarray(nums []int, k int) int {
	prefix := 0
	window := []mem{{idx: -1, sum: 0}} // sentinel: empty prefix
	best := len(nums) + 1
	for i, v := range nums {
		prefix += v
		// Front entries far enough below the current prefix close a
		// valid subarray; pop them while tightening the answer (they
		// can never yield a shorter subarray later).
		for len(window) > 0 && prefix-window[0].sum >= k {
			if cand := i - window[0].idx; cand < best {
				best = cand
			}
			window = window[1:]
		}
		// Back entries with prefix >= current are dominated: index i is
		// both later and has a smaller-or-equal prefix sum.
		for len(window) > 0 && prefix <= window[len(window)-1].sum {
			window = window[:len(window)-1]
		}
		window = append(window, mem{idx: i, sum: prefix})
	}
	if best == len(nums)+1 {
		return -1
	}
	return best
}
// min returns the smaller of a and b.
func min(a, b int) int {
	if a > b {
		return b
	}
	return a
}
<file_sep>/algorithms/_300_Longest_Increasing_Subsequence/answer.go
package _300_Longest_Increasing_Subsequence
// lengthOfLIS returns the length of the longest strictly increasing
// subsequence of nums; dispatches to the O(n log n) greedy variant.
func lengthOfLIS(nums []int) int {
	//return lengthOfLISDP(nums)
	return lengthOfLISGreedy(nums)
}
// maxInt returns the larger of a and b.
func maxInt(a, b int) int {
	if b > a {
		return b
	}
	return a
}
// lengthOfLISDP computes the LIS length in O(n^2): dp[i] is the length
// of the longest strictly increasing subsequence ending at nums[i].
// The inner loop stops at j < i — the original ran j <= i, where the
// final iteration compared nums[i] with itself and was always a no-op.
func lengthOfLISDP(nums []int) int {
	if len(nums) == 0 {
		return 0
	}
	dp := make([]int, len(nums))
	for i := range dp {
		dp[i] = 1 // every element alone is a subsequence of length 1
	}
	best := 1
	for i := 1; i < len(nums); i++ {
		for j := 0; j < i; j++ {
			if nums[i] > nums[j] && dp[j]+1 > dp[i] {
				dp[i] = dp[j] + 1
			}
		}
		if dp[i] > best {
			best = dp[i]
		}
	}
	return best
}
// lengthOfLISGreedy builds a candidate sequence greedily: each value
// either extends the tail or overwrites the first element >= it (found
// by binary search). The slice's CONTENTS may not be a real subsequence
// of nums, but its LENGTH always equals the LIS length. O(n log n).
func lengthOfLISGreedy(nums []int) int {
	if len(nums) == 0 {
		return 0
	}
	var (
		lis = []int{nums[0]}
	)
	for i := 1; i < len(nums); i++ {
		val := nums[i]
		head, tail := lis[0], lis[len(lis)-1]
		if val <= head {
			lis[0] = val
		} else if val > tail {
			lis = append(lis, val)
		} else {
			lis[findPos(lis, val)] = val
		}
	}
	//fmt.Println(lis) // inspect the built slice: possibly not a valid subsequence, but the length is correct
	return len(lis)
}
// findPos binary-searches lis (sorted ascending) and returns the index
// of the first element >= val.
func findPos(lis []int, val int) int {
	lo, hi := 0, len(lis)-1
	for lo < hi {
		mid := (lo + hi) / 2
		if lis[mid] < val {
			lo = mid + 1
		} else {
			hi = mid
		}
	}
	return lo
}
<file_sep>/algorithms/_25_Reverse_Nodes_in_k_Group/answer.go
package _25_Reverse_Nodes_in_k_Group
import "fmt"
// ListNode is a singly linked list node.
type ListNode struct {
	Val  int
	Next *ListNode
}
// String renders the list as the concatenation of its node values with
// no separator, e.g. 1->2->3 becomes "123".
func (l *ListNode) String() string {
	out := ""
	for n := l; n != nil; n = n.Next {
		out = fmt.Sprintf("%s%d", out, n.Val)
	}
	return out
}
// reverseKGroup reverses the list in groups of k nodes; a trailing
// group shorter than k keeps its original order. Nodes are COPIED, not
// relinked: each group is built reversed via head insertion into tmp,
// then spliced onto the result.
func reverseKGroup(head *ListNode, k int) *ListNode {
	var (
		count int
		tmp   = &ListNode{}
		ret   = &ListNode{}
		tret  = ret          // tail of ret, where the next group attaches
		ttail = &ListNode{}  // tail node of the current group, becomes ret's new tail
	)
	for head != nil {
		if count < k {
			// Head-insert a copy of the current node (reverses the group).
			x := &ListNode{
				Val:  head.Val,
				Next: tmp.Next,
			}
			tmp.Next = x
			if count == 0 {
				ttail = x // the first-inserted node ends up last in the group
			}
			head = head.Next
			count++
		} else {
			// Group full: splice it onto the result and start a new one.
			tret.Next = tmp.Next
			tmp = &ListNode{}
			tret = ttail
			count = 0
		}
	}
	if count == k {
		// Attach the final, complete group as-is (already reversed).
		tret.Next = tmp.Next
	} else if count > 0 {
		// Incomplete final group: reverse it back to original order.
		var (
			tmpTail = &ListNode{}
		)
		for tmp.Next != nil {
			x := &ListNode{
				Val:  tmp.Next.Val,
				Next: tmpTail.Next,
			}
			tmpTail.Next = x
			tmp = tmp.Next
		}
		tret.Next = tmpTail.Next
	}
	return ret.Next
}
<file_sep>/algorithms/_599_Minimum_Index_Sum_of_Two_Lists/answer.go
package _599_Minimum_Index_Sum_of_Two_Lists
// findRestaurant returns the common string(s) of list1 and list2 with
// the least index sum.
func findRestaurant(list1 []string, list2 []string) []string {
	return findRestaurantLoopSearch(list1, list2)
}
// findRestaurantLoopSearch returns the common strings of list1 and
// list2 with the least index sum; all ties are returned in discovery
// order. Loops break early once indices alone exceed the best sum.
func findRestaurantLoopSearch(list1 []string, list2 []string) []string {
	bySum := make(map[int][]string)
	best := len(list1) + len(list2) // upper bound on any index sum
	for i1, s1 := range list1 {
		if i1 > best {
			break // i1 alone already exceeds the best sum
		}
		for i2, s2 := range list2 {
			if i2 > best {
				break
			}
			if s1 != s2 {
				continue
			}
			if sum := i1 + i2; sum <= best {
				bySum[sum] = append(bySum[sum], s2)
				best = sum
			}
			break // only the first match of s1 in list2 matters
		}
	}
	return bySum[best]
}
<file_sep>/algorithms/_264_Ugly_Number_2/answer_test.go
package _264_Ugly_Number_2
import "testing"
// TestNthUglyNumber checks that the 10th ugly number is 12
// (sequence: 1, 2, 3, 4, 5, 6, 8, 9, 10, 12).
func TestNthUglyNumber(t *testing.T) {
	if ret := nthUglyNumber(10); ret != 12 {
		t.Errorf("ret of %d should be %d, rather than %d", 10, 12, ret)
	}
}
<file_sep>/algorithms/_263_Ugly_Number/answer_test.go
package _263_Ugly_Number
import "testing"
// TestIsUgly checks two positive cases: 1 (trivially ugly) and
// 6 = 2 * 3 (only prime factors 2, 3, 5).
func TestIsUgly(t *testing.T) {
	if ret := isUgly(1); ret != true {
		t.Errorf("%d ret should be %v", 1, true)
	}
	if ret := isUgly(6); ret != true {
		t.Errorf("%d ret should be %v", 6, true)
	}
}
<file_sep>/algorithms/_88_Merge_Sorted_Array/answer_test.go
package _88_Merge_Sorted_Array
import (
. "leetcode_notes/utils/array"
"testing"
)
// TestMerge exercises the in-place merge of two sorted arrays, where
// nums1 carries trailing zero slots to receive nums2's elements.
func TestMerge(t *testing.T) {
	var nums1, nums2 []int
	// nums1 has no real elements (m = 0); result is just nums2.
	nums1 = []int{0}
	nums2 = []int{1}
	if merge(nums1, 0, nums2, 1); !IsIntArrayEqual(nums1, []int{1}) {
		t.Error("error, get", nums1)
	}
	// One element in each list.
	nums1 = []int{1, 0}
	nums2 = []int{2}
	if merge(nums1, 1, nums2, 1); !IsIntArrayEqual(nums1, []int{1, 2}) {
		t.Error("error, get", nums1)
	}
}
<file_sep>/algorithms/_99_Recover_Binary_Search_Tree/answer.go
package _99_Recover_Binary_Search_Tree
// TreeNode is a binary tree node.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}
var (
pre *TreeNode
m1, m2 *TreeNode
)
// recoverTree fixes a BST in which exactly two nodes' values were
// swapped. An in-order walk (dfs) records the first and second
// out-of-order nodes in the package vars m1/m2, whose values are then
// swapped back. Not safe for concurrent use (package-level state).
func recoverTree(root *TreeNode) {
	pre, m1, m2 = nil, nil, nil
	dfs(root)
	// Guard: if the tree was already valid, dfs leaves m1/m2 nil and
	// the original unconditional swap would panic with a nil deref.
	if m1 != nil && m2 != nil {
		m1.Val, m2.Val = m2.Val, m1.Val
	}
}
// dfs walks the tree in order using package-level state: pre is the
// previously visited node. The first inversion (pre.Val > cur.Val)
// fixes m1 = pre; every inversion updates m2 = cur, so after the walk
// m1 and m2 are the two nodes whose values were swapped.
func dfs(root *TreeNode) {
	if root == nil {
		return
	}
	dfs(root.Left)
	if pre != nil && pre.Val > root.Val {
		if m1 == nil {
			m1 = pre
		}
		m2 = root
	}
	pre = root
	dfs(root.Right)
	return
}
<file_sep>/algorithms/_328_Odd_Even_Linked_List/answer.go
package _328_Odd_Even_Linked_List
// ListNode is a singly linked list node.
type ListNode struct {
	Val  int
	Next *ListNode
}
// oddEvenList returns a NEW list holding head's 1st, 3rd, 5th, ...
// values followed by its 2nd, 4th, 6th, ... values. The input list is
// not modified; fresh nodes are allocated.
func oddEvenList(head *ListNode) *ListNode {
	oddHead, evenHead := &ListNode{}, &ListNode{}
	odd, even := oddHead, evenHead
	for pos := 0; head != nil; pos++ {
		node := &ListNode{Val: head.Val}
		if pos%2 == 0 { // 1st, 3rd, 5th, ... (odd positions, 1-based)
			odd.Next = node
			odd = node
		} else {
			even.Next = node
			even = node
		}
		head = head.Next
	}
	odd.Next = evenHead.Next // append the even chain after the odd chain
	return oddHead.Next
}
<file_sep>/algorithms/_102_Binary_Tree_Level_Order_Traversal/answer.go
package _102_Binary_Tree_Level_Order_Traversal
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// TreeNode is a binary tree node.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}
// levelOrder returns the tree's values level by level (BFS), one inner
// slice per depth, top to bottom. This restyle tracks each level as a
// plain slice instead of going through the file's Queue helper; the
// produced output is identical.
func levelOrder(root *TreeNode) [][]int {
	ret := [][]int{}
	if root == nil {
		return ret
	}
	current := []*TreeNode{root}
	for len(current) > 0 {
		vals := make([]int, 0, len(current))
		next := []*TreeNode{}
		for _, n := range current {
			vals = append(vals, n.Val)
			if n.Left != nil {
				next = append(next, n.Left)
			}
			if n.Right != nil {
				next = append(next, n.Right)
			}
		}
		ret = append(ret, vals)
		current = next
	}
	return ret
}
// Queue is a simple FIFO of tree nodes backed by a slice.
type Queue struct {
	queue []*TreeNode
}

// IsEmpty reports whether the queue holds no nodes.
func (q *Queue) IsEmpty() bool {
	return q.Len() == 0
}

// Push appends node to the back of the queue.
func (q *Queue) Push(node *TreeNode) {
	q.queue = append(q.queue, node)
}

// Pop removes and returns the front node; it returns nil when empty.
func (q *Queue) Pop() *TreeNode {
	if len(q.queue) == 0 {
		return nil
	}
	front := q.queue[0]
	q.queue = q.queue[1:]
	return front
}

// Len returns the number of queued nodes.
func (q *Queue) Len() int {
	return len(q.queue)
}
<file_sep>/algorithms/_44_Wildcard_Matching/answer.go
package _44_Wildcard_Matching
// isMatch reports whether s matches the wildcard pattern p, where '?'
// matches any single character and '*' any (possibly empty) sequence;
// dispatches to the greedy two-pointer scan.
func isMatch(s string, p string) bool {
	return isMatchScan(s, p)
}
// isMatchScan does wildcard matching with a greedy two-pointer scan:
// on a mismatch it backtracks to the most recent '*' and lets that '*'
// absorb one more character of s. Only the latest '*' needs tracking —
// if a later '*' is reached, everything before it already matched.
func isMatchScan(s string, p string) bool {
	var (
		i, j int // indexes into s and p
		lp = len(p) // pattern length
		starPos int = -1 // index of the last '*' seen; -1 means none yet
		iStar int // position in s where the current '*' match started
	)
	for i < len(s) { // until s is fully consumed
		if j < lp && (p[j] == '?' || p[j] == s[i]) { // plain match: advance both
			i += 1
			j += 1
		} else if j < lp && p[j] == '*' { // record the '*' and try the pattern char after it
			starPos = j
			j += 1
			iStar = i // the '*' starts absorbing s from this position
		} else if starPos != -1 { // mismatch while a '*' is active: backtrack
			// A second entry into this state means the previous '*'
			// already matched, so only the newest '*' matters.
			// Branch order above matters:
			//// if the char after '*' matches, the top branch advances normally
			////// a new '*' is handled the same way; success simply ends the loop
			//// if the scan later gets stuck, the '*' stopped absorbing too early:
			////// reset j to the char right after the '*',
			////// advance iStar by one and resume s from there
			j = starPos + 1 // while matching a '*', compare against the char right after it
			iStar += 1
			i = iStar
		} else {
			return false
		}
	}
	// s is consumed; only trailing '*'s in p can still match (the empty string).
	for j < lp && p[j] == '*' {
		j++
	}
	return j == lp // matched iff the whole pattern is consumed
}
// State transition (DP recurrence):
// if p[j-1] == '*': f(i,j) = f(i, j-1) or f(i-1, j)
//// for '*', decide whether it matches zero more characters or one more
////// zero characters: f(i, j-1)
////// one more character: f(i-1, j) — the same '*' may keep consuming later steps
// if p[j-1] != '*': f(i,j) = f(i-1, j-1) and (s[i-1] == p[j-1] or p[j-1] == '?')
//// for a non-'*', the shorter prefixes must match and the current characters must correspond
// isMatchDP solves wildcard matching by dynamic programming.
// dp[i][j] is true iff s[:i] matches p[:j]; '?' matches exactly one
// character and '*' matches any (possibly empty) sequence.
func isMatchDP(s, p string) bool {
	dp := make([][]bool, len(s)+1) // [len(s)+1][len(p)+1], zero-valued false
	for i := range dp {
		dp[i] = make([]bool, len(p)+1)
	}
	dp[0][0] = true // empty pattern matches empty string
	for i := 0; i <= len(s); i++ {
		for j := 1; j <= len(p); j++ {
			if p[j-1] == '*' {
				// '*' consumes nothing (dp[i][j-1]) or one more char of s (dp[i-1][j]).
				dp[i][j] = dp[i][j-1]
				if i > 0 {
					dp[i][j] = dp[i][j] || dp[i-1][j]
				}
			} else if i > 0 {
				// Single-character match on top of matching prefixes.
				dp[i][j] = dp[i-1][j-1] && (s[i-1] == p[j-1] || p[j-1] == '?')
			}
		}
	}
	return dp[len(s)][len(p)]
}
<file_sep>/algorithms/_167_Two_Sum_2_Input_array_is_sorted/answer.go
package _167_Two_Sum_2_Input_array_is_sorted
// twoSum returns the 1-based indices of the two entries of the sorted
// slice numbers that add up to target, or nil if no such pair exists.
// Two-pointer scan from both ends: O(n) time, O(1) space.
func twoSum(numbers []int, target int) []int {
	lo, hi := 0, len(numbers)-1
	for lo < hi {
		switch sum := numbers[lo] + numbers[hi]; {
		case sum < target:
			lo++
		case sum > target:
			hi--
		default:
			return []int{lo + 1, hi + 1}
		}
	}
	return nil
}
<file_sep>/algorithms/_42_Trapping_Rain_Water/answer_test.go
package _42_Trapping_Rain_Water
import "testing"
// TestTrap checks the classic elevation map example, which traps 6
// units of water.
func TestTrap(t *testing.T) {
	if sum := trap([]int{0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1}); sum != 6 {
		t.Errorf("wrong number is %d", sum)
	}
}
<file_sep>/algorithms/_5_Longest_Palindromic_Substring/answer.go
package _5_Longest_Palindromic_Substring
var location, maxLength int
// longestPalindrome returns the longest palindromic substring of s by
// expanding around every center: each index (odd length) and each gap
// between neighbors (even length). Results accumulate in the package
// vars location/maxLength, so this is not safe for concurrent use.
func longestPalindrome(s string) string {
	location, maxLength = 0, 0
	l := len(s)
	if l < 2 {
		return s
	}
	for i := 0; i < l-1; i++ {
		extendPalindrome(s, i, i)   // odd-length center at i
		extendPalindrome(s, i, i+1) // even-length center between i and i+1
	}
	return string(s[location : location+maxLength])
}
// extendPalindrome widens the window [j, k] outward while it remains a
// palindrome and records the longest span seen so far in the package
// vars location/maxLength.
func extendPalindrome(s string, j, k int) {
	for j >= 0 && k < len(s) && s[j] == s[k] {
		j--
		k++
	}
	// The loop overshoots by one on each side: the palindrome is (j, k).
	if n := k - j - 1; n > maxLength {
		location = j + 1
		maxLength = n
	}
}
<file_sep>/algorithms/_509_Fibonacci_Number/answer_test.go
package _509_Fibonacci_Number
import "testing"
// TestFib checks fib at 2, 3, 4 against the Fibonacci sequence
// (0, 1, 1, 2, 3, ...).
func TestFib(t *testing.T) {
	if ret := fib(2); ret != 1 {
		t.Errorf("wrong ret with %d", ret)
	}
	if ret := fib(3); ret != 2 {
		t.Errorf("wrong ret with %d", ret)
	}
	if ret := fib(4); ret != 3 {
		t.Errorf("wrong ret with %d", ret)
	}
}
<file_sep>/algorithms/_658_Find_K_Closest_Element/answer.go
package _658_Find_K_Closest_Element
// findClosestElements returns the k elements of sorted arr closest to x
// (ties prefer the smaller value), in ascending order.
func findClosestElements(arr []int, k int, x int) []int {
	return findClosestElementsFromHeadAndTail(arr, k, x)
	//return findClosestElementsBinarySearch(arr, k, x)
}
// findClosestElementsFromHeadAndTail shrinks the window [lo, hi] from
// whichever end is farther from x until exactly k elements remain; on a
// tie the right end is dropped, keeping the smaller values. O(n - k).
func findClosestElementsFromHeadAndTail(arr []int, k int, x int) []int {
	lo, hi := 0, len(arr)-1
	for hi-lo+1 > k {
		if x-arr[lo] > arr[hi]-x {
			lo++
		} else {
			hi--
		}
	}
	return arr[lo : hi+1]
}
// findClosestElementsBinarySearch binary-searches for the left edge of
// the best k-wide window in [0, len(arr)-k]. Comparing x-arr[mid]
// against arr[mid+k]-x (deliberately WITHOUT absolute values) decides
// which half contains the closer window. O(log(n-k)).
func findClosestElementsBinarySearch(arr []int, k int, x int) []int {
	lo, hi := 0, len(arr)-k
	for lo < hi {
		mid := lo + (hi-lo)/2
		if x-arr[mid] > arr[mid+k]-x {
			lo = mid + 1
		} else {
			hi = mid
		}
	}
	return arr[lo : lo+k]
}
<file_sep>/algorithms/_513_Find_Bottom_Left_Tree_Value/QD.md
Given the root of a binary tree, return the leftmost value in the last row of the tree.
Example 1:
```
Input: root = [2,1,3]
Output: 1
```
Example 2:
```
Input: root = [1,2,3,4,null,5,6,null,null,7]
Output: 7
```
Constraints:
- The number of nodes in the tree is in the range [1, 10^4].
- -2^31 <= Node.val <= 2^31 - 1
----
思路1:<br>
深度优先,对比左右子树最大深度<br>
思路2:<br>
广度优先层序遍历,记录每一层左侧第一个<br>
----
leetcode 513<file_sep>/algorithms/_842_Split_Array_into_Fibonacci_Sequence/answer_test.go
package _842_Split_Array_into_Fibonacci_Sequence
import (
"testing"
"github.com/gotoprosperity/algorithm/util"
)
// TestSplitIntoFibonacci covers a valid split, a leading-zero rejection,
// and an overflow-prone long input that admits no split.
func TestSplitIntoFibonacci(t *testing.T) {
	if ret := splitIntoFibonacci("123456579"); !util.IsIntArrayEqual(ret, []int{123, 456, 579}) {
		t.Errorf("wrong ret with %v", ret)
	}
	// "0123": pieces with leading zeros are not allowed.
	if ret := splitIntoFibonacci("0123"); !util.IsIntArrayEqual(ret, []int{}) {
		t.Errorf("wrong ret with %v", ret)
	}
	// Very long digit string: must not overflow and must find no split.
	if ret := splitIntoFibonacci("539834657215398346785398346991079669377161950407626991734534318677529701785098211336528511"); !util.IsIntArrayEqual(ret, []int{}) {
		t.Errorf("wrong ret with %v", ret)
	}
}
<file_sep>/algorithms/_211_Design_Add_and_Search_Words_Data_Structure/answer_test.go
package _211_Design_Add_and_Search_Words_Data_Structure
import "testing"
// TestWD adds four words to the dictionary and logs lookups both with
// literal strings and with '.' wildcards; results are not asserted.
func TestWD(t *testing.T) {
	wd := Constructor()
	wd.AddWord("bad")
	wd.AddWord("dad")
	wd.AddWord("mad")
	wd.AddWord("pad")
	t.Log("search \"bad\"", wd.Search("bad"))
	t.Log("search \".ad\"", wd.Search(".ad"))
	t.Log("search \"b..\"", wd.Search("b.."))
}
<file_sep>/algorithms/_173_Binary_Search_Tree_Iterator/answer.go
package _173_Binary_Search_Tree_Iterator
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// BSTIterator yields a BST's values in ascending order; l holds the
// remaining values, pre-flattened by an in-order traversal.
type BSTIterator struct {
	l []int
}
// Constructor builds a BSTIterator by flattening the whole BST with an
// iterative in-order traversal, so l holds every value ascending.
func Constructor(root *TreeNode) BSTIterator {
	vals := []int{}
	var stack []*TreeNode
	cur := root
	for cur != nil || len(stack) > 0 {
		// Push the entire left spine of the current subtree.
		for cur != nil {
			stack = append(stack, cur)
			cur = cur.Left
		}
		cur = stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		vals = append(vals, cur.Val)
		cur = cur.Right
	}
	return BSTIterator{l: vals}
}
/** @return the next smallest number */
// Next pops and returns the smallest remaining value, or 0 once the
// iterator is exhausted.
func (this *BSTIterator) Next() int {
	if len(this.l) == 0 {
		return 0
	}
	ret := this.l[0]
	// Re-slicing also handles the single-element case correctly; the
	// original's special branch for len == 1 was redundant.
	this.l = this.l[1:]
	return ret
}
/** @return whether we have a next smallest number */
// HasNext reports whether any values remain; returns the comparison
// directly instead of the original if/else-with-booleans.
func (this *BSTIterator) HasNext() bool {
	return len(this.l) > 0
}
/**
* Your BSTIterator object will be instantiated and called as such:
* obj := Constructor(root);
* param_1 := obj.Next();
* param_2 := obj.HasNext();
*/
<file_sep>/algorithms/_513_Find_Bottom_Left_Tree_Value/answer.go
package _513_Find_Bottom_Left_Tree_Value
// TreeNode is a binary tree node.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}
// findBottomLeftValue returns the value of the leftmost node on the
// deepest level; dispatches to the BFS implementation.
func findBottomLeftValue(root *TreeNode) int {
	//return findBottomLeftValueDFS(root)
	return findBottomLeftValueBFS(root)
}
// findBottomLeftValueBFS walks the tree level by level, remembering the
// first value of each level; the last one recorded belongs to the
// deepest level's leftmost node. Returns -1 for a nil root.
func findBottomLeftValueBFS(root *TreeNode) int {
	if root == nil {
		return -1
	}
	ret := 0
	for level := []*TreeNode{root}; len(level) > 0; {
		ret = level[0].Val // leftmost node of this level
		next := []*TreeNode{}
		for _, n := range level {
			if n.Left != nil {
				next = append(next, n.Left)
			}
			if n.Right != nil {
				next = append(next, n.Right)
			}
		}
		level = next
	}
	return ret
}
// findBottomLeftValueDFS answers via depth-first traversal; see DFS for
// how ties between subtrees are resolved in favor of the left side.
func findBottomLeftValueDFS(root *TreeNode) int {
	ret, _ := DFS(root, 0, 0)
	return ret
}
// DFS returns the value and depth of the deepest node in root's
// subtree. `value` and `depth` are handed down by the parent (the
// parent's own value and this node's depth); a nil child reports them
// back unchanged, so a leaf yields its own value. The >= comparison
// prefers the left subtree on depth ties, making the result the
// LEFTMOST deepest value.
func DFS(root *TreeNode, value, depth int) (nv, nd int) {
	if root == nil {
		// Past a leaf: report the parent's value at the parent's depth.
		return value, depth
	}
	nvl, ndl := DFS(root.Left, root.Val, depth+1)
	nvr, ndr := DFS(root.Right, root.Val, depth+1)
	if ndl >= ndr { // >= keeps the left side on ties
		nd = ndl
		nv = nvl
	} else {
		nd = ndr
		nv = nvr
	}
	return
}
<file_sep>/algorithms/_148_Sort_List/answer.go
package _148_Sort_List
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// ListNode is a singly linked list node.
type ListNode struct {
	Val  int
	Next *ListNode
}
// sortList sorts the linked list by value-swapping quicksort (nodes are
// not relinked) and returns the unchanged head pointer.
func sortList(head *ListNode) *ListNode {
	fastSortListRecursion(head, nil)
	return head
}
// fastSortListRecursion quicksorts the half-open node range [head, end)
// by swapping values: head.Val is the pivot and `small` trails the last
// node smaller than it (Lomuto-style partition on a list).
func fastSortListRecursion(head *ListNode, end *ListNode) {
	if head == nil || head == end {
		return
	}
	p := head.Next
	small := head
	for p != end {
		if p.Val < head.Val {
			// Grow the "< pivot" prefix and move p's value into it.
			small = small.Next
			p.Val, small.Val = small.Val, p.Val
		}
		p = p.Next
	}
	// Pivot value to the partition boundary, then sort both sides.
	head.Val, small.Val = small.Val, head.Val
	fastSortListRecursion(head, small)
	fastSortListRecursion(small.Next, end)
}
<file_sep>/algorithms/_91_Decode_Ways/answer.go
package _91_Decode_Ways
// numDecodings counts the ways to decode a digit string where "1".."26"
// map to letters 'A'..'Z'.
func numDecodings(s string) int {
	return numDecodingsWithDP(s)
}
// numDecodingsWithDP counts decodings by dynamic programming:
// dp[i] is the number of ways to decode the first i characters.
// A character contributes dp[i-1] ways if it is a valid single digit
// ('1'..'9'), and dp[i-2] more if it pairs with its predecessor into
// a value in [10, 26].
func numDecodingsWithDP(s string) int {
	if len(s) == 0 || s[0] == '0' {
		return 0 // leading zero (or empty input) decodes to nothing
	}
	dp := make([]int, len(s)+1)
	dp[0], dp[1] = 1, 1
	for i := 2; i <= len(s); i++ {
		cur, prev := s[i-1], s[i-2]
		if cur != '0' { // the single digit stands on its own
			dp[i] = dp[i-1]
		}
		if prev == '1' || (prev == '2' && cur <= '6') { // the pair is in [10, 26]
			dp[i] += dp[i-2]
		}
	}
	return dp[len(s)]
}
<file_sep>/algorithms/_560_Subarray_Sum_Equals_K/answer_test.go
package _560_Subarray_Sum_Equals_K
import "testing"
// TestSubarraySum checks that [1,1,1] has exactly two subarrays summing
// to 2 ([1,1] at two positions).
func TestSubarraySum(t *testing.T) {
	if count := subarraySum([]int{1, 1, 1}, 2); count != 2 {
		t.Errorf("wrong count with %d", count)
	}
}
<file_sep>/algorithms/_1_Two_Sum/answer_test.go
package _1_Two_Sum
import (
"testing"
)
// TestTwoSum verifies that twoSum finds the indices of the two elements
// summing to target; the order of the returned pair is not assumed.
func TestTwoSum(t *testing.T) {
	arr := []int{3, 2, 4}
	target := 6
	ret := twoSum(arr, target)
	want := []int{1, 2}
	if len(ret) != 2 {
		t.Errorf("Your answer is %v, it should be %v", ret, want)
	}
	// Both expected indices must be present, in any order.
	if IntArrayIndex(ret, 1) == -1 || IntArrayIndex(ret, 2) == -1 {
		t.Errorf("Your answer is %v, it should be %v", ret, want)
	}
}
// IntArrayIndex returns the index of the first occurrence of ele in
// arr, or -1 if it is absent.
func IntArrayIndex(arr []int, ele int) int {
	for i, v := range arr {
		if v == ele {
			return i
		}
	}
	return -1
}
<file_sep>/algorithms/_297_Serialize_and_Deserialize_Binary_Tree/answer_test.go
package _297_Serialize_and_Deserialize_Binary_Tree
import (
"fmt"
"testing"
)
// TestPreOrderSerialize round-trips a small tree and a nil tree through
// the preorder serialize/deserialize pair; output is printed, not asserted.
func TestPreOrderSerialize(t *testing.T) {
	var (
		root  *TreeNode
		str   string
		start int
	)
	root = &TreeNode{Val: 1, Left: nil, Right: &TreeNode{Val: 2}}
	str = preOrderSerialize(root)
	fmt.Println(str)
	start = 0 // preorder deserialization consumes from the front
	root = preOrderDeserialize(str, &start)
	fmt.Println(root)
	root = nil
	str = preOrderSerialize(root)
	fmt.Println(str)
	start = 0
	root = preOrderDeserialize(str, &start)
	fmt.Println(root)
}
// TestPostOrderSerialize round-trips a small tree and a nil tree through
// the postorder pair; deserialization starts from the string's end.
func TestPostOrderSerialize(t *testing.T) {
	var (
		root  *TreeNode
		str   string
		start int
	)
	root = &TreeNode{Val: 1, Left: nil, Right: &TreeNode{Val: 2}}
	str = postOrderSerialize(root)
	fmt.Println(str)
	start = len(str) - 1 // postorder is consumed back-to-front
	root = postOrderDeserialize(str, &start)
	fmt.Println(root)
	root = nil
	str = postOrderSerialize(root)
	fmt.Println(str)
	start = len(str) - 1
	root = postOrderDeserialize(str, &start)
	fmt.Println(root)
}
// TestBFSSerialize round-trips a small tree and a nil tree through the
// level-order (BFS) serialize/deserialize pair; output is printed only.
func TestBFSSerialize(t *testing.T) {
	var (
		root *TreeNode
		str  string
	)
	root = &TreeNode{Val: 1, Left: nil, Right: &TreeNode{Val: 2}}
	str = BFSSerialize(root)
	fmt.Println(str)
	root = BFSDeserialize(str)
	fmt.Println(root, root.Left, root.Right)
	root = nil
	str = BFSSerialize(root)
	fmt.Println(str)
	root = BFSDeserialize(str)
	fmt.Println(root)
}
<file_sep>/algorithms/_374_Guess_Number_Higher_or_Lower/answer.go
package _374_Guess_Number_Higher_or_Lower
/**
* Forward declaration of guess API.
* @param num your guess
* @return -1 if num is lower than the guess number
* 1 if num is higher than the guess number
* otherwise return 0
* func guess(num int) int;
*/
// guessNumber finds the picked number in [1, n] by binary search,
// narrowing the range according to the guess API's answer (0 = found,
// -1 = search lower, otherwise search higher).
func guessNumber(n int) int {
	lo, hi := 1, n
	for lo <= hi {
		mid := lo + (hi-lo)/2
		switch guess(mid) {
		case 0:
			return mid
		case -1:
			hi = mid - 1
		default:
			lo = mid + 1
		}
	}
	return -1
}
<file_sep>/algorithms/_215_Kth_Largest_Element_in_an_Array/answer_test.go
package _215_Kth_Largest_Element_in_an_Array
import "testing"
// TestFindKthLargest checks two cases: the 2nd largest of a distinct
// array and the 4th largest of an array containing duplicates.
func TestFindKthLargest(t *testing.T) {
	var (
		arr    []int
		k, ret int
	)
	arr, k = []int{3, 2, 1, 5, 6, 4}, 2
	ret = findKthLargest(arr, k)
	if ret != 5 {
		t.Errorf("ret is not 5, it's %d", ret)
	}
	arr, k = []int{3, 2, 3, 1, 2, 4, 5, 5, 6}, 4
	ret = findKthLargest(arr, k)
	if ret != 4 {
		t.Errorf("ret is not 4, it's %d", ret)
	}
}
<file_sep>/algorithms/_47_Permutations_2/answer.go
package _47_Permutations_2
import (
"sort"
)
// permuteUnique returns all unique permutations of nums, which may
// contain duplicates. Sorting first lets backtrack skip equal siblings.
func permuteUnique(nums []int) [][]int {
	ret := [][]int{}
	sort.Ints(nums)
	// Idiomatic: size the bool slice up front instead of an append loop.
	used := make([]bool, len(nums))
	backtrack(&ret, &[]int{}, nums, &used)
	return ret
}
// backtrack appends to *list every full-length permutation reachable by
// extending *tmpList. used marks indices already in the prefix; the
// condition nums[i] == nums[i-1] && !(*used)[i-1] skips a duplicate
// value unless the equal element before it is already used, which
// deduplicates permutations (requires nums sorted).
func backtrack(list *[][]int, tmpList *[]int, nums []int, used *[]bool) {
	if len(*tmpList) == len(nums) {
		// Copy tmpList: its backing array is mutated as recursion unwinds.
		l := []int{}
		for _, v := range *tmpList {
			l = append(l, v)
		}
		*list = append(*list, l)
	} else {
		for i, n := range nums {
			if (*used)[i] || i > 0 && nums[i] == nums[i-1] && !(*used)[i-1] {
				continue
			}
			(*used)[i] = true
			*tmpList = append(*tmpList, n)
			backtrack(list, tmpList, nums, used)
			// Undo the choice before trying the next candidate.
			(*used)[i] = false
			*tmpList = (*tmpList)[:len(*tmpList)-1]
		}
	}
}
<file_sep>/algorithms/_51_N_Queens/answer_test.go
package _51_N_Queens
import "testing"
// TestSolveNQueens logs the solutions for n = 1 and n = 4 (expected 1
// and 2 solutions respectively); results are not asserted.
func TestSolveNQueens(t *testing.T) {
	var ret [][]string
	ret = solveNQueens(1)
	t.Log(ret)
	ret = solveNQueens(4)
	t.Log(ret)
}
<file_sep>/algorithms/_713_Subarray_Product_Less_Than_K/answer.go
package _713_Subarray_Product_Less_Than_K
// numSubarrayProductLessThanK counts contiguous subarrays whose element
// product is strictly less than k.
func numSubarrayProductLessThanK(nums []int, k int) int {
	return numSubarrayProductLessThanKWithSlidingWindow(nums, k)
}
// numSubarrayProductLessThanKWithSlidingWindow counts the subarrays
// with product < k using a sliding window: each time the right edge
// advances to `right`, the window [left, right] contributes
// right-left+1 new subarrays ending at `right`. O(n).
func numSubarrayProductLessThanKWithSlidingWindow(nums []int, k int) int {
	count, left, product := 0, 0, 1
	for right, v := range nums {
		product *= v
		// Shrink from the left until the window product drops below k.
		for product >= k && left <= right {
			product /= nums[left]
			left++
		}
		count += right - left + 1
	}
	return count
}
<file_sep>/algorithms/_530_Minimum_Absolute_Difference_in_BST/QD.md
Given the root of a Binary Search Tree (BST), return the minimum absolute difference between the values of any two different nodes in the tree.
Example 1:
```
Input: root = [4,2,6,1,3]
Output: 1
```
Example 2:
```
Input: root = [1,0,48,null,null,12,49]
Output: 1
```
Constraints:
- The number of nodes in the tree is in the range [2, 10^4].
- 0 <= Node.val <= 10^5
Note:
- This question is the same as 783: https://leetcode.com/problems/minimum-distance-between-bst-nodes/
----
思路:<br>
BST其实是有序的,任意两个节点的最小差一定来自于相邻有序两个节点。<br>
因此,按照搜索序遍历找最小即可。
也可以遍历后放到有序数组,在数组中遍历,但是数组空间实际上没有必要。在遍历过程中即可计算出来。
<file_sep>/algorithms/_965_Univalued_Binary_Tree/QD.md
A binary tree is univalued if every node in the tree has the same value.
Return true if and only if the given tree is univalued.
Example 1:
```
Input: [1,1,1,1,1,null,1]
Output: true
```
Example 2:
```
Input: [2,2,2,5,2]
Output: false
```
<file_sep>/algorithms/_34_Find_First_and_Last_Position_of_Element_in_Sorted_Array/answer.go
package _34_Find_First_and_Last_Position_of_Element_in_Sorted_Array
// searchRange returns the first and last indices of target in the
// sorted slice nums, or [-1, -1] when absent, via two binary searches.
func searchRange(nums []int, target int) []int {
	if len(nums) == 0 {
		return []int{-1, -1}
	}
	head, tail := 0, len(nums)-1
	ret := []int{
		FindFirst(nums, target, head, tail, -1),
		FindLast(nums, target, head, tail, -1),
	}
	return ret
}
// FindFirst recursively binary-searches nums within [head, tail] for
// the leftmost index of target; lastPos carries the best hit so far and
// is returned (possibly -1) once the range empties.
func FindFirst(nums []int, target, head, tail, lastPos int) int {
	if head > tail {
		return lastPos
	}
	mid := (head + tail) / 2
	switch {
	case nums[mid] == target:
		// Record the hit and keep looking to its left.
		return FindFirst(nums, target, head, mid-1, mid)
	case nums[mid] > target:
		return FindFirst(nums, target, head, mid-1, lastPos)
	default:
		return FindFirst(nums, target, mid+1, tail, lastPos)
	}
}
// FindLast recursively binary-searches nums within [head, tail] for the
// rightmost index of target; lastPos carries the best hit so far and is
// returned (possibly -1) once the range empties.
func FindLast(nums []int, target, head, tail, lastPos int) int {
	if head > tail {
		return lastPos
	}
	mid := (head + tail) / 2
	switch {
	case nums[mid] == target:
		// Record the hit and keep looking to its right.
		return FindLast(nums, target, mid+1, tail, mid)
	case nums[mid] > target:
		return FindLast(nums, target, head, mid-1, lastPos)
	default:
		return FindLast(nums, target, mid+1, tail, lastPos)
	}
}
<file_sep>/algorithms/_199_Binary_Tree_Right_Side_View/answer.go
package _199_Binary_Tree_Right_Side_View
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// TreeNode is a binary tree node.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode // fixed: field type was misspelled *TreeNod, which does not compile
}
// rightSideView returns the value of the rightmost node on each level,
// top to bottom — what an observer standing to the tree's right sees.
func rightSideView(root *TreeNode) []int {
	if root == nil {
		return []int{}
	}
	ret := []int{}
	level := []*TreeNode{root}
	for len(level) != 0 {
		// The last node of the current level is the visible one.
		ret = append(ret, level[len(level)-1].Val)
		nextLevel := []*TreeNode{}
		for _, n := range level {
			if n.Left != nil {
				nextLevel = append(nextLevel, n.Left)
			}
			if n.Right != nil {
				nextLevel = append(nextLevel, n.Right)
			}
		}
		// Advance once per level. The original performed this assignment
		// inside the inner loop, which only worked because `range`
		// snapshots the slice — hoisted here for correctness clarity.
		level = nextLevel
	}
	return ret
}
<file_sep>/algorithms/_1260_Shift_2D_Grid/answer.go
package _1260_Shift_2D_Grid
// shiftGrid moves every value of grid k positions forward in row-major
// order, wrapping from the last cell back to the first, and returns a
// new grid of the same shape. Returns nil for an empty grid.
func shiftGrid(grid [][]int, k int) [][]int {
	if len(grid) == 0 || len(grid[0]) == 0 { // bad case
		return nil
	}
	m, n := len(grid), len(grid[0])
	// Flatten row-major into one slice.
	flat := make([]int, 0, m*n)
	for _, row := range grid {
		flat = append(flat, row...)
	}
	// Rotate right by k (k taken modulo the total size, so k may exceed it).
	cut := len(flat) - k%len(flat)
	flat = append(flat[cut:], flat[:cut]...)
	// Rebuild the m x n grid. The original shadowed its flattened slice
	// variable `l` with the row index here; distinct names avoid that trap.
	ret := make([][]int, m)
	for i := range ret {
		ret[i] = make([]int, n)
	}
	for idx, v := range flat {
		ret[idx/n][idx%n] = v
	}
	return ret
}
<file_sep>/algorithms/_796_Rotate_String/answer.go
package _796_Rotate_String
// rotateString reports whether A can be turned into B by left rotations.
func rotateString(A string, B string) bool {
    if A == "" && B == "" {
        return true
    }
    // A rotation never changes length, so unequal lengths can never match.
    // (The original only rejected len(A) < len(B); the len(A) > len(B) case
    // fell through to a loop that could never succeed.)
    if len(A) != len(B) {
        return false
    }
    for i, s := range B {
        if s == rune(A[0]) {
            // Candidate split point: rotate B so position i comes first.
            tmpStr := B[i:] + B[0:i]
            if tmpStr == A {
                return true
            }
        }
    }
    return false
}
<file_sep>/algorithms/_300_Longest_Increasing_Subsequence/answer_test.go
package _300_Longest_Increasing_Subsequence
import "testing"
// TestLengthOfLIS checks the longest increasing subsequence length.
// The LIS here is {2, 3, 7, 19, 101}, so the expected length is 5; the old
// error message incorrectly said "should be 4".
func TestLengthOfLIS(t *testing.T) {
    if ret := lengthOfLIS([]int{10, 9, 2, 5, 3, 7, 19, 101, 18}); ret != 5 {
        t.Errorf("should be 5, wrong length with %d", ret)
    }
}
// TestFindPos checks findPos (defined elsewhere in this package) against
// three small sorted slices.
// NOTE(review): expected values suggest lower-bound semantics (index where
// target belongs) — confirm against findPos's contract.
func TestFindPos(t *testing.T) {
    if pos := findPos([]int{1, 2, 3}, 1); pos != 0 {
        t.Errorf("wrong pos with %d", pos)
    }
    if pos := findPos([]int{1, 2, 3}, 2); pos != 1 {
        t.Errorf("wrong pos with %d", pos)
    }
    if pos := findPos([]int{1, 2, 4}, 3); pos != 2 {
        t.Errorf("wrong pos with %d", pos)
    }
}
<file_sep>/algorithms/_202_Happy_Number/answer.go
package _202_Happy_Number
// isHappy reports whether n is a happy number: repeatedly replacing n with
// the sum of the squares of its digits eventually reaches 1.
func isHappy(n int) bool {
    if n == 1 {
        return true
    }
    return isHappyA(n, []int{})
}

// isHappyA iterates the digit-square-sum map, using record to detect cycles
// (a repeated value means the sequence loops and never reaches 1).
func isHappyA(n int, record []int) bool {
    next := happy(n)
    if next == 1 {
        return true
    }
    for _, seen := range record {
        if next == seen {
            return false
        }
    }
    return isHappyA(next, append(record, next))
}

// happy returns the sum of the squares of n's decimal digits.
func happy(n int) int {
    total := 0
    for {
        d := n % 10
        total += d * d
        n = n / 10
        if n == 0 {
            return total
        }
    }
}
<file_sep>/algorithms/_316_Remove_Duplicate_Letters/answer_test.go
package _316_Remove_Duplicate_Letters
import (
"strings"
"testing"
)
// testCase pairs an input string with the expected result of
// removeDuplicateLetters.
type testCase struct {
    input string  // argument passed to removeDuplicateLetters
    output string // expected return value
}
// TestRemoveDuplicateLetters verifies removeDuplicateLetters (defined
// elsewhere in this package) against the two classic LeetCode 316 examples.
func TestRemoveDuplicateLetters(t *testing.T) {
    cases := []testCase{
        {
            input: "bcabc",
            output: "abc",
        },
        {
            input: "cbacdcbc",
            output: "acdb",
        },
    }
    for _, c := range cases {
        // EqualFold is case-insensitive; inputs here are all lowercase anyway.
        if x := removeDuplicateLetters(c.input); !strings.EqualFold(x, c.output) {
            t.Errorf("output should be \"%s\" instead of \"%s\" with input=\"%s\"", c.output, x, c.input)
        }
    }
}
<file_sep>/algorithms/_4_Median_of_Two_Sorted_Arrays/answer.go
package _4_Median_of_Two_Sorted_Arrays
import (
"math"
)
// findMedianSortedArrays returns the median of two sorted arrays in
// O(log(min(m,n))): binary-search a cut i in the shorter array (with the
// matching cut j = half_len - i in the longer one) so that every element
// left of the cuts is <= every element right of them.
func findMedianSortedArrays(nums1 []int, nums2 []int) float64 {
    m, n := len(nums1), len(nums2)
    // Keep nums1 the shorter array so the search space is minimal and
    // j = half_len - i stays within nums2.
    if m > n {
        nums1, nums2, m, n = nums2, nums1, n, m
    }
    // Here m <= n, so n == 0 implies both arrays are empty.
    if n == 0 {
        return 0
    }
    imin, imax, half_len := 0, m, (m+n+1)/2
    var max_of_left, min_of_right int
    var ret float64
    var i, j int
    for {
        if imin > imax {
            break
        }
        i = (imin + imax) / 2
        j = half_len - i
        if i < m && nums2[j-1] > nums1[i] {
            // Cut in nums1 is too far left.
            imin = i + 1
        } else if i > 0 && nums1[i-1] > nums2[j] {
            // Cut in nums1 is too far right.
            imax = i - 1
        } else {
            // Valid partition found: compute the median from the boundary values.
            if i == 0 {
                max_of_left = nums2[j-1]
            } else if j == 0 {
                max_of_left = nums1[i-1]
            } else {
                max_of_left = int(math.Max(float64(nums1[i-1]), float64(nums2[j-1])))
            }
            if (m+n)%2 == 1 {
                // Odd total length: the median is the max of the left half.
                ret = float64(max_of_left)
                break
            }
            if i == m {
                min_of_right = nums2[j]
            } else if j == n {
                min_of_right = nums1[i]
            } else {
                min_of_right = int(math.Min(float64(nums1[i]), float64(nums2[j])))
            }
            ret = float64((max_of_left + min_of_right)) / 2
            break
        }
    }
    return ret
}
<file_sep>/algorithms/_121_Best_Time_to_Buy_and_Sell_Stock/answer.go
package _121_Best_Time_to_Buy_and_Sell_Stock
import (
"math"
)
// maxProfit returns the best profit from a single buy/sell (LeetCode 121).
func maxProfit(prices []int) int {
    return maxProfitOnce(prices)
}

// maxProfitOnce: Kadane-style scan over daily price deltas, O(n).
// A running sum of deltas that drops below zero is reset, since starting
// fresh at the current day is always at least as good.
func maxProfitOnce(prices []int) int {
    var cur, best int
    for day := 1; day < len(prices); day++ {
        cur += prices[day] - prices[day-1]
        if cur < 0 {
            cur = 0
        }
        if cur > best {
            best = cur
        }
    }
    return best
}

// maxProfitForce: brute-force check of every buy/sell pair, O(n^2).
// Works, but too slow for leetcode's time limit; kept for reference.
func maxProfitForce(prices []int) int {
    best := 0
    for buy := range prices {
        for sell := buy; sell < len(prices); sell++ {
            if profit := prices[sell] - prices[buy]; profit > best {
                best = profit
            }
        }
    }
    return best
}
<file_sep>/algorithms/_589_N-ary_Tree_Preorder_Traversal/answer.go
package _589_N_ary_Tree_Preorder_Traversal
type Node struct {
Val int
Children []*Node
}
func preorder(root *Node) []int {
//return preOrderR(root)
return preOrderI(root)
}
// 递归解法
func preOrderR(root *Node) []int {
var ret []int
if root == nil {
return ret
}
ret = append(ret, root.Val)
for _, n := range root.Children {
ret = append(ret, preOrderR(n)...)
}
return ret
}
// 迭代解法
func preOrderI(root *Node) []int {
if root == nil {
return []int{}
}
var (
ret []int
stack = []*Node{root}
)
for len(stack) > 0 {
node := stack[len(stack)-1]
stack = stack[0 : len(stack)-1]
ret = append(ret, node.Val)
for i := len(node.Children) - 1; i >= 0; i-- {
stack = append(stack, node.Children[i])
}
}
return ret
}
<file_sep>/algorithms/_74_Search_a_2D_Matrix/answer_test.go
package _74_Search_a_2D_Matrix
import "testing"
// TestIdxToPos checks that flat index 0 maps to matrix position (0, 0).
// Fixed: the condition must use || — with the original &&, the test silently
// passed whenever only ONE of the two coordinates was wrong.
func TestIdxToPos(t *testing.T) {
    var (
        i, j int
    )
    if i, j = idxToPos(0, 10); i != 0 || j != 0 {
        t.Errorf("wrong ret with i=%d, j=%d", i, j)
    }
}
// TestSearchMatrix exercises searchMatrix (defined elsewhere in this
// package) with the two LeetCode 74 examples plus 1x1 and 1x2 edge cases.
func TestSearchMatrix(t *testing.T) {
    var (
        matrix [][]int
        target int
    )
    matrix, target = [][]int{{1, 3, 5, 7}, {10, 11, 16, 20}, {23, 30, 34, 60}}, 3
    if !searchMatrix(matrix, target) {
        t.Errorf("should be true")
    }
    matrix, target = [][]int{{1, 3, 5, 7}, {10, 11, 16, 20}, {23, 30, 34, 60}}, 13
    if searchMatrix(matrix, target) {
        t.Errorf("should be false")
    }
    matrix, target = [][]int{{1}}, 1
    if !searchMatrix(matrix, target) {
        t.Errorf("should be true")
    }
    matrix, target = [][]int{{1, 3}}, 3
    if !searchMatrix(matrix, target) {
        t.Errorf("should be true")
    }
}
<file_sep>/algorithms/_74_Search_a_2D_Matrix/QD.md
Write an efficient algorithm that searches for a value in an m x n matrix. This matrix has the following properties:
Integers in each row are sorted from left to right.
The first integer of each row is greater than the last integer of the previous row.
Example 1:
Input: matrix = [[1,3,5,7],[10,11,16,20],[23,30,34,60]], target = 3
Output: true
Example 2:
Input: matrix = [[1,3,5,7],[10,11,16,20],[23,30,34,60]], target = 13
Output: false
Constraints:
m == matrix.length
n == matrix[i].length
1 <= m, n <= 100
-10^4 <= matrix[i][j], target <= 10^4
----
具有有序性,用二分解。
其实重点考察二分写法。
思路1:<br>
可以先找行,再找列,但是效率不如直接二分<br>
思路2:<br>
可以把二维数组读到一个一维数组中,但是空间占用不是很好<br>
思路3:<br>
将idx直接转化为i,j的访问,多一步计算即可,还是直接二分,不用额外空间。<file_sep>/algorithms/_39_Combination_Sum/answer_test.go
package _39_Combination_Sum
import "testing"
// TestCombinationSum runs combinationSum (defined elsewhere in this package)
// on one example and logs the result.
// TODO(review): this has no assertion — compare against the expected
// combinations instead of only t.Log.
func TestCombinationSum(t *testing.T) {
    var (
        candidates []int
        target int
        ret [][]int
    )
    candidates = []int{2, 3, 5}
    target = 8
    ret = combinationSum(candidates, target)
    t.Log(ret)
}
<file_sep>/algorithms/_876_Middle_of_the_Linked_List/answer.go
package _876_Middle_of_the_Linked_List
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// middleNode returns the middle node of the list; for an even-length list
// it returns the second of the two middle nodes (slow/fast pointer scheme,
// with slow stepping on every other advance of fast).
func middleNode(head *ListNode) *ListNode {
    if head == nil {
        return nil
    }
    slow, fast := head, head
    advance := true // slow moves only on alternating iterations
    for fast.Next != nil {
        if advance {
            slow = slow.Next
        }
        advance = !advance
        fast = fast.Next
    }
    return slow
}
<file_sep>/algorithms/_46_Permutations/answer.go
package _46_Permutations
func permute(nums []int) [][]int {
ret := [][]int{}
backtrack(&ret, &[]int{}, nums)
return ret
}
func backtrack(list *[][]int, tmpList *[]int, nums []int) {
if len(*tmpList) == len(nums) {
l := []int{}
for _, v := range *tmpList {
l = append(l, v)
}
*list = append(*list, l)
} else {
for _, num := range nums {
if intInList(num, *tmpList) {
continue
}
*tmpList = append(*tmpList, num)
backtrack(list, tmpList, nums)
*tmpList = (*tmpList)[:len(*tmpList)-1]
}
}
}
func intInList(num int, list []int) bool {
for _, i := range list {
if i == num {
return true
}
}
return false
}
<file_sep>/algorithms/_40_Combination_Sum_2/answer.go
package _40_Combination_Sum_2
import (
"fmt"
"sort"
)
// combinationSum2 returns all unique combinations of candidates that sum to
// target; each candidate may be used at most once (LeetCode 40).
func combinationSum2(candidates []int, target int) [][]int {
    var (
        result [][]int
        resultM = make(map[string]struct{}) // dedup set keyed by the joined path
    )
    // Sorting groups equal values so duplicates can be skipped in the recursion.
    sort.Ints(candidates)
    combinationSumR(candidates, target, 0, []int{}, &result, resultM)
    return result
}

// combinationSumR explores candidates[idx:], accumulating the current choice
// sequence in path; complete combinations are copied into result and
// deduplicated via resultM.
func combinationSumR(candidates []int, target int, idx int, path []int, result *[][]int, resultM map[string]struct{}) {
    if target < 0 {
        return
    }
    if target == 0 {
        // Copy path (its backing array keeps mutating) and build the dedup key.
        x := make([]int, len(path))
        key := ""
        for idx, v := range path {
            x[idx] = v
            key += fmt.Sprintf("%d,", v)
        }
        if _, ok := resultM[key]; !ok {
            *result = append(*result, x)
            resultM[key] = struct{}{}
        }
        return
    }
    for i := idx; i < len(candidates); i++ {
        x := candidates[i]
        // Skip equal siblings at the same depth to avoid duplicate combinations.
        if i > idx && candidates[i] == candidates[i-1] {
            continue
        }
        combinationSumR(candidates, target-x, i+1, append(path, x), result, resultM)
    }
}
<file_sep>/algorithms/_306_Additive_Number/answer_test.go
package _306_Additive_Number
import "testing"
// TestIsAdditiveNumber exercises isAdditiveNumber (defined elsewhere in this
// package). The last case straddles the int32 boundary
// (2147483647 + 2147483648) to catch overflow bugs.
func TestIsAdditiveNumber(t *testing.T) {
    if !isAdditiveNumber("112358") {
        t.Errorf("should be true")
    }
    if !isAdditiveNumber("199100199") {
        t.Errorf("should be true")
    }
    if isAdditiveNumber("1023") {
        t.Errorf("should be false")
    }
    if isAdditiveNumber("1203") {
        t.Errorf("should be false")
    }
    if !isAdditiveNumber("121474836472147483648") {
        t.Errorf("should be true")
    }
}
<file_sep>/algorithms/_88_Merge_Sorted_Array/answer.go
package _88_Merge_Sorted_Array
func merge(nums1 []int, m int, nums2 []int, n int) {
i, j, k := m-1, n-1, m+n-1
for i > -1 && j > -1 {
if nums1[i] > nums2[j] {
nums1[k] = nums1[i]
i--
} else {
nums1[k] = nums2[j]
j--
}
k--
}
for j > -1 {
nums1[k] = nums2[j]
k--
j--
}
}
//This is not the solution of the 88 question.
// mergeTwoArrayIntoRet merges two sorted slices into a fresh slice.
// NOTE(review): the final `nums1 = ret` only rebinds the local slice header —
// the caller never observes the merged data, so this function has no
// external effect. Kept for reference only.
func mergeTwoArrayIntoRet(nums1 []int, m int, nums2 []int, n int) {
    var i, j int
    var ret []int
    for {
        if i == m || j == n {
            break
        }
        if nums1[i] < nums2[j] {
            ret = append(ret, nums1[i])
            i++
        } else if nums1[i] > nums2[j] {
            ret = append(ret, nums2[j])
            j++
        } else {
            // Equal heads: take one element from each input.
            ret = append(ret, nums1[i], nums2[j])
            i++
            j++
        }
    }
    // Append whichever input still has elements left.
    if i == m && j < n {
        ret = append(ret, nums2[j:]...)
    } else if j == n && i < m {
        ret = append(ret, nums1[i:]...)
    }
    nums1 = ret
}
<file_sep>/algorithms/_240_Search_a_2D_Matrix_2/answer.go
package _240_Search_a_2D_Matrix_2
// searchMatrix reports whether target occurs in a matrix whose rows and
// columns are each sorted ascending (LeetCode 240). Starting at the
// top-right corner, moving left decreases the value and moving down
// increases it, giving O(m+n).
func searchMatrix(matrix [][]int, target int) bool {
    if len(matrix) == 0 || len(matrix[0]) == 0 { // empty / malformed input
        return false
    }
    rows, cols := len(matrix), len(matrix[0])
    r, c := 0, cols-1
    for r < rows && c >= 0 {
        switch v := matrix[r][c]; {
        case v == target:
            return true
        case v > target:
            c-- // everything below in this column is even larger
        default:
            r++ // everything left in this row is even smaller
        }
    }
    return false
}
<file_sep>/algorithms/_112_Path_Sum/answer.go
package _112_Path_Sum
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
type TreeNode struct {
    Val   int
    Left  *TreeNode
    Right *TreeNode
}

// hasPathSum reports whether some root-to-leaf path sums to sum.
func hasPathSum(root *TreeNode, sum int) bool {
    return pathSumRecursion(root, sum, nil)
}

// pathSumRecursion does the DFS. The cur parameter is kept for signature
// compatibility; the original accumulated the current path into it but never
// read it (appends on a by-value slice are invisible to the caller), so that
// dead code has been removed.
func pathSumRecursion(root *TreeNode, sum int, cur []int) bool {
    if root == nil {
        return false
    }
    // A leaf closes the path: it matches iff the remaining sum equals its value.
    if root.Left == nil && root.Right == nil && sum == root.Val {
        return true
    }
    return pathSumRecursion(root.Left, sum-root.Val, cur) ||
        pathSumRecursion(root.Right, sum-root.Val, cur)
}
<file_sep>/algorithms/_144_Binary_Tree_Preorder_Traversal/answer.go
package _144_Binary_Tree_Preorder_Traversal
type TreeNode struct {
Val int
Left *TreeNode
Right *TreeNode
}
func preorderTraversal(root *TreeNode) []int {
// return preorderTraversalRecursion(root)
return preorderTraversalUnrecursion(root)
}
func preorderTraversalUnrecursion(root *TreeNode) []int {
var (
ret = []int{}
l = []*TreeNode{root}
)
if root == nil {
return ret
}
for len(l) > 0 {
node := l[len(l)-1]
ret = append(ret, node.Val)
l = l[:len(l)-1]
if node.Right != nil {
l = append(l, node.Right)
}
if node.Left != nil {
l = append(l, node.Left)
}
}
return ret
}
func preorderTraversalRecursion(n *TreeNode) []int {
var (
ret = []int{}
)
if n == nil {
return ret
}
ret = append(ret, n.Val)
if n.Left != nil {
ret = append(ret, preorderTraversalRecursion(n.Left)...)
}
if n.Right != nil {
ret = append(ret, preorderTraversalRecursion(n.Right)...)
}
return ret
}
<file_sep>/algorithms/_234_Palindrome_Linked_List/answer.go
package _234_Palindrome_Linked_List
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
type ListNode struct {
    Val  int
    Next *ListNode
}

// isPalindrome reports whether the list reads the same in both directions.
// Strategy: find the middle, reverse the second half in place, then walk the
// two halves in lockstep. O(n) time, O(1) extra space. Note the list is left
// split/reversed afterwards, matching the original implementation.
func isPalindrome(head *ListNode) bool {
    if head == nil || head.Next == nil {
        return true
    }
    // slow ends on the last node of the first half.
    slow, fast := head, head.Next
    for fast != nil && fast.Next != nil {
        slow = slow.Next
        fast = fast.Next.Next
    }
    // Detach and reverse the second half.
    var reversed *ListNode
    node := slow.Next
    slow.Next = nil
    for node != nil {
        next := node.Next
        node.Next = reversed
        reversed = node
        node = next
    }
    // Compare the halves pairwise.
    for l, r := head, reversed; l != nil && r != nil; l, r = l.Next, r.Next {
        if l.Val != r.Val {
            return false
        }
    }
    return true
}
<file_sep>/algorithms/_409_Longest_Palindrome/answer.go
package _409_Longest_Palindrome
// longestPalindrome returns the length of the longest palindrome that can be
// built from the characters of s (case-sensitive).
func longestPalindrome(s string) int {
    return longestPalindromeMapCount(s)
}

// longestPalindromeMapCount counts character frequencies: every pair of a
// character can be used, plus at most one odd character in the middle.
func longestPalindromeMapCount(s string) int {
    counts := make(map[rune]int)
    for _, ch := range s {
        counts[ch]++
    }
    length, haveOdd := 0, false
    for _, c := range counts {
        length += c / 2 * 2 // usable pairs
        if c%2 != 0 {
            haveOdd = true
        }
    }
    if haveOdd {
        length++ // one odd character may sit in the center
    }
    return length
}
<file_sep>/algorithms/_53_Maximum_Subarray/answer_test.go
package _53_Maximum_Subarray
import (
"testing"
)
// TestMaxSubArray checks maxSubArray (defined elsewhere in this package) on
// the classic example; the best subarray is [4,-1,2,1] with sum 6.
func TestMaxSubArray(t *testing.T) {
    ret := maxSubArray([]int{-2, 1, -3, 4, -1, 2, 1, -5, 4})
    if ret != 6 {
        t.Error("not 6, is", ret)
    }
}
<file_sep>/algorithms/_680_Valid_Palindrome_2/answer.go
package _680_Valid_Palindrome_2
// validPalindrome reports whether s can become a palindrome after deleting
// at most one character (LeetCode 680).
func validPalindrome(s string) bool {
    if len(s) < 2 {
        return true
    }
    lo, hi := 0, len(s)-1
    for lo < hi {
        if s[lo] != s[hi] {
            // One deletion allowed: drop either end of the mismatch and
            // require the remainder to be a strict palindrome.
            return palinRange(s, lo+1, hi) || palinRange(s, lo, hi-1)
        }
        lo++
        hi--
    }
    return true
}

// palinRange reports whether s[lo..hi] is a palindrome.
func palinRange(s string, lo, hi int) bool {
    for lo < hi {
        if s[lo] != s[hi] {
            return false
        }
        lo++
        hi--
    }
    return true
}
<file_sep>/algorithms/_8_String_to_Integer/answer.go
package _8_String_to_Integer
import (
"math"
"strings"
)
// myAtoi parses a leading integer from str per LeetCode 8: skip leading
// spaces, accept one optional sign, read digits until a non-digit, and clamp
// the result into the int32 range.
func myAtoi(str string) int {
    if strings.TrimSpace(str) == "" {
        return 0
    }
    i := 0
    for i < len(str) && str[i] == ' ' {
        i++
    }
    var sign int64 = 1
    switch str[i] {
    case '-':
        sign = -1
        i++
    case '+':
        i++
    }
    var val int64
    for ; i < len(str) && str[i] >= '0' && str[i] <= '9'; i++ {
        val = val*10 + int64(str[i]-'0')
        if val > math.MaxInt32 {
            // Clamp and stop: further digits cannot change the outcome.
            if sign == -1 {
                return math.MinInt32
            }
            return math.MaxInt32
        }
    }
    return int(sign * val)
}
<file_sep>/algorithms/_101_Symmetric_Tree/answer.go
package _101_Symmetric_Tree
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
type TreeNode struct {
    Val   int
    Left  *TreeNode
    Right *TreeNode
}

// isSymmetric reports whether the tree is a mirror image of itself.
func isSymmetric(root *TreeNode) bool {
    return isSymmetricRecursively(root)
    // return isSymmetricIteratively(root)
}

// isSymmetricRecursively compares the two subtrees of the root as mirrors.
func isSymmetricRecursively(root *TreeNode) bool {
    if root == nil {
        return true
    }
    return compareRoot(root.Left, root.Right)
}

// compareRoot reports whether leftTree and rightTree are mirror images:
// equal values, with left/right children swapped at every level.
func compareRoot(leftTree, rightTree *TreeNode) bool {
    switch {
    case leftTree == nil:
        return rightTree == nil
    case rightTree == nil:
        return false
    case leftTree.Val != rightTree.Val:
        return false
    default:
        return compareRoot(leftTree.Left, rightTree.Right) &&
            compareRoot(leftTree.Right, rightTree.Left)
    }
}
// iteratively
// isSymmetricIteratively walks two stacks in lockstep: leftStack traverses
// the left subtree while rightStack traverses the right subtree in mirrored
// order; the tree is symmetric iff the walks agree at every step.
func isSymmetricIteratively(root *TreeNode) bool {
    if root == nil {
        return true
    }
    if root.Left == nil && root.Right == nil {
        return true
    }
    if (root.Left == nil && root.Right != nil) || (root.Right == nil && root.Left != nil) {
        return false
    }
    leftStack := Stack{}
    rightStack := Stack{}
    leftStack.Push(root.Left)
    rightStack.Push(root.Right)
    for {
        if leftStack.IsEmpty() && rightStack.IsEmpty() { // quit the loop
            break
        }
        leftTree, rightTree := leftStack.Pop(), rightStack.Pop()
        if leftTree.Val != rightTree.Val {
            return false
        }
        // Shape mismatch between mirrored child positions.
        if (leftTree.Left == nil && rightTree.Right != nil) ||
            (leftTree.Left != nil && rightTree.Right == nil) ||
            (leftTree.Right == nil && rightTree.Left != nil) ||
            (leftTree.Right != nil && rightTree.Left == nil) {
            return false
        }
        // Push mirrored pairs together so pops stay in lockstep.
        if leftTree.Right != nil {
            leftStack.Push(leftTree.Right)
            rightStack.Push(rightTree.Left)
        }
        if leftTree.Left != nil {
            leftStack.Push(leftTree.Left)
            rightStack.Push(rightTree.Right)
        }
    }
    return true
}
// Stack is a LIFO stack of tree nodes backed by a slice.
type Stack struct {
    stack []*TreeNode
}

// Push places node on top of the stack.
func (s *Stack) Push(node *TreeNode) {
    s.stack = append(s.stack, node)
}

// Pop removes and returns the top node; panics if the stack is empty.
func (s *Stack) Pop() (node *TreeNode) {
    last := len(s.stack) - 1
    node = s.stack[last]
    s.stack = s.stack[:last]
    return
}

// IsEmpty reports whether the stack holds no nodes.
func (s *Stack) IsEmpty() bool {
    return len(s.stack) == 0
}

// Len returns the number of stacked nodes.
func (s *Stack) Len() int {
    return len(s.stack)
}
<file_sep>/algorithms/_6_ZigZag_Conversion/answer_test.go
package _6_ZigZag_Conversion
import (
"testing"
)
// TestConvert checks convert (defined elsewhere in this package) on the
// LeetCode 6 zigzag example with 3 rows.
func TestConvert(t *testing.T) {
    if ret := convert("PAYPALISHIRING", 3); ret != "PAHNAPLSIIGYIR" {
        t.Error("err ret with test1.")
    }
}
<file_sep>/algorithms/_862_Shortest_Subarray_with_Sum_at_Least_K/answer_test.go
package _862_Shortest_Subarray_with_Sum_at_Least_K
import "testing"
// TestShortestSubarray checks shortestSubarray (defined elsewhere in this
// package) on the trivial single-element case.
func TestShortestSubarray(t *testing.T) {
    var res int
    if res = shortestSubarray([]int{1}, 1); res != 1 {
        t.Errorf("wrong res=%d", res)
    }
}
<file_sep>/algorithms/_146_LRU_Cache/answer.go
package _146_LRU_Cache
import "sync"
// Node is a doubly linked list element holding one cached key/value pair.
type Node struct {
    Key int
    Value int
    Next *Node
    Pre *Node
}

// LRUCache is a mutex-guarded LRU cache. Recency order lives in the doubly
// linked list between DummyHead (most-recent side) and DummyTail
// (least-recent side); StoreMap mirrors key -> value for O(1) lookups.
// Note: list lookups by key are O(n) walks — see Get/Put.
type LRUCache struct {
    Mutex sync.Mutex
    Capacity int
    DummyHead *Node
    DummyTail *Node
    StoreMap map[int]int
}
// Constructor builds an LRUCache with the given capacity, wiring the two
// sentinel nodes into an empty circular doubly linked list.
func Constructor(capacity int) LRUCache {
    head := &Node{}
    tail := &Node{
        Next: head,
        Pre:  head,
    }
    head.Next, head.Pre = tail, tail
    return LRUCache{
        StoreMap:  make(map[int]int),
        Capacity:  capacity,
        DummyHead: head,
        DummyTail: tail,
        Mutex:     sync.Mutex{},
    }
}
// Get returns the value cached for key, promoting the entry to the
// most-recently-used position, or -1 when the key is absent.
func (this *LRUCache) Get(key int) int {
    this.Mutex.Lock()
    defer this.Mutex.Unlock()
    if x, ok := this.StoreMap[key]; ok {
        // Promote: walk the list to the entry, insert a copy right after
        // DummyHead, then unlink the old node.
        tmpNode := this.DummyHead.Next
        for {
            if tmpNode.Key == key {
                dummy := &Node{
                    Key: tmpNode.Key,
                    Value: tmpNode.Value,
                    Next: this.DummyHead.Next,
                    Pre: this.DummyHead,
                }
                this.DummyHead.Next.Pre = dummy
                this.DummyHead.Next = dummy
                tmpNode.Pre.Next = tmpNode.Next
                tmpNode.Next.Pre = tmpNode.Pre
                tmpNode = nil
                break
            }
            tmpNode = tmpNode.Next
        }
        return x
    }
    return -1
}
// Put inserts or updates key; when the cache is full, the least-recently-used
// entry (the node just before DummyTail) is evicted first.
func (this *LRUCache) Put(key int, value int) {
    this.Mutex.Lock()
    defer this.Mutex.Unlock()
    if _, ok := this.StoreMap[key]; ok { // key already cached
        // Unlink the existing node, then re-insert at the front via addNode.
        tmp := this.DummyHead.Next
        for {
            if tmp.Key == key {
                tmp.Next.Pre = tmp.Pre
                tmp.Pre.Next = tmp.Next
                tmp = nil
                break
            }
            tmp = tmp.Next
        }
        this.addNode(key, value)
    } else {
        if len(this.StoreMap) < this.Capacity { // room left: just add
            this.addNode(key, value)
        } else { // at capacity: evict before adding
            // The node just before DummyTail is the least recently used.
            del := this.DummyTail.Pre
            del.Pre.Next = del.Next
            del.Next.Pre = del.Pre
            delete(this.StoreMap, del.Key)
            del = nil
            // Insert the new entry at the front.
            this.addNode(key, value)
        }
    }
}
// addNode links a fresh node right after DummyHead (the most-recently-used
// position) and records the value in StoreMap. Caller must hold Mutex.
func (this *LRUCache) addNode(key, value int) {
    node := &Node{
        Key:   key,
        Value: value,
        Pre:   this.DummyHead,
        Next:  this.DummyHead.Next,
    }
    this.DummyHead.Next.Pre = node
    this.DummyHead.Next = node
    this.StoreMap[key] = value
}
<file_sep>/algorithms/_208_Implement_Trie_Prefix_Tree/answer.go
package _208_Implement_Trie_Prefix_Tree
// Node is one trie node.
type Node struct {
    isStr bool      // true when the root-to-here path spells a stored word
    next  [26]*Node // children indexed by letter-'a'; nil means absent
}

// Trie is a prefix tree over lowercase ASCII words (LeetCode 208).
type Trie struct {
    root *Node
}

// Constructor returns an empty Trie.
func Constructor() Trie {
    return Trie{root: &Node{}}
}

// Insert adds word (lowercase a-z) to the trie.
func (this *Trie) Insert(word string) {
    cur := this.root
    for _, ch := range word {
        i := ch - 'a'
        if cur.next[i] == nil {
            cur.next[i] = &Node{}
        }
        cur = cur.next[i]
    }
    cur.isStr = true
}

// Search reports whether word was previously inserted as a complete word.
func (this *Trie) Search(word string) bool {
    cur := this.root
    for _, ch := range word {
        i := ch - 'a'
        if cur.next[i] == nil {
            return false
        }
        cur = cur.next[i]
    }
    return cur.isStr
}

// StartsWith reports whether any inserted word begins with prefix.
func (this *Trie) StartsWith(prefix string) bool {
    cur := this.root
    for _, ch := range prefix {
        i := ch - 'a'
        if cur.next[i] == nil {
            return false
        }
        cur = cur.next[i]
    }
    return true
}
/**
* Your Trie object will be instantiated and called as such:
* obj := Constructor();
* obj.Insert(word);
* param_2 := obj.Search(word);
* param_3 := obj.StartsWith(prefix);
*/
<file_sep>/algorithms/_124_Binary_Tree_Maximum_Path_Sum/answer.go
package _124_Binary_Tree_Maximum_Path_Sum
import "math"
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// maxPathSum returns the maximum path sum in the tree, where a path may
// start and end at any nodes (LeetCode 124).
func maxPathSum(root *TreeNode) int {
    best := math.MinInt32
    calc(root, &best)
    return best
}

// calc returns the best downward path sum starting at node (negative
// branches are dropped), while updating *max with the best path that bends
// through node (left + node + right).
func calc(node *TreeNode, max *int) int {
    if node == nil {
        return 0
    }
    left := calc(node.Left, max)
    if left < 0 {
        left = 0
    }
    right := calc(node.Right, max)
    if right < 0 {
        right = 0
    }
    if through := left + right + node.Val; through > *max {
        *max = through
    }
    if left > right {
        return left + node.Val
    }
    return right + node.Val
}
<file_sep>/algorithms/_106_Construct_Binary_Tree_from_Inorder_and_Postorder_Traversal/answer.go
package _106_Construct_Binary_Tree_from_Inorder_and_Postorder_Traversal
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
type TreeNode struct {
    Val int
    Left *TreeNode
    Right *TreeNode
}

// buildTree reconstructs a binary tree from its inorder and postorder
// traversals (values assumed unique, per LeetCode 106).
func buildTree(inorder []int, postorder []int) *TreeNode {
    inorderLen := len(inorder)
    postLen := len(postorder)
    return buildTreeRecursion(inorder, 0, inorderLen-1, postorder, 0, postLen-1)
}

// buildTreeRecursion rebuilds the subtree spanning inorder[inStart..inEnd]
// and postorder[postStart..postEnd]. The subtree root is the last postorder
// element; its index in inorder splits left and right subtrees.
func buildTreeRecursion(inorder []int, inStart int, inEnd int, postorder []int, postStart int, postEnd int) *TreeNode {
    if inStart > inEnd || postStart > postEnd {
        return nil
    }
    var rootVal = postorder[postEnd]
    var rootIndex = 0
    // Locate the root value in inorder. Scanning from 0 (rather than
    // inStart) still finds the right index because values are unique.
    for i := 0; i <= inEnd; i++ {
        if inorder[i] == rootVal {
            rootIndex = i
            break
        }
    }
    length := rootIndex - inStart // size of the left subtree
    node := &TreeNode{
        Val: rootVal,
    }
    node.Right = buildTreeRecursion(inorder, rootIndex+1, inEnd, postorder, postStart+length, postEnd-1)
    node.Left = buildTreeRecursion(inorder, inStart, rootIndex-1, postorder, postStart, postStart+length-1)
    return node
}
<file_sep>/algorithms/_117_Populating_Next_Right_Pointers_in_Each_Node_2/answer.go
package _117_Populating_Next_Right_Pointers_in_Each_Node_2
// Node is a binary tree node with an extra Next pointer to its right
// neighbour on the same level.
type Node struct {
    Val int
    Left *Node
    Right *Node
    Next *Node
}

// connect populates each node's Next pointer (LeetCode 117).
func connect(root *Node) *Node {
    return connectIteration(root)
}

// connectIteration does a level-order traversal using a nil sentinel in the
// queue to mark the end of each level.
func connectIteration(root *Node) *Node {
    if root == nil {
        return root
    }
    var queue = []*Node{root, nil}
    for len(queue) > 0 {
        cur := queue[0]
        queue = queue[1:]
        if cur != nil {
            // queue[0] is either the next node on this level or the nil
            // sentinel — which correctly leaves Next == nil at a level end.
            cur.Next = queue[0]
            if cur.Left != nil {
                queue = append(queue, cur.Left)
            }
            if cur.Right != nil {
                queue = append(queue, cur.Right)
            }
        } else {
            // Level boundary reached.
            if len(queue) == 0 || queue[0] == nil {
                return root // no real nodes remain: done
            } else {
                queue = append(queue, nil) // re-arm the sentinel for the next level
            }
        }
    }
    return root
}
<file_sep>/algorithms/_118_Pascals_Triangle/answer.go
package _118_Pascals_Triangle
func generate(numRows int) [][]int {
var ret = [][]int{}
if numRows <= 0 {
return ret
}
for i := 1; i <= numRows; i++ {
tmp := []int{1}
if i != 1 {
for j := 1; j < i-1; j++ {
tmp = append(tmp, ret[i-2][j]+ret[i-2][j-1])
}
tmp = append(tmp, 1)
}
ret = append(ret, tmp)
}
return ret
}
<file_sep>/algorithms/_113_Path_Sum_2/answer.go
package _113_Path_Sum_2
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
type TreeNode struct {
Val int
Left *TreeNode
Right *TreeNode
}
func pathSum(root *TreeNode, sum int) [][]int {
ret := [][]int{}
cur := []int{}
pathSumRecursion(root, sum, cur, &ret)
return ret
}
func pathSumRecursion(root *TreeNode, sum int, cur []int, ret *[][]int) {
if root == nil {
return
}
cur = append(cur, root.Val)
if root.Left == nil && root.Right == nil && sum == root.Val {
cur1 := []int{}
for _, v := range cur {
cur1 = append(cur1, v)
}
*ret = append(*ret, cur1)
} else {
pathSumRecursion(root.Left, sum-root.Val, cur, ret)
pathSumRecursion(root.Right, sum-root.Val, cur, ret)
}
cur = cur[:len(cur)-1]
}
<file_sep>/algorithms/_204_Count_Primes/answer.go
package _204_Count_Primes
// countPrimes returns how many primes are strictly less than n.
func countPrimes(n int) int {
    return countPrimesMultiplication(n)
    // return countPrimesDivision(n)
}

// countPrimesMultiplication: sieve-style marking. Each unmarked value is a
// prime; its multiples are then flagged as composite.
func countPrimesMultiplication(n int) int {
    marks := make([]int, n) // 0 = unseen, 1 = prime (may be overwritten by the sweep), -1 = composite
    count := 0
    for p := 2; p < n; p++ {
        if marks[p] != 0 {
            continue
        }
        count++
        marks[p] = 1
        for mul := p; mul < n; mul += p {
            marks[mul] = -1
        }
    }
    return count
}

// countPrimesDivision: trial-division variant. Correct but O(n^2)-ish and
// times out on leetcode; kept for reference.
func countPrimesDivision(n int) int {
    marks := make([]int, n)
    count := 0
    for p := 2; p < n; p++ {
        if marks[p] != 0 {
            continue
        }
        count++
        marks[p] = 1
        for q := p + 1; q < n; q++ {
            if marks[q] == 0 && q%p == 0 {
                marks[q] = -1
            }
        }
    }
    return count
}
<file_sep>/algorithms/_7_Reverse_Integer/answer.go
package _7_Reverse_Integer
import (
"math"
)
// reverse returns x with its decimal digits reversed, or 0 if either the
// input or the result falls outside the signed 32-bit range (LeetCode 7).
func reverse(x int) int {
    if x > math.MaxInt32 || x < math.MinInt32 {
        return 0
    }
    result := 0
    for x != 0 {
        // Go's % keeps the sign of x, so negatives need no special casing.
        result = result*10 + x%10
        x /= 10
    }
    if result > math.MaxInt32 || result < math.MinInt32 {
        return 0
    }
    return result
}
<file_sep>/algorithms/_297_Serialize_and_Deserialize_Binary_Tree/answer.go
package _297_Serialize_and_Deserialize_Binary_Tree
import (
"fmt"
"strconv"
"strings"
)
type TreeNode struct {
    Val int
    Left *TreeNode
    Right *TreeNode
}

// Codec serializes/deserializes binary trees; it is stateless.
type Codec struct {
}

// Constructor returns a new Codec.
func Constructor() Codec {
    return Codec{}
}
// Serializes a tree to a single string.
// Pre-order and post-order encoders are kept below as commented alternatives;
// the active strategy is level-order (BFS).
func (this *Codec) serialize(root *TreeNode) string {
    //return preOrderSerialize(root)
    //return postOrderSerialize(root)
    return BFSSerialize(root)
}

// Deserializes your encoded data to tree.
// Must match the strategy chosen in serialize.
func (this *Codec) deserialize(data string) *TreeNode {
    //startCur := 0
    //return preOrderDeserialize(data, &startCur)
    //startCur := len(data) - 1
    //return postOrderDeserialize(data, &startCur)
    return BFSDeserialize(data)
}
// postOrderSerialize encodes the tree in post-order: each value is written
// as ",<val>" and a nil subtree contributes a bare comma.
func postOrderSerialize(root *TreeNode) string {
    var str string
    if root == nil {
        str += ","
        return str
    }
    str += postOrderSerialize(root.Left)
    str += postOrderSerialize(root.Right)
    str += fmt.Sprintf(",%d", root.Val)
    return str
}
// postOrderDeserialize decodes postOrderSerialize output, consuming the
// string from the back; *cur is the index of the last unread byte.
func postOrderDeserialize(data string, cur *int) *TreeNode {
    if string(data[*cur]) == "," {
        // Empty slot: nil subtree; step over the comma.
        *cur -= 1
        return nil
    }
    // Scan backwards to the comma that precedes this number.
    begin := *cur
    for begin >= 0 && string(data[begin]) != "," {
        begin--
    }
    num, _ := strconv.ParseInt(data[begin+1:*cur+1], 10, 64)
    node := &TreeNode{Val: int(num)}
    *cur = begin - 1
    // Post-order is (left, right, root): reading backwards yields root,
    // then the right subtree, then the left.
    node.Right = postOrderDeserialize(data, cur)
    node.Left = postOrderDeserialize(data, cur)
    return node
}
// preOrderSerialize encodes the tree in pre-order: each value is written as
// "<val>," and a nil subtree contributes a bare comma.
func preOrderSerialize(root *TreeNode) string {
    var str string
    if root == nil {
        str += ","
        return str
    }
    str += fmt.Sprintf("%d,", root.Val)
    str += preOrderSerialize(root.Left)
    str += preOrderSerialize(root.Right)
    return str
}
// preOrderDeserialize decodes preOrderSerialize output; *cur is the index of
// the next unread byte.
func preOrderDeserialize(data string, cur *int) *TreeNode {
    if string(data[*cur]) == "," { // empty slot: nil subtree
        *cur += 1
        return nil
    }
    // Scan forward to the comma terminating this number.
    tail := *cur
    for tail < len(data) && string(data[tail]) != "," {
        tail++
    }
    num, _ := strconv.ParseInt(data[*cur:tail], 10, 64)
    node := &TreeNode{Val: int(num)}
    *cur = tail + 1
    node.Left = preOrderDeserialize(data, cur)
    node.Right = preOrderDeserialize(data, cur)
    return node
}
// BFSSerialize encodes the tree level by level: a present node emits
// "<val>," and enqueues both children (possibly nil); a nil emits just ",".
func BFSSerialize(root *TreeNode) string {
    var (
        str string
        q = []*TreeNode{root}
    )
    for len(q) != 0 {
        node := q[0]
        q = q[1:]
        newStr := ""
        if node != nil {
            newStr = fmt.Sprintf("%d,", node.Val)
            q = append(q, node.Left, node.Right)
        } else {
            newStr = fmt.Sprintf(",")
        }
        str += newStr
    }
    return str
}
// BFSDeserialize decodes BFSSerialize output: the first token is the root,
// then each dequeued node consumes two tokens (left child, right child);
// an empty token is a nil child.
func BFSDeserialize(data string) *TreeNode {
    if data == "," {
        return nil
    }
    eles := strings.Split(data, ",")
    num, _ := strconv.ParseInt(eles[0], 10, 64)
    eles = eles[1:]
    root := &TreeNode{Val: int(num)}
    q := []*TreeNode{root}
    // len(eles) >= 2: the trailing comma leaves a final empty token, so a
    // node's child pair is always available while real tokens remain.
    for len(q) > 0 && len(eles) >= 2 {
        preNode := q[0]
        q = q[1:]
        lv, rv := eles[0], eles[1]
        eles = eles[2:]
        if lv != "" {
            num, _ := strconv.ParseInt(lv, 10, 64)
            preNode.Left = &TreeNode{Val: int(num)}
            q = append(q, preNode.Left)
        }
        if rv != "" {
            num, _ := strconv.ParseInt(rv, 10, 64)
            preNode.Right = &TreeNode{Val: int(num)}
            q = append(q, preNode.Right)
        }
    }
    return root
}
/**
* Your Codec object will be instantiated and called as such:
* ser := Constructor();
* deser := Constructor();
* data := ser.serialize(root);
* ans := deser.deserialize(data);
*/
<file_sep>/algorithms/_347_Top_K_Frequent_Elements/answer.go
package _347_Top_K_Frequent_Elements
// Freq pairs an element value with its occurrence count.
type Freq struct {
    num int  // the element value
    freq int // occurrences counted so far
}
// topKFrequent returns k of the most frequent values in nums.
// Implementation: a single pass keeps running counts in countMap and a
// fixed-size unsorted buffer ("heap" — not an actual heap) of candidate
// (num, freq) pairs, tracking the buffer's minimum for eviction decisions.
// NOTE(review): the incremental min/minidx bookkeeping on the insert path is
// intricate and looks fragile when an already-buffered value's count keeps
// changing; verify against duplicate-heavy inputs.
func topKFrequent(nums []int, k int) []int {
    if len(nums) == 0 {
        return []int{}
    }
    var (
        countMap = make(map[int]int)
        heap = []Freq{}
        minidx int
        min Freq
        ret []int
    )
    for _, num := range nums {
        // Update the running count for num.
        count, _ := countMap[num]
        count++
        countMap[num] = count
        // Maintain the top-k buffer.
        if len(heap) < k { // buffer not yet full: insert or update in place
            tmp := Freq{
                num: num,
                freq: count,
            }
            var exist bool
            var existIdx int
            for idx, h := range heap {
                if h.num == num {
                    exist = true
                    existIdx = idx
                    break
                }
            }
            if exist {
                h := heap[existIdx]
                if count > h.freq {
                    h.freq = count
                }
                heap[existIdx] = h
            } else {
                heap = append(heap, tmp)
            }
            if len(heap) == 1 { // first element: it is trivially the minimum
                minidx = 0
            } else {
                // NOTE(review): this only moves minidx when the previous min
                // is strictly larger than the touched entry — confirm it
                // keeps min consistent in all interleavings.
                if min.freq > tmp.freq {
                    if exist {
                        minidx = existIdx
                    } else {
                        minidx = len(heap) - 1
                    }
                }
            }
            min = heap[minidx]
        } else {
            // Buffer full: either update an existing entry or consider eviction.
            var exist bool
            var existIdx int
            for idx, h := range heap {
                if h.num == num {
                    exist = true
                    existIdx = idx
                }
            }
            if exist {
                h := heap[existIdx]
                h.freq = count
                heap[existIdx] = h
            } else {
                if count > min.freq { // evict the current minimum entry
                    if minidx == k-1 {
                        heap = heap[0 : k-1]
                    } else {
                        heap = append(heap[0:minidx], heap[minidx+1:]...)
                    }
                    heap = append(heap, Freq{
                        num: num,
                        freq: count,
                    })
                }
                // Recompute the minimum from scratch.
                minidx = 0
                min = heap[0]
                for idx, h := range heap {
                    if h.freq < min.freq {
                        min = h
                        minidx = idx
                    }
                }
            }
        }
    }
    for _, h := range heap {
        ret = append(ret, h.num)
    }
    return ret
}
<file_sep>/utils/linkedlist/intlinkedlist.go
package linkedlist
import "fmt"
type IntListNode struct {
Val int
Next *IntListNode
}
func (l *IntListNode) String() string {
var s string
for l != nil {
s += fmt.Sprintf("%d", l.Val)
l = l.Next
}
return s
}
// IsTwoIntLinkedListEqual compares two int linked list, return if they are the same or not.
func IsTwoIntLinkedListEqual(l1, l2 *IntListNode) bool {
sl1 := IntLinkedList2slice(l1)
sl2 := IntLinkedList2slice(l2)
if len(sl1) != len(sl2) {
return false
}
for i := 0; i < len(sl1); i++ {
if sl1[i] != sl2[i] {
return false
}
}
return true
}
// IntLinkedList2slice transfer an int linked list to an int slice.
func IntLinkedList2slice(l *IntListNode) []int {
ret := []int{}
if l == nil {
return ret
}
for {
ret = append(ret, l.Val)
if l.Next != nil {
l = l.Next
} else {
break
}
}
return ret
}
// GenerateIntLinkedList generates an int linked list
func GenerateIntLinkedList(vals []int) *IntListNode {
var l *IntListNode
var head *IntListNode
for _, val := range vals {
if l == nil {
l = &IntListNode{
Val: val,
}
head = l
} else {
l.Next = &IntListNode{
Val: val,
}
l = l.Next
}
}
return head
}
<file_sep>/algorithms/_530_Minimum_Absolute_Difference_in_BST/answer.go
package _530_Minimum_Absolute_Difference_in_BST
import (
"math"
)
// TreeNode is a binary search tree node; the minimum-difference algorithms
// below rely on BST ordering (inorder yields sorted values).
type TreeNode struct {
    Val int
    Left *TreeNode
    Right *TreeNode
}
// getMinimumDifference returns the minimum absolute difference between the
// values of any two nodes in the BST; the non-recursive inorder variant is
// the active implementation.
func getMinimumDifference(root *TreeNode) int {
    //return getMinimumDifferenceInOrderR(root)
    return getMinimumDifferenceInOrderNR(root)
}
// absInt returns |a - b|.
func absInt(a, b int) int {
    if a < b {
        return b - a
    }
    return a - b
}

// minInt returns the smaller of a and b.
func minInt(a, b int) int {
    if b < a {
        return b
    }
    return a
}
// Solution 1: inorder traversal, implemented recursively.
func getMinimumDifferenceInOrderR(root *TreeNode) int {
    if root == nil {
        return 0
    }
    min := math.MaxInt32
    dfs(root, nil, &min)
    return min
}

// dfs walks the tree inorder; pre is root's inorder predecessor. In a BST
// the minimum gap always occurs between inorder neighbours, so each visited
// node is compared with pre. It returns the last node visited so the caller
// can thread the predecessor onward.
func dfs(root *TreeNode, pre *TreeNode, min *int) (newPre *TreeNode) {
    if root.Left != nil {
        pre = dfs(root.Left, pre, min)
    }
    if pre != nil {
        //fmt.Println(pre.Val, root.Val, absInt(root.Val, pre.Val))
        *min = minInt(*min, absInt(root.Val, pre.Val))
    }
    pre, newPre = root, root
    if root.Right != nil {
        newPre = dfs(root.Right, pre, min)
    }
    return
}
// Solution 2: inorder traversal, iterative with an explicit stack; pre holds
// the previously visited (inorder predecessor) node.
func getMinimumDifferenceInOrderNR(root *TreeNode) int {
    var (
        stack []*TreeNode
        node = root
        pre *TreeNode
        min int = math.MaxInt32
    )
    for node != nil || len(stack) > 0 {
        if node != nil {
            // Descend to the leftmost unvisited node.
            stack = append(stack, node)
            node = node.Left
        } else {
            node = stack[len(stack)-1]
            stack = stack[:len(stack)-1]
            if pre != nil {
                min = minInt(min, absInt(node.Val, pre.Val))
            }
            pre = node
            node = node.Right
        }
    }
    return min
}
<file_sep>/algorithms/_132_Palindrome_Partitioning_2/answer.go
package _132_Palindrome_Partitioning_2
import (
"math"
)
// minCut returns the minimum number of cuts needed to partition s into
// palindromic substrings (LeetCode 132). Bottom-up DP over suffixes with a
// memo of palindromic ranges; s is assumed non-empty.
func minCut(s string) int {
    n := len(s)
    // isPal[i][j] reports whether s[i..j] is a palindrome.
    isPal := make([][]bool, n)
    for i := range isPal {
        isPal[i] = make([]bool, n)
    }
    // cuts[i] = minimum cuts needed for the suffix s[i:].
    cuts := make([]int, n)
    for i := n - 1; i >= 0; i-- {
        cuts[i] = math.MaxInt32
        for j := i; j < n; j++ {
            // s[i..j] is a palindrome iff its ends match and the interior
            // (if longer than two characters) is a palindrome too.
            if s[j] != s[i] || (j-i > 1 && !isPal[i+1][j-1]) {
                continue
            }
            isPal[i][j] = true
            if j+1 < n {
                if c := cuts[j+1] + 1; c < cuts[i] {
                    cuts[i] = c
                }
            } else {
                cuts[i] = 0 // the whole suffix is one palindrome: no cut
            }
        }
    }
    return cuts[0]
}
<file_sep>/algorithms/_81_Search_in_Rotated_Sorted_Array_2/answer.go
package _81_Search_in_Rotated_Sorted_Array_2
// search reports whether target occurs in nums via a linear scan
// (sufficient for this problem's worst case of a rotated array with
// duplicates, which degrades binary search to O(n) anyway).
func search(nums []int, target int) bool {
	for i := 0; i < len(nums); i++ {
		if nums[i] == target {
			return true
		}
	}
	return false
}
<file_sep>/algorithms/_75_Sort_Colors/answer_test.go
package _75_Sort_Colors
import (
"testing"
)
func TestSortColors(t *testing.T) {
}
<file_sep>/algorithms/_713_Subarray_Product_Less_Than_K/answer_test.go
package _713_Subarray_Product_Less_Than_K
import "testing"
// TestNumSubarrayProductLessThanK checks the two canonical examples:
// 8 subarrays of [10,5,2,6] have product < 100, and k=0 admits none.
func TestNumSubarrayProductLessThanK(t *testing.T) {
	if count := numSubarrayProductLessThanK([]int{10, 5, 2, 6}, 100); count != 8 {
		t.Errorf("wrong count with %d", count)
	}
	if count := numSubarrayProductLessThanK([]int{1, 2, 3}, 0); count != 0 {
		t.Errorf("wrong count with %d", count)
	}
}
<file_sep>/own_practice/sort/fastsort/linkedlistsort_test.go
package fastsort
import (
"testing"
. "leetcode_notes/utils/linkedlist"
)
// TestLinkedListFastSort1 sorts the list 3->1->2 and compares against the
// expected 1->2->3.
// NOTE(review): the comparison uses l1 (mutated in place?) rather than the
// returned ret — confirm LinkedListFastSort1 sorts in place; otherwise the
// assertion should compare ret with retl1.
func TestLinkedListFastSort1(t *testing.T) {
	l1 := GenerateIntLinkedList([]int{3, 1, 2})
	ret := LinkedListFastSort1(l1)
	retl1 := GenerateIntLinkedList([]int{1, 2, 3})
	if !IsTwoIntLinkedListEqual(l1, retl1) {
		t.Error("error test1 with", IntLinkedList2slice(ret))
	}
}
<file_sep>/algorithms/_515_Find_Largest_Value_in_Each_Tree_Row/answer.go
package _515_Find_Largest_Value_in_Each_Tree_Row
import "math"
// TreeNode is a binary-tree node (standard LeetCode definition).
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}
// largestValues returns the maximum node value on each level of the tree,
// top to bottom, using an iterative breadth-first traversal.
func largestValues(root *TreeNode) []int {
	if root == nil {
		return []int{}
	}
	ret := []int{}
	queue := []*TreeNode{root}
	for len(queue) > 0 {
		best := math.MinInt32
		next := []*TreeNode{}
		for _, n := range queue {
			if n.Val > best {
				best = n.Val
			}
			if n.Left != nil {
				next = append(next, n.Left)
			}
			if n.Right != nil {
				next = append(next, n.Right)
			}
		}
		ret = append(ret, best)
		queue = next
	}
	return ret
}
<file_sep>/algorithms/_34_Find_First_and_Last_Position_of_Element_in_Sorted_Array/answer_test.go
package _34_Find_First_and_Last_Position_of_Element_in_Sorted_Array
import (
"fmt"
"testing"
)
func TestSearchRange(t *testing.T) {
nums := []int{5, 7, 7, 8, 8, 10}
target := 8
fmt.Println(searchRange(nums, target))
}
// TestFindFirst exercises FindFirst on a sorted slice with duplicates.
// NOTE(review): this only prints the result (expected: 3); it cannot fail.
func TestFindFirst(t *testing.T) {
	nums := []int{5, 7, 7, 8, 8, 10}
	target := 8
	fmt.Println(FindFirst(nums, target, 0, len(nums)-1, -1))
}
// TestFindLast exercises FindLast on a sorted slice with duplicates.
// NOTE(review): this only prints the result (expected: 4); it cannot fail.
func TestFindLast(t *testing.T) {
	nums := []int{5, 7, 7, 8, 8, 10}
	target := 8
	fmt.Println(FindLast(nums, target, 0, len(nums)-1, -1))
}
<file_sep>/algorithms/_116_Populating_Next_Right_Pointers_in_Each_Node/answer.go
package _116_Populating_Next_Right_Pointers_in_Each_Node
// Node is a binary-tree node with an extra Next pointer linking it to its
// right neighbor on the same level (LeetCode 116 definition).
type Node struct {
	Val   int
	Left  *Node
	Right *Node
	Next  *Node
}
// connect populates each node's Next pointer to its right neighbor on the
// same level. Delegates to the iterative version; the recursive one is
// kept for reference.
func connect(root *Node) *Node {
	//return connectRecursion(root)
	return connectIteration(root)
}
// Level-order, iterative: a nil sentinel in the queue marks the end of each
// level. For a non-sentinel node, the element right behind it in the queue
// is its Next (nil sentinel => end of level => Next = nil).
func connectIteration(root *Node) *Node {
	if root == nil {
		return root
	}
	var queue = []*Node{root, nil}
	for len(queue) > 0 {
		cur := queue[0]
		queue = queue[1:]
		if cur != nil {
			cur.Next = queue[0]
			if cur.Left != nil {
				queue = append(queue, cur.Left)
			}
			if cur.Right != nil {
				queue = append(queue, cur.Right)
			}
		} else {
			// Sentinel popped: either we are done, or push a sentinel for
			// the level just enqueued.
			if len(queue) == 0 || queue[0] == nil {
				return root
			} else {
				queue = append(queue, nil)
			}
		}
	}
	return root
}
// Level-order, recursive: in a perfect binary tree the left child's Next is
// the right child, and the right child's Next is root.Next's left child.
func connectRecursion(root *Node) *Node {
	if root == nil {
		return root
	}
	if root.Left != nil {
		root.Left.Next = root.Right
	}
	if root.Right != nil {
		var x *Node
		if root.Next != nil {
			x = root.Next.Left
		}
		root.Right.Next = x
	}
	connectRecursion(root.Left)
	connectRecursion(root.Right)
	return root
}
<file_sep>/algorithms/_918_Maximum_Sum_Circular_Subarray/answer.go
package _918_Maximum_Sum_Circular_Subarray
// maxSubarraySumCircular returns the maximum sum of a non-empty subarray of
// nums where the subarray may wrap around the end of the array.
//
// It runs Kadane's algorithm simultaneously for the maximum and the minimum
// subarray sums: the best wrapping subarray equals total - minSum. When the
// minimum swallows the whole array (all-negative input), only the
// non-wrapping answer is valid.
func maxSubarraySumCircular(nums []int) int {
	if len(nums) == 0 {
		return 0
	}
	total := nums[0]
	bestHere, worstHere := nums[0], nums[0]
	best, worst := nums[0], nums[0]
	for _, v := range nums[1:] {
		total += v
		if bestHere+v > v {
			bestHere += v
		} else {
			bestHere = v
		}
		if bestHere > best {
			best = bestHere
		}
		if worstHere+v < v {
			worstHere += v
		} else {
			worstHere = v
		}
		if worstHere < worst {
			worst = worstHere
		}
	}
	if worst == total {
		return best
	}
	if total-worst > best {
		return total - worst
	}
	return best
}
// max returns the larger of a and b.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
// min returns the smaller of a and b.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
<file_sep>/algorithms/_5_Longest_Palindromic_Substring/answer_test.go
package _5_Longest_Palindromic_Substring
import (
"testing"
)
// TestLongestPalindrome accepts either valid answer for "babad" ("bab" or
// "aba") and the unique answer "bb" for "cbbd".
func TestLongestPalindrome(t *testing.T) {
	if ret := longestPalindrome("babad"); ret != "bab" && ret != "aba" {
		t.Error("not \"bab\" or \"aba\" with \"babad\", get:", ret)
	}
	if ret := longestPalindrome("cbbd"); ret != "bb" {
		t.Error("not \"bb\" with \"cbbd\", get:", ret)
	}
}
<file_sep>/algorithms/_1260_Shift_2D_Grid/answer_test.go
package _1260_Shift_2D_Grid
import (
"fmt"
"testing"
)
// TestShiftGrid exercises shiftGrid on a 3x3 grid with k=1 and a 1x1 grid
// with k=100.
// NOTE(review): results are only printed, never asserted — the test cannot
// fail. Expected outputs: [[9 1 2] [3 4 5] [6 7 8]] and [[1]].
func TestShiftGrid(t *testing.T) {
	var (
		grid [][]int
		k    int
		ret  [][]int
	)
	grid, k = [][]int{{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}, 1
	ret = shiftGrid(grid, k)
	//fmt.Println(ret)
	grid, k = [][]int{{1}}, 100
	ret = shiftGrid(grid, k)
	fmt.Println(ret)
}
<file_sep>/algorithms/_13_Roman_to_Integer/answer.go
package _13_Roman_to_Integer
// romanToInt converts a Roman numeral string to its integer value.
func romanToInt(s string) int {
	return romanToIntScan(s)
}
// romanToIntScan converts a Roman numeral to its integer value in one
// left-to-right pass: a symbol smaller than its successor is subtracted
// (the I in IV), otherwise it is added.
func romanToIntScan(s string) int {
	if s == "" {
		return 0
	}
	total := 0
	prev := c2i(s[0])
	for i := 1; i < len(s); i++ {
		cur := c2i(s[i])
		if prev < cur {
			total -= prev
		} else {
			total += prev
		}
		prev = cur
	}
	return total + prev
}

// c2i maps a single Roman symbol byte to its value; unknown bytes map to 0.
func c2i(x uint8) int {
	switch x {
	case 'I':
		return 1
	case 'V':
		return 5
	case 'X':
		return 10
	case 'L':
		return 50
	case 'C':
		return 100
	case 'D':
		return 500
	case 'M':
		return 1000
	}
	return 0
}
<file_sep>/algorithms/_653_Two_Sum_4_Input_is_a_BST/answer.go
package _653_Two_Sum_4_Input_is_a_BST
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// TreeNode is a binary-tree node (standard LeetCode definition).
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}
// findTarget reports whether two distinct nodes of the BST sum to k.
// It first indexes every value -> node, then searches for complements.
func findTarget(root *TreeNode, k int) bool {
	var (
		m = make(map[int]*TreeNode)
	)
	runTree(root, m)
	return findTree(root, k, m)
}
// findTree walks the tree looking for a node whose complement
// (target - Val) exists in m at a *different* node (node != root guards
// against using the same node twice, e.g. target == 2*Val).
func findTree(root *TreeNode, target int, m map[int]*TreeNode) bool {
	if root == nil {
		return false
	}
	if node, ok := m[target-root.Val]; ok && node != root {
		return true
	}
	if left := findTree(root.Left, target, m); left {
		return true
	}
	if right := findTree(root.Right, target, m); right {
		return true
	}
	return false
}
// runTree records every node of the tree into m keyed by value
// (BST values are distinct, so no entries are lost to overwrites).
func runTree(root *TreeNode, m map[int]*TreeNode) {
	if root == nil {
		return
	}
	m[root.Val] = root
	runTree(root.Left, m)
	runTree(root.Right, m)
	return
}
<file_sep>/algorithms/_606_Construct_String_from_Binary_Tree/answer.go
package _606_Construct_String_from_Binary_Tree
import (
"strconv"
)
// TreeNode is a binary-tree node (standard LeetCode definition).
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}
// tree2str serializes the tree to a parenthesized preorder string.
func tree2str(root *TreeNode) string {
	return tree2strPreR(root)
}
// tree2strPreR serializes the tree in preorder with parentheses.
// Empty pairs are omitted, except the left "()" must be kept when a right
// child exists without a left child (otherwise the shape is ambiguous).
func tree2strPreR(root *TreeNode) string {
	if root == nil {
		return ""
	}
	s := strconv.Itoa(root.Val)
	switch {
	case root.Left == nil && root.Right == nil:
		return s
	case root.Right == nil:
		return s + "(" + tree2strPreR(root.Left) + ")"
	default:
		return s + "(" + tree2strPreR(root.Left) + ")(" + tree2strPreR(root.Right) + ")"
	}
}
<file_sep>/algorithms/_75_Sort_Colors/answer.go
package _75_Sort_Colors
// sortColors sorts an array of 0s, 1s and 2s in place via quicksort.
func sortColors(nums []int) {
	fastSort1(nums)
}
// fastSort1 is a recursive quicksort using nums[0] as the pivot.
// i scans the unclassified region; elements > pivot are swapped to the
// tail, elements <= pivot are swapped toward the head. After the loop the
// pivot lands at index head, and both halves are sorted recursively.
func fastSort1(nums []int) {
	if len(nums) <= 1 {
		return
	}
	mid, i := nums[0], 1
	head, tail := 0, len(nums)-1
	for head < tail {
		if nums[i] > mid {
			nums[i], nums[tail] = nums[tail], nums[i]
			tail--
		} else {
			nums[i], nums[head] = nums[head], nums[i]
			head++
			i++
		}
	}
	nums[head] = mid
	fastSort1(nums[0:head])
	fastSort1(nums[head+1:])
}
<file_sep>/algorithms/_211_Design_Add_and_Search_Words_Data_Structure/answer.go
package _211_Design_Add_and_Search_Words_Data_Structure
// Node is a single trie node over the lowercase alphabet.
type Node struct {
	isStr bool      // true if the path from the root to here spells a complete word
	next  [26]*Node // children for 'a'..'z'; nil means no child (idx = char - 'a')
}
// WordDictionary is a trie-backed dictionary supporting '.'-wildcard search.
type WordDictionary struct {
	root *Node
}
// Constructor returns an empty WordDictionary with a fresh root node.
func Constructor() WordDictionary {
	return WordDictionary{root: &Node{}}
}
// AddWord inserts word (lowercase a-z) into the trie, creating nodes along
// the path as needed and marking the final node as a complete word.
func (this *WordDictionary) AddWord(word string) {
	var cur *Node = this.root
	for _, b := range word {
		idx := b - 'a'
		aimNode := cur.next[idx]
		if aimNode == nil {
			aimNode = &Node{}
			cur.next[idx] = aimNode
		}
		cur = aimNode
	}
	cur.isStr = true
}
// Search reports whether word is in the dictionary; '.' matches any letter.
func (this *WordDictionary) Search(word string) bool {
	return search(this.root, word)
}
// search matches word against the trie rooted at node.
// A '.' branches into every existing child; any other byte follows the
// single matching edge. Empty word succeeds iff node ends a stored word.
func search(node *Node, word string) bool {
	if len(word) == 0 {
		return node.isStr
	}
	b := word[0]
	if b == '.' {
		for _, n := range node.next {
			if n == nil {
				continue
			}
			ret := search(n, word[1:])
			if ret == true {
				return true
			}
		}
		return false
	} else {
		idx := b - 'a'
		n := node.next[idx]
		if n == nil {
			return false
		}
		return search(n, word[1:])
	}
}
/**
* Your WordDictionary object will be instantiated and called as such:
* obj := Constructor();
* obj.AddWord(word);
* param_2 := obj.Search(word);
*/
<file_sep>/algorithms/_40_Combination_Sum_2/answer_test.go
package _40_Combination_Sum_2
import "testing"
// TestCombinationSum runs combinationSum2 on the canonical example and a
// duplicates-heavy stress case, logging results for manual inspection.
// NOTE(review): no assertions — this test cannot fail; the second t.Log(ret)
// at the bottom re-logs the previous result because the larger case is
// commented out.
func TestCombinationSum(t *testing.T) {
	var (
		candidates []int
		target     int
		ret        [][]int
	)
	candidates = []int{10, 1, 2, 7, 6, 1, 5}
	target = 8
	ret = combinationSum2(candidates, target)
	t.Log(ret)
	candidates = []int{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
	target = 10
	ret = combinationSum2(candidates, target)
	t.Log(ret)
	//candidates = []int{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
	//target = 30
	//ret = combinationSum2(candidates, target)
	t.Log(ret)
}
<file_sep>/algorithms/_228_Summary_Ranges/answer.go
package _228_Summary_Ranges
import "fmt"
// summaryRanges collapses a sorted integer slice into range strings:
// a run of consecutive values becomes "a->b", a lone value becomes "a".
func summaryRanges(nums []int) []string {
	ret := []string{}
	i := 0
	for i < len(nums) {
		start := i
		// Extend the run while the next value is exactly one greater.
		for i+1 < len(nums) && nums[i+1]-nums[i] == 1 {
			i++
		}
		ret = append(ret, slice2Str(nums[start:i+1]))
		i++
	}
	return ret
}

// slice2Str renders a run of consecutive ints as "first" or "first->last"
// ("" for an empty run).
func slice2Str(tmp []int) string {
	switch len(tmp) {
	case 0:
		return ""
	case 1:
		return fmt.Sprint(tmp[0])
	default:
		return fmt.Sprintf("%d->%d", tmp[0], tmp[len(tmp)-1])
	}
}
<file_sep>/algorithms/_179_Largest_Number/answer.go
package _179_Largest_Number
import (
"sort"
"strconv"
"strings"
)
// largestNumber arranges nums so that their decimal concatenation forms the
// largest possible number, returned as a string ("0" for all-zero input).
func largestNumber(nums []int) string {
	strs := make(StrNums, 0, len(nums))
	for _, n := range nums {
		strs = append(strs, strconv.FormatInt(int64(n), 10))
	}
	sort.Sort(strs)
	joined := strings.TrimLeft(strings.Join(strs, ""), "0")
	if joined == "" {
		return "0"
	}
	return joined
}

// StrNums orders decimal strings so that for any adjacent pair a, b the
// concatenation a+b is maximal (a sorts first when a+b > b+a).
type StrNums []string

func (s StrNums) Len() int      { return len(s) }
func (s StrNums) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s StrNums) Less(i, j int) bool {
	return s[i]+s[j] > s[j]+s[i]
}
<file_sep>/algorithms/_501_Find_Mode_in_Binary_Search_Tree/answer.go
package _501_Find_Mode_in_Binary_Search_Tree
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// findMode returns every value that occurs most frequently in the BST.
// It performs an iterative in-order traversal, tallying each value in a
// map, tracking the maximum frequency, then collecting all values that
// reach it. (Output order follows Go map iteration and is unspecified.)
func findMode(root *TreeNode) []int {
	var (
		ret     = []int{}
		maxFreq int
		m       = make(map[int]int)
		s       = []*TreeNode{}
		node    = root
	)
	for node != nil || len(s) > 0 {
		for node != nil {
			s = append(s, node)
			node = node.Left
		}
		if len(s) > 0 {
			node = s[len(s)-1]
			s = s[:len(s)-1]
			tmp := node.Val
			var (
				c  int
				ok bool
			)
			if c, ok = m[tmp]; ok {
				c = c + 1
			} else {
				c = 1
			}
			m[tmp] = c
			if c >= maxFreq {
				maxFreq = c
			}
			node = node.Right
		}
	}
	for k, v := range m {
		if v == maxFreq {
			ret = append(ret, k)
		}
	}
	return ret
}
<file_sep>/algorithms/_129_Sum_Root_to_Leaf_Numbers/answer.go
package _129_Sum_Root_to_Leaf_Numbers
import "math"
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// sumNumbers treats every root-to-leaf path as a decimal number (root digit
// most significant) and returns the sum of all such numbers.
func sumNumbers(root *TreeNode) int {
	if root == nil {
		return 0
	}
	var (
		traces [][]int
		sum    int
	)
	findTrace(root, []int{}, &traces)
	for _, trace := range traces {
		sum += transTrace2Num(trace)
	}
	return sum
}
// transTrace2Num folds a digit path into its decimal value, e.g.
// [1,2,3] -> 123.
// NOTE(review): math.Pow goes through float64; exact for the depths this
// problem allows, but an integer num = num*10 + digit accumulation would
// avoid any float rounding concern.
func transTrace2Num(t []int) (num int) {
	for i := 0; i < len(t); i++ {
		num += t[i] * int(math.Pow(10, float64(len(t)-i-1)))
	}
	return
}
// findTrace collects every root-to-leaf digit path into *ret.
// At a leaf the shared backing slice is copied before being stored, so
// later sibling branches cannot overwrite recorded paths.
func findTrace(node *TreeNode, trace []int, ret *[][]int) {
	trace = append(trace, node.Val)
	if node.Left == nil && node.Right == nil {
		var tmpTrace []int
		for _, v := range trace {
			tmpTrace = append(tmpTrace, v)
		}
		*ret = append(*ret, tmpTrace)
		return
	}
	if node.Left != nil {
		findTrace(node.Left, trace, ret)
	}
	if node.Right != nil {
		findTrace(node.Right, trace, ret)
	}
}
<file_sep>/algorithms/_74_Search_a_2D_Matrix/answer.go
package _74_Search_a_2D_Matrix
// searchMatrix runs one binary search over a row-major sorted matrix,
// treating it as a flat sorted array of m*n elements.
func searchMatrix(matrix [][]int, target int) bool {
	if len(matrix) == 0 || len(matrix[0]) == 0 {
		return false
	}
	cols := len(matrix[0])
	lo, hi := 0, len(matrix)*cols-1
	for lo <= hi {
		mid := lo + (hi-lo)/2
		r, c := idxToPos(mid, cols)
		switch v := matrix[r][c]; {
		case v == target:
			return true
		case v > target:
			hi = mid - 1
		default:
			lo = mid + 1
		}
	}
	return false
}

// idxToPos converts a flat row-major index into (row, col) for a matrix
// with n columns; a negative index yields (-1, -1).
func idxToPos(idx, n int) (i, j int) {
	if idx < 0 {
		return -1, -1
	}
	return idx / n, idx % n
}
<file_sep>/algorithms/_76_Minimum_Window_Substring/answer_test.go
package _76_Minimum_Window_Substring
import "testing"
// TestIsValid checks that isValid accepts windows containing all target
// characters regardless of order or interleaving.
func TestIsValid(t *testing.T) {
	if b := isValid("BVCA", "BC"); !b {
		t.Error("should be true")
	}
	if b := isValid("BVCA", "BCV"); !b {
		t.Error("should be true")
	}
}
// TestMinWindow covers the canonical example plus the trivial, impossible
// (t longer than s) and no-match edge cases.
func TestMinWindow(t *testing.T) {
	if str := minWindow("ADOBECODEBANC", "ABC"); str != "BANC" {
		t.Errorf("wrong str with %s", str)
	}
	if str := minWindow("a", "a"); str != "a" {
		t.Errorf("wrong str with %s", str)
	}
	if str := minWindow("a", "aa"); str != "" {
		t.Errorf("wrong str with %s", str)
	}
	if str := minWindow("a", "b"); str != "" {
		t.Errorf("wrong str with %s", str)
	}
}
<file_sep>/algorithms/_968_Binary_Tree_Cameras/answer.go
package _968_Binary_Tree_Cameras
// TreeNode is a binary-tree node (standard LeetCode definition).
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}
// status describes a node's coverage state during the greedy placement.
type status int

const (
	installed status = 0 // a camera sits on this node
	seen      status = 1 // covered by a child's camera
	ignored   status = 2 // not covered by anything yet
)

// count accumulates the cameras placed during one minCameraCover call.
var count int
// minCameraCover returns the minimum number of cameras needed so that every
// node of the tree is monitored, using a greedy post-order placement
// (getStatus). If the root itself ends up uncovered, one more camera goes
// on the root.
//
// Fix: count is package-level state and was never reset, so any call after
// the first returned a stale accumulated total.
func minCameraCover(root *TreeNode) int {
	if root == nil {
		return 0
	}
	count = 0 // reset accumulator so repeated calls stay correct
	if x := getStatus(root); x == ignored {
		count++
	}
	return count
}
// getStatus computes node's coverage state bottom-up, installing cameras
// greedily on parents of uncovered children (increments package-level count).
func getStatus(node *TreeNode) status {
	if node == nil {
		return seen
	}
	ls, rs := getStatus(node.Left), getStatus(node.Right)
	if ls == ignored || rs == ignored { // any child uncovered: install here
		count++
		return installed
	} else if ls == installed || rs == installed { // a child's camera sees this node
		return seen
	} else { // both children merely seen: this node is not covered
		return ignored
	}
}
<file_sep>/algorithms/_6_ZigZag_Conversion/answer.go
package _6_ZigZag_Conversion
// convert writes s in a zigzag across numRows rows (down the column, then
// diagonally back up) and reads the rows left to right.
func convert(s string, numRows int) string {
	if numRows == 1 {
		return s
	}
	rows := make([]string, numRows)
	row, goingDown := 0, true
	for i := 0; i < len(s); i++ {
		rows[row] += string(s[i])
		// Reverse direction at the top and bottom rows.
		if row == 0 {
			goingDown = true
		}
		if row == numRows-1 {
			goingDown = false
		}
		if goingDown {
			row++
		} else {
			row--
		}
	}
	out := ""
	for _, r := range rows {
		out += r
	}
	return out
}
<file_sep>/algorithms/_80_Remove_Duplicates_from_Sorted_Array_2/answer.go
package _80_Remove_Duplicates_from_Sorted_Array_2
// removeDuplicates compresses sorted nums in place so each value appears at
// most twice, returning the new logical length. Extra copies are removed by
// re-slicing/appending over the same backing array, so the caller's first
// `return`-value elements hold the result.
func removeDuplicates(nums []int) int {
	var (
		index      int // current scan position
		lastValue  int // value of the current run
		repeatTime int // occurrences of lastValue seen so far
	)
	if len(nums) == 0 {
		return 0
	}
	lastValue = nums[index]
	index++
	repeatTime = 1
	for {
		if len(nums) <= index {
			break
		}
		if nums[index] == lastValue {
			repeatTime++
			if repeatTime > 2 {
				// Third+ copy: delete it by shifting the tail left
				// (index deliberately not advanced).
				if index == (len(nums) - 1) {
					nums = nums[0:index]
				} else {
					nums = append(nums[0:index], nums[index+1:]...)
				}
			} else {
				index++
			}
		} else {
			// New run starts here.
			lastValue = nums[index]
			repeatTime = 1
			index++
		}
	}
	return len(nums)
}
<file_sep>/algorithms/_38_Count_and_Say/answer_test.go
package _38_Count_and_Say
import (
"testing"
)
// TestCountAndSay checks the first three terms of the count-and-say
// sequence: "1", "11", "21".
func TestCountAndSay(t *testing.T) {
	if ret := countAndSay(1); ret != "1" {
		t.Error("ret not 1, get", ret)
	}
	if ret := countAndSay(2); ret != "11" {
		t.Error("ret not 11, get", ret)
	}
	if ret := countAndSay(3); ret != "21" {
		t.Error("ret not 21, get", ret)
	}
}
<file_sep>/algorithms/_872_Leaf_Similar_Trees/answer.go
package _872_Leaf_Similar_Trees
import "fmt"
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// TreeNode is a binary-tree node (standard LeetCode definition).
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}
// leafSimilar reports whether the left-to-right leaf value sequences of
// two trees are identical.
// NOTE(review): the fmt.Println calls are leftover debug output; removing
// them also requires dropping the file's "fmt" import.
func leafSimilar(root1 *TreeNode, root2 *TreeNode) bool {
	var l1, l2 []int
	getLeaves(root1, &l1)
	getLeaves(root2, &l2)
	fmt.Println(l1)
	fmt.Println(l2)
	if len(l1) != len(l2) {
		return false
	}
	for i := 0; i < len(l1); i++ {
		if l1[i] != l2[i] {
			return false
		}
	}
	return true
}
// getLeaves appends the values of node's leaves to *l in left-to-right order.
func getLeaves(node *TreeNode, l *[]int) {
	if node == nil {
		return
	}
	if node.Left == nil && node.Right == nil {
		*l = append(*l, node.Val)
		return
	}
	getLeaves(node.Left, l)
	getLeaves(node.Right, l)
	return
}
<file_sep>/algorithms/_862_Shortest_Subarray_with_Sum_at_Least_K/QD.md
Given an integer array nums and an integer k, return the length of the shortest non-empty subarray of nums with a sum of at least k. If there is no such subarray, return -1.
A subarray is a contiguous part of an array.
Example 1:
```
Input: nums = [1], k = 1
Output: 1
```
Example 2:
```
Input: nums = [1,2], k = 4
Output: -1
```
Example 3:
```
Input: nums = [2,-1,2], k = 3
Output: 3
```
Constraints:
- 1 <= nums.length <= 10^5
- -10^5 <= nums[i] <= 10^5
- 1 <= k <= 10^9
<file_sep>/algorithms/_122_Best_Time_to_Buy_and_Sell_Stock_2/answer.go
package _122_Best_Time_to_Buy_and_Sell_Stock_2
// maxProfit sums every positive day-to-day price increase, which equals the
// best achievable profit when unlimited buy/sell transactions are allowed.
func maxProfit(prices []int) int {
	total := 0
	for i := 1; i < len(prices); i++ {
		if diff := prices[i] - prices[i-1]; diff > 0 {
			total += diff
		}
	}
	return total
}
<file_sep>/algorithms/_508_Most_Frequent_Subtree_Sum/answer.go
package _508_Most_Frequent_Subtree_Sum
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// findFrequentTreeSum returns the subtree sum value(s) that occur most
// often across all subtrees. push tallies every subtree sum into m; two
// passes over the map find the max frequency and collect its keys.
func findFrequentTreeSum(root *TreeNode) []int {
	var (
		m        = make(map[int]int)
		maxCount int
		ret      []int
	)
	push(m, root)
	for _, v := range m {
		if v > maxCount {
			maxCount = v
		}
	}
	for k, v := range m {
		if v == maxCount {
			ret = append(ret, k)
		}
	}
	return ret
}
// push computes the subtree sum rooted at n bottom-up, increments its tally
// in m, and returns the sum (0 for a nil node).
func push(m map[int]int, n *TreeNode) (ret int) {
	if n == nil {
		return
	}
	var (
		left  int
		right int
	)
	if n.Left != nil {
		left = push(m, n.Left)
	}
	if n.Right != nil {
		right = push(m, n.Right)
	}
	ret = left + right + n.Val
	if c, ok := m[ret]; ok {
		m[ret] = c + 1
	} else {
		m[ret] = 1
	}
	return
}
<file_sep>/algorithms/_429_N-ary_Tree_Level_Order_Traversal/answer.go
package _429_N_ary_Tree_Level_Order_Traversal
// Node is an N-ary tree node (LeetCode definition).
type Node struct {
	Val      int
	Children []*Node
}
// levelOrder returns the node values of an N-ary tree grouped by level,
// top to bottom, using a two-queue breadth-first traversal (q holds the
// current level, tq collects the next one).
func levelOrder(root *Node) [][]int {
	var (
		ret   [][]int
		q, tq []*Node
	)
	if root == nil {
		return ret
	}
	q = append(q, root)
	for len(q) > 0 {
		tr := []int{}
		for _, n := range q {
			if n == nil {
				continue
			}
			tr = append(tr, n.Val)
			tq = append(tq, n.Children...)
		}
		q = tq
		tq = []*Node{}
		ret = append(ret, tr)
	}
	return ret
}
<file_sep>/algorithms/_491_Increasing_Subsequences/answer.go
package _491_Increasing_Subsequences
import "fmt"
// findSubsequences returns all distinct non-decreasing subsequences of nums
// with at least two elements.
func findSubsequences(nums []int) [][]int {
	return findSubsequencesDFS(nums)
}
// findSubsequencesDFS enumerates candidate subsequences with dfs, then
// deduplicates them using their fmt.Sprint representation as a map key.
func findSubsequencesDFS(nums []int) [][]int {
	var (
		ss  [][]int
		nss [][]int
		sm  = make(map[string][]int)
	)
	dfs(nums, []int{}, &ss)
	for _, s := range ss {
		key := fmt.Sprint(s)
		if _, ok := sm[key]; !ok {
			sm[key] = s
			nss = append(nss, s)
		}
	}
	return nss
}
// dfs extends the partial subsequence tmp with each later element that keeps
// it non-decreasing, recording every extension of length >= 2 into *ss.
// Each recorded result is copied first because tmp's backing array is
// shared across recursive calls.
func dfs(nums []int, tmp []int, ss *[][]int) {
	if len(nums) == 0 {
		return
	}
	for idx, num := range nums {
		var newNums []int
		if idx != len(nums)-1 {
			newNums = nums[idx+1:]
		}
		if len(tmp) == 0 {
			dfs(newNums, append(tmp, num), ss)
		} else {
			if num >= tmp[len(tmp)-1] {
				x := make([]int, len(tmp))
				copy(x, tmp)
				x = append(x, num)
				*ss = append(*ss, x)
				dfs(newNums, append(tmp, num), ss)
			}
		}
	}
}
<file_sep>/algorithms/_572_Subtree_of_Another_Tree/answer.go
package _572_Subtree_of_Another_Tree
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// isSubtree reports whether tree t occurs as a subtree of s (same structure
// and values). Delegates to the recursive version.
func isSubtree(s *TreeNode, t *TreeNode) bool {
	// return isSubtreeUnrecursion(s, t)
	return isSubtreeRecursion(s, t)
}
// isSubtreeUnrecursion is the iterative variant: every node of s is pushed
// on an explicit stack and compared against t with isEqual.
func isSubtreeUnrecursion(s *TreeNode, t *TreeNode) bool {
	if t == nil {
		return true
	}
	var (
		l = []*TreeNode{s}
	)
	for len(l) > 0 {
		x := l[len(l)-1]
		l = l[:len(l)-1]
		if x == nil {
			continue
		}
		if isEqual(x, t) {
			return true
		}
		l = append(l, x.Left, x.Right)
	}
	return false
}
// isSubtreeRecursion checks whether t matches at the root of s, or occurs
// in either child subtree.
func isSubtreeRecursion(s *TreeNode, t *TreeNode) bool {
	if t == nil {
		return true
	}
	if s == nil {
		return false
	}
	if isEqual(s, t) {
		return true
	}
	return isSubtreeRecursion(s.Left, t) || isSubtreeRecursion(s.Right, t)
}
// isEqual reports whether two trees have identical structure and values.
func isEqual(s, t *TreeNode) bool {
	if s == nil && t == nil {
		return true
	}
	if s != nil && t == nil {
		return false
	}
	if s == nil && t != nil {
		return false
	}
	if s.Val != t.Val {
		return false
	}
	return isEqual(s.Left, t.Left) && isEqual(s.Right, t.Right)
}
<file_sep>/algorithms/_12_Integer_to_Roman/answer.go
package _12_Integer_to_Roman
// intToRoman converts an integer (1..3999) to its Roman numeral string.
func intToRoman(num int) string {
	return intToRomanGreedy(num)
}
// intToRomanGreedy builds the numeral by repeatedly emitting the largest
// symbol (or subtractive pair such as CM) that still fits into num.
func intToRomanGreedy(num int) string {
	values := []int{1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1}
	symbols := []string{"M", "CM", "D", "CD", "C", "XC", "L", "XL", "X", "IX", "V", "IV", "I"}
	out := ""
	for i, v := range values {
		for num >= v {
			num -= v
			out += symbols[i]
		}
	}
	return out
}
<file_sep>/algorithms/_215_Kth_Largest_Element_in_an_Array/answer.go
package _215_Kth_Largest_Element_in_an_Array
import (
"container/heap"
"sort"
)
// findKthLargest returns the k-th largest element of nums. Delegates to the
// quickselect (partition) implementation; alternatives kept for reference.
func findKthLargest(nums []int, k int) int {
	//return findKthLargestWithKLengthArray(nums, k)
	//return findKthLargestAfterSortingAllElements(nums, k)
	//return findKthLargestWithHeap(nums, k)
	return findKthLargestWithPartion(nums, k)
}
// Quickselect: repeatedly partition (descending) around a pivot; when the
// pivot settles at index k-1 it is the k-th largest. No full sort needed;
// average O(n).
func findKthLargestWithPartion(nums []int, k int) int {
	low, high := 0, len(nums)
	for low < high {
		i, j := low, high-1
		pivot := nums[low] // pivot = value at the low position
		for i <= j {
			for i <= j && nums[i] >= pivot {
				i++
			}
			for i <= j && nums[j] <= pivot {
				j--
			}
			if i < j {
				nums[i], nums[j] = nums[j], nums[i]
			}
		}
		nums[low], nums[j] = nums[j], nums[low]
		if j == k-1 {
			return nums[j]
		} else if j < k-1 {
			low = j + 1
		} else {
			high = j
		}
	}
	return 0
}
// Priority queue / heap: pop the max-heap k times; the k-th pop is the
// answer.
func findKthLargestWithHeap(nums []int, k int) int {
	h := IntHeap(nums)
	heap.Init(&h)
	var ret int
	for i := 0; i < k; i++ {
		ret = heap.Pop(&h).(int)
	}
	return ret
}
// IntHeap is a max-heap of ints for container/heap (Less is inverted).
type IntHeap []int

func (h IntHeap) Len() int           { return len(h) }
func (h IntHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
func (h IntHeap) Less(i, j int) bool { return h[i] > h[j] }

// Push appends x; called only through heap.Push.
func (h *IntHeap) Push(x interface{}) {
	*h = append(*h, x.(int))
}

// Pop removes and returns the last element; called only through heap.Pop.
func (h *IntHeap) Pop() interface{} {
	old := *h
	n := len(old)
	x := old[n-1]
	*h = old[0 : n-1]
	return x
}
// Full sort then index from the end; O(n log n) and sorts elements whose
// order the answer does not need.
func findKthLargestAfterSortingAllElements(nums []int, k int) int {
	sort.Ints(nums) // ascending
	return nums[len(nums)-k]
}
// Keeps a k-element buffer of the largest values seen so far. Not actually
// a heap: every replacement rescans the buffer for the new minimum, so it
// is O(n*k) — kept for comparison only.
func findKthLargestWithKLengthArray(nums []int, k int) int {
	var (
		heap        []int
		min, minidx int
	)
	for _, num := range nums {
		if len(heap) == 0 {
			heap = append(heap, num)
			min = num
			minidx = 0
		} else {
			if len(heap) < k { // room left: insert directly
				heap = append(heap, num)
				if num < min {
					min = num
					minidx = len(heap) - 1
				}
			} else {
				if num > min { // buffer full: evict the current minimum
					if minidx == len(heap)-1 {
						heap = heap[0 : len(heap)-1]
					} else {
						heap = append(heap[0:minidx], heap[minidx+1:]...)
					}
					heap = append(heap, num)
					// rescan for the new minimum
					min, minidx = heap[0], 0
					for idx, h := range heap {
						if h < min {
							min = h
							minidx = idx
						}
					}
				}
			}
		}
	}
	return min
}
<file_sep>/algorithms/_139_Word_Break/answer.go
package _139_Word_Break
// wordBreak reports whether s can be segmented into a sequence of words
// from wordDict. The slice is converted to a set, then the DP variant runs.
func wordBreak(s string, wordDict []string) bool {
	//return wordBreakBackTrack(s, wordDict)
	dict := make(map[string]struct{})
	for _, word := range wordDict {
		dict[word] = struct{}{}
	}
	return wordBreakDP(s, dict)
}
// Backtracking variant; exponential in the worst case and times out on
// large inputs — kept for reference.
func wordBreakBackTrack(s string, wordDict map[string]struct{}) bool {
	return backFunc(s, 0, wordDict)
}
// backFunc tries every dictionary word as a prefix of s[idx:], recursing on
// the remainder; success when the whole string is consumed.
func backFunc(s string, idx int, wordDict map[string]struct{}) bool {
	if idx >= len(s) {
		return true
	}
	for i := idx; i < len(s); i++ {
		subStr := s[idx : i+1]
		if wordInDict(subStr, wordDict) {
			ret := backFunc(s, i+1, wordDict)
			if ret {
				return true
			}
		}
	}
	return false
}
// DP variant: dp[j] is true when the prefix s[:j] can be segmented into
// dictionary words; dp[j] holds if some split point i has dp[i] set and
// s[i:j] in the dictionary.
func wordBreakDP(s string, wordDict map[string]struct{}) bool {
	dp := make([]bool, len(s)+1)
	dp[0] = true // empty prefix is trivially segmentable
	for end := 1; end <= len(s); end++ {
		for start := 0; start < end && !dp[end]; start++ {
			if dp[start] && wordInDict(s[start:end], wordDict) {
				dp[end] = true
			}
		}
	}
	return dp[len(s)]
}

// wordInDict reports whether word is a key of dict.
func wordInDict(word string, dict map[string]struct{}) bool {
	_, ok := dict[word]
	return ok
}
<file_sep>/algorithms/_152_Maximum_Product_Subarray/answer.go
package _152_Maximum_Product_Subarray
// maxProduct returns the largest product of any contiguous subarray of nums.
func maxProduct(nums []int) int {
	return maxProductDP1(nums)
}
// maxProductDP1 tracks, for each position, the largest and smallest
// products of a subarray ending there — the smallest matters because a
// negative factor can flip it into the new maximum — and returns the
// global best.
func maxProductDP1(nums []int) int {
	if len(nums) == 0 {
		return 0
	}
	best := nums[0]
	hi, lo := nums[0], nums[0]
	for _, v := range nums[1:] {
		ph, pl := hi*v, lo*v
		hi = MultiMax(v, ph, pl)
		lo = MultiMin(v, ph, pl)
		if hi > best {
			best = hi
		}
	}
	return best
}

// MultiMin returns the smallest of its arguments; panics on no arguments.
func MultiMin(nums ...int) int {
	if len(nums) == 0 {
		panic("empty nums")
	}
	m := nums[0]
	for _, v := range nums[1:] {
		if v < m {
			m = v
		}
	}
	return m
}

// MultiMax returns the largest of its arguments; panics on no arguments.
func MultiMax(nums ...int) int {
	if len(nums) == 0 {
		panic("empty nums")
	}
	m := nums[0]
	for _, v := range nums[1:] {
		if v > m {
			m = v
		}
	}
	return m
}
<file_sep>/algorithms/_145_Binary_Tree_Postorder_Traversal/answer.go
package _145_Binary_Tree_Postorder_Traversal
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// postorderTraversal returns the post-order (left, right, root) values of
// the tree. Delegates to the two-stack iterative variant.
func postorderTraversal(root *TreeNode) []int {
	// return postorderTraversalRecursion(root)
	return postorderTraversalUnrecursionWithTwoStack(root)
}
// postorderTraversalUnrecursionWithTwoStack produces root-left-right order
// on the first stack (children pushed left then right), collects it in lret,
// then reverses via the second stack, yielding left-right-root.
func postorderTraversalUnrecursionWithTwoStack(node *TreeNode) []int {
	var (
		ret  = []int{}
		l    = []*TreeNode{node}
		lret = []*TreeNode{}
	)
	if node == nil {
		return ret
	}
	for len(l) > 0 {
		x := l[len(l)-1]
		l = l[:len(l)-1]
		lret = append(lret, x)
		if x.Left != nil {
			l = append(l, x.Left)
		}
		if x.Right != nil {
			l = append(l, x.Right)
		}
	}
	for len(lret) > 0 {
		x := lret[len(lret)-1]
		lret = lret[:len(lret)-1]
		ret = append(ret, x.Val)
	}
	return ret
}
// postorderTraversalRecursion is the straightforward recursive post-order:
// left subtree, right subtree, then the node itself.
func postorderTraversalRecursion(node *TreeNode) []int {
	var (
		ret = []int{}
	)
	if node == nil {
		return ret
	}
	if node.Left != nil {
		ret = append(ret, postorderTraversalRecursion(node.Left)...)
	}
	if node.Right != nil {
		ret = append(ret, postorderTraversalRecursion(node.Right)...)
	}
	ret = append(ret, node.Val)
	return ret
}
<file_sep>/algorithms/_2130_Maximum_Twin_Sum_of_Linked_List/answer_test.go
package _2130_Maximum_Twin_Sum_of_Linked_List
import (
"testing"
"github.com/shadas/leetcode_notes/utils/linkedlist"
)
// testCase pairs an input linked list with its expected maximum twin sum.
type testCase struct {
	input  *linkedlist.IntListNode
	output int
}
// TestPairSum runs pairSum over table-driven cases from LeetCode 2130.
func TestPairSum(t *testing.T) {
	cases := []testCase{
		{
			input:  linkedlist.GenerateIntLinkedList([]int{5, 4, 2, 1}),
			output: 6,
		},
		{
			input:  linkedlist.GenerateIntLinkedList([]int{4, 2, 2, 3}),
			output: 7,
		},
		{
			input:  linkedlist.GenerateIntLinkedList([]int{1, 100000}),
			output: 100001,
		},
	}
	for _, c := range cases {
		if x := pairSum(c.input); x != c.output {
			t.Errorf("output should be \"%v\" instead of \"%v\" with input=\"%v\"", c.output, x, c.input)
		}
	}
}
<file_sep>/algorithms/_66_Plus_One/answer.go
package _66_Plus_One
// plusOne returns the decimal digits of the number digits+1, where digits
// holds one decimal digit per element, most significant first. The input
// slice is not modified.
//
// Fix: the original rebuilt the result by prepending one element per digit
// (O(n^2) allocations) with a convoluted carry flag; this version copies
// once and propagates the carry right-to-left.
func plusOne(digits []int) []int {
	if len(digits) == 0 {
		return nil // match the original's behavior on empty input
	}
	ret := make([]int, len(digits))
	copy(ret, digits)
	for i := len(ret) - 1; i >= 0; i-- {
		if ret[i] < 9 {
			ret[i]++ // no carry: done
			return ret
		}
		ret[i] = 0 // 9 rolls over; carry continues leftward
	}
	// Every digit was 9 (e.g. 999 -> 1000): grow by one digit.
	return append([]int{1}, ret...)
}
<file_sep>/algorithms/_14_Longest_Common_Prefix/answer_test.go
package _14_Longest_Common_Prefix
import (
"testing"
)
// TestLongestCommonPrefix checks the shared prefix of {"a","ab"} is "a".
func TestLongestCommonPrefix(t *testing.T) {
	if ret := longestCommonPrefix([]string{"a", "ab"}); ret != "a" {
		t.Error("not a with test1.")
	}
}
<file_sep>/algorithms/_57_Insert_Interval/answer.go
package _57_Insert_Interval
import (
"sort"
)
// insert inserts newInterval into a list of intervals, merging any that
// overlap, and returns the result. The list is sorted first, then the
// insertion point is located, overlapping intervals are folded into
// newInterval, and the untouched tail is appended.
// NOTE(review): newInterval is mutated during merging — callers must not
// rely on it afterwards.
func insert(intervals [][]int, newInterval []int) [][]int {
	if len(intervals) == 0 {
		return [][]int{newInterval}
	}
	sort.Sort(X(intervals))
	var (
		ret      [][]int
		idx      int
		notFound bool
	)
	// Locate the insertion position: first interval that starts after
	// newInterval, or one it overlaps.
	for {
		if idx >= len(intervals) {
			notFound = true
			break
		}
		cur := intervals[idx]
		if newInterval[0] < cur[0] {
			break
		} else {
			if newInterval[0] > cur[1] {
				idx++
				continue
			} else {
				break
			}
		}
	}
	// No position found: newInterval starts after everything — append it.
	if notFound {
		ret = append(intervals, newInterval)
		return ret
	}
	//fmt.Printf("insert pos=%d\n", idx)
	// Everything before idx is unaffected; copy it over.
	x := intervals[0:idx]
	ret = append(ret, x...)
	var (
		tailIdx  int
		foundPos bool
	)
	// Fold every overlapping interval into newInterval until one starts
	// strictly after it.
	for idx < len(intervals) {
		cur := intervals[idx]
		if newInterval[0] < cur[0] && newInterval[1] < cur[0] {
			foundPos = true
			ret = append(ret, newInterval)
			tailIdx = idx
			break
		}
		// Merge: widen newInterval to cover cur.
		if cur[0] < newInterval[0] {
			newInterval[0] = cur[0]
		}
		if cur[1] > newInterval[1] {
			newInterval[1] = cur[1]
		}
		idx++
	}
	//fmt.Printf("insert tailIdx=%d, foundPos=%v\n", tailIdx, foundPos)
	// Append the merged interval (if it swallowed the tail) or the rest.
	if !foundPos {
		ret = append(ret, newInterval)
	} else {
		ret = append(ret, intervals[tailIdx:]...)
	}
	return ret
}
type X [][]int
func (x X) Len() int {
return len(x)
}
func (x X) Less(i, j int) bool {
if x[i][0] < x[j][0] {
return true
} else if x[i][0] > x[j][0] {
return false
} else {
if x[i][1] <= x[j][1] {
return true
} else {
return false
}
}
}
func (x X) Swap(i, j int) {
x[i], x[j] = x[j], x[i]
}
<file_sep>/algorithms/_415_Add_Strings/answer_test.go
package _415_Add_Strings
import "testing"
// TestAddStrings covers no-carry, full-carry ("99"+"1") and internal-carry
// cases of decimal string addition.
func TestAddStrings(t *testing.T) {
	if ret := addStrings("10", "2"); ret != "12" {
		t.Errorf("wrong with %s", ret)
	}
	if ret := addStrings("99", "1"); ret != "100" {
		t.Errorf("wrong with %s", ret)
	}
	if ret := addStrings("199", "199"); ret != "398" {
		t.Errorf("wrong with %s", ret)
	}
}
<file_sep>/algorithms/_283_Move_Zeroes/answer.go
package _283_Move_Zeroes
// moveZeroes moves every zero in nums to the end, in place, preserving the
// relative order of the non-zero elements.
//
// Fix: the original scanned forward for the next non-zero at every zero it
// met, which is O(n^2) on zero-heavy input; this two-pointer compaction is
// a single O(n) pass with identical results.
func moveZeroes(nums []int) {
	insert := 0 // next slot to receive a non-zero value
	for _, v := range nums {
		if v != 0 {
			nums[insert] = v
			insert++
		}
	}
	// Zero-fill the remaining tail.
	for ; insert < len(nums); insert++ {
		nums[insert] = 0
	}
}
<file_sep>/algorithms/_238_Product_of_Array_Except_Self/answer.go
package _238_Product_of_Array_Except_Self
// productExceptSelf returns out where out[i] is the product of every
// element of nums except nums[i], computed without division: a left-to-right
// prefix-product pass followed by a right-to-left suffix-product pass.
//
// Fix: the original indexed leftProduct[0] unconditionally and panicked on
// empty input; this version returns an empty slice instead.
func productExceptSelf(nums []int) []int {
	out := make([]int, len(nums))
	prefix := 1
	for i, v := range nums {
		out[i] = prefix // product of everything left of i
		prefix *= v
	}
	suffix := 1
	for i := len(nums) - 1; i >= 0; i-- {
		out[i] *= suffix // multiply in product of everything right of i
		suffix *= nums[i]
	}
	return out
}
<file_sep>/algorithms/_93_Restore_IP_Addresses/answer_test.go
package _93_Restore_IP_Addresses
import "testing"
// TestRestoreIpAddresses logs results for two inputs; expected answers are
// noted in the trailing comments.
// NOTE(review): no assertions — the test cannot fail; result ordering is
// implementation-defined, so an assertion should compare as a set.
func TestRestoreIpAddresses(t *testing.T) {
	var (
		ret []string
	)
	ret = restoreIpAddresses("25525511135")
	t.Logf("ret is %v", ret) // ["255.255.11.135","255.255.111.35"]
	ret = restoreIpAddresses("0000")
	t.Logf("ret is %v", ret) // ["0.0.0.0"]
}
<file_sep>/algorithms/_15_3Sum/answer.go
package _15_3Sum
import (
"sort"
)
// threeSum returns all unique triplets in nums that sum to zero.
// The slice is sorted first; for each anchor value the remaining pair is
// located with two pointers, skipping duplicates at all three positions.
func threeSum(nums []int) [][]int {
	sort.Ints(nums)
	out := [][]int{}
	for i := 0; i < len(nums)-2; i++ {
		if i > 0 && nums[i] == nums[i-1] {
			continue // same anchor as before would duplicate triplets
		}
		lo, hi := i+1, len(nums)-1
		want := 0 - nums[i]
		for lo < hi {
			sum := nums[lo] + nums[hi]
			switch {
			case sum == want:
				out = append(out, []int{nums[i], nums[lo], nums[hi]})
				// Skip over duplicate values on both sides.
				for lo < hi && nums[lo] == nums[lo+1] {
					lo++
				}
				for lo < hi && nums[hi] == nums[hi-1] {
					hi--
				}
				lo++
				hi--
			case sum < want:
				lo++
			default:
				hi--
			}
		}
	}
	return out
}
<file_sep>/algorithms/_202_Happy_Number/answer_test.go
package _202_Happy_Number
import "testing"
// TestIsHappy asserts a known happy number (19) and a known unhappy one (20).
// Fixes: the original used t.Log, which never fails the test, and asserted
// that 20 is happy — it is not (it sits on the 4-16-37-... cycle).
func TestIsHappy(t *testing.T) {
	if !isHappy(19) {
		t.Error("isHappy(19) = false, want true")
	}
	if isHappy(20) {
		t.Error("isHappy(20) = true, want false")
	}
}<file_sep>/algorithms/_703_Kth_Largest_Element_in_a_Stream/answer.go
package _703_Kth_Largest_Element_in_a_Stream
// KthLargest maintains the k largest values seen so far in a descending
// slice; the last kept element is the current k-th largest.
type KthLargest struct {
	queue []int
	k     int
}
// Constructor builds a KthLargest seeded with nums: each value is inserted
// into a descending-ordered slice, which is then truncated to k elements.
func Constructor(k int, nums []int) KthLargest {
	kl := KthLargest{
		k: k,
	}
	for _, n := range nums {
		if len(kl.queue) == 0 {
			kl.queue = append(kl.queue, n)
		} else {
			if n <= kl.queue[len(kl.queue)-1] {
				kl.queue = append(kl.queue, n) // smallest so far: goes last
			} else if n > kl.queue[0] {
				kl.queue = append([]int{n}, kl.queue...) // new maximum: goes first
			} else {
				// Insert at the first position that keeps descending order.
				for i := len(kl.queue) - 2; i >= 0; i-- {
					if n <= kl.queue[i] {
						kl.queue = append(kl.queue[0:i+1], append([]int{n}, kl.queue[i+1:]...)...)
						break
					}
				}
			}
		}
	}
	// Keep only the top k.
	if len(kl.queue) >= k {
		kl.queue = kl.queue[0:k]
	}
	return kl
}
func (this *KthLargest) Add(val int) int {
if len(this.queue) == 0 {
this.queue = append(this.queue, val)
} else {
if val <= this.queue[len(this.queue)-1] {
this.queue = append(this.queue, val)
} else if val > this.queue[0] {
this.queue = append([]int{val}, this.queue...)
} else {
for i := len(this.queue) - 2; i >= 0; i-- {
if val <= this.queue[i] {
this.queue = append(this.queue[0:i+1], append([]int{val}, this.queue[i+1:]...)...)
break
}
}
}
}
var ret int
if len(this.queue) >= this.k {
this.queue = this.queue[0:this.k]
ret = this.queue[this.k-1]
} else {
ret = -1
}
return ret
}
/**
* Your KthLargest object will be instantiated and called as such:
* obj := Constructor(k, nums);
* param_1 := obj.Add(val);
*/
<file_sep>/algorithms/_55_Jump_Game/answer.go
package _55_Jump_Game
import "math"
// canJump reports whether the last index of nums is reachable from index 0
// (LeetCode 55). It delegates to the O(n) greedy variant; the DP variant is
// kept in this file for reference.
func canJump(nums []int) bool {
	// return canJumpDP(nums)
	return canJumpGreedy(nums)
}
// canJumpDP solves Jump Game bottom-up: reachable[i] records whether the last
// index can be reached starting from position i. O(n²) worst case.
func canJumpDP(nums []int) bool {
	n := len(nums)
	reachable := make([]bool, n)
	reachable[n-1] = true // the last index trivially reaches itself
	for i := n - 2; i >= 0; i-- {
		// furthest position a jump from i may land on, clamped to the end
		furthest := int(math.Min(float64(i+nums[i]), float64(n-1)))
		for j := i + 1; j <= furthest; j++ {
			if reachable[j] {
				reachable[i] = true
				break
			}
		}
	}
	return reachable[0]
}
// canJumpGreedy walks forward keeping the furthest index reachable so far;
// the last index is reachable iff the walk never falls behind. O(n).
func canJumpGreedy(nums []int) bool {
	if len(nums) == 0 {
		return false // matches the original backward scan on empty input
	}
	reach := 0
	for i, step := range nums {
		if i > reach {
			return false // position i itself is unreachable
		}
		if i+step > reach {
			reach = i + step
		}
	}
	return true
}
<file_sep>/algorithms/_316_Remove_Duplicate_Letters/answer.go
package _316_Remove_Duplicate_Letters
import "strings"
// removeDuplicateLetters returns the lexicographically smallest subsequence of
// s containing each distinct letter exactly once (LeetCode 316).
// Greedy monotonic stack: a larger stack top is popped when it still occurs
// later in s and the incoming letter is not already placed.
func removeDuplicateLetters(s string) string {
	// count[c] = occurrences of letter c not yet consumed from s
	count := make([]int, 26)
	for _, b := range s {
		count[b-'a'] += 1
	}
	stackCount := make([]int32, 26) // 1 if the letter currently sits on the stack
	stack := make([]int32, len(s))
	index := -1 // top-of-stack index; -1 means empty
	for _, b := range s {
		// pop tops larger than b that will reappear later, but only while b
		// itself is not already on the stack
		for index != -1 && stack[index] > b && count[stack[index]-'a'] > 0 && stackCount[b-'a'] == 0 {
			stackCount[stack[index]-'a'] -= 1
			index--
		}
		if stackCount[b-'a'] == 0 {
			index++
			stack[index] = b
			stackCount[b-'a'] += 1
		}
		count[b-'a'] -= 1 // b is now consumed
	}
	builder := strings.Builder{}
	for i := 0; i <= index; i++ {
		builder.WriteByte(byte(stack[i]))
	}
	return builder.String()
}
<file_sep>/algorithms/_581_Shortest_Unsorted_Continuous_Subarray/answer_test.go
package _581_Shortest_Unsorted_Continuous_Subarray
import "testing"
// testCase pairs an input array with the expected length of the shortest
// unsorted continuous subarray.
type testCase struct {
	input  []int
	output int
}
// TestFindUnsortedSubarray table-tests findUnsortedSubarray, covering a mixed
// array, an already-sorted array, a fully reversed array, and a single element.
func TestFindUnsortedSubarray(t *testing.T) {
	cases := []testCase{
		{
			input:  []int{2, 6, 4, 8, 10, 9, 15},
			output: 5,
		},
		{
			input:  []int{1, 2, 3, 4},
			output: 0,
		},
		{
			input:  []int{4, 3, 2, 1},
			output: 4,
		},
		{
			input:  []int{1},
			output: 0,
		},
	}
	for _, c := range cases {
		if x := findUnsortedSubarray(c.input); x != c.output {
			t.Errorf("output should be \"%v\" instead of \"%v\" with input=\"%v\"", c.output, x, c.input)
		}
	}
}
<file_sep>/algorithms/_11_Container_With_Most_Water/answer.go
package _11_Container_With_Most_Water
// maxArea computes the largest water-container area (LeetCode 11) with the
// classic two-pointer scan: area is width times the shorter wall, and the
// shorter side is moved inward each step.
func maxArea(height []int) int {
	best := 0
	left, right := 0, len(height)-1
	for left < right {
		low := height[left]
		if height[right] < low {
			low = height[right]
		}
		if area := (right - left) * low; area > best {
			best = area
		}
		// advance the shorter wall (ties move the left pointer)
		if height[left] <= height[right] {
			left++
		} else {
			right--
		}
	}
	return best
}
<file_sep>/algorithms/_445_Add_Two_Numbers_2/answer_test.go
package _445_Add_Two_Numbers_2
import (
"fmt"
"testing"
)
// TestAddTwoNumbers feeds 7243 + 564 (most-significant digit first, per
// LeetCode 445) and 0 + 0; results are printed, not asserted.
func TestAddTwoNumbers(t *testing.T) {
	l1 := &ListNode{
		Val: 7,
		Next: &ListNode{
			Val: 2,
			Next: &ListNode{
				Val: 4,
				Next: &ListNode{
					Val:  3,
					Next: nil,
				},
			},
		},
	}
	l2 := &ListNode{
		Val: 5,
		Next: &ListNode{
			Val: 6,
			Next: &ListNode{
				Val:  4,
				Next: nil,
			},
		},
	}
	ret := addTwoNumbers(l1, l2)
	fmt.Println(ret)
	l3 := &ListNode{
		Val:  0,
		Next: nil,
	}
	l4 := &ListNode{
		Val:  0,
		Next: nil,
	}
	ret1 := addTwoNumbers(l3, l4)
	fmt.Println(ret1)
}
<file_sep>/algorithms/_138_Copy_List_with_Random_Pointer/answer.go
package _138_Copy_List_with_Random_Pointer
// Node is a singly linked list node with an extra Random pointer that may
// reference any node in the list or be nil (LeetCode 138).
type Node struct {
	Val    int
	Next   *Node
	Random *Node
}
// copyRandomList deep-copies a list with random pointers, using a map from
// original node to its clone to resolve cycles and shared targets.
func copyRandomList(head *Node) *Node {
	nodeMap := make(map[*Node]*Node)
	return copyRandomListDFS(head, nodeMap)
}
// copyRandomListDFS returns the clone of node, creating it (and, recursively,
// the clones its Next/Random reach) on first visit. nodeMap memoizes clones so
// every original node maps to exactly one copy even across cycles.
func copyRandomListDFS(node *Node, nodeMap map[*Node]*Node) *Node {
	if node == nil {
		return nil
	}
	if _, ok := nodeMap[node]; !ok {
		tmp := &Node{Val: node.Val}
		// register the clone before recursing so cycles terminate
		nodeMap[node] = tmp
		tmp.Next = copyRandomListDFS(node.Next, nodeMap)
		tmp.Random = copyRandomListDFS(node.Random, nodeMap)
	}
	return nodeMap[node]
}
<file_sep>/algorithms/_99_Recover_Binary_Search_Tree/answer_test.go
package _99_Recover_Binary_Search_Tree
import (
"fmt"
"testing"
)
// TestRecoverTree runs recoverTree on two small broken BSTs and prints the
// node values afterwards; no assertions are made.
func TestRecoverTree(t *testing.T) {
	var (
		root *TreeNode
	)
	root = &TreeNode{Val: 1, Left: &TreeNode{Val: 3, Right: &TreeNode{Val: 2}}}
	recoverTree(root)
	fmt.Println(root.Val, root.Left.Val, root.Left.Right.Val)
	root = &TreeNode{Val: 3, Left: &TreeNode{Val: 1}, Right: &TreeNode{Val: 4, Left: &TreeNode{Val: 2}}}
	recoverTree(root)
	fmt.Println(root.Val, root.Left.Val, root.Right.Val, root.Right.Left.Val)
}
<file_sep>/algorithms/_1171_Remove_Zero_Sum_Consecutive_Nodes_from_Linked_List/answer.go
package _1171_Remove_Zero_Sum_Consecutive_Nodes_from_Linked_List
// ListNode is a singly linked list node (LeetCode 1171).
type ListNode struct {
	Val  int
	Next *ListNode
}
// removeZeroSumSublists repeatedly deletes consecutive sublists summing to
// zero (LeetCode 1171). It scans with a running prefix sum and a map from
// prefix sum to the node where it last occurred; a repeated sum means the
// nodes in between sum to zero. After every deletion the scan restarts from
// the (possibly new) head with a fresh map — correct, though O(n²) worst case.
func removeZeroSumSublists(head *ListNode) *ListNode {
	if head == nil {
		return nil
	}
	sum, p, m := 0, head, make(map[int]*ListNode)
	for p != nil {
		sum += p.Val
		if sum == 0 {
			// the whole prefix up to p sums to zero: drop it entirely
			head = p.Next
			p = head
			m = make(map[int]*ListNode)
		} else {
			if node, ok := m[sum]; ok {
				// nodes after `node` up to and including p sum to zero
				node.Next = p.Next
				p = head
				sum, m = 0, make(map[int]*ListNode)
			} else {
				m[sum] = p
				p = p.Next
			}
		}
	}
	return head
}
<file_sep>/algorithms/_189_Rotate_Array/answer_test.go
package _189_Rotate_Array
import (
"testing"
"github.com/shadas/leetcode_notes/utils/array"
)
// TestRotate rotates a 7-element array right by 3 and asserts the in-place
// result via the project's IsIntArrayEqual helper.
func TestRotate(t *testing.T) {
	var (
		nums []int
	)
	nums = []int{1, 2, 3, 4, 5, 6, 7}
	if rotate(nums, 3); !array.IsIntArrayEqual(nums, []int{5, 6, 7, 1, 2, 3, 4}) {
		t.Errorf("wrong ret with %v", nums)
	}
}
<file_sep>/algorithms/_108_Convert_Sorted_Array_to_Binary_Search_Tree/answer.go
package _108_Convert_Sorted_Array_to_Binary_Search_Tree
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// sortedArrayToBST converts an ascending array into a height-balanced BST
// (LeetCode 108) by recursive midpoint splitting.
func sortedArrayToBST(nums []int) *TreeNode {
	return sortedArrayToBSTRecursion(nums)
}
// sortedArrayToBSTRecursion builds a height-balanced BST from the sorted
// slice: the middle element becomes the root, the halves become the subtrees.
// An empty slice yields a nil subtree, so no explicit right-side guard is
// needed (nums[mid+1:] is a valid empty slice when mid is the last index).
func sortedArrayToBSTRecursion(nums []int) *TreeNode {
	if len(nums) == 0 {
		return nil
	}
	mid := len(nums) / 2
	node := &TreeNode{Val: nums[mid]}
	node.Left = sortedArrayToBSTRecursion(nums[:mid])
	node.Right = sortedArrayToBSTRecursion(nums[mid+1:])
	return node
}
<file_sep>/algorithms/_875_Koko_Eating_Bananas/answer.go
package _875_Koko_Eating_Bananas
import "math"
// minEatingSpeed binary-searches the smallest eating speed k such that all
// piles fit into H hours (LeetCode 875). The loop condition re-derives mid
// each pass and stops once mid stabilizes, i.e. when min == max.
func minEatingSpeed(piles []int, H int) int {
	var (
		min = 1
		max = math.MaxInt32
		mid int
	)
	for mid != (min+max)>>1 {
		mid = (min + max) >> 1
		if eatOver(piles, mid, H) {
			max = mid // mid is fast enough; try slower
		} else {
			min = mid + 1 // mid is too slow
		}
	}
	return mid
}
// eatOver reports whether every pile can be finished within H hours at a
// speed of k bananas per hour; each pile costs ceil(pile/k) whole hours
// (leftover capacity never carries over to the next pile).
func eatOver(piles []int, k, H int) bool {
	total := 0
	for _, pile := range piles {
		// ceil(pile/k). The previous `pile/k + 1` over-counted one hour
		// whenever pile was an exact multiple of k (e.g. pile=8, k=2 gave 5
		// hours instead of 4), which inflated minEatingSpeed's answer.
		total += (pile + k - 1) / k
		if total > H {
			return false // already over budget: stop early
		}
	}
	return total <= H
}
<file_sep>/algorithms/_973_K_Closest_Points_to_Origin/answer.go
package _973_K_Closest_Points_to_Origin
import "container/heap"
// kClosest returns the k points closest to the origin (LeetCode 973).
func kClosest(points [][]int, k int) [][]int {
	return kClosestWithPQ(points, k)
}
// top k selection: a heap / priority queue suffices
// kClosestWithPQ pushes every point onto a min-heap keyed by squared distance
// and pops the k nearest. O(n log n) build, O(k log n) extraction.
func kClosestWithPQ(points [][]int, k int) [][]int {
	h := PQ{}
	for _, point := range points {
		heap.Push(&h, point)
	}
	ret := make([][]int, k)
	for i := 0; i < k; i++ {
		ret[i] = heap.Pop(&h).([]int)
	}
	return ret
}
// Item pairs a point with its precomputed squared distance to the origin.
type Item struct {
	Point []int
	Val   int
}

// PQ is a min-heap of Items ordered by squared distance, implementing
// container/heap's Interface.
type PQ []*Item

func (pq PQ) Len() int {
	return len(pq)
}
func (pq PQ) Swap(i, j int) {
	pq[i], pq[j] = pq[j], pq[i]
}
func (pq PQ) Less(i, j int) bool {
	return pq[i].Val < pq[j].Val
}

// x = []int, len=2
// Push caches x²+y² as the item's priority so Less never recomputes it.
func (pq *PQ) Push(x interface{}) {
	point := x.([]int)
	item := &Item{Point: point, Val: point[0]*point[0] + point[1]*point[1]}
	*pq = append(*pq, item)
}

// Pop removes the slice's last element (heap internals have already moved the
// minimum there) and returns only the point, not the wrapping Item.
func (pq *PQ) Pop() interface{} {
	old := *pq
	n := len(old)
	item := old[n-1]
	*pq = old[0 : n-1]
	return item.Point
}
<file_sep>/algorithms/_258_Add_Digits/answer.go
package _258_Add_Digits
// addDigits repeatedly sums the digits of num until one digit remains
// (LeetCode 258), via the O(1) digital-root formula.
func addDigits(num int) int {
	return addDightsSmart(num)
}
// addDightsSmart computes the digital root: 1 + (num-1) mod 9.
// For num == 0 Go's (-1)%9 is -1, so the result is 0 as required.
// (Name keeps the original "Dights" typo; sibling caller depends on it.)
func addDightsSmart(num int) int {
	return 1 + (num-1)%9
}
<file_sep>/algorithms/_290_Word_Pattern/answer.go
package _290_Word_Pattern
import "strings"
// wordPattern reports whether str follows pattern bijectively (LeetCode 290):
// equal pattern letters must map to equal words, and distinct letters must
// map to distinct words.
func wordPattern(pattern string, str string) bool {
	// pm maps each pattern letter to the list of positions where it occurs
	var pm = make(map[rune][]int)
	for idx, b := range pattern {
		idxs, _ := pm[b]
		idxs = append(idxs, idx)
		pm[b] = idxs
	}
	strs := strings.Split(str, " ")
	// pattern is assumed to be ASCII letters, so len(pattern) == letter count
	if len(strs) != len(pattern) {
		return false
	}
	var diffWord []string
	for _, idxs := range pm {
		// all positions of one letter must carry the same word
		tmp := strs[idxs[0]]
		for _, idx := range idxs {
			if strs[idx] != tmp {
				return false
			}
		}
		// and that word must not already belong to another letter
		for _, dw := range diffWord {
			if tmp == dw {
				return false
			}
		}
		diffWord = append(diffWord, tmp)
	}
	return true
}
<file_sep>/algorithms/_222_Count_Complete_Tree_Nodes/answer.go
package _222_Count_Complete_Tree_Nodes
// TreeNode is a binary tree node (LeetCode 222).
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}
// countNodes counts the nodes of a (complete) binary tree by full traversal;
// it does not exploit completeness, so it runs in O(n).
func countNodes(root *TreeNode) int {
	return countNodesRecursion(root)
}
// countNodesRecursion counts the nodes in the subtree rooted at root:
// zero for a nil subtree, otherwise both child counts plus the root itself.
func countNodesRecursion(root *TreeNode) int {
	if root == nil {
		return 0
	}
	left := countNodesRecursion(root.Left)
	right := countNodesRecursion(root.Right)
	return left + right + 1
}
<file_sep>/algorithms/_209_Minimum_Size_Subarray_Sum/answer.go
package _209_Minimum_Size_Subarray_Sum
import (
"math"
)
// minSubArrayLen returns the length of the shortest contiguous subarray whose
// sum is >= target, or 0 if none exists (LeetCode 209).
func minSubArrayLen(target int, nums []int) int {
	return minSubArrayLenWithSlidingWindow(target, nums)
}
// minSubArrayLenWithSlidingWindow maintains a half-open window [p, q) with
// running sum `sum`. When the sum reaches target, the window length is
// recorded and the window shrinks from the left (or grows on exact equality);
// otherwise it grows to the right. O(n) overall.
func minSubArrayLenWithSlidingWindow(target int, nums []int) int {
	if len(nums) == 0 {
		return 0
	}
	var (
		p   int
		q   int = 1
		sum int = nums[p]
		ret int = math.MaxInt32
	)
	for p < len(nums) || q < len(nums) {
		//fmt.Printf("p=%d, q=%d, sum=%d\n", p, q, sum)
		if target <= sum {
			// window qualifies: record its length if it is the shortest so far
			tmp := q - p
			if tmp < ret {
				ret = tmp
			}
			if target == sum {
				if q >= len(nums) {
					break
				} else {
					sum += nums[q]
					q += 1
				}
			} else { // sum is strictly greater than target: shrink from the left
				if p >= len(nums) {
					break
				} else {
					sum -= nums[p]
					p += 1
				}
			}
		} else if target > sum {
			if q >= len(nums) {
				break
			}
			sum += nums[q]
			q += 1
		}
	}
	// ret untouched means no window ever reached target
	if ret == math.MaxInt32 {
		ret = 0
	}
	return ret
}
<file_sep>/algorithms/_67_Add_Binary/answer.go
package _67_Add_Binary
import (
"fmt"
)
// addBinary adds two binary strings and returns their sum as a binary string
// (LeetCode 67). It normalizes so a is the longer operand, adds the
// overlapping low-order digits with a carry flag, then propagates the carry
// through a's remaining high-order digits.
func addBinary(a string, b string) string {
	la, lb := len(a), len(b)
	// let a >= b
	if la < lb {
		la, lb = lb, la
		a, b = b, a
	}
	ca, cb := []byte(a), []byte(b)
	var isAdd bool // carry flag
	var ret []byte
	// phase 1: add the lb overlapping digits from the right
	for i := lb - 1; i >= 0; i-- {
		var va, vb int
		var v string
		if string(ca[i+la-lb]) == "1" {
			va = 1
		}
		if string(cb[i]) == "1" {
			vb = 1
		}
		sum := va + vb
		if isAdd {
			sum++
		}
		// sum ranges 0..3: low bit becomes the digit, high bit the carry
		switch sum {
		case 0:
			v = "0"
			isAdd = false
		case 1:
			v = "1"
			isAdd = false
		case 2:
			v = "0"
			isAdd = true
		case 3:
			v = "1"
			isAdd = true
		}
		ret = append([]byte(v), ret...)
	}
	// phase 2: propagate the carry through a's remaining high digits
	for i := la - lb - 1; i >= 0; i-- {
		var v int
		if string(ca[i]) == "1" {
			v = 1
		}
		if isAdd {
			v = v + 1
			if v == 2 {
				isAdd = true
				v = 0
			} else {
				isAdd = false
			}
		}
		ret = append([]byte(fmt.Sprintf("%v", v)), ret...)
	}
	// a final leftover carry adds a leading 1
	if isAdd {
		ret = append([]byte("1"), ret...)
	}
	return string(ret)
}
<file_sep>/algorithms/_203_Remove_Linked_List_Elements/answer.go
package _203_Remove_Linked_List_Elements
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// removeElements deletes every node whose value equals val (LeetCode 203).
// A dummy head makes removing the original first node uniform; prev always
// points at the last node known to survive.
func removeElements(head *ListNode, val int) *ListNode {
	dummy := &ListNode{
		Next: head,
	}
	prev := dummy
	for node := head; node != nil; node = node.Next {
		if node.Val == val {
			prev.Next = node.Next // unlink node
		} else {
			prev = node
		}
	}
	return dummy.Next
}
<file_sep>/algorithms/_303_Range_Sum_Query_Immutable/answer.go
package _303_Range_Sum_Query_Immutable
// NumArray answers immutable range-sum queries in O(1) after O(n) setup
// (LeetCode 303). result[i] holds the prefix sum nums[0] + ... + nums[i].
type NumArray struct {
	result []int
}

// Constructor builds the prefix-sum table. The input slice is copied first:
// the previous version wrote the prefix sums straight into nums, silently
// mutating the caller's data.
func Constructor(nums []int) NumArray {
	prefix := make([]int, len(nums))
	copy(prefix, nums)
	for i := 1; i < len(prefix); i++ {
		prefix[i] += prefix[i-1]
	}
	return NumArray{
		result: prefix,
	}
}

// SumRange returns the sum of nums[i..j] inclusive; an inverted range (i > j)
// yields -1, matching the original behavior.
func (this *NumArray) SumRange(i int, j int) int {
	if i > j {
		return -1
	}
	if i == 0 {
		return this.result[j]
	}
	return this.result[j] - this.result[i-1]
}
/**
* Your NumArray object will be instantiated and called as such:
* obj := Constructor(nums);
* param_1 := obj.SumRange(i,j);
*/
<file_sep>/algorithms/_61_Rotate_List/answer.go
package _61_Rotate_List
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// ListNode is a singly linked list node (LeetCode 61).
type ListNode struct {
	Val  int
	Next *ListNode
}
// rotateRight rotates the list right by k places (LeetCode 61). It collects
// all values into a slice, rotates the slice by k mod length, and rebuilds a
// brand-new list from the rotated values (the original nodes are not reused).
func rotateRight(head *ListNode, k int) *ListNode {
	var (
		valueList    = []int{}
		newValueList []int
		offset       int
		tmpHead      *ListNode
		ret          *ListNode
	)
	if head == nil {
		return nil
	}
	// collect the values in order
	tmpHead = head
	for {
		valueList = append(valueList, tmpHead.Val)
		if tmpHead.Next == nil {
			break
		} else {
			tmpHead = tmpHead.Next
		}
	}
	// a rotation by a multiple of the length is a no-op
	offset = k % len(valueList)
	if offset == 0 {
		return head
	}
	// rotated order: last `offset` values first, then the rest
	newValueList = append(valueList[len(valueList)-offset:], valueList[0:len(valueList)-offset]...)
	ret = &ListNode{}
	tmpHead = ret
	for k, value := range newValueList {
		tmpHead.Val = value
		if k != len(newValueList)-1 {
			tmpHead.Next = &ListNode{}
			tmpHead = tmpHead.Next
		}
	}
	return ret
}
<file_sep>/algorithms/_79_Word_Search/answer_test.go
package _79_Word_Search
import "testing"
// TestExist checks exist on the LeetCode 79 sample boards, including a case
// that requires backtracking ("ABCESEEEFS"); all expectations are true.
func TestExist(t *testing.T) {
	var (
		board [][]byte
		word  string
	)
	board = [][]byte{{'A', 'B', 'C', 'E'}, {'S', 'F', 'C', 'S'}, {'A', 'D', 'E', 'E'}}
	word = "ABCCED"
	if b := exist(board, word); !b {
		t.Errorf("should be true")
	}
	word = "SEE"
	if b := exist(board, word); !b {
		t.Errorf("should be true")
	}
	board = [][]byte{{'C', 'A', 'A'}, {'A', 'A', 'A'}, {'B', 'C', 'D'}}
	word = "AAB"
	if b := exist(board, word); !b {
		t.Errorf("should be true")
	}
	board = [][]byte{{'A', 'B', 'C', 'E'}, {'S', 'F', 'E', 'S'}, {'A', 'D', 'E', 'E'}}
	word = "ABCESEEEFS"
	if b := exist(board, word); !b {
		t.Errorf("should be true")
	}
}
<file_sep>/algorithms/_56_Merge_Intervals/answer_test.go
package _56_Merge_Intervals
import (
"fmt"
"testing"
)
// TestMerge runs merge on overlapping, touching, and nested intervals and
// prints the results; no assertions are made.
func TestMerge(t *testing.T) {
	var (
		intervals, ret [][]int
	)
	intervals = [][]int{{1, 3}, {2, 6}, {8, 10}, {15, 18}}
	ret = merge(intervals)
	fmt.Println(ret)
	intervals = [][]int{{1, 4}, {4, 5}}
	ret = merge(intervals)
	fmt.Println(ret)
	intervals = [][]int{{1, 4}, {1, 5}}
	ret = merge(intervals)
	fmt.Println(ret)
}
<file_sep>/algorithms/_28_Implement_StrStr/answer_test.go
package _28_Implement_StrStr
import (
"testing"
)
// TestStrStr checks strStr for a middle match, a miss, and a full-string match.
func TestStrStr(t *testing.T) {
	if ret := strStr("hello", "ll"); ret != 2 {
		t.Error("Error not 2, get", ret)
	}
	if ret := strStr("aaaaa", "bba"); ret != -1 {
		t.Error("Error not -1, get", ret)
	}
	if ret := strStr("mississippi", "mississippi"); ret != 0 {
		t.Error("Error not 0, get", ret)
	}
}
<file_sep>/algorithms/_125_Valid_Palindrome/answer.go
package _125_Valid_Palindrome
import "strings"
// isPalindrome reports whether s reads the same forwards and backwards when
// only alphanumeric characters are considered, case-insensitively
// (LeetCode 125). Two pointers converge, skipping non-word characters.
func isPalindrome(s string) bool {
	if len(s) < 2 {
		return true
	}
	left, right := 0, len(s)-1
	for left <= right {
		switch {
		case !isWord(s[left]):
			left++
		case !isWord(s[right]):
			right--
		case strings.ToLower(string(s[left])) != strings.ToLower(string(s[right])):
			return false
		default:
			left++
			right--
		}
	}
	return true
}
// isWord reports whether b is an ASCII letter or digit.
func isWord(b byte) bool {
	switch {
	case b >= 'A' && b <= 'Z', b >= 'a' && b <= 'z', b >= '0' && b <= '9':
		return true
	default:
		return false
	}
}
<file_sep>/algorithms/_142_Linked_List_Cycle_2/answer.go
package _142_Linked_List_Cycle_2
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// detectCycle returns the node where the cycle begins, or nil if the list is
// acyclic (LeetCode 142).
func detectCycle(head *ListNode) *ListNode {
	return detectCycleWithPointer(head)
}
// detectCycleWithPointer is Floyd's tortoise-and-hare run from a dummy node in
// front of head: fast moves two steps per slow's one; if they meet, resetting
// slow to the dummy and advancing both one step at a time makes them meet
// again exactly at the cycle entry (the dummy shifts both walks uniformly, so
// the classic argument still holds).
func detectCycleWithPointer(head *ListNode) *ListNode {
	var (
		hhead = &ListNode{
			Next: head,
		}
		fast, slow = hhead, hhead
	)
	for {
		// reaching a nil link means the list terminates: no cycle
		if fast.Next == nil || fast.Next.Next == nil {
			return nil
		}
		fast = fast.Next.Next
		slow = slow.Next
		if fast == slow {
			break
		}
	}
	// second phase: equal-speed walk from the dummy and from the meeting point
	slow = hhead
	for {
		if slow == fast {
			return slow
		}
		slow = slow.Next
		fast = fast.Next
	}
	return nil
}
<file_sep>/algorithms/_56_Merge_Intervals/answer.go
package _56_Merge_Intervals
import (
"sort"
)
// merge collapses overlapping intervals into disjoint ones (LeetCode 56).
func merge(intervals [][]int) [][]int {
	return mergeAfterSort(intervals)
}
// mergeAfterSort sorts intervals by start (then end) and sweeps once, keeping
// a current interval `pre` that is extended, left unchanged (when the next
// interval nests inside it), or flushed to the result.
func mergeAfterSort(intervals [][]int) [][]int {
	if len(intervals) == 0 || len(intervals) == 1 {
		return intervals
	}
	sort.Sort(X(intervals))
	var (
		ret [][]int
		pre []int
	)
	pre = intervals[0]
	for i := 1; i < len(intervals); i++ {
		cur := intervals[i]
		if needMerge(pre, cur) {
			// cur overlaps pre's tail: extend pre in place
			pre[1] = cur[1]
		} else if needKeep(pre, cur) {
			// cur is fully contained in pre: drop it
			continue
		} else {
			// disjoint: flush pre and start a new current interval
			ret = append(ret, pre)
			pre = cur
		}
	}
	ret = append(ret, pre)
	return ret
}
// needKeep reports whether cur is fully contained in pre, so pre can stay
// unchanged and cur can be discarded.
func needKeep(pre, cur []int) bool {
	return pre[0] <= cur[0] && cur[1] <= pre[1]
}
// needMerge reports whether cur overlaps pre in a way that requires extending
// pre's right end to cur[1]: shared endpoint, or pre's end falling inside cur.
func needMerge(pre, cur []int) bool {
	if pre[0] == cur[0] || pre[1] == cur[1] {
		return true
	}
	return cur[0] <= pre[1] && pre[1] <= cur[1]
}
// X adapts [][]int to sort.Interface, ordering intervals by start ascending
// and breaking ties by end ascending.
type X [][]int

func (x X) Len() int {
	return len(x)
}
func (x X) Less(i, j int) bool {
	if x[i][0] < x[j][0] {
		return true
	} else if x[i][0] > x[j][0] {
		return false
	} else {
		// equal starts: shorter (smaller end) first
		if x[i][1] <= x[j][1] {
			return true
		} else {
			return false
		}
	}
}
func (x X) Swap(i, j int) {
	x[i], x[j] = x[j], x[i]
}
<file_sep>/algorithms/_520_Detect_Capital/answer.go
package _520_Detect_Capital
// detectCapitalUse reports whether word uses capitals legally (LeetCode 520):
// all caps ("USA"), all lower ("leetcode"), or only the first letter
// capitalized ("Google"). Three flags are maintained in a single pass and the
// scan aborts early once all are falsified.
func detectCapitalUse(word string) bool {
	var (
		allCapital       = true
		allNotCapital    = true
		onlyFirstCapital = true
	)
	for idx, b := range word {
		if idx == 0 {
			// the first rune decides which patterns remain possible
			if b >= 'a' && b <= 'z' {
				onlyFirstCapital = false
				allCapital = false
			} else {
				allNotCapital = false
			}
		} else {
			if allCapital && b >= 'a' && b <= 'z' {
				allCapital = false
			}
			if allNotCapital && b >= 'A' && b <= 'Z' {
				allNotCapital = false
			}
			if onlyFirstCapital && b >= 'A' && b <= 'Z' {
				onlyFirstCapital = false
			}
		}
		if !allCapital && !allNotCapital && !onlyFirstCapital {
			return false
		}
	}
	return allCapital || allNotCapital || onlyFirstCapital
}
<file_sep>/algorithms/_66_Plus_One/answer_test.go
package _66_Plus_One
import (
. "leetcode_notes/utils/array"
"testing"
)
// TestPlusOne checks the carry-propagation case 8999 + 1 = 9000.
func TestPlusOne(t *testing.T) {
	if ret := plusOne([]int{8, 9, 9, 9}); !IsIntArrayEqual(ret, []int{9, 0, 0, 0}) {
		t.Errorf("error with return: %v", ret)
	}
}
<file_sep>/algorithms/_2_Add_Two_Numbers/answer_test.go
package _2_Add_Two_Numbers
import (
"fmt"
"strings"
"testing"
)
// TestAddTwoNumbers verifies 342 + 465 = 807 (digits stored least-significant
// first, LeetCode 2) by walking the result against the expected digits.
// NOTE(review): the second loop builds retstr starting from wherever the
// first loop left `ret`, so the error message may not show the full list.
func TestAddTwoNumbers(t *testing.T) {
	l1 := &ListNode{
		Val: 2,
		Next: &ListNode{
			Val: 4,
			Next: &ListNode{
				Val:  3,
				Next: nil,
			},
		},
	}
	l2 := &ListNode{
		Val: 5,
		Next: &ListNode{
			Val: 6,
			Next: &ListNode{
				Val:  4,
				Next: nil,
			},
		},
	}
	ret := addTwoNumbers(l1, l2)
	iswrong := false
	want := []int{7, 0, 8}
	// compare the result list digit by digit against want
	for i := 0; i < 3; i++ {
		if ret.Val == want[i] {
			if i == 2 {
				if ret.Next == nil {
					break
				}
			} else {
				if ret.Next != nil {
					ret = ret.Next
				} else {
					iswrong = true
					break
				}
			}
		} else {
			iswrong = true
			break
		}
	}
	// render the (remaining) result list for the failure message
	var rets []string
	for {
		rets = append(rets, fmt.Sprintf("%v", ret.Val))
		if ret.Next != nil {
			ret = ret.Next
		} else {
			break
		}
	}
	retstr := strings.Join(rets, ",")
	if iswrong {
		t.Errorf("Your answer is %v, it should be %v", "["+retstr+"]", want)
	}
}
<file_sep>/algorithms/_704_Binary_Search/answer_test.go
package _704_Binary_Search
import (
"fmt"
"testing"
)
// TestSearch runs search with a target (13) absent from the array and prints
// the result; no assertion is made.
func TestSearch(t *testing.T) {
	var (
		nums   []int
		target int
	)
	nums = []int{-1, 0, 3, 5, 9, 12}
	target = 13
	fmt.Println(search(nums, target))
}
<file_sep>/algorithms/_530_Minimum_Absolute_Difference_in_BST/answer_test.go
package _530_Minimum_Absolute_Difference_in_BST
import "testing"
// TestGetMinimumDifference checks getMinimumDifference on three BSTs whose
// minimum absolute differences are 1, 9, and 37 respectively.
func TestGetMinimumDifference(t *testing.T) {
	var (
		root *TreeNode
		ret  int
	)
	root = &TreeNode{Val: 1, Left: &TreeNode{Val: 0}, Right: &TreeNode{Val: 48, Left: &TreeNode{Val: 12}, Right: &TreeNode{Val: 49}}}
	if ret = getMinimumDifference(root); ret != 1 {
		t.Errorf("wrong ret with %d", ret)
	}
	root = &TreeNode{Val: 236, Left: &TreeNode{Val: 104, Right: &TreeNode{Val: 227}}, Right: &TreeNode{Val: 701, Right: &TreeNode{Val: 911}}}
	if ret = getMinimumDifference(root); ret != 9 {
		t.Errorf("wrong ret with %d", ret)
	}
	root = &TreeNode{Val: 2, Right: &TreeNode{Val: 4443, Left: &TreeNode{Val: 1329, Right: &TreeNode{Val: 2917, Right: &TreeNode{Val: 4406}}}}}
	if ret = getMinimumDifference(root); ret != 37 {
		t.Errorf("wrong ret with %d", ret)
	}
}
<file_sep>/algorithms/_109_Convert_Sorted_List_to_Binary_Search_Tree/answer.go
package _109_Convert_Sorted_List_to_Binary_Search_Tree
import "fmt"
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// printList dumps the values of list l to stdout (debugging helper).
func printList(l *ListNode) {
	vals := []int{}
	for node := l; node != nil; node = node.Next {
		vals = append(vals, node.Val)
	}
	fmt.Println(vals)
}
// sortedListToBST converts an ascending linked list into a height-balanced
// BST (LeetCode 109).
func sortedListToBST(head *ListNode) *TreeNode {
	return sortedListToBSTRecursion(head)
}
// sortedListToBSTRecursion splits the list at its middle node (cutList), makes
// that value the root, and recurses on the two detached halves.
func sortedListToBSTRecursion(head *ListNode) *TreeNode {
	if head == nil {
		return nil
	}
	var (
		ret         *TreeNode
		val, l1, l2 = cutList(head)
	)
	ret = &TreeNode{
		Val: val,
	}
	if l1 != nil {
		ret.Left = sortedListToBSTRecursion(l1)
	}
	if l2 != nil {
		ret.Right = sortedListToBSTRecursion(l2)
	}
	return ret
}
// cutList finds the middle node via fast/slow pointers, returns its value,
// and splits the list around it: l1 is the (terminated) prefix before the
// middle, l2 the suffix after it. Both are nil for empty or single-node input.
func cutList(head *ListNode) (val int, l1, l2 *ListNode) {
	var (
		fast, slow = head, head
	)
	l1 = head
	if head == nil {
		val = 0
		l1 = nil
		l2 = nil
		return
	}
	if head.Next == nil {
		val = head.Val
		l1 = nil
		l2 = nil
		return
	}
	// slow lands on the middle node when fast runs off the end
	for fast != nil && fast.Next != nil {
		fast = fast.Next.Next
		slow = slow.Next
	}
	val = slow.Val
	l2 = slow.Next
	// walk to the node just before slow and cut the prefix there
	fast = head
	for fast.Next != slow && fast.Next != nil {
		fast = fast.Next
	}
	fast.Next = nil
	return
}
<file_sep>/algorithms/_1609_Even_Odd_Tree/answer.go
package _1609_Even_Odd_Tree
import "math"
// TreeNode is a binary tree node (LeetCode 1609).
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}
// isEvenOddTree reports whether the tree satisfies LeetCode 1609's level
// constraints: even levels hold strictly increasing odd values, odd levels
// strictly decreasing even values.
func isEvenOddTree(root *TreeNode) bool {
	return isEvenOddTreeRunLine(root)
}
// isEvenOddTreeRunLine gathers the values of every level by BFS, bucketing
// them into even-indexed and odd-indexed levels, then validates each bucket.
// NOTE(review): a nil root would panic at node.Left below — assumes the
// LeetCode guarantee of at least one node; confirm for other callers.
func isEvenOddTreeRunLine(root *TreeNode) bool {
	var (
		evenLines, oddLines [][]int
		nodeLine            = []*TreeNode{root}
		lineNum             = 0
	)
	// prepare data
	for len(nodeLine) != 0 {
		var (
			tmpNodeLine []*TreeNode
			tmpNumLine  []int
		)
		for _, node := range nodeLine {
			if node.Left != nil {
				tmpNodeLine = append(tmpNodeLine, node.Left)
			}
			if node.Right != nil {
				tmpNodeLine = append(tmpNodeLine, node.Right)
			}
			tmpNumLine = append(tmpNumLine, node.Val)
		}
		if lineNum%2 == 0 {
			evenLines = append(evenLines, tmpNumLine)
		} else {
			oddLines = append(oddLines, tmpNumLine)
		}
		lineNum++
		nodeLine = tmpNodeLine
	}
	// check data
	for _, line := range oddLines {
		if !isOddLine(line) {
			return false
		}
	}
	for _, line := range evenLines {
		if !isEvenLine(line) {
			return false
		}
	}
	return true
}
// isEvenLine validates an even-indexed level: every value must be odd and the
// sequence strictly increasing.
func isEvenLine(line []int) bool {
	prev := math.MinInt
	for i := 0; i < len(line); i++ {
		v := line[i]
		if v%2 == 0 || v <= prev {
			return false
		}
		prev = v
	}
	return true
}
// isOddLine validates an odd-indexed level: every value must be even and the
// sequence strictly decreasing.
func isOddLine(line []int) bool {
	prev := math.MaxInt
	for i := 0; i < len(line); i++ {
		v := line[i]
		if v%2 == 1 || v >= prev {
			return false
		}
		prev = v
	}
	return true
}
<file_sep>/algorithms/_658_Find_K_Closest_Element/answer_test.go
package _658_Find_K_Closest_Element
import (
"testing"
"github.com/shadas/leetcode_notes/utils/array"
)
// TestFindClosestElements checks findClosestElements on three LeetCode 658
// cases, comparing against the expected sorted windows.
func TestFindClosestElements(t *testing.T) {
	var (
		arr, ret []int
		k, x     int
	)
	arr = []int{1, 2, 3, 4, 5}
	k, x = 4, 3
	ret = findClosestElements(arr, k, x)
	if !array.IsIntArrayEqual(ret, []int{1, 2, 3, 4}) {
		t.Errorf("wrong ret is %v", ret)
	}
	arr = []int{1, 1, 1, 10, 10, 10}
	k, x = 1, 9
	ret = findClosestElements(arr, k, x)
	if !array.IsIntArrayEqual(ret, []int{10}) {
		t.Errorf("wrong ret is %v", ret)
	}
	arr = []int{1, 1, 2, 2, 2, 2, 2, 3, 3}
	k, x = 3, 3
	ret = findClosestElements(arr, k, x)
	if !array.IsIntArrayEqual(ret, []int{2, 3, 3}) {
		t.Errorf("wrong ret is %v", ret)
	}
}
<file_sep>/algorithms/_692_Top_K_Frequent_Words/answer_test.go
package _692_Top_K_Frequent_Words
import (
"testing"
"github.com/shadas/leetcode_notes/utils/array"
)
// TestTopKFrequent checks topKFrequent's frequency-then-lexicographic order
// on the two LeetCode 692 sample inputs.
func TestTopKFrequent(t *testing.T) {
	var (
		words, ret []string
		k          int
	)
	words, k = []string{"i", "love", "leetcode", "i", "love", "coding"}, 2
	ret = topKFrequent(words, k)
	if !array.IsStrArrayEqual(ret, []string{"i", "love"}) {
		t.Errorf("wrong ret=%v", ret)
	}
	words, k = []string{"the", "day", "is", "sunny", "the", "the", "the", "sunny", "is", "is"}, 4
	ret = topKFrequent(words, k)
	if !array.IsStrArrayEqual(ret, []string{"the", "is", "sunny", "day"}) {
		t.Errorf("wrong ret=%v", ret)
	}
}
// TestLess checks the Item ordering helper: equal counts fall back to
// lexicographic word order; a higher count wins outright.
func TestLess(t *testing.T) {
	var a, b *Item
	a, b = &Item{word: "i", count: 2}, &Item{word: "love", count: 2}
	if !Less(a, b) {
		t.Error("should be true")
	}
	a, b = &Item{word: "i", count: 2}, &Item{word: "love", count: 1}
	if Less(a, b) {
		t.Error("should be false")
	}
}
// TestSortUtilK smoke-tests SortUtilK on four items; the result is not
// asserted (the print helper is commented out).
func TestSortUtilK(t *testing.T) {
	var (
		s []*Item
		k int
	)
	s, k = []*Item{&Item{"the", 4}, &Item{"day", 1}, &Item{"is", 3}, &Item{"sunny", 2}}, 4
	SortUtilK(s, k)
	//printL(s)
}
<file_sep>/algorithms/_143_Reorder_List/answer.go
package _143_Reorder_List
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// reorderList rearranges L0→L1→…→Ln into L0→Ln→L1→Ln-1→… in place
// (LeetCode 143), by rewriting node values rather than relinking nodes.
func reorderList(head *ListNode) {
	reorderListWithStack(head)
}
// reorderListWithStack collects all values into a slice (used as a deque) and
// rewrites the nodes' values in place, alternately taking from the front and
// the back. The node links are never changed — only Val fields.
func reorderListWithStack(head *ListNode) {
	var (
		s    = []int{}
		tmp  = head
		flag bool // false: take from the front; true: take from the back
	)
	if head == nil {
		return
	}
	for tmp != nil {
		s = append(s, tmp.Val)
		tmp = tmp.Next
	}
	tmp = &ListNode{
		Next: head,
	}
	for len(s) > 0 {
		var value int
		if !flag { // take from the front
			value = s[0]
			s = s[1:]
		} else { // take from the back
			value = s[len(s)-1]
			s = s[:len(s)-1]
		}
		tmp.Next.Val = value
		tmp = tmp.Next
		flag = !flag // alternate direction
	}
	// NOTE(review): this reassignment only changes the local variable and has
	// no effect on the caller's list; it is effectively dead code.
	head = head.Next
	return
}
<file_sep>/algorithms/_744_Find_Smallest_Letter_Greater_Than_Target/answer_test.go
package _744_Find_Smallest_Letter_Greater_Than_Target
import (
"fmt"
"testing"
)
// TestNextGreatestLetter prints nextGreatestLetter for a wrap-around target
// ('z' → 'c') and an exact-match target ('c' → 'f'); no assertions.
func TestNextGreatestLetter(t *testing.T) {
	letters := []byte{'c', 'f', 'j'}
	target := byte('z')
	fmt.Println(string(nextGreatestLetter(letters, target)))
	target = byte('c')
	fmt.Println(string(nextGreatestLetter(letters, target)))
}
<file_sep>/algorithms/_198_House_Robber/answer.go
package _198_House_Robber
import "math"
// rob returns the maximum loot from non-adjacent houses (LeetCode 198).
// Three interchangeable solutions are kept in this file; the bottom-up DP
// (solution3) is the one actually used.
func rob(nums []int) int {
	// return solution1(len(nums)-1, nums)
	// return solution2(nums)
	return solution3(nums)
}
// solution1: plain recursion — at each house choose "rob it and skip one" or
// "skip it" and take the better. Exponential time; exceeds LeetCode's limit.
func solution1(idx int, nums []int) int {
	if idx < 0 {
		return 0
	}
	take := nums[idx] + solution1(idx-2, nums)
	skip := solution1(idx-1, nums)
	if take > skip {
		return take
	}
	return skip
}
// solution2: memoized recursion — the memo slice caches the best loot for
// each prefix, with -1 marking "not yet computed".
func solution2(nums []int) int {
	memo := make([]int, len(nums))
	for i := range memo {
		memo[i] = -1
	}
	return solution2Recursion(len(nums)-1, nums, memo)
}
// solution2Recursion is the memoized form of solution1: ret caches the best
// loot for the prefix ending at each index (-1 = not computed yet).
func solution2Recursion(idx int, nums []int, ret []int) int {
	if idx < 0 {
		return 0
	}
	if idx < len(ret) && ret[idx] != -1 {
		return ret[idx]
	}
	withHouse := float64(nums[idx] + solution2Recursion(idx-2, nums, ret))
	without := float64(solution2Recursion(idx-1, nums, ret))
	best := int(math.Max(withHouse, without))
	ret[idx] = best
	return best
}
// solution3: bottom-up DP with dp[i] = max(nums[i] + dp[i-2], dp[i-1]).
func solution3(nums []int) int {
	switch len(nums) {
	case 0:
		return 0
	case 1:
		return nums[0]
	}
	dp := make([]int, len(nums))
	dp[0] = nums[0]
	dp[1] = int(math.Max(float64(dp[0]), float64(nums[1])))
	for i := 2; i < len(nums); i++ {
		dp[i] = int(math.Max(float64(nums[i]+dp[i-2]), float64(dp[i-1])))
	}
	return dp[len(nums)-1]
}
<file_sep>/algorithms/_131_Palindrome_Partitioning/answer_test.go
package _131_Palindrome_Partitioning
import (
"fmt"
"testing"
)
// TestIsPalindromic covers empty, even-length, odd-length palindromes and two
// non-palindromes.
func TestIsPalindromic(t *testing.T) {
	if b := isPalindromic(""); !b {
		t.Error("'' should be palindromic")
	}
	if b := isPalindromic("aa"); !b {
		t.Error("aa should be palindromic")
	}
	if b := isPalindromic("aaa"); !b {
		t.Error("aaa should be palindromic")
	}
	if b := isPalindromic("acbaa"); b {
		t.Error("acbaa should not be palindromic")
	}
	if b := isPalindromic("aab"); b {
		t.Error("aab should not be palindromic")
	}
}
// TestPartition prints all palindromic partitions of "cbbbcc"; no assertions.
func TestPartition(t *testing.T) {
	var ret [][]string
	var str = "cbbbcc"
	ret = partition(str)
	fmt.Println(str, ret)
}
<file_sep>/algorithms/_210_Course_Schedule_2/answer.go
package _210_Course_Schedule_2
// findOrder returns a valid course ordering, or an empty slice if the
// prerequisites contain a cycle (LeetCode 210).
func findOrder(numCourses int, prerequisites [][]int) []int {
	return findOrderBfs(numCourses, prerequisites)
}
// findOrderBfs performs Kahn's topological sort: repeatedly take a course
// with zero remaining prerequisites and release its successors. If fewer
// than numCourses are emitted, the graph has a cycle.
func findOrderBfs(numCourses int, prerequisites [][]int) []int {
	var (
		matrix   [][]int // adjacency matrix: matrix[pre][ready] == 1
		indegree []int   // number of unmet prerequisites per course
		queue    []int   // courses currently free to take
		ret      []int
	)
	// initialize the adjacency matrix and the indegree list
	for i := 0; i < numCourses; i++ {
		tmp := []int{}
		for j := 0; j < numCourses; j++ {
			tmp = append(tmp, 0)
		}
		matrix = append(matrix, tmp)
		indegree = append(indegree, 0)
	}
	// build the adjacency matrix and the indegree list
	for _, p := range prerequisites {
		ready, pre := p[0], p[1]
		indegree[ready]++ // indegree +1
		matrix[pre][ready] = 1
	}
	// collect every node with zero indegree
	for i, degree := range indegree {
		if degree == 0 {
			queue = append(queue, i)
		}
	}
	for len(queue) != 0 {
		// dequeue
		course := queue[0]
		queue = queue[1:]
		// append to the result
		ret = append(ret, course)
		// find all successors of this node in the adjacency matrix
		for i := 0; i < numCourses; i++ {
			if matrix[course][i] != 0 {
				indegree[i]--         // decrement its indegree
				if indegree[i] == 0 { // enqueue once its indegree drops to zero
					queue = append(queue, i)
				}
			}
		}
	}
	// a shortfall means some courses were never released: cycle detected
	if len(ret) != numCourses {
		return []int{}
	}
	return ret
}
<file_sep>/algorithms/_134_Gas_Station/answer.go
package _134_Gas_Station
// canCompleteCircuit returns the start station from which the whole circuit
// can be driven, or -1 if total gas is insufficient (LeetCode 134).
// Greedy: whenever the running tank goes negative, no station up to here can
// be the start, so restart the candidate at the next station.
func canCompleteCircuit(gas []int, cost []int) int {
	total, tank, start := 0, 0, 0
	for i := range gas {
		diff := gas[i] - cost[i]
		total += diff
		tank += diff
		if tank < 0 {
			tank = 0
			start = i + 1
		}
	}
	if total < 0 {
		return -1
	}
	return start
}
<file_sep>/algorithms/_561_Array_Partition/answer.go
package _561_Array_Partition
import "sort"
// arrayPairSum maximizes the sum of min(ai, bi) over n pairs (LeetCode 561).
func arrayPairSum(nums []int) int {
	return arrayPairSumWithSort(nums)
}
// arrayPairSumWithSort sorts nums in place and sums the even-indexed values:
// after sorting, pairing neighbours makes each even index the pair minimum,
// which is the optimal pairing.
func arrayPairSumWithSort(nums []int) int {
	sort.Ints(nums)
	total := 0
	for i, v := range nums {
		if i%2 == 0 {
			total += v
		}
	}
	return total
}
<file_sep>/algorithms/_965_Univalued_Binary_Tree/answer.go
package _965_Univalued_Binary_Tree
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// isUnivalTree reports whether every node in the tree holds the same value
// (LeetCode 965); an empty tree counts as uni-valued.
func isUnivalTree(root *TreeNode) bool {
	if root == nil {
		return true
	}
	return isUnivalTreeWithValue(root, root.Val)
}
// isUnivalTreeWithValue reports whether every node in the subtree rooted at
// root carries val; a nil subtree vacuously qualifies.
func isUnivalTreeWithValue(root *TreeNode, val int) bool {
	if root == nil {
		return true
	}
	if root.Val != val {
		return false
	}
	return isUnivalTreeWithValue(root.Left, val) && isUnivalTreeWithValue(root.Right, val)
}
<file_sep>/algorithms/_24_Swap_Nodes_in_Pairs/answer.go
package _24_Swap_Nodes_in_Pairs
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// ListNode is a singly linked list node (LeetCode 24).
type ListNode struct {
	Val  int
	Next *ListNode
}
// swapPairs swaps every two adjacent nodes (LeetCode 24) by exchanging the
// Val fields of each pair; node links are left untouched, so the original
// head remains the head.
func swapPairs(head *ListNode) *ListNode {
	if head == nil {
		return nil
	}
	for node := head; node != nil && node.Next != nil; node = node.Next.Next {
		node.Val, node.Next.Val = node.Next.Val, node.Val
	}
	return head
}
<file_sep>/algorithms/_52_N_Queens_2/answer.go
package _52_N_Queens_2
// totalNQueens returns the number of distinct N-Queens solutions (LeetCode 52).
func totalNQueens(n int) int {
	return totalNQueensWithDFS(n)
}
// totalNQueensWithDFS enumerates all placements via row-by-row DFS and
// returns how many complete boards were found.
func totalNQueensWithDFS(n int) int {
	var (
		pos []int   // placement: index = row, value = column; DFS walks row by row
		ret [][]int // collected complete solutions
	)
	dfs(n, pos, &ret)
	return len(ret)
}
// abs returns the absolute value of a.
func abs(a int) int {
	if a >= 0 {
		return a
	}
	return -a
}
func dfs(n int, pos []int, ret *[][]int) {
if len(pos) == n {
newPos := make([]int, len(pos))
copy(newPos, pos)
*ret = append(*ret, newPos)
return
}
column_loop:
for i := 0; i < n; i++ { // 尝试第i列往里放
// 检查是否有同列
for _, p := range pos {
if i == p {
continue column_loop
}
}
// 判断是否有存在对角线
existDiagonal := false
for line, col := range pos {
if abs(len(pos)-line) == abs(col-i) {
existDiagonal = true
break
}
}
// 如果有,尝试下一个位置
if existDiagonal {
continue
}
// 可以安放
pos = append(pos, i)
// 尝试安放下一个
dfs(n, pos, ret)
// 回退,尝试其他可能
pos = pos[:len(pos)-1]
}
return
}
<file_sep>/algorithms/_25_Reverse_Nodes_in_k_Group/answer_test.go
package _25_Reverse_Nodes_in_k_Group
import (
"fmt"
"testing"
)
// TestReverseKGroup runs reverseKGroup on [1..5] with k=3 and [1,2] with k=2,
// printing the resulting heads; no assertions are made.
func TestReverseKGroup(t *testing.T) {
	var (
		l   *ListNode
		k   int
		ret *ListNode
	)
	l = &ListNode{
		Val: 1,
		Next: &ListNode{
			Val: 2,
			Next: &ListNode{
				Val: 3,
				Next: &ListNode{
					Val: 4,
					Next: &ListNode{
						Val:  5,
						Next: nil,
					},
				},
			},
		},
	}
	k = 3
	ret = reverseKGroup(l, k)
	fmt.Println(ret)
	l = &ListNode{
		Val: 1,
		Next: &ListNode{
			Val:  2,
			Next: nil,
		},
	}
	k = 2
	ret = reverseKGroup(l, k)
	fmt.Println(ret)
}
<file_sep>/algorithms/_1201_Ugly_Number_3/answer_test.go
package _1201_Ugly_Number_3
import "testing"
// TestNthUglyNumber checks nthUglyNumber on a large-n case and a case with
// duplicate factors (b == c).
func TestNthUglyNumber(t *testing.T) {
	if ret := nthUglyNumber(1000000000, 2, 217983653, 336916467); ret != 1999999984 {
		t.Errorf("wrong ret with %d", ret)
	}
	if ret := nthUglyNumber(5, 2, 3, 3); ret != 8 {
		t.Errorf("wrong ret with %d", ret)
	}
}
<file_sep>/algorithms/_622_Design_Circular_Queue/answer.go
package _622_Design_Circular_Queue
// MyCircularQueue is a fixed-capacity FIFO ring buffer (LeetCode 622).
// It allocates k+1 slots and keeps one slot empty so that head == tail
// unambiguously means "empty" while full is detected by Len() == k.
type MyCircularQueue struct {
	head, tail int // head: index of the front; tail: index one past the back
	data       []int
}

// Len returns the number of stored elements, handling tail wrap-around.
func (this *MyCircularQueue) Len() int {
	if this.tail < this.head {
		return len(this.data) - (this.head - this.tail)
	} else {
		return this.tail - this.head
	}
}

// Constructor creates a queue holding at most k values.
func Constructor(k int) MyCircularQueue {
	q := MyCircularQueue{
		data: make([]int, k+1),
	}
	return q
}

// EnQueue appends value at the back; returns false when the queue is full.
func (this *MyCircularQueue) EnQueue(value int) bool {
	if this.Len() == len(this.data)-1 {
		return false
	}
	this.data[this.tail] = value
	this.tail = (this.tail + 1) % len(this.data)
	return true
}

// DeQueue drops the front element; returns false when the queue is empty.
func (this *MyCircularQueue) DeQueue() bool {
	if this.Len() == 0 {
		return false
	}
	this.head = (this.head + 1) % len(this.data)
	return true
}

// Front returns the first element, or -1 when empty.
func (this *MyCircularQueue) Front() int {
	if this.IsEmpty() {
		return -1
	}
	return this.data[this.head]
}

// Rear returns the last element, or -1 when empty.
func (this *MyCircularQueue) Rear() int {
	if this.IsEmpty() {
		return -1
	}
	return this.data[(this.head+this.Len()-1)%len(this.data)]
}

func (this *MyCircularQueue) IsEmpty() bool {
	return this.head == this.tail
}

func (this *MyCircularQueue) IsFull() bool {
	return this.Len() == len(this.data)-1
}
/**
* Your MyCircularQueue object will be instantiated and called as such:
* obj := Constructor(k);
* param_1 := obj.EnQueue(value);
* param_2 := obj.DeQueue();
* param_3 := obj.Front();
* param_4 := obj.Rear();
* param_5 := obj.IsEmpty();
* param_6 := obj.IsFull();
*/
<file_sep>/algorithms/_1_Two_Sum/answer.go
package _1_Two_Sum
// twoSum returns the indices of two distinct elements of nums that add
// up to target. It delegates to the O(n) hash-map implementation; the
// brute-force variant is kept (commented out) for reference.
func twoSum(nums []int, target int) []int {
	// return twoSumForce(nums, target)
	return twoSumHash(nums, target)
}
// twoSumHash finds two indices i < j with nums[i]+nums[j] == target.
// It scans once: before recording each value it looks up the complement
// among values already seen, halving the work of the previous two-pass
// version and never pairing an element with itself. Under the problem's
// guarantee of at most one solution this returns the same pair as before.
// Returns an empty (non-nil) slice when no pair exists.
func twoSumHash(nums []int, target int) []int {
	seen := make(map[int]int, len(nums)) // value -> index where it was seen
	for idx, n := range nums {
		if prev, ok := seen[target-n]; ok {
			return []int{prev, idx}
		}
		seen[n] = idx
	}
	return []int{}
}
// twoSumForce is the O(n²) brute-force reference: it tests every pair
// (k1, k2) with k1 < k2 and returns the first pair summing to target,
// or an empty slice when none exists.
func twoSumForce(nums []int, target int) []int {
	ret := []int{}
	for k1 := 0; k1 < len(nums); k1++ {
		for k2 := k1 + 1; k2 < len(nums); k2++ {
			if nums[k1]+nums[k2] == target {
				ret = append(ret, k1, k2)
				return ret
			}
		}
	}
	return ret
}
<file_sep>/algorithms/_409_Longest_Palindrome/answer_test.go
package _409_Longest_Palindrome
import "testing"
// testCase pairs a longestPalindrome input string with the expected
// length of the longest palindrome buildable from its characters.
type testCase struct {
	input  string
	output int
}

// TestLongestPalindrome runs longestPalindrome over a table of cases:
// repeated pairs plus a center character ("abccccdd" -> 7), a single
// character, an even pair, and an odd triple.
func TestLongestPalindrome(t *testing.T) {
	cases := []testCase{
		{
			input:  "abccccdd",
			output: 7,
		},
		{
			input:  "a",
			output: 1,
		},
		{
			input:  "bb",
			output: 2,
		},
		{
			input:  "ccc",
			output: 3,
		},
	}
	for _, c := range cases {
		if x := longestPalindrome(c.input); x != c.output {
			t.Errorf("output should be \"%v\" instead of \"%v\" with input=\"%v\"", c.output, x, c.input)
		}
	}
}
<file_sep>/algorithms/_842_Split_Array_into_Fibonacci_Sequence/answer.go
package _842_Split_Array_into_Fibonacci_Sequence
import "math"
// splitIntoFibonacci splits the digit string num into a Fibonacci-like
// sequence (each term the sum of the previous two, every value fitting
// in an int32, at least three terms) and returns it, or an empty slice
// when no such split exists.
func splitIntoFibonacci(num string) []int {
	ret := []int{}
	dfs(num, 0, &ret)
	return ret
}
func dfs(num string, idx int, ret *[]int) bool {
if idx == len(num) {
return len(*ret) > 2 // 因为下面的第二个if条件,这儿需要>2
}
cur := 0
for i := idx; i < len(num); i++ {
cur = cur*10 + int(num[i]-'0')
if cur > math.MaxInt32 { // 越界也返回错误
break
}
if len(*ret) >= 2 && cur-(*ret)[len(*ret)-1] > (*ret)[len(*ret)-2] { // 此数大于之前的n-1、n-2的和,已经失败
break
}
if len(*ret) <= 1 || cur-(*ret)[len(*ret)-1] == (*ret)[len(*ret)-2] { // 长度不够,不能够定义斐波那契规律 or 正好匹配斐波那契规律 => 找下一个数
*ret = append(*ret, cur) // 追加结果
if dfs(num, i+1, ret) { // 如果符合,则返回
return true
}
// 此时下一个不符合,说明此次追加的结果及后续不可用
*ret = (*ret)[0 : len(*ret)-1] // 撤销追加
if cur == 0 { // 处理为0开头的情形
break
}
}
// 此时 小于之前n-1,n-2的和,继续循环加数尝试
}
return false
}
<file_sep>/algorithms/_334_Increasing_Triplet_Subsequence/answer.go
package _334_Increasing_Triplet_Subsequence
import "math"
// increasingTriplet reports whether nums contains indices i < j < k with
// nums[i] < nums[j] < nums[k]. Fewer than three elements can never form
// a triplet; otherwise the O(n) time / O(1) space scan is used.
func increasingTriplet(nums []int) bool {
	if len(nums) < 3 {
		return false
	}
	//return increasingTripletDP(nums)
	return increasingTripletSimple(nums)
}
// increasingTripletSimple scans once, tracking the smallest value seen
// so far (first) and the smallest value known to have something smaller
// before it (second). Any value exceeding both proves an increasing
// triplet exists.
func increasingTripletSimple(nums []int) bool {
	first, second := math.MaxInt, math.MaxInt
	for _, v := range nums {
		switch {
		case v <= first:
			first = v
		case v <= second:
			second = v
		default:
			return true
		}
	}
	return false
}
// time limit exceeded with [1,1,1,1,1.....1,3,7]
// increasingTripletDP is the O(n²) longest-increasing-subsequence DP,
// kept for reference: dp[i] is the length of the longest strictly
// increasing subsequence ending at i; reaching 3 anywhere proves a
// triplet exists.
func increasingTripletDP(nums []int) bool {
	dp := make([]int, len(nums))
	for idx := range dp {
		dp[idx] = 1 // every element alone is a subsequence of length 1
	}
	for i := 0; i < len(nums); i++ {
		for j := 0; j < i; j++ {
			if nums[j] < nums[i] {
				dp[i] = max(dp[i], dp[j]+1)
			}
			if dp[i] == 3 {
				return true
			}
		}
	}
	return false
}
// max returns the larger of a and b.
func max(a, b int) int {
	if a > b {
		return a
	}
	return b
}
<file_sep>/algorithms/_232_Implement_Queue_using_Stacks/answer.go
package _232_Implement_Queue_using_Stacks
import "sync"
// MyQueue implements a FIFO queue on top of two LIFO stacks: `in`
// receives pushes and `out` serves pops/peeks in reversed (i.e. FIFO)
// order. `top` caches the bottom of the in-stack — the queue front while
// out is empty — so Peek can answer without draining. The RWMutex makes
// individual operations safe for concurrent use.
type MyQueue struct {
	in, out []int // two stacks: one for input, one for output
	top     int
	rwm     sync.RWMutex
}

/** Initialize your data structure here. */
func Constructor() MyQueue {
	q := MyQueue{}
	return q
}

/** Push element x to the back of queue. */
func (this *MyQueue) Push(x int) {
	this.rwm.Lock()
	defer this.rwm.Unlock()
	if len(this.in) == 0 {
		this.top = x // x becomes the front of the in-stack; remember it for Peek
	}
	this.in = append(this.in, x)
}

/** Removes the element from in front of queue and returns that element.
Assumes the queue is non-empty (LeetCode contract); panics otherwise. */
func (this *MyQueue) Pop() int {
	this.rwm.Lock()
	defer this.rwm.Unlock()
	// Refill out from in only when out is exhausted; this amortizes the
	// transfer so each element moves between stacks at most once.
	if len(this.out) == 0 {
		for len(this.in) > 0 {
			// pop from the in-stack
			x := this.in[len(this.in)-1]
			this.in = this.in[:len(this.in)-1]
			// push onto the out-stack (order reverses, giving FIFO)
			this.out = append(this.out, x)
		}
	}
	ret := this.out[len(this.out)-1]
	this.out = this.out[:len(this.out)-1]
	return ret
}

/** Get the front element (undefined when the queue is empty). */
func (this *MyQueue) Peek() int {
	this.rwm.RLock()
	defer this.rwm.RUnlock()
	if len(this.out) == 0 {
		return this.top // front of the in-stack
	}
	return this.out[len(this.out)-1]
}

/** Returns whether the queue is empty. */
func (this *MyQueue) Empty() bool {
	this.rwm.RLock()
	defer this.rwm.RUnlock()
	return len(this.in) == 0 && len(this.out) == 0
}
/**
* Your MyQueue object will be instantiated and called as such:
* obj := Constructor();
* obj.Push(x);
* param_2 := obj.Pop();
* param_3 := obj.Peek();
* param_4 := obj.Empty();
*/
<file_sep>/algorithms/_746_Min_Cost_Climbing_Stairs/answer.go
package _746_Min_Cost_Climbing_Stairs
// minCostClimbingStairs returns the cheapest total cost to reach the top
// of the staircase, starting from step 0 or step 1 and climbing one or
// two steps at a time (LeetCode 746). Delegates to the DP version.
func minCostClimbingStairs(cost []int) int {
	return minCostClimbingStairsDynamic(cost)
}
// minCostClimbingStairsDynamic computes the answer bottom-up in O(n)
// time and O(1) space (the previous version allocated a full O(n) DP
// slice). prev2 and prev1 hold the cheapest cost of a path that ends on
// — and pays for — steps i-2 and i-1. The answer is the cheaper of
// ending on the last or second-to-last step, since either can step past
// the top.
func minCostClimbingStairsDynamic(cost []int) int {
	if len(cost) == 0 {
		return 0
	}
	if len(cost) == 1 {
		return cost[0]
	}
	prev2, prev1 := cost[0], cost[1]
	for i := 2; i < len(cost); i++ {
		cheaper := prev2
		if prev1 < cheaper {
			cheaper = prev1
		}
		prev2, prev1 = prev1, cheaper+cost[i]
	}
	if prev1 < prev2 {
		return prev1
	}
	return prev2
}
// minCost returns the smaller of a and b.
func minCost(a, b int) int {
	if a <= b {
		return a
	}
	return b
}
<file_sep>/algorithms/_14_Longest_Common_Prefix/answer.go
package _14_Longest_Common_Prefix
import (
"strings"
)
// longestCommonPrefix returns the longest string that is a prefix of
// every element of strs ("" for an empty slice or when no common prefix
// exists). The candidate starts as strs[0] and is trimmed from the right
// until it prefixes each subsequent string; strings.HasPrefix states the
// intent directly (the old strings.Index scan could search the whole
// string), and once the candidate shrinks to "" the answer is final so
// we return early.
func longestCommonPrefix(strs []string) string {
	if len(strs) == 0 {
		return ""
	}
	prefix := strs[0]
	for _, s := range strs[1:] {
		for !strings.HasPrefix(s, prefix) {
			prefix = prefix[:len(prefix)-1]
			if prefix == "" {
				return ""
			}
		}
	}
	return prefix
}
<file_sep>/algorithms/_2181_Merge_Nodes_in_Between_Zeros/answer.go
package _2181_Merge_Nodes_in_Between_Zeros
import "github.com/shadas/leetcode_notes/utils/linkedlist"
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// mergeNodes collapses a zero-delimited list (which, per LeetCode 2181,
// begins and ends with a 0-valued node) into a list whose nodes hold the
// sum of each run of values between consecutive zeros.
func mergeNodes(head *linkedlist.IntListNode) *linkedlist.IntListNode {
	return mergeNodesSliceRun(head)
}

// mergeNodesSliceRun walks the list once, accumulating a running sum
// that is flushed into s each time a 0 sentinel is reached, then builds
// a fresh list from the collected sums.
// NOTE(review): the -1 sentinel assumes real run sums never equal -1,
// i.e. node values are non-negative — true under the problem constraints.
func mergeNodesSliceRun(head *linkedlist.IntListNode) *linkedlist.IntListNode {
	var (
		tmpSum int = -1 // -1 marks "no run started yet" (before the leading 0)
		s      []int
	)
	for head != nil {
		if head.Val == 0 {
			if tmpSum != -1 {
				s = append(s, tmpSum) // flush the run that just ended
			}
			tmpSum = 0 // start a new run
		}
		tmpSum += head.Val
		head = head.Next
	}
	// Rebuild with a dummy head; y.Next is the real first node.
	x := &linkedlist.IntListNode{}
	y := x
	for _, i := range s {
		x.Next = &linkedlist.IntListNode{Val: i}
		x = x.Next
	}
	return y.Next
}
<file_sep>/algorithms/_342_Power_of_Four/answer.go
package _342_Power_of_Four
// isPowerOfFour reports whether num equals 4^k for some integer k >= 0.
func isPowerOfFour(num int) bool {
	return isPowerOfFourLoop(num)
}
// isPowerOfFourLoop repeatedly divides by 4; n is a power of four iff it
// is positive and the division chain reaches exactly 1 without leaving a
// remainder at any step.
func isPowerOfFourLoop(n int) bool {
	if n <= 0 {
		return false
	}
	for ; n > 1; n /= 4 {
		if n%4 != 0 {
			return false
		}
	}
	return true
}
<file_sep>/algorithms/_240_Search_a_2D_Matrix_2/answer_test.go
package _240_Search_a_2D_Matrix_2
import "testing"
// TestSearchMatrix checks that searchMatrix finds 5 in the canonical
// row- and column-sorted LeetCode 240 example matrix.
func TestSearchMatrix(t *testing.T) {
	var (
		matrix [][]int
		target int
	)
	matrix, target = [][]int{{1, 4, 7, 11, 15}, {2, 5, 8, 12, 19}, {3, 6, 9, 16, 22}, {10, 13, 14, 17, 24}, {18, 21, 23, 26, 30}}, 5
	if !searchMatrix(matrix, target) {
		t.Errorf("should be true")
	}
}
<file_sep>/algorithms/_563_Binary_Tree_Tilt/answer.go
package _563_Binary_Tree_Tilt
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// TreeNode is one node of a binary tree.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}

// findTilt returns the sum of every node's tilt, where a node's tilt is
// the absolute difference between the value sums of its left and right
// subtrees (LeetCode 563).
func findTilt(root *TreeNode) int {
	if root == nil {
		return 0
	}
	var total int
	funcRecursive(root, &total)
	return total
}

// funcRecursive returns the value sum of the subtree rooted at root
// while accumulating each node's tilt into *sum.
func funcRecursive(root *TreeNode, sum *int) int {
	if root == nil {
		return 0
	}
	l, r := funcRecursive(root.Left, sum), funcRecursive(root.Right, sum)
	tilt := l - r
	if tilt < 0 {
		tilt = -tilt
	}
	*sum += tilt
	return root.Val + l + r
}
<file_sep>/algorithms/_559_Maximum_Depth_of_N_ary_Tree/answer.go
package _559_Maximum_Depth_of_N_ary_Tree
// Node is one vertex of an n-ary tree.
type Node struct {
	Val      int
	Children []*Node
}

// maxDepth returns the number of nodes on the longest root-to-leaf path.
func maxDepth(root *Node) int {
	return maxDepthRecursively(root)
}

// maxDepthRecursively computes the depth as 1 plus the deepest child
// subtree (0 for a nil root).
func maxDepthRecursively(root *Node) int {
	if root == nil {
		return 0
	}
	deepest := 0
	for _, child := range root.Children {
		if d := maxDepthRecursively(child); d > deepest {
			deepest = d
		}
	}
	return deepest + 1
}
<file_sep>/algorithms/_27_Remove_Element/answer_test.go
package _27_Remove_Element
import (
"testing"
)
// TestRemoveElement checks that removing value 3 from [3,2,2,3] leaves
// two elements.
func TestRemoveElement(t *testing.T) {
	if ret := removeElement([]int{3, 2, 2, 3}, 3); ret != 2 {
		t.Error("not 2 in test1.")
	}
}
<file_sep>/utils/array/intarray_test.go
package array
import (
"testing"
)
// TestIsIntArrayEqual checks IsIntArrayEqual on an equal pair and a pair
// differing in one element.
func TestIsIntArrayEqual(t *testing.T) {
	a1 := []int{1, 2, 3}
	a2 := []int{1, 2, 3}
	a3 := []int{1, 1, 3}
	if ret := IsIntArrayEqual(a1, a2); !ret {
		t.Error("is equal error.")
	}
	if ret := IsIntArrayEqual(a1, a3); ret {
		t.Error("is equal error.")
	}
}
<file_sep>/algorithms/_2095_Delete_the_Middle_Node_of_a_Linked_List/answer.go
package _2095_Delete_the_Middle_Node_of_a_Linked_List
import "github.com/shadas/leetcode_notes/utils/linkedlist"
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// deleteMiddle removes the middle node (the ⌊n/2⌋-th, 0-indexed) of the
// list and returns the new head. Delegates to the fast/slow-pointer
// implementation.
func deleteMiddle(head *linkedlist.IntListNode) *linkedlist.IntListNode {
	return deleteMiddleFastSlow(head)
}

// deleteMiddleFastSlow finds the node *before* the middle with the
// classic two-pointer walk: fast advances two links per step and slow
// one, both starting from a dummy pre-head, so slow lands on the
// middle's predecessor. A single-node list returns nil (its only node is
// the middle). Assumes head is non-nil — TODO confirm callers guarantee
// this, since a nil head would panic at the unlink step.
func deleteMiddleFastSlow(head *linkedlist.IntListNode) *linkedlist.IntListNode {
	newhead := &linkedlist.IntListNode{Next: head}
	fast, slow := newhead, newhead
	for {
		if fast.Next == nil || fast.Next.Next == nil {
			break
		}
		fast = fast.Next.Next
		slow = slow.Next
	}
	// rm slow.next — unlink the middle node
	slow.Next = slow.Next.Next
	return newhead.Next
}
<file_sep>/algorithms/_725_Split_Linked_List_in_Parts/answer.go
package _725_Split_Linked_List_in_Parts
import "fmt"
// ListNode is a singly linked list node holding an int value.
type ListNode struct {
	Val  int
	Next *ListNode
}

// String renders the list as its values concatenated with no separator,
// e.g. 1->2->3 prints as "123".
func (l *ListNode) String() string {
	out := ""
	for node := l; node != nil; node = node.Next {
		out += fmt.Sprintf("%d", node.Val)
	}
	return out
}

// splitListToParts divides the list into k consecutive parts whose sizes
// differ by at most one, longer parts first (LeetCode 725). Each part is
// built from freshly allocated nodes, so the input list is untouched;
// parts beyond the list's length come back nil.
func splitListToParts(root *ListNode, k int) []*ListNode {
	// Measure the list.
	size := 0
	for n := root; n != nil; n = n.Next {
		size++
	}
	// Every part gets base nodes; the first `extra` parts get one more.
	base, extra := size/k, size%k

	parts := []*ListNode{}
	src := root
	for i := 0; i < k; i++ {
		take := base
		if i < extra {
			take++
		}
		dummy := &ListNode{}
		tail := dummy
		for j := 0; j < take; j++ {
			tail.Next = &ListNode{Val: src.Val}
			tail = tail.Next
			src = src.Next
		}
		parts = append(parts, dummy.Next)
	}
	return parts
}
<file_sep>/algorithms/_257_Binary_Tree_Paths/answer.go
package _257_Binary_Tree_Paths
import (
"fmt"
"strings"
)
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// binaryTreePaths returns every root-to-leaf path of the tree, each
// rendered as "v1->v2->...->leaf".
func binaryTreePaths(root *TreeNode) []string {
	if root == nil {
		return []string{}
	}
	var ret []string
	binaryTreePathsRecursion(root, []string{}, &ret)
	return ret
}

// binaryTreePathsRecursion appends node's value to the current trace and
// either records the joined path (at a leaf) or recurses into the
// children. Passing trace by value is safe here even though append may
// share a backing array between sibling calls: every completed path is
// copied into a string by strings.Join before any sibling overwrites the
// shared slot.
func binaryTreePathsRecursion(node *TreeNode, trace []string, ret *[]string) {
	trace = append(trace, fmt.Sprint(node.Val))
	if node.Left == nil && node.Right == nil {
		*ret = append(*ret, strings.Join(trace, "->"))
		return
	}
	if node.Left != nil {
		binaryTreePathsRecursion(node.Left, trace, ret)
	}
	if node.Right != nil {
		binaryTreePathsRecursion(node.Right, trace, ret)
	}
}
<file_sep>/algorithms/_39_Combination_Sum/answer.go
package _39_Combination_Sum
import "sort"
// combinationSum returns every distinct combination of candidates (each
// value reusable any number of times) that sums to target. Candidates
// are sorted first so the recursive search can prune effectively.
func combinationSum(candidates []int, target int) [][]int {
	var (
		result [][]int
	)
	sort.Ints(candidates)
	combinationSumR(candidates, target, 0, []int{}, &result)
	return result
}
// combinationSumR performs the backtracking search for combinationSum.
// Precondition: candidates is sorted ascending (the caller sorts).
// target is the remaining sum; idx is the smallest candidate index still
// allowed, which keeps combinations non-decreasing and hence distinct.
// Improvements over the previous version: the snapshot uses copy()
// instead of a manual element loop, a sorted-input `break` stops the
// loop as soon as a candidate overshoots (the old code kept recursing
// into dead branches), and an explicit target<0 guard documents the
// failure case.
func combinationSumR(candidates []int, target, idx int, path []int, result *[][]int) {
	if target == 0 {
		// Snapshot path: its backing array is reused while backtracking.
		combo := make([]int, len(path))
		copy(combo, path)
		*result = append(*result, combo)
		return
	}
	if target < 0 {
		return
	}
	for i := idx; i < len(candidates); i++ {
		if candidates[i] > target {
			// Sorted input: every later candidate overshoots too.
			break
		}
		combinationSumR(candidates, target-candidates[i], i, append(path, candidates[i]), result)
	}
}
<file_sep>/algorithms/_745_Prefix_and_Suffix_Search/answer.go
package _745_Prefix_and_Suffix_Search
// Node is one trie node: next fans out over 'a'..'z', and weight maps
// every full word whose path passes through this node to that word's
// (latest) index in the input slice.
type Node struct {
	next   [26]*Node
	weight map[string]int
}

// WordFilter answers combined prefix+suffix queries with two tries:
// pr indexes the words themselves, sr indexes the reversed words (so a
// suffix query becomes a prefix query on sr).
type WordFilter struct {
	pr *Node
	sr *Node
}
// Constructor builds a WordFilter over words: one trie of the words (for
// prefix queries) and one trie of the reversed words (for suffix
// queries). Later duplicates overwrite earlier indices, which matches
// LeetCode 745's "largest index wins" requirement.
func Constructor(words []string) WordFilter {
	pr := &Node{}
	sr := &Node{}
	for idx, word := range words {
		addWord(pr, word, idx)
		addWord(sr, reverseStr(word), idx)
	}
	return WordFilter{pr: pr, sr: sr}
}
// reverseStr returns str with its runes in reverse order. The previous
// version prepended one character at a time, allocating a new string per
// rune (O(n²) work); an in-place rune-slice swap is linear and behaves
// identically for valid UTF-8 input.
func reverseStr(str string) string {
	r := []rune(str)
	for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 {
		r[i], r[j] = r[j], r[i]
	}
	return string(r)
}
// addWord inserts word into the trie rooted at root. Every node along
// the word's path records the word's index in its weight map, so a
// prefix lookup can enumerate all (word, index) pairs passing through
// that node. Re-inserting a duplicate word overwrites the stored index
// with the later (larger) one, which is what F wants.
// NOTE(review): storing the full word at every node is memory-heavy —
// acceptable for the problem's small inputs, but worth revisiting.
func addWord(root *Node, word string, weight int) {
	var (
		cur = root
		i   int
	)
	for i < len(word) {
		b := word[i]
		idx := b - 'a' // assumes lowercase ASCII, per problem constraints
		aimNode := cur.next[idx]
		if aimNode == nil {
			aimNode = &Node{weight: make(map[string]int)}
		}
		//aimNode.weight = append(aimNode.weight, weight)
		//tmpWord := word[:i+1]
		aimNode.weight[word] = weight
		cur.next[idx] = aimNode
		cur = aimNode
		i++
	}
}
// F returns the largest index of a word that starts with prefix AND ends
// with suffix, or -1 when no word matches both. Indices matching the
// prefix and indices matching the suffix are intersected; the largest
// common index wins.
// Bug fix: the accumulator previously started at 0, so a prefix and a
// suffix that each matched words — but never the *same* word — returned
// index 0 instead of -1. Starting at -1 reports "no match" correctly.
func (this *WordFilter) F(prefix string, suffix string) int {
	rps := searchStrFix(this.pr, prefix)
	rss := searchStrFix(this.sr, reverseStr(suffix))
	if len(rps) == 0 || len(rss) == 0 {
		return -1
	}
	best := -1
	for _, rp := range rps {
		for _, rs := range rss {
			if rp == rs && rp > best {
				best = rp
			}
		}
	}
	return best
}
// searchStrFix walks the trie along word and returns the indices of all
// words passing through the final node, i.e. all words having `word` as
// a prefix (or as a suffix, when called on the reversed trie). Returns
// an empty slice when the path does not exist.
// NOTE(review): an empty word returns no matches because the root node's
// weight map is never populated — fine as long as queries are non-empty;
// confirm against the problem constraints.
func searchStrFix(root *Node, word string) []int {
	var (
		cur = root
		i   int
	)
	for i >= 0 && i < len(word) { // i >= 0 is always true here; kept as-is
		b := word[i]
		idx := b - 'a'
		aimNode := cur.next[idx]
		if aimNode == nil {
			return []int{}
		}
		cur = aimNode
		i++
	}
	weights := cur.weight
	var ret []int
	for _, v := range weights {
		ret = append(ret, v)
	}
	return ret
}
/**
* Your WordFilter object will be instantiated and called as such:
* obj := Constructor(words);
* param_1 := obj.F(prefix,suffix);
*/
<file_sep>/algorithms/_509_Fibonacci_Number/answer.go
package _509_Fibonacci_Number
// fib returns the n-th Fibonacci number (fib(0)=0, fib(1)=1) using two
// rolling values, so it runs in O(n) time and O(1) space.
func fib(n int) int {
	switch n {
	case 0:
		return 0
	case 1:
		return 1
	}
	prev, cur := 0, 1
	for i := 2; i <= n; i++ {
		prev, cur = cur, prev+cur
	}
	return cur
}
<file_sep>/algorithms/_93_Restore_IP_Addresses/answer.go
package _93_Restore_IP_Addresses
import "strconv"
// restoreIpAddresses returns every valid dotted-quad IPv4 address that
// can be formed by inserting three dots into s, via a backtracking
// search over the four segments.
func restoreIpAddresses(s string) []string {
	var res []string
	find(4, s, "", &res)
	return res
}
// find recursively builds the remaining `level` IP segments from str,
// accumulating the dotted text in tmp and appending each complete,
// fully-consuming address to res (backtracking search).
func find(level int, str, tmp string, res *[]string) {
	if level == 0 {
		if len(str) == 0 {
			*res = append(*res, tmp[1:]) // drop the leading "."
			return
		}
	} else {
		for i := 1; i <= 3; i++ { // each segment is 1-3 digits
			if len(str) < i {
				break
			}
			ss := str[0:i]
			if !isValid(ss) {
				// Once a prefix is invalid (>255 or leading zero), every
				// longer prefix is invalid too, so stop early.
				break
			}
			find(level-1, str[i:], tmp+"."+ss, res)
		}
	}
}
// isValid reports whether s is a legal IPv4 segment: 1-3 digits, no
// leading zero (except "0" itself), and a numeric value in [0, 255].
func isValid(s string) bool {
	if len(s) == 0 || len(s) > 3 {
		return false
	}
	if len(s) > 1 && s[0] == '0' {
		return false
	}
	num, err := strconv.Atoi(s)
	return err == nil && num >= 0 && num <= 255
}
<file_sep>/algorithms/_179_Largest_Number/answer_test.go
package _179_Largest_Number
import "testing"
// TestLargestNumber checks largestNumber on the LeetCode 179 examples
// plus single-element inputs and the all-zeros case (which must collapse
// to "0", not "00").
func TestLargestNumber(t *testing.T) {
	if ret := largestNumber([]int{10, 2}); ret != "210" {
		t.Errorf("wrong ret with %s", ret)
	}
	if ret := largestNumber([]int{3, 30, 34, 5, 9}); ret != "9534330" {
		t.Errorf("wrong ret with %s", ret)
	}
	if ret := largestNumber([]int{1}); ret != "1" {
		t.Errorf("wrong ret with %s", ret)
	}
	if ret := largestNumber([]int{10}); ret != "10" {
		t.Errorf("wrong ret with %s", ret)
	}
	if ret := largestNumber([]int{0, 0}); ret != "0" {
		t.Errorf("wrong ret with %s", ret)
	}
}
<file_sep>/own_practice/README.md
I created this directory because I realized my grasp of algorithm fundamentals was weak.
So I need to do some simple exercises and become familiar with the basic tools.
What a shame, but should never give up.<file_sep>/algorithms/_9_Palindrome_Number/answer.go
package _9_Palindrome_Number
// isPalindrome reports whether the decimal digits of x read the same
// forwards and backwards, without converting to a string: the lower half
// of the number is reversed and compared against the remaining upper
// half. Negatives and nonzero multiples of 10 can never be palindromes.
func isPalindrome(x int) bool {
	if x < 0 || (x%10 == 0 && x != 0) {
		return false
	}
	reversed := 0
	for reversed < x {
		reversed = reversed*10 + x%10
		x /= 10
	}
	// Even digit count: halves match exactly. Odd: drop the middle digit.
	return reversed == x || reversed/10 == x
}
<file_sep>/algorithms/_239_Sliding_Window_Maximum/answer_test.go
package _239_Sliding_Window_Maximum
import (
"testing"
"github.com/shadas/leetcode_notes/utils/array"
)
// TestMaxSlidingWindow checks maxSlidingWindow on the LeetCode 239
// example, a window of size 1 (identity), and a short window of 2.
func TestMaxSlidingWindow(t *testing.T) {
	if ret := maxSlidingWindow([]int{1, 3, -1, -3, 5, 3, 6, 7}, 3); !array.IsIntArrayEqual([]int{3, 3, 5, 5, 6, 7}, ret) {
		t.Errorf("wrong ret with %v", ret)
	}
	if ret := maxSlidingWindow([]int{1, -1}, 1); !array.IsIntArrayEqual([]int{1, -1}, ret) {
		t.Errorf("wrong ret with %v", ret)
	}
	if ret := maxSlidingWindow([]int{7, 2, 4}, 2); !array.IsIntArrayEqual([]int{7, 4}, ret) {
		t.Errorf("wrong ret with %v", ret)
	}
}
<file_sep>/algorithms/_875_Koko_Eating_Bananas/answer_test.go
package _875_Koko_Eating_Bananas
import "testing"
// TestMinEatingSpeed checks minEatingSpeed (LeetCode 875) against known
// answers, including a single large pile near the int32 range.
func TestMinEatingSpeed(t *testing.T) {
	if ret := minEatingSpeed([]int{3, 6, 7, 11}, 8); ret != 4 {
		t.Errorf("ret is %d, not 4", ret)
	}
	if ret := minEatingSpeed([]int{30, 11, 23, 4, 20}, 6); ret != 23 {
		t.Errorf("ret is %d, not 23", ret)
	}
	if ret := minEatingSpeed([]int{2, 2}, 6); ret != 1 {
		t.Errorf("ret is %d, not 1", ret)
	}
	if ret := minEatingSpeed([]int{312884470}, 312884469); ret != 2 {
		t.Errorf("ret is %d, not 2", ret)
	}
}

// TestEatOver verifies the helper: at speed 23 all piles can be finished
// within 6 hours.
func TestEatOver(t *testing.T) {
	if ret := eatOver([]int{30, 11, 23, 4, 20}, 23, 6); ret == false {
		t.Errorf("ret need be true")
	}
}
| 5a587780b4449ceb4d2f7a1a6e25f48997a925d1 | [
"Markdown",
"Go Module",
"Go"
] | 359 | Go | Shadas/leetcode_notes | 99ce6d4cadd5c03e54bedcde0116065b67f6147a | 0870c0dc8641f2aea5ec64709639b80d1fc781a3 |
refs/heads/master | <file_sep>/*
General syntax of a URL.
protocol://hostname[:port]/path/filename#ref
*protocol used to access the server (such as http),
*name of the server,
*port on the server (optional)
*path and name of a specific file on the server (sometimes optional)
*anchor or reference within the file (optional)
*/
import java.net.*;
import java.io.*;
public class CrawlRunner{
	// Constructor: prints the author's name, as the assignment requires.
	public CrawlRunner(){ //constructor function
		System.out.println("<NAME>");
	} //end constructor
public void run(String strURL){
//declare String
String dataLine;
//try & catch blocks
try{
//Instantiate URL object and pass string argument
URL url = new URL(strURL);
//Open a connection to this URL and return an
// input stream for reading from the connection.
BufferedReader htmlPage =
new BufferedReader(new InputStreamReader(url.openStream()));
for (int i = 0; i <10 ; i++ ) {
dataLine = htmlPage.readLine();
System.out.println((i+1)+" "+dataLine);
}
} //end try
catch(Exception e){
e.printStackTrace();
} //end catch
} //end run
} //end class<file_sep>About
-----------
This program simulates an application for signing up with a mobile phone service provider. The original purpose of the exercise was to display some competency using **structures**, **classes**, and **interfaces** in C#.
I initially built the project in Visual Studio as a solution. For simplicity,
I have stripped down the project to a single directory.
Testing
-----------
* Windows users can use the .bat file to compile a program.exe to test.
* Linux and OS X users can run the .sh script to compile a program.exe to test.
For working with the .NET framework in Linux, I'm using the [Mono Platform](http://www.mono-project.com/) , this is also what the .sh script uses.
If the file is not executable, you may need to add permissions first.
Example $ chmod +x *fileName.sh*
Project Specifications
----------------------
* Create a console application using Visual Studio
* Output appropriate communication to the user
* All requirements should be output and identified by number.
1. Output a header in the console: "This is Program-6"
2. Create a structure named Phone with the following members:
a. Phone Number
b. Manufacturer
c. Model
d. Operating System
e. Diagonal Screen Size
f. Constructor to set data above
3. Ask the user how many phones to enter and configure appropriately.
4. Create a class named Account with the following members:
a. Inherits IAccountUpdate interface
b. Phone Number(s)
c. Customer Name
d. Address
e. Credit Card Number
f. Constructor(s) as appropriate
5. Create an interface named IAccountUpdate with the following members:
a. Balance Owed
b. Minutes Used
c. Cost Per Minute
d. CalculateCharge()
e. AdjustMinutes()
6. Using Phone, Account, and IAccountUpdate, create an application for a mobile phone company that minimally supports functionality listed. For instance: creating accounts, adding phones, calculating charges, and adjusting minutes.
7. Output a thank you message: "Thanks for running this Program".<file_sep>/*
Test Scores
Programmer: <NAME>
. selector has higher priority than *
dereference and dot operator is the equivalent of the structure pointer operator, like so:
(*pRect).property;
pRect->property;
*/
#include <iostream> // input/output declarations
#include <iomanip> //Header file for stream manipulators
#include <string> //Header file for string objects
#include <cmath> //Expanded math library
using namespace std;
/* Structures */
// Student pairs one student's name with their integer test score.
struct Student{
	string name;  // single whitespace-delimited token as read by cin >>
	int score;    // validated on input to the range [0, 105]
};
/* Function Prototypes */
void recordStudents(Student *&list, int size);
void sortStudents(Student *&list, int size);
float avgFunction(Student *&list, int size);
void postGrades(Student *&list, int size,float avgScore);
/*Main Function - Begin Program */
int main(){
int sizeStudents; // Holds size of array
float avgScore; //Holds class average
//Dynamically create array for classroom
cout << "How many students? ";
cin >> sizeStudents;
Student* classRoom = new Student[sizeStudents]; //classRoom points to dynamically allocated array
//Record students and scores
recordStudents(classRoom, sizeStudents);
//Sort array
sortStudents(classRoom, sizeStudents);
//Calc average of scores
avgScore = avgFunction(classRoom, sizeStudents);
//Print results table to user
postGrades(classRoom, sizeStudents,avgScore);
return 0;
}
/* Function Definitions */
//Gather data into the array
// recordStudents fills `list` (size entries) from standard input. Each
// entry is a whitespace-delimited name token followed by an integer
// score; scores are re-prompted until they fall in [0, 105].
// NOTE(review): `cin >>` stops at whitespace, so multi-word names are
// NOT supported despite the assignment text — confirm the intended
// input format.
void recordStudents(Student *&list, int size){
	cout << "Enter Student name followed by score. \nExample: Tommy 87\n\n";
	for (int i = 0; i < size; i++)
	{
		cout << "Student "<<(i+1)<<": ";
		// (*(list + i)).name is pointer notation for list[i].name
		cin >> (*(list + i)).name >> (*(list+i)).score;
		//validate score entry
		while((((*(list+i)).score) > 105)||(((*(list+i)).score) < 0)){
			cout <<endl<<"Invalid Score, enter value from 0 to 105: ";
			cin >>(*(list+i)).score;
		}
	} //end for loop
	cout <<endl<<endl;
}
//Sort ascending (low to high) on score property
// sortStudents bubble-sorts the array in place by score, low to high:
// repeated passes swap adjacent out-of-order records until a full pass
// makes no swap. Pointer notation is used throughout per the
// assignment's requirement.
void sortStudents(Student *&list, int size){
	Student temp; //holds overwritten value during a swap
	bool swap; //flag: did this pass move anything?
	do{
		swap = false;
		for (int i = 0; i < (size-1); i++)
		{
			if (((list+i)->score) > ((list+(i+1))->score) ) //if current val greater than nextVal
			{
				temp = *(list+i); //save current student
				*(list+i) = *(list+(i+1)); //move next student down
				*(list+(i+1)) = temp; // move current student up
				swap = true; // raise flag
			}
		} //end for-loop
	} while(swap); //Loop again if a swap occurred this pass
} //END sortStudents()
//Find score average and return it
// avgFunction returns the mean of the stored scores, or 0 for an empty
// class (the previous version divided by zero when size was 0).
float avgFunction(Student *&list, int size){
	if (size <= 0){
		return 0.0f;
	}
	float total = 0;
	for (int i = 0; i < size; i++){
		total += (list+i)->score; // pointer notation for list[i].score
	}
	return (total/size);
}
//Print grades table to user
// postGrades prints a formatted name/score table (names left-aligned in
// a 30-char column) followed by the class average rendered with one
// fixed decimal place.
void postGrades(Student *&list, int size, float avgScore){
	cout <<"Name"<<setw(29)<<"Score\n";
	cout <<setw(27)<<"--------------------------------\n";
	for (int i = 0; i < size; i++)
	{
		cout <<setw(30)<<left<<(list+i)->name<<setw(5)<<(list+i)->score<<endl;
	}
	cout <<setw(27)<<"--------------------------------\n";
	cout <<setw(10)<<right<<"Average"<<setw(22)<<setprecision(1)<<fixed<<avgScore;
	cout <<endl<<endl;
}
<file_sep>/*
Package class
Programmer: <NAME>
This is the class specification file for a class I'm using with Prog7
*/
#ifndef PACKAGE_H
#define PACKAGE_H
#include <string> //Header file for string objects
using namespace std;
//Date Class Declaration
// Package models a parcel for a shipping calculator: its dimensions and
// weight, an accepted/rejected status, and rate tables mapping weight
// brackets to shipping costs. Member function bodies live in the
// accompanying .cpp implementation file.
class Package
{
	//Member declarations
private:
	int weight;
	int length;
	int width;
	int height;
	int girth; // girth value derived from the dimensions — computed by the implementation (not shown here)
	int largest; //holds largest side to calc girth
	bool stat; //true: accepted. false: rejected
	int weightTable[15];//Holds package weight values (bracket upper bounds — confirm in .cpp)
	float shipTable[15];//Holds shipping cost values, parallel to weightTable
public:
	Package(); //Default Constructor
	Package(int weigh, int lengt, int widt, int heigh,int tabWeight[15],float tabCost[15]); // Constructor taking dimensions/weight plus the rate tables
	~Package();
	string status(); //Returns status message
	float cost(); //Returns shipping cost of package
	bool getStat(); //returns true or false for accepted vs rejected
	int getWeight(); //Gets weight
	int getLength(); //Gets length
	int getWidth(); //Gets width
	int getHeight(); //Gets height
	void setWeght( int weigh); //Set attribute outside of constructor (note: name misspelled; kept for ABI/source compatibility)
	void setLength(int lengt); //Set attribute outside of constructor
	void setWidth(int widt); //Set attribute outside of constructor
	void setHeight(int heigh); //Set attribute outside of constructor
};
#endif<file_sep>using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
namespace Program_8
{
    /// <summary>
    /// Main form for Program-8: the user creates a batch of Phone structs
    /// via dynamically generated text boxes, picks one with a radio
    /// button, and opens an Account tied to that phone.
    /// </summary>
    public partial class Prog8Form : Form
    {
        //Declarations
        decimal counter; // quantity of instantiated structs
        List<Phone> phones = new List<Phone>(); //phone list
        List<TextBox> phoneNumberList = new List<TextBox>(); //for holding dynamic references
        List<TextBox> manufacList = new List<TextBox>(); //for holding dynamic references
        List<TextBox> modelList = new List<TextBox>(); //for holding dynamic references
        List<TextBox> oSList = new List<TextBox>(); //for holding dynamic references

        // Phone is a simple value type describing one handset; the screen
        // size is fixed at 10.2 cm by the constructor.
        struct Phone
        {
            public string PhoneNumber;
            public string Manufacturer;
            public string Model;
            public string OperatingSystem;
            public double DiagonalScreenSize;

            public Phone(string number, string manuf, string model, string os)
            {
                this.PhoneNumber = number;
                this.Manufacturer = manuf;
                this.Model = model;
                this.OperatingSystem = os;
                this.DiagonalScreenSize = 10.2; //centimeters
            }
        }; //End Phone struct

        public Prog8Form()
        {
            InitializeComponent();
        }

        // Builds one row of input text boxes per requested phone and
        // remembers each box so its text can be read back later.
        private void buttonPhones_Click(object sender, EventArgs e)
        {
            //Gather quantity of phones
            counter = quantityPhones.Value;

            //for quantity of phones, add textboxes
            for (int i = 0; i < counter; i++)
            {
                int j = i + 1; //appends label to textboxes for user clarification
                TextBox numberBox = new TextBox();
                numberBox.AppendText("Phone #"+j);
                flowPhoneBox.Controls.Add(numberBox); //add textbox to form
                phoneNumberList.Add(numberBox); //add reference to value

                TextBox manuBox = new TextBox();
                manuBox.AppendText("Manufacturer "+j);
                flowPhoneBox.Controls.Add(manuBox); //add textbox to form
                manufacList.Add(manuBox); // add reference to value

                TextBox modelBox = new TextBox();
                modelBox.AppendText("Model "+j);
                flowPhoneBox.Controls.Add(modelBox); // add textbox to form
                modelList.Add(modelBox); // add reference to value

                TextBox osBox = new TextBox();
                osBox.AppendText("OS "+j);
                flowPhoneBox.Controls.Add(osBox); // add textbox to form
                oSList.Add(osBox); //add reference to value
            }
            //make submit button visible (vs create and add button?)
            createPhonesButton.Visible = true;
        }

        // Reads the user-filled text boxes into Phone structs and offers
        // each phone as a radio-button choice in the account section.
        private void createPhonesButton_Click(object sender, EventArgs e)
        {
            //convert lists to arrays (arrays belong to this method only)
            var numbersArray = phoneNumberList.ToArray();
            var manuArray = manufacList.ToArray();
            var modelArray = modelList.ToArray();
            var osArray = oSList.ToArray();

            //uses user entry to populate phones <List>
            for (int i = 0; i < counter; i++)
            {
                phones.Add(new Phone(numbersArray[i].Text, manuArray[i].Text, modelArray[i].Text, osArray[i].Text));
            }
            //Convert struct list to struct array (NOT DOING THIS, EASIER TO WORK WITH LIST)
            // phonesArray = phones.ToArray();

            //make confirmation visible, append text (number of phones created)
            confirmPhones.Text = "Created "+ counter + " phones.";

            //Populate radio selection in account creation section
            for (int phone = 0; phone < counter; phone++)
            {
                RadioButton sumChoice = new RadioButton();
                string tempString = phones[phone].PhoneNumber +" "+ phones[phone].Manufacturer+" "+ phones[phone].Model+" "+ phones[phone].OperatingSystem+" "+ phones[phone].DiagonalScreenSize+"cm";
                sumChoice.AutoSize = true;
                sumChoice.Text = tempString;
                sumChoice.ForeColor = System.Drawing.Color.Orange;
                flowRadioPhones.Controls.Add(sumChoice);
            }
            //make account creation buttion visible
            buttAcctCreat.Visible = true;
        }

        // Creates an Account for the phone whose radio button is checked
        // and appends the resulting billing row to the grid.
        // NOTE(review): assumes exactly one radio button is checked —
        // ElementAt(0) throws if none is selected. Confirm the UI
        // enforces a selection before this handler can fire.
        private void buttAcctCreat_Click(object sender, EventArgs e)
        {
            //gather radio button selection into string to pass to account
            // var checkedList = Controls.OfType<RadioButton>().Where(t => t.Checked);
            var checkedList = flowRadioPhones.Controls.OfType<RadioButton>().Where(t => t.Checked);

            //search for substring from checkbox, if matches number, pass number. This is a loop
            string chosenNumber="";
            for (int p = 0; p < counter; p++)
            {
                if (checkedList.ElementAt(0).Text.StartsWith(phones[p].PhoneNumber)) //must substring
                {
                    chosenNumber += phones[p].PhoneNumber;
                }
            }

            //Gather user data and instantiate an account object
            Account theAccount = new Account(chosenNumber,acctNameBox.Text,acctAddrBox.Text,acctCredBox.Text);
            //pass minutes used to object
            theAccount.MinutesUsed = decimal.ToInt32(minsUpDown.Value);
            //Set cost per minute
            theAccount.CostPerMinute = .15m;

            //Status confirm account creation
            confirmAcctMade.Visible = true;

            //Output account info to user
            acctGrid.Rows.Add(theAccount.CustomerName,theAccount.Address,theAccount.PhoneNumber,theAccount.CreditCard,"$"+theAccount.CalculateCharge(),theAccount.MinutesUsed);
        }

        // Farewell message when the form closes.
        private void Prog8Form_FormClosing(object sender, FormClosingEventArgs e)
        {
            MessageBox.Show("#6. Thank you for running Program-8.");
        }
    } //END CLASS
} //END NAMESPACE
<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace Program_6
{
    /// <summary>
    /// Billing contract for a phone account (Requirement #4): usage
    /// counters plus the operations that derive charges from them.
    /// Implementers supply the method bodies.
    /// </summary>
    interface IAccountUpdate //Requirement #4
    {
        decimal BalanceOwed { get; set; }    // current amount due
        int MinutesUsed { get; set; }        // minutes consumed this cycle
        decimal CostPerMinute { get; set; }  // billing rate

        //interface only declares method's existence. Class defines specifications
        decimal CalculateCharge();
        int AdjustMinutes();

    } //END Interface
<file_sep>About
----------------
This program simulates processing test scores for a class of students. The user determines
the number of students, so the program implements dynamic memory allocation and uses pointers
to reference the stored location.
Project Specifications
-------------------------------
**Input Data**
For each student, the following data will be input:
student name (your program should handle names of up to 30 characters - may contain spaces)
score (an integer value)
Use a struct to hold the name and score for one student. The name should be storedin a C++ string object.
**Validation Processing**
Your program should work for any number of students. When the program starts, it should ask the user for the number of students to be processed. Then it should dynamically allocate an array of that size (array of student/score structures).
The program should then:
Call a function to input the student name/score pairs and store them in the array. Input validation: the score must be between 0 and 105, inclusive.
Call a function to sort the array on the score member in ascending (increasing) order.
Call a value-returning function that calculates the average of the scores.
Call a function that displays a neat table of student name/score pairs in sorted order. Include appropriate column heading for your table. Following the table, display the average score with appropriate label.
Note:In at least one of your functions, you must declare the array parameter using pointer notation instead of array notation, and inside the function you must use pointer notation instead of array notation to access the array elements. If you do not understand this requirement, email me.
**Sample Output**
Name || Score
<NAME> || 75
<NAME> || 81
<NAME> || 88
Average || 81.3
**Requirements/Hints:**
Global variables are variables that are declared outside any function. Do not use global variables in your programs. Declare all your variables inside functions
Use the C++ string class to hold the student name.
Use a struct to store student name/score pairs.
Use a dynamically-allocated array of structs to store the information for a class.
In at least one of your functions, you must declare the array parameter using pointer notation instead of array notation, and inside the function you must use pointer notation instead of array notation to access the array elements. <file_sep>About
-------------
This program is designed to be a manual tiny web crawler.
It allows the user to supply a web url as a command line argument,
at which point the program returns the first 10 lines of html source
from the landing page.
The purpose of the project was to show some understanding of the
Java URL object and manipulation of stream buffers.
:boom: :boom: **Important** :boom: :boom:
To be 100% clear, I only authored the CrawlRunner source file. I did not author the CrawlDriver source file. This file was supplied by the course instructor to be used
as a driver class to build around. Identifying information has been removed to
prevent future students from using the project.
Testing
------------
While you can recompile the bytecode if you wish, there are class files present.
Without a command-line argument, the program will default to querying http://stackoverflow.com . To test a different site, simply supply the URL at run-time
Example:
*java CrawlDriver http://someSite.com*
Program Specifications
---------------------
Write a program that uses the driver class definition provided that will:
* Connect to an arbitrary website identified as a command-line parameter
* Download the default file from the website
* Display the first ten lines of downloaded text on the command line screen
* Number each line of text as it is displayed
* Display your name where indicated<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace Program_8
{
    // Subscriber account for the mobile-plan exercise: couples the
    // customer's contact/identity data with the billing state required
    // by the IAccountUpdate contract.
    class Account : IAccountUpdate
    {
        // Identity / contact data.
        public string PhoneNumber { get; set; }
        public string CustomerName { get; set; }
        public string Address { get; set; }
        public string CreditCard { get; set; }

        // Billing state demanded by the IAccountUpdate interface.
        public decimal BalanceOwed { get; set; }
        public int MinutesUsed { get; set; }
        public decimal CostPerMinute { get; set; }

        // Parameterless constructor kept for callers that populate the
        // properties one at a time.
        public Account()
        {
        }

        // Convenience constructor covering the identity fields only;
        // billing values start at their defaults.
        public Account(string num, string custom, string address, string card)
        {
            PhoneNumber = num;
            CustomerName = custom;
            Address = address;
            CreditCard = card;
        }

        // Recomputes the amount owed (minutes used x per-minute rate),
        // stores it in BalanceOwed and returns it.
        public decimal CalculateCharge()
        {
            decimal charge = MinutesUsed * CostPerMinute;
            BalanceOwed = charge;
            return charge;
        }

        // Required by the interface; no adjustment is applicable here,
        // so the current minute count is returned unchanged.
        public int AdjustMinutes()
        {
            return MinutesUsed;
        }
    } //END CLASS
} // END NAMESPACE
<file_sep>mcs -out:program.exe *.cs<file_sep>About
-------------
This program renders a graphical slider widget which adjusts a numeric value
as it moves. The original purpose of the program was to display some understanding
of working with both the AWT and Swing APIs for creating GUI components.
:boom: :boom: **Important** :boom: :boom:
To be 100% clear, I only authored the SlideRunner source file. I did not author the SlideDriver source file. This file was supplied by the course instructor to be used
as a driver class to build around.(Though its only functionality is instantiating an object of the SlideRunner class). Identifying information has been removed to
prevent future students from using the project.
Testing
-------------
While you can recompile the bytecode if you wish, there are class files present.
To run, simply open a terminal and enter: *java SlideDriver*
Program Specifications
----------------------
Write a program that uses the driver class definition provided to produce the graphic output image shown (not shown here).
At startup, the thumb on the slider is positioned at 50 and the number 50 is displayed above the thumb. As you move the slider to the left or right, the number that is displayed tracks the position of the thumb on the slider scale.
The program must terminate and return control to the operating system when you click the large X in the upper-right corner of the GUI containing the slider.
<file_sep># Primitive-Showcase
A cleaned-up small collection of some of my school projects, intended to showcase language exposure to potential employers or clients.
master branch
-------------
Project directories are organized by language and each contain a 'ReadMe' file
with pertinent information. Where possible, the original instructions or project specifications will also be present.
While the majority of content in this repository was authored solely by me,
some projects (namely in the Java directory) contain supplemental drivers
which were provided by a course instructor. Where this is the case,
those files have been noted. Also for this reason, I provide no
explicit license for this repository.
gh-pages branch
-------------------
In the gh-pages branch you will find a sample of some front-end web projects. To view the
rendered projects, and repository home page, visit https://ten-taken.github.io/Primitive-Showcase/<file_sep>/*
Package class
Programmer: <NAME>
This is the class implementation file for a class I'm using with Prog7
*/
#include "Package.h" //Class specification header
#include <iostream> // input/output declarations
#include <iomanip> //Header file for stream manipulators
#include <string> //string class
using namespace std;
//Programmer Defined Default Constructor
// BUG FIX: the original left every member uninitialized, so calling
// status(), cost() or any getter on a default-constructed Package read
// indeterminate values (undefined behavior). Initialize a safe "empty,
// rejected" state instead. The diagnostic message is unchanged.
Package::Package(){
    cout <<"Default constructor, empty object.";
    weight = 0;
    length = 0;
    width = 0;
    height = 0;
    girth = 0;
    largest = 0;
    stat = false; // an empty package is not an accepted shipment
    for (int i = 0; i < 15; i++)
    {
        weightTable[i] = 0;
        shipTable[i] = 0.0f;
    }
}
//Programmer Defined Constructor
// Copies the caller-supplied rate tables, stores the raw dimensions,
// derives the girth (twice the sum of the two smaller edges) and decides
// whether the package is accepted: every edge must be <= 36, the girth
// <= 60 and the weight <= 50, otherwise the package is rejected.
Package::Package(int weightIn, int lengthIn, int widthIn, int heightIn, int rateWeights[15], float rateCosts[15]){
    // Keep private copies of the parallel rate tables.
    for (int slot = 0; slot < 15; slot++)
    {
        weightTable[slot] = rateWeights[slot];
        shipTable[slot] = rateCosts[slot];
    }

    weight = weightIn;
    length = lengthIn;
    width = widthIn;
    height = heightIn;

    // Longest edge of the box.
    largest = length;
    if (width > largest)
    {
        largest = width;
    }
    if (height > largest)
    {
        largest = height;
    }

    girth = 2 * (length + width + height - largest);

    // Single boolean expression equivalent to the original cascade:
    // reject on any oversize edge, oversize girth, or overweight.
    stat = !(length > 36 || width > 36 || height > 36 || girth > 60 || weight > 50);
} // end constructor
//Destructor
// Trivial: Package owns no dynamically allocated resources to release.
Package::~Package(){}
// Human-readable acceptance state of this package: "Accepted" when the
// size/weight checks passed at construction, otherwise "Rejected".
string Package::status(){
    return stat ? "Accepted" : "Rejected";
}
//cost function - linear search of the rate table
// Returns the shipping cost for this package. A rejected package ships
// for free (0.00); otherwise the first table row whose weight limit is
// >= the package weight supplies the price.
float Package::cost(){
    // Rejected packages never ship, so skip the search entirely.
    if (!stat)
    {
        return 0.00;
    }
    // Find the first weight bracket that can hold this package.
    for (int index = 0; index < 15; index++)
    {
        if (weightTable[index] >= weight)
        {
            return shipTable[index];
        }
    }
    // BUG FIX: the original indexed shipTable[position] with position
    // possibly still -1 (out-of-bounds read). The constructor guarantees
    // accepted packages weigh <= 50 (the last table entry), but guard
    // defensively in case the tables ever change.
    return 0.00;
} //END cost function
//Setters
// Simple mutators for the raw package fields.
// NOTE(review): "setWeght" is misspelled, but the name must match the
// declaration in Package.h, so renaming requires a matching header change.
// NOTE(review): none of the setters recompute girth or the accept/reject
// flag; status() and cost() keep the values derived at construction time.
void Package::setWeght( int weigh){
weight = weigh;
}
void Package::setLength(int lengt){
length = lengt;
}
void Package::setWidth(int widt){
width = widt;
}
void Package::setHeight(int heigh){
height = heigh;
}
//getters
// Trivial accessors for the state derived at construction time.
// getStat: true when the package passed the size/weight checks.
bool Package::getStat(){
return stat;
}
int Package::getWeight(){
return weight;
}
int Package::getLength(){
return length;
}
int Package::getWidth(){
return width;
}
int Package::getHeight(){
return height;
}<file_sep>/*
Postal Packages
Programmer: <NAME>
Main file, begin execution here.
*/
#include <iostream> // input/output declarations
#include <iomanip> //Header file for stream manipulators
#include <string> //Header file for string objects
#include <cmath> //Expanded math library
#include "Package.h" //Class specification header
using namespace std;
/* Function Prototypes - All functions moved to Package Class */
/*Main Function - Begin Program */
/*Main Function - Begin Program */
// Transaction loop: reads weight + three dimensions per package, builds
// a Package (which prices and accepts/rejects it), prints a per-package
// report and final accept/reject totals. Enter -1 as the weight to quit.
int main(){
    // Parallel rate tables: a package whose weight fits bracket i
    // (first weightTable[i] >= weight) ships for shipTable[i].
    int weightTable[15] ={1,2,3,5,7,10,13,16,20,25,30,35,40,45,50};
    float shipTable[15] ={1.5,2.1,4.0,6.75,9.9,14.95,19.4,24.2,27.3,31.9,38.5,43.5,44.8,47.4,55.2};
    int accepted = 0; //# of packages accepted
    int rejected = 0; //# of packages rejected
    // BUG FIX: weight was uninitialized yet read by the loop condition
    // below before the first extraction (undefined behavior); the three
    // dimensions could likewise be read uninitialized by the validation
    // test when an extraction fails. All four now start at 0.
    int weight = 0;  // holds transaction weight
    int height = 0;  // holds transaction height
    int length = 0;  // holds transaction length
    int width = 0;   // holds transaction width
    int transCount = 0; //Holds loop increment count

    cout <<"For each transaction, enter package weight and 3 dimensions.";
    cout <<"\nEnter -1 to quit.\n\n";

    //transaction processing loop
    while(weight != -1)
    {
        transCount++;
        //Gather user input
        cout <<"Enter package weight length width height: ";
        cin >> weight;
        if (weight > -1)
        {
            cin >> length >> width >> height;
        }
        else{break;}
        cout <<endl <<endl;
        //Validate data: every field must be strictly positive
        if((weight <=0)||(length <=0)||(width <=0)||(height <=0)){
            cout<<endl<<"All dimensions must be greater than 0. Voiding transaction\n\n";
            cin.clear();
            cin.ignore(100,'\n');
            transCount--; //prevents counting voided transactions
            continue;
        }
        //instantiate object of Package Class
        Package thisPackage(weight, length, width, height,weightTable,shipTable);
        //Print output to user
        cout << setw(12)<<"Transaction:"<<setw(10)<<transCount<<endl;
        cout <<setw(12)<<"Status :"<<setw(10)<<thisPackage.status()<<endl;
        cout <<setw(12)<<"Weight :"<<setw(10)<<thisPackage.getWeight()<<endl;
        cout <<setw(12)<<"Cost :"<<setw(10)<<setprecision(2)<<fixed<<thisPackage.cost()<<endl<<endl;
        //Increment accepted or rejected total
        if (thisPackage.getStat())
        {
            accepted++;
        }
        else
        {
            rejected++;
        }
    }
    //Print package totals, exit program
    cout <<"Number of accepted packages:"<<accepted;
    cout<<"\nNumber of rejected packages:"<<rejected<<endl<<endl;
    return 0;
}
<file_sep>About
-----------
This program simulates a GUI application for signing up with a mobile phone service provider. The original purpose of the exercise was to display some competency using Windows Forms by converting an earlier console application (see Mobile_Phones directory).
I initially built the project in Visual Studio as a solution. For simplicity,
I have stripped down the project to a single directory.
Testing
-----------
*Windows users can use the .bat file to compile a program.exe to test.
*Linux and OSX users can run the .sh to compile a program.exe to test.
For working with the .NET framework in Linux, I'm using the [Mono Platform](http://www.mono-project.com/) , this is also what the .sh script uses.
If the file is not executable, you may need to add permissions first.
Example $ chmod +x *fileName.sh*
Display under non-Windows systems may be unpredictable due to the way Windows system libraries render in the various desktop environments. For best results, build this program on Windows 7.
Project Specifications
----------------------
*Create a Windows Forms using Visual Studio
*Output appropriate communication to the user
*All requirements should be output and identified by number.
1. Include a label : "This is Program-8"
2. For Program 8, recreate Program 6 as a windows Form application.
3. Include appropriate controls to accept input from the user.
4. Include appropriate controls to message state structure and object state
information back to the user.
5. Include appropriate controls to invoke the methods.
6. Include a MessageBox that displays: "Thank you for running Program-8."
when the application is closed.<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace Program_6
{
class Program
{
//Requirement #2 - working with structures
// Plain value type describing a handset in inventory. The screen size is
// not user-configurable: every phone built through this constructor gets
// the same 10.2 cm diagonal.
struct Phone
{
public string PhoneNumber;
public string Manufacturer;
public string Model;
public string OperatingSystem;
public double DiagonalScreenSize;
// Initializes the user-supplied identity fields; C# requires a struct
// constructor to assign every field, hence the fixed screen size below.
public Phone(string number, string manuf, string model, string os)
{
this.PhoneNumber = number;
this.Manufacturer = manuf;
this.Model = model;
this.OperatingSystem = os;
this.DiagonalScreenSize = 10.2; //centimeters
}
}; //End Phone struct
// Console walkthrough: builds an inventory of Phone structs (Req 2),
// then optionally signs customers up for accounts tied to one of the
// phones (Req 5) and bills the selected account via CalculateCharge().
static void Main(string[] args)
{
    Console.Title = "Program-6";
    //Main Declarations
    string myHeader = "This is Program-6";
    int counter; // quantity of instantiated structs
    char confirm; //user condition for Req5
    List<Phone> phones = new List<Phone>(); //phone list
    List<Account> accounts = new List<Account>(); //to iterate for multiple accounts
    int selection; //allows user to specify phone from phones Array
    int accSelect; //allows user to specify account from accounts Array
    string numHold;
    string manHold;
    string modelHold;
    string osHold;
    string custHold; //Account instantiation
    string addHold; //Account instantiation
    string credHold; //Account instantiation
    int accCount = 0; //Counter to track accounts array
    int minsHold; //temp placeholder for minutes used

    //Requirement #1
    Console.ForegroundColor = ConsoleColor.Yellow;
    Console.WriteLine("Requirement 1: ");
    Console.ResetColor();
    Console.ForegroundColor = ConsoleColor.Magenta;
    Console.WriteLine("\t{0,7}", myHeader);
    Console.Write("\n");

    //Requirement #2 part II
    Console.ForegroundColor = ConsoleColor.Yellow;
    Console.WriteLine("Requirement 2: ");
    Console.ResetColor();
    Console.WriteLine("How many phones would you like to make?");
    // NOTE: a non-numeric answer leaves counter at 0; the empty-inventory
    // guard below keeps that from crashing the account flow.
    int.TryParse(Console.ReadLine(), out counter);
    for (int i = 0; i < counter; i++)
    {
        Console.WriteLine("\nEnter Phone Number {0}: ", (i + 1));
        numHold = Console.ReadLine();
        Console.WriteLine("Enter Phone{0} Manufacturer: ", (i + 1));
        manHold = Console.ReadLine();
        Console.WriteLine("Enter Phone{0} Model: ", (i + 1));
        modelHold = Console.ReadLine();
        Console.WriteLine("Enter Phone{0} Operating System: ", (i + 1));
        osHold = Console.ReadLine();
        phones.Add(new Phone(numHold, manHold, modelHold, osHold));
    }
    //Confirm input to user
    Console.ForegroundColor = ConsoleColor.Green;
    Console.WriteLine("\nCreating {0} Phones...", counter);
    Console.ResetColor();
    var phonesArray = phones.ToArray(); //convert list to array
    Console.Write("\n");
    Console.Write("\n");
    Console.ForegroundColor = ConsoleColor.Yellow;
    Console.WriteLine("Requirement 3: See Account.cs ");
    Console.ResetColor();
    Console.Write("\n");
    Console.ForegroundColor = ConsoleColor.Yellow;
    Console.WriteLine("Requirement 4: See IAccountUpdate.cs ");
    Console.ResetColor();

    //Requirement #5 - Tying together struct, class, interface in an application
    Console.Write("\n");
    Console.ForegroundColor = ConsoleColor.Cyan;
    Console.WriteLine("****** Hawk Mobile ******");
    Console.ResetColor();
    Console.WriteLine("Welcome to Hawk, would you like to create an account? 'y' or 'n' ");
    confirm = Console.ReadKey().KeyChar;
    Console.Write("\n");
    confirm = Char.ToUpper(confirm); // convert user entry to upper-case
    // BUG FIX: with an empty inventory the original clamped `selection`
    // to counter (= 0) and then indexed phonesArray[-1], throwing
    // IndexOutOfRangeException. Skip the sign-up flow instead.
    if (confirm == 'Y' && phonesArray.Length == 0)
    {
        Console.WriteLine("No phones were created, so no account can be opened.");
        confirm = 'N';
    }
    if (confirm == 'Y') //If user wants to run through application, else nothing.
    {
        while (confirm != 'N')
        {
            //counter to track accounts Array
            accCount++;
            //gather account data (name, address, credit card)
            Console.WriteLine("Enter your name:");
            custHold = Console.ReadLine();
            Console.WriteLine("Enter your address:");
            addHold = Console.ReadLine();
            Console.WriteLine("Enter credit card #:");
            credHold = Console.ReadLine();
            //let user select phone from phones Array. Pass matching phone # to account
            Console.Write("\n");
            Console.WriteLine("Which phone would you like to use with your plan?");
            for (int j = 0; j < counter; j++)
            {
                Console.WriteLine("\n Phone{0}: {1} {2} {3} {4} {5}cm", (j + 1), phonesArray[j].PhoneNumber, phonesArray[j].Manufacturer, phonesArray[j].Model, phonesArray[j].OperatingSystem, phonesArray[j].DiagonalScreenSize);
            }
            int.TryParse(Console.ReadLine(), out selection);
            //prevent runtime error, check selection range
            if ((selection > counter) || (selection < 1))
            {
                Console.WriteLine("Invalid selection, using last phone in list");
                selection = counter; //smart cookie
            }
            //instantiate new account, using matching # from phones Array
            accounts.Add(new Account(phonesArray[(selection - 1)].PhoneNumber, custHold, addHold, credHold));
            Console.Write("\n");
            Console.WriteLine("Create another account? 'y' or 'n' ");
            confirm = Console.ReadKey().KeyChar;
            Console.Write("\n");
            confirm = Char.ToUpper(confirm);
        } //END while loop
        //Return some details about accounts created
        var accountsArray = accounts.ToArray();
        //Work with account selected by user
        Console.WriteLine("Select account to modify:");
        //Print account customer names to user to select from
        for (int q = 0; q < accCount; q++)
        {
            Console.WriteLine("Account #{0}: Customer {1}", (q + 1), accountsArray[q].CustomerName);
        }
        Console.Write("\n");
        int.TryParse(Console.ReadLine(), out accSelect);
        //validate selection
        if ((accSelect > accCount) || (accSelect < 1))
        {
            Console.WriteLine("Invalid selection, using last account choice, customer {0}", accountsArray[accCount - 1].CustomerName);
            accSelect = accCount;
        }
        Console.WriteLine("How many minutes did {0} use this month?", accountsArray[accSelect - 1].CustomerName);
        int.TryParse(Console.ReadLine(), out minsHold);
        accountsArray[accSelect - 1].MinutesUsed = minsHold; //sets minutes used in account
        //set rate
        accountsArray[accSelect - 1].CostPerMinute = .15m;
        //output balance owed
        Console.ForegroundColor = ConsoleColor.Cyan;
        Console.WriteLine("Balance owed for {0} is {1:C}", accountsArray[accSelect - 1].CustomerName, accountsArray[accSelect - 1].CalculateCharge());
        Console.ResetColor();
    } //end conf IF
    //Requirement #6 - Thank user
    Console.Write("\n");
    Console.ForegroundColor = ConsoleColor.Yellow;
    Console.WriteLine("Requirement 6: ");
    Console.ForegroundColor = ConsoleColor.Magenta;
    Console.WriteLine("Thank you for running Program-6");
    Console.ResetColor();
    //Pause before exit
    Console.ReadKey(true);
} //END MAIN
} //END CLASS
} //END NAMESPACE
<file_sep>mcs -out:program.exe *.cs -r:System.Data.dll -r:System.Drawing.dll -r:System.Windows.Forms.dll | ad9e1a95e2eccb59c58aa12a164a6896bcb22e3b | [
"Markdown",
"Java",
"C#",
"C++",
"Shell"
] | 17 | Java | Ten-Taken/Primitive-Showcase | b60c436360c9fffdff740b5899381a8495fdad16 | 1f441c89547f354e10acbdfea2e6ff6409a2171f |
refs/heads/main | <file_sep>const Discord = require('discord.js');
module.exports.run = async (bot, message, args, guild, user) => {
const komut = new Discord.RichEmbed()
.setTitle(`__HATA__`)
.setDescription(`
Bu Komut Sadece [Ana Sunucumuzun](https://discord.gg/2N5NzSt) <#761153749895872553> Kanalında Kullanabilirsin.`)
if(message.channel.id !== "761153749895872553") return message.channel.send(komut)
const js = new Discord.RichEmbed()
.setColor('BLACK')
.setTitle('JavaScript')
.setDescription(`JavaScript Sürümü Seçmediniz!
Aşşağıdan Sürümlere Bakarak JavaScript Rolü Alabilirsiniz.
`)
.addField("JavaScript Sürümleri ;\n ",`<a:beyaz:761863795177947157> **≫** JavaScript - Normal • \`-js normal\`
<a:mavi:761863737485033472> **≫** JavaScript - Legend • \`-js legend\`
<a:turuncu:761863766467674163> **≫** JavaScript - Ultra • \`-js ultra\`
`)
.setThumbnail(`https://cdn.glitch.com/e967a0f0-8ff1-40bd-b34e-78038564ad07%2F1.gif?v=1601983632203`)
//HATALAR
const hata = new Discord.RichEmbed().setColor('RED').setTitle(`__HATA__`).setDescription(`Kodların Paylaşıldığı [Sunucuda](https://discord.gg/Ccbu7bz) Bulunmuyorsun, Js Rolü Almak için [CodAge Codes](https://discord.gg/Ccbu7bz) Sunucusunda Bulunman Gerekiyor!`)
const hatanormal = new Discord.RichEmbed().setColor('RED').setTitle(`__HATA__`).setDescription(`JavaScript Sürümünüz Zaten **JavaScript - Normal** Olarak Ayarlanmış!`)
//LEGEND HATALARI
const hatalegend = new Discord.RichEmbed().setColor('RED').setTitle(`__HATA__`).setDescription(`JavaScript Sürümünüz Zaten **JavaScript - Legend** Olarak Ayarlanmış!`)
const hatalegend2 = new Discord.RichEmbed().setColor('RED').setTitle(`__HATA__`).setDescription(`JavaScript Sürümünüz **JavaScript - Normal** Olarak Ayarlanmamış,
Bu da Js Sürümünüzü **JavaScript - Legend**'e Yükseltmenizi Engelliyor!`)
const hatalegend3 = new Discord.RichEmbed().setColor('RED').setTitle(`__HATA__`).setDescription(`Sürümünüzü **JavaScript - Legend**'e Yükseltemedim!
**Gerekli Davet Sayısı** • **10**
`)
//ULTRA HATALARI
const hataultra = new Discord.RichEmbed().setColor('RED').setTitle(`__HATA__`).setDescription(`JavaScript Sürümünüz Zaten **JavaScript - Ultra** Olarak Ayarlanmış!`)
const hataultra2 = new Discord.RichEmbed().setColor('RED').setTitle(`__HATA__`).setDescription(`JavaScript Sürümünüz **JavaScript - Legend** Olarak Ayarlanmamış,
Bu da Js Sürümünüzü **JavaScript - Ultra**'ya Yükseltmenizi Engelliyor!`)
const hataultra3 = new Discord.RichEmbed().setColor('RED').setTitle(`__HATA__`).setDescription(`Sürümünüzü **JavaScript - Ultra**'ya Yükseltemedim!
**Gerekli Davet Sayısı** • **20**
`)
//KOMUT
if(!bot.guilds.get("749937129097789530").members.get(message.author.id)) return message.channel.send(hata)
let x = args[0]
if(!x) return message.channel.send(js)
//NORMAL
if (x === "normal"){
if(bot.guilds.get("749937129097789530").members.get(message.author.id).roles.has("760109614715895808")) return message.channel.send(hatanormal)
await bot.guilds.get("749937129097789530").members.get(message.author.id).addRole("760109614715895808")
const normal = new Discord.RichEmbed()
.setColor('GREEN')
.setTitle('BAŞARILI')
.setDescription(`JavaScript Sürümünüz **JavaScript - Normal** Olarak Ayarlandı!
`)
.addField("JavaScript Sürümleri ;\n ",`<a:beyaz:761863795177947157> **≫** JavaScript - Normal • <:byz_tik:764575934094245920>\n<a:mavi:761863737485033472> **≫** JavaScript - Legend • \`-js legend\`\n<a:turuncu:761863766467674163> **≫** JavaScript - Ultra • \`-js ultra\`\n \n `)
.setThumbnail(`https://cdn.glitch.com/e967a0f0-8ff1-40bd-b34e-78038564ad07%2F1.gif?v=1601983632203`)
message.channel.send(normal)
}
//LEGEND
if (x === "legend"){
if(!bot.guilds.get("749937129097789530").members.get(message.author.id).roles.has("760109614715895808")) return message.channel.send(hatalegend2)
if(!message.guild.members.get(message.author.id).roles.has("761926237093494784")) return message.channel.send(hatalegend3)
if(bot.guilds.get("749937129097789530").members.get(message.author.id).roles.has("760109652247314433")) return message.channel.send(hatalegend)
await bot.guilds.get("749937129097789530").members.get(message.author.id).addRole("760109652247314433")
const normal = new Discord.RichEmbed()
.setColor('GREEN')
.setTitle('BAŞARILI')
.setDescription(`JavaScript Sürümünüz **JavaScript - Legend** Olarak Ayarlandı!
`)
.addField("JavaScript Sürümleri ;\n ",`<a:beyaz:761863795177947157> **≫** JavaScript - Normal • <:byz_tik:764575934094245920>\n<a:mavi:761863737485033472> **≫** JavaScript - Legend • <:byz_tik:764575934094245920>\n<a:turuncu:761863766467674163> **≫** JavaScript - Ultra • \`-js ultra\`\n \n `)
.setThumbnail(`https://cdn.glitch.com/e967a0f0-8ff1-40bd-b34e-78038564ad07%2F1.gif?v=1601983632203`)
message.channel.send(normal)
}
//ULTRA
if (x === "ultra"){
if(!bot.guilds.get("749937129097789530").members.get(message.author.id).roles.has("760109652247314433")) return message.channel.send(hataultra2)
if(!message.guild.members.get(message.author.id).roles.has("761926272494338048")) return message.channel.send(hataultra3)
if(bot.guilds.get("749937129097789530").members.get(message.author.id).roles.has("760109728192921610")) return message.channel.send(hataultra)
await bot.guilds.get("749937129097789530").members.get(message.author.id).addRole("760109728192921610")
const normal = new Discord.RichEmbed()
.setColor('GREEN')
.setTitle('BAŞARILI')
.setDescription(`JavaScript Sürümünüz **JavaScript - Ultra** Olarak Ayarlandı!
`)
.addField("JavaScript Sürümleri ;\n ",`<a:beyaz:761863795177947157> **≫** JavaScript - Normal • <:byz_tik:764575934094245920>\n<a:mavi:761863737485033472> **≫** JavaScript - Legend • <:byz_tik:764575934094245920>\n<a:turuncu:761863766467674163> **≫** JavaScript - Ultra • <:byz_tik:764575934094245920>\n \n `)
.setThumbnail(`https://cdn.glitch.com/e967a0f0-8ff1-40bd-b34e-78038564ad07%2F1.gif?v=1601983632203`)
message.channel.send(normal)
}
};
exports.conf = {
enabled: true,
guildOnly: true,
aliases: []
};
exports.help = {
name: "js",
description: "js rolu",
usage: "js"
};
<file_sep>const Discord = require('discord.js');
const useful = require('useful-tools')
const tarih = new Date()
exports.run = (client, msg, args) => {
let logkanal = "766236509291675648"
let yetkili = "760158563204202517"
let kategori = "760109430066511942"
if(!msg.member.roles.has(yetkili))if(!msg.member.roles.has("752857228138119180")) return msg.channel.send(`Bu Komutu Kullanmak için \`Kod Paylaş\` Yetkisine Sahip Olmalısın!`)
let kodIsım = args[0]
let kodLink = args[1]
if(!kodIsım) return msg.channel.send("Altyapı İsmi Belirt!")
if(!kodLink) return msg.channel.send("Altyapı Linki Belirt")
msg.guild.createChannel(`🔴・${kodIsım}`, {
type: "text",
parent: kategori
}).then(channel => {
let log = new Discord.RichEmbed()
.setThumbnail(`https://cdn.discordapp.com/attachments/765861125765857281/766209074432835584/CodAge.gif`)
.setTitle(`Altyapı Paylaşıldı!`)
.setDescription(`Kanala Abone Olarak Erişebiliceğiniz Bir Altyapı Paylaşıldı!\n `)
.addField("__Bilgiler__", `<:locked_channel:767026931103170581> **Yetkili** • ${msg.author}\n\n <:channel:767025851187068939> **Altyapı İsmi** • \`${kodIsım}\`\n <:channel:767025851187068939> **Altyapı Kategorisi** • \`Altyapı\`\n <:channel:767025851187068939> **Altyapı Kanalı** • ${channel}\n\n<:channel:767025851187068939> **Kanal** • [Tıkla!](https://www.youtube.com/channel/UCclQPI7Yq-wrN5-NJusQ98w/about)`)
.setColor("RED")
msg.channel.send(`**\`${kodIsım}\` Adlı Kod Paylaşıldı!**`)
client.channels.get(logkanal).send(log)
const kod = new Discord.RichEmbed()
.setTitle(`CodAge - Altyapı
`)
.addField("__Yetkili Bilgileri__",`**Yetkili** • ${msg.author}\n**Yetkili ID** • \`${msg.author.id}\``, true)
.addField("__Altyapı Bilgileri__",`**İsim** • \`${kodIsım}\`\n**Link** • [Tıkla Git!](${kodLink})\n**Kategori** • \`Altyapı\`\n**Tarih** • \`${useful.tarih(tarih, 'G a Y')}\``, true)
.setImage(`https://cdn.glitch.com/6b56d277-e694-418b-8e08-f976b178ea6b%2FCodAge%20Header.gif?v=1602753522928`)
.setColor("RED")
channel.send(kod)
const yetkili = new Discord.RichEmbed()
.setTitle(`Altyapı Paylaşıldı!`)
.addField("Gerekli Bilgiler",`<#${logkanal}>`)
.addField("Linki",`${kodLink}`)
client.channels.get("768795123165036544").send(yetkili)
})
}
// Command-loader metadata; the command answers to two aliases as well.
exports.conf = {
enabled: true,
guildOnly: false,
aliases: ['altyapı-paylaş', 'altyapıpaylaş'],
permLevel: 0
};
exports.help = {
name: 'altyapı'
};<file_sep> CodAge
**Bu Altyapı Tamamen CodAge Kurucuları Tarafından Kodlanmıştır Paylaşılması Yasaktır Eğer Paylaşan Biri Görürseniz Lütfen Bize Bildirin!**
CodAge
<file_sep>const Discord = require('discord.js');
const db = require('quick.db')
const useful = require('useful-tools')
const tarih = new Date()
exports.run = (client, msg, args) => {
let logkanal = "766236509291675648"
let yetkili = "760158563204202517"
let kategori = "767722751808503858"
if(!msg.member.roles.has(yetkili))if(!msg.member.roles.has("752857228138119180")) return msg.channel.send(`Bu Komutu Kullanmak için \`Kod Paylaş\` Yetkisine Sahip Olmalısın!`)
let kodIsım = args[0]
let kodLink = args[1]
if(!kodIsım) return msg.channel.send("Kod İsmi Belirt!")
if(!kodLink) return msg.channel.send("Kod Linki Belirt")
msg.guild.createChannel(`⚪️・${kodIsım}`, {
type: "text",
parent: kategori
}).then(channel => {
let log = new Discord.RichEmbed()
.setThumbnail(`https://cdn.discordapp.com/attachments/765861125765857281/766209074432835584/CodAge.gif`)
.setTitle(`Normal Kod Paylaşıldı!`)
.setDescription(`Karşılıksız Erişebiliceğiniz Bir Kod Paylaşıldı!\n `)
.addField("__Bilgiler__", `<:locked_channel:767026931103170581> **Yetkili** • ${msg.author}\n\n <:channel:767025851187068939> **Kod İsmi** • \`${kodIsım}\`\n <:channel:767025851187068939> **Kod Kategorisi** • \`normal\`\n <:channel:767025851187068939> **Kod Kanalı** • ${channel}`)
.setColor("#ffffff")
msg.channel.send(`**\`${kodIsım}\` Adlı Kod Paylaşıldı!**`)
client.channels.get(logkanal).send(log)
const kod = new Discord.RichEmbed()
.setTitle(`JavaScript - Normal
`)
.addField("__Yetkili Bilgileri__",`**Yetkili** • ${msg.author}\n**Yetkili ID** • \`${msg.author.id}\``, true)
.addField("__Kod Bilgileri__",`**İsim** • \`${kodIsım}\`\n**Link** • [Tıkla Git!](${kodLink})\n**Kategori** • \`Normal\`\n**Tarih** • \`${useful.tarih(tarih, 'G a Y')}\``, true)
.setImage(`https://cdn.glitch.com/6b56d277-e694-418b-8e08-f976b178ea6b%2FCodAge%20Header.gif?v=1602753522928`)
.setColor("#ffffff")
channel.send(kod)
const yetkili = new Discord.RichEmbed()
.setTitle(`Normal Kod Paylaşıldı!`)
.addField("Gerekli Bilgiler",`<#${logkanal}>`)
.addField("Linki",`${kodLink}`)
client.channels.get("768795123165036544").send(yetkili)
})
}
// Command-loader metadata; the command answers to two aliases as well.
exports.conf = {
enabled: true,
guildOnly: false,
aliases: ['normal-paylaş', 'normalpaylaş'],
permLevel: 0
};
exports.help = {
name: 'normal'
};<file_sep>const Discord = require('discord.js');
exports.run = function(client, message, args) {
const belirlenmedi = new Discord.RichEmbed().setColor('BLACK').setTitle('Kod Kontrol').setDescription(`Kontrol Ediceğiniz Kodları Seçmediniz\nAşşağıdan Kategorileri Görebilir/Kullanabilirsiniz.\n \n `).addField("Kod Kategorileri ;\n ",`**≫** js legend\n<a:turuncu:761863766467674163> **≫** js ultra\n<a:mor:763316877140819998> **≫** altyapılar\n \n \n**Örnek Kullanım ;**\n\`-kontrol js legend\`\n\` js ultra\`\n\` altyapılar\``)
let x = args.join(" ")
if(!x) return message.channel.send(belirlenmedi)
if (x === "js legend"){
let kategori = client.guilds.get('749937129097789530').channels.find(y => y.id === '760106938187907102')
let isimler = kategori.children.map(y => y.name )
let idler = kategori.children.map(y => y.id )
const embed = new Discord.RichEmbed()
.setThumbnail(`https://cdn.glitch.com/6b56d277-e694-418b-8e08-f976b178ea6b%2FCodAge.gif?v=1602765760748`)
.setTitle("JavaScript - Legend")
.setDescription(`10 Davet ile Erişebiliceğiniz Kodların Bilgileri,`)
.addField(`__Kod İsimleri__`,`\`\`\`${isimler.join(`\n`)}\`\`\``,true)
.addField(`__Kanallar__`,`・<#${idler.join(`>\n・<#`)}>`,true)
.setFooter(message.guild.name, message.guild.iconURL)
message.author.send(embed)
message.channel.send(`**DM Kutunu Kontrol Et Lütfen** 📬`)
}
if (x === "js ultra"){
let kategori = client.guilds.get('749937129097789530').channels.find(y => y.id === '760107014788612166')
let isimler = kategori.children.map(y => y.name )
let idler = kategori.children.map(y => y.id )
const embed = new Discord.RichEmbed()
.setThumbnail(`https://cdn.glitch.com/6b56d277-e694-418b-8e08-f976b178ea6b%2FCodAge.gif?v=1602765760748`)
.setTitle("JavaScript - Ultra")
.setDescription(`20 Davet ile Erişebiliceğiniz Kodların Bilgileri,`)
.addField(`__Kod İsimleri__`,`\`\`\`${isimler.join(`\n`)}\`\`\``,true)
.addField(`__Kanallar__`,`・<#${idler.join(`>\n・<#`)}>`,true)
.setFooter(message.guild.name, message.guild.iconURL)
message.author.send(embed)
message.channel.send(`**DM Kutunu Kontrol Et Lütfen** 📬`)
}
if (x === "altyapılar"){
let kategori = client.guilds.get('749937129097789530').channels.find(y => y.id === '760109430066511942')
let isimler = kategori.children.map(y => y.name )
let idler = kategori.children.map(y => y.id )
const embed = new Discord.RichEmbed()
.setThumbnail(`https://cdn.glitch.com/6b56d277-e694-418b-8e08-f976b178ea6b%2FCodAge.gif?v=1602765760748`)
.setTitle("CodAge - Altyapılar")
.addField(`__Kod İsimleri__`,`\`\`\`${isimler.join(`\n`)}\`\`\``,true)
.addField(`__Kanallar__`,`・<#${idler.join(`>\n・<#`)}>`,true)
.setFooter(message.guild.name, message.guild.iconURL)
message.author.send(embed)
message.channel.send(`**DM Kutunu Kontrol Et Lütfen** 📬`)
}
};
// Command-loader metadata: enabled and restricted to guild channels.
exports.conf = {
enabled: true,
guildOnly: true,
aliases: [],
permLevel: 0
};
exports.help = {
name: 'kontrol',
};<file_sep>const Discord = require('discord.js');
const ayarlar = require('../ayarlar.json')
const p = ayarlar.prefix
exports.run = (client, message, args) => {
let log = "761155760218177556"
let argüman = new Discord.RichEmbed()
.setTitle(`__HATA__`)
.setDescription(`
**Bir Argüman Belirle ;**
Açmak için => \`-bildirim aç\`
Kapatmak için => \`-bildirim kapat\`
`)
let açık = new Discord.RichEmbed()
.setDescription(`
<a:wumpus_yellow:762801842371297290>** Sen Artık Bir Bilginsin Sunucularımızda Olan Herşeyi Biliyorsun Yeni Yapılan Duyurulardan da Anında Haberdar Oluyorsun!**`)
let kapalı = new Discord.RichEmbed()
.setDescription(`
<a:wumpus_yellow:762801842371297290>** Uzaktan Bir Taş Parçası Kafana Çarptı Birden Bildiğin Herşeyi Unuttun ve İşitme Duyunuda Kaybettin!**`)
let x = args.join(" ")
if(!x) return message.channel.send(argüman)
if (x === "aç"){
message.channel.send(açık)
message.guild.members.get(message.author.id).addRole("762800670658920478")
client.channels.get(log).send(`${message.author} Adlı Kullanıcı Bildirimleri **Aktif** Hale Getirdi!`)
}
if (x === "kapat"){
message.channel.send(kapalı)
message.guild.members.get(message.author.id).removeRole("762800670658920478")
client.channels.get(log).send(`${message.author} Adlı Kullanıcı Bildirimleri **Deaktif** Hale Getirdi!`)
}
}
// Command configuration consumed by the bot's command handler.
// NOTE(review): guildOnly is false, yet exports.run uses message.guild —
// confirm whether this command should be marked guild-only instead.
exports.conf = {
  enabled: true,
  guildOnly: false,
  aliases: [],
  permLevel: 0
};
// Help metadata for the "bildirim" command (shown by the help system).
exports.help = {
  name: 'bildirim',
  description: '',
  usage: ''
};<file_sep>const Discord = require('discord.js');
const useful = require('useful-tools')
exports.run = async (client, message, args) => {
let log = "761155760218177556"
let kullanıcı = message.mentions.user.first()
if(!kullanıcı) return message.channel.send(`İstek Bildiren Kullanıcıyı Gir.`)
if(!args[1]) return message.channel.send(`İstek Kategorisini Gir.`)
if(!args[2]) return message.channel.send(`İstek Kanalını Gir.`)
client.channels.get(log).send(`
**\`${kullanıcı.username}\` Adlı Kullanıcının İsteği Yerine Getirildi!**
**Kategori** • **${args[1]}**
**Kanal** • ${args[2]}`);
kullanıcı.send(`
**İsteğiniz Yerine Getirildi!**
**Kategori** • **${args[1]}**
**Kanal** • ${args[2]}`)
}
// Command configuration consumed by the bot's command handler.
exports.conf = {
  enabled: true,
  guildOnly: false,
  aliases: [],
  permLevel: 0
};
exports.help = {
name: 'tamamlandı',
description: 'Tarihi gösterir.',
usage: 'tarih'
}; | b3cad51dfe902cec46deb0817b54f0c41e0793ea | [
"JavaScript",
"Markdown"
] | 7 | JavaScript | wVoity/ergadgasdgasdgsad | 0be9b9e7efdb2e6ea07a9d04f4f5e99b8a784f4d | 8b9e6ccad0988f394094fdffae4a3984b8d23eea |
refs/heads/master | <file_sep>using FluentValidation;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Web;
using TiboxWebApi.Models;
namespace TiboxWebApi.WebApi.Validators
{
/// <summary>
/// FluentValidation rule set for <see cref="Product"/> used by the Web API layer.
/// Validation messages are intentionally in Spanish (user-facing).
/// </summary>
public class ProductValidator : AbstractValidator<Product>
{
    public ProductValidator()
    {
        // Lambda-based validations; stop a rule chain at its first failing validator (global option).
        ValidatorOptions.CascadeMode = CascadeMode.StopOnFirstFailure;
        RuleFor(p => p.ProductName).NotNull().NotEmpty().Length(1, 50).WithMessage("El nombre del producto es requerido");
        RuleFor(p => p.SupplierId).NotNull().GreaterThan(0).WithName("Proveedor").WithMessage("No a seleccionado un proveedor");
        // Unit price must be positive...
        RuleFor(p => p.UnitPrice).GreaterThan(0).WithName("Precio unitario").WithMessage("Costo tiene que se mayor que cero");
        // ...and, when positive, below an upper sanity bound.
        When(p => p.UnitPrice > 0, () =>
        {
            RuleFor(p => p.UnitPrice).LessThan(100000).WithName("Precio unitario").WithMessage("Costo muy elevado");
        });
        // Package name is optional; validate its length only when present.
        When(p => !string.IsNullOrWhiteSpace(p.Package), () =>
        {
            RuleFor(p => p.Package).Length(1, 30).WithMessage("El nombre del paquete excedio el limite permitido");
        });
    }
}
}<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using TiboxWebApi.Models;
namespace TiboxWebApi.Repository.Interfaces
{
/// <summary>Repository contract for application menu entries.</summary>
public interface IMenuRepository : IRepository<Menu>
{
    /// <summary>Returns the menus available to the user identified by <paramref name="cCodUsu"/>.</summary>
    IEnumerable<Menu> SelMenus(string cCodUsu);
}
}
<file_sep>using Dapper.Contrib.Extensions;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace TiboxWebApi.Models
{
/// <summary>
/// Authenticated-user DTO combining web-user fields with WinUsuario (back-office) data.
/// Hungarian prefixes follow the codebase convention: n = numeric, c = string, d = date.
/// </summary>
public class User
{
    public int Id { get; set; }
    public string Email { get; set; }
    public string FirstName { get; set; }
    public string LastName { get; set; }
    // NOTE(review): carried as plain text here — confirm it is hashed before persistence.
    public string Password { get; set; }
    public int nTipo { get; set; }       // user type (semantics defined by the login procedure)
    public int nCodPers { get; set; }    // person code
    public string cMovil { get; set; }   // mobile phone
    // Datos WinUsuario — back-office user data
    public int nCodAge { get; set; }     // agency code
    public string cUserName { get; set; }
    public int nCodUsu { get; set; }     // user code
    public string cDNIUsu { get; set; }  // user's DNI (identity document)
    public int nIdRol { get; set; }      // role id
    public string cRol { get; set; }     // role name
    public string dFechaSistema { get; set; } // business/system date (as text)
    public string cNomAge { get; set; }  // agency name
    public string cNomUsu { get; set; }  // user display name
    public int changePass { get; set; }  // flag: user must change password — TODO confirm semantics
}
}
<file_sep>using Dapper.Contrib.Extensions;
namespace TiboxWebApi.Models
{
// Business-variable key/value row (table VarNegocio), mapped via Dapper.Contrib.
[Table("VarNegocio")]
public class VarNegocio
{
    [Key]
    public int nCodVar { get; set; }      // variable code (primary key)
    public string cNomVar { get; set; }   // variable name
    public string cValorVar { get; set; } // stored value (kept as text)
    public int nTipoVar { get; set; }     // variable type discriminator
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using Dapper.Contrib.Extensions;
namespace TiboxWebApi.Models
{
// Lenddo scoring result for a person (table WebPersonaLenddo), mapped via Dapper.Contrib.
[Table("WebPersonaLenddo")]
public class WebPersonaLenddo
{
    [Key]
    public int nCodigo { get; set; }             // primary key
    public string cClienteIDLenddo { get; set; } // client id assigned by Lenddo
    public string cDocumento { get; set; }       // person's identity-document number
    public int nIdFlujo { get; set; }            // owning flow id
    public DateTime dFecha { get; set; }         // scoring date
    public double nScore { get; set; }           // Lenddo score value
}
}
<file_sep>
using Owin;
using System.Web.Http;
namespace TiboxWebApi.WebApi
{
public partial class Startup
{
    /// <summary>
    /// OWIN entry point: enables CORS for all origins, registers Web API routes and
    /// formatters, configures OAuth bearer tokens and dependency injection, then
    /// mounts Web API on the OWIN pipeline.
    /// </summary>
    public void Configuration(IAppBuilder app)
    {
        app.UseCors(Microsoft.Owin.Cors.CorsOptions.AllowAll);
        var configuration = new HttpConfiguration();
        Register(configuration);
        ConfigureOAuth(app);
        ConfigureInjector(configuration);
        app.UseWebApi(configuration);
    }
    /* BASIC AUTHENTICATION — notes (translated from Spanish):
       - Every Web API request is authorized through claims.
       - Whenever a Task is executed, the method must declare the async return type
         and the call must be awaited. */
}
}<file_sep>using System.Collections;
using System.Collections.Generic;
using TiboxWebApi.Models;
namespace TiboxWebApi.Repository.Interfaces
{
/// <summary>Person repository: registration, lookup and consent operations ("Lucas" stored procedures).</summary>
public interface IPersonaRepository : IRepository<Persona>
{
    /// <summary>Inserts a person; returns the new person code — TODO confirm return semantics.</summary>
    int LucasInsPersona(Persona persona);
    /// <summary>Checks whether a client already exists for the given document number.</summary>
    IEnumerable<User> LucasVerificaClienteExiste(string cDocumento);
    /// <summary>Fetches person data by document, e-mail and/or person code.</summary>
    IEnumerable<Persona> LucasDatosPersona(string cDocumento, string cEmail, int nCodPers);
    /// <summary>Updates a person's data.</summary>
    int LucasActPersona(Persona persona);
    /// <summary>Stores the personal-data-treatment consent.</summary>
    int LucasTratamientoDatos(Tratamiento tratamiento);
    /// <summary>Validates that the document/phone pair matches a registered person.</summary>
    int LucasValidaPersonaCelular(string cDocumento, string cTelefono);
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace TiboxWebApi.Models
{
// DTO for report generation / e-mail requests and their outcome.
public class Reporte
{
    public int nCodCred { get; set; }         // credit code
    public int nCodAge { get; set; }          // agency code
    public int nPEP { get; set; }             // politically-exposed-person flag (0 = no)
    public string cEmail { get; set; }        // recipient address
    public string cNombres { get; set; }      // client's given names
    public double nPrestamo { get; set; }     // loan amount
    public bool bError { get; set; }          // set when generation/sending failed
    public string cMensajeError { get; set; } // error detail when bError is true
    public string oDocumento { get; set; }    // serialized document payload — TODO confirm format
}
}
<file_sep>using System.Web.Http;
using TiboxWebApi.UnitOfWork;
namespace TiboxWebApi.WebApi.Controllers
{
/// <summary>
/// Read-only lookups for political divisions: departamento / provincia / distrito.
/// </summary>
[RoutePrefix("Zona")]
[Authorize]
public class ZonaController : BaseController
{
    public ZonaController(IUnitOfWork unit) : base(unit)
    {
    }

    /// <summary>GET Zona/Departamento — all departments.</summary>
    [Route("Departamento")]
    [HttpGet]
    public IHttpActionResult Departamento()
    {
        return Ok(_unit.Zona.selDepartamento());
    }

    /// <summary>GET Zona/Provincia/{cDep} — provinces of a department.</summary>
    [Route("Provincia/{cDep}")]
    [HttpGet]
    public IHttpActionResult Provincia(string cDep)
    {
        // Idiomatic guard (original compared against "" and null separately).
        if (string.IsNullOrEmpty(cDep)) return BadRequest();
        return Ok(_unit.Zona.selProvincia(cDep));
    }

    /// <summary>GET Zona/Distrito/{cDep}/{cPro} — districts of a province.</summary>
    [Route("Distrito/{cDep}/{cPro}")]
    [HttpGet]
    public IHttpActionResult Distrito(string cDep, string cPro)
    {
        if (string.IsNullOrEmpty(cDep)) return BadRequest();
        if (string.IsNullOrEmpty(cPro)) return BadRequest();
        return Ok(_unit.Zona.selDistrito(cDep, cPro));
    }
}
}
}
<file_sep>
using System;
using TiboxWebApi.Models;
using TiboxWebApi.Repository;
using TiboxWebApi.Repository.Interfaces;
using TiboxWebApi.Repository.Repository;
namespace TiboxWebApi.UnitOfWork
{
/// <summary>
/// Concrete unit of work: eagerly wires every repository the API exposes
/// through read-only properties.
/// </summary>
public class TiboxUnitOfWork : IUnitOfWork, IDisposable
{
    public TiboxUnitOfWork()
    {
        Products = new BaseRepository<Product>();
        Users = new UserRepository();
        CatalogoCodigo = new CatalogoCodigoRepository();
        Zona = new ZonaRepository();
        Persona = new PersonaRepository();
        FlujoMaestro = new FlujoRepository();
        Credito = new CreditoRepository();
        Lenddo = new BaseRepository<WebPersonaLenddo>();
        VarNegocio = new BaseRepository<VarNegocio>();
        Reporte = new ReporteRepository();
        Documento = new DocumentoRepository();
        Error = new ErrorRepository();
        ReglaNegocio = new ReglaNegocioRepository();
        Menu = new MenuRepository();
    }

    public IRepository<Product> Products { get; private set; }
    public IUserRepository Users { get; private set; }
    public ICatalogoCodigoRepository CatalogoCodigo { get; private set; }
    public IZonaRepository Zona { get; private set; }
    public IPersonaRepository Persona { get; private set; }
    public IFlujoRepository FlujoMaestro { get; private set; }
    public ICreditoRepository Credito { get; private set; }
    public IRepository<WebPersonaLenddo> Lenddo { get; private set; }
    public IRepository<VarNegocio> VarNegocio { get; private set; }
    public IReporteRepository Reporte { get; private set; }
    public IDocumentoRepository Documento { get; private set; }
    public IErrorRepository Error { get; private set; }
    public IReglaNegocioRepository ReglaNegocio { get; private set; }
    public IMenuRepository Menu { get; private set; }

    /// <summary>
    /// FIX: the original implementation called this.Dispose() from inside Dispose(),
    /// recursing forever and crashing with a StackOverflowException on first use.
    /// The repositories open/close their own connections, so nothing needs releasing here.
    /// </summary>
    public void Dispose()
    {
        GC.SuppressFinalize(this);
    }
}
}
<file_sep>using System.Collections;
using System.Collections.Generic;
using TiboxWebApi.Models;
namespace TiboxWebApi.Repository.Interfaces
{
/// <summary>Persistence of generated report headers and rendered document blobs.</summary>
public interface IReporteRepository : IRepository<Reporte>
{
    /// <summary>Stores the report header (subject/body) for a credit.
    /// NOTE: "nCodAcge" is a typo kept to avoid breaking named-argument call sites.</summary>
    int LucasInsCabeceraReporte(int nCodAcge, int nCodCred, string cAsunto, string cCuerpo);
    /// <summary>Stores one rendered document (nTipo = document-type code) as raw bytes.</summary>
    int LucasInsDetalleReporte(int nCodAge, int nCodCred, int nTipo, byte[] oDoc);
    /// <summary>Retrieves stored report documents for a credit, filtered by type.</summary>
    IEnumerable<Reporte> LucasSeleccionaReporte(int nCodAge, int nCodCred, int nTipo);
}
}
<file_sep>using System.Collections.Generic;
using TiboxWebApi.Models;
namespace TiboxWebApi.Repository.Interfaces
{
/// <summary>Catalog-code lookups (generic code tables).</summary>
public interface ICatalogoCodigoRepository : IRepository<CatalogoCodigos>
{
    /// <summary>Entries of the catalog identified by <paramref name="nCodigo"/>.</summary>
    IEnumerable<CatalogoCodigos> selCatalogoCodigos(int nCodigo);
    /// <summary>Housing-type catalog entries.</summary>
    IEnumerable<CatalogoCodigos> selTipovivienda();
}
}
<file_sep>using System.Web.Http;
using TiboxWebApi.UnitOfWork;
namespace TiboxWebApi.WebApi.Controllers
{
/// <summary>
/// Common base for the API controllers; exposes the injected unit of work
/// to derived classes.
/// </summary>
public class BaseController : ApiController
{
    // Shared unit of work, populated via constructor injection.
    protected readonly IUnitOfWork _unit;

    public BaseController(IUnitOfWork unit)
    {
        _unit = unit;
    }
}
}
<file_sep>using Microsoft.AspNet.WebApi.Extensions.Compression.Server;
using Newtonsoft.Json.Serialization;
using System.Net.Http.Extensions.Compression.Core.Compressors;
using System.Web.Http;
namespace TiboxWebApi.WebApi
{
public partial class Startup
{
public void Register(HttpConfiguration config)
{
// Configuración y servicios de API web
//Compreción
config.MessageHandlers.Insert(0,
new ServerCompressionHandler(new GZipCompressor(), new DeflateCompressor())
);
//Fin comprecion
//Formato JSON
config.Formatters.JsonFormatter.SerializerSettings.ContractResolver =
new CamelCasePropertyNamesContractResolver();
//Fin formato
// Rutas de API web
config.MapHttpAttributeRoutes();
config.Routes.MapHttpRoute(
name: "DefaultApi",
routeTemplate: "api/{controller}/{id}",
defaults: new { id = RouteParameter.Optional }
);
}
}
}
<file_sep>using System.Web.Http;
using TiboxWebApi.Models;
using TiboxWebApi.UnitOfWork;
namespace TiboxWebApi.WebApi.Controllers
{
/// <summary>CRUD endpoints for <see cref="WebPersonaLenddo"/> (Lenddo score records).</summary>
[RoutePrefix("Lenddo")]
[Authorize]
public class LenddoController : BaseController
{
    public LenddoController(IUnitOfWork unit) : base(unit)
    {
    }

    /// <summary>GET Lenddo/{id} — one record by primary key.</summary>
    [Route("{id}")]
    public IHttpActionResult Get(int id)
    {
        if (id <= 0) return BadRequest();
        return Ok(_unit.Lenddo.GetEntityById(id));
    }

    /// <summary>POST Lenddo — inserts a record and returns the generated id.</summary>
    [Route("")]
    [HttpPost]
    public IHttpActionResult Post(WebPersonaLenddo lenddo)
    {
        // FIX: guard against a missing/unbindable request body (consistent with ReporteController);
        // the original passed null straight to the repository.
        if (lenddo == null) return BadRequest();
        var id = _unit.Lenddo.Insert(lenddo);
        return Ok(new { id = id });
    }

    /// <summary>PUT Lenddo — updates a record.</summary>
    [Route("")]
    [HttpPut]
    public IHttpActionResult Put(WebPersonaLenddo lenndo)
    {
        if (lenndo == null) return BadRequest();
        // NOTE(review): the repository result is discarded and "status" is always true —
        // confirm whether callers should see the real update outcome.
        _unit.Lenddo.Update(lenndo);
        return Ok(new { status = true });
    }

    /// <summary>DELETE Lenddo/{id}.</summary>
    [Route("{id}")]
    [HttpDelete]
    public IHttpActionResult Delete(int id)
    {
        if (id <= 0) return BadRequest();
        var result = _unit.Lenddo.Delete(new WebPersonaLenddo { nCodigo = id });
        // NOTE(review): "detele" is a typo but is part of the public JSON contract,
        // so it is kept to avoid breaking existing consumers.
        return Ok(new { detele = true });
    }

    /// <summary>GET Lenddo/List — all records.</summary>
    [Route("List")]
    [HttpGet]
    public IHttpActionResult GetList()
    {
        return Ok(_unit.Lenddo.GetAll());
    }
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Net.Mail;
using System.Web;
namespace TiboxWebApi.WebApi.Utils
{
/// <summary>Thin SMTP helper around System.Net.Mail configured from appSettings.</summary>
public class Email
{
    /// <summary>
    /// Sends an HTML e-mail. Returns true on success; on failure returns false
    /// and places the exception message in <paramref name="cMensajeError"/>.
    /// </summary>
    public bool envioEmail(string cEmail, string cCuerpo, string cTitulo, ref string cMensajeError)
    {
        string SMTP = System.Configuration.ConfigurationManager.AppSettings["SMTP"].ToString();
        int PUERTO = Convert.ToInt32(System.Configuration.ConfigurationManager.AppSettings["PUERTO"]);
        string CORREO = System.Configuration.ConfigurationManager.AppSettings["CORREO"].ToString();
        string CORREO_CREDENCIALES = System.Configuration.ConfigurationManager.AppSettings["CORREO_CREDENCIALES"].ToString();
        string CLAVE_CREDENCIALES = System.Configuration.ConfigurationManager.AppSettings["CLAVE_CREDENCIALES"].ToString();
        string NOMBRE = System.Configuration.ConfigurationManager.AppSettings["NOMBRE"].ToString();
        try
        {
            // FIX: SmtpClient and MailMessage are IDisposable; the original leaked both.
            using (SmtpClient server = new SmtpClient(SMTP, PUERTO))
            using (MailMessage mnsj = new MailMessage())
            {
                server.Credentials = new System.Net.NetworkCredential(CORREO_CREDENCIALES, CLAVE_CREDENCIALES);
                server.EnableSsl = true;
                mnsj.Subject = cTitulo;
                mnsj.To.Add(new MailAddress(cEmail));
                mnsj.From = new MailAddress(CORREO, NOMBRE);
                mnsj.IsBodyHtml = true;
                mnsj.Body = cCuerpo; // (removed the dead intermediate "Body" local)
                server.Send(mnsj);
            }
        }
        catch (Exception ex)
        {
            cMensajeError = ex.Message;
            return false;
        }
        return true;
    }
}
}<file_sep>using System.Collections.Generic;
using TiboxWebApi.Models;
namespace TiboxWebApi.Repository.Interfaces
{
/// <summary>Credit operations: inbox, registration, schedules and flow checks ("Lucas" stored procedures).</summary>
public interface ICreditoRepository : IRepository<Credito>
{
    /// <summary>Paged credit inbox for a person/agency (nPagina/nTam = paging).</summary>
    IEnumerable<Credito> LucasBandeja(int nCodPers, int nPagina, int nTam, int nCodAge);
    int LucasInsCredito(Credito credito);
    int LucasInsModalidad(Credito credito);
    /// <summary>Loan data for a single credit.</summary>
    IEnumerable<Credito> LucasDatosPrestamo(int nCodAge, int nCodCred);
    /// <summary>Records the electronic signature of a credit.</summary>
    int LucasInsFirmaElectronica(Credito credito);
    /// <summary>Payment-schedule rows for a credit.</summary>
    IEnumerable<Credito> LucasCalendarioLista(int nCodAge, int nCodCred);
    /// <summary>Kardex (movement history) rows for a credit.</summary>
    IEnumerable<Credito> LucasKardexLista(int nCodAge, int nCodCred);
    /// <summary>Rejection-per-day check for a document — TODO confirm return semantics.</summary>
    int LucasRechazadoPorDia(string cDocumento);
    /// <summary>Whether the document already has a credit in flow — TODO confirm return semantics.</summary>
    int LucasCreditoEnFlujo(string cDocumento);
    int LucasCreditoAnulaxActualizacion(string cDocumento);
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace TiboxWebApi.Models
{
// Business-rule metadata row: rule name, user message and the stored procedure that evaluates it.
public class ReglaNegocio
{
    public int nIdNeg { get; set; }         // rule id
    public string cNomReg { get; set; }     // rule name
    public string cRegMensaje { get; set; } // message shown when the rule fires
    public string cNomPro { get; set; }     // process name
    public string cStored { get; set; }     // stored-procedure name
    public string cNombrePar { get; set; }  // parameter name
    public string cNomCom { get; set; }     // component name — TODO confirm
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace TiboxWebApi.Models
{
// Generic catalog entry: numeric code, display name and value.
public class CatalogoCodigos
{
    public int nCodigo { get; set; }
    public string cNomCod { get; set; }
    public string nValor { get; set; } // stored as text despite the "n" prefix
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.DirectoryServices;
using System.Linq;
using System.Web;
namespace TiboxWebApi.WebApi.Utils
{
/// <summary>Credential check against Active Directory (LDAP bind + search).</summary>
public class ActiveDirectory
{
    /// <summary>
    /// Returns true when the user/password pair can bind to the directory at
    /// appSettings["IPAD"] and an entry matching the user name is found.
    /// Any directory error yields false.
    /// </summary>
    public Boolean Autenticado(string psUser, string psPass)
    {
        string cIPAD = System.Configuration.ConfigurationManager.AppSettings["IPAD"].ToString();
        bool autentificado = false;
        DirectoryEntry deSystem = null;
        DirectorySearcher deSearch = null;
        try
        {
            deSystem = new DirectoryEntry(cIPAD);
            deSystem.AuthenticationType = AuthenticationTypes.Secure;
            deSystem.Username = psUser;
            // FIX: the original contained a scrubbed "<PASSWORD>" placeholder (invalid C#);
            // the supplied password parameter is the intended value.
            deSystem.Password = psPass;
            deSearch = new DirectorySearcher();
            deSearch.SearchRoot = deSystem;
            // NOTE(review): psUser is interpolated into the LDAP filter unescaped —
            // consider escaping to avoid LDAP filter injection.
            deSearch.Filter = ("(anr=" + psUser + ")");
            SearchResultCollection results = deSearch.FindAll();
            autentificado = results.Count > 0;
            // FIX: SearchResultCollection must be disposed or it leaks unmanaged memory.
            results.Dispose();
        }
        catch (Exception)
        {
            autentificado = false;
        }
        finally
        {
            // FIX: DirectorySearcher/DirectoryEntry are IDisposable; the original only nulled them.
            if (deSearch != null) deSearch.Dispose();
            if (deSystem != null) deSystem.Dispose();
        }
        return autentificado;
    }
}
}<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using TiboxWebApi.Models;
namespace TiboxWebApi.Repository.Interfaces
{
/// <summary>Business-rule metadata lookups.</summary>
public interface IReglaNegocioRepository : IRepository<ReglaNegocio>
{
    /// <summary>Rules configured for the given form name.</summary>
    IEnumerable<ReglaNegocio> ListaRegla(string cNomForm);
}
}
<file_sep># SoyLucas-WebApiRest
Servicio Web REST para el consumo de datos, realizado en C# con Dapper.
<file_sep>using System.Collections.Generic;
using TiboxWebApi.Models;
namespace TiboxWebApi.Repository.Interfaces
{
/// <summary>Master-flow (wizard) persistence: registration, recovery and deletion.</summary>
public interface IFlujoRepository : IRepository<FlujoMaestro>
{
    /// <summary>Registers a flow; returns its id — TODO confirm.</summary>
    int LucasRegistraFlujo(FlujoMaestro flujo);
    /// <summary>Registers the decision-engine (motor) result for a flow.</summary>
    int LucasRegistraMotor(FlujoMaestro flujo);
    IEnumerable<FlujoMaestro> LucasRecuperaFlujo(int nIdFlujoMaestro);
    IEnumerable<FlujoMaestro> LucasRecuperaSolicitud(int nIdFlujoMaestro);
    /// <summary>Wizard state for a master flow.</summary>
    IEnumerable<FlujoMaestro> ObtieneWizard(int nIdFlujoMaestro);
    int EliminaFlujo(FlujoMaestro flujo);
}
}
<file_sep>namespace TiboxWebApi.Models
{
// Minimal error-log DTO: description plus the controller/context that raised it.
public class Error
{
    public string cDescripcion { get; set; }
    public string cControlador { get; set; }
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace TiboxWebApi.Models
{
// Geographic zone row used by the departamento/provincia/distrito lookups.
public class Zona
{
    public string cCodZona { get; set; }   // zone code
    public string cNomZona { get; set; }   // zone name
    public int nCodigoCiudad { get; set; } // city code
}
}
<file_sep>using Microsoft.Reporting.WinForms;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Net;
using System.Net.Mail;
using System.Text;
using System.Web;
using TiboxWebApi.UnitOfWork;
namespace TiboxWebApi.WebApi.Utils
{
public class ReporteEmail
{
private readonly IUnitOfWork _unit;
/// <summary>Creates the helper with the unit of work used for persistence and config lookups.</summary>
public ReporteEmail(IUnitOfWork unit)
{
    _unit = unit;
}
string SMTP = System.Configuration.ConfigurationManager.AppSettings["SMTP"].ToString();
int PUERTO = Convert.ToInt32(System.Configuration.ConfigurationManager.AppSettings["PUERTO"]);
string CORREO = System.Configuration.ConfigurationManager.AppSettings["CORREO"].ToString();
string CORREO_CREDENCIALES = System.Configuration.ConfigurationManager.AppSettings["CORREO_CREDENCIALES"].ToString();
string CLAVE_CREDENCIALES = System.Configuration.ConfigurationManager.AppSettings["CLAVE_CREDENCIALES"].ToString();
string NOMBRE = System.Configuration.ConfigurationManager.AppSettings["NOMBRE"].ToString();
string USER_REPORTE = System.Configuration.ConfigurationManager.AppSettings["USER_REPORTE"].ToString();
string PASS_REPORTE = System.Configuration.ConfigurationManager.AppSettings["PASS_REPORTE"].ToString();
string DOMINIO = System.Configuration.ConfigurationManager.AppSettings["DOMINIO"].ToString();
enum ReportFormat { PDF = 1, Word = 2, Excel = 3 }
public string LastmimeType { get { return mimeType; } }
private string mimeType;
/// <summary>
/// Report-server paths of the contractual documents, keyed by their position.
/// Add new report paths to the array below if more documents are needed.
/// </summary>
public Dictionary<int, string> listaDictionary()
{
    var rutas = new string[] {
        "/WEB/Hoja_Resumen_Informativa_Anexo_1_ONL",
        "/WEB/Hoja_Resumen_Informativa_Anexo_2_ONL",
        "/WEB/001_FichaDeSolicitudDePrestamo",
        "/WEB/002_ContratoPrestamoOnline",
        "/WEB/003_CertificadoDeSeguroDesgravamen",
        "/WEB/004_FormularioIdentificacionPersonas",
    };
    // Same contract as before: key = index in the array, value = report path.
    var diccionario = new Dictionary<int, string>();
    foreach (var ruta in rutas)
    {
        diccionario.Add(diccionario.Count, ruta);
    }
    return diccionario;
}
/// <summary>
/// Renders one SSRS server report (parameterized by nCodCred/nCodAge) to PDF bytes.
/// The report-server URL comes from business variable 2032; credentials from appSettings.
/// Side effect: stores the rendered MIME type in <see cref="mimeType"/>. Rethrows on error.
/// </summary>
private byte[] ReporteABytes(int nCodAge, int nCodCred, string cReporte)
{
    byte[] renderedBytes;
    ReportViewer rs = new ReportViewer();
    try
    {
        // (Removed the unused ICredentials/reportType locals from the original.)
        NetworkCredential credentiales = new NetworkCredential();
        credentiales.UserName = USER_REPORTE;
        // FIX: the original contained a scrubbed "<PASSWORD>" placeholder (invalid C#);
        // PASS_REPORTE is the configured report-server password and is otherwise unused.
        credentiales.Password = PASS_REPORTE;
        credentiales.Domain = DOMINIO;
        rs.ShowCredentialPrompts = false;
        // Business variable 2032 holds the report-server URL.
        var oVarNegocio = _unit.VarNegocio.GetEntityById(2032);
        rs.ServerReport.ReportServerUrl = new Uri(oVarNegocio.cValorVar);
        rs.ServerReport.ReportPath = cReporte;
        rs.ServerReport.ReportServerCredentials.NetworkCredentials = credentiales;
        var listParam = new List<ReportParameter>();
        listParam.Add(new ReportParameter("nCodCred", nCodCred.ToString()));
        listParam.Add(new ReportParameter("nCodAge", nCodAge.ToString()));
        rs.ServerReport.SetParameters(listParam);
        rs.RefreshReport();
        var formatopdf = ReportFormat.PDF;
        string formato = formatopdf.ToString();
        string encoding;
        string fileNameExtension;
        string deviceInfo =
            "<DeviceInfo>" +
            " <OutputFormat>" + formatopdf.ToString() + "</OutputFormat>" +
            "</DeviceInfo>";
        Warning[] warnings;
        string[] streams;
        renderedBytes = rs.ServerReport.Render(formato,
            deviceInfo,
            out mimeType,
            out encoding,
            out fileNameExtension,
            out streams,
            out warnings);
    }
    catch (Exception)
    {
        // Propagate; callers translate this into bError/cMensajeError.
        renderedBytes = null;
        throw;
    }
    return renderedBytes;
}
/// <summary>
/// Builds the HTML body for the loan-approval e-mail: the client's name is
/// upper-cased and the amount formatted as "#,#.00".
/// </summary>
private string generaCuerpoEmail(string cNombre, double nPrestamo)
{
    var cuerpo = "<table border='0' style='width: 100%; background: #f1f1f1; font-family: verdana'>" +
        "<tr>" +
        "<td>" +
        "<table cellspacing='0' cellpadding='0' border='0' style='margin: 0 auto; width: 85%'>" +
        "<tr style='background: #FD293F'>" +
        "<td style='padding: 25px'>" +
        "<h1 style='text-transform: uppercase; text-align: center; color: white; margin: 0 auto'>¡FELICIDADES!</h1>" +
        "</td>" +
        "</tr>" +
        "<tr style='background: white'>" +
        "<td style='padding: 15px;'>" +
        "<h2 style='background: white; text-transform: uppercase; text-align: center'>HOLA " + cNombre.ToUpper() + "</h2>" +
        "<h4 style='font-weight: 500; text-transform: uppercase; text-align: center'><i>¡Solicitud generada!</i></h4>" +
        "<p style='text-align: justify; padding: 15px'>Tu solicitud de préstamo por S/ " + string.Format("{0:#,#.00}", nPrestamo) + " ha sido generada, Se adjuntan los documentos contractuales.</p>" +
        "<p>Gracias por confiar en nosotros!</p>" +
        "</td>" +
        "</tr>" +
        "<tr style='background: #454544; text-transform: uppercase; text-align: center'>" +
        "<td style='padding: 25px;'>" +
        "<button style='text-align: center; color: #fff; font-size: 14px; font-weight: 500; border:none; padding: 10px 20px; background-color: #FFB700; font-size: 25px'>" +
        "<span style='color: #454544; text-transform: uppercase; font-weight: bold; font-size: 25px'>Aló Lucas:</span> 01 615-7030</button>" +
        "</td>" +
        "</tr>" +
        "</table>" +
        "</td>" +
        "</tr>" +
        "</table>";
    return cuerpo;
}
/// <summary>
/// Renders every contractual report for a credit and stores the header plus the
/// rendered documents in the database (no e-mail is sent here).
/// The PEP identification form (004) is only generated when nPEP != 0.
/// Returns true on success; on failure sets bError/cMensajeError and logs the error.
/// </summary>
public bool generaReportes(int nCodCred, int nCodAge, int nPEP, ref bool bError, ref string cMensajeError)
{
    bool bRespuesta = true;
    // FIX: removed the unused attachment/stream/list locals of the original —
    // this method only persists documents, it never attaches them to an e-mail,
    // and the discarded MemoryStream/Attachment objects were leaked.
    List<DocumentosReporte> lstDocumentos = new List<DocumentosReporte>();
    Dictionary<int, string> lstDiccionario = listaDictionary();
    try
    {
        foreach (var item in lstDiccionario)
        {
            // Skip the PEP identification form for non-PEP customers.
            if (item.Value == "/WEB/004_FormularioIdentificacionPersonas" && nPEP == 0)
            {
                continue;
            }
            var renderedbytes = ReporteABytes(nCodAge, nCodCred, item.Value);
            var splited = item.Value.Split('/');
            var documentos = new DocumentosReporte();
            documentos.nombre = splited[2] + ".PDF";
            documentos.doc = renderedbytes;
            lstDocumentos.Add(documentos);
        }
        _unit.Reporte.LucasInsCabeceraReporte(nCodAge, nCodCred, "¡Felicitaciones!, tu préstamo ha sido aprobado.", "");
        var nTipoDoc = 0;
        for (int i = 0; i < lstDocumentos.Count; i++)
        {
            // Map the generated file name to its document-type code before persisting.
            if (lstDocumentos[i].nombre == "Hoja_Resumen_Informativa_Anexo_1_ONL.PDF") { nTipoDoc = 1; }
            if (lstDocumentos[i].nombre == "Hoja_Resumen_Informativa_Anexo_2_ONL.PDF") { nTipoDoc = 2; }
            if (lstDocumentos[i].nombre == "001_FichaDeSolicitudDePrestamo.PDF") { nTipoDoc = 6; }
            if (lstDocumentos[i].nombre == "002_ContratoPrestamoOnline.PDF") { nTipoDoc = 5; }
            if (lstDocumentos[i].nombre == "003_CertificadoDeSeguroDesgravamen.PDF") { nTipoDoc = 7; }
            if (lstDocumentos[i].nombre == "004_FormularioIdentificacionPersonas.PDF") { nTipoDoc = 8; }
            _unit.Reporte.LucasInsDetalleReporte(nCodAge, nCodCred, nTipoDoc, lstDocumentos[i].doc);
        }
    }
    catch (Exception ex)
    {
        bError = true;
        cMensajeError = "Error: " + ex.Message;
        bRespuesta = false;
        _unit.Error.InsertaError("Reporte Controller - generaReportes", ex.Message);
    }
    return bRespuesta;
}
/// <summary>
/// Renders every contractual report, e-mails them to the client as PDF attachments,
/// and — only after a successful send — persists the header plus documents in the DB.
/// The PEP form (004) is only included when nPEP != 0.
/// On failure sets bError/cMensajeError, logs the error and returns false.
/// </summary>
public bool EnviarReportePorEmail(int nCodCred, int nCodAge, string cEmail, string cnombres, double nPrestamo, int nPEP, ref bool bError, ref string cMensajeError)
{
    bool bRespuesta = true;
    MemoryStream datoFile = null;
    List<Attachment> lstFiles = new List<Attachment>();
    // NOTE(review): the three lstArchivos* lists below are never used — candidates for removal.
    List<string> lstArchivos = new List<string>();
    List<object> lstArchivos1 = new List<object>();
    List<string> lstArchivos2 = new List<string>();
    List<DocumentosReporte> lstDocumentos = new List<DocumentosReporte>();
    Attachment anexo = null;
    Dictionary<int, string> lstDiccionario = null;
    DocumentosReporte documentos = new DocumentosReporte();
    lstDiccionario = listaDictionary();
    string cNombreDocumento = "";
    string cBodyCliente = generaCuerpoEmail(cnombres, nPrestamo);
    try
    {
        foreach (var item in lstDiccionario)
        {
            if (item.Value == "/WEB/004_FormularioIdentificacionPersonas" && nPEP == 0)
            {
                // Skip the PEP identification form for non-PEP customers.
            }
            else
            {
                // Render each report, keeping it both as an e-mail attachment
                // and as raw bytes for the database.
                var renderedbytes = ReporteABytes(nCodAge, nCodCred, item.Value);
                datoFile = new MemoryStream(renderedbytes);
                datoFile.Seek(0, SeekOrigin.Begin);
                var splited = item.Value.Split('/');
                cNombreDocumento = splited[2] + ".PDF";
                anexo = new Attachment(datoFile, cNombreDocumento);
                lstFiles.Add(anexo);
                documentos = new DocumentosReporte();
                documentos.nombre = cNombreDocumento;
                documentos.doc = renderedbytes;
                lstDocumentos.Add(documentos);
            }
        }
        using (var smtpCliente = new SmtpClient(SMTP, PUERTO))
        {
            smtpCliente.Credentials = new NetworkCredential(CORREO_CREDENCIALES, CLAVE_CREDENCIALES);
            smtpCliente.EnableSsl = true;
            MailMessage mail = new MailMessage();
            mail.Body = cBodyCliente;
            mail.IsBodyHtml = true;
            mail.To.Add(new MailAddress(cEmail));
            mail.From = new MailAddress(CORREO, NOMBRE);
            mail.Subject = "¡Felicitaciones!, tu préstamo ha sido aprobado.";
            mail.SubjectEncoding = Encoding.UTF8;
            mail.Priority = MailPriority.Normal;
            mail.IsBodyHtml = true; // NOTE(review): duplicate of the assignment above.
            foreach (var item in lstFiles)
            {
                mail.Attachments.Add(item);
            }
            smtpCliente.Send(mail);
            bRespuesta = true;
        }
        if (bRespuesta)
        {
            // Persist the header and every rendered document only after a successful send.
            _unit.Reporte.LucasInsCabeceraReporte(nCodAge, nCodCred, "¡Felicitaciones!, tu préstamo ha sido aprobado.", cBodyCliente);
            var nTipoDoc = 0;
            for (int i = 0; i < lstDocumentos.Count; i++)
            {
                // Map the generated file name to its document-type code.
                if (lstDocumentos[i].nombre == "Hoja_Resumen_Informativa_Anexo_1_ONL.PDF") { nTipoDoc = 1; }
                if (lstDocumentos[i].nombre == "Hoja_Resumen_Informativa_Anexo_2_ONL.PDF") { nTipoDoc = 2; }
                if (lstDocumentos[i].nombre == "001_FichaDeSolicitudDePrestamo.PDF") { nTipoDoc = 6; }
                if (lstDocumentos[i].nombre == "002_ContratoPrestamoOnline.PDF") { nTipoDoc = 5; }
                if (lstDocumentos[i].nombre == "003_CertificadoDeSeguroDesgravamen.PDF") { nTipoDoc = 7; }
                if (lstDocumentos[i].nombre == "004_FormularioIdentificacionPersonas.PDF") { nTipoDoc = 8; }
                _unit.Reporte.LucasInsDetalleReporte(nCodAge, nCodCred, nTipoDoc, lstDocumentos[i].doc);
            }
        }
    }
    catch (Exception ex)
    {
        bError = true;
        cMensajeError = "Error: " + ex.Message;
        bRespuesta = false;
        _unit.Error.InsertaError("Reporte Controller - EnviarReportePorEmail", ex.Message);
    }
    return bRespuesta;
}
}
}<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Web;
namespace TiboxWebApi.Models
{
// Uploaded/stored document attached to a credit or master flow.
public class Documento
{
    public int nCodigo { get; set; }
    public string cNombreDoc { get; set; }   // logical document name
    public int nCodAge { get; set; }         // agency code
    public int nCodCred { get; set; }        // credit code
    public byte[] iImagen { get; set; }      // raw file content
    public string cNomArchivo { get; set; }  // original file name
    public string cExtencion { get; set; }   // file extension (spelling kept from DB schema)
    public int nIdFlujoMaestro { get; set; } // owning master flow
    public string cTipoArchivo { get; set; } // file kind/MIME — TODO confirm
    public string oDocumento { get; set; }   // serialized payload — TODO confirm format
}
}
<file_sep>using System.Web.Http;
using TiboxWebApi.Models;
using TiboxWebApi.UnitOfWork;
using TiboxWebApi.WebApi.Utils;
namespace TiboxWebApi.WebApi.Controllers
{
/// <summary>
/// Endpoints to generate the contractual reports, e-mail them to the client,
/// and fetch stored copies from the database.
/// </summary>
[RoutePrefix("Reporte")]
[Authorize]
public class ReporteController : BaseController
{
    private ReporteEmail _reporte = null;

    public ReporteController(IUnitOfWork unit) : base(unit)
    {
        _reporte = new ReporteEmail(unit);
    }

    /// <summary>POST Reporte/Envio — renders the reports and e-mails them to the client.</summary>
    [Route("Envio")]
    [HttpPost]
    public IHttpActionResult Envio(Reporte reporte)
    {
        if (reporte == null) return BadRequest();
        var bError = false;
        var cMensajeError = "";
        var bResultado = _reporte.EnviarReportePorEmail(
            reporte.nCodCred, reporte.nCodAge, reporte.cEmail,
            reporte.cNombres, reporte.nPrestamo, reporte.nPEP,
            ref bError, ref cMensajeError);
        return Ok(new { bresultado = bResultado, bError = bError, cMensaje = cMensajeError });
    }

    /// <summary>GET — renders and stores the reports without e-mailing them.</summary>
    [Route("Generar/{nCodCred}/{nCodAge}/{nPEP}")]
    [HttpGet]
    public IHttpActionResult GenerarReportes(int nCodCred, int nCodAge, int nPEP)
    {
        var bError = false;
        var cMensajeError = "";
        var bResultado = _reporte.generaReportes(nCodCred, nCodAge, nPEP, ref bError, ref cMensajeError);
        return Ok(new { bresultado = bResultado, bError = bError, cMensaje = cMensajeError });
    }

    /// <summary>GET — stored report documents for a credit, filtered by document type.</summary>
    [Route("{nCodAge}/{nCodCred}/{nTipo}")]
    [HttpGet]
    public IHttpActionResult Get(int nCodAge, int nCodCred, int nTipo)
    {
        if (nCodAge == 0 || nCodCred == 0 || nTipo == 0) return BadRequest();
        return Ok(_unit.Reporte.LucasSeleccionaReporte(nCodAge, nCodCred, nTipo));
    }
}
}
<file_sep>namespace TiboxWebApi.Models
{
// Credit DTO shared by several "Lucas" stored procedures (request, schedule and kardex rows).
// Hungarian prefixes: n = numeric, c = string, d = date, b = flag, o = serialized object.
public class Credito
{
    // --- Identification / flow ---
    public int nIdFlujoMaestro { get; set; }
    public int nCodCred { get; set; }
    public int nCodAge { get; set; }
    public string cNumeroContrato { get; set; }
    // --- Loan terms / state ---
    public double nPrestamo { get; set; }
    public int nNroCuotas { get; set; }
    public double nMontoCuota { get; set; }
    public string cFormulario { get; set; }
    public string cProducto { get; set; }
    public string cSubProd { get; set; }
    public string dFechaRegistro { get; set; }
    public int bActivo { get; set; }
    public string cMoneda { get; set; }
    public int nEstado { get; set; }
    public string cEstado { get; set; }
    public int nProd { get; set; }
    public int nSubProd { get; set; }
    public int nCodPers { get; set; }
    // --- Paging (inbox queries) ---
    public int nPagina { get; set; }
    public int nTamanio { get; set; }
    // --- Rates / requested amounts ---
    public double nTasa { get; set; }
    public int nPeriodo { get; set; }
    public double nPlazoSol { get; set; }
    public double nMontoSol { get; set; }
    public string nCodUsu { get; set; }
    public string oCredito { get; set; }
    public string cUsuReg { get; set; }
    public int nCodPersReg { get; set; }
    public int nIdFlujo { get; set; }
    public int nOrdenFlujo { get; set; }
    public string dFechaSistema { get; set; }
    public double nSeguro { get; set; }
    // --- Disbursement ---
    public int nTipoDesembolso { get; set; }
    public int nBanco { get; set; }
    public string cNroCuenta { get; set; }
    public double nTasaComp { get; set; }
    public string cMovil { get; set; }
    public int nFirma { get; set; }
    // --- Schedule / kardex fields ---
    public double nCapital { get; set; }
    public double nIntComp { get; set; }
    public string dFecVenc { get; set; }
    public string dFecPago { get; set; }
    public string dFecCob { get; set; }
    public double nGasto { get; set; }
    public string cDescripcion { get; set; }
    public double nIGV { get; set; }
    public double nMontoNuevoSaldo { get; set; }
    public double nTotal { get; set; }
    public double nCuotaPag { get; set; }
}
}
<file_sep>using Microsoft.Owin.Security.OAuth;
using System.Threading.Tasks;
using System.Security.Claims;
using TiboxWebApi.UnitOfWork;
using TiboxWebApi.WebApi.Utils;
using System;
namespace TiboxWebApi.WebApi.Provider
{
/// <summary>
/// OWIN OAuth resource-owner-password flow. The form field "tipo" selects the
/// credential backend: "lucas" = database users, "admin" = Active Directory + DB lookup.
/// </summary>
public class SimpleAuthorizationServerProvider : OAuthAuthorizationServerProvider
{
    private readonly IUnitOfWork _unit;
    private ActiveDirectory _ad;

    public SimpleAuthorizationServerProvider()
    {
        _unit = new TiboxUnitOfWork();
        _ad = new ActiveDirectory();
    }

    /// <summary>Accepts every client; real validation happens per resource owner below.</summary>
    public override Task ValidateClientAuthentication(OAuthValidateClientAuthenticationContext context)
    {
        // FIX: the original was declared "async" with no await (compiler warning CS1998).
        context.Validated();
        return Task.FromResult(0);
    }

    /// <summary>Validates the user name/password pair and, on success, issues the claims identity.</summary>
    public override async Task GrantResourceOwnerCredentials(OAuthGrantResourceOwnerCredentialsContext context)
    {
        var form = await context.Request.ReadFormAsync();
        // FIX (security): the original initialized user to "new Models.User()"; an
        // unrecognized "tipo" therefore left it non-null and a token was issued
        // WITHOUT any credential check. Start from null so unknown types are rejected.
        Models.User user = null;
        if (string.Equals(form["tipo"], "lucas", StringComparison.OrdinalIgnoreCase))
        {
            user = _unit.Users.ValidateUser(context.UserName, context.Password);
        }
        else if (string.Equals(form["tipo"], "admin", StringComparison.OrdinalIgnoreCase))
        {
            // Only hit the database when the AD bind/search succeeded.
            if (_ad.Autenticado(context.UserName, context.Password))
            {
                user = _unit.Users.validateUserAD(context.UserName, context.Password);
            }
        }
        if (user == null)
        {
            context.SetError("invalid_grant", "Usuario o password incorrecto");
            return;
        }
        var identity = new ClaimsIdentity(context.Options.AuthenticationType);
        identity.AddClaim(new Claim("sub", context.UserName));
        // NOTE(review): the "role" claim is set to the user NAME, not a role — confirm intended.
        identity.AddClaim(new Claim("role", context.UserName));
        context.Validated(identity);
    }
}
}<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using TiboxWebApi.Models;
namespace TiboxWebApi.Repository.Interfaces
{
    /// <summary>
    /// Repository contract for Documento entities, extending the generic
    /// operations inherited from <see cref="IRepository{T}"/>.
    /// </summary>
    public interface IDocumentoRepository: IRepository<Documento>
    {
        // Returns the list of stored documents.
        IEnumerable<Documento> ListaDocumentos();
        // Inserts a document; the int result's meaning (identity vs. status
        // code) is defined by the implementing stored procedure — confirm there.
        int LucasInsDocumento(Documento documento);
    }
}
<file_sep>using Dapper.Contrib.Extensions;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace TiboxWebApi.Models
{
    // Dapper.Contrib entity mapped to the "Product" table.
    [Table("Product")]
    public class Product
    {
        public int Id { get; set; }
        public string ProductName { get; set; }
        public int SupplierId { get; set; }
        // Nullable: the database column evidently allows NULL prices.
        public decimal? UnitPrice { get; set; }
        public string Package { get; set; }
        public bool IsDiscontinued { get; set; }
    }
}
<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace TiboxWebApi.Models
{
    /// <summary>
    /// Flat data-transfer object for a credit-evaluation flow step: applicant
    /// identification, scoring payloads (the oScoring* members hold serialized
    /// XML strings), and the offer terms computed by the scoring engine.
    /// </summary>
    public class FlujoMaestro
    {
        public string nNroDoc { get; set; }
        public int nCodAge { get; set; }
        public int nProd { get; set; }
        public int nSubProd { get; set; }
        public string cNomform { get; set; }
        public int nCodCred { get; set; }
        public string cUsuReg { get; set; }
        public int nIdFlujo { get; set; }
        public int nCodPersReg { get; set; }
        public int nOrdenFlujo { get; set; }
        // Scoring engine results carried as XML document strings.
        public string oScoringDatos { get; set; }
        public string oScoringVarDemo { get; set; }
        public string oScoringDetCuota { get; set; }
        public string oScoringDemo { get; set; }
        public string oScoringRCC { get; set; }
        public int nRechazado { get; set; }
        public string cClienteLenddo { get; set; }
        public int nIdFlujoMaestro { get; set; }
        public int nCodPers { get; set; }
        // Offer terms (rate, available installment, loan bounds, term).
        public double nTasa { get; set; }
        public double nCuotaDisp { get; set; }
        public double nPrestamoMax { get; set; }
        public double nPlazo { get; set; }
        public double nPrestamoMinimo { get; set; }
        // Dates travel as strings in this API; format defined by the caller/SP.
        public string dFechaSistema { get; set; }
        public double nSeguroDesgravamen { get; set; }
        public string cMovil { get; set; }
        public int nClientePEP { get; set; }
        public string cNombreProceso { get; set; }
        public string cClassEstilo { get; set; }
        public string cComentario { get; set; }
    }
}
<file_sep>using TiboxWebApi.Models;
using TiboxWebApi.Repository;
using TiboxWebApi.Repository.Interfaces;
namespace TiboxWebApi.UnitOfWork
{
    /// <summary>
    /// Unit-of-work façade: exposes one repository per aggregate so consumers
    /// (controllers, providers) depend on this single abstraction rather than
    /// on individual repository implementations.
    /// </summary>
    public interface IUnitOfWork
    {
        IUserRepository Users { get; }
        IRepository<Product> Products { get; }
        ICatalogoCodigoRepository CatalogoCodigo { get; }
        IZonaRepository Zona { get; }
        IPersonaRepository Persona { get; }
        IFlujoRepository FlujoMaestro { get; }
        ICreditoRepository Credito { get; }
        // Generic repositories for entities with no bespoke operations.
        IRepository<WebPersonaLenddo> Lenddo { get; }
        IRepository<VarNegocio> VarNegocio { get; }
        IReporteRepository Reporte { get; }
        IDocumentoRepository Documento { get; }
        IErrorRepository Error { get; }
        IReglaNegocioRepository ReglaNegocio { get; }
        IMenuRepository Menu { get; }
    }
}
<file_sep>using Dapper;
using System.Collections.Generic;
using System.Data.SqlClient;
using TiboxWebApi.Models;
using TiboxWebApi.Repository.Interfaces;
using System.Data;
namespace TiboxWebApi.Repository.Repository
{
public class MenuRepository : BaseRepository<Menu>, IMenuRepository
{
public IEnumerable<Menu> SelMenus(string cCodUsu)
{
using (var connection = new SqlConnection(_connectionString))
{
var parameters = new DynamicParameters();
parameters.Add("@username", cCodUsu);
return connection.Query<Menu>("WebApiADM_Menu_SP", parameters, commandType: CommandType.StoredProcedure);
}
}
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace TiboxWebApi.Models
{
    /// <summary>
    /// Request model for a personal-data treatment petition
    /// ("tratamiento de datos"): the requester's identification, the request
    /// itself, and a second set of "Tit" fields (presumably the titular/data
    /// subject when filing on behalf of someone else — confirm against
    /// WebApi_LucasTratamientoDatosInserta_SP).
    /// </summary>
    public class Tratamiento
    {
        // Requester identification.
        public int nCodPers { get; set; }
        public string cDocumento { get; set; }
        public string cUsuario { get; set; }
        public string cApePat { get; set; }
        public string cApeMat { get; set; }
        public string cNombres { get; set; }
        public int nCodAge { get; set; }
        // Request details.
        public int nTipoSolicitud { get; set; }
        public int nModoRegistro { get; set; }
        public int nTipoResp { get; set; }
        public string cPedido { get; set; }
        public string cComentario { get; set; }
        // "Tit" suffix fields: second person referenced by the request.
        public int nCodPersTit { get; set; }
        public string cApePatTit { get; set; }
        public string cApeMatTit { get; set; }
        public string cNomTit { get; set; }
        public string cDocumentoTit { get; set; }
        public int nCodSolicitud { get; set; }
    }
}
<file_sep>using FluentValidation;
using TiboxWebApi.Models;
namespace TiboxWebApi.WebApi.Validators
{
public class PersonaValidator: AbstractValidator<Persona>
{
public PersonaValidator()
{
ValidatorOptions.CascadeMode = CascadeMode.StopOnFirstFailure;
RuleFor(p => p.nCodigoVerificador).NotNull().WithName("Codigo Veirificador").WithMessage("Código verificador es requerido.");
RuleFor(p => p.cNombres).NotNull().WithName("Nombres").WithMessage("Nombre es requerido")
.NotEmpty().WithMessage("Nombre es requerido")
.Length(1, 50).WithMessage("Nombre no debe de exceder de las 100 letras.");
RuleFor(p => p.cApePat).NotNull().WithName("Ape. Paterno").WithMessage("Apellido paterno es requerido")
.NotEmpty().WithMessage("Apellido paterno es requerido")
.Length(1, 50).WithMessage("Apellido paterno no debe de exceder de las 100 letras");
RuleFor(p => p.cApeMat).NotNull().WithName("Ape. Materno").WithMessage("Apellido materno es requerido")
.NotEmpty().WithMessage("Apellido materno es requerido")
.Length(1, 100).WithMessage("Apellido materno no debe de exceder de las 100 letras.");
RuleFor(p => p.nTipoDoc).NotNull().WithName("Tipo Documento").WithMessage("Tipo documento es requerido")
.GreaterThan(0).WithMessage("Tipo de documento es requerido");
RuleFor(p => p.nNroDoc).NotNull().WithName("Documento").WithMessage("Documento es requerido")
.NotEmpty().WithMessage("Documento es requerido")
.Length(1, 8).WithMessage("Documento debe de tener 8 digitos.");
RuleFor(p => p.cCelular).NotNull().WithName("Celular").WithMessage("Celular es requerido")
.NotEmpty().WithMessage("Celular es requerido")
.Length(1, 9).WithMessage("Celular debe de tener 9 digitos");
RuleFor(p => p.cEmail).NotNull().WithName("Email").WithMessage("Email no puede estar vacio")
.EmailAddress().WithMessage("Email incorrecto");
RuleFor(p => p.cConfirmaEmail).NotNull().WithName("Email confirmación").WithMessage("Email de confirmacion es requerido")
.EmailAddress().WithMessage("Email de confirmacion es incorrecto")
.Equal(p => p.cEmail).WithMessage("Los email no coinciden");
RuleFor(p => p.cCodZona).NotNull().WithName("Zona").WithMessage("Zona es requerido")
.NotEmpty().WithMessage("Zona es requerido").
Length(1, 15).WithMessage("Zona no debe de exceder de las 15 letras");
RuleFor(p => p.nTipoResidencia).NotNull().WithName("Tipo de residencia").WithMessage("Tipo de residencia es requerido")
.NotEmpty().WithMessage("Tipo de residencia es requerido");
RuleFor(p => p.nSexo).NotNull().WithName("Tipo de sexo").WithMessage("Tipo de sexo es requerido")
.NotEmpty().WithMessage("Tipo de sexo es requerido");
//RuleFor(p => p.cTelefono).NotNull().WithName("Teléfono").WithMessage("Teléfono es requerido")
// .NotEmpty().WithMessage("Teléfono es requerido")
// .Length(1, 9).WithMessage("Teléfeno no debe de tener exceder de los 9 digitos");
RuleFor(p => p.dFechaNacimiento).NotNull().WithName("Fecha de nacimiento").WithMessage("Fecha de nacimiento es requerido")
.NotEmpty().WithMessage("Fecha de nacimiento de requerido");
RuleFor(p => p.nEstadoCivil).NotNull().WithName("Estado civil").WithMessage("Estado civil es requerido")
.NotEmpty().WithMessage("Estado civil es requerido");
RuleFor(p => p.nDirTipo1).NotNull().WithName("Dirección 1").WithMessage("Dirección es requerido")
.NotEmpty().WithMessage("Dirección es requerido");
RuleFor(p => p.nDirTipo2).NotNull().WithName("Dirección 2").WithMessage("Dirección es requerido")
.NotEmpty().WithMessage("Dirección es requerido");
//RuleFor(p => p.nDirTipo3).NotNull().WithName("Dirección 3").WithMessage("Dirección es requerido")
// .NotEmpty().WithMessage("Dirección es requerido")
// .GreaterThan(0).WithMessage("Dirección es requerido");
RuleFor(p => p.cDirValor1).NotNull().WithName("Dirección 1").WithMessage("Dirección es requerido")
.NotEmpty().WithMessage("Dirección es requerido");
RuleFor(p => p.cDirValor2).NotNull().WithName("Dirección 2").WithMessage("Dirección es requerido")
.NotEmpty().WithMessage("Dirección es requerido");
//RuleFor(p => p.cDirValor3).NotNull().WithName("Dirección 3").WithMessage("Dirección es requerido")
// .NotEmpty().WithMessage("Dirección es requerido");
RuleFor(p => p.nCodAge).NotNull().WithName("Agencia").WithMessage("Agencia es requerido")
.NotEmpty().WithMessage("Agencia es requerido")
.GreaterThan(0).WithMessage("Agencia es requerido");
RuleFor(p => p.nCUUI).NotNull().WithName("Actividad economica").WithMessage("Actividad esconomica es requerido")
.NotEmpty().WithMessage("Actividad economica es requerida");
RuleFor(p => p.nSitLab).NotNull().WithName("Situación laboral").WithMessage("Situacion laboral es requerida")
.NotEmpty().WithMessage("Situacion laboral es requerida");
RuleFor(p => p.nProfes).NotNull().WithName("Profesión").WithMessage("Profesión es requerida")
.NotEmpty().WithMessage("Profesión es requerida");
RuleFor(p => p.nTipoEmp).NotNull().WithName("Tipo empleo").WithMessage("Tipo de empleo es requerido")
.NotEmpty().WithMessage("Tipo de empleo es requerido");
When(p => p.nEstadoCivil == "2", () =>
{
RuleFor(p => p.cDniConyuge).NotNull().WithName("DNI Conyuge").WithMessage("DNI Conyuge es requerido")
.NotEmpty().WithMessage("DNI Conyuge es requerido")
.Length(1, 8).WithMessage("DNI Conyuge no debe de exceder de los 8 digitos");
RuleFor(p => p.cNomConyuge).NotNull().WithName("Nombre conyuge").WithMessage("Nombre conyuge es requerido")
.NotEmpty().WithMessage("Nombre conyuge es requerido")
.Length(1, 50).WithMessage("Nombre conyuge no debe de exceder de las 50 letras");
RuleFor(p => p.cApeConyuge).NotNull().WithName("Apellido conyuge").WithMessage("Apellido conyuge es requerido")
.NotEmpty().WithMessage("Apellido conyuge es requerido")
.Length(1, 50).WithMessage("Apellido conyuge no debe de exceder de las 50 letras");
});
//RuleFor(p => p.cRuc).NotNull().WithName("RUC").WithMessage("RUC es requerido")
// .NotEmpty().WithMessage("RUC es requerido")
// .Length(1, 11).WithMessage("RUC no debe de exceder de los 11 digitos");
RuleFor(p => p.nIngresoDeclado).NotNull().WithName("Ingreso declarado").WithMessage("Ingreso declarado es requerido")
.NotEmpty().WithMessage("Ingreso declarado es requerido")
.GreaterThan(0).WithMessage("Ingreso declarado debe de ser mayor que cero");
//RuleFor(p => p.cDirEmpleo).NotNull().WithName("Dirección empleo").WithMessage("Dirección empleo es requerido")
// .NotEmpty().WithMessage("Dirección empleo es requerido")
// .Length(1,50).WithMessage("Dirección empleo no debe de exceder de las 50 letras");
RuleFor(p => p.cTelfEmpleo).NotNull().WithName("Teléfono empleo").WithMessage("Teléfono empleo es requerido")
.NotEmpty().WithMessage("Teléfono empleo es requerido")
.Length(1,9).WithMessage("Teléfono empleo no debe de exceder de los 9 digitos");
RuleFor(p => p.dFecIngrLab).NotNull().WithName("Fecha ingreso laboral").WithMessage("Fecha ingreso laboral es requerido")
.NotEmpty().WithMessage("Fecha ingreso laboral es requerido");
RuleFor(p => p.bCargoPublico).NotNull().WithName("Cargo publico").WithMessage("Cargo publico es requerido");
//RuleFor(p => p.cNomEmpresa).NotNull().WithName("Nombre empresa").WithMessage("Nombre empresa es requerido")
// .NotEmpty().WithMessage("Nombre empresa es requerido")
// .Length(1,30).WithMessage("Nombre empresa no debe de exceder de las 30 letras");
When(p => p.nProfes == "999", () => {
RuleFor(p => p.cProfesionOtros).NotNull().WithName("Profesion otros").WithMessage("Profesion otros es requerido")
.NotEmpty().WithMessage("Profesion otros es requerido")
.Length(1,40).WithMessage("Profesion otros no debe de exceder de las 50 letras");
});
RuleFor(p => p.cCodZonaEmpleo).NotNull().WithName("Zona Empleo")
.NotEmpty().WithMessage("Zona Empleo es requerido").
Length(1, 15).WithMessage("Zona Empleo no debe de exceder de las 15 letras");
RuleFor(p => p.nDirTipo1Empleo).NotNull().WithName("Dirección 1 Empleo").WithMessage("Dirección es requerido")
.NotEmpty().WithMessage("Dirección es requerido");
RuleFor(p => p.nDirTipo2Empleo).NotNull().WithName("Dirección 2 Empleo").WithMessage("Dirección es requerido")
.NotEmpty().WithMessage("Dirección es requerido");
RuleFor(p => p.cDirValor1Empleo).NotNull().WithName("Dirección 1 Empleo").WithMessage("Dirección es requerido")
.NotEmpty().WithMessage("Dirección es requerido");
RuleFor(p => p.cDirValor2Empleo).NotNull().WithName("Dirección 2 Empleo").WithMessage("Dirección es requerido")
.NotEmpty().WithMessage("Dirección es requerido");
}
}
}<file_sep>using Dapper;
using System.Data;
using System.Data.SqlClient;
using TiboxWebApi.Models;
using TiboxWebApi.Repository.Interfaces;
using System.Collections.Generic;
using System;
namespace TiboxWebApi.Repository.Repository
{
    /// <summary>
    /// Dapper-based repository for <see cref="Persona"/>. Every operation
    /// delegates to a SQL Server stored procedure; several return their result
    /// through an output parameter rather than a result set.
    /// </summary>
    public class PersonaRepository : BaseRepository<Persona>, IPersonaRepository
    {
        // Password-encryption helper used by LucasInsPersona.
        // NOTE(review): public readonly field; the "= null" initializer is
        // redundant — the constructor always assigns it.
        public readonly Utiles _utils = null;

        public PersonaRepository()
        {
            _utils = new Utiles();
        }

        /// <summary>
        /// Updates an existing person via WebApi_LucasActPersona_SP and returns
        /// the person code reported back through the @nCodPers output parameter.
        /// </summary>
        public int LucasActPersona(Persona persona)
        {
            using (var connection = new SqlConnection(_connectionString))
            {
                var parameters = new DynamicParameters();
                parameters.Add("@nNroDoc", persona.nNroDoc);
                parameters.Add("@cCodZona", persona.cCodZona);
                parameters.Add("@nTipoResidencia", persona.nTipoResidencia);
                parameters.Add("@nSexo", persona.nSexo);
                parameters.Add("@cTelefono", persona.cTelefono);
                parameters.Add("@dFechaNacimiento", persona.dFechaNacimiento);
                parameters.Add("@nEstadoCivil", persona.nEstadoCivil);
                parameters.Add("@nDirTipo1", persona.nDirTipo1);
                parameters.Add("@nDirTipo2", persona.nDirTipo2);
                parameters.Add("@nDirTipo3", persona.nDirTipo3);
                parameters.Add("@cDirValor1", persona.cDirValor1);
                parameters.Add("@cDirValor2", persona.cDirValor2);
                parameters.Add("@cDirValor3", persona.cDirValor3);
                parameters.Add("@nCodAge", persona.nCodAge);
                parameters.Add("@nCUUI", persona.nCUUI);
                parameters.Add("@nSitLab", persona.nSitLab);
                parameters.Add("@nProfes", persona.nProfes);
                parameters.Add("@nTipoEmp", persona.nTipoEmp);
                parameters.Add("@cDniConyuge", persona.cDniConyuge);
                parameters.Add("@cNomConyuge", persona.cNomConyuge);
                parameters.Add("@cApeConyuge", persona.cApeConyuge);
                parameters.Add("@cRuc", persona.cRuc);
                parameters.Add("@nIngresoDeclado", persona.nIngresoDeclado);
                parameters.Add("@cTelfEmpleo", persona.cTelfEmpleo);
                parameters.Add("@dFecIngrLab", persona.dFecIngrLab);
                parameters.Add("@bCargoPublico", persona.bCargoPublico);
                parameters.Add("@cNomEmpresa", persona.cNomEmpresa);
                parameters.Add("@cProfesionOtros", persona.cProfesionOtros);
                parameters.Add("@cCodZonaEmpleo", persona.cCodZonaEmpleo);
                parameters.Add("@nDirTipo1Empleo", persona.nDirTipo1Empleo);
                parameters.Add("@nDirTipo2Empleo", persona.nDirTipo2Empleo);
                parameters.Add("@nDirTipo3Empleo", persona.nDirTipo3Empleo);
                parameters.Add("@cDirValor1Empleo", persona.cDirValor1Empleo);
                parameters.Add("@cDirValor2Empleo", persona.cDirValor2Empleo);
                parameters.Add("@cDirValor3Empleo", persona.cDirValor3Empleo);
                // Output parameter: the SP writes the affected person's code here.
                parameters.Add("@nCodPers", dbType: DbType.Int32, direction: ParameterDirection.Output);
                // Query<int> is used only to execute the SP; the result set is discarded.
                connection.Query<int>("WebApi_LucasActPersona_SP",
                    parameters,
                    commandType: CommandType.StoredProcedure, commandTimeout: 0);
                var nCodPers = parameters.Get<int>("@nCodPers");
                return nCodPers;
            }
        }

        /// <summary>
        /// Fetches person data matching document number, email and/or person
        /// code via WebApi_LucasDatosPersona_SP.
        /// </summary>
        public IEnumerable<Persona> LucasDatosPersona(string cDocumento, string cEmail, int nCodPers)
        {
            using(var connection = new SqlConnection(_connectionString))
            {
                var parameters = new DynamicParameters();
                parameters.Add("@cNroDoc", cDocumento);
                parameters.Add("@cEmail", cEmail);
                parameters.Add("@nCodPers", nCodPers);
                return connection.Query<Persona>("WebApi_LucasDatosPersona_SP", parameters, commandType: CommandType.StoredProcedure);
            }
        }

        /// <summary>
        /// Inserts a new person via WebApi_LucasInsPersona_SP. The initial
        /// password is derived by encrypting the document number. Returns the
        /// new person code from the @nCodPers output parameter.
        /// </summary>
        public int LucasInsPersona(Persona persona)
        {
            using (var connection = new SqlConnection(_connectionString))
            {
                // Initial password = encrypted document number.
                string Pass = _utils.Encriptar(persona.nNroDoc);
                var parameters = new DynamicParameters();
                parameters.Add("@cNombres", persona.cNombres);
                parameters.Add("@cApePat", persona.cApePat);
                parameters.Add("@cApeMat", persona.cApeMat);
                parameters.Add("@nTipoDoc", persona.nTipoDoc);
                parameters.Add("@nNroDoc", persona.nNroDoc);
                parameters.Add("@cCelular", persona.cCelular);
                parameters.Add("@cEmail", persona.cEmail);
                parameters.Add("@cCodZona", persona.cCodZona);
                parameters.Add("@nTipoResidencia", persona.nTipoResidencia);
                parameters.Add("@nSexo", persona.nSexo);
                parameters.Add("@cTelefono", persona.cTelefono);
                parameters.Add("@dFechaNacimiento", persona.dFechaNacimiento);
                parameters.Add("@nEstadoCivil", persona.nEstadoCivil);
                parameters.Add("@nDirTipo1", persona.nDirTipo1);
                parameters.Add("@nDirTipo2", persona.nDirTipo2);
                parameters.Add("@nDirTipo3", persona.nDirTipo3);
                parameters.Add("@cDirValor1", persona.cDirValor1);
                parameters.Add("@cDirValor2", persona.cDirValor2);
                parameters.Add("@cDirValor3", persona.cDirValor3);
                parameters.Add("@nCodAge", persona.nCodAge);
                parameters.Add("@nCUUI", persona.nCUUI);
                parameters.Add("@nSitLab", persona.nSitLab);
                parameters.Add("@nProfes", persona.nProfes);
                parameters.Add("@nTipoEmp", persona.nTipoEmp);
                parameters.Add("@cDniConyuge", persona.cDniConyuge);
                parameters.Add("@cNomConyuge", persona.cNomConyuge);
                parameters.Add("@cApeConyuge", persona.cApeConyuge);
                parameters.Add("@cRuc", persona.cRuc);
                parameters.Add("@nIngresoDeclado", persona.nIngresoDeclado);
                parameters.Add("@cTelfEmpleo", persona.cTelfEmpleo);
                parameters.Add("@dFecIngrLab", persona.dFecIngrLab);
                parameters.Add("@bCargoPublico", persona.bCargoPublico);
                parameters.Add("@cNomEmpresa", persona.cNomEmpresa);
                parameters.Add("@cProfesionOtros", persona.cProfesionOtros);
                parameters.Add("@cPassEncripta", Pass);
                parameters.Add("@nCodigoVerificador", persona.nCodigoVerificador);
                parameters.Add("@cCodZonaEmpleo", persona.cCodZonaEmpleo);
                parameters.Add("@nDirTipo1Empleo", persona.nDirTipo1Empleo);
                parameters.Add("@nDirTipo2Empleo", persona.nDirTipo2Empleo);
                parameters.Add("@nDirTipo3Empleo", persona.nDirTipo3Empleo);
                parameters.Add("@cDirValor1Empleo", persona.cDirValor1Empleo);
                parameters.Add("@cDirValor2Empleo", persona.cDirValor2Empleo);
                parameters.Add("@cDirValor3Empleo", persona.cDirValor3Empleo);
                // Output parameter: identity of the inserted person.
                parameters.Add("@nCodPers", dbType: DbType.Int32, direction: ParameterDirection.Output);
                connection.Query<int>("WebApi_LucasInsPersona_SP",
                    parameters,
                    commandType: CommandType.StoredProcedure, commandTimeout: 0);
                var nCodPers = parameters.Get<int>("@nCodPers");
                return nCodPers;
            }
        }

        /// <summary>
        /// Files a data-treatment request via
        /// WebApi_LucasTratamientoDatosInserta_SP and returns the generated
        /// request code (@nCodSolicitud output parameter).
        /// </summary>
        public int LucasTratamientoDatos(Tratamiento tratamiento)
        {
            using(var connection = new SqlConnection(_connectionString))
            {
                var parameters = new DynamicParameters();
                parameters.Add("@nCodPers", tratamiento.nCodPers);
                parameters.Add("@cDocumento", tratamiento.cDocumento);
                parameters.Add("@cUsuario", tratamiento.cUsuario);
                parameters.Add("@cApePat", tratamiento.cApePat);
                parameters.Add("@cApeMat", tratamiento.cApeMat);
                parameters.Add("@cNombres", tratamiento.cNombres);
                parameters.Add("@nCodAge", tratamiento.nCodAge);
                parameters.Add("@nTipoSolicitud", tratamiento.nTipoSolicitud);
                parameters.Add("@nModoRegistro", tratamiento.nModoRegistro);
                parameters.Add("@nTipoResp", tratamiento.nTipoResp);
                parameters.Add("@cPedido", tratamiento.cPedido);
                parameters.Add("@cComentario", tratamiento.cComentario);
                parameters.Add("@nCodPersTit", tratamiento.nCodPersTit);
                parameters.Add("@cApePatTit", tratamiento.cApePatTit);
                parameters.Add("@cApeMatTit", tratamiento.cApeMatTit);
                parameters.Add("@cNomTit", tratamiento.cNomTit);
                parameters.Add("@cDocumentoTit", tratamiento.cDocumentoTit);
                parameters.Add("@nCodSolicitud", dbType: DbType.Int32, direction: ParameterDirection.Output);
                connection.Query<int>("WebApi_LucasTratamientoDatosInserta_SP", parameters, commandType: CommandType.StoredProcedure);
                var nCodSolcitud = parameters.Get<int>("@nCodSolicitud");
                return nCodSolcitud;
            }
        }

        /// <summary>
        /// Validates a document/cell-phone pair via
        /// WebApi_PersonaValidaCelular_SP; the verdict comes back in the @nRes
        /// output parameter (meaning defined by the SP).
        /// </summary>
        public int LucasValidaPersonaCelular(string cDocumento, string cTelefono)
        {
            using (var connection = new SqlConnection(_connectionString))
            {
                var parameters = new DynamicParameters();
                parameters.Add("@cCelular", cTelefono);
                parameters.Add("@cDocumento", cDocumento);
                parameters.Add("@nRes", dbType: DbType.Int32, direction: ParameterDirection.Output);
                connection.Query<Persona>("WebApi_PersonaValidaCelular_SP", parameters, commandType: CommandType.StoredProcedure);
                return parameters.Get<int>("@nRes");
            }
        }

        /// <summary>
        /// Looks up whether a client with the given document already exists via
        /// WebApi_LucasVerificaClienteExiste_SP.
        /// </summary>
        public IEnumerable<User> LucasVerificaClienteExiste(string cDocumento)
        {
            using(var connection = new SqlConnection(_connectionString))
            {
                var parameters = new DynamicParameters();
                parameters.Add("@cDocumento", cDocumento);
                return connection.Query<User>("WebApi_LucasVerificaClienteExiste_SP",
                    parameters,
                    commandType: CommandType.StoredProcedure, commandTimeout: 0);
            }
        }
    }
}
<file_sep>using System.Web.Http;
using TiboxWebApi.Models;
using TiboxWebApi.UnitOfWork;
using TiboxWebApi.WebApi.Utils;
namespace TiboxWebApi.WebApi.Controllers
{
[RoutePrefix("Credito")]
[Authorize]
public class CreditoController : BaseController
{
private Utiles _utils;
public CreditoController(IUnitOfWork unit) : base(unit)
{
_utils = new Utiles();
}
[Route("Bandeja")]
[HttpPost]
public IHttpActionResult Bandeja(Credito credito)
{
if (credito == null) return BadRequest();
if (credito.nCodAge == 0) return BadRequest();
if (credito.nCodPers == 0) return BadRequest();
return Ok(_unit.Credito.LucasBandeja(credito.nCodPers, credito.nPagina, credito.nTamanio, credito.nCodAge));
}
[Route("")]
[HttpPost]
public IHttpActionResult Credito(Credito credito)
{
if (credito == null) return BadRequest();
var nCodCred = _unit.Credito.LucasInsCredito(credito);
if (nCodCred == 0) return BadRequest();
return Ok(new { nCodCred = nCodCred });
}
[Route("Modalidad")]
[HttpPost]
public IHttpActionResult Modalidad(Credito credito)
{
if (credito == null) return BadRequest();
var nRetorno = _unit.Credito.LucasInsModalidad(credito);
if (nRetorno == 0) return BadRequest();
return Ok(new { bExito = nRetorno });
}
[Route("Firma")]
[HttpPost]
public IHttpActionResult Firma(Credito credito)
{
if (credito == null) return BadRequest();
var nRetorno = _unit.Credito.LucasInsFirmaElectronica(credito);
if (nRetorno == 0) return BadRequest();
return Ok(new { bExito = nRetorno });
}
[Route("DatosPrestamo/{nCodAge}/{nCodCred}")]
[HttpGet]
public IHttpActionResult DatosPrestamo(int nCodAge, int nCodCred)
{
if (nCodAge == 0) return BadRequest();
if (nCodCred == 0) return BadRequest();
return Ok(_unit.Credito.LucasDatosPrestamo(nCodAge, nCodCred));
}
[Route("Calendario/Lista/{nCodAge}/{nCodCred}")]
[HttpGet]
public IHttpActionResult CalendarioLista(int nCodAge, int nCodCred)
{
if (nCodAge == 0) return BadRequest();
if (nCodCred == 0) return BadRequest();
return Ok(_unit.Credito.LucasCalendarioLista(nCodAge, nCodCred));
}
[Route("Kardex/Lista/{nCodAge}/{nCodCred}")]
[HttpGet]
public IHttpActionResult KardexLista(int nCodAge, int nCodCred)
{
if (nCodAge == 0) return BadRequest();
if (nCodCred == 0) return BadRequest();
return Ok(_unit.Credito.LucasKardexLista(nCodAge, nCodCred));
}
[Route("RechazadoPorDia/{cDocumento}")]
[HttpGet]
public IHttpActionResult RechazadoPorDia(string cDocumento)
{
return Ok(_unit.Credito.LucasRechazadoPorDia(cDocumento));
}
[Route("CreditoxFlujo/{cDocumento}")]
[HttpGet]
public IHttpActionResult CreditoxFlujo(string cDocumento)
{
return Ok(_unit.Credito.LucasCreditoEnFlujo(cDocumento));
}
[Route("AnulaxActualizacion/{cDocumento}")]
[HttpGet]
public IHttpActionResult AnulaxActualizacion(string cDocumento)
{
return Ok(_unit.Credito.LucasCreditoAnulaxActualizacion(cDocumento));
}
[Route("Calendario")]
[HttpPost]
public IHttpActionResult Calendario(Credito credito)
{
return Ok(_utils.GeneraCalendario(credito.nPrestamo, credito.nNroCuotas, credito.nPeriodo, credito.nTasa, credito.dFechaSistema, credito.nSeguro));
}
}
}
<file_sep>using System;
using TiboxWebApi.WebApi.wsCPD;
using TiboxWebApi.WebApi.wsIngresoPredecido;
using TiboxWebApi.WebApi.wsPreAprobacion;
using TiboxWebApi.WebApi.wsScoreBuro;
using TiboxWebApi.WebApi.wsScoreComportamiento;
using TiboxWebApi.WebApi.wsScoreDemografico;
using TiboxWebApi.WebApi.wsScoreLenddo;
using TiboxWebApi.WebApi.wsScoringBuro;
using TiboxWebApi.WebApi.wsScoringDemografico;
using TiboxWebApi.WebApi.wsScoringValidacionReglas;
namespace TiboxWebApi.WebApi.Utils
{
public class EvaluacionMotor
{
private IwsScoreBuro oScoreBuro;
private IwsScoreDemografico oScoreDemografo;
private IScoringBuro oScoringBuro;
private IwsScoringDemografico oScoringDemografico;
private IngresoPredecido oScoringIngresoPredecido;
private IwsScoreLenddo oScoreLenddo;
private IwsCPD oScoreCPD;
private IwsPreAprobacion oPreAprobacion;
private IwsScoreComportamiento oComportamiento;
private IwsScoringValidacionReglas oReglas;
double nScoreLenddo = 0;
double nIngDemo1 = 0;
double nIngDemo2 = 0;
double nIngDemo3 = 0;
double nIngresoInfDemografico = 0;
double nIngresoInfRCC = 0;
double nIngRCC1 = 0;
double nIngRCC2 = 0;
double nIngRCC3 = 0;
double nIngFinal1 = 0;
double nIngFinal2 = 0;
double nIngFinal3 = 0;
double nMontoTotal = 0;
double nCuotaUtilizada1 = 0;
double nCuotaUtilizada2 = 0;
double nScoreBuro = 0;
double nCuotaMaxima = 0;
double nCuotaDisp = 0;
double nPrestamo1 = 0;
double nPrestamo2 = 0;
double nPrestamo3 = 0;
double nPrestamo4 = 0;
double nPrestamoMinimo = 0;
double nTasa = 0;
double nPlazo = 0;
double nRCI = 0;
double nPrestamoMax = 0;
double nRMA = 0;
double nPrestamoFinal = 0;
double nMora = 0;
double nSumatoria = 0;
double nIngresoDeclarado = 0;
double nPorGarantia = 0;
double nScoreComportamiento = 0;
double nScoreDemografico = 0;
int nTipoSolicitud = 0;
int nValorNecesario = 0;
int nCodigoFlujo = 0;
int nIdRechazado = 0;
int nTipoBanca = 0;
int nClientePEP = 0;
string cDecisionReglas = "";
string cMotivoRechazado = "";
string cDecision = "";
string cTipoBanca = "";
string cClienteLenddo = "";
        /// <summary>
        /// Instantiates the generated service-client proxies for every external
        /// scoring service consumed during an evaluation (buro, demographic,
        /// predicted income, Lenddo, CPD, pre-approval, behavior, rules).
        /// </summary>
        public EvaluacionMotor()
        {
            oScoreBuro = new IwsScoreBuroClient();
            oScoreDemografo = new IwsScoreDemograficoClient();
            oScoringBuro = new ScoringBuroClient();
            oScoringDemografico = new IwsScoringDemograficoClient();
            oScoringIngresoPredecido = new IngresoPredecidoClient();
            oScoreLenddo = new IwsScoreLenddoClient();
            oScoreCPD = new IwsCPDClient();
            oPreAprobacion = new IwsPreAprobacionClient();
            oComportamiento = new IwsScoreComportamientoClient();
            oReglas = new IwsScoringValidacionReglasClient();
        }
        /// <summary>
        /// Serializes the current evaluation state (the instance fields set
        /// during Evaluacion) into one XML fragment:
        /// &lt;GENESYS&gt;&lt;DATA attr='value' .../&gt;&lt;/GENESYS&gt;.
        /// Values use default ToString formatting and are NOT XML-escaped —
        /// assumes none of the string fields contain quotes or angle brackets;
        /// confirm against the producers of cDecision/cMotivoRechazado.
        /// </summary>
        string devuelveXMLDatos(string cDocumento)
        {
            string cXmlScoringDatosArma = "<GENESYS><DATA cDocumento = '" + cDocumento + "' " +
            "nIngDemo1 = '" + nIngDemo1 + "' " + "nIngDemo2 = '" + nIngDemo2 + "' " + "nIngDemo3 = '" + nIngDemo3 + "' " + "nIngresoInfDemografico = '" + nIngresoInfDemografico + "' " +
            "nIngresoInfRCC = '" + nIngresoInfRCC + "' " + "nIngRCC1 = '" + nIngRCC1 + "' " + "nIngRCC2 = '" + nIngRCC2 + "' " + "nIngRCC3 = '" + nIngRCC3 + "' " +
            "nIngFinal1 = '" + nIngFinal1 + "' " + "nIngFinal2 = '" + nIngFinal2 + "' " + "nIngFinal3 = '" + nIngFinal3 + "' " + "nMontoTotal = '" + nMontoTotal + "' " +
            "nCuotaUtilizada1 = '" + nCuotaUtilizada1 + "' " + "nCuotaUtilizada2 = '" + nCuotaUtilizada2 + "' " + "nScoreBuro = '" + nScoreBuro + "' " + "nCuotaMaxima = '" + nCuotaMaxima + "' " +
            "nCuotaDisp = '" + nCuotaDisp + "' " + "nPrestamo1 = '" + nPrestamo1 + "' " + "nPrestamo2 = '" + nPrestamo2 + "' " + "nPrestamo3 = '" + nPrestamo3 + "' " +
            "nPrestamo4 = '" + nPrestamo4 + "' " + "nTasa = '" + nTasa + "' " + "nPlazo = '" + nPlazo + "' " + "nRCI = '" + nRCI + "' " +
            "nPrestamoMax = '" + nPrestamoMax + "' " + "nRMA = '" + nRMA + "' " + "nPrestamoFinal = '" + nPrestamoFinal + "' " + "nMora = '" + nMora + "' " +
            "nSumatoria = '" + nSumatoria + "' " + "cDecision = '" + cDecision + "' " + "cTipoBanca = '" + cTipoBanca + "' " + " nScoreLenddo = '" + nScoreLenddo + "' " +
            "nIngresoDeclarado = '" + nIngresoDeclarado + "' nPorGarantia = '" + nPorGarantia + "' nTipoSolicitud = '" + nTipoSolicitud + "' " +
            "cClienteLenddo = '" + cClienteLenddo + "' nValorNecesario = '" + nValorNecesario + "' nCodigoFlujo = '" + nCodigoFlujo + "' nPrestamoMinimo = '" + nPrestamoMinimo + "' " +
            "nScoreComportamiento = '" + nScoreComportamiento + "' nScoreDemografico = '" + nScoreDemografico + "' cMotivoRechazado = '" + cMotivoRechazado + "' " +
            "nIdRechazado = '" + nIdRechazado + "' cDecisionReglas = '" + cDecisionReglas + "' nClientePEP = '" + nClientePEP + "'></DATA></GENESYS>";
            return cXmlScoringDatosArma;
        }
public bool Evaluacion(string cDocumento,
string cDistrito,
string cProvincia,
string cDepartamento,
int nEdad,
int nGenero,
int nEstadoCivil,
int nCIIU,
int nProducto,
int nModalidad,
int nCondicion,
int nVivienda,
int nScoreLendo,
int nTipoDocumento,
int nTipoDocumentoConyuge,
string cDocumentoConyuge,
int nCondicionSituacionLaboral,
int nCodPers,
double IngresoDeclarado,
string nTipoDependiente,
string nTipoFormal,
ref string cXmlScoringDatos,
ref string cXmlScoringCuota,
ref string cXmlDeudas,
ref string cXMLPuntajeIPDItems,
ref string cXmlScoringDemo,
ref string cMensajeError,
ref int nRechazado,
ref int nPEP)
{
cXmlScoringDatos = "<GENESYS></GENESYS>";
cXmlScoringCuota = "<GENESYS></GENESYS>";
cXmlDeudas = "<GENESYS></GENESYS>";
cXMLPuntajeIPDItems = "<GENESYS></GENESYS>";
cXmlScoringDemo = "<GENESYS></GENESYS>";
string cDep, cProv, cDis;
string cUbigueo = "";
string cZona = "";
cDep = cDepartamento;
cProv = cProvincia;
cDis = cDistrito;
cDepartamento = cDep + "0000000000";
cProvincia = cDep + cProv + "00000000";
cDistrito = cDep + cProv + cDis + "000000";
cZona = cDistrito;
if (cZona.Substring(0, 2) == "15") cUbigueo = cZona.Substring(0, 6);
else if (cZona.Substring(0, 2) == "15" && cZona.Substring(2, 2) != "01") cUbigueo.Substring(0, 6);
else
{
cUbigueo = cZona.Substring(0, 6);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
ELIngresoPredecidoDemograficoDatos oIP = new ELIngresoPredecidoDemograficoDatos();
ELIngresoPredecidoDemograficoResultado oIPReturn = new ELIngresoPredecidoDemograficoResultado();
try
{
oIP.nGenero = nGenero.ToString();
oIP.nEstadoCivil = nEstadoCivil.ToString();
oIP.nEdad = nEdad.ToString();
oIP.sUbiGeo = cUbigueo;
oIP.sCIIU = nCIIU.ToString();
oIPReturn = oScoringIngresoPredecido.IngresoPredecidoDemografico(oIP);
if (oIPReturn.bError == true)
{
cMensajeError = "IngresoPredecidoDemografico Error: " + oIPReturn.sMensajeError;
return false;
}
cXMLPuntajeIPDItems = "<GENESYS>";
if (oIPReturn.PuntajeIPDItems != null)
{
for (var h = 0; h <= oIPReturn.PuntajeIPDItems.Length - 1; h++)
{
cXMLPuntajeIPDItems = cXMLPuntajeIPDItems + "<DATA cVariable = '" + oIPReturn.PuntajeIPDItems[h].sVariable + "' cVariableDescripcion = '" + oIPReturn.PuntajeIPDItems[h].sVariableDescripcion + "' " +
"nVariablePuntaje = '" + oIPReturn.PuntajeIPDItems[h].nVariablePuntaje + "' ></DATA>";
}
}
cXMLPuntajeIPDItems = cXMLPuntajeIPDItems + "</GENESYS>";
nIngDemo1 = oIPReturn.nIngresoPredecidoDemografico1;
nIngDemo2 = oIPReturn.nIngresoPredecidoDemografico2;
nIngDemo3 = oIPReturn.nIngresoPredecidoDemografico3;
nIngresoInfDemografico = oIPReturn.nIngresoPredecidoDemografico3;
}
catch (Exception ex)
{
cMensajeError = "IngresoPredecidoDemografico Error: " + ex.Message;
return false;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
ELScoreBuroDatos oIPScoreBuro = new ELScoreBuroDatos();
ELScoreBuroResultado oIPScoreBuroReturn = new ELScoreBuroResultado();
try
{
oIPScoreBuro.sNroDoc = cDocumento;
oIPScoreBuroReturn = oScoreBuro.ScoreBuro(oIPScoreBuro);
if (oIPScoreBuroReturn.bError == true)
{
cMensajeError = "ScoreBuro Error: " + oIPScoreBuroReturn.sMensajeError;
return false;
}
nScoreBuro = oIPScoreBuroReturn.nScoreBuro;
}
catch (Exception ex)
{
cMensajeError = "ScoreBuro Error: " + ex.Message;
return false;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
ELIngresoPredecidoRCCDatos oIPRCC = new ELIngresoPredecidoRCCDatos();
ELIngresoPredecidoRCCResultado oIPRCCReturn = new ELIngresoPredecidoRCCResultado();
try
{
oIPRCC.nEstadoCivil = nEstadoCivil.ToString();
wsIngresoPredecido.ELDocumento oDocumento = new wsIngresoPredecido.ELDocumento();
oDocumento = new wsIngresoPredecido.ELDocumento();
oDocumento.nTipoDoc = nTipoDocumento.ToString();
oDocumento.sNroDoc = cDocumento;
oIPRCC.oDocumentoTitular = oDocumento;
if (oIPRCC.nEstadoCivil == "2")
{
oDocumento = new wsIngresoPredecido.ELDocumento();
oDocumento.nTipoDoc = nTipoDocumentoConyuge.ToString();
oDocumento.sNroDoc = cDocumentoConyuge;
oIPRCC.oDocumentoConyuge = oDocumento;
}
oIPRCCReturn = oScoringIngresoPredecido.IngresoPredecidoRCC(oIPRCC);
if (oIPRCCReturn.bError == true)
{
cMensajeError = "IngresoPredecidoRCC Error: " + oIPRCCReturn.sMensajeError;
return false;
}
cXmlDeudas = "<GENESYS>";
int nContarError = 0;
if (oIPRCCReturn.Deudas != null)
{
for (var i = 0; i <= oIPRCCReturn.Deudas.Length - 1; i++)
{
cXmlDeudas = cXmlDeudas + "<DATA cTipoDescripcion = '" + oIPRCCReturn.Deudas[i].sTipoDescripcion + "' cCaracteristica = '" + oIPRCCReturn.Deudas[i].sCaracteristica + "' " +
"nMonto = '" + oIPRCCReturn.Deudas[i].nMonto + "' nFactor = '" + oIPRCCReturn.Deudas[i].nFactor + "' nInferencia = '" + oIPRCCReturn.Deudas[i].nInferencia + "' ></DATA>";
nContarError = nContarError + 1;
}
}
cXmlDeudas = cXmlDeudas + "</GENESYS>";
nIngRCC1 = oIPRCCReturn.nIngresoPredecidoRCC1;
nIngRCC2 = oIPRCCReturn.nIngresoPredecidoRCC2;
nIngRCC3 = oIPRCCReturn.nIngresoPredecidoRCC3;
nIngresoInfRCC = oIPRCCReturn.nIngresoPredecidoRCC3;
}
catch (Exception ex)
{
cMensajeError = "IngresoPredecidoRCC Error: " + ex.Message;
return false;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
ELIngresoPredecidoDatos oIPFinal = new ELIngresoPredecidoDatos();
ELIngresoPredecidoResultado oIPFinalReturn = new ELIngresoPredecidoResultado();
try
{
wsIngresoPredecido.ELDocumento oDocumento = new wsIngresoPredecido.ELDocumento();
oDocumento = new wsIngresoPredecido.ELDocumento();
oDocumento.nTipoDoc = nTipoDocumento.ToString();
oDocumento.sNroDoc = cDocumento;
oIPFinal.oDocumentoTitular = oDocumento;
oIPFinal.nIngresoPredecidoDemografico = nIngresoInfDemografico.ToString();
oIPFinal.nIngresoPredecidoRCC = nIngresoInfRCC.ToString();
oIPFinalReturn = oScoringIngresoPredecido.IngresoPredecido(oIPFinal);
if (oIPFinalReturn.bError == true)
{
cMensajeError = "IngresoPredecido Error: " + oIPFinalReturn.sMensajeError;
return false;
}
nIngFinal1 = oIPFinalReturn.nIngresoPredecidoFinal1;
nIngFinal2 = oIPFinalReturn.nIngresoPredecidoFinal2;
nPrestamo1 = oIPFinalReturn.nIngresoPredecidoFinal2;
ELIngresoCliente OElIngresoCliente = new ELIngresoCliente();
ELIngresoCliente OElIngresoClienteResultado = new ELIngresoCliente();
OElIngresoCliente.nIngresoPredecido = nIngFinal2.ToString();
OElIngresoCliente.nIngresoDeclarado = IngresoDeclarado.ToString();
OElIngresoCliente.nIngresoFinal = "0";
OElIngresoClienteResultado = oScoringIngresoPredecido.IngresoPredecidoVsIngresoDeclarado(OElIngresoCliente);
if (OElIngresoClienteResultado.oError.bError == true)
{
cMensajeError = "IngresoPredecidoVsIngresoDeclarado Error: " + OElIngresoClienteResultado.oError.sMensajeError;
return false;
}
nIngFinal3 = Convert.ToDouble(OElIngresoClienteResultado.nIngresoFinal);
}
catch (Exception ex)
{
cMensajeError = "Error: " + ex.Message;
return false;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
ELScoringBuroCuotaUtilizadaDatos oSBCuotaUtilizadaDatos = new ELScoringBuroCuotaUtilizadaDatos();
ELScoringBuroCuotaUtilizadaResultado oSBCuotaUtilizadaResultado = new ELScoringBuroCuotaUtilizadaResultado();
try
{
oSBCuotaUtilizadaDatos.nEstadoCivil = nEstadoCivil.ToString();
wsScoringBuro.ELDocumento oDocumento = new wsScoringBuro.ELDocumento();
oDocumento = new wsScoringBuro.ELDocumento();
oDocumento.nTipoDoc = nTipoDocumento.ToString();
oDocumento.sNroDoc = cDocumento;
oSBCuotaUtilizadaDatos.oDocumentoTitular = oDocumento;
if (oSBCuotaUtilizadaDatos.nEstadoCivil == "2")
{
oDocumento = new wsScoringBuro.ELDocumento();
oDocumento.nTipoDoc = nTipoDocumentoConyuge.ToString();
oDocumento.sNroDoc = cDocumentoConyuge;
oSBCuotaUtilizadaDatos.oDocumentoConyuge = oDocumento;
}
oSBCuotaUtilizadaResultado = oScoringBuro.ScoringBuroCuotaUtilizada(oSBCuotaUtilizadaDatos);
if (oSBCuotaUtilizadaResultado.bError == true)
{
cMensajeError = "ScoringBuroCuotaUtilizada Error: " + oSBCuotaUtilizadaResultado.sMensajeError;
return false;
}
var dgvDatos = oSBCuotaUtilizadaResultado.DatosCuotaUtilizada;
cXmlScoringCuota = "<GENESYS>";
if (dgvDatos != null)
{
for (var i = 0; i <= dgvDatos.Length - 1; i++)
{
cXmlScoringCuota = cXmlScoringCuota + "<DATA cTipoDescripcion = '" + dgvDatos[i].sTipoDescripcion + "' nMonto = '" + dgvDatos[i].nMonto + "' " +
"nPlazo = '" + dgvDatos[i].nPlazo + "' nTasa = '" + dgvDatos[i].nTasa + "' " +
"nFactorUtilizacion = '" + dgvDatos[i].nFactorUtilizacion + "' nCuota = '" + dgvDatos[i].nCuota + "' " +
"nTipoId = '" + dgvDatos[i].nTipoId + "' ></DATA>";
nMontoTotal = nMontoTotal + Convert.ToDouble(dgvDatos[i].nMonto);
}
}
cXmlScoringCuota = cXmlScoringCuota + "</GENESYS>";
nCuotaUtilizada1 = oSBCuotaUtilizadaResultado.nCuotaUtilizada;
nCuotaUtilizada2 = oSBCuotaUtilizadaResultado.nCuotaUtilizada;
}
catch (Exception ex)
{
cMensajeError = "ScoringBuroCuotaUtilizada Error: " + ex.Message;
return false;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
if (nScoreBuro > 0) //BANCARIZADO
{
try
{
ELScoringBuroDatos oBuroIP = new ELScoringBuroDatos();
ELScoringBuroResultado oBuroIPReturn = new ELScoringBuroResultado();
ELCPDMatrizDatos oMatriz = new ELCPDMatrizDatos();
ELCPDMatrizResultado oMatrizResultado = new ELCPDMatrizResultado();
cTipoBanca = "BANCARIZADO";
nTipoBanca = 1;
oMatriz.nTipoScore = nTipoBanca.ToString();
oMatriz.nSitucionLaboral = nCondicion.ToString();
oMatriz.nCodicionSituacionlaboral = nCondicionSituacionLaboral.ToString();
oMatriz.oProducto = new wsCPD.ELProducto();
oMatriz.oProducto.nProducto = nProducto.ToString();
oMatriz.oProducto.nModalidad = nModalidad.ToString();
oMatrizResultado = oScoreCPD.DevuelveRequiereCPD(oMatriz);
if (oMatrizResultado.bError == true)
{
cMensajeError = "DevuelveRequiereCPD Error: " + oMatrizResultado.sMensajeError;
return false;
}
nValorNecesario = oMatrizResultado.bReqCPD ? 1 : 0;
wsScoringBuro.ELDocumento oDocumento = new wsScoringBuro.ELDocumento();
oDocumento.nTipoDoc = nTipoDocumento.ToString();
oDocumento.sNroDoc = cDocumento;
oBuroIP.oDocumentoTitual = oDocumento;
ELScoreDemograficoResultado oScoreDemoRes = new ELScoreDemograficoResultado();
wsScoreDemografico.ELDocumento oMCDocumento = new wsScoreDemografico.ELDocumento();
oMCDocumento.nTipoDoc = nTipoDocumento.ToString();
oMCDocumento.sNroDoc = cDocumento;
nMora = oScoreDemografo.DevuelveMoraComercial(oMCDocumento);
oBuroIP.nGarantia = "0";
oBuroIP.nMoraComercial = nMora.ToString();
oBuroIP.nScore = nScoreBuro.ToString();
oBuroIP.nCuotaUtilizada = oSBCuotaUtilizadaResultado.nCuotaUtilizada.ToString();
oBuroIP.nIngresoPredecido = nIngFinal3.ToString();
oBuroIP.nProducto = nProducto.ToString();
oBuroIP.nModalidad = nModalidad.ToString();
oBuroIP.nScoreOtros = nScoreLendo.ToString();
oBuroIP.nModalidadLaboral = nTipoDependiente;
oBuroIP.nTipoFormalidad = nTipoFormal;
oBuroIPReturn = oScoringBuro.ScoringBuro(oBuroIP);
if (oBuroIPReturn.bError == true)
{
cMensajeError = "ScoringBuro Error: " + oBuroIPReturn.sMensajeError;
return false;
}
nCuotaMaxima = oBuroIPReturn.nCuotaMaxima;
nCuotaDisp = oBuroIPReturn.nCuotaDisponible;
nPrestamo1 = oBuroIPReturn.nPrestamo1;
nPrestamo2 = oBuroIPReturn.nPrestamo2;
nPrestamo3 = oBuroIPReturn.nPrestamo3;
nPrestamo4 = oBuroIPReturn.nPrestamo4;
nPrestamoMinimo = oBuroIPReturn.nPrestamoMinimo;
nTasa = oBuroIPReturn.nTasa;
nPlazo = oBuroIPReturn.nPlazo;
nRCI = oBuroIPReturn.nRCI;
nPrestamoMax = oBuroIPReturn.nPrestamoMaximo;
nRMA = oBuroIPReturn.nRMA;
nPorGarantia = oBuroIPReturn.nPorcGarantiaAvaluo;
nPrestamoFinal = oBuroIPReturn.nPrestamo4;
cMotivoRechazado = oBuroIPReturn.sDescripcionRechazo;
ELScoreDemograficoDatos oDemoIP = new ELScoreDemograficoDatos();
ELScoreDemograficoResultado oDemoIPReturn = new ELScoreDemograficoResultado();
oDemoIP.nCondicionLaboral = nCondicion.ToString();
oDemoIP.nGenero = nGenero.ToString();
if (string.IsNullOrEmpty(cDistrito)) oDemoIP.nDepartamento = cDepartamento;
else
{
if (cDistrito.Substring(0, 2) == "15") oDemoIP.nDepartamento = cDistrito;
else
{
oDemoIP.nDepartamento = cDepartamento;
}
}
wsScoreDemografico.ELDocumento oDocumentoScoDemo = new wsScoreDemografico.ELDocumento();
oDocumentoScoDemo.nTipoDoc = nTipoDocumento.ToString();
oDocumentoScoDemo.sNroDoc = cDocumento;
oDemoIP.oDocumento = oDocumentoScoDemo;
nMora = oScoreDemografo.DevuelveMoraComercial(oDocumentoScoDemo);
oDemoIP.nIngresoSalarial = nIngFinal2.ToString();
oDemoIP.nEdad = nEdad.ToString();
oDemoIP.nMoraComercial = nMora.ToString();
oDemoIP.nEstadoCivil = nEstadoCivil.ToString();
oDemoIP.nVivienda = nVivienda.ToString();
oDemoIP.nSituacionLaboral = nCondicionSituacionLaboral.ToString();
oDemoIP.nCondicionLaboral = nCondicion.ToString();
oDemoIP.nGenero = nGenero.ToString();
oDemoIP.nDepartamento = cDistrito.Substring(0, 6);
oDemoIPReturn = oScoreDemografo.ScoreDemografico(oDemoIP);
if (oDemoIPReturn.bError == true)
{
cMensajeError = "ScoreDemografico Error: " + oDemoIPReturn.sMensajeError;
return false;
}
nSumatoria = Convert.ToDouble(oDemoIPReturn.nScoreDemografico);
nScoreDemografico = nSumatoria;
if (oBuroIPReturn.nDecicion == 1) cDecision = "APROBADO";
else if (oBuroIPReturn.nDecicion == 2) cDecision = "RECHAZADO";
else
{
cDecision = "INDECISO";
}
}
catch (Exception ex)
{
cMensajeError = "Error: " + ex.Message;
return false;
}
}
else // NO BANCARIZADO
{
try
{
ELScoreDemograficoDatos oDemoIP = new ELScoreDemograficoDatos();
ELScoreDemograficoResultado oDemoIPReturn = new ELScoreDemograficoResultado();
cTipoBanca = "NO BANCARIZADO";
oDemoIP.nCondicionLaboral = nCondicion.ToString();
oDemoIP.nGenero = nGenero.ToString();
if (string.IsNullOrEmpty(cDistrito)) oDemoIP.nDepartamento = cDepartamento;
else
{
if (cDistrito.Substring(0, 2) == "15") oDemoIP.nDepartamento = cDistrito;
else
{
oDemoIP.nDepartamento = cDepartamento;
}
}
nTipoBanca = 2;
wsScoreDemografico.ELDocumento oDocumento = new wsScoreDemografico.ELDocumento();
oDocumento.nTipoDoc = nTipoDocumento.ToString();
oDocumento.sNroDoc = cDocumento;
oDemoIP.oDocumento = oDocumento;
nMora = oScoreDemografo.DevuelveMoraComercial(oDocumento);
oDemoIP.nIngresoSalarial = nIngFinal2.ToString();
oDemoIP.nEdad = nEdad.ToString();
oDemoIP.nMoraComercial = nMora.ToString();
oDemoIP.nEstadoCivil = nEstadoCivil.ToString();
oDemoIP.nVivienda = nVivienda.ToString();
oDemoIP.nSituacionLaboral = nCondicionSituacionLaboral.ToString();
oDemoIP.nCondicionLaboral = nCondicion.ToString();
oDemoIP.nGenero = nGenero.ToString();
oDemoIP.nDepartamento = cDistrito.Substring(0, 6);
oDemoIPReturn = oScoreDemografo.ScoreDemografico(oDemoIP);
if (oDemoIPReturn.bError == true)
{
cMensajeError = "ScoreDemografico Error: " + oDemoIPReturn.sMensajeError;
return false;
}
cXmlScoringDemo = "<GENESYS>";
if (oDemoIPReturn.oScoreItems != null)
{
for (var k = 0; k <= oDemoIPReturn.oScoreItems.Length - 1; k++)
{
cXmlScoringDemo = cXmlScoringDemo + "<DATA nScoreID = '" + oDemoIPReturn.oScoreItems[k].nScoreId + "' cScore = '" + oDemoIPReturn.oScoreItems[k].sScore + "' " +
"cScoreDescripcion = '" + oDemoIPReturn.oScoreItems[k].sScoreDescripcion + "' nScorePuntaje = '" + oDemoIPReturn.oScoreItems[k].nScorePuntaje + "' " +
"></DATA>";
}
}
cXmlScoringDemo = cXmlScoringDemo + "</GENESYS>";
nSumatoria = Convert.ToDouble(oDemoIPReturn.nScoreDemografico);
nScoreDemografico = nSumatoria;
}
catch (Exception ex)
{
cMensajeError = "ScoreDemografico Error: " + ex.Message;
return false;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
ELScoringDemograficoDatos oScoDemoIP = new ELScoringDemograficoDatos();
ELScoringDemograficoResultado oScoDemoIPReturn = new ELScoringDemograficoResultado();
try
{
ELCPDMatrizDatos oMatriz = new ELCPDMatrizDatos();
ELCPDMatrizResultado oMatrizResultado = new ELCPDMatrizResultado();
oMatriz.nTipoScore = nTipoBanca.ToString();
oMatriz.nSitucionLaboral = nCondicion.ToString();
oMatriz.nCodicionSituacionlaboral = nCondicionSituacionLaboral.ToString();
oMatriz.oProducto = new wsCPD.ELProducto();
oMatriz.oProducto.nProducto = nProducto.ToString();
oMatriz.oProducto.nModalidad = nModalidad.ToString();
oMatrizResultado = oScoreCPD.DevuelveRequiereCPD(oMatriz);
if(oMatrizResultado.bError == true)
{
cMensajeError = "DevuelveRequiereCPD Error: " + oMatrizResultado.sMensajeError;
return false;
}
nValorNecesario = oMatrizResultado.bReqCPD ? 1 : 0;
wsScoringDemografico.ELDocumento oDocumento = new wsScoringDemografico.ELDocumento();
oDocumento.nTipoDoc = nTipoDocumento.ToString();
oDocumento.sNroDoc = cDocumento;
oScoDemoIP.oDocumentoTitual = oDocumento;
oScoDemoIP.nGarantia = "0";
oScoDemoIP.nMoraComercial = nMora.ToString();
oScoDemoIP.nScore = nSumatoria.ToString();
oScoDemoIP.nIngresoPredecido = nIngFinal3.ToString();
oScoDemoIP.nProducto = nProducto.ToString();
oScoDemoIP.nModalidad = nModalidad.ToString();
oScoDemoIP.nScoreOtros = nScoreLendo.ToString();
oScoDemoIP.nModalidadLaboral = nTipoDependiente;
oScoDemoIP.nTipoFormalidad = nTipoFormal;
oScoDemoIPReturn = oScoringDemografico.ScoringDemografico(oScoDemoIP);
if (oScoDemoIPReturn.bError == true)
{
cMensajeError = "ScoringDemografico Error: " + oScoDemoIPReturn.sMensajeError;
return false;
}
nCuotaMaxima = oScoDemoIPReturn.nCuotaMaxima;
nCuotaDisp = oScoDemoIPReturn.nCuotaDisponible;
nPrestamo1 = oScoDemoIPReturn.nPrestamo1;
nPrestamo2 = oScoDemoIPReturn.nPrestamo2;
nPrestamo3 = oScoDemoIPReturn.nPrestamo3;
nPrestamoFinal = oScoDemoIPReturn.nPrestamo4;
nPrestamoMinimo = oScoDemoIPReturn.nPrestamoMinimo;
nTasa = oScoDemoIPReturn.nTasa;
nPlazo = oScoDemoIPReturn.nPlazo;
nPrestamo4 = oScoDemoIPReturn.nPrestamo4;
nRCI = oScoDemoIPReturn.nRCI;
nPrestamoMax = oScoDemoIPReturn.nPrestamoMaximo;
nPorGarantia = oScoDemoIPReturn.nPorcGarantiaAvaluo;
nRMA = oScoDemoIPReturn.nRMA;
cMotivoRechazado = oScoDemoIPReturn.sDescripcionRechazo;
if (oScoDemoIPReturn.nDecicion == 1) cDecision = "APROBADO";
else if (oScoDemoIPReturn.nDecicion == 2) cDecision = "RECHAZADO";
else
{
cDecision = "INDECISO";
}
}
catch (Exception ex)
{
cMensajeError = "Error: " + ex.Message;
}
}
if (nCodPers == 0)
{
nScoreComportamiento = 0;
}
else
{
nScoreComportamiento = 0;
ELScoreComportamientoDatos oScoreComportamiento = new ELScoreComportamientoDatos();
ELScoreComportamientoResultado oScoreComportamientoResultado = new ELScoreComportamientoResultado();
try
{
oScoreComportamiento.nCodPers = nCodPers.ToString();
oScoreComportamiento.nProducto = nProducto.ToString();
oScoreComportamientoResultado = oComportamiento.DevuelveScoreComportamiento(oScoreComportamiento);
if (oScoreComportamientoResultado.bError == true)
{
cMensajeError = "DevuelveScoreComportamiento Error: " + oScoreComportamientoResultado.sMensajeError;
return false;
}
nScoreComportamiento = oScoreComportamientoResultado.nScoreComportamiento;
}
catch (Exception ex)
{
cMensajeError = "DevuelveScoreComportamiento Error: " + ex.Message;
return false;
}
}
ELScoringValidacionReglasDatos oSReglas = new ELScoringValidacionReglasDatos();
ELScoringValidacionReglasResultado oSReglasResultado = new ELScoringValidacionReglasResultado();
ELClientePEPResultado oSReglaPEP = new ELClientePEPResultado();
try
{
wsScoringValidacionReglas.ELDocumento oDocumentoValReglas = new wsScoringValidacionReglas.ELDocumento();
wsScoringValidacionReglas.ELProducto oProductoValReglas = new wsScoringValidacionReglas.ELProducto();
oDocumentoValReglas.nTipoDoc = "1";
oDocumentoValReglas.sNroDoc = cDocumento;
oSReglas.oDocumento = oDocumentoValReglas;
oSReglaPEP = oReglas.ValidaClientePEP(oDocumentoValReglas);
if(oSReglaPEP.oError.bError == true)
{
cMensajeError = "ValidaClientePEP Error: " + oSReglaPEP.oError.sMensajeError;
return false;
}
nClientePEP = oSReglaPEP.bEsPEP == true ? 1 : 0;
nPEP = nClientePEP;
oProductoValReglas.nModalidad = nModalidad.ToString();
oProductoValReglas.nProducto = nProducto.ToString();
oSReglas.oProducto = oProductoValReglas;
oSReglas.nTipoScoring = (cTipoBanca.ToUpper().Equals("NO BANCARIZADO") ? 2 : 1).ToString();
oSReglasResultado = oReglas.ScoringValidacionReglasRechazo(oSReglas);
if (oSReglasResultado.bError == true)
{
cMensajeError = "ScoringValidacionReglasRechazo Error: " + oSReglasResultado.sMensajeError;
return false;
}
nIdRechazado = oSReglasResultado.nIdRechazo;
cDecisionReglas = oSReglasResultado.sDescRechazo;
if (oSReglasResultado.nIdRechazo != 0) cDecision = "RECHAZADO";
else
{
if (cDecision == "INDECISO") cDecision = "INDECISO";
else
{
if (cDecision != "APROBADO") cDecision = "RECHAZADO";
}
}
}
catch (Exception ex)
{
cMensajeError = "ScoringValidacionReglasRechazo Error: " + ex.Message;
return false;
}
if (cDecision == "RECHAZADO") nRechazado = 1;
cXmlScoringDatos = devuelveXMLDatos(cDocumento);
return true;
}
/// <summary>
/// Queries the pre-approval service for a document/product combination.
/// On success, writes "decision|scoringType|maxLoan|minLoan" into
/// <paramref name="cResultado"/>; on failure, writes the reason into
/// <paramref name="cMensajeError"/> and returns false.
/// </summary>
/// <param name="cDocumento">Document number (tipoDoc is fixed to "1").</param>
/// <param name="nProducto">Product code.</param>
/// <param name="nModalidad">Product modality code.</param>
/// <param name="cResultado">Output: pipe-separated pre-approval result.</param>
/// <param name="cMensajeError">Output: error description when the call fails.</param>
/// <returns>true when the service call succeeded; false otherwise.</returns>
public bool preEvaluacion(string cDocumento, int nProducto, int nModalidad, ref string cResultado, ref string cMensajeError)
{
    try
    {
        // Build the request objects for the pre-approval web service.
        wsPreAprobacion.ELDocumento oDocumento = new wsPreAprobacion.ELDocumento
        {
            sNroDoc = cDocumento,
            nTipoDoc = "1"
        };
        wsPreAprobacion.ELProducto oProducto = new wsPreAprobacion.ELProducto
        {
            nProducto = nProducto.ToString(),
            nModalidad = nModalidad.ToString()
        };
        ELPreAprobacionResultado oResultado = oPreAprobacion.DevuelvePreAprobacion(oDocumento, oProducto);
        if (oResultado.bError)
        {
            cMensajeError = "DevuelvePreAprobacion Error: " + oResultado.sMensajeError;
            return false;
        }
        // Pack the four result fields into a single pipe-delimited string.
        cResultado = oResultado.cDecision + "|" + oResultado.cTipoScoring + "|" + oResultado.nPrestamoMaximo + "|" + oResultado.nPrestamoMinimo;
    }
    catch (Exception ex)
    {
        cMensajeError = "DevuelvePreAprobacion Error: " + ex.Message;
        return false;
    }
    return true;
}
}
}<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using TiboxWebApi.Models;
namespace TiboxWebApi.Repository.Interfaces
{
/// <summary>
/// Repository contract for persisting application error records.
/// Extends the generic <c>IRepository&lt;Error&gt;</c> for the <c>Error</c> entity.
/// </summary>
public interface IErrorRepository: IRepository<Error>
{
    /// <summary>
    /// Inserts an error record.
    /// </summary>
    /// <param name="Controlador">Name of the controller/component where the error occurred.</param>
    /// <param name="cError">Error message text.</param>
    /// <returns>Result code of the insert; exact semantics are defined by the implementation — confirm against callers.</returns>
    int InsertaError(string Controlador, string cError);
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace TiboxWebApi.Models
{
/// <summary>
/// Data-transfer object carrying a person's identity, contact, address,
/// employment and product-request fields for the registration endpoints.
/// Property names follow the project's Spanish Hungarian convention
/// (c = string text, n = numeric code, d = date, b = flag).
/// NOTE(review): several "n"-prefixed members are declared as string —
/// presumably numeric codes serialized as text; confirm against callers.
/// </summary>
public class Persona
{
    // --- Identity ---
    public string cNombres { get; set; }
    public string cApePat { get; set; }
    public string cApeMat { get; set; }
    public int nTipoDoc { get; set; }
    public string nNroDoc { get; set; }
    // --- Contact ---
    public string cCelular { get; set; }
    public string cEmail { get; set; }
    public string cConfirmaEmail { get; set; }
    public string cCodZona { get; set; }
    public string nTipoResidencia { get; set; }
    public string nSexo { get; set; }
    public string cTelefono { get; set; }
    public string dFechaNacimiento { get; set; }
    public string nEstadoCivil { get; set; }
    // --- Home address (up to three typed address components) ---
    public string nDirTipo1 { get; set; }
    public string nDirTipo2 { get; set; }
    public string nDirTipo3 { get; set; }
    public string cDirValor1 { get; set; }
    public string cDirValor2 { get; set; }
    public string cDirValor3 { get; set; }
    public int nCodAge { get; set; }
    // --- Employment / occupation ---
    public string nCUUI { get; set; }
    public string nSitLab { get; set; }
    public string nProfes { get; set; }
    public string nTipoEmp { get; set; }
    // --- Spouse ---
    public string cDniConyuge { get; set; }
    public string cNomConyuge { get; set; }
    public string cApeConyuge { get; set; }
    public string cRuc { get; set; }
    public double nIngresoDeclado { get; set; }
    public string cDirEmpleo { get; set; }
    public string cTelfEmpleo { get; set; }
    public string dFecIngrLab { get; set; }
    public string bCargoPublico { get; set; }
    public string cNomEmpresa { get; set; }
    public string cProfesionOtros { get; set; }
    public int nCodigoVerificador { get; set; }
    public int nCodPers { get; set; }
    // --- Home UBIGEO (department / province / district codes) ---
    public string cDepartamento { get; set; }
    public string cProvincia { get; set; }
    public string cDistrito { get; set; }
    // --- Requested product ---
    public int nProducto { get; set; }
    public int nModalidad { get; set; }
    // --- Notification payloads sent on registration ---
    public string cTextoSms { get; set; }
    public string cTextoEmail { get; set; }
    public string cTituloEmail { get; set; }
    public int nProd { get; set; }
    public int nSubProd { get; set; }
    public string cLenddo { get; set; }
    public int nRespuesta { get; set; }
    // --- Employer address ---
    public string cDepartamentoEmpleo { get; set; }
    public string cProvinciaEmpleo { get; set; }
    public string cDistritoEmpleo { get; set; }
    public string nDirTipo1Empleo{ get; set; }
    public string nDirTipo2Empleo { get; set; }
    public string nDirTipo3Empleo { get; set; }
    public string cDirValor1Empleo { get; set; }
    public string cDirValor2Empleo { get; set; }
    public string cDirValor3Empleo { get; set; }
    public string cCodZonaEmpleo { get; set; }
    // Flag indicating whether the user must change the password — TODO confirm semantics.
    public int changePass { get; set; }
}
}
<file_sep>using FluentValidation;
using System.Net;
using System.Web.Http;
using TiboxWebApi.Models;
using TiboxWebApi.UnitOfWork;
using TiboxWebApi.WebApi.Utils;
namespace TiboxWebApi.WebApi.Controllers
{
[RoutePrefix("Persona")]
[Authorize]
/// <summary>
/// Web API controller exposing person (client) registration, update and lookup
/// endpoints under the "Persona" route prefix.
/// </summary>
public class PersonaController : BaseController
{
    private readonly AbstractValidator<Persona> _validator;
    // Assigned once in the constructor and never replaced, so marked readonly.
    private readonly Sms _sms;
    private readonly Email _email;

    public PersonaController(IUnitOfWork unit, AbstractValidator<Persona> validator) : base(unit)
    {
        _validator = validator;
        _sms = new Sms();
        _email = new Email();
    }

    /// <summary>
    /// Returns the person's data for a given document number, e-mail and person code.
    /// Responds 400 on a missing body or missing required fields.
    /// </summary>
    [Route("Datos")]
    [HttpPost]
    public IHttpActionResult Datos(Persona persona)
    {
        // Guard against a null body (model binder can pass null) so the caller
        // gets a 400 instead of a 500 from a NullReferenceException.
        if (persona == null) return BadRequest();
        if (persona.nCodPers == 0) return BadRequest();
        if (string.IsNullOrEmpty(persona.nNroDoc)) return BadRequest();
        if (string.IsNullOrEmpty(persona.cEmail)) return BadRequest();
        return Ok(_unit.Persona.LucasDatosPersona(persona.nNroDoc, persona.cEmail, persona.nCodPers));
    }

    /// <summary>
    /// Creates a person. On a successful insert (nCodPers &gt; 0) the person is
    /// notified via SMS and e-mail using the payload texts carried in the model.
    /// </summary>
    [Route("")]
    [HttpPost]
    public IHttpActionResult Post(Persona persona)
    {
        if (persona == null) return BadRequest();
        var result = _validator.Validate(persona);
        if (!result.IsValid) return Content(HttpStatusCode.BadRequest, result.Errors);
        var nCodPers = _unit.Persona.LucasInsPersona(persona);
        if (nCodPers > 0)
        {
            // Best-effort notifications; e-mail reports failure through the ref
            // parameter, which is intentionally ignored here (original behavior).
            _sms.enviarSMS(persona.cCelular, persona.cTextoSms);
            var cMensajeError = "";
            _email.envioEmail(persona.cEmail, persona.cTextoEmail, persona.cTituloEmail, ref cMensajeError);
        }
        return Ok(new { nCodPers });
    }

    /// <summary>Checks whether a client with the given document already exists.</summary>
    [Route("Verifica/{cDocumento}")]
    [HttpGet]
    public IHttpActionResult Verifica(string cDocumento)
    {
        if (string.IsNullOrEmpty(cDocumento)) return BadRequest();
        return Ok(_unit.Persona.LucasVerificaClienteExiste(cDocumento));
    }

    /// <summary>
    /// Updates a person. Exposed as POST "Persona/Put" (kept for client compatibility).
    /// </summary>
    [Route("Put")]
    [HttpPost]
    public IHttpActionResult Put(Persona persona)
    {
        if (persona == null) return BadRequest();
        var result = _validator.Validate(persona);
        if (!result.IsValid) return Content(HttpStatusCode.BadRequest, result.Errors);
        var nCodPers = _unit.Persona.LucasActPersona(persona);
        return Ok(new { nCodPers });
    }

    /// <summary>Registers the person's data-processing consent ("tratamiento de datos").</summary>
    [Route("Tratamiento")]
    [HttpPost]
    public IHttpActionResult Tratamiento(Tratamiento tratamiento)
    {
        if (tratamiento == null) return BadRequest();
        var nCodSolicitud = _unit.Persona.LucasTratamientoDatos(tratamiento);
        return Ok(new { nCodSolicitud });
    }

    /// <summary>Validates that the given cell-phone number belongs to the document holder.</summary>
    [Route("Celular/{cDocumento}/{cCelular}")]
    [HttpGet]
    public IHttpActionResult Celular(string cDocumento, string cCelular)
    {
        return Ok(_unit.Persona.LucasValidaPersonaCelular(cDocumento, cCelular));
    }
}
}
<file_sep>using System.Web.Http;
using TiboxWebApi.Models;
using TiboxWebApi.UnitOfWork;
namespace TiboxWebApi.WebApi.Controllers
{
[RoutePrefix("Flujo")]
[Authorize]
/// <summary>
/// Web API controller for registering, retrieving and deleting "flujo"
/// (application flow / wizard state) records.
/// </summary>
public class FlujoController : BaseController
{
    public FlujoController(IUnitOfWork unit) : base(unit)
    {
    }

    /// <summary>
    /// Registers a flow. Responds 400 when the body is missing or the
    /// repository reports a zero result.
    /// </summary>
    [Route("")]
    [HttpPost]
    public IHttpActionResult Post(FlujoMaestro flujo)
    {
        if (flujo == null) return BadRequest();
        var nRegistrado = _unit.FlujoMaestro.LucasRegistraFlujo(flujo);
        return nRegistrado == 0
            ? (IHttpActionResult)BadRequest()
            : Ok(new { bResultado = nRegistrado });
    }

    /// <summary>Retrieves a flow by its id; 400 when the id is zero.</summary>
    [Route("{id}")]
    [HttpGet]
    public IHttpActionResult DevuelveFlujo(int id)
    {
        return id == 0
            ? (IHttpActionResult)BadRequest()
            : Ok(_unit.FlujoMaestro.LucasRecuperaFlujo(id));
    }

    /// <summary>Retrieves the request (solicitud) data associated with a flow id.</summary>
    [Route("Solicitud/{id}")]
    [HttpGet]
    public IHttpActionResult DevuelveDatosSolicitud(int id)
    {
        return id == 0
            ? (IHttpActionResult)BadRequest()
            : Ok(_unit.FlujoMaestro.LucasRecuperaSolicitud(id));
    }

    /// <summary>Returns the wizard state for a flow id; 400 when the id is zero.</summary>
    [Route("Wizard/{id}")]
    [HttpGet]
    public IHttpActionResult Wizard(int id)
    {
        return id == 0
            ? (IHttpActionResult)BadRequest()
            : Ok(_unit.FlujoMaestro.ObtieneWizard(id));
    }

    /// <summary>Deletes the given flow and returns the repository result.</summary>
    [Route("Eliminar")]
    [HttpPost]
    public IHttpActionResult Eliminar(FlujoMaestro flujo)
    {
        var resultado = _unit.FlujoMaestro.EliminaFlujo(flujo);
        return Ok(resultado);
    }
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.Data.SqlClient;
using System.Linq;
using System.Security.Cryptography;
using System.Text;
using System.Threading.Tasks;
using TiboxWebApi.Models;
namespace TiboxWebApi.Repository
{
public class Utiles
{
/// <summary>
/// Encrypts <paramref name="texto"/> with TripleDES (ECB mode, PKCS7 padding)
/// using an MD5-derived key, and returns the result Base64-encoded.
/// SECURITY NOTE(review): the hard-coded key, MD5 key derivation and ECB mode
/// are all cryptographically weak. They are kept unchanged here for
/// compatibility with data already encrypted by this routine — consider
/// migrating to AES with a proper KDF and an authenticated mode.
/// </summary>
/// <param name="texto">Plain text to encrypt (UTF-8 encoded before encryption).</param>
/// <returns>Base64-encoded ciphertext.</returns>
/// <exception cref="System.Security.Cryptography.CryptographicException">
/// Propagated unchanged on cryptographic failure. (The previous version
/// re-wrapped every exception as a bare Exception, discarding the original
/// type and stack trace; exceptions now propagate as-is.)
/// </exception>
public string Encriptar(string texto)
{
    string key = "WebApiTiboxOnline"; // hard-coded encryption key (see security note)
    byte[] dataToCipher = UTF8Encoding.UTF8.GetBytes(texto);

    // Derive a 16-byte TripleDES key from the pass-phrase via MD5 (legacy scheme).
    // 'using' guarantees disposal even on the exception path, which the old
    // explicit .Clear() calls did not.
    byte[] keyArray;
    using (MD5CryptoServiceProvider hashmd5 = new MD5CryptoServiceProvider())
    {
        keyArray = hashmd5.ComputeHash(UTF8Encoding.UTF8.GetBytes(key));
    }

    using (TripleDESCryptoServiceProvider tdes = new TripleDESCryptoServiceProvider())
    {
        tdes.Key = keyArray;
        tdes.Mode = CipherMode.ECB;
        tdes.Padding = PaddingMode.PKCS7;
        using (ICryptoTransform cTransform = tdes.CreateEncryptor())
        {
            byte[] cipherBytes = cTransform.TransformFinalBlock(dataToCipher, 0, dataToCipher.Length);
            return Convert.ToBase64String(cipherBytes, 0, cipherBytes.Length);
        }
    }
}
public List<Calendario> GeneraCalendario(double nPrestamo, int nCuotas, int nPeriodicidad, double nTasa, string dFechaSistema, double nSeguro)
{
var valor = false;
double nGastoSegDeg = nSeguro;
double nMonto = nPrestamo;
double nMontoCuotaBK = 0.00;
double nValorInc = 0.00;
double nIGV = 0;
int pnFinCuotaGracia = 0;
int pnTipoGracia = 1;
bool bOK = false;
object MatFechas = new object();
string[,] matDatos = new string[nCuotas + 1, 8];
object[] MatFechas1 = new object[nCuotas];
string[,] MatFechas2 = new string[nCuotas + 1, 2];
DateTime fechaDesem = DateTime.Parse(dFechaSistema);
List<Calendario> calendarioList = new List<Calendario>();
DateTime dFechaPrueba = fechaDesem;
if (!valor)
{
var nNumSub = 1;
MatFechas = CalendarioFechasCuotaFija(nCuotas, dFechaPrueba, nPeriodicidad, nNumSub);
}
var cont = 0;
var nInteres = 0.00;
var nTasaCom = nTasa;
var nMontoCuota = Math.Round(CuotaPeriodoFijo(nTasaCom, 0, nGastoSegDeg, nCuotas, nPrestamo, 30), 2);
List<double> lstInteres = new List<double>();
var dFecha = fechaDesem;
double MontoCuotaReturn = 0.00;
double nMontoNegativo = 0.00;
double nMontoDiferenciaNeto = 0.00;
double nPendIntComp = 0.00;
double nPendComision = 0.00;
double nMontoComisionCalculado = 0.00;
double nMontoNetoICCOM = 0.00;
double pnMontoComision = 0;
//nCuotas = nCuotas + 1;
//y,0 = fechapago;
//y,1 = cuota;
//y,2 = montocuota,;
//y,3 = capital;
//y,4 = interes;
//y,5 = gastos;
//y,6 = saldos;
//y,7 = desgravamen
do
{
// setear los valores
nMonto = nPrestamo;
nMontoCuotaBK = nMontoCuota;
dFecha = dFechaPrueba;
for (int k = 1; k <= nCuotas; k++)
{
nMontoCuota = nMontoCuotaBK;
string[,] MatFe = (string[,])MatFechas;
var dFechaVenc = MatFe[k, 1];
matDatos[k, 0] = MatFe[k, 1];
var dias = diasrestantes(DateTime.Parse(dFecha.ToString()), DateTime.Parse(dFechaVenc.ToString()));
matDatos[k, 1] = Convert.ToString(k);
nInteres = Math.Pow((1 + nTasaCom / 100.00), (dias / 30.00)) - 1;
nInteres = Math.Round(Convert.ToDouble((nInteres * nMonto)), 2);
matDatos[k, 4] = Convert.ToDouble(nInteres).ToString();
nMontoComisionCalculado = 0;
if (pnTipoGracia != 4 || k > pnFinCuotaGracia)
{
if (pnMontoComision > 0)
{
nMontoComisionCalculado = Math.Round(nMonto * Math.Pow((1 + pnMontoComision / 100.00), (diasrestantes(DateTime.Parse(dFecha.ToString()), DateTime.Parse(MatFe[k, 1])) / 30.00)) - 1, 2);
}
}
else
{
//matDatos[k, 5] = "0.00";
}
nMontoNegativo = Convert.ToDouble(nMontoCuota - Convert.ToDouble(matDatos[k, 4]));
if (nMontoNegativo < 0)
{
nMontoDiferenciaNeto = Convert.ToDouble(Math.Round(Math.Abs(nMontoNegativo) / (1 + (nIGV + 100.00)), 2));
if (Convert.ToDouble(matDatos[k, 4]) > nMontoDiferenciaNeto)
{
nPendIntComp = Math.Round(Convert.ToDouble(nPendIntComp), 2) + nMontoDiferenciaNeto;
matDatos[k, 4] = Math.Round(Convert.ToDouble(matDatos[k, 4]) - nMontoDiferenciaNeto, 2).ToString();
}
else if (nMontoComisionCalculado > nMontoDiferenciaNeto)
{
nPendComision = nPendComision + nMontoDiferenciaNeto;
nMontoComisionCalculado = Math.Round(nMontoComisionCalculado - nMontoDiferenciaNeto, 2);
}
else
{
nPendIntComp = nPendIntComp + Convert.ToDouble(matDatos[k, 4]);
matDatos[k, 4] = "0.00";
nPendComision = nPendComision + (nMontoDiferenciaNeto - nPendIntComp);
nMontoComisionCalculado = Math.Round(nMontoComisionCalculado - (nMontoDiferenciaNeto - nPendIntComp), 2);
}
}
else
{
nMontoDiferenciaNeto = Convert.ToDouble(Math.Round(nMontoNegativo / (1 + (nIGV / 100.00)), 2));
if (nPendIntComp > 0)
{
if (nPendIntComp > nMontoDiferenciaNeto)
{
nMontoNetoICCOM = nMontoDiferenciaNeto;
matDatos[k, 4] = (Convert.ToDouble(matDatos[k, 4]) + nMontoNetoICCOM).ToString();
nPendIntComp = nPendIntComp - nMontoNetoICCOM;
nMontoNegativo = 0;
}
else
{
nMontoNetoICCOM = nPendIntComp;
nMontoNegativo = nMontoNegativo - nPendIntComp;
matDatos[k, 4] = (Convert.ToDouble(matDatos[k, 4]) + nMontoNetoICCOM).ToString();
nPendIntComp = 0;
}
}
if (nPendComision > 0)
{
if (nPendComision > nMontoDiferenciaNeto)
{
nMontoNetoICCOM = nMontoDiferenciaNeto;
nMontoComisionCalculado = nMontoComisionCalculado + nMontoNetoICCOM;
nPendComision = nPendComision - nMontoNetoICCOM;
}
else
{
nMontoNetoICCOM = nPendComision;
nMontoComisionCalculado = nMontoComisionCalculado + nMontoNetoICCOM;
nPendComision = 0;
}
}
}
if (k == 1)
{
matDatos[k, 7] = Math.Round(Convert.ToDouble(nPrestamo * nGastoSegDeg / 100.00), 2).ToString();
}
else
{
matDatos[k, 7] = Math.Round((Convert.ToDouble(matDatos[k - 1, 6]) * nGastoSegDeg / 100.00), 2).ToString();
}
if (pnTipoGracia != 4 || k > pnFinCuotaGracia)
{
if (k != nCuotas)
{
matDatos[k, 3] = Math.Round(nMontoCuota - (Convert.ToDouble(matDatos[k, 4]) + Convert.ToDouble(matDatos[k, 7])), 2).ToString();
if (Convert.ToDouble(matDatos[k, 3]) > 0 && Convert.ToDouble(matDatos[k, 3]) <= 0.05)
{
matDatos[k, 3] = "0.00";
}
if (Convert.ToDouble(matDatos[k, 3]) >= -0.05 && Convert.ToDouble(matDatos[k, 3]) < 0)
{
matDatos[k, 3] = "0.00";
}
}
else
{
if (k == 1)
{
matDatos[k, 3] = Math.Round(nMonto, 2).ToString();
}
else
{
matDatos[k, 3] = Math.Round(Convert.ToDouble(matDatos[k - 1, 6]), 2).ToString();
}
if (nPendComision > 0 || nPendIntComp > 0)
{
matDatos[k, 4] = Math.Round(Convert.ToDouble(matDatos[k, 4]) + nPendIntComp, 2).ToString();
}
}
}
else
{
matDatos[k, 3] = "0.00";
}
matDatos[k, 2] = Math.Round((Convert.ToDouble(matDatos[k, 3]) + Convert.ToDouble(matDatos[k, 4]) + Convert.ToDouble(matDatos[k, 7])), 2).ToString();
nMonto = Math.Round(nMonto - Convert.ToDouble(matDatos[k, 3]), 2);
matDatos[k, 6] = Math.Round(nMonto, 2).ToString(); // SALDO, 6
dFecha = Convert.ToDateTime(MatFe[k, 1]);
}
var ultimaFila = 0;
for (int l = 0; l < matDatos.Rank; l++)
{
ultimaFila = matDatos.GetUpperBound(l);
break;
}
if (int.Parse(ultimaFila.ToString()) > 1)
{
if (Convert.ToDouble(matDatos[ultimaFila, 2]) - Convert.ToDouble(matDatos[ultimaFila - 1, 2]) >= -0.01 &&
Convert.ToDouble(matDatos[ultimaFila, 2]) - Convert.ToDouble(matDatos[ultimaFila - 1, 2]) <= 0.0)
{
MontoCuotaReturn = Convert.ToDouble(matDatos[ultimaFila - 1, 3]);
bOK = true;
}
else
{
if (Math.Abs(Convert.ToDouble(matDatos[ultimaFila, 2])) - Convert.ToDouble(matDatos[ultimaFila - 1, 2]) >= 250)
{
nValorInc = 0.3;
}
else if (Math.Abs(Convert.ToDouble(matDatos[ultimaFila, 2])) - Convert.ToDouble(matDatos[ultimaFila - 1, 2]) >= 210)
{
nValorInc = 0.28;
}
else if (Math.Abs(Convert.ToDouble(matDatos[ultimaFila, 2])) - Convert.ToDouble(matDatos[ultimaFila - 1, 2]) >= 180)
{
nValorInc = 0.27;
}
else if (Math.Abs(Convert.ToDouble(matDatos[ultimaFila, 2])) - Convert.ToDouble(matDatos[ultimaFila - 1, 2]) >= 120)
{
nValorInc = 0.26;
}
else if (Math.Abs(Convert.ToDouble(matDatos[ultimaFila, 2])) - Convert.ToDouble(matDatos[ultimaFila - 1, 2]) >= 100)
{
nValorInc = 0.25;
}
else if (Math.Abs(Convert.ToDouble(matDatos[ultimaFila, 2])) - Convert.ToDouble(matDatos[ultimaFila - 1, 2]) >= 90)
{
nValorInc = 0.24;
}
else if (Math.Abs(Convert.ToDouble(matDatos[ultimaFila, 2])) - Convert.ToDouble(matDatos[ultimaFila - 1, 2]) >= 80)
{
nValorInc = 0.23;
}
else if (Math.Abs(Convert.ToDouble(matDatos[ultimaFila, 2])) - Convert.ToDouble(matDatos[ultimaFila - 1, 2]) >= 75)
{
nValorInc = 0.22;
}
else if (Math.Abs(Convert.ToDouble(matDatos[ultimaFila, 2])) - Convert.ToDouble(matDatos[ultimaFila - 1, 2]) >= 70)
{
nValorInc = 0.21;
}
else if (Math.Abs(Convert.ToDouble(matDatos[ultimaFila, 2])) - Convert.ToDouble(matDatos[ultimaFila - 1, 2]) >= 69)
{
nValorInc = 0.2;
}
else if (Math.Abs(Convert.ToDouble(matDatos[ultimaFila, 2])) - Convert.ToDouble(matDatos[ultimaFila - 1, 2]) >= 68)
{
nValorInc = 0.19;
}
else if (Math.Abs(Convert.ToDouble(matDatos[ultimaFila, 2])) - Convert.ToDouble(matDatos[ultimaFila - 1, 2]) >= 67)
{
nValorInc = 0.18;
}
else if (Math.Abs(Convert.ToDouble(matDatos[ultimaFila, 2])) - Convert.ToDouble(matDatos[ultimaFila - 1, 2]) >= 65)
{
nValorInc = 0.17;
}
else if (Math.Abs(Convert.ToDouble(matDatos[ultimaFila, 2])) - Convert.ToDouble(matDatos[ultimaFila - 1, 2]) >= 60)
{
nValorInc = 0.16;
}
else if (Math.Abs(Convert.ToDouble(matDatos[ultimaFila, 2])) - Convert.ToDouble(matDatos[ultimaFila - 1, 2]) >= 50)
{
nValorInc = 0.15;
}
else if (Math.Abs(Convert.ToDouble(matDatos[ultimaFila, 2])) - Convert.ToDouble(matDatos[ultimaFila - 1, 2]) >= 40)
{
nValorInc = 0.14;
}
else if (Math.Abs(Convert.ToDouble(matDatos[ultimaFila, 2])) - Convert.ToDouble(matDatos[ultimaFila - 1, 2]) >= 30)
{
nValorInc = 0.13;
}
else if (Math.Abs(Convert.ToDouble(matDatos[ultimaFila, 2])) - Convert.ToDouble(matDatos[ultimaFila - 1, 2]) >= 20)
{
nValorInc = 0.12;
}
else if (Math.Abs(Convert.ToDouble(matDatos[ultimaFila, 2])) - Convert.ToDouble(matDatos[ultimaFila - 1, 2]) >= 10)
{
nValorInc = 0.11;
}
else if (Math.Abs(Convert.ToDouble(matDatos[ultimaFila, 2])) - Convert.ToDouble(matDatos[ultimaFila - 1, 2]) >= 5)
{
nValorInc = 0.1;
}
else if (Math.Abs(Convert.ToDouble(matDatos[ultimaFila, 2])) - Convert.ToDouble(matDatos[ultimaFila - 1, 2]) >= 1)
{
nValorInc = 0.01;
}
else
{
nValorInc = 0.01;
}
if (Convert.ToDouble(matDatos[ultimaFila, 2]) - Convert.ToDouble(matDatos[ultimaFila - 1, 2]) <= -0.01)
{
nMontoCuota = Math.Round(Convert.ToDouble(nMontoCuota - nValorInc), 2);
}
else if (Convert.ToDouble(matDatos[ultimaFila, 2]) - Convert.ToDouble(matDatos[ultimaFila - 1, 2]) > 0.0)
{
nMontoCuota = Math.Round(Convert.ToDouble(nMontoCuota + nValorInc), 2);
}
cont += 1;
if (cont > 2500)
{
if (Convert.ToDouble(matDatos[ultimaFila, 2]) - Convert.ToDouble(matDatos[ultimaFila - 1, 2]) < 0 || cont == 2502) //1502
{
MontoCuotaReturn = Convert.ToDouble(matDatos[ultimaFila - 1, 3]);
bOK = true;
}
}
}
}
else
{
MontoCuotaReturn = Convert.ToDouble(matDatos[1, 2]);
bOK = true;
}
var cantidad1 = matDatos.Length - 1;
} while (!bOK);
double nMontoRef = 0;
for (int m = 0; m < nCuotas * 1; m++)
{
nMontoRef += Math.Round(Convert.ToDouble(matDatos[m, 2]), 2);
}
for (int y = 1; y <= nCuotas; y++)
{
Calendario obj = new Calendario();
obj.cFechaPago = Convert.ToString(DateTime.Parse(matDatos[y, 0]).ToShortDateString());
obj.FechaPago = DateTime.Parse(matDatos[y, 0]);
obj.Cuota = int.Parse(matDatos[y, 1].ToString());
obj.MontoCuota = matDatos[y, 2].ToString();
var datoCapital = matDatos[y, 3] == null || matDatos[y, 3] == "null" ? 0.00 : double.Parse(matDatos[y, 3].ToString());
obj.Capital = Math.Round(datoCapital, 2);
obj.Interes = double.Parse(matDatos[y, 4].ToString());
obj.Saldos = double.Parse(matDatos[y, 6].ToString());
calendarioList.Add(obj);
}
return calendarioList;
}
/// <summary>
/// Whole days between two dates (negative when dFechaVenc precedes dFecInicio).
/// </summary>
/// <param name="dFecInicio">Start date.</param>
/// <param name="dFechaVenc">Due / end date.</param>
/// <returns>Day count truncated toward zero, same as (int)TotalDays.</returns>
public int diasrestantes(DateTime dFecInicio, DateTime dFechaVenc)
{
    // TimeSpan.Days truncates toward zero, exactly like the old (int)TotalDays cast.
    return (dFechaVenc - dFecInicio).Days;
}
/// <summary>
/// Builds the due-date matrix for a fixed-installment schedule: one row per
/// installment, column 1 holding the date as a string. Dates falling on a
/// weekend are pushed to the following Monday.
/// </summary>
/// <param name="nCuotas">Number of installments (rows 1..nCuotas are filled).</param>
/// <param name="fechaDesem">Disbursement date; the first due date is nPeriodicidad days after it.</param>
/// <param name="nPeriodicidad">Days between installments.</param>
/// <param name="nNumSub">Unused; kept for interface compatibility (it only fed a dead allocation in the original).</param>
/// <returns>A string[nCuotas + 1, 3] matrix, index 0 row left empty, as callers expect.</returns>
private object CalendarioFechasCuotaFija(int nCuotas, DateTime fechaDesem, int nPeriodicidad, int nNumSub)
{
    // Fix: the original allocated MatFechas twice (the [n+1,2] allocation was
    // immediately replaced by a [n+1,3] one) and allocated MatFechasEsp that
    // was never read. Only the final [n+1,3] matrix is observable.
    string[,] MatFechas = new string[nCuotas + 1, 3];
    DateTime pdFecha = fechaDesem;
    for (int i = 1; i <= nCuotas; i++)
    {
        pdFecha = pdFecha.AddDays(nPeriodicidad);
        // Roll weekend due dates forward to Monday.
        if (pdFecha.DayOfWeek == DayOfWeek.Sunday) pdFecha = pdFecha.AddDays(1);
        if (pdFecha.DayOfWeek == DayOfWeek.Saturday) pdFecha = pdFecha.AddDays(2);
        MatFechas[i, 1] = pdFecha.ToString();
    }
    return MatFechas;
}
/// <summary>
/// Zero-based index of <paramref name="dt"/>'s weekday counted from
/// <paramref name="startOfWeek"/> (e.g. Monday with startOfWeek Sunday → 1).
/// </summary>
public static int Weekday(DateTime dt, DayOfWeek startOfWeek)
{
    int offset = (int)dt.DayOfWeek - (int)startOfWeek;
    // Normalize the [-6, 6] difference into [0, 6].
    return ((offset % 7) + 7) % 7;
}
/// <summary>
/// Computes the fixed installment (standard annuity formula) for a loan of
/// <paramref name="vMonto"/> over <paramref name="pnCuotas"/> periods of
/// <paramref name="pnPlazo"/> days, combining compensatory interest,
/// commission and insurance rates (given as percentages per 30 days).
/// </summary>
/// <param name="pTasaComp">Compensatory interest rate, % per 30 days.</param>
/// <param name="pTasaComision">Commission rate, % per 30 days.</param>
/// <param name="pnTasaSeguro">Insurance rate, %, applied multiplicatively to the combined rate.</param>
/// <param name="pnCuotas">Number of installments.</param>
/// <param name="vMonto">Principal amount.</param>
/// <param name="pnPlazo">Period length in days.</param>
/// <returns>Installment amount rounded to 2 decimals.</returns>
public double CuotaPeriodoFijo(double pTasaComp, double pTasaComision, double pnTasaSeguro, double pnCuotas, double vMonto, double pnPlazo)
{
    double CuotaPeriodoFijo = 0;
    double Pot1 = 0, nTasaTmp = 0, nTasaComiTmp = 0;
    // Convert the 30-day rates to the effective rate for a pnPlazo-day period.
    nTasaTmp = ((Math.Pow((1 + (pTasaComp / 100)), (pnPlazo / 30))) - 1);
    nTasaComiTmp = ((Math.Pow((1 + (pTasaComision / 100)), (pnPlazo / 30))) - 1);
    // NOTE(review): rounds the commission rate to 2 decimals via a string
    // round-trip. Both Format and Parse use the current culture, so this
    // assumes a '.' decimal separator — confirm, otherwise Parse misreads.
    nTasaComiTmp = double.Parse(string.Format("{0:0.##}", nTasaComiTmp));
    nTasaTmp = nTasaTmp + nTasaComiTmp;
    // Insurance is a multiplicative load on the combined rate.
    nTasaTmp = (nTasaTmp * (1 + (pnTasaSeguro / 100)));
    Pot1 = Math.Pow((1 + nTasaTmp), pnCuotas);
    // Annuity: cuota = M * i*(1+i)^n / ((1+i)^n - 1).
    CuotaPeriodoFijo = ((Pot1 * nTasaTmp) / (Pot1 - 1)) * vMonto;
    CuotaPeriodoFijo = Math.Round(CuotaPeriodoFijo, 2);
    return CuotaPeriodoFijo;
}
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.Data;
using System.Data.SqlClient;
using Dapper;
using TiboxWebApi.Models;
using TiboxWebApi.Repository.Interfaces;
namespace TiboxWebApi.Repository.Repository
{
/// <summary>
/// Data access for credit-flow (FlujoMaestro) operations. Every method calls
/// a stored procedure through Dapper on a fresh SqlConnection.
/// </summary>
public class FlujoRepository : BaseRepository<FlujoMaestro>, IFlujoRepository
{
    /// <summary>
    /// Annuls a credit flow (WebOnline_insAnularCreditoFlujo_SP).
    /// </summary>
    /// <param name="flujo">Carries the master flow id, comment and registering user.</param>
    /// <returns>The SP's @nRes output code.</returns>
    public int EliminaFlujo(FlujoMaestro flujo)
    {
        using (var connection = new SqlConnection(_connectionString))
        {
            var parameters = new DynamicParameters();
            parameters.Add("@nIdFlujoMaestro", flujo.nIdFlujoMaestro);
            parameters.Add("@cComentario", flujo.cComentario);
            parameters.Add("@cUser", flujo.cUsuReg);
            parameters.Add("@nRes", dbType: DbType.Int32, direction: ParameterDirection.Output);
            connection.Query<int>("WebOnline_insAnularCreditoFlujo_SP", parameters, commandType: CommandType.StoredProcedure);
            return parameters.Get<int>("@nRes");
        }
    }

    /// <summary>Gets the next flow step(s) for a master flow id.</summary>
    public IEnumerable<FlujoMaestro> LucasRecuperaFlujo(int nIdFlujoMaestro)
    {
        using (var connection = new SqlConnection(_connectionString))
        {
            var parameters = new DynamicParameters();
            parameters.Add("@nIdFlujoMaestro", nIdFlujoMaestro);
            return connection.Query<FlujoMaestro>("WebApi_ObtenerFLujoSiguiente_SP", parameters, commandType: CommandType.StoredProcedure);
        }
    }

    /// <summary>Gets the request (solicitud) data for a master flow id.</summary>
    public IEnumerable<FlujoMaestro> LucasRecuperaSolicitud(int nIdFlujoMaestro)
    {
        using (var connection = new SqlConnection(_connectionString))
        {
            var parameters = new DynamicParameters();
            parameters.Add("@nIdFlujoMaestro", nIdFlujoMaestro);
            return connection.Query<FlujoMaestro>("WebApi_LucasDatosSolicitud_SP",
                parameters,
                commandType: CommandType.StoredProcedure);
        }
    }

    /// <summary>
    /// Registers a flow step (WebApi_FlujoRegistra_SP).
    /// </summary>
    /// <returns>The SP's @nResultado output code.</returns>
    public int LucasRegistraFlujo(FlujoMaestro flujo)
    {
        using (var connection = new SqlConnection(_connectionString))
        {
            var parameters = new DynamicParameters();
            parameters.Add("@nProd", flujo.nProd);
            parameters.Add("@nSubProd", flujo.nSubProd);
            parameters.Add("@cNombFlujo", flujo.cNomform);
            parameters.Add("@nCodPers", flujo.nCodPers);
            parameters.Add("@cDoc", flujo.nNroDoc);
            parameters.Add("@cUsuReg", flujo.cUsuReg);
            parameters.Add("@nIdFlujo", flujo.nIdFlujo);
            parameters.Add("@nCodCred", flujo.nCodCred);
            parameters.Add("@nCodAge", flujo.nCodAge);
            parameters.Add("@nCodPersReg", flujo.nCodPersReg);
            parameters.Add("@nOrdenFlujo", flujo.nOrdenFlujo);
            // Hard-coded values used for this call path.
            parameters.Add("@nValorNecesario", 1);
            parameters.Add("@cTipoBanca", "");
            parameters.Add("@nRechazado", 0);
            parameters.Add("@nIdFlujoMaestro", flujo.nIdFlujoMaestro);
            parameters.Add("@nResultado", dbType: DbType.Int32, direction: ParameterDirection.Output);
            connection.Query<int>("WebApi_FlujoRegistra_SP", parameters, commandType: CommandType.StoredProcedure);
            var res = parameters.Get<int>("@nResultado");
            return res;
        }
    }

    /// <summary>
    /// Starts a flow in the scoring engine (WebApi_LucasIniciaFlujo_SP) inside
    /// a transaction; scoring payloads travel as XML-typed parameters.
    /// </summary>
    /// <returns>The new master flow id (@nIdFlujoMaestro output).</returns>
    public int LucasRegistraMotor(FlujoMaestro flujo)
    {
        using (var connection = new SqlConnection(_connectionString))
        {
            connection.Open();
            using (var transaction = connection.BeginTransaction())
            {
                try
                {
                    var nIdFlujoMaestro = 0;
                    var parameters = new DynamicParameters();
                    parameters.Add("@nNroDoc", flujo.nNroDoc);
                    parameters.Add("@nCodAge", flujo.nCodAge);
                    parameters.Add("@nProd", flujo.nProd);
                    parameters.Add("@nSubProd", flujo.nSubProd);
                    parameters.Add("@cNomForm", flujo.cNomform);
                    parameters.Add("@nCodCred", flujo.nCodCred);
                    parameters.Add("@cUsuReg", flujo.cUsuReg);
                    parameters.Add("@nIdFlujo", flujo.nIdFlujo);
                    parameters.Add("@nCodPersReg", flujo.nCodPersReg);
                    parameters.Add("@nOrdenFlujo", flujo.nOrdenFlujo);
                    parameters.Add("@oScoringDatos", flujo.oScoringDatos, dbType: DbType.Xml);
                    parameters.Add("@oScoringVarDemo", flujo.oScoringVarDemo, dbType: DbType.Xml);
                    parameters.Add("@oScoringDetCuota", flujo.oScoringDetCuota, dbType: DbType.Xml);
                    parameters.Add("@oScoringDemo", flujo.oScoringDemo, dbType: DbType.Xml);
                    parameters.Add("@oScoringRCC", flujo.oScoringRCC, dbType: DbType.Xml);
                    parameters.Add("@nRechazado", flujo.nRechazado);
                    parameters.Add("@cClienteLenddo", flujo.cClienteLenddo);
                    parameters.Add("@nIdFlujoMaestro", dbType: DbType.Int32, direction: ParameterDirection.Output);
                    connection.Query<int>("WebApi_LucasIniciaFlujo_SP",
                        parameters,
                        commandType: CommandType.StoredProcedure, transaction: transaction);
                    nIdFlujoMaestro = parameters.Get<int>("@nIdFlujoMaestro");
                    transaction.Commit();
                    return nIdFlujoMaestro;
                }
                // Fix: 'ex' was captured but never used (compiler warning);
                // sibling repositories already use the bare catch form.
                catch (Exception)
                {
                    transaction.Rollback();
                    throw; // rethrow preserving the original stack trace
                }
            }
        }
    }

    /// <summary>Gets the wizard configuration for a master flow id.</summary>
    public IEnumerable<FlujoMaestro> ObtieneWizard(int nIdFlujoMaestro)
    {
        using (var connection = new SqlConnection(_connectionString))
        {
            var parameters = new DynamicParameters();
            parameters.Add("@nIdFlujoMaestro", nIdFlujoMaestro);
            return connection.Query<FlujoMaestro>("WebApi_ObtieneWizard_SP", parameters, commandType: CommandType.StoredProcedure);
        }
    }
}
}
<file_sep>using System.Web.Http;
using TiboxWebApi.UnitOfWork;
namespace TiboxWebApi.WebApi.Controllers
{
[RoutePrefix("ReglaNegocio")]
[Authorize]
public class ReglaNegocioController : BaseController
{
    public ReglaNegocioController(IUnitOfWork unit): base(unit)
    {
    }

    /// <summary>
    /// Returns the business rules configured for form <paramref name="cForm"/>.
    /// </summary>
    /// <returns>400 when the form name is missing; otherwise 200 with the rule list.</returns>
    [Route("{cForm}")]
    [HttpGet]
    public IHttpActionResult Get(string cForm)
    {
        // Idiom fix: string.IsNullOrEmpty checks null before "" — the old
        // `cForm == "" || cForm == null` order worked but is non-idiomatic.
        if (string.IsNullOrEmpty(cForm)) return BadRequest("falta parametro");
        return Ok(_unit.ReglaNegocio.ListaRegla(cForm));
    }
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Data;
using System.Data.SqlClient;
using TiboxWebApi.Models;
using TiboxWebApi.Repository.Interfaces;
using Dapper;
namespace TiboxWebApi.Repository.Repository
{
/// <summary>
/// Persists application errors via WebApi_InsertaError_SP.
/// </summary>
public class ErrorRepository : BaseRepository<Error>, IErrorRepository
{
    /// <summary>
    /// Logs one error record.
    /// </summary>
    /// <param name="Controlador">Name of the controller where the error occurred.</param>
    /// <param name="cError">Error description / message text.</param>
    /// <returns>Always 1 (the SP exposes no result).</returns>
    public int InsertaError(string Controlador, string cError)
    {
        using (var connection = new SqlConnection(_connectionString))
        {
            var parameters = new DynamicParameters();
            // BUG FIX: the arguments were swapped — @cDescripcion received the
            // controller name and @cControlador received the error text.
            parameters.Add("@cDescripcion", cError);
            parameters.Add("@cControlador", Controlador);
            connection.Query<int>("WebApi_InsertaError_SP", parameters, commandType: CommandType.StoredProcedure);
            return 1;
        }
    }
}
}
<file_sep>using System.Web.Http;
using TiboxWebApi.WebApi.Utils;
using TiboxWebApi.UnitOfWork;
using TiboxWebApi.Models;
namespace TiboxWebApi.WebApi.Controllers
{
[RoutePrefix("Usuario")]
[Authorize]
public class UsuarioController : BaseController
{
    // Crypto helper shared by the encrypt/decrypt endpoints.
    private readonly Utiles _utils;

    public UsuarioController(IUnitOfWork unit) : base(unit)
    {
        _utils = new Utiles();
    }

    /// <summary>Encrypts plain text with the shared utility cipher.</summary>
    [Route("Encriptar/{cTexto}")]
    [HttpGet]
    public IHttpActionResult Encripta(string cTexto)
    {
        // Idiom fix throughout this controller: string.IsNullOrEmpty replaces
        // the `x == "" || x == null` pattern; null guards added on posted
        // bodies, which the original dereferenced unchecked (NRE → 500).
        if (string.IsNullOrEmpty(cTexto)) return BadRequest();
        var Resultado = _utils.Encriptar(cTexto);
        return Ok(new { cTexto = Resultado });
    }

    /// <summary>Decrypts the password carried in the posted user.</summary>
    [Route("Desencriptar")]
    [HttpPost]
    public IHttpActionResult Desencriptar(User user)
    {
        if (user == null || string.IsNullOrEmpty(user.Password)) return BadRequest();
        var Resultado = _utils.Desencriptar(user.Password);
        return Ok(new { cTexto = Resultado });
    }

    /// <summary>Checks whether the posted email is already registered.</summary>
    [Route("Verificar")]
    [HttpPost]
    public IHttpActionResult VerificaEmail(User user)
    {
        if (user == null || string.IsNullOrEmpty(user.Email)) return BadRequest();
        return Ok(_unit.Users.LucasVerificaEmail(user.Email));
    }

    /// <summary>Returns the person data shown after login for the email.</summary>
    [Route("DatosLogin")]
    [HttpPost]
    public IHttpActionResult DatosLogin(User user)
    {
        if (user == null || string.IsNullOrEmpty(user.Email)) return BadRequest();
        return Ok(_unit.Users.LucasDatosLogin(user.Email));
    }

    /// <summary>Changes the account password; returns the person code.</summary>
    [Route("CambioPass")]
    [HttpPost]
    public IHttpActionResult CambioPass(User user)
    {
        if (user == null || string.IsNullOrEmpty(user.Email)) return BadRequest();
        return Ok(new { nCodPers = _unit.Users.LucasCambiaPass(user.Email, user.Password) });
    }

    /// <summary>Looks up Windows/AD user data by user code.</summary>
    [Route("DatosADM/{cCodUsu}")]
    [HttpGet]
    public IHttpActionResult DatosADM(string cCodUsu)
    {
        return Ok(_unit.Users.validateUserAD(cCodUsu, ""));
    }

    /// <summary>Counts password-change records for a person.</summary>
    [Route("ConsultaCambio/{nCodPers}")]
    [HttpGet]
    public IHttpActionResult ConsultaCambio(int nCodPers)
    {
        return Ok(_unit.Users.selCambioPass(nCodPers));
    }
}
}
<file_sep>using Dapper;
using System.Data;
using System.Data.SqlClient;
using TiboxWebApi.Models;
using TiboxWebApi.Repository.Interfaces;
using System.Collections.Generic;
using System;
namespace TiboxWebApi.Repository.Repository
{
/// <summary>
/// User data access: credential validation, email checks and password
/// changes, all through stored procedures executed with Dapper.
/// </summary>
public class UserRepository : BaseRepository<User>, IUserRepository
{
    // Crypto helper used to encrypt passwords before they reach the DB.
    private readonly Utiles _util = null;
    public UserRepository()
    {
        _util = new Utiles();
    }
    /// <summary>
    /// Registers a password change for the account identified by email.
    /// SECURITY NOTE(review): both the plaintext password (@cPass) and the
    /// encrypted form (@cPassEncriptado) are sent to the SP — confirm the SP
    /// really needs the plaintext copy; storing it would be a serious risk.
    /// </summary>
    /// <returns>The person code (@nCodPers output) emitted by the SP.</returns>
    public int LucasCambiaPass(string email, string password)
    {
        using (var connection = new SqlConnection(_connectionString))
        {
            var Encriptado = _util.Encriptar(password);
            var parametrs = new DynamicParameters();
            parametrs.Add("@cEmail", email);
            parametrs.Add("@cPass", password);
            parametrs.Add("@cPassEncriptado", Encriptado);
            parametrs.Add("@nCodPers", dbType: DbType.Int32, direction: ParameterDirection.Output);
            connection.Query<int>("WebApi_insCambioPass_SP", parametrs, commandType: CommandType.StoredProcedure);
            var nCodPers = parametrs.Get<int>("@nCodPers");
            return nCodPers;
        }
    }
    /// <summary>Fetches the person data displayed after a successful login.</summary>
    public IEnumerable<Persona> LucasDatosLogin(string cEmail)
    {
        using (var connection = new SqlConnection(_connectionString))
        {
            var parameters = new DynamicParameters();
            parameters.Add("@cEmail", cEmail);
            return connection.Query<Persona>("WebApi_LucasDatosLogin_SP", parameters, commandType: CommandType.StoredProcedure);
        }
    }
    /// <summary>Checks whether an email address is already registered.</summary>
    public IEnumerable<User> LucasVerificaEmail(string cEmail)
    {
        using (var connection = new SqlConnection(_connectionString))
        {
            var paramaters = new DynamicParameters();
            paramaters.Add("@cEmail", cEmail);
            return connection.Query<User>("WebApi_LucasValidaCorreoExiste_SP",
                paramaters,
                commandType: CommandType.StoredProcedure);
        }
    }
    /// <summary>Counts password-change records for a person (@nTotal output).</summary>
    public int selCambioPass(int nCodPers)
    {
        using (var connection = new SqlConnection(_connectionString))
        {
            var parameters = new DynamicParameters();
            parameters.Add("@nCodPers", nCodPers);
            parameters.Add("@nTotal", dbType: DbType.Int32, direction: ParameterDirection.Output);
            connection.Query<int>("WebApi_selCambioPass_SP", parameters, commandType: CommandType.StoredProcedure);
            var total = parameters.Get<int>("@nTotal");
            return total;
        }
    }
    /// <summary>
    /// Validates credentials: the password is encrypted with the same cipher
    /// used at registration and matched by the SP. QueryFirstOrDefault
    /// returns null when no row matches.
    /// </summary>
    public User ValidateUser(string email, string password)
    {
        using (var connection = new SqlConnection(_connectionString))
        {
            string pass = _util.Encriptar(password);
            var parameters = new DynamicParameters();
            parameters.Add("@email", email);
            parameters.Add("@password", pass);
            return connection.QueryFirstOrDefault<User>("WebApi_ValidateUser_SP",
                parameters,
                commandType: CommandType.StoredProcedure);
        }
    }
    /// <summary>
    /// Looks up Windows/AD user data by user code.
    /// NOTE(review): the password parameter is never used — confirm whether
    /// validation is intentionally delegated elsewhere.
    /// </summary>
    public User validateUserAD(string userName, string password)
    {
        using (var connection = new SqlConnection(_connectionString))
        {
            var parameters = new DynamicParameters();
            parameters.Add("@cCodUsu", userName);
            return connection.QueryFirstOrDefault<User>("WebApi_DatosWinUsuario_SP",
                parameters, commandType:
                CommandType.StoredProcedure);
        }
    }
}
}
<file_sep>using Dapper;
using System.Data;
using System.Data.SqlClient;
using TiboxWebApi.Models;
using TiboxWebApi.Repository.Interfaces;
using System;
using System.Collections;
using System.Collections.Generic;
namespace TiboxWebApi.Repository.Repository
{
/// <summary>
/// Data access for credit reports: header/detail inserts and retrieval,
/// each delegating to a stored procedure via Dapper.
/// </summary>
public class ReporteRepository : BaseRepository<Reporte>, IReporteRepository
{
    /// <summary>Inserts the report header (subject and body) for a credit.</summary>
    /// <returns>Always 1 (the SP exposes no result).</returns>
    public int LucasInsCabeceraReporte(int nCodAcge, int nCodCred, string cAsunto, string cCuerpo)
    {
        using (var cn = new SqlConnection(_connectionString))
        {
            var args = new DynamicParameters();
            args.Add("@pnCodAge", nCodAcge);
            args.Add("@pnCodCred", nCodCred);
            args.Add("@pcAsunto", cAsunto);
            args.Add("@pcCuerpo", cCuerpo);
            cn.Query<int>("WebApi_ReporteInsertaCabecera_SP", args, commandType: CommandType.StoredProcedure);
            return 1;
        }
    }

    /// <summary>Inserts a report detail row carrying the document bytes.</summary>
    /// <returns>Always 1 (the SP exposes no result).</returns>
    public int LucasInsDetalleReporte(int nCodAge, int nCodCred, int nTipo, byte[] oDoc)
    {
        using (var cn = new SqlConnection(_connectionString))
        {
            var args = new DynamicParameters();
            args.Add("@pnCodAge", nCodAge);
            args.Add("@pnCodCred", nCodCred);
            // SP parameter name contains a typo ("Dcoumento") — it must match the SP.
            args.Add("@pnTipoDcoumento", nTipo);
            args.Add("@poDocumento", oDoc);
            cn.Query<int>("WebApi_ReporteInsertaDetalle_SP", args, commandType: CommandType.StoredProcedure);
            return 1;
        }
    }

    /// <summary>Retrieves report rows for a credit filtered by document type.</summary>
    public IEnumerable<Reporte> LucasSeleccionaReporte(int nCodAge, int nCodCred, int nTipo)
    {
        using (var cn = new SqlConnection(_connectionString))
        {
            var args = new DynamicParameters();
            args.Add("@nCodCred", nCodCred);
            args.Add("@nCodAge", nCodAge);
            args.Add("@nTipo", nTipo);
            return cn.Query<Reporte>("WebApi_ReporteSelecciona_SP", args, commandType: CommandType.StoredProcedure);
        }
    }
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace TiboxWebApi.Models
{
/// <summary>
/// Read model for a menu entry (page → module → group hierarchy).
/// Lowercase property names presumably mirror the SQL result-set columns
/// mapped by Dapper — confirm against the menu query before renaming.
/// </summary>
public class Menu
{
    public int nIdPagina { get; set; }       // page identifier
    public string nombregrupo { get; set; }  // group display name
    public string nombremodulo { get; set; } // module display name
    public string stado { get; set; }        // status flag (sic — spelling matches the source column)
    public string iconomodulo { get; set; }  // module icon name/class
    public string ordenmodulo { get; set; }  // module sort order (string; presumably matches the DB column type)
    public int ordengrupo { get; set; }      // group sort order
    public string iconogrupo { get; set; }   // group icon name/class
}
}
<file_sep>using System.Web.Http;
using TiboxWebApi.Models;
using TiboxWebApi.UnitOfWork;
using TiboxWebApi.WebApi.Utils;
namespace TiboxWebApi.WebApi.Controllers
{
[RoutePrefix("Alerta")]
[Authorize]
public class AlertaController : BaseController
{
    // Wrappers around the SMS SOAP service and the email sender.
    private Sms _sms = null;
    private Email _email = null;
    public AlertaController(IUnitOfWork unit) : base(unit)
    {
        _sms = new Sms();
        _email = new Email();
    }
    /// <summary>
    /// Sends an SMS alert. NOTE(review): the send result (bResultado) is
    /// ignored — the failure check is commented out below, so the endpoint
    /// always answers { cRed = true } even when sending fails. Confirm this
    /// fire-and-forget behavior is intended.
    /// </summary>
    [Route("SMS")]
    [HttpPost]
    public IHttpActionResult SMS(Alerta alerta)
    {
        if (alerta == null) return BadRequest("Faltan datos");
        var bResultado = _sms.enviarSMS(alerta.cMovil, alerta.cTexto);
        //if (!bResultado) return BadRequest();
        return Ok(new { cRed = true });
    }
    /// <summary>
    /// Sends an email alert. Same caveat as SMS: both the boolean result and
    /// the error message captured in cMensajeError are discarded.
    /// </summary>
    [Route("Email")]
    [HttpPost]
    public IHttpActionResult Email(Alerta alerta)
    {
        string cMensajeError = "";
        if (alerta == null) return BadRequest("Faltan datos");
        var bResultado = _email.envioEmail(alerta.cEmail, alerta.cTexto, alerta.cTitulo, ref cMensajeError);
        //if (!bResultado) return BadRequest();
        return Ok(new { cRed = true });
    }
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.Data.SqlClient;
using System.Data;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using TiboxWebApi.Models;
using TiboxWebApi.Repository.Interfaces;
using Dapper;
namespace TiboxWebApi.Repository.Repository
{
/// <summary>
/// Data access for business rules configured per form.
/// </summary>
public class ReglaNegocioRepository : BaseRepository<ReglaNegocio>, IReglaNegocioRepository
{
    /// <summary>
    /// Fetches the business rules for the given form name via
    /// WebApi_ReglaNegocioSelecciona_SP.
    /// </summary>
    public IEnumerable<ReglaNegocio> ListaRegla(string cNomForm)
    {
        using (var cn = new SqlConnection(_connectionString))
        {
            var args = new DynamicParameters();
            // SP parameter is named @pnIdForm but receives the form name string.
            args.Add("@pnIdForm", cNomForm);
            return cn.Query<ReglaNegocio>("WebApi_ReglaNegocioSelecciona_SP", args, commandType: CommandType.StoredProcedure);
        }
    }
}
}
<file_sep>using TiboxWebApi.WebApi.wsSms;
namespace TiboxWebApi.WebApi.Utils
{
/// <summary>
/// Thin wrapper around the wsSMS SOAP service.
/// </summary>
public class Sms
{
    private wsSMSSoap _sms;

    public Sms()
    {
        _sms = new wsSMSSoapClient();
    }

    /// <summary>
    /// Sends an SMS through the SOAP service.
    /// </summary>
    /// <param name="cMovil">Destination phone number.</param>
    /// <param name="cMensaje">Message text.</param>
    /// <returns>true when the service reports the message as sent.</returns>
    public bool enviarSMS(string cMovil, string cMensaje)
    {
        EnviarMensajeRequestBody smsCuerpo = new EnviarMensajeRequestBody();
        smsCuerpo.Telefono = cMovil;
        smsCuerpo.Msg = cMensaje;
        var smsEnvia = new EnviarMensajeRequest();
        smsEnvia.Body = smsCuerpo;
        var result = _sms.EnviarMensaje(smsEnvia);
        // Idiom fix: return the service result directly instead of
        // `if (!x) return false; return true;`.
        return result.Body.EnviarMensajeResult;
    }
}
}<file_sep>using FluentValidation;
using LightInject;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Reflection;
using System.Web;
using System.Web.Http;
using TiboxWebApi.Models;
using TiboxWebApi.WebApi.Validators;
namespace TiboxWebApi.WebApi
{
public partial class Startup
{
    /// <summary>
    /// Configures the LightInject service container and plugs it into the
    /// Web API pipeline.
    /// </summary>
    public void ConfigureInjector(HttpConfiguration config)
    {
        // Dependency injection: register this assembly plus the repository
        // and unit-of-work assemblies discovered by filename wildcard.
        var container = new ServiceContainer();
        container.RegisterAssembly(Assembly.GetExecutingAssembly());
        container.RegisterAssembly("TiboxWebApi.Repository*.dll");
        container.RegisterAssembly("TiboxWebApi.UnitOfWork*.dll");
        // Register FluentValidation validator implementations.
        container.Register<AbstractValidator<Product>, ProductValidator>();
        container.RegisterApiControllers();
        container.EnableWebApi(config);
    }
}
}<file_sep>using System.Web.Http;
using TiboxWebApi.UnitOfWork;
namespace TiboxWebApi.WebApi.Controllers
{
[RoutePrefix("CatalogoCodigo")]
[Authorize]
public class CatalogoCodigoController : BaseController
{
    public CatalogoCodigoController(IUnitOfWork unit) : base(unit)
    {
    }

    /// <summary>Returns the catalog codes under catalog <paramref name="id"/>; 400 for non-positive ids.</summary>
    [Route("{id}")]
    public IHttpActionResult Get(int id)
    {
        // Guard first, then delegate (id < 1 is equivalent to the old id <= 0).
        if (id < 1) return BadRequest();
        return Ok(_unit.CatalogoCodigo.selCatalogoCodigos(id));
    }

    /// <summary>Returns the housing-type catalog entries.</summary>
    [Route("TipoVivienda")]
    [HttpGet]
    public IHttpActionResult Tipovivienda()
    {
        return Ok(_unit.CatalogoCodigo.selTipovivienda());
    }
}
}
<file_sep>using System;
using System.IO;
using System.Web;
using System.Web.Http;
using Newtonsoft.Json.Linq;
using TiboxWebApi.Models;
using TiboxWebApi.UnitOfWork;
namespace TiboxWebApi.WebApi.Controllers
{
[RoutePrefix("Documento")]
[Authorize]
public class DocumentoController : BaseController
{
    public DocumentoController(IUnitOfWork unit) : base(unit)
    {
    }

    /// <summary>Lists the configured document types.</summary>
    [Route("Tipo")]
    [HttpGet]
    public IHttpActionResult Tipo()
    {
        return Ok(_unit.Documento.ListaDocumentos());
    }

    /// <summary>
    /// Receives a base64-encoded document and stores it for the given flow.
    /// </summary>
    /// <returns>400 for a missing or non-base64 payload; otherwise 200 with the insert result.</returns>
    [Route("Subir")]
    [HttpPost]
    public IHttpActionResult Subir(Documento documento)
    {
        // Robustness fix: the original dereferenced `documento` unchecked and
        // let Convert.FromBase64String turn a bad payload into a 500.
        if (documento == null || string.IsNullOrEmpty(documento.oDocumento))
            return BadRequest("falta documento");
        byte[] contenido;
        try
        {
            contenido = Convert.FromBase64String(documento.oDocumento);
        }
        catch (FormatException)
        {
            return BadRequest("documento no es base64 valido");
        }
        var valores = new Documento();
        valores.iImagen = contenido;
        valores.cNomArchivo = documento.cNomArchivo;
        valores.cExtencion = documento.cExtencion;
        valores.nIdFlujoMaestro = documento.nIdFlujoMaestro;
        valores.cTipoArchivo = documento.cTipoArchivo;
        var resultado = _unit.Documento.LucasInsDocumento(valores);
        return Ok(new { bRespuesta = resultado });
    }
}
}
<file_sep>using System;
namespace TiboxWebApi.Models
{
/// <summary>
/// One row of a loan payment calendar: due date, installment number and the
/// money breakdown (capital, interest, charges, running balances).
/// </summary>
public class Calendario
{
    // Auto-properties replace the original hand-written backing fields;
    // names, types and get/set behavior are unchanged for all callers.
    public string cFechaPago { get; set; }       // due date rendered as text
    public DateTime FechaPago { get; set; }      // due date
    public double Seguro { get; set; }           // insurance (desgravamen) amount
    public int Cuota { get; set; }               // installment number
    public string MontoCuota { get; set; }       // installment amount (kept as string, as produced upstream)
    public double Capital { get; set; }          // principal portion
    public double Interes { get; set; }          // interest portion
    public double Gastos { get; set; }           // charges portion
    public double Saldos { get; set; }           // remaining balance
    public double SaldoCalendario { get; set; }  // calendar-level balance
}
}
<file_sep>using System.Collections.Generic;
using System.Data.SqlClient;
using System.Data;
using TiboxWebApi.Models;
using TiboxWebApi.Repository.Interfaces;
using Dapper;
using System.Xml;
using System;
using System.Transactions;
namespace TiboxWebApi.Repository.Repository
{
public class CreditoRepository : BaseRepository<Credito>, ICreditoRepository
{
public readonly Utiles _utils = null;
public CreditoRepository()
{
_utils = new Utiles();
}
public IEnumerable<Credito> LucasBandeja(int nCodPers, int nPagina, int nTam, int nCodAge)
{
using (var connection = new SqlConnection(_connectionString))
{
var parameters = new DynamicParameters();
parameters.Add("@nCodPers", nCodPers);
parameters.Add("@nNumPagina", nPagina);
parameters.Add("@nTamPagina", nTam);
parameters.Add("@nCodAge", nCodAge);
return connection.Query<Credito>("WebApi_LucasBandeja_SP",
parameters,
commandType: CommandType.StoredProcedure);
}
}
public IEnumerable<Credito> LucasCalendarioLista(int nCodAge, int nCodCred)
{
using (var connection = new SqlConnection(_connectionString))
{
var parameters = new DynamicParameters();
parameters.Add("@nCodCred", nCodCred);
parameters.Add("@nCodAge", nCodAge);
return connection.Query<Credito>("WebApi_LucasCalendarioLista_SP", parameters, commandType: CommandType.StoredProcedure);
}
}
public int LucasCreditoAnulaxActualizacion(string cDocumento)
{
using (var connection = new SqlConnection(_connectionString))
{
var resultado = 0;
var parameters = new DynamicParameters();
parameters.Add("@cDocumento", cDocumento);
connection.Query<int>("WebApi_LucasCreditosAnularxActualizacion_SP", parameters, commandType: CommandType.StoredProcedure);
resultado = 1;
return resultado;
}
}
public int LucasCreditoEnFlujo(string cDocumento)
{
using (var connection = new SqlConnection(_connectionString))
{
var parameters = new DynamicParameters();
parameters.Add("@cDocumento", cDocumento);
parameters.Add("@nRes", dbType: DbType.Int32, direction: ParameterDirection.Output);
connection.Query<int>("WebApi_LucasCreditoPorFlujo_SP", parameters, commandType: CommandType.StoredProcedure);
return parameters.Get<int>("@nRes");
}
}
public IEnumerable<Credito> LucasDatosPrestamo(int nCodAge, int nCodCred)
{
using (var connection = new SqlConnection(_connectionString))
{
var parameters = new DynamicParameters();
parameters.Add("@nCodAge", nCodAge);
parameters.Add("@nCodCred", nCodCred);
return connection.Query<Credito>("WebApi_LucasDatosCredito_SP", parameters, commandType: CommandType.StoredProcedure);
}
}
public int LucasInsCredito(Credito credito)
{
using (var connection = new SqlConnection(_connectionString))
{
connection.Open();
using (var transaction = connection.BeginTransaction())
{
try
{
var lstCalendario = new List<Calendario>();
lstCalendario = _utils.GeneraCalendario(credito.nPrestamo, credito.nNroCuotas, credito.nPeriodo, credito.nTasa, credito.dFechaSistema, credito.nSeguro);
string oCreditoXml = "";
var xml = new XmlDocument();
XmlElement root = xml.CreateElement("Credito");
xml.AppendChild(root);
foreach (var cust in lstCalendario)
{
XmlElement child = xml.CreateElement("Credito");
child.SetAttribute("FechaPago", cust.FechaPago.ToString());
child.SetAttribute("cFechaPago", cust.cFechaPago.ToString());
child.SetAttribute("Cuota", cust.Cuota.ToString());
child.SetAttribute("MontoCuota", cust.MontoCuota);
child.SetAttribute("Capital", cust.Capital.ToString());
child.SetAttribute("Interes", cust.Interes.ToString());
child.SetAttribute("Gastos", cust.Gastos.ToString());
child.SetAttribute("Saldos", cust.Saldos.ToString());
child.SetAttribute("SaldoCalendario", cust.SaldoCalendario.ToString());
root.AppendChild(child);
}
oCreditoXml = xml.OuterXml;
var parameters = new DynamicParameters();
parameters.Add("@nCodPers", credito.nCodPers);
parameters.Add("@nCodAge", credito.nCodAge);
parameters.Add("@nProd", credito.nProd);
parameters.Add("@nSubProd", credito.nSubProd);
parameters.Add("@nNroCuotas", credito.nNroCuotas);
parameters.Add("@nMontoCuota", credito.nMontoCuota);
parameters.Add("@nTasa", credito.nTasa);
parameters.Add("@nPeriodo", credito.nPeriodo);
parameters.Add("@nMontoSol", credito.nPrestamo);
parameters.Add("@nCodUsu", credito.nCodUsu);
parameters.Add("@oCredito", oCreditoXml);
parameters.Add("@nIdFlujoMaestro", credito.nIdFlujoMaestro);
parameters.Add("@cNomForm", credito.cFormulario);
parameters.Add("@cUsuReg", credito.cUsuReg);
parameters.Add("@nCodPersReg", credito.nCodPersReg);
parameters.Add("@nIdFlujo", credito.nIdFlujo);
parameters.Add("@nOrdenFlujo", credito.nOrdenFlujo);
parameters.Add("@nCodCred", credito.nCodCred, dbType: DbType.Int32, direction: ParameterDirection.Output);
connection.Query<int>("WEBApi_LucasinsCredito_SP", parameters, commandType: CommandType.StoredProcedure, transaction: transaction);
var nCodCred = parameters.Get<int>("@nCodCred");
transaction.Commit();
return nCodCred;
}
catch (Exception)
{
transaction.Rollback();
throw;
}
}
}
}
public int LucasInsFirmaElectronica(Credito credito)
{
using (var connection = new SqlConnection(_connectionString))
{
connection.Open();
using (var transaction = connection.BeginTransaction())
{
try
{
var parameters = new DynamicParameters();
parameters.Add("@nCodCred", credito.nCodCred);
parameters.Add("@nCodAge", credito.nCodAge);
parameters.Add("@cNumCelular", credito.cMovil);
parameters.Add("@cCodElectronica", credito.nFirma);
parameters.Add("@nIdFlujoMaestro", credito.nIdFlujoMaestro);
parameters.Add("@nProd", credito.nProd);
parameters.Add("@nSubProd", credito.nSubProd);
parameters.Add("@cNomForm", credito.cFormulario);
parameters.Add("@cUsuReg", credito.cUsuReg);
parameters.Add("@nCodPersReg", credito.nCodPersReg);
parameters.Add("@nIdFlujo", credito.nIdFlujo);
parameters.Add("@nCodPers", credito.nCodPers);
parameters.Add("@nOrdenFlujo", credito.nOrdenFlujo);
parameters.Add("@nRetorno", dbType: DbType.Int32, direction: ParameterDirection.Output);
connection.Query<int>("WebApi_LucasFirmaElectronica_SP", parameters, commandType: CommandType.StoredProcedure, transaction: transaction);
var nRetorno = parameters.Get<int>("@nRetorno");
transaction.Commit();
return nRetorno;
}
catch (Exception)
{
transaction.Rollback();
throw;
}
}
}
}
/// <summary>
/// Executes the WEBApi_LucasInsModalidad_SP stored procedure inside an explicit
/// transaction and returns the value of its @nRetorno output parameter.
/// </summary>
/// <param name="credito">Credit record supplying every procedure argument.</param>
/// <returns>The integer result reported by the stored procedure.</returns>
public int LucasInsModalidad(Credito credito)
{
    using (var conn = new SqlConnection(_connectionString))
    {
        conn.Open();
        using (var tx = conn.BeginTransaction())
        {
            try
            {
                var args = new DynamicParameters();
                args.Add("@nCodCred", credito.nCodCred);
                args.Add("@nCodAge", credito.nCodAge);
                args.Add("@nTipoDesembolso", credito.nTipoDesembolso);
                args.Add("@nBanco", credito.nBanco);
                args.Add("@cCuentaBancaria", credito.cNroCuenta);
                args.Add("@nIdFlujoMaestro", credito.nIdFlujoMaestro);
                args.Add("@nProd", credito.nProd);
                args.Add("@nSubProd", credito.nSubProd);
                args.Add("@cNomForm", credito.cFormulario);
                args.Add("@cUsuReg", credito.cUsuReg);
                args.Add("@nCodPersReg", credito.nCodPersReg);
                args.Add("@nIdFlujo", credito.nIdFlujo);
                args.Add("@nCodPers", credito.nCodPers);
                args.Add("@nOrdenFlujo", credito.nOrdenFlujo);
                // Output slot the procedure writes its result into.
                args.Add("@nRetorno", dbType: DbType.Int32, direction: ParameterDirection.Output);
                conn.Query<int>("WEBApi_LucasInsModalidad_SP", args, commandType: CommandType.StoredProcedure, transaction: tx);
                var result = args.Get<int>("@nRetorno");
                tx.Commit();
                return result;
            }
            catch (Exception)
            {
                // Undo any partial work, then surface the failure to the caller.
                tx.Rollback();
                throw;
            }
        }
    }
}
/// <summary>
/// Retrieves the kardex entries for a credit via WebApi_LucasKardexLista_SP.
/// Read-only, so no explicit transaction is used.
/// </summary>
/// <param name="nCodAge">Agency code.</param>
/// <param name="nCodCred">Credit code.</param>
public IEnumerable<Credito> LucasKardexLista(int nCodAge, int nCodCred)
{
    using (var conn = new SqlConnection(_connectionString))
    {
        var args = new DynamicParameters();
        args.Add("@nCodCred", nCodCred);
        args.Add("@nCodAge", nCodAge);
        return conn.Query<Credito>(
            "WebApi_LucasKardexLista_SP",
            args,
            commandType: CommandType.StoredProcedure);
    }
}
/// <summary>
/// Runs WebApi_LucasRechazadoPorDia_SP for the given document number and
/// returns the @nRes output value produced by the procedure.
/// </summary>
/// <param name="cDocumento">Identity document number to check.</param>
public int LucasRechazadoPorDia(string cDocumento)
{
    using (var conn = new SqlConnection(_connectionString))
    {
        var args = new DynamicParameters();
        args.Add("@cDocumento", cDocumento);
        // Output slot the procedure fills in.
        args.Add("@nRes", dbType: DbType.Int32, direction: ParameterDirection.Output);
        conn.Query<int>("WebApi_LucasRechazadoPorDia_SP", args, commandType: CommandType.StoredProcedure);
        return args.Get<int>("@nRes");
    }
}
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace TiboxWebApi.Models
{
/// <summary>
/// Plain DTO describing an alert to be delivered to a user: the body text
/// plus the SMS and e-mail delivery targets.
/// </summary>
public class Alerta
{
// Mobile phone number for SMS delivery.
public string cMovil { get; set; }
// Message body.
public string cTexto { get; set; }
// Destination e-mail address.
public string cEmail { get; set; }
// Message subject/title.
public string cTitulo { get; set; }
}
}
<file_sep>using System.Collections.Generic;
using TiboxWebApi.Models;
namespace TiboxWebApi.Repository.Interfaces
{
/// <summary>
/// Repository contract for user accounts, extending the generic CRUD
/// operations of <see cref="IRepository{T}"/> with authentication helpers.
/// </summary>
public interface IUserRepository :IRepository<User>
{
// Returns the matching user when the email/password pair is valid.
User ValidateUser(string email, string password);
// Looks up users by e-mail (used to check whether an address is taken).
IEnumerable<User> LucasVerificaEmail(string cEmail);
// Returns the person data associated with a login e-mail.
IEnumerable<Persona> LucasDatosLogin(string cEmail);
// Changes the password for the account with the given e-mail.
int LucasCambiaPass(string email, string password);
// Validates credentials against Active Directory.
User validateUserAD(string userName, string password);
// Reads the "password change required" status for a person.
int selCambioPass(int nCodPers);
}
}
<file_sep>using FluentValidation;
using System.Net;
using System.Web.Http;
using TiboxWebApi.Models;
using TiboxWebApi.UnitOfWork;
namespace TiboxWebApi.WebApi.Controllers
{
[RoutePrefix("Product")]
[Authorize]
/// <summary>
/// CRUD endpoints for products.
/// NOTE(review): the class name "PoductController" is a typo for
/// "ProductController"; renaming would change the public type name, so it is
/// left unchanged here.
/// </summary>
public class PoductController : BaseController
{
    private readonly AbstractValidator<Product> _validator;

    public PoductController(IUnitOfWork unit, AbstractValidator<Product> validator) : base(unit)
    {
        _validator = validator;
    }

    /// <summary>Returns the product with the given id; 400 for non-positive ids.</summary>
    [Route("{id}")]
    public IHttpActionResult Get(int id)
    {
        if (id <= 0) return BadRequest();
        return Ok(_unit.Products.GetEntityById(id));
    }

    /// <summary>Validates and inserts a new product; returns the generated id.</summary>
    [Route("")]
    [HttpPost]
    public IHttpActionResult Post(Product product)
    {
        var result = _validator.Validate(product);
        if (!result.IsValid) return Content(HttpStatusCode.BadRequest, result.Errors);
        var id = _unit.Products.Insert(product);
        return Ok(new { id = id });
    }

    /// <summary>Validates and updates an existing product.</summary>
    [Route("")]
    [HttpPut]
    public IHttpActionResult Put(Product product)
    {
        // Fix: run the injected FluentValidation validator, consistent with Post.
        // Previously only ModelState was checked, so the FluentValidation rules
        // were silently skipped on updates.
        var result = _validator.Validate(product);
        if (!result.IsValid) return Content(HttpStatusCode.BadRequest, result.Errors);
        _unit.Products.Update(product);
        return Ok(new { status = true });
    }

    /// <summary>Deletes the product with the given id; 400 for non-positive ids.</summary>
    [Route("{id}")]
    [HttpDelete]
    public IHttpActionResult Delete(int id)
    {
        if (id <= 0) return BadRequest();
        _unit.Products.Delete(new Product { Id = id });
        // NOTE(review): "detele" is a typo for "delete", but it is part of the
        // public JSON contract; renaming it would break existing clients.
        return Ok(new { detele = true });
    }

    /// <summary>Returns all products.</summary>
    [Route("List")]
    [HttpGet]
    public IHttpActionResult GetList()
    {
        return Ok(_unit.Products.GetAll());
    }
}
}
<file_sep>using Dapper;
using System;
using System.Collections.Generic;
using System.Data.SqlClient;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using TiboxWebApi.Models;
using TiboxWebApi.Repository.Interfaces;
namespace TiboxWebApi.Repository.Repository
{
/// <summary>
/// Read-only lookups for the Peruvian administrative-division hierarchy:
/// departments, their provinces, and the districts of a province.
/// </summary>
public class ZonaRepository : BaseRepository<Zona>, IZonaRepository
{
    /// <summary>Lists all departments.</summary>
    public IEnumerable<Zona> selDepartamento()
    {
        using (var conn = new SqlConnection(_connectionString))
        {
            return conn.Query<Zona>("WebApi_selDepartamento_SP", null, commandType: System.Data.CommandType.StoredProcedure);
        }
    }

    /// <summary>Lists the districts of one province within a department.</summary>
    public IEnumerable<Zona> selDistrito(string cDepartamento, string cProvincia)
    {
        using (var conn = new SqlConnection(_connectionString))
        {
            var args = new DynamicParameters();
            args.Add("@cCodDepartamento", cDepartamento);
            args.Add("@cCodProvincia", cProvincia);
            return conn.Query<Zona>("WebApi_selDistrito_SP", args, commandType: System.Data.CommandType.StoredProcedure);
        }
    }

    /// <summary>Lists the provinces of one department.</summary>
    public IEnumerable<Zona> selProvincia(string cDepartamento)
    {
        using (var conn = new SqlConnection(_connectionString))
        {
            var args = new DynamicParameters();
            args.Add("@cCodDepartamento", cDepartamento);
            return conn.Query<Zona>("WebApi_selProvincia_SP", args, commandType: System.Data.CommandType.StoredProcedure);
        }
    }
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Data;
using System.Data.SqlClient;
using TiboxWebApi.Models;
using TiboxWebApi.Repository.Interfaces;
using Dapper;
namespace TiboxWebApi.Repository.Repository
{
/// <summary>
/// Repository for document records: listing the documents configured for the
/// web flow and inserting uploaded document images.
/// </summary>
public class DocumentoRepository : BaseRepository<Documento>, IDocumentoRepository
{
/// <summary>Returns the configured list of web documents.</summary>
public IEnumerable<Documento> ListaDocumentos()
{
using (var connection = new SqlConnection(_connectionString))
{
return connection.Query<Documento>("WebOnline_selListaDocumentosWeb_SP", commandType: CommandType.StoredProcedure);
}
}
/// <summary>
/// Inserts a document (image bytes + metadata) via
/// WebApi_LucasDocumentosInserta_SP and returns the flow-master id.
/// </summary>
/// <param name="documento">Document payload; iImagen carries the binary content.</param>
/// <returns>
/// The caller-supplied nIdFlujoMaestro, NOT a value produced by the stored
/// procedure. NOTE(review): the procedure's own result set is discarded here —
/// presumably intentional, but verify the SP does not return a new id.
/// </returns>
public int LucasInsDocumento(Documento documento)
{
using (var connection = new SqlConnection(_connectionString))
{
var parameters = new DynamicParameters();
parameters.Add("@iImagen", documento.iImagen);
parameters.Add("@cNomArchivo", documento.cNomArchivo);
// "@cExtencion" matches the (misspelled) model property and SP parameter name.
parameters.Add("@cExtencion", documento.cExtencion);
parameters.Add("@nIdFlujoMaestro", documento.nIdFlujoMaestro);
parameters.Add("@cTipoArchivo", documento.cTipoArchivo);
var resultado = documento.nIdFlujoMaestro;
connection.Query<int>("WebApi_LucasDocumentosInserta_SP", parameters, commandType:CommandType.StoredProcedure);
return resultado;
}
}
}
}
<file_sep>using System;
using System.Web.Http;
using TiboxWebApi.Models;
using TiboxWebApi.UnitOfWork;
using TiboxWebApi.WebApi.Utils;
namespace TiboxWebApi.WebApi.Controllers
{
[RoutePrefix("Evaluacion")]
[Authorize]
/// <summary>
/// Endpoints that run a credit applicant through the scoring engine
/// (EvaluacionMotor) and persist the resulting flow-master record.
/// </summary>
public class EvaluacionController : BaseController
{
// Scoring engine instance; created per controller instance.
EvaluacionMotor _evaluacion = null;
public EvaluacionController(IUnitOfWork unit) : base(unit)
{
_evaluacion = new EvaluacionMotor();
}
/// <summary>
/// Full evaluation: computes the applicant's age, runs the scoring motor
/// (which fills several XML result strings via ref parameters), then stores
/// a FlujoMaestro record. Motor failures return 400; unexpected exceptions
/// are logged and reported in the cMensajeTry field of a 200 response.
/// </summary>
[Route("")]
[HttpPost]
public IHttpActionResult Evaluacion(Persona persona)
{
var FlujoRespuesta = 0;
int nRechazado = 0;
int nPEP = 0;
var cMensajeTry = "";
try
{
if (persona == null) return BadRequest("Faltan un dato");
// Ref-parameter outputs filled by the scoring motor below.
string cXmlScoringDatos = "";
string cXmlScoringCuota = "";
string cXmlDeudas = "";
string cXMLPuntajeIPDItems = "";
string cXmlScoringDemo = "";
string cMensajeError = "";
// dFechaNacimiento is assumed to be "dd/MM/yyyy"; it is re-assembled as
// "yyyy-MM-dd" for parsing. TODO(review): confirm the incoming format.
var splited = persona.dFechaNacimiento.Split('/');
DateTime fechaNacimiento = Convert.ToDateTime(splited[2] + '-' + splited[1] + '-' + splited[0]);
// Age in whole years: subtract one if the birthday has not yet occurred
// this calendar year.
int nEdad = DateTime.Today.Year - fechaNacimiento.Year;
if (fechaNacimiento.Month > DateTime.Today.Month)
{
nEdad = nEdad - 1;
}
else if (fechaNacimiento.Month == DateTime.Today.Month && fechaNacimiento.Day > DateTime.Today.Day)
{
nEdad = nEdad - 1;
}
// Run the scoring motor. The three literal arguments (0, 1, 1) are
// fixed inputs whose meaning is not visible here — see EvaluacionMotor.
bool bResultado = _evaluacion.Evaluacion(persona.nNroDoc,
persona.cDistrito,
persona.cProvincia,
persona.cDepartamento,
nEdad,
int.Parse(persona.nSexo),
int.Parse(persona.nEstadoCivil),
int.Parse(persona.nCUUI),
persona.nProducto,
persona.nModalidad,
int.Parse(persona.nSitLab),
int.Parse(persona.nTipoResidencia),
0,
1,
1,
persona.cDniConyuge,
int.Parse(persona.nTipoEmp),
persona.nCodPers,
persona.nIngresoDeclado,
persona.nSitLab,
persona.nTipoEmp,
ref cXmlScoringDatos,
ref cXmlScoringCuota,
ref cXmlDeudas,
ref cXMLPuntajeIPDItems,
ref cXmlScoringDemo,
ref cMensajeError,
ref nRechazado,
ref nPEP);
if (!bResultado) return BadRequest(cMensajeError);
// Persist the motor's outputs as a new flow-master record.
var flujo = new FlujoMaestro();
flujo.nNroDoc = persona.nNroDoc;
flujo.nCodAge = persona.nCodAge;
flujo.nProd = persona.nProd;
flujo.nSubProd = persona.nSubProd;
flujo.cNomform = "/StateClienteNuevo";
flujo.nCodCred = 0;
flujo.cUsuReg = "USU-LUCAS";
flujo.nIdFlujo = 0;
flujo.nCodPersReg = persona.nCodPers;
flujo.nOrdenFlujo = 0;
flujo.oScoringDatos = cXmlScoringDatos;
flujo.oScoringVarDemo = cXMLPuntajeIPDItems;
flujo.oScoringDetCuota = cXmlScoringCuota;
flujo.oScoringDemo = cXmlScoringDemo;
flujo.oScoringRCC = cXmlDeudas;
flujo.nRechazado = nRechazado;
flujo.cClienteLenddo = persona.cLenddo;
var FlujoMaestro = _unit.FlujoMaestro.LucasRegistraMotor(flujo);
// 0 is the repository's failure sentinel.
if (FlujoMaestro == 0) return BadRequest("Error de evaluación.");
FlujoRespuesta = FlujoMaestro;
}
catch (Exception ex)
{
// Exceptions are logged and surfaced in the 200 payload rather than as
// an HTTP error (the client inspects cMensajeTry).
cMensajeTry = ex.Message;
_unit.Error.InsertaError("EVALUACION - Evaluacion", ex.Message);
}
return Ok(new { nIdFlujoMaestro = FlujoRespuesta, nRechazado = nRechazado, cMensajeTry = cMensajeTry, nPEP = nPEP });
}
/// <summary>
/// Lightweight pre-check of an applicant/product pair via the motor's
/// preEvaluacion call; returns the motor's textual verdict.
/// </summary>
[Route("PreEvaluacion")]
[HttpPost]
public IHttpActionResult PreEvaluacion(Persona persona)
{
if (persona == null) return BadRequest("Falta un dato");
if (persona.nNroDoc == "" || persona.nNroDoc == null) return BadRequest("Falta un dato.");
if (persona.nProducto == 0) return BadRequest("Falta un dato.");
string cRespuesta = "";
string cMensajeError = "";
bool bResultado = _evaluacion.preEvaluacion(persona.nNroDoc, persona.nProducto, persona.nModalidad, ref cRespuesta, ref cMensajeError);
if (!bResultado) return BadRequest(cMensajeError);
return Ok(new { cResultado = cRespuesta });
}
}
}
<file_sep>using System.Web.Http;
using TiboxWebApi.UnitOfWork;
namespace TiboxWebApi.WebApi.Controllers
{
[RoutePrefix("VarNegocio")]
[Authorize]
/// <summary>Read-only endpoint for business-variable records.</summary>
public class VarNegocioController : BaseController
{
    public VarNegocioController(IUnitOfWork unit) : base(unit)
    {
    }

    /// <summary>Returns the record with the given id; 400 for non-positive ids.</summary>
    [Route("{id}")]
    public IHttpActionResult Get(int id)
    {
        // Guard against non-positive ids before hitting the repository.
        if (id <= 0)
        {
            return BadRequest();
        }
        var entity = _unit.VarNegocio.GetEntityById(id);
        return Ok(entity);
    }
}
}
<file_sep>using System.Web.Http;
using TiboxWebApi.UnitOfWork;
namespace TiboxWebApi.WebApi.Controllers
{
[RoutePrefix("Menu")]
[Authorize]
/// <summary>Serves the menu entries visible to a given user.</summary>
public class MenuController : BaseController
{
    public MenuController(IUnitOfWork unit) : base(unit)
    {
    }

    /// <summary>Returns the menu items for the user identified by cCodUsu.</summary>
    [Route("{cCodUsu}")]
    [HttpGet]
    public IHttpActionResult Menu(string cCodUsu)
    {
        var menus = _unit.Menu.SelMenus(cCodUsu);
        return Ok(menus);
    }
}
}
<file_sep>namespace TiboxWebApi.WebApi.Utils
{
/// <summary>
/// Simple DTO pairing a report document's display name with its raw bytes.
/// </summary>
public class DocumentosReporte
{
// Display/file name of the document.
public string nombre { get; set; }
// Raw document content.
public byte[] doc { get; set; }
}
}<file_sep>using Owin;
using Microsoft.Owin;
using Microsoft.Owin.Security.OAuth;
using System;
using TiboxWebApi.WebApi.Provider;
namespace TiboxWebApi.WebApi
{
public partial class Startup
{
// This operates at the application level.
/// <summary>
/// Configures OWIN OAuth bearer-token authentication: a /token endpoint,
/// 1-day token lifetime, and a custom authorization provider.
/// </summary>
public void ConfigureOAuth(IAppBuilder app)
{
var OAuthServerOptions = new OAuthAuthorizationServerOptions()
{
// Secure connections:
AllowInsecureHttp = true,// with a digital certificate this should be false
TokenEndpointPath = new PathString("/token"), // route on which the token is requested
AccessTokenExpireTimeSpan = TimeSpan.FromDays(1), // token expiration time
Provider = new SimpleAuthorizationServerProvider() // provider used to issue the token, in this case a custom one
};
app.UseOAuthAuthorizationServer(OAuthServerOptions);
app.UseOAuthBearerAuthentication(new OAuthBearerAuthenticationOptions());
// With all of this we tell the Web API that authentication is token-based
// rather than basic user/password authentication.
}
}
}<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using TiboxWebApi.Models;
namespace TiboxWebApi.Repository.Interfaces
{
/// <summary>
/// Repository contract for the administrative-division hierarchy
/// (department → province → district lookups).
/// </summary>
public interface IZonaRepository: IRepository<Zona>
{
// All departments.
IEnumerable<Zona> selDepartamento();
// Provinces of one department.
IEnumerable<Zona> selProvincia(string cDepartamento);
// Districts of one province within a department.
IEnumerable<Zona> selDistrito(string cDepartamento, string cProvincia);
}
}
<file_sep>using Dapper;
using System.Collections.Generic;
using System.Data.SqlClient;
using TiboxWebApi.Models;
using TiboxWebApi.Repository.Interfaces;
using System.Data;
namespace TiboxWebApi.Repository.Repository
{
/// <summary>
/// Read-only lookups over the catalogue-code tables.
/// </summary>
public class CatalogoCodigoRepository : BaseRepository<CatalogoCodigos>, ICatalogoCodigoRepository
{
    /// <summary>Fetches the catalogue entries for the given catalogue code.</summary>
    public IEnumerable<CatalogoCodigos> selCatalogoCodigos(int nCodigo)
    {
        using (var conn = new SqlConnection(_connectionString))
        {
            var args = new DynamicParameters();
            args.Add("@nCodigo", nCodigo);
            return conn.Query<CatalogoCodigos>("dbo.WEBApi_selCatalogoCodigo_SP", args, commandType: CommandType.StoredProcedure);
        }
    }

    /// <summary>Fetches the list of housing types.</summary>
    public IEnumerable<CatalogoCodigos> selTipovivienda()
    {
        using (var conn = new SqlConnection(_connectionString))
        {
            return conn.Query<CatalogoCodigos>(
                "WebApiLucas_TipoViviendaLista_SP",
                null,
                commandType: CommandType.StoredProcedure);
        }
    }
}
}
| a72bd26126e195b0de1ddd1be553b4899a889fa4 | [
"Markdown",
"C#"
] | 72 | C# | SoyLucasAPP/SoyLucasWEBAPI | 91c294aea165d8e19b0eb100f3889b308a9be280 | c99ba452fe2b3de44c12346d030ada5485f36f22 |
refs/heads/master | <file_sep>package org.uci.luci.interchange.UI;
import org.uci.luci.interchange.Intersections.*;
import org.uci.luci.interchange.OSM.*;
import org.uci.luci.interchange.Driver.*;
import org.uci.luci.interchange.Vehicles.*;
import org.uci.luci.interchange.Graph.*;
import org.uci.luci.interchange.Util.*;
import org.uci.luci.interchange.Registry.*;
import org.uci.luci.interchange.Factory.*;
import javax.swing.JMenu;
import javax.swing.JMenuBar;
import javax.swing.JMenuItem;
import javax.swing.SwingUtilities;
import javax.swing.JFrame;
import javax.swing.JPanel;
import javax.swing.BorderFactory;
import java.awt.Color;
import java.awt.Dimension;
import java.awt.Graphics;
import java.awt.Graphics2D;
import java.awt.Color;
import java.awt.BasicStroke;
import java.awt.RenderingHints;
import java.text.DecimalFormat;
import java.util.List;
import java.util.LinkedList;
import java.util.Map;
import java.awt.event.*;
import java.awt.Point;
import java.awt.Font;
import java.awt.image.BufferedImage;
import java.awt.AlphaComposite;
import java.util.HashMap;
import java.util.ArrayList;
import java.util.Random;
import java.awt.*;
import java.awt.geom.*;
// import javax.swing.*;
public class AppWindow implements ActionListener {
JFrame f;
private AppPanel appPanel;
public AppWindow() throws InterruptedException {
appPanel = new AppPanel();
f = new JFrame("Interchange");
f.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
f.add(appPanel);
buildMenuBar();
f.pack();
f.setVisible(true);
f.setLocationRelativeTo(null);
}
private void buildMenuBar() {
JMenuBar menubar = new JMenuBar();
JMenu sim = new JMenu("Simulator");
sim.setMnemonic(KeyEvent.VK_S);
sim.add(makeMenuItem("Start"));
sim.add(makeMenuItem("Stop"));
sim.add(makeMenuItem("Reset"));
sim.addSeparator();
sim.add(makeMenuItem("Speed Up\t -"));
sim.add(makeMenuItem("Slow Down\t = or +"));
JMenu view = new JMenu("View");
view.setMnemonic(KeyEvent.VK_V);
view.add(makeMenuItem("Center Map"));
view.add(makeMenuItem("Zoom In\t\t ["));
view.add(makeMenuItem("Zoom Out\t ]"));
view.addSeparator();
view.add(makeMenuItem("Use White Background"));
view.add(makeMenuItem("Use Black Background"));
view.addSeparator();
view.add(makeMenuItem("Toggle Place Names"));
view.add(makeMenuItem("Toggle Vehicle Info\t v"));
JMenu debug = new JMenu("Debug");
debug.add(makeMenuItem("Toggle Vehicle Traces\t t"));
debug.add(makeMenuItem("Toggle Infrastructure Map\t m"));
debug.add(makeMenuItem("Toggle Nodes\t n"));
debug.add(makeMenuItem("Toggle Distances\t d"));
menubar.add(sim);
menubar.add(view);
menubar.add(debug);
f.setJMenuBar(menubar);
}
private JMenuItem makeMenuItem(String action) {
return makeMenuItem(action, this);
}
private JMenuItem makeMenuItem(String action, ActionListener listener) {
JMenuItem eMenuItem = new JMenuItem(action);
eMenuItem.addActionListener(listener);
return eMenuItem;
}
public void actionPerformed(ActionEvent e) {
// Menu item actions
String command = e.getActionCommand();
try {
if (command.equals("Start")) {
Global.simulator.unpause();
} else if (command.equals("Stop")) {
Global.simulator.pause();
} else if (command.equals("Reset")) {
// Global.simulator.resetSimulator();
} else if (command.equals("Toggle Vehicle Traces\t t")) {
appPanel.showVehicleDebugTraces = !appPanel.showVehicleDebugTraces;
} else if (command.equals("Speed Up\t = or +")) {
Global.simulator.changeSpeed(+10);
} else if (command.equals("Slow Down\t -")) {
Global.simulator.changeSpeed(-10);
} else if (command.equals("Zoom In\t\t ]")) {
System.out.println("here");
appPanel.zoomMap(+10);
// myPanel.centerMap();
} else if (command.equals("Zoom Out\t [")) {
appPanel.zoomMap(-10);
// myPanel.centerMap();
} else if (command.equals("Center Map")) {
appPanel.centerMap();
} else if (command.equals("Toggle Infrastructure Map\t m")) {
appPanel.showMap = !appPanel.showMap;
} else if (command.equals("Use Black Background")) {
appPanel.backgroundColor = Color.black;
} else if (command.equals("Use White Background")) {
appPanel.backgroundColor = Color.white;
} else if (command.equals("Toggle Nodes\t n")) {
appPanel.showAllNodes = !appPanel.showAllNodes;
} else if (command.equals("Toggle Place Names")) {
appPanel.showPlaceNames = !appPanel.showPlaceNames;
} else if (command.equals("Toggle Vehicle Info\t v")) {
appPanel.showVehicleInfo = !appPanel.showVehicleInfo;
} else if (command.equals("Toggle Distances\t d")) {
appPanel.showDistances = !appPanel.showDistances;
}
} catch (Exception ex) {
ex.printStackTrace();
}
}
}<file_sep>package org.uci.luci.interchange.Util;
public class Vector2d {
public double x, y;
public Vector2d(double x, double y) {
this.x = x;
this.y = y;
}
public double mag() {
return Math.sqrt(Math.pow(x, 2) + Math.pow(y, 2));
}
}<file_sep>package org.uci.luci.interchange.Driver.VehicleDriverBehavior;
public class V2IMessage {
public int bid = 0;
}
<file_sep>package org.uci.luci.interchange.Intersections;
import org.uci.luci.interchange.Util.*;
import java.util.Random;
public class LightFSM {
public static enum LIGHT {
RED, GREEN, YELLOW
};
private double deadTimeDur = 0;
private double throughsGreenDur, throughsYellowDur;
private double leftsGreenDur, leftsYellowDur;
private double lastStateChangeAt;
private String state;
public LightFSM(double throughsGreenDur, double throughsYellowDur,
double leftsGreenDur, double leftsYellowDur) {
this.throughsGreenDur = throughsGreenDur;
this.throughsYellowDur = throughsYellowDur;
this.leftsGreenDur = leftsGreenDur;
this.leftsYellowDur = leftsYellowDur;
Random randomGenerator = Utils.randomNumberGenerator();
lastStateChangeAt = 0;
int randInt = randomGenerator.nextInt(9);
if (randInt == 0) state = "all_red";
else if (randInt == 1) state = "all_red";
else if (randInt == 2) state = "lefts_green1";
else if (randInt == 3) state = "lefts_yellow1";
else if (randInt == 4) state = "throughs_green1";
else if (randInt == 5) state = "throughs_yellow1";
else if (randInt == 6) state = "lefts_green2";
else if (randInt == 7) state = "lefts_yellow2";
else if (randInt == 8) state = "throughs_green2";
else if (randInt == 9) state = "throughs_yellow2";
else state = "all_red";
}
public String getState() {
return state;
}
public LIGHT getLightForRights1() {
return LIGHT.GREEN;
}
public LIGHT getLightForRights2() {
return LIGHT.GREEN;
}
public LIGHT getLightForThrough1() {
if (state.equals("throughs_green1"))
return LIGHT.GREEN;
else if (state.equals("throughs_yellow1"))
return LIGHT.YELLOW;
return LIGHT.RED;
}
public LIGHT getLightForLefts1() {
if (state.equals("lefts_green1"))
return LIGHT.GREEN;
else if (state.equals("lefts_yellow1"))
return LIGHT.YELLOW;
return LIGHT.RED;
}
public LIGHT getLightForThrough2() {
if (state.equals("throughs_green2"))
return LIGHT.GREEN;
else if (state.equals("throughs_yellow2"))
return LIGHT.YELLOW;
return LIGHT.RED;
}
public LIGHT getLightForLefts2() {
if (state.equals("lefts_green2"))
return LIGHT.GREEN;
else if (state.equals("lefts_yellow2"))
return LIGHT.YELLOW;
return LIGHT.RED;
}
public void tick(double simTime, double tickLength, int tick) {
double sinceLastStateChange = simTime - lastStateChangeAt;
if (state.equals("all_red") && sinceLastStateChange > deadTimeDur) {
state = "lefts_green1";
lastStateChangeAt = simTime;
}
else if (state.equals("lefts_green1") && sinceLastStateChange > leftsGreenDur) {
state = "lefts_yellow1";
lastStateChangeAt = simTime;
}
else if (state.equals("lefts_yellow1") && sinceLastStateChange > leftsYellowDur) {
state = "throughs_green1";
lastStateChangeAt = simTime;
}
else if (state.equals("throughs_green1") && sinceLastStateChange > throughsGreenDur) {
state = "throughs_yellow1";
lastStateChangeAt = simTime;
}
else if (state.equals("throughs_yellow1") && sinceLastStateChange > throughsYellowDur) {
state = "lefts_green2";
lastStateChangeAt = simTime;
}
else if (state.equals("lefts_green2") && sinceLastStateChange > leftsGreenDur) {
state = "lefts_yellow2";
lastStateChangeAt = simTime;
}
else if (state.equals("lefts_yellow2") && sinceLastStateChange > leftsYellowDur) {
state = "throughs_green2";
lastStateChangeAt = simTime;
}
else if (state.equals("throughs_green2") && sinceLastStateChange > throughsGreenDur) {
state = "throughs_yellow2";
lastStateChangeAt = simTime;
}
else if (state.equals("throughs_yellow2") && sinceLastStateChange > throughsYellowDur) {
state = "all_red";
lastStateChangeAt = simTime;
}
}
}<file_sep>package org.uci.luci.interchange.Driver;
import org.uci.luci.interchange.Graph.*;
import org.uci.luci.interchange.Util.*;
import org.uci.luci.interchange.Exception.*;
import java.util.Random;
import java.util.List;
import java.util.LinkedList;
public interface Navigation {
public Node nextNodeOnPath(String curNodeId);
public String getOrigin();
public String getDestination();
public List<Node> getPath();
}<file_sep>package org.uci.luci.interchange.Intersections;
import org.uci.luci.interchange.Driver.VehicleDriverBehavior.V2IMessage;
import org.uci.luci.interchange.Graph.*;
import org.uci.luci.interchange.Util.*;
import org.uci.luci.interchange.Vehicles.*;
import java.util.List;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Random;
public class HighwayRamp extends Intersection {
public HighwayRamp(String rootNodeId) {
super(rootNodeId);
}
// 0 = green, 1 = yellow, 2 = red
// public int getLightForWayOnLane(Way w, int lane) {
public LightFSM.LIGHT getLightForWayOnLane(Way w, String originNodeId,
String toNodeId, int lane) {
return LightFSM.LIGHT.GREEN;
}
public String getState() {
return "Green";
}
public void tick(double simTime, double tickLength, int tick) {
}
public void vehicleIsApproaching(Vehicle v, String originNodeId,
String toNodeId, int lane, V2IMessage msg) {
}
public void vehicleIsLeaving(Vehicle v) {
}
}
<file_sep>package org.uci.luci.interchange.Util;
import org.uci.luci.interchange.OSM.*;
import org.uci.luci.interchange.Simulator;
public class Global {
public static OpenStreetMap openStreetMap;
public static Simulator simulator;
public static Projection projection = new MercatorProjection();
public static double maxLat;
public static double minLat;
public static double maxLon;
public static double minLon;
// public static boolean runSim = true;
}<file_sep>package org.uci.luci.interchange.Factory;
import org.uci.luci.interchange.Vehicles.*;
import org.uci.luci.interchange.Registry.*;
import org.uci.luci.interchange.Exception.NoPathToDestinationException;
import org.uci.luci.interchange.Graph.*;
import org.uci.luci.interchange.Util.*;
import java.util.Random;
import java.util.List;
public class VehicleFactory {
public static Vehicle createVehicleAtRandomPoint()
throws NoPathToDestinationException {
Random generator = Utils.randomNumberGenerator();
List<Node> nodes = Global.openStreetMap.nodes();
Node randomNode = nodes.get(generator.nextInt(nodes.size()));
Node randomNextNode = randomNode.connectedNodes.get(generator
.nextInt(randomNode.connectedNodes.size()));
// double m = (randomNextNode.lon - randomNode.lon) / (randomNextNode.lat - randomNode.lat);
// double p = (double)generator.nextInt(101) / 100.0;
// double d_lat = (randomNextNode.lat - randomNode.lat) * p;
// double d_lon = d_lat * m;
//
// if (randomNextNode.lon - randomNode.lon < 0) {
// d_lat = -d_lat;
// d_lon = -d_lon;
// }
double d_lat = 0;
double d_lon = 0;
Vehicle vehicle = new Vehicle(randomNode.lat+d_lat, randomNode.lon+d_lon,
randomNode.id,
// randomNextNode.id,
// this isn't really right because node.way might not refer to
// the right way at intersections
generator.nextInt(Oracle.wayBetweenNodes(randomNode.id,
randomNextNode.id).lanes));
VehicleRegistry.registerVehicle(vehicle);
return vehicle;
}
public static Vehicle createVehicleAtNode(Node n)
throws NoPathToDestinationException {
Random generator = Utils.randomNumberGenerator();
List<Node> nodes = Global.openStreetMap.nodes();
Node randomNode = n;
// Node randomNextNode = randomNode.connectedNodes.get(2);
Node randomNextNode = randomNode.connectedNodes.get(generator
.nextInt(randomNode.connectedNodes.size()));
Vehicle vehicle = new Vehicle(randomNode.lat, randomNode.lon,
randomNode.id,
// randomNextNode.id,
// this isn't really right because node.way might not refer to
// the right way at intersections
generator.nextInt(Oracle.wayBetweenNodes(randomNode.id,
randomNextNode.id).lanes));
VehicleRegistry.registerVehicle(vehicle);
return vehicle;
}
public static Vehicle createVehicleAt(String n1id, String n2id)
throws NoPathToDestinationException {
Random generator = Utils.randomNumberGenerator();
Node randomNode = Global.openStreetMap.getNode(n1id);
Node randomNextNode = Global.openStreetMap.getNode(n2id);
if (!Oracle.hasRoomForCarAtNode(randomNode))
return null;
Vehicle vehicle = new Vehicle(randomNode.lat, randomNode.lon,
randomNode.id,
// randomNextNode.id,
// this isn't really right because node.way might not refer to
// the right way at intersections
generator.nextInt(Oracle.wayBetweenNodes(randomNode.id,
randomNextNode.id).lanes));
VehicleRegistry.registerVehicle(vehicle);
return vehicle;
}
public static void destroyVehicle(Vehicle v) {
Oracle.deregisterVehicleOrigin(v.vin, v.getOriginNode().id);
if (v.getDestinationNode() != null)
Oracle.deregisterVehicleOrigin(v.vin, v.getDestinationNode().id);
VehicleRegistry.deregisterVehicle(v);
}
}<file_sep>package org.uci.luci.interchange.Intersections;
import org.uci.luci.interchange.Intersections.LightFSM.LIGHT;
import org.uci.luci.interchange.Util.*;
import java.util.ArrayList;
import java.util.Random;
public class LoopLightFSM {
private double deadTimeDur = 0;
private double throughsGreenDur, throughsYellowDur;
private double leftsGreenDur, leftsYellowDur;
private double lastStateChangeAt;
private String state;
private ArrayList<String> vehiclesOnLeftGroup1 = new ArrayList<String>();
private ArrayList<String> vehiclesOnThroughGroup1 = new ArrayList<String>();
private ArrayList<String> vehiclesOnLeftGroup2 = new ArrayList<String>();
private ArrayList<String> vehiclesOnThroughGroup2 = new ArrayList<String>();
public LoopLightFSM(double throughsGreenDur, double throughsYellowDur,
double leftsGreenDur, double leftsYellowDur) {
this.throughsGreenDur = throughsGreenDur;
this.throughsYellowDur = throughsYellowDur;
this.leftsGreenDur = leftsGreenDur;
this.leftsYellowDur = leftsYellowDur;
Random randomGenerator = Utils.randomNumberGenerator();
lastStateChangeAt = 0;
int randInt = randomGenerator.nextInt(9);
if (randInt == 0)
state = "all_red";
else if (randInt == 1)
state = "all_red";
else if (randInt == 2)
state = "lefts_green1";
else if (randInt == 3)
state = "lefts_yellow1";
else if (randInt == 4)
state = "throughs_green1";
else if (randInt == 5)
state = "throughs_yellow1";
else if (randInt == 6)
state = "lefts_green2";
else if (randInt == 7)
state = "lefts_yellow2";
else if (randInt == 8)
state = "throughs_green2";
else if (randInt == 9)
state = "throughs_yellow2";
else
state = "all_red";
}
public void detectVehicleOnLeftGroup1(String vin) {
vehiclesOnLeftGroup1.add(vin);
}
public void detectVehicleOnThroughGroup1(String vin) {
vehiclesOnThroughGroup1.add(vin);
}
public void detectVehicleOnLeftGroup2(String vin) {
vehiclesOnLeftGroup2.add(vin);
}
public void detectVehicleOnThroughGroup2(String vin) {
vehiclesOnThroughGroup2.add(vin);
}
public void undetectVehicle(String vin) {
vehiclesOnLeftGroup1.add(vin);
vehiclesOnThroughGroup1.remove(vin);
vehiclesOnLeftGroup2.remove(vin);
vehiclesOnThroughGroup2.remove(vin);
}
public String getState() {
return state;
}
public LIGHT getLightForRights1() {
return LIGHT.GREEN;
}
public LIGHT getLightForRights2() {
return LIGHT.GREEN;
}
public LIGHT getLightForThrough1() {
if (state.equals("throughs_green1"))
return LIGHT.GREEN;
else if (state.equals("throughs_yellow1"))
return LIGHT.YELLOW;
return LIGHT.RED;
}
public LIGHT getLightForLefts1() {
if (state.equals("lefts_green1"))
return LIGHT.GREEN;
else if (state.equals("lefts_yellow1"))
return LIGHT.YELLOW;
return LIGHT.RED;
}
public LIGHT getLightForThrough2() {
if (state.equals("throughs_green2"))
return LIGHT.GREEN;
else if (state.equals("throughs_yellow2"))
return LIGHT.YELLOW;
return LIGHT.RED;
}
public LIGHT getLightForLefts2() {
if (state.equals("lefts_green2"))
return LIGHT.GREEN;
else if (state.equals("lefts_yellow2"))
return LIGHT.YELLOW;
return LIGHT.RED;
}
public void tick(double simTime, double tickLength, int tick) {
double sinceLastStateChange = simTime - lastStateChangeAt;
if (state.equals("all_red") && sinceLastStateChange > deadTimeDur) {
if (vehiclesOnLeftGroup1.size() > 0)
state = "lefts_green1";
else if (vehiclesOnThroughGroup1.size() > 0)
state = "throughs_green1";
else if (vehiclesOnLeftGroup2.size() > 0)
state = "lefts_green2";
else if (vehiclesOnThroughGroup2.size() > 0)
state = "throughs_green2";
else
state = "lefts_green1";
lastStateChangeAt = simTime;
} else if (state.equals("lefts_green1")
&& sinceLastStateChange > leftsGreenDur) {
state = "lefts_yellow1";
lastStateChangeAt = simTime;
} else if (state.equals("lefts_yellow1")
&& sinceLastStateChange > leftsYellowDur) {
state = "throughs_green1";
lastStateChangeAt = simTime;
} else if (state.equals("throughs_green1")) {
if (sinceLastStateChange > throughsGreenDur
|| vehiclesOnThroughGroup1.size() == 0
&& (vehiclesOnLeftGroup2.size() > 0 || vehiclesOnThroughGroup2
.size() > 0)) {
state = "throughs_yellow1";
lastStateChangeAt = simTime;
}
} else if (state.equals("throughs_yellow1")
&& sinceLastStateChange > throughsYellowDur) {
state = "lefts_green2";
lastStateChangeAt = simTime;
} else if (state.equals("lefts_green2")
&& sinceLastStateChange > leftsGreenDur) {
state = "lefts_yellow2";
lastStateChangeAt = simTime;
} else if (state.equals("lefts_yellow2")
&& sinceLastStateChange > leftsYellowDur) {
state = "throughs_green2";
lastStateChangeAt = simTime;
} else if (state.equals("throughs_green2")) {
if (sinceLastStateChange > throughsGreenDur
|| vehiclesOnThroughGroup2.size() == 0
&& (vehiclesOnLeftGroup1.size() > 0 || vehiclesOnThroughGroup1
.size() > 0)) {
state = "throughs_yellow2";
lastStateChangeAt = simTime;
}
} else if (state.equals("throughs_yellow2")
&& sinceLastStateChange > throughsYellowDur) {
state = "all_red";
lastStateChangeAt = simTime;
}
}
}<file_sep>package org.uci.luci.interchange.UI;
import java.awt.AlphaComposite;
import java.awt.BasicStroke;
import java.awt.Color;
import java.awt.Dimension;
import java.awt.Font;
import java.awt.Graphics;
import java.awt.Graphics2D;
import java.awt.Point;
import java.awt.RenderingHints;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.awt.event.ComponentAdapter;
import java.awt.event.ComponentEvent;
import java.awt.event.KeyEvent;
import java.awt.event.KeyListener;
import java.awt.event.MouseEvent;
import java.awt.event.MouseListener;
import java.awt.event.MouseMotionListener;
import java.awt.event.MouseWheelEvent;
import java.awt.event.MouseWheelListener;
import java.awt.image.BufferedImage;
import java.text.DecimalFormat;
import java.util.Map;
import javax.swing.BorderFactory;
import javax.swing.JPanel;
import org.uci.luci.interchange.Driver.VehicleDriver;
import org.uci.luci.interchange.Graph.Node;
import org.uci.luci.interchange.Graph.Way;
import org.uci.luci.interchange.OSM.OpenStreetMap;
import org.uci.luci.interchange.Registry.*;
import org.uci.luci.interchange.Util.*;
import org.uci.luci.interchange.Vehicles.Vehicle;
import org.uci.luci.interchange.Driver.VehicleDriverBehavior.*;
import org.uci.luci.interchange.Intersections.*;
/**
 * Main rendering surface for the simulator UI.
 *
 * Drawing is split into three cached ARGB layers that are recomposited on
 * every paint:
 *   - map:     the static road network parsed from OSM (redrawn each frame
 *              into a cached image; the image itself is recreated on resize)
 *   - overlay: dynamic content — intersection lights, vehicles, debug traces
 *   - hud:     textual heads-up display (simulator time, hover node probing)
 *
 * The panel also owns all pan/zoom state (via WindowProjector) and the
 * mouse/keyboard interaction that manipulates it.
 */
public class AppPanel extends JPanel {
	// Converts projected map coordinates to window pixels (scale + pan offsets).
	private WindowProjector windowProjector;
	OpenStreetMap osm;
	// Cached layer images and their graphics contexts; set to null on resize
	// so they are lazily recreated at the new size.
	private BufferedImage map;
	private Graphics2D mapG2D;
	private BufferedImage overlay;
	private Graphics2D overlayG2D;
	private BufferedImage hud;
	private Graphics2D hudG2D;
	// Mouse-drag panning state: where the drag started, and the view offset
	// captured at that moment.
	Point draggingPointOrigin;
	int draggingOffsetX;
	int draggingOffsetY;
	// Last mouse position converted to map coordinates; used by paintHUD to
	// report nodes near the cursor.
	NodePoint highlightPoint;
	public Color backgroundColor = Color.WHITE;
	// Display toggles, flipped by keyboard shortcuts (see keyPressed below).
	public boolean showMap = true;
	public boolean showAllNodes = false;
	public boolean showPlaceNames = false;
	public boolean showVehicleInfo = false;
	public boolean showVehicleDebugTraces = false;
	public boolean showDistances = false;
	public boolean showIntersectionInfo = false;
	/**
	 * Builds the panel against the globally loaded map, wires up all input
	 * listeners, and starts the repaint timer (~60 fps).
	 */
	public AppPanel() {
		this.osm = Global.openStreetMap;
		windowProjector = new WindowProjector(osm.projectedMinY,
				osm.projectedMaxY, osm.projectedMinX, osm.projectedMaxX);
		setBorder(BorderFactory.createLineBorder(Color.black));
		this.requestFocus();
		registerListeners();
		scheduleRepaintTimer();
	}
	/**
	 * Installs component, mouse, mouse-motion, mouse-wheel and key listeners.
	 * Resizing invalidates the cached map/overlay images; dragging pans the
	 * view; the wheel zooms about the cursor; keys toggle display flags,
	 * change simulation speed, zoom, and pan.
	 */
	private void registerListeners() {
		addComponentListener(new ComponentAdapter() {
			public void componentResized(ComponentEvent e) {
				// Force the cached layers to be rebuilt at the new size.
				map = null;
				overlay = null;
			}
		});
		addMouseListener(new MouseListener() {
			public void mouseClicked(MouseEvent e) {
			}
			public void mousePressed(MouseEvent e) {
			}
			public void mouseReleased(MouseEvent e) {
				// End of a pan gesture.
				draggingPointOrigin = null;
			}
			public void mouseEntered(MouseEvent e) {
			}
			public void mouseExited(MouseEvent e) {
			}
		});
		addMouseMotionListener(new MouseMotionListener() {
			public void mouseMoved(MouseEvent e) {
				// Track the cursor in map coordinates for the HUD.
				highlightPoint = unscaleXY(e.getX(), e.getY());
			}
			public void mouseDragged(MouseEvent e) {
				if (draggingPointOrigin == null) {
					// First drag event: remember the anchor relative to the
					// current view offset.
					draggingPointOrigin = e.getPoint();
					draggingOffsetX = (int) (draggingPointOrigin.getX() - windowProjector.offsetX);
					draggingOffsetY = (int) (draggingPointOrigin.getY() - windowProjector.offsetY);
				} else {
					// Subsequent events: pan so the anchor stays under the cursor.
					windowProjector.offsetX = e.getX() - draggingOffsetX;
					windowProjector.offsetY = e.getY() - draggingOffsetY;
				}
				repaint();
			}
		});
		addMouseWheelListener(new MouseWheelListener() {
			public void mouseWheelMoved(MouseWheelEvent e) {
				// Square the rotation for acceleration, preserving its sign.
				int steps = (int) Math.pow(e.getWheelRotation(), 2)
						* (e.getWheelRotation() < 0 ? -1 : 1);
				zoomMap(e.getX(), e.getY(), steps);
			}
		});
		addKeyListener(new KeyListener() {
			public void keyPressed(KeyEvent e) {
				switch (e.getKeyCode()) {
				// '-' key: slow the simulator down.
				case 45:
					if (Global.simulator == null) {
						System.out.println("why is simulator null??");
					}
					Global.simulator.changeSpeed(+1);
					break;
				// = Sign which is same as plus, but without shift
				case 61:
					Global.simulator.changeSpeed(-1);
					break;
				// '[' and ']': zoom out / in.
				case 91:
					zoomMap(-3);
					break;
				case 93:
					zoomMap(3);
					break;
				case 38: // up
					windowProjector.offsetY += 20;
					break;
				case 40: // down
					windowProjector.offsetY -= 20;
					break;
				case 37: // left
					windowProjector.offsetX += 20;
					break;
				case 39: // right
					windowProjector.offsetX -= 20;
					break;
				case (int) 'V':
					showVehicleInfo = !showVehicleInfo;
					break;
				case (int) 'M':
					showMap = !showMap;
					break;
				case (int) 'D':
					showDistances = !showDistances;
					break;
				case (int) 'T':
					showVehicleDebugTraces = !showVehicleDebugTraces;
					break;
				case (int) 'N':
					showAllNodes = !showAllNodes;
					break;
				case (int) 'I':
					showIntersectionInfo = !showIntersectionInfo;
					break;
				}
			}
			public void keyReleased(KeyEvent e) {
			}
			public void keyTyped(KeyEvent e) {
			}
		});
	}
	// Repaints the panel roughly every 16ms (~60 fps).
	private void scheduleRepaintTimer() {
		javax.swing.Timer t = new javax.swing.Timer(16, new ActionListener() {
			public void actionPerformed(ActionEvent e) {
				repaint();
			}
		});
		t.start();
	}
	// Centers the view on the panel, offsetting by half the current scale.
	public void centerMap() {
		windowProjector.offsetX = (int) (getSize().getWidth() / 2)
				- windowProjector.scale / 2;
		windowProjector.offsetY = (int) (getSize().getHeight() / 2)
				- windowProjector.scale / 2;
		repaint();
	}
	/**
	 * Zooms by the given number of steps, keeping the map point under window
	 * pixel (x, y) fixed. Scale is clamped to [50, 1000000].
	 */
	public void zoomMap(int x, int y, int steps) {
		int newScale = windowProjector.scale + (steps * 5);
		if (newScale < 50)
			newScale = 50;
		if (newScale > 1000000)
			newScale = 1000000;
		// Remember which map point is under the cursor, rescale, then pan so
		// that point ends up under the cursor again.
		NodePoint unscaledXY = unscaleXY(x, y);
		windowProjector.scale = newScale;
		NodePoint whereThePointIsNow = scaledXY(unscaledXY.x, unscaledXY.y);
		windowProjector.offsetX += x - whereThePointIsNow.x;
		windowProjector.offsetY += y - whereThePointIsNow.y;
		repaint();
	}
	// Zooms about the current view offset rather than a cursor position.
	public void zoomMap(int steps) {
		zoomMap(windowProjector.offsetX, windowProjector.offsetY, steps);
	}
	public Dimension getPreferredSize() {
		return new Dimension(800, 600);
	}
	// Map coordinates -> window pixels.
	public NodePoint scaledXY(double x, double y) {
		return windowProjector.scaledXY(x, y);
	}
	// Window pixels -> map coordinates.
	public NodePoint unscaleXY(int x, int y) {
		return windowProjector.unscaleXY(x, y);
	}
	/**
	 * Redraws the static road-network layer into the cached `map` image:
	 * way segments (thicker for multi-lane ways), optional per-segment
	 * distances, optional node markers and place names.
	 * NOTE: the Graphics parameter is unused; drawing goes to mapG2D. Kept
	 * for signature compatibility with the call in paintComponent.
	 */
	private void paintMap(Graphics g_old) {
		if (map == null) {
			map = new BufferedImage(getWidth(), getHeight(),
					BufferedImage.TYPE_INT_ARGB);
			mapG2D = map.createGraphics();
			mapG2D.setRenderingHint(RenderingHints.KEY_ANTIALIASING,
					RenderingHints.VALUE_ANTIALIAS_ON);
			AlphaComposite ac = AlphaComposite
					.getInstance(AlphaComposite.SRC_OVER);
			mapG2D.setComposite(ac);
		}
		Graphics2D g2d = mapG2D;
		// Clear to transparent so the background color shows through.
		g2d.setBackground(new Color(255, 255, 255, 0));
		g2d.clearRect(0, 0, getWidth(), getHeight());
		g2d.setStroke(new BasicStroke(1f));
		g2d.setColor(new Color(200, 200, 255));
		// ways
		Node _last_n = null;
		Node _n;
		Way _w;
		for (int i = 0; i < osm.ways.size(); i++) {
			_w = osm.ways.get(i);
			if (_w.lanes > 1)
				g2d.setStroke(new BasicStroke(3f));
			else
				g2d.setStroke(new BasicStroke(1f));
			if (_w.oneway) {
				// One-way rendering is not implemented (see README limitations).
				System.out.println("Warning: Not drawing one-way.");
			} else {
				// Draw each consecutive node pair of the way as a segment.
				for (int j = 0; j < _w.nd.size(); j++) {
					_n = osm.getNode(_w.nd.get(j));
					if (j == 0) {
						_last_n = _n;
					} else {
						NodePoint _last_np = scaledXY(_last_n.x, _last_n.y);
						NodePoint _np = scaledXY(_n.x, _n.y);
						g2d.setColor(Color.black);
						g2d.drawLine((int) _last_np.x, (int) _last_np.y,
								(int) _np.x, (int) _np.y);
						if (showDistances) {
							// this will draw distances
							g2d.setColor(Color.black);
							double d = Utils.distance(_last_n.lat, _last_n.lon,
									_n.lat, _n.lon, 'K');
							g2d.setFont(new Font("TimesRoman", Font.BOLD, 8));
							DecimalFormat twoDForm = new DecimalFormat("#.##");
							g2d.drawString(Double.valueOf(twoDForm.format(d))
									+ " km",
									(int) (_last_np.x + _np.x) / 2 + 4,
									(int) (_last_np.y + _np.y) / 2 + 4);
						}
						_last_n = _n;
					}
				}
			}
		}
		if (showAllNodes) {
			// nodes
			g2d.setStroke(new BasicStroke(1f));
			g2d.setColor(Color.BLUE);
			g2d.setFont(new Font("TimesRoman", Font.PLAIN, 10));
			int width = 5;
			for (Map.Entry<String, Node> entry : osm.nodeHash.entrySet()) {
				Node n = entry.getValue();
				NodePoint p = scaledXY(n.x, n.y);
				// TODO: get rid of this flag
				if (n.flaggedForMerge)
					g2d.setColor(Color.RED);
				else
					g2d.setColor(Color.BLUE);
				// System.out.println(n.tagCloud());
				// Traffic-signal nodes are drawn larger and red; note this
				// overrides the flaggedForMerge coloring chosen just above.
				if (n.hasTag("highway", "traffic_signals")) {
					g2d.setColor(Color.RED);
					width = 10;
				} else {
					width = 5;
					g2d.setColor(Color.BLUE);
				}
				g2d.fillOval((int) p.x + width / 2, (int) p.y + width / 2,
						width, width);
				// g2d.setColor(Color.RED);
				// g2d.drawString(n.id, (int) p.x + 5, (int) p.y + 5);
			}
		}
		if (showPlaceNames) {
			g2d.setColor(Color.BLACK);
			g2d.setFont(new Font("TimesRoman", Font.PLAIN, 12));
			for (Map.Entry<String, Node> entry : osm.nodeHash.entrySet()) {
				Node n = entry.getValue();
				String name = n.getName();
				if (name == null)
					continue;
				NodePoint p = scaledXY(n.x, n.y);
				g2d.drawString(name, (int) p.x, (int) p.y - 2);
			}
		}
	}
	/**
	 * Redraws the dynamic layer: intersection light states (colored lines
	 * from the root node toward each approach) and all vehicles, with
	 * optional per-vehicle info text and debug traces.
	 */
	private void paintOverlay() throws Exception {
		if (overlay == null) {
			overlay = new BufferedImage(getWidth(), getHeight(),
					BufferedImage.TYPE_INT_ARGB);
			overlayG2D = overlay.createGraphics();
			overlayG2D.setRenderingHint(RenderingHints.KEY_ANTIALIASING,
					RenderingHints.VALUE_ANTIALIAS_ON);
			AlphaComposite ac = AlphaComposite
					.getInstance(AlphaComposite.SRC_OVER);
			overlayG2D.setComposite(ac);
		}
		Graphics2D g2d = overlayG2D;
		g2d.setBackground(new Color(255, 255, 255, 0));
		g2d.clearRect(0, 0, getWidth(), getHeight());
		g2d.setStroke(new BasicStroke(4f));
		for (Intersection i : IntersectionRegistry.allRegisteredIntersections()) {
			String rootNodeId = i.getRootNodeId();
			Node rootNode = osm.getNode(rootNodeId);
			// Highway ramps have no lights to draw.
			if (i instanceof HighwayRamp) {
				continue;
			}
			// // || i instanceof ThreeWayBiddingIntersection
			// if (i instanceof FourWayBiddingIntersection) {
			// FourWayBiddingIntersection ii = (FourWayBiddingIntersection)i;
			// NodePoint scaledRootNode = scaledXY(rootNode.lat,rootNode.lon);
			// g2d.setColor(Color.black);
			// g2d.drawString("n/s " + ii.nsBidTotal() + "; e/w " +
			// ii.ewBidTotal(), (int)scaledRootNode.x+20,
			// (int)scaledRootNode.y-20);
			// }
			NodePoint rnP = scaledXY(rootNode.x, rootNode.y);
			for (Node connectedNode : rootNode.connectedNodes) {
				Way w = Oracle.wayBetweenNodes(rootNode.id, connectedNode.id);
				if (w.oneway) {
					System.out
							.println("Warning: not drawing one way traffic lights");
				} else {
					// Light state for this approach; drawn as a short colored
					// stub from just outside the root node to the root node.
					LightFSM.LIGHT light = i.getLightForWayOnLane(null,
							connectedNode.id, null, 0);
					NodePoint ccPUS = distanceFromPointInDirectionOfPoint(
							rootNode.x, rootNode.y, connectedNode.x,
							connectedNode.y, 10);
					NodePoint cnP = scaledXY(ccPUS.x, ccPUS.y);
					if (light == LightFSM.LIGHT.GREEN) {
						g2d.setColor(new Color(0, 255, 0, 100));
					} else if (light == LightFSM.LIGHT.YELLOW) {
						g2d.setColor(new Color(255, 255, 0, 100));
					} else if (light == LightFSM.LIGHT.RED) {
						g2d.setColor(new Color(255, 0, 0, 100));
					}
					// if (i instanceof FiveWayIntersection) {
					// FiveWayIntersection ii = (FiveWayIntersection)i;
					// int size = 10;
					// if (connectedNode.id.equals(ii.eastNodeId) ||
					// connectedNode.id.equals(ii.westNodeId)) {
					// g2d.setColor(Color.MAGENTA);
					// }
					// else if (connectedNode.id.equals(ii.northNodeId) ||
					// connectedNode.id.equals(ii.southNodeId)) {
					// g2d.setColor(Color.BLUE);
					// }
					// else if (connectedNode.id.equals(ii.branchNodeId)) {
					// g2d.setColor(Color.RED);
					// }
					// g2d.fillOval((int) cnP.x - size / 2, (int) cnP.y - size /
					// 2, size, size);
					// }
					g2d.drawLine((int) cnP.x, (int) cnP.y, (int) rnP.x,
							(int) rnP.y);
				}
			}
			if (showIntersectionInfo) {
				g2d.setColor(Color.BLACK);
				g2d.setFont(new Font("TimesRoman", Font.BOLD, 8));
				g2d.drawString(i.getState(), (int) rnP.x + 5, (int) rnP.y - 5);
			}
		}
		g2d.setStroke(new BasicStroke(1f));
		for (VehicleDriver d : VehicleDriverRegistry.allLicensedDrivers()) {
			Vehicle v = d.vehicle;
			NodePoint p = null;
			// NOTE(review): both branches are identical; the direction check
			// appears to have no effect. Possibly a leftover from lane-offset
			// rendering — confirm before simplifying.
			if (v.isGoingForwardOnWay()) {
				p = scaledXY(Global.projection.convertLongToX(v.lon),
						Global.projection.convertLatToY(v.lat));
			} else {
				p = scaledXY(Global.projection.convertLongToX(v.lon),
						Global.projection.convertLatToY(v.lat));
			}
			// Vehicle dot: magenta (large) when paused, cyan for driver
			// groups 1/2, blue otherwise.
			int size = 5;
			if (v.paused()) {
				size = 10;
				g2d.setColor(Color.MAGENTA);
				g2d.fillOval((int) p.x - size / 2, (int) p.y - size / 2, size,
						size);
			} else if (d.driverGroup == 1 || d.driverGroup == 2) {
				g2d.setColor(Color.CYAN);
				g2d.fillOval((int) p.x - size / 2, (int) p.y - size / 2, size,
						size);
			} else {
				g2d.setColor(Color.BLUE);
				g2d.fillOval((int) p.x - size / 2, (int) p.y - size / 2, size,
						size);
			}
			if (showVehicleInfo) {
				// Stacked text block of vehicle/driver diagnostics next to
				// the vehicle dot.
				g2d.setColor(Color.BLACK);
				g2d.setFont(new Font("TimesRoman", Font.BOLD, 8));
				g2d.drawString("vin " + v.vin, (int) p.x + 5, (int) p.y);
				if (v.navigation != null) {
					g2d.drawString("origin "
							+ (v.navigation.getOrigin() == null ? "(none)"
									: v.navigation.getOrigin()), (int) p.x + 5,
							(int) p.y + 10);
					g2d.drawString("dest "
							+ (v.navigation.getDestination() == null ? "(none)"
									: v.navigation.getDestination()),
							(int) p.x + 5, (int) p.y + 20);
				} else {
					g2d.drawString("origin (none)", (int) p.x + 5,
							(int) p.y + 10);
					g2d.drawString("dest (none)", (int) p.x + 5, (int) p.y + 20);
				}
				g2d.drawString("km/h " + v.speed(), (int) p.x + 5,
						(int) p.y + 30);
				g2d.drawString("km/s^2 " + v.acceleration, (int) p.x + 5,
						(int) p.y + 40);
				if (d.activeBehavior instanceof FollowingBehavior)
					g2d.drawString("behavior Following", (int) p.x + 5,
							(int) p.y + 50);
				else if (d.activeBehavior instanceof GeneralAccelerationBehavior)
					g2d.drawString("behavior General", (int) p.x + 5,
							(int) p.y + 50);
				else if (d.activeBehavior instanceof IntersectionCrossingBehavior)
					g2d.drawString("behavior Intersection Crossing",
							(int) p.x + 5, (int) p.y + 50);
				else if (d.activeBehavior instanceof ReachedDestinationBehavior)
					g2d.drawString("behavior Reached Destination",
							(int) p.x + 5, (int) p.y + 50);
				g2d.drawString("total delay (sec) " + v.vehicleTotalWaitTime,
						(int) p.x + 5, (int) p.y + 60);
				g2d.drawString("behavior is " + d.activeBehavior.state(),
						(int) p.x + 5, (int) p.y + 70);
				g2d.drawString("traveled dist "
						+ v.vehicleTotalTraveledDistance, (int) p.x + 5,
						(int) p.y + 80);
				g2d.drawString("rushedness " + d.rushedness(), (int) p.x + 5,
						(int) p.y + 90);
				// if (v.getWay() != null) {
				// g2d.drawString("lae " + v.getOnLaneNumber() + " (total "
				// + v.getWay().lanes + ")", (int) p.x + 5,
				// (int) p.y + 100);
				// }
				if (v.getNextIntersection() != null
						&& v.getOriginNode() != null
						&& v.getNodeAfterNextIntersection() != null) {
					g2d.drawString(
							"angle to intersection "
									+ v.getNextIntersection()
											.angle(v.getOriginNode().id,
													v.getNodeAfterNextIntersection().id),
							(int) p.x + 5, (int) p.y + 100);
				}
			}
			if (showVehicleDebugTraces) {
				// Blue line to the vehicle being followed.
				Vehicle vehicleInFront = v.getVehicleInFront();
				if (vehicleInFront != null) {
					NodePoint pp = scaledXY(
							Global.projection
									.convertLongToX(vehicleInFront.lon),
							Global.projection.convertLatToY(vehicleInFront.lat));
					g2d.setColor(Color.BLUE);
					g2d.drawLine((int) pp.x, (int) pp.y, (int) p.x, (int) p.y);
				}
				// Red dot on the vehicle's next node; orange on its previous.
				Node nextNode = v.getDestinationNode();
				if (nextNode != null) {
					NodePoint pp = scaledXY(
							Global.projection.convertLongToX(nextNode.lon),
							Global.projection.convertLatToY(nextNode.lat));
					g2d.setColor(Color.RED);
					g2d.fillOval((int) pp.x - size / 2, (int) pp.y - size / 2,
							size, size);
					// System.out.println("nextnode is " + nextNode.id);
				}
				Node prevNode = v.getOriginNode();
				if (prevNode != null) {
					NodePoint pp = scaledXY(
							Global.projection.convertLongToX(prevNode.lon),
							Global.projection.convertLatToY(prevNode.lat));
					g2d.setColor(Color.ORANGE);
					g2d.fillOval((int) pp.x - size / 2, (int) pp.y - size / 2,
							size, size);
				}
				// NOTE(review): v.navigation is dereferenced here without the
				// null check used in the showVehicleInfo branch above — could
				// NPE for vehicles without navigation. Confirm and guard.
				if (v.navigation.getDestination() != null) {
					Node destNode = Global.openStreetMap.getNode(v.navigation
							.getDestination());
					NodePoint ppd = scaledXY(
							Global.projection.convertLongToX(destNode.lon),
							Global.projection.convertLatToY(destNode.lat));
					g2d.setColor(Color.RED);
					g2d.drawLine((int) ppd.x, (int) ppd.y, (int) p.x, (int) p.y);
				}
			}
		}
	}
	/**
	 * Redraws the heads-up display: the simulator clock, and — while the
	 * cursor hovers near a node — prints that node's id to stdout (a
	 * developer aid; the oval rendering is commented out).
	 */
	private void paintHUD() throws Exception {
		if (hud == null) {
			hud = new BufferedImage(getWidth(), getHeight(),
					BufferedImage.TYPE_INT_ARGB);
			hudG2D = hud.createGraphics();
			hudG2D.setRenderingHint(RenderingHints.KEY_ANTIALIASING,
					RenderingHints.VALUE_ANTIALIAS_ON);
			AlphaComposite ac = AlphaComposite
					.getInstance(AlphaComposite.SRC_OVER);
			hudG2D.setComposite(ac);
		}
		Graphics2D g2d = hudG2D;
		g2d.setBackground(new Color(255, 255, 255, 0));
		g2d.clearRect(0, 0, getWidth(), getHeight());
		g2d.setColor(Color.BLACK);
		g2d.setFont(new Font("TimesRoman", Font.BOLD, 18));
		DecimalFormat df = new DecimalFormat("#.##");
		if (Global.simulator.simulatorTime() > 0)
			g2d.drawString(
					"Simulator Time: "
							+ df.format(Global.simulator.simulatorTime()), 10,
					30);
		else
			g2d.drawString("Simulator Time: 0", 10, 30);
		// g2d.drawString("Total Vehicles: "
		// + Global.simulator.lastSimulatorStepTotalVehicles, 10, 50);
		// g2d.drawString("Spawn Rate: " + Global.simulator.getSpawnRate(), 10, 70);
		if (highlightPoint != null) {
			g2d.setColor(Color.RED);
			int size = 20;
			NodePoint pp = scaledXY(highlightPoint.x, highlightPoint.y);
			// g2d.fillOval((int) pp.x - size / 2, (int) pp.y - size / 2, size,
			// size);
			// Linear scan of all nodes; the threshold of 10 is in projected
			// map units, not pixels, so hover sensitivity varies with zoom.
			for (Map.Entry<String, Node> entry : osm.nodeHash.entrySet()) {
				Node n = entry.getValue();
				NodePoint p = scaledXY(n.x, n.y);
				if (Math.sqrt(Math.pow(n.x - highlightPoint.x, 2)
						+ Math.pow(n.y - highlightPoint.y, 2)) < 10) {
					System.out.println("node = " + n.id);
				}
			}
		}
	}
	/**
	 * Returns a point d units away from (fromLat, fromLon) in the direction
	 * of (toLat, toLon). Despite the parameter names it is called with
	 * projected x/y coordinates in paintOverlay.
	 * NOTE(review): after normalizing the angle into [0, 360) the first
	 * branch (< 0) is unreachable, and all `*= 1` statements are no-ops —
	 * effectively deltaLat is always negated. Confirm intent before cleanup.
	 */
	private NodePoint distanceFromPointInDirectionOfPoint(double fromLat,
			double fromLon, double toLat, double toLon, double d) {
		double angle = -Math.atan2((toLat - fromLat), (toLon - fromLon));
		angle = Math.toDegrees(angle);
		if (angle < 0)
			angle = 360 + angle;
		angle = Math.toRadians(angle);
		double deltaLat = Math.sin(angle) * d;
		double deltaLon = Math.cos(angle) * d;
		// based on this we can negate deltaLat or deltaLon to the correct sign
		if (Math.toDegrees(angle) < 0) {
			deltaLat *= 1;
			deltaLon *= 1;
		} else if (Math.toDegrees(angle) > 45) {
			deltaLat *= -1;
			deltaLon *= 1;
		} else {
			deltaLat *= -1;
			deltaLon *= 1;
		}
		double newLat = fromLat + deltaLat;
		double newLon = fromLon + deltaLon;
		return new NodePoint(newLat, newLon);
	}
	/**
	 * Composites the three layers (map, overlay, hud) over the background.
	 * Any exception during painting is printed and terminates the app.
	 */
	public void paintComponent(Graphics g) {
		try {
			// if (offsetX == -1)
			// offsetX = (int) (getSize().getWidth() / 2) - scale / 2;
			// if (offsetY == -1)
			// offsetY = (int) (getSize().getHeight() / 2) - scale / 2;
			super.paintComponent(g);
			g.setColor(backgroundColor);
			g.fillRect(0, 0, getWidth(), getHeight());
			if (showMap) {
				paintMap(g);
				g.drawImage(map, 0, 0, null);
			}
			paintOverlay();
			g.drawImage(overlay, 0, 0, null);
			paintHUD();
			g.drawImage(hud, 0, 0, null);
		} catch (Exception e) {
			e.printStackTrace();
			System.exit(0);
		}
	}
	// Allows the panel to receive keyboard focus for the shortcuts above.
	@Override
	public boolean isFocusTraversable() {
		return true;
	}
}<file_sep>package org.uci.luci.interchange.Registry;
import org.uci.luci.interchange.Driver.*;
import java.util.ArrayList;
import java.util.Hashtable;
import java.util.List;
import java.util.Map;
/**
 * Global registry of all active VehicleDriver instances, keyed by the
 * license number assigned at registration time.
 *
 * Thread-safety: Hashtable synchronizes individual map operations, but the
 * register/reset sequences touch two pieces of state (the counter and the
 * map), so those methods are declared synchronized to keep the compound
 * updates atomic. Previously two concurrent registrations could both read
 * the same nextLicenseToGenerate and be assigned the same license.
 */
public class VehicleDriverRegistry {
	// Next license number to hand out; guarded by the class lock.
	private static int nextLicenseToGenerate = 0;
	private static Hashtable<String, VehicleDriver> driverHash = new Hashtable<String, VehicleDriver>();
	/**
	 * Assigns the driver a fresh, unique license number and registers it.
	 * Synchronized so the read-assign-put-increment sequence is atomic.
	 */
	public static synchronized void registerDriver(VehicleDriver d) {
		d.licence = nextLicenseToGenerate;
		driverHash.put(nextLicenseToGenerate + "", d);
		nextLicenseToGenerate++;
	}
	/** Removes the driver (looked up by its license) from the registry. */
	public static void deregisterDriver(VehicleDriver d) {
		driverHash.remove(d.licence + "");
	}
	/** Returns the driver with the given license, or null if unknown. */
	public static VehicleDriver getDriver(Integer licenseNumber) {
		return driverHash.get(licenseNumber + "");
	}
	/** Returns a snapshot list of all registered drivers. */
	public static List<VehicleDriver> allLicensedDrivers() {
		return new ArrayList<VehicleDriver>(driverHash.values());
	}
	/** Clears the registry and restarts license numbering from zero. */
	public static synchronized void reset() {
		driverHash.clear();
		nextLicenseToGenerate = 0;
	}
}<file_sep>package org.uci.luci.interchange.Driver.VehicleDriverBehavior;
/**
 * Strategy interface for a driver's active behavior (e.g. following,
 * intersection crossing, general acceleration — see the implementations
 * referenced from AppPanel). A VehicleDriver holds one active behavior at
 * a time and ticks it every simulation step.
 */
public interface VehicleDriverBehavior {
	/**
	 * Advances this behavior by one simulation step.
	 *
	 * @param simTime    current simulator time
	 * @param tickLength duration covered by this step
	 */
	public void tick(double simTime, double tickLength);
	/** Human-readable description of the behavior's current state (shown in the UI). */
	public String state();
}<file_sep>package org.uci.luci.interchange.Vehicles;
import java.util.List;
import java.util.Random;
import org.uci.luci.interchange.Graph.Node;
import org.uci.luci.interchange.Graph.Way;
import org.uci.luci.interchange.Intersections.Intersection;
import org.uci.luci.interchange.Registry.IntersectionRegistry;
import org.uci.luci.interchange.Registry.VehicleRegistry;
import org.uci.luci.interchange.Util.Global;
import org.uci.luci.interchange.Util.Oracle;
import org.uci.luci.interchange.Util.Utils;
import org.uci.luci.interchange.Util.Vector2d;
// This class makes traversing node-by-node invisible to vehicles and drivers.
// Simply set the origin and destination node and getNextNodeId() will keep
// returning the next possible node. It will return null on dead-ends and
// intersections.
// NOTE(review): class name is misspelled ("Mehanism"), but callers reference
// it by this name (e.g. Vehicle.getNodeTraversingMehanism), so it is kept.
public class NodeTraversingMehanism {
	// Ids of the segment endpoints currently being traversed: the node the
	// vehicle is coming from and the node it is heading toward.
	private String originNodeId, destinationNodeId;
	public NodeTraversingMehanism() {
	}
	public void setOriginNodeId(String nodeId) {
		originNodeId = nodeId;
	}
	public void setDestinationNodeId(String nodeId) {
		destinationNodeId = nodeId;
	}
	// public boolean isAtDestinationNode() {
	// return distanceToDestinationNode() < DISTANCE_TO_CONSIDER_AS_SAME;
	// }
	//
	// public boolean isAtOriginNode() {
	// return distanceFromOriginNode() < DISTANCE_TO_CONSIDER_AS_SAME;
	// }
	/** Length of the current segment in kilometers (great-circle distance). */
	public double distanceBetweenDestinationAndOriginNode() {
		Node nextNode = getDestinationNode();
		Node lastNode = getOriginNode();
		// double d = Math.sqrt(Math.pow(lastNode.lat - nextNode.lat, 2)
		// + Math.pow(lastNode.lon - nextNode.lon, 2));
		// return d;
		return Utils.distance(nextNode.lat, nextNode.lon, lastNode.lat,
				lastNode.lon, 'K');
	}
	// public double distanceToDestinationNode() {
	// Node nextNode = getDestinationNode();
	// double d = Math.sqrt(Math.pow(lat - nextNode.lat, 2)
	// + Math.pow(lon - nextNode.lon, 2));
	// return d;
	// }
	//
	// public double distanceFromOriginNode() {
	// Node lastNode = getOriginNode();
	// double d = Math.sqrt(Math.pow(lat - lastNode.lat, 2)
	// + Math.pow(lon - lastNode.lon, 2));
	// return d;
	// }
	/** Resolves the origin node from the global map, or null if unset. */
	public Node getOriginNode() {
		if (originNodeId == null)
			return null;
		return Global.openStreetMap.getNode(originNodeId);
	}
	/** Resolves the destination node from the global map, or null if unset. */
	public Node getDestinationNode() {
		if (destinationNodeId == null)
			return null;
		return Global.openStreetMap.getNode(destinationNodeId);
	}
	/**
	 * Returns the node after the current destination when the road simply
	 * continues (destination has exactly two neighbors), advancing
	 * originNodeId to the current destination as a side effect. Returns null
	 * at dead ends (one neighbor) and at intersections (3+ neighbors),
	 * where routing decisions are made elsewhere.
	 * NOTE(review): in the degree-2 case originNodeId is advanced but
	 * destinationNodeId is not (the setDestinationNodeId calls are commented
	 * out) — presumably the caller updates it; confirm against call sites.
	 */
	public Node getNextNode() {
		Node lastNode = getOriginNode();
		Node nextNode = getDestinationNode();
		if (nextNode.connectedNodes.size() == 2) {
			// pretty obvious where the car goes..
			int i = nextNode.connectedNodes.indexOf(lastNode);
			setOriginNodeId(nextNode.id);
			if (i == 0) {
				// setDestinationNodeId(nextNode.connectedNodes.get(1).id);
				return nextNode.connectedNodes.get(1);
			} else {
				// setDestinationNodeId(nextNode.connectedNodes.get(0).id);
				return nextNode.connectedNodes.get(0);
			}
		} else if (nextNode.connectedNodes.size() == 1) {
			return null;
		} else {
			return null;
		}
	}
	/** Distance in km from the given position to the destination node. */
	public double distanceToDestinationNode(double lat, double lon) {
		Node nextNode = getDestinationNode();
		// double d = Math.sqrt(Math.pow(lat - nextNode.lat, 2)
		// + Math.pow(lon - nextNode.lon, 2));
		// return d;
		return Utils.distance(lat, lon, nextNode.lat, nextNode.lon, 'K');
	}
	/** Distance in km from the given position back to the origin node. */
	public double distanceFromOriginNode(double lat, double lon) {
		Node lastNode = getOriginNode();
		// double d = Math.sqrt(Math.pow(lat - lastNode.lat, 2)
		// + Math.pow(lon - lastNode.lon, 2));
		// return d;
		return Utils.distance(lat, lon, lastNode.lat, lastNode.lon, 'K');
	}
	// private Node randomConnectedNode(Node n, Node excludeNode) {
	// Random randomGenerator = Utils.randomNumberGenerator();
	//
	// int nodeIndex = -1;
	// int excludeNodeIndex = n.connectedNodes.indexOf(excludeNode);
	//
	// while (nodeIndex == -1 || nodeIndex == excludeNodeIndex) {
	// nodeIndex = randomGenerator.nextInt(n.connectedNodes.size());
	// }
	//
	// return n.connectedNodes.get(nodeIndex);
	// }
}
<file_sep>package org.uci.luci.interchange.Intersections;
import java.util.HashMap;
import java.util.Random;
import org.uci.luci.interchange.Intersections.LightFSM.LIGHT;
import org.uci.luci.interchange.Util.Utils;
/**
 * Traffic-light state machine for an interchange with two opposing phase
 * groups (1 and 2). States cycle through lefts_green/lefts_yellow and
 * throughs_green/throughs_yellow per group, plus an all_red clearance
 * state; right turns are always green.
 *
 * Vehicles submit integer "bids" per movement (through / left, per group).
 * The summed bids decide which group is served after a yellow, and whether
 * a through-green may be ended early (never before minThroughsGreenDur).
 */
public class InterchangeLightFSM {
	// All-red clearance duration; currently zero, so all_red is skipped
	// through immediately.
	private double deadTimeDur = 0;
	// Minimum hold time of a through-green before bids may end it early.
	private double minThroughsGreenDur;
	// Maximum phase durations.
	private double throughsGreenDur, throughsYellowDur;
	private double leftsGreenDur, leftsYellowDur;
	// Simulator time at which the current state was entered.
	private double lastStateChangeAt;
	private String state;
	// Bids keyed by vehicle VIN; through and left-turn maps per group.
	// Writes and summations are guarded by synchronized(this).
	HashMap<String, Integer> g1Bids, g1BidsLeft, g2Bids, g2BidsLeft;
	/**
	 * Creates the FSM with the given phase durations and starts in a random
	 * state (presumably so co-located intersections don't run in lockstep —
	 * TODO confirm).
	 */
	public InterchangeLightFSM(double minThroughsGreenDur,
			double throughsGreenDur, double throughsYellowDur,
			double leftsGreenDur, double leftsYellowDur) {
		g1Bids = new HashMap<String, Integer>();
		g1BidsLeft = new HashMap<String, Integer>();
		g2Bids = new HashMap<String, Integer>();
		g2BidsLeft = new HashMap<String, Integer>();
		this.minThroughsGreenDur = minThroughsGreenDur;
		this.throughsGreenDur = throughsGreenDur;
		this.throughsYellowDur = throughsYellowDur;
		this.leftsGreenDur = leftsGreenDur;
		this.leftsYellowDur = leftsYellowDur;
		Random randomGenerator = Utils.randomNumberGenerator();
		lastStateChangeAt = 0;
		// nextInt(9) yields 0-8, so every value is covered below; the final
		// else is an unreachable safe default.
		int randInt = randomGenerator.nextInt(9);
		if (randInt == 0)
			state = "all_red";
		else if (randInt == 1)
			state = "lefts_green1";
		else if (randInt == 2)
			state = "lefts_yellow1";
		else if (randInt == 3)
			state = "throughs_green1";
		else if (randInt == 4)
			state = "throughs_yellow1";
		else if (randInt == 5)
			state = "lefts_green2";
		else if (randInt == 6)
			state = "lefts_yellow2";
		else if (randInt == 7)
			state = "throughs_green2";
		else if (randInt == 8)
			state = "throughs_yellow2";
		else
			state = "all_red";
	}
	/** Current state plus both groups' bid totals, for UI display. */
	public String getState() {
		return state + " (g1 " + group1Bids() + " g2 " + group2Bids() + ")";
	}
	// Right turns are always permitted for both groups.
	public LIGHT getLightForRights1() {
		return LIGHT.GREEN;
	}
	public LIGHT getLightForRights2() {
		return LIGHT.GREEN;
	}
	/** Light for group 1's through movement, derived from the state name. */
	public LIGHT getLightForThrough1() {
		if (state.equals("throughs_green1"))
			return LIGHT.GREEN;
		else if (state.equals("throughs_yellow1"))
			return LIGHT.YELLOW;
		return LIGHT.RED;
	}
	/** Light for group 1's left-turn movement. */
	public LIGHT getLightForLefts1() {
		if (state.equals("lefts_green1"))
			return LIGHT.GREEN;
		else if (state.equals("lefts_yellow1"))
			return LIGHT.YELLOW;
		return LIGHT.RED;
	}
	/** Light for group 2's through movement. */
	public LIGHT getLightForThrough2() {
		if (state.equals("throughs_green2"))
			return LIGHT.GREEN;
		else if (state.equals("throughs_yellow2"))
			return LIGHT.YELLOW;
		return LIGHT.RED;
	}
	/** Light for group 2's left-turn movement. */
	public LIGHT getLightForLefts2() {
		if (state.equals("lefts_green2"))
			return LIGHT.GREEN;
		else if (state.equals("lefts_yellow2"))
			return LIGHT.YELLOW;
		return LIGHT.RED;
	}
	// --- Bid submission; later bids from the same VIN overwrite earlier ones.
	public void acceptBidGroup1Left(String vin, int value) {
		synchronized (this) {
			g1BidsLeft.put(vin, value);
		}
	}
	public void acceptBidGroup1Right(String vin, int value) {
		// ignore
	}
	public void acceptBidGroup1Through(String vin, int value) {
		synchronized (this) {
			g1Bids.put(vin, value);
		}
	}
	public void acceptBidGroup2Left(String vin, int value) {
		synchronized (this) {
			g2BidsLeft.put(vin, value);
		}
	}
	public void acceptBidGroup2Right(String vin, int value) {
		// ignore
	}
	public void acceptBidGroup2Through(String vin, int value) {
		synchronized (this) {
			g2Bids.put(vin, value);
		}
	}
	/** Removes any bid the vehicle placed, on any movement of either group. */
	public void clearBidForVehicle(String vin) {
		synchronized (this) {
			g1Bids.remove(vin);
			g1BidsLeft.remove(vin);
			g2Bids.remove(vin);
			g2BidsLeft.remove(vin);
		}
	}
	/** Sum of all group 1 bid values (through + left). */
	private int group1Bids() {
		int sum = 0;
		synchronized (this) {
			for (int i : g1Bids.values())
				sum += i;
			for (int i : g1BidsLeft.values())
				sum += i;
		}
		return sum;
	}
	/** Sum of all group 2 bid values (through + left). */
	private int group2Bids() {
		int sum = 0;
		synchronized (this) {
			for (int i : g2Bids.values())
				sum += i;
			for (int i : g2BidsLeft.values())
				sum += i;
		}
		return sum;
	}
	/** Number of group 1 bidders (through + left). */
	private int numGroup1Bids() {
		int sum = 0;
		synchronized (this) {
			sum += g1Bids.values().size();
			sum += g1BidsLeft.values().size();
		}
		return sum;
	}
	/** Number of group 2 bidders (through + left). */
	private int numGroup2Bids() {
		int sum = 0;
		synchronized (this) {
			sum += g2Bids.values().size();
			sum += g2BidsLeft.values().size();
		}
		return sum;
	}
	/**
	 * Advances the FSM by one simulation step. The commented-out block below
	 * is the earlier fixed-cycle logic, kept for reference; the live logic
	 * after it is bid-driven: after a yellow (or all_red) the group with the
	 * higher bid total is served next, starting with its left phase only if
	 * it has left-turn bidders.
	 * NOTE(review): the g1BidsLeft.size()/g2BidsLeft.size() reads here are
	 * not inside synchronized(this), unlike all other bid-map accesses —
	 * likely benign for this simulator but worth confirming.
	 */
	public void tick(double simTime, double tickLength, int tick) {
		double sinceLastStateChange = simTime - lastStateChangeAt;
		// if (state.equals("all_red") && sinceLastStateChange > deadTimeDur) {
		// state = "lefts_green1";
		// lastStateChangeAt = simTime;
		// } else if (state.equals("lefts_green1")
		// && sinceLastStateChange > leftsGreenDur) {
		// state = "lefts_yellow1";
		// lastStateChangeAt = simTime;
		// } else if (state.equals("lefts_yellow1")
		// && sinceLastStateChange > leftsYellowDur) {
		// state = "throughs_green1";
		// lastStateChangeAt = simTime;
		// } else if (state.equals("throughs_green1")) {
		// if ((group1Bids() < group2Bids() && sinceLastStateChange >
		// minThroughsGreenDur)
		// || (sinceLastStateChange > throughsGreenDur)) {
		// state = "throughs_yellow1";
		// lastStateChangeAt = simTime;
		// }
		// } else if (state.equals("throughs_yellow1")
		// && sinceLastStateChange > throughsYellowDur) {
		// state = "lefts_green2";
		// lastStateChangeAt = simTime;
		// } else if (state.equals("lefts_green2")
		// && sinceLastStateChange > leftsGreenDur) {
		// state = "lefts_yellow2";
		// lastStateChangeAt = simTime;
		// } else if (state.equals("lefts_yellow2")
		// && sinceLastStateChange > leftsYellowDur) {
		// state = "throughs_green2";
		// lastStateChangeAt = simTime;
		// } else if (state.equals("throughs_green2")) {
		// if ((group1Bids() > group2Bids() && sinceLastStateChange >
		// minThroughsGreenDur)
		// || (sinceLastStateChange > throughsGreenDur)) {
		// state = "throughs_yellow2";
		// lastStateChangeAt = simTime;
		// }
		// } else if (state.equals("throughs_yellow2")
		// && sinceLastStateChange > throughsYellowDur) {
		// state = "all_red";
		// lastStateChangeAt = simTime;
		// }
		if (state.equals("all_red") && sinceLastStateChange > deadTimeDur) {
			// Serve the group with the higher bid total; ties go to group 1.
			// Start with a left phase only if that group has left bidders.
			if (group1Bids() >= group2Bids()) {
				if (g1BidsLeft.size() > 0)
					state = "lefts_green1";
				else
					state = "throughs_green1";
			} else {
				if (g2BidsLeft.size() > 0)
					state = "lefts_green2";
				else
					state = "throughs_green2";
			}
			lastStateChangeAt = simTime;
		} else if (state.equals("lefts_green1")
				&& sinceLastStateChange > leftsGreenDur) {
			state = "lefts_yellow1";
			lastStateChangeAt = simTime;
		} else if (state.equals("lefts_yellow1")
				&& sinceLastStateChange > leftsYellowDur) {
			state = "throughs_green1";
			lastStateChangeAt = simTime;
		} else if (state.equals("throughs_green1")) {
			// End early once minThroughsGreenDur has passed and the other
			// group outbids us, or after the full duration if anyone on the
			// other group (or our own left-turners) is waiting.
			if ((group1Bids() < group2Bids() && sinceLastStateChange > minThroughsGreenDur)
					|| (numGroup2Bids() > 0 && sinceLastStateChange > throughsGreenDur)) {
				state = "throughs_yellow1";
				lastStateChangeAt = simTime;
			} else if (g1BidsLeft.size() > 0
					&& sinceLastStateChange > throughsGreenDur) {
				state = "throughs_yellow1";
				lastStateChangeAt = simTime;
			}
		} else if (state.equals("throughs_yellow1")
				&& sinceLastStateChange > throughsYellowDur) {
			// Re-run the bid auction rather than blindly alternating groups.
			if (group1Bids() >= group2Bids()) {
				if (g1BidsLeft.size() > 0)
					state = "lefts_green1";
				else
					state = "throughs_green1";
			} else {
				if (g2BidsLeft.size() > 0)
					state = "lefts_green2";
				else
					state = "throughs_green2";
			}
			lastStateChangeAt = simTime;
		} else if (state.equals("lefts_green2")
				&& sinceLastStateChange > leftsGreenDur) {
			state = "lefts_yellow2";
			lastStateChangeAt = simTime;
		} else if (state.equals("lefts_yellow2")
				&& sinceLastStateChange > leftsYellowDur) {
			state = "throughs_green2";
			lastStateChangeAt = simTime;
		} else if (state.equals("throughs_green2")) {
			// Mirror of the throughs_green1 early-termination logic.
			if ((group1Bids() > group2Bids() && sinceLastStateChange > minThroughsGreenDur)
					|| (numGroup1Bids() > 0 && sinceLastStateChange > throughsGreenDur)) {
				state = "throughs_yellow2";
				lastStateChangeAt = simTime;
			} else if (g2BidsLeft.size() > 0
					&& sinceLastStateChange > throughsGreenDur) {
				state = "throughs_yellow2";
				lastStateChangeAt = simTime;
			}
		} else if (state.equals("throughs_yellow2")
				&& sinceLastStateChange > throughsYellowDur) {
			// Auction again; the all_red clearance is skipped (see the
			// commented assignment below).
			if (group1Bids() >= group2Bids()) {
				if (g1BidsLeft.size() > 0)
					state = "lefts_green1";
				else
					state = "throughs_green1";
			} else {
				if (g2BidsLeft.size() > 0)
					state = "lefts_green2";
				else
					state = "throughs_green2";
			}
			// state = "all_red";
			lastStateChangeAt = simTime;
		}
	}
}
<file_sep>package org.uci.luci.interchange.Factory;
import org.uci.luci.interchange.OSM.*;
import org.uci.luci.interchange.Registry.*;
import org.uci.luci.interchange.UI.*;
import org.uci.luci.interchange.Util.*;
import org.uci.luci.interchange.*;
/**
 * Bootstraps a complete simulation run: parses the OSM file, normalizes the
 * resulting road graph, builds intersections of the requested type, opens
 * the application window, and starts the simulator.
 */
public class SimulationFactory {
	/**
	 * Runs a simulation on the given map.
	 *
	 * @param mapFile          path to the OSM file to load
	 * @param simulationSpeed  initial simulator speed setting
	 * @param intersectionType "Bidding", "Loop Sensors", or anything else
	 *                         for traditional timed intersections
	 * @throws Exception propagated from parsing or startup
	 */
	public static void runSimulation(String mapFile, int simulationSpeed,
			String intersectionType) throws Exception {
		// Parse the OSM file into the global map.
		OsmFileReader reader = new OsmFileReader(mapFile);
		reader.parseStructure();
		Global.openStreetMap = reader.osmHandler.openStreetMap;
		// Normalize the graph: drop stray nodes and disconnected islands,
		// cache neighbor distances, and project coordinates to the plane.
		Global.openStreetMap.purgeUnconnectedNodes();
		Global.openStreetMap.removeDisconnectedGraphs();
		Global.openStreetMap.precomputeNeighborDistances();
		Global.openStreetMap.projectUsingProjection(new MercatorProjection());
		// Build intersection controllers of the requested flavor.
		if (intersectionType.equals("Bidding")) {
			IntersectionRegistry.generateBiddingIntersections();
		} else if (intersectionType.equals("Loop Sensors")) {
			IntersectionRegistry.generateLoopSensorsIntersections();
		} else {
			IntersectionRegistry.generateTraditionalIntersections();
		}
		// Opening the window is a constructor side effect.
		AppWindow window = new AppWindow();
		// Wire up and launch the simulator.
		Simulator sim = new Simulator();
		sim.setSpeed(simulationSpeed);
		Global.simulator = sim;
		sim.start();
	}
}<file_sep>== Interchange
Interchange is a macro-scale traffic simulator. You can use any OSM file as the infrastructure; Interchange will attempt to auto-detect roads, intersections, etc.
* Interchange is still a work in progress and there are serious bugs and lots of undocumented code.
== Quickstart
1. Cleaning
mvn clean
2. Building
mvn package
3. Running
On OS X / Linux
./interchange
On Windows & Other
java -jar target/interchange-1.0-SNAPSHOT.jar
== Features
* Import OSM files to simulate any area of the world
* Vehicles accelerate and decelerate at real-world rates.
* Vehicles/Drivers know and follow speed limits.
* Vehicles/Drivers have a few behaviors they switch between including following other vehicles, crossing intersections, and general.
* Vehicles/Drivers switch between lanes and follow most laws including being in the left-most lane for left turns.
== Limitations
* Most 3 and 4 node connections are assumed to have traffic lights.
* One-way streets are not respected.
* Only one vehicle type/spec is supported.
* Only three-way and four-way intersections are supported.
* Drivers strictly follow all implemented laws, so there is no chance for vehicles to collide or run red lights (and this should never happen).
* Only relatively small OSM files work well. (~5mi x 5mi).
* Interface does not render lanes although they are simulated.
* Edge case: there is a snapping effect where fast-moving vehicles may not move at the appropriate delta per tick<file_sep>package org.uci.luci.interchange;
import org.uci.luci.interchange.OSM.*;
import org.uci.luci.interchange.Registry.*;
import org.uci.luci.interchange.UI.*;
import org.uci.luci.interchange.Util.*;
public class App {
public static void main(String[] args) throws Exception {
SimulatorOptionsWindow options = new SimulatorOptionsWindow();
}
}
<file_sep>package org.uci.luci.interchange.Vehicles;
import java.util.List;
import org.uci.luci.interchange.Registry.VehicleRegistry;
import org.uci.luci.interchange.Util.Oracle;
/**
 * Static helper routines shared by vehicle-related code.
 */
public class VehicleUtils {

	/**
	 * Finds the vehicle nearest to the given origin node among all vehicles
	 * that depart from that origin towards the given destination on the
	 * given lane.
	 *
	 * @param originNodeId      id of the origin node vehicles depart from
	 * @param destinationNodeId id of the destination node vehicles head to
	 * @param onLaneNumber      lane the candidate vehicles must occupy
	 * @return the closest matching vehicle, or {@code null} when none match
	 */
	public static Vehicle findVehicleClosestToOriginNodeOnLane(
			String originNodeId, String destinationNodeId, int onLaneNumber) {
		List<String> candidates = Oracle.vehiclesWithNodeAsOrigin(originNodeId);
		if (candidates == null || candidates.isEmpty())
			return null;

		Vehicle closest = null;
		for (String candidateVin : candidates) {
			Vehicle candidate = VehicleRegistry.getVehicle(candidateVin);
			// Registry entries can disappear between lookup and fetch.
			if (candidate == null)
				continue;
			// Only consider vehicles on the requested lane heading to the
			// requested destination.
			if (!candidate.getDestinationNode().id.equals(destinationNodeId)
					|| candidate.getOnLaneNumber() != onLaneNumber)
				continue;
			if (closest == null) {
				closest = candidate;
				continue;
			}
			double candidateDist = candidate.getNodeTraversingMehanism()
					.distanceFromOriginNode(candidate.lat, candidate.lon);
			double closestDist = closest.getNodeTraversingMehanism()
					.distanceFromOriginNode(closest.lat, closest.lon);
			if (candidateDist < closestDist)
				closest = candidate;
		}
		return closest;
	}

	/**
	 * Solves vf^2 = vi^2 + 2*a*d for the acceleration needed to go from
	 * initialSpeed to finalSpeed over the given distance. The result is
	 * divided by 3600 (unit conversion for per-hour speeds).
	 *
	 * @return the required acceleration, or {@code Double.MAX_VALUE} when
	 *         the distance is zero or NaN
	 */
	public static double determineNecessaryAcceleration(double initialSpeed,
			double finalSpeed, double distance) {
		if (distance == 0 || Double.isNaN(distance))
			return Double.MAX_VALUE;
		double speedTermDelta = Math.pow(finalSpeed, 2) - Math.pow(initialSpeed, 2);
		double acceleration = (speedTermDelta / (2 * distance)) / 3600;
		if (Double.isNaN(acceleration)) {
			System.out.println("params is = " + initialSpeed + " finalSpeed = "
					+ finalSpeed + " distance = " + distance);
		}
		return acceleration;
	}
}
<file_sep>package org.uci.luci.interchange.Factory;
import org.uci.luci.interchange.Intersections.*;
import org.uci.luci.interchange.Graph.*;
import org.uci.luci.interchange.Registry.*;
import java.util.Random;
import java.util.List;
// Factory for every intersection flavour used by the simulator. Each
// create* method builds the intersection for the given root node and
// registers it with the IntersectionRegistry before returning it.
public class IntersectionFactory {
// Traditional (traffic-light) five-way intersection.
public static FiveWayIntersection createFiveWayIntersectionForNode(Node n) {
FiveWayIntersection intersection = new FiveWayIntersection(n.id);
IntersectionRegistry.registerIntersection(intersection);
return intersection;
}
// Traditional (traffic-light) four-way intersection.
public static FourWayIntersection createFourWayIntersectionForNode(Node n) {
FourWayIntersection intersection = new FourWayIntersection(n.id);
IntersectionRegistry.registerIntersection(intersection);
return intersection;
}
// Traditional (traffic-light) three-way intersection.
public static ThreeWayIntersection createThreeWayIntersectionForNode(Node n) {
ThreeWayIntersection intersection = new ThreeWayIntersection(n.id);
IntersectionRegistry.registerIntersection(intersection);
return intersection;
}
// Bidding-controlled four-way intersection.
public static FourWayBiddingIntersection createFourWayBiddingIntersectionForNode(
Node n) {
FourWayBiddingIntersection intersection = new FourWayBiddingIntersection(
n.id);
IntersectionRegistry.registerIntersection(intersection);
return intersection;
}
// Bidding-controlled three-way intersection.
public static ThreeWayBiddingIntersection createThreeWayBiddingIntersectionForNode(
Node n) {
ThreeWayBiddingIntersection intersection = new ThreeWayBiddingIntersection(
n.id);
IntersectionRegistry.registerIntersection(intersection);
return intersection;
}
// Loop-sensor-controlled four-way intersection.
public static FourWayLoopIntersection createFourWayLoopSensorsIntersectionForNode(
Node n) {
FourWayLoopIntersection intersection = new FourWayLoopIntersection(n.id);
IntersectionRegistry.registerIntersection(intersection);
return intersection;
}
// Loop-sensor-controlled three-way intersection.
public static ThreeWayLoopIntersection createThreeWayLoopSensorsIntersectionForNode(
Node n) {
ThreeWayLoopIntersection intersection = new ThreeWayLoopIntersection(
n.id);
IntersectionRegistry.registerIntersection(intersection);
return intersection;
}
// Highway on/off ramp; used instead of an intersection for highway nodes.
public static HighwayRamp createHighwayRampForNode(Node n) {
HighwayRamp intersection = new HighwayRamp(n.id);
IntersectionRegistry.registerIntersection(intersection);
return intersection;
}
}<file_sep>package org.uci.luci.interchange.UI;
import java.awt.Dimension;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.io.File;
import java.io.FilenameFilter;
import javax.swing.ButtonGroup;
import javax.swing.JButton;
import javax.swing.JComboBox;
import javax.swing.JFrame;
import javax.swing.JLabel;
import javax.swing.JOptionPane;
import javax.swing.JPanel;
import javax.swing.JRadioButton;
import javax.swing.JTextField;
import org.uci.luci.interchange.Registry.IntersectionRegistry;
import org.uci.luci.interchange.Util.Global;
import org.uci.luci.interchange.Factory.*;
/**
 * Start-up dialog that lets the user pick a map, a simulation speed and an
 * intersection control strategy before launching the simulator.
 */
public class SimulatorOptionsWindow {
	public boolean waiting = true;
	// NOTE(review): this public flag is never read or written in this file;
	// kept for backwards compatibility with any external users.
	public boolean TypeOfIntersection;
	protected JFrame f;

	/**
	 * Builds and shows the options window.
	 *
	 * @throws Exception if the "lib" map directory cannot be listed
	 */
	public SimulatorOptionsWindow() throws Exception {
		f = new JFrame("Interchange");
		f.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
		f.add(new OptionsWindow());
		f.pack();
		// Fix: center the frame before showing it, so it does not briefly
		// appear at the top-left corner first.
		f.setLocationRelativeTo(null);
		f.setVisible(true);
	}

	private class OptionsWindow extends JPanel implements ActionListener {
		JComboBox mapChoice;
		JTextField simSpeed;
		ButtonGroup intersectionType;

		public OptionsWindow() throws Exception {
			// List every *.osm.xml map file shipped in the lib directory.
			File dir = new File("lib");
			String[] maps = dir.list(new FilenameFilter() {
				public boolean accept(File arg0, String arg1) {
					return arg1.contains(".osm.xml");
				}
			});
			if (maps == null) {
				// lib/ is missing or unreadable; nothing to offer the user.
				throw new Exception();
			}
			this.add(new JLabel("Map to Simulate"));
			mapChoice = new JComboBox(maps);
			this.add(mapChoice);
			// Simulation speed (sleep per tick); default is 10.
			this.add(new JLabel("Simulator Speed"));
			simSpeed = new JTextField("10", 2);
			this.add(simSpeed);
			// Intersection control strategy; the ButtonGroup enforces a
			// single selection.
			intersectionType = new ButtonGroup();
			JRadioButton t = new JRadioButton("Traditional");
			t.setActionCommand("Traditional");
			this.add(t);
			intersectionType.add(t);
			t = new JRadioButton("Bidding");
			t.setActionCommand("Bidding");
			this.add(t);
			intersectionType.add(t);
			t = new JRadioButton("Loop Sensors");
			t.setActionCommand("Loop Sensors");
			this.add(t);
			// Fix: the original also called setSelected(true) on the Bidding
			// button, which this selection immediately overrode once the
			// button joined the group; "Loop Sensors" is the effective
			// default, so only it is selected now.
			t.setSelected(true);
			intersectionType.add(t);
			JButton subButton = new JButton("Start Simulation");
			subButton.addActionListener(this);
			this.add(subButton);
		}

		public void actionPerformed(ActionEvent arg0) {
			if (arg0.getActionCommand().equals("Start Simulation")) {
				// Hide the dialog only when the inputs were accepted.
				if (submitValues())
					f.setVisible(false);
			}
		}

		/**
		 * Validates the form and hands the chosen options to the
		 * SimulationFactory.
		 *
		 * @return false when the map selection is empty, true otherwise
		 */
		public boolean submitValues() {
			String map = (String) mapChoice.getSelectedItem();
			if (map.equals("")) {
				JOptionPane.showMessageDialog(this,
						"Please Select a valid map file");
				return false;
			}
			try {
				int simulationSpeed = Integer.parseInt(simSpeed.getText());
				String intersectionsType = intersectionType.getSelection()
						.getActionCommand();
				SimulationFactory.runSimulation(map, simulationSpeed,
						intersectionsType);
			} catch (Exception e) {
				e.printStackTrace();
			}
			return true;
		}
	}
}
<file_sep>package org.uci.luci.interchange.UI;
/**
 * Maps between map coordinates (x = longitude-like, y = latitude-like)
 * inside a fixed bounding box and window pixel coordinates.
 */
public class WindowProjector {
double top = -1;
double bottom = -1;
double left = -1;
double right = -1;
int scale = 500;
int offsetX = -1;
int offsetY = -1;
double width = -1;
double height = -1;

	/**
	 * Remembers the bounding box and precomputes its extents.
	 */
	public WindowProjector(double top, double bottom, double left, double right) {
		this.top = top;
		this.bottom = bottom;
		this.left = left;
		this.right = right;
		this.width = Math.abs(right - left);
		this.height = Math.abs(bottom - top);
	}

	/**
	 * Projects a map coordinate into window space. The y axis is flipped
	 * so that larger map-y values appear higher on screen.
	 */
	public NodePoint scaledXY(double x, double y) {
		double normX = (x - left) / width;
		double normY = (y - top) / height;
		double pixelX = normX * scale + offsetX;
		double pixelY = scale - (normY * scale - offsetY);
		return new NodePoint(pixelX, pixelY);
	}

	/** Integer convenience overload of {@link #unscaleXY(double, double)}. */
	public NodePoint unscaleXY(int x, int y) {
		return unscaleXY((double) x, (double) y);
	}

	/**
	 * Inverse of {@link #scaledXY(double, double)}: converts a window pixel
	 * position back into a map coordinate.
	 */
	public NodePoint unscaleXY(double x, double y) {
		double normX = (x - offsetX) / scale;
		double normY = (-(y - scale) + offsetY) / scale;
		return new NodePoint(normX * width + left, normY * height + top);
	}
}
<file_sep>package org.uci.luci.interchange.Factory;
import org.uci.luci.interchange.Vehicles.*;
import org.uci.luci.interchange.Registry.*;
import org.uci.luci.interchange.Driver.*;
import java.util.Random;
import java.util.List;
// Creates and destroys VehicleDriver instances, keeping the
// VehicleDriverRegistry in sync with their lifetimes.
public class VehicleDriverFactory {
// Builds a driver for the given vehicle and registers it.
public static VehicleDriver createVehicleDriver(Vehicle v) {
VehicleDriver d = new VehicleDriver(v);
VehicleDriverRegistry.registerDriver(d);
return d;
}
// Removes the driver from the registry; the object itself is left to GC.
public static void destroyVehicleDriver(VehicleDriver d) {
VehicleDriverRegistry.deregisterDriver(d);
}
}<file_sep>package org.uci.luci.interchange.Registry;
import org.uci.luci.interchange.Intersections.*;
import org.uci.luci.interchange.Factory.*;
import org.uci.luci.interchange.Graph.*;
import org.uci.luci.interchange.Util.*;
import org.uci.luci.interchange.Exception.*;
import java.util.ArrayList;
import java.util.Hashtable;
import java.util.List;
import java.util.Map;
public class IntersectionRegistry {
private static Hashtable<String, Intersection> intersectionHash = new Hashtable<String, Intersection>();
// Stores the intersection under the key "i-<rootNodeId>" and writes that
// key back onto the intersection itself.
public static void registerIntersection(Intersection i) {
String id = "i-" + i.getRootNodeId();
i.id = id;
intersectionHash.put(id, i);
}
// Looks an intersection up by its full "i-<nodeId>" key; null if absent.
public static Intersection getIntersection(String id) {
return intersectionHash.get(id);
}
// Looks an intersection up by its root node; null if absent.
public static Intersection getIntersectionAtNode(Node node) {
return intersectionHash.get("i-" + node.id);
}
// Returns a snapshot copy of all registered intersections.
public static List<Intersection> allRegisteredIntersections() {
return new ArrayList<Intersection>(intersectionHash.values());
}
/**
 * Walks every node of the loaded map and creates a traditional
 * (traffic-light) intersection for each node with more than two
 * connections. Highway nodes get ramps instead of intersections.
 *
 * @throws UnknownIntersectionTypeException for node degrees that have no
 *         matching intersection implementation (here: anything above 5)
 */
public static void generateTraditionalIntersections()
		throws UnknownIntersectionTypeException {
	for (Node node : Global.openStreetMap.nodes()) {
		int degree = node.connectedNodes.size();
		if (degree <= 2)
			continue;
		if (node.isHighwayNode()) {
			IntersectionFactory.createHighwayRampForNode(node);
			continue;
		}
		switch (degree) {
		case 3:
			IntersectionFactory.createThreeWayIntersectionForNode(node);
			break;
		case 4:
			IntersectionFactory.createFourWayIntersectionForNode(node);
			break;
		case 5:
			IntersectionFactory.createFiveWayIntersectionForNode(node);
			break;
		default:
			System.out.println("Node " + node.id + " (" + node.lat + ", "
					+ node.lon + ") has " + node.connectedNodes.size()
					+ " connections!");
			throw new UnknownIntersectionTypeException();
		}
	}
}
/**
 * Creates bidding-controlled intersections for every node with more than
 * two connections; highway nodes get ramps. Only 3- and 4-way bidding
 * intersections exist, so any higher degree is an error.
 *
 * @throws UnknownIntersectionTypeException for unsupported node degrees
 */
public static void generateBiddingIntersections()
		throws UnknownIntersectionTypeException {
	for (Node node : Global.openStreetMap.nodes()) {
		int degree = node.connectedNodes.size();
		if (degree <= 2)
			continue;
		if (node.isHighwayNode()) {
			IntersectionFactory.createHighwayRampForNode(node);
			continue;
		}
		switch (degree) {
		case 3:
			IntersectionFactory.createThreeWayBiddingIntersectionForNode(node);
			break;
		case 4:
			IntersectionFactory.createFourWayBiddingIntersectionForNode(node);
			break;
		default:
			System.out.println("Node " + node.id + " (" + node.lat + ", "
					+ node.lon + ") has " + node.connectedNodes.size()
					+ " connections!");
			throw new UnknownIntersectionTypeException();
		}
	}
}
/**
 * Creates loop-sensor-controlled intersections for every node with more
 * than two connections; highway nodes get ramps. Only 3- and 4-way loop
 * intersections exist, so any higher degree is an error.
 *
 * @throws UnknownIntersectionTypeException for unsupported node degrees
 */
public static void generateLoopSensorsIntersections()
		throws UnknownIntersectionTypeException {
	for (Node node : Global.openStreetMap.nodes()) {
		int degree = node.connectedNodes.size();
		if (degree <= 2)
			continue;
		if (node.isHighwayNode()) {
			IntersectionFactory.createHighwayRampForNode(node);
			continue;
		}
		switch (degree) {
		case 3:
			IntersectionFactory.createThreeWayLoopSensorsIntersectionForNode(node);
			break;
		case 4:
			IntersectionFactory.createFourWayLoopSensorsIntersectionForNode(node);
			break;
		default:
			System.out.println("Node " + node.id + " (" + node.lat + ", "
					+ node.lon + ") has " + node.connectedNodes.size()
					+ " connections!");
			throw new UnknownIntersectionTypeException();
		}
	}
}
}<file_sep>package org.uci.luci.interchange.Util;
public class LatLng {
private double lat, lng;
// Creates a coordinate pair from a latitude and a longitude.
public LatLng(double lat, double lng) {
this.lat = lat;
this.lng = lng;
}
/**
 * Translates this coordinate in place by the given latitude and
 * longitude deltas.
 */
public void add(double lat, double lng) {
	this.lat = this.lat + lat;
	this.lng = this.lng + lng;
}
// Returns the latitude component.
public double lat() {
return lat;
}
/**
 * Returns the longitude component.
 * <p>
 * Bug fix: this accessor previously returned the {@code lat} field, so
 * callers always got the latitude instead of the longitude.
 */
public double lng() {
	return lng;
}
}<file_sep>package org.uci.luci.interchange.Util;
import org.uci.luci.interchange.Vehicles.*;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Pairwise collision detection over a set of vehicles.
 * <p>
 * The previously unused private {@code originNodesToVehicles} map has been
 * removed; it was never read or written.
 */
public class VehicleCollisionChecker {
	/**
	 * Returns every vehicle that is colliding with at least one other
	 * vehicle in the given list.
	 * <p>
	 * NOTE(review): each unordered pair is examined twice, (i, j) and
	 * (j, i), so a colliding vehicle can appear in the result more than
	 * once. Runtime is O(n^2) in the number of vehicles; this is kept
	 * as-is because callers treat the result as "vehicles to act on" and
	 * because isCollidingWith is not guaranteed symmetric here.
	 *
	 * @param vehicles vehicles to test against each other
	 * @return list (with possible duplicates) of vehicles in collision
	 */
	public static List<Vehicle> checkCollisions(List<Vehicle> vehicles) {
		ArrayList<Vehicle> collisions = new ArrayList<Vehicle>();
		for (int i = 0; i < vehicles.size(); i++) {
			Vehicle v = vehicles.get(i);
			for (int ii = 0; ii < vehicles.size(); ii++) {
				if (i == ii)
					continue;
				Vehicle vv = vehicles.get(ii);
				if (v.isCollidingWith(vv)) {
					collisions.add(v);
					collisions.add(vv);
				}
			}
		}
		return collisions;
	}
}
<file_sep>package org.uci.luci.interchange.Exception;
// Thrown when route planning cannot find any path between an origin node
// and a destination node.
public class NoPathToDestinationException extends Exception {
}
<file_sep>package org.uci.luci.interchange;
import java.util.List;
import java.util.LinkedList;
import java.util.Map;
import java.util.HashMap;
import java.util.ArrayList;
import java.util.Random;
import javax.swing.Timer;
import java.awt.event.ActionListener;
import java.awt.event.ActionEvent;
import java.text.DecimalFormat;
import org.uci.luci.interchange.Intersections.*;
import org.uci.luci.interchange.OSM.*;
import org.uci.luci.interchange.Driver.*;
import org.uci.luci.interchange.Exception.*;
import org.uci.luci.interchange.Vehicles.*;
import org.uci.luci.interchange.Graph.*;
import org.uci.luci.interchange.Util.*;
import org.uci.luci.interchange.Util.StatisticsLogger.ConfidenceInterval;
import org.uci.luci.interchange.Util.StatisticsLogger.VehicleSample;
import org.uci.luci.interchange.Registry.*;
import org.uci.luci.interchange.Factory.*;
public class Simulator extends Thread {
// When true, log(String) prints debug output.
private boolean debug = false;
// When true, run() idles instead of advancing the simulation.
private boolean paused;
ArrayList<String> spawnPoints = new ArrayList<String>();
// Vehicle count measured during the most recent simulator step.
public int lastSimulatorStepTotalVehicles;
// Wall-clock duration (ns) of the most recent simulator step.
long lastSimulatorStepTotalTime;
// Sleep (ms) between ticks; 0 runs flat out.
int delay;
int simulatorTicksSinceCheck;
long simulatorTimeSinceCheck;
double tickLength = 1.0 / 5.0; // simulated seconds advanced per tick (0.2 s)
double simulatorTime = 0;
// Spawn a vehicle every this many ticks.
// 60 for rushed tests, 120 for max
int spawnRate = 5; // 120;
int tick = 0;
int percentOfRushedDrivers = 0;
int driverRushingLevel = 100;
// "important" paths where drivers will be rushed
String impPath1Origin = "122969631";
String impPath1Destination = "331384664";
String impPath2Origin = "122788471";
String impPath2Destination = "122880644";
// Sets how many ticks pass between vehicle spawns.
public void setSpawnRate(int sr) {
spawnRate = sr;
}
// Returns the current spawn interval in ticks.
public int getSpawnRate() {
return spawnRate;
}
// Returns the simulated time elapsed so far, in simulated seconds.
public double simulatorTime() {
return simulatorTime;
}
// Initialises counters and installs a Swing timer that prints a
// performance report (memory, vehicle count, step timing, tick rate)
// every 10 seconds.
public Simulator() throws InterruptedException {
delay = 10;
lastSimulatorStepTotalVehicles = 0;
lastSimulatorStepTotalTime = 0;
simulatorTicksSinceCheck = 0;
simulatorTimeSinceCheck = 0;
paused = false;
ActionListener taskPerformer = new ActionListener() {
public void actionPerformed(ActionEvent evt) {
System.out.println("Simulator performance");
System.out.println("\tFree memory: "
+ Utils.humanReadableByteCount(Runtime.getRuntime()
.freeMemory(), false));
System.out
.println("\tMaximum memory: "
+ (Runtime.getRuntime().maxMemory() == Long.MAX_VALUE ? "no limit"
: Utils.humanReadableByteCount(Runtime
.getRuntime().maxMemory(),
false)));
System.out.println("\t" + lastSimulatorStepTotalVehicles
+ " vehicles in simulator.");
System.out.println("\t" + lastSimulatorStepTotalTime
+ " ns per simulator step.");
// Ticks executed since the previous report divided by elapsed seconds.
double tps = (simulatorTicksSinceCheck / ((System.nanoTime() - simulatorTimeSinceCheck) / 1000000000.0));
System.out.println("\t" + tps + " ticks per sec");
simulatorTicksSinceCheck = 0;
simulatorTimeSinceCheck = System.nanoTime();
double nsPerVehicle = 0;
if (lastSimulatorStepTotalVehicles != 0)
// NOTE(review): long/int division truncates before widening to
// double, so this loses sub-nanosecond precision — confirm intended.
nsPerVehicle = (lastSimulatorStepTotalTime / lastSimulatorStepTotalVehicles);
double vps = 1.0 / (nsPerVehicle / 1000000000);
DecimalFormat df = new DecimalFormat();
System.out.println("\taround " + df.format(vps)
+ " vehicle ticks per sec.");
}
};
new Timer(10000, taskPerformer).start();
}
// One-time setup run before the main loop: precomputes random routes and
// the two "important" rushed-driver paths.
public void initPhase() {
Oracle.generateRoutes(300);
try {
Oracle.generatePath(Global.openStreetMap.getNode(impPath1Origin),
Global.openStreetMap.getNode(impPath1Destination));
Oracle.generatePath(Global.openStreetMap.getNode(impPath2Origin),
Global.openStreetMap.getNode(impPath2Destination));
} catch (NoPathToDestinationException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
// SpawningThread spawnThread = new SpawningThread();
// spawnThread.start();
}
/**
 * Spawns one vehicle on a random precomputed route every spawnRate ticks
 * and assigns it a driver. If no path to the destination exists, the
 * half-created vehicle and driver are torn down again.
 */
public void generateVehiclesPhase(int tick) {
	if (tick % spawnRate != 0)
		return;
	Vehicle vehicle = null;
	VehicleDriver driver = null;
	try {
		List<String> route = Oracle.randomRoute();
		vehicle = VehicleFactory.createVehicleAt(route.get(0), route.get(1));
		if (vehicle == null)
			return;
		vehicle.spawnedAtSpawnRate = spawnRate;
		driver = VehicleDriverFactory.createVehicleDriver(vehicle);
		driver.setDestinationAndGo(route.get(route.size() - 1));
		// The trailing "&& false" keeps the rushed-driver branch disabled
		// while still consuming one random number, preserving the RNG
		// sequence of the original experiments — left exactly as-is.
		if (Utils.randomNumberGenerator().nextInt(100) < percentOfRushedDrivers && false) {
			driver.setRushedness(driverRushingLevel);
			driver.driverGroup = 1;
		} else {
			driver.setRushedness(1);
			driver.driverGroup = 3;
		}
		driver.spawnedAtPercentRushedness = percentOfRushedDrivers;
		// The vehicle only becomes visible to the tick phases once this
		// flag is cleared.
		vehicle.isBeingCreated = false;
	} catch (NoPathToDestinationException e) {
		// Roll back partially constructed objects.
		if (driver != null)
			VehicleDriverFactory.destroyVehicleDriver(driver);
		if (vehicle != null)
			VehicleFactory.destroyVehicle(vehicle);
	}
}
/**
 * Advances every licensed driver by one simulation tick. Drivers of
 * paused or still-being-created vehicles are skipped. Any exception
 * thrown by a driver is treated as fatal: it is printed and the process
 * exits.
 * <p>
 * Fix: removed a leftover empty loop over all drivers inside the catch
 * block; it iterated the registry and did nothing.
 */
public void vehicleDriversTickPhase(int tick) {
	for (VehicleDriver d : VehicleDriverRegistry.allLicensedDrivers()) {
		if (d.vehicle.paused() || d.vehicle.isBeingCreated)
			continue;
		try {
			d.tick(simulatorTime, tickLength, tick);
		} catch (Exception e) {
			e.printStackTrace();
			System.exit(1);
		}
	}
}
/**
 * Gives every registered intersection its per-tick update.
 */
public void intersectionTickPhase(int tick) {
	for (Intersection intersection : IntersectionRegistry.allRegisteredIntersections())
		intersection.tick(simulatorTime, tickLength, tick);
}
/**
 * Applies the state computed during this tick to every fully-created
 * vehicle; vehicles still being constructed are skipped.
 */
public void commitPhase(int tick) {
	for (Vehicle vehicle : VehicleRegistry.allRegisteredVehicles()) {
		if (!vehicle.isBeingCreated)
			vehicle.commit(simulatorTime, tickLength);
	}
}
/**
 * Removes vehicles flagged for removal, recording their trip statistics
 * first. The three near-identical logging branches for driver groups
 * 1/2/3 have been collapsed into one private helper; the sample keys
 * ("g1-", "g2-", "g3-" + rushedness percentage), the log channel names
 * and the CSV payload are unchanged.
 */
public void purgePhase(int tick) {
	for (VehicleDriver d : VehicleDriverRegistry.allLicensedDrivers()) {
		if (!d.vehicle.flagForRemoval)
			continue;
		Vehicle vv = d.vehicle;
		if (d.driverGroup == 1)
			logCompletedVehicleStats(d, vv, "g1");
		else if (d.driverGroup == 2)
			logCompletedVehicleStats(d, vv, "g2");
		else if (d.driverGroup == 3)
			logCompletedVehicleStats(d, vv, "g3");
		VehicleDriverFactory.destroyVehicleDriver(d);
		VehicleFactory.destroyVehicle(d.vehicle);
	}
}

/**
 * Records the finished vehicle both as a confidence-interval sample
 * ("<group>-<percentRushed>") and as a CSV line on the per-group log
 * channel "vehicle.distTraveled2DelayTime-<group>".
 */
private void logCompletedVehicleStats(VehicleDriver d, Vehicle vv, String group) {
	StatisticsLogger.addSample(group + "-" + d.spawnedAtPercentRushedness,
			new VehicleSample(vv.vin, simulatorTime,
					vv.vehicleTotalTraveledDistance,
					vv.vehicleTotalWaitTime, d.rushedness(),
					vv.throughsMade));
	StatisticsLogger.log("vehicle.distTraveled2DelayTime-" + group,
			simulatorTime + "," + vv.vin + ","
					+ vv.vehicleTotalTraveledDistance + ","
					+ vv.vehicleTotalWaitTime + ","
					+ vv.spawnedAtSpawnRate + "," + spawnRate
					+ "," + d.rushedness() + ","
					+ d.spawnedAtPercentRushedness + ","
					+ vv.leftTurnsMade + ","
					+ vv.rightTurnsMade + "," + vv.throughsMade
					+ "," + driverRushingLevel + ","
					+ vv.vehicleTotalStoppedTimeAtLeft + ","
					+ vv.vehicleTotalStoppedTimeAtRight + ","
					+ vv.vehicleTotalStoppedTimeAtThrough);
}
/**
 * Experiment driver: once the current spawn rate has enough samples,
 * clears the collected data and steps to the next (smaller, i.e. more
 * frequent) spawn rate; exits the process when the sweep is finished.
 */
private void statsForSpawnRate(int tick) {
	ConfidenceInterval interval = StatisticsLogger
			.calculateConfidenceIntervalForSample(spawnRate + "");
	if (interval == null)
		return;
	System.out.println("ci = " + interval + " samples = " + interval.samples
			+ " range = " + interval.range());
	if (interval.samples > 200) {
		StatisticsLogger.purgeAllSampleData();
		// increasing spawn rate
		spawnRate -= 1;
		if (spawnRate < 5) {
			System.out.println("Done!");
			System.exit(0);
		}
	}
}
/**
 * Experiment driver: runs with fixed parameters until the confidence
 * interval is tight enough (many samples, small range), then exits.
 */
private void statsForConstantEverything(int tick) {
	ConfidenceInterval interval = StatisticsLogger
			.calculateConfidenceIntervalForSample(spawnRate + "");
	if (interval == null)
		return;
	System.out.println("ci = " + interval + " samples = " + interval.samples
			+ " range = " + interval.range());
	if (interval.samples > 10000 && interval.range() < 10) {
		StatisticsLogger.purgeAllSampleData();
		System.out.println("Done!");
		System.exit(0);
	}
}
/**
 * Experiment driver: tracks confidence intervals for the rushed (g1) and
 * relaxed (g3) driver groups at the current rushed-driver percentage.
 * When both groups have enough samples, purges the data and steps the
 * percentage up by 5, exiting once it would exceed 95.
 */
private void statsForSpawnRateWithTwoRushingRates(int tick) {
	ConfidenceInterval rushed = StatisticsLogger
			.calculateConfidenceIntervalForSample("g1-"
					+ percentOfRushedDrivers);
	ConfidenceInterval relaxed = StatisticsLogger
			.calculateConfidenceIntervalForSample("g3-"
					+ percentOfRushedDrivers);
	if (relaxed == null || rushed == null) {
		System.out.println("..null");
		return;
	}
	System.out.println("ci = " + relaxed + " samples = " + relaxed.samples
			+ " range = " + relaxed.range());
	System.out.println("ci = " + rushed + " samples = " + rushed.samples
			+ " range = " + rushed.range());
	if (relaxed.samples > 200 && rushed.samples > 200) {
		StatisticsLogger.purgeAllSampleData();
		percentOfRushedDrivers += 5;
		if (percentOfRushedDrivers > 95) {
			System.out.println("Done!");
			System.exit(0);
		}
	}
}
/**
 * Experiment driver: with a fixed spawn rate, watches the "-h" (high) and
 * "-l" (low) sample sets for the current rushed-driver percentage. When
 * both are stable (enough samples, small standard deviation), purges the
 * data and raises the percentage by 10, exiting above 90.
 */
private void statsForConstantSpawnRateWithRushingRateIncreasing(int tick) {
	ConfidenceInterval high = StatisticsLogger
			.calculateConfidenceIntervalForSample(percentOfRushedDrivers
					+ "-h");
	ConfidenceInterval low = StatisticsLogger
			.calculateConfidenceIntervalForSample(percentOfRushedDrivers
					+ "-l");
	if (low == null || high == null) {
		System.out.println("..null");
		return;
	}
	System.out.println("ci = " + low + " samples = " + low.samples
			+ " range = " + low.range());
	System.out.println("ci = " + high + " samples = " + high.samples
			+ " range = " + high.range());
	if (low.samples > 50 && low.sd() < 15 && high.samples > 50
			&& high.sd() < 15) {
		StatisticsLogger.purgeAllSampleData();
		percentOfRushedDrivers += 10;
		if (percentOfRushedDrivers > 90) {
			System.out.println("Done!");
			System.exit(0);
		}
	}
}
/**
 * Experiment driver: with a fixed spawn rate, watches the g1/g3 sample
 * sets for the current driver rushing level and steps the level up by 5
 * once both have enough samples, exiting above 100.
 */
private void statsForConstantSpawnRateWithRushingLevelIncreasing(int tick) {
	ConfidenceInterval rushed = StatisticsLogger
			.calculateConfidenceIntervalForSample("g1-"
					+ driverRushingLevel);
	ConfidenceInterval relaxed = StatisticsLogger
			.calculateConfidenceIntervalForSample("g3-"
					+ driverRushingLevel);
	if (relaxed == null || rushed == null) {
		System.out.println("..null");
		return;
	}
	System.out.println("ci = " + relaxed + " samples = " + relaxed.samples
			+ " range = " + relaxed.range());
	System.out.println("ci = " + rushed + " samples = " + rushed.samples
			+ " range = " + rushed.range());
	// NOTE: unlike the other stats variants, this one does not purge the
	// collected samples before stepping the level — the purge call was
	// disabled in the original experiment code and stays disabled here.
	if (relaxed.samples > 200 && rushed.samples > 200) {
		driverRushingLevel += 5;
		if (driverRushingLevel > 100) {
			System.out.println("Done!");
			System.exit(0);
		}
	}
}
/**
 * Periodic statistics hook: runs at most once every 60 * 30 ticks and
 * delegates to the currently active experiment driver. The alternative
 * drivers (statsForSpawnRate, statsForConstantEverything,
 * statsForConstantSpawnRateWithRushingRateIncreasing,
 * statsForConstantSpawnRateWithRushingLevelIncreasing) exist as sibling
 * methods and can be swapped in here.
 */
public void statsPhase(int tick) {
	if (tick % (60 * 30) != 0)
		return;
	statsForSpawnRateWithTwoRushingRates(tick);
}
// Disabled hook: the collision check (and pausing of colliding vehicles)
// is commented out, so this phase is currently a no-op. Kept so the main
// loop's phase ordering stays visible.
public void collisionTestPhase(int tick) {
// log("\t// collision test");
// List<Vehicle> collisions =
// VehicleCollisionChecker.checkCollisions(VehicleRegistry.allRegisteredVehicles());
// for (Vehicle v : collisions)
// v.pause();
}
// Main simulation loop. Each iteration is one tick: spawn vehicles, tick
// drivers, tick intersections, commit vehicle state, (disabled) collision
// test, purge finished vehicles, then statistics. Per-step timing and
// vehicle counts are recorded for the performance report; phase order is
// load-bearing, so the loop body is left untouched.
public void run() {
try {
tick = 0;
simulatorTimeSinceCheck = System.nanoTime();
initPhase();
while (true) {
// While paused, idle cheaply without advancing simulated time.
if (paused) {
Thread.sleep(500);
continue;
}
tick++;
simulatorTicksSinceCheck++;
long startTime = System.nanoTime();
long endTime;
generateVehiclesPhase(tick);
vehicleDriversTickPhase(tick);
intersectionTickPhase(tick);
commitPhase(tick);
collisionTestPhase(tick);
purgePhase(tick);
statsPhase(tick);
endTime = System.nanoTime();
long duration = endTime - startTime;
lastSimulatorStepTotalVehicles = VehicleRegistry
.allRegisteredVehicles().size();
lastSimulatorStepTotalTime = duration;
// Advance simulated time by one tick's worth of seconds.
simulatorTime += tickLength;
// delay throttles real-time speed; 0 means run flat out.
if (delay >= 1)
Thread.sleep(delay);
}
} catch (InterruptedException e) {
e.printStackTrace();
}
}
// Suspends the main loop; run() idles until unpause() is called.
public void pause() {
paused = true;
}
// Resumes the main loop after a pause().
public void unpause() {
paused = false;
}
/**
 * Adjusts the per-tick sleep delay by delta milliseconds, clamping the
 * result at zero (never negative).
 */
public void changeSpeed(int delta) {
	delay = Math.max(0, delay + delta);
}
// Sets the per-tick sleep delay (ms) directly; 0 runs flat out.
public void setSpeed(int speed) {
delay = speed;
}
/**
 * Prints a debug message when debug logging is enabled; no-op otherwise.
 */
private void log(String str) {
	if (debug)
		System.out.println(str);
}
} | 2af25e650789fa854d9d0348fa9fdb6a26d3370a | [
"Java",
"RDoc"
] | 27 | Java | ns/interchange | 9b6272ee8942914ab231c802503a524c7eb7b3d4 | a6c2e6422731f79195229f208be187772a6a78c7 |
refs/heads/master | <repo_name>KKStudio/menu<file_sep>/src/migrations/2014_10_27_200824_kkstudio_create_menu_table.php
<?php
use Illuminate\Database\Schema\Blueprint;
use Illuminate\Database\Migrations\Migration;
// Migration creating the table that backs the KKStudio menu package.
class KkstudioCreateMenuTable extends Migration {
/**
* Run the migrations.
*
* @return void
*/
public function up()
{
Schema::create('kkstudio_menu_menu', function($table) {
$table->increments('id');
// Visibility toggle; presumably 1 = shown, 0 = hidden — TODO confirm
// against the repository's all() query which filters on enabled = 1.
$table->integer('enabled');
// Sort order of the item within the menu.
$table->integer('position');
$table->string('display_name');
$table->string('slug')->nullable();
$table->string('route')->nullable();
$table->string('params')->nullable();
$table->nullableTimestamps();
});
}
/**
* Reverse the migrations.
*
* @return void
*/
public function down()
{
Schema::drop('kkstudio_menu_menu');
}
}
<file_sep>/src/Kkstudio/Menu/Repositories/MenuRepository.php
<?php namespace Kkstudio\Menu\Repositories;
use Kkstudio\Menu\Models\Menu as Model;
class MenuRepository {
/**
* Returns all enabled menu items ordered by their position.
*
* @return \Illuminate\Database\Eloquent\Collection
*/
public function all()
{
return Model::where('enabled', 1)->orderBy('position', 'asc')->get();
}
/**
* Fetches a single menu item by id; throws when it does not exist.
*
* @param int $id
* @return \Kkstudio\Menu\Models\Menu
*/
public function get($id)
{
return Model::findOrFail($id);
}
/**
* Returns the highest position value currently in the menu table, or 0
* when the table is empty (used to append new items at the end).
*
* @return int
*/
public function max() {
$last = Model::orderBy('position', 'desc')->first();
// An empty table means there is no maximum yet; treat that as 0.
return $last ? $last->position : 0;
}
public function create($display_name, $route, $params, $slug, $position)
{
return Model::create([
'display_name' => $display_name,
'route' => $route,
'params' => $params,
'slug' => $slug,
'position' => $position,
'enabled' => 1
]);
}
}<file_sep>/src/Kkstudio/Menu/MenuServiceProvider.php
<?php namespace Kkstudio\Menu;
use Illuminate\Support\ServiceProvider;
class MenuServiceProvider extends ServiceProvider {
/**
* Indicates if loading of the provider is deferred.
*
* @var bool
*/
protected $defer = false;
/**
* Bootstrap the application events.
*
* @return void
*/
public function boot()
{
$this->package('kkstudio/menu');
\Route::group([ 'prefix' => 'admin', 'before' => 'admin'], function() {
\Route::get('menu', '\Kkstudio\Menu\Controllers\MenuController@admin');
\Route::post('menu/create', '\Kkstudio\Menu\Controllers\MenuController@postAdd');
\Route::get('menu/{id}/edit', '\Kkstudio\Menu\Controllers\MenuController@edit');
\Route::post('menu/{id}/edit', '\Kkstudio\Menu\Controllers\MenuController@postEdit');
\Route::get('menu/{id}/delete', '\Kkstudio\Menu\Controllers\MenuController@delete');
\Route::post('menu/{id}/delete', '\Kkstudio\Menu\Controllers\MenuController@postDelete');
\Route::post('menu/swap', '\Kkstudio\Menu\Controllers\MenuController@swap');
});
}
/**
* Register the service provider.
*
* @return void
*/
public function register()
{
//
}
/**
* Get the services provided by the provider.
*
* @return array
*/
public function provides()
{
return array();
}
}
<file_sep>/src/controllers/MenuController.php
<?php namespace Kkstudio\Menu\Controllers;
use Illuminate\Routing\Controller;
use Kkstudio\Menu\Models\Menu;
use Kkstudio\Menu\Repositories\MenuRepository;
class MenuController extends Controller {
protected $repo;
public function __construct(MenuRepository $repo)
{
$this->repo = $repo;
}
public function admin()
{
$menu = $this->repo->all();
return \View::make('menu::admin')->with('menu', $menu);
}
public function postAdd()
{
$enabled = 1;
$display_name = \Request::get('display_name');
$slug = \Request::get('slug');
$route = \Request::get('route');
$params = \Request::get('params');
$lp = $this->repo->max() + 1;
$this->repo->create($display_name, $route, $params, $slug, $lp);
\Flash::success('Wpis w menu został utworzony.');
return \Redirect::to('admin/menu');
}
public function edit($id)
{
$item = $this->repo->get($id);
return \View::make('menu::edit')->with('menu', $item);
}
public function postEdit($id)
{
$item = $this->repo->get($id);
if(! \Request::get('display_name')) {
\Flash::error('Musisz podać nazwę.');
return \Redirect::back()->withInput();
}
if(! \Request::get('route')) {
\Flash::error('Musisz podać ścieżkę');
return \Redirect::back()->withInput();
}
$display_name = \Request::get('display_name');
$route = \Request::get('route');
$params = \Request::get('params');
$item->display_name = $display_name;
$item->route = $route;
$item->params = $params;
$item->save();
\Flash::success('Wpis z menu edytowany pomyślnie.');
return \Redirect::back();
}
public function delete($id)
{
$item = $this->repo->get($id);
return \View::make('menu::delete')->with('menu', $item);
}
public function postDelete($id)
{
$item = $this->repo->get($id);
$item->delete();
\Flash::success('Wpis z menu usunięty.');
return \Redirect::to('admin/menu/');
}
public function swap() {
$id1 = \Request::get('id1');
$id2 = \Request::get('id2');
$first = $this->repo->get($id1);
$second = $this->repo->get($id2);
$first->moveAfter($second);
\Flash::success('Posortowano.');
return \Redirect::back();
}
}<file_sep>/README.md
menu
====
Menu module for KK Studio CMS
<file_sep>/src/Kkstudio/Menu/Models/Menu.php
<?php namespace Kkstudio\Menu\Models;
use Illuminate\Database\Eloquent\Model as Eloquent;
use App\Http\Traits\Sortable as SortableTrait;
class Menu extends Eloquent {
use SortableTrait;
protected $table = 'kkstudio_menu_menu';
protected $guarded = [ 'id' ];
}<file_sep>/src/Kkstudio/Menu/Menu.php
<?php namespace Kkstudio\Menu;
class Menu extends \App\Module {
protected $menu;
protected $repo;
protected $data = [];
public function __construct()
{
$this->repo = new Repositories\MenuRepository;
$this->menu = $this->prepareUrl($this->repo->all());
$this->data = $this->intoArray($this->menu);
}
public function get()
{
return $this->menu;
}
private function intoArray($data)
{
$table = [];
foreach($data as $item) {
$table[$item->id] = $item;
}
return $table;
}
private function prepareUrl($menu)
{
foreach($menu as $key => $item) {
$url = $item->route;
$params = json_decode($item->params, true);
foreach($params as $param => $value) {
$url = str_replace('{$'. $param . '}', $value, $url);
}
$menu[$key]->url = $url;
}
return $menu;
}
} | 91ee3fab0732228339a68736a0432e330a6a998f | [
"Markdown",
"PHP"
] | 7 | PHP | KKStudio/menu | b70d15a219b4dada4b7a41549119c94c20cbc8be | f3d2a826d3c95a8c9d22eea820736a1e7f069db9 |
refs/heads/master | <repo_name>sciencecoder/D3js-scatterplot<file_sep>/README.md
# D3js-scatterplot
<file_sep>/chart.js
var svg = d3.select('svg');
var margin = 50;
var svgWidth = parseInt(svg.attr('width')) - margin;
var svgHeight = parseInt(svg.attr('height')) - margin;
var url = 'https://raw.githubusercontent.com/FreeCodeCamp/ProjectReferenceData/master/cyclist-data.json';
function formatDate(numberDate) {
return number;
}
//x and y coordinate origin is from the top left
$.getJSON(url, function(data) {
var fastestTime = new Date('2016-01-01T00:' + data[0].Time);
var barWidth = svgWidth / data.length;
function difFromFastestTime(time) {
return time - fastestTime;
}
var xScale = d3.scaleTime()
.domain([d3.max(data, function(d) {
var a = new Date('2016-01-01T00:' + d.Time)
return difFromFastestTime(a);
}), d3.min(data, function(d) {
var a = new Date('2016-01-01T00:' + d.Time)
return difFromFastestTime(a);
})])
.range([margin, svgWidth]);
var yScale = d3.scaleLinear()
.domain([0, d3.max(data, function(d) {
return d.Place;
})])
.range([margin, svgHeight]);
var tool_tip = d3.tip()
.attr("class", "d3-tip")
.offset([-8, 0])
.html(function(d) {
return d.Name + ': ' + d.Nationality + ', Year: ' + d.Year + ', Time: ' + d.Time + '</br>' + d.Doping;
});
svg.call(tool_tip);
svg.selectAll('g').data(data).enter().append('circle')
.attr('r', 5)
.attr('class', function(d, i) {
return 'data-point' + i
})
.attr('fill', function(d) {
return d.Doping != '' ? 'red' : 'black';
})
.attr('cx', function(d) {
return xScale(difFromFastestTime(new Date('2016-01-01T00:' + d.Time)))
})
.attr('cy', function(d) {
return yScale(d.Place)
})
.on('mouseover', tool_tip.show)
.on('mouseleave', tool_tip.hide);
svg.selectAll('g').data(data).enter().append('text')
.text(function(d) {
return d.Name;
})
.attr('x', function(d) {
return xScale(difFromFastestTime(new Date('2016-01-01T00:' + d.Time))) + 10
}).attr('y', function(d) {
return yScale(d.Place) + 5
});
var xAxis = d3.axisBottom(xScale).tickFormat(d3.timeFormat("%M:%S"));
var yAxis = d3.axisLeft(yScale);
//Create an SVG group Element for the Axis elements and call the xAxis function
var xAxisGroup = svg.append("g")
.attr("transform", "translate(0," + (svgHeight) + ")")
.call(xAxis);
var yAxisGroup = svg.append('g')
.attr("transform", "translate(" + (margin) + ",0)")
.call(yAxis);
});
| 2baea4987e8ae52982bcce470542b3faf5f8f611 | [
"Markdown",
"JavaScript"
] | 2 | Markdown | sciencecoder/D3js-scatterplot | 7bb55fccc018b2483ba97561b2df61a69929c196 | 7143c7031ba96b6e7684428a74a29855aca76ce0 |
refs/heads/master | <repo_name>swfz/autoreview-sample<file_sep>/app.rb
#!/usr/bin/env ruby
def main
p 'hello'
end
main
| cc002e4b8fc8d2952e28f2e3859880f66bc3f188 | [
"Ruby"
] | 1 | Ruby | swfz/autoreview-sample | e5058bc812308fdf24532160147c886e304d8100 | d3ba7058f5017ca848be811aae28ec627999d27d |
refs/heads/master | <file_sep>#include <stdio.h>
#include <string.h>
#include <assert.h>
/* 本程序实现字符串匹配算法 */
#define SEARCH_STR_MAX_LEN 128
/*******************************************
* function: 在串s中查找串t出现的位置
* date: 2019-12-18
* common: 自己实现的简单版本
******************************************/
int my_get_substring_pos (const char *s, const char *t)
{
if (NULL == s || NULL == t)
return -1;
int len_s = strlen (s);
int len_t = strlen (t);
if (len_s < len_t || len_s == 0 || len_t == 0)
return -1;
int s_beg_cmp_pos = 0;
int t_idx = 0;
int s_idx = 0;
while (s_beg_cmp_pos <= (len_s-len_t))
{
s_idx = s_beg_cmp_pos;
while (t_idx < len_t)
{
if (s[s_idx] == t[t_idx])
{
++s_idx;
++t_idx;
}
else
{
++s_beg_cmp_pos;
t_idx = 0;
break;
}
}
if (t_idx == len_t)
return s_beg_cmp_pos;
}
return -1;
}
/*******************************************
* function: 在串s中查找串t出现的位置
* date: 2019-12-18
* common: 严蔚敏-简单算法
******************************************/
int simple_str_match (const char *s, const char *t)
{
if (NULL == s || NULL == t)
return -1;
int len_s = strlen (s);
int len_t = strlen (t);
if (len_s < len_t || len_s == 0 || len_t == 0)
return -1;
int i = 0;
int j = 0;
while (i < len_s && j < len_t)
{
if (s[i] == t[j])
{
++i;
++j;
}
else
{
i = i - j + 1;
j = 0;
}
}
if (j == len_t)
return i - len_t;
else
return -1;
}
/*******************************************
* function: 在串s中查找串t出现的位置
* date: 2019-12-18
* common: 严蔚敏-首尾尾匹配算法,对简单算法的优化,先比较首尾字符相等的情况下再依次比较中间字符
******************************************/
int optimize_simple_str_match (const char *s, const char *t)
{
if (NULL == s || NULL == t)
return -1;
int len_s = strlen (s);
int len_t = strlen (t);
if (len_s < len_t || len_s == 0 || len_t == 0)
return -1;
int i = 0;
int j = 0;
while (i <= len_s - len_t)
{
if (s[i] == t[j])
{
// 首字符相等
if (s[i+len_t-1] == t[len_t-1])
{
// 末字符相等
while (j < len_t - 2) // t串中间的字符还有len_t-2个字符需要比较
{
// 首末字符都相等则依次比较中间字符
if (s[i+j+1] == t[j+1])
{
//如果相等则继续比较下一个字符
j++;
if (j == len_t - 2)
return i;
}
else
{
// 如果中间字符出现不相等的情况则回溯
++i;
j = 0;
break;
}
}
return i; // 中间没有需要比较的字符则返回i位置
}
else
{
// 末字符不相等则回溯
++i;
j = 0;
}
}
else
{
// 首字符不相等则回溯
++i;
j = 0;
}
}
return -1;
}
/*******************************************
* function: KMP算法求next值
* date: 2019-12-24
* common: KMP算法求next值(严蔚敏视频12集)
******************************************/
void get_next (const char *t, int *next)
{
if (NULL == t || NULL == next)
return ;
next[0] = -1;
int i = 0;
int len_t = strlen(t);
int j = -1;
while (i < len_t)
{
if (-1 == j || t[i] == t[j])
{
++i;
++j;
next[i] = j;
}
else
{
j = next[j];
}
}
}
/*******************************************
* function: 在串s中查找串t出现的位置
* date: 2019-12-24
* common: KMP算法(严蔚敏视频12集)
******************************************/
int kmp_str_match (const char* s, const char* t)
{
int i = 0;
int j = 0;
int len_s = strlen(s);
int len_t = strlen(t);
int next[SEARCH_STR_MAX_LEN] = {0};
if (len_s < len_t || len_s == 0 || len_t == 0)
return -1;
// 求模式串t的每个字符的next值
get_next (t,next);
while (i < len_s && j < len_t)
{
if (j == -1 || s[i] == t[j])
{
++i;
++j;
}
else
j = next[j]; // 如果碰到不能匹配的字符则i不回溯,j回溯到其当前不匹配字符的next值的位置。
}
if (j == len_t)
return i - len_t;
return -1;
}
void unit_test ()
{
assert (-1 == my_get_substring_pos ("hello,world", ""));
assert (-1 == my_get_substring_pos ("", "a"));
assert (-1 == my_get_substring_pos ("hello", "hello,"));
assert (-1 == my_get_substring_pos ("", ""));
assert (-1 == my_get_substring_pos ("hello,world", "t"));
assert (0 == my_get_substring_pos ("hello", "hello"));
assert (1 == my_get_substring_pos (";jk,.", "jk,"));
assert (5 == my_get_substring_pos ("hello, world", ", wor"));
assert (10 == my_get_substring_pos ("hello, world", "ld"));
assert (2 == my_get_substring_pos ("hello,world", "l"));
assert (58 == my_get_substring_pos ("zhonghuarenmin gonghe guo ,zhongyang renmin zhengfu,zai jintian chegnli le !", "nt"));
assert (-1 == simple_str_match ("hello,world", ""));
assert (-1 == simple_str_match ("", "a"));
assert (-1 == simple_str_match ("hello", "hello,"));
assert (-1 == simple_str_match ("", ""));
assert (-1 == simple_str_match ("hello,world", "t"));
assert (0 == simple_str_match ("hello", "hello"));
assert (1 == simple_str_match (";jk,.", "jk,"));
assert (5 == simple_str_match ("hello, world", ", wor"));
assert (10 == simple_str_match ("hello, world", "ld"));
assert (2 == simple_str_match ("hello,world", "l"));
assert (58 == simple_str_match ("zhonghu<NAME> ,zhongyang <NAME>,zai jintian chegnli le !", "nt"));
assert (-1 == optimize_simple_str_match ("hello,world", ""));
assert (-1 == optimize_simple_str_match ("", "a"));
assert (-1 == optimize_simple_str_match ("hello", "hello,"));
assert (-1 == optimize_simple_str_match ("", ""));
assert (-1 == optimize_simple_str_match ("hello,world", "t"));
assert (0 == optimize_simple_str_match ("hello", "hello"));
assert (1 == optimize_simple_str_match (";jk,.", "jk,"));
assert (5 == optimize_simple_str_match ("hello, world", ", wor"));
assert (1 == optimize_simple_str_match ("rld", "ld"));
assert (2 == optimize_simple_str_match ("hello,world", "l"));
assert (58 == optimize_simple_str_match ("zhonghu<NAME> ,zhongyang <NAME>,zai jintian chegnli le !", "nt"));
assert (-1 == kmp_str_match ("hello,world", ""));
assert (-1 == kmp_str_match ("", "a"));
assert (-1 == kmp_str_match ("hello", "hello,"));
assert (-1 == kmp_str_match ("", ""));
assert (-1 == kmp_str_match ("hello,world", "t"));
assert (0 == kmp_str_match ("hello", "hello"));
assert (1 == kmp_str_match (";jk,.", "jk,"));
assert (5 == kmp_str_match ("hello, world", ", wor"));
assert (1 == kmp_str_match ("rld", "ld"));
assert (2 == kmp_str_match ("hello,world", "l"));
assert (58 == kmp_str_match ("zhonghuaren<NAME> ,zhongyang <NAME>,zai <NAME> !", "nt"));
printf ("test OK!\n");
}
int main (void)
{
unit_test ();
return 0;
}
<file_sep># C-projects
some projects use C Language
It's a good practice
<file_sep>#include <stdio.h>
#include <stdlib.h>
#include "stack.h"
/* 本程序利用栈求得运算式的后缀形式,并根据其后缀形式求值 */
/* 本程序假定输入的运算式都是合法的,数字只有一位不能处理多位数字 */
/* 编译运行方法
compile: gcc postfix_expression.c stack.c
hanb@:stack$ ./a.out
Input the expression (End with '#'): 1 * 2 + (3-9 / 2) * 2 #
postfix expression is: 12*392/-2*+#
Calculation result is : -1.000000.
*/
/****************************************************
* function: 获取键盘输入的表达式字串
* author: herbert
* date: 2019-12-10
****************************************************/
void get_input (char *out)
{
int idx = 0;
char ch;
while ( ( ch = getchar () ) != EOF)
{
if (ch == '\r' || ch == '\n')
break;
out[idx++] = ch;
}
return;
}
/****************************************************
* function: 判断是否是操作符,#也看作操作符
* author: herbert
* date: 2019-12-10
****************************************************/
BOOL is_operator (char c)
{
return ( (c == '+') || (c == '-') || (c == '*') || (c == '/') || (c == '#') );
}
/****************************************************
* function: 判断操作符优先级
* author: herbert
* date: 2019-12-10
* common: 第一个操作数是否比第二个操作数优先级高
****************************************************/
BOOL is_priority_higher (char first_op, char second_op)
{
return (
( '#' != first_op && ('#' == second_op || '(' == second_op) ) ||
( ( first_op == '*' || first_op == '/' ) && ( second_op == '+' || second_op == '-') )
);
}
/****************************************************
* function: 获取一个表达式的后缀形式
* author: herbert
* date: 2019-12-10
* common: result中存放后缀式
****************************************************/
int get_postfix_expression (const char *ori_expression, char *result)
{
if (NULL == ori_expression || NULL == result)
{
printf ("invalid params\n");
return -1;
}
int rst_idx = 0;
Stack *stack_op = NULL;
init_stack (&stack_op); // 初始化运算符栈
char push_ch = '#';
char pop_ch = 0;
char top_ch = 0;
push (stack_op, &push_ch, sizeof (char)); // 栈底压入#表示最低优先级的运算符
const char *p = ori_expression;
while ('#' != *p)
{
if (' ' == *p || '\t' == *p)
{
++p;
continue;
}
if (!is_operator(*p)) // 不是操作符号
{
if ( '(' == *p ) // 左括号入栈
{
push_ch = '(';
push (stack_op, &push_ch, sizeof (char));
}
else if ( ')' == *p) // 遇到右括号则出栈直到左括号
{
pop (stack_op, &pop_ch, sizeof (char));
while ('(' != pop_ch)
{
result[rst_idx++] = pop_ch;
pop (stack_op, &pop_ch, sizeof (char));
}
}
else
result[rst_idx++] = *p;
}
else // 是操作符
{
get_top (stack_op, &top_ch, sizeof (char));
while (!is_priority_higher (*p, top_ch)) // 将当前栈中优先级高于*p的操作符全部出栈放入result
{
pop (stack_op, &pop_ch, sizeof (char));
result[rst_idx++] = pop_ch;
get_top (stack_op, &top_ch, sizeof (char));
}
if (is_priority_higher (*p, top_ch)) // 优先级高于栈顶元素则入栈
{
push (stack_op, (const void*)p, sizeof (char));
}
}
++p;
}
// 读到表达式结束的最后一个# 将栈中非#操作符号全部pop存入result
while (!is_empty (stack_op))
{
pop (stack_op, &pop_ch, sizeof (char));
if ('#' != pop_ch)
result[rst_idx++] = pop_ch;
}
result[rst_idx] = '#';
clear_stack (stack_op);
return 0;
}
/****************************************************
* function: 根据操作符计算结果
* author: herbert
* date: 2019-12-10
* common: ab*cde/-f*+
****************************************************/
float get_value (float first_val, float second_val, char op)
{
switch (op)
{
case '+':
return first_val + second_val;
case '-':
return first_val - second_val;
case '*':
return first_val * second_val;
case '/':
return first_val / second_val;
default:
printf ("get_value: invalid op\n");
exit (-1);
}
}
/****************************************************
* function: 根据后缀表达式求值
* author: herbert
* date: 2019-12-10
* common: ab*cde/-f*+
****************************************************/
int get_value_by_postfix_expression (const char *post_ex, float *ret)
{
if (NULL == post_ex || NULL == ret)
return -1;
Stack *stack = NULL;
init_stack (&stack);
const char *p = post_ex;
float push_val = 0.0;
float first_op_val = 0.0;
float second_op_val = 0.0;
float temp_ret = 0.0;
while (*p != '#')
{
if (!is_operator(*p))
{
push_val = (*p - '0') * 1.0;
push (stack, &push_val, sizeof(float));
}
else
{
if (stack->top < 1)
{
printf ("invalid postfix expression\n");
return -1;
}
pop (stack, &second_op_val, sizeof (float));
pop (stack, &first_op_val, sizeof (float));
temp_ret = get_value (first_op_val, second_op_val, *p);
push (stack, &temp_ret, sizeof (float));
}
++p;
}
// 最终的结果就是栈顶唯一的元素,取出它到ret
pop (stack, ret, sizeof (float));
return 0;
}
int main (int argc, char* argv[])
{
printf ("Input the expression (End with '#'): ");
char input_str[100] = {0};
get_input (input_str);
char postfix_expression [100] = {0};
get_postfix_expression (input_str, postfix_expression);
printf ("postfix expression is: %s\n", postfix_expression);
float result = 0.0;
get_value_by_postfix_expression (postfix_expression, &result);
printf ("Calculation result is : %f.\n", result);
return 0;
}
<file_sep>#include <stdio.h>
#include "stack.h"
/* 本程序用实现的通用栈来实现数制转换 */
/* 十进制转八进制 */
int main (void)
{
Stack *stack = NULL;
if ( NULL == (stack = init_stack (&stack)) )
{
printf ("init_stack error\n");
return -1;
}
int remainder = 0;
int decimal_number = 0;
int top_num = 0;
while (1)
{
printf ("input the decimal number: ");
scanf ("%d", &decimal_number);
if (0 > decimal_number)
{
printf ("invalid number\n");
return -1;
}
while (decimal_number > 0)
{
remainder = decimal_number % 8;
push (stack, &remainder, sizeof (int));
decimal_number /= 8;
}
printf ("Octal number is: ");
while (!is_empty (stack))
{
pop (stack, &top_num, sizeof (int));
printf ("%d",top_num);
}
printf ("\n");
}
clear_stack (stack);
return 0;
}
<file_sep>#include <stdio.h>
/* 本程序记录一下二叉树一些递归操作的例子 */
/* 1、统计二叉树中叶子节点的个数,伪代码 */
/* 叶子节点的个数=左子树叶子节点个数+右子树叶子节点个数 */
// 自己写的求叶子节点个数
int get_binarytree_leaf (BiTree *root, int *num)
{
if (NULL == root)
return 0;
if (NULL == root->l_node && NULL == root->r_node)
*num++;
get_binarytree_leaf (root->l_node,num);
get_binarytree_leaf (root->r_node,num);
return 0;
}
// 严蔚敏(ywm)视频中代码求叶子节点个数
void ywm_get_binarytree_leaf (BiTree *T, int *num)
{
if (T) // 树不为空
{
if (NULL == T->l_node && NULL == T->r_node) // 如果是叶子节点
*num++;
// 其实可以将下面两句写入到else分支里面
ywm_get_binarytree_leaf (T->l_node, num);
ywm_get_binarytree_leaf (T->r_node, num);
}
}
/* 2、求二叉树的深度 */
/* 二叉树深度=1+max(左子树深度,右子树深度) */
// 自己写的求二叉树的深度
int get_depth (BiTree *T)
{
int l_depth = 0;
int r_depth = 0;
if (NULL == T)
return 0;
l_depth = get_depth (T->l_node);
r_depth = get_depth (T->r_node);
return 1 + (l_depth > r_depth ? l_depth : r_depth);
}
// ywm视频中代码求二叉树深度,和我自己写的逻辑是一样的
int ywm_get_depth (BiTree *T)
{
if (NULL == T)
depthval = 0;
else
{
depthLeft = ywm_get_depth (T->l_node);
depthRight = ywm_get_depth (T->r_node);
depthval = 1 + (depthLeft > depthRight ? depthLeft : depthRight);
}
return depthval;
}
/* 3、复制二叉树 */
/* 建立根节点 复制左子树 复制右子树 根节点左指针指向左子树 右指针指向右子树 */
// 我的复制二叉树实现,我这种实现方式从代码量上来看比下面严蔚敏那种方式还简单一些.
BiTree * copy_binary_tree (BiTree *T)
{
if (NULL == T)
return NULL;
BiTree *new_node = (BiTree *)malloc (sizeof (BiTree));
if (NULL == new_node)
return NULL;
new_node->data = T->data;
new_node->l_node = copy_binary_tree (T->l_node);
new_node->r_node = copy_binary_tree (T->r_node);
return new_node;
}
// ywm实现复制二叉树方式
BiTree *GetTreeNode (int data, BiTree *lptr, BiTree *rptr)
{
if (!(T = (BiTree*)malloc (sizeof (BiTree))))
exit (1);
T->data = data;
T->l_node = lptr;
T->r_node = rptr;
return T;
}
BiTree *ywm_copy_binary_tree (BiTree *T)
{
if (!T)
return NULL;
if (T->l_node)
lptr = ywm_copy_binary_tree (T->l_node);
else
lptr = NULL;
if (T->r_node)
rptr = ywm_copy_binary_tree (T->r_node);
else
rptr = NULL;
newnode = GetTreeNode (T->data, lptr, rptr);
return newnode;
}
/* 4、按照给定的先序序列建立二叉链表,伪代码 eg:"ABC---DE---"可以创建一棵二叉树 */
int create_binary_tree (BiTree **T) // T是一个out型参数
{
scanf (&ch);
if (ch == '-')
*T = NULL;
else
{
if (!(*T = (BiTree*)malloc (sizeof (BiTree))))
exit (1);
*T->data = ch; // 生成根节点并赋值
create_binary_tree (&T->l_node); //生成左子树
create_binary_tree (&T->r_node); //生成右子树
}
return 0;
}
/* 5、 按给定的表达式前序序列建相应二叉树(a+b)*c-d/e ->前序: -x+abc/de */
int create_binary_tree_by_pre (BiTree **T)
{
scanf (&ch)
if (ch == num) // ch是操作数
{
*T = malloc (sizeof(BiTree));
*T->data = num;
*T->l_node = NULL;
*T->r_node = NULL;
}
else // ch是操作符
{
*T = malloc (sizeof (BiTree));
*T->data = ch;
create_binary_tree_by_pre (*T->l_node);
create_binary_tree_by_pre (*T->r_node);
}
return 0;
}
/* 6、按给定的表达式后缀序列建相应二叉树(a+b)*c-d/e ->后缀: ab+c*de/- */
// 递归法感觉不能解决这个问题,需要用栈
int create_binary_tree_by_post (BiTree **T)
{
scanf (&ch)
if (ch = num) // ch是操作数
{
*lptr = malloc (sizeof(BiTree));
*T
}
else //ch是操作符
{
}
return 0;
}
/* 7、根据前序和中序序列构建二叉树 */
// pre: A B I D E F G C H J
// mid: I B F E D G A J H C
<file_sep>#ifndef _STACK_H_
#define _STACK_H_
typedef void * Stack_node;
typedef unsigned char BOOL;
#define INIT_STACK_LEN 4096
#define TRUE 1
#define FALSE 0
typedef enum op_status
{
ERROR = -1,
OK = 0
} OP_STATUS;
typedef struct Stack_t
{
int top;
Stack_node node_array[INIT_STACK_LEN];
} Stack;
/* function defination */
Stack* init_stack (Stack **);
BOOL is_empty (Stack *stack);
BOOL is_full (Stack *stack);
OP_STATUS push (Stack *stack, const void *data, int data_size);
OP_STATUS pop (Stack *stack, void *buff, int data_size);
OP_STATUS get_top (Stack *stack, void *buff, int data_size);
void clear_stack (Stack *stack);
#endif
<file_sep>#include <stdio.h>
#include "stack.h"
typedef struct point
{
int x;
int y;
} Point;
int main (void)
{
Stack *stack = NULL;
/* 栈用来存储整数 */
init_stack (&stack);
int interger;
// 输入1 2 3 4 5 6 7 8 -1
// 输出8 7 6 5 4 3 2 1
while (1)
{
scanf ("%d", &interger);
if (0 > interger) //取一个输入终止条件
break;
push (stack, &interger, sizeof (int));
}
while (!is_empty (stack))
{
pop (stack, &interger, sizeof(int));
printf ("%d ", interger);
}
printf ("\n");
/* 销毁整数栈 */
clear_stack (stack);
/* 栈用来存储Point结构 */
init_stack (&stack);
Point pt;
// 输入1,2 2,3 3,4 4,5 5,6 6,7 7,8 8,9 -1,2
// 输出(8,9) (7,8) (6,7) (5,6) (4,5) (3,4) (2,3) (1,2)
while (1)
{
scanf ("%d,%d", &pt.x, &pt.y);
if (0 > pt.x)
break;
push (stack, &pt, sizeof (Point));
}
while (!is_empty (stack))
{
pop (stack, &pt, sizeof (Point));
printf ("(%d,%d) ", pt.x, pt.y);
}
printf ("\n");
clear_stack (stack);
return 0;
}
<file_sep>#ifndef _SPARSE_ARRAY_H_
#define _SPARSE_ARRAY_H_
#define ROW 5
#define COL 8
#define MAX_NONZERO_NODE_NUM 200 // 按照稀疏矩阵的定义,若有200个非零元则支持的最大总元素个数为200/(5/100) = 4000
#define MAX_NODE_VALUE 65535
/* 本程序实现的是稀疏数组的有效表示方法 */
/* 稀疏矩阵需要考虑到存储空间的节省和计算时间的节约 */
typedef struct matrix_node
{
int r_idx; // 稀疏矩阵非零元的行标
int c_idx; // 稀疏矩阵非零元的列标
int value; // 稀疏矩阵非零元的值
} matrix_node_t;
typedef struct sparse_matrix
{
int row; // 稀疏矩阵行数
int col; // 稀疏矩阵列数
int node_num; // 稀疏矩阵非零元个数
matrix_node_t nodes[MAX_NONZERO_NODE_NUM]; // 存储非零元节点
int row_first[MAX_NONZERO_NODE_NUM]; // 每行首元素在nodes中的位置
} Sparse_matrix;
#endif
<file_sep>#include <stdio.h>
#include <string.h>
/* 坐标移动 */
int is_number (char ch)
{
return (ch >= '0' && ch <= '9');
}
int is_valid_dir_ch (char ch)
{
return (ch == 'A' || ch == 'D' || ch == 'W' || ch == 'S');
}
int is_valid (char *str)
{
int len = strlen (str);
int i;
if (len != 2 && len != 3)
{
return 0;
}
if (str[0] != 'A' && str[0] != 'D' && str[0] != 'W' && str[0] != 'S')
{
return 0;
}
for (i = 1; i < len; ++i)
{
if (!is_number(str[i]))
{
return 0;
}
}
return 1;
}
void cacl_pix (char *one_pix, int *x_pos, int *y_pos)
{
int off_val = 0;
if (!is_valid (one_pix))
{
return;
}
char *p = one_pix + 1;
while (*p != '\0' && is_number (*p))
{
off_val = off_val * 10 + (*p - '0');
++p;
}
switch (one_pix[0])
{
case 'A':
*x_pos -= off_val;
break;
case 'D':
*x_pos += off_val;
break;
case 'W':
*y_pos += off_val;
break;
case 'S':
*y_pos -= off_val;
break;
default:
break;
}
return;
}
int main (void)
{
int x_pos = 0;
int y_pos = 0;
char input[3000] = {0};
char *p = NULL;
char *beg = NULL;
char *end = NULL;
char one_pix[5] = {0};
int one_pix_num = 0;
while (fgets (input, 3000, stdin) != NULL)
{
x_pos = 0;
y_pos = 0;
if (input[strlen(input)-1] == '\n')
input[strlen(input)-1] = '\0';
p = input;
beg = input;
while (*p != '\0')
{
while (!is_valid_dir_ch(*beg) && *beg != '\0') /* 此循环保证跳到下一个有效坐标位置的头 */
{
while (*beg != ';')
{
++beg;
}
++beg;
}
if (*beg == '\0')
break;
memset (one_pix, 0, 5);
one_pix_num = 0;
end = beg;
while (*end != ';')
{
one_pix[one_pix_num++] = *end;
++end;
}
one_pix[one_pix_num] = '\0';
cacl_pix (one_pix, &x_pos, &y_pos);
p = end+1;
beg = end+1;
}
printf ("%d,%d\n", x_pos, y_pos);
memset (input, 0, 3000);
}
return 0;
}
<file_sep>#include <string.h>
#include <stdio.h>
#include "stack.h"
#include <assert.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdlib.h>
/* 本程序的主要目的是进行括号匹配的判断 */
/* gcc -o bracket_match bracket_match.c stack.h stack.c
* ./bracket_match argv[1]
*/
// 判断是不是左括号
BOOL is_left_brackets (char ch)
{
return ( ( ch == '(') ||
( ch == '[') ||
( ch == '{') );
}
// 判断是不是右括号
BOOL is_right_brackets (char ch)
{
return ( (ch == ')') ||
(ch == ']') ||
(ch == '}') );
}
// 判断两个括号是否匹配
BOOL is_a_bracket_pair (char l_b, char r_b)
{
return ( (l_b == '(' && r_b == ')') ||
(l_b == '[' && r_b == ']') ||
(l_b == '{' && r_b == '}')
);
}
// 判断一个字符串中包含的所有括号是否匹配
BOOL is_brackets_match (char *str)
{
BOOL result = TRUE;
if (NULL == str)
return 0;
char *p = str;
char pop_val;
Stack *stack = NULL;
init_stack (&stack);
while (*p != '\0')
{
if (is_left_brackets(*p))
{
push (stack,p,sizeof(char));
}
else if (is_right_brackets(*p))
{
if (is_empty(stack))
{
result = FALSE;
goto OVER;
}
else
{
pop (stack, &pop_val, sizeof(char));
if (is_a_bracket_pair (pop_val, *p))
{
}
else
{
result = FALSE;
goto OVER;
}
}
}
else
{
++p;
continue;
}
++p;
}
if (!is_empty(stack))
{
result = FALSE;
goto OVER;
}
result = TRUE;
OVER:
clear_stack (stack);
return result;
}
// 括号匹配的单元测试
void unit_test ()
{
assert (is_brackets_match ("()") == 1);
assert (is_brackets_match ("[]") == 1);
assert (is_brackets_match ("{}") == 1);
assert (is_brackets_match ("([])") == 1);
assert (is_brackets_match ("[()]") == 1);
assert (is_brackets_match ("{()}") == 1);
assert (is_brackets_match ("[[]]") == 1);
assert (is_brackets_match ("[t[]]") == 1);
assert (is_brackets_match (")") == 0);
assert (is_brackets_match ("())") == 0);
assert (is_brackets_match ("([)]") == 0);
assert (is_brackets_match ("()[](") == 0);
assert (is_brackets_match ("[[]]") == 1);
printf ("test OK!\n");
}
// 判断运行参数argv[1]代表的文件中所有的括号是否匹配
int main (int argc, char *argv[])
{
unit_test ();
char *file_buf = NULL;
int read_byte = 0;
off_t filepos;
if (2 != argc)
{
printf ("Usage: ./a.out filename\n");
return -1;
}
int fd = open (argv[1], O_RDONLY);
if (-1 == fd)
{
printf ("open file bracket_file error!\n");
return -1;
}
filepos = lseek (fd, 0, SEEK_END);
lseek (fd, 0, SEEK_SET);
file_buf = (char*)malloc ( (int)filepos+1);
if (NULL == file_buf)
{
printf ("malloc error!\n");
return -1;
}
if ( (read_byte == read (fd, file_buf, filepos)) < 0)
{
printf ("read file error!\n");
free (file_buf);
return -1;
}
if (is_brackets_match (file_buf))
{
printf ("program edit brackets OK!\n");
}
else
{
printf ("some brackets miss_match in program!\n");
}
free (file_buf);
return 0;
}
<file_sep>/* 本程序的目的是应用栈来做迷宫的求解,方法采用的是回溯法 */
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <string.h>
#include "stack.h"
#define MAZE_SIZE 50 /* side length of the square maze map (walls included) */
#define MAZE_BEG_I 1 /* row coordinate of the entrance */
#define MAZE_BEG_J 1 /* column coordinate of the entrance */
#define MAZE_END_I (MAZE_SIZE-2) /* row coordinate of the exit */
#define MAZE_END_J (MAZE_SIZE-2) /* column coordinate of the exit */
#define PATH_NODE_NUM (MAZE_SIZE*MAZE_SIZE) /* capacity of the array recording explored path nodes */
/* Relative direction between two neighbouring maze cells. */
typedef enum dir
{
    NONE,    /* no direction probed yet from this node */
    EAST,
    SOUTH,
    WEST,
    NORTH,
    INVALID, /* all four directions exhausted / no valid move */
} Dir;
/* A single (x, y) cell coordinate on the explored path. */
typedef struct path_node
{
    int x;
    int y;
} Path_node;
/* All cells currently on the explored (pushed) path. */
typedef struct path_info
{
    int num;                         /* number of valid entries in paths[] */
    Path_node paths[PATH_NODE_NUM];  /* coordinates pushed so far */
} Path_info;
/* State saved for a node when it is explored (pushed on the stack). */
typedef struct maze_node
{
    int i; // maze row coordinate
    int j; // maze column coordinate
    Dir to_dir; // direction probed last from this node; on backtrack only directions after to_dir need trying
    Dir from_dir; // direction of the node we arrived from; treated as blocked when probing
} Maze_node;
/* global variable definitions */
int maze_map[MAZE_SIZE][MAZE_SIZE] = {0}; /* maze grid: 1 = wall/block, 0 = open cell */
Path_info paths_info = {0};               /* cells on the currently explored path */
/****************************************************
 * function: build a random maze map in the global maze_map
 * author: herbert
 * date: 2019-12-13
 * common: border cells become walls (1); roughly 30% of the
 *         grid is filled with random interior blocks; the
 *         entrance and exit cells are forced open (0).
 ****************************************************/
void create_maze_map ()
{
    int r, c, n;
    /* number of interior blocks: 30% of (MAZE_SIZE-1)^2 cells */
    int block_count = ( (MAZE_SIZE-1)*(MAZE_SIZE-1)*30 ) / 100;

    srand (time(NULL));
    memset (maze_map, 0, sizeof (maze_map));

    /* surrounding wall: border rows/columns are 1, interior 0 */
    for (r = 0; r < MAZE_SIZE; ++r)
    {
        for (c = 0; c < MAZE_SIZE; ++c)
        {
            maze_map[r][c] =
                (r == 0 || r == MAZE_SIZE - 1 || c == 0 || c == MAZE_SIZE - 1) ? 1 : 0;
        }
    }

    /* scatter random blocks inside the wall (duplicates simply overwrite) */
    for (n = 0; n < block_count; ++n)
    {
        int x = rand () % (MAZE_SIZE - 2) + 1;
        int y = rand () % (MAZE_SIZE - 2) + 1;
        maze_map[x][y] = 1;
    }

    /* entrance and exit must stay open */
    maze_map[1][1] = 0;
    maze_map[MAZE_END_I][MAZE_END_J] = 0;
}
/****************************************************
 * function: print the maze map to stdout
 * author: herbert
 * date: 2019-12-13
 * common: '#' = wall/block, '.' = open cell, '@' = any other
 *         value (e.g. a marked path cell); row and column
 *         indices are printed 1-based, modulo 10 for columns.
 *         Fix: removed the local counter `num` that was
 *         incremented for '@' cells but never read (dead code).
 ****************************************************/
void print_map ()
{
    int i,j;
    // print the column header (1-based, shown modulo 10)
    printf ("   ");
    for (i = 1; i <= MAZE_SIZE; ++i)
        printf ("%d ", i%10);
    printf ("\n");
    for (i = 0; i < MAZE_SIZE; ++i)
    {
        printf ("%2d ", i+1); // print the row label (1-based)
        for (j = 0; j < MAZE_SIZE; ++j)
        {
            if (maze_map[i][j] == 1)
                printf ("# ");
            else if (maze_map[i][j] == 0)
                printf (". ");
            else
                printf ("@ ");
        }
        printf ("\n");
    }
}
/****************************************************
* function: 查看参数代表的坐标是否在试探过的路径上
* author: herbert
* date: 2019-12-14
* common:
****************************************************/
BOOL is_node_in_paths (int i, int j)
{
int idx = 0;
for (idx = 0; idx < paths_info.num; ++idx)
{
if ( (i == paths_info.paths[idx].x) && (j == paths_info.paths[idx].y) )
return TRUE;
}
return FALSE;
}
/****************************************************
 * function: get the next direction to probe from this node
 * author: herbert
 * date: 2019-12-13
 * common: directions are tried in the fixed order
 *         EAST -> SOUTH -> WEST -> NORTH, resuming after the
 *         node's last tried direction (to_dir). A direction is
 *         acceptable when the neighbouring cell is open (0),
 *         is not already on the explored path, and is not the
 *         cell we arrived from (from_dir). Returns INVALID
 *         when no direction remains.
 ****************************************************/
Dir get_probe_dir_for_this_node (Maze_node node)
{
    /* neighbour offsets indexed by Dir value (NONE slot unused) */
    static const int row_off[] = {0, 0, 1,  0, -1}; /* NONE,EAST,SOUTH,WEST,NORTH */
    static const int col_off[] = {0, 1, 0, -1,  0};
    int d;

    /* resume probing at the direction after the last one tried;
     * to_dir == NONE starts at EAST, to_dir == NORTH (or INVALID)
     * yields an empty loop and falls through to INVALID. */
    for (d = node.to_dir + 1; d <= NORTH; ++d)
    {
        int ni = node.i + row_off[d];
        int nj = node.j + col_off[d];
        if (maze_map[ni][nj] == 0 && !is_node_in_paths (ni, nj) && node.from_dir != (Dir)d)
            return (Dir)d;
    }
    return INVALID;
}
/****************************************************
 * function: solve the maze
 * author: herbert
 * date: 2019-12-13
 * common: (MAZE_BEG_I, MAZE_BEG_J) is the entrance and
 *         (MAZE_END_I, MAZE_END_J) the exit. Iterative depth-first
 *         search with an explicit stack.
 * return: 0 when a path is found (its cells are marked 2 in maze_map),
 *         -1 when no path exists or the stack overflows.
 ****************************************************/
int solve_maze ()
{
    Stack *stack = NULL;
    init_stack (&stack);
    int is_node_pop_from_stack = FALSE;
    Maze_node node,top_node;
    node.i = MAZE_BEG_I;
    node.j = MAZE_BEG_J;
    node.from_dir = INVALID;
    node.to_dir = NONE;
    do
    {
        node.to_dir = get_probe_dir_for_this_node (node);
        if (!is_full (stack))
        {
            if (!is_node_pop_from_stack)
            {
                push (stack, &node, sizeof (Maze_node));
                // record every pushed coordinate so later probes can tell
                // whether a candidate cell is already on the path
                paths_info.paths[paths_info.num].x = node.i;
                paths_info.paths[paths_info.num].y = node.j;
                paths_info.num++;
            }
            else
            {
                // node came back off the stack: refresh the top element's to_dir
                pop (stack, NULL, sizeof (Maze_node));
                push (stack, &node, sizeof (Maze_node));
            }
        }
        else
        {
            printf ("error: stack full, top = %d\n", stack->top);
            clear_stack (stack); // fix: release the stack before bailing out (was leaked)
            return -1;
        }
        if (INVALID != node.to_dir) // the node still has an open direction
        {
            // derive the next node from to_dir
            if (node.to_dir == EAST)
            {
                node.j += 1;
                node.from_dir = WEST;
            }
            else if (node.to_dir == SOUTH)
            {
                node.i += 1;
                node.from_dir = NORTH;
            }
            else if (node.to_dir == WEST)
            {
                node.j -= 1;
                node.from_dir = EAST;
            }
            else if (node.to_dir == NORTH)
            {
                node.i -= 1;
                node.from_dir = SOUTH;
            }
            // a fresh node starts with to_dir = NONE
            node.to_dir = NONE;
            is_node_pop_from_stack = FALSE;
            // reached the exit
            if ( node.i == (MAZE_END_I) && node.j == (MAZE_END_J) )
            {
                // mark every stacked cell as part of the solution path
                while (!is_empty (stack))
                {
                    pop (stack, &top_node, sizeof (Maze_node));
                    maze_map[top_node.i][top_node.j] = 2;
                }
                clear_stack (stack);
                return 0;
            }
        }
        else // dead end
        {
            if (!is_empty (stack))
            {
                // dead end but stack not empty: backtrack to the previous node
                pop (stack, &node, sizeof (Maze_node));
                if (!is_empty (stack))
                {
                    get_top (stack, &node, sizeof (Maze_node));
                    is_node_pop_from_stack = TRUE;
                }
                else
                {
                    clear_stack (stack);
                    return -1;
                }
            }
            else
            {
                // dead end and empty stack: no path exists
                clear_stack (stack);
                return -1;
            }
        }
    } while (!is_empty (stack));
    // fix: the original fell off the end of a non-void function here (UB);
    // reaching this point means the search space is exhausted
    clear_stack (stack);
    return -1;
}
/* Driver: build a random maze, show it, solve it, show the marked path. */
int main (void)
{
    create_maze_map ();
    print_map ();   /* unsolved maze */
    solve_maze ();
    print_map ();   /* path cells now printed as '@' */
    return 0;
}
<file_sep># for stack program, you should compile the program like:
gcc -o xxx xxx.c stack.c    # do not pass stack.h to gcc; headers are pulled in via #include
<file_sep>#include <stdio.h>
#include <stdlib.h>
/* 本程序实现一个队列,通过链表结构实现,不是通用队列,只能存放整数 */
typedef struct queue_node
{
int data;
struct queue_node *next;
} Qnode, *QNODE;
typedef struct
{
QNODE q_head;
QNODE q_tail;
} Queue;
typedef unsigned char BOOL;
#define TRUE 1
#define FALSE 0
/* Allocate and initialize an empty queue through the out-parameter.
 * Returns 0 on success, -1 on bad argument or allocation failure.
 * (fix: the original was declared int but had no return on the success path.) */
int init_queue (Queue **queue)
{
    if (NULL == queue)
        return -1;
    *queue = (Queue*)malloc (sizeof(Queue));
    if (NULL == *queue)
        return -1;
    (*queue)->q_head = NULL;
    (*queue)->q_tail = NULL;
    return 0;
}
/* TRUE when the queue holds no elements (both end pointers unset). */
BOOL is_queue_empty (Queue *queue)
{
    if (queue->q_head == NULL && queue->q_tail == NULL)
        return TRUE;
    return FALSE;
}
/* Append one integer at the tail. Returns 0 on success, -1 if the
 * node allocation fails. */
int en_queue (Queue *queue, int data)
{
    QNODE node = (QNODE)malloc(sizeof(Qnode));
    if (NULL == node)
        return -1;
    node->data = data;
    node->next = NULL;
    if (is_queue_empty(queue))
    {
        /* first element: both ends point at it */
        queue->q_head = queue->q_tail = node;
    }
    else
    {
        queue->q_tail->next = node;
        queue->q_tail = node;
    }
    return 0;
}
/* Remove the head element; when 'data' is non-NULL the removed value is
 * written through it. Returns 0 on success, -1 on NULL queue or empty queue. */
int de_queue (Queue *queue, int *data)
{
    QNODE victim = NULL;
    if (NULL == queue)
        return -1;
    if (is_queue_empty (queue))
    {
        printf ("error: queue empty\n");
        return -1;
    }
    victim = queue->q_head;
    if (queue->q_head == queue->q_tail)
    {
        /* removing the last element empties both ends */
        queue->q_head = NULL;
        queue->q_tail = NULL;
    }
    else
    {
        queue->q_head = victim->next;
    }
    if (NULL != data)
    {
        *data = victim->data;
    }
    free (victim);
    return 0;
}
/* Drain all remaining elements and free the queue structure itself.
 * Returns 0. (fix: declared int but had no return value; the trailing
 * 'queue = NULL' only changed the local copy of the pointer and was dropped.) */
int clear_queue (Queue *queue)
{
    if (NULL == queue)
        return 0;
    while (!is_queue_empty(queue))
    {
        de_queue (queue, NULL);
    }
    free (queue);
    return 0;
}
/* Exercise the queue: interleaved enqueue/dequeue, then a large
 * bulk round-trip to stress allocation. */
int main (void)
{
    int k;
    int out = 0;
    Queue *q = NULL;
    init_queue (&q);
    en_queue (q, 1);
    en_queue (q, 2);
    en_queue (q, 3);
    de_queue (q, &out);
    printf ("%d ", out);
    de_queue (q, &out);
    printf ("%d ", out);
    de_queue (q, &out);
    printf ("%d ", out);
    en_queue(q, 4);
    de_queue (q, &out);
    printf ("%d\n", out);
    for (k = 0; k < 10000000; ++k)
    {
        en_queue (q, k);
    }
    while (!is_queue_empty(q))
    {
        de_queue (q, &out);
        printf ("%d ",out);
    }
    printf ("\n");
    clear_queue (q);
    return 0;
}
<file_sep>#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "stack.h"
/* 本程序试图实现一个通用的栈结构 */
/****************************************************
* function: 初始化栈栈顶指向栈底元素
* author: herbert
* date: 2019-12-10
****************************************************/
Stack* init_stack (Stack **stack)
{
if (NULL == stack)
return NULL;
*stack = (Stack *)malloc (sizeof (Stack));
if (NULL == *stack)
return NULL;
(*stack)->top = -1;
int i;
for (i = 0; i < INIT_STACK_LEN; ++i)
{
(*stack)->node_array[i] = NULL;
}
return *stack;
}
/****************************************************
 * function: report whether the stack is empty
 * author: herbert
 * date: 2019-12-10
 * comment: aborts the process when called on an uninitialized stack
 ****************************************************/
BOOL is_empty (Stack *stack)
{
    if (NULL == stack)
    {
        printf ("operate no init stack\n");
        exit (-1);
    }
    return (-1 == stack->top);
}
/****************************************************
 * function: report whether the stack is full
 * author: herbert
 * date: 2019-12-10
 * comment: aborts the process when called on an uninitialized stack
 ****************************************************/
BOOL is_full (Stack *stack)
{
    if (NULL == stack)
    {
        printf ("operate no init stack\n");
        exit (-1);
    }
    return ( (INIT_STACK_LEN - 1) == stack->top );
}
/****************************************************
 * function: copy data_size bytes of *data onto the stack
 * author: herbert
 * date: 2019-12-10
 * return: OK on success, ERROR when full or out of memory
 * fix: the original incremented top BEFORE the allocation check, so a
 *      failed malloc left top pointing at a NULL slot (corrupt stack).
 *      Now the slot is allocated first and top only moves on success.
 ****************************************************/
OP_STATUS push (Stack *stack, const void *data, int data_size)
{
    if (is_full (stack))
        return ERROR;
    void *slot = malloc (data_size);
    if (NULL == slot)
        return ERROR;
    memcpy (slot, data, data_size);
    stack->top += 1;
    stack->node_array[stack->top] = slot;
    return OK;
}
/****************************************************
 * function: pop the top element, optionally copying it out
 * author: herbert
 * date: 2019-12-10
 * modify: 2019-12-14 buff may be NULL to simply discard the top element
 * return: OK on success, ERROR when the stack is empty
 ****************************************************/
OP_STATUS pop (Stack *stack, void *buff, int data_size)
{
    if (is_empty (stack))
        return ERROR;
    void *slot = stack->node_array[stack->top];
    if (NULL != buff)
        memcpy (buff, slot, data_size);
    free (slot);
    stack->node_array[stack->top] = NULL;
    stack->top -= 1;
    return OK;
}
/****************************************************
 * function: copy the top element into buff without removing it
 * author: herbert
 * date: 2019-12-10
 * return: OK on success, ERROR on NULL buff or empty stack
 ****************************************************/
OP_STATUS get_top (Stack *stack, void *buff, int data_size)
{
    if (NULL == buff)
        return ERROR;
    if (is_empty (stack))
        return ERROR;
    memcpy (buff, stack->node_array[stack->top], data_size);
    return OK;
}
/****************************************************
 * function: free every remaining element and the stack itself
 * author: herbert
 * date: 2019-12-10
 ****************************************************/
void clear_stack (Stack *stack)
{
    if (NULL == stack)
        return;
    while (stack->top >= 0)
    {
        free (stack->node_array[stack->top]);
        stack->node_array[stack->top] = NULL;
        stack->top -= 1;
    }
    free (stack);
}
<file_sep>#include <stdio.h>
#include <stdlib.h>
/* 本程序用来实现一下线索二叉树 */
enum tag
{
INDEX,
NODE,
};
// 定义线索二叉树的node节点结构
typedef struct BiTreeNode
{
int data;
struct BiTreeNode *l_node, *r_node;
enum tag l_tag; // 左指针指向线索还是节点
enum tag r_tag; // 右指针指向线索还是节点
} BiTreeNode, *BiTree;
#define STACK_SIZE 100
// 定义一个简单的栈结构用来进行二叉树节点的操作
typedef struct stack
{
int top;
BiTree data[STACK_SIZE];
} Stack;
Stack g_stack;
// Reset the global stack: empty top marker, every slot cleared.
void init_stack ()
{
    int idx;
    g_stack.top = -1;
    for (idx = 0; idx < STACK_SIZE; ++idx)
        g_stack.data[idx] = NULL;
}
// Nonzero when the global stack holds no nodes.
int is_empty ()
{
    return (-1 == g_stack.top);
}
// Nonzero when the global stack has no free slot left.
int is_full ()
{
    return ((STACK_SIZE - 1) == g_stack.top);
}
// Push a tree node; silently drops the node when the stack is full.
void push (BiTreeNode *node)
{
    if (is_full())
        return;
    g_stack.data[++g_stack.top] = node;
}
// 出栈
BiTreeNode* pop ()
{
if (!is_empty())
return g_stack.data[g_stack.top--];
}
/******************************************
 * function: in-order traversal of a threaded binary linked list
 * date: 20200113
 * author: Herbert
 * comment: T is an extra header node whose l_node points at the real
 *          root; the traversal terminates when p cycles back to T.
 *          In a threaded tree an INDEX-tagged left link points at the
 *          in-order predecessor and an INDEX-tagged right link at the
 *          in-order successor.
 * return: always 0
 *******************************************/
int mid_traverse_thread (BiTree T, void (*visit) (int data))
{
BiTreeNode *p = T->l_node; // header's left pointer is the real root
while ( p!= T ) // traversal is complete when p returns to the header
{
while (p->l_tag == NODE) // descend to the leftmost node with a thread for a left link
p = p->l_node;
visit (p->data); // visit this node
while (p->r_tag == INDEX && p->r_node != T) // right link is a thread: it IS the successor, visit directly
{
p = p->r_node;
visit(p->data);
}
p = p->r_node; // right link is a real child: move into that subtree
}
return 0;
}
/******************************************
* function: 中序建立线索链表
* date: 2020-01-13
* author: Herbert
* comment: 在中序遍历过程中修改节点的左/右指针,一保存当前访问节点的前驱和后继信息
* 遍历过程中,附设指针pre,并始终保持指针pre指向当前访问的指针p所指节点的前驱
* 严蔚敏视频21 47分钟开始
*******************************************/
void mid_thread (BiTree T)
{
if (T)
{
mid_thread (T->l_node); // 左子树线索化
if (!p->l_node)
{
p->l_tag = INDEX;
p->l_node = pre;
}
if (!pre->r_node)
{
pre->r_tag = INDEX;
pre->r_node = p;
}
pre = p;
mid_thread (p->r_node); // 右子树线索化
}
}
<file_sep>#include <stdio.h>
// 本程序对常见的排序算法做一个梳理
/* Selection sort: ascending, in place.
 * arr: array to sort; len: number of elements.
 * fix: min_pos was never reset at the start of each outer pass, so when
 *      arr[i] was already the minimum the swap used a stale (or, on the
 *      very first pass, indeterminate) index — undefined behavior. */
void select_sort (int *arr, int len)
{
    int i, j;
    int min_pos;
    int min_val;
    int temp;
    for (i = 0; i < len - 1; ++i)
    {
        min_val = arr[i];
        min_pos = i; /* fix: track the current slot as the running minimum */
        for (j = i; j < len; ++j)
        {
            if (arr[j] < min_val)
            {
                min_val = arr[j];
                min_pos = j;
            }
        }
        if (i != min_pos)
        {
            temp = arr[i];
            arr[i] = arr[min_pos];
            arr[min_pos] = temp;
        }
    }
    return;
}
/* Insertion sort: ascending, in place.
 * arr: array to sort; len: number of elements.
 * fix: the inner loop used 'continue' where it should stop — once
 *      cur_num no longer needs to move left, everything further left is
 *      already in order, so 'break' avoids useless extra comparisons
 *      (output is unchanged). */
void insert_sort (int *arr, int len)
{
    int i;
    int j;
    int cur_num;
    for (i = 1; i < len; ++i)
    {
        cur_num = arr[i];
        for (j = i; j >= 1; --j)
        {
            if (arr[j] < arr[j-1])
            {
                /* bubble cur_num one slot to the left */
                arr[j] = arr[j-1];
                arr[j-1] = cur_num;
            }
            else
            {
                break; /* insertion point found */
            }
        }
    }
    return ;
}
/* Shell sort: ascending, in place, using the simple gap-halving
 * sequence (len/2, len/4, ..., 1). Each pass is a gap-spaced insertion
 * sort. (fix: the original was an empty stub.) */
void shell_sort (int *arr, int len)
{
    int gap, i, j, tmp;
    for (gap = len / 2; gap > 0; gap /= 2)
    {
        for (i = gap; i < len; ++i)
        {
            tmp = arr[i];
            /* shift gap-spaced predecessors right until tmp fits */
            for (j = i; j >= gap && arr[j-gap] > tmp; j -= gap)
                arr[j] = arr[j-gap];
            arr[j] = tmp;
        }
    }
}
/* Print len integers from arr, space-separated, newline-terminated. */
void print_arr (int *arr, int len)
{
    int idx;
    for (idx = 0; idx < len; ++idx)
        printf ("%d ", arr[idx]);
    printf ("\n");
    return;
}
#define ARR_LEN 10
/* Driver: sort a fixed sample with insertion sort and print it. */
int main (void)
{
    int sample[ARR_LEN] = {2,3,1,5,4,1,5,3,7,2};
    //select_sort (sample, ARR_LEN);
    insert_sort (sample, ARR_LEN);
    print_arr (sample, ARR_LEN);
    return 0;
}
<file_sep>#include <stdio.h>
#include <time.h>
#include <stdlib.h>
/* 这个程序实现链表的交并等操作 */
#define RANDOM_LIST_LEN ((rand()%6)+5)
#define BOOL unsigned char
#define TRUE 1
#define FALSE 0
typedef struct node
{
int data;
struct node *p_next;
} Node;
/****************************************************
 * function: build a list whose LENGTH is random (5..10 elements,
 *           via RANDOM_LIST_LEN); the element VALUES are read from
 *           stdin with scanf, one integer per node
 * author: herbert
 * date: 2019-12-06
 * return: pointer to the dummy head node (data unused), or NULL when
 *         the head allocation fails; on a mid-list allocation failure
 *         the partially built list is returned
 * comment: nodes are inserted at the front, so they end up in reverse
 *          input order
 ****************************************************/
Node* create_randLen_list ()
{
int list_len = RANDOM_LIST_LEN;
int i;
Node *head = NULL;
Node *new_node = NULL;
head = (Node*)malloc (sizeof(Node));
if (NULL == head)
return NULL;
head->p_next = NULL;
for (i = 0; i < list_len; ++i)
{
new_node = (Node*)malloc (sizeof(Node));
if (NULL == new_node)
return head;
scanf ("%d",&(new_node->data));
// head insertion: new node becomes the first data node
new_node->p_next = head->p_next;
head->p_next = new_node;
}
return head;
}
/****************************************************
 * function: free every data node, then the dummy head, and NULL the
 *           caller's pointer
 * author: herbert
 * date: 2019-12-06
 * fix: the original advanced with 'p = p->p_next' AFTER free(p) —
 *      a use-after-free; the saved successor p_n must be used instead.
 ****************************************************/
void release_list (Node **head)
{
    if (NULL == head || NULL == *head)
        return;
    // free the data nodes
    Node *p = (*head)->p_next;
    Node *p_n = NULL;
    while (p)
    {
        p_n = p->p_next;  // remember the successor before freeing
        free (p);
        p = p_n;          // fix: was p->p_next (read of freed memory)
    }
    // free the dummy head
    free (*head);
    *head = NULL;
}
/****************************************************
 * function: print a labelled int list (data nodes only, head skipped)
 * author: herbert
 * date: 2019-12-06
 ****************************************************/
void debug_print_list (char *list_name, Node *head)
{
    Node *cur;
    if (NULL == head || NULL == list_name)
        return ;
    printf ("%s :",list_name);
    for (cur = head->p_next; cur; cur = cur->p_next)
        printf ("%d ", cur->data);
    printf ("\n");
}
/****************************************************
 * function: linear search for a value in the list
 * author: herbert
 * date: 2019-12-06
 * return: TRUE when some data node holds 'data', FALSE otherwise
 ****************************************************/
BOOL is_node_exist (Node *head, int data)
{
    Node *cur;
    if (NULL == head)
        return FALSE;
    for (cur = head->p_next; cur; cur = cur->p_next)
    {
        if (data == cur->data)
            return TRUE;
    }
    return FALSE;
}
/****************************************************
 * function: copy the elements of list src into list dst, skipping
 *           values dst already contains (set-union style merge)
 * author: herbert
 * date: 2019-12-06
 * return: the destination list head, or NULL on bad arguments; on an
 *         allocation failure the partially merged list is returned
 * comment: copies are head-inserted into dst, so relative order of the
 *          new elements is reversed
 ****************************************************/
Node* listSrc_insertTo_listDst (Node **head_dst, Node *head_src)
{
if (NULL == head_dst || NULL == *head_dst)
return NULL;
if (NULL == head_src)
return *head_dst;
Node *new_head = *head_dst;
Node *p = head_src->p_next;
Node *new_node = NULL;
while (p)
{
if (!is_node_exist (new_head, p->data))
{
new_node = (Node*)malloc (sizeof (Node));
if (NULL != new_node)
{
// head insertion of a copy of p's value
new_node->data = p->data;
new_node->p_next = new_head->p_next;
new_head->p_next = new_node;
}
else
{
return new_head; // allocation failed: give back what we have so far
}
}
p = p->p_next;
}
return *head_dst;
}
/****************************************************
 * function: union of lists A and B with duplicates removed
 * author: herbert
 * date: 2019-12-06
 * return: a newly allocated list, or NULL on failure
 * fix: the freshly malloc'd head's p_next was never initialized, so the
 *      first is_node_exist() walk inside the insert helper traversed an
 *      indeterminate pointer (undefined behavior).
 ****************************************************/
Node* listA_U_listB (Node *head_a, Node *head_b)
{
    if (NULL == head_a && NULL == head_b)
        return NULL;
    // allocate the dummy head for the result
    Node *new_head = (Node*)malloc (sizeof(Node));
    if (NULL == new_head)
        return NULL;
    new_head->p_next = NULL; // fix: start with an empty, well-formed list
    // merge in list A
    listSrc_insertTo_listDst (&new_head, head_a);
    // merge in list B
    listSrc_insertTo_listDst (&new_head, head_b);
    return new_head;
}
/****************************************************
 * function: head-insert a pre-allocated node unless its value is
 *           already present in the list
 * author: herbert
 * date: 2019-12-06
 * comment: when the value exists the node is NOT inserted (and NOT
 *          freed here — the caller still owns it)
 ****************************************************/
void insert_node_to_list (Node *head, Node *node)
{
    if (NULL == head || NULL == node)
        return ;
    if (is_node_exist (head, node->data))
        return;
    node->p_next = head->p_next;
    head->p_next = node;
}
/****************************************************
 * function: intersection of lists A and B
 * author: herbert
 * date: 2019-12-07
 * comment: returns NULL on failure
 * fix: (1) the result head's p_next was never initialized — traversing
 *      it was undefined behavior; (2) when A contains duplicates the
 *      old code malloc'd a node that insert_node_to_list then silently
 *      dropped, leaking it — now membership is checked BEFORE the
 *      allocation, producing the same resulting list with no leak.
 ****************************************************/
Node *listA_n_listB (Node *head_a, Node *head_b)
{
    if (NULL == head_a || NULL == head_b)
        return NULL;
    // allocate the dummy head for the result
    Node *new_head = (Node*)malloc (sizeof(Node));
    if (NULL == new_head)
        return NULL;
    new_head->p_next = NULL; // fix: start with an empty, well-formed list
    Node *p = head_a->p_next;
    Node *new_node = NULL;
    while (p)
    {
        // element of A that is also in B and not yet in the result
        if (is_node_exist (head_b, p->data) && !is_node_exist (new_head, p->data))
        {
            new_node = (Node*)malloc (sizeof (Node));
            if (NULL == new_node)
                return NULL;
            new_node->data = p->data;
            insert_node_to_list (new_head, new_node);
        }
        p = p->p_next;
    }
    return new_head;
}
/****************************************************
 * function: reverse a singly linked list IN PLACE, recursively
 * author: herbert
 * date: 2019-12-07
 * comment: for lists WITHOUT a dummy head; returns the new first node
 ****************************************************/
Node *reverse_list_recursive_without_prehead (Node *head)
{
    // base case: empty list or single node is its own reversal
    if (NULL == head || NULL == head->p_next)
        return head;
    // reverse everything after this node, then hook this node on the end
    Node *reversed_tail = reverse_list_recursive_without_prehead (head->p_next);
    head->p_next->p_next = head;
    head->p_next = NULL;
    return reversed_tail;
}
/****************************************************
 * function: reverse a singly linked list, recursively
 * author: herbert
 * date: 2019-12-07
 * comment: for lists WITH a dummy head; the head node stays in place
 ****************************************************/
Node *reverse_list_recursive (Node *head)
{
    // nothing to do for an empty list or a single data node
    if (NULL == head || NULL == head->p_next || NULL == head->p_next->p_next)
        return head;
    Node *first_data = head->p_next;
    head->p_next = reverse_list_recursive_without_prehead (first_data);
    return head;
}
/****************************************************
 * function: reverse a singly linked list (dummy-head variant)
 * author: herbert
 * date: 2019-12-07
 * comment: iterative method using three cursors (pre, p, next);
 *          the _ex variant below is the simpler formulation
 * return: the same dummy head, now leading the reversed list
 ****************************************************/
Node *reverse_list_recursion (Node *head)
{
if (NULL == head || NULL == head->p_next)
return head;
Node *pre = NULL;        // already-reversed prefix
Node *p = head->p_next;  // node being relinked
Node *next = p->p_next;  // untouched remainder
while (next)
{
p->p_next = pre; // point the current node backwards
pre = p;
p = next;
next = next->p_next;
}
p->p_next = pre;   // relink the final node
head->p_next = p;  // dummy head now leads the reversed chain
return (head);
}
/****************************************************
 * function: reverse a singly linked list (dummy-head variant)
 * author: herbert
 * date: 2019-12-07
 * comment: iterative two-cursor formulation
 ****************************************************/
Node *reverse_list_recursion_ex (Node *head)
{
    if (NULL == head || NULL == head->p_next)
        return head;
    Node *done = NULL;            // reversed prefix so far
    Node *cur = head->p_next;     // next node to relink
    Node *rest = NULL;
    while (cur)
    {
        rest = cur->p_next;       // save the remainder
        cur->p_next = done;       // relink backwards
        done = cur;
        cur = rest;
    }
    head->p_next = done;
    return head;
}
/* Driver: read a random-length list from stdin, print it, reverse it
 * recursively, and print it again. The #if 0 region keeps earlier
 * union/intersection/iterative-reversal experiments around, disabled. */
int main (void)
{
srand (time(NULL));
Node *head = create_randLen_list();
debug_print_list ("list1", head);
Node *head1 = reverse_list_recursive (head);
debug_print_list ("recursive", head1);
#if 0
//release_list (&head);
Node *head1 = create_randLen_list();
debug_print_list ("list2", head1);
//release_list (&head1);
Node *head2 = listA_U_listB (head, head1);
debug_print_list ("list_union",head2);
Node *head3 = listA_n_listB (head, head1);
debug_print_list ("list_n", head3);
Node *head4 = reverse_list_recursion (head);
debug_print_list ("recursion list1", head4);
Node *head5 = reverse_list_recursion_ex (head1);
debug_print_list ("recursion list2", head5);
release_list (&head);
release_list (&head1);
release_list (&head2);
release_list (&head3);
release_list (&head4);
release_list (&head5);
#endif
return 0;
}
<file_sep>#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include "sparse_array.h"
/**********************************************
 * function: allocate a zeroed sparse matrix through the out-parameter
 * author: Herbert
 * date: 2019-12-26
 * return: the new matrix (also stored in *s_matrix), NULL on failure
 **********************************************/
Sparse_matrix* create_sparse_matrix (Sparse_matrix **s_matrix)
{
    Sparse_matrix *m;
    if (NULL == s_matrix)
        return NULL;
    m = (Sparse_matrix*)malloc (sizeof (Sparse_matrix));
    if (NULL == m)
    {
        *s_matrix = NULL;
        return NULL;
    }
    memset (m, 0, sizeof (Sparse_matrix));
    *s_matrix = m;
    return m;
}
/**********************************************
 * function: test whether element (r_idx, c_idx) is stored in the
 *           sparse matrix; when found and val != NULL its value is
 *           written through val
 * author: Herbert
 * date: 2019-12-26
 * return: 1 when present, 0 otherwise
 **********************************************/
int is_matrix_node_exist (Sparse_matrix *s_matrix, int r_idx, int c_idx, int *val)
{
    int k;
    if (NULL == s_matrix)
        return 0;
    for (k = 0; k < s_matrix->node_num; ++k)
    {
        if (r_idx != s_matrix->nodes[k].r_idx)
            continue;
        if (c_idx != s_matrix->nodes[k].c_idx)
            continue;
        if (NULL != val)
            *val = s_matrix->nodes[k].value;
        return 1;
    }
    return 0;
}
/**********************************************
 * function: dump a sparse matrix: its non-zero triples (six per line)
 *           followed by the row_first index table
 * author: Herbert
 * date: 2019-12-25
 **********************************************/
void print_sparse_matrix (Sparse_matrix *s_matrix)
{
    int k;
    if (NULL == s_matrix)
        return;
    printf ("\nmatrix row = %d, col = %d, node_num = %d:\n", s_matrix->row, s_matrix->col, s_matrix->node_num);
    for (k = 0; k < s_matrix->node_num; ++k)
    {
        printf ("(%d,%d) = %-2d ", s_matrix->nodes[k].r_idx, s_matrix->nodes[k].c_idx,s_matrix->nodes[k].value);
        if ( (k+1) % 6 == 0)
            printf ("\n");
    }
    printf ("\nrow_first_idx :\n");
    for (k = 0; k < s_matrix->row+1; ++k)
        printf ("%d ", s_matrix->row_first[k]);
    printf ("\n");
}
/**********************************************
 * function: transpose a dense row-major (row x col) matrix into a
 *           newly allocated (col x row) matrix; caller frees the result
 * author: Herbert
 * date: 2019-12-25
 * return: the transposed matrix, NULL on allocation failure
 **********************************************/
int* matrix_transpose (int *matrix, int row, int col)
{
    int r, c;
    int *out = (int*)malloc (row * col * sizeof(int));
    if (NULL == out)
        return NULL;
    for (r = 0; r < row; ++r)
    {
        for (c = 0; c < col; ++c)
        {
            /* element (r,c) of the source lands at (c,r) of the result */
            out[c*row + r] = matrix[r*col + c];
        }
    }
    return out;
}
/**********************************************
 * function: print a row-major 2-D array stored as one flat buffer,
 *           one matrix row per output line
 * author: Herbert
 * date: 2019-12-25
 **********************************************/
void print_two_dimensional_array (int *arr, int row, int col)
{
    int k;
    int total = row * col;
    for (k = 0; k < total; ++k)
    {
        printf ("%2d ", arr[k]);
        if ((k+1) % col == 0)
            printf ("\n"); /* end of a matrix row */
    }
}
/**********************************************
 * function: scatter a handful of random values (0..99) into a ROW x COL
 *           flat array to make it sparse (~5% of the cells plus 5)
 * author: Herbert
 * date: 2019-12-26
 **********************************************/
void init_two_dimensional_array (int *arr)
{
    int k;
    int fill_count;
    if (NULL == arr)
        return ;
    fill_count = (ROW*COL) / 20 + 5;
    for (k = 0; k < fill_count; ++k)
    {
        /* positions may collide, so the real non-zero count can be lower */
        arr[rand()%(ROW*COL)] = rand () % 100;
    }
}
/**********************************************
 * function: build the triple-list sparse representation from a dense
 *           row-major matrix
 * author: Herbert
 * date: 2019-12-26  row_first[r] holds the index (into nodes[]) of row
 *       r's first non-zero element; row_first has row+1 entries so that
 *       row_first[r+1]-row_first[r] gives row r's non-zero count, even
 *       for the last row. All-zero rows get the index of the NEXT
 *       stored element so that difference is 0.
 **********************************************/
void get_sparse_matrix_from_normal_matrix (int *arr, int row, int col, Sparse_matrix *s_matrix)
{
if (NULL == arr || NULL == s_matrix)
return;
int i;
int j;
s_matrix->row = row;
s_matrix->col = col;
int node_num = 0;      // non-zero elements stored so far
int last_row_num = -1; // row index of the most recent non-zero element
// mark every row_first slot as "not yet assigned"
for (i = 0; i <= row; ++i)
s_matrix->row_first[i] = -1;
for (i = 0; i < row * col; ++i)
{
if (arr[i] != 0)
{
// store the triple (row, col, value)
s_matrix->nodes[node_num].r_idx = i / col;
s_matrix->nodes[node_num].c_idx = i % col;
s_matrix->nodes[node_num].value = arr[i];
if (i / col != last_row_num)
{
// the row changed; any all-zero rows skipped in between must also
// point at node_num so their element count computes to zero
if (i / col - (last_row_num+1) > 0) // there were all-zero rows before row i/col
{
// assign node_num to every skipped all-zero row
for (j = 0; j < (i / col - (last_row_num+1)); ++j)
{
s_matrix->row_first[i/col-j-1] = node_num;
}
}
// record where this new row's first element lives in nodes[]
s_matrix->row_first[i/col] = node_num;
last_row_num = i / col;
}
++node_num;
}
}
// trailing all-zero rows (and the sentinel slot) all point past the end
for (i = row; i >= 0; --i)
{
if (s_matrix->row_first[i] == -1)
s_matrix->row_first[i] = node_num;
else
break;
}
s_matrix->node_num = node_num;
return;
}
/**********************************************
 * function: transpose a sparse matrix (naive O(n^2) method)
 * author: Herbert
 * date: 2019-12-27
 * comment: for each stored element, its position in the transposed
 *          node array is found by counting how many other elements
 *          precede it in (column, row) order; ties in column are
 *          broken by row. NOTE(review): the result's row_first table
 *          is not filled in here — callers that need it must rebuild it.
 * return: newly allocated transposed matrix, NULL on failure
 **********************************************/
Sparse_matrix * sparse_matrix_transpose (Sparse_matrix * s_matrix)
{
if (NULL == s_matrix)
return NULL;
Sparse_matrix *trans_matrix = (Sparse_matrix*) malloc (sizeof (Sparse_matrix));
if (NULL == trans_matrix)
return NULL;
trans_matrix->row = s_matrix->col;
trans_matrix->col = s_matrix->row;
trans_matrix->node_num = s_matrix->node_num;
int i,j;
int node_pos = 0;
for (i = 0; i < s_matrix->node_num; ++i)
{
node_pos = 0; // number of elements that precede element i in the transpose
for (j = 0; j < s_matrix->node_num; ++j)
{
if (i == j)
continue;
if (s_matrix->nodes[i].c_idx > s_matrix->nodes[j].c_idx)
{
++node_pos;
}
else if (s_matrix->nodes[i].c_idx == s_matrix->nodes[j].c_idx)
{
// same column: order by original row
if (s_matrix->nodes[i].r_idx > s_matrix->nodes[j].r_idx)
++node_pos;
}
else
{
// do nothing;
}
}
// place element i with row/column swapped
trans_matrix->nodes[node_pos].r_idx = s_matrix->nodes[i].c_idx;
trans_matrix->nodes[node_pos].c_idx = s_matrix->nodes[i].r_idx;
trans_matrix->nodes[node_pos].value = s_matrix->nodes[i].value;
}
return trans_matrix;
}
/**********************************************
 * function: fast sparse-matrix transpose (single pass over the nodes)
 * author: Herbert
 * date: 2019-12-27  (textbook "fast transpose" algorithm)
 * comment: first counts the elements in each source column, derives
 *          where each column's first element lands in the transposed
 *          node array, then scatters every node directly into place.
 * return: newly allocated transposed matrix, NULL on failure
 * fix: intermediate allocation failures leaked the buffers already
 *      allocated (trans_matrix, col_num); they are now released.
 **********************************************/
Sparse_matrix * optimize_sparse_matrix_transpose (Sparse_matrix *s_matrix)
{
    if (NULL == s_matrix)
        return NULL;
    Sparse_matrix *trans_matrix = (Sparse_matrix*) malloc (sizeof (Sparse_matrix));
    if (NULL == trans_matrix)
        return NULL;
    trans_matrix->row = s_matrix->col;
    trans_matrix->col = s_matrix->row;
    trans_matrix->node_num = s_matrix->node_num;
    int i,node_pos;
    // count the non-zero elements of each source column
    int *col_num = (int *)malloc (s_matrix->col * sizeof (int));
    if (col_num == NULL)
    {
        free (trans_matrix); // fix: do not leak on failure
        return NULL;
    }
    memset (col_num, 0, s_matrix->col * sizeof (int)); // fix-note: counters must start at zero
    for (i = 0; i < s_matrix->node_num; ++i)
    {
        col_num[s_matrix->nodes[i].c_idx]++;
    }
    // position of each source column's first element in the transposed array
    int *row_first_pos = (int*)malloc (s_matrix->col * sizeof (int));
    if (row_first_pos == NULL)
    {
        free (col_num);      // fix: do not leak on failure
        free (trans_matrix);
        return NULL;
    }
    row_first_pos[0] = 0;
    for (i = 1; i < s_matrix->col; ++i)
    {
        row_first_pos[i] = row_first_pos[i-1] + col_num[i-1];
    }
    // scatter every node into its slot, bumping the column cursor as we go
    for (i = 0; i < s_matrix->node_num; ++i)
    {
        node_pos = row_first_pos[s_matrix->nodes[i].c_idx];
        trans_matrix->nodes[node_pos].r_idx = s_matrix->nodes[i].c_idx;
        trans_matrix->nodes[node_pos].c_idx = s_matrix->nodes[i].r_idx;
        trans_matrix->nodes[node_pos].value = s_matrix->nodes[i].value;
        row_first_pos[s_matrix->nodes[i].c_idx]++;
    }
    free (col_num);
    free (row_first_pos);
    return trans_matrix;
}
/**********************************************
 * function: build a random sparse matrix of the requested dimensions
 * author: Herbert
 * param: s_matrix: receives the new matrix  row: rows  col: columns
 * date: 2019-12-28
 * comment: pipeline: generate random non-zero cells -> place them in a
 *          dense scratch buffer -> convert the dense buffer to the
 *          sparse triple representation. About 10% of cells (plus 3)
 *          are made non-zero, each valued 1..9.
 * return: the new matrix (also via *s_matrix), NULL on failure
 **********************************************/
Sparse_matrix *get_specific_sparse_matrix (Sparse_matrix **s_matrix, int row, int col)
{
if (NULL == s_matrix)
return NULL;
Sparse_matrix *sparse_matrix = NULL;
if ( NULL == (sparse_matrix = create_sparse_matrix (s_matrix)) )
return NULL;
int i = 0;
int r_idx,c_idx,value;
sparse_matrix->row = row;
sparse_matrix->col = col;
sparse_matrix->node_num = (row * col) / 10 + 3;
// dense scratch buffer used to detect duplicate positions
int *normal_dim_array = (int*)malloc (sizeof (int)*row*col);
if (NULL == normal_dim_array)
return NULL;
memset (normal_dim_array, 0, sizeof(int)*row*col);
// generate the non-zero elements at distinct random positions
while (i < sparse_matrix->node_num)
{
r_idx = rand () % row;
c_idx = rand () % col;
value = (rand () % 9) + 1; // each value is in 1..9
if (normal_dim_array[r_idx*col+c_idx] != 0)
{
// position already taken: try again
continue;
}
else
{
normal_dim_array[r_idx*col+c_idx] = value;
++i;
}
}
// convert the dense buffer to the sparse representation
get_sparse_matrix_from_normal_matrix (normal_dim_array, row, col, sparse_matrix);
free (normal_dim_array);
*s_matrix = sparse_matrix;
return *s_matrix;
}
/**********************************************
 * function: sparse matrix product Q = M x N
 * author: Herbert
 * param: m_matrix: left operand  n_matrix: right operand
 * date: 2019-12-29
 * comment: Q[i][j] += M[i][k] * N[k][j]; processes M one row at a time,
 *          accumulating that output row densely in add_value_arr and
 *          then harvesting its non-zero entries.
 * return: newly allocated product, NULL on bad input / dimension
 *         mismatch / allocation failure
 * fix: add_value_arr was never freed on the success path (leak on
 *      every call), and ret_sparse leaked when add_value_arr's
 *      allocation failed.
 **********************************************/
Sparse_matrix* multiply_sparse_matrix (Sparse_matrix *m_matrix, Sparse_matrix *n_matrix)
{
    if (NULL == m_matrix || NULL == n_matrix)
        return NULL;
    if (m_matrix->col != n_matrix->row)
    {
        printf ("error: M matrix col != N matrix row\n");
        return NULL;
    }
    Sparse_matrix *ret_sparse = NULL;
    if ( NULL == (ret_sparse = create_sparse_matrix (&ret_sparse)) )
        return NULL;
    ret_sparse->row = m_matrix->row;
    ret_sparse->col = n_matrix->col;
    int cur_node = 0;     // index of the M node currently processed
    int i,j;
    int cur_row;          // row coordinate of the current M node
    int cur_col;          // column coordinate of the current M node
    int rst_non_zero = 0; // non-zero elements in the result so far
    int *add_value_arr = (int*)malloc (sizeof(int)*n_matrix->col);
    if (NULL == add_value_arr)
    {
        free (ret_sparse); // fix: do not leak the result shell
        return NULL;
    }
    while (cur_node < m_matrix->node_num)
    {
        memset (add_value_arr, 0, sizeof(int)*n_matrix->col);
        cur_row = m_matrix->nodes[cur_node].r_idx;
        // walk every M node sharing this row
        for (i = m_matrix->row_first[cur_row]; i < m_matrix->row_first[cur_row+1] ;++i)
        {
            // for M node (cur_row, cur_col), combine with every N node in row cur_col
            cur_col = m_matrix->nodes[cur_node].c_idx;
            for (j = n_matrix->row_first[cur_col]; j < n_matrix->row_first[cur_col+1]; ++j)
            {
                add_value_arr[n_matrix->nodes[j].c_idx] += m_matrix->nodes[i].value * n_matrix->nodes[j].value;
            }
            ++cur_node;
        }
        // harvest the non-zero entries of the finished output row
        for (i = 0; i < n_matrix->col; ++i)
        {
            if (0 != add_value_arr[i])
            {
                ret_sparse->nodes[rst_non_zero].r_idx = cur_row;
                ret_sparse->nodes[rst_non_zero].c_idx = i;
                ret_sparse->nodes[rst_non_zero].value = add_value_arr[i];
                ++rst_non_zero;
            }
        }
    }
    ret_sparse->node_num = rst_non_zero;
    free (add_value_arr); // fix: was leaked on every successful call
    return ret_sparse;
}
/**********************************************
 * function: print a sparse matrix in dense (classic) layout,
 *           zeros included
 * author: Herbert
 * param: s_matrix: the sparse matrix
 * date: 2019-12-29
 * comment: O(rows*cols*nodes) — each cell does a linear lookup
 **********************************************/
void print_sparse_matrix_classic (Sparse_matrix *s_matrix)
{
    int k;
    int r, c;
    int val;
    if (NULL == s_matrix)
        return;
    for (k = 0; k < s_matrix->row*s_matrix->col; ++k)
    {
        r = k / s_matrix->col;
        c = k % s_matrix->col;
        if (c == 0)
            printf ("\n"); /* start of a new matrix row */
        if (!is_matrix_node_exist (s_matrix, r, c, &val))
            val = 0; /* absent cells print as zero */
        printf ("%2d ", val);
    }
}
/* Driver: build two random sparse matrices with compatible shapes
 * (14x12 and 12x18), print both, multiply them, and print the product.
 * The #if 0 region keeps the earlier dense/sparse transpose experiments
 * around, disabled. */
int main (void)
{
srand (time(NULL));
#if 0
int arr[ROW*COL] = {0};
// 初始化二维数组元素值
init_two_dimensional_array (arr);
printf ("\nsrc matrix is: \n");
print_two_dimensional_array (arr, ROW, COL);
// 使用普通方式计算matrix的转置
int *transpose_arr = matrix_transpose (arr, ROW, COL);
printf ("\ntranspose matrix is:\n");
print_two_dimensional_array (transpose_arr, COL, ROW);
Sparse_matrix *s_matrix = NULL;
create_sparse_matrix (&s_matrix);
// 从普通矩阵表示法得到稀疏矩阵表示结构
get_sparse_matrix_from_normal_matrix (arr, ROW, COL, s_matrix);
print_sparse_matrix (s_matrix);
// 自己实现的稀疏矩阵转置
Sparse_matrix *transpos_matrix = sparse_matrix_transpose (s_matrix);
print_sparse_matrix (transpos_matrix);
// 根据严蔚敏方法实现的稀疏矩阵转置运算
Sparse_matrix *opti_transpose_matrix = optimize_sparse_matrix_transpose (s_matrix);
print_sparse_matrix (opti_transpose_matrix);
free (opti_transpose_matrix);
free (transpos_matrix);
free (s_matrix);
#endif
Sparse_matrix *m_sparse_matrix = NULL;
get_specific_sparse_matrix (&m_sparse_matrix, 14, 12);
print_sparse_matrix_classic (m_sparse_matrix);
print_sparse_matrix (m_sparse_matrix);
Sparse_matrix *n_sparse_matrix = NULL;
get_specific_sparse_matrix (&n_sparse_matrix, 12, 18);
print_sparse_matrix_classic (n_sparse_matrix);
print_sparse_matrix (n_sparse_matrix);
// product is 14x18
Sparse_matrix *multiple_matrix = multiply_sparse_matrix (m_sparse_matrix, n_sparse_matrix);
print_sparse_matrix_classic (multiple_matrix);
print_sparse_matrix (multiple_matrix);
free (multiple_matrix);
free (n_sparse_matrix);
free (m_sparse_matrix);
return 0;
}
<file_sep>#include <stdio.h>
#include <stdlib.h>
/* 本程序用数组实现一个环形队列 */
#define INIT_QUEUE_SIZE 1024
typedef struct
{
int *queue;
int head;
int tail;
} *Queue;
/* Allocate and initialize an empty ring-buffer queue via the out-parameter.
 * Returns 0 on success, -1 on failure.
 * fix: the original did malloc(sizeof(q)) — the size of a POINTER, not
 *      of the queue struct — causing a heap overflow when the struct's
 *      fields were written; the inner buffer allocation was also
 *      unchecked. */
int init_queue (Queue *q)
{
    if (q == NULL)
        return -1;
    *q = (Queue)malloc (sizeof(**q)); /* fix: allocate the struct, not a pointer */
    if (*q == NULL)
        return -1;
    (*q)->queue = (int*)malloc (INIT_QUEUE_SIZE*sizeof(int));
    if ((*q)->queue == NULL)          /* fix: check the buffer allocation too */
    {
        free (*q);
        *q = NULL;
        return -1;
    }
    (*q)->head = 0;
    (*q)->tail = 0;
    return 0;
}
/* Nonzero when the ring buffer is empty (cursors coincide);
 * aborts on a NULL queue. */
int is_queue_empty (Queue q)
{
    if (NULL == q)
        exit (-1);
    return (q->tail == q->head);
}
/* Nonzero when the ring buffer is full (tail's successor is head —
 * one slot is always sacrificed); aborts on a NULL queue. */
int is_queue_full (Queue q)
{
    if (NULL == q)
        exit (-1);
    return ( q->head == (q->tail+1)%INIT_QUEUE_SIZE );
}
/* Enqueue one value at the tail; prints an error (and drops the value)
 * when the ring is full. Returns 0, or -1 on a NULL queue. */
int en_queue (Queue q, int data)
{
    if (q == NULL)
        return -1;
    if (is_queue_full (q))
    {
        printf ("error: queue full.\n");
        return 0;
    }
    q->queue[q->tail] = data;
    q->tail = (q->tail + 1)%INIT_QUEUE_SIZE; /* wrap the cursor */
    return 0;
}
/* Dequeue one value from the head; when 'data' is non-NULL it receives
 * the value. Prints an error when the ring is empty. Returns 0, or -1
 * on a NULL queue. */
int de_queue (Queue q, int *data)
{
    if (q == NULL)
        return (-1);
    if (is_queue_empty(q))
    {
        printf ("error: queue empty.\n");
        return 0;
    }
    if (NULL != data)
        *data = q->queue[q->head];
    q->head = (q->head + 1) % INIT_QUEUE_SIZE; /* wrap the cursor */
    return 0;
}
/* Release the ring buffer and the queue structure. Returns 0.
 * (fix: declared int but had no return statement — undefined behavior
 * if the caller reads the result.) */
int clear_queue (Queue q)
{
    if (q)
    {
        free (q->queue);
        free (q);
    }
    return 0;
}
/* Driver: two enqueue/dequeue round trips through the ring buffer. */
int main (void)
{
    Queue q;
    int out = 0;
    init_queue (&q);
    en_queue (q, 1);
    de_queue (q, &out);
    printf ("%d\n", out);
    en_queue (q, 2);
    de_queue (q, &out);
    printf ("%d\n", out);
    clear_queue (q);
    return 0;
}
<file_sep>#include <stdio.h>
#include <stdlib.h>
typedef struct BiTreeNode
{
int data;
struct BiTreeNode *l_node, *r_node;
// struct BiTreeNode *parent; // 加上此域表示三叉链表
} BiTreeNode, *BiTree;
#define STACK_SIZE 100
// 定义一个简单的栈结构用来进行非递归中序遍历二叉树
typedef struct stack
{
int top;
BiTree data[STACK_SIZE];
} Stack;
Stack g_stack;
// Reset the global traversal stack: empty top marker, all slots cleared.
void init_stack ()
{
    int idx;
    g_stack.top = -1;
    for (idx = 0; idx < STACK_SIZE; ++idx)
        g_stack.data[idx] = NULL;
}
// Nonzero when the global stack holds no nodes.
int is_empty ()
{
    return (-1 == g_stack.top);
}
// Nonzero when the global stack has no free slot left.
int is_full ()
{
    return ((STACK_SIZE - 1) == g_stack.top);
}
// Push a tree node; silently drops the node when the stack is full.
void push (BiTreeNode *node)
{
    if (is_full())
        return;
    g_stack.data[++g_stack.top] = node;
}
// 出栈
BiTreeNode* pop ()
{
if (!is_empty())
return g_stack.data[g_stack.top--];
}
/****************************************************
 * function: build a binary tree from its preorder sequence read off
 *           stdin; 0 denotes an empty subtree
 * author: Herbert
 * date: 2020-01-10
 * comment: eg: 1 2 3 0 0 0 4 5 0 0 0 builds
             1
            / \
           2   4
          /   /
         3   5
 * fix: the malloc result was validated with 'NULL == T' (the out-
 *      parameter, never NULL here) instead of 'NULL == *T'; the empty-
 *      subtree branch also left *T unset, which is now NULLed.
 ****************************************************/
BiTree create_binary_tree_by_pre (BiTree *T)
{
    int ch;
    scanf ("%d", &ch);
    if (ch != 0)
    {
        *T = (BiTree)malloc (sizeof(BiTreeNode));
        if (NULL == *T)   /* fix: check the allocation, not the out-parameter */
            exit (1);
        (*T)->data = ch;
        (*T)->l_node = create_binary_tree_by_pre ( &(*T)->l_node );
        (*T)->r_node = create_binary_tree_by_pre ( &(*T)->r_node );
    }
    else
    {
        *T = NULL;        /* fix: leave the slot well-defined for an empty subtree */
        return NULL;
    }
    return *T;
}
/****************************************************
 * function: preorder traversal (root, left, right), printing each node
 * author: Herbert
 * date: 2020-01-10
 * return: always 0
 ****************************************************/
int pre_traverse_binary_tree (BiTree root)
{
    if (NULL == root)
        return 0;
    printf ("%d ", root->data); /* visit root first */
    pre_traverse_binary_tree (root->l_node);
    pre_traverse_binary_tree (root->r_node);
    return 0;
}
/****************************************************
* function: 中序遍历二叉树
* author: Herbert
* date: 2020-01-11
* comment:
****************************************************/
int inter_traverse_binary_tree (BiTree root)
{
if (!root)
return 0;
inter_traverse_binary_tree (root->l_node);
printf ("%d ", root->data);
inter_traverse_binary_tree (root->r_node);
return 0;
}
/****************************************************
* function: 后序遍历二叉树
* author: Herbert
* date: 2020-01-11
* comment:
****************************************************/
int post_traverse_binary_tree (BiTree root)
{
if (!root)
return 0;
post_traverse_binary_tree (root->l_node);
post_traverse_binary_tree (root->r_node);
printf ("%d ", root->data);
return 0;
}
/****************************************************
* function: 由二叉树的根找到它第一个左子树为空的节点并返回此节点
* author: Herbert
* date: 2020-01-12
* comment: 一直往左走,如果其节点左子树不为空则入栈
* return: 树为空则返回空,否则返回首个左子树为空的节点
****************************************************/
BiTreeNode * go_far_left_node (BiTree T)
{
if (NULL == T)
return NULL;
while (T->l_node)
{
push (T); //如果左子树不为空,则将节点入栈
T = T->l_node;
}
return T;
}
/****************************************************
* function: 非递归中序遍历二叉树
* author: Herbert
* date: 2020-01-12
* comment: 使用栈
****************************************************/
int inter_traverse_binary_tree_nonrecusive (BiTree root)
{
BiTreeNode *node = go_far_left_node (root);
while (node)
{
printf ("%d ", node->data); // 访问此节点
if (node->r_node)
{
node = go_far_left_node (node->r_node);
}
else if (!is_empty ())
{
node = pop ();
}
else
node = NULL;
}
return 0;
}
// 求叶子节点个数 先序
void get_leaf_num (BiTree root, int *leaf_num)
{
if (NULL == root)
return;
if (!root->l_node && !root->r_node)
(*leaf_num)++;
get_leaf_num (root->l_node, leaf_num);
get_leaf_num (root->r_node, leaf_num);
return;
}
// 求二叉树的深度 后序
int get_tree_depth (BiTree root)
{
if (NULL == root)
return 0;
int left_depth = 0;
int rigth_depth = 0;
int depth = 0;
left_depth = get_tree_depth (root->l_node);
rigth_depth = get_tree_depth (root->r_node);
depth = 1 + (left_depth > rigth_depth ? left_depth:rigth_depth);
return depth;
}
// 生成一个二叉树节点
BiTreeNode * create_tree_node (int data, BiTreeNode *l_node, BiTreeNode *r_node)
{
BiTreeNode *new_node = (BiTreeNode*)malloc (sizeof (BiTreeNode));
if (NULL == new_node)
exit (1);
new_node->data = data;
new_node->l_node = l_node;
new_node->r_node = r_node;
return new_node;
}
// 复制二叉树 后序
BiTreeNode *copy_binary_tree (BiTree root)
{
if (NULL == root)
return NULL;
BiTreeNode *new_node = (BiTreeNode*)malloc (sizeof (BiTreeNode));
if (NULL == new_node)
exit (1);
new_node->data = root->data;
new_node->l_node = copy_binary_tree (root->l_node);
new_node->r_node = copy_binary_tree (root->r_node);
return new_node;
}
int main (void)
{
BiTree tree = NULL;
// 根据输入序列生产二叉树 需要用0来表示空树
tree = create_binary_tree_by_pre (&tree);
// 打印出建立二叉树的先序序列
pre_traverse_binary_tree (tree);
printf ("\n");
// 中序遍历
inter_traverse_binary_tree (tree);
printf ("\n");
// 后序遍历
post_traverse_binary_tree (tree);
printf ("\n");
// 非递归方式中序遍历
inter_traverse_binary_tree_nonrecusive (tree);
printf ("\n");
int leaf_num = 0;
// 求二叉树叶子节点个数
get_leaf_num (tree, &leaf_num);
printf ("tree leaf num is: %d\n", leaf_num);
// 求二叉树深度
printf ("tree depth is: %d\n", get_tree_depth(tree));
// 复制二叉树
BiTreeNode *cp_tree = copy_binary_tree (tree);
// 中序遍历复制的二叉树
inter_traverse_binary_tree (cp_tree);
return 0;
}
<file_sep>/* 深度优先搜索DFS */
/* 连通图的深度优先搜索 */
void DFS (Graph G, int v)
{
visited[v] = TRUE; // 置v访问过标记
visitFunc (v); // 访问v
for (w = first_adj_vex (G,v); w!=0; w=next_adj_vex (G,v,w))
if (!visited[w])
DFS(G,w);
}
/* 非连通图的深度优先搜索 */
void DFSTraverse (Graph G, visitFunc)
{
for (v = 0; v < G.vex_num; ++v)
visited[v] = FALSE; // 访问标志数组初始化
for (v = 0; v < G.vex_num; ++v)
if (!visited[v])
DFS(G, v); //对尚未访问的定点调用DFS
}
/* 广度优先搜索BFST */
void BFSTraverse (Graph G, VisitFunc)
{
for (v = 0; v < G.vex_num; ++v)
visited[v] = FALSE;
InitQueue (Q); // 因为广度优先搜索要按照已遍历邻接点的次序再访问已遍历邻接点的邻接点,所以需要用队列辅助
for (v = 0; v < G.vex_num; ++v)
{
if (!visited[v]) // v尚未访问,则访问v并将v入队列
{
EnQueue (Q,v);
visited[v] = TRUE;
VisitFunc (v);
}
while (!QueueEmpty (Q))
{
DeQueue (Q,u); // 取出队列中首元素放入u
for (w=first_adj_vex(G,u); w!=0; w=next_adj_vex(G,u,w))
if (!visited[w])
{
EnQueue (Q,w);
visited[w] = TRUE;
VisitFunc (w);
}
}
}
}
<file_sep>#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "cross_list_matrix.h"
/* 本程序用十字链表来表示矩阵 */
/***************************************
* function: 创建一个节点
* author: Herbert
* date: 2019-12-30
* comment: 根据输入值新建一个node节点
***************************************/
matrix_node_t* create_node (int row, int col, int val)
{
matrix_node_t *p_node = (matrix_node_t*)malloc (sizeof (matrix_node_t));
if (NULL == p_node)
return NULL;
p_node->r_idx = row;
p_node->c_idx = col;
p_node->value = val;
p_node->row_next = NULL;
p_node->col_next = NULL;
return p_node;
}
/***************************************
* function: 初始化一个矩阵
* author: Herbert
* date: 2019-12-30
* comment: 生成一个空的十字链表表示的矩阵并返回它
***************************************/
Matrix *create_empty_matrix (int row, int col)
{
Matrix *matrix = (Matrix*)malloc(sizeof(Matrix));
if (NULL == matrix)
return NULL;
memset (matrix, 0, sizeof (Matrix));
matrix->row_num = row;
matrix->col_num = col;
// 初始化row_head和col_head数组
int i = 0;
for (i = 0; i < row; ++i)
{
matrix->row_head[i] = create_node (-1, -1, INVALID_DATA);
if (matrix->row_head[i] == NULL)
return NULL;
}
for (i = 0; i < col; ++i)
{
matrix->col_head[i] = create_node (-1, -1, INVALID_DATA);
if (matrix->col_head[i] == NULL)
return NULL;
}
return matrix;
}
/***************************************
* function: 判断一个节点是否已经存在
* author: Herbert
* date: 2019-12-30
* comment: 在插入一个新的节点时需要判断它是否已经存在
***************************************/
BOOL is_node_exist (Matrix *matrix, int row, int col)
{
if (NULL == matrix || 0 == matrix->node_num)
return FALSE;
matrix_node_t *p = matrix->row_head[row]->row_next;
while (p)
{
if (p->c_idx == col)
return TRUE;
p = p->row_next;
}
return FALSE;
}
/***************************************
* function: 获取一个node的值
* author: Herbert
* date: 2019-01-02
* comment:
***************************************/
int get_node_value (Matrix *matrix, int row, int col)
{
if (NULL == matrix || 0 == matrix->node_num)
return 0;
matrix_node_t *p = matrix->row_head[row]->row_next;
while (p)
{
if (p->c_idx == col)
{
return p->value;
}
p = p->row_next;
}
return 0;
}
/***************************************
* function: 获取一个node的值
* author: Herbert
* date: 2019-01-02
* comment:
***************************************/
int modify_node_value (Matrix *matrix, int row, int col, int new_value)
{
if (NULL == matrix || 0 == matrix->node_num)
return -1;
matrix_node_t *p = matrix->row_head[row]->row_next;
while (p)
{
if (p->c_idx == col)
{
p->value = new_value;
return 0;
}
p = p->row_next;
}
return 0;
}
/***************************************
* function: 将一个节点插入十字矩阵
* author: Herbert
* date: 2019-12-30
* comment:
***************************************/
int insert_node (Matrix *matrix, matrix_node_t *node)
{
if (NULL == matrix || NULL == node)
return -1;
if (node->r_idx >= matrix->row_num || node->c_idx >= matrix->col_num)
{
printf ("insert position error!\n");
return -1;
}
char input;
int row = node->r_idx;
int col = node->c_idx;
// 先插入到行的合适位置
matrix_node_t *p_row = matrix->row_head[row];
matrix_node_t *pre = matrix->row_head[row];
if (NULL == p_row->row_next)
p_row->row_next = node;
else
{
p_row = p_row->row_next;
while (p_row)
{
if (p_row->c_idx < node->c_idx)
{
pre = p_row;
p_row = p_row->row_next;
}
else if (p_row->c_idx == node->c_idx)
{
printf ("node (%d,%d) is exist, do you want to modify it? ", node->r_idx, node->c_idx);
scanf ("%c", &input);
if ('y' == input)
p_row->value = node->value;
else if ('n' == input)
;
return 0;
}
else
{
node->row_next = p_row;
pre->row_next = node;
break;
}
}
if (!p_row)
{
pre->row_next = node;
}
}
// 再插入到列的合适位置
matrix_node_t *p_col = matrix->col_head[col];
pre = matrix->col_head[col];
if (NULL == p_col->col_next)
{
p_col->col_next = node;
matrix->node_num++;
printf ("col %d first node insert. \n", col);
}
else
{
p_col = p_col->col_next;
while (p_col)
{
if (p_col->r_idx < node->r_idx)
{
pre = p_col;
p_col = p_col->col_next;
}
else if (p_col->r_idx == node->r_idx)
{
// never here! do nothing, the same position element has been processed when deal with p_row before;
return 0;
}
else
{
node->col_next = p_col;
pre->col_next = node;
matrix->node_num++;
printf ("col %d middle insert.\n", col);
break;
}
}
if (!p_col)
{
pre->col_next = node;
matrix->node_num++;
printf ("col %d tail insert.\n", col);
}
}
return 0;
}
/***************************************
* function: 从一个十字链表中删除一个节点(如果存在的话)
* author: Herbert
* date: 2020-01-01
* param: row,col 删除节点的行列坐标
* comment:
***************************************/
int delete_node (Matrix *matrix, int row, int col)
{
if (NULL == matrix)
return 0;
if (row >= matrix->row_num || col >= matrix->col_num)
{
printf ("delete position error!\n");
return -1;
}
// 节点不存在直接返回
if (!is_node_exist(matrix, row, col))
{
printf ("delete node not exist\n");
return 0;
}
// 先删除行指针,只改变指针不释放节点
matrix_node_t *p_row = matrix->row_head[row];
matrix_node_t *pre = p_row;
if (p_row->row_next == NULL)
{
//never here,节点不存在的情况在前面已经判断过了
return 0;
}
else
{
p_row = p_row->row_next;
while (p_row)
{
if (p_row->c_idx == col)
{
pre->row_next = p_row->row_next;
break;
}
else
{
pre = p_row;
p_row = p_row->row_next;
}
}
if (!p_row) // never here 此种情况还是没有找到节点
return 0;
}
// 再删除列指针,释放节点空间
matrix_node_t *p_col = matrix->col_head[col];
pre = p_col;
if (p_col->col_next == NULL)
{
// never here
return 0;
}
else
{
p_col = p_col->col_next;
while (p_col)
{
if (p_col->r_idx == row)
{
pre->col_next = p_col->col_next;
free (p_col);
matrix->node_num--;
return 0;
}
else
{
pre = p_col;
p_col = p_col->col_next;
}
}
if (!p_col)
return 0;
}
return 0;
}
/***************************************
* function: 实现矩阵相加
* author: Herbert
* date: 2020-01-01
* comment: 将矩阵B加到矩阵A
***************************************/
int matrix_add (Matrix *matrixA, Matrix* matrixB)
{
if (NULL == matrixA)
return -1;
if (NULL == matrixB)
return 0;
if (matrixA->row_num != matrixB->row_num || matrixA->col_num != matrixB->col_num)
return -1;
int i;
matrix_node_t *p_node = NULL;
int add_val = 0;
int a_node_val = 0;
matrix_node_t *new_node = NULL;
// 遍历B中每一个node做处理
for (i = 0; i < matrixB->row_num; ++i)
{
p_node = matrixB->row_head[i];
if (!p_node)
continue;
else
{
p_node = p_node->row_next;
while (p_node)
{
if (!is_node_exist (matrixA, p_node->r_idx, p_node->c_idx))
{ // 如果A中不存在此节点则创建一个节点插入A
new_node = create_node (p_node->r_idx, p_node->c_idx, p_node->value);
insert_node (matrixA, new_node);
}
else
{ // 如果A中已存在此节点则需要判断相加的值是否为0
a_node_val = get_node_value (matrixA, p_node->r_idx, p_node->c_idx);
add_val = a_node_val + p_node->value;
if (add_val)
{
modify_node_value (matrixA, p_node->r_idx, p_node->c_idx, add_val);
}
else
{
delete_node (matrixA, p_node->r_idx, p_node->c_idx);
}
}
p_node = p_node->row_next;
}
}
}
return 0;
}
/***************************************
* function: 以行序打印一个十字链表
* author: Herbert
* date: 2020-01-01
* comment:
***************************************/
void print_linked_matrix_by_row (Matrix *matrix)
{
if (NULL == matrix)
return;
int i = 0;
matrix_node_t *p_node = NULL;
printf ("matrix node_num = %d, nodes by row is:\n", matrix->node_num);
for (i = 0; i < matrix->row_num; ++i)
{
p_node = matrix->row_head[i]->row_next;
while (p_node)
{
printf ("(%d,%d)=%d ", p_node->r_idx, p_node->c_idx, p_node->value);
p_node = p_node->row_next;
}
}
printf ("\n");
return;
}
/***************************************
* function: 以列序打印一个十字链表
* author: Herbert
* date: 2020-01-01
* comment:
***************************************/
void print_linked_matrix_by_col (Matrix *matrix)
{
if (NULL == matrix)
return;
int i = 0;
matrix_node_t *p_node = NULL;
printf ("matrix node_num = %d, nodes by col is:\n", matrix->node_num);
for (i = 0; i < matrix->col_num; ++i)
{
p_node = matrix->col_head[i]->col_next;
while (p_node)
{
printf ("(%d,%d)=%d ", p_node->r_idx, p_node->c_idx, p_node->value);
p_node = p_node->col_next;
}
}
printf ("\n");
return;
}
int main (void)
{
// 创建A矩阵并且插入nodes
Matrix *matrixA = create_empty_matrix (10, 10);
int row, col, value;
matrix_node_t *node = NULL;
while (1)
{
printf ("Input A nodes: ");
scanf ("%d %d %d", &row, &col, &value);
if (row >= matrixA->row_num || col >= matrixA->col_num)
break;
node = create_node (row,col,value);
insert_node (matrixA, node);
}
print_linked_matrix_by_row (matrixA);
print_linked_matrix_by_col (matrixA);
// 创建B矩阵并插入nodes
Matrix *matrixB = create_empty_matrix (10, 10);
while (1)
{
printf ("Input B nodes: ");
scanf ("%d %d %d", &row, &col, &value);
if (row >= matrixB->row_num || col >= matrixB->col_num)
break;
node = create_node (row,col,value);
insert_node (matrixB, node);
}
print_linked_matrix_by_row (matrixB);
print_linked_matrix_by_col (matrixB);
// 矩阵A+B
matrix_add (matrixA, matrixB);
print_linked_matrix_by_row (matrixA);
print_linked_matrix_by_col (matrixA);
// 从A中删除一些节点
while (1)
{
printf ("delete A nodes: ");
scanf ("%d %d", &row, &col);
if (row >= matrixA->row_num || col >= matrixA->col_num)
break;
delete_node (matrixA, row, col);
}
print_linked_matrix_by_row (matrixA);
print_linked_matrix_by_col (matrixA);
return 0;
}
<file_sep>#ifndef _CROSS_LIST_MATRIX_H_
#define _CORSS_LIST_MATRIX_H_
#define MAX_ROW 50
#define MAX_COL 50
#define INVALID_DATA 0xFFFFFFFF
typedef unsigned char BOOL;
#define TRUE 1
#define FALSE 0
typedef struct matrix_node
{
int r_idx;
int c_idx;
int value;
struct matrix_node* row_next;
struct matrix_node* col_next;
} matrix_node_t;
typedef struct matrix
{
int node_num;
int row_num;
int col_num;
matrix_node_t *row_head[MAX_ROW];
matrix_node_t *col_head[MAX_COL];
} Matrix;
#endif
| a7b32dfbbb0b58a75352893f61f282b8da8b0e22 | [
"Markdown",
"C",
"Text"
] | 23 | C | HenryLion/C-projects | 37bb50bb8c052aeb95ac6fb361705bf404ffa0a6 | 90b92f9cfdea575a5dc93322b211ebadfae6b0a4 |
refs/heads/master | <repo_name>odira/TM<file_sep>/tmp/person/person.js
var datef = require('datef');
datef.lang('ru');
var pg = require('pg');
var params = {
host: 'ec2-46-137-73-65.eu-west-1.compute.amazonaws.com',
database: 'dtd9hnmschbrv',
user: 'jxiumgmvybfmvb',
password: '<PASSWORD>',
ssl: true
};
var sendSurnameFirstLetterList = function sendSurnameFirstLetterList(bot, msg)
{
var msgData = msg.data.split(' ');
var fromId = msg.from.id;
var client = new pg.Client(params);
client.connect(function (err) {
if (err) throw err;
var queryStr = 'SELECT pid,surname FROM person.person WHERE surname ilike \'' + msgData[1] + '\%\' AND shift=4';
client.query(queryStr, function(err, result) {
if (err) throw err;
bot.answerCallbackQuery(msg.id, 'Фамилий начинающихся на ' + msgData[1] + ' найдено ' + result.rows.length);
$inlineKeyboard = [];
for (var i = 0; i < result.rows.length; i++) {
var inlineKeyboardButton = {
text: result.rows[i].surname,
callback_data: '3 ' + result.rows[i].pid.toString()
};
$inlineKeyboard.push([inlineKeyboardButton]);
}
var inlineMessageText = 'Выберите пользователя:';
var inlineMessageOpts = {
parse_mode: "HTML",
reply_markup: { inline_keyboard: $inlineKeyboard }
};
client.end(function (err) {
if (err) throw err;
});
bot.sendMessage(fromId, inlineMessageText, inlineMessageOpts);
});
});
};
var sendPersonData = function sendPersonData(bot, msg)
{
var msgData = msg.data.split(' ');
var fromId = msg.from.id;
var client = new pg.Client(params);
client.connect(function (err) {
if (err) throw err;
var queryStr = 'SELECT * FROM person.person WHERE pid=' + msgData[1];
client.query(queryStr, function(err, result) {
if (err) throw err;
var messageText =
'<b>' + result.rows[0].surname + '</b>\n' +
'<b>' + result.rows[0].name + ' ' + result.rows[0].middlename + '</b>\n' +
'Дата рождения: ' + datef('dd MMMM YYYY', result.rows[0].birthday) + '\n\n' +
result.rows[0].division + ' ' + result.rows[0].subdivision + ' №' + result.rows[0].shift + '\n' +
result.rows[0].position + '\n' +
'Табельный номер: ' + result.rows[0].tabnum + '\n' +
'Допуски к работе: ' + result.rows[0].positionadmittance;
var messageOpts = {
parse_mode: "HTML",
};
client.end(function (err) {
if (err) throw err;
});
bot.sendMessage(fromId, messageText, messageOpts);
});
});
};
module.exports.sendSurnameFirstLetterList = sendSurnameFirstLetterList;
module.exports.sendPersonData = sendPersonData;
<file_sep>/tmp/safety/safety.js
var datef = require('datef');
datef.lang('ru');
var pg = require('pg');
var params = {
host: 'ec2-46-137-73-65.eu-west-1.compute.amazonaws.com',
database: 'dtd9hnmschbrv',
user: 'jxiumgmvybfmvb',
password: '<PASSWORD>',
ssl: true
};
var sendSafetyEventsList = function safetyEventsList(bot, msg)
{
var client = new pg.Client(params);
client.connect(function (err)
{
if (err) throw err;
var queryStr = 'SELECT * FROM safety.vw_safety WHERE department like \'РДЦ\'';
client.query(queryStr, function(err, result)
{
if (err) throw err;
bot.answerCallbackQuery(msg.id, 'Найдено ' + result.rows.length + ' записей');
$inlineKeyboard = [];
for (var i = 0; i < result.rows.length; i++)
{
var outputString = (i + 1).toString() + '. ' +
datef('dd MMMM YYYY', result.rows[i].date) + ' \n' +
'РДЦ смена №' + result.rows[i].shift + ' ' +
'место: ' + result.rows[i].location;
var inlineKeyboardButton = {
text: outputString,
callback_data: 'АвиационныйИнцидент ' + result.rows[i].pid.toString()
};
$inlineKeyboard.push([inlineKeyboardButton]);
}
var inlineMessageText = 'Выберите авиационный инцидент:';
var inlineMessageOpts = {
parse_mode: "HTML",
reply_markup: { inline_keyboard: $inlineKeyboard }
};
client.end(function (err) {
if (err) throw err;
});
bot.sendMessage(msg.chat.id, inlineMessageText, inlineMessageOpts);
});
});
};
var sendSafetyEvent = function sendSafetyEvent(bot, msg)
{
var msgData = msg.data.split(' ');
var fromId = msg.from.id;
var client = new pg.Client(params);
client.connect(function (err) {
if (err) throw err;
var queryStr = 'SELECT * FROM safety.vw_safety WHERE pid=' + msgData[1];
client.query(queryStr, function(err, result) {
if (err) throw err;
var messageText =
'<b>' + datef('dd MMMM YYYY', result.rows[0].date) + ' г.' + '</b> <i>район</i>: <b>' + result.rows[0].location + '</b>\n' +
'<b>' + result.rows[0].department + '</b> <i>смена №</i> <b>' + result.rows[0].shift + '</b>\n\n' +
'<i>Классификация события:</i>\n<b>' + result.rows[0].classification + '</b>';
if (result.rows[0].type.length > 0) {
messageText += '\n\n<i>Вид события:</i>\n<b>' + result.rows[0].type + '</b>';
}
if (result.rows[0].factor.length > 0) {
messageText += '\n\n<i>Факторы:</i>\n<b>' + result.rows[0].factor + '</b>';
}
if (result.rows[0].cause.length > 0) {
messageText += '\n\n<i>Причины события:</i>\n' + result.rows[0].cause;
}
if (result.rows[0].details.length > 0) {
messageText += '\n\n<i>Примечание:</i>\n' + result.rows[0].details;
}
var messageOpts = {
parse_mode: "HTML",
};
bot.sendMessage(fromId, messageText, messageOpts);
client.end(function (err) {
if (err) throw err;
});
});
});
};
module.exports.sendSafetyEventsList = sendSafetyEventsList;
module.exports.sendSafetyEvent = sendSafetyEvent;
<file_sep>/tmp/PersonMenu/index.js
//var pg = require('pg');
//var params = {
// host: 'ec2-46-137-73-65.eu-west-1.compute.amazonaws.com',
// database: 'dtd9hnmschbrv',
// user: 'jxiumgmvybfmvb',
// password: '<PASSWORD>',
// ssl: true
//};
//var datef = require('datef');
//datef.lang('ru');
var showPersonMenu = function showPersonMenu(bot, msg)
{
var msgChatId = msg.chat.id;
var messageText =
'<b>Служебная информация подразделения РДЦ</b> \u{2708}\n\n' +
'\u{26A0} <b>Внимание:</b> СТАДИЯ ТЕСТИРОВАНИЯ';
$keyboard = [
['График работы на год \u{1F4C5}', 'Техническая учеба на год \u{1F4C5}', 'График отпусков \u{1F4C5}'],
['Расстановка на смену \u{1F46B}', 'Персональные данные \u{1F464}'],
// ['Расстановка на смену \u{1F46B}', {text: 'Персональные данные \u{1F464}', request_contact: true}],
['Вернуться в Главное Меню \u{2B05}']
];
var messageOpts = {
parse_mode: "HTML",
reply_markup: {
keyboard: $keyboard,
resize_keyboard: true
}
};
bot.sendMessage(msgChatId, messageText, messageOpts);
};
var showPersonalDataMenu = function showPersonalDataMenu(bot, msg)
{
var msgChatId = msg.chat.id;
var messageText =
'<b>Персональные данные работников</b> \u{2708}\n\n' +
'\u{26A0} Внимание: СТАДИЯ ТЕСТИРОВАНИЯ';
$keyboard = [
['Вернуться в Служебное Меню \u{1F51E}', 'Вернуться в Главное Меню \u{2B05}']
];
var messageOpts = {
parse_mode: "HTML",
reply_markup: {
keyboard: $keyboard,
resize_keyboard: true
}
};
bot.sendMessage(msgChatId, messageText, messageOpts).then(function ()
{
var inlineMessageText = 'Выберите первую букву фамилии пользователя';
$inlineKeyboard = [
[{text: 'А', callback_data: '2 А'},
{text: 'Б', callback_data: '2 Б'},
{text: 'В', callback_data: '2 В'},
{text: 'Г', callback_data: '2 Г'},
{text: 'Д', callback_data: '2 Д'},
{text: 'Е', callback_data: '2 Е'}
],
[{text: 'Ж', callback_data: '2 Ж'},
{text: 'З', callback_data: '2 З'},
{text: 'И', callback_data: '2 И'},
{text: 'К', callback_data: '2 К'},
{text: 'Л', callback_data: '2 Л'},
{text: 'М', callback_data: '2 М'}
],
[{text: 'Н', callback_data: '2 Н'},
{text: 'О', callback_data: '2 О'},
{text: 'П', callback_data: '2 П'},
{text: 'Р', callback_data: '2 Р'},
{text: 'С', callback_data: '2 С'},
{text: 'Т', callback_data: '2 Т'}
],
[{text: 'У', callback_data: '2 У'},
{text: 'Ф', callback_data: '2 Ф'},
{text: 'Х', callback_data: '2 Х'},
{text: 'Ц', callback_data: '2 Ц'},
{text: 'Ч', callback_data: '2 Ч'},
{text: 'Ш', callback_data: '2 Ш'}
],
[{text: 'Щ', callback_data: '2 Щ'},
{text: 'Э', callback_data: '2 Э'},
{text: 'Ю', callback_data: '2 Ю'},
{text: 'Я', callback_data: '2 Я'}
]
];
var inlineMessageOpts = {
parse_mode: "HTML",
reply_markup: {
inline_keyboard: $inlineKeyboard
}
};
bot.sendMessage(msgChatId, inlineMessageText, inlineMessageOpts);
});
};
// bot.on('message', function(msg)
// {
// console.log('\n\n');
// console.log(msg);
// console.log('\n\n');
// if (msg.contact !== undefined) {
// console.log(msg.contact.phone_number);
// console.log('\n\n');
// var fromId = msg.from.id;
// var pgParams = {
// host: 'ec2-46-137-73-65.eu-west-1.compute.amazonaws.com',
// database: 'dtd9hnmschbrv',
// user: 'jxiumgmvybfmvb',
// password: '<PASSWORD>',
// ssl: true
// };
// var client = new pg.Client(pgParams);
// client.connect(function (err) {
// if (err) throw err;
// // var queryStr = 'SELECT surname,tabnum,mobilephone FROM person.person WHERE mobilephone like \'' + msg.contact.phone_number + '\'';
// var queryStr = 'SELECT surname,tabnum,mobilephone FROM person.person WHERE mobilephone like \'%' + '903-596-76-36' + '%\'';
// client.query(queryStr, function(err, result) {
// if (err) throw err;
// for (var i = 0; i < result.rows.length; i++) {
// var outputString = result.rows[i].surname + ' ' + result.rows[i].tabnum + ' ' + result.rows[i].mobilephone;
// }
// var inlineMessageText = 'Ваш контакт:' + outputString;
// client.end(function (err) {
// if (err) throw err;
// });
// bot.sendMessage(fromId, inlineMessageText);
// });
// });
// }
// });
module.exports.showPersonMenu = showPersonMenu;
module.exports.showPersonalDataMenu = showPersonalDataMenu;
<file_sep>/tmp/InformationMenu/index.js
var Safety = require('./safety');
var showInformationMenu = function showInformationMenu(bot, msg)
{
var msgChatId = msg.chat.id;
var messageText =
'<b>Служебная информация РДЦ\nсмены №4 (Пензенское направление)</b> \u{2708}\n\n' +
'На данный момент информация в данном разделе предназначена для работников ' +
'РДЦ смены №4 (Пензенское направление)\n\n' +
'\u{26A0} Внимание: СТАДИЯ ТЕСТИРОВАНИЯ';
$keyboard = [
['Статистика', 'База данных АП'],
['\u{1F4B1} Курс\nвалюты ЦБ', '\u{2600} Погода на\nаэродромах'],
['Вернуться в Главное Меню \u{2B05}']
];
var messageOpts = {
parse_mode: "HTML",
reply_markup: {
keyboard: $keyboard,
resize_keyboard: true
}
};
bot.sendMessage(msgChatId, messageText, messageOpts).then(function ()
{
bot.onText(/База данных/, function(message) { Safety.sendSafetyEventsList(bot, msg); });
bot.onText(/валюты/, function(message) { bot.sendMessage(msgChatId, '@TerkasCurrencyBot'); });
bot.onText(/Погода/, function(message) { bot.sendMessage(msgChatId, '@TerkasWeatherBot'); });
});
};
module.exports.showInformationMenu = showInformationMenu;
| 25a013716a85414755e8027938e5e19645f3e100 | [
"JavaScript"
] | 4 | JavaScript | odira/TM | 763a2660ecc49e0226aef0871362fad582790820 | 93ae80a9e6d58542b1cdb532bd7d7c6363bca0f2 |
refs/heads/master | <file_sep>/**
* Copyright 2017 Hortonworks.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
import React, {Component, PropTypes} from 'react';
import ReactDOM, {findDOMNode} from 'react-dom';
import {OverlayTrigger, Tooltip} from 'react-bootstrap';
import {ItemTypes} from '../../../utils/Constants';
import {DragSource} from 'react-dnd';
/**
 * react-dnd drag-source spec for a palette node.
 * beginDrag builds the drag payload: the subset of props that identifies the
 * topology component being dragged. react-dnd hands this object to the drop
 * target when the node is released on the canvas.
 * Note: the key 'nodeLable' (sic) is spelled exactly as consumers expect it.
 */
const nodeSource = {
  beginDrag(props, monitor, component) {
    // Copy each descriptive prop onto the payload; anything else on props
    // (children, callbacks, ...) is deliberately left out.
    const dragItem = {};
    ['imgPath', 'type', 'name', 'nodeType', 'topologyComponentBundleId', 'nodeLable'].forEach((key) => {
      dragItem[key] = props[key];
    });
    return dragItem;
  }
};
/**
 * react-dnd collecting function: maps the drag connector and monitor state
 * onto props injected into the wrapped component.
 * @returns {{connectDragSource: Function, isDragging: boolean}}
 */
function collect(connect, monitor) {
  const connectDragSource = connect.dragSource();
  const isDragging = monitor.isDragging();
  return {connectDragSource, isDragging};
}
// Draggable palette entry (icon + component name) for the topology editor.
// Wrapped by react-dnd's DragSource HOC so the <li> can be dropped onto the
// editor canvas; the drag payload comes from nodeSource.beginDrag above.
@DragSource(ItemTypes.Nodes, nodeSource, collect)
export default class NodeContainer extends Component {
  static propTypes = {
    connectDragSource: PropTypes.func.isRequired, // injected by collect()
    isDragging: PropTypes.bool.isRequired, // injected by collect()
    imgPath: PropTypes.string.isRequired,
    type: PropTypes.string.isRequired,
    name: PropTypes.string.isRequired,
    hideSourceOnDrag: PropTypes.bool.isRequired,
    children: PropTypes.node,
    nodeType: PropTypes.string.isRequired,
    topologyComponentBundleId: PropTypes.number.isRequired,
    defaultImagePath: PropTypes.string.isRequired
    // NOTE(review): nodeSource.beginDrag also reads a `nodeLable` prop (sic)
    // that is not declared here — confirm intended spelling with callers.
  };
  // Builds the draggable <li>. Only imgPath, name and defaultImagePath are
  // used below; the other destructured props are currently unused here.
  getDragableNode(connectDragSource) {
    const {
      imgPath,
      nodeType,
      type,
      name,
      topologyComponentBundleId,
      defaultImagePath
    } = this.props;
    // Legacy string ref ("img") — onError swaps in the fallback image when
    // the component icon fails to load.
    return connectDragSource(
      <li>
        <img src={imgPath} ref="img" onError={() => {
          this.refs.img.src = defaultImagePath;
        }}/> {name}
      </li>
    );
  }
  render() {
    const {
      hideSourceOnDrag,
      imgPath,
      type,
      name,
      connectDragSource,
      isDragging,
      children,
      nodeType,
      topologyComponentBundleId
    } = this.props;
    // Optionally hide the palette entry while it is being dragged.
    if (isDragging && hideSourceOnDrag) {
      return null;
    }
    return this.getDragableNode(connectDragSource);
  }
}
<file_sep>/**
* Copyright 2017 Hortonworks.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
package com.hortonworks.streamline.streams.runtime.storm.bolt.query;
import com.google.common.collect.ImmutableMap;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.windowing.TupleWindow;
import com.hortonworks.streamline.streams.StreamlineEvent;
import com.hortonworks.streamline.streams.common.StreamlineEventImpl;
import com.hortonworks.streamline.streams.runtime.storm.bolt.StreamlineWindowedBolt;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
public class WindowedQueryBolt extends StreamlineWindowedBolt {
    // Storm output collector; assigned during bolt preparation (not visible in this chunk).
    private OutputCollector collector;

    // Buffer for every joined stream EXCEPT the first declared one:
    // Map[StreamName -> Map[joinKeyValue -> List<Tuple>] ]
    HashMap<String, HashMap<Object, ArrayList<Tuple> >> hashedInputs = new HashMap<>(); // holds remaining streams

    // Join criteria per stream, in declaration order (insertion order matters for chained joins):
    // Map[StreamName -> JoinInfo]
    LinkedHashMap<String, JoinInfo> joinCriteria = new LinkedHashMap<>();
    private String[][] outputKeys; // specified via bolt.select() ... used in declaring Output fields
    private String[] dotSeparatedOutputKeyNames; // flattened (de nested) keyNames, used for naming output fields
    // True when selectStreamLine() was used, i.e. projection emits StreamlineEvent objects.
    private boolean streamLineStyleProjection = false;
    // Name of the output stream, set via withOutputStream().
    private String outputStreamName;

    // Use streamId, source component name OR field in tuple to distinguish incoming tuple streams
    public enum StreamSelector { STREAM, SOURCE }
    private final StreamSelector streamSelectorType;
/**
* StreamId to start the join with. Equivalent SQL ...
* select .... from streamId ...
* @param type Specifies whether 'streamId' refers to stream name/source component
* @param streamId name of stream/source component
* @param key the fieldName to use as key for the stream (used for performing joins)
*/
public WindowedQueryBolt(StreamSelector type, String streamId, String key) {
streamSelectorType = type;
joinCriteria.put(streamId, new JoinInfo(key) );
}
/**
* Defines the name of the output stream
* Note: This method 'appears' Streamline specific. See if it needs to be migrated to Storm
*/
public WindowedQueryBolt withOutputStream(String streamName) {
this.outputStreamName = streamName;
return this;
}
    /**
     * Declares an INNER join of {@code newStream} against an already-declared stream.
     * SQL analogy: {@code from priorStream inner join newStream on newStream.key = priorStream.key1}
     * same as: {@code new WindowedQueryBolt(priorStream,key1).join(newStream, key, priorStream)}
     *
     * Note: {@code priorStream} must have been declared by a previous constructor/join call.
     * Valid ex:   new WindowedQueryBolt(s1,k1). join(s2,k2, s1). join(s3,k3, s2);
     * Invalid ex: new WindowedQueryBolt(s1,k1). join(s3,k3, s2). join(s2,k2, s1);
     *
     * @param newStream   stream to join in
     * @param key         join-key field name on {@code newStream}
     * @param priorStream previously declared stream to join against
     * @return this bolt, to allow call chaining
     */
    public WindowedQueryBolt join(String newStream, String key, String priorStream) {
        return join_common(newStream, key, priorStream, JoinType.INNER);
    }
    /**
     * Declares a LEFT join of {@code newStream} against an already-declared stream.
     * SQL analogy: {@code from stream1 left join stream2 on stream2.key = stream1.key1}
     * same as: {@code new WindowedQueryBolt(stream1, key1).leftJoin(stream2, key, stream1)}
     *
     * Note: {@code priorStream} must have been declared by a previous constructor/join call.
     * Valid ex:   new WindowedQueryBolt(s1,k1). leftJoin(s2,k2, s1). leftJoin(s3,k3, s2);
     * Invalid ex: new WindowedQueryBolt(s1,k1). leftJoin(s3,k3, s2). leftJoin(s2,k2, s1);
     *
     * @param newStream   stream to join in (right side of the left join)
     * @param key         join-key field name on {@code newStream}
     * @param priorStream previously declared stream to join against
     * @return this bolt, to allow call chaining
     */
    public WindowedQueryBolt leftJoin(String newStream, String key, String priorStream) {
        return join_common(newStream, key, priorStream, JoinType.LEFT);
    }
private WindowedQueryBolt join_common(String newStream, String key, String priorStream, JoinType joinType) {
hashedInputs.put(newStream, new HashMap<Object, ArrayList<Tuple>>());
JoinInfo joinInfo = joinCriteria.get(priorStream);
if( joinInfo==null )
throw new IllegalArgumentException("Stream '" + priorStream + "' was not previously declared");
joinCriteria.put(newStream, new JoinInfo(key, priorStream, joinInfo, joinType) );
return this;
}
/**
* Specify projection keys. i.e. Specifies the keys to include in the output.
* e.g: .select("key1, key2, key3")
* Nested Key names are supported for nested types:
* e.g: .select("outerKey1.innerKey1, outerKey1.innerKey2, outerKey2.innerKey3)"
* Inner types (non leaf) must be Map<> in order to support lookup by key name
* This selected keys implicitly declare the output fieldNames for the bolt based.
* @param commaSeparatedKeys
* @return
*/
public WindowedQueryBolt select(String commaSeparatedKeys) {
String[] keyNames = commaSeparatedKeys.split(",");
dotSeparatedOutputKeyNames = new String[keyNames.length];
outputKeys = new String[keyNames.length][];
for (int i = 0; i < keyNames.length; i++) {
dotSeparatedOutputKeyNames[i] = keyNames[i].trim();
outputKeys[i] = dotSeparatedOutputKeyNames[i].split("\\.");
}
return this;
}
/** This a convenience method specifically for Streamline that allows users to skip specifying the
* 'streamline-event.' prefix for every join key and projection key repeatedly
* Similar to select(), but has 3 differences:
* - the projected tuple is a StreamlineEvent object instead of regular Storm tuple
* - each key in 'commaSeparatedKeys' is automatically prefixed with 'streamline-event.'
* - updates each key in joinCriteria with prefix 'streamline-event.'
* Note: This will be kept Streamline specific and wont be migrated to Storm.
*/
public WindowedQueryBolt selectStreamLine(String commaSeparatedKeys) {
// prefix each key with "streamline-event."
String prefixedKeyNames = convertToStreamLineKeys(commaSeparatedKeys);
prefixJoinCriteriaKeys(); // update the join keys
streamLineStyleProjection = true;
return select(prefixedKeyNames);
}
/** Prefixes each key in the joinCriteria with "streamline-event." and preserves original insertion order
* Note: This will be kept Streamline specific and wont be migrated to Storm.
*/
private void prefixJoinCriteriaKeys() {
for (JoinInfo ji : joinCriteria.values()) {
ji.nestedKeyName = splice(StreamlineEvent.STREAMLINE_EVENT, ji.nestedKeyName);
if ( ji.otherKey!=null )
ji.otherKey = splice(StreamlineEvent.STREAMLINE_EVENT, ji.otherKey);
}
}
private String[] splice(String head, String[] tail) {
String[] result = new String[tail.length+1];
result[0]=head;
for (int i = 0; i < tail.length; i++) {
result[i+1] = tail[i];
}
return result;
}
// prefixes each key with 'streamline-event.'
private static String convertToStreamLineKeys(String commaSeparatedKeys) {
String[] keyNames = commaSeparatedKeys.replaceAll("\\s+","").split(",");
String prefix = StreamlineEvent.STREAMLINE_EVENT + ".";
return prefix + String.join("," + prefix, keyNames);
}
@Override
public void declareOutputFields(OutputFieldsDeclarer declarer) {
if (outputStreamName!=null) { // Note: StreamLine specific code
declarer.declareStream(outputStreamName, new Fields(StreamlineEvent.STREAMLINE_EVENT));
} else {
declarer.declare(new Fields(dotSeparatedOutputKeyNames));
}
}
@Override
public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
this.collector = collector;
// initialize the hashedInputs data structure
int i=0;
for ( String stream : joinCriteria.keySet() ) {
if(i>0) {
hashedInputs.put(stream, new HashMap<Object, ArrayList<Tuple>>());
}
++i;
}
if(outputKeys==null) {
throw new IllegalArgumentException("Must specify output fields via .select() method.");
}
}
@Override
public void execute(TupleWindow inputWindow) {
// 1) Perform Join
List<Tuple> currentWindow = inputWindow.get();
JoinAccumulator joinResult = hashJoin(currentWindow);
// 2) Emit results
for (ResultRecord resultRecord : joinResult.getRecords()) {
ArrayList<Object> outputTuple = resultRecord.getOutputFields();
if ( outputStreamName==null )
collector.emit( outputTuple );
else
collector.emit( outputStreamName, outputTuple );
}
}
private void clearHashedInputs() {
for (HashMap<Object, ArrayList<Tuple>> mappings : hashedInputs.values()) {
mappings.clear();
}
}
protected JoinAccumulator hashJoin(List<Tuple> tuples) {
clearHashedInputs();
JoinAccumulator probe = new JoinAccumulator();
// 1) Build phase - Segregate tuples in the Window into streams.
// First stream's tuples go into probe, rest into HashMaps in hashedInputs
String firstStream = joinCriteria.keySet().iterator().next();
for (Tuple tuple : tuples) {
String streamId = getStreamSelector(tuple);
if ( ! streamId.equals(firstStream) ) {
Object key = getKeyField(streamId, tuple);
ArrayList<Tuple> recs = hashedInputs.get(streamId).get(key);
if(recs == null) {
recs = new ArrayList<Tuple>();
hashedInputs.get(streamId).put(key, recs);
}
recs.add(tuple);
} else {
ResultRecord probeRecord = new ResultRecord(tuple, joinCriteria.size() == 1);
probe.insert( probeRecord ); // first stream's data goes into the probe
}
}
// 2) Join the streams in order of streamJoinOrder
int i=0;
for (String streamName : joinCriteria.keySet() ) {
boolean finalJoin = (i==joinCriteria.size()-1);
if(i>0) {
probe = doJoin(probe, hashedInputs.get(streamName), joinCriteria.get(streamName), finalJoin);
}
++i;
}
return probe;
}
// Dispatches to the right join method (inner/left/right/outer) based on the joinInfo.joinType
protected JoinAccumulator doJoin(JoinAccumulator probe, HashMap<Object, ArrayList<Tuple>> buildInput, JoinInfo joinInfo, boolean finalJoin) {
final JoinType joinType = joinInfo.getJoinType();
switch ( joinType ) {
case INNER:
return doInnerJoin(probe, buildInput, joinInfo, finalJoin);
case LEFT:
return doLeftJoin(probe, buildInput, joinInfo, finalJoin);
case RIGHT:
case OUTER:
default:
throw new RuntimeException("Unsupported join type : " + joinType.name() );
}
}
// inner join - core implementation
protected JoinAccumulator doInnerJoin(JoinAccumulator probe, Map<Object, ArrayList<Tuple>> buildInput, JoinInfo joinInfo, boolean finalJoin) {
String[] probeKeyName = joinInfo.getOtherKey();
JoinAccumulator result = new JoinAccumulator();
for (ResultRecord rec : probe.getRecords()) {
Object probeKey = rec.getField(joinInfo.otherStream, probeKeyName);
if (probeKey!=null) {
ArrayList<Tuple> matchingBuildRecs = buildInput.get(probeKey);
if(matchingBuildRecs!=null) {
for (Tuple matchingRec : matchingBuildRecs) {
ResultRecord mergedRecord = new ResultRecord(rec, matchingRec, finalJoin);
result.insert(mergedRecord);
}
}
}
}
return result;
}
// left join - core implementation
protected JoinAccumulator doLeftJoin(JoinAccumulator probe, Map<Object, ArrayList<Tuple>> buildInput, JoinInfo joinInfo, boolean finalJoin) {
String[] probeKeyName = joinInfo.getOtherKey();
JoinAccumulator result = new JoinAccumulator();
for (ResultRecord rec : probe.getRecords()) {
Object probeKey = rec.getField(joinInfo.otherStream, probeKeyName);
if (probeKey!=null) {
ArrayList<Tuple> matchingBuildRecs = buildInput.get(probeKey); // ok if its return null
if (matchingBuildRecs!=null && !matchingBuildRecs.isEmpty() ) {
for (Tuple matchingRec : matchingBuildRecs) {
ResultRecord mergedRecord = new ResultRecord(rec, matchingRec, finalJoin);
result.insert(mergedRecord);
}
} else {
ResultRecord mergedRecord = new ResultRecord(rec, null, finalJoin);
result.insert(mergedRecord);
}
}
}
return result;
}
// Identify the key for the stream, and look it up in 'tuple'. key can be nested key: outerKey.innerKey
private Object getKeyField(String streamId, Tuple tuple) {
JoinInfo ji = joinCriteria.get(streamId);
if(ji==null) {
throw new RuntimeException("Join information for '" + streamId + "' not found. Check the join clauses.");
}
return getNestedField(ji.getNestedKeyName(), tuple);
}
// Steps down into a nested tuple based on the nestedKeyName
protected static Object getNestedField(String[] nestedKeyName, Tuple tuple) {
Object curr = null;
for (int i=0; i < nestedKeyName.length; i++) {
if (i==0) {
if (tuple.contains(nestedKeyName[i]) )
curr = tuple.getValueByField(nestedKeyName[i]);
else
return null;
} else {
curr = ((Map) curr).get(nestedKeyName[i]);
if (curr==null)
return null;
}
}
return curr;
}
private String getStreamSelector(Tuple ti) {
switch (streamSelectorType) {
case STREAM:
return ti.getSourceStreamId();
case SOURCE:
return ti.getSourceComponent();
default:
throw new RuntimeException(streamSelectorType + " stream selector type not yet supported");
}
}
protected enum JoinType {INNER, LEFT, RIGHT, OUTER}
/** Describes how to join the other stream with the current stream */
protected static class JoinInfo implements Serializable {
final static long serialVersionUID = 1L;
String[] nestedKeyName; // nested key name for the current stream: outer.inner -> { "outer", "inner }
String otherStream; // name of the other stream to join with
String[] otherKey; // key name of the other stream
JoinType joinType; // nature of join
// nestedKeys uses dot separated key names... outer.inner.innermostKey
public JoinInfo(String nestedKey) {
this.nestedKeyName = nestedKey.split("\\.");
this.otherStream = null;
this.otherKey = null;
this.joinType = null;
}
public JoinInfo(String nestedKey, String otherStream, JoinInfo otherStreamJoinInfo, JoinType joinType) {
this.nestedKeyName = nestedKey.split("\\.");
this.otherStream = otherStream;
this.otherKey = otherStreamJoinInfo.nestedKeyName;
this.joinType = joinType;
}
public String[] getNestedKeyName() {
return nestedKeyName;
}
public String getOtherStream() {
return otherStream;
}
public String[] getOtherKey() {
return otherKey;
}
public JoinType getJoinType() {
return joinType;
}
} // class JoinInfo
// Join helper to concat fields to the record
protected class ResultRecord {
ArrayList<Tuple> tupleList = new ArrayList<>(); // contains one Tuple per Stream being joined
ArrayList<Object> outputFields = null; // refs to fields that will be part of output fields
// 'generateOutputFields' enables us to avoid projection unless it is the final stream being joined
public ResultRecord(Tuple tuple, boolean generateOutputFields) {
tupleList.add(tuple);
if(generateOutputFields) {
outputFields = doProjection(tupleList, outputKeys);
}
}
public ResultRecord(ResultRecord lhs, Tuple rhs, boolean generateOutputFields) {
if(lhs!=null)
tupleList.addAll(lhs.tupleList);
if(rhs!=null)
tupleList.add(rhs);
if(generateOutputFields) {
outputFields = doProjection(tupleList, outputKeys);
}
}
public ArrayList<Object> getOutputFields() {
return outputFields;
}
public Object getField(String stream, String[] nestedFieldName) {
for (Tuple tuple : tupleList) {
if(getStreamSelector(tuple).equals(stream))
return getNestedField(nestedFieldName, tuple);
}
return null;
}
}
protected class JoinAccumulator {
ArrayList<ResultRecord> records = new ArrayList<>();
public void insert(ResultRecord tuple) {
records.add( tuple );
}
public Collection<ResultRecord> getRecords() {
return records;
}
}
// Performs projection on the tuples based on the 'projectionKeys'
protected ArrayList<Object> doProjection(ArrayList<Tuple> tuples, String[][] projectionKeys) {
if(streamLineStyleProjection)
return doProjectionStreamLine(tuples, outputKeys);
ArrayList<Object> result = new ArrayList<>(projectionKeys.length);
// Todo: optimize this computation... perhaps inner loop can be outside to avoid rescanning tuples
for ( int i = 0; i < projectionKeys.length; i++ ) {
boolean missingField = true;
for ( Tuple tuple : tuples ) {
Object field = getNestedField(projectionKeys[i], tuple ) ;
if (field != null) {
result.add(field);
missingField=false;
break;
}
}
if(missingField) { // add a null for missing fields (usually in case of outer joins)
result.add(null);
}
}
return result;
}
// Performs projection and creates output tuple structure as expected by StreamLine compliant
protected ArrayList<Object> doProjectionStreamLine(ArrayList<Tuple> tuplesRow, String[][] projectionKeys) {
StreamlineEventImpl.Builder eventBuilder = StreamlineEventImpl.builder();
// Todo: note to self: may be able to optimize this ... perhaps inner loop can be outside to avoid rescanning tuples
for ( int i = 0; i < projectionKeys.length; i++ ) {
String flattenedKey = dotSeparatedOutputKeyNames[i];
String outputKeyName = flattenedKey.substring(flattenedKey.indexOf('.')+1); // drop the "streamline-event." prefix
for ( Tuple cell : tuplesRow ) {
Object field = getNestedField(projectionKeys[i], cell) ;
if (field != null) {
eventBuilder.put(outputKeyName, field);
break;
}
}
}
ArrayList<Object> resultRow = new ArrayList<>();
StreamlineEventImpl slEvent = eventBuilder.dataSourceId("multiple sources").build();
resultRow.add(slEvent);
return resultRow;
}
}
<file_sep>/**
* Copyright 2017 Hortonworks.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
import React, {Component, PropTypes} from 'react';
import ReactDOM, {findDOMNode} from 'react-dom';
import {OverlayTrigger, Tooltip, Accordion, Panel, PanelGroup} from 'react-bootstrap';
import {ItemTypes, Components} from '../../../utils/Constants';
import {DragSource} from 'react-dnd';
import NodeContainer from './NodeContainer';
import state from '../../../app_state';
import _ from 'lodash';
import Utils from '../../../utils/Utils';
import {Scrollbars} from 'react-custom-scrollbars';
const nodeSource = {
beginDrag(props, monitor, component) {
const {left, top} = props;
return {left, top};
}
};
function collect(connect, monitor) {
return {connectDragSource: connect.dragSource(), isDragging: monitor.isDragging()};
}
@DragSource(ItemTypes.ComponentNodes, nodeSource, collect)
export default class ComponentNodeContainer extends Component {
static propTypes = {
connectDragSource: PropTypes.func.isRequired,
isDragging: PropTypes.bool.isRequired,
left: PropTypes.number.isRequired,
top: PropTypes.number.isRequired,
hideSourceOnDrag: PropTypes.bool.isRequired
};
constructor(props) {
super(props);
let {bundleArr} = this.props;
if (!bundleArr) {
bundleArr = {
sourceBundle: [],
processorsBundle: [],
sinksBundle: []
};
}
this.state = {
datasources: Utils.sortArray(bundleArr.sourceBundle, 'name', true),
processors: Utils.sortArray(bundleArr.processorsBundle, 'name', true),
sinks: Utils.sortArray(bundleArr.sinksBundle, 'name', true)
};
}
componentWillReceiveProps(nextProps, oldProps) {
if (nextProps.bundleArr != this.props.bundleArr) {
this.setState({
datasources: Utils.sortArray(nextProps.bundleArr.sourceBundle, 'name', true),
processors: Utils.sortArray(nextProps.bundleArr.processorsBundle, 'name', true),
sinks: Utils.sortArray(nextProps.bundleArr.sinksBundle, 'name', true)
});
}
}
render() {
const {hideSourceOnDrag, left, top, isDragging} = this.props;
if (isDragging && hideSourceOnDrag) {
return null;
}
return (
<div className="component-panel right" style={{
height: window.innerHeight - 60
}}>
<div className="btnDrag-wrapper">
<button className="btn-draggable"></button>
</div>
<div className="panel-wrapper" style={{
height: window.innerHeight - 90
}}>
<Scrollbars autoHide autoHeightMin={452} renderThumbHorizontal= { props => <div style = { { display: "none" } } />}>
<div className="inner-panel">
<h6 className="component-title">
Source
</h6>
<ul className="component-list">
{this.state.datasources.map((source, i) => {
return (<NodeContainer key={i} imgPath={"styles/img/icon-" + source.subType.toLowerCase() + ".png"} name={source.name.toUpperCase()} type={source.type} nodeLable={source.name.toUpperCase()} nodeType={source.subType} hideSourceOnDrag={false} topologyComponentBundleId={source.id} defaultImagePath='styles/img/icon-source.png'/>);
})
}
</ul>
<h6 className="component-title">
Processor
</h6>
<ul className="component-list">
{this.state.processors.map((processor, i) => {
if (processor.subType === 'CUSTOM') {
let config = processor.topologyComponentUISpecification.fields,
name = _.find(config, {fieldName: "name"});
return (<NodeContainer key={i} imgPath="styles/img/icon-custom.png" name={name
? name.defaultValue
: 'Custom'} nodeLable={name
? name.defaultValue
: 'Custom'} type={processor.type} nodeType="Custom" hideSourceOnDrag={false} topologyComponentBundleId={processor.id} defaultImagePath='styles/img/icon-processor.png'/>);
} else {
return (<NodeContainer key={i} imgPath={"styles/img/icon-" + processor.subType.toLowerCase() + ".png"} name={processor.name.toUpperCase()} nodeLable={processor.name.toUpperCase()} type={processor.type} nodeType={processor.subType} hideSourceOnDrag={false} topologyComponentBundleId={processor.id} defaultImagePath='styles/img/icon-processor.png'/>);
}
})
}
</ul>
<h6 className="component-title">
Sink
</h6>
<ul className="component-list">
{this.state.sinks.map((sink, i) => {
return (<NodeContainer key={i} imgPath={"styles/img/icon-" + sink.subType.toLowerCase() + ".png"} name={sink.name.toUpperCase()} nodeLable={sink.name.toUpperCase()} type={sink.type} nodeType={sink.subType} hideSourceOnDrag={false} topologyComponentBundleId={sink.id} defaultImagePath='styles/img/icon-sink.png'/>);
})
}
</ul>
</div>
</Scrollbars>
</div>
<div className="btnDrag-wrapper">
<button className="btn-draggable"></button>
</div>
</div>
);
}
}
| e8c2c66f430fd0d8b64125b493cc584985dc1b6e | [
"JavaScript",
"Java"
] | 3 | JavaScript | roshannaik/streamline | fb59d2a614a398adfa8e6219b0514cc4e1d815b2 | d59948ea184853f7aba845716604f9e297d46944 |
refs/heads/master | <file_sep># GCDDemo
swift3.0
<file_sep>//
// ViewController.swift
// GCDDemo
//
// Created by 婉卿容若 on 2016/12/6.
// Copyright © 2016年 婉卿容若. All rights reserved.
//
import UIKit
// 喵大推荐的单例写法
class MyManager {
private static let sharedInstance = MyManager()
class var sharedManager: MyManager {
return sharedInstance
}
}
class ViewController: UIViewController {
// 学习地址:http://swift.gg/2016/11/30/grand-central-dispatch/
@IBOutlet weak var imageView: UIImageView!
var inactiveQueue: DispatchQueue!
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view, typically from a nib.
}
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
// 串行
// syncSample()
//asyncSample()
//queueWithQosSyncSample()
//queueWithQosAsyncSample()
//并行
// conQueueSyncSample()
// conQueueAsyncSample()
// conQueueWithQosSyncSample()
// conQueueWithQosAsyncSample()
// 手动
// noAutoAction()
// if let queue = inactiveQueue {
// queue.activate()
// }
// queueWithDelay()
// fetchImage()
// useWorkItem()
// groupQueue()
// barrierDemo()
// semaphoreDemo()
// MARK: - dispatch_once 废弃 ,建议使用懒加载
// 全局常量
let constant = MyManager()
// 全局 variable
var variable: MyManager = {
let v = MyManager()
return v
}()
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
}
//MARK: - 串行队列 serial
extension ViewController{
// 串行同步
func syncSample(){
let queue = DispatchQueue(label: "com.zhengwenxiang")
queue.sync {
for i in 0..<10{
print("👍 ", i)
}
print("current thread is \(Thread.current)")
}
queue.sync {
for i in 20..<30{
print("🌹 ", i)
}
print("current thread02 is \(Thread.current)")
}
for i in 100..<110{
print("🌶 ", i)
}
print("Main thread is \(Thread.current)")
}
// 串行异步
func asyncSample(){
let queue = DispatchQueue(label: "com.zhengwenxiang")
queue.async {
for i in 0..<10{
print("👍 ", i)
}
print("current thread is \(Thread.current)")
}
queue.async {
for i in 20..<30{
print("🌹 ", i)
}
print("current thread02 is \(Thread.current)")
}
for i in 100..<110{
print("🌶 ", i)
}
print("Main thread is \(Thread.current)")
}
//用于指定任务重要程度以及优先级的信息,在 GCD 中被称为 Quality of Service(QoS)
//如果没有指定 QoS,则队列会使用默认优先级进行初始化
/* 优先级从上到下一次递减 -- priority
userInteractive
userInitiated
default
utility
background
unspecified
*/
// 串行 + 优先级 + 同步
func queueWithQosSyncSample(){
let queue01 = DispatchQueue(label: "com.zhengwenxiang", qos: DispatchQoS.userInitiated)
// let queue01 = DispatchQueue(label: "com.zhengwenxiang", qos: DispatchQoS.background)
// let queue02 = DispatchQueue(label: "com.zhengwenxiang02", qos: DispatchQoS.userInitiated)
let queue02 = DispatchQueue(label: "com.zhengwenxiang02", qos: DispatchQoS.utility)
queue01.sync {
for i in 0..<10{
print("👍 ", i)
}
print("current thread00 is \(Thread.current)")
}
queue01.sync {
for i in 100..<110{
print("👎 ", i)
}
print("current thread02 is \(Thread.current)")
}
queue02.sync {
for i in 200..<210{
print("🌶 ", i)
}
print("current thread20 is \(Thread.current)")
}
queue02.sync {
for i in 300..<310{
print("🐔 ", i)
}
print("current thread21 is \(Thread.current)")
}
for i in 1000..<1010 {
print("🐷 ", i)
}
print("Main thread is \(Thread.current)")
}
// 串行 + 优先级 + 异步
func queueWithQosAsyncSample(){
let queue01 = DispatchQueue(label: "com.zhengwenxiang", qos: DispatchQoS.userInitiated)
// let queue01 = DispatchQueue(label: "com.zhengwenxiang", qos: DispatchQoS.background)
// let queue02 = DispatchQueue(label: "com.zhengwenxiang02", qos: DispatchQoS.userInitiated)
let queue02 = DispatchQueue(label: "com.zhengwenxiang02", qos: DispatchQoS.utility)
queue01.async {
for i in 0..<10{
print("👍 ", i)
}
print("current thread00 is \(Thread.current)")
}
queue01.async {
for i in 100..<110{
print("👎 ", i)
}
print("current thread02 is \(Thread.current)")
}
queue02.async {
for i in 200..<210{
print("🌶 ", i)
}
print("current thread20 is \(Thread.current)")
}
queue02.async {
for i in 300..<310{
print("🐔 ", i)
}
print("current thread21 is \(Thread.current)")
}
for i in 1000..<1010 {
print("🐷 ", i)
}
print("Main thread is \(Thread.current)")
}
}
//MARK: - 并行队列 concurrent
extension ViewController{
// 并行同步
func conQueueSyncSample(){
/*
这个 attributes 参数也可以接受另一个名为 initiallyInactive 的值。如果使用这个值,任务不会被自动执行,而是需要开发者手动去触发。
*/
let anotherQueue = DispatchQueue(label: "com.zhengwenxiang.con", qos: .utility, attributes: .concurrent)
//initiallyInactive属性的串行队列
// let anotherQueue = DispatchQueue(label: "com.zhengwenxiang.con", qos: .utility, attributes: .initiallyInactive)
// initiallyInactive属性的并行队列
// let anotherQueue = DispatchQueue(label: "com.zhengwenxiang.con", qos: .utility, attributes: [.concurrent, .initiallyInactive])
// inactiveQueue = anotherQueue
anotherQueue.sync {
for i in 0..<10{
print("👍 ", i)
}
print("current thread is \(Thread.current)")
}
anotherQueue.sync {
for i in 100..<110{
print("🌶 ", i)
}
print("current thread02 is \(Thread.current)")
}
// anotherQueue.async {
// for i in 1000..<1010 {
// print("🎩 ", i)
// }
// }
for i in 2000..<2010 {
print("🐷 ", i)
}
print("Main thread is \(Thread.current)")
}
// 并行异步
func conQueueAsyncSample(){
//let anotherQueue = DispatchQueue(label: "com.zhengwenxiang.con", qos: .utility)
/*
这个 attributes 参数也可以接受另一个名为 initiallyInactive 的值。如果使用这个值,任务不会被自动执行,而是需要开发者手动去触发。
*/
let anotherQueue = DispatchQueue(label: "com.zhengwenxiang.con", qos: .utility, attributes: .concurrent)
anotherQueue.async {
for i in 0..<10{
print("👍 ", i)
}
print("current thread is \(Thread.current)")
}
anotherQueue.async {
for i in 100..<110{
print("🌶 ", i)
}
print("current thread02 is \(Thread.current)")
}
// anotherQueue.async {
// for i in 1000..<1010 {
// print("🎩 ", i)
// }
// }
for i in 2000..<2010 {
print("🐷 ", i)
}
print("Main thread is \(Thread.current)")
}
// 并行 + 优先级 + 同步
func conQueueWithQosSyncSample(){
let anotherQueue = DispatchQueue(label: "com.zhengwenxiang.con", qos: .userInitiated, attributes: .concurrent)
let anotherQueue02 = DispatchQueue(label: "com.zhengwenxiang.con02", qos: .utility, attributes: .concurrent)
anotherQueue.sync {
for i in 0..<10{
print("👍 ", i)
}
print("current thread00 is \(Thread.current)")
}
anotherQueue.sync {
for i in 100..<110{
print("🌶 ", i)
}
print("current thread01 is \(Thread.current)")
}
anotherQueue02.sync {
for i in 1000..<1010 {
print("🎩 ", i)
}
print("current thread20 is \(Thread.current)")
}
anotherQueue02.sync {
for i in 2000..<2010 {
print("🐔 ", i)
}
print("current thread21 is \(Thread.current)")
}
for i in 3000..<3010 {
print("🐷 ", i)
}
print("Main thread is \(Thread.current)")
}
// 并行 + 优先级 + 异步
func conQueueWithQosAsyncSample(){
let anotherQueue = DispatchQueue(label: "com.zhengwenxiang.con", qos: .userInitiated, attributes: .concurrent)
let anotherQueue02 = DispatchQueue(label: "com.zhengwenxiang.con02", qos: .utility, attributes: .concurrent)
anotherQueue.async {
for i in 0..<10{
print("👍 ", i)
}
print("current thread00 is \(Thread.current)")
}
anotherQueue.async {
for i in 100..<110{
print("🌶 ", i)
}
print("current thread01 is \(Thread.current)")
}
anotherQueue02.async {
for i in 1000..<1010 {
print("🎩 ", i)
}
print("current thread20 is \(Thread.current)")
}
anotherQueue02.async {
for i in 2000..<2010 {
print("🐔 ", i)
}
print("current thread21 is \(Thread.current)")
}
for i in 3000..<3010 {
print("🐷 ", i)
}
print("Main thread is \(Thread.current)")
}
}
// MARK: - 手动执行
extension ViewController{
// 程序员手动开启队列 initiallyInactive
func noAutoAction(){
/*
这个 attributes 参数也可以接受另一个名为 initiallyInactive 的值。如果使用这个值,任务不会被自动执行,而是需要开发者手动去触发。
*/
//initiallyInactive属性的串行队列
// let anotherQueue = DispatchQueue(label: "com.zhengwenxiang.con", qos: .utility, attributes: .initiallyInactive)
// initiallyInactive属性的并行队列
let anotherQueue = DispatchQueue(label: "com.zhengwenxiang.con", qos: .utility, attributes: [.concurrent, .initiallyInactive])
inactiveQueue = anotherQueue
anotherQueue.sync {
for i in 0..<10{
print("👍 ", i)
}
print("current thread is \(Thread.current)")
}
anotherQueue.sync {
for i in 100..<110{
print("🌶 ", i)
}
print("current thread02 is \(Thread.current)")
}
// anotherQueue.async {
// for i in 1000..<1010 {
// print("🎩 ", i)
// }
// }
for i in 2000..<2010 {
print("🐷 ", i)
}
print("Main thread is \(Thread.current)")
}
}
// MARK: - 延迟执行
extension ViewController{
func queueWithDelay(){
let delayQueue = DispatchQueue(label: "com.zhengwenxiang.delay", qos: .userInitiated)
print(Date())
let additionalTime: DispatchTimeInterval = .seconds(2)
// delayQueue.asyncAfter(deadline: .now() + additionalTime){
// print(Date())
// }
delayQueue.asyncAfter(deadline: .now() + additionalTime, execute:{
print(Date())
})
}
}
// MARK: - 访问主队列和全局队列
extension ViewController{
func globalAndMainQueue(){
// let globelQueue = DispatchQueue.global()
let globelQueue = DispatchQueue.global(qos: .userInitiated)
globelQueue.async {
for i in 0...10{
print("🇨🇳 ",i)
}
}
DispatchQueue.main.async {
// do something
}
}
}
// MARK: - download image
extension ViewController{
func fetchImage(){
let imageUrl = URL(string: "http://www.appcoda.com/wp-content/uploads/2015/12/blog-logo-dark-400.png")!
let session = URLSession(configuration: .default)
let task = session.dataTask(with: imageUrl, completionHandler:{ (imageData, response, error) in
if let data = imageData{
print("Did download image data")
DispatchQueue.main.async {
self.imageView.image = UIImage(data: data)
}
}
})
task.resume()
//
// let imageURL: URL = URL(string: "http://www.appcoda.com/wp-content/uploads/2015/12/blog-logo-dark-400.png")!
//
// (URLSession(configuration: URLSessionConfiguration.default)).dataTask(with: imageURL, completionHandler: { (imageData, response, error) in
// if let data = imageData {
// print("Did download image data")
//
// DispatchQueue.main.async {
// self.imageView.image = UIImage(data: data)
// }
//
// }
// }).resume()
}
}
// MARK: - workItem
extension ViewController{
// DispatchWorkItem 是一个代码块,它可以在任意一个队列上被调用,因此它里面的代码可以在后台运行,也可以在主线程运行
func useWorkItem(){
var value = 10
let workItem = DispatchWorkItem{
value += 5
}
workItem.perform()// 使用任务对象 -- 会在主线程中调用任务项,或者使用其他队列来执行
print("🈚️ ", value)
let queue = DispatchQueue.global()
// queue.async {
// workItem.perform()
// print("😍 ", value)
// }
queue.async(execute: workItem) // 便捷使用方法 -- 这句和上面那个一起执行程序会挂,同一个队列针对同一个代码块进行了操作...
workItem.wait() // 等待 wokItem执行完再继续向下进行
print("👌 ", value)
// 当一个任务项被调用后,你可以通知主队列(或者任何其它你想要的队列)
workItem.notify(queue: DispatchQueue.main, execute: {
print("value = ", value) // 它是在任务项被执行的时候打印的
})
}
}
// MARK: - DispatchGroup
extension ViewController{
func groupQueue(){
// 如果想在dispatch_queue中所有的任务执行完成后再做某种操作可以使用DispatchGroup
// 将队列放入DispatchGroup
let group = DispatchGroup()
let queueBook = DispatchQueue(label: "book")
queueBook.async(group: group, execute: {
// download book
print("download book")
})
let queueVideo = DispatchQueue(label: "video")
queueVideo.async(group: group, execute: {
// download video
print("download video")
})
group.wait() // 如果有多个并发队列在一个组里,我们想在这些操作执行完了再继续,调用wait
group.notify(queue: DispatchQueue.main, execute: {
// download successed
print("download successed")
})
}
}
// MARK: - barrier => DispatchWorkItemFlags
extension ViewController{
/*
假设我们有一个并发的队列用来读写一个数据对象。如果这个队列里的操作是读的,那么可以多个同时进行。如果有写的操作,则必须保证在执行写入操作时,不会有读取操作在执行,必须等待写入完成后才能读取,否则就可能会出现读到的数据不对。在之前我们用dipatch_barrier实现。
现在属性放在了DispatchWorkItemFlags里。
文/没故事的卓同学(简书作者)
原文链接:http://www.jianshu.com/p/fc78dab5736f
著作权归作者所有,转载请联系作者获得授权,并标注“简书作者”。
*/
func barrierDemo(){
var value = 10
let wirte = DispatchWorkItem(qos: .default, flags: .barrier, block:{
value += 100
print("Please waiting for writing data")
})
let dataQueue = DispatchQueue(label: "data", qos: .default, attributes: .concurrent)
dataQueue.async(execute: wirte)
dataQueue.async {
print("I am waiting for value = ", value)
}
}
}
// MARK: - 信号量dispatch_semaphore_t => DispatchSemaphore
extension ViewController{
// 为了线程安全的统计数量,我们会使用信号量作计数
func semaphoreDemo(){
let semaphore = DispatchSemaphore(value: 5)
semaphore.wait() // 信号量减一
semaphore.signal() // 信号量加一
}
}
| 2abce8989ea8bd4481ddf5f3e809604077e1eeb4 | [
"Markdown",
"Swift"
] | 2 | Markdown | wanqingrongruo/GCDDemo | 1e61946b31c3070deecb5ef7679da40e636a0c8a | d9bab903833d992f40f3a97824d70730fb2ea723 |
refs/heads/master | <repo_name>chocofactoryswap/chocofactory-contracts<file_sep>/scripts/poolInfoByPid.js
const MASTER = artifacts.require("KtKtMaster");
const user = '<KEY>';
function toN(bn){
if( ! bn ) return '-';
let n = bn.toNumber();
return n / 1E18;
}
module.exports = async function() {
const cli = await MASTER.deployed();
const pid = '0';
console.log('KtKtMaster', cli.address);
const r = await cli.poolInfoByPid(pid);
let allocPoint = toN(r._allocPoint);
let lastRewardBlock = r._lastRewardBlock.toNumber();
let accKtKtPerShare = toN(r._accKtKtPerShare);
console.log('lastRewardBlock', lastRewardBlock);
console.log('accKtKtPerShare', accKtKtPerShare);
process.exit(0);
};
<file_sep>/README.md
# ChcolateFactory Swap Contracts
http://chocofactory.org
Feel free to read the code.
More details coming soon.
## Deployed Contracts / Hash
- DeployerAddress - 0xDAF1D6AB3268b4fAf348B470A28951d89629D306
- DevAddress - 0xDAF1D6AB3268b4fAf348B470A28951d89629D306
- Timelock - 0x50eD0ccC4068768a9611ba377E3862ee0D5E092E
# KtKt
- KtKtToken - <PASSWORD>
- KtKtMaster - 0x5162e4d2c22cC309771455D967518b6746E74691
## MMS
- MMSToken - 0x5<PASSWORD>
- MMSMaster - 0x0fc<PASSWORD>17f4<PASSWORD>c<PASSWORD>Fda18A3ee
# Exchange
- (Uni|chocofactory)swapV2Factory - 0xDEBA8b1314dC39c796B0035b88EB921A0A49e768
- (Uni|chocofactory)swapV2Router02 - 0xB6fdd734f6180568F604E0fF2e597677c1E36918
- (Uni|chocofactory)swapV2Pair init code hash - `4ba018<KEY>`
## License
WTFPL
<file_sep>/migrations/2_timelock.js
const Timelock = artifacts.require("Timelock");
module.exports = function(deployer, network, accounts) {
const admin_ = accounts[0];
deployer.deploy(Timelock, admin_);
};<file_sep>/scripts/updatePool.js
const MASTER = artifacts.require("KtKtMaster");
const user = '<KEY>';
module.exports = async function() {
const cli = await MASTER.deployed();
const pid = '0';
console.log('KtKtMaster', cli.address);
await cli.updatePool(pid);
console.log('updatePool');
process.exit(0);
};
<file_sep>/scripts/add_pool.js
const MASTER = artifacts.require("KtKtMaster");
module.exports = async function(deployer) {
const cli = await MASTER.deployed();
const _allocPoint = '1';
//const _lpToken = '0x1d05072d22270bde9ae2eb55eeddc5d2753ff27e'; //KtKt
const _lpToken = '<KEY>'; //busd
const _withUpdate = false;
console.log('address', cli.address);
const r = await cli.add(_allocPoint, _lpToken, _withUpdate);
console.log('add', r);
process.exit(0);
};
<file_sep>/scripts/approve.js
const MASTER = artifacts.require("KtKtMaster");
module.exports = async function(deployer) {
const cli = await MASTER.deployed();
const _allocPoint = '0';
const _lpToken = '<KEY>';
const _withUpdate = true;
console.log('address', cli.address);
const r = await cli.add(_allocPoint, _lpToken, _withUpdate);
console.log('add', r);
process.exit(0);
};
| 21fe017caf048c317519fdad0d903e159979bba8 | [
"JavaScript",
"Markdown"
] | 6 | JavaScript | chocofactoryswap/chocofactory-contracts | a0c398945964af022c9744e8641a345f9b78e6a9 | 8a31263a5e199349259aadf2a188c2dc97003c64 |
refs/heads/master | <repo_name>liamwhite/boorutextile<file_sep>/lib/textile/nodes.rb
# frozen_string_literal: true
require 'cgi'
class MultiNode
def initialize(nodes)
@nodes = nodes || []
end
def build
@nodes.map(&:build).join('')
end
end
class TextNode
def initialize(text)
@text = text
end
# Hook for booru monkeypatch
def build
CGI.escapeHTML(@text).gsub("\n", '<br>')
end
end
class RawTextNode
def initialize(text)
@text = text
end
def build
CGI.escapeHTML(@text).gsub("\n", '<br>')
end
end
class HTMLNode
def initialize(tag_name, inner, attributes = {})
@tag_name = tag_name
@inner = inner
@attributes = attributes || {}
end
def build
output = []
output << '<'
output << @tag_name
@attributes.each do |name, value|
output << ' '
output << name
output << '="'
output << CGI.escapeHTML(value)
output << '"'
end
output << '>'
output << @inner.build
output << '</'
output << @tag_name
output << '>'
output.join('')
end
end
class ImageNode
def initialize(src)
@src = src
end
def build
output = []
output << '<img src="'
output << transform_src
output << '"/>'
output.join('')
end
# Hook for booru monkeypatch
def transform_src
CGI.escapeHTML(@src)
end
end
<file_sep>/lib/textile/parser.rb
# frozen_string_literal: true
require 'textile/nodes'
module TextileParser
extend self
def parse(text)
ary = []
operand(ary, text) until text.empty?
MultiNode.new(ary)
end
def find_syms(text)
# Find possible symbol matches
syms = SYM_TO_INDEX.map { |sym, index| [sym, text.index(index)] }
.reject { |sym, index| index.nil? }
# Sort by starting position - closer is better
syms = syms.sort_by{ |x| x[1] }
# Get associated regexps and find first
matchdata = nil
match = syms.map { |sym, index| [sym, SYM_TO_REGEX[sym]] }
.detect { |sym, re| matchdata = re.match(text) }
# [sym, matchdata]
[match[0], matchdata] if match
end
def operand(ary, text)
sym, md = find_syms(text)
if sym.nil? || md.nil?
# No match, consume entire string.
return ary << TextNode.new(text.slice!(0 .. text.length))
end
# Consume string before match.
if md.pre_match.size > 0
ary << TextNode.new(text.slice!(0 ... md.pre_match.size))
end
# Act on match.
# FIXME: Separate logic for string consumption:
case sym
when :raw_bracket
balanced = balance_markup(text, md.to_s, '[==', '==]').match(SYM_TO_REGEX[:raw_bracket])[1]
ary << RawTextNode.new(balanced)
when :bq_author
balanced = balance_markup(text, md.to_s, BQ_LEFT, '[/bq]').match(SYM_TO_REGEX[:bq_author])[2]
ary << HTMLNode.new(:blockquote, parse(balanced), title: $1)
when :bq
balanced = balance_markup(text, md.to_s, BQ_LEFT, '[/bq]').match(SYM_TO_REGEX[:bq])[1]
ary << HTMLNode.new(:blockquote, parse(balanced))
when :spoiler
balanced = balance_markup(text, md.to_s, '[spoiler]', '[/spoiler]').match(SYM_TO_REGEX[:spoiler])[1]
ary << HTMLNode.new(:span, parse(balanced), class: 'spoiler')
else
text.slice!(0 .. md.to_s.size)
end
case sym
when :raw
ary << RawTextNode.new(md[1])
when :link_title_bracket, :link_title
ary << HTMLNode.new(:a, parse(md[1]), title: md[2], href: md[3])
when :link_bracket, :link
ary << HTMLNode.new(:a, parse(md[1]), href: md[2])
when :image_link_title_bracket, :image_link_title
ary << HTMLNode.new(:a, ImageNode.new(md[1]), title: md[2], href: md[3])
when :image_link_bracket, :image_link
ary << HTMLNode.new(:a, ImageNode.new(md[1]), href: md[2])
when :image_title_bracket, :image_title
ary << HTMLNode.new(:span, ImageNode.new(md[1]), title: md[2])
when :image_bracket, :image
ary << ImageNode.new(md[1])
when :dblbold_bracket, :dblbold
ary << HTMLNode.new(:b, parse(md[1]))
when :bold_bracket, :bold
ary << HTMLNode.new(:strong, parse(md[1]))
when :dblitalic_bracket, :dblitalic
ary << HTMLNode.new(:i, parse(md[1]))
when :italic_bracket, :italic
ary << HTMLNode.new(:em, parse(md[1]))
when :code_bracket, :code
ary << HTMLNode.new(:code, parse(md[1]))
when :ins_bracket, :ins
ary << HTMLNode.new(:ins, parse(md[1]))
when :sup_bracket, :sup
ary << HTMLNode.new(:sup, parse(md[1]))
when :del_bracket, :del
ary << HTMLNode.new(:del, parse(md[1]))
when :sub_bracket, :sub
ary << HTMLNode.new(:sub, parse(md[1]))
when :cite_bracket, :cite
ary << HTMLNode.new(:cite, parse(md[1]))
end
end
private
# Find the longest substring that contains balanced markup,
# or the whole string if this is impossible.
def balance_markup(text, matched, left, right)
both = Regexp.union(left, right)
left = Regexp.union(left)
right = Regexp.union(right)
s = StringScanner.new(matched)
n, lowest_pos = 0, 0
i = loop do
match = s.scan(both)
case
when match =~ left
n += 1
when match =~ right
n -= 1
lowest_pos = s.pos
else
m = s.scan_until(both)
s.pos = s.pos - s.matched.size if m
s.terminate if m.nil?
end
break lowest_pos.pred if n.zero? || s.eos?
end
text.slice!(0 .. i)
matched[0 .. i]
end
# Properly nesting operator pairs:
# [bq][/bq] [bq="author"][/bq]
# [spoiler][/spoiler]
# [== ==]
# Non-nesting operator pairs:
# == " ! ** * __ _ @ + ^ - ~ ??
# Ruby \s does not match extra unicode space characters.
RX_SPACE_CHARS = ' \t\u00a0\u1680\u180E\u2000-\u200A\u202F\u205F\u3000'
RX_URL = %r{
(?:http:\/\/|https:\/\/|\/\/|\/|\#) # protocol
(?:[^%#{RX_SPACE_CHARS}"!\n\r]|%[0-9a-fA-F]{2})+ # path
[^#{RX_SPACE_CHARS}`~!@$^&"\n\r\*_+\-=\[\]\\|;:,.'?\#)] # invalid
}x
BQ_LEFT = /\[bq="([^"]*)"\]|\[bq\]/
# Symbol table, in operator precedence order:
# 0. Symbol name.
# 1. Start string for optimized matching.
# 2. Complete match definition.
SYMS = [
[:raw_bracket, '[==', /\[==(.*)==\]/],
[:bq_author, '[bq="', /\[bq="([^"]*)"\](.*)\[\/bq\]/],
[:bq, '[bq]', /\[bq\](.*)\[\/bq\]/],
[:spoiler, '[spoiler]', /\[spoiler\](.*)\[\/spoiler\]/],
[:raw, '==', /==(.*)==/],
[:link_title_bracket, '["', /\A\["([^"]*)\(([^\)]*)\)":(#{RX_URL})\]/],
[:link_title, '"', /"([^"]*)\(([^\)]*)\)":(#{RX_URL})/],
[:link_bracket, '["', /\["([^"]*)":(#{RX_URL})\]/],
[:link, '"', /"([^"]*)":(#{RX_URL})/],
[:image_link_title_bracket, '[!', /\[!(#{RX_URL})\(([^\)]*)\)!:(#{RX_URL})\]/],
[:image_link_title, '!', /!(#{RX_URL})\(([^\)]*)\)!:(#{RX_URL})/],
[:image_link_bracket, '[!', /\[!(#{RX_URL})!:(#{RX_URL})\]/],
[:image_link, '!', /!(#{RX_URL})!:(#{RX_URL})/],
[:image_title_bracket, '[!', /\[!(#{RX_URL})\(([^\)]*)\)!\]/],
[:image_title, '!', /!(#{RX_URL})\(([^\)]*)\)!/],
[:image_bracket, '[!', /\[!(#{RX_URL})!\]/],
[:image, '!', /!(#{RX_URL})!/],
[:dblbold_bracket, '[**', /\[\*\*((?:.|\n.|\n(?=\*\*\]))+?)\*\*\]/],
[:dblbold, '**', /\*\*((?:.|\n.|\n(?=\*\*))+?)\*\*/],
[:bold_bracket, '[*', /\[\*((?:.|\n.|\n(?=\*\]))+?)\*\]/],
[:bold, '*', /\*((?:.|\n.|\n(?=\*\]))+?)\*/],
[:dblitalic_bracket, '[__', /\[__((?:.|\n.|\n(?=__\]))+?)__\]/],
[:dblitalic, '__', /__((?:.|\n.|\n(?=__))+?)__/],
[:italic_bracket, '[_', /\[_((?:.|\n.|\n(?=_\]))+?)_\]/],
[:italic, '_', /_((?:.|\n.|\n(?=_))+?)_/],
[:code_bracket, '[@', /\[@((?:.|\n.|\n(?=@\]))+?)@\]/],
[:code, '@', /@((?:.|\n.|\n(?=@))+?)@/],
[:ins_bracket, '[+', /\[\+((?:.|\n.|\n(?=\+\]))+?)\+\]/],
[:ins, '+', /\+((?:.|\n.|\n(?=\+))+?)\+/],
[:sup_bracket, '[^', /\[\^((?:.|\n.|\n(?=\^\]))+?)\^\]/],
[:sup, '^', /\^((?:.|\n.|\n(?=\^))+?)\^/],
[:del_bracket, '[-', /\[\-((?:.|\n.|\n(?=\-\]))+?)\-\]/],
[:del, '-', /\-((?:.|\n.|\n(?=\-))+?)\-/],
[:sub_bracket, '[~', /\[\~((?:.|\n.|\n(?=\~\]))+?)\~\]/],
[:sub, '~', /\~((?:.|\n.|\n(?=\~))+?)\~/],
[:cite_bracket, '[??', /\[\?\?((?:.|\n.|\n(?=\?\?\]))+?)\?\?\]/],
[:cite, '??', /\?\?((?:.|\n.|\n(?=\?\?))+?)\?\?/],
]
SYM_TO_INDEX = Hash[SYMS.map { |name, index, re| [name, index] }]
SYM_TO_REGEX = Hash[SYMS.map { |name, index, re| [name, re] }]
end
<file_sep>/lib/textile.rb
require 'textile/parser'
module Textile
def self.parse(text)
TextileParser.parse(text.dup)
end
end
<file_sep>/textile.gemspec
$:.push File.expand_path("../lib", __FILE__)
require 'textile/version'
Gem::Specification.new do |s|
s.name = 'textile'
s.version = Textile::VERSION.dup
s.license = "MIT"
s.summary = "Recursive-descent style Textile parser"
s.description = "Recursive-descent style Textile parser"
s.authors = ["<NAME>"]
s.email = '<EMAIL>'
s.files = `git ls-files`.split("\n")
s.require_paths = ["lib"]
end
| 9c041356808726d4d807b2d2e6799312f5c6cb9f | [
"Ruby"
] | 4 | Ruby | liamwhite/boorutextile | 8eec432155edef807a893373c60ba611f5772085 | 206a580b3943b7f6482bce3691a20cf57c30f1cf |
refs/heads/master | <file_sep>#ifndef MAINWINDOW_H
#define MAINWINDOW_H
#include <QMainWindow>
#include <QLabel>
#include <QStack>
class QPushButton;
namespace Ui {
class MainWindow;
}
class MainWindow : public QMainWindow
{
Q_OBJECT
public:
explicit MainWindow(QWidget *parent = 0);
~MainWindow();
private:
Ui::MainWindow *ui;
enum State {
normal,
hungry,
sick,
sleepy,
dirty,
inAction
};
enum Action {
happy,
eat,
heal,
shower,
play
};
enum HandlerType {
nothing,
killCurrentNeed
};
enum NormalStateAction {
stand,
walk,
noAction
};
State currentState = normal;
QLabel *bibiContainer = NULL;
QLabel *lightOffContainer = NULL;
QLabel *fullnessBar = NULL;
QLabel *happinessBar = NULL;
QStack<State> stateStack;
int happiness = 5;
int fullness = 5;
bool isInAction = false;
NormalStateAction lastNormalStateAction = noAction;
void setupUiComponents();
void setupStaticBackground();
void setupAnimatedBackground();
void setupStatus();
void drawStaticImageAt(std::string, int, int, int, int);
void setupButtons();
void setupBibi();
void setupHourClock();
void born();
void happyWithFinishHandler(HandlerType);
void setFullnessToFull();
void increaseFullness();
void decreaseFullness();
void syncFullness();
void setHappinessToFull();
void increaseHappiness();
void decreaseHappiness();
void syncHappiness();
void setFullnessTo(int);
void setHappinessTo(int);
void changeBibiToState(State);
void stopBackgroundAnimation();
void changeBibiAnimationTo(std::string);
void removePreviousAnimationIfExists();
void hourlyWorkAssignment(int);
bool randomYesInEvery(int val);
void randomChangeToStand();
void randomChangeToWalk();
void standInNormalStateForced();
int backgroundAnimationTimerId;
int backgroundImageOffset = 0;
int backgroundImageFaceRight = true;
void checkAndTurnBackgroundImageFacing();
void turnOffLight();
void turnOnLight();
bool isLightOn();
void delay();
protected:
void timerEvent(QTimerEvent *event);
private slots:
void hourClockTickHandler();
void buttonEatHandler();
void buttonShowerHandler();
void buttonHealHandler();
void buttonPlayHandler();
void buttonTurnOffLightHandler();
void nullFinishHandler();
void killCurrentNeedFinishHandler();
void addAndDisplayNewState(State);
void standInNormalState();
void walkInNormalState();
void morningArrived();
};
#endif // MAINWINDOW_H
<file_sep>#include "mainwindow.h"
#include "ui_mainwindow.h"
#include <QMovie>
#include <QLabel>
#include <QPushButton>
#include <QTime>
#include <QTimer>
#include <QThread>
#include <QApplication>
#include <QDateTime>
#include <QIcon>
#include <string>
#include <ctime>
MainWindow::MainWindow(QWidget *parent) :
QMainWindow(parent),
ui(new Ui::MainWindow) {
ui->setupUi(this);
setupUiComponents();
}
void MainWindow::setupUiComponents() {
setupStaticBackground();
setupStatus();
setupButtons();
setupBibi();
setupHourClock();
}
void MainWindow::setupStaticBackground() {
QPixmap background(":/background");
QRegion exposed;
QPalette palette;
background.scroll(-backgroundImageOffset, 0, background.rect(), &exposed);
palette.setBrush(QPalette::Background, background);
this->setPalette(palette);
}
void MainWindow::setupAnimatedBackground() {
backgroundAnimationTimerId = startTimer(20);
}
void MainWindow::setupStatus() {
drawStaticImageAt(":/fullness", -10, -10, 135, 135);
drawStaticImageAt(":/happiness", 290, -10, 135, 135);
setFullnessTo(3);
setHappinessTo(3);
}
void MainWindow::drawStaticImageAt(std::string resName, int x, int y, int w, int h) {
QLabel *topLevelLabel = new QLabel(this);
QPixmap pixmap(QString::fromStdString(resName));
topLevelLabel->setPixmap(pixmap);
topLevelLabel->setGeometry(x, y, w, h);
topLevelLabel->show();
}
void MainWindow::setFullnessToFull() {
fullness = 5;
syncFullness();
}
void MainWindow::increaseFullness() {
if(fullness < 5) {
fullness ++;
}
syncFullness();
}
void MainWindow::decreaseFullness() {
if(fullness > 0) {
fullness --;
}
syncFullness();
if(fullness == 0) {
if(stateStack.isEmpty() || stateStack.top() != hungry) {
addAndDisplayNewState(hungry);
}
}
}
void MainWindow::syncFullness() {
setFullnessTo(fullness);
}
void MainWindow::setHappinessToFull() {
happiness = 5;
syncHappiness();
}
void MainWindow::increaseHappiness() {
if(happiness < 5) {
happiness ++;
}
syncHappiness();
}
void MainWindow::decreaseHappiness() {
if(happiness > 0) {
happiness --;
}
syncHappiness();
}
void MainWindow::syncHappiness() {
setHappinessTo(happiness);
}
void MainWindow::setFullnessTo(int val) {
fullness = val;
if(fullnessBar == NULL) {
fullnessBar = new QLabel(this);
fullnessBar->setGeometry(100, 38, 160, 36);
}
std::string resName = ":/fullness" + std::to_string (val);
QPixmap pixmap(QString::fromStdString(resName));
fullnessBar->setPixmap(pixmap);
fullnessBar->show();
}
void MainWindow::setHappinessTo(int val) {
happiness = val;
if(happinessBar == NULL) {
happinessBar = new QLabel(this);
happinessBar->setGeometry(400, 38, 160, 36);
}
std::string resName = ":/happiness" + std::to_string (val);
QPixmap pixmap(QString::fromStdString(resName));
happinessBar->setPixmap(pixmap);
happinessBar->show();
}
void MainWindow::setupBibi() {
born();
}
void MainWindow::setupHourClock() {
QTimer *clock = new QTimer(this);
connect(clock, SIGNAL(timeout()), this, SLOT(hourClockTickHandler()));
clock->start(1000 * 60 * 60); // hourly
}
void MainWindow::born() {
isInAction = true;
changeBibiAnimationTo(":/stateBorn");
QTimer *timer = new QTimer(this);
timer->singleShot(7000, this, SLOT(nullFinishHandler()));
}
void MainWindow::happyWithFinishHandler(HandlerType handlerType) {
isInAction = true;
stopBackgroundAnimation();
changeBibiAnimationTo(":/stateHappy");
QTimer *timer = new QTimer(this);
switch(handlerType) {
case nothing:
timer->singleShot(5000, this, SLOT(nullFinishHandler()));
break;
case killCurrentNeed:
timer->singleShot(5000, this, SLOT(killCurrentNeedFinishHandler()));
break;
}
}
void MainWindow::nullFinishHandler() {
isInAction = false;
changeBibiToState(normal);
}
void MainWindow::killCurrentNeedFinishHandler() {
isInAction = false;
if(stateStack.isEmpty()) {
changeBibiToState(normal);
return;
}
stateStack.pop();
if(stateStack.isEmpty()) {
qDebug("is empty, and switching to normal");
changeBibiToState(normal);
} else {
qDebug("is not empty");
changeBibiToState(stateStack.top());
}
}
void MainWindow::changeBibiToState(State state) {
switch (state) {
case normal:
standInNormalStateForced();
break;
case hungry:
changeBibiAnimationTo(":/stateHungry");
break;
case sick:
changeBibiAnimationTo(":/stateSick");
break;
case sleepy:
changeBibiAnimationTo(":/stateSleep");
break;
case dirty:
changeBibiAnimationTo(":/stateDirty");
break;
}
}
void MainWindow::walkInNormalState() {
if(!stateStack.isEmpty() || isInAction || lastNormalStateAction == walk) {
qDebug("walk handle is ignored");
return;
}
setupAnimatedBackground();
if(backgroundImageFaceRight) {
changeBibiAnimationTo(":/stateWalk");
} else {
changeBibiAnimationTo(":/stateWalkBackward");
}
lastNormalStateAction = walk;
randomChangeToStand();
}
void MainWindow::randomChangeToStand() {
srand(time(0));
int randomTime = ((rand() % 20) + 5) * 1000; // 20~40 seconds
qDebug("random change to stand time: %d", randomTime);
QTimer *timer = new QTimer(this);
timer->singleShot(randomTime, this, SLOT(standInNormalState()));
}
void MainWindow::standInNormalState() {
if(lastNormalStateAction == stand) {
return;
}
}
void MainWindow::standInNormalStateForced() {
qDebug("in normal state stand");
stopBackgroundAnimation();
if(!stateStack.isEmpty() || isInAction) {
qDebug("stand handle is ignored");
return;
}
changeBibiAnimationTo(":/stateNormal");
lastNormalStateAction = stand;
randomChangeToWalk();
}
void MainWindow::randomChangeToWalk() {
srand(time(0));
int randomTime = ((rand() % 20) + 5) * 1000; // 20~40 second
qDebug("random change to walk time: %d", randomTime);
QTimer *timer = new QTimer(this);
timer->singleShot(randomTime, this, SLOT(walkInNormalState()));
}
void MainWindow::addAndDisplayNewState(State newState) {
if(!stateStack.isEmpty() && newState == stateStack.top()) {
return;
}
stopBackgroundAnimation();
stateStack.push(newState);
changeBibiToState(newState);
}
void MainWindow::stopBackgroundAnimation() {
if(backgroundAnimationTimerId) {
killTimer(backgroundAnimationTimerId);
backgroundAnimationTimerId = 0;
}
}
void MainWindow::changeBibiAnimationTo(std::string resName) {
removePreviousAnimationIfExists();
QMovie *bibi = new QMovie(QString::fromStdString(resName));
if (!bibi->isValid()) {
qDebug("bibi animation is not found");
}
bibiContainer = new QLabel(this);
bibiContainer->setMovie(bibi);
bibiContainer->setGeometry(150, 130, 300, 390);
bibiContainer->show();
bibi->start();
}
void MainWindow::removePreviousAnimationIfExists() {
if (bibiContainer != NULL) {
bibiContainer->deleteLater();
bibiContainer = NULL;
}
}
void MainWindow::hourClockTickHandler() {
QDateTime now = QDateTime::currentDateTime();
int hour = now.toString("H").toInt();
qDebug("%d", hour);
hourlyWorkAssignment(hour);
}
void MainWindow::hourlyWorkAssignment(int hour) {
if(hour % 2 == 0) {
decreaseFullness();
}
if(hour % 3 == 0) {
decreaseHappiness();
}
if(hour == 19) { // 7pm
addAndDisplayNewState(dirty);
}
if(hour >= 22 || hour <= 7) {
if(stateStack.isEmpty() || stateStack.top() != sleepy) {
addAndDisplayNewState(sleepy);
}
}
if(hour >= 8 && hour <= 21) {
if(!isLightOn()) {
morningArrived();
}
}
if(randomYesInEvery(72)) {
addAndDisplayNewState(sick);
}
}
bool MainWindow::randomYesInEvery(int val) {
srand(time(0));
return (rand() % val) == 0; // 20~40 seconds
}
void MainWindow::setupButtons() {
connect(ui->buttonEat, SIGNAL(clicked()), this , SLOT(buttonEatHandler()));
connect(ui->buttonShower, SIGNAL(clicked()), this , SLOT(buttonShowerHandler()));
connect(ui->buttonHeal, SIGNAL(clicked()), this , SLOT(buttonHealHandler()));
connect(ui->buttonPlay, SIGNAL(clicked()), this , SLOT(buttonPlayHandler()));
connect(ui->buttonTurnOffLight, SIGNAL(clicked()), this , SLOT(buttonTurnOffLightHandler()));
}
void MainWindow::buttonEatHandler() {
qDebug("eat button clicked");
if(isInAction) {
qDebug("in action, blocked");
return;
}
if(stateStack.isEmpty()) {
increaseFullness();
happyWithFinishHandler(nothing);
} else if (stateStack.top() == hungry) {
increaseFullness();
happyWithFinishHandler(killCurrentNeed);
}
}
void MainWindow::buttonShowerHandler() {
qDebug("shower button clicked");
if(isInAction) {
qDebug("in action, blocked");
return;
}
if(!stateStack.isEmpty() && stateStack.top() == dirty) {
increaseHappiness();
happyWithFinishHandler(killCurrentNeed);
}
}
void MainWindow::buttonHealHandler() {
qDebug("heal button clicked");
if(isInAction) {
qDebug("in action, blocked");
return;
}
if(!stateStack.isEmpty() && stateStack.top() == sick) {
setHappinessToFull();
happyWithFinishHandler(killCurrentNeed);
}
}
void MainWindow::buttonPlayHandler() {
qDebug("play button clicked");
if(isInAction) {
qDebug("in action, blocked");
return;
}
if(!stateStack.isEmpty()) {
return;
}
increaseHappiness();
happyWithFinishHandler(nothing);
}
void MainWindow::buttonTurnOffLightHandler() {
qDebug("turn off light button clicked");
if(isInAction) {
qDebug("in action, blocked");
return;
}
if(!stateStack.isEmpty() && stateStack.top() == sleepy) {
turnOffLight();
}
}
void MainWindow::turnOffLight() {
QMovie *lightOffScene = new QMovie(QString::fromStdString(":/lightOffScene"));
if (!lightOffScene->isValid()) {
qDebug("lightOffScene animation is not found");
}
lightOffContainer = new QLabel(this);
lightOffContainer->setMovie(lightOffScene);
lightOffContainer->setGeometry(0, 0, 600, 600);
lightOffContainer->show();
lightOffScene->start();
}
void MainWindow::turnOnLight() {
if(lightOffContainer != NULL) {
lightOffContainer->deleteLater();
lightOffContainer = NULL;
}
}
bool MainWindow::isLightOn() {
return lightOffContainer == NULL;
}
void MainWindow::morningArrived() {
turnOnLight();
happyWithFinishHandler(killCurrentNeed);
}
MainWindow::~MainWindow() {
killTimer(backgroundAnimationTimerId);
delete ui;
}
void MainWindow::timerEvent(QTimerEvent *event) {
QPixmap background(":/background");
QRegion exposed;
QPalette palette;
background.scroll(-backgroundImageOffset, 0, background.rect(), &exposed);
palette.setBrush(QPalette::Background, background);
this->setPalette(palette);
int delta = 1;
if (backgroundImageFaceRight) {
backgroundImageOffset += delta;
} else {
backgroundImageOffset -= delta;
}
checkAndTurnBackgroundImageFacing();
}
void MainWindow::checkAndTurnBackgroundImageFacing() {
if (backgroundImageOffset >= 1800 - 600) {
backgroundImageFaceRight = false;
if(!isInAction && stateStack.isEmpty()) {
changeBibiAnimationTo(":/stateWalkBackward");
}
} else if (backgroundImageOffset <= 0) {
backgroundImageFaceRight = true;
if(!isInAction && stateStack.isEmpty()) {
changeBibiAnimationTo(":/stateWalk");
}
}
}
void MainWindow::delay() {
QTime dieTime= QTime::currentTime().addSecs(1);
while (QTime::currentTime() < dieTime) {
QCoreApplication::processEvents(QEventLoop::AllEvents, 100);
}
}
<file_sep># Chicken Bibi
Have your little chicken Bibi on your desktop!

## Install
- Mac ([download](https://raw.githubusercontent.com/heronyang/chicken-bibi/master/build/bibi.app.zip))
## User Review
> 放著看起來可愛可愛的
| 2609ae8565b0831ec9056f01b0b654f56bf87bbe | [
"Markdown",
"C++"
] | 3 | C++ | heronyang/chicken-bibi | 0b4a2ec40be6420f32108a8e3e63b7b84dd26a13 | da183f5c5ba6f1e2af638319070a6cf276e88129 |
refs/heads/master | <file_sep>/* jshint esversion: 6 */
// Solve the following prompts using recursion.
// 1. Calculate the factorial of a number. The factorial of a non-negative integer n,
// denoted by n!, is the product of all positive integers less than or equal to n.
// Example: 5! = 5 x 4 x 3 x 2 x 1 = 120
// factorial(5); // 120
// Computes n! recursively: n! = n * (n-1)!.
// Returns null for negative input (factorial is undefined there).
let factorial = function(n)
{
  if (n < 0)
  {
    return null;
  }
  // Base cases: 0! and 1! are both 1.
  if (n === 0 || n === 1)
  {
    return 1;
  }
  // Recursive case: shrink toward the base case by one each call.
  return factorial(n - 1) * n;
};
// 2. Compute the sum of an array of integers.
// sum([1,2,3,4,5,6]); // 21
/* STRATEGY:
1) Base case: when array is empty, return 0
2) Else: recursively return last number + all but last numbers
* Make sure to make copy of array and not mutate original
*/
// Sums an array of integers recursively: last element + sum of the rest.
// Fix: the original assigned `copyArr` without let/const, creating an
// implicit global variable (a strict-mode error and a cross-call hazard).
// slice() never mutates the caller's array, so no explicit copy is needed.
let sum = function(array)
{
  // Base case: an empty array sums to 0.
  if (array.length === 0) return 0;
  return array[array.length - 1] + sum(array.slice(0, -1));
};
// 3. Sum all numbers in an array containing nested arrays.
// arraySum([1,[2,3],[[4]],5]); // 15
/* STRATEGY:
Use a for loop to go thru each element
1) Base case: when element is a number, return total + elem
2) Else if elem is array: return running total + recursive call to arraySum
*/
// Sums all numbers in an array that may contain arbitrarily nested arrays.
// Each nested array contributes its own recursive arraySum; plain numbers
// are added directly.
let arraySum = function(array)
{
  return array.reduce(function(total, elem)
  {
    // Recurse into sub-arrays; add leaf numbers as-is.
    return total + (Array.isArray(elem) ? arraySum(elem) : elem);
  }, 0);
};
// 4. Check if a number is even.
/*
Definition:
* a number is even if it is one more than an odd number
* a number is odd if it is one more than an even number
* 0 is even
* NEGATIVE numbers can be odd/even; it is symmetrical to positive #s
STRATEGY:
If number is negative, turn it positive
1) Base case #1: 0 is Even
2) Base case #2: 1 is Odd
3) Return recursively number - 2
3B)(why 2? even & oddness varies every OTHER number)
*/
// Returns true if n is even, false if odd. Works for negative numbers
// (evenness is symmetric around zero).
// Fix: the original used an `else if` chain, so after negating a negative n
// the base-case checks were skipped; isEven(-1) became -1 -> 1 -> isEven(-1)
// and recursed forever. Recursing on -n instead re-runs the base cases.
let isEven = function(n)
{
  if (n < 0) return isEven(-n);
  // Base cases: 0 is even, 1 is odd.
  if (n === 0) return true;
  if (n === 1) return false;
  // Parity repeats every 2, so step down by 2 toward a base case.
  return isEven(n - 2);
};
// 5. Sum all integers below a given integer.
// sumBelow(10); // 45 because 9 + 8 + 7 + 6 + 5 + 4 + 3 + 2 + 1
// sumBelow(7); // 21 because 6 + 5 + 4 + 3 + 2 + 1
/* STRATEGY:
1) Base case: if n is 1 or 0 then return 0
 2) if n is positive, go DOWNWARDS until hit base cases, similar to factorial 2B) return one less than n + recursively call sumBelow(n - 1)
3) if n is negative, go UPWARDS until hit 0
*/
// Sums all integers strictly between n and 0 (exclusive of n itself).
// sumBelow(10) === 45, sumBelow(-7) === -21.
let sumBelow = function(n)
{
  // Base case: nothing lies between 1 (or 0) and 0.
  if (n === 1 || n === 0) return 0;
  // Step one integer toward zero from either direction.
  let next = n > 0 ? n - 1 : n + 1;
  return next + sumBelow(next);
};
// 6. Get the integers within a range (x, y).
// NOTE: NON INCLUSIVE start & end
// range(2,9); // [3,4,5,6,7,8]
/* STRATEGY:
1) x will be the moving target that's changing, y is fixed 'destination'
2) same pattern in cases of both negative and positive x
3) so the 2 different case is when x is greater than or less than y
3B) range(7, 2) vs range(2, 7)
4) Case 1: x < y (e.g: range(2, 7))
4B) base case: x is one LESS than y, or x = y, return []
4C) else, COUNT UP to y
* recursively return range of x + 1,
* concat to array literal with beginning val x + 1
5) Case 2: x > y (e.g range(7, 2))
5B) base case: x is one MORE than y, or x = y, return []
5C) else, COUNT DOWN to y
* return recursively range of x - 1,
* concat to array literal w/ beginning val of x - 1
*/
// Builds the list of integers strictly between x and y (both endpoints
// excluded), stepping from x toward y in either direction.
// range(2, 9) === [3,4,5,6,7,8]; range(9, 2) === [8,7,6,5,4,3].
let range = function(x, y)
{
  // Base case: adjacent or equal endpoints enclose no integers.
  if (x === y || x === y - 1 || x === y + 1) return [];
  // Move one step from x toward y and prepend that value.
  let next = x < y ? x + 1 : x - 1;
  return [next].concat(range(next, y));
};
// 7. Compute the exponent of a number.
// The exponent of a number says how many times the base number is used as a factor.
// 8^2 = 8 x 8 = 64. Here, 8 is the base and 2 is the exponent.
// exponent(4,3); // 64
// https://www.khanacademy.org/computing/computer-science/algorithms/recursive-algorithms/a/computing-powers-of-a-number
/* STRATEGY:
1) Base case: if exponent is 0, return 1
2) if exponent is POSITIVE: count down exponent until it hits base case
return base * recursively call exponent(base, exponent - 1)
*** For optimization ***
2C) EVEN: store return val of recursively calling exponent / 2
& multiply return val by itself.
3) if exponent is NEGATIVE: pass exponent in the form of
num ^ -exp = 1 / num ^ exp
3B) remember to put negative sign in front of exp to turn exponent back to positive!
*/
// Computes base raised to an integer exponent (positive or negative),
// using fast exponentiation by squaring for even exponents.
// Fixes: the original assigned `result` without let/const (an implicit
// global, and a strict-mode error), and leaned on the sibling isEven()
// helper where a simple parity check (exp % 2) is self-contained.
let exponent = function(base, exp)
{
  // Base case: anything to the 0th power is 1.
  if (exp === 0) return 1;
  // Negative exponent: base^-n === 1 / base^n.
  if (exp < 0) return 1 / exponent(base, -exp);
  if (exp % 2 === 0)
  {
    // Even exponent: compute base^(exp/2) once and square it,
    // halving the recursion depth (O(log exp) multiplications).
    const half = exponent(base, exp / 2);
    return half * half;
  }
  // Odd exponent: peel off one factor of base.
  return base * exponent(base, exp - 1);
};
/* 8. Determine if a number is a power of two.
i.e: 2 ^ something = number
powerOfTwo(1); // true
powerOfTwo(16); // true
powerOfTwo(10); // false
STRATEGY:
1) base case is when n is 1, return true,
2) if n is 0 or negative return false
3) recursively return the number divide by 2
*/
// Determines whether n is an exact power of two. A positive number is a
// power of two exactly when repeated halving lands precisely on 1.
let powerOfTwo = function(n)
{
  if (n === 1)
  {
    return true;
  }
  // Anything that halves past 1 without hitting it (or started <= 0)
  // cannot be a power of two.
  if (n < 1)
  {
    return false;
  }
  return powerOfTwo(n / 2);
};
// 9. Write a function that reverses a string.
// "abc" --> "cba"
/* STRATEGY:
1) base case: string has 1 charac, just return the entire string
2) else return the last charac + recursively call reverse on all but last
*/
// Reverses a string recursively: last character + reverse of the rest.
// Fix: the original base case only covered length 1, so reverse("")
// sliced "" forever and recursed infinitely. length <= 1 covers both
// the empty string and the single-character string.
let reverse = function(string)
{
  if (string.length <= 1) return string;
  return string.slice(-1) + reverse(string.slice(0, -1));
};
/* 10. Write a function that determines if a string is a palindrome.
e.g:
odd # of characs: 'kayak' --> true
even # of characs: 'adda' --> true
STRATEGY:
1) base case: string is 1 charac left (odd) or 0 charac left (even), then return true
2) else compare the first and last character, if that's equivalent
2B) then recursively return all characters in between
3) finally return false if first & charac are not equal
*/
// Determines whether a string reads the same forwards and backwards,
// case-insensitively. Compares the outermost pair of characters, then
// recurses on everything in between.
let palindrome = function(string)
{
  let len = string.length;
  // Base case: 0 or 1 characters left means every pair matched.
  if (len < 2) return true;
  let first = string.charAt(0).toLowerCase();
  let last = string.charAt(len - 1).toLowerCase();
  // Short-circuits to false on the first mismatched pair.
  return first === last && palindrome(string.slice(1, -1));
};
/* 11. Write a function that returns the remainder of x divided by y without using the
modulo (%) operator.
modulo(5,2) // 1
modulo(-5,2) // -1
modulo(17,5) // 2
modulo(22,6) // 4
STRATEGY:
* keep subtracting y until it becomes less than x, then it'll hit the main base case
* I.e: if the first number is smaller than the second, return that number
1) Base cases:
1A) y is 0, return NaN (e.g: 5 % 0 = NaN)
1B) y is negative, turn y to positive (e.g: 4 % -5 = 4)
1C) x is negative, invert x as well as the function (e.g: -4 % 5 = -4)
1D) main case: x is smaller than y, return x (e.g: 4 % 5 = 4)
2) Recursive case: return function of x subtract y, until it hits one of the base cases
*/
// Computes the remainder of x divided by y without the % operator, by
// repeated subtraction. Matches JS % semantics: the result takes the
// sign of the dividend, and the divisor's sign is ignored.
let modulo = function(x, y)
{
  // Division by zero has no remainder.
  if (y === 0) return NaN;
  // The divisor's sign never affects the result.
  let divisor = y < 0 ? -y : y;
  // A negative dividend mirrors the positive case with a flipped sign.
  if (x < 0) return -modulo(-x, divisor);
  // Once x drops below the divisor, x itself is the remainder.
  return x < divisor ? x : modulo(x - divisor, divisor);
};
/* 12. Write a function that multiplies two numbers without using the * operator or
Math methods. e.g: multiply(5, 6), multiply(5, -6)
STRATEGY:
* Similar to factorial and range problems
1) Base cases:
1B) y or x is 0, return 0 (anything multiply by 0 is 0)
1C) when y is 1, return x (anything multiply by 1 is itself)
2) If y is POSITIVE, COUNT DOWN y until it hits base case
2B) recursively add x to the recursive calls of y - 1
3) If y is NEGATIVE, COUNT UP y until it hits base case
3B) recursively add NEGATIVE x to recursive calls of y + 1
3C) Why? if x is positive & y is negative, will turn x negative & get a negative answer
And if x is negative & y is negative, will turn x positive & get a positive answer
*/
// Multiplies two integers using only addition/subtraction, by adding x
// to itself |y| times; a negative y subtracts instead so signs work out.
let multiply = function(x, y)
{
  // Anything times 0 is 0.
  if (x === 0 || y === 0) return 0;
  // Anything times 1 is itself.
  if (y === 1) return x;
  if (y > 0)
  {
    // Count y down toward 1, accumulating one x per step.
    return multiply(x, y - 1) + x;
  }
  // Count y up toward 0, removing one x per step (handles negative y).
  return multiply(x, y + 1) - x;
};
/* 13. Write a function that divides two numbers without using the / operator or
Math methods to arrive at an approximate quotient (ignore decimal endings).
E.g: divide(6, 2) or divide(-6, 2) or divide(-6, -2)
NOTE: This is INTEGER DIVISION
STRATEGY:
1) Base cases:
1B) if the divisor (y) is 0, return NaN (e.g: 5/0)
1C) if the dividend (x) is 0, return 0 (e.g: 0/5)
1D) if the dividend is less than divisor (e.g: 1/5 ) or less than negative divisor (e.g: 1/-5 = -1/5 ), return 0
1E) if divisor - dividend is 0 then return 1 (e.g: 5/5)
2) else return 1 + recursive return of divisor - dividend
E.g:
6/2 --> 1 + ((6-2), 2) --> 1 + ((4-2),2) --> hits base case -->
return 1 + the other 2 ones earlier = 3
NOTE: test cases do not account for evenly divided answers for negatives
(-6 / 2) since that is much more difficult
*/
// Integer division without the / operator: counts how many times y can be
// subtracted from x, truncating any decimal remainder toward zero.
// Fixes: in the original, any negative dividend with a positive divisor
// fell into the `x < y` base case and returned 0 (the `-1 + divide(x+y, y)`
// branch was unreachable), and the `x < -y` check made negative divisors
// return 0 as well (e.g. divide(-6, -2) -> 0). Normalizing signs first and
// recursing on positive operands handles all four sign combinations.
let divide = function(x, y)
{
  // Division by zero is undefined.
  if (y === 0) return NaN;
  // Pull the signs out front so the core loop works on positives only.
  if (y < 0) return -divide(x, -y);
  if (x < 0) return -divide(-x, y);
  // Base case: the divisor no longer fits into what's left.
  if (x < y) return 0;
  // One more whole divisor fits; subtract it and keep counting.
  return 1 + divide(x - y, y);
};
/* 14. Find the greatest common divisor (gcd) of two positive numbers. The GCD of two
integers is the greatest integer that divides both x and y with no remainder.
gcd(4,36); // 4
* NOTE: the test cases here returns null for negative numbers
* Use Euclidean Algorithm: https://www.khanacademy.org/computing/computer-science/cryptography/modarithmetic/a/the-euclidean-algorithm
STRATEGY:
1) Base cases: if either x or y is 0, return the other number
1B) return null if x or y is negative
(only b/c those are test cases; IRL, GCD for negative numbers is always positive)
2) Get the remainder of x / y
2B) y is now x, & remainder is now y. Recursively call function on y and remainder
*/
// Greatest common divisor of two non-negative integers via the Euclidean
// algorithm: gcd(x, y) === gcd(y, x mod y) until the remainder hits 0.
// Returns null for negative input (matches this exercise's test cases).
let gcd = function(x, y)
{
  // If either operand is 0, the other is the GCD.
  if (x === 0) return y;
  if (y === 0) return x;
  if (x < 0 || y < 0) return null;
  // Swap in the remainder and recurse; terminates because x % y < y.
  return gcd(y, x % y);
};
/* 15. Write a function that compares each character of two strings and returns true if
both are identical.
compareStr('house', 'houses') // false
compareStr('tomato', 'tomato') // true
STRATEGY: similar to palindrome problem
1) Base case: both strings are empty, then return true
2) if first charac of each is equal, then recursively compare the rest
3) otherwise return false
*/
let compareStr = function(str1, str2)
{
  // True only when both strings are character-for-character identical.
  if (str1 === "" && str2 === "") return true;  // both exhausted together
  let sameHead = str1.slice(0, 1) === str2.slice(0, 1);
  // Heads match: compare the tails; otherwise the strings differ.
  return sameHead ? compareStr(str1.slice(1), str2.slice(1)) : false;
};
/* 16. Write a function that accepts a string and creates an array where each letter
occupies an index of the array.
* Works similar to split on empty string ""
* createArray('hologram')).to.eql(['h','o','l','o','g','r','a','m'])
STRATEGY:
1) base case: if string is empty return empty array
2) return the first charac of string in array literal, concat to recursive calls
to all but first charac of string
*/
let createArray = function(str)
{
  // Split a string into an array of single-character strings, recursively.
  if (str.length === 0) return [];
  let head = str.slice(0, 1);  // first character
  return [head].concat(createArray(str.slice(1)));
};
/* 17. Reverse the order of an array
STRATEGY:
1) base case: if array is empty, then return []
2) else make copy of array, pop off the last elem in copy,
concat it to the recursive call of the function on all but last elements in the arr
*/
let reverseArr = function(array)
{
  // Return a NEW array with the elements in reverse order; input untouched.
  if (array.length === 0) return [];
  let last = array[array.length - 1];
  // Put the last element first, then reverse everything before it.
  return [last].concat(reverseArr(array.slice(0, -1)));
};
/* 18. Create a new array with a given value and length.
buildList(0,5) // [0,0,0,0,0]
buildList(7,3) // [7,7,7]
STRATEGY:
* COUNT DOWN length for each recursive call
1) Base case: if length is 0, return empty array []
2) recursively return the value in array literal, concat to
recursive calls of function, decreasing length by 1 each time
*/
let buildList = function(value, length)
{
  // Build an array holding `value` repeated `length` times, counting down.
  return length === 0
    ? []
    : [value].concat(buildList(value, length - 1));
};
/* 19. Implement FizzBuzz. Given integer n, return an array of the string representations of 1 to n.
For multiples of three, output 'Fizz' instead of the number.
For multiples of five, output 'Buzz' instead of the number.
For numbers which are multiples of both three and five, output “FizzBuzz” instead of the number.
fizzBuzz(5) // ['1','2','Fizz','4','Buzz']
STRATEGY: COUNT DOWN to base case of n = 0
1) Base case: when n is 0, return empty array
2) Have a variable to hold either n, or one of the fizzbuzz variations
2B) turn that elem to string, add it to array literal, and concat to recursive calls
of fizzBuzz, decreasing n by 1 each time.
* NOTE: the order of concat is important; it's different from prev examples!
* Why? To prevent array from printing out in reverse!
*/
let fizzBuzz = function(n)
{
  // Strings for 1..n with Fizz/Buzz substitutions.
  // Built back-to-front: recurse first, then append, so order comes out 1..n.
  if (n === 0) return [];
  let label;
  if (n % 3 === 0 && n % 5 === 0) label = "FizzBuzz";
  else if (n % 3 === 0) label = "Fizz";
  else if (n % 5 === 0) label = "Buzz";
  else label = n;
  return fizzBuzz(n - 1).concat([label.toString()]);
};
/*
20. Count the occurrence of a value in a list.
countOccurrence([2,7,4,4,1,4], 4) // 3
countOccurrence([2,'banana',4,4,1,'banana'], 'banana') // 2
STRATEGY:
1) Base case: if the array is empty then return 0
2) if the first value of array is equal to target, return
1 plus recursively result of calling the function on all but first element
2B) else return recursive result of calling the function on all but first element
*/
let countOccurrence = function(array, value)
{
  // Count strict-equality (===) matches of `value` in `array`.
  if (array.length === 0) return 0;
  let hit = array[0] === value ? 1 : 0;  // does the head match?
  return hit + countOccurrence(array.slice(1), value);
};
// 21. Write a recursive version of map.
// rMap([1,2,3], timesTwo); // [2,4,6]
let rMap = function(array, callback) {
  // Recursive map: return a new array of callback(element) for each element.
  // Base case: nothing left to map.
  if (array.length === 0) return [];
  return [callback(array[0])].concat(rMap(array.slice(1), callback));
};
// 22. Write a function that counts the number of times a key occurs in an object.
// let obj = {'e':{'x':'y'},'t':{'r':{'e':'r'},'p':{'y':'r'}},'y':'e'};
// countKeysInObj(obj, 'r') // 1
// countKeysInObj(obj, 'e') // 2
let countKeysInObj = function(obj, key) {
  // Count how many times `key` occurs as a property NAME in obj,
  // including inside nested objects.
  let count = 0;
  for (let k in obj) {
    if (k === key) count++;
    // Recurse into nested objects (guard against null, which is typeof "object").
    if (typeof obj[k] === "object" && obj[k] !== null) {
      count += countKeysInObj(obj[k], key);
    }
  }
  return count;
};
// 23. Write a function that counts the number of times a value occurs in an object.
// let obj = {'e':{'x':'y'},'t':{'r':{'e':'r'},'p':{'y':'r'}},'y':'e'};
// countValuesInObj(obj, 'r') // 2
// countValuesInObj(obj, 'e') // 1
let countValuesInObj = function(obj, value) {
  // Count how many times `value` occurs as a property VALUE in obj,
  // including inside nested objects.
  let count = 0;
  for (let k in obj) {
    if (typeof obj[k] === "object" && obj[k] !== null) {
      count += countValuesInObj(obj[k], value);  // descend into nested object
    } else if (obj[k] === value) {
      count++;
    }
  }
  return count;
};
// 24. Find all keys in an object (and nested objects) by a provided name and rename
// them to a provided new name while preserving the value stored at that key.
let replaceKeysInObj = function(obj, oldKey, newKey) {
  // Rename every occurrence of `oldKey` (at any nesting depth) to `newKey`,
  // preserving the stored value. Mutates `obj` in place and returns it.
  for (let k in obj) {
    // Recurse first so nested objects are fixed even when their own key is renamed.
    if (typeof obj[k] === "object" && obj[k] !== null) {
      replaceKeysInObj(obj[k], oldKey, newKey);
    }
    if (k === oldKey) {
      obj[newKey] = obj[k];
      delete obj[k];
    }
  }
  return obj;
};
/* 25. Get the first n Fibonacci numbers. In the Fibonacci sequence, each subsequent
number is the sum of the previous two.
Example: 0, 1, 1, 2, 3, 5, 8, 13, 21, 34.....
fibonacci(5); // [0,1,1,2,3,5]
STRATEGY:
1) Base case: if number is 1 or 2 return its corresponding array from [0] to [0, 1, 1]
* Use slice, & make ending number + 1 to account for zero indexing
2) Recursively return the function on n - 1.
When it hits base case, it will percolate up the stack
Get the sum of the last & second to last numbers in the array
Push them into the array
Return the array
*/
let fibonacci = function(n)
{
  // Fibonacci numbers from index 0 through n, e.g. fibonacci(5) -> [0,1,1,2,3,5].
  // Returns null for non-positive n.
  if (n <= 0) return null;
  if (n < 3) return [0, 1, 1].slice(0, n + 1);  // seed cases: n+1 entries
  // Build the shorter sequence, then append the sum of its last two entries.
  let seq = fibonacci(n - 1);
  let next = seq[seq.length - 1] + seq[seq.length - 2];
  seq.push(next);
  return seq;
};
// 26. Return the Fibonacci number located at index n of the Fibonacci sequence.
// [0,1,1,2,3,5,8,13,21]
// nthFibo(5); // 5
// nthFibo(7); // 13
// nthFibo(3); // 2
let nthFibo = function(n)
{
  // Fibonacci number at index n of [0,1,1,2,3,5,8,...]; null for negative n.
  if (n < 0) return null;
  // Indices 0 and 1 are their own values; otherwise sum the two predecessors.
  return n <= 1 ? n : nthFibo(n - 1) + nthFibo(n - 2);
};
// 27. Given an array of words, return a new array containing each word capitalized.
// let words = ['i', 'am', 'learning', 'recursion'];
// capitalizedWords(words); // ['I', 'AM', 'LEARNING', 'RECURSION']
let capitalizeWords = function(array) {
  // Return a new array with every word fully uppercased
  // (per the example: ['i','am'] -> ['I','AM']).
  if (array.length === 0) return [];
  return [array[0].toUpperCase()].concat(capitalizeWords(array.slice(1)));
};
// 28. Given an array of strings, capitalize the first letter of each index.
// capitalizeFirst(['car','poop','banana']); // ['Car','Poop','Banana']
let capitalizeFirst = function(array) {
  // Return a new array with only the FIRST letter of each string capitalized.
  if (array.length === 0) return [];
  let word = array[0].charAt(0).toUpperCase() + array[0].slice(1);
  return [word].concat(capitalizeFirst(array.slice(1)));
};
// 29. Return the sum of all even numbers in an object containing nested objects.
// let obj1 = {
// a: 2,
// b: {b: 2, bb: {b: 3, bb: {b: 2}}},
// c: {c: {c: 2}, cc: 'ball', ccc: 5},
// d: 1,
// e: {e: {e: 2}, ee: 'car'}
// };
// nestedEvenSum(obj1); // 10
let nestedEvenSum = function(obj) {
  // Sum all even NUMBERS found in obj, descending into nested objects.
  // Non-number values (strings etc.) are ignored.
  let sum = 0;
  for (let k in obj) {
    let v = obj[k];
    if (typeof v === "object" && v !== null) {
      sum += nestedEvenSum(v);            // recurse into nested object
    } else if (typeof v === "number" && v % 2 === 0) {
      sum += v;
    }
  }
  return sum;
};
/* 30. Flatten an array containing nested arrays.
flatten([1,[2],[3,[[4]]],5]); // [1,2,3,4,5]
STRATEGY:
1) Base case: regular element (non-array)
2) Through a loop, check each element, if it is an array,
recursively call the function on it again.
Add it to the variable elem,
2B) use concat to merge that call and save it to result variable at the end
of each loop iteration
*/
let flatten = function(array)
{
  // Recursively flatten arbitrarily nested arrays into one flat array.
  return array.reduce(function(acc, item) {
    // Arrays get flattened recursively; scalars are appended directly.
    return acc.concat(Array.isArray(item) ? flatten(item) : item);
  }, []);
};
// 31. Given a string, return an object containing tallies of each letter.
// letterTally('potato'); // {p:1, o:2, t:2, a:1}
let letterTally = function(str, obj) {
  // Tally each character of `str` into an object, e.g. 'potato' -> {p:1,o:2,t:2,a:1}.
  // `obj` is the accumulator threaded through the recursion; callers omit it.
  obj = obj || {};
  if (str === "") return obj;       // nothing left to tally
  let ch = str[0];
  obj[ch] = (obj[ch] || 0) + 1;     // first sighting starts at 1
  return letterTally(str.slice(1), obj);
};
// 32. Eliminate consecutive duplicates in a list. If the list contains repeated
// elements they should be replaced with a single copy of the element. The order of the
// elements should not be changed.
// compress([1,2,2,3,4,4,5,5,5]) // [1,2,3,4,5]
// compress([1,2,2,3,4,4,2,5,5,5,4,4]) // [1,2,3,4,2,5,4]
let compress = function(list) {
  // Collapse runs of consecutive duplicates to a single copy, preserving order.
  if (list.length <= 1) return list.slice();  // nothing left to compare
  if (list[0] === list[1]) {
    return compress(list.slice(1));           // drop the duplicate head
  }
  return [list[0]].concat(compress(list.slice(1)));
};
// 33. Augment every element in a list with a new value where each element is an array
// itself.
// augmentElements([[],[3],[7]], 5); // [[5],[3,5],[7,5]]
let augmentElements = function(array, aug) {
  // Append `aug` to a copy of each inner array, e.g. ([[],[3]], 5) -> [[5],[3,5]].
  if (array.length === 0) return [];
  // concat copies the inner array, so the caller's input is not mutated.
  return [array[0].concat([aug])].concat(augmentElements(array.slice(1), aug));
};
// 34. Reduce a series of zeroes to a single 0.
// minimizeZeroes([2,0,0,0,1,4]) // [2,0,1,4]
// minimizeZeroes([2,0,0,0,1,0,0,4]) // [2,0,1,0,4]
let minimizeZeroes = function(array) {
  // Collapse each run of consecutive zeroes to a single 0; other values kept as-is.
  if (array.length === 0) return [];
  // Drop this zero if the next element is also zero (keep only the run's last zero).
  if (array[0] === 0 && array[1] === 0) {
    return minimizeZeroes(array.slice(1));
  }
  return [array[0]].concat(minimizeZeroes(array.slice(1)));
};
// 35. Alternate the numbers in an array between positive and negative regardless of
// their original sign. The first number in the index always needs to be positive.
// alternateSign([2,7,8,3,1,4]) // [2,-7,8,-3,1,-4]
// alternateSign([-2,-7,8,3,-1,4]) // [2,-7,8,-3,1,-4]
let alternateSign = function(array, index) {
  // Force signs to alternate +,-,+,- regardless of the originals.
  // `index` tracks position through the recursion; callers omit it (defaults to 0).
  index = index || 0;
  if (index === array.length) return [];
  let magnitude = Math.abs(array[index]);
  // Even positions are positive, odd positions negative.
  let value = index % 2 === 0 ? magnitude : -magnitude;
  return [value].concat(alternateSign(array, index + 1));
};
// 36. Given a string, return a string with digits converted to their word equivalent.
// Assume all numbers are single digits (less than 10).
// numToText("I have 5 dogs and 6 ponies"); // "I have five dogs and six ponies"
let numToText = function(str) {
  // Replace each single digit in `str` with its English word; other chars pass through.
  const WORDS = ["zero","one","two","three","four","five","six","seven","eight","nine"];
  if (str === "") return "";
  let ch = str[0];
  let piece = (ch >= "0" && ch <= "9") ? WORDS[Number(ch)] : ch;
  return piece + numToText(str.slice(1));
};
// *** EXTRA CREDIT ***
// 37. Return the number of times a tag occurs in the DOM.
let tagCount = function(tag, node) {
  // TODO: unimplemented stub. Intended to walk the DOM subtree rooted at
  // `node` and count elements whose tag name matches `tag`. Requires a
  // browser/jsdom environment (document, element.children), so it cannot
  // be implemented or exercised in a plain Node context.
};
/* 38. Write a function for binary search.
let array = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15];
binarySearch(array, 5) // 5
https://www.khanacademy.org/computing/computer-science/algorithms/binary-search/a/binary-search
STRATEGY:
1) Base cases:
1A) if target is equal to element at midpoint, return midpoint
1B) if min is greater than max return null
2) Else, similar to iterative version:
* if target is less than elem at midpoint: look to the LEFT
recursively return function, passing in a new max as the midpoint - 1
* if target is more than elem at midpoint: look to the RIGHT
recursively return function, passing in a new min as the midpoint + 1
IMPORTANT CONCEPTS:
Note 1: how to find the midpoint correctly? 2 ways
1) (max - min)/2 + min
* WHY the extra + min? to offset when you're looking on the RIGHT
* E.g: from the left everything is OK with the formula (max - min) / 2
* From 0 to 5, midpoint is 2
* But from the RIGHT: 0 to 5, what is midpoint now? Still 2? No!
* Need to add the min (5) to 2, to get the correct midpoint! This is 7!!!
2) (max + min) / 2
* Same as above but is simpler
Note 2:
another way is to put the calls into the if condition. But MUST use <= or >=
if (min >= max)
{
//recursive calls
}
return null
*/
let binarySearch = function(array, target, min, max)
{
  // Recursive binary search over a sorted array.
  // Returns the index of `target`, or null if absent.
  // min/max default to the full range on the initial call.
  // FIX: the original sliced a fresh copy of the array on EVERY recursive
  // call (O(n) work per level) even though nothing is ever mutated — the
  // copy is dropped here; behavior is unchanged.
  if (min === undefined) min = 0;
  if (max === undefined) max = array.length - 1;
  if (min > max) return null;                 // window exhausted: not found
  let midIndex = Math.floor((max + min) / 2); // midpoint of current window
  if (target === array[midIndex]) return midIndex;
  // Halve the window toward the side that could still contain the target.
  return target < array[midIndex]
    ? binarySearch(array, target, min, midIndex - 1)
    : binarySearch(array, target, midIndex + 1, max);
};
// 39. Write a merge sort function.
// mergeSort([34,7,23,32,5,62]) // [5,7,23,32,34,62]
// https://www.khanacademy.org/computing/computer-science/algorithms/merge-sort/a/divide-and-conquer-algorithms
let mergeSort = function(array) {
  // Classic merge sort: split in half, recursively sort each half,
  // then merge the two sorted halves. Returns a new sorted array.
  if (array.length <= 1) return array.slice();  // trivially sorted
  let mid = Math.floor(array.length / 2);
  let left = mergeSort(array.slice(0, mid));
  let right = mergeSort(array.slice(mid));
  // Merge: repeatedly take the smaller head of the two halves.
  let merged = [];
  let i = 0;
  let j = 0;
  while (i < left.length && j < right.length) {
    merged.push(left[i] <= right[j] ? left[i++] : right[j++]); // <= keeps the sort stable
  }
  // One of the halves is exhausted; append whatever remains of the other.
  return merged.concat(left.slice(i)).concat(right.slice(j));
};
// 40. Deeply clone objects and arrays.
// let obj1 = {a:1,b:{bb:{bbb:2}},c:3};
// let obj2 = clone(obj1);
// console.log(obj2); // {a:1,b:{bb:{bbb:2}},c:3}
// obj1 === obj2 // false
let clone = function(input) {
  // Deep-clone plain objects and arrays; every nesting level is a new
  // object/array, so mutating the clone never affects the original.
  if (Array.isArray(input)) {
    return input.map(function(item) { return clone(item); });
  }
  if (typeof input === "object" && input !== null) {
    let copy = {};
    for (let k in input) {
      copy[k] = clone(input[k]);
    }
    return copy;
  }
  return input;  // primitive (or null): immutable, safe to return directly
};
| 54d105e63a4476aab2b1101e2ebe05e90da4e1d5 | [
"JavaScript"
] | 1 | JavaScript | mandytrinh/recursion-prompts | e638d1cab294fd6ba9b9915552703790710e3259 | 25fc43768bec5e1a62266a576592c3f2a8b5a3cc |
refs/heads/main | <file_sep># Bookshop-managament-system-project<file_sep>//*******Bookshop******
#include<stdio.h>
#include<stdlib.h>
#include<conio.h>
#include<string.h>
/* One sale record as stored (fixed-size, binary) in record.txt. */
struct bookshop
{
char date[40];    /* sale date, free-form text; also usable as a search key */
char cname[40];   /* customer name */
double Number;    /* customer mobile number (double so 10+ digit numbers fit) */
char bname[40];   /* book title */
char barcode[40]; /* book barcode: the key used for search/modify/delete */
float cost;       /* book price in rupees */
};
struct bookshop s; /* shared record buffer reused by every file operation below */
/* NOTE(review): the name `write` shadows POSIX write(2) on Unix-like systems. */
void write(); /* interactively append sale records to record.txt */
void display(); /* search records by barcode or date and print matches */
void edit(); /* modify one field of a record found by barcode (password-gated) */
void delrecord(); /* remove a record found by barcode (password-gated) */
/*
 * write: prompt for sale details in a loop and append each completed
 * record (one fixed-size struct bookshop) to record.txt in binary mode.
 * Loops until the user answers anything other than 'y'/'Y'.
 *
 * NOTE(review): gets() is unsafe (no bounds check, removed in C11) and
 * fflush(stdin) is undefined behavior per the C standard; both should be
 * replaced with fgets() + input cleanup. fopen's result is also not
 * checked for NULL here.
 */
void write() //Function definition
{
char ch='y'; /* loop control: 'y'/'Y' means "enter another record" */
FILE *rec;
rec=fopen("record.txt","ab+"); /* append-binary keeps existing records */
while(ch=='y'||ch=='Y' )
{
printf("\n ENTER DATE :: >");
scanf("%s",&s.date);
fflush(stdin); /* NOTE(review): UB; intended to discard the pending newline */
printf(" CUSTOMER NAME :: >");
gets(s.cname);
printf(" CUSTOMER MOBILE :: >");
scanf("%lf",&s.Number);
fflush(stdin);
printf(" BOOK NAME :: >");
gets(s.bname);
fflush(stdin);
printf(" BARCODE NUMBER :: >");
gets(s.barcode);
printf(" BOOK Rs. :: >");
scanf("%f",&s.cost);
fwrite(&s,sizeof(s),1,rec); /* persist the whole struct as one record */
printf("\n IF YOU WANT TO CREATE MORE RECORD\n Y/N :: >");
ch=getche(); /* conio.h: read one key without waiting for Enter */
}
fclose(rec);
printf("\n\n ENTRY RECORD SUCCESS!! \n");
}
/*
 * display: search record.txt either by barcode (choice 1) or by date
 * (choice 2) and print every matching record. Prints "RECORD NOT FOUND"
 * when nothing matches.
 *
 * NOTE(review): if the user enters a choice other than 1 or 2, `search`
 * stays uninitialized and the strcmp below is undefined behavior.
 */
void display() //Function definition
{
char search[40]; /* the barcode or date string to look for */
int found=0, choise;
FILE *rec;
rec=fopen("record.txt","rb");
printf(" PRESS <1> SEARCH BY BARCODE\n PRESS <2> SEARCH BY DATE\n CHOISE :: >");
scanf("%d",&choise);
if(choise==1)
{
printf("\n ENTER BARCODE NUMBER :: >");
scanf("%s",&search);
}
else if(choise==2)
{
printf("\n ENTER DATE :: >");
scanf("%s",&search);
}
if(rec==NULL)
{
printf("\nFILE ERROR");
return;
}
else
{
/* Linear scan: read one fixed-size record at a time until EOF. */
while(fread(&s,sizeof(s),1,rec))
{
/* A record matches if EITHER its barcode or its date equals the query. */
if(strcmp(s.barcode,search)==0||strcmp(s.date,search)==0)
{
found=1;
printf("\n DATE :: %s ",s.date);
printf("\n CUSTOMER NAME :: %s",strupr(s.cname)); /* strupr: non-standard (conio-era) uppercase-in-place */
printf("\n CUSTOMER MOBILE :: %.lf",s.Number);
printf("\n BOOK NAME :: %s",strupr(s.bname));
printf("\n BOOK BARCODE :: %s",s.barcode);
printf("\n BOOK Rs. :: %.2f\n",s.cost);
}
}
}
if(found==0)
{
printf(" RECORD NOT FOUND");
}
fclose(rec);
}
/*
 * edit: locate a record by barcode, show it, then (after a username/
 * password check) let the operator change one field and overwrite the
 * record in place via fseek back over the just-read struct.
 *
 * NOTE(review): credentials are hard-coded ("Ravi" / a sanitized
 * "<PASSWORD>" placeholder) — real authentication should never live in
 * source. gets() and fflush(stdin) issues as in write(). The goto-based
 * retry loop re-prompts forever on a wrong password.
 */
void edit() //Function definition
{
char user[40];
char password[40];
int num; /* which field to modify (menu choice 1-4) */
char lm[20]; /* barcode entered by the operator */
int size;
int flag=0; /* set to 1 once a matching record is found */
FILE *rec;
rec=fopen("record.txt","r+b"); /* read+write so the record can be rewritten in place */
printf("\n WHICH BARCODE RECORD DO YOU WANT\n TO MODIFY :: >");
scanf("%s",&lm);
if(rec==NULL)
{
printf("\nFILE ERROR");
return;
}
while(fread(&s,sizeof(s),1,rec))
{
if(strcmp(s.barcode,lm)==0)
{
flag=1;
/* Show the record about to be modified. */
printf("\n DATE :: %s ",s.date);
printf("\n CUSTOMER NAME :: %s",strupr(s.cname));
printf("\n CUSTOMER MOBILE :: %.lf",s.Number);
printf("\n BOOK NAME :: %s",strupr(s.bname));
printf("\n BOOK BARCODE :: %s",s.barcode);
printf("\n BOOK Rs. :: %.2f\n",s.cost);
lable: /* retry target for a failed password attempt */
printf("\n IF YOU WANT TO MODIFY THIS RECORD");
printf("\n PLEASE ENTER USER NAME AND PASSWORD");
fflush(stdin);
printf("\t\n\n USER NAME :: >");
gets(user);
printf(" PASSWORD :: >");
gets(password);
if((strcmp(user,"Ravi")==0) && (strcmp(password,"<PASSWORD>")==0))
{
printf("\n PRESS <1> FOR DATE");
printf("\n PRESS <2> FOR CUSTOMER NAME");
printf("\n PRESS <3> FOR BOOK NAME");
printf("\n PRESS <4> FOR BOOK Rs.");
printf("\n *************************************\n");
printf(" \n PLEASE ENTER YOUR CHOISE::>");
scanf("%d",&num);
switch (num)
{
case 1:
{
printf("\n ENTER DATE :: >");
scanf("%s",&s.date);
break;
}
case 2:
{
fflush(stdin);
printf(" CUSTOMER NAME :: >");
gets(s.cname);
break;
}
case 3:
{
fflush(stdin);
printf(" BOOK NAME :: >");
gets(s.bname);
break;
}
case 4:
{
printf(" BOOK Rs. :: >");
scanf("%f",&s.cost);
break;
}
}
/* Step the file pointer back over the record just read, then overwrite it. */
size=sizeof(s);
fseek(rec,-size,SEEK_CUR);
fwrite(&s,sizeof(s),1,rec);
fclose(rec);
printf(" \nRECORD UPDATE!!");
return; /* only the first matching record is ever modified */
}
if((strcmp(user,"Ravi")!=0) || (strcmp(password,"<PASSWORD>")!=0))
{
printf(" WRONG PASSWORD PLEASE TRY AGAIN\n");
goto lable;
}
}
}
if(flag==0)
{
printf("\n RECORD NOT FOUND");
}
}
/*
 * main: entry point. Authenticates the operator (hard-coded credentials,
 * sanitized "<PASSWORD>" placeholder), then runs the menu loop dispatching
 * to write/display/edit/delrecord until the user chooses 0 (exit).
 *
 * NOTE(review): `void main` is non-standard — should be `int main(void)`.
 * gets() is unsafe; a failed login exits instead of re-prompting (unlike
 * edit/delrecord, which retry via goto).
 */
void main() //Function calling section
{
char user[40];
char password[40];
int choise;
printf("\n *****************************************");
printf("\n * WELCOME TO RAVIRANJAN BOOKSHOP *");
printf("\n *****************************************");
printf("\t\n\n PLEASE ENTER USER NAME AND PASSWORD");
printf("\t\n\n USER NAME :: >");
gets(user);
printf(" PASSWORD :: >");
gets(password);
if((strcmp(user,"Ravi")==0) && (strcmp(password,"<PASSWORD>")==0))
{
do
{
/* Main menu: loops until choice 0. */
printf("\t\n\n\n *********WELCOME TO MY BOOKSHOP********\n");
printf("\n PRESS <1> FOR CREAT RECORD");
printf("\n PRESS <2> FOR SEARCH RECORD");
printf("\n PRESS <3> FOR MODIFY RECORD");
printf("\n PRESS <4> FOR DELETE RECORD");
printf("\n PRESS <0> FOR EXIT PROGRAM");
printf("\n\n YOUR CHOISE :: > ");
scanf("%d",&choise);
printf("\n ***************************************\n");
switch (choise)
{
case 1:
{
write(); /* create records */
break;
}
case 2:
{
display(); /* search records */
break;
}
case 3:
{
edit(); /* modify a record */
break;
}
case 4:
{
delrecord(); /* delete a record */
break;
}
case 0:
{
exit(0);
}
default:
{
printf("\nINVALID ENTRY");
}
}
}
while(choise!=0);
}
else
{
printf("\n\n WRONG PASSWORD PLEASE TRY AGAIN");
}
}
/*
 * delrecord: delete the record(s) matching a barcode. After the password
 * check, it copies every NON-matching record from record.txt into a
 * temporary delete.txt, then replaces record.txt with that file
 * (remove + rename) — the standard "rewrite without the victim" pattern
 * for fixed-size binary record files.
 *
 * NOTE(review): same hard-coded credential ("<PASSWORD>" placeholder),
 * gets()/fflush(stdin) issues, and infinite goto retry as in edit().
 */
void delrecord() //Function definition
{
char user[40];
char password[40];
char lm[20]; /* barcode to delete */
FILE *rec,*del;
printf("\n WHICH BARCODE RECORD DO YOU \n WANT TO DELETE :: >");
scanf("%s",&lm);
printf("\n IF YOU WANT TO DELETE THIS RECORD");
printf("\n PLEASE ENTER USER NAME AND PASSWORD");
lable: /* retry target for a failed password attempt */
fflush(stdin);
printf("\t\n\n USER NAME :: >");
gets(user);
printf(" PASSWORD :: >");
gets(password);
if((strcmp(user,"Ravi")==0) && (strcmp(password,"<PASSWORD>")==0))
{
rec=fopen("record.txt","r");
del=fopen("delete.txt","w+");
if(rec==NULL)
{
printf("\nFILE ERROR");
return;
}
else
/* Copy every record whose barcode does NOT match into the temp file. */
while(fread(&s,sizeof(s),1,rec))
{
if(strcmp(s.barcode,lm)!=0)
{
fwrite(&s,sizeof(s),1,del);
}
}
fclose(rec);
fclose(del);
/* Swap the filtered file in as the new record store. */
remove("record.txt");
rename("delete.txt","record.txt");
printf(" \nRECORD DELETED!!");
}
else
{
printf(" WRONG PASSWORD PLEASE TRY AGAIN\n");
goto lable;
}
}
//Thank you!!!!!
| 139461463951bb0547e3a91b24fbef9353c68477 | [
"Markdown",
"C"
] | 2 | Markdown | Raviranjankumar12/Bookshop-managament-system-project | 5f2aaf785270d46a3d6963cc071bc70bbaa3d22d | a7693bad2e681dc3ea98e0e65a324f63235c93b0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.